# Architectures and modes to be compiled. Consider these to be internal
# variables, don't override them (use the targets instead).
-ARCHES = ia32 x64 arm mipsel
+ARCHES = ia32 x64 arm a64 mipsel
DEFAULT_ARCHES = ia32 x64 arm
MODES = release debug optdebug
DEFAULT_MODES = release debug
-ANDROID_ARCHES = android_ia32 android_arm android_mipsel
+ANDROID_ARCHES = android_ia32 android_arm android_a64 android_mipsel
NACL_ARCHES = nacl_ia32 nacl_x64
# List of files that trigger Makefile regeneration:
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Those definitions should be consistent with the main Makefile
-ANDROID_ARCHES = android_ia32 android_arm android_mipsel
+ANDROID_ARCHES = android_ia32 android_arm android_a64 android_mipsel
MODES = release debug
# Generates all combinations of ANDROID ARCHES and MODES,
ifeq ($(ARCH), android_arm)
DEFINES = target_arch=arm v8_target_arch=arm android_target_arch=arm
DEFINES += arm_neon=0 arm_version=7
- TOOLCHAIN_ARCH = arm-linux-androideabi-4.6
+ TOOLCHAIN_ARCH = arm-linux-androideabi
+ TOOLCHAIN_PREFIX = $(TOOLCHAIN_ARCH)
+ TOOLCHAIN_VER = 4.6
else
- ifeq ($(ARCH), android_mipsel)
- DEFINES = target_arch=mipsel v8_target_arch=mipsel android_target_arch=mips
- DEFINES += mips_arch_variant=mips32r2
- TOOLCHAIN_ARCH = mipsel-linux-android-4.6
+ ifeq ($(ARCH), android_a64)
+ DEFINES = target_arch=a64 v8_target_arch=a64 android_target_arch=arm64
+ TOOLCHAIN_ARCH = aarch64-linux-android
+ TOOLCHAIN_PREFIX = $(TOOLCHAIN_ARCH)
+ TOOLCHAIN_VER = 4.8
else
- ifeq ($(ARCH), android_ia32)
- DEFINES = target_arch=ia32 v8_target_arch=ia32 android_target_arch=x86
- TOOLCHAIN_ARCH = x86-4.6
+ ifeq ($(ARCH), android_mipsel)
+ DEFINES = target_arch=mipsel v8_target_arch=mipsel
+ DEFINES += android_target_arch=mips mips_arch_variant=mips32r2
+ TOOLCHAIN_ARCH = mipsel-linux-android
+ TOOLCHAIN_PREFIX = $(TOOLCHAIN_ARCH)
+ TOOLCHAIN_VER = 4.6
+
else
- $(error Target architecture "${ARCH}" is not supported)
+ ifeq ($(ARCH), android_ia32)
+ DEFINES = target_arch=ia32 v8_target_arch=ia32 android_target_arch=x86
+ TOOLCHAIN_ARCH = x86
+ TOOLCHAIN_PREFIX = i686-linux-android
+ TOOLCHAIN_VER = 4.6
+ else
+ $(error Target architecture "${ARCH}" is not supported)
+ endif
endif
endif
endif
-TOOLCHAIN_PATH = ${ANDROID_NDK_ROOT}/toolchains/${TOOLCHAIN_ARCH}/prebuilt
+TOOLCHAIN_PATH = \
+ ${ANDROID_NDK_ROOT}/toolchains/${TOOLCHAIN_ARCH}-${TOOLCHAIN_VER}/prebuilt
ANDROID_TOOLCHAIN ?= ${TOOLCHAIN_PATH}/${TOOLCHAIN_DIR}
+
ifeq ($(wildcard $(ANDROID_TOOLCHAIN)),)
$(error Cannot find Android toolchain in "${ANDROID_TOOLCHAIN}". Please \
check that ANDROID_NDK_ROOT and ANDROID_NDK_HOST_ARCH are set \
.SECONDEXPANSION:
$(ANDROID_BUILDS): $(OUTDIR)/Makefile.$$@
@$(MAKE) -C "$(OUTDIR)" -f Makefile.$@ \
- CXX="$(ANDROID_TOOLCHAIN)/bin/*-g++" \
- AR="$(ANDROID_TOOLCHAIN)/bin/*-ar" \
- RANLIB="$(ANDROID_TOOLCHAIN)/bin/*-ranlib" \
- CC="$(ANDROID_TOOLCHAIN)/bin/*-gcc" \
- LD="$(ANDROID_TOOLCHAIN)/bin/*-ld" \
- LINK="$(ANDROID_TOOLCHAIN)/bin/*-g++" \
- BUILDTYPE=$(shell echo $(subst .,,$(suffix $@)) | \
- python -c "print raw_input().capitalize()") \
- builddir="$(shell pwd)/$(OUTDIR)/$@"
+ CXX="$(ANDROID_TOOLCHAIN)/bin/${TOOLCHAIN_PREFIX}-g++" \
+ AR="$(ANDROID_TOOLCHAIN)/bin/${TOOLCHAIN_PREFIX}-ar" \
+ RANLIB="$(ANDROID_TOOLCHAIN)/bin/${TOOLCHAIN_PREFIX}-ranlib" \
+ CC="$(ANDROID_TOOLCHAIN)/bin/${TOOLCHAIN_PREFIX}-gcc" \
+ LD="$(ANDROID_TOOLCHAIN)/bin/${TOOLCHAIN_PREFIX}-ld" \
+ LINK="$(ANDROID_TOOLCHAIN)/bin/${TOOLCHAIN_PREFIX}-g++" \
+ BUILDTYPE=$(shell echo $(subst .,,$(suffix $@)) | \
+ python -c "print raw_input().capitalize()") \
+ builddir="$(shell pwd)/$(OUTDIR)/$@"
# Android GYP file generation targets.
ANDROID_MAKEFILES = $(addprefix $(OUTDIR)/Makefile.,$(ANDROID_BUILDS))
$(ANDROID_MAKEFILES):
GYP_GENERATORS=make-android \
GYP_DEFINES="${DEFINES}" \
- CC="${ANDROID_TOOLCHAIN}/bin/*-gcc" \
- CXX="${ANDROID_TOOLCHAIN}/bin/*-g++" \
+ CC="${ANDROID_TOOLCHAIN}/bin/${TOOLCHAIN_PREFIX}-gcc" \
+ CXX="${ANDROID_TOOLCHAIN}/bin/${TOOLCHAIN_PREFIX}-g++" \
PYTHONPATH="$(shell pwd)/tools/generate_shim_headers:$(PYTHONPATH)" \
build/gyp/gyp --generator-output="${OUTDIR}" build/all.gyp \
-Ibuild/standalone.gypi --depth=. -Ibuild/android.gypi \
'-L<(android_stlport_libs)/x86',
],
}],
+ ['target_arch=="a64"', {
+ 'ldflags': [
+ '-L<(android_stlport_libs)/arm64',
+ ],
+ }],
],
}],
['target_arch=="ia32"', {
],
'target_conditions': [
['_type=="executable"', {
+ 'conditions': [
+ ['target_arch=="a64"', {
+ 'ldflags': [
+ '-Wl,-dynamic-linker,/system/bin/linker64',
+ ],
+ }, {
+ 'ldflags': [
+ '-Wl,-dynamic-linker,/system/bin/linker',
+ ],
+ }]
+ ],
'ldflags': [
'-Bdynamic',
- '-Wl,-dynamic-linker,/system/bin/linker',
- '-Wl,--gc-sections',
'-Wl,-z,nocopyreloc',
# crtbegin_dynamic.o should be the last item in ldflags.
'<(android_lib)/crtbegin_dynamic.o',
# to gyp.
'host_arch%':
'<!(uname -m | sed -e "s/i.86/ia32/;\
- s/x86_64/x64/;s/amd64/x64/;s/arm.*/arm/;s/mips.*/mipsel/")',
+ s/x86_64/x64/;\
+ s/amd64/x64/;\
+ s/aarch64/a64/;\
+ s/arm.*/arm/;\
+ s/mips.*/mipsel/")',
}, {
# OS!="linux" and OS!="freebsd" and OS!="openbsd" and
# OS!="netbsd" and OS!="mac"
'conditions': [
['(v8_target_arch=="arm" and host_arch!="arm") or \
+ (v8_target_arch=="a64" and host_arch!="a64") or \
(v8_target_arch=="mipsel" and host_arch!="mipsel") or \
(v8_target_arch=="x64" and host_arch!="x64") or \
(OS=="android" or OS=="qnx")', {
}], # _toolset=="target"
],
}], # v8_target_arch=="arm"
+ ['v8_target_arch=="a64"', {
+ 'defines': [
+ 'V8_TARGET_ARCH_A64',
+ ],
+ }],
['v8_target_arch=="ia32"', {
'defines': [
'V8_TARGET_ARCH_IA32',
}],
],
}],
- ['(OS=="linux") and (v8_target_arch=="x64")', {
+ ['(OS=="linux" or OS=="android") and \
+ (v8_target_arch=="x64" or v8_target_arch=="a64")', {
# Check whether the host compiler and target compiler support the
# '-m64' option and set it if so.
'target_conditions': [
OS=="qnx"', {
'cflags': [ '-Wall', '<(werror)', '-W', '-Wno-unused-parameter',
'-Wnon-virtual-dtor', '-Woverloaded-virtual',
- '<(wno_array_bounds)' ],
+ '<(wno_array_bounds)',
+ ],
'conditions': [
['v8_optimized_debug==0', {
'cflags!': [
--- /dev/null
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_A64_ASSEMBLER_A64_INL_H_
+#define V8_A64_ASSEMBLER_A64_INL_H_
+
+#include "a64/assembler-a64.h"
+#include "cpu.h"
+#include "debug.h"
+
+
+namespace v8 {
+namespace internal {
+
+
+void RelocInfo::apply(intptr_t delta) {
+ UNIMPLEMENTED();
+}
+
+
+void RelocInfo::set_target_address(Address target, WriteBarrierMode mode) {
+ ASSERT(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_));
+ Assembler::set_target_address_at(pc_, target);
+ if (mode == UPDATE_WRITE_BARRIER && host() != NULL && IsCodeTarget(rmode_)) {
+ Object* target_code = Code::GetCodeFromTargetAddress(target);
+ host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
+ host(), this, HeapObject::cast(target_code));
+ }
+}
+
+
+inline unsigned CPURegister::code() const {
+ ASSERT(IsValid());
+ return reg_code;
+}
+
+
+inline CPURegister::RegisterType CPURegister::type() const {
+ ASSERT(IsValidOrNone());
+ return reg_type;
+}
+
+
+inline RegList CPURegister::Bit() const {
+ ASSERT(reg_code < (sizeof(RegList) * kBitsPerByte));
+ return IsValid() ? 1UL << reg_code : 0;
+}
+
+
+inline unsigned CPURegister::SizeInBits() const {
+ ASSERT(IsValid());
+ return reg_size;
+}
+
+
+inline int CPURegister::SizeInBytes() const {
+ ASSERT(IsValid());
+ ASSERT(SizeInBits() % 8 == 0);
+ return reg_size / 8;
+}
+
+
+inline bool CPURegister::Is32Bits() const {
+ ASSERT(IsValid());
+ return reg_size == 32;
+}
+
+
+inline bool CPURegister::Is64Bits() const {
+ ASSERT(IsValid());
+ return reg_size == 64;
+}
+
+
+inline bool CPURegister::IsValid() const {
+ if (IsValidRegister() || IsValidFPRegister()) {
+ ASSERT(!IsNone());
+ return true;
+ } else {
+ ASSERT(IsNone());
+ return false;
+ }
+}
+
+
+inline bool CPURegister::IsValidRegister() const {
+ return IsRegister() &&
+ ((reg_size == kWRegSize) || (reg_size == kXRegSize)) &&
+ ((reg_code < kNumberOfRegisters) || (reg_code == kSPRegInternalCode));
+}
+
+
+inline bool CPURegister::IsValidFPRegister() const {
+ return IsFPRegister() &&
+ ((reg_size == kSRegSize) || (reg_size == kDRegSize)) &&
+ (reg_code < kNumberOfFPRegisters);
+}
+
+
+inline bool CPURegister::IsNone() const {
+ // kNoRegister types should always have size 0 and code 0.
+ ASSERT((reg_type != kNoRegister) || (reg_code == 0));
+ ASSERT((reg_type != kNoRegister) || (reg_size == 0));
+
+ return reg_type == kNoRegister;
+}
+
+
+inline bool CPURegister::Is(const CPURegister& other) const {
+ ASSERT(IsValidOrNone() && other.IsValidOrNone());
+ return (reg_code == other.reg_code) && (reg_size == other.reg_size) &&
+ (reg_type == other.reg_type);
+}
+
+
+inline bool CPURegister::IsRegister() const {
+ return reg_type == kRegister;
+}
+
+
+inline bool CPURegister::IsFPRegister() const {
+ return reg_type == kFPRegister;
+}
+
+
+inline bool CPURegister::IsSameSizeAndType(const CPURegister& other) const {
+ return (reg_size == other.reg_size) && (reg_type == other.reg_type);
+}
+
+
+inline bool CPURegister::IsValidOrNone() const {
+ return IsValid() || IsNone();
+}
+
+
+inline bool CPURegister::IsZero() const {
+ ASSERT(IsValid());
+ return IsRegister() && (reg_code == kZeroRegCode);
+}
+
+
+inline bool CPURegister::IsSP() const {
+ ASSERT(IsValid());
+ return IsRegister() && (reg_code == kSPRegInternalCode);
+}
+
+
+inline void CPURegList::Combine(const CPURegList& other) {
+ ASSERT(IsValid());
+ ASSERT(other.type() == type_);
+ ASSERT(other.RegisterSizeInBits() == size_);
+ list_ |= other.list();
+}
+
+
+inline void CPURegList::Remove(const CPURegList& other) {
+ ASSERT(IsValid());
+ ASSERT(other.type() == type_);
+ ASSERT(other.RegisterSizeInBits() == size_);
+ list_ &= ~other.list();
+}
+
+
+inline void CPURegList::Combine(const CPURegister& other) {
+ ASSERT(other.type() == type_);
+ ASSERT(other.SizeInBits() == size_);
+ Combine(other.code());
+}
+
+
+inline void CPURegList::Remove(const CPURegister& other) {
+ ASSERT(other.type() == type_);
+ ASSERT(other.SizeInBits() == size_);
+ Remove(other.code());
+}
+
+
+inline void CPURegList::Combine(int code) {
+ ASSERT(IsValid());
+ ASSERT(CPURegister::Create(code, size_, type_).IsValid());
+ list_ |= (1UL << code);
+}
+
+
+inline void CPURegList::Remove(int code) {
+ ASSERT(IsValid());
+ ASSERT(CPURegister::Create(code, size_, type_).IsValid());
+ list_ &= ~(1UL << code);
+}
+
+
+inline Register Register::XRegFromCode(unsigned code) {
+  // This function returns the zero register when code == 31; the stack
+  // pointer cannot be returned.
+ ASSERT(code < kNumberOfRegisters);
+ return Register::Create(code, kXRegSize);
+}
+
+
+inline Register Register::WRegFromCode(unsigned code) {
+ ASSERT(code < kNumberOfRegisters);
+ return Register::Create(code, kWRegSize);
+}
+
+
+inline FPRegister FPRegister::SRegFromCode(unsigned code) {
+ ASSERT(code < kNumberOfFPRegisters);
+ return FPRegister::Create(code, kSRegSize);
+}
+
+
+inline FPRegister FPRegister::DRegFromCode(unsigned code) {
+ ASSERT(code < kNumberOfFPRegisters);
+ return FPRegister::Create(code, kDRegSize);
+}
+
+
+inline Register CPURegister::W() const {
+ ASSERT(IsValidRegister());
+ return Register::WRegFromCode(reg_code);
+}
+
+
+inline Register CPURegister::X() const {
+ ASSERT(IsValidRegister());
+ return Register::XRegFromCode(reg_code);
+}
+
+
+inline FPRegister CPURegister::S() const {
+ ASSERT(IsValidFPRegister());
+ return FPRegister::SRegFromCode(reg_code);
+}
+
+
+inline FPRegister CPURegister::D() const {
+ ASSERT(IsValidFPRegister());
+ return FPRegister::DRegFromCode(reg_code);
+}
+
+
+// Operand.
+#define DECLARE_INT_OPERAND_CONSTRUCTOR(type) \
+Operand::Operand(type immediate, RelocInfo::Mode rmode) \
+ : immediate_(immediate), \
+ reg_(NoReg), \
+ rmode_(rmode) {}
+DECLARE_INT_OPERAND_CONSTRUCTOR(int64_t)
+DECLARE_INT_OPERAND_CONSTRUCTOR(uint64_t)
+DECLARE_INT_OPERAND_CONSTRUCTOR(int32_t) // NOLINT(readability/casting)
+DECLARE_INT_OPERAND_CONSTRUCTOR(uint32_t)
+#undef DECLARE_INT_OPERAND_CONSTRUCTOR
+
+Operand::Operand(Register reg, Shift shift, unsigned shift_amount)
+ : reg_(reg),
+ shift_(shift),
+ extend_(NO_EXTEND),
+ shift_amount_(shift_amount),
+ rmode_(reg.Is64Bits() ? RelocInfo::NONE64 : RelocInfo::NONE32) {
+ ASSERT(reg.Is64Bits() || (shift_amount < kWRegSize));
+ ASSERT(reg.Is32Bits() || (shift_amount < kXRegSize));
+ ASSERT(!reg.IsSP());
+}
+
+
+Operand::Operand(Register reg, Extend extend, unsigned shift_amount)
+ : reg_(reg),
+ shift_(NO_SHIFT),
+ extend_(extend),
+ shift_amount_(shift_amount),
+ rmode_(reg.Is64Bits() ? RelocInfo::NONE64 : RelocInfo::NONE32) {
+ ASSERT(reg.IsValid());
+ ASSERT(shift_amount <= 4);
+ ASSERT(!reg.IsSP());
+
+ // Extend modes SXTX and UXTX require a 64-bit register.
+ ASSERT(reg.Is64Bits() || ((extend != SXTX) && (extend != UXTX)));
+}
+
+
+Operand::Operand(Smi* value)
+ : immediate_(reinterpret_cast<intptr_t>(value)),
+ reg_(NoReg),
+ rmode_(RelocInfo::NONE64) {}
+
+
+bool Operand::IsImmediate() const {
+ return reg_.Is(NoReg);
+}
+
+
+bool Operand::IsShiftedRegister() const {
+ return reg_.IsValid() && (shift_ != NO_SHIFT);
+}
+
+
+bool Operand::IsExtendedRegister() const {
+ return reg_.IsValid() && (extend_ != NO_EXTEND);
+}
+
+
+bool Operand::IsZero() const {
+ if (IsImmediate()) {
+ return immediate() == 0;
+ } else {
+ return reg().IsZero();
+ }
+}
+
+
+Operand Operand::ToExtendedRegister() const {
+ ASSERT(IsShiftedRegister());
+ ASSERT((shift_ == LSL) && (shift_amount_ <= 4));
+ return Operand(reg_, reg_.Is64Bits() ? UXTX : UXTW, shift_amount_);
+}
+
+
+int64_t Operand::immediate() const {
+ ASSERT(IsImmediate());
+ return immediate_;
+}
+
+
+Register Operand::reg() const {
+ ASSERT(IsShiftedRegister() || IsExtendedRegister());
+ return reg_;
+}
+
+
+Shift Operand::shift() const {
+ ASSERT(IsShiftedRegister());
+ return shift_;
+}
+
+
+Extend Operand::extend() const {
+ ASSERT(IsExtendedRegister());
+ return extend_;
+}
+
+
+unsigned Operand::shift_amount() const {
+ ASSERT(IsShiftedRegister() || IsExtendedRegister());
+ return shift_amount_;
+}
+
+
+Operand Operand::UntagSmi(Register smi) {
+ ASSERT(smi.Is64Bits());
+ return Operand(smi, ASR, kSmiShift);
+}
+
+
+Operand Operand::UntagSmiAndScale(Register smi, int scale) {
+ ASSERT(smi.Is64Bits());
+ ASSERT((scale >= 0) && (scale <= (64 - kSmiValueSize)));
+ if (scale > kSmiShift) {
+ return Operand(smi, LSL, scale - kSmiShift);
+ } else if (scale < kSmiShift) {
+ return Operand(smi, ASR, kSmiShift - scale);
+ }
+ return Operand(smi);
+}
+
+
+MemOperand::MemOperand(Register base, ptrdiff_t offset, AddrMode addrmode)
+ : base_(base), regoffset_(NoReg), offset_(offset), addrmode_(addrmode),
+ shift_(NO_SHIFT), extend_(NO_EXTEND), shift_amount_(0) {
+ ASSERT(base.Is64Bits() && !base.IsZero());
+}
+
+
+MemOperand::MemOperand(Register base,
+ Register regoffset,
+ Extend extend,
+ unsigned shift_amount)
+ : base_(base), regoffset_(regoffset), offset_(0), addrmode_(Offset),
+ shift_(NO_SHIFT), extend_(extend), shift_amount_(shift_amount) {
+ ASSERT(base.Is64Bits() && !base.IsZero());
+ ASSERT(!regoffset.IsSP());
+ ASSERT((extend == UXTW) || (extend == SXTW) || (extend == SXTX));
+
+ // SXTX extend mode requires a 64-bit offset register.
+ ASSERT(regoffset.Is64Bits() || (extend != SXTX));
+}
+
+
+MemOperand::MemOperand(Register base,
+ Register regoffset,
+ Shift shift,
+ unsigned shift_amount)
+ : base_(base), regoffset_(regoffset), offset_(0), addrmode_(Offset),
+ shift_(shift), extend_(NO_EXTEND), shift_amount_(shift_amount) {
+ ASSERT(base.Is64Bits() && !base.IsZero());
+ ASSERT(regoffset.Is64Bits() && !regoffset.IsSP());
+ ASSERT(shift == LSL);
+}
+
+
+MemOperand::MemOperand(Register base, const Operand& offset, AddrMode addrmode)
+ : base_(base), addrmode_(addrmode) {
+ ASSERT(base.Is64Bits() && !base.IsZero());
+
+ if (offset.IsImmediate()) {
+ offset_ = offset.immediate();
+
+ regoffset_ = NoReg;
+ } else if (offset.IsShiftedRegister()) {
+ ASSERT(addrmode == Offset);
+
+ regoffset_ = offset.reg();
+ shift_= offset.shift();
+ shift_amount_ = offset.shift_amount();
+
+ extend_ = NO_EXTEND;
+ offset_ = 0;
+
+ // These assertions match those in the shifted-register constructor.
+ ASSERT(regoffset_.Is64Bits() && !regoffset_.IsSP());
+ ASSERT(shift_ == LSL);
+ } else {
+ ASSERT(offset.IsExtendedRegister());
+ ASSERT(addrmode == Offset);
+
+ regoffset_ = offset.reg();
+ extend_ = offset.extend();
+ shift_amount_ = offset.shift_amount();
+
+ shift_= NO_SHIFT;
+ offset_ = 0;
+
+ // These assertions match those in the extended-register constructor.
+ ASSERT(!regoffset_.IsSP());
+ ASSERT((extend_ == UXTW) || (extend_ == SXTW) || (extend_ == SXTX));
+ ASSERT((regoffset_.Is64Bits() || (extend_ != SXTX)));
+ }
+}
+
+bool MemOperand::IsImmediateOffset() const {
+ return (addrmode_ == Offset) && regoffset_.Is(NoReg);
+}
+
+
+bool MemOperand::IsRegisterOffset() const {
+ return (addrmode_ == Offset) && !regoffset_.Is(NoReg);
+}
+
+
+bool MemOperand::IsPreIndex() const {
+ return addrmode_ == PreIndex;
+}
+
+
+bool MemOperand::IsPostIndex() const {
+ return addrmode_ == PostIndex;
+}
+
+Operand MemOperand::OffsetAsOperand() const {
+ if (IsImmediateOffset()) {
+ return offset();
+ } else {
+ ASSERT(IsRegisterOffset());
+ if (extend() == NO_EXTEND) {
+ return Operand(regoffset(), shift(), shift_amount());
+ } else {
+ return Operand(regoffset(), extend(), shift_amount());
+ }
+ }
+}
+
+
+Address Assembler::target_pointer_address_at(Address pc) {
+ Instruction* instr = reinterpret_cast<Instruction*>(pc);
+ ASSERT(instr->IsLdrLiteralX());
+ return reinterpret_cast<Address>(instr->ImmPCOffsetTarget());
+}
+
+
+// Read/Modify the code target address in the branch/call instruction at pc.
+Address Assembler::target_address_at(Address pc) {
+ return Memory::Address_at(target_pointer_address_at(pc));
+}
+
+
+Address Assembler::target_address_from_return_address(Address pc) {
+ // Returns the address of the call target from the return address that will
+ // be returned to after a call.
+ // Call sequence on A64 is:
+ // ldr ip0, #... @ load from literal pool
+ // blr ip0
+ Address candidate = pc - 2 * kInstructionSize;
+ Instruction* instr = reinterpret_cast<Instruction*>(candidate);
+ USE(instr);
+ ASSERT(instr->IsLdrLiteralX());
+ return candidate;
+}
+
+
+Address Assembler::return_address_from_call_start(Address pc) {
+ // The call, generated by MacroAssembler::Call, is one of two possible
+ // sequences:
+ //
+ // Without relocation:
+ // movz ip0, #(target & 0x000000000000ffff)
+ // movk ip0, #(target & 0x00000000ffff0000)
+ // movk ip0, #(target & 0x0000ffff00000000)
+ // movk ip0, #(target & 0xffff000000000000)
+ // blr ip0
+ //
+ // With relocation:
+ // ldr ip0, =target
+ // blr ip0
+ //
+ // The return address is immediately after the blr instruction in both cases,
+ // so it can be found by adding the call size to the address at the start of
+ // the call sequence.
+ STATIC_ASSERT(Assembler::kCallSizeWithoutRelocation == 5 * kInstructionSize);
+ STATIC_ASSERT(Assembler::kCallSizeWithRelocation == 2 * kInstructionSize);
+
+ Instruction* instr = reinterpret_cast<Instruction*>(pc);
+ if (instr->IsMovz()) {
+ // Verify the instruction sequence.
+ ASSERT(instr->following(1)->IsMovk());
+ ASSERT(instr->following(2)->IsMovk());
+ ASSERT(instr->following(3)->IsMovk());
+ ASSERT(instr->following(4)->IsBranchAndLinkToRegister());
+ return pc + Assembler::kCallSizeWithoutRelocation;
+ } else {
+ // Verify the instruction sequence.
+ ASSERT(instr->IsLdrLiteralX());
+ ASSERT(instr->following(1)->IsBranchAndLinkToRegister());
+ return pc + Assembler::kCallSizeWithRelocation;
+ }
+}
+
+
+void Assembler::deserialization_set_special_target_at(
+ Address constant_pool_entry, Address target) {
+ Memory::Address_at(constant_pool_entry) = target;
+}
+
+
+void Assembler::set_target_address_at(Address pc, Address target) {
+ Memory::Address_at(target_pointer_address_at(pc)) = target;
+ // Intuitively, we would think it is necessary to always flush the
+ // instruction cache after patching a target address in the code as follows:
+ // CPU::FlushICache(pc, sizeof(target));
+  // However, on A64, no instruction is actually patched in the case of
+  // embedded constants of the form:
+  //   ldr   ip0, [pc, #...]
+  // since the instruction accessing this address in the constant pool remains
+  // unchanged, a flush is not required.
+}
+
+
+int RelocInfo::target_address_size() {
+ return kPointerSize;
+}
+
+
+Address RelocInfo::target_address() {
+ ASSERT(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_));
+ return Assembler::target_address_at(pc_);
+}
+
+
+Address RelocInfo::target_address_address() {
+ ASSERT(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_)
+ || rmode_ == EMBEDDED_OBJECT
+ || rmode_ == EXTERNAL_REFERENCE);
+ return Assembler::target_pointer_address_at(pc_);
+}
+
+
+Object* RelocInfo::target_object() {
+ ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
+ return reinterpret_cast<Object*>(Assembler::target_address_at(pc_));
+}
+
+
+Handle<Object> RelocInfo::target_object_handle(Assembler* origin) {
+ ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
+ return Handle<Object>(reinterpret_cast<Object**>(
+ Assembler::target_address_at(pc_)));
+}
+
+
+void RelocInfo::set_target_object(Object* target, WriteBarrierMode mode) {
+ ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
+ ASSERT(!target->IsConsString());
+ Assembler::set_target_address_at(pc_, reinterpret_cast<Address>(target));
+ if (mode == UPDATE_WRITE_BARRIER &&
+ host() != NULL &&
+ target->IsHeapObject()) {
+ host()->GetHeap()->incremental_marking()->RecordWrite(
+ host(), &Memory::Object_at(pc_), HeapObject::cast(target));
+ }
+}
+
+
+Address RelocInfo::target_reference() {
+ ASSERT(rmode_ == EXTERNAL_REFERENCE);
+ return Assembler::target_address_at(pc_);
+}
+
+
+Address RelocInfo::target_runtime_entry(Assembler* origin) {
+ ASSERT(IsRuntimeEntry(rmode_));
+ return target_address();
+}
+
+
+void RelocInfo::set_target_runtime_entry(Address target,
+ WriteBarrierMode mode) {
+ ASSERT(IsRuntimeEntry(rmode_));
+ if (target_address() != target) set_target_address(target, mode);
+}
+
+
+Handle<Cell> RelocInfo::target_cell_handle() {
+ UNIMPLEMENTED();
+ Cell *null_cell = NULL;
+ return Handle<Cell>(null_cell);
+}
+
+
+Cell* RelocInfo::target_cell() {
+ ASSERT(rmode_ == RelocInfo::CELL);
+ return Cell::FromValueAddress(Memory::Address_at(pc_));
+}
+
+
+void RelocInfo::set_target_cell(Cell* cell, WriteBarrierMode mode) {
+ UNIMPLEMENTED();
+}
+
+
+static const int kCodeAgeSequenceSize = 5 * kInstructionSize;
+static const int kCodeAgeStubEntryOffset = 3 * kInstructionSize;
+
+
+Handle<Object> RelocInfo::code_age_stub_handle(Assembler* origin) {
+ UNREACHABLE(); // This should never be reached on A64.
+ return Handle<Object>();
+}
+
+
+Code* RelocInfo::code_age_stub() {
+ ASSERT(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
+ ASSERT(!Code::IsYoungSequence(pc_));
+ // Read the stub entry point from the code age sequence.
+ Address stub_entry_address = pc_ + kCodeAgeStubEntryOffset;
+ return Code::GetCodeFromTargetAddress(Memory::Address_at(stub_entry_address));
+}
+
+
+void RelocInfo::set_code_age_stub(Code* stub) {
+ ASSERT(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
+ ASSERT(!Code::IsYoungSequence(pc_));
+ // Overwrite the stub entry point in the code age sequence. This is loaded as
+ // a literal so there is no need to call FlushICache here.
+ Address stub_entry_address = pc_ + kCodeAgeStubEntryOffset;
+ Memory::Address_at(stub_entry_address) = stub->instruction_start();
+}
+
+
+Address RelocInfo::call_address() {
+ ASSERT((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
+ (IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
+ // For the above sequences the Relocinfo points to the load literal loading
+ // the call address.
+ return Assembler::target_address_at(pc_);
+}
+
+
+void RelocInfo::set_call_address(Address target) {
+ ASSERT((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
+ (IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
+ Assembler::set_target_address_at(pc_, target);
+ if (host() != NULL) {
+ Object* target_code = Code::GetCodeFromTargetAddress(target);
+ host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
+ host(), this, HeapObject::cast(target_code));
+ }
+}
+
+
+void RelocInfo::WipeOut() {
+ ASSERT(IsEmbeddedObject(rmode_) ||
+ IsCodeTarget(rmode_) ||
+ IsRuntimeEntry(rmode_) ||
+ IsExternalReference(rmode_));
+ Assembler::set_target_address_at(pc_, NULL);
+}
+
+
+bool RelocInfo::IsPatchedReturnSequence() {
+ // The sequence must be:
+ // ldr ip0, [pc, #offset]
+ // blr ip0
+ // See a64/debug-a64.cc BreakLocationIterator::SetDebugBreakAtReturn().
+ Instruction* i1 = reinterpret_cast<Instruction*>(pc_);
+ Instruction* i2 = i1->following();
+ return i1->IsLdrLiteralX() && (i1->Rt() == ip0.code()) &&
+ i2->IsBranchAndLinkToRegister() && (i2->Rn() == ip0.code());
+}
+
+
+bool RelocInfo::IsPatchedDebugBreakSlotSequence() {
+ Instruction* current_instr = reinterpret_cast<Instruction*>(pc_);
+ return !current_instr->IsNop(Assembler::DEBUG_BREAK_NOP);
+}
+
+
+void RelocInfo::Visit(Isolate* isolate, ObjectVisitor* visitor) {
+ RelocInfo::Mode mode = rmode();
+ if (mode == RelocInfo::EMBEDDED_OBJECT) {
+ visitor->VisitEmbeddedPointer(this);
+ } else if (RelocInfo::IsCodeTarget(mode)) {
+ visitor->VisitCodeTarget(this);
+ } else if (mode == RelocInfo::CELL) {
+ visitor->VisitCell(this);
+ } else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
+ visitor->VisitExternalReference(this);
+#ifdef ENABLE_DEBUGGER_SUPPORT
+ } else if (((RelocInfo::IsJSReturn(mode) &&
+ IsPatchedReturnSequence()) ||
+ (RelocInfo::IsDebugBreakSlot(mode) &&
+ IsPatchedDebugBreakSlotSequence())) &&
+ isolate->debug()->has_break_points()) {
+ visitor->VisitDebugTarget(this);
+#endif
+ } else if (RelocInfo::IsRuntimeEntry(mode)) {
+ visitor->VisitRuntimeEntry(this);
+ }
+}
+
+
+template<typename StaticVisitor>
+void RelocInfo::Visit(Heap* heap) {
+ RelocInfo::Mode mode = rmode();
+ if (mode == RelocInfo::EMBEDDED_OBJECT) {
+ StaticVisitor::VisitEmbeddedPointer(heap, this);
+ } else if (RelocInfo::IsCodeTarget(mode)) {
+ StaticVisitor::VisitCodeTarget(heap, this);
+ } else if (mode == RelocInfo::CELL) {
+ StaticVisitor::VisitCell(heap, this);
+ } else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
+ StaticVisitor::VisitExternalReference(this);
+#ifdef ENABLE_DEBUGGER_SUPPORT
+ } else if (heap->isolate()->debug()->has_break_points() &&
+ ((RelocInfo::IsJSReturn(mode) &&
+ IsPatchedReturnSequence()) ||
+ (RelocInfo::IsDebugBreakSlot(mode) &&
+ IsPatchedDebugBreakSlotSequence()))) {
+ StaticVisitor::VisitDebugTarget(heap, this);
+#endif
+ } else if (RelocInfo::IsRuntimeEntry(mode)) {
+ StaticVisitor::VisitRuntimeEntry(this);
+ }
+}
+
+
+LoadStoreOp Assembler::LoadOpFor(const CPURegister& rt) {
+ ASSERT(rt.IsValid());
+ if (rt.IsRegister()) {
+ return rt.Is64Bits() ? LDR_x : LDR_w;
+ } else {
+ ASSERT(rt.IsFPRegister());
+ return rt.Is64Bits() ? LDR_d : LDR_s;
+ }
+}
+
+
+LoadStorePairOp Assembler::LoadPairOpFor(const CPURegister& rt,
+ const CPURegister& rt2) {
+ ASSERT(AreSameSizeAndType(rt, rt2));
+ USE(rt2);
+ if (rt.IsRegister()) {
+ return rt.Is64Bits() ? LDP_x : LDP_w;
+ } else {
+ ASSERT(rt.IsFPRegister());
+ return rt.Is64Bits() ? LDP_d : LDP_s;
+ }
+}
+
+
+LoadStoreOp Assembler::StoreOpFor(const CPURegister& rt) {
+ ASSERT(rt.IsValid());
+ if (rt.IsRegister()) {
+ return rt.Is64Bits() ? STR_x : STR_w;
+ } else {
+ ASSERT(rt.IsFPRegister());
+ return rt.Is64Bits() ? STR_d : STR_s;
+ }
+}
+
+
+LoadStorePairOp Assembler::StorePairOpFor(const CPURegister& rt,
+ const CPURegister& rt2) {
+ ASSERT(AreSameSizeAndType(rt, rt2));
+ USE(rt2);
+ if (rt.IsRegister()) {
+ return rt.Is64Bits() ? STP_x : STP_w;
+ } else {
+ ASSERT(rt.IsFPRegister());
+ return rt.Is64Bits() ? STP_d : STP_s;
+ }
+}
+
+
+LoadStorePairNonTemporalOp Assembler::LoadPairNonTemporalOpFor(
+ const CPURegister& rt, const CPURegister& rt2) {
+ ASSERT(AreSameSizeAndType(rt, rt2));
+ USE(rt2);
+ if (rt.IsRegister()) {
+ return rt.Is64Bits() ? LDNP_x : LDNP_w;
+ } else {
+ ASSERT(rt.IsFPRegister());
+ return rt.Is64Bits() ? LDNP_d : LDNP_s;
+ }
+}
+
+
+LoadStorePairNonTemporalOp Assembler::StorePairNonTemporalOpFor(
+ const CPURegister& rt, const CPURegister& rt2) {
+ ASSERT(AreSameSizeAndType(rt, rt2));
+ USE(rt2);
+ if (rt.IsRegister()) {
+ return rt.Is64Bits() ? STNP_x : STNP_w;
+ } else {
+ ASSERT(rt.IsFPRegister());
+ return rt.Is64Bits() ? STNP_d : STNP_s;
+ }
+}
+
+
+int Assembler::LinkAndGetInstructionOffsetTo(Label* label) {
+ ASSERT(kStartOfLabelLinkChain == 0);
+ int offset = LinkAndGetByteOffsetTo(label);
+ ASSERT(IsAligned(offset, kInstructionSize));
+ return offset >> kInstructionSizeLog2;
+}
+
+
+Instr Assembler::Flags(FlagsUpdate S) {
+ if (S == SetFlags) {
+ return 1 << FlagsUpdate_offset;
+ } else if (S == LeaveFlags) {
+ return 0 << FlagsUpdate_offset;
+ }
+ UNREACHABLE();
+ return 0;
+}
+
+
+Instr Assembler::Cond(Condition cond) {
+ return cond << Condition_offset;
+}
+
+
+Instr Assembler::ImmPCRelAddress(int imm21) {
+ CHECK(is_int21(imm21));
+ Instr imm = static_cast<Instr>(truncate_to_int21(imm21));
+ Instr immhi = (imm >> ImmPCRelLo_width) << ImmPCRelHi_offset;
+ Instr immlo = imm << ImmPCRelLo_offset;
+ return (immhi & ImmPCRelHi_mask) | (immlo & ImmPCRelLo_mask);
+}
+
+
+Instr Assembler::ImmUncondBranch(int imm26) {
+ CHECK(is_int26(imm26));
+ return truncate_to_int26(imm26) << ImmUncondBranch_offset;
+}
+
+
+Instr Assembler::ImmCondBranch(int imm19) {
+ CHECK(is_int19(imm19));
+ return truncate_to_int19(imm19) << ImmCondBranch_offset;
+}
+
+
+Instr Assembler::ImmCmpBranch(int imm19) {
+ CHECK(is_int19(imm19));
+ return truncate_to_int19(imm19) << ImmCmpBranch_offset;
+}
+
+
+Instr Assembler::ImmTestBranch(int imm14) {
+ CHECK(is_int14(imm14));
+ return truncate_to_int14(imm14) << ImmTestBranch_offset;
+}
+
+
+Instr Assembler::ImmTestBranchBit(unsigned bit_pos) {
+ ASSERT(is_uint6(bit_pos));
+ // Subtract five from the shift offset, as we need bit 5 from bit_pos.
+ unsigned b5 = bit_pos << (ImmTestBranchBit5_offset - 5);
+ unsigned b40 = bit_pos << ImmTestBranchBit40_offset;
+ b5 &= ImmTestBranchBit5_mask;
+ b40 &= ImmTestBranchBit40_mask;
+ return b5 | b40;
+}
+
+
+// Size field: selects the 64-bit or 32-bit form of an instruction based on
+// the destination register's width.
+Instr Assembler::SF(Register rd) {
+ return rd.Is64Bits() ? SixtyFourBits : ThirtyTwoBits;
+}
+
+
+// Add/subtract immediate: a 12-bit value, optionally pre-shifted left by 12
+// (the shift is recorded via ShiftAddSub_offset).
+Instr Assembler::ImmAddSub(int64_t imm) {
+ ASSERT(IsImmAddSub(imm));
+ if (is_uint12(imm)) { // No shift required.
+ return imm << ImmAddSub_offset;
+ } else {
+ return ((imm >> 12) << ImmAddSub_offset) | (1 << ShiftAddSub_offset);
+ }
+}
+
+
+// Bitfield imms field: 6 bits for X registers, 5 for W registers.
+Instr Assembler::ImmS(unsigned imms, unsigned reg_size) {
+ ASSERT(((reg_size == kXRegSize) && is_uint6(imms)) ||
+ ((reg_size == kWRegSize) && is_uint5(imms)));
+ USE(reg_size);
+ return imms << ImmS_offset;
+}
+
+
+// Bitfield immr field: 6 bits for X registers, 5 for W registers.
+Instr Assembler::ImmR(unsigned immr, unsigned reg_size) {
+ ASSERT(((reg_size == kXRegSize) && is_uint6(immr)) ||
+ ((reg_size == kWRegSize) && is_uint5(immr)));
+ USE(reg_size);
+ ASSERT(is_uint6(immr));
+ return immr << ImmR_offset;
+}
+
+
+// Logical-immediate "set bits" field. For W registers the top value range
+// is restricted (hence the is_uint6(imms + 3) check — NOTE(review): this
+// mirrors the encodable W-register patterns; confirm against the ISA).
+Instr Assembler::ImmSetBits(unsigned imms, unsigned reg_size) {
+ ASSERT((reg_size == kWRegSize) || (reg_size == kXRegSize));
+ ASSERT(is_uint6(imms));
+ ASSERT((reg_size == kXRegSize) || is_uint6(imms + 3));
+ USE(reg_size);
+ return imms << ImmSetBits_offset;
+}
+
+
+// Logical-immediate rotate field: 6 bits for X registers, 5 for W.
+Instr Assembler::ImmRotate(unsigned immr, unsigned reg_size) {
+ ASSERT((reg_size == kWRegSize) || (reg_size == kXRegSize));
+ ASSERT(((reg_size == kXRegSize) && is_uint6(immr)) ||
+ ((reg_size == kWRegSize) && is_uint5(immr)));
+ USE(reg_size);
+ return immr << ImmRotate_offset;
+}
+
+
+// 19-bit signed literal-load offset (ldr <reg>, #imm19).
+Instr Assembler::ImmLLiteral(int imm19) {
+ CHECK(is_int19(imm19));
+ return truncate_to_int19(imm19) << ImmLLiteral_offset;
+}
+
+
+// Logical-immediate N bit; must be clear for 32-bit (W) operations.
+Instr Assembler::BitN(unsigned bitn, unsigned reg_size) {
+ ASSERT((reg_size == kWRegSize) || (reg_size == kXRegSize));
+ ASSERT((reg_size == kXRegSize) || (bitn == 0));
+ USE(reg_size);
+ return bitn << BitN_offset;
+}
+
+
+// --- Operand, load/store, system and FP field encoders ---
+Instr Assembler::ShiftDP(Shift shift) {
+ ASSERT(shift == LSL || shift == LSR || shift == ASR || shift == ROR);
+ return shift << ShiftDP_offset;
+}
+
+
+Instr Assembler::ImmDPShift(unsigned amount) {
+ ASSERT(is_uint6(amount));
+ return amount << ImmDPShift_offset;
+}
+
+
+Instr Assembler::ExtendMode(Extend extend) {
+ return extend << ExtendMode_offset;
+}
+
+
+// Left shift applied together with an extend mode; at most 4.
+Instr Assembler::ImmExtendShift(unsigned left_shift) {
+ ASSERT(left_shift <= 4);
+ return left_shift << ImmExtendShift_offset;
+}
+
+
+// 5-bit immediate operand of ccmp/ccmn.
+Instr Assembler::ImmCondCmp(unsigned imm) {
+ ASSERT(is_uint5(imm));
+ return imm << ImmCondCmp_offset;
+}
+
+
+// Extracts the 4 NZCV bits from a StatusFlags value and places them in the
+// conditional-compare nzcv field.
+Instr Assembler::Nzcv(StatusFlags nzcv) {
+ return ((nzcv >> Flags_offset) & 0xf) << Nzcv_offset;
+}
+
+
+// Unsigned, size-scaled 12-bit load/store offset.
+Instr Assembler::ImmLSUnsigned(int imm12) {
+ ASSERT(is_uint12(imm12));
+ return imm12 << ImmLSUnsigned_offset;
+}
+
+
+// Signed 9-bit unscaled load/store offset.
+Instr Assembler::ImmLS(int imm9) {
+ ASSERT(is_int9(imm9));
+ return truncate_to_int9(imm9) << ImmLS_offset;
+}
+
+
+// Load/store-pair offset: imm7 must be a multiple of the access size and is
+// stored pre-scaled.
+Instr Assembler::ImmLSPair(int imm7, LSDataSize size) {
+ ASSERT(((imm7 >> size) << size) == imm7);
+ int scaled_imm7 = imm7 >> size;
+ ASSERT(is_int7(scaled_imm7));
+ return truncate_to_int7(scaled_imm7) << ImmLSPair_offset;
+}
+
+
+// Single-bit shift flag for register-offset loads/stores.
+Instr Assembler::ImmShiftLS(unsigned shift_amount) {
+ ASSERT(is_uint1(shift_amount));
+ return shift_amount << ImmShiftLS_offset;
+}
+
+
+// 16-bit payload of exception-generating instructions (e.g. brk/hlt).
+Instr Assembler::ImmException(int imm16) {
+ ASSERT(is_uint16(imm16));
+ return imm16 << ImmException_offset;
+}
+
+
+Instr Assembler::ImmSystemRegister(int imm15) {
+ ASSERT(is_uint15(imm15));
+ return imm15 << ImmSystemRegister_offset;
+}
+
+
+Instr Assembler::ImmHint(int imm7) {
+ ASSERT(is_uint7(imm7));
+ return imm7 << ImmHint_offset;
+}
+
+
+Instr Assembler::ImmBarrierDomain(int imm2) {
+ ASSERT(is_uint2(imm2));
+ return imm2 << ImmBarrierDomain_offset;
+}
+
+
+Instr Assembler::ImmBarrierType(int imm2) {
+ ASSERT(is_uint2(imm2));
+ return imm2 << ImmBarrierType_offset;
+}
+
+
+// The access-size field occupies the top bits of a LoadStoreOp, so a plain
+// right shift extracts it (the ASSERT pins that layout).
+LSDataSize Assembler::CalcLSDataSize(LoadStoreOp op) {
+ ASSERT((SizeLS_offset + SizeLS_width) == (kInstructionSize * 8));
+ return static_cast<LSDataSize>(op >> SizeLS_offset);
+}
+
+
+// 16-bit immediate of movz/movn/movk.
+Instr Assembler::ImmMoveWide(uint64_t imm) {
+ ASSERT(is_uint16(imm));
+ return imm << ImmMoveWide_offset;
+}
+
+
+// Half-word selector (0-3) for the move-wide instructions.
+Instr Assembler::ShiftMoveWide(int64_t shift) {
+ ASSERT(is_uint2(shift));
+ return shift << ShiftMoveWide_offset;
+}
+
+
+Instr Assembler::FPType(FPRegister fd) {
+ return fd.Is64Bits() ? FP64 : FP32;
+}
+
+
+Instr Assembler::FPScale(unsigned scale) {
+ ASSERT(is_uint6(scale));
+ return scale << FPScale_offset;
+}
+
+
+// Returns the zero register matching |reg|'s width (xzr for 64-bit,
+// wzr for 32-bit).
+const Register& Assembler::AppropriateZeroRegFor(const CPURegister& reg) const {
+ return reg.Is64Bits() ? xzr : wzr;
+}
+
+
+// Emits a literal load for a value that needs relocation.
+void Assembler::LoadRelocated(const CPURegister& rt, const Operand& operand) {
+ LoadRelocatedValue(rt, operand, LDR_x_lit);
+}
+
+
+// Grows the buffer when less than kGap bytes remain, and triggers a pending
+// constant-pool check once pc_ passes next_buffer_check_.
+inline void Assembler::CheckBuffer() {
+ ASSERT(pc_ < (buffer_ + buffer_size_));
+ if (buffer_space() < kGap) {
+ GrowBuffer();
+ }
+ if (pc_offset() >= next_buffer_check_) {
+ CheckConstPool(false, true);
+ }
+}
+
+
+// Returns the pending AST id; it must have been set (non-None).
+TypeFeedbackId Assembler::RecordedAstId() {
+ ASSERT(!recorded_ast_id_.IsNone());
+ return recorded_ast_id_;
+}
+
+
+void Assembler::ClearRecordedAstId() {
+ recorded_ast_id_ = TypeFeedbackId::None();
+}
+
+
+} } // namespace v8::internal
+
+#endif // V8_A64_ASSEMBLER_A64_INL_H_
--- /dev/null
+// Copyright 2013 the V8 project authors. All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#if V8_TARGET_ARCH_A64
+
+#define A64_DEFINE_REG_STATICS
+
+#include "a64/assembler-a64-inl.h"
+
+namespace v8 {
+namespace internal {
+
+
+// -----------------------------------------------------------------------------
+// CpuFeatures utilities (for V8 compatibility).
+
+// Exposes the supported-features bitmask through an ExternalReference so
+// generated code can read it.
+ExternalReference ExternalReference::cpu_features() {
+ return ExternalReference(&CpuFeatures::supported_);
+}
+
+
+// -----------------------------------------------------------------------------
+// CPURegList utilities.
+
+// Removes the lowest-numbered register from the list and returns it.
+// Returns NoCPUReg if the list is empty.
+CPURegister CPURegList::PopLowestIndex() {
+ ASSERT(IsValid());
+ if (IsEmpty()) {
+ return NoCPUReg;
+ }
+ int index = CountTrailingZeros(list_, kRegListSizeInBits);
+ // Build the test bit with a RegList-width constant: list_ can hold
+ // kRegListSizeInBits registers, and a plain int (1 << index) overflows
+ // (undefined behaviour) for index >= 31.
+ ASSERT((static_cast<RegList>(1) << index) & list_);
+ Remove(index);
+ return CPURegister::Create(index, size_, type_);
+}
+
+
+// Removes the highest-numbered register from the list and returns it.
+// Returns NoCPUReg if the list is empty.
+CPURegister CPURegList::PopHighestIndex() {
+ ASSERT(IsValid());
+ if (IsEmpty()) {
+ return NoCPUReg;
+ }
+ int index = CountLeadingZeros(list_, kRegListSizeInBits);
+ index = kRegListSizeInBits - 1 - index;
+ // Build the test bit with a RegList-width constant: index can be up to
+ // kRegListSizeInBits - 1, so a plain int (1 << index) would overflow
+ // (undefined behaviour) for index >= 31.
+ ASSERT((static_cast<RegList>(1) << index) & list_);
+ Remove(index);
+ return CPURegister::Create(index, size_, type_);
+}
+
+
+// Strips the callee-saved registers of this list's bank (general-purpose or
+// FP) from the list. A typeless list must already be empty.
+void CPURegList::RemoveCalleeSaved() {
+ if (type() == CPURegister::kRegister) {
+ Remove(GetCalleeSaved(RegisterSizeInBits()));
+ } else if (type() == CPURegister::kFPRegister) {
+ Remove(GetCalleeSavedFP(RegisterSizeInBits()));
+ } else {
+ ASSERT(type() == CPURegister::kNoRegister);
+ ASSERT(IsEmpty());
+ // The list must already be empty, so do nothing.
+ }
+}
+
+
+// Callee-saved general-purpose registers: x19-x29.
+CPURegList CPURegList::GetCalleeSaved(unsigned size) {
+ return CPURegList(CPURegister::kRegister, size, 19, 29);
+}
+
+
+// Callee-saved FP registers: d8-d15.
+CPURegList CPURegList::GetCalleeSavedFP(unsigned size) {
+ return CPURegList(CPURegister::kFPRegister, size, 8, 15);
+}
+
+
+CPURegList CPURegList::GetCallerSaved(unsigned size) {
+ // Registers x0-x18 and lr (x30) are caller-saved.
+ CPURegList list = CPURegList(CPURegister::kRegister, size, 0, 18);
+ list.Combine(lr);
+ return list;
+}
+
+
+CPURegList CPURegList::GetCallerSavedFP(unsigned size) {
+ // Registers d0-d7 and d16-d31 are caller-saved.
+ CPURegList list = CPURegList(CPURegister::kFPRegister, size, 0, 7);
+ list.Combine(CPURegList(CPURegister::kFPRegister, size, 16, 31));
+ return list;
+}
+
+
+// This function defines the list of registers which are associated with a
+// safepoint slot. Safepoint register slots are saved contiguously on the stack.
+// MacroAssembler::SafepointRegisterStackIndex handles mapping from register
+// code to index in the safepoint register slots. Any change here can affect
+// this mapping.
+// In summary: callee-saved + JS caller-saved, minus ip0/ip1 and jssp,
+// plus x18 and lr.
+CPURegList CPURegList::GetSafepointSavedRegisters() {
+ CPURegList list = CPURegList::GetCalleeSaved();
+ list.Combine(CPURegList(CPURegister::kRegister, kXRegSize, kJSCallerSaved));
+
+ // Note that unfortunately we can't use symbolic names for registers and have
+ // to directly use register codes. This is because this function is used to
+ // initialize some static variables and we can't rely on register variables
+ // to be initialized due to static initialization order issues in C++.
+
+ // Drop ip0 and ip1 (i.e. x16 and x17), as they should not be expected to be
+ // preserved outside of the macro assembler.
+ list.Remove(16);
+ list.Remove(17);
+
+ // Add x18 to the safepoint list, as although it's not in kJSCallerSaved, it
+ // is a caller-saved register according to the procedure call standard.
+ list.Combine(18);
+
+ // Drop jssp as the stack pointer doesn't need to be included.
+ list.Remove(28);
+
+ // Add the link register (x30) to the safepoint list.
+ list.Combine(30);
+
+ return list;
+}
+
+
+// -----------------------------------------------------------------------------
+// Implementation of RelocInfo
+
+// No relocation modes need GC-time patching on this port.
+const int RelocInfo::kApplyMask = 0;
+
+
+bool RelocInfo::IsCodedSpecially() {
+ // The deserializer needs to know whether a pointer is specially coded. Being
+ // specially coded on A64 means that it is a movz/movk sequence. We don't
+ // generate those for relocatable pointers.
+ return false;
+}
+
+
+// Copies |instruction_count| instructions over the code at pc_ and flushes
+// the instruction cache for the patched range.
+void RelocInfo::PatchCode(byte* instructions, int instruction_count) {
+ // Patch the code at the current address with the supplied instructions.
+ Instr* pc = reinterpret_cast<Instr*>(pc_);
+ Instr* instr = reinterpret_cast<Instr*>(instructions);
+ for (int i = 0; i < instruction_count; i++) {
+ *(pc + i) = *(instr + i);
+ }
+
+ // Indicate that code has changed.
+ CPU::FlushICache(pc_, instruction_count * kInstructionSize);
+}
+
+
+// Patch the code at the current PC with a call to the target address.
+// Additional guard instructions can be added if required.
+void RelocInfo::PatchCodeWithCall(Address target, int guard_bytes) {
+ UNIMPLEMENTED();
+}
+
+
+// Returns true if any two valid registers among the arguments alias, i.e.
+// share a register code within the same bank (general-purpose or FP).
+// Invalid (NoReg) arguments are ignored. Detection works by comparing the
+// count of valid registers with the count of distinct register bits.
+bool AreAliased(const CPURegister& reg1, const CPURegister& reg2,
+ const CPURegister& reg3, const CPURegister& reg4,
+ const CPURegister& reg5, const CPURegister& reg6,
+ const CPURegister& reg7, const CPURegister& reg8) {
+ int number_of_valid_regs = 0;
+ int number_of_valid_fpregs = 0;
+
+ RegList unique_regs = 0;
+ RegList unique_fpregs = 0;
+
+ const CPURegister regs[] = {reg1, reg2, reg3, reg4, reg5, reg6, reg7, reg8};
+
+ for (unsigned i = 0; i < sizeof(regs) / sizeof(regs[0]); i++) {
+ if (regs[i].IsRegister()) {
+ number_of_valid_regs++;
+ unique_regs |= regs[i].Bit();
+ } else if (regs[i].IsFPRegister()) {
+ number_of_valid_fpregs++;
+ unique_fpregs |= regs[i].Bit();
+ } else {
+ ASSERT(!regs[i].IsValid());
+ }
+ }
+
+ int number_of_unique_regs =
+ CountSetBits(unique_regs, sizeof(unique_regs) * kBitsPerByte);
+ int number_of_unique_fpregs =
+ CountSetBits(unique_fpregs, sizeof(unique_fpregs) * kBitsPerByte);
+
+ ASSERT(number_of_valid_regs >= number_of_unique_regs);
+ ASSERT(number_of_valid_fpregs >= number_of_unique_fpregs);
+
+ // A duplicate code makes the valid count exceed the unique-bit count.
+ return (number_of_valid_regs != number_of_unique_regs) ||
+ (number_of_valid_fpregs != number_of_unique_fpregs);
+}
+
+
+// Returns true if every valid argument has the same size and type as reg1.
+// reg1 must be valid; invalid (NoReg) arguments are skipped, so trailing
+// defaults do not affect the result.
+bool AreSameSizeAndType(const CPURegister& reg1, const CPURegister& reg2,
+ const CPURegister& reg3, const CPURegister& reg4,
+ const CPURegister& reg5, const CPURegister& reg6,
+ const CPURegister& reg7, const CPURegister& reg8) {
+ ASSERT(reg1.IsValid());
+ bool match = true;
+ match &= !reg2.IsValid() || reg2.IsSameSizeAndType(reg1);
+ match &= !reg3.IsValid() || reg3.IsSameSizeAndType(reg1);
+ match &= !reg4.IsValid() || reg4.IsSameSizeAndType(reg1);
+ match &= !reg5.IsValid() || reg5.IsSameSizeAndType(reg1);
+ match &= !reg6.IsValid() || reg6.IsSameSizeAndType(reg1);
+ match &= !reg7.IsValid() || reg7.IsSameSizeAndType(reg1);
+ match &= !reg8.IsValid() || reg8.IsSameSizeAndType(reg1);
+ return match;
+}
+
+
+// Immediate operand referring to an external (C++) address; tagged with
+// EXTERNAL_REFERENCE relocation mode.
+Operand::Operand(const ExternalReference& f)
+ : immediate_(reinterpret_cast<intptr_t>(f.address())),
+ reg_(NoReg),
+ rmode_(RelocInfo::EXTERNAL_REFERENCE) {}
+
+
+// Immediate operand holding a heap object handle (embedded object, needs
+// relocation) or a raw non-heap value (e.g. a Smi, no relocation).
+Operand::Operand(Handle<Object> handle) : reg_(NoReg) {
+ AllowDeferredHandleDereference using_raw_address;
+
+ // Verify all Objects referred by code are NOT in new space.
+ Object* obj = *handle;
+ if (obj->IsHeapObject()) {
+ ASSERT(!HeapObject::cast(obj)->GetHeap()->InNewSpace(obj));
+ immediate_ = reinterpret_cast<intptr_t>(handle.location());
+ rmode_ = RelocInfo::EMBEDDED_OBJECT;
+ } else {
+ STATIC_ASSERT(sizeof(intptr_t) == sizeof(int64_t));
+ immediate_ = reinterpret_cast<intptr_t>(obj);
+ rmode_ = RelocInfo::NONE64;
+ }
+}
+
+
+// Returns true if the operand's immediate must carry relocation info.
+// External references only need it when serializing (snapshot builds).
+bool Operand::NeedsRelocation() const {
+ if (rmode_ == RelocInfo::EXTERNAL_REFERENCE) {
+#ifdef DEBUG
+ if (!Serializer::enabled()) {
+ Serializer::TooLateToEnableNow();
+ }
+#endif
+ return Serializer::enabled();
+ }
+
+ return !RelocInfo::IsNone(rmode_);
+}
+
+
+// Assembler
+
+Assembler::Assembler(Isolate* isolate, void* buffer, int buffer_size)
+ : AssemblerBase(isolate, buffer, buffer_size),
+ recorded_ast_id_(TypeFeedbackId::None()),
+ positions_recorder_(this) {
+ const_pool_blocked_nesting_ = 0;
+ Reset();
+}
+
+
+// All pending constant-pool entries must have been emitted and const-pool
+// blocking must be balanced before destruction.
+Assembler::~Assembler() {
+ ASSERT(num_pending_reloc_info_ == 0);
+ ASSERT(const_pool_blocked_nesting_ == 0);
+}
+
+
+// Rewinds the assembler to an empty buffer: pc_ back to the start, the
+// reloc-info writer back to the end, and all constant-pool bookkeeping
+// cleared.
+void Assembler::Reset() {
+#ifdef DEBUG
+ ASSERT((pc_ >= buffer_) && (pc_ < buffer_ + buffer_size_));
+ ASSERT(const_pool_blocked_nesting_ == 0);
+ memset(buffer_, 0, pc_ - buffer_);
+#endif
+ pc_ = buffer_;
+ reloc_info_writer.Reposition(reinterpret_cast<byte*>(buffer_ + buffer_size_),
+ reinterpret_cast<byte*>(pc_));
+ num_pending_reloc_info_ = 0;
+ next_buffer_check_ = 0;
+ no_const_pool_before_ = 0;
+ first_const_pool_use_ = -1;
+ ClearRecordedAstId();
+}
+
+
+// Finalizes assembly: forces out any pending constant pool, then fills in
+// |desc| (if non-NULL) with the buffer, instruction size and reloc-info size.
+void Assembler::GetCode(CodeDesc* desc) {
+ // Emit constant pool if necessary.
+ CheckConstPool(true, false);
+ ASSERT(num_pending_reloc_info_ == 0);
+
+ // Set up code descriptor.
+ if (desc) {
+ desc->buffer = reinterpret_cast<byte*>(buffer_);
+ desc->buffer_size = buffer_size_;
+ desc->instr_size = pc_offset();
+ // Reloc info grows downwards from the end of the buffer.
+ desc->reloc_size = (reinterpret_cast<byte*>(buffer_) + buffer_size_) -
+ reloc_info_writer.pos();
+ desc->origin = this;
+ }
+}
+
+
+// Pads with nops until pc_offset() is a multiple of m (a power of two >= 4).
+void Assembler::Align(int m) {
+ ASSERT(m >= 4 && IsPowerOf2(m));
+ while ((pc_offset() & (m - 1)) != 0) {
+ nop();
+ }
+}
+
+
+// Debug-only sanity check: walks a linked label's chain from the most recent
+// link back to the chain start, verifying it terminates.
+void Assembler::CheckLabelLinkChain(Label const * label) {
+#ifdef DEBUG
+ if (label->is_linked()) {
+ int linkoffset = label->pos();
+ bool start_of_chain = false;
+ while (!start_of_chain) {
+ Instruction * link = InstructionAt(linkoffset);
+ int linkpcoffset = link->ImmPCOffset();
+ int prevlinkoffset = linkoffset + linkpcoffset;
+
+ // The chain start links to itself (ImmPCOffset() == 0).
+ start_of_chain = (linkoffset == prevlinkoffset);
+ linkoffset = linkoffset + linkpcoffset;
+ }
+ }
+#endif
+}
+
+
+void Assembler::bind(Label* label) {
+ // Bind label to the address at pc_. All instructions (most likely branches)
+ // that are linked to this label will be updated to point to the newly-bound
+ // label.
+
+ ASSERT(!label->is_near_linked());
+ ASSERT(!label->is_bound());
+
+ // If the label is linked, the link chain looks something like this:
+ //
+ // |--I----I-------I-------L
+ // |---------------------->| pc_offset
+ // |-------------->| linkoffset = label->pos()
+ // |<------| link->ImmPCOffset()
+ // |------>| prevlinkoffset = linkoffset + link->ImmPCOffset()
+ //
+ // On each iteration, the last link is updated and then removed from the
+ // chain until only one remains. At that point, the label is bound.
+ //
+ // If the label is not linked, no preparation is required before binding.
+ while (label->is_linked()) {
+ int linkoffset = label->pos();
+ Instruction* link = InstructionAt(linkoffset);
+ int prevlinkoffset = linkoffset + link->ImmPCOffset();
+
+ CheckLabelLinkChain(label);
+
+ ASSERT(linkoffset >= 0);
+ ASSERT(linkoffset < pc_offset());
+ // Either the previous link is strictly earlier, or this is the chain
+ // start (a self-referencing link, since kStartOfLabelLinkChain == 0).
+ ASSERT((linkoffset > prevlinkoffset) ||
+ (linkoffset - prevlinkoffset == kStartOfLabelLinkChain));
+ ASSERT(prevlinkoffset >= 0);
+
+ // Update the link to point to the label.
+ link->SetImmPCOffsetTarget(reinterpret_cast<Instruction*>(pc_));
+
+ // Link the label to the previous link in the chain.
+ if (linkoffset - prevlinkoffset == kStartOfLabelLinkChain) {
+ // We hit kStartOfLabelLinkChain, so the chain is fully processed.
+ label->Unuse();
+ } else {
+ // Update the label for the next iteration.
+ label->link_to(prevlinkoffset);
+ }
+ }
+ label->bind_to(pc_offset());
+
+ ASSERT(label->is_bound());
+ ASSERT(!label->is_linked());
+}
+
+
+// Links |label| into its chain (or binds the reference directly if the label
+// is already bound) and returns the byte offset the referring instruction at
+// pc_ should encode. An offset of kStartOfLabelLinkChain (0) marks the start
+// of a link chain.
+int Assembler::LinkAndGetByteOffsetTo(Label* label) {
+ ASSERT(sizeof(*pc_) == 1);
+ CheckLabelLinkChain(label);
+
+ int offset;
+ if (label->is_bound()) {
+ // The label is bound, so it does not need to be updated. Referring
+ // instructions must link directly to the label as they will not be
+ // updated.
+ //
+ // In this case, label->pos() returns the offset of the label from the
+ // start of the buffer.
+ //
+ // Note that offset can be zero for self-referential instructions. (This
+ // could be useful for ADR, for example.)
+ offset = label->pos() - pc_offset();
+ ASSERT(offset <= 0);
+ } else {
+ if (label->is_linked()) {
+ // The label is linked, so the referring instruction should be added onto
+ // the end of the label's link chain.
+ //
+ // In this case, label->pos() returns the offset of the last linked
+ // instruction from the start of the buffer.
+ offset = label->pos() - pc_offset();
+ ASSERT(offset != kStartOfLabelLinkChain);
+ // Note that the offset here needs to be PC-relative only so that the
+ // first instruction in a buffer can link to an unbound label. Otherwise,
+ // the offset would be 0 for this case, and 0 is reserved for
+ // kStartOfLabelLinkChain.
+ } else {
+ // The label is unused, so it now becomes linked and the referring
+ // instruction is at the start of the new link chain.
+ offset = kStartOfLabelLinkChain;
+ }
+ // The instruction at pc is now the last link in the label's chain.
+ label->link_to(pc_offset());
+ }
+
+ return offset;
+}
+
+
+// Blocks constant-pool emission; calls nest, and only the outermost call
+// changes state.
+void Assembler::StartBlockConstPool() {
+ if (const_pool_blocked_nesting_++ == 0) {
+ // Prevent constant pool checks happening by setting the next check to
+ // the biggest possible offset.
+ next_buffer_check_ = kMaxInt;
+ }
+}
+
+
+// Ends a StartBlockConstPool scope; on the outermost exit, re-arms the
+// constant-pool check.
+void Assembler::EndBlockConstPool() {
+ if (--const_pool_blocked_nesting_ == 0) {
+ // Check the constant pool hasn't been blocked for too long.
+ ASSERT((num_pending_reloc_info_ == 0) ||
+ (pc_offset() < (first_const_pool_use_ + kMaxDistToPool)));
+ // Two cases:
+ // * no_const_pool_before_ >= next_buffer_check_ and the emission is
+ // still blocked
+ // * no_const_pool_before_ < next_buffer_check_ and the next emit will
+ // trigger a check.
+ next_buffer_check_ = no_const_pool_before_;
+ }
+}
+
+
+bool Assembler::is_const_pool_blocked() const {
+ return (const_pool_blocked_nesting_ > 0) ||
+ (pc_offset() < no_const_pool_before_);
+}
+
+
+bool Assembler::IsConstantPoolAt(Instruction* instr) {
+ // The constant pool marker is made of two instructions. These instructions
+ // will never be emitted by the JIT, so checking for the first one is enough:
+ // 0: ldr xzr, #<size of pool>
+ bool result = instr->IsLdrLiteralX() && (instr->Rt() == xzr.code());
+
+ // It is still worth asserting the marker is complete.
+ // 4: blr xzr
+ ASSERT(!result || (instr->following()->IsBranchAndLinkToRegister() &&
+ instr->following()->Rn() == xzr.code()));
+
+ return result;
+}
+
+
+// Returns the size encoded in a constant-pool marker, or -1 if |instr| is
+// not a marker.
+int Assembler::ConstantPoolSizeAt(Instruction* instr) {
+ if (IsConstantPoolAt(instr)) {
+ return instr->ImmLLiteral();
+ } else {
+ return -1;
+ }
+}
+
+
+// Emits the pool marker: ldr xzr, #<encoded size>.
+// NOTE(review): the literal is 2 * size + 1, which suggests |size| is counted
+// in 64-bit entries while the literal field counts 32-bit words — confirm
+// against CheckConstPool's caller.
+void Assembler::ConstantPoolMarker(uint32_t size) {
+ ASSERT(is_const_pool_blocked());
+ // + 1 is for the crash guard.
+ Emit(LDR_x_lit | ImmLLiteral(2 * size + 1) | Rt(xzr));
+}
+
+
+void Assembler::ConstantPoolGuard() {
+#ifdef DEBUG
+ // Currently this is only used after a constant pool marker.
+ ASSERT(is_const_pool_blocked());
+ Instruction* instr = reinterpret_cast<Instruction*>(pc_);
+ ASSERT(instr->preceding()->IsLdrLiteralX() &&
+ instr->preceding()->Rt() == xzr.code());
+#endif
+
+ // Crash by branching to 0. lr now points near the fault.
+ // TODO(all): update the simulator to trap this pattern.
+ Emit(BLR | Rn(xzr));
+}
+
+
+// --- Branch and PC-relative address instructions ---
+// The integer-offset overloads take instruction-count offsets (see
+// LinkAndGetInstructionOffsetTo); adr alone takes a byte offset.
+void Assembler::br(const Register& xn) {
+ positions_recorder()->WriteRecordedPositions();
+ ASSERT(xn.Is64Bits());
+ Emit(BR | Rn(xn));
+}
+
+
+void Assembler::blr(const Register& xn) {
+ positions_recorder()->WriteRecordedPositions();
+ ASSERT(xn.Is64Bits());
+ // The pattern 'blr xzr' is used as a guard to detect when execution falls
+ // through the constant pool. It should not be emitted.
+ ASSERT(!xn.Is(xzr));
+ Emit(BLR | Rn(xn));
+}
+
+
+void Assembler::ret(const Register& xn) {
+ positions_recorder()->WriteRecordedPositions();
+ ASSERT(xn.Is64Bits());
+ Emit(RET | Rn(xn));
+}
+
+
+void Assembler::b(int imm26) {
+ Emit(B | ImmUncondBranch(imm26));
+}
+
+
+void Assembler::b(Label* label) {
+ positions_recorder()->WriteRecordedPositions();
+ b(LinkAndGetInstructionOffsetTo(label));
+}
+
+
+void Assembler::b(int imm19, Condition cond) {
+ Emit(B_cond | ImmCondBranch(imm19) | cond);
+}
+
+
+void Assembler::b(Label* label, Condition cond) {
+ positions_recorder()->WriteRecordedPositions();
+ b(LinkAndGetInstructionOffsetTo(label), cond);
+}
+
+
+void Assembler::bl(int imm26) {
+ positions_recorder()->WriteRecordedPositions();
+ Emit(BL | ImmUncondBranch(imm26));
+}
+
+
+void Assembler::bl(Label* label) {
+ positions_recorder()->WriteRecordedPositions();
+ bl(LinkAndGetInstructionOffsetTo(label));
+}
+
+
+// Compare-and-branch-if-zero; SF selects the 32/64-bit form from rt.
+void Assembler::cbz(const Register& rt,
+ int imm19) {
+ positions_recorder()->WriteRecordedPositions();
+ Emit(SF(rt) | CBZ | ImmCmpBranch(imm19) | Rt(rt));
+}
+
+
+void Assembler::cbz(const Register& rt,
+ Label* label) {
+ positions_recorder()->WriteRecordedPositions();
+ cbz(rt, LinkAndGetInstructionOffsetTo(label));
+}
+
+
+void Assembler::cbnz(const Register& rt,
+ int imm19) {
+ positions_recorder()->WriteRecordedPositions();
+ Emit(SF(rt) | CBNZ | ImmCmpBranch(imm19) | Rt(rt));
+}
+
+
+void Assembler::cbnz(const Register& rt,
+ Label* label) {
+ positions_recorder()->WriteRecordedPositions();
+ cbnz(rt, LinkAndGetInstructionOffsetTo(label));
+}
+
+
+// Test-bit-and-branch-if-zero; a W register restricts bit_pos to < 32.
+void Assembler::tbz(const Register& rt,
+ unsigned bit_pos,
+ int imm14) {
+ positions_recorder()->WriteRecordedPositions();
+ ASSERT(rt.Is64Bits() || (rt.Is32Bits() && (bit_pos < kWRegSize)));
+ Emit(TBZ | ImmTestBranchBit(bit_pos) | ImmTestBranch(imm14) | Rt(rt));
+}
+
+
+void Assembler::tbz(const Register& rt,
+ unsigned bit_pos,
+ Label* label) {
+ positions_recorder()->WriteRecordedPositions();
+ tbz(rt, bit_pos, LinkAndGetInstructionOffsetTo(label));
+}
+
+
+void Assembler::tbnz(const Register& rt,
+ unsigned bit_pos,
+ int imm14) {
+ positions_recorder()->WriteRecordedPositions();
+ ASSERT(rt.Is64Bits() || (rt.Is32Bits() && (bit_pos < kWRegSize)));
+ Emit(TBNZ | ImmTestBranchBit(bit_pos) | ImmTestBranch(imm14) | Rt(rt));
+}
+
+
+void Assembler::tbnz(const Register& rt,
+ unsigned bit_pos,
+ Label* label) {
+ positions_recorder()->WriteRecordedPositions();
+ tbnz(rt, bit_pos, LinkAndGetInstructionOffsetTo(label));
+}
+
+
+// PC-relative address computation; imm21 is a byte offset.
+void Assembler::adr(const Register& rd, int imm21) {
+ ASSERT(rd.Is64Bits());
+ Emit(ADR | ImmPCRelAddress(imm21) | Rd(rd));
+}
+
+
+void Assembler::adr(const Register& rd, Label* label) {
+ adr(rd, LinkAndGetByteOffsetTo(label));
+}
+
+
+// --- Add/subtract (with carry) instructions ---
+// cmn/cmp/neg/negs/ngc/ngcs are aliases built on the zero register matching
+// the operand's width.
+void Assembler::add(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ AddSub(rd, rn, operand, LeaveFlags, ADD);
+}
+
+
+void Assembler::adds(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ AddSub(rd, rn, operand, SetFlags, ADD);
+}
+
+
+// cmn is adds with the zero register as destination.
+void Assembler::cmn(const Register& rn,
+ const Operand& operand) {
+ Register zr = AppropriateZeroRegFor(rn);
+ adds(zr, rn, operand);
+}
+
+
+void Assembler::sub(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ AddSub(rd, rn, operand, LeaveFlags, SUB);
+}
+
+
+void Assembler::subs(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ AddSub(rd, rn, operand, SetFlags, SUB);
+}
+
+
+// cmp is subs with the zero register as destination.
+void Assembler::cmp(const Register& rn, const Operand& operand) {
+ Register zr = AppropriateZeroRegFor(rn);
+ subs(zr, rn, operand);
+}
+
+
+// neg is sub from the zero register.
+void Assembler::neg(const Register& rd, const Operand& operand) {
+ Register zr = AppropriateZeroRegFor(rd);
+ sub(rd, zr, operand);
+}
+
+
+void Assembler::negs(const Register& rd, const Operand& operand) {
+ Register zr = AppropriateZeroRegFor(rd);
+ subs(rd, zr, operand);
+}
+
+
+void Assembler::adc(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ AddSubWithCarry(rd, rn, operand, LeaveFlags, ADC);
+}
+
+
+void Assembler::adcs(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ AddSubWithCarry(rd, rn, operand, SetFlags, ADC);
+}
+
+
+void Assembler::sbc(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ AddSubWithCarry(rd, rn, operand, LeaveFlags, SBC);
+}
+
+
+void Assembler::sbcs(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ AddSubWithCarry(rd, rn, operand, SetFlags, SBC);
+}
+
+
+// ngc is sbc from the zero register.
+void Assembler::ngc(const Register& rd, const Operand& operand) {
+ Register zr = AppropriateZeroRegFor(rd);
+ sbc(rd, zr, operand);
+}
+
+
+void Assembler::ngcs(const Register& rd, const Operand& operand) {
+ Register zr = AppropriateZeroRegFor(rd);
+ sbcs(rd, zr, operand);
+}
+
+
+// Logical instructions.
+// All delegate to the shared Logical() emitter; tst is ands with the zero
+// register as destination.
+void Assembler::and_(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ Logical(rd, rn, operand, AND);
+}
+
+
+void Assembler::ands(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ Logical(rd, rn, operand, ANDS);
+}
+
+
+void Assembler::tst(const Register& rn,
+ const Operand& operand) {
+ ands(AppropriateZeroRegFor(rn), rn, operand);
+}
+
+
+void Assembler::bic(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ Logical(rd, rn, operand, BIC);
+}
+
+
+void Assembler::bics(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ Logical(rd, rn, operand, BICS);
+}
+
+
+void Assembler::orr(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ Logical(rd, rn, operand, ORR);
+}
+
+
+void Assembler::orn(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ Logical(rd, rn, operand, ORN);
+}
+
+
+void Assembler::eor(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ Logical(rd, rn, operand, EOR);
+}
+
+
+void Assembler::eon(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ Logical(rd, rn, operand, EON);
+}
+
+
+// --- Variable-shift instructions (shift amount in a register) ---
+// All three operands must have the same width.
+void Assembler::lslv(const Register& rd,
+ const Register& rn,
+ const Register& rm) {
+ ASSERT(rd.SizeInBits() == rn.SizeInBits());
+ ASSERT(rd.SizeInBits() == rm.SizeInBits());
+ Emit(SF(rd) | LSLV | Rm(rm) | Rn(rn) | Rd(rd));
+}
+
+
+void Assembler::lsrv(const Register& rd,
+ const Register& rn,
+ const Register& rm) {
+ ASSERT(rd.SizeInBits() == rn.SizeInBits());
+ ASSERT(rd.SizeInBits() == rm.SizeInBits());
+ Emit(SF(rd) | LSRV | Rm(rm) | Rn(rn) | Rd(rd));
+}
+
+
+void Assembler::asrv(const Register& rd,
+ const Register& rn,
+ const Register& rm) {
+ ASSERT(rd.SizeInBits() == rn.SizeInBits());
+ ASSERT(rd.SizeInBits() == rm.SizeInBits());
+ Emit(SF(rd) | ASRV | Rm(rm) | Rn(rn) | Rd(rd));
+}
+
+
+void Assembler::rorv(const Register& rd,
+ const Register& rn,
+ const Register& rm) {
+ ASSERT(rd.SizeInBits() == rn.SizeInBits());
+ ASSERT(rd.SizeInBits() == rm.SizeInBits());
+ Emit(SF(rd) | RORV | Rm(rm) | Rn(rn) | Rd(rd));
+}
+
+
+// Bitfield operations.
+// The N bit is derived from the SF bit, so it is set exactly for X-register
+// forms.
+void Assembler::bfm(const Register& rd,
+ const Register& rn,
+ unsigned immr,
+ unsigned imms) {
+ ASSERT(rd.SizeInBits() == rn.SizeInBits());
+ Instr N = SF(rd) >> (kSFOffset - kBitfieldNOffset);
+ Emit(SF(rd) | BFM | N |
+ ImmR(immr, rd.SizeInBits()) |
+ ImmS(imms, rn.SizeInBits()) |
+ Rn(rn) | Rd(rd));
+}
+
+
+// Note: unlike bfm/ubfm, sbfm permits a W source with an X destination
+// (this is how sign-extending aliases such as sxtw are formed).
+void Assembler::sbfm(const Register& rd,
+ const Register& rn,
+ unsigned immr,
+ unsigned imms) {
+ ASSERT(rd.Is64Bits() || rn.Is32Bits());
+ Instr N = SF(rd) >> (kSFOffset - kBitfieldNOffset);
+ Emit(SF(rd) | SBFM | N |
+ ImmR(immr, rd.SizeInBits()) |
+ ImmS(imms, rn.SizeInBits()) |
+ Rn(rn) | Rd(rd));
+}
+
+
+void Assembler::ubfm(const Register& rd,
+ const Register& rn,
+ unsigned immr,
+ unsigned imms) {
+ ASSERT(rd.SizeInBits() == rn.SizeInBits());
+ Instr N = SF(rd) >> (kSFOffset - kBitfieldNOffset);
+ Emit(SF(rd) | UBFM | N |
+ ImmR(immr, rd.SizeInBits()) |
+ ImmS(imms, rn.SizeInBits()) |
+ Rn(rn) | Rd(rd));
+}
+
+
+// Extracts a register pair starting at bit |lsb| of rn:rm.
+void Assembler::extr(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ unsigned lsb) {
+ ASSERT(rd.SizeInBits() == rn.SizeInBits());
+ ASSERT(rd.SizeInBits() == rm.SizeInBits());
+ Instr N = SF(rd) >> (kSFOffset - kBitfieldNOffset);
+ Emit(SF(rd) | EXTR | N | Rm(rm) |
+ ImmS(lsb, rn.SizeInBits()) | Rn(rn) | Rd(rd));
+}
+
+
+// --- Conditional select instructions ---
+// cset/csetm/cinc/cinv/cneg are aliases built by inverting the condition
+// (which excludes al and nv) and reusing csinc/csinv/csneg.
+void Assembler::csel(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ Condition cond) {
+ ConditionalSelect(rd, rn, rm, cond, CSEL);
+}
+
+
+void Assembler::csinc(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ Condition cond) {
+ ConditionalSelect(rd, rn, rm, cond, CSINC);
+}
+
+
+void Assembler::csinv(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ Condition cond) {
+ ConditionalSelect(rd, rn, rm, cond, CSINV);
+}
+
+
+void Assembler::csneg(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ Condition cond) {
+ ConditionalSelect(rd, rn, rm, cond, CSNEG);
+}
+
+
+// rd = (cond) ? 1 : 0, encoded as csinc zr, zr with inverted condition.
+void Assembler::cset(const Register &rd, Condition cond) {
+ ASSERT((cond != al) && (cond != nv));
+ Register zr = AppropriateZeroRegFor(rd);
+ csinc(rd, zr, zr, InvertCondition(cond));
+}
+
+
+// rd = (cond) ? -1 : 0, encoded as csinv zr, zr with inverted condition.
+void Assembler::csetm(const Register &rd, Condition cond) {
+ ASSERT((cond != al) && (cond != nv));
+ Register zr = AppropriateZeroRegFor(rd);
+ csinv(rd, zr, zr, InvertCondition(cond));
+}
+
+
+void Assembler::cinc(const Register &rd, const Register &rn, Condition cond) {
+ ASSERT((cond != al) && (cond != nv));
+ csinc(rd, rn, rn, InvertCondition(cond));
+}
+
+
+void Assembler::cinv(const Register &rd, const Register &rn, Condition cond) {
+ ASSERT((cond != al) && (cond != nv));
+ csinv(rd, rn, rn, InvertCondition(cond));
+}
+
+
+void Assembler::cneg(const Register &rd, const Register &rn, Condition cond) {
+ ASSERT((cond != al) && (cond != nv));
+ csneg(rd, rn, rn, InvertCondition(cond));
+}
+
+
+// Shared emitter for the conditional-select family.
+void Assembler::ConditionalSelect(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ Condition cond,
+ ConditionalSelectOp op) {
+ ASSERT(rd.SizeInBits() == rn.SizeInBits());
+ ASSERT(rd.SizeInBits() == rm.SizeInBits());
+ Emit(SF(rd) | op | Rm(rm) | Cond(cond) | Rn(rn) | Rd(rd));
+}
+
+
+// Conditional compare negative: if 'cond' holds, set the flags from
+// rn + operand; otherwise set the flags directly to 'nzcv'.
+void Assembler::ccmn(const Register& rn,
+ const Operand& operand,
+ StatusFlags nzcv,
+ Condition cond) {
+ ConditionalCompare(rn, operand, nzcv, cond, CCMN);
+}
+
+
+// Conditional compare: if 'cond' holds, set the flags from rn - operand;
+// otherwise set the flags directly to 'nzcv'.
+void Assembler::ccmp(const Register& rn,
+ const Operand& operand,
+ StatusFlags nzcv,
+ Condition cond) {
+ ConditionalCompare(rn, operand, nzcv, cond, CCMP);
+}
+
+
+// Shared emitter for the three-source data processing (multiply-add)
+// instructions.
+void Assembler::DataProcessing3Source(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ const Register& ra,
+ DataProcessing3SourceOp op) {
+ Emit(SF(rd) | op | Rm(rm) | Ra(ra) | Rn(rn) | Rd(rd));
+}
+
+
+// mul: rd = rn * rm (MADD with the accumulator set to zr).
+void Assembler::mul(const Register& rd,
+ const Register& rn,
+ const Register& rm) {
+ ASSERT(AreSameSizeAndType(rd, rn, rm));
+ Register zr = AppropriateZeroRegFor(rn);
+ DataProcessing3Source(rd, rn, rm, zr, MADD);
+}
+
+
+// madd: rd = ra + rn * rm.
+void Assembler::madd(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ const Register& ra) {
+ ASSERT(AreSameSizeAndType(rd, rn, rm, ra));
+ DataProcessing3Source(rd, rn, rm, ra, MADD);
+}
+
+
+// mneg: rd = -(rn * rm) (MSUB with the accumulator set to zr).
+void Assembler::mneg(const Register& rd,
+ const Register& rn,
+ const Register& rm) {
+ ASSERT(AreSameSizeAndType(rd, rn, rm));
+ Register zr = AppropriateZeroRegFor(rn);
+ DataProcessing3Source(rd, rn, rm, zr, MSUB);
+}
+
+
+// msub: rd = ra - rn * rm.
+void Assembler::msub(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ const Register& ra) {
+ ASSERT(AreSameSizeAndType(rd, rn, rm, ra));
+ DataProcessing3Source(rd, rn, rm, ra, MSUB);
+}
+
+
+// Widening multiply-accumulate instructions: rn and rm are W registers,
+// rd and ra are X registers (the asserts below enforce this).
+// smaddl: rd = ra + rn * rm, with rn and rm sign-extended.
+void Assembler::smaddl(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ const Register& ra) {
+ ASSERT(rd.Is64Bits() && ra.Is64Bits());
+ ASSERT(rn.Is32Bits() && rm.Is32Bits());
+ DataProcessing3Source(rd, rn, rm, ra, SMADDL_x);
+}
+
+
+// smsubl: rd = ra - rn * rm, with rn and rm sign-extended.
+void Assembler::smsubl(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ const Register& ra) {
+ ASSERT(rd.Is64Bits() && ra.Is64Bits());
+ ASSERT(rn.Is32Bits() && rm.Is32Bits());
+ DataProcessing3Source(rd, rn, rm, ra, SMSUBL_x);
+}
+
+
+// umaddl: rd = ra + rn * rm, with rn and rm zero-extended.
+void Assembler::umaddl(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ const Register& ra) {
+ ASSERT(rd.Is64Bits() && ra.Is64Bits());
+ ASSERT(rn.Is32Bits() && rm.Is32Bits());
+ DataProcessing3Source(rd, rn, rm, ra, UMADDL_x);
+}
+
+
+// umsubl: rd = ra - rn * rm, with rn and rm zero-extended.
+void Assembler::umsubl(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ const Register& ra) {
+ ASSERT(rd.Is64Bits() && ra.Is64Bits());
+ ASSERT(rn.Is32Bits() && rm.Is32Bits());
+ DataProcessing3Source(rd, rn, rm, ra, UMSUBL_x);
+}
+
+
+// smull: rd = rn * rm with 32-bit operands sign-extended to a 64-bit
+// result (SMADDL with the accumulator set to xzr).
+void Assembler::smull(const Register& rd,
+ const Register& rn,
+ const Register& rm) {
+ ASSERT(rd.Is64Bits());
+ ASSERT(rn.Is32Bits() && rm.Is32Bits());
+ DataProcessing3Source(rd, rn, rm, xzr, SMADDL_x);
+}
+
+
+// smulh: rd = the top 64 bits of the 128-bit signed product of rn and rm.
+// SMULH is only architecturally defined for X registers: the previous
+// AreSameSizeAndType check allowed W operands, which would emit the
+// invalid sf=0 form of SMULH_x. Require 64-bit operands explicitly.
+void Assembler::smulh(const Register& rd,
+ const Register& rn,
+ const Register& rm) {
+ ASSERT(rd.Is64Bits() && rn.Is64Bits() && rm.Is64Bits());
+ DataProcessing3Source(rd, rn, rm, xzr, SMULH_x);
+}
+
+
+// sdiv: rd = rn / rm, signed integer divide.
+void Assembler::sdiv(const Register& rd,
+ const Register& rn,
+ const Register& rm) {
+ ASSERT(rd.SizeInBits() == rn.SizeInBits());
+ ASSERT(rd.SizeInBits() == rm.SizeInBits());
+ Emit(SF(rd) | SDIV | Rm(rm) | Rn(rn) | Rd(rd));
+}
+
+
+// udiv: rd = rn / rm, unsigned integer divide.
+void Assembler::udiv(const Register& rd,
+ const Register& rn,
+ const Register& rm) {
+ ASSERT(rd.SizeInBits() == rn.SizeInBits());
+ ASSERT(rd.SizeInBits() == rm.SizeInBits());
+ Emit(SF(rd) | UDIV | Rm(rm) | Rn(rn) | Rd(rd));
+}
+
+
+// rbit: reverse the bit order of rn.
+void Assembler::rbit(const Register& rd,
+ const Register& rn) {
+ DataProcessing1Source(rd, rn, RBIT);
+}
+
+
+// rev16: reverse the bytes within each 16-bit halfword.
+void Assembler::rev16(const Register& rd,
+ const Register& rn) {
+ DataProcessing1Source(rd, rn, REV16);
+}
+
+
+// rev32: reverse the bytes within each 32-bit word of a 64-bit register.
+// NOTE(review): this emits the bare REV opcode, while rev() below selects
+// REV_x/REV_w — confirm REV here encodes REV32 semantics for an X
+// destination rather than a full 64-bit byte reverse.
+void Assembler::rev32(const Register& rd,
+ const Register& rn) {
+ ASSERT(rd.Is64Bits());
+ DataProcessing1Source(rd, rn, REV);
+}
+
+
+// rev: reverse all bytes of the register, width chosen from rd.
+void Assembler::rev(const Register& rd,
+ const Register& rn) {
+ DataProcessing1Source(rd, rn, rd.Is64Bits() ? REV_x : REV_w);
+}
+
+
+// clz: count leading zero bits.
+void Assembler::clz(const Register& rd,
+ const Register& rn) {
+ DataProcessing1Source(rd, rn, CLZ);
+}
+
+
+// cls: count leading sign bits.
+void Assembler::cls(const Register& rd,
+ const Register& rn) {
+ DataProcessing1Source(rd, rn, CLS);
+}
+
+
+// ldp: load a pair of registers from memory; the opcode is derived from
+// the register type and size.
+void Assembler::ldp(const CPURegister& rt,
+ const CPURegister& rt2,
+ const MemOperand& src) {
+ LoadStorePair(rt, rt2, src, LoadPairOpFor(rt, rt2));
+}
+
+
+// stp: store a pair of registers to memory.
+void Assembler::stp(const CPURegister& rt,
+ const CPURegister& rt2,
+ const MemOperand& dst) {
+ LoadStorePair(rt, rt2, dst, StorePairOpFor(rt, rt2));
+}
+
+
+// ldpsw: load a pair of words, sign-extended into 64-bit registers.
+void Assembler::ldpsw(const Register& rt,
+ const Register& rt2,
+ const MemOperand& src) {
+ ASSERT(rt.Is64Bits());
+ LoadStorePair(rt, rt2, src, LDPSW_x);
+}
+
+
+// Shared emitter for the load/store pair instructions. Supports immediate
+// offset, pre-index and post-index addressing modes.
+void Assembler::LoadStorePair(const CPURegister& rt,
+ const CPURegister& rt2,
+ const MemOperand& addr,
+ LoadStorePairOp op) {
+ // 'rt' and 'rt2' can only be aliased for stores.
+ ASSERT(((op & LoadStorePairLBit) == 0) || !rt.Is(rt2));
+ ASSERT(AreSameSizeAndType(rt, rt2));
+
+ // The pair offset is scaled by the data size of a single register.
+ Instr memop = op | Rt(rt) | Rt2(rt2) | RnSP(addr.base()) |
+ ImmLSPair(addr.offset(), CalcLSPairDataSize(op));
+
+ Instr addrmodeop;
+ if (addr.IsImmediateOffset()) {
+ addrmodeop = LoadStorePairOffsetFixed;
+ } else {
+ // Pre-index and post-index modes.
+ ASSERT(!rt.Is(addr.base()));
+ ASSERT(!rt2.Is(addr.base()));
+ ASSERT(addr.offset() != 0);
+ if (addr.IsPreIndex()) {
+ addrmodeop = LoadStorePairPreIndexFixed;
+ } else {
+ ASSERT(addr.IsPostIndex());
+ addrmodeop = LoadStorePairPostIndexFixed;
+ }
+ }
+ Emit(addrmodeop | memop);
+}
+
+
+// ldnp: load pair with a non-temporal (streaming) hint.
+void Assembler::ldnp(const CPURegister& rt,
+ const CPURegister& rt2,
+ const MemOperand& src) {
+ LoadStorePairNonTemporal(rt, rt2, src,
+ LoadPairNonTemporalOpFor(rt, rt2));
+}
+
+
+// stnp: store pair with a non-temporal (streaming) hint.
+void Assembler::stnp(const CPURegister& rt,
+ const CPURegister& rt2,
+ const MemOperand& dst) {
+ LoadStorePairNonTemporal(rt, rt2, dst,
+ StorePairNonTemporalOpFor(rt, rt2));
+}
+
+
+// Shared emitter for the non-temporal pair instructions. Only the
+// immediate-offset addressing mode exists for these.
+void Assembler::LoadStorePairNonTemporal(const CPURegister& rt,
+ const CPURegister& rt2,
+ const MemOperand& addr,
+ LoadStorePairNonTemporalOp op) {
+ ASSERT(!rt.Is(rt2));
+ ASSERT(AreSameSizeAndType(rt, rt2));
+ ASSERT(addr.IsImmediateOffset());
+
+ LSDataSize size = CalcLSPairDataSize(
+ static_cast<LoadStorePairOp>(op & LoadStorePairMask));
+ Emit(op | Rt(rt) | Rt2(rt2) | RnSP(addr.base()) |
+ ImmLSPair(addr.offset(), size));
+}
+
+
+// Memory instructions.
+// ldrb: load a byte, zero-extended.
+void Assembler::ldrb(const Register& rt, const MemOperand& src) {
+ LoadStore(rt, src, LDRB_w);
+}
+
+
+// strb: store the low byte of rt.
+void Assembler::strb(const Register& rt, const MemOperand& dst) {
+ LoadStore(rt, dst, STRB_w);
+}
+
+
+// ldrsb: load a byte, sign-extended to the width of rt.
+void Assembler::ldrsb(const Register& rt, const MemOperand& src) {
+ LoadStore(rt, src, rt.Is64Bits() ? LDRSB_x : LDRSB_w);
+}
+
+
+// ldrh: load a halfword, zero-extended.
+void Assembler::ldrh(const Register& rt, const MemOperand& src) {
+ LoadStore(rt, src, LDRH_w);
+}
+
+
+// strh: store the low halfword of rt.
+void Assembler::strh(const Register& rt, const MemOperand& dst) {
+ LoadStore(rt, dst, STRH_w);
+}
+
+
+// ldrsh: load a halfword, sign-extended to the width of rt.
+void Assembler::ldrsh(const Register& rt, const MemOperand& src) {
+ LoadStore(rt, src, rt.Is64Bits() ? LDRSH_x : LDRSH_w);
+}
+
+
+// ldr: load a register; the access size is taken from the register type.
+void Assembler::ldr(const CPURegister& rt, const MemOperand& src) {
+ LoadStore(rt, src, LoadOpFor(rt));
+}
+
+
+// str: store a register; the access size is taken from the register type.
+void Assembler::str(const CPURegister& rt, const MemOperand& src) {
+ LoadStore(rt, src, StoreOpFor(rt));
+}
+
+
+// ldrsw: load a word, sign-extended to 64 bits.
+void Assembler::ldrsw(const Register& rt, const MemOperand& src) {
+ ASSERT(rt.Is64Bits());
+ LoadStore(rt, src, LDRSW_x);
+}
+
+
+// Literal-pool load of an arbitrary 64-bit constant; not implemented yet.
+void Assembler::ldr(const Register& rt, uint64_t imm) {
+ // TODO(all): Constant pool may be garbage collected. Hence we cannot store
+ // TODO(all): arbitrary values in them. Manually move it for now.
+ // TODO(all): Fix MacroAssembler::Fmov when this is implemented.
+ UNIMPLEMENTED();
+}
+
+
+// Literal-pool load of an arbitrary double constant; not implemented yet.
+void Assembler::ldr(const FPRegister& ft, double imm) {
+ // TODO(all): Constant pool may be garbage collected. Hence we cannot store
+ // TODO(all): arbitrary values in them. Manually move it for now.
+ // TODO(all): Fix MacroAssembler::Fmov when this is implemented.
+ UNIMPLEMENTED();
+}
+
+
+// mov: register-to-register move.
+void Assembler::mov(const Register& rd, const Register& rm) {
+ // Moves involving the stack pointer are encoded as add immediate with
+ // second operand of zero. Otherwise, orr with first operand zr is
+ // used.
+ if (rd.IsSP() || rm.IsSP()) {
+ add(rd, rm, 0);
+ } else {
+ orr(rd, AppropriateZeroRegFor(rd), rm);
+ }
+}
+
+
+// mvn: rd = ~operand (ORN with the zero register as first operand).
+void Assembler::mvn(const Register& rd, const Operand& operand) {
+ orn(rd, AppropriateZeroRegFor(rd), operand);
+}
+
+
+// mrs: read a system register into rt (64-bit only).
+void Assembler::mrs(const Register& rt, SystemRegister sysreg) {
+ ASSERT(rt.Is64Bits());
+ Emit(MRS | ImmSystemRegister(sysreg) | Rt(rt));
+}
+
+
+// msr: write rt to a system register (64-bit only).
+void Assembler::msr(SystemRegister sysreg, const Register& rt) {
+ ASSERT(rt.Is64Bits());
+ Emit(MSR | Rt(rt) | ImmSystemRegister(sysreg));
+}
+
+
+// hint: emit a HINT instruction (e.g. NOP) with the given code.
+void Assembler::hint(SystemHint code) {
+ Emit(HINT | ImmHint(code) | Rt(xzr));
+}
+
+
+// dmb: data memory barrier for the given domain and access type.
+void Assembler::dmb(BarrierDomain domain, BarrierType type) {
+ Emit(DMB | ImmBarrierDomain(domain) | ImmBarrierType(type));
+}
+
+
+// dsb: data synchronization barrier for the given domain and access type.
+void Assembler::dsb(BarrierDomain domain, BarrierType type) {
+ Emit(DSB | ImmBarrierDomain(domain) | ImmBarrierType(type));
+}
+
+
+// isb: instruction synchronization barrier (always full-system).
+void Assembler::isb() {
+ Emit(ISB | ImmBarrierDomain(FullSystem) | ImmBarrierType(BarrierAll));
+}
+
+
+// fmov (immediate): use the 8-bit FP immediate form when 'imm' is
+// encodable, the zero register for +0.0, and otherwise fall back to a
+// literal-pool load (currently unimplemented — see ldr above).
+void Assembler::fmov(FPRegister fd, double imm) {
+ if (fd.Is64Bits() && IsImmFP64(imm)) {
+ Emit(FMOV_d_imm | Rd(fd) | ImmFP64(imm));
+ } else if (fd.Is32Bits() && IsImmFP32(imm)) {
+ Emit(FMOV_s_imm | Rd(fd) | ImmFP32(static_cast<float>(imm)));
+ } else if ((imm == 0.0) && (copysign(1.0, imm) == 1.0)) {
+ // Positive zero only: -0.0 has a different bit pattern and cannot be
+ // materialised from the zero register.
+ Register zr = AppropriateZeroRegFor(fd);
+ fmov(fd, zr);
+ } else {
+ ldr(fd, imm);
+ }
+}
+
+
+// fmov FP -> integer register: a raw bit move, sizes must match.
+void Assembler::fmov(Register rd, FPRegister fn) {
+ ASSERT(rd.SizeInBits() == fn.SizeInBits());
+ FPIntegerConvertOp op = rd.Is32Bits() ? FMOV_ws : FMOV_xd;
+ Emit(op | Rd(rd) | Rn(fn));
+}
+
+
+// fmov integer -> FP register: a raw bit move, sizes must match.
+void Assembler::fmov(FPRegister fd, Register rn) {
+ ASSERT(fd.SizeInBits() == rn.SizeInBits());
+ FPIntegerConvertOp op = fd.Is32Bits() ? FMOV_sw : FMOV_dx;
+ Emit(op | Rd(fd) | Rn(rn));
+}
+
+
+// fmov FP -> FP register move, sizes must match.
+void Assembler::fmov(FPRegister fd, FPRegister fn) {
+ ASSERT(fd.SizeInBits() == fn.SizeInBits());
+ Emit(FPType(fd) | FMOV | Rd(fd) | Rn(fn));
+}
+
+
+// Floating-point arithmetic wrappers over the 2- and 3-source FP
+// data-processing emitters.
+void Assembler::fadd(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm) {
+ FPDataProcessing2Source(fd, fn, fm, FADD);
+}
+
+
+void Assembler::fsub(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm) {
+ FPDataProcessing2Source(fd, fn, fm, FSUB);
+}
+
+
+void Assembler::fmul(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm) {
+ FPDataProcessing2Source(fd, fn, fm, FMUL);
+}
+
+
+// fmadd: fd = fa + fn * fm (fused).
+void Assembler::fmadd(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm,
+ const FPRegister& fa) {
+ FPDataProcessing3Source(fd, fn, fm, fa, fd.Is32Bits() ? FMADD_s : FMADD_d);
+}
+
+
+// fmsub: fd = fa - fn * fm (fused).
+void Assembler::fmsub(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm,
+ const FPRegister& fa) {
+ FPDataProcessing3Source(fd, fn, fm, fa, fd.Is32Bits() ? FMSUB_s : FMSUB_d);
+}
+
+
+// fnmadd: fd = -fa - fn * fm (fused).
+void Assembler::fnmadd(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm,
+ const FPRegister& fa) {
+ FPDataProcessing3Source(fd, fn, fm, fa, fd.Is32Bits() ? FNMADD_s : FNMADD_d);
+}
+
+
+// fnmsub: fd = -fa + fn * fm (fused).
+void Assembler::fnmsub(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm,
+ const FPRegister& fa) {
+ FPDataProcessing3Source(fd, fn, fm, fa, fd.Is32Bits() ? FNMSUB_s : FNMSUB_d);
+}
+
+
+void Assembler::fdiv(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm) {
+ FPDataProcessing2Source(fd, fn, fm, FDIV);
+}
+
+
+void Assembler::fmax(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm) {
+ FPDataProcessing2Source(fd, fn, fm, FMAX);
+}
+
+
+// fmaxnm: maxNum semantics — prefers the number over a quiet NaN.
+void Assembler::fmaxnm(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm) {
+ FPDataProcessing2Source(fd, fn, fm, FMAXNM);
+}
+
+
+void Assembler::fmin(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm) {
+ FPDataProcessing2Source(fd, fn, fm, FMIN);
+}
+
+
+// fminnm: minNum semantics — prefers the number over a quiet NaN.
+void Assembler::fminnm(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm) {
+ FPDataProcessing2Source(fd, fn, fm, FMINNM);
+}
+
+
+// FP single-source operations; fd and fn must be the same size.
+void Assembler::fabs(const FPRegister& fd,
+ const FPRegister& fn) {
+ ASSERT(fd.SizeInBits() == fn.SizeInBits());
+ FPDataProcessing1Source(fd, fn, FABS);
+}
+
+
+void Assembler::fneg(const FPRegister& fd,
+ const FPRegister& fn) {
+ ASSERT(fd.SizeInBits() == fn.SizeInBits());
+ FPDataProcessing1Source(fd, fn, FNEG);
+}
+
+
+void Assembler::fsqrt(const FPRegister& fd,
+ const FPRegister& fn) {
+ ASSERT(fd.SizeInBits() == fn.SizeInBits());
+ FPDataProcessing1Source(fd, fn, FSQRT);
+}
+
+
+// frinta: round to integral, ties away from zero.
+void Assembler::frinta(const FPRegister& fd,
+ const FPRegister& fn) {
+ ASSERT(fd.SizeInBits() == fn.SizeInBits());
+ FPDataProcessing1Source(fd, fn, FRINTA);
+}
+
+
+// frintn: round to integral, ties to even.
+void Assembler::frintn(const FPRegister& fd,
+ const FPRegister& fn) {
+ ASSERT(fd.SizeInBits() == fn.SizeInBits());
+ FPDataProcessing1Source(fd, fn, FRINTN);
+}
+
+
+// frintz: round to integral, toward zero.
+void Assembler::frintz(const FPRegister& fd,
+ const FPRegister& fn) {
+ ASSERT(fd.SizeInBits() == fn.SizeInBits());
+ FPDataProcessing1Source(fd, fn, FRINTZ);
+}
+
+
+// fcmp: compare two FP registers and set the flags.
+void Assembler::fcmp(const FPRegister& fn,
+ const FPRegister& fm) {
+ ASSERT(fn.SizeInBits() == fm.SizeInBits());
+ Emit(FPType(fn) | FCMP | Rm(fm) | Rn(fn));
+}
+
+
+// fcmp (zero form): compare fn against +0.0 and set the flags.
+void Assembler::fcmp(const FPRegister& fn,
+ double value) {
+ USE(value);
+ // Although the fcmp instruction can strictly only take an immediate value of
+ // +0.0, we don't need to check for -0.0 because the sign of 0.0 doesn't
+ // affect the result of the comparison.
+ ASSERT(value == 0.0);
+ Emit(FPType(fn) | FCMP_zero | Rn(fn));
+}
+
+
+// fccmp: if 'cond' holds, set the flags from comparing fn and fm;
+// otherwise set them directly to 'nzcv'.
+void Assembler::fccmp(const FPRegister& fn,
+ const FPRegister& fm,
+ StatusFlags nzcv,
+ Condition cond) {
+ ASSERT(fn.SizeInBits() == fm.SizeInBits());
+ Emit(FPType(fn) | FCCMP | Rm(fm) | Cond(cond) | Rn(fn) | Nzcv(nzcv));
+}
+
+
+// fcsel: fd = cond ? fn : fm.
+void Assembler::fcsel(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm,
+ Condition cond) {
+ ASSERT(fd.SizeInBits() == fn.SizeInBits());
+ ASSERT(fd.SizeInBits() == fm.SizeInBits());
+ Emit(FPType(fd) | FCSEL | Rm(fm) | Cond(cond) | Rn(fn) | Rd(fd));
+}
+
+
+// Shared emitter for the FP-to-integer conversion instructions.
+void Assembler::FPConvertToInt(const Register& rd,
+ const FPRegister& fn,
+ FPIntegerConvertOp op) {
+ Emit(SF(rd) | FPType(fn) | op | Rn(fn) | Rd(rd));
+}
+
+
+// fcvt: convert between single and double precision, direction chosen
+// from the destination size.
+void Assembler::fcvt(const FPRegister& fd,
+ const FPRegister& fn) {
+ if (fd.Is64Bits()) {
+ // Convert float to double.
+ ASSERT(fn.Is32Bits());
+ FPDataProcessing1Source(fd, fn, FCVT_ds);
+ } else {
+ // Convert double to float.
+ ASSERT(fn.Is64Bits());
+ FPDataProcessing1Source(fd, fn, FCVT_sd);
+ }
+}
+
+
+// FP-to-integer conversions. The middle letter selects the rounding mode
+// (A: ties away, M: toward minus infinity, N: ties to even, Z: toward
+// zero); the last letter selects unsigned (u) or signed (s) results.
+void Assembler::fcvtau(const Register& rd, const FPRegister& fn) {
+ FPConvertToInt(rd, fn, FCVTAU);
+}
+
+
+void Assembler::fcvtas(const Register& rd, const FPRegister& fn) {
+ FPConvertToInt(rd, fn, FCVTAS);
+}
+
+
+void Assembler::fcvtmu(const Register& rd, const FPRegister& fn) {
+ FPConvertToInt(rd, fn, FCVTMU);
+}
+
+
+void Assembler::fcvtms(const Register& rd, const FPRegister& fn) {
+ FPConvertToInt(rd, fn, FCVTMS);
+}
+
+
+void Assembler::fcvtnu(const Register& rd, const FPRegister& fn) {
+ FPConvertToInt(rd, fn, FCVTNU);
+}
+
+
+void Assembler::fcvtns(const Register& rd, const FPRegister& fn) {
+ FPConvertToInt(rd, fn, FCVTNS);
+}
+
+
+void Assembler::fcvtzu(const Register& rd, const FPRegister& fn) {
+ FPConvertToInt(rd, fn, FCVTZU);
+}
+
+
+void Assembler::fcvtzs(const Register& rd, const FPRegister& fn) {
+ FPConvertToInt(rd, fn, FCVTZS);
+}
+
+
+// scvtf: signed integer (or fixed-point, when fbits > 0) to FP conversion.
+// 'fbits' is the number of fractional bits in the fixed-point input.
+void Assembler::scvtf(const FPRegister& fd,
+ const Register& rn,
+ unsigned fbits) {
+ if (fbits == 0) {
+ Emit(SF(rn) | FPType(fd) | SCVTF | Rn(rn) | Rd(fd));
+ } else {
+ Emit(SF(rn) | FPType(fd) | SCVTF_fixed | FPScale(64 - fbits) | Rn(rn) |
+ Rd(fd));
+ }
+}
+
+
+// ucvtf: unsigned integer (or fixed-point, when fbits > 0) to FP
+// conversion.
+void Assembler::ucvtf(const FPRegister& fd,
+ const Register& rn,
+ unsigned fbits) {
+ if (fbits == 0) {
+ Emit(SF(rn) | FPType(fd) | UCVTF | Rn(rn) | Rd(fd));
+ } else {
+ Emit(SF(rn) | FPType(fd) | UCVTF_fixed | FPScale(64 - fbits) | Rn(rn) |
+ Rd(fd));
+ }
+}
+
+
+// Note:
+// Below, a difference in case for the same letter indicates a
+// negated bit.
+// If b is 1, then B is 0.
+// Pack a float that IsImmFP32 accepts into the instruction's 8-bit ImmFP
+// field (sign, biased exponent fragment, 4 mantissa bits).
+Instr Assembler::ImmFP32(float imm) {
+ ASSERT(IsImmFP32(imm));
+ // bits: aBbb.bbbc.defg.h000.0000.0000.0000.0000
+ uint32_t bits = float_to_rawbits(imm);
+ // bit7: a000.0000
+ uint32_t bit7 = ((bits >> 31) & 0x1) << 7;
+ // bit6: 0b00.0000
+ uint32_t bit6 = ((bits >> 29) & 0x1) << 6;
+ // bit5_to_0: 00cd.efgh
+ uint32_t bit5_to_0 = (bits >> 19) & 0x3f;
+
+ return (bit7 | bit6 | bit5_to_0) << ImmFP_offset;
+}
+
+
+// Pack a double that IsImmFP64 accepts into the instruction's 8-bit ImmFP
+// field, analogous to ImmFP32 above.
+Instr Assembler::ImmFP64(double imm) {
+ ASSERT(IsImmFP64(imm));
+ // bits: aBbb.bbbb.bbcd.efgh.0000.0000.0000.0000
+ // 0000.0000.0000.0000.0000.0000.0000.0000
+ uint64_t bits = double_to_rawbits(imm);
+ // bit7: a000.0000
+ uint32_t bit7 = ((bits >> 63) & 0x1) << 7;
+ // bit6: 0b00.0000
+ uint32_t bit6 = ((bits >> 61) & 0x1) << 6;
+ // bit5_to_0: 00cd.efgh
+ uint32_t bit5_to_0 = (bits >> 48) & 0x3f;
+
+ return (bit7 | bit6 | bit5_to_0) << ImmFP_offset;
+}
+
+
+// Code generation helpers.
+// Emit a move-wide-immediate instruction (MOVZ/MOVN/MOVK, selected by
+// 'mov_op'). If 'shift' is non-negative it must be 0, 16, 32 or 48
+// (32/48 for X registers only); if negative, a suitable shift is derived
+// from 'imm'. After shifting, 'imm' must fit in the 16-bit payload.
+void Assembler::MoveWide(const Register& rd,
+ uint64_t imm,
+ int shift,
+ MoveWideImmediateOp mov_op) {
+ if (shift >= 0) {
+ // Explicit shift specified.
+ ASSERT((shift == 0) || (shift == 16) || (shift == 32) || (shift == 48));
+ ASSERT(rd.Is64Bits() || (shift == 0) || (shift == 16));
+ shift /= 16;
+ } else {
+ // Calculate a new immediate and shift combination to encode the immediate
+ // argument.
+ // Use ULL literals: 'unsigned long' is only 32 bits on LLP64 targets
+ // (e.g. Win64), where 0xffffUL << 32 would be undefined behaviour.
+ shift = 0;
+ if ((imm & ~0xffffULL) == 0) {
+ // Nothing to do.
+ } else if ((imm & ~(0xffffULL << 16)) == 0) {
+ imm >>= 16;
+ shift = 1;
+ } else if ((imm & ~(0xffffULL << 32)) == 0) {
+ ASSERT(rd.Is64Bits());
+ imm >>= 32;
+ shift = 2;
+ } else if ((imm & ~(0xffffULL << 48)) == 0) {
+ ASSERT(rd.Is64Bits());
+ imm >>= 48;
+ shift = 3;
+ }
+ }
+
+ // Larger immediates must be pre-split by the caller.
+ ASSERT(is_uint16(imm));
+
+ Emit(SF(rd) | MoveWideImmediateFixed | mov_op |
+ Rd(rd) | ImmMoveWide(imm) | ShiftMoveWide(shift));
+}
+
+
+// Shared emitter for add/sub in all three forms: immediate,
+// shifted-register and extended-register. 'S' selects whether the flags
+// are updated.
+void Assembler::AddSub(const Register& rd,
+ const Register& rn,
+ const Operand& operand,
+ FlagsUpdate S,
+ AddSubOp op) {
+ ASSERT(rd.SizeInBits() == rn.SizeInBits());
+ ASSERT(!operand.NeedsRelocation());
+ if (operand.IsImmediate()) {
+ int64_t immediate = operand.immediate();
+ ASSERT(IsImmAddSub(immediate));
+ // Flag-setting forms write Rd; plain forms may target the stack pointer.
+ Instr dest_reg = (S == SetFlags) ? Rd(rd) : RdSP(rd);
+ Emit(SF(rd) | AddSubImmediateFixed | op | Flags(S) |
+ ImmAddSub(immediate) | dest_reg | RnSP(rn));
+ } else if (operand.IsShiftedRegister()) {
+ ASSERT(operand.reg().SizeInBits() == rd.SizeInBits());
+ ASSERT(operand.shift() != ROR);
+
+ // For instructions of the form:
+ // add/sub wsp, <Wn>, <Wm> [, LSL #0-3 ]
+ // add/sub <Wd>, wsp, <Wm> [, LSL #0-3 ]
+ // add/sub wsp, wsp, <Wm> [, LSL #0-3 ]
+ // adds/subs <Wd>, wsp, <Wm> [, LSL #0-3 ]
+ // or their 64-bit register equivalents, convert the operand from shifted to
+ // extended register mode, and emit an add/sub extended instruction.
+ if (rn.IsSP() || rd.IsSP()) {
+ ASSERT(!(rd.IsSP() && (S == SetFlags)));
+ DataProcExtendedRegister(rd, rn, operand.ToExtendedRegister(), S,
+ AddSubExtendedFixed | op);
+ } else {
+ DataProcShiftedRegister(rd, rn, operand, S, AddSubShiftedFixed | op);
+ }
+ } else {
+ ASSERT(operand.IsExtendedRegister());
+ DataProcExtendedRegister(rd, rn, operand, S, AddSubExtendedFixed | op);
+ }
+}
+
+
+// Shared emitter for adc/sbc (add/subtract with carry). The operand must
+// be an unshifted register of the same size as rd.
+void Assembler::AddSubWithCarry(const Register& rd,
+ const Register& rn,
+ const Operand& operand,
+ FlagsUpdate S,
+ AddSubWithCarryOp op) {
+ ASSERT(rd.SizeInBits() == rn.SizeInBits());
+ ASSERT(rd.SizeInBits() == operand.reg().SizeInBits());
+ ASSERT(operand.IsShiftedRegister() && (operand.shift_amount() == 0));
+ ASSERT(!operand.NeedsRelocation());
+ Emit(SF(rd) | op | Flags(S) | Rm(operand.reg()) | Rn(rn) | Rd(rd));
+}
+
+
+// hlt: emit a halt instruction with a 16-bit payload.
+void Assembler::hlt(int code) {
+ ASSERT(is_uint16(code));
+ Emit(HLT | ImmException(code));
+}
+
+
+// brk: emit a software breakpoint with a 16-bit payload.
+void Assembler::brk(int code) {
+ ASSERT(is_uint16(code));
+ Emit(BRK | ImmException(code));
+}
+
+
+// Emit a debug marker: under the simulator, a hlt followed by inline code,
+// params and message data; otherwise only a breakpoint when requested via
+// 'params'.
+void Assembler::debug(const char* message, uint32_t code, Instr params) {
+#ifdef USE_SIMULATOR
+ // Don't generate simulator specific code if we are building a snapshot, which
+ // might be run on real hardware.
+ if (!Serializer::enabled()) {
+#ifdef DEBUG
+ Serializer::TooLateToEnableNow();
+#endif
+ // The arguments to the debug marker need to be contiguous in memory, so
+ // make sure we don't try to emit a literal pool.
+ BlockConstPoolScope scope(this);
+
+ Label start;
+ bind(&start);
+
+ // Refer to instructions-a64.h for a description of the marker and its
+ // arguments.
+ hlt(kImmExceptionIsDebug);
+ ASSERT(SizeOfCodeGeneratedSince(&start) == kDebugCodeOffset);
+ dc32(code);
+ ASSERT(SizeOfCodeGeneratedSince(&start) == kDebugParamsOffset);
+ dc32(params);
+ ASSERT(SizeOfCodeGeneratedSince(&start) == kDebugMessageOffset);
+ EmitStringData(message);
+ hlt(kImmExceptionIsUnreachable);
+
+ return;
+ }
+ // Fall through if Serializer is enabled.
+#endif
+
+ if (params & BREAK) {
+ hlt(kImmExceptionIsDebug);
+ }
+}
+
+
+// Shared emitter for the logical instructions (AND/ORR/EOR/BIC/ORN/EON,
+// with or without flag setting). Immediates must be encodable; anything
+// else is pre-processed by the macro assembler.
+void Assembler::Logical(const Register& rd,
+ const Register& rn,
+ const Operand& operand,
+ LogicalOp op) {
+ ASSERT(rd.SizeInBits() == rn.SizeInBits());
+ ASSERT(!operand.NeedsRelocation());
+ if (operand.IsImmediate()) {
+ int64_t immediate = operand.immediate();
+ unsigned reg_size = rd.SizeInBits();
+
+ // All-zeros and all-ones are not encodable as logical immediates.
+ ASSERT(immediate != 0);
+ ASSERT(immediate != -1);
+ ASSERT(rd.Is64Bits() || is_uint32(immediate));
+
+ // If the operation is NOT, invert the operation and immediate.
+ if ((op & NOT) == NOT) {
+ op = static_cast<LogicalOp>(op & ~NOT);
+ immediate = rd.Is64Bits() ? ~immediate : (~immediate & kWRegMask);
+ }
+
+ unsigned n, imm_s, imm_r;
+ if (IsImmLogical(immediate, reg_size, &n, &imm_s, &imm_r)) {
+ // Immediate can be encoded in the instruction.
+ LogicalImmediate(rd, rn, n, imm_s, imm_r, op);
+ } else {
+ // This case is handled in the macro assembler.
+ UNREACHABLE();
+ }
+ } else {
+ ASSERT(operand.IsShiftedRegister());
+ ASSERT(operand.reg().SizeInBits() == rd.SizeInBits());
+ Instr dp_op = static_cast<Instr>(op | LogicalShiftedFixed);
+ DataProcShiftedRegister(rd, rn, operand, LeaveFlags, dp_op);
+ }
+}
+
+
+// Emit a logical-immediate instruction with pre-encoded n/imm_s/imm_r
+// fields (see IsImmLogical). ANDS writes Rd; the other ops may target SP.
+void Assembler::LogicalImmediate(const Register& rd,
+ const Register& rn,
+ unsigned n,
+ unsigned imm_s,
+ unsigned imm_r,
+ LogicalOp op) {
+ unsigned reg_size = rd.SizeInBits();
+ Instr dest_reg = (op == ANDS) ? Rd(rd) : RdSP(rd);
+ Emit(SF(rd) | LogicalImmediateFixed | op | BitN(n, reg_size) |
+ ImmSetBits(imm_s, reg_size) | ImmRotate(imm_r, reg_size) | dest_reg |
+ Rn(rn));
+}
+
+
+// Shared emitter for ccmn/ccmp, supporting a 5-bit immediate or an
+// unshifted register operand.
+void Assembler::ConditionalCompare(const Register& rn,
+ const Operand& operand,
+ StatusFlags nzcv,
+ Condition cond,
+ ConditionalCompareOp op) {
+ Instr ccmpop;
+ ASSERT(!operand.NeedsRelocation());
+ if (operand.IsImmediate()) {
+ int64_t immediate = operand.immediate();
+ ASSERT(IsImmConditionalCompare(immediate));
+ ccmpop = ConditionalCompareImmediateFixed | op | ImmCondCmp(immediate);
+ } else {
+ ASSERT(operand.IsShiftedRegister() && (operand.shift_amount() == 0));
+ ccmpop = ConditionalCompareRegisterFixed | op | Rm(operand.reg());
+ }
+ Emit(SF(rn) | ccmpop | Cond(cond) | Rn(rn) | Nzcv(nzcv));
+}
+
+
+// Shared emitter for the one-source data processing instructions
+// (rbit/rev*/clz/cls).
+void Assembler::DataProcessing1Source(const Register& rd,
+ const Register& rn,
+ DataProcessing1SourceOp op) {
+ ASSERT(rd.SizeInBits() == rn.SizeInBits());
+ Emit(SF(rn) | op | Rn(rn) | Rd(rd));
+}
+
+
+// Shared emitter for one-source FP operations (fabs/fneg/fsqrt/frint*/fcvt).
+void Assembler::FPDataProcessing1Source(const FPRegister& fd,
+ const FPRegister& fn,
+ FPDataProcessing1SourceOp op) {
+ Emit(FPType(fn) | op | Rn(fn) | Rd(fd));
+}
+
+
+// Shared emitter for two-source FP operations; all registers must be the
+// same size.
+void Assembler::FPDataProcessing2Source(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm,
+ FPDataProcessing2SourceOp op) {
+ ASSERT(fd.SizeInBits() == fn.SizeInBits());
+ ASSERT(fd.SizeInBits() == fm.SizeInBits());
+ Emit(FPType(fd) | op | Rm(fm) | Rn(fn) | Rd(fd));
+}
+
+
+// Shared emitter for the fused multiply-add family; all registers must be
+// the same size and type.
+void Assembler::FPDataProcessing3Source(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm,
+ const FPRegister& fa,
+ FPDataProcessing3SourceOp op) {
+ ASSERT(AreSameSizeAndType(fd, fn, fm, fa));
+ Emit(FPType(fd) | op | Rm(fm) | Rn(fn) | Rd(fd) | Ra(fa));
+}
+
+
+// Emit an immediate shift of rn into rd via the lsl/lsr/asr/ror aliases.
+void Assembler::EmitShift(const Register& rd,
+ const Register& rn,
+ Shift shift,
+ unsigned shift_amount) {
+ switch (shift) {
+ case LSL:
+ lsl(rd, rn, shift_amount);
+ break;
+ case LSR:
+ lsr(rd, rn, shift_amount);
+ break;
+ case ASR:
+ asr(rd, rn, shift_amount);
+ break;
+ case ROR:
+ ror(rd, rn, shift_amount);
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
+
+
+// Emit the instruction sequence equivalent to extending rn and then
+// left-shifting it by 'left_shift' into rd (used to materialise
+// extended-register operands as bitfield moves or plain shifts).
+void Assembler::EmitExtendShift(const Register& rd,
+ const Register& rn,
+ Extend extend,
+ unsigned left_shift) {
+ ASSERT(rd.SizeInBits() >= rn.SizeInBits());
+ unsigned reg_size = rd.SizeInBits();
+ // Use the correct size of register.
+ Register rn_ = Register::Create(rn.code(), rd.SizeInBits());
+ // Bits extracted are high_bit:0.
+ unsigned high_bit = (8 << (extend & 0x3)) - 1;
+ // Number of bits left in the result that are not introduced by the shift.
+ unsigned non_shift_bits = (reg_size - left_shift) & (reg_size - 1);
+
+ if ((non_shift_bits > high_bit) || (non_shift_bits == 0)) {
+ switch (extend) {
+ case UXTB:
+ case UXTH:
+ case UXTW: ubfm(rd, rn_, non_shift_bits, high_bit); break;
+ case SXTB:
+ case SXTH:
+ case SXTW: sbfm(rd, rn_, non_shift_bits, high_bit); break;
+ case UXTX:
+ case SXTX: {
+ ASSERT(rn.SizeInBits() == kXRegSize);
+ // Nothing to extend. Just shift.
+ lsl(rd, rn_, left_shift);
+ break;
+ }
+ default: UNREACHABLE();
+ }
+ } else {
+ // No need to extend as the extended bits would be shifted away.
+ lsl(rd, rn_, left_shift);
+ }
+}
+
+
+// Emit a data processing instruction with a shifted-register operand.
+void Assembler::DataProcShiftedRegister(const Register& rd,
+ const Register& rn,
+ const Operand& operand,
+ FlagsUpdate S,
+ Instr op) {
+ ASSERT(operand.IsShiftedRegister());
+ // W-register shifts are limited to 5 bits of shift amount.
+ ASSERT(rn.Is64Bits() || (rn.Is32Bits() && is_uint5(operand.shift_amount())));
+ ASSERT(!operand.NeedsRelocation());
+ Emit(SF(rd) | op | Flags(S) |
+ ShiftDP(operand.shift()) | ImmDPShift(operand.shift_amount()) |
+ Rm(operand.reg()) | Rn(rn) | Rd(rd));
+}
+
+
+// Emit a data processing instruction with an extended-register operand.
+// Flag-setting forms write Rd; plain forms may target the stack pointer.
+void Assembler::DataProcExtendedRegister(const Register& rd,
+ const Register& rn,
+ const Operand& operand,
+ FlagsUpdate S,
+ Instr op) {
+ ASSERT(!operand.NeedsRelocation());
+ Instr dest_reg = (S == SetFlags) ? Rd(rd) : RdSP(rd);
+ Emit(SF(rd) | op | Flags(S) | Rm(operand.reg()) |
+ ExtendMode(operand.extend()) | ImmExtendShift(operand.shift_amount()) |
+ dest_reg | RnSP(rn));
+}
+
+
+// True iff 'immediate' fits an add/sub immediate: a 12-bit unsigned
+// value, optionally left-shifted by 12.
+bool Assembler::IsImmAddSub(int64_t immediate) {
+ return is_uint12(immediate) ||
+ (is_uint12(immediate >> 12) && ((immediate & 0xfff) == 0));
+}
+
+// Shared emitter for single-register loads and stores. Selects between
+// scaled/unscaled immediate offset, register offset, and pre/post-index
+// addressing; unencodable offsets are handled by the macro assembler.
+void Assembler::LoadStore(const CPURegister& rt,
+ const MemOperand& addr,
+ LoadStoreOp op) {
+ Instr memop = op | Rt(rt) | RnSP(addr.base());
+ ptrdiff_t offset = addr.offset();
+
+ if (addr.IsImmediateOffset()) {
+ LSDataSize size = CalcLSDataSize(op);
+ if (IsImmLSScaled(offset, size)) {
+ // Use the scaled addressing mode.
+ Emit(LoadStoreUnsignedOffsetFixed | memop |
+ ImmLSUnsigned(offset >> size));
+ } else if (IsImmLSUnscaled(offset)) {
+ // Use the unscaled addressing mode.
+ Emit(LoadStoreUnscaledOffsetFixed | memop | ImmLS(offset));
+ } else {
+ // This case is handled in the macro assembler.
+ UNREACHABLE();
+ }
+ } else if (addr.IsRegisterOffset()) {
+ Extend ext = addr.extend();
+ Shift shift = addr.shift();
+ unsigned shift_amount = addr.shift_amount();
+
+ // LSL is encoded in the option field as UXTX.
+ if (shift == LSL) {
+ ext = UXTX;
+ }
+
+ // Shifts are encoded in one bit, indicating a left shift by the memory
+ // access size.
+ ASSERT((shift_amount == 0) ||
+ (shift_amount == static_cast<unsigned>(CalcLSDataSize(op))));
+ Emit(LoadStoreRegisterOffsetFixed | memop | Rm(addr.regoffset()) |
+ ExtendMode(ext) | ImmShiftLS((shift_amount > 0) ? 1 : 0));
+ } else {
+ // Pre-index and post-index modes.
+ ASSERT(!rt.Is(addr.base()));
+ if (IsImmLSUnscaled(offset)) {
+ if (addr.IsPreIndex()) {
+ Emit(LoadStorePreIndexFixed | memop | ImmLS(offset));
+ } else {
+ ASSERT(addr.IsPostIndex());
+ Emit(LoadStorePostIndexFixed | memop | ImmLS(offset));
+ }
+ } else {
+ // This case is handled in the macro assembler.
+ UNREACHABLE();
+ }
+ }
+}
+
+
+// True iff 'offset' fits the signed 9-bit unscaled load/store field.
+bool Assembler::IsImmLSUnscaled(ptrdiff_t offset) {
+ return is_int9(offset);
+}
+
+
+// True iff 'offset' is a multiple of the access size that fits the
+// unsigned, scaled 12-bit load/store field.
+bool Assembler::IsImmLSScaled(ptrdiff_t offset, LSDataSize size) {
+ bool offset_is_size_multiple = (((offset >> size) << size) == offset);
+ return offset_is_size_multiple && is_uint12(offset >> size);
+}
+
+
+// Load rt from a PC-relative literal-pool slot at the given byte offset
+// (must be literal-entry aligned).
+void Assembler::LoadLiteral(const CPURegister& rt, int offset_from_pc) {
+ ASSERT((offset_from_pc & ((1 << kLiteralEntrySizeLog2) - 1)) == 0);
+ // The pattern 'ldr xzr, #offset' is used to indicate the beginning of a
+ // constant pool. It should not be emitted.
+ ASSERT(!rt.Is(xzr));
+ Emit(LDR_x_lit |
+ ImmLLiteral(offset_from_pc >> kLiteralEntrySizeLog2) |
+ Rt(rt));
+}
+
+
+// Emit a literal load whose pool entry carries relocation information.
+// The literal offset is emitted as 0 and patched when the constant pool
+// is written out.
+void Assembler::LoadRelocatedValue(const CPURegister& rt,
+ const Operand& operand,
+ LoadLiteralOp op) {
+ int64_t imm = operand.immediate();
+ ASSERT(is_int32(imm) || is_uint32(imm) || (rt.Is64Bits()));
+ RecordRelocInfo(operand.rmode(), imm);
+ BlockConstPoolFor(1);
+ Emit(op | ImmLLiteral(0) | Rt(rt));
+}
+
+
+// Test if a given value can be encoded in the immediate field of a logical
+// instruction.
+// If it can be encoded, the function returns true, and values pointed to by n,
+// imm_s and imm_r are updated with immediates encoded in the format required
+// by the corresponding fields in the logical instruction.
+// If it can not be encoded, the function returns false, and the values pointed
+// to by n, imm_s and imm_r are undefined.
+bool Assembler::IsImmLogical(uint64_t value,
+ unsigned width,
+ unsigned* n,
+ unsigned* imm_s,
+ unsigned* imm_r) {
+ ASSERT((n != NULL) && (imm_s != NULL) && (imm_r != NULL));
+ ASSERT((width == kWRegSize) || (width == kXRegSize));
+
+ // Logical immediates are encoded using parameters n, imm_s and imm_r using
+ // the following table:
+ //
+ // N imms immr size S R
+ // 1 ssssss rrrrrr 64 UInt(ssssss) UInt(rrrrrr)
+ // 0 0sssss xrrrrr 32 UInt(sssss) UInt(rrrrr)
+ // 0 10ssss xxrrrr 16 UInt(ssss) UInt(rrrr)
+ // 0 110sss xxxrrr 8 UInt(sss) UInt(rrr)
+ // 0 1110ss xxxxrr 4 UInt(ss) UInt(rr)
+ // 0 11110s xxxxxr 2 UInt(s) UInt(r)
+ // (s bits must not be all set)
+ //
+ // A pattern is constructed of size bits, where the least significant S+1
+ // bits are set. The pattern is rotated right by R, and repeated across a
+ // 32 or 64-bit value, depending on destination register width.
+ //
+ // To test if an arbitary immediate can be encoded using this scheme, an
+ // iterative algorithm is used.
+ //
+ // TODO(mcapewel) This code does not consider using X/W register overlap to
+ // support 64-bit immediates where the top 32-bits are zero, and the bottom
+ // 32-bits are an encodable logical immediate.
+
+ // 1. If the value has all set or all clear bits, it can't be encoded.
+ if ((value == 0) || (value == 0xffffffffffffffffUL) ||
+ ((width == kWRegSize) && (value == 0xffffffff))) {
+ return false;
+ }
+
+ unsigned lead_zero = CountLeadingZeros(value, width);
+ unsigned lead_one = CountLeadingZeros(~value, width);
+ unsigned trail_zero = CountTrailingZeros(value, width);
+ unsigned trail_one = CountTrailingZeros(~value, width);
+ unsigned set_bits = CountSetBits(value, width);
+
+ // The fixed bits in the immediate s field.
+ // If width == 64 (X reg), start at 0xFFFFFF80.
+ // If width == 32 (W reg), start at 0xFFFFFFC0, as the iteration for 64-bit
+ // widths won't be executed.
+ int imm_s_fixed = (width == kXRegSize) ? -128 : -64;
+ int imm_s_mask = 0x3F;
+
+ for (;;) {
+ // 2. If the value is two bits wide, it can be encoded.
+ if (width == 2) {
+ *n = 0;
+ *imm_s = 0x3C;
+ *imm_r = (value & 3) - 1;
+ return true;
+ }
+
+ *n = (width == 64) ? 1 : 0;
+ *imm_s = ((imm_s_fixed | (set_bits - 1)) & imm_s_mask);
+ if ((lead_zero + set_bits) == width) {
+ *imm_r = 0;
+ } else {
+ *imm_r = (lead_zero > 0) ? (width - trail_zero) : lead_one;
+ }
+
+ // 3. If the sum of leading zeros, trailing zeros and set bits is equal to
+ // the bit width of the value, it can be encoded.
+ if (lead_zero + trail_zero + set_bits == width) {
+ return true;
+ }
+
+ // 4. If the sum of leading ones, trailing ones and unset bits in the
+ // value is equal to the bit width of the value, it can be encoded.
+ if (lead_one + trail_one + (width - set_bits) == width) {
+ return true;
+ }
+
+ // 5. If the most-significant half of the bitwise value is equal to the
+ // least-significant half, return to step 2 using the least-significant
+ // half of the value.
+ // Use a ULL literal: 'unsigned long' is only 32 bits on LLP64 targets,
+ // where 1UL << 32 (width == 64 on the first iteration) would be
+ // undefined behaviour.
+ uint64_t mask = (1ULL << (width >> 1)) - 1;
+ if ((value & mask) == ((value >> (width >> 1)) & mask)) {
+ width >>= 1;
+ set_bits >>= 1;
+ imm_s_fixed >>= 1;
+ continue;
+ }
+
+ // 6. Otherwise, the value can't be encoded.
+ return false;
+ }
+}
+
+
+bool Assembler::IsImmConditionalCompare(int64_t immediate) {
+ return is_uint5(immediate);
+}
+
+
+bool Assembler::IsImmFP32(float imm) {
+ // Valid values will have the form:
+ // aBbb.bbbc.defg.h000.0000.0000.0000.0000
+ uint32_t bits = float_to_rawbits(imm);
+ // bits[18..0] are cleared.
+ if ((bits & 0x7ffff) != 0) {
+ return false;
+ }
+
+ // bits[29..25] are all set or all cleared.
+ uint32_t b_pattern = (bits >> 16) & 0x3e00;
+ if (b_pattern != 0 && b_pattern != 0x3e00) {
+ return false;
+ }
+
+ // bit[30] and bit[29] are opposite.
+ if (((bits ^ (bits << 1)) & 0x40000000) == 0) {
+ return false;
+ }
+
+ return true;
+}
+
+
+bool Assembler::IsImmFP64(double imm) {
+ // Valid values will have the form:
+ // aBbb.bbbb.bbcd.efgh.0000.0000.0000.0000
+ // 0000.0000.0000.0000.0000.0000.0000.0000
+ uint64_t bits = double_to_rawbits(imm);
+ // bits[47..0] are cleared.
+ if ((bits & 0xffffffffffffL) != 0) {
+ return false;
+ }
+
+ // bits[61..54] are all set or all cleared.
+ uint32_t b_pattern = (bits >> 48) & 0x3fc0;
+ if (b_pattern != 0 && b_pattern != 0x3fc0) {
+ return false;
+ }
+
+ // bit[62] and bit[61] are opposite.
+ if (((bits ^ (bits << 1)) & 0x4000000000000000L) == 0) {
+ return false;
+ }
+
+ return true;
+}
+
+
+void Assembler::GrowBuffer() {
+ if (!own_buffer_) FATAL("external code buffer is too small");
+
+ // Compute new buffer size.
+ CodeDesc desc; // the new buffer
+ if (buffer_size_ < 4 * KB) {
+ desc.buffer_size = 4 * KB;
+ } else if (buffer_size_ < 1 * MB) {
+ desc.buffer_size = 2 * buffer_size_;
+ } else {
+ desc.buffer_size = buffer_size_ + 1 * MB;
+ }
+ CHECK_GT(desc.buffer_size, 0); // No overflow.
+
+ byte* buffer = reinterpret_cast<byte*>(buffer_);
+
+ // Set up new buffer.
+ desc.buffer = NewArray<byte>(desc.buffer_size);
+
+ desc.instr_size = pc_offset();
+ desc.reloc_size = (buffer + buffer_size_) - reloc_info_writer.pos();
+
+ // Copy the data.
+ intptr_t pc_delta = desc.buffer - buffer;
+ intptr_t rc_delta = (desc.buffer + desc.buffer_size) -
+ (buffer + buffer_size_);
+ memmove(desc.buffer, buffer, desc.instr_size);
+ memmove(reloc_info_writer.pos() + rc_delta,
+ reloc_info_writer.pos(), desc.reloc_size);
+
+ // Switch buffers.
+ DeleteArray(buffer_);
+ buffer_ = desc.buffer;
+ buffer_size_ = desc.buffer_size;
+ pc_ = reinterpret_cast<byte*>(pc_) + pc_delta;
+ reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta,
+ reloc_info_writer.last_pc() + pc_delta);
+
+ // None of our relocation types are pc relative pointing outside the code
+ // buffer nor pc absolute pointing inside the code buffer, so there is no need
+ // to relocate any emitted relocation entries.
+
+ // Relocate pending relocation entries.
+ for (int i = 0; i < num_pending_reloc_info_; i++) {
+ RelocInfo& rinfo = pending_reloc_info_[i];
+ ASSERT(rinfo.rmode() != RelocInfo::COMMENT &&
+ rinfo.rmode() != RelocInfo::POSITION);
+ if (rinfo.rmode() != RelocInfo::JS_RETURN) {
+ rinfo.set_pc(rinfo.pc() + pc_delta);
+ }
+ }
+}
+
+
+void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, int64_t data) {
+ // We do not try to reuse pool constants.
+ RelocInfo rinfo(reinterpret_cast<byte*>(pc_), rmode, data, NULL);
+ if (((rmode >= RelocInfo::JS_RETURN) &&
+ (rmode <= RelocInfo::DEBUG_BREAK_SLOT)) ||
+ (rmode == RelocInfo::CONST_POOL)) {
+ // Adjust code for new modes.
+ ASSERT(RelocInfo::IsDebugBreakSlot(rmode)
+ || RelocInfo::IsJSReturn(rmode)
+ || RelocInfo::IsComment(rmode)
+ || RelocInfo::IsPosition(rmode)
+ || RelocInfo::IsConstPool(rmode));
+ // These modes do not need an entry in the constant pool.
+ } else {
+ ASSERT(num_pending_reloc_info_ < kMaxNumPendingRelocInfo);
+ if (num_pending_reloc_info_ == 0) {
+ first_const_pool_use_ = pc_offset();
+ }
+ pending_reloc_info_[num_pending_reloc_info_++] = rinfo;
+ // Make sure the constant pool is not emitted in place of the next
+ // instruction for which we just recorded relocation info.
+ BlockConstPoolFor(1);
+ }
+
+ if (!RelocInfo::IsNone(rmode)) {
+ // Don't record external references unless the heap will be serialized.
+ if (rmode == RelocInfo::EXTERNAL_REFERENCE) {
+#ifdef DEBUG
+ if (!Serializer::enabled()) {
+ Serializer::TooLateToEnableNow();
+ }
+#endif
+ if (!Serializer::enabled() && !emit_debug_code()) {
+ return;
+ }
+ }
+ ASSERT(buffer_space() >= kMaxRelocSize); // too late to grow buffer here
+ if (rmode == RelocInfo::CODE_TARGET_WITH_ID) {
+ RelocInfo reloc_info_with_ast_id(
+ reinterpret_cast<byte*>(pc_), rmode, RecordedAstId().ToInt(), NULL);
+ ClearRecordedAstId();
+ reloc_info_writer.Write(&reloc_info_with_ast_id);
+ } else {
+ reloc_info_writer.Write(&rinfo);
+ }
+ }
+}
+
+
+void Assembler::BlockConstPoolFor(int instructions) {
+ int pc_limit = pc_offset() + instructions * kInstructionSize;
+ if (no_const_pool_before_ < pc_limit) {
+ // If there are some pending entries, the constant pool cannot be blocked
+ // further than first_const_pool_use_ + kMaxDistToPool
+ ASSERT((num_pending_reloc_info_ == 0) ||
+ (pc_limit < (first_const_pool_use_ + kMaxDistToPool)));
+ no_const_pool_before_ = pc_limit;
+ }
+
+ if (next_buffer_check_ < no_const_pool_before_) {
+ next_buffer_check_ = no_const_pool_before_;
+ }
+}
+
+
+// TODO(all): We are never trying to emit constant pools after unconditional
+// branches, because we only call it from Assembler::Emit() (or manually).
+// We should try to enable that.
+void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
+ // Some short sequences of instructions must not be broken up by constant
+ // pool emission; such sequences are protected by calls to BlockConstPoolFor
+ // and BlockConstPoolScope.
+ if (is_const_pool_blocked()) {
+ // Something is wrong if emission is forced and blocked at the same time.
+ ASSERT(!force_emit);
+ return;
+ }
+
+ // There is nothing to do if there are no pending constant pool entries.
+ if (num_pending_reloc_info_ == 0) {
+ // Calculate the offset of the next check.
+ next_buffer_check_ = pc_offset() + kCheckPoolInterval;
+ return;
+ }
+
+ // We emit a constant pool when:
+ // * requested to do so by parameter force_emit (e.g. after each function).
+ // * the distance to the first instruction accessing the constant pool is
+ // kAvgDistToPool or more.
+ // * no jump is required and the distance to the first instruction accessing
+ // the constant pool is at least kMaxDistToPool / 2.
+ ASSERT(first_const_pool_use_ >= 0);
+ int dist = pc_offset() - first_const_pool_use_;
+ if (!force_emit && dist < kAvgDistToPool &&
+ (require_jump || (dist < (kMaxDistToPool / 2)))) {
+ return;
+ }
+
+ // Check that the code buffer is large enough before emitting the constant
+ // pool (include the jump over the pool and the constant pool marker and
+ // the gap to the relocation information).
+ int jump_instr = require_jump ? kInstructionSize : 0;
+ int size = jump_instr + kInstructionSize +
+ num_pending_reloc_info_ * kPointerSize;
+ int needed_space = size + kGap;
+ while (buffer_space() <= needed_space) {
+ GrowBuffer();
+ }
+
+ {
+ // Block recursive calls to CheckConstPool.
+ BlockConstPoolScope block_const_pool(this);
+ RecordComment("[ Constant Pool");
+ RecordConstPool(size);
+
+ // Emit jump over constant pool if necessary.
+ Label after_pool;
+ if (require_jump) {
+ b(&after_pool);
+ }
+
+ // Emit a constant pool header. The header has two goals:
+ // 1) Encode the size of the constant pool, for use by the disassembler.
+ // 2) Terminate the program, to try to prevent execution from accidentally
+ // flowing into the constant pool.
+ // The header is therefore made of two a64 instructions:
+ // ldr xzr, #<size of the constant pool in 32-bit words>
+ // blr xzr
+ // If executed the code will likely segfault and lr will point to the
+ // beginning of the constant pool.
+ // TODO(all): currently each relocated constant is 64 bits, consider adding
+ // support for 32-bit entries.
+ ConstantPoolMarker(2 * num_pending_reloc_info_);
+ ConstantPoolGuard();
+
+ // Emit constant pool entries.
+ for (int i = 0; i < num_pending_reloc_info_; i++) {
+ RelocInfo& rinfo = pending_reloc_info_[i];
+ ASSERT(rinfo.rmode() != RelocInfo::COMMENT &&
+ rinfo.rmode() != RelocInfo::POSITION &&
+ rinfo.rmode() != RelocInfo::STATEMENT_POSITION &&
+ rinfo.rmode() != RelocInfo::CONST_POOL);
+
+ Instruction* instr = reinterpret_cast<Instruction*>(rinfo.pc());
+ // Instruction to patch must be 'ldr rd, [pc, #offset]' with offset == 0.
+ ASSERT(instr->IsLdrLiteral() &&
+ instr->ImmLLiteral() == 0);
+
+ instr->SetImmPCOffsetTarget(reinterpret_cast<Instruction*>(pc_));
+ dc64(rinfo.data());
+ }
+
+ num_pending_reloc_info_ = 0;
+ first_const_pool_use_ = -1;
+
+ RecordComment("]");
+
+ if (after_pool.is_linked()) {
+ bind(&after_pool);
+ }
+ }
+
+ // Since a constant pool was just emitted, move the check offset forward by
+ // the standard interval.
+ next_buffer_check_ = pc_offset() + kCheckPoolInterval;
+}
+
+
+void Assembler::RecordComment(const char* msg) {
+ if (FLAG_code_comments) {
+ CheckBuffer();
+ RecordRelocInfo(RelocInfo::COMMENT, reinterpret_cast<intptr_t>(msg));
+ }
+}
+
+
+int Assembler::buffer_space() const {
+ return reloc_info_writer.pos() - reinterpret_cast<byte*>(pc_);
+}
+
+
+void Assembler::RecordJSReturn() {
+ positions_recorder()->WriteRecordedPositions();
+ CheckBuffer();
+ RecordRelocInfo(RelocInfo::JS_RETURN);
+}
+
+
+void Assembler::RecordDebugBreakSlot() {
+ positions_recorder()->WriteRecordedPositions();
+ CheckBuffer();
+ RecordRelocInfo(RelocInfo::DEBUG_BREAK_SLOT);
+}
+
+
+void Assembler::RecordConstPool(int size) {
+ // We only need this for debugger support, to correctly compute offsets in the
+ // code.
+#ifdef ENABLE_DEBUGGER_SUPPORT
+ RecordRelocInfo(RelocInfo::CONST_POOL, static_cast<intptr_t>(size));
+#endif
+}
+
+
+} } // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_A64
--- /dev/null
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_A64_ASSEMBLER_A64_H_
+#define V8_A64_ASSEMBLER_A64_H_
+
+#include <list>
+
+#include "globals.h"
+#include "utils.h"
+#include "assembler.h"
+#include "serialize.h"
+#include "a64/instructions-a64.h"
+#include "a64/cpu-a64.h"
+
+
+namespace v8 {
+namespace internal {
+
+
+// -----------------------------------------------------------------------------
+// Registers.
+#define REGISTER_CODE_LIST(R) \
+R(0) R(1) R(2) R(3) R(4) R(5) R(6) R(7) \
+R(8) R(9) R(10) R(11) R(12) R(13) R(14) R(15) \
+R(16) R(17) R(18) R(19) R(20) R(21) R(22) R(23) \
+R(24) R(25) R(26) R(27) R(28) R(29) R(30) R(31)
+
+
+static const int kRegListSizeInBits = sizeof(RegList) * kBitsPerByte;
+
+
+// Some CPURegister methods can return Register and FPRegister types, so we
+// need to declare them in advance.
+class Register;
+class FPRegister;
+
+
+struct CPURegister {
+ enum RegisterType {
+ // The kInvalid value is used to detect uninitialized static instances,
+ // which are always zero-initialized before any constructors are called.
+ kInvalid = 0,
+ kRegister,
+ kFPRegister,
+ kNoRegister
+ };
+
+ static CPURegister Create(unsigned code, unsigned size, RegisterType type) {
+ CPURegister r = {code, size, type};
+ return r;
+ }
+
+ unsigned code() const;
+ RegisterType type() const;
+ RegList Bit() const;
+ unsigned SizeInBits() const;
+ int SizeInBytes() const;
+ bool Is32Bits() const;
+ bool Is64Bits() const;
+ bool IsValid() const;
+ bool IsValidOrNone() const;
+ bool IsValidRegister() const;
+ bool IsValidFPRegister() const;
+ bool IsNone() const;
+ bool Is(const CPURegister& other) const;
+
+ bool IsZero() const;
+ bool IsSP() const;
+
+ bool IsRegister() const;
+ bool IsFPRegister() const;
+
+ Register X() const;
+ Register W() const;
+ FPRegister D() const;
+ FPRegister S() const;
+
+ bool IsSameSizeAndType(const CPURegister& other) const;
+
+ // V8 compatibility.
+ bool is(const CPURegister& other) const { return Is(other); }
+ bool is_valid() const { return IsValid(); }
+
+ unsigned reg_code;
+ unsigned reg_size;
+ RegisterType reg_type;
+};
+
+
+struct Register : public CPURegister {
+ static Register Create(unsigned code, unsigned size) {
+ return CPURegister::Create(code, size, CPURegister::kRegister);
+ }
+
+ Register() {
+ reg_code = 0;
+ reg_size = 0;
+ reg_type = CPURegister::kNoRegister;
+ }
+
+ Register(const CPURegister& r) { // NOLINT(runtime/explicit)
+ reg_code = r.reg_code;
+ reg_size = r.reg_size;
+ reg_type = r.reg_type;
+ ASSERT(IsValidOrNone());
+ }
+
+ bool IsValid() const {
+ ASSERT(IsRegister() || IsNone());
+ return IsValidRegister();
+ }
+
+ static Register XRegFromCode(unsigned code);
+ static Register WRegFromCode(unsigned code);
+
+ // Start of V8 compatibility section ---------------------
+ // These members are necessary for compilation.
+ // A few of them may be unused for now.
+
+ static const int kNumRegisters = kNumberOfRegisters;
+ static int NumRegisters() { return kNumRegisters; }
+
+ // We allow crankshaft to use the following registers:
+ // - x0 to x15
+ // - x18 to x24
+ // - x27 (also context)
+ //
+ // TODO(all): Register x25 is currently free and could be available for
+ // crankshaft, but we don't use it as we might use it as a per function
+ // literal pool pointer in the future.
+ //
+ // TODO(all): Consider storing cp in x25 to have only two ranges.
+ // We split allocatable registers in three ranges called
+ // - "low range"
+ // - "high range"
+ // - "context"
+ static const unsigned kAllocatableLowRangeBegin = 0;
+ static const unsigned kAllocatableLowRangeEnd = 15;
+ static const unsigned kAllocatableHighRangeBegin = 18;
+ static const unsigned kAllocatableHighRangeEnd = 24;
+ static const unsigned kAllocatableContext = 27;
+
+ // Gap between low and high ranges.
+ static const int kAllocatableRangeGapSize =
+ (kAllocatableHighRangeBegin - kAllocatableLowRangeEnd) - 1;
+
+ static const int kMaxNumAllocatableRegisters =
+ (kAllocatableLowRangeEnd - kAllocatableLowRangeBegin + 1) +
+ (kAllocatableHighRangeEnd - kAllocatableHighRangeBegin + 1) + 1; // cp
+ static int NumAllocatableRegisters() { return kMaxNumAllocatableRegisters; }
+
+ // Return true if the register is one that crankshaft can allocate.
+ bool IsAllocatable() const {
+ return ((reg_code == kAllocatableContext) ||
+ (reg_code <= kAllocatableLowRangeEnd) ||
+ ((reg_code >= kAllocatableHighRangeBegin) &&
+ (reg_code <= kAllocatableHighRangeEnd)));
+ }
+
+ static Register FromAllocationIndex(unsigned index) {
+ ASSERT(index < static_cast<unsigned>(NumAllocatableRegisters()));
+ // cp is the last allocatable register.
+ if (index == (static_cast<unsigned>(NumAllocatableRegisters() - 1))) {
+ return from_code(kAllocatableContext);
+ }
+
+ // Handle low and high ranges.
+ return (index <= kAllocatableLowRangeEnd)
+ ? from_code(index)
+ : from_code(index + kAllocatableRangeGapSize);
+ }
+
+ static const char* AllocationIndexToString(int index) {
+ ASSERT((index >= 0) && (index < NumAllocatableRegisters()));
+ ASSERT((kAllocatableLowRangeBegin == 0) &&
+ (kAllocatableLowRangeEnd == 15) &&
+ (kAllocatableHighRangeBegin == 18) &&
+ (kAllocatableHighRangeEnd == 24) &&
+ (kAllocatableContext == 27));
+ const char* const names[] = {
+ "x0", "x1", "x2", "x3", "x4",
+ "x5", "x6", "x7", "x8", "x9",
+ "x10", "x11", "x12", "x13", "x14",
+ "x15", "x18", "x19", "x20", "x21",
+ "x22", "x23", "x24", "x27",
+ };
+ return names[index];
+ }
+
+ static int ToAllocationIndex(Register reg) {
+ ASSERT(reg.IsAllocatable());
+ unsigned code = reg.code();
+ if (code == kAllocatableContext) {
+ return NumAllocatableRegisters() - 1;
+ }
+
+ return (code <= kAllocatableLowRangeEnd)
+ ? code
+ : code - kAllocatableRangeGapSize;
+ }
+
+ static Register from_code(int code) {
+ // Always return an X register.
+ return Register::Create(code, kXRegSize);
+ }
+
+ // End of V8 compatibility section -----------------------
+};
+
+
+struct FPRegister : public CPURegister {
+ static FPRegister Create(unsigned code, unsigned size) {
+ return CPURegister::Create(code, size, CPURegister::kFPRegister);
+ }
+
+ FPRegister() {
+ reg_code = 0;
+ reg_size = 0;
+ reg_type = CPURegister::kNoRegister;
+ }
+
+ FPRegister(const CPURegister& r) { // NOLINT(runtime/explicit)
+ reg_code = r.reg_code;
+ reg_size = r.reg_size;
+ reg_type = r.reg_type;
+ ASSERT(IsValidOrNone());
+ }
+
+ bool IsValid() const {
+ ASSERT(IsFPRegister() || IsNone());
+ return IsValidFPRegister();
+ }
+
+ static FPRegister SRegFromCode(unsigned code);
+ static FPRegister DRegFromCode(unsigned code);
+
+ // Start of V8 compatibility section ---------------------
+ static const int kMaxNumRegisters = kNumberOfFPRegisters;
+
+ // Crankshaft can use all the FP registers except:
+ // - d29 which is used in crankshaft as a double scratch register
+ // - d30 which is used to keep the 0 double value
+ // - d31 which is used in the MacroAssembler as a double scratch register
+ static const int kNumReservedRegisters = 3;
+ static const int kMaxNumAllocatableRegisters =
+ kNumberOfFPRegisters - kNumReservedRegisters;
+ static int NumAllocatableRegisters() { return kMaxNumAllocatableRegisters; }
+ static const RegList kAllocatableFPRegisters =
+ (1 << kMaxNumAllocatableRegisters) - 1;
+
+ static FPRegister FromAllocationIndex(int index) {
+ ASSERT((index >= 0) && (index < NumAllocatableRegisters()));
+ return from_code(index);
+ }
+
+ static const char* AllocationIndexToString(int index) {
+ ASSERT((index >= 0) && (index < NumAllocatableRegisters()));
+ const char* const names[] = {
+ "d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7",
+ "d8", "d9", "d10", "d11", "d12", "d13", "d14", "d15",
+ "d16", "d17", "d18", "d19", "d20", "d21", "d22", "d23",
+ "d24", "d25", "d26", "d27", "d28",
+ };
+ return names[index];
+ }
+
+ static int ToAllocationIndex(FPRegister reg) {
+ int code = reg.code();
+ ASSERT(code < NumAllocatableRegisters());
+ return code;
+ }
+
+ static FPRegister from_code(int code) {
+ // Always return a D register.
+ return FPRegister::Create(code, kDRegSize);
+ }
+ // End of V8 compatibility section -----------------------
+};
+
+
+STATIC_ASSERT(sizeof(CPURegister) == sizeof(Register));
+STATIC_ASSERT(sizeof(CPURegister) == sizeof(FPRegister));
+
+
+#if defined(A64_DEFINE_REG_STATICS)
+#define INITIALIZE_REGISTER(register_class, name, code, size, type) \
+ const CPURegister init_##register_class##_##name = {code, size, type}; \
+ const register_class& name = *reinterpret_cast<const register_class*>( \
+ &init_##register_class##_##name)
+#define ALIAS_REGISTER(register_class, alias, name) \
+ const register_class& alias = *reinterpret_cast<const register_class*>( \
+ &init_##register_class##_##name)
+#else
+#define INITIALIZE_REGISTER(register_class, name, code, size, type) \
+ extern const register_class& name
+#define ALIAS_REGISTER(register_class, alias, name) \
+ extern const register_class& alias
+#endif // defined(A64_DEFINE_REG_STATICS)
+
+// No*Reg is used to indicate an unused argument, or an error case. Note that
+// these all compare equal (using the Is() method). The Register and FPRegister
+// variants are provided for convenience.
+INITIALIZE_REGISTER(Register, NoReg, 0, 0, CPURegister::kNoRegister);
+INITIALIZE_REGISTER(FPRegister, NoFPReg, 0, 0, CPURegister::kNoRegister);
+INITIALIZE_REGISTER(CPURegister, NoCPUReg, 0, 0, CPURegister::kNoRegister);
+
+// v8 compatibility.
+INITIALIZE_REGISTER(Register, no_reg, 0, 0, CPURegister::kNoRegister);
+
+#define DEFINE_REGISTERS(N) \
+ INITIALIZE_REGISTER(Register, w##N, N, kWRegSize, CPURegister::kRegister); \
+ INITIALIZE_REGISTER(Register, x##N, N, kXRegSize, CPURegister::kRegister);
+REGISTER_CODE_LIST(DEFINE_REGISTERS)
+#undef DEFINE_REGISTERS
+
+INITIALIZE_REGISTER(Register, wcsp, kSPRegInternalCode, kWRegSize,
+ CPURegister::kRegister);
+INITIALIZE_REGISTER(Register, csp, kSPRegInternalCode, kXRegSize,
+ CPURegister::kRegister);
+
+#define DEFINE_FPREGISTERS(N) \
+ INITIALIZE_REGISTER(FPRegister, s##N, N, kSRegSize, \
+ CPURegister::kFPRegister); \
+ INITIALIZE_REGISTER(FPRegister, d##N, N, kDRegSize, CPURegister::kFPRegister);
+REGISTER_CODE_LIST(DEFINE_FPREGISTERS)
+#undef DEFINE_FPREGISTERS
+
+#undef INITIALIZE_REGISTER
+
+// Register aliases.
+ALIAS_REGISTER(Register, ip0, x16);
+ALIAS_REGISTER(Register, ip1, x17);
+ALIAS_REGISTER(Register, wip0, w16);
+ALIAS_REGISTER(Register, wip1, w17);
+// Root register.
+ALIAS_REGISTER(Register, root, x26);
+ALIAS_REGISTER(Register, rr, x26);
+// Context pointer register.
+ALIAS_REGISTER(Register, cp, x27);
+// We use a register as a JS stack pointer to overcome the restriction on the
+// architectural SP alignment.
+// We chose x28 because it is contiguous with the other specific purpose
+// registers.
+STATIC_ASSERT(kJSSPCode == 28);
+ALIAS_REGISTER(Register, jssp, x28);
+ALIAS_REGISTER(Register, wjssp, w28);
+ALIAS_REGISTER(Register, fp, x29);
+ALIAS_REGISTER(Register, lr, x30);
+ALIAS_REGISTER(Register, xzr, x31);
+ALIAS_REGISTER(Register, wzr, w31);
+
+// Crankshaft double scratch register.
+ALIAS_REGISTER(FPRegister, crankshaft_fp_scratch, d29);
+// Keeps the 0 double value.
+ALIAS_REGISTER(FPRegister, fp_zero, d30);
+// MacroAssembler double scratch register.
+ALIAS_REGISTER(FPRegister, fp_scratch, d31);
+
+#undef ALIAS_REGISTER
+
+// AreAliased returns true if any of the named registers overlap. Arguments set
+// to NoReg are ignored. The system stack pointer may be specified.
+bool AreAliased(const CPURegister& reg1,
+ const CPURegister& reg2,
+ const CPURegister& reg3 = NoReg,
+ const CPURegister& reg4 = NoReg,
+ const CPURegister& reg5 = NoReg,
+ const CPURegister& reg6 = NoReg,
+ const CPURegister& reg7 = NoReg,
+ const CPURegister& reg8 = NoReg);
+
+// AreSameSizeAndType returns true if all of the specified registers have the
+// same size, and are of the same type. The system stack pointer may be
+// specified. Arguments set to NoReg are ignored, as are any subsequent
+// arguments. At least one argument (reg1) must be valid (not NoCPUReg).
+bool AreSameSizeAndType(const CPURegister& reg1,
+ const CPURegister& reg2,
+ const CPURegister& reg3 = NoCPUReg,
+ const CPURegister& reg4 = NoCPUReg,
+ const CPURegister& reg5 = NoCPUReg,
+ const CPURegister& reg6 = NoCPUReg,
+ const CPURegister& reg7 = NoCPUReg,
+ const CPURegister& reg8 = NoCPUReg);
+
+
+typedef FPRegister DoubleRegister;
+
+
+// -----------------------------------------------------------------------------
+// Lists of registers.
+class CPURegList {
+ public:
+ explicit CPURegList(CPURegister reg1,
+ CPURegister reg2 = NoCPUReg,
+ CPURegister reg3 = NoCPUReg,
+ CPURegister reg4 = NoCPUReg)
+ : list_(reg1.Bit() | reg2.Bit() | reg3.Bit() | reg4.Bit()),
+ size_(reg1.SizeInBits()), type_(reg1.type()) {
+ ASSERT(AreSameSizeAndType(reg1, reg2, reg3, reg4));
+ ASSERT(IsValid());
+ }
+
+ CPURegList(CPURegister::RegisterType type, unsigned size, RegList list)
+ : list_(list), size_(size), type_(type) {
+ ASSERT(IsValid());
+ }
+
+ CPURegList(CPURegister::RegisterType type, unsigned size,
+ unsigned first_reg, unsigned last_reg)
+ : size_(size), type_(type) {
+ ASSERT(((type == CPURegister::kRegister) &&
+ (last_reg < kNumberOfRegisters)) ||
+ ((type == CPURegister::kFPRegister) &&
+ (last_reg < kNumberOfFPRegisters)));
+ ASSERT(last_reg >= first_reg);
+ list_ = (1UL << (last_reg + 1)) - 1;
+ list_ &= ~((1UL << first_reg) - 1);
+ ASSERT(IsValid());
+ }
+
+ CPURegister::RegisterType type() const {
+ ASSERT(IsValid());
+ return type_;
+ }
+
+ RegList list() const {
+ ASSERT(IsValid());
+ return list_;
+ }
+
+ // Combine another CPURegList into this one. Registers that already exist in
+ // this list are left unchanged. The type and size of the registers in the
+ // 'other' list must match those in this list.
+ void Combine(const CPURegList& other);
+
+ // Remove every register in the other CPURegList from this one. Registers that
+ // do not exist in this list are ignored. The type and size of the registers
+ // in the 'other' list must match those in this list.
+ void Remove(const CPURegList& other);
+
+ // Variants of Combine and Remove which take a single register.
+ void Combine(const CPURegister& other);
+ void Remove(const CPURegister& other);
+
+ // Variants of Combine and Remove which take a single register by its code;
+ // the type and size of the register is inferred from this list.
+ void Combine(int code);
+ void Remove(int code);
+
+ // Remove all callee-saved registers from the list. This can be useful when
+ // preparing registers for an AAPCS64 function call, for example.
+ void RemoveCalleeSaved();
+
+ CPURegister PopLowestIndex();
+ CPURegister PopHighestIndex();
+
+ // AAPCS64 callee-saved registers.
+ static CPURegList GetCalleeSaved(unsigned size = kXRegSize);
+ static CPURegList GetCalleeSavedFP(unsigned size = kDRegSize);
+
+ // AAPCS64 caller-saved registers. Note that this includes lr.
+ static CPURegList GetCallerSaved(unsigned size = kXRegSize);
+ static CPURegList GetCallerSavedFP(unsigned size = kDRegSize);
+
+ // Registers saved as safepoints.
+ static CPURegList GetSafepointSavedRegisters();
+
+ bool IsEmpty() const {
+ ASSERT(IsValid());
+ return list_ == 0;
+ }
+
+ bool IncludesAliasOf(const CPURegister& other) const {
+ ASSERT(IsValid());
+ return (type_ == other.type()) && (other.Bit() & list_);
+ }
+
+ int Count() const {
+ ASSERT(IsValid());
+ return CountSetBits(list_, kRegListSizeInBits);
+ }
+
+ unsigned RegisterSizeInBits() const {
+ ASSERT(IsValid());
+ return size_;
+ }
+
+ unsigned RegisterSizeInBytes() const {
+ int size_in_bits = RegisterSizeInBits();
+ ASSERT((size_in_bits % kBitsPerByte) == 0);
+ return size_in_bits / kBitsPerByte;
+ }
+
+ private:
+ RegList list_;
+ unsigned size_;
+ CPURegister::RegisterType type_;
+
+ bool IsValid() const {
+ if ((type_ == CPURegister::kRegister) ||
+ (type_ == CPURegister::kFPRegister)) {
+ bool is_valid = true;
+ // Try to create a CPURegister for each element in the list.
+ for (int i = 0; i < kRegListSizeInBits; i++) {
+ if (((list_ >> i) & 1) != 0) {
+ is_valid &= CPURegister::Create(i, size_, type_).IsValid();
+ }
+ }
+ return is_valid;
+ } else if (type_ == CPURegister::kNoRegister) {
+ // The kNoRegister type is valid only for empty lists.
+ // We can't use IsEmpty here because that asserts IsValid().
+ return list_ == 0;
+ } else {
+ return false;
+ }
+ }
+};
+
+
+// AAPCS64 callee-saved registers.
+#define kCalleeSaved CPURegList::GetCalleeSaved()
+#define kCalleeSavedFP CPURegList::GetCalleeSavedFP()
+
+
+// AAPCS64 caller-saved registers. Note that this includes lr.
+#define kCallerSaved CPURegList::GetCallerSaved()
+#define kCallerSavedFP CPURegList::GetCallerSavedFP()
+
+
+// -----------------------------------------------------------------------------
+// Operands.
+const int kSmiShift = kSmiTagSize + kSmiShiftSize;
+const uint64_t kSmiShiftMask = (1UL << kSmiShift) - 1;
+
+// Represents an operand in a machine instruction.
+class Operand {
+ // TODO(all): If necessary, study more in details which methods
+ // TODO(all): should be inlined or not.
+ public:
+ // #<immediate>
+ // where <immediate> is int64_t.
+ // GCC complains about ambiguous aliasing if we don't explicitly declare the
+ // variants.
+ // The simple literal-value wrappers are allowed to be implicit constructors
+ // because Operand is a wrapper class that doesn't normally perform any type
+ // conversion.
+ inline Operand(int64_t immediate,
+ RelocInfo::Mode rmode = RelocInfo::NONE64); // NOLINT(runtime/explicit)
+ inline Operand(uint64_t immediate,
+ RelocInfo::Mode rmode = RelocInfo::NONE64); // NOLINT(runtime/explicit)
+ inline Operand(int32_t immediate,
+ RelocInfo::Mode rmode = RelocInfo::NONE32); // NOLINT(runtime/explicit)
+ inline Operand(uint32_t immediate,
+ RelocInfo::Mode rmode = RelocInfo::NONE32); // NOLINT(runtime/explicit)
+
+
+ // rm, {<shift> {#<shift_amount>}}
+ // where <shift> is one of {LSL, LSR, ASR, ROR}.
+ // <shift_amount> is uint6_t.
+ // This is allowed to be an implicit constructor because Operand is
+ // a wrapper class that doesn't normally perform any type conversion.
+ inline Operand(Register reg,
+ Shift shift = LSL,
+ unsigned shift_amount = 0); // NOLINT(runtime/explicit)
+
+ // rm, <extend> {#<shift_amount>}
+ // where <extend> is one of {UXTB, UXTH, UXTW, UXTX, SXTB, SXTH, SXTW, SXTX}.
+ // <shift_amount> is uint2_t.
+ inline Operand(Register reg,
+ Extend extend,
+ unsigned shift_amount = 0);
+
+ inline explicit Operand(Smi* value);
+ explicit Operand(const ExternalReference& f);
+ explicit Operand(Handle<Object> handle);
+
+ inline bool IsImmediate() const;
+ inline bool IsShiftedRegister() const;
+ inline bool IsExtendedRegister() const;
+ inline bool IsZero() const;
+
+ // This returns an LSL shift (<= 4) operand as an equivalent extend operand,
+ // which helps in the encoding of instructions that use the stack pointer.
+ inline Operand ToExtendedRegister() const;
+
+ inline int64_t immediate() const;
+ inline Register reg() const;
+ inline Shift shift() const;
+ inline Extend extend() const;
+ inline unsigned shift_amount() const;
+
+ // Relocation information.
+ RelocInfo::Mode rmode() const { return rmode_; }
+ void set_rmode(RelocInfo::Mode rmode) { rmode_ = rmode; }
+ bool NeedsRelocation() const;
+
+ // Helpers
+ inline static Operand UntagSmi(Register smi);
+ inline static Operand UntagSmiAndScale(Register smi, int scale);
+
+ private:
+ int64_t immediate_;
+ Register reg_;
+ Shift shift_;
+ Extend extend_;
+ unsigned shift_amount_;
+ RelocInfo::Mode rmode_;
+};
+
+
+// MemOperand represents a memory operand in a load or store instruction.
+// It pairs a base register with either an immediate offset or a register
+// offset (optionally shifted or extended), under one of the AddrMode
+// addressing modes (Offset / PreIndex / PostIndex predicates below).
+class MemOperand {
+ public:
+  // [base, #offset] -- immediate offset (default 0), default mode Offset.
+  inline explicit MemOperand(Register base,
+                             ptrdiff_t offset = 0,
+                             AddrMode addrmode = Offset);
+  // [base, regoffset, {<shift> #shift_amount}] -- shifted register offset.
+  inline explicit MemOperand(Register base,
+                             Register regoffset,
+                             Shift shift = LSL,
+                             unsigned shift_amount = 0);
+  // [base, regoffset, <extend> {#shift_amount}] -- extended register offset.
+  inline explicit MemOperand(Register base,
+                             Register regoffset,
+                             Extend extend,
+                             unsigned shift_amount = 0);
+  // Offset expressed as an Operand (immediate or register form).
+  inline explicit MemOperand(Register base,
+                             const Operand& offset,
+                             AddrMode addrmode = Offset);
+
+  // Trivial accessors for the stored fields.
+  const Register& base() const { return base_; }
+  const Register& regoffset() const { return regoffset_; }
+  ptrdiff_t offset() const { return offset_; }
+  AddrMode addrmode() const { return addrmode_; }
+  Shift shift() const { return shift_; }
+  Extend extend() const { return extend_; }
+  unsigned shift_amount() const { return shift_amount_; }
+  // Addressing-mode predicates (defined out of line).
+  inline bool IsImmediateOffset() const;
+  inline bool IsRegisterOffset() const;
+  inline bool IsPreIndex() const;
+  inline bool IsPostIndex() const;
+
+  // For offset modes, return the offset as an Operand. This helper cannot
+  // handle indexed modes.
+  inline Operand OffsetAsOperand() const;
+
+ private:
+  Register base_;
+  Register regoffset_;
+  ptrdiff_t offset_;
+  AddrMode addrmode_;
+  Shift shift_;
+  Extend extend_;
+  unsigned shift_amount_;
+};
+
+
+// -----------------------------------------------------------------------------
+// Assembler.
+
+class Assembler : public AssemblerBase {
+ public:
+ // Create an assembler. Instructions and relocation information are emitted
+ // into a buffer, with the instructions starting from the beginning and the
+ // relocation information starting from the end of the buffer. See CodeDesc
+ // for a detailed comment on the layout (globals.h).
+ //
+ // If the provided buffer is NULL, the assembler allocates and grows its own
+ // buffer, and buffer_size determines the initial buffer size. The buffer is
+ // owned by the assembler and deallocated upon destruction of the assembler.
+ //
+ // If the provided buffer is not NULL, the assembler uses the provided buffer
+ // for code generation and assumes its size to be buffer_size. If the buffer
+ // is too small, a fatal error occurs. No deallocation of the buffer is done
+ // upon destruction of the assembler.
+ Assembler(Isolate* arg_isolate, void* buffer, int buffer_size);
+
+ virtual ~Assembler();
+
+ // System functions ---------------------------------------------------------
+ // Start generating code from the beginning of the buffer, discarding any code
+ // and data that has already been emitted into the buffer.
+ //
+ // In order to avoid any accidental transfer of state, Reset ASSERTs that the
+ // constant pool is not blocked.
+ void Reset();
+
+ // GetCode emits any pending (non-emitted) code and fills the descriptor
+ // desc. GetCode() is idempotent; it returns the same result if no other
+ // Assembler functions are invoked in between GetCode() calls.
+ //
+ // The descriptor (desc) can be NULL. In that case, the code is finalized as
+ // usual, but the descriptor is not populated.
+ void GetCode(CodeDesc* desc);
+
+ // Insert the smallest number of nop instructions
+ // possible to align the pc offset to a multiple
+ // of m. m must be a power of 2 (>= 4).
+ void Align(int m);
+
+ // Label --------------------------------------------------------------------
+ // Bind a label to the current pc. Note that labels can only be bound once,
+ // and if labels are linked to other instructions, they _must_ be bound
+ // before they go out of scope.
+ void bind(Label* label);
+
+
+ // RelocInfo and constant pool ----------------------------------------------
+
+ // Record relocation information for current pc_.
+ void RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data = 0);
+
+ // Return the address in the constant pool of the code target address used by
+ // the branch/call instruction at pc.
+ inline static Address target_pointer_address_at(Address pc);
+
+ // Read/Modify the code target address in the branch/call instruction at pc.
+ inline static Address target_address_at(Address pc);
+ inline static void set_target_address_at(Address pc, Address target);
+
+ // Return the code target address at a call site from the return address of
+ // that call in the instruction stream.
+ inline static Address target_address_from_return_address(Address pc);
+
+ // Given the address of the beginning of a call, return the address in the
+ // instruction stream that call will return from.
+ inline static Address return_address_from_call_start(Address pc);
+
+ // This sets the branch destination (which is in the constant pool on ARM).
+ // This is for calls and branches within generated code.
+ inline static void deserialization_set_special_target_at(
+ Address constant_pool_entry, Address target);
+
+ // All addresses in the constant pool are the same size as pointers.
+ static const int kSpecialTargetSize = kPointerSize;
+
+ // The sizes of the call sequences emitted by MacroAssembler::Call.
+ // Wherever possible, use MacroAssembler::CallSize instead of these constants,
+ // as it will choose the correct value for a given relocation mode.
+ //
+ // Without relocation:
+ // movz ip0, #(target & 0x000000000000ffff)
+ // movk ip0, #(target & 0x00000000ffff0000)
+ // movk ip0, #(target & 0x0000ffff00000000)
+ // movk ip0, #(target & 0xffff000000000000)
+ // blr ip0
+ //
+ // With relocation:
+ // ldr ip0, =target
+ // blr ip0
+ static const int kCallSizeWithoutRelocation = 5 * kInstructionSize;
+ static const int kCallSizeWithRelocation = 2 * kInstructionSize;
+
+  // Size of the generated code in bytes (distance from buffer start to pc_).
+  uint64_t SizeOfGeneratedCode() const {
+    ASSERT((pc_ >= buffer_) && (pc_ < (buffer_ + buffer_size_)));
+    return pc_ - buffer_;
+  }
+
+  // Return the code size generated from label to the current position.
+  // The label must already be bound, at or before the current pc offset.
+  uint64_t SizeOfCodeGeneratedSince(const Label* label) {
+    ASSERT(label->is_bound());
+    ASSERT(pc_offset() >= label->pos());
+    ASSERT(pc_offset() < buffer_size_);
+    return pc_offset() - label->pos();
+  }
+
+  // Check the size of the code generated since the given label. This function
+  // is used primarily to work around comparisons between signed and unsigned
+  // quantities, since V8 uses both.
+  // TODO(jbramley): Work out what sign to use for these things and if possible,
+  // change things to be consistent.
+  void AssertSizeOfCodeGeneratedSince(const Label* label, ptrdiff_t size) {
+    ASSERT(size >= 0);
+    ASSERT(static_cast<uint64_t>(size) == SizeOfCodeGeneratedSince(label));
+  }
+
+  // Return the number of instructions generated from label to the
+  // current position.
+  int InstructionsGeneratedSince(const Label* label) {
+    return SizeOfCodeGeneratedSince(label) / kInstructionSize;
+  }
+
+ // TODO(all): Initialize these constants related with code patching.
+ // TODO(all): Set to -1 to hopefully crash if mistakenly used.
+
+ // Number of instructions generated for the return sequence in
+ // FullCodeGenerator::EmitReturnSequence.
+ static const int kJSRetSequenceInstructions = 7;
+ // Distance between start of patched return sequence and the emitted address
+ // to jump to.
+ static const int kPatchReturnSequenceAddressOffset = 0;
+ static const int kPatchDebugBreakSlotAddressOffset = 0;
+
+ // Number of instructions necessary to be able to later patch it to a call.
+ // See Debug::GenerateSlot() and BreakLocationIterator::SetDebugBreakAtSlot().
+ static const int kDebugBreakSlotInstructions = 4;
+ static const int kDebugBreakSlotLength =
+ kDebugBreakSlotInstructions * kInstructionSize;
+
+ static const int kPatchDebugBreakSlotReturnOffset = 2 * kInstructionSize;
+
+  // Prevent constant pool emission until EndBlockConstPool is called.
+  // Calls to this function can be nested but must be followed by an equal
+  // number of calls to EndBlockConstPool.
+ void StartBlockConstPool();
+
+  // Resume constant pool emission. Needs to be called as many times as
+  // StartBlockConstPool to have an effect.
+ void EndBlockConstPool();
+
+ bool is_const_pool_blocked() const;
+ static bool IsConstantPoolAt(Instruction* instr);
+ static int ConstantPoolSizeAt(Instruction* instr);
+ // See Assembler::CheckConstPool for more info.
+ void ConstantPoolMarker(uint32_t size);
+ void ConstantPoolGuard();
+
+
+ // Debugging ----------------------------------------------------------------
+ PositionsRecorder* positions_recorder() { return &positions_recorder_; }
+ void RecordComment(const char* msg);
+ int buffer_space() const;
+
+ // Mark address of the ExitJSFrame code.
+ void RecordJSReturn();
+
+ // Mark address of a debug break slot.
+ void RecordDebugBreakSlot();
+
+ // Record the emission of a constant pool.
+ //
+ // The emission of constant pool depends on the size of the code generated and
+ // the number of RelocInfo recorded.
+ // The Debug mechanism needs to map code offsets between two versions of a
+ // function, compiled with and without debugger support (see for example
+ // Debug::PrepareForBreakPoints()).
+ // Compiling functions with debugger support generates additional code
+ // (Debug::GenerateSlot()). This may affect the emission of the constant
+ // pools and cause the version of the code with debugger support to have
+ // constant pools generated in different places.
+ // Recording the position and size of emitted constant pools allows to
+ // correctly compute the offset mappings between the different versions of a
+ // function in all situations.
+ //
+ // The parameter indicates the size of the constant pool (in bytes), including
+ // the marker and branch over the data.
+ void RecordConstPool(int size);
+
+
+ // Instruction set functions ------------------------------------------------
+
+ // Branch / Jump instructions.
+  // For branches, offsets are scaled, i.e. in instructions, not in bytes.
+ // Branch to register.
+ void br(const Register& xn);
+
+ // Branch-link to register.
+ void blr(const Register& xn);
+
+ // Branch to register with return hint.
+ void ret(const Register& xn = lr);
+
+ // Unconditional branch to label.
+ void b(Label* label);
+
+ // Conditional branch to label.
+ void b(Label* label, Condition cond);
+
+ // Unconditional branch to PC offset.
+ void b(int imm26);
+
+ // Conditional branch to PC offset.
+ void b(int imm19, Condition cond);
+
+ // Branch-link to label / pc offset.
+ void bl(Label* label);
+ void bl(int imm26);
+
+ // Compare and branch to label / pc offset if zero.
+ void cbz(const Register& rt, Label* label);
+ void cbz(const Register& rt, int imm19);
+
+ // Compare and branch to label / pc offset if not zero.
+ void cbnz(const Register& rt, Label* label);
+ void cbnz(const Register& rt, int imm19);
+
+ // Test bit and branch to label / pc offset if zero.
+ void tbz(const Register& rt, unsigned bit_pos, Label* label);
+ void tbz(const Register& rt, unsigned bit_pos, int imm14);
+
+ // Test bit and branch to label / pc offset if not zero.
+ void tbnz(const Register& rt, unsigned bit_pos, Label* label);
+ void tbnz(const Register& rt, unsigned bit_pos, int imm14);
+
+ // Address calculation instructions.
+ // Calculate a PC-relative address. Unlike for branches the offset in adr is
+ // unscaled (i.e. the result can be unaligned).
+ void adr(const Register& rd, Label* label);
+ void adr(const Register& rd, int imm21);
+
+ // Data Processing instructions.
+ // Add.
+ void add(const Register& rd,
+ const Register& rn,
+ const Operand& operand);
+
+ // Add and update status flags.
+ void adds(const Register& rd,
+ const Register& rn,
+ const Operand& operand);
+
+ // Compare negative.
+ void cmn(const Register& rn, const Operand& operand);
+
+ // Subtract.
+ void sub(const Register& rd,
+ const Register& rn,
+ const Operand& operand);
+
+ // Subtract and update status flags.
+ void subs(const Register& rd,
+ const Register& rn,
+ const Operand& operand);
+
+ // Compare.
+ void cmp(const Register& rn, const Operand& operand);
+
+ // Negate.
+ void neg(const Register& rd,
+ const Operand& operand);
+
+ // Negate and update status flags.
+ void negs(const Register& rd,
+ const Operand& operand);
+
+ // Add with carry bit.
+ void adc(const Register& rd,
+ const Register& rn,
+ const Operand& operand);
+
+ // Add with carry bit and update status flags.
+ void adcs(const Register& rd,
+ const Register& rn,
+ const Operand& operand);
+
+ // Subtract with carry bit.
+ void sbc(const Register& rd,
+ const Register& rn,
+ const Operand& operand);
+
+ // Subtract with carry bit and update status flags.
+ void sbcs(const Register& rd,
+ const Register& rn,
+ const Operand& operand);
+
+ // Negate with carry bit.
+ void ngc(const Register& rd,
+ const Operand& operand);
+
+ // Negate with carry bit and update status flags.
+ void ngcs(const Register& rd,
+ const Operand& operand);
+
+ // Logical instructions.
+ // Bitwise and (A & B).
+ void and_(const Register& rd,
+ const Register& rn,
+ const Operand& operand);
+
+ // Bitwise and (A & B) and update status flags.
+ void ands(const Register& rd,
+ const Register& rn,
+ const Operand& operand);
+
+ // Bit test, and set flags.
+ void tst(const Register& rn, const Operand& operand);
+
+ // Bit clear (A & ~B).
+ void bic(const Register& rd,
+ const Register& rn,
+ const Operand& operand);
+
+ // Bit clear (A & ~B) and update status flags.
+ void bics(const Register& rd,
+ const Register& rn,
+ const Operand& operand);
+
+ // Bitwise or (A | B).
+ void orr(const Register& rd, const Register& rn, const Operand& operand);
+
+ // Bitwise nor (A | ~B).
+ void orn(const Register& rd, const Register& rn, const Operand& operand);
+
+ // Bitwise eor/xor (A ^ B).
+ void eor(const Register& rd, const Register& rn, const Operand& operand);
+
+ // Bitwise enor/xnor (A ^ ~B).
+ void eon(const Register& rd, const Register& rn, const Operand& operand);
+
+ // Logical shift left variable.
+ void lslv(const Register& rd, const Register& rn, const Register& rm);
+
+ // Logical shift right variable.
+ void lsrv(const Register& rd, const Register& rn, const Register& rm);
+
+ // Arithmetic shift right variable.
+ void asrv(const Register& rd, const Register& rn, const Register& rm);
+
+ // Rotate right variable.
+ void rorv(const Register& rd, const Register& rn, const Register& rm);
+
+ // Bitfield instructions.
+ // Bitfield move.
+ void bfm(const Register& rd,
+ const Register& rn,
+ unsigned immr,
+ unsigned imms);
+
+ // Signed bitfield move.
+ void sbfm(const Register& rd,
+ const Register& rn,
+ unsigned immr,
+ unsigned imms);
+
+ // Unsigned bitfield move.
+ void ubfm(const Register& rd,
+ const Register& rn,
+ unsigned immr,
+ unsigned imms);
+
+  // Bfm aliases. Each encodes BFM with particular immr/imms values.
+  // Bitfield insert: copy 'width' low-order bits of rn into rd at bit
+  // position 'lsb'. Requires width >= 1 and lsb + width within the
+  // register size (see ASSERTs).
+  void bfi(const Register& rd,
+           const Register& rn,
+           unsigned lsb,
+           unsigned width) {
+    ASSERT(width >= 1);
+    ASSERT(lsb + width <= rn.SizeInBits());
+    bfm(rd, rn, (rd.SizeInBits() - lsb) & (rd.SizeInBits() - 1), width - 1);
+  }
+
+  // Bitfield extract and insert low: copy the field [lsb, lsb + width)
+  // of rn into the low-order bits of rd.
+  void bfxil(const Register& rd,
+             const Register& rn,
+             unsigned lsb,
+             unsigned width) {
+    ASSERT(width >= 1);
+    ASSERT(lsb + width <= rn.SizeInBits());
+    bfm(rd, rn, lsb, lsb + width - 1);
+  }
+
+  // Sbfm aliases. Each encodes SBFM with particular immr/imms values.
+  // Arithmetic shift right (SBFM with immr = shift, imms = reg_size - 1).
+  void asr(const Register& rd, const Register& rn, unsigned shift) {
+    ASSERT(shift < rd.SizeInBits());
+    sbfm(rd, rn, shift, rd.SizeInBits() - 1);
+  }
+
+  // Signed bitfield insert in zero.
+  void sbfiz(const Register& rd,
+             const Register& rn,
+             unsigned lsb,
+             unsigned width) {
+    ASSERT(width >= 1);
+    ASSERT(lsb + width <= rn.SizeInBits());
+    sbfm(rd, rn, (rd.SizeInBits() - lsb) & (rd.SizeInBits() - 1), width - 1);
+  }
+
+  // Signed bitfield extract.
+  void sbfx(const Register& rd,
+            const Register& rn,
+            unsigned lsb,
+            unsigned width) {
+    ASSERT(width >= 1);
+    ASSERT(lsb + width <= rn.SizeInBits());
+    sbfm(rd, rn, lsb, lsb + width - 1);
+  }
+
+  // Signed extend byte (sign-extend bits [7:0]).
+  void sxtb(const Register& rd, const Register& rn) {
+    sbfm(rd, rn, 0, 7);
+  }
+
+  // Signed extend halfword (sign-extend bits [15:0]).
+  void sxth(const Register& rd, const Register& rn) {
+    sbfm(rd, rn, 0, 15);
+  }
+
+  // Signed extend word (sign-extend bits [31:0]).
+  void sxtw(const Register& rd, const Register& rn) {
+    sbfm(rd, rn, 0, 31);
+  }
+
+  // Ubfm aliases. Each encodes UBFM with particular immr/imms values.
+  // Logical shift left.
+  void lsl(const Register& rd, const Register& rn, unsigned shift) {
+    unsigned reg_size = rd.SizeInBits();
+    ASSERT(shift < reg_size);
+    // The '% reg_size' maps shift == 0 to immr == 0.
+    ubfm(rd, rn, (reg_size - shift) % reg_size, reg_size - shift - 1);
+  }
+
+  // Logical shift right.
+  void lsr(const Register& rd, const Register& rn, unsigned shift) {
+    ASSERT(shift < rd.SizeInBits());
+    ubfm(rd, rn, shift, rd.SizeInBits() - 1);
+  }
+
+  // Unsigned bitfield insert in zero.
+  void ubfiz(const Register& rd,
+             const Register& rn,
+             unsigned lsb,
+             unsigned width) {
+    ASSERT(width >= 1);
+    ASSERT(lsb + width <= rn.SizeInBits());
+    ubfm(rd, rn, (rd.SizeInBits() - lsb) & (rd.SizeInBits() - 1), width - 1);
+  }
+
+  // Unsigned bitfield extract.
+  void ubfx(const Register& rd,
+            const Register& rn,
+            unsigned lsb,
+            unsigned width) {
+    ASSERT(width >= 1);
+    ASSERT(lsb + width <= rn.SizeInBits());
+    ubfm(rd, rn, lsb, lsb + width - 1);
+  }
+
+  // Unsigned extend byte (zero-extend bits [7:0]).
+  void uxtb(const Register& rd, const Register& rn) {
+    ubfm(rd, rn, 0, 7);
+  }
+
+  // Unsigned extend halfword (zero-extend bits [15:0]).
+  void uxth(const Register& rd, const Register& rn) {
+    ubfm(rd, rn, 0, 15);
+  }
+
+  // Unsigned extend word (zero-extend bits [31:0]).
+  void uxtw(const Register& rd, const Register& rn) {
+    ubfm(rd, rn, 0, 31);
+  }
+
+ // Extract.
+ void extr(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ unsigned lsb);
+
+ // Conditional select: rd = cond ? rn : rm.
+ void csel(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ Condition cond);
+
+ // Conditional select increment: rd = cond ? rn : rm + 1.
+ void csinc(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ Condition cond);
+
+ // Conditional select inversion: rd = cond ? rn : ~rm.
+ void csinv(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ Condition cond);
+
+ // Conditional select negation: rd = cond ? rn : -rm.
+ void csneg(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ Condition cond);
+
+ // Conditional set: rd = cond ? 1 : 0.
+ void cset(const Register& rd, Condition cond);
+
+ // Conditional set minus: rd = cond ? -1 : 0.
+ void csetm(const Register& rd, Condition cond);
+
+ // Conditional increment: rd = cond ? rn + 1 : rn.
+ void cinc(const Register& rd, const Register& rn, Condition cond);
+
+ // Conditional invert: rd = cond ? ~rn : rn.
+ void cinv(const Register& rd, const Register& rn, Condition cond);
+
+ // Conditional negate: rd = cond ? -rn : rn.
+ void cneg(const Register& rd, const Register& rn, Condition cond);
+
+  // Extr aliases.
+  // Rotate right: extracting from the register pair (rs:rs) rotates rs
+  // right by 'shift' bits.
+  void ror(const Register& rd, const Register& rs, unsigned shift) {
+    extr(rd, rs, rs, shift);
+  }
+
+ // Conditional comparison.
+ // Conditional compare negative.
+ void ccmn(const Register& rn,
+ const Operand& operand,
+ StatusFlags nzcv,
+ Condition cond);
+
+ // Conditional compare.
+ void ccmp(const Register& rn,
+ const Operand& operand,
+ StatusFlags nzcv,
+ Condition cond);
+
+ // Multiplication.
+ // 32 x 32 -> 32-bit and 64 x 64 -> 64-bit multiply.
+ void mul(const Register& rd, const Register& rn, const Register& rm);
+
+ // 32 + 32 x 32 -> 32-bit and 64 + 64 x 64 -> 64-bit multiply accumulate.
+ void madd(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ const Register& ra);
+
+ // -(32 x 32) -> 32-bit and -(64 x 64) -> 64-bit multiply.
+ void mneg(const Register& rd, const Register& rn, const Register& rm);
+
+ // 32 - 32 x 32 -> 32-bit and 64 - 64 x 64 -> 64-bit multiply subtract.
+ void msub(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ const Register& ra);
+
+ // 32 x 32 -> 64-bit multiply.
+ void smull(const Register& rd, const Register& rn, const Register& rm);
+
+ // Xd = bits<127:64> of Xn * Xm.
+ void smulh(const Register& rd, const Register& rn, const Register& rm);
+
+ // Signed 32 x 32 -> 64-bit multiply and accumulate.
+ void smaddl(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ const Register& ra);
+
+ // Unsigned 32 x 32 -> 64-bit multiply and accumulate.
+ void umaddl(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ const Register& ra);
+
+ // Signed 32 x 32 -> 64-bit multiply and subtract.
+ void smsubl(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ const Register& ra);
+
+ // Unsigned 32 x 32 -> 64-bit multiply and subtract.
+ void umsubl(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ const Register& ra);
+
+ // Signed integer divide.
+ void sdiv(const Register& rd, const Register& rn, const Register& rm);
+
+ // Unsigned integer divide.
+ void udiv(const Register& rd, const Register& rn, const Register& rm);
+
+ // Bit count, bit reverse and endian reverse.
+ void rbit(const Register& rd, const Register& rn);
+ void rev16(const Register& rd, const Register& rn);
+ void rev32(const Register& rd, const Register& rn);
+ void rev(const Register& rd, const Register& rn);
+ void clz(const Register& rd, const Register& rn);
+ void cls(const Register& rd, const Register& rn);
+
+ // Memory instructions.
+
+ // Load literal from pc + offset_from_pc.
+ void LoadLiteral(const CPURegister& rt, int offset_from_pc);
+
+ // Load integer or FP register.
+ void ldr(const CPURegister& rt, const MemOperand& src);
+
+ // Store integer or FP register.
+ void str(const CPURegister& rt, const MemOperand& dst);
+
+ // Load word with sign extension.
+ void ldrsw(const Register& rt, const MemOperand& src);
+
+ // Load byte.
+ void ldrb(const Register& rt, const MemOperand& src);
+
+ // Store byte.
+ void strb(const Register& rt, const MemOperand& dst);
+
+ // Load byte with sign extension.
+ void ldrsb(const Register& rt, const MemOperand& src);
+
+ // Load half-word.
+ void ldrh(const Register& rt, const MemOperand& src);
+
+ // Store half-word.
+ void strh(const Register& rt, const MemOperand& dst);
+
+ // Load half-word with sign extension.
+ void ldrsh(const Register& rt, const MemOperand& src);
+
+ // Load integer or FP register pair.
+ void ldp(const CPURegister& rt, const CPURegister& rt2,
+ const MemOperand& src);
+
+ // Store integer or FP register pair.
+ void stp(const CPURegister& rt, const CPURegister& rt2,
+ const MemOperand& dst);
+
+ // Load word pair with sign extension.
+ void ldpsw(const Register& rt, const Register& rt2, const MemOperand& src);
+
+ // Load integer or FP register pair, non-temporal.
+ void ldnp(const CPURegister& rt, const CPURegister& rt2,
+ const MemOperand& src);
+
+ // Store integer or FP register pair, non-temporal.
+ void stnp(const CPURegister& rt, const CPURegister& rt2,
+ const MemOperand& dst);
+
+ // Load literal to register.
+ void ldr(const Register& rt, uint64_t imm);
+
+ // Load literal to FP register.
+ void ldr(const FPRegister& ft, double imm);
+
+ // Move instructions. The default shift of -1 indicates that the move
+ // instruction will calculate an appropriate 16-bit immediate and left shift
+ // that is equal to the 64-bit immediate argument. If an explicit left shift
+ // is specified (0, 16, 32 or 48), the immediate must be a 16-bit value.
+ //
+ // For movk, an explicit shift can be used to indicate which half word should
+ // be overwritten, eg. movk(x0, 0, 0) will overwrite the least-significant
+ // half word with zero, whereas movk(x0, 0, 48) will overwrite the
+ // most-significant.
+
+  // All three wide moves delegate to MoveWide; shift = -1 (the default)
+  // lets MoveWide choose the half-word shift itself (see comment above).
+  // Move and keep.
+  void movk(const Register& rd, uint64_t imm, int shift = -1) {
+    MoveWide(rd, imm, shift, MOVK);
+  }
+
+  // Move with non-zero.
+  void movn(const Register& rd, uint64_t imm, int shift = -1) {
+    MoveWide(rd, imm, shift, MOVN);
+  }
+
+  // Move with zero.
+  void movz(const Register& rd, uint64_t imm, int shift = -1) {
+    MoveWide(rd, imm, shift, MOVZ);
+  }
+
+ // Misc instructions.
+ // Monitor debug-mode breakpoint.
+ void brk(int code);
+
+ // Halting debug-mode breakpoint.
+ void hlt(int code);
+
+ // Move register to register.
+ void mov(const Register& rd, const Register& rn);
+
+ // Move NOT(operand) to register.
+ void mvn(const Register& rd, const Operand& operand);
+
+ // System instructions.
+ // Move to register from system register.
+ void mrs(const Register& rt, SystemRegister sysreg);
+
+ // Move from register to system register.
+ void msr(SystemRegister sysreg, const Register& rt);
+
+ // System hint.
+ void hint(SystemHint code);
+
+ // Data memory barrier
+ void dmb(BarrierDomain domain, BarrierType type);
+
+ // Data synchronization barrier
+ void dsb(BarrierDomain domain, BarrierType type);
+
+ // Instruction synchronization barrier
+ void isb();
+
+ // Alias for system instructions.
+ void nop() { hint(NOP); }
+
+  // Different nop operations are used by the code generator to detect certain
+  // states of the generated code.
+  enum NopMarkerTypes {
+    DEBUG_BREAK_NOP,
+    INTERRUPT_CODE_NOP,
+    FIRST_NOP_MARKER = DEBUG_BREAK_NOP,
+    LAST_NOP_MARKER = INTERRUPT_CODE_NOP
+  };
+
+  // Emit a marker no-op: 'mov xn, xn', where n encodes the marker type.
+  void nop(NopMarkerTypes n) {
+    ASSERT((FIRST_NOP_MARKER <= n) && (n <= LAST_NOP_MARKER));
+    mov(Register::XRegFromCode(n), Register::XRegFromCode(n));
+  }
+
+ // FP instructions.
+ // Move immediate to FP register.
+ void fmov(FPRegister fd, double imm);
+
+ // Move FP register to register.
+ void fmov(Register rd, FPRegister fn);
+
+ // Move register to FP register.
+ void fmov(FPRegister fd, Register rn);
+
+ // Move FP register to FP register.
+ void fmov(FPRegister fd, FPRegister fn);
+
+ // FP add.
+ void fadd(const FPRegister& fd, const FPRegister& fn, const FPRegister& fm);
+
+ // FP subtract.
+ void fsub(const FPRegister& fd, const FPRegister& fn, const FPRegister& fm);
+
+ // FP multiply.
+ void fmul(const FPRegister& fd, const FPRegister& fn, const FPRegister& fm);
+
+ // FP fused multiply and add.
+ void fmadd(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm,
+ const FPRegister& fa);
+
+ // FP fused multiply and subtract.
+ void fmsub(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm,
+ const FPRegister& fa);
+
+ // FP fused multiply, add and negate.
+ void fnmadd(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm,
+ const FPRegister& fa);
+
+ // FP fused multiply, subtract and negate.
+ void fnmsub(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm,
+ const FPRegister& fa);
+
+ // FP divide.
+ void fdiv(const FPRegister& fd, const FPRegister& fn, const FPRegister& fm);
+
+ // FP maximum.
+ void fmax(const FPRegister& fd, const FPRegister& fn, const FPRegister& fm);
+
+ // FP minimum.
+ void fmin(const FPRegister& fd, const FPRegister& fn, const FPRegister& fm);
+
+ // FP maximum.
+ void fmaxnm(const FPRegister& fd, const FPRegister& fn, const FPRegister& fm);
+
+ // FP minimum.
+ void fminnm(const FPRegister& fd, const FPRegister& fn, const FPRegister& fm);
+
+ // FP absolute.
+ void fabs(const FPRegister& fd, const FPRegister& fn);
+
+ // FP negate.
+ void fneg(const FPRegister& fd, const FPRegister& fn);
+
+ // FP square root.
+ void fsqrt(const FPRegister& fd, const FPRegister& fn);
+
+ // FP round to integer (nearest with ties to away).
+ void frinta(const FPRegister& fd, const FPRegister& fn);
+
+ // FP round to integer (nearest with ties to even).
+ void frintn(const FPRegister& fd, const FPRegister& fn);
+
+  // FP round to integer (towards zero).
+ void frintz(const FPRegister& fd, const FPRegister& fn);
+
+ // FP compare registers.
+ void fcmp(const FPRegister& fn, const FPRegister& fm);
+
+ // FP compare immediate.
+ void fcmp(const FPRegister& fn, double value);
+
+ // FP conditional compare.
+ void fccmp(const FPRegister& fn,
+ const FPRegister& fm,
+ StatusFlags nzcv,
+ Condition cond);
+
+ // FP conditional select.
+ void fcsel(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm,
+ Condition cond);
+
+ // Common FP Convert function
+ void FPConvertToInt(const Register& rd,
+ const FPRegister& fn,
+ FPIntegerConvertOp op);
+
+ // FP convert between single and double precision.
+ void fcvt(const FPRegister& fd, const FPRegister& fn);
+
+ // Convert FP to unsigned integer (nearest with ties to away).
+ void fcvtau(const Register& rd, const FPRegister& fn);
+
+ // Convert FP to signed integer (nearest with ties to away).
+ void fcvtas(const Register& rd, const FPRegister& fn);
+
+ // Convert FP to unsigned integer (round towards -infinity).
+ void fcvtmu(const Register& rd, const FPRegister& fn);
+
+ // Convert FP to signed integer (round towards -infinity).
+ void fcvtms(const Register& rd, const FPRegister& fn);
+
+ // Convert FP to unsigned integer (nearest with ties to even).
+ void fcvtnu(const Register& rd, const FPRegister& fn);
+
+ // Convert FP to signed integer (nearest with ties to even).
+ void fcvtns(const Register& rd, const FPRegister& fn);
+
+ // Convert FP to unsigned integer (round towards zero).
+ void fcvtzu(const Register& rd, const FPRegister& fn);
+
+  // Convert FP to signed integer (round towards zero).
+ void fcvtzs(const Register& rd, const FPRegister& fn);
+
+ // Convert signed integer or fixed point to FP.
+ void scvtf(const FPRegister& fd, const Register& rn, unsigned fbits = 0);
+
+ // Convert unsigned integer or fixed point to FP.
+ void ucvtf(const FPRegister& fd, const Register& rn, unsigned fbits = 0);
+
+  // Instruction functions used only for test, debug, and patching.
+  // Emit raw instructions in the instruction stream.
+  void dci(Instr raw_inst) { Emit(raw_inst); }
+
+  // Emit 8 bits of data in the instruction stream.
+  void dc8(uint8_t data) { EmitData(&data, sizeof(data)); }
+
+  // Emit 32 bits of data in the instruction stream.
+  void dc32(uint32_t data) { EmitData(&data, sizeof(data)); }
+
+  // Emit 64 bits of data in the instruction stream.
+  void dc64(uint64_t data) { EmitData(&data, sizeof(data)); }
+
+  // Copy a string into the instruction stream, including the terminating NULL
+  // character. The instruction pointer (pc_) is then aligned correctly for
+  // subsequent instructions.
+  void EmitStringData(const char * string) {
+    size_t len = strlen(string) + 1;
+    // The rounded-up string must fit in the buffer headroom (kGap).
+    ASSERT(RoundUp(len, kInstructionSize) <= static_cast<size_t>(kGap));
+    EmitData(string, len);
+    // Pad with NULL characters until pc_ is aligned.
+    const char pad[] = {'\0', '\0', '\0', '\0'};
+    STATIC_ASSERT(sizeof(pad) == kInstructionSize);
+    byte* next_pc = AlignUp(pc_, kInstructionSize);
+    EmitData(&pad, next_pc - pc_);
+  }
+
+ // Pseudo-instructions ------------------------------------------------------
+
+ // Parameters are described in a64/instructions-a64.h.
+ void debug(const char* message, uint32_t code, Instr params = BREAK);
+
+ // Required by V8.
+ void dd(uint32_t data) { dc32(data); }
+ void db(uint8_t data) { dc8(data); }
+
+ // Code generation helpers --------------------------------------------------
+
+ unsigned num_pending_reloc_info() const { return num_pending_reloc_info_; }
+
+  // Return a pointer to the instruction at the given byte offset into the
+  // code buffer.
+  Instruction* InstructionAt(int offset) const {
+    return reinterpret_cast<Instruction*>(buffer_ + offset);
+  }
+
+  // Register encoding. Each helper shifts a register code into its
+  // instruction field. The stack pointer code is rejected here; use
+  // RdSP/RnSP below to encode sp.
+  static Instr Rd(CPURegister rd) {
+    ASSERT(rd.code() != kSPRegInternalCode);
+    return rd.code() << Rd_offset;
+  }
+
+  static Instr Rn(CPURegister rn) {
+    ASSERT(rn.code() != kSPRegInternalCode);
+    return rn.code() << Rn_offset;
+  }
+
+  static Instr Rm(CPURegister rm) {
+    ASSERT(rm.code() != kSPRegInternalCode);
+    return rm.code() << Rm_offset;
+  }
+
+  static Instr Ra(CPURegister ra) {
+    ASSERT(ra.code() != kSPRegInternalCode);
+    return ra.code() << Ra_offset;
+  }
+
+  static Instr Rt(CPURegister rt) {
+    ASSERT(rt.code() != kSPRegInternalCode);
+    return rt.code() << Rt_offset;
+  }
+
+  static Instr Rt2(CPURegister rt2) {
+    ASSERT(rt2.code() != kSPRegInternalCode);
+    return rt2.code() << Rt2_offset;
+  }
+
+  // These encoding functions allow the stack pointer to be encoded, and
+  // disallow the zero register.
+  static Instr RdSP(Register rd) {
+    ASSERT(!rd.IsZero());
+    return (rd.code() & kRegCodeMask) << Rd_offset;
+  }
+
+  static Instr RnSP(Register rn) {
+    ASSERT(!rn.IsZero());
+    return (rn.code() & kRegCodeMask) << Rn_offset;
+  }
+
+  // The following static helpers encode immediates, conditions and other
+  // operand fields into their bit positions within an instruction word.
+
+  // Flags encoding.
+  inline static Instr Flags(FlagsUpdate S);
+  inline static Instr Cond(Condition cond);
+
+  // PC-relative address encoding.
+  inline static Instr ImmPCRelAddress(int imm21);
+
+  // Branch encoding.
+  inline static Instr ImmUncondBranch(int imm26);
+  inline static Instr ImmCondBranch(int imm19);
+  inline static Instr ImmCmpBranch(int imm19);
+  inline static Instr ImmTestBranch(int imm14);
+  inline static Instr ImmTestBranchBit(unsigned bit_pos);
+
+  // Data Processing encoding.
+  inline static Instr SF(Register rd);
+  inline static Instr ImmAddSub(int64_t imm);
+  inline static Instr ImmS(unsigned imms, unsigned reg_size);
+  inline static Instr ImmR(unsigned immr, unsigned reg_size);
+  inline static Instr ImmSetBits(unsigned imms, unsigned reg_size);
+  inline static Instr ImmRotate(unsigned immr, unsigned reg_size);
+  inline static Instr ImmLLiteral(int imm19);
+  inline static Instr BitN(unsigned bitn, unsigned reg_size);
+  inline static Instr ShiftDP(Shift shift);
+  inline static Instr ImmDPShift(unsigned amount);
+  inline static Instr ExtendMode(Extend extend);
+  inline static Instr ImmExtendShift(unsigned left_shift);
+  inline static Instr ImmCondCmp(unsigned imm);
+  inline static Instr Nzcv(StatusFlags nzcv);
+
+  // MemOperand offset encoding.
+  inline static Instr ImmLSUnsigned(int imm12);
+  inline static Instr ImmLS(int imm9);
+  inline static Instr ImmLSPair(int imm7, LSDataSize size);
+  inline static Instr ImmShiftLS(unsigned shift_amount);
+  inline static Instr ImmException(int imm16);
+  inline static Instr ImmSystemRegister(int imm15);
+  inline static Instr ImmHint(int imm7);
+  inline static Instr ImmBarrierDomain(int imm2);
+  inline static Instr ImmBarrierType(int imm2);
+  inline static LSDataSize CalcLSDataSize(LoadStoreOp op);
+
+  // Move immediates encoding.
+  inline static Instr ImmMoveWide(uint64_t imm);
+  inline static Instr ShiftMoveWide(int64_t shift);
+
+  // FP Immediates.
+  static Instr ImmFP32(float imm);
+  static Instr ImmFP64(double imm);
+  inline static Instr FPScale(unsigned scale);
+
+  // FP register type.
+  inline static Instr FPType(FPRegister fd);
+
+ // Class for scoping postponing the constant pool generation.
+ class BlockConstPoolScope {
+ public:
+ explicit BlockConstPoolScope(Assembler* assem) : assem_(assem) {
+ assem_->StartBlockConstPool();
+ }
+ ~BlockConstPoolScope() {
+ assem_->EndBlockConstPool();
+ }
+
+ private:
+ Assembler* assem_;
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(BlockConstPoolScope);
+ };
+
+ // Check if is time to emit a constant pool.
+ void CheckConstPool(bool force_emit, bool require_jump);
+
+ // Available for constrained code generation scopes. Prefer
+ // MacroAssembler::Mov() when possible.
+ inline void LoadRelocated(const CPURegister& rt, const Operand& operand);
+
+ protected:
+  // Zero register (w or x form) to pair with 'reg' in an encoding.
+  inline const Register& AppropriateZeroRegFor(const CPURegister& reg) const;
+
+  // Load/store emission and immediate-offset range checks.
+  void LoadStore(const CPURegister& rt,
+                 const MemOperand& addr,
+                 LoadStoreOp op);
+  static bool IsImmLSUnscaled(ptrdiff_t offset);
+  static bool IsImmLSScaled(ptrdiff_t offset, LSDataSize size);
+
+  // Logical operations: general (operand) and immediate forms, plus the
+  // check for whether a value is encodable as a logical immediate.
+  void Logical(const Register& rd,
+               const Register& rn,
+               const Operand& operand,
+               LogicalOp op);
+  void LogicalImmediate(const Register& rd,
+                        const Register& rn,
+                        unsigned n,
+                        unsigned imm_s,
+                        unsigned imm_r,
+                        LogicalOp op);
+  static bool IsImmLogical(uint64_t value,
+                           unsigned width,
+                           unsigned* n,
+                           unsigned* imm_s,
+                           unsigned* imm_r);
+
+  // Conditional compare emission and its immediate-operand check.
+  void ConditionalCompare(const Register& rn,
+                          const Operand& operand,
+                          StatusFlags nzcv,
+                          Condition cond,
+                          ConditionalCompareOp op);
+  static bool IsImmConditionalCompare(int64_t immediate);
+
+  void AddSubWithCarry(const Register& rd,
+                       const Register& rn,
+                       const Operand& operand,
+                       FlagsUpdate S,
+                       AddSubWithCarryOp op);
+
+  // Functions for emulating operands not directly supported by the instruction
+  // set.
+  void EmitShift(const Register& rd,
+                 const Register& rn,
+                 Shift shift,
+                 unsigned amount);
+  void EmitExtendShift(const Register& rd,
+                       const Register& rn,
+                       Extend extend,
+                       unsigned left_shift);
+
+  void AddSub(const Register& rd,
+              const Register& rn,
+              const Operand& operand,
+              FlagsUpdate S,
+              AddSubOp op);
+  static bool IsImmAddSub(int64_t immediate);
+
+  // Whether 'imm' is representable as an FP immediate field.
+  static bool IsImmFP32(float imm);
+  static bool IsImmFP64(double imm);
+
+  // Find an appropriate LoadStoreOp or LoadStorePairOp for the specified
+  // registers. Only simple loads are supported; sign- and zero-extension (such
+  // as in LDPSW_x or LDRB_w) are not supported.
+  static inline LoadStoreOp LoadOpFor(const CPURegister& rt);
+  static inline LoadStorePairOp LoadPairOpFor(const CPURegister& rt,
+                                              const CPURegister& rt2);
+  static inline LoadStoreOp StoreOpFor(const CPURegister& rt);
+  static inline LoadStorePairOp StorePairOpFor(const CPURegister& rt,
+                                               const CPURegister& rt2);
+  static inline LoadStorePairNonTemporalOp LoadPairNonTemporalOpFor(
+    const CPURegister& rt, const CPURegister& rt2);
+  static inline LoadStorePairNonTemporalOp StorePairNonTemporalOpFor(
+    const CPURegister& rt, const CPURegister& rt2);
+
+ private:
+  // Instruction helpers.
+  void MoveWide(const Register& rd,
+                uint64_t imm,
+                int shift,
+                MoveWideImmediateOp mov_op);
+  void DataProcShiftedRegister(const Register& rd,
+                               const Register& rn,
+                               const Operand& operand,
+                               FlagsUpdate S,
+                               Instr op);
+  void DataProcExtendedRegister(const Register& rd,
+                                const Register& rn,
+                                const Operand& operand,
+                                FlagsUpdate S,
+                                Instr op);
+  void LoadStorePair(const CPURegister& rt,
+                     const CPURegister& rt2,
+                     const MemOperand& addr,
+                     LoadStorePairOp op);
+  void LoadStorePairNonTemporal(const CPURegister& rt,
+                                const CPURegister& rt2,
+                                const MemOperand& addr,
+                                LoadStorePairNonTemporalOp op);
+  // Register the relocation information for the operand and load its value
+  // into rt.
+  void LoadRelocatedValue(const CPURegister& rt,
+                          const Operand& operand,
+                          LoadLiteralOp op);
+  void ConditionalSelect(const Register& rd,
+                         const Register& rn,
+                         const Register& rm,
+                         Condition cond,
+                         ConditionalSelectOp op);
+  void DataProcessing1Source(const Register& rd,
+                             const Register& rn,
+                             DataProcessing1SourceOp op);
+  void DataProcessing3Source(const Register& rd,
+                             const Register& rn,
+                             const Register& rm,
+                             const Register& ra,
+                             DataProcessing3SourceOp op);
+  void FPDataProcessing1Source(const FPRegister& fd,
+                               const FPRegister& fn,
+                               FPDataProcessing1SourceOp op);
+  void FPDataProcessing2Source(const FPRegister& fd,
+                               const FPRegister& fn,
+                               const FPRegister& fm,
+                               FPDataProcessing2SourceOp op);
+  void FPDataProcessing3Source(const FPRegister& fd,
+                               const FPRegister& fn,
+                               const FPRegister& fm,
+                               const FPRegister& fa,
+                               FPDataProcessing3SourceOp op);
+
+  // Label helpers.
+
+  // Return an offset for a label-referencing instruction, typically a branch.
+  int LinkAndGetByteOffsetTo(Label* label);
+
+  // This is the same as LinkAndGetByteOffsetTo, but returns an offset
+  // suitable for fields that take instruction offsets.
+  inline int LinkAndGetInstructionOffsetTo(Label* label);
+
+  static const int kStartOfLabelLinkChain = 0;
+
+  // Verify that a label's link chain is intact.
+  void CheckLabelLinkChain(Label const * label);
+
+  // Queue a literal of the given size for the constant pool.
+  void RecordLiteral(int64_t imm, unsigned size);
+
+  // Postpone the generation of the constant pool for the specified number of
+  // instructions.
+  void BlockConstPoolFor(int instructions);
+
+  // Emit the instruction at pc_: copy its bytes into the buffer, advance
+  // pc_, then run the buffer check.
+  void Emit(Instr instruction) {
+    STATIC_ASSERT(sizeof(*pc_) == 1);
+    STATIC_ASSERT(sizeof(instruction) == kInstructionSize);
+    ASSERT((pc_ + sizeof(instruction)) <= (buffer_ + buffer_size_));
+
+    memcpy(pc_, &instruction, sizeof(instruction));
+    pc_ += sizeof(instruction);
+    CheckBuffer();
+  }
+
+  // Emit data inline in the instruction stream.
+  void EmitData(void const * data, unsigned size) {
+    ASSERT(sizeof(*pc_) == 1);
+    ASSERT((pc_ + size) <= (buffer_ + buffer_size_));
+
+    // TODO(all): Somehow register we have some data here. Then we can
+    // disassemble it correctly.
+    memcpy(pc_, data, size);
+    pc_ += size;
+    CheckBuffer();
+  }
+
+  // Buffer management.
+  void GrowBuffer();
+  void CheckBuffer();
+
+  // Pc offset of the next buffer check.
+  int next_buffer_check_;
+
+  // Constant pool generation
+  // Pools are emitted in the instruction stream, preferably after unconditional
+  // jumps or after returns from functions (in dead code locations).
+  // If a long code sequence does not contain unconditional jumps, it is
+  // necessary to emit the constant pool before the pool gets too far from the
+  // location it is accessed from. In this case, we emit a jump over the emitted
+  // constant pool.
+  // Constants in the pool may be addresses of functions that get relocated;
+  // if so, a relocation info entry is associated to the constant pool entry.
+
+  // Repeated checking whether the constant pool should be emitted is rather
+  // expensive. By default we only check again once a number of instructions
+  // has been generated. That also means that the sizing of the buffers is not
+  // an exact science, and that we rely on some slop to not overrun buffers.
+  static const int kCheckPoolIntervalInst = 128;
+  static const int kCheckPoolInterval =
+    kCheckPoolIntervalInst * kInstructionSize;
+
+  // Constants in pools are accessed via pc relative addressing, which can
+  // reach +/-4KB thereby defining a maximum distance between the instruction
+  // and the accessed constant.
+  static const int kMaxDistToPool = 4 * KB;
+  static const int kMaxNumPendingRelocInfo = kMaxDistToPool / kInstructionSize;
+
+
+  // Average distance between a constant pool and the first instruction
+  // accessing the constant pool. Longer distance should result in less I-cache
+  // pollution.
+  // In practice the distance will be smaller since constant pool emission is
+  // forced after function return and sometimes after unconditional branches.
+  static const int kAvgDistToPool = kMaxDistToPool - kCheckPoolInterval;
+
+  // Emission of the constant pool may be blocked in some code sequences.
+  int const_pool_blocked_nesting_;  // Block emission if this is not zero.
+  int no_const_pool_before_;  // Block emission before this pc offset.
+
+  // Keep track of the first instruction requiring a constant pool entry
+  // since the previous constant pool was emitted.
+  int first_const_pool_use_;
+
+  // Relocation info generation
+  // Each relocation is encoded as a variable size value
+  static const int kMaxRelocSize = RelocInfoWriter::kMaxSize;
+  RelocInfoWriter reloc_info_writer;
+
+  // Relocation info records are also used during code generation as temporary
+  // containers for constants and code target addresses until they are emitted
+  // to the constant pool. These pending relocation info records are temporarily
+  // stored in a separate buffer until a constant pool is emitted.
+  // If every instruction in a long sequence is accessing the pool, we need one
+  // pending relocation entry per instruction.
+
+  // The buffer of pending relocation info.
+  RelocInfo pending_reloc_info_[kMaxNumPendingRelocInfo];
+  // Number of pending reloc info entries in the buffer.
+  int num_pending_reloc_info_;
+
+  // Relocation for a type-recording IC has the AST id added to it. This
+  // member variable is a way to pass the information from the call site to
+  // the relocation info.
+  TypeFeedbackId recorded_ast_id_;
+
+  inline TypeFeedbackId RecordedAstId();
+  inline void ClearRecordedAstId();
+
+ protected:
+  // Record the AST id of the CallIC being compiled, so that it can be placed
+  // in the relocation information.
+  void SetRecordedAstId(TypeFeedbackId ast_id) {
+    ASSERT(recorded_ast_id_.IsNone());
+    recorded_ast_id_ = ast_id;
+  }
+
+  // Code generation
+  // The relocation writer's position is at least kGap bytes below the end of
+  // the generated instructions. This is so that multi-instruction sequences do
+  // not have to check for overflow. The same is true for writes of large
+  // relocation info entries, and debug strings encoded in the instruction
+  // stream.
+  static const int kGap = 128;
+
+ private:
+  // TODO(jbramley): VIXL uses next_literal_pool_check_ and
+  // literal_pool_monitor_ to determine when to consider emitting a literal
+  // pool. V8 doesn't use them, so they should either not be here at all, or
+  // should replace or be merged with next_buffer_check_ and
+  // const_pool_blocked_nesting_.
+  Instruction* next_literal_pool_check_;
+  unsigned literal_pool_monitor_;
+
+  PositionsRecorder positions_recorder_;
+  friend class PositionsRecorder;
+  friend class EnsureSpace;
+};
+
+class PatchingAssembler : public Assembler {
+ public:
+  // Create an Assembler with a buffer starting at 'start'.
+  // The buffer size is
+  //   size of instructions to patch + kGap
+  // Where kGap is the distance from which the Assembler tries to grow the
+  // buffer.
+  // If more or fewer instructions than expected are generated or if some
+  // relocation information takes space in the buffer, the PatchingAssembler
+  // will crash trying to grow the buffer.
+  PatchingAssembler(Instruction* start, unsigned count)
+    : Assembler(NULL,
+                reinterpret_cast<byte*>(start),
+                count * kInstructionSize + kGap) {
+    // Block constant pool emission.
+    StartBlockConstPool();
+  }
+
+  PatchingAssembler(byte* start, unsigned count)
+    : Assembler(NULL, start, count * kInstructionSize + kGap) {
+    // Block constant pool emission.
+    StartBlockConstPool();
+  }
+
+  ~PatchingAssembler() {
+    // Const pool should still be blocked.
+    ASSERT(is_const_pool_blocked());
+    EndBlockConstPool();
+    // Verify we have generated the number of instructions we expected.
+    ASSERT((pc_offset() + kGap) == buffer_size_);
+    // Verify no relocation information has been emitted.
+    ASSERT(num_pending_reloc_info() == 0);
+    // Flush the Instruction cache.
+    size_t length = buffer_size_ - kGap;
+    CPU::FlushICache(buffer_, length);
+  }
+};
+
+
+// Helper that guarantees buffer space before emission: constructing it simply
+// runs the assembler's buffer check (which may grow the buffer).
+class EnsureSpace BASE_EMBEDDED {
+ public:
+  explicit EnsureSpace(Assembler* assembler) {
+    assembler->CheckBuffer();
+  }
+};
+
+} } // namespace v8::internal
+
+#endif // V8_A64_ASSEMBLER_A64_H_
--- /dev/null
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#if V8_TARGET_ARCH_A64
+
+#include "codegen.h"
+#include "debug.h"
+#include "deoptimizer.h"
+#include "full-codegen.h"
+#include "runtime.h"
+#include "stub-cache.h"
+
+namespace v8 {
+namespace internal {
+
+
+#define __ ACCESS_MASM(masm)
+
+
+// Load the built-in Array function from the current context.
+static void GenerateLoadArrayFunction(MacroAssembler* masm, Register result) {
+ // Load the native context.
+ __ Ldr(result, GlobalObjectMemOperand());
+ __ Ldr(result,
+ FieldMemOperand(result, GlobalObject::kNativeContextOffset));
+ // Load the InternalArray function from the native context.
+ __ Ldr(result,
+ MemOperand(result,
+ Context::SlotOffset(Context::ARRAY_FUNCTION_INDEX)));
+}
+
+
+// Load the built-in InternalArray function from the current context into
+// 'result'.
+static void GenerateLoadInternalArrayFunction(MacroAssembler* masm,
+                                              Register result) {
+  // Load the native context.
+  __ Ldr(result, GlobalObjectMemOperand());
+  __ Ldr(result,
+         FieldMemOperand(result, GlobalObject::kNativeContextOffset));
+  // Load the InternalArray function from the native context.
+  __ Ldr(result, ContextMemOperand(result,
+                                   Context::INTERNAL_ARRAY_FUNCTION_INDEX));
+}
+
+
+void Builtins::Generate_Adaptor(MacroAssembler* masm,
+                                CFunctionId id,
+                                BuiltinExtraArguments extra_args) {
+  // ----------- S t a t e -------------
+  //  -- x0                 : number of arguments excluding receiver
+  //  -- x1                 : called function (only guaranteed when
+  //                          extra_args requires it)
+  //  -- cp                 : context
+  //  -- sp[0]              : last argument
+  //  -- ...
+  //  -- sp[8 * (argc - 1)] : first argument (argc == x0)
+  //  -- sp[8 * argc]       : receiver
+  // (Slot size is 8 bytes on A64; earlier comments said 4, copied from ARM.)
+  // -----------------------------------
+
+  // Insert extra arguments.
+  int num_extra_args = 0;
+  if (extra_args == NEEDS_CALLED_FUNCTION) {
+    num_extra_args = 1;
+    __ Push(x1);
+  } else {
+    ASSERT(extra_args == NO_EXTRA_ARGUMENTS);
+  }
+
+  // JumpToExternalReference expects x0 to contain the number of arguments
+  // including the receiver and the extra arguments.
+  __ Add(x0, x0, num_extra_args + 1);
+  __ JumpToExternalReference(ExternalReference(id, masm->isolate()));
+}
+
+
+void Builtins::Generate_InternalArrayCode(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- x0     : number of arguments
+  //  -- lr     : return address
+  //  -- sp[...]: constructor arguments
+  // -----------------------------------
+  ASM_LOCATION("Builtins::Generate_InternalArrayCode");
+  Label generic_array_code;
+
+  // Get the InternalArray function.
+  GenerateLoadInternalArrayFunction(masm, x1);
+
+  if (FLAG_debug_code) {
+    // Initial map for the builtin InternalArray functions should be maps
+    // (in particular, not Smis).
+    __ Ldr(x10, FieldMemOperand(x1, JSFunction::kPrototypeOrInitialMapOffset));
+    __ Tst(x10, kSmiTagMask);
+    __ Assert(ne, kUnexpectedInitialMapForInternalArrayFunction);
+    __ CompareObjectType(x10, x11, x12, MAP_TYPE);
+    __ Assert(eq, kUnexpectedInitialMapForInternalArrayFunction);
+  }
+
+  // Run the native code for the InternalArray function called as a normal
+  // function.
+  InternalArrayConstructorStub stub(masm->isolate());
+  __ TailCallStub(&stub);
+}
+
+
+void Builtins::Generate_ArrayCode(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- x0     : number of arguments
+  //  -- lr     : return address
+  //  -- sp[...]: constructor arguments
+  // -----------------------------------
+  ASM_LOCATION("Builtins::Generate_ArrayCode");
+  Label generic_array_code, one_or_more_arguments, two_or_more_arguments;
+
+  // Get the Array function.
+  GenerateLoadArrayFunction(masm, x1);
+
+  if (FLAG_debug_code) {
+    // Initial map for the builtin Array functions should be maps
+    // (in particular, not Smis).
+    __ Ldr(x10, FieldMemOperand(x1, JSFunction::kPrototypeOrInitialMapOffset));
+    __ Tst(x10, kSmiTagMask);
+    __ Assert(ne, kUnexpectedInitialMapForArrayFunction);
+    __ CompareObjectType(x10, x11, x12, MAP_TYPE);
+    __ Assert(eq, kUnexpectedInitialMapForArrayFunction);
+  }
+
+  // Run the native code for the Array function called as a normal function.
+  // x2 is loaded with undefined (no AllocationSite feedback available here).
+  Handle<Object> undefined_sentinel(
+      masm->isolate()->heap()->undefined_value(),
+      masm->isolate());
+  __ Mov(x2, Operand(undefined_sentinel));
+  ArrayConstructorStub stub(masm->isolate());
+  __ TailCallStub(&stub);
+}
+
+
+void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- x0                     : number of arguments
+  //  -- x1                     : constructor function
+  //  -- lr                     : return address
+  //  -- sp[(argc - n - 1) * 8] : arg[n] (zero based)
+  //  -- sp[argc * 8]           : receiver
+  // -----------------------------------
+  ASM_LOCATION("Builtins::Generate_StringConstructCode");
+  Counters* counters = masm->isolate()->counters();
+  __ IncrementCounter(counters->string_ctor_calls(), 1, x10, x11);
+
+  Register argc = x0;
+  Register function = x1;
+  if (FLAG_debug_code) {
+    __ LoadGlobalFunction(Context::STRING_FUNCTION_INDEX, x10);
+    __ Cmp(function, x10);
+    __ Assert(eq, kUnexpectedStringFunction);
+  }
+
+  // Load the first argument into x0 and get rid of the rest.
+  Label no_arguments;
+  __ Cbz(argc, &no_arguments);
+  // First arg = sp[(argc - 1) * 8].
+  __ Sub(argc, argc, 1);
+  __ Claim(argc, kXRegSizeInBytes);
+  // jssp now points to args[0]; load and drop args[0] + receiver.
+  // TODO(jbramley): Consider adding ClaimAndPoke.
+  __ Ldr(argc, MemOperand(jssp, 2 * kPointerSize, PostIndex));
+
+  // NOTE: from here on, the 'argc' register (x0) holds the first argument
+  // value, not the argument count.
+  Register argument = x2;
+  Label not_cached, argument_is_string;
+  __ LookupNumberStringCache(argc,      // Input.
+                             argument,  // Result.
+                             x10,       // Scratch.
+                             x11,       // Scratch.
+                             x12,       // Scratch.
+                             &not_cached);
+  __ IncrementCounter(counters->string_ctor_cached_number(), 1, x10, x11);
+  __ Bind(&argument_is_string);
+
+  // ----------- S t a t e -------------
+  //  -- x2 : argument converted to string
+  //  -- x1 : constructor function
+  //  -- lr : return address
+  // -----------------------------------
+
+  Label gc_required;
+  Register new_obj = x0;
+  __ Allocate(JSValue::kSize, new_obj, x10, x11, &gc_required, TAG_OBJECT);
+
+  // Initialize the String object.
+  Register map = x3;
+  __ LoadGlobalFunctionInitialMap(function, map, x10);
+  if (FLAG_debug_code) {
+    __ Ldrb(x4, FieldMemOperand(map, Map::kInstanceSizeOffset));
+    __ Cmp(x4, JSValue::kSize >> kPointerSizeLog2);
+    __ Assert(eq, kUnexpectedStringWrapperInstanceSize);
+    __ Ldrb(x4, FieldMemOperand(map, Map::kUnusedPropertyFieldsOffset));
+    __ Cmp(x4, 0);
+    __ Assert(eq, kUnexpectedUnusedPropertiesOfStringWrapper);
+  }
+  __ Str(map, FieldMemOperand(new_obj, HeapObject::kMapOffset));
+
+  Register empty = x3;
+  __ LoadRoot(empty, Heap::kEmptyFixedArrayRootIndex);
+  __ Str(empty, FieldMemOperand(new_obj, JSObject::kPropertiesOffset));
+  __ Str(empty, FieldMemOperand(new_obj, JSObject::kElementsOffset));
+
+  __ Str(argument, FieldMemOperand(new_obj, JSValue::kValueOffset));
+
+  // Ensure the object is fully initialized.
+  STATIC_ASSERT(JSValue::kSize == (4 * kPointerSize));
+
+  __ Ret();
+
+  // The argument was not found in the number to string cache. Check
+  // if it's a string already before calling the conversion builtin.
+  Label convert_argument;
+  __ Bind(&not_cached);
+  __ JumpIfSmi(argc, &convert_argument);
+
+  // Is it a String?
+  __ Ldr(x10, FieldMemOperand(x0, HeapObject::kMapOffset));
+  __ Ldrb(x11, FieldMemOperand(x10, Map::kInstanceTypeOffset));
+  __ Tbnz(x11, MaskToBit(kIsNotStringMask), &convert_argument);
+  __ Mov(argument, argc);
+  __ IncrementCounter(counters->string_ctor_string_value(), 1, x10, x11);
+  __ B(&argument_is_string);
+
+  // Invoke the conversion builtin and put the result into x2.
+  __ Bind(&convert_argument);
+  __ Push(function);  // Preserve the function.
+  __ IncrementCounter(counters->string_ctor_conversions(), 1, x10, x11);
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    __ Push(argc);
+    __ InvokeBuiltin(Builtins::TO_STRING, CALL_FUNCTION);
+  }
+  __ Pop(function);
+  __ Mov(argument, x0);
+  __ B(&argument_is_string);
+
+  // Load the empty string into x2, remove the receiver from the
+  // stack, and jump back to the case where the argument is a string.
+  __ Bind(&no_arguments);
+  __ LoadRoot(argument, Heap::kempty_stringRootIndex);
+  __ Drop(1);
+  __ B(&argument_is_string);
+
+  // At this point the argument is already a string. Call runtime to create a
+  // string wrapper.
+  __ Bind(&gc_required);
+  __ IncrementCounter(counters->string_ctor_gc_required(), 1, x10, x11);
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    __ Push(argument);
+    __ CallRuntime(Runtime::kNewStringWrapper, 1);
+  }
+  __ Ret();
+}
+
+
+// Call the given runtime function with the function in x1 as its single
+// argument, preserving x1 across the call.
+static void CallRuntimePassFunction(MacroAssembler* masm,
+                                    Runtime::FunctionId function_id) {
+  FrameScope scope(masm, StackFrame::INTERNAL);
+  // - Push a copy of the function onto the stack.
+  // - Push another copy as a parameter to the runtime call.
+  __ Push(x1, x1);
+
+  __ CallRuntime(function_id, 1);
+
+  // - Restore the function into x1. (It is the function, pushed above, that
+  //   is popped here — the original "Restore receiver" comment was wrong.)
+  __ Pop(x1);
+}
+
+
+// Tail-call the code attached to the function (x1)'s SharedFunctionInfo,
+// jumping past the Code object header to the first instruction.
+static void GenerateTailCallToSharedCode(MacroAssembler* masm) {
+  __ Ldr(x2, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
+  __ Ldr(x2, FieldMemOperand(x2, SharedFunctionInfo::kCodeOffset));
+  __ Add(x2, x2, Code::kHeaderSize - kHeapObjectTag);
+  __ Br(x2);
+}
+
+
+// Tail-call the Code object in x0 (presumably returned by a preceding
+// runtime call — see the callers), skipping the Code object header.
+static void GenerateTailCallToReturnedCode(MacroAssembler* masm) {
+  __ Add(x0, x0, Code::kHeaderSize - kHeapObjectTag);
+  __ Br(x0);
+}
+
+
+void Builtins::Generate_InOptimizationQueue(MacroAssembler* masm) {
+  // Checking whether the queued function is ready for install is optional,
+  // since we come across interrupts and stack checks elsewhere. However, not
+  // checking may delay installing ready functions, and always checking would be
+  // quite expensive. A good compromise is to first check against stack limit as
+  // a cue for an interrupt signal.
+  Label ok;
+  __ CompareRoot(masm->StackPointer(), Heap::kStackLimitRootIndex);
+  __ B(hs, &ok);
+
+  // Stack limit hit: try installing optimized code, then tail-call whatever
+  // the runtime returned.
+  CallRuntimePassFunction(masm, Runtime::kTryInstallOptimizedCode);
+  GenerateTailCallToReturnedCode(masm);
+
+  // Stack is fine: run the function's current (shared) code.
+  __ Bind(&ok);
+  GenerateTailCallToSharedCode(masm);
+}
+
+
+static void Generate_JSConstructStubHelper(MacroAssembler* masm,
+ bool is_api_function,
+ bool count_constructions) {
+ // ----------- S t a t e -------------
+ // -- x0 : number of arguments
+ // -- x1 : constructor function
+ // -- lr : return address
+ // -- sp[...]: constructor arguments
+ // -----------------------------------
+
+ ASM_LOCATION("Builtins::Generate_JSConstructStubHelper");
+ // Should never count constructions for api objects.
+ ASSERT(!is_api_function || !count_constructions);
+
+ Isolate* isolate = masm->isolate();
+
+ // Enter a construct frame.
+ {
+ FrameScope scope(masm, StackFrame::CONSTRUCT);
+
+ // Preserve the two incoming parameters on the stack.
+ Register argc = x0;
+ Register constructor = x1;
+ // x1: constructor function
+ __ SmiTag(argc);
+ __ Push(argc, constructor);
+ // sp[0] : Constructor function.
+ // sp[1]: number of arguments (smi-tagged)
+
+ // Try to allocate the object without transitioning into C code. If any of
+ // the preconditions is not met, the code bails out to the runtime call.
+ Label rt_call, allocated;
+ if (FLAG_inline_new) {
+ Label undo_allocation;
+#if ENABLE_DEBUGGER_SUPPORT
+ ExternalReference debug_step_in_fp =
+ ExternalReference::debug_step_in_fp_address(isolate);
+ __ Mov(x2, Operand(debug_step_in_fp));
+ __ Ldr(x2, MemOperand(x2));
+ __ Cbnz(x2, &rt_call);
+#endif
+ // Load the initial map and verify that it is in fact a map.
+ Register init_map = x2;
+ __ Ldr(init_map,
+ FieldMemOperand(constructor,
+ JSFunction::kPrototypeOrInitialMapOffset));
+ __ JumpIfSmi(init_map, &rt_call);
+ __ JumpIfNotObjectType(init_map, x10, x11, MAP_TYPE, &rt_call);
+
+ // Check that the constructor is not constructing a JSFunction (see
+ // comments in Runtime_NewObject in runtime.cc). In which case the initial
+ // map's instance type would be JS_FUNCTION_TYPE.
+ __ CompareInstanceType(init_map, x10, JS_FUNCTION_TYPE);
+ __ B(eq, &rt_call);
+
+ if (count_constructions) {
+ Label allocate;
+ // Decrease generous allocation count.
+ __ Ldr(x3, FieldMemOperand(constructor,
+ JSFunction::kSharedFunctionInfoOffset));
+ MemOperand constructor_count =
+ FieldMemOperand(x3, SharedFunctionInfo::kConstructionCountOffset);
+ __ Ldrb(x4, constructor_count);
+ __ Subs(x4, x4, 1);
+ __ Strb(x4, constructor_count);
+ __ B(ne, &allocate);
+
+ // Push the constructor and map to the stack, and the constructor again
+ // as argument to the runtime call.
+ __ Push(constructor, init_map, constructor);
+ // The call will replace the stub, so the countdown is only done once.
+ __ CallRuntime(Runtime::kFinalizeInstanceSize, 1);
+ __ Pop(init_map, constructor);
+ __ Bind(&allocate);
+ }
+
+ // Now allocate the JSObject on the heap.
+ Register obj_size = x3;
+ Register new_obj = x4;
+ __ Ldrb(obj_size, FieldMemOperand(init_map, Map::kInstanceSizeOffset));
+ __ Allocate(obj_size, new_obj, x10, x11, &rt_call, SIZE_IN_WORDS);
+
+ // Allocated the JSObject, now initialize the fields. Map is set to
+ // initial map and properties and elements are set to empty fixed array.
+ // NB. the object pointer is not tagged, so MemOperand is used.
+ Register empty = x5;
+ __ LoadRoot(empty, Heap::kEmptyFixedArrayRootIndex);
+ __ Str(init_map, MemOperand(new_obj, JSObject::kMapOffset));
+ __ Str(empty, MemOperand(new_obj, JSObject::kPropertiesOffset));
+ __ Str(empty, MemOperand(new_obj, JSObject::kElementsOffset));
+
+ Register first_prop = x5;
+ __ Add(first_prop, new_obj, JSObject::kHeaderSize);
+
+ // Fill all of the in-object properties with the appropriate filler.
+ Register obj_end = x6;
+ __ Add(obj_end, new_obj, Operand(obj_size, LSL, kPointerSizeLog2));
+ Register undef = x7;
+ __ LoadRoot(undef, Heap::kUndefinedValueRootIndex);
+
+ // Obtain number of pre-allocated property fields and in-object
+ // properties.
+ Register prealloc_fields = x10;
+ Register inobject_props = x11;
+ Register inst_sizes = x11;
+ __ Ldr(inst_sizes, FieldMemOperand(init_map, Map::kInstanceSizesOffset));
+ __ Ubfx(prealloc_fields, inst_sizes,
+ Map::kPreAllocatedPropertyFieldsByte * kBitsPerByte,
+ kBitsPerByte);
+ __ Ubfx(inobject_props, inst_sizes,
+ Map::kInObjectPropertiesByte * kBitsPerByte, kBitsPerByte);
+
+ if (count_constructions) {
+ // Register first_non_prealloc is the offset of the first field after
+ // pre-allocated fields.
+ Register first_non_prealloc = x12;
+ __ Add(first_non_prealloc, first_prop,
+ Operand(prealloc_fields, LSL, kPointerSizeLog2));
+
+ if (FLAG_debug_code) {
+ __ Cmp(first_non_prealloc, obj_end);
+ __ Assert(le, kUnexpectedNumberOfPreAllocatedPropertyFields);
+ }
+ __ InitializeFieldsWithFiller(first_prop, first_non_prealloc, undef);
+ // To allow for truncation.
+ __ LoadRoot(x12, Heap::kOnePointerFillerMapRootIndex);
+ __ InitializeFieldsWithFiller(first_prop, obj_end, x12);
+ } else {
+ __ InitializeFieldsWithFiller(first_prop, obj_end, undef);
+ }
+
+ // Add the object tag to make the JSObject real, so that we can continue
+ // and jump into the continuation code at any time from now on. Any
+ // failures need to undo the allocation, so that the heap is in a
+ // consistent state and verifiable.
+ __ Add(new_obj, new_obj, kHeapObjectTag);
+
+ // Check if a non-empty properties array is needed. Continue with
+ // allocated object if not, or fall through to runtime call if it is.
+ Register element_count = x3;
+ __ Ldrb(x3, FieldMemOperand(init_map, Map::kUnusedPropertyFieldsOffset));
+ // The field instance sizes contains both pre-allocated property fields
+ // and in-object properties.
+ __ Add(x3, x3, prealloc_fields);
+ __ Subs(element_count, x3, inobject_props);
+
+ // Done if no extra properties are to be allocated.
+ __ B(eq, &allocated);
+ __ Assert(pl, kPropertyAllocationCountFailed);
+
+ // Scale the number of elements by pointer size and add the header for
+ // FixedArrays to the start of the next object calculation from above.
+ Register new_array = x5;
+ Register array_size = x6;
+ __ Add(array_size, element_count, FixedArray::kHeaderSize / kPointerSize);
+ __ Allocate(array_size, new_array, x11, x12, &undo_allocation,
+ static_cast<AllocationFlags>(RESULT_CONTAINS_TOP |
+ SIZE_IN_WORDS));
+
+ Register array_map = x10;
+ __ LoadRoot(array_map, Heap::kFixedArrayMapRootIndex);
+ __ Str(array_map, MemOperand(new_array, FixedArray::kMapOffset));
+ __ SmiTag(x0, element_count);
+ __ Str(x0, MemOperand(new_array, FixedArray::kLengthOffset));
+
+ // Initialize the fields to undefined.
+ Register elements = x10;
+ Register elements_end = x11;
+ __ Add(elements, new_array, FixedArray::kHeaderSize);
+ __ Add(elements_end, elements,
+ Operand(element_count, LSL, kPointerSizeLog2));
+ __ InitializeFieldsWithFiller(elements, elements_end, undef);
+
+ // Store the initialized FixedArray into the properties field of the
+ // JSObject.
+ __ Add(new_array, new_array, kHeapObjectTag);
+ __ Str(new_array, FieldMemOperand(new_obj, JSObject::kPropertiesOffset));
+
+ // Continue with JSObject being successfully allocated.
+ __ B(&allocated);
+
+ // Undo the setting of the new top so that the heap is verifiable. For
+ // example, the map's unused properties potentially do not match the
+ // allocated objects unused properties.
+ __ Bind(&undo_allocation);
+ __ UndoAllocationInNewSpace(new_obj, x14);
+ }
+
+ // Allocate the new receiver object using the runtime call.
+ __ Bind(&rt_call);
+ __ Push(constructor); // Argument for Runtime_NewObject.
+ __ CallRuntime(Runtime::kNewObject, 1);
+ __ Mov(x4, x0);
+
+ // Receiver for constructor call allocated.
+ // x4: JSObject
+ __ Bind(&allocated);
+ __ Push(x4, x4);
+
+ // Reload the number of arguments from the stack.
+ // Set it up in x0 for the function call below.
+ // jssp[0]: receiver
+ // jssp[1]: receiver
+ // jssp[2]: constructor function
+ // jssp[3]: number of arguments (smi-tagged)
+ __ Peek(constructor, 2 * kXRegSizeInBytes); // Load constructor.
+ __ Peek(argc, 3 * kXRegSizeInBytes); // Load number of arguments.
+ __ SmiUntag(argc);
+
+ // Set up pointer to last argument.
+ __ Add(x2, fp, StandardFrameConstants::kCallerSPOffset);
+
+ // Copy arguments and receiver to the expression stack.
+ // Copy 2 values every loop to use ldp/stp.
+ // x0: number of arguments
+ // x1: constructor function
+ // x2: address of last argument (caller sp)
+ // jssp[0]: receiver
+ // jssp[1]: receiver
+ // jssp[2]: constructor function
+ // jssp[3]: number of arguments (smi-tagged)
+ // Compute the start address of the copy in x3.
+ __ Add(x3, x2, Operand(argc, LSL, kPointerSizeLog2));
+ Label loop, entry, done_copying_arguments;
+ __ B(&entry);
+ __ Bind(&loop);
+ __ Ldp(x10, x11, MemOperand(x3, -2 * kPointerSize, PreIndex));
+ __ Push(x11, x10);
+ __ Bind(&entry);
+ __ Cmp(x3, x2);
+ __ B(gt, &loop);
+ // Because we copied values 2 by 2 we may have copied one extra value.
+ // Drop it if that is the case.
+ __ B(eq, &done_copying_arguments);
+ __ Drop(1);
+ __ Bind(&done_copying_arguments);
+
+ // Call the function.
+ // x0: number of arguments
+ // x1: constructor function
+ if (is_api_function) {
+ __ Ldr(cp, FieldMemOperand(constructor, JSFunction::kContextOffset));
+ Handle<Code> code =
+ masm->isolate()->builtins()->HandleApiCallConstruct();
+ __ Call(code, RelocInfo::CODE_TARGET);
+ } else {
+ ParameterCount actual(argc);
+ __ InvokeFunction(constructor, actual, CALL_FUNCTION, NullCallWrapper());
+ }
+
+ // Store offset of return address for deoptimizer.
+ if (!is_api_function && !count_constructions) {
+ masm->isolate()->heap()->SetConstructStubDeoptPCOffset(masm->pc_offset());
+ }
+
+ // Restore the context from the frame.
+ // x0: result
+ // jssp[0]: receiver
+ // jssp[1]: constructor function
+ // jssp[2]: number of arguments (smi-tagged)
+ __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+
+ // If the result is an object (in the ECMA sense), we should get rid
+ // of the receiver and use the result; see ECMA-262 section 13.2.2-7
+ // on page 74.
+ Label use_receiver, exit;
+
+ // If the result is a smi, it is *not* an object in the ECMA sense.
+ // x0: result
+ // jssp[0]: receiver (newly allocated object)
+ // jssp[1]: constructor function
+ // jssp[2]: number of arguments (smi-tagged)
+ __ JumpIfSmi(x0, &use_receiver);
+
+ // If the type of the result (stored in its map) is less than
+ // FIRST_SPEC_OBJECT_TYPE, it is not an object in the ECMA sense.
+ __ JumpIfObjectType(x0, x1, x3, FIRST_SPEC_OBJECT_TYPE, &exit, ge);
+
+ // Throw away the result of the constructor invocation and use the
+ // on-stack receiver as the result.
+ __ Bind(&use_receiver);
+ __ Peek(x0, 0);
+
+ // Remove the receiver from the stack, remove caller arguments, and
+ // return.
+ __ Bind(&exit);
+ // x0: result
+ // jssp[0]: receiver (newly allocated object)
+ // jssp[1]: constructor function
+ // jssp[2]: number of arguments (smi-tagged)
+ __ Peek(x1, 2 * kXRegSizeInBytes);
+
+ // Leave construct frame.
+ }
+
+ __ DropBySMI(x1);
+ __ Drop(1);
+ __ IncrementCounter(isolate->counters()->constructed_objects(), 1, x1, x2);
+ __ Ret();
+}
+
+
+void Builtins::Generate_JSConstructStubCountdown(MacroAssembler* masm) {
+ // Construct stub used while construction counting is active:
+ // is_api_function = false, count_constructions = true.
+ Generate_JSConstructStubHelper(masm, false, true);
+}
+
+
+void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
+ // Generic construct stub: is_api_function = false,
+ // count_constructions = false.
+ Generate_JSConstructStubHelper(masm, false, false);
+}
+
+
+void Builtins::Generate_JSConstructStubApi(MacroAssembler* masm) {
+ // Construct stub for API functions: is_api_function = true,
+ // count_constructions = false.
+ Generate_JSConstructStubHelper(masm, true, false);
+}
+
+
+// Shared body of the JSEntryTrampoline builtins: sets up an internal frame,
+// pushes the function and receiver, copies the arguments from the C++ argv
+// array onto the JS stack (dereferencing handles), initializes the JS
+// callee-saved registers, and invokes the function - through the construct
+// stub when is_construct is true, directly otherwise.
+//
+// Input:
+// x0: code entry.
+// x1: function.
+// x2: receiver.
+// x3: argc.
+// x4: argv.
+// Output:
+// x0: result.
+static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
+ bool is_construct) {
+ // Called from JSEntryStub::GenerateBody().
+ Register function = x1;
+ Register receiver = x2;
+ Register argc = x3;
+ Register argv = x4;
+
+ ProfileEntryHookStub::MaybeCallEntryHook(masm);
+
+ // Clear the context before we push it when entering the internal frame.
+ __ Mov(cp, 0);
+
+ {
+ // Enter an internal frame.
+ FrameScope scope(masm, StackFrame::INTERNAL);
+
+ // Set up the context from the function argument.
+ __ Ldr(cp, FieldMemOperand(function, JSFunction::kContextOffset));
+
+ __ InitializeRootRegister();
+
+ // Push the function and the receiver onto the stack.
+ __ Push(function, receiver);
+
+ // Copy arguments to the stack in a loop, in reverse order.
+ // x3: argc.
+ // x4: argv.
+ Label loop, entry;
+ // Compute the copy end address.
+ __ Add(x10, argv, Operand(argc, LSL, kPointerSizeLog2));
+
+ // TODO(all): This can potentially be optimized with ldp/stp to speed up
+ // arguments passing from C++ to JS.
+ __ B(&entry);
+ __ Bind(&loop);
+ // Each argv slot holds a handle; load the slot, then dereference it to
+ // obtain the actual argument before pushing.
+ __ Ldr(x11, MemOperand(argv, kPointerSize, PostIndex));
+ __ Ldr(x12, MemOperand(x11)); // Dereference the handle.
+ __ Push(x12); // Push the argument.
+ __ Bind(&entry);
+ __ Cmp(x10, argv);
+ __ B(ne, &loop);
+
+ // Initialize all JavaScript callee-saved registers, since they will be seen
+ // by the garbage collector as part of handlers.
+ // The original values have been saved in JSEntryStub::GenerateBody().
+ __ LoadRoot(x19, Heap::kUndefinedValueRootIndex);
+ __ Mov(x20, x19);
+ __ Mov(x21, x19);
+ __ Mov(x22, x19);
+ __ Mov(x23, x19);
+ __ Mov(x24, x19);
+ __ Mov(x25, x19);
+ // Don't initialize the reserved registers.
+ // x26 : root register (root).
+ // x27 : context pointer (cp).
+ // x28 : JS stack pointer (jssp).
+ // x29 : frame pointer (fp).
+
+ // TODO(alexandre): Revisit the MAsm function invocation mechanisms.
+ // Currently there is a mix of statically and dynamically allocated
+ // registers.
+ __ Mov(x0, argc);
+ if (is_construct) {
+ // No type feedback cell is available.
+ Handle<Object> undefined_sentinel(
+ masm->isolate()->heap()->undefined_value(), masm->isolate());
+ __ Mov(x2, Operand(undefined_sentinel));
+
+ CallConstructStub stub(NO_CALL_FUNCTION_FLAGS);
+ __ CallStub(&stub);
+ } else {
+ ParameterCount actual(x0);
+ __ InvokeFunction(function, actual, CALL_FUNCTION, NullCallWrapper());
+ }
+ // Exit the JS internal frame and remove the parameters (except function),
+ // and return.
+ }
+
+ // Result is in x0. Return.
+ __ Ret();
+}
+
+
+void Builtins::Generate_JSEntryTrampoline(MacroAssembler* masm) {
+ // Plain call entry: invoke the function directly.
+ Generate_JSEntryTrampolineHelper(masm, false);
+}
+
+
+void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
+ // Construct entry: invoke the function through the construct stub.
+ Generate_JSEntryTrampolineHelper(masm, true);
+}
+
+
+void Builtins::Generate_CompileUnoptimized(MacroAssembler* masm) {
+ // Compile the function via the runtime, then tail-call the code object
+ // the runtime returned.
+ CallRuntimePassFunction(masm, Runtime::kCompileUnoptimized);
+ GenerateTailCallToReturnedCode(masm);
+}
+
+
+// Calls Runtime::kCompileOptimized with the function (x1) and a boolean
+// selecting concurrent compilation as arguments. The function register is
+// preserved across the runtime call.
+static void CallCompileOptimized(MacroAssembler* masm, bool concurrent) {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ Register function = x1;
+
+ // Preserve function. At the same time, push arguments for
+ // kCompileOptimized.
+ __ LoadObject(x10, masm->isolate()->factory()->ToBoolean(concurrent));
+ __ Push(function, function, x10);
+
+ __ CallRuntime(Runtime::kCompileOptimized, 2);
+
+ // Restore the function register; the runtime call consumed the two
+ // arguments pushed above it.
+ __ Pop(function);
+}
+
+
+void Builtins::Generate_CompileOptimized(MacroAssembler* masm) {
+ // Non-concurrent optimizing compile, then tail-call the returned code.
+ CallCompileOptimized(masm, false);
+ GenerateTailCallToReturnedCode(masm);
+}
+
+
+void Builtins::Generate_CompileOptimizedConcurrent(MacroAssembler* masm) {
+ // Concurrent optimizing compile, then tail-call the returned code.
+ CallCompileOptimized(masm, true);
+ GenerateTailCallToReturnedCode(masm);
+}
+
+
+// Makes the calling code object young again via a C call, then resumes it
+// at the address passed in x0.
+static void GenerateMakeCodeYoungAgainCommon(MacroAssembler* masm) {
+ // For now, we are relying on the fact that make_code_young doesn't do any
+ // garbage collection which allows us to save/restore the registers without
+ // worrying about which of them contain pointers. We also don't build an
+ // internal frame to make the code fast, since we shouldn't have to do stack
+ // crawls in MakeCodeYoung. This seems a bit fragile.
+
+ // The following caller-saved registers must be saved and restored when
+ // calling through to the runtime:
+ // x0 - The address from which to resume execution.
+ // x1 - isolate
+ // lr - The return address for the JSFunction itself. It has not yet been
+ // preserved on the stack because the frame setup code was replaced
+ // with a call to this stub, to handle code ageing.
+ {
+ FrameScope scope(masm, StackFrame::MANUAL);
+ __ Push(x0, x1, fp, lr);
+ __ Mov(x1, Operand(ExternalReference::isolate_address(masm->isolate())));
+ __ CallCFunction(
+ ExternalReference::get_make_code_young_function(masm->isolate()), 2);
+ __ Pop(lr, fp, x1, x0);
+ }
+
+ // The calling function has been made young again, so return to execute the
+ // real frame set-up code.
+ __ Br(x0);
+}
+
+// Instantiates the Generate_Make<C>CodeYoungAgain{Even,Odd}Marking builtins
+// for every code age C in CODE_AGE_LIST; all of them simply delegate to
+// GenerateMakeCodeYoungAgainCommon above.
+#define DEFINE_CODE_AGE_BUILTIN_GENERATOR(C) \
+void Builtins::Generate_Make##C##CodeYoungAgainEvenMarking( \
+ MacroAssembler* masm) { \
+ GenerateMakeCodeYoungAgainCommon(masm); \
+} \
+void Builtins::Generate_Make##C##CodeYoungAgainOddMarking( \
+ MacroAssembler* masm) { \
+ GenerateMakeCodeYoungAgainCommon(masm); \
+}
+CODE_AGE_LIST(DEFINE_CODE_AGE_BUILTIN_GENERATOR)
+#undef DEFINE_CODE_AGE_BUILTIN_GENERATOR
+
+
+void Builtins::Generate_MarkCodeAsExecutedOnce(MacroAssembler* masm) {
+ // For now, as in GenerateMakeCodeYoungAgainCommon, we are relying on the fact
+ // that make_code_young doesn't do any garbage collection which allows us to
+ // save/restore the registers without worrying about which of them contain
+ // pointers.
+
+ // The following caller-saved registers must be saved and restored when
+ // calling through to the runtime:
+ // x0 - The address from which to resume execution.
+ // x1 - isolate
+ // lr - The return address for the JSFunction itself. It has not yet been
+ // preserved on the stack because the frame setup code was replaced
+ // with a call to this stub, to handle code ageing.
+ {
+ FrameScope scope(masm, StackFrame::MANUAL);
+ __ Push(x0, x1, fp, lr);
+ __ Mov(x1, Operand(ExternalReference::isolate_address(masm->isolate())));
+ __ CallCFunction(
+ ExternalReference::get_mark_code_as_executed_function(
+ masm->isolate()), 2);
+ __ Pop(lr, fp, x1, x0);
+
+ // Perform prologue operations usually performed by the young code stub.
+ __ EmitFrameSetupForCodeAgePatching(masm);
+ }
+
+ // Jump to point after the code-age stub.
+ __ Add(x0, x0, kCodeAgeSequenceSize);
+ __ Br(x0);
+}
+
+
+void Builtins::Generate_MarkCodeAsExecutedTwice(MacroAssembler* masm) {
+ // Marking twice delegates to the common make-code-young path.
+ GenerateMakeCodeYoungAgainCommon(masm);
+}
+
+
+// Common body of the NotifyStubFailure builtins: preserves the safepoint
+// registers around a call to Runtime::kNotifyStubFailure, drops the state
+// word pushed by the deoptimizer, and jumps to the miss handler in lr.
+static void Generate_NotifyStubFailureHelper(MacroAssembler* masm,
+ SaveFPRegsMode save_doubles) {
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+
+ // Preserve registers across notification, this is important for compiled
+ // stubs that tail call the runtime on deopts passing their parameters in
+ // registers.
+ // TODO(jbramley): Is it correct (and appropriate) to use safepoint
+ // registers here? According to the comment above, we should only need to
+ // preserve the registers with parameters.
+ __ PushXRegList(kSafepointSavedRegisters);
+ // Pass the function and deoptimization type to the runtime system.
+ __ CallRuntime(Runtime::kNotifyStubFailure, 0, save_doubles);
+ __ PopXRegList(kSafepointSavedRegisters);
+ }
+
+ // Ignore state (pushed by Deoptimizer::EntryGenerator::Generate).
+ __ Drop(1);
+
+ // Jump to the miss handler. Deoptimizer::EntryGenerator::Generate loads this
+ // into lr before it jumps here.
+ __ Br(lr);
+}
+
+
+void Builtins::Generate_NotifyStubFailure(MacroAssembler* masm) {
+ // Variant that does not save floating point registers.
+ Generate_NotifyStubFailureHelper(masm, kDontSaveFPRegs);
+}
+
+
+void Builtins::Generate_NotifyStubFailureSaveDoubles(MacroAssembler* masm) {
+ // Variant that also saves floating point registers.
+ Generate_NotifyStubFailureHelper(masm, kSaveFPRegs);
+}
+
+
+// Common body of the NotifyDeoptimized builtins: reports the bailout type
+// to the runtime, then switches on the full-codegen state pushed by the
+// deoptimizer to decide how many stack slots to drop before returning.
+static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
+ Deoptimizer::BailoutType type) {
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ // Pass the deoptimization type to the runtime system.
+ __ Mov(x0, Operand(Smi::FromInt(static_cast<int>(type))));
+ __ Push(x0);
+ __ CallRuntime(Runtime::kNotifyDeoptimized, 1);
+ }
+
+ // Get the full codegen state from the stack and untag it.
+ Register state = x6;
+ __ Peek(state, 0);
+ __ SmiUntag(state);
+
+ // Switch on the state.
+ Label with_tos_register, unknown_state;
+ __ CompareAndBranch(
+ state, FullCodeGenerator::NO_REGISTERS, ne, &with_tos_register);
+ __ Drop(1); // Remove state.
+ __ Ret();
+
+ __ Bind(&with_tos_register);
+ // Reload TOS register.
+ __ Peek(x0, kPointerSize);
+ __ CompareAndBranch(state, FullCodeGenerator::TOS_REG, ne, &unknown_state);
+ __ Drop(2); // Remove state and TOS.
+ __ Ret();
+
+ // Any other state value is a bug.
+ __ Bind(&unknown_state);
+ __ Abort(kInvalidFullCodegenState);
+}
+
+
+void Builtins::Generate_NotifyDeoptimized(MacroAssembler* masm) {
+ Generate_NotifyDeoptimizedHelper(masm, Deoptimizer::EAGER);
+}
+
+
+void Builtins::Generate_NotifyLazyDeoptimized(MacroAssembler* masm) {
+ Generate_NotifyDeoptimizedHelper(masm, Deoptimizer::LAZY);
+}
+
+
+void Builtins::Generate_NotifySoftDeoptimized(MacroAssembler* masm) {
+ Generate_NotifyDeoptimizedHelper(masm, Deoptimizer::SOFT);
+}
+
+
+void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
+ // Asks the runtime to compile an OSR replacement for the current function
+ // and, if one is produced, "returns" into its OSR entry point.
+ // Lookup the function in the JavaScript frame.
+ __ Ldr(x0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ // Pass function as argument.
+ __ Push(x0);
+ __ CallRuntime(Runtime::kCompileForOnStackReplacement, 1);
+ }
+
+ // If the code object is null, just return to the unoptimized code.
+ Label skip;
+ __ CompareAndBranch(x0, Operand(Smi::FromInt(0)), ne, &skip);
+ __ Ret();
+
+ __ Bind(&skip);
+
+ // Load deoptimization data from the code object.
+ // <deopt_data> = <code>[#deoptimization_data_offset]
+ __ Ldr(x1, MemOperand(x0, Code::kDeoptimizationDataOffset - kHeapObjectTag));
+
+ // Load the OSR entrypoint offset from the deoptimization data.
+ // <osr_offset> = <deopt_data>[#header_size + #osr_pc_offset]
+ // NOTE(review): Ldrsw with a W-register destination looks suspicious
+ // (sign-extension wants an X destination) - confirm this assembles as
+ // intended.
+ __ Ldrsw(w1, UntagSmiFieldMemOperand(x1, FixedArray::OffsetOfElementAt(
+ DeoptimizationInputData::kOsrPcOffsetIndex)));
+
+ // Compute the target address = code_obj + header_size + osr_offset
+ // <entry_addr> = <code_obj> + #header_size + <osr_offset>
+ __ Add(x0, x0, x1);
+ __ Add(lr, x0, Code::kHeaderSize - kHeapObjectTag);
+
+ // And "return" to the OSR entry point of the function.
+ __ Ret();
+}
+
+
+void Builtins::Generate_OsrAfterStackCheck(MacroAssembler* masm) {
+ // We check the stack limit as indicator that recompilation might be done.
+ Label ok;
+ __ CompareRoot(jssp, Heap::kStackLimitRootIndex);
+ __ B(hs, &ok);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ CallRuntime(Runtime::kStackGuard, 0);
+ }
+ // The stack guard has run; continue in the OnStackReplacement builtin.
+ __ Jump(masm->isolate()->builtins()->OnStackReplacement(),
+ RelocInfo::CODE_TARGET);
+
+ // Stack limit not hit: nothing to do.
+ __ Bind(&ok);
+ __ Ret();
+}
+
+
+void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
+ // Implements Function.prototype.call: ensures there is a receiver,
+ // resolves the callee (function, proxy or non-function), patches the
+ // receiver if required, shifts the arguments down one slot and invokes
+ // the callee (adapting arguments if the counts differ).
+ Register receiver_type = x13;
+
+ ASM_LOCATION("Builtins::Generate_FunctionCall");
+ // TODO(all/rames): Optimize and use named registers.
+ // 1. Make sure we have at least one argument.
+ // x0: actual number of arguments
+ { Label done;
+ __ Cbnz(x0, &done);
+ __ LoadRoot(x10, Heap::kUndefinedValueRootIndex);
+ __ Push(x10);
+ __ Mov(x0, 1);
+ __ Bind(&done);
+ }
+
+ // 2. Get the function to call (passed as receiver) from the stack, check
+ // if it is a function.
+ // x0: actual number of arguments
+ Label slow, non_function;
+ // TODO(jbramley): Consider giving Peek a unit_size parameter, like Claim and
+ // Drop. This usage pattern is very common.
+ __ Peek(x1, Operand(x0, LSL, kXRegSizeInBytesLog2));
+ __ JumpIfSmi(x1, &non_function);
+ __ JumpIfNotObjectType(x1, x10, receiver_type, JS_FUNCTION_TYPE, &slow);
+
+ // 3a. Patch the first argument if necessary when calling a function.
+ // x0: actual number of arguments
+ // x1: function
+ Label shift_arguments;
+ __ Mov(x4, 0); // Indicates a regular JS_FUNCTION.
+ { Label convert_to_object, use_global_receiver, patch_receiver;
+ // Change context eagerly in case we need the global receiver.
+ __ Ldr(cp, FieldMemOperand(x1, JSFunction::kContextOffset));
+
+ // Do not transform the receiver for strict mode functions.
+ __ Ldr(x10, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
+ __ Ldr(w11, FieldMemOperand(x10, SharedFunctionInfo::kCompilerHintsOffset));
+ __ Tbnz(x11, SharedFunctionInfo::kStrictModeFunction, &shift_arguments);
+
+ // TODO(all): Should we insert space to avoid BTAC collisions?
+ // Do not transform the receiver for native functions (the compiler
+ // hints are already in x11).
+ __ Tbnz(x11, SharedFunctionInfo::kNative, &shift_arguments);
+
+ // Compute the receiver in non-strict mode.
+ __ Sub(x10, x0, 1);
+ __ Peek(x2, Operand(x10, LSL, kXRegSizeInBytesLog2));
+ // x0: actual number of arguments
+ // x1: function
+ // x2: first argument
+ __ JumpIfSmi(x2, &convert_to_object);
+
+ // TODO(all): We could potentially work to optimize loads of root values.
+ // TODO(all): If the indexes are successive we can use 'ldp'.
+ __ JumpIfRoot(x2, Heap::kUndefinedValueRootIndex, &use_global_receiver);
+ __ JumpIfRoot(x2, Heap::kNullValueRootIndex, &use_global_receiver);
+
+ STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
+ __ JumpIfObjectType(x2, x10, x11, FIRST_SPEC_OBJECT_TYPE, &shift_arguments,
+ ge);
+
+ __ Bind(&convert_to_object);
+
+ {
+ // Enter an internal frame in order to preserve argument count.
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ // Smi-tag the argument count so it is a valid stack value.
+ __ SmiTag(x0);
+
+ __ Push(x0, x2);
+ __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
+ __ Mov(x2, x0);
+
+ __ Pop(x0);
+ __ SmiUntag(x0);
+
+ // Exit the internal frame.
+ }
+
+ // Restore the function to x1, and the flag to x4.
+ __ Peek(x1, Operand(x0, LSL, kXRegSizeInBytesLog2));
+ __ Mov(x4, 0);
+ __ B(&patch_receiver);
+
+ __ Bind(&use_global_receiver);
+ __ Ldr(x2, GlobalObjectMemOperand());
+ __ Ldr(x2, FieldMemOperand(x2, GlobalObject::kGlobalReceiverOffset));
+
+ __ Bind(&patch_receiver);
+ __ Sub(x10, x0, 1);
+ __ Poke(x2, Operand(x10, LSL, kXRegSizeInBytesLog2));
+
+ __ B(&shift_arguments);
+ }
+
+ // 3b. Check for function proxy.
+ __ Bind(&slow);
+ __ Mov(x4, 1); // Indicate function proxy.
+ __ Cmp(receiver_type, JS_FUNCTION_PROXY_TYPE);
+ __ B(eq, &shift_arguments);
+ __ Bind(&non_function);
+ __ Mov(x4, 2); // Indicate non-function.
+
+ // 3c. Patch the first argument when calling a non-function. The
+ // CALL_NON_FUNCTION builtin expects the non-function callee as
+ // receiver, so overwrite the first argument which will ultimately
+ // become the receiver.
+ // x0: actual number of arguments
+ // x1: function
+ // x4: call type (0: JS function, 1: function proxy, 2: non-function)
+ __ Sub(x10, x0, 1);
+ __ Poke(x1, Operand(x10, LSL, kXRegSizeInBytesLog2));
+
+ // 4. Shift arguments and return address one slot down on the stack
+ // (overwriting the original receiver). Adjust argument count to make
+ // the original first argument the new receiver.
+ // x0: actual number of arguments
+ // x1: function
+ // x4: call type (0: JS function, 1: function proxy, 2: non-function)
+ __ Bind(&shift_arguments);
+ { Label loop;
+ // Calculate the copy start address (destination). Copy end address is jssp.
+ __ Add(x11, jssp, Operand(x0, LSL, kPointerSizeLog2));
+ __ Sub(x10, x11, kPointerSize);
+
+ // TODO(all): Optimize to copy values 2 by 2?
+ __ Bind(&loop);
+ __ Ldr(x12, MemOperand(x10, -kPointerSize, PostIndex));
+ __ Str(x12, MemOperand(x11, -kPointerSize, PostIndex));
+ __ Cmp(x10, jssp);
+ __ B(ge, &loop);
+ // Adjust the actual number of arguments and remove the top element
+ // (which is a copy of the last argument).
+ __ Sub(x0, x0, 1);
+ __ Drop(1);
+ }
+
+ // 5a. Call non-function via tail call to CALL_NON_FUNCTION builtin,
+ // or a function proxy via CALL_FUNCTION_PROXY.
+ // x0: actual number of arguments
+ // x1: function
+ // x4: call type (0: JS function, 1: function proxy, 2: non-function)
+ { Label function, non_proxy;
+ __ Cbz(x4, &function);
+ // Expected number of arguments is 0 for CALL_NON_FUNCTION.
+ __ Mov(x2, 0);
+ __ Cmp(x4, 1);
+ __ B(ne, &non_proxy);
+
+ __ Push(x1); // Re-add proxy object as additional argument.
+ __ Add(x0, x0, 1);
+ __ GetBuiltinFunction(x1, Builtins::CALL_FUNCTION_PROXY);
+ __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
+ RelocInfo::CODE_TARGET);
+
+ __ Bind(&non_proxy);
+ __ GetBuiltinFunction(x1, Builtins::CALL_NON_FUNCTION);
+ __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
+ RelocInfo::CODE_TARGET);
+ __ Bind(&function);
+ }
+
+ // 5b. Get the code to call from the function and check that the number of
+ // expected arguments matches what we're providing. If so, jump
+ // (tail-call) to the code entry in x3 without checking arguments.
+ // x0: actual number of arguments
+ // x1: function
+ __ Ldr(x3, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
+ __ Ldrsw(x2,
+ FieldMemOperand(x3,
+ SharedFunctionInfo::kFormalParameterCountOffset));
+ Label dont_adapt_args;
+ __ Cmp(x2, x0); // Check formal and actual parameter counts.
+ __ B(eq, &dont_adapt_args);
+ __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
+ RelocInfo::CODE_TARGET);
+ __ Bind(&dont_adapt_args);
+
+ __ Ldr(x3, FieldMemOperand(x1, JSFunction::kCodeEntryOffset));
+ ParameterCount expected(0);
+ __ InvokeCode(x3, expected, expected, JUMP_FUNCTION, NullCallWrapper());
+}
+
+
+void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
+ // Implements Function.prototype.apply: validates the arguments array,
+ // checks for stack overflow, computes the receiver, copies the array
+ // elements onto the stack one by one (via Runtime::kGetProperty) and
+ // invokes the function (or the proxy path for function proxies).
+ ASM_LOCATION("Builtins::Generate_FunctionApply");
+ // Stack offsets, relative to fp, of the apply state and the values the
+ // caller pushed.
+ const int kIndexOffset =
+ StandardFrameConstants::kExpressionsOffset - (2 * kPointerSize);
+ const int kLimitOffset =
+ StandardFrameConstants::kExpressionsOffset - (1 * kPointerSize);
+ const int kArgsOffset = 2 * kPointerSize;
+ const int kReceiverOffset = 3 * kPointerSize;
+ const int kFunctionOffset = 4 * kPointerSize;
+
+ {
+ FrameScope frame_scope(masm, StackFrame::INTERNAL);
+
+ Register args = x12;
+ Register receiver = x14;
+ Register function = x15;
+
+ // Get the length of the arguments via a builtin call.
+ __ Ldr(function, MemOperand(fp, kFunctionOffset));
+ __ Ldr(args, MemOperand(fp, kArgsOffset));
+ __ Push(function, args);
+ __ InvokeBuiltin(Builtins::APPLY_PREPARE, CALL_FUNCTION);
+ Register argc = x0;
+
+ // Check the stack for overflow.
+ // We are not trying to catch interruptions (e.g. debug break and
+ // preemption) here, so the "real stack limit" is checked.
+ Label enough_stack_space;
+ __ LoadRoot(x10, Heap::kRealStackLimitRootIndex);
+ __ Ldr(function, MemOperand(fp, kFunctionOffset));
+ // Make x10 the space we have left. The stack might already be overflowed
+ // here which will cause x10 to become negative.
+ // TODO(jbramley): Check that the stack usage here is safe.
+ __ Sub(x10, jssp, x10);
+ // Check if the arguments will overflow the stack.
+ __ Cmp(x10, Operand(argc, LSR, kSmiShift - kPointerSizeLog2));
+ __ B(gt, &enough_stack_space);
+ // There is not enough stack space, so use a builtin to throw an appropriate
+ // error.
+ __ Push(function, argc);
+ __ InvokeBuiltin(Builtins::APPLY_OVERFLOW, CALL_FUNCTION);
+ // We should never return from the APPLY_OVERFLOW builtin.
+ if (__ emit_debug_code()) {
+ __ Unreachable();
+ }
+
+ __ Bind(&enough_stack_space);
+ // Push current limit and index.
+ __ Mov(x1, 0); // Initial index.
+ __ Push(argc, x1);
+
+ Label push_receiver;
+ __ Ldr(receiver, MemOperand(fp, kReceiverOffset));
+
+ // Check that the function is a JS function. Otherwise it must be a proxy.
+ // When it is not the function proxy will be invoked later.
+ __ JumpIfNotObjectType(function, x10, x11, JS_FUNCTION_TYPE,
+ &push_receiver);
+
+ // Change context eagerly to get the right global object if necessary.
+ __ Ldr(cp, FieldMemOperand(function, JSFunction::kContextOffset));
+ // Load the shared function info.
+ __ Ldr(x2, FieldMemOperand(function,
+ JSFunction::kSharedFunctionInfoOffset));
+
+ // Compute and push the receiver.
+ // Do not transform the receiver for strict mode functions.
+ Label convert_receiver_to_object, use_global_receiver;
+ __ Ldr(w10, FieldMemOperand(x2, SharedFunctionInfo::kCompilerHintsOffset));
+ __ Tbnz(x10, SharedFunctionInfo::kStrictModeFunction, &push_receiver);
+ // Do not transform the receiver for native functions.
+ __ Tbnz(x10, SharedFunctionInfo::kNative, &push_receiver);
+
+ // Compute the receiver in non-strict mode.
+ __ JumpIfSmi(receiver, &convert_receiver_to_object);
+ __ JumpIfRoot(receiver, Heap::kNullValueRootIndex, &use_global_receiver);
+ __ JumpIfRoot(receiver, Heap::kUndefinedValueRootIndex,
+ &use_global_receiver);
+
+ // Check if the receiver is already a JavaScript object.
+ STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
+ __ JumpIfObjectType(receiver, x10, x11, FIRST_SPEC_OBJECT_TYPE,
+ &push_receiver, ge);
+
+ // Call a builtin to convert the receiver to a regular object.
+ __ Bind(&convert_receiver_to_object);
+ __ Push(receiver);
+ __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
+ __ Mov(receiver, x0);
+ __ B(&push_receiver);
+
+ __ Bind(&use_global_receiver);
+ __ Ldr(x10, GlobalObjectMemOperand());
+ __ Ldr(receiver, FieldMemOperand(x10, GlobalObject::kGlobalReceiverOffset));
+
+ // Push the receiver
+ __ Bind(&push_receiver);
+ __ Push(receiver);
+
+ // Copy all arguments from the array to the stack.
+ Label entry, loop;
+ Register current = x0;
+ __ Ldr(current, MemOperand(fp, kIndexOffset));
+ __ B(&entry);
+
+ __ Bind(&loop);
+ // Load the current argument from the arguments array and push it.
+ // TODO(all): Couldn't we optimize this for JS arrays?
+
+ __ Ldr(x1, MemOperand(fp, kArgsOffset));
+ __ Push(x1, current);
+
+ // Call the runtime to access the property in the arguments array.
+ __ CallRuntime(Runtime::kGetProperty, 2);
+ __ Push(x0);
+
+ // Use inline caching to access the arguments.
+ __ Ldr(current, MemOperand(fp, kIndexOffset));
+ __ Add(current, current, Operand(Smi::FromInt(1)));
+ __ Str(current, MemOperand(fp, kIndexOffset));
+
+ // Test if the copy loop has finished copying all the elements from the
+ // arguments object.
+ __ Bind(&entry);
+ __ Ldr(x1, MemOperand(fp, kLimitOffset));
+ __ Cmp(current, x1);
+ __ B(ne, &loop);
+
+ // At the end of the loop, the number of arguments is stored in 'current',
+ // represented as a smi.
+
+ function = x1; // From now on we want the function to be kept in x1;
+ __ Ldr(function, MemOperand(fp, kFunctionOffset));
+
+ // Call the function.
+ Label call_proxy;
+ ParameterCount actual(current);
+ __ SmiUntag(current);
+ __ JumpIfNotObjectType(function, x10, x11, JS_FUNCTION_TYPE, &call_proxy);
+ __ InvokeFunction(function, actual, CALL_FUNCTION, NullCallWrapper());
+ frame_scope.GenerateLeaveFrame();
+ __ Drop(3);
+ __ Ret();
+
+ // Call the function proxy.
+ __ Bind(&call_proxy);
+ // x0 : argc
+ // x1 : function
+ __ Push(function); // Add function proxy as last argument.
+ __ Add(x0, x0, 1);
+ __ Mov(x2, 0);
+ __ GetBuiltinFunction(x1, Builtins::CALL_FUNCTION_PROXY);
+ __ Call(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
+ RelocInfo::CODE_TARGET);
+ }
+ // Drop the three caller slots and return (proxy path).
+ __ Drop(3);
+ __ Ret();
+}
+
+
+// Builds an arguments adaptor frame: pushes lr, fp, the ARGUMENTS_ADAPTOR
+// context marker, the function (x1) and the smi-tagged argument count
+// (from x0), then points fp at the fixed part of the new frame.
+static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
+ __ SmiTag(x10, x0);
+ __ Mov(x11, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+ __ Push(lr, fp);
+ __ Push(x11, x1, x10);
+ __ Add(fp, jssp,
+ StandardFrameConstants::kFixedFrameSizeFromFp + kPointerSize);
+}
+
+
+// Tears down the arguments adaptor frame built above, dropping the
+// (smi-tagged) number of arguments plus the receiver.
+static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- x0 : result being passed through
+ // -----------------------------------
+ // Get the number of arguments passed (as a smi), tear down the frame and
+ // then drop the parameters and the receiver.
+ __ Ldr(x10, MemOperand(fp, -(StandardFrameConstants::kFixedFrameSizeFromFp +
+ kPointerSize)));
+ __ Mov(jssp, fp);
+ __ Pop(fp, lr);
+ __ DropBySMI(x10, kXRegSizeInBytes);
+ __ Drop(1); // Drop the receiver.
+}
+
+
+void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
+ // Adapts the actual argument count to the callee's expected count: copies
+ // the provided arguments into a new adaptor frame and, when too few were
+ // passed, pads the remainder with undefined before calling the code entry.
+ ASM_LOCATION("Builtins::Generate_ArgumentsAdaptorTrampoline");
+ // ----------- S t a t e -------------
+ // -- x0 : actual number of arguments
+ // -- x1 : function (passed through to callee)
+ // -- x2 : expected number of arguments
+ // -----------------------------------
+
+ Label invoke, dont_adapt_arguments;
+
+ Label enough, too_few;
+ __ Ldr(x3, FieldMemOperand(x1, JSFunction::kCodeEntryOffset));
+ __ Cmp(x0, x2);
+ __ B(lt, &too_few);
+ // The sentinel means the callee accepts any argument count unchanged.
+ __ Cmp(x2, SharedFunctionInfo::kDontAdaptArgumentsSentinel);
+ __ B(eq, &dont_adapt_arguments);
+
+ { // Enough parameters: actual >= expected
+ EnterArgumentsAdaptorFrame(masm);
+
+ // Calculate copy start address into x10 and end address into x11.
+ // x0: actual number of arguments
+ // x1: function
+ // x2: expected number of arguments
+ // x3: code entry to call
+ __ Add(x10, fp, Operand(x0, LSL, kPointerSizeLog2));
+ // Adjust for return address and receiver
+ __ Add(x10, x10, 2 * kPointerSize);
+ __ Sub(x11, x10, Operand(x2, LSL, kPointerSizeLog2));
+
+ // Copy the arguments (including the receiver) to the new stack frame.
+ // x0: actual number of arguments
+ // x1: function
+ // x2: expected number of arguments
+ // x3: code entry to call
+ // x10: copy start address
+ // x11: copy end address
+
+ // TODO(all): Should we push values 2 by 2?
+ Label copy;
+ __ Bind(&copy);
+ __ Cmp(x10, x11);
+ __ Ldr(x12, MemOperand(x10, -kPointerSize, PostIndex));
+ __ Push(x12);
+ __ B(gt, &copy);
+
+ __ B(&invoke);
+ }
+
+ { // Too few parameters: Actual < expected
+ __ Bind(&too_few);
+ EnterArgumentsAdaptorFrame(masm);
+
+ // Calculate copy start address into x10 and copy end address into x11.
+ // x0: actual number of arguments
+ // x1: function
+ // x2: expected number of arguments
+ // x3: code entry to call
+ // Adjust for return address.
+ __ Add(x11, fp, 1 * kPointerSize);
+ __ Add(x10, x11, Operand(x0, LSL, kPointerSizeLog2));
+ __ Add(x10, x10, 1 * kPointerSize);
+
+ // Copy the arguments (including the receiver) to the new stack frame.
+ // x0: actual number of arguments
+ // x1: function
+ // x2: expected number of arguments
+ // x3: code entry to call
+ // x10: copy start address
+ // x11: copy end address
+ Label copy;
+ __ Bind(&copy);
+ __ Ldr(x12, MemOperand(x10, -kPointerSize, PostIndex));
+ __ Push(x12);
+ __ Cmp(x10, x11); // Compare before moving to next argument.
+ __ B(ne, &copy);
+
+ // Fill the remaining expected arguments with undefined.
+ // x0: actual number of arguments
+ // x1: function
+ // x2: expected number of arguments
+ // x3: code entry to call
+ __ LoadRoot(x10, Heap::kUndefinedValueRootIndex);
+ __ Sub(x11, fp, Operand(x2, LSL, kPointerSizeLog2));
+ // Adjust for the arguments adaptor frame and already pushed receiver.
+ __ Sub(x11, x11,
+ StandardFrameConstants::kFixedFrameSizeFromFp + (2 * kPointerSize));
+
+ // TODO(all): Optimize this to use ldp?
+ Label fill;
+ __ Bind(&fill);
+ __ Push(x10);
+ __ Cmp(jssp, x11);
+ __ B(ne, &fill);
+ }
+
+ // Arguments have been adapted. Now call the entry point.
+ __ Bind(&invoke);
+ __ Call(x3);
+
+ // Store offset of return address for deoptimizer.
+ masm->isolate()->heap()->SetArgumentsAdaptorDeoptPCOffset(masm->pc_offset());
+
+ // Exit frame and return.
+ LeaveArgumentsAdaptorFrame(masm);
+ __ Ret();
+
+ // Call the entry point without adapting the arguments.
+ __ Bind(&dont_adapt_arguments);
+ __ Jump(x3);
+}
+
+
+#undef __
+
+} } // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_A64
--- /dev/null
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#if V8_TARGET_ARCH_A64
+
+#include "bootstrapper.h"
+#include "code-stubs.h"
+#include "regexp-macro-assembler.h"
+#include "stub-cache.h"
+
+namespace v8 {
+namespace internal {
+
+
+void FastNewClosureStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ // x2: function info
+ static Register registers[] = { x2 };
+ descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ =
+ Runtime::FunctionForId(Runtime::kNewClosureFromStubFailure)->entry;
+}
+
+
+void FastNewContextStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ // x1: function
+ static Register registers[] = { x1 };
+ descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ = NULL;
+}
+
+
+void ToNumberStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ // x0: value
+ static Register registers[] = { x0 };
+ descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ = NULL;
+}
+
+
+void NumberToStringStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ // x0: value
+ static Register registers[] = { x0 };
+ descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ =
+ Runtime::FunctionForId(Runtime::kNumberToString)->entry;
+}
+
+
+void FastCloneShallowArrayStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ // x3: array literals array
+ // x2: array literal index
+ // x1: constant elements
+ static Register registers[] = { x3, x2, x1 };
+ descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ =
+ Runtime::FunctionForId(Runtime::kCreateArrayLiteralStubBailout)->entry;
+}
+
+
+void FastCloneShallowObjectStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ // x3: object literals array
+ // x2: object literal index
+ // x1: constant properties
+ // x0: object literal flags
+ static Register registers[] = { x3, x2, x1, x0 };
+ descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ =
+ Runtime::FunctionForId(Runtime::kCreateObjectLiteral)->entry;
+}
+
+
+void CreateAllocationSiteStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ // x2: feedback vector
+ // x3: call feedback slot
+ static Register registers[] = { x2, x3 };
+ descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ = NULL;
+}
+
+
+void KeyedLoadFastElementStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ // x1: receiver
+ // x0: key
+ static Register registers[] = { x1, x0 };
+ descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ =
+ FUNCTION_ADDR(KeyedLoadIC_MissFromStubFailure);
+}
+
+
+void KeyedLoadDictionaryElementStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ // x1: receiver
+ // x0: key
+ static Register registers[] = { x1, x0 };
+ descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ =
+ FUNCTION_ADDR(KeyedLoadIC_MissFromStubFailure);
+}
+
+
+void RegExpConstructResultStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ // x2: length
+ // x1: index (of last match)
+ // x0: string
+ static Register registers[] = { x2, x1, x0 };
+ descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ =
+ Runtime::FunctionForId(Runtime::kRegExpConstructResult)->entry;
+}
+
+
+void LoadFieldStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ // x0: receiver
+ static Register registers[] = { x0 };
+ descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ = NULL;
+}
+
+
+void KeyedLoadFieldStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ // x1: receiver
+ static Register registers[] = { x1 };
+ descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ = NULL;
+}
+
+
+void KeyedStoreFastElementStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ // x2: receiver
+ // x1: key
+ // x0: value
+ static Register registers[] = { x2, x1, x0 };
+ descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ =
+ FUNCTION_ADDR(KeyedStoreIC_MissFromStubFailure);
+}
+
+
+void TransitionElementsKindStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ // x0: value (js_array)
+ // x1: to_map
+ static Register registers[] = { x0, x1 };
+ descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
+ descriptor->register_params_ = registers;
+ Address entry =
+ Runtime::FunctionForId(Runtime::kTransitionElementsKind)->entry;
+ descriptor->deoptimization_handler_ = FUNCTION_ADDR(entry);
+}
+
+
+void CompareNilICStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ // x0: value to compare
+ static Register registers[] = { x0 };
+ descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ =
+ FUNCTION_ADDR(CompareNilIC_Miss);
+ descriptor->SetMissHandler(
+ ExternalReference(IC_Utility(IC::kCompareNilIC_Miss), isolate));
+}
+
+
+static void InitializeArrayConstructorDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor,
+ int constant_stack_parameter_count) {
+ // x1: function
+ // x2: allocation site with elements kind
+ // x0: number of arguments to the constructor function
+ static Register registers_variable_args[] = { x1, x2, x0 };
+ static Register registers_no_args[] = { x1, x2 };
+
+ if (constant_stack_parameter_count == 0) {
+ descriptor->register_param_count_ =
+ sizeof(registers_no_args) / sizeof(registers_no_args[0]);
+ descriptor->register_params_ = registers_no_args;
+ } else {
+ // stack param count needs (constructor pointer, and single argument)
+ descriptor->handler_arguments_mode_ = PASS_ARGUMENTS;
+ descriptor->stack_parameter_count_ = x0;
+ descriptor->register_param_count_ =
+ sizeof(registers_variable_args) / sizeof(registers_variable_args[0]);
+ descriptor->register_params_ = registers_variable_args;
+ }
+
+ descriptor->hint_stack_parameter_count_ = constant_stack_parameter_count;
+ descriptor->function_mode_ = JS_FUNCTION_STUB_MODE;
+ descriptor->deoptimization_handler_ =
+ Runtime::FunctionForId(Runtime::kArrayConstructor)->entry;
+}
+
+
+void ArrayNoArgumentConstructorStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ InitializeArrayConstructorDescriptor(isolate, descriptor, 0);
+}
+
+
+void ArraySingleArgumentConstructorStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ InitializeArrayConstructorDescriptor(isolate, descriptor, 1);
+}
+
+
+void ArrayNArgumentsConstructorStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ InitializeArrayConstructorDescriptor(isolate, descriptor, -1);
+}
+
+
+static void InitializeInternalArrayConstructorDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor,
+ int constant_stack_parameter_count) {
+ // x1: constructor function
+ // x0: number of arguments to the constructor function
+ static Register registers_variable_args[] = { x1, x0 };
+ static Register registers_no_args[] = { x1 };
+
+ if (constant_stack_parameter_count == 0) {
+ descriptor->register_param_count_ =
+ sizeof(registers_no_args) / sizeof(registers_no_args[0]);
+ descriptor->register_params_ = registers_no_args;
+ } else {
+ // stack param count needs (constructor pointer, and single argument)
+ descriptor->handler_arguments_mode_ = PASS_ARGUMENTS;
+ descriptor->stack_parameter_count_ = x0;
+ descriptor->register_param_count_ =
+ sizeof(registers_variable_args) / sizeof(registers_variable_args[0]);
+ descriptor->register_params_ = registers_variable_args;
+ }
+
+ descriptor->hint_stack_parameter_count_ = constant_stack_parameter_count;
+ descriptor->function_mode_ = JS_FUNCTION_STUB_MODE;
+ descriptor->deoptimization_handler_ =
+ Runtime::FunctionForId(Runtime::kInternalArrayConstructor)->entry;
+}
+
+
+void InternalArrayNoArgumentConstructorStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ InitializeInternalArrayConstructorDescriptor(isolate, descriptor, 0);
+}
+
+
+void InternalArraySingleArgumentConstructorStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ InitializeInternalArrayConstructorDescriptor(isolate, descriptor, 1);
+}
+
+
+void InternalArrayNArgumentsConstructorStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ InitializeInternalArrayConstructorDescriptor(isolate, descriptor, -1);
+}
+
+
+void ToBooleanStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ // x0: value
+ static Register registers[] = { x0 };
+ descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ = FUNCTION_ADDR(ToBooleanIC_Miss);
+ descriptor->SetMissHandler(
+ ExternalReference(IC_Utility(IC::kToBooleanIC_Miss), isolate));
+}
+
+
+void StoreGlobalStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ // x1: receiver
+ // x2: key (unused)
+ // x0: value
+ static Register registers[] = { x1, x2, x0 };
+ descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ =
+ FUNCTION_ADDR(StoreIC_MissFromStubFailure);
+}
+
+
+void ElementsTransitionAndStoreStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ // x0: value
+ // x3: target map
+ // x1: key
+ // x2: receiver
+ static Register registers[] = { x0, x3, x1, x2 };
+ descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ =
+ FUNCTION_ADDR(ElementsTransitionAndStoreIC_Miss);
+}
+
+
+void BinaryOpICStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ // x1: left operand
+ // x0: right operand
+ static Register registers[] = { x1, x0 };
+ descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ = FUNCTION_ADDR(BinaryOpIC_Miss);
+ descriptor->SetMissHandler(
+ ExternalReference(IC_Utility(IC::kBinaryOpIC_Miss), isolate));
+}
+
+
+void BinaryOpWithAllocationSiteStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ // x2: allocation site
+ // x1: left operand
+ // x0: right operand
+ static Register registers[] = { x2, x1, x0 };
+ descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ =
+ FUNCTION_ADDR(BinaryOpIC_MissWithAllocationSite);
+}
+
+
+void StringAddStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ // x1: left operand
+ // x0: right operand
+ static Register registers[] = { x1, x0 };
+ descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ =
+ Runtime::FunctionForId(Runtime::kStringAdd)->entry;
+}
+
+
+void CallDescriptors::InitializeForIsolate(Isolate* isolate) {
+ static PlatformCallInterfaceDescriptor default_descriptor =
+ PlatformCallInterfaceDescriptor(CAN_INLINE_TARGET_ADDRESS);
+
+ static PlatformCallInterfaceDescriptor noInlineDescriptor =
+ PlatformCallInterfaceDescriptor(NEVER_INLINE_TARGET_ADDRESS);
+
+ {
+ CallInterfaceDescriptor* descriptor =
+ isolate->call_descriptor(Isolate::ArgumentAdaptorCall);
+ static Register registers[] = { x1, // JSFunction
+ cp, // context
+ x0, // actual number of arguments
+ x2, // expected number of arguments
+ };
+ static Representation representations[] = {
+ Representation::Tagged(), // JSFunction
+ Representation::Tagged(), // context
+ Representation::Integer32(), // actual number of arguments
+ Representation::Integer32(), // expected number of arguments
+ };
+ descriptor->register_param_count_ = 4;
+ descriptor->register_params_ = registers;
+ descriptor->param_representations_ = representations;
+ descriptor->platform_specific_descriptor_ = &default_descriptor;
+ }
+ {
+ CallInterfaceDescriptor* descriptor =
+ isolate->call_descriptor(Isolate::KeyedCall);
+ static Register registers[] = { cp, // context
+ x2, // key
+ };
+ static Representation representations[] = {
+ Representation::Tagged(), // context
+ Representation::Tagged(), // key
+ };
+ descriptor->register_param_count_ = 2;
+ descriptor->register_params_ = registers;
+ descriptor->param_representations_ = representations;
+ descriptor->platform_specific_descriptor_ = &noInlineDescriptor;
+ }
+ {
+ CallInterfaceDescriptor* descriptor =
+ isolate->call_descriptor(Isolate::NamedCall);
+ static Register registers[] = { cp, // context
+ x2, // name
+ };
+ static Representation representations[] = {
+ Representation::Tagged(), // context
+ Representation::Tagged(), // name
+ };
+ descriptor->register_param_count_ = 2;
+ descriptor->register_params_ = registers;
+ descriptor->param_representations_ = representations;
+ descriptor->platform_specific_descriptor_ = &noInlineDescriptor;
+ }
+ {
+ CallInterfaceDescriptor* descriptor =
+ isolate->call_descriptor(Isolate::CallHandler);
+ static Register registers[] = { cp, // context
+ x0, // receiver
+ };
+ static Representation representations[] = {
+ Representation::Tagged(), // context
+ Representation::Tagged(), // receiver
+ };
+ descriptor->register_param_count_ = 2;
+ descriptor->register_params_ = registers;
+ descriptor->param_representations_ = representations;
+ descriptor->platform_specific_descriptor_ = &default_descriptor;
+ }
+ {
+ CallInterfaceDescriptor* descriptor =
+ isolate->call_descriptor(Isolate::ApiFunctionCall);
+ static Register registers[] = { x0, // callee
+ x4, // call_data
+ x2, // holder
+ x1, // api_function_address
+ cp, // context
+ };
+ static Representation representations[] = {
+ Representation::Tagged(), // callee
+ Representation::Tagged(), // call_data
+ Representation::Tagged(), // holder
+ Representation::External(), // api_function_address
+ Representation::Tagged(), // context
+ };
+ descriptor->register_param_count_ = 5;
+ descriptor->register_params_ = registers;
+ descriptor->param_representations_ = representations;
+ descriptor->platform_specific_descriptor_ = &default_descriptor;
+ }
+}
+
+
+#define __ ACCESS_MASM(masm)
+
+
+void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm) {
+ // Update the static counter each time a new code stub is generated.
+ Isolate* isolate = masm->isolate();
+ isolate->counters()->code_stubs()->Increment();
+
+ CodeStubInterfaceDescriptor* descriptor = GetInterfaceDescriptor(isolate);
+ int param_count = descriptor->register_param_count_;
+ {
+ // Call the runtime system in a fresh internal frame.
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ ASSERT((descriptor->register_param_count_ == 0) ||
+ x0.Is(descriptor->register_params_[param_count - 1]));
+ // Push arguments
+ // TODO(jbramley): Try to push these in blocks.
+ for (int i = 0; i < param_count; ++i) {
+ __ Push(descriptor->register_params_[i]);
+ }
+ ExternalReference miss = descriptor->miss_handler();
+ __ CallExternalReference(miss, descriptor->register_param_count_);
+ }
+
+ __ Ret();
+}
+
+
+// See call site for description.
+static void EmitIdenticalObjectComparison(MacroAssembler* masm,
+ Register left,
+ Register right,
+ Register scratch,
+ FPRegister double_scratch,
+ Label* slow,
+ Condition cond) {
+ ASSERT(!AreAliased(left, right, scratch));
+ Label not_identical, return_equal, heap_number;
+ Register result = x0;
+
+ __ Cmp(right, left);
+ __ B(ne, &not_identical);
+
+ // Test for NaN. Sadly, we can't just compare to factory::nan_value(),
+ // so we do the second best thing - test it ourselves.
+ // They are both equal and they are not both Smis so both of them are not
+ // Smis. If it's not a heap number, then return equal.
+ if ((cond == lt) || (cond == gt)) {
+ __ JumpIfObjectType(right, scratch, scratch, FIRST_SPEC_OBJECT_TYPE, slow,
+ ge);
+ } else {
+ Register right_type = scratch;
+ __ JumpIfObjectType(right, right_type, right_type, HEAP_NUMBER_TYPE,
+ &heap_number);
+ // Comparing JS objects with <=, >= is complicated.
+ if (cond != eq) {
+ __ Cmp(right_type, FIRST_SPEC_OBJECT_TYPE);
+ __ B(ge, slow);
+ // Normally here we fall through to return_equal, but undefined is
+ // special: (undefined == undefined) == true, but
+ // (undefined <= undefined) == false! See ECMAScript 11.8.5.
+ if ((cond == le) || (cond == ge)) {
+ __ Cmp(right_type, ODDBALL_TYPE);
+ __ B(ne, &return_equal);
+ __ JumpIfNotRoot(right, Heap::kUndefinedValueRootIndex, &return_equal);
+ if (cond == le) {
+ // undefined <= undefined should fail.
+ __ Mov(result, GREATER);
+ } else {
+ // undefined >= undefined should fail.
+ __ Mov(result, LESS);
+ }
+ __ Ret();
+ }
+ }
+ }
+
+ __ Bind(&return_equal);
+ if (cond == lt) {
+ __ Mov(result, GREATER); // Things aren't less than themselves.
+ } else if (cond == gt) {
+ __ Mov(result, LESS); // Things aren't greater than themselves.
+ } else {
+ __ Mov(result, EQUAL); // Things are <=, >=, ==, === themselves.
+ }
+ __ Ret();
+
+ // Cases lt and gt have been handled earlier, and case ne is never seen, as
+ // it is handled in the parser (see Parser::ParseBinaryExpression). We are
+ // only concerned with cases ge, le and eq here.
+ if ((cond != lt) && (cond != gt)) {
+ ASSERT((cond == ge) || (cond == le) || (cond == eq));
+ __ Bind(&heap_number);
+ // Left and right are identical pointers to a heap number object. Return
+ // non-equal if the heap number is a NaN, and equal otherwise. Comparing
+ // the number to itself will set the overflow flag iff the number is NaN.
+ __ Ldr(double_scratch, FieldMemOperand(right, HeapNumber::kValueOffset));
+ __ Fcmp(double_scratch, double_scratch);
+ __ B(vc, &return_equal); // Not NaN, so treat as normal heap number.
+
+ if (cond == le) {
+ __ Mov(result, GREATER);
+ } else {
+ __ Mov(result, LESS);
+ }
+ __ Ret();
+ }
+
+ // No fall through here.
+ if (FLAG_debug_code) {
+ __ Unreachable();
+ }
+
+ __ Bind(&not_identical);
+}
+
+
+// See call site for description.
+static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
+ Register left,
+ Register right,
+ Register left_type,
+ Register right_type,
+ Register scratch) {
+ ASSERT(!AreAliased(left, right, left_type, right_type, scratch));
+
+ if (masm->emit_debug_code()) {
+ // We assume that the arguments are not identical.
+ __ Cmp(left, right);
+ __ Assert(ne, kExpectedNonIdenticalObjects);
+ }
+
+ // If either operand is a JS object or an oddball value, then they are not
+ // equal since their pointers are different.
+ // There is no test for undetectability in strict equality.
+ STATIC_ASSERT(LAST_TYPE == LAST_SPEC_OBJECT_TYPE);
+ Label right_non_object;
+
+ __ Cmp(right_type, FIRST_SPEC_OBJECT_TYPE);
+ __ B(lt, &right_non_object);
+
+ // Return non-zero - x0 already contains a non-zero pointer.
+ ASSERT(left.is(x0) || right.is(x0));
+ Label return_not_equal;
+ __ Bind(&return_not_equal);
+ __ Ret();
+
+ __ Bind(&right_non_object);
+
+ // Check for oddballs: true, false, null, undefined.
+ __ Cmp(right_type, ODDBALL_TYPE);
+
+ // If right is not ODDBALL, test left. Otherwise, set eq condition.
+ __ Ccmp(left_type, ODDBALL_TYPE, ZFlag, ne);
+
+ // If right or left is not ODDBALL, test left >= FIRST_SPEC_OBJECT_TYPE.
+ // Otherwise, right or left is ODDBALL, so set a ge condition.
+ __ Ccmp(left_type, FIRST_SPEC_OBJECT_TYPE, NVFlag, ne);
+
+ __ B(ge, &return_not_equal);
+
+ // Internalized strings are unique, so they can only be equal if they are the
+ // same object. We have already tested that case, so if left and right are
+ // both internalized strings, they cannot be equal.
+ STATIC_ASSERT((kInternalizedTag == 0) && (kStringTag == 0));
+ __ Orr(scratch, left_type, right_type);
+ __ TestAndBranchIfAllClear(
+ scratch, kIsNotStringMask | kIsNotInternalizedMask, &return_not_equal);
+}
+
+
+// See call site for description.
+static void EmitSmiNonsmiComparison(MacroAssembler* masm,
+ Register left,
+ Register right,
+ FPRegister left_d,
+ FPRegister right_d,
+ Register scratch,
+ Label* slow,
+ bool strict) {
+ ASSERT(!AreAliased(left, right, scratch));
+ ASSERT(!AreAliased(left_d, right_d));
+ ASSERT((left.is(x0) && right.is(x1)) ||
+ (right.is(x0) && left.is(x1)));
+ Register result = x0;
+
+ Label right_is_smi, done;
+ __ JumpIfSmi(right, &right_is_smi);
+
+ // Left is the smi. Check whether right is a heap number.
+ if (strict) {
+ // If right is not a number and left is a smi, then strict equality cannot
+ // succeed. Return non-equal.
+ Label is_heap_number;
+ __ JumpIfObjectType(right, scratch, scratch, HEAP_NUMBER_TYPE,
+ &is_heap_number);
+ // Register right is a non-zero pointer, which is a valid NOT_EQUAL result.
+ if (!right.is(result)) {
+ __ Mov(result, NOT_EQUAL);
+ }
+ __ Ret();
+ __ Bind(&is_heap_number);
+ } else {
+ // Smi compared non-strictly with a non-smi, non-heap-number. Call the
+ // runtime.
+ __ JumpIfNotObjectType(right, scratch, scratch, HEAP_NUMBER_TYPE, slow);
+ }
+
+ // Left is the smi. Right is a heap number. Load right value into right_d, and
+ // convert left smi into double in left_d.
+ __ Ldr(right_d, FieldMemOperand(right, HeapNumber::kValueOffset));
+ __ SmiUntagToDouble(left_d, left);
+ __ B(&done);
+
+ __ Bind(&right_is_smi);
+ // Right is a smi. Check whether the non-smi left is a heap number.
+ if (strict) {
+ // If left is not a number and right is a smi then strict equality cannot
+ // succeed. Return non-equal.
+ Label is_heap_number;
+ __ JumpIfObjectType(left, scratch, scratch, HEAP_NUMBER_TYPE,
+ &is_heap_number);
+ // Register left is a non-zero pointer, which is a valid NOT_EQUAL result.
+ if (!left.is(result)) {
+ __ Mov(result, NOT_EQUAL);
+ }
+ __ Ret();
+ __ Bind(&is_heap_number);
+ } else {
+ // Smi compared non-strictly with a non-smi, non-heap-number. Call the
+ // runtime.
+ __ JumpIfNotObjectType(left, scratch, scratch, HEAP_NUMBER_TYPE, slow);
+ }
+
+ // Right is the smi. Left is a heap number. Load left value into left_d, and
+ // convert right smi into double in right_d.
+ __ Ldr(left_d, FieldMemOperand(left, HeapNumber::kValueOffset));
+ __ SmiUntagToDouble(right_d, right);
+
+ // Fall through to both_loaded_as_doubles.
+ __ Bind(&done);
+}
+
+
+// Fast negative check for internalized-to-internalized equality.
+// See call site for description.
+static void EmitCheckForInternalizedStringsOrObjects(MacroAssembler* masm,
+ Register left,
+ Register right,
+ Register left_map,
+ Register right_map,
+ Register left_type,
+ Register right_type,
+ Label* possible_strings,
+ Label* not_both_strings) {
+ ASSERT(!AreAliased(left, right, left_map, right_map, left_type, right_type));
+ Register result = x0;
+
+ Label object_test;
+ STATIC_ASSERT((kInternalizedTag == 0) && (kStringTag == 0));
+ // TODO(all): reexamine this branch sequence for optimisation wrt branch
+ // prediction.
+ __ Tbnz(right_type, MaskToBit(kIsNotStringMask), &object_test);
+ __ Tbnz(right_type, MaskToBit(kIsNotInternalizedMask), possible_strings);
+ __ Tbnz(left_type, MaskToBit(kIsNotStringMask), not_both_strings);
+ __ Tbnz(left_type, MaskToBit(kIsNotInternalizedMask), possible_strings);
+
+ // Both are internalized. We already checked that they weren't the same
+ // pointer, so they are not equal.
+ __ Mov(result, NOT_EQUAL);
+ __ Ret();
+
+ __ Bind(&object_test);
+
+ __ Cmp(right_type, FIRST_SPEC_OBJECT_TYPE);
+
+ // If right >= FIRST_SPEC_OBJECT_TYPE, test left.
+ // Otherwise, right < FIRST_SPEC_OBJECT_TYPE, so set lt condition.
+ __ Ccmp(left_type, FIRST_SPEC_OBJECT_TYPE, NFlag, ge);
+
+ __ B(lt, not_both_strings);
+
+ // If both objects are undetectable, they are equal. Otherwise, they are not
+ // equal, since they are different objects and an object is not equal to
+ // undefined.
+
+ // Returning here, so we can corrupt right_type and left_type.
+ Register right_bitfield = right_type;
+ Register left_bitfield = left_type;
+ __ Ldrb(right_bitfield, FieldMemOperand(right_map, Map::kBitFieldOffset));
+ __ Ldrb(left_bitfield, FieldMemOperand(left_map, Map::kBitFieldOffset));
+ __ And(result, right_bitfield, left_bitfield);
+ __ And(result, result, 1 << Map::kIsUndetectable);
+ __ Eor(result, result, 1 << Map::kIsUndetectable);
+ __ Ret();
+}
+
+
+static void ICCompareStub_CheckInputType(MacroAssembler* masm,
+ Register input,
+ Register scratch,
+ CompareIC::State expected,
+ Label* fail) {
+ Label ok;
+ if (expected == CompareIC::SMI) {
+ __ JumpIfNotSmi(input, fail);
+ } else if (expected == CompareIC::NUMBER) {
+ __ JumpIfSmi(input, &ok);
+ __ CheckMap(input, scratch, Heap::kHeapNumberMapRootIndex, fail,
+ DONT_DO_SMI_CHECK);
+ }
+ // We could be strict about internalized/non-internalized here, but as long as
+ // hydrogen doesn't care, the stub doesn't have to care either.
+ __ Bind(&ok);
+}
+
+
+void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
+ Register lhs = x1;
+ Register rhs = x0;
+ Register result = x0;
+ Condition cond = GetCondition();
+
+ Label miss;
+ ICCompareStub_CheckInputType(masm, lhs, x2, left_, &miss);
+ ICCompareStub_CheckInputType(masm, rhs, x3, right_, &miss);
+
+ Label slow; // Call builtin.
+ Label not_smis, both_loaded_as_doubles;
+ Label not_two_smis, smi_done;
+ __ JumpIfEitherNotSmi(lhs, rhs, &not_two_smis);
+ __ SmiUntag(lhs);
+ __ Sub(result, lhs, Operand::UntagSmi(rhs));
+ __ Ret();
+
+ __ Bind(&not_two_smis);
+
+ // NOTICE! This code is only reached after a smi-fast-case check, so it is
+ // certain that at least one operand isn't a smi.
+
+ // Handle the case where the objects are identical. Either returns the answer
+ // or goes to slow. Only falls through if the objects were not identical.
+ EmitIdenticalObjectComparison(masm, lhs, rhs, x10, d0, &slow, cond);
+
+ // If either is a smi (we know that at least one is not a smi), then they can
+ // only be strictly equal if the other is a HeapNumber.
+ __ JumpIfBothNotSmi(lhs, rhs, &not_smis);
+
+ // Exactly one operand is a smi. EmitSmiNonsmiComparison generates code that
+ // can:
+ // 1) Return the answer.
+ // 2) Branch to the slow case.
+ // 3) Fall through to both_loaded_as_doubles.
+ // In case 3, we have found out that we were dealing with a number-number
+ // comparison. The double values of the numbers have been loaded, right into
+ // rhs_d, left into lhs_d.
+ FPRegister rhs_d = d0;
+ FPRegister lhs_d = d1;
+ EmitSmiNonsmiComparison(masm, lhs, rhs, lhs_d, rhs_d, x10, &slow, strict());
+
+ __ Bind(&both_loaded_as_doubles);
+ // The arguments have been converted to doubles and stored in rhs_d and
+ // lhs_d.
+ Label nan;
+ __ Fcmp(lhs_d, rhs_d);
+ __ B(vs, &nan); // Overflow flag set if either is NaN.
+ STATIC_ASSERT((LESS == -1) && (EQUAL == 0) && (GREATER == 1));
+ __ Cset(result, gt); // gt => 1, otherwise (lt, eq) => 0 (EQUAL).
+ __ Csinv(result, result, xzr, ge); // lt => -1, gt => 1, eq => 0.
+ __ Ret();
+
+ __ Bind(&nan);
+ // Left and/or right is a NaN. Load the result register with whatever makes
+ // the comparison fail, since comparisons with NaN always fail (except ne,
+ // which is filtered out at a higher level.)
+ ASSERT(cond != ne);
+ if ((cond == lt) || (cond == le)) {
+ __ Mov(result, GREATER);
+ } else {
+ __ Mov(result, LESS);
+ }
+ __ Ret();
+
+ __ Bind(&not_smis);
+ // At this point we know we are dealing with two different objects, and
+ // neither of them is a smi. The objects are in rhs_ and lhs_.
+
+ // Load the maps and types of the objects.
+ Register rhs_map = x10;
+ Register rhs_type = x11;
+ Register lhs_map = x12;
+ Register lhs_type = x13;
+ __ Ldr(rhs_map, FieldMemOperand(rhs, HeapObject::kMapOffset));
+ __ Ldr(lhs_map, FieldMemOperand(lhs, HeapObject::kMapOffset));
+ __ Ldrb(rhs_type, FieldMemOperand(rhs_map, Map::kInstanceTypeOffset));
+ __ Ldrb(lhs_type, FieldMemOperand(lhs_map, Map::kInstanceTypeOffset));
+
+ if (strict()) {
+ // This emits a non-equal return sequence for some object types, or falls
+ // through if it was not lucky.
+ EmitStrictTwoHeapObjectCompare(masm, lhs, rhs, lhs_type, rhs_type, x14);
+ }
+
+ Label check_for_internalized_strings;
+ Label flat_string_check;
+ // Check for heap number comparison. Branch to earlier double comparison code
+ // if they are heap numbers, otherwise, branch to internalized string check.
+ __ Cmp(rhs_type, HEAP_NUMBER_TYPE);
+ __ B(ne, &check_for_internalized_strings);
+ __ Cmp(lhs_map, rhs_map);
+
+ // If maps aren't equal, lhs_ and rhs_ are not heap numbers. Branch to flat
+ // string check.
+ __ B(ne, &flat_string_check);
+
+ // Both lhs_ and rhs_ are heap numbers. Load them and branch to the double
+ // comparison code.
+ __ Ldr(lhs_d, FieldMemOperand(lhs, HeapNumber::kValueOffset));
+ __ Ldr(rhs_d, FieldMemOperand(rhs, HeapNumber::kValueOffset));
+ __ B(&both_loaded_as_doubles);
+
+ __ Bind(&check_for_internalized_strings);
+ // In the strict case, the EmitStrictTwoHeapObjectCompare already took care
+ // of internalized strings.
+ if ((cond == eq) && !strict()) {
+ // Returns an answer for two internalized strings or two detectable objects.
+ // Otherwise branches to the string case or not both strings case.
+ EmitCheckForInternalizedStringsOrObjects(masm, lhs, rhs, lhs_map, rhs_map,
+ lhs_type, rhs_type,
+ &flat_string_check, &slow);
+ }
+
+ // Check for both being sequential ASCII strings, and inline if that is the
+ // case.
+ __ Bind(&flat_string_check);
+ __ JumpIfBothInstanceTypesAreNotSequentialAscii(lhs_type, rhs_type, x14,
+ x15, &slow);
+
+ Isolate* isolate = masm->isolate();
+ __ IncrementCounter(isolate->counters()->string_compare_native(), 1, x10,
+ x11);
+ if (cond == eq) {
+ StringCompareStub::GenerateFlatAsciiStringEquals(masm, lhs, rhs,
+ x10, x11, x12);
+ } else {
+ StringCompareStub::GenerateCompareFlatAsciiStrings(masm, lhs, rhs,
+ x10, x11, x12, x13);
+ }
+
+ // Never fall through to here.
+ if (FLAG_debug_code) {
+ __ Unreachable();
+ }
+
+ __ Bind(&slow);
+
+ __ Push(lhs, rhs);
+ // Figure out which native to call and setup the arguments.
+ Builtins::JavaScript native;
+ if (cond == eq) {
+ native = strict() ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
+ } else {
+ native = Builtins::COMPARE;
+ int ncr; // NaN compare result
+ if ((cond == lt) || (cond == le)) {
+ ncr = GREATER;
+ } else {
+ ASSERT((cond == gt) || (cond == ge)); // remaining cases
+ ncr = LESS;
+ }
+ __ Mov(x10, Operand(Smi::FromInt(ncr)));
+ __ Push(x10);
+ }
+
+ // Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
+ // tagged as a small integer.
+ __ InvokeBuiltin(native, JUMP_FUNCTION);
+
+ __ Bind(&miss);
+ GenerateMiss(masm);
+}
+
+
+void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
+  // Called when the store buffer overflows (see GenerateStubsAheadOfTime:
+  // RecordWriteStub uses this stub). It calls into C++ to drain the store
+  // buffer, then returns to the caller with all observable state preserved.
+  //
+  // Preserve caller-saved registers x0-x7 and x10-x15. We don't care if x8, x9,
+  // ip0 and ip1 are corrupted by the call into C.
+  CPURegList saved_regs = kCallerSaved;
+  saved_regs.Remove(ip0);
+  saved_regs.Remove(ip1);
+  saved_regs.Remove(x8);
+  saved_regs.Remove(x9);
+
+  // We don't allow a GC during a store buffer overflow so there is no need to
+  // store the registers in any particular way, but we do have to store and
+  // restore them.
+  __ PushCPURegList(saved_regs);
+  if (save_doubles_ == kSaveFPRegs) {
+    __ PushCPURegList(kCallerSavedFP);
+  }
+
+  AllowExternalCallThatCantCauseGC scope(masm);
+  // The C function takes one integer argument (the isolate) and no doubles.
+  __ Mov(x0, Operand(ExternalReference::isolate_address(masm->isolate())));
+  __ CallCFunction(
+      ExternalReference::store_buffer_overflow_function(masm->isolate()),
+      1, 0);
+
+  if (save_doubles_ == kSaveFPRegs) {
+    __ PopCPURegList(kCallerSavedFP);
+  }
+  __ PopCPURegList(saved_regs);
+  __ Ret();
+}
+
+
+void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(
+    Isolate* isolate) {
+  // Pregenerate both flavours of the stub: one that leaves the FP registers
+  // alone and one that saves and restores them around the C call.
+  StoreBufferOverflowStub stub_no_fp(kDontSaveFPRegs);
+  stub_no_fp.GetCode(isolate);
+  StoreBufferOverflowStub stub_save_fp(kSaveFPRegs);
+  stub_save_fp.GetCode(isolate);
+}
+
+
+void MathPowStub::Generate(MacroAssembler* masm) {
+  // Stack on entry:
+  //  jssp[0]: Exponent (as a tagged value).
+  //  jssp[1]: Base (as a tagged value).
+  //
+  // The (tagged) result will be returned in x0, as a heap number.
+
+  Register result_tagged = x0;
+  Register base_tagged = x10;
+  Register exponent_tagged = x11;
+  Register exponent_integer = x12;
+  Register scratch1 = x14;
+  Register scratch0 = x15;
+  Register saved_lr = x19;  // Callee-saved; used to preserve lr across C calls.
+  // Note: result_double and base_double alias each other (both are d0), so
+  // writing the result implicitly clobbers the base.
+  FPRegister result_double = d0;
+  FPRegister base_double = d0;
+  FPRegister exponent_double = d1;
+  FPRegister base_double_copy = d2;
+  FPRegister scratch1_double = d6;
+  FPRegister scratch0_double = d7;
+
+  // A fast-path for integer exponents.
+  Label exponent_is_smi, exponent_is_integer;
+  // Bail out to runtime.
+  Label call_runtime;
+  // Allocate a heap number for the result, and return it.
+  Label done;
+
+  // Unpack the inputs.
+  if (exponent_type_ == ON_STACK) {
+    Label base_is_smi;
+    Label unpack_exponent;
+
+    __ Pop(exponent_tagged, base_tagged);
+
+    __ JumpIfSmi(base_tagged, &base_is_smi);
+    __ JumpIfNotHeapNumber(base_tagged, &call_runtime);
+    // base_tagged is a heap number, so load its double value.
+    __ Ldr(base_double, FieldMemOperand(base_tagged, HeapNumber::kValueOffset));
+    __ B(&unpack_exponent);
+    __ Bind(&base_is_smi);
+    // base_tagged is a SMI, so untag it and convert it to a double.
+    __ SmiUntagToDouble(base_double, base_tagged);
+
+    __ Bind(&unpack_exponent);
+    //  x10   base_tagged       The tagged base (input).
+    //  x11   exponent_tagged   The tagged exponent (input).
+    //  d1    base_double       The base as a double.
+    __ JumpIfSmi(exponent_tagged, &exponent_is_smi);
+    __ JumpIfNotHeapNumber(exponent_tagged, &call_runtime);
+    // exponent_tagged is a heap number, so load its double value.
+    __ Ldr(exponent_double,
+           FieldMemOperand(exponent_tagged, HeapNumber::kValueOffset));
+  } else if (exponent_type_ == TAGGED) {
+    __ JumpIfSmi(exponent_tagged, &exponent_is_smi);
+    __ Ldr(exponent_double,
+           FieldMemOperand(exponent_tagged, HeapNumber::kValueOffset));
+  }
+
+  // Handle double (heap number) exponents.
+  if (exponent_type_ != INTEGER) {
+    // Detect integer exponents stored as doubles and handle those in the
+    // integer fast-path.
+    __ TryConvertDoubleToInt64(exponent_integer, exponent_double,
+                               scratch0_double, &exponent_is_integer);
+
+    if (exponent_type_ == ON_STACK) {
+      FPRegister  half_double = d3;
+      FPRegister  minus_half_double = d4;
+      FPRegister  zero_double = d5;
+      // Detect square root case. Crankshaft detects constant +/-0.5 at compile
+      // time and uses DoMathPowHalf instead. We then skip this check for
+      // non-constant cases of +/-0.5 as these hardly occur.
+
+      __ Fmov(minus_half_double, -0.5);
+      __ Fmov(half_double, 0.5);
+      __ Fcmp(minus_half_double, exponent_double);
+      __ Fccmp(half_double, exponent_double, NZFlag, ne);
+      // Condition flags at this point:
+      //  0.5:  nZCv  // Identified by eq && pl
+      //  -0.5: NZcv  // Identified by eq && mi
+      //  other: ?z?? // Identified by ne
+      __ B(ne, &call_runtime);
+
+      // The exponent is 0.5 or -0.5.
+
+      // Given that exponent is known to be either 0.5 or -0.5, the following
+      // special cases could apply (according to ECMA-262 15.8.2.13):
+      //
+      // base.isNaN():                   The result is NaN.
+      // (base == +INFINITY) || (base == -INFINITY)
+      //   exponent == 0.5:              The result is +INFINITY.
+      //   exponent == -0.5:             The result is +0.
+      // (base == +0) || (base == -0)
+      //   exponent == 0.5:              The result is +0.
+      //   exponent == -0.5:             The result is +INFINITY.
+      // (base < 0) && base.isFinite():  The result is NaN.
+      //
+      // Fsqrt (and Fdiv for the -0.5 case) can handle all of those except
+      // where base is -INFINITY or -0.
+
+      // Add +0 to base. This has no effect other than turning -0 into +0.
+      __ Fmov(zero_double, 0.0);
+      __ Fadd(base_double, base_double, zero_double);
+      // The operation -0+0 results in +0 in all cases except where the
+      // FPCR rounding mode is 'round towards minus infinity' (RM). The
+      // A64 simulator does not currently simulate FPCR (where the rounding
+      // mode is set), so test the operation with some debug code.
+      if (masm->emit_debug_code()) {
+        Register temp = masm->Tmp1();
+        //  d5  zero_double   The value +0.0 as a double.
+        __ Fneg(scratch0_double, zero_double);
+        // Verify that we correctly generated +0.0 and -0.0.
+        //  bits(+0.0) = 0x0000000000000000
+        //  bits(-0.0) = 0x8000000000000000
+        __ Fmov(temp, zero_double);
+        __ CheckRegisterIsClear(temp, kCouldNotGenerateZero);
+        __ Fmov(temp, scratch0_double);
+        __ Eor(temp, temp, kDSignMask);
+        __ CheckRegisterIsClear(temp, kCouldNotGenerateNegativeZero);
+        // Check that -0.0 + 0.0 == +0.0.
+        __ Fadd(scratch0_double, scratch0_double, zero_double);
+        __ Fmov(temp, scratch0_double);
+        __ CheckRegisterIsClear(temp, kExpectedPositiveZero);
+      }
+
+      // If base is -INFINITY, make it +INFINITY.
+      //  * Calculate base - base: All infinities will become NaNs since both
+      //    -INFINITY+INFINITY and +INFINITY-INFINITY are NaN in A64.
+      //  * If the result is NaN, calculate abs(base).
+      __ Fsub(scratch0_double, base_double, base_double);
+      __ Fcmp(scratch0_double, 0.0);
+      __ Fabs(scratch1_double, base_double);
+      __ Fcsel(base_double, scratch1_double, base_double, vs);
+
+      // Calculate the square root of base.
+      __ Fsqrt(result_double, base_double);
+      __ Fcmp(exponent_double, 0.0);
+      __ B(ge, &done);  // Finish now for exponents of 0.5.
+      // Find the inverse for exponents of -0.5.
+      __ Fmov(scratch0_double, 1.0);
+      __ Fdiv(result_double, scratch0_double, result_double);
+      __ B(&done);
+    }
+
+    {
+      AllowExternalCallThatCantCauseGC scope(masm);
+      __ Mov(saved_lr, lr);
+      // power_double_double takes two double arguments (base in d0, exponent
+      // in d1) and no integer arguments; the result comes back in d0.
+      __ CallCFunction(
+          ExternalReference::power_double_double_function(masm->isolate()),
+          0, 2);
+      __ Mov(lr, saved_lr);
+      __ B(&done);
+    }
+
+    // Handle SMI exponents.
+    __ Bind(&exponent_is_smi);
+    //  x10   base_tagged       The tagged base (input).
+    //  x11   exponent_tagged   The tagged exponent (input).
+    //  d1    base_double       The base as a double.
+    __ SmiUntag(exponent_integer, exponent_tagged);
+  }
+
+  __ Bind(&exponent_is_integer);
+  //  x10   base_tagged       The tagged base (input).
+  //  x11   exponent_tagged   The tagged exponent (input).
+  //  x12   exponent_integer  The exponent as an integer.
+  //  d1    base_double       The base as a double.
+
+  // Find abs(exponent). For negative exponents, we can find the inverse later.
+  Register exponent_abs = x13;
+  __ Cmp(exponent_integer, 0);
+  __ Cneg(exponent_abs, exponent_integer, mi);
+  //  x13   exponent_abs      The value of abs(exponent_integer).
+
+  // Repeatedly multiply to calculate the power.
+  //  result = 1.0;
+  //  For each bit n (exponent_integer{n}) {
+  //    if (exponent_integer{n}) {
+  //      result *= base;
+  //    }
+  //    base *= base;
+  //    if (remaining bits in exponent_integer are all zero) {
+  //      break;
+  //    }
+  //  }
+  Label power_loop, power_loop_entry, power_loop_exit;
+  __ Fmov(scratch1_double, base_double);
+  __ Fmov(base_double_copy, base_double);
+  __ Fmov(result_double, 1.0);
+  __ B(&power_loop_entry);
+
+  __ Bind(&power_loop);
+  __ Fmul(scratch1_double, scratch1_double, scratch1_double);
+  __ Lsr(exponent_abs, exponent_abs, 1);
+  __ Cbz(exponent_abs, &power_loop_exit);
+
+  __ Bind(&power_loop_entry);
+  __ Tbz(exponent_abs, 0, &power_loop);
+  __ Fmul(result_double, result_double, scratch1_double);
+  __ B(&power_loop);
+
+  __ Bind(&power_loop_exit);
+
+  // If the exponent was positive, result_double holds the result.
+  __ Tbz(exponent_integer, kXSignBit, &done);
+
+  // The exponent was negative, so find the inverse.
+  __ Fmov(scratch0_double, 1.0);
+  __ Fdiv(result_double, scratch0_double, result_double);
+  // ECMA-262 only requires Math.pow to return an 'implementation-dependent
+  // approximation' of base^exponent. However, mjsunit/math-pow uses Math.pow
+  // to calculate the subnormal value 2^-1074. This method of calculating
+  // negative powers doesn't work because 2^1074 overflows to infinity. To
+  // catch this corner-case, we bail out if the result was 0. (This can only
+  // occur if the divisor is infinity or the base is zero.)
+  __ Fcmp(result_double, 0.0);
+  __ B(&done, ne);
+
+  if (exponent_type_ == ON_STACK) {
+    // Bail out to runtime code.
+    __ Bind(&call_runtime);
+    // Put the arguments back on the stack.
+    __ Push(base_tagged, exponent_tagged);
+    __ TailCallRuntime(Runtime::kMath_pow_cfunction, 2, 1);
+
+    // Return.
+    __ Bind(&done);
+    __ AllocateHeapNumber(result_tagged, &call_runtime, scratch0, scratch1);
+    __ Str(result_double,
+           FieldMemOperand(result_tagged, HeapNumber::kValueOffset));
+    ASSERT(result_tagged.is(x0));
+    __ IncrementCounter(
+        masm->isolate()->counters()->math_pow(), 1, scratch0, scratch1);
+    __ Ret();
+  } else {
+    AllowExternalCallThatCantCauseGC scope(masm);
+    __ Mov(saved_lr, lr);
+    __ Fmov(base_double, base_double_copy);
+    __ Scvtf(exponent_double, exponent_integer);
+    __ CallCFunction(
+        ExternalReference::power_double_double_function(masm->isolate()),
+        0, 2);
+    __ Mov(lr, saved_lr);
+    __ Bind(&done);
+    __ IncrementCounter(
+        masm->isolate()->counters()->math_pow(), 1, scratch0, scratch1);
+    __ Ret();
+  }
+}
+
+
+void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
+  // It is important that the following stubs are generated in this order
+  // because pregenerated stubs can only call other pregenerated stubs.
+  // RecordWriteStub uses StoreBufferOverflowStub, which in turn uses
+  // CEntryStub. Do not reorder: each stub's dependencies must already have
+  // been generated when it is generated.
+  CEntryStub::GenerateAheadOfTime(isolate);
+  StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(isolate);
+  StubFailureTrampolineStub::GenerateAheadOfTime(isolate);
+  ArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate);
+  CreateAllocationSiteStub::GenerateAheadOfTime(isolate);
+  BinaryOpICStub::GenerateAheadOfTime(isolate);
+  BinaryOpICWithAllocationSiteStub::GenerateAheadOfTime(isolate);
+}
+
+
+void CodeStub::GenerateFPStubs(Isolate* isolate) {
+  // On A64 floating-point code needs no special stub variants, so this
+  // platform hook is intentionally a no-op; the cast just silences the
+  // unused-parameter warning.
+  static_cast<void>(isolate);
+}
+
+
+// Branches to |oom_label| if |value| holds an out-of-memory Failure object;
+// |scratch| is clobbered. Per the STATIC_ASSERTs below, both the failure tag
+// and the OUT_OF_MEMORY_EXCEPTION type are 0b11, so an OOM failure has 0xf in
+// its low four bits. NOTE(review): this relies on the tag occupying the low
+// two bits (kFailureTagSize == 2) — confirm against the Failure class layout.
+static void JumpIfOOM(MacroAssembler* masm,
+                      Register value,
+                      Register scratch,
+                      Label* oom_label) {
+  STATIC_ASSERT(Failure::OUT_OF_MEMORY_EXCEPTION == 3);
+  STATIC_ASSERT(kFailureTag == 3);
+  __ And(scratch, value, 0xf);
+  __ Cmp(scratch, 0xf);
+  __ B(eq, oom_label);
+}
+
+
+bool CEntryStub::NeedsImmovableCode() {
+  // CEntryStub stores the return address on the stack before calling into
+  // C++ code. In some cases, the VM accesses this address, but it is not used
+  // when the C++ code returns to the stub because LR holds the return address
+  // in AAPCS64. If the stub is moved (perhaps during a GC), we could end up
+  // returning to dead code.
+  // TODO(jbramley): Whilst this is the only analysis that makes sense, I can't
+  // find any comment to confirm this, and I don't hit any crashes whatever
+  // this function returns. The analysis should be properly confirmed.
+  return true;
+}
+
+
+void CEntryStub::GenerateAheadOfTime(Isolate* isolate) {
+  // Pregenerate the single-result CEntry stub in both FP-saving flavours so
+  // that other pregenerated stubs can call it.
+  CEntryStub stub_no_fp(1, kDontSaveFPRegs);
+  stub_no_fp.GetCode(isolate);
+  CEntryStub stub_save_fp(1, kSaveFPRegs);
+  stub_save_fp.GetCode(isolate);
+}
+
+
+void CEntryStub::GenerateCore(MacroAssembler* masm,
+                              Label* throw_normal,
+                              Label* throw_termination,
+                              Label* throw_out_of_memory,
+                              bool do_gc,
+                              bool always_allocate) {
+  //  x0  : Result parameter for PerformGC, if do_gc is true.
+  //  x21 : argv
+  //  x22 : argc
+  //  x23 : target
+  //
+  // The stack (on entry) holds the arguments and the receiver, with the
+  // receiver at the highest address:
+  //
+  //         argv[8]:     receiver
+  // argv -> argv[0]:     arg[argc-2]
+  //         ...          ...
+  //         argv[...]:   arg[1]
+  //         argv[...]:   arg[0]
+  //
+  // Immediately below (after) this is the exit frame, as constructed by
+  // EnterExitFrame:
+  //         fp[8]:    CallerPC (lr)
+  //   fp -> fp[0]:    CallerFP (old fp)
+  //         fp[-8]:   Space reserved for SPOffset.
+  //         fp[-16]:  CodeObject()
+  //         csp[...]: Saved doubles, if saved_doubles is true.
+  //         csp[32]:  Alignment padding, if necessary.
+  //         csp[24]:  Preserved x23 (used for target).
+  //         csp[16]:  Preserved x22 (used for argc).
+  //         csp[8]:   Preserved x21 (used for argv).
+  //  csp -> csp[0]:   Space reserved for the return address.
+  //
+  // After a successful call, the exit frame, preserved registers (x21-x23) and
+  // the arguments (including the receiver) are dropped or popped as
+  // appropriate. The stub then returns.
+  //
+  // After an unsuccessful call, the exit frame and suchlike are left
+  // untouched, and the stub either throws an exception by jumping to one of
+  // the provided throw_ labels, or it falls through. The failure details are
+  // passed through in x0.
+  ASSERT(csp.Is(__ StackPointer()));
+
+  Isolate* isolate = masm->isolate();
+
+  const Register& argv = x21;
+  const Register& argc = x22;
+  const Register& target = x23;
+
+  if (do_gc) {
+    // Call Runtime::PerformGC, passing x0 (the result parameter for
+    // PerformGC) and x1 (the isolate).
+    __ Mov(x1, Operand(ExternalReference::isolate_address(masm->isolate())));
+    __ CallCFunction(
+        ExternalReference::perform_gc_function(isolate), 2, 0);
+  }
+
+  // Bump the always-allocate scope depth around the call, if requested, so
+  // that allocations made by the callee cannot fail for lack of space.
+  ExternalReference scope_depth =
+      ExternalReference::heap_always_allocate_scope_depth(isolate);
+  if (always_allocate) {
+    __ Mov(x10, Operand(scope_depth));
+    __ Ldr(x11, MemOperand(x10));
+    __ Add(x11, x11, 1);
+    __ Str(x11, MemOperand(x10));
+  }
+
+  // Prepare AAPCS64 arguments to pass to the builtin.
+  __ Mov(x0, argc);
+  __ Mov(x1, argv);
+  __ Mov(x2, Operand(ExternalReference::isolate_address(isolate)));
+
+  // Store the return address on the stack, in the space previously allocated
+  // by EnterExitFrame. The return address is queried by
+  // ExitFrame::GetStateForFramePointer.
+  Label return_location;
+  __ Adr(x12, &return_location);
+  __ Poke(x12, 0);
+  if (__ emit_debug_code()) {
+    // Verify that the slot at fp[kSPOffset]-8 holds the return location
+    // (currently in x12).
+    Register temp = masm->Tmp1();
+    __ Ldr(temp, MemOperand(fp, ExitFrameConstants::kSPOffset));
+    __ Ldr(temp, MemOperand(temp, -static_cast<int64_t>(kXRegSizeInBytes)));
+    __ Cmp(temp, x12);
+    __ Check(eq, kReturnAddressNotFoundInFrame);
+  }
+
+  // Call the builtin.
+  __ Blr(target);
+  __ Bind(&return_location);
+  const Register& result = x0;
+
+  if (always_allocate) {
+    __ Mov(x10, Operand(scope_depth));
+    __ Ldr(x11, MemOperand(x10));
+    __ Sub(x11, x11, 1);
+    __ Str(x11, MemOperand(x10));
+  }
+
+  //  x0    result      The return code from the call.
+  //  x21   argv
+  //  x22   argc
+  //  x23   target
+  //
+  // If all of the result bits matching kFailureTagMask are '1', the result is
+  // a failure. Otherwise, it's an ordinary tagged object and the call was a
+  // success.
+  Label failure;
+  __ And(x10, result, kFailureTagMask);
+  __ Cmp(x10, kFailureTagMask);
+  __ B(&failure, eq);
+
+  // The call succeeded, so unwind the stack and return.
+
+  // Restore callee-saved registers x21-x23.
+  // Save argc in x11 first: LeaveExitFrame needs it to drop the arguments,
+  // but the Peeks below overwrite the argc register itself.
+  __ Mov(x11, argc);
+
+  __ Peek(argv, 1 * kPointerSize);
+  __ Peek(argc, 2 * kPointerSize);
+  __ Peek(target, 3 * kPointerSize);
+
+  __ LeaveExitFrame(save_doubles_, x10, true);
+  ASSERT(jssp.Is(__ StackPointer()));
+  // Pop or drop the remaining stack slots and return from the stub.
+  //         jssp[24]:    Arguments array (of size argc), including receiver.
+  //         jssp[16]:    Preserved x23 (used for target).
+  //         jssp[8]:     Preserved x22 (used for argc).
+  //         jssp[0]:     Preserved x21 (used for argv).
+  __ Drop(x11);
+  __ Ret();
+
+  // The stack pointer is still csp if we aren't returning, and the frame
+  // hasn't changed (except for the return address).
+  __ SetStackPointer(csp);
+
+  __ Bind(&failure);
+  // The call failed, so check if we need to throw an exception, and fall
+  // through (to retry) otherwise.
+
+  Label retry;
+  //  x0    result    The return code from the call, including the failure
+  //                  code and details.
+  //  x21   argv
+  //  x22   argc
+  //  x23   target
+  // Refer to the Failure class for details of the bit layout.
+  STATIC_ASSERT(Failure::RETRY_AFTER_GC == 0);
+  __ Tst(result, kFailureTypeTagMask << kFailureTagSize);
+  __ B(eq, &retry);   // RETRY_AFTER_GC
+
+  // Special handling of out-of-memory exceptions: Pass the failure result,
+  // rather than the exception descriptor.
+  JumpIfOOM(masm, result, x10, throw_out_of_memory);
+
+  // Retrieve the pending exception.
+  const Register& exception = result;
+  const Register& exception_address = x11;
+  __ Mov(exception_address,
+         Operand(ExternalReference(Isolate::kPendingExceptionAddress,
+                                   isolate)));
+  __ Ldr(exception, MemOperand(exception_address));
+
+  // See if we just retrieved an OOM exception.
+  JumpIfOOM(masm, exception, x10, throw_out_of_memory);
+
+  // Clear the pending exception.
+  __ Mov(x10, Operand(isolate->factory()->the_hole_value()));
+  __ Str(x10, MemOperand(exception_address));
+
+  //  x0    exception   The exception descriptor.
+  //  x21   argv
+  //  x22   argc
+  //  x23   target
+
+  // Special handling of termination exceptions, which are uncatchable by
+  // JavaScript code.
+  __ Cmp(exception, Operand(isolate->factory()->termination_exception()));
+  __ B(eq, throw_termination);
+
+  // Handle normal exception.
+  __ B(throw_normal);
+
+  __ Bind(&retry);
+  // The result (x0) is passed through as the next PerformGC parameter.
+}
+
+
+void CEntryStub::Generate(MacroAssembler* masm) {
+  // The Abort mechanism relies on CallRuntime, which in turn relies on
+  // CEntryStub, so until this stub has been generated, we have to use a
+  // fall-back Abort mechanism.
+  //
+  // Note that this stub must be generated before any use of Abort.
+  MacroAssembler::NoUseRealAbortsScope no_use_real_aborts(masm);
+
+  ASM_LOCATION("CEntryStub::Generate entry");
+  ProfileEntryHookStub::MaybeCallEntryHook(masm);
+
+  // Register parameters:
+  //    x0: argc (including receiver, untagged)
+  //    x1: target
+  //
+  // The stack on entry holds the arguments and the receiver, with the receiver
+  // at the highest address:
+  //
+  //    jssp[argc-1]: receiver
+  //    jssp[argc-2]: arg[argc-2]
+  //    ...           ...
+  //    jssp[1]:      arg[1]
+  //    jssp[0]:      arg[0]
+  //
+  // The arguments are in reverse order, so that arg[argc-2] is actually the
+  // first argument to the target function and arg[0] is the last.
+  ASSERT(jssp.Is(__ StackPointer()));
+  const Register& argc_input = x0;
+  const Register& target_input = x1;
+
+  // Calculate argv, argc and the target address, and store them in
+  // callee-saved registers so we can retry the call without having to reload
+  // these arguments.
+  // TODO(jbramley): If the first call attempt succeeds in the common case (as
+  // it should), then we might be better off putting these parameters directly
+  // into their argument registers, rather than using callee-saved registers and
+  // preserving them on the stack.
+  const Register& argv = x21;
+  const Register& argc = x22;
+  const Register& target = x23;
+
+  // Derive argv from the stack pointer so that it points to the first argument
+  // (arg[argc-2]), or just below the receiver in case there are no arguments.
+  //  - Adjust for the arg[] array.
+  Register temp_argv = x11;
+  __ Add(temp_argv, jssp, Operand(x0, LSL, kPointerSizeLog2));
+  //  - Adjust for the receiver.
+  __ Sub(temp_argv, temp_argv, 1 * kPointerSize);
+
+  // Enter the exit frame. Reserve three slots to preserve x21-x23 callee-saved
+  // registers.
+  FrameScope scope(masm, StackFrame::MANUAL);
+  __ EnterExitFrame(save_doubles_, x10, 3);
+  ASSERT(csp.Is(__ StackPointer()));
+
+  // Poke callee-saved registers into reserved space. GenerateCore restores
+  // them from these slots on a successful return.
+  __ Poke(argv, 1 * kPointerSize);
+  __ Poke(argc, 2 * kPointerSize);
+  __ Poke(target, 3 * kPointerSize);
+
+  // We normally only keep tagged values in callee-saved registers, as they
+  // could be pushed onto the stack by called stubs and functions, and on the
+  // stack they can confuse the GC. However, we're only calling C functions
+  // which can push arbitrary data onto the stack anyway, and so the GC won't
+  // examine that part of the stack.
+  __ Mov(argc, argc_input);
+  __ Mov(target, target_input);
+  __ Mov(argv, temp_argv);
+
+  Label throw_normal;
+  Label throw_termination;
+  Label throw_out_of_memory;
+
+  // Call the runtime function.
+  GenerateCore(masm,
+               &throw_normal,
+               &throw_termination,
+               &throw_out_of_memory,
+               false,
+               false);
+
+  // If successful, the previous GenerateCore will have returned to the
+  // calling code. Otherwise, we fall through into the following.
+
+  // Do space-specific GC and retry runtime call.
+  GenerateCore(masm,
+               &throw_normal,
+               &throw_termination,
+               &throw_out_of_memory,
+               true,
+               false);
+
+  // Do full GC and retry runtime call one final time.
+  __ Mov(x0, reinterpret_cast<uint64_t>(Failure::InternalError()));
+  GenerateCore(masm,
+               &throw_normal,
+               &throw_termination,
+               &throw_out_of_memory,
+               true,
+               true);
+
+  // We didn't execute a return case, so the stack frame hasn't been updated
+  // (except for the return address slot). However, we don't need to initialize
+  // jssp because the throw method will immediately overwrite it when it
+  // unwinds the stack.
+  if (__ emit_debug_code()) {
+    __ Mov(jssp, kDebugZapValue);
+  }
+  __ SetStackPointer(jssp);
+
+  // Throw exceptions.
+  // If we throw an exception, we can end up re-entering CEntryStub before we
+  // pop the exit frame, so need to ensure that x21-x23 contain GC-safe values
+  // here.
+  __ Bind(&throw_out_of_memory);
+  ASM_LOCATION("Throw out of memory");
+  __ Mov(argv, 0);
+  __ Mov(argc, 0);
+  __ Mov(target, 0);
+  // Set external caught exception to false.
+  Isolate* isolate = masm->isolate();
+  __ Mov(x2, Operand(ExternalReference(Isolate::kExternalCaughtExceptionAddress,
+                                       isolate)));
+  __ Str(xzr, MemOperand(x2));
+
+  // Set pending exception and x0 to out of memory exception.
+  Label already_have_failure;
+  JumpIfOOM(masm, x0, x10, &already_have_failure);
+  Failure* out_of_memory = Failure::OutOfMemoryException(0x1);
+  __ Mov(x0, Operand(reinterpret_cast<uint64_t>(out_of_memory)));
+  __ Bind(&already_have_failure);
+  __ Mov(x2, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
+                                       isolate)));
+  __ Str(x0, MemOperand(x2));
+  // Fall through to the next label.
+
+  __ Bind(&throw_termination);
+  ASM_LOCATION("Throw termination");
+  __ Mov(argv, 0);
+  __ Mov(argc, 0);
+  __ Mov(target, 0);
+  __ ThrowUncatchable(x0, x10, x11, x12, x13);
+
+  __ Bind(&throw_normal);
+  ASM_LOCATION("Throw normal");
+  __ Mov(argv, 0);
+  __ Mov(argc, 0);
+  __ Mov(target, 0);
+  __ Throw(x0, x10, x11, x12, x13);
+}
+
+
+// This is the entry point from C++. 5 arguments are provided in x0-x4.
+// See use of the CALL_GENERATED_CODE macro for example in src/execution.cc.
+// Input:
+//   x0: code entry.
+//   x1: function.
+//   x2: receiver.
+//   x3: argc.
+//   x4: argv.
+// Output:
+//   x0: result.
+void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
+  ASSERT(jssp.Is(__ StackPointer()));
+  Register code_entry = x0;
+
+  // Enable instruction instrumentation. This only works on the simulator, and
+  // will have no effect on the model or real hardware.
+  __ EnableInstrumentation();
+
+  Label invoke, handler_entry, exit;
+
+  // Push callee-saved registers and synchronize the system stack pointer (csp)
+  // and the JavaScript stack pointer (jssp).
+  //
+  // We must not write to jssp until after the PushCalleeSavedRegisters()
+  // call, since jssp is itself a callee-saved register.
+  __ SetStackPointer(csp);
+  __ PushCalleeSavedRegisters();
+  __ Mov(jssp, csp);
+  __ SetStackPointer(jssp);
+
+  ProfileEntryHookStub::MaybeCallEntryHook(masm);
+
+  // Build an entry frame (see layout below).
+  Isolate* isolate = masm->isolate();
+
+  // Build an entry frame.
+  int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY;
+  int64_t bad_frame_pointer = -1L;  // Bad frame pointer to fail if it is used.
+  __ Mov(x13, bad_frame_pointer);
+  __ Mov(x12, Operand(Smi::FromInt(marker)));
+  __ Mov(x11, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate)));
+  __ Ldr(x10, MemOperand(x11));
+
+  // TODO(all): Pushing the marker twice seems unnecessary.
+  // In this case perhaps we could push xzr in the slot for the context
+  // (see MAsm::EnterFrame).
+  __ Push(x13, x12, x12, x10);
+  // Set up fp.
+  __ Sub(fp, jssp, EntryFrameConstants::kCallerFPOffset);
+
+  // Push the JS entry frame marker. Also set js_entry_sp if this is the
+  // outermost JS call.
+  Label non_outermost_js, done;
+  ExternalReference js_entry_sp(Isolate::kJSEntrySPAddress, isolate);
+  __ Mov(x10, Operand(ExternalReference(js_entry_sp)));
+  __ Ldr(x11, MemOperand(x10));
+  __ Cbnz(x11, &non_outermost_js);
+  __ Str(fp, MemOperand(x10));
+  __ Mov(x12, Operand(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME)));
+  __ Push(x12);
+  __ B(&done);
+  __ Bind(&non_outermost_js);
+  // We spare one instruction by pushing xzr since the marker is 0.
+  ASSERT(Smi::FromInt(StackFrame::INNER_JSENTRY_FRAME) == NULL);
+  __ Push(xzr);
+  __ Bind(&done);
+
+  // The frame set up looks like this:
+  // jssp[0] : JS entry frame marker.
+  // jssp[1] : C entry FP.
+  // jssp[2] : stack frame marker.
+  // jssp[3] : stack frame marker.
+  // jssp[4] : bad frame pointer 0xfff...ff   <- fp points here.
+
+
+  // Jump to a faked try block that does the invoke, with a faked catch
+  // block that sets the pending exception.
+  __ B(&invoke);
+
+  // Prevent the constant pool from being emitted between the record of the
+  // handler_entry position and the first instruction of the sequence here.
+  // There is no risk because Assembler::Emit() emits the instruction before
+  // checking for constant pool emission, but we do not want to depend on
+  // that.
+  {
+    Assembler::BlockConstPoolScope block_const_pool(masm);
+    __ bind(&handler_entry);
+    handler_offset_ = handler_entry.pos();
+    // Caught exception: Store result (exception) in the pending exception
+    // field in the JSEnv and return a failure sentinel. Coming in here the
+    // fp will be invalid because the PushTryHandler below sets it to 0 to
+    // signal the existence of the JSEntry frame.
+    // TODO(jbramley): Do this in the Assembler.
+    __ Mov(x10, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
+                                          isolate)));
+  }
+  __ Str(code_entry, MemOperand(x10));
+  __ Mov(x0, Operand(reinterpret_cast<int64_t>(Failure::Exception())));
+  __ B(&exit);
+
+  // Invoke: Link this frame into the handler chain.  There's only one
+  // handler block in this code object, so its index is 0.
+  __ Bind(&invoke);
+  __ PushTryHandler(StackHandler::JS_ENTRY, 0);
+  // If an exception not caught by another handler occurs, this handler
+  // returns control to the code after the B(&invoke) above, which
+  // restores all callee-saved registers (including cp and fp) to their
+  // saved values before returning a failure to C.
+
+  // Clear any pending exceptions.
+  __ Mov(x10, Operand(isolate->factory()->the_hole_value()));
+  __ Mov(x11, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
+                                        isolate)));
+  __ Str(x10, MemOperand(x11));
+
+  // Invoke the function by calling through the JS entry trampoline builtin.
+  // Notice that we cannot store a reference to the trampoline code directly in
+  // this stub, because runtime stubs are not traversed when doing GC.
+
+  // Expected registers by Builtins::JSEntryTrampoline
+  // x0: code entry.
+  // x1: function.
+  // x2: receiver.
+  // x3: argc.
+  // x4: argv.
+  // TODO(jbramley): The latest ARM code checks is_construct and conditionally
+  // uses construct_entry. We probably need to do the same here.
+  ExternalReference entry(is_construct ? Builtins::kJSConstructEntryTrampoline
+                                       : Builtins::kJSEntryTrampoline,
+                          isolate);
+  __ Mov(x10, Operand(entry));
+
+  // Call the JSEntryTrampoline.
+  __ Ldr(x11, MemOperand(x10));  // Dereference the address.
+  __ Add(x12, x11, Code::kHeaderSize - kHeapObjectTag);
+  __ Blr(x12);
+
+  // Unlink this frame from the handler chain.
+  __ PopTryHandler();
+
+
+  __ Bind(&exit);
+  // x0 holds the result.
+  // The stack pointer points to the top of the entry frame pushed on entry from
+  // C++ (at the beginning of this stub):
+  // jssp[0] : JS entry frame marker.
+  // jssp[1] : C entry FP.
+  // jssp[2] : stack frame marker.
+  // jssp[3] : stack frame marker.
+  // jssp[4] : bad frame pointer 0xfff...ff   <- fp points here.
+
+  // Check if the current stack frame is marked as the outermost JS frame.
+  Label non_outermost_js_2;
+  __ Pop(x10);
+  __ Cmp(x10, Operand(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME)));
+  __ B(ne, &non_outermost_js_2);
+  __ Mov(x11, Operand(ExternalReference(js_entry_sp)));
+  __ Str(xzr, MemOperand(x11));
+  __ Bind(&non_outermost_js_2);
+
+  // Restore the top frame descriptors from the stack.
+  __ Pop(x10);
+  __ Mov(x11, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate)));
+  __ Str(x10, MemOperand(x11));
+
+  // Reset the stack to the callee saved registers.
+  __ Drop(-EntryFrameConstants::kCallerFPOffset, kByteSizeInBytes);
+  // Restore the callee-saved registers and return.
+  ASSERT(jssp.Is(__ StackPointer()));
+  __ Mov(csp, jssp);
+  __ SetStackPointer(csp);
+  __ PopCalleeSavedRegisters();
+  // After this point, we must not modify jssp because it is a callee-saved
+  // register which we have just restored.
+  __ Ret();
+}
+
+
+// Load IC fast path for reading the "prototype" property of a function
+// receiver. Delegates the actual load to GenerateLoadFunctionPrototype;
+// any other receiver/key combination tail-calls the miss builtin for
+// this IC kind.
+void FunctionPrototypeStub::Generate(MacroAssembler* masm) {
+ Label miss;
+ Register receiver;
+ if (kind() == Code::KEYED_LOAD_IC) {
+ // ----------- S t a t e -------------
+ // -- lr : return address
+ // -- x1 : receiver
+ // -- x0 : key
+ // -----------------------------------
+ // Keyed loads carry an arbitrary key; this stub only applies when the
+ // key is the literal string "prototype".
+ Register key = x0;
+ receiver = x1;
+ __ Cmp(key, Operand(masm->isolate()->factory()->prototype_string()));
+ __ B(ne, &miss);
+ } else {
+ ASSERT(kind() == Code::LOAD_IC);
+ // ----------- S t a t e -------------
+ // -- lr : return address
+ // -- x2 : name
+ // -- x0 : receiver
+ // -- sp[0] : receiver
+ // -----------------------------------
+ receiver = x0;
+ }
+
+ // x10 and x11 are scratch registers; the helper branches to &miss when it
+ // cannot handle the receiver (helper defined elsewhere).
+ StubCompiler::GenerateLoadFunctionPrototype(masm, receiver, x10, x11, &miss);
+
+ __ Bind(&miss);
+ StubCompiler::TailCallBuiltin(masm,
+ BaseLoadStoreStubCompiler::MissBuiltin(kind()));
+}
+
+
+// Load IC fast path for reading the "length" property of a string
+// receiver. Delegates the actual load to GenerateLoadStringLength; any
+// other receiver/key combination tail-calls the miss builtin for this
+// IC kind.
+void StringLengthStub::Generate(MacroAssembler* masm) {
+ Label miss;
+ Register receiver;
+ if (kind() == Code::KEYED_LOAD_IC) {
+ // ----------- S t a t e -------------
+ // -- lr : return address
+ // -- x1 : receiver
+ // -- x0 : key
+ // -----------------------------------
+ // Keyed loads carry an arbitrary key; this stub only applies when the
+ // key is the literal string "length".
+ Register key = x0;
+ receiver = x1;
+ __ Cmp(key, Operand(masm->isolate()->factory()->length_string()));
+ __ B(ne, &miss);
+ } else {
+ ASSERT(kind() == Code::LOAD_IC);
+ // ----------- S t a t e -------------
+ // -- lr : return address
+ // -- x2 : name
+ // -- x0 : receiver
+ // -- sp[0] : receiver
+ // -----------------------------------
+ receiver = x0;
+ }
+
+ // x10 and x11 are scratch registers; the helper branches to &miss when it
+ // cannot handle the receiver (helper defined elsewhere).
+ StubCompiler::GenerateLoadStringLength(masm, receiver, x10, x11, &miss);
+
+ __ Bind(&miss);
+ StubCompiler::TailCallBuiltin(masm,
+ BaseLoadStoreStubCompiler::MissBuiltin(kind()));
+}
+
+
+// Store IC fast path for assignments to a JSArray's "length" property.
+// Performs cheap guards (JSArray receiver, fast FixedArray elements,
+// fast properties, smi value) and then tail-calls the
+// StoreIC_ArrayLength runtime helper; anything else misses.
+void StoreArrayLengthStub::Generate(MacroAssembler* masm) {
+ ASM_LOCATION("StoreArrayLengthStub::Generate");
+ // This accepts as a receiver anything JSArray::SetElementsLength accepts
+ // (currently anything except for external arrays which means anything with
+ // elements of FixedArray type). Value must be a number, but only smis are
+ // accepted as the most common case.
+ Label miss;
+
+ Register receiver;
+ Register value;
+ if (kind() == Code::KEYED_STORE_IC) {
+ // ----------- S t a t e -------------
+ // -- lr : return address
+ // -- x2 : receiver
+ // -- x1 : key
+ // -- x0 : value
+ // -----------------------------------
+ // Keyed stores carry an arbitrary key; this stub only applies when the
+ // key is the literal string "length".
+ Register key = x1;
+ receiver = x2;
+ value = x0;
+ __ Cmp(key, Operand(masm->isolate()->factory()->length_string()));
+ __ B(ne, &miss);
+ } else {
+ ASSERT(kind() == Code::STORE_IC);
+ // ----------- S t a t e -------------
+ // -- lr : return address
+ // -- x2 : key
+ // -- x1 : receiver
+ // -- x0 : value
+ // -----------------------------------
+ receiver = x1;
+ value = x0;
+ }
+
+ // Check that the receiver isn't a smi.
+ __ JumpIfSmi(receiver, &miss);
+
+ // Check that the object is a JS array.
+ __ CompareObjectType(receiver, x10, x11, JS_ARRAY_TYPE);
+ __ B(ne, &miss);
+
+ // Check that elements are FixedArray.
+ // We rely on StoreIC_ArrayLength below to deal with all types of
+ // fast elements (including COW).
+ __ Ldr(x10, FieldMemOperand(receiver, JSArray::kElementsOffset));
+ __ CompareObjectType(x10, x11, x12, FIXED_ARRAY_TYPE);
+ __ B(ne, &miss);
+
+ // Check that the array has fast properties, otherwise the length
+ // property might have been redefined.
+ // A hash-table-backed properties array marks dictionary-mode properties.
+ __ Ldr(x10, FieldMemOperand(receiver, JSArray::kPropertiesOffset));
+ __ Ldr(x10, FieldMemOperand(x10, FixedArray::kMapOffset));
+ __ CompareRoot(x10, Heap::kHashTableMapRootIndex);
+ __ B(eq, &miss);
+
+ // Check that value is a smi.
+ __ JumpIfNotSmi(value, &miss);
+
+ // Prepare tail call to StoreIC_ArrayLength.
+ __ Push(receiver, value);
+
+ // Two stack arguments (receiver, value), one result.
+ ExternalReference ref =
+ ExternalReference(IC_Utility(IC::kStoreIC_ArrayLength), masm->isolate());
+ __ TailCallExternalReference(ref, 2, 1);
+
+ __ Bind(&miss);
+ StubCompiler::TailCallBuiltin(masm,
+ BaseLoadStoreStubCompiler::MissBuiltin(kind()));
+}
+
+
+// Implements the "object instanceof function" check: walks the object's
+// prototype chain looking for the function's prototype, with an optional
+// global or call-site-inlined result cache. Non-JSObject operands (null,
+// smis, strings) are handled explicitly; anything unexpected tail-calls
+// the INSTANCE_OF builtin.
+void InstanceofStub::Generate(MacroAssembler* masm) {
+ // Stack on entry:
+ // jssp[0]: function.
+ // jssp[8]: object.
+ //
+ // Returns result in x0. Zero indicates instanceof, smi 1 indicates not
+ // instanceof.
+
+ Register result = x0;
+ Register function = right();
+ Register object = left();
+ Register scratch1 = x6;
+ Register scratch2 = x7;
+ Register res_true = x8;
+ Register res_false = x9;
+ // Only used if there was an inline map check site. (See
+ // LCodeGen::DoInstanceOfKnownGlobal().)
+ Register map_check_site = x4;
+ // Delta for the instructions generated between the inline map check and the
+ // instruction setting the result.
+ const int32_t kDeltaToLoadBoolResult = 4 * kInstructionSize;
+
+ Label not_js_object, slow;
+
+ if (!HasArgsInRegisters()) {
+ __ Pop(function, object);
+ }
+
+ if (ReturnTrueFalseObject()) {
+ __ LoadTrueFalseRoots(res_true, res_false);
+ } else {
+ // This is counter-intuitive, but correct.
+ __ Mov(res_true, Operand(Smi::FromInt(0)));
+ __ Mov(res_false, Operand(Smi::FromInt(1)));
+ }
+
+ // Check that the left hand side is a JS object and load its map as a side
+ // effect.
+ Register map = x12;
+ __ JumpIfSmi(object, &not_js_object);
+ __ IsObjectJSObjectType(object, map, scratch2, &not_js_object);
+
+ // If there is a call site cache, don't look in the global cache, but do the
+ // real lookup and update the call site cache.
+ if (!HasCallSiteInlineCheck()) {
+ Label miss;
+ __ JumpIfNotRoot(function, Heap::kInstanceofCacheFunctionRootIndex, &miss);
+ __ JumpIfNotRoot(map, Heap::kInstanceofCacheMapRootIndex, &miss);
+ __ LoadRoot(result, Heap::kInstanceofCacheAnswerRootIndex);
+ __ Ret();
+ __ Bind(&miss);
+ }
+
+ // Get the prototype of the function.
+ Register prototype = x13;
+ __ TryGetFunctionPrototype(function, prototype, scratch2, &slow,
+ MacroAssembler::kMissOnBoundFunction);
+
+ // Check that the function prototype is a JS object.
+ __ JumpIfSmi(prototype, &slow);
+ __ IsObjectJSObjectType(prototype, scratch1, scratch2, &slow);
+
+ // Update the global instanceof or call site inlined cache with the current
+ // map and function. The cached answer will be set when it is known below.
+ if (HasCallSiteInlineCheck()) {
+ // Patch the (relocated) inlined map check.
+ __ GetRelocatedValueLocation(map_check_site, scratch1);
+ // We have a cell, so need another level of dereferencing.
+ __ Ldr(scratch1, MemOperand(scratch1));
+ __ Str(map, FieldMemOperand(scratch1, Cell::kValueOffset));
+ } else {
+ __ StoreRoot(function, Heap::kInstanceofCacheFunctionRootIndex);
+ __ StoreRoot(map, Heap::kInstanceofCacheMapRootIndex);
+ }
+
+ Label return_true, return_result;
+ {
+ // Loop through the prototype chain looking for the function prototype.
+ Register chain_map = x1;
+ Register chain_prototype = x14;
+ Register null_value = x15;
+ Label loop;
+ __ Ldr(chain_prototype, FieldMemOperand(map, Map::kPrototypeOffset));
+ __ LoadRoot(null_value, Heap::kNullValueRootIndex);
+ // Speculatively set a result.
+ __ Mov(result, res_false);
+
+ __ Bind(&loop);
+
+ // If the chain prototype is the object prototype, return true.
+ __ Cmp(chain_prototype, prototype);
+ __ B(eq, &return_true);
+
+ // If the chain prototype is null, we've reached the end of the chain, so
+ // return false.
+ __ Cmp(chain_prototype, null_value);
+ __ B(eq, &return_result);
+
+ // Otherwise, load the next prototype in the chain, and loop.
+ __ Ldr(chain_map, FieldMemOperand(chain_prototype, HeapObject::kMapOffset));
+ __ Ldr(chain_prototype, FieldMemOperand(chain_map, Map::kPrototypeOffset));
+ __ B(&loop);
+ }
+
+ // Return sequence when no arguments are on the stack.
+ // We cannot fall through to here.
+ __ Bind(&return_true);
+ __ Mov(result, res_true);
+ __ Bind(&return_result);
+ if (HasCallSiteInlineCheck()) {
+ ASSERT(ReturnTrueFalseObject());
+ // Patch the call-site cache with the boolean result.
+ __ Add(map_check_site, map_check_site, kDeltaToLoadBoolResult);
+ __ GetRelocatedValueLocation(map_check_site, scratch2);
+ __ Str(result, MemOperand(scratch2));
+ } else {
+ __ StoreRoot(result, Heap::kInstanceofCacheAnswerRootIndex);
+ }
+ __ Ret();
+
+ Label object_not_null, object_not_null_or_smi;
+
+ __ Bind(&not_js_object);
+ Register object_type = x14;
+ // x0 result result return register (uninit)
+ // x10 function pointer to function
+ // x11 object pointer to object
+ // x14 object_type type of object (uninit)
+
+ // Before null, smi and string checks, check that the rhs is a function.
+ // For a non-function rhs, an exception must be thrown.
+ __ JumpIfSmi(function, &slow);
+ __ JumpIfNotObjectType(
+ function, scratch1, object_type, JS_FUNCTION_TYPE, &slow);
+
+ __ Mov(result, res_false);
+
+ // Null is not instance of anything. Compare the object itself against the
+ // null value; object_type holds the function's instance type and can never
+ // equal a heap object reference.
+ __ Cmp(object, Operand(masm->isolate()->factory()->null_value()));
+ __ B(ne, &object_not_null);
+ __ Ret();
+
+ __ Bind(&object_not_null);
+ // Smi values are not instances of anything.
+ __ JumpIfNotSmi(object, &object_not_null_or_smi);
+ __ Ret();
+
+ __ Bind(&object_not_null_or_smi);
+ // String values are not instances of anything.
+ __ IsObjectJSStringType(object, scratch2, &slow);
+ __ Ret();
+
+ // Slow-case. Tail call builtin.
+ __ Bind(&slow);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ // Arguments have either been passed into registers or have been previously
+ // popped. We need to push them before calling builtin.
+ __ Push(object, function);
+ __ InvokeBuiltin(Builtins::INSTANCE_OF, CALL_FUNCTION);
+ }
+ if (ReturnTrueFalseObject()) {
+ // Reload true/false because they were clobbered in the builtin call.
+ __ LoadTrueFalseRoots(res_true, res_false);
+ __ Cmp(result, 0);
+ __ Csel(result, res_true, res_false, eq);
+ }
+ __ Ret();
+}
+
+
+// Register carrying the lhs operand of "object instanceof function".
+// Matches the register comments in InstanceofStub::Generate (x11 = object).
+Register InstanceofStub::left() {
+ // Object to check (instanceof lhs).
+ return x11;
+}
+
+
+// Register carrying the rhs operand of "object instanceof function".
+// Matches the register comments in InstanceofStub::Generate (x10 = function).
+Register InstanceofStub::right() {
+ // Constructor function (instanceof rhs).
+ return x10;
+}
+
+
+// Reads arguments[key] for the current function directly from the stack,
+// looking through an arguments adaptor frame if one is present. Non-smi
+// or out-of-bounds keys fall back to the runtime.
+// On entry: x0 = formal argument count (smi), x1 = key (expected smi).
+// Returns the argument value in x0.
+void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
+ Register arg_count = x0;
+ Register key = x1;
+
+ // The displacement is the offset of the last parameter (if any) relative
+ // to the frame pointer.
+ static const int kDisplacement =
+ StandardFrameConstants::kCallerSPOffset - kPointerSize;
+
+ // Check that the key is a smi.
+ Label slow;
+ __ JumpIfNotSmi(key, &slow);
+
+ // Check if the calling frame is an arguments adaptor frame.
+ // NOTE: local_fp and caller_fp deliberately alias x11 — the Csel below
+ // selects which frame pointer (ours or the adaptor's) lands in x11.
+ Register local_fp = x11;
+ Register caller_fp = x11;
+ Register caller_ctx = x12;
+ Label skip_adaptor;
+ __ Ldr(caller_fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ __ Ldr(caller_ctx, MemOperand(caller_fp,
+ StandardFrameConstants::kContextOffset));
+ __ Cmp(caller_ctx, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+ __ Csel(local_fp, fp, caller_fp, ne);
+ __ B(ne, &skip_adaptor);
+
+ // Load the actual arguments limit found in the arguments adaptor frame.
+ __ Ldr(arg_count, MemOperand(caller_fp,
+ ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ Bind(&skip_adaptor);
+
+ // Check index against formal parameters count limit. Use unsigned comparison
+ // to get negative check for free: branch if key < 0 or key >= arg_count.
+ __ Cmp(key, arg_count);
+ __ B(hs, &slow);
+
+ // Read the argument from the stack and return it.
+ // The element address is local_fp + (arg_count - key) * kPointerSize
+ // (smi difference untagged and scaled in one operand) + kDisplacement.
+ __ Sub(x10, arg_count, key);
+ __ Add(x10, local_fp, Operand::UntagSmiAndScale(x10, kPointerSizeLog2));
+ __ Ldr(x0, MemOperand(x10, kDisplacement));
+ __ Ret();
+
+ // Slow case: handle non-smi or out-of-bounds access to arguments by calling
+ // the runtime system.
+ __ Bind(&slow);
+ __ Push(key);
+ __ TailCallRuntime(Runtime::kGetArgumentsProperty, 1, 1);
+}
+
+
+// Slow path for allocating a non-strict (sloppy) arguments object: if the
+// caller went through an arguments adaptor frame, patch the on-stack
+// argument count and parameter pointer first, then defer everything to
+// the runtime.
+void ArgumentsAccessStub::GenerateNewNonStrictSlow(MacroAssembler* masm) {
+ // Stack layout on entry.
+ // jssp[0]: number of parameters (tagged)
+ // jssp[8]: address of receiver argument
+ // jssp[16]: function
+
+ // Check if the calling frame is an arguments adaptor frame.
+ Label runtime;
+ Register caller_fp = x10;
+ __ Ldr(caller_fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ // Load and untag the context.
+ // The smi payload lives in the upper 32 bits of the tagged word
+ // (kSmiShift == 32), so a 32-bit load at offset +4 reads the untagged
+ // value directly.
+ STATIC_ASSERT((kSmiShift / kBitsPerByte) == 4);
+ __ Ldr(w11, MemOperand(caller_fp, StandardFrameConstants::kContextOffset +
+ (kSmiShift / kBitsPerByte)));
+ __ Cmp(w11, StackFrame::ARGUMENTS_ADAPTOR));
+ __ B(ne, &runtime);
+
+ // Patch the arguments.length and parameters pointer in the current frame.
+ __ Ldr(x11, MemOperand(caller_fp,
+ ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ Poke(x11, 0 * kXRegSizeInBytes);
+ __ Add(x10, caller_fp, Operand::UntagSmiAndScale(x11, kPointerSizeLog2));
+ __ Add(x10, x10, Operand(StandardFrameConstants::kCallerSPOffset));
+ __ Poke(x10, 1 * kXRegSizeInBytes);
+
+ __ Bind(&runtime);
+ __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
+}
+
+
+// Fast path for allocating a non-strict (sloppy) arguments object. One
+// allocation holds, in order: the (optional) parameter map for parameters
+// aliased to context slots, the backing store, and the JSObject itself.
+// Falls back to the runtime on allocation failure.
+void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
+ // Stack layout on entry.
+ // jssp[0]: number of parameters (tagged)
+ // jssp[8]: address of receiver argument
+ // jssp[16]: function
+ //
+ // Returns pointer to result object in x0.
+
+ // Note: arg_count_smi is an alias of param_count_smi.
+ Register arg_count_smi = x3;
+ Register param_count_smi = x3;
+ Register param_count = x7;
+ Register recv_arg = x14;
+ Register function = x4;
+ __ Pop(param_count_smi, recv_arg, function);
+ __ SmiUntag(param_count, param_count_smi);
+
+ // Check if the calling frame is an arguments adaptor frame.
+ Register caller_fp = x11;
+ Register caller_ctx = x12;
+ Label runtime;
+ Label adaptor_frame, try_allocate;
+ __ Ldr(caller_fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ __ Ldr(caller_ctx, MemOperand(caller_fp,
+ StandardFrameConstants::kContextOffset));
+ __ Cmp(caller_ctx, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+ __ B(eq, &adaptor_frame);
+
+ // No adaptor, parameter count = argument count.
+
+ // x1 mapped_params number of mapped params, min(params, args) (uninit)
+ // x2 arg_count number of function arguments (uninit)
+ // x3 arg_count_smi number of function arguments (smi)
+ // x4 function function pointer
+ // x7 param_count number of function parameters
+ // x11 caller_fp caller's frame pointer
+ // x14 recv_arg pointer to receiver arguments
+
+ Register arg_count = x2;
+ __ Mov(arg_count, param_count);
+ __ B(&try_allocate);
+
+ // We have an adaptor frame. Patch the parameters pointer.
+ __ Bind(&adaptor_frame);
+ __ Ldr(arg_count_smi,
+ MemOperand(caller_fp,
+ ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ SmiUntag(arg_count, arg_count_smi);
+ __ Add(x10, caller_fp, Operand(arg_count, LSL, kPointerSizeLog2));
+ __ Add(recv_arg, x10, StandardFrameConstants::kCallerSPOffset);
+
+ // Compute the mapped parameter count = min(param_count, arg_count)
+ Register mapped_params = x1;
+ __ Cmp(param_count, arg_count);
+ __ Csel(mapped_params, param_count, arg_count, lt);
+
+ __ Bind(&try_allocate);
+
+ // x0 alloc_obj pointer to allocated objects: param map, backing
+ // store, arguments (uninit)
+ // x1 mapped_params number of mapped parameters, min(params, args)
+ // x2 arg_count number of function arguments
+ // x3 arg_count_smi number of function arguments (smi)
+ // x4 function function pointer
+ // x7 param_count number of function parameters
+ // x10 size size of objects to allocate (uninit)
+ // x14 recv_arg pointer to receiver arguments
+
+ // Compute the size of backing store, parameter map, and arguments object.
+ // 1. Parameter map, has two extra words containing context and backing
+ // store.
+ const int kParameterMapHeaderSize =
+ FixedArray::kHeaderSize + 2 * kPointerSize;
+
+ // Calculate the parameter map size, assuming it exists.
+ Register size = x10;
+ __ Mov(size, Operand(mapped_params, LSL, kPointerSizeLog2));
+ __ Add(size, size, kParameterMapHeaderSize);
+
+ // If there are no mapped parameters, set the running size total to zero.
+ // Otherwise, use the parameter map size calculated earlier.
+ __ Cmp(mapped_params, 0);
+ __ CzeroX(size, eq);
+
+ // 2. Add the size of the backing store and arguments object.
+ __ Add(size, size, Operand(arg_count, LSL, kPointerSizeLog2));
+ __ Add(size, size, FixedArray::kHeaderSize + Heap::kArgumentsObjectSize);
+
+ // Do the allocation of all three objects in one go. Assign this to x0, as it
+ // will be returned to the caller.
+ Register alloc_obj = x0;
+ __ Allocate(size, alloc_obj, x11, x12, &runtime, TAG_OBJECT);
+
+ // Get the arguments boilerplate from the current (global) context.
+
+ // x0 alloc_obj pointer to allocated objects (param map, backing
+ // store, arguments)
+ // x1 mapped_params number of mapped parameters, min(params, args)
+ // x2 arg_count number of function arguments
+ // x3 arg_count_smi number of function arguments (smi)
+ // x4 function function pointer
+ // x7 param_count number of function parameters
+ // x11 args_offset offset to args (or aliased args) boilerplate (uninit)
+ // x14 recv_arg pointer to receiver arguments
+
+ Register global_object = x10;
+ Register global_ctx = x10;
+ Register args_offset = x11;
+ Register aliased_args_offset = x10;
+ __ Ldr(global_object, GlobalObjectMemOperand());
+ __ Ldr(global_ctx, FieldMemOperand(global_object,
+ GlobalObject::kNativeContextOffset));
+
+ __ Ldr(args_offset, ContextMemOperand(global_ctx,
+ Context::ARGUMENTS_BOILERPLATE_INDEX));
+ __ Ldr(aliased_args_offset,
+ ContextMemOperand(global_ctx,
+ Context::ALIASED_ARGUMENTS_BOILERPLATE_INDEX));
+ // Use the aliased-arguments boilerplate when any parameters are mapped.
+ __ Cmp(mapped_params, 0);
+ __ CmovX(args_offset, aliased_args_offset, ne);
+
+ // Copy the JS object part.
+ __ CopyFields(alloc_obj, args_offset, CPURegList(x10, x12, x13),
+ JSObject::kHeaderSize / kPointerSize);
+
+ // Set up the callee in-object property.
+ STATIC_ASSERT(Heap::kArgumentsCalleeIndex == 1);
+ const int kCalleeOffset = JSObject::kHeaderSize +
+ Heap::kArgumentsCalleeIndex * kPointerSize;
+ __ Str(function, FieldMemOperand(alloc_obj, kCalleeOffset));
+
+ // Use the length and set that as an in-object property.
+ STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
+ const int kLengthOffset = JSObject::kHeaderSize +
+ Heap::kArgumentsLengthIndex * kPointerSize;
+ __ Str(arg_count_smi, FieldMemOperand(alloc_obj, kLengthOffset));
+
+ // Set up the elements pointer in the allocated arguments object.
+ // If we allocated a parameter map, "elements" will point there, otherwise
+ // it will point to the backing store.
+
+ // x0 alloc_obj pointer to allocated objects (param map, backing
+ // store, arguments)
+ // x1 mapped_params number of mapped parameters, min(params, args)
+ // x2 arg_count number of function arguments
+ // x3 arg_count_smi number of function arguments (smi)
+ // x4 function function pointer
+ // x5 elements pointer to parameter map or backing store (uninit)
+ // x6 backing_store pointer to backing store (uninit)
+ // x7 param_count number of function parameters
+ // x14 recv_arg pointer to receiver arguments
+
+ Register elements = x5;
+ __ Add(elements, alloc_obj, Heap::kArgumentsObjectSize);
+ __ Str(elements, FieldMemOperand(alloc_obj, JSObject::kElementsOffset));
+
+ // Initialize parameter map. If there are no mapped arguments, we're done.
+ Label skip_parameter_map;
+ __ Cmp(mapped_params, 0);
+ // Set up backing store address, because it is needed later for filling in
+ // the unmapped arguments.
+ Register backing_store = x6;
+ __ CmovX(backing_store, elements, eq);
+ __ B(eq, &skip_parameter_map);
+
+ __ LoadRoot(x10, Heap::kNonStrictArgumentsElementsMapRootIndex);
+ __ Str(x10, FieldMemOperand(elements, FixedArray::kMapOffset));
+ // Parameter map length = mapped_params + 2 (context and backing store).
+ __ Add(x10, mapped_params, 2);
+ __ SmiTag(x10);
+ __ Str(x10, FieldMemOperand(elements, FixedArray::kLengthOffset));
+ __ Str(cp, FieldMemOperand(elements,
+ FixedArray::kHeaderSize + 0 * kPointerSize));
+ __ Add(x10, elements, Operand(mapped_params, LSL, kPointerSizeLog2));
+ __ Add(x10, x10, kParameterMapHeaderSize);
+ __ Str(x10, FieldMemOperand(elements,
+ FixedArray::kHeaderSize + 1 * kPointerSize));
+
+ // Copy the parameter slots and the holes in the arguments.
+ // We need to fill in mapped_parameter_count slots. Then index the context,
+ // where parameters are stored in reverse order, at:
+ //
+ // MIN_CONTEXT_SLOTS .. MIN_CONTEXT_SLOTS + parameter_count - 1
+ //
+ // The mapped parameter thus needs to get indices:
+ //
+ // MIN_CONTEXT_SLOTS + parameter_count - 1 ..
+ // MIN_CONTEXT_SLOTS + parameter_count - mapped_parameter_count
+ //
+ // We loop from right to left.
+
+ // x0 alloc_obj pointer to allocated objects (param map, backing
+ // store, arguments)
+ // x1 mapped_params number of mapped parameters, min(params, args)
+ // x2 arg_count number of function arguments
+ // x3 arg_count_smi number of function arguments (smi)
+ // x4 function function pointer
+ // x5 elements pointer to parameter map or backing store (uninit)
+ // x6 backing_store pointer to backing store (uninit)
+ // x7 param_count number of function parameters
+ // x11 loop_count parameter loop counter (uninit)
+ // x12 index parameter index (smi, uninit)
+ // x13 the_hole hole value (uninit)
+ // x14 recv_arg pointer to receiver arguments
+
+ Register loop_count = x11;
+ Register index = x12;
+ Register the_hole = x13;
+ Label parameters_loop, parameters_test;
+ __ Mov(loop_count, mapped_params);
+ __ Add(index, param_count, Context::MIN_CONTEXT_SLOTS);
+ __ Sub(index, index, mapped_params);
+ __ SmiTag(index);
+ __ LoadRoot(the_hole, Heap::kTheHoleValueRootIndex);
+ __ Add(backing_store, elements, Operand(loop_count, LSL, kPointerSizeLog2));
+ __ Add(backing_store, backing_store, kParameterMapHeaderSize);
+
+ __ B(&parameters_test);
+
+ __ Bind(&parameters_loop);
+ __ Sub(loop_count, loop_count, 1);
+ __ Mov(x10, Operand(loop_count, LSL, kPointerSizeLog2));
+ __ Add(x10, x10, kParameterMapHeaderSize - kHeapObjectTag);
+ __ Str(index, MemOperand(elements, x10));
+ __ Sub(x10, x10, kParameterMapHeaderSize - FixedArray::kHeaderSize);
+ __ Str(the_hole, MemOperand(backing_store, x10));
+ __ Add(index, index, Operand(Smi::FromInt(1)));
+ __ Bind(&parameters_test);
+ __ Cbnz(loop_count, &parameters_loop);
+
+ __ Bind(&skip_parameter_map);
+ // Copy arguments header and remaining slots (if there are any.)
+ __ LoadRoot(x10, Heap::kFixedArrayMapRootIndex);
+ __ Str(x10, FieldMemOperand(backing_store, FixedArray::kMapOffset));
+ __ Str(arg_count_smi, FieldMemOperand(backing_store,
+ FixedArray::kLengthOffset));
+
+ // x0 alloc_obj pointer to allocated objects (param map, backing
+ // store, arguments)
+ // x1 mapped_params number of mapped parameters, min(params, args)
+ // x2 arg_count number of function arguments
+ // x4 function function pointer
+ // x3 arg_count_smi number of function arguments (smi)
+ // x6 backing_store pointer to backing store (uninit)
+ // x14 recv_arg pointer to receiver arguments
+
+ Label arguments_loop, arguments_test;
+ __ Mov(x10, mapped_params);
+ __ Sub(recv_arg, recv_arg, Operand(x10, LSL, kPointerSizeLog2));
+ __ B(&arguments_test);
+
+ __ Bind(&arguments_loop);
+ __ Sub(recv_arg, recv_arg, kPointerSize);
+ __ Ldr(x11, MemOperand(recv_arg));
+ __ Add(x12, backing_store, Operand(x10, LSL, kPointerSizeLog2));
+ __ Str(x11, FieldMemOperand(x12, FixedArray::kHeaderSize));
+ __ Add(x10, x10, 1);
+
+ __ Bind(&arguments_test);
+ __ Cmp(x10, arg_count);
+ __ B(lt, &arguments_loop);
+
+ __ Ret();
+
+ // Do the runtime call to allocate the arguments object.
+ __ Bind(&runtime);
+ __ Push(function, recv_arg, arg_count_smi);
+ __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
+}
+
+
+// Allocates a strict-mode arguments object using the strict-mode
+// boilerplate: a single allocation holding the JSObject plus a plain
+// FixedArray backing store (no parameter map). Falls back to the runtime
+// on allocation failure.
+void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
+ // Stack layout on entry.
+ // jssp[0]: number of parameters (tagged)
+ // jssp[8]: address of receiver argument
+ // jssp[16]: function
+ //
+ // Returns pointer to result object in x0.
+
+ // Get the stub arguments from the frame, and make an untagged copy of the
+ // parameter count.
+ Register param_count_smi = x1;
+ Register params = x2;
+ Register function = x3;
+ Register param_count = x13;
+ __ Pop(param_count_smi, params, function);
+ __ SmiUntag(param_count, param_count_smi);
+
+ // Test if arguments adaptor needed.
+ Register caller_fp = x11;
+ Register caller_ctx = x12;
+ Label try_allocate, runtime;
+ __ Ldr(caller_fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ __ Ldr(caller_ctx, MemOperand(caller_fp,
+ StandardFrameConstants::kContextOffset));
+ __ Cmp(caller_ctx, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+ __ B(ne, &try_allocate);
+
+ // Fall through: an arguments adaptor frame is present.
+ // x1 param_count_smi number of parameters passed to function (smi)
+ // x2 params pointer to parameters
+ // x3 function function pointer
+ // x11 caller_fp caller's frame pointer
+ // x13 param_count number of parameters passed to function
+
+ // Patch the argument length and parameters pointer.
+ __ Ldr(param_count_smi,
+ MemOperand(caller_fp,
+ ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ SmiUntag(param_count, param_count_smi));
+ __ Add(x10, caller_fp, Operand(param_count, LSL, kPointerSizeLog2));
+ __ Add(params, x10, StandardFrameConstants::kCallerSPOffset);
+
+ // Try the new space allocation. Start out with computing the size of the
+ // arguments object and the elements array in words.
+ Register size = x10;
+ __ Bind(&try_allocate);
+ __ Add(size, param_count, FixedArray::kHeaderSize / kPointerSize);
+ // With zero parameters no backing store is needed at all.
+ __ Cmp(param_count, 0);
+ __ CzeroX(size, eq);
+ __ Add(size, size, Heap::kArgumentsObjectSizeStrict / kPointerSize);
+
+ // Do the allocation of both objects in one go. Assign this to x0, as it will
+ // be returned to the caller.
+ Register alloc_obj = x0;
+ __ Allocate(size, alloc_obj, x11, x12, &runtime,
+ static_cast<AllocationFlags>(TAG_OBJECT | SIZE_IN_WORDS));
+
+ // Get the arguments boilerplate from the current (native) context.
+ Register global_object = x10;
+ Register global_ctx = x10;
+ Register args_offset = x4;
+ __ Ldr(global_object, GlobalObjectMemOperand());
+ __ Ldr(global_ctx, FieldMemOperand(global_object,
+ GlobalObject::kNativeContextOffset));
+ __ Ldr(args_offset,
+ ContextMemOperand(global_ctx,
+ Context::STRICT_MODE_ARGUMENTS_BOILERPLATE_INDEX));
+
+ // x0 alloc_obj pointer to allocated objects: parameter array and
+ // arguments object
+ // x1 param_count_smi number of parameters passed to function (smi)
+ // x2 params pointer to parameters
+ // x3 function function pointer
+ // x4 args_offset offset to arguments boilerplate
+ // x13 param_count number of parameters passed to function
+
+ // Copy the JS object part.
+ __ CopyFields(alloc_obj, args_offset, CPURegList(x5, x6, x7),
+ JSObject::kHeaderSize / kPointerSize);
+
+ // Set the smi-tagged length as an in-object property.
+ STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
+ const int kLengthOffset = JSObject::kHeaderSize +
+ Heap::kArgumentsLengthIndex * kPointerSize;
+ __ Str(param_count_smi, FieldMemOperand(alloc_obj, kLengthOffset));
+
+ // If there are no actual arguments, we're done.
+ Label done;
+ __ Cbz(param_count, &done);
+
+ // Set up the elements pointer in the allocated arguments object and
+ // initialize the header in the elements fixed array.
+ Register elements = x5;
+ __ Add(elements, alloc_obj, Heap::kArgumentsObjectSizeStrict);
+ __ Str(elements, FieldMemOperand(alloc_obj, JSObject::kElementsOffset));
+ __ LoadRoot(x10, Heap::kFixedArrayMapRootIndex);
+ __ Str(x10, FieldMemOperand(elements, FixedArray::kMapOffset));
+ __ Str(param_count_smi, FieldMemOperand(elements, FixedArray::kLengthOffset));
+
+ // x0 alloc_obj pointer to allocated objects: parameter array and
+ // arguments object
+ // x1 param_count_smi number of parameters passed to function (smi)
+ // x2 params pointer to parameters
+ // x3 function function pointer
+ // x4 array pointer to array slot (uninit)
+ // x5 elements pointer to elements array of alloc_obj
+ // x13 param_count number of parameters passed to function
+
+ // Copy the fixed array slots.
+ Label loop;
+ Register array = x4;
+ // Set up pointer to first array slot.
+ __ Add(array, elements, FixedArray::kHeaderSize - kHeapObjectTag);
+
+ __ Bind(&loop);
+ // Pre-decrement the parameters pointer by kPointerSize on each iteration.
+ // Pre-decrement in order to skip receiver.
+ __ Ldr(x10, MemOperand(params, -kPointerSize, PreIndex));
+ // Post-increment elements by kPointerSize on each iteration.
+ __ Str(x10, MemOperand(array, kPointerSize, PostIndex));
+ __ Sub(param_count, param_count, 1);
+ __ Cbnz(param_count, &loop);
+
+ // Return from stub.
+ __ Bind(&done);
+ __ Ret();
+
+ // Do the runtime call to allocate the arguments object.
+ __ Bind(&runtime);
+ __ Push(function, params, param_count_smi);
+ __ TailCallRuntime(Runtime::kNewStrictArgumentsFast, 3, 1);
+}
+
+
+void RegExpExecStub::Generate(MacroAssembler* masm) {
+#ifdef V8_INTERPRETED_REGEXP
+ __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
+#else // V8_INTERPRETED_REGEXP
+
+ // Stack frame on entry.
+ // jssp[0]: last_match_info (expected JSArray)
+ // jssp[8]: previous index
+ // jssp[16]: subject string
+ // jssp[24]: JSRegExp object
+ Label runtime;
+
+ // Use of registers for this function.
+
+ // Variable registers:
+ // x10-x13 used as scratch registers
+ // w0 string_type type of subject string
+ // x2 jsstring_length subject string length
+ // x3 jsregexp_object JSRegExp object
+ // w4 string_encoding ASCII or UC16
+ // w5 sliced_string_offset if the string is a SlicedString
+ // offset to the underlying string
+ // w6 string_representation groups attributes of the string:
+ // - is a string
+ // - type of the string
+ // - is a short external string
+ Register string_type = w0;
+ Register jsstring_length = x2;
+ Register jsregexp_object = x3;
+ Register string_encoding = w4;
+ Register sliced_string_offset = w5;
+ Register string_representation = w6;
+
+ // These are in callee save registers and will be preserved by the call
+ // to the native RegExp code, as this code is called using the normal
+ // C calling convention. When calling directly from generated code the
+ // native RegExp code will not do a GC and therefore the content of
+ // these registers are safe to use after the call.
+
+ // x19 subject subject string
+ // x20 regexp_data RegExp data (FixedArray)
+ // x21 last_match_info_elements info relative to the last match
+ // (FixedArray)
+ // x22 code_object generated regexp code
+ Register subject = x19;
+ Register regexp_data = x20;
+ Register last_match_info_elements = x21;
+ Register code_object = x22;
+
+ // TODO(jbramley): Is it necessary to preserve these? I don't think ARM does.
+ CPURegList used_callee_saved_registers(subject,
+ regexp_data,
+ last_match_info_elements,
+ code_object);
+ __ PushCPURegList(used_callee_saved_registers);
+
+ // Stack frame.
+ // jssp[0] : x19
+ // jssp[8] : x20
+ // jssp[16]: x21
+ // jssp[24]: x22
+ // jssp[32]: last_match_info (JSArray)
+ // jssp[40]: previous index
+ // jssp[48]: subject string
+ // jssp[56]: JSRegExp object
+
+ const int kLastMatchInfoOffset = 4 * kPointerSize;
+ const int kPreviousIndexOffset = 5 * kPointerSize;
+ const int kSubjectOffset = 6 * kPointerSize;
+ const int kJSRegExpOffset = 7 * kPointerSize;
+
+ // Ensure that a RegExp stack is allocated.
+ Isolate* isolate = masm->isolate();
+ ExternalReference address_of_regexp_stack_memory_address =
+ ExternalReference::address_of_regexp_stack_memory_address(isolate);
+ ExternalReference address_of_regexp_stack_memory_size =
+ ExternalReference::address_of_regexp_stack_memory_size(isolate);
+ __ Mov(x10, Operand(address_of_regexp_stack_memory_size));
+ __ Ldr(x10, MemOperand(x10));
+ __ Cbz(x10, &runtime);
+
+ // Check that the first argument is a JSRegExp object.
+ ASSERT(jssp.Is(__ StackPointer()));
+ __ Peek(jsregexp_object, kJSRegExpOffset);
+ __ JumpIfSmi(jsregexp_object, &runtime);
+ __ JumpIfNotObjectType(jsregexp_object, x10, x10, JS_REGEXP_TYPE, &runtime);
+
+ // Check that the RegExp has been compiled (data contains a fixed array).
+ __ Ldr(regexp_data, FieldMemOperand(jsregexp_object, JSRegExp::kDataOffset));
+ if (FLAG_debug_code) {
+ STATIC_ASSERT(kSmiTag == 0);
+ __ Tst(regexp_data, kSmiTagMask);
+ __ Check(ne, kUnexpectedTypeForRegExpDataFixedArrayExpected);
+ __ CompareObjectType(regexp_data, x10, x10, FIXED_ARRAY_TYPE);
+ __ Check(eq, kUnexpectedTypeForRegExpDataFixedArrayExpected);
+ }
+
+ // Check the type of the RegExp. Only continue if type is JSRegExp::IRREGEXP.
+ __ Ldr(x10, FieldMemOperand(regexp_data, JSRegExp::kDataTagOffset));
+ __ Cmp(x10, Operand(Smi::FromInt(JSRegExp::IRREGEXP)));
+ __ B(ne, &runtime);
+
+ // Check that the number of captures fit in the static offsets vector buffer.
+ // We have always at least one capture for the whole match, plus additional
+ // ones due to capturing parentheses. A capture takes 2 registers.
+ // The number of capture registers then is (number_of_captures + 1) * 2.
+ __ Ldrsw(x10,
+ UntagSmiFieldMemOperand(regexp_data,
+ JSRegExp::kIrregexpCaptureCountOffset));
+ // Check (number_of_captures + 1) * 2 <= offsets vector size
+ // number_of_captures * 2 <= offsets vector size - 2
+ STATIC_ASSERT(Isolate::kJSRegexpStaticOffsetsVectorSize >= 2);
+ __ Add(x10, x10, x10);
+ __ Cmp(x10, Isolate::kJSRegexpStaticOffsetsVectorSize - 2);
+ __ B(hi, &runtime);
+
+ // Initialize offset for possibly sliced string.
+ __ Mov(sliced_string_offset, 0);
+
+ ASSERT(jssp.Is(__ StackPointer()));
+ __ Peek(subject, kSubjectOffset);
+ __ JumpIfSmi(subject, &runtime);
+
+ __ Ldr(x10, FieldMemOperand(subject, HeapObject::kMapOffset));
+ __ Ldrb(string_type, FieldMemOperand(x10, Map::kInstanceTypeOffset));
+
+ __ Ldr(jsstring_length, FieldMemOperand(subject, String::kLengthOffset));
+
+ // Handle subject string according to its encoding and representation:
+ // (1) Sequential string? If yes, go to (5).
+ // (2) Anything but sequential or cons? If yes, go to (6).
+ // (3) Cons string. If the string is flat, replace subject with first string.
+ // Otherwise bailout.
+ // (4) Is subject external? If yes, go to (7).
+ // (5) Sequential string. Load regexp code according to encoding.
+ // (E) Carry on.
+ /// [...]
+
+ // Deferred code at the end of the stub:
+ // (6) Not a long external string? If yes, go to (8).
+ // (7) External string. Make it, offset-wise, look like a sequential string.
+ // Go to (5).
+ // (8) Short external string or not a string? If yes, bail out to runtime.
+ // (9) Sliced string. Replace subject with parent. Go to (4).
+
+ Label check_underlying; // (4)
+ Label seq_string; // (5)
+ Label not_seq_nor_cons; // (6)
+ Label external_string; // (7)
+ Label not_long_external; // (8)
+
+ // (1) Sequential string? If yes, go to (5).
+ __ And(string_representation,
+ string_type,
+ kIsNotStringMask |
+ kStringRepresentationMask |
+ kShortExternalStringMask);
+ // We depend on the fact that Strings of type
+ // SeqString and not ShortExternalString are defined
+ // by the following pattern:
+ // string_type: 0XX0 XX00
+ // ^ ^ ^^
+ // | | ||
+ // | | is a SeqString
+ // | is not a short external String
+ // is a String
+ STATIC_ASSERT((kStringTag | kSeqStringTag) == 0);
+ STATIC_ASSERT(kShortExternalStringTag != 0);
+ __ Cbz(string_representation, &seq_string); // Go to (5).
+
+ // (2) Anything but sequential or cons? If yes, go to (6).
+ STATIC_ASSERT(kConsStringTag < kExternalStringTag);
+ STATIC_ASSERT(kSlicedStringTag > kExternalStringTag);
+ STATIC_ASSERT(kIsNotStringMask > kExternalStringTag);
+ STATIC_ASSERT(kShortExternalStringTag > kExternalStringTag);
+ __ Cmp(string_representation, kExternalStringTag);
+ __ B(ge, ¬_seq_nor_cons); // Go to (6).
+
+ // (3) Cons string. Check that it's flat.
+ __ Ldr(x10, FieldMemOperand(subject, ConsString::kSecondOffset));
+ __ JumpIfNotRoot(x10, Heap::kempty_stringRootIndex, &runtime);
+ // Replace subject with first string.
+ __ Ldr(subject, FieldMemOperand(subject, ConsString::kFirstOffset));
+
+ // (4) Is subject external? If yes, go to (7).
+ __ Bind(&check_underlying);
+ // Reload the string type.
+ __ Ldr(x10, FieldMemOperand(subject, HeapObject::kMapOffset));
+ __ Ldrb(string_type, FieldMemOperand(x10, Map::kInstanceTypeOffset));
+ STATIC_ASSERT(kSeqStringTag == 0);
+ // The underlying external string is never a short external string.
+ STATIC_CHECK(ExternalString::kMaxShortLength < ConsString::kMinLength);
+ STATIC_CHECK(ExternalString::kMaxShortLength < SlicedString::kMinLength);
+ __ TestAndBranchIfAnySet(string_type.X(),
+ kStringRepresentationMask,
+ &external_string); // Go to (7).
+
+ // (5) Sequential string. Load regexp code according to encoding.
+ __ Bind(&seq_string);
+
+ // Check that the third argument is a positive smi less than the subject
+ // string length. A negative value will be greater (unsigned comparison).
+ ASSERT(jssp.Is(__ StackPointer()));
+ __ Peek(x10, kPreviousIndexOffset);
+ __ JumpIfNotSmi(x10, &runtime);
+ __ Cmp(jsstring_length, x10);
+ __ B(ls, &runtime);
+
+ // Argument 2 (x1): We need to load argument 2 (the previous index) into x1
+ // before entering the exit frame.
+ __ SmiUntag(x1, x10);
+
+ // The third bit determines the string encoding in string_type.
+ STATIC_ASSERT(kOneByteStringTag == 0x04);
+ STATIC_ASSERT(kTwoByteStringTag == 0x00);
+ STATIC_ASSERT(kStringEncodingMask == 0x04);
+
+ // Find the code object based on the assumptions above.
+ // kDataAsciiCodeOffset and kDataUC16CodeOffset are adjacent, adds an offset
+ // of kPointerSize to reach the latter.
+ ASSERT_EQ(JSRegExp::kDataAsciiCodeOffset + kPointerSize,
+ JSRegExp::kDataUC16CodeOffset);
+ __ Mov(x10, kPointerSize);
+ // We will need the encoding later: ASCII = 0x04
+ // UC16 = 0x00
+ __ Ands(string_encoding, string_type, kStringEncodingMask);
+ __ CzeroX(x10, ne);
+ __ Add(x10, regexp_data, x10);
+ __ Ldr(code_object, FieldMemOperand(x10, JSRegExp::kDataAsciiCodeOffset));
+
+ // (E) Carry on. String handling is done.
+
+ // Check that the irregexp code has been generated for the actual string
+ // encoding. If it has, the field contains a code object otherwise it contains
+ // a smi (code flushing support).
+ __ JumpIfSmi(code_object, &runtime);
+
+ // All checks done. Now push arguments for native regexp code.
+ __ IncrementCounter(isolate->counters()->regexp_entry_native(), 1,
+ x10,
+ x11);
+
+ // Isolates: note we add an additional parameter here (isolate pointer).
+ __ EnterExitFrame(false, x10, 1);
+ ASSERT(csp.Is(__ StackPointer()));
+
+ // We have 9 arguments to pass to the regexp code, therefore we have to pass
+ // one on the stack and the rest as registers.
+
+ // Note that the placement of the argument on the stack isn't standard
+ // AAPCS64:
+ // csp[0]: Space for the return address placed by DirectCEntryStub.
+ // csp[8]: Argument 9, the current isolate address.
+
+ __ Mov(x10, Operand(ExternalReference::isolate_address(isolate)));
+ __ Poke(x10, kPointerSize);
+
+ Register length = w11;
+ Register previous_index_in_bytes = w12;
+ Register start = x13;
+
+ // Load start of the subject string.
+ __ Add(start, subject, SeqString::kHeaderSize - kHeapObjectTag);
+ // Load the length from the original subject string from the previous stack
+ // frame. Therefore we have to use fp, which points exactly to two pointer
+ // sizes below the previous sp. (Because creating a new stack frame pushes
+ // the previous fp onto the stack and decrements sp by 2 * kPointerSize.)
+ __ Ldr(subject, MemOperand(fp, kSubjectOffset + 2 * kPointerSize));
+ __ Ldr(length, UntagSmiFieldMemOperand(subject, String::kLengthOffset));
+
+ // Handle UC16 encoding, two bytes make one character.
+ // string_encoding: if ASCII: 0x04
+ // if UC16: 0x00
+ STATIC_ASSERT(kStringEncodingMask == 0x04);
+ __ Ubfx(string_encoding, string_encoding, 2, 1);
+ __ Eor(string_encoding, string_encoding, 1);
+ // string_encoding: if ASCII: 0
+ // if UC16: 1
+
+ // Convert string positions from characters to bytes.
+ // Previous index is in x1.
+ __ Lsl(previous_index_in_bytes, w1, string_encoding);
+ __ Lsl(length, length, string_encoding);
+ __ Lsl(sliced_string_offset, sliced_string_offset, string_encoding);
+
+ // Argument 1 (x0): Subject string.
+ __ Mov(x0, subject);
+
+ // Argument 2 (x1): Previous index, already there.
+
+ // Argument 3 (x2): Get the start of input.
+ // Start of input = start of string + previous index + substring offset
+ // (0 if the string
+ // is not sliced).
+ __ Add(w10, previous_index_in_bytes, sliced_string_offset);
+ __ Add(x2, start, Operand(w10, UXTW));
+
+ // Argument 4 (x3):
+ // End of input = start of input + (length of input - previous index)
+ __ Sub(w10, length, previous_index_in_bytes);
+ __ Add(x3, x2, Operand(w10, UXTW));
+
+ // Argument 5 (x4): static offsets vector buffer.
+ __ Mov(x4,
+ Operand(ExternalReference::address_of_static_offsets_vector(isolate)));
+
+ // Argument 6 (x5): Set the number of capture registers to zero to force
+ // global regexps to behave as non-global. This stub is not used for global
+ // regexps.
+ __ Mov(x5, 0);
+
+ // Argument 7 (x6): Start (high end) of backtracking stack memory area.
+ __ Mov(x10, Operand(address_of_regexp_stack_memory_address));
+ __ Ldr(x10, MemOperand(x10));
+ __ Mov(x11, Operand(address_of_regexp_stack_memory_size));
+ __ Ldr(x11, MemOperand(x11));
+ __ Add(x6, x10, x11);
+
+ // Argument 8 (x7): Indicate that this is a direct call from JavaScript.
+ __ Mov(x7, 1);
+
+ // Locate the code entry and call it.
+ __ Add(code_object, code_object, Code::kHeaderSize - kHeapObjectTag);
+ DirectCEntryStub stub;
+ stub.GenerateCall(masm, code_object);
+
+ __ LeaveExitFrame(false, x10, true);
+
+ // The generated regexp code returns an int32 in w0.
+ Label failure, exception;
+ __ CompareAndBranch(w0, NativeRegExpMacroAssembler::FAILURE, eq, &failure);
+ __ CompareAndBranch(w0,
+ NativeRegExpMacroAssembler::EXCEPTION,
+ eq,
+ &exception);
+ __ CompareAndBranch(w0, NativeRegExpMacroAssembler::RETRY, eq, &runtime);
+
+ // Success: process the result from the native regexp code.
+ Register number_of_capture_registers = x12;
+
+ // Calculate number of capture registers (number_of_captures + 1) * 2
+ // and store it in the last match info.
+ __ Ldrsw(x10,
+ UntagSmiFieldMemOperand(regexp_data,
+ JSRegExp::kIrregexpCaptureCountOffset));
+ __ Add(x10, x10, x10);
+ __ Add(number_of_capture_registers, x10, 2);
+
+ // Check that the fourth object is a JSArray object.
+ ASSERT(jssp.Is(__ StackPointer()));
+ __ Peek(x10, kLastMatchInfoOffset);
+ __ JumpIfSmi(x10, &runtime);
+ __ JumpIfNotObjectType(x10, x11, x11, JS_ARRAY_TYPE, &runtime);
+
+ // Check that the JSArray is the fast case.
+ __ Ldr(last_match_info_elements,
+ FieldMemOperand(x10, JSArray::kElementsOffset));
+ __ Ldr(x10,
+ FieldMemOperand(last_match_info_elements, HeapObject::kMapOffset));
+ __ JumpIfNotRoot(x10, Heap::kFixedArrayMapRootIndex, &runtime);
+
+ // Check that the last match info has space for the capture registers and the
+ // additional information (overhead).
+ // (number_of_captures + 1) * 2 + overhead <= last match info size
+ // (number_of_captures * 2) + 2 + overhead <= last match info size
+ // number_of_capture_registers + overhead <= last match info size
+ __ Ldrsw(x10,
+ UntagSmiFieldMemOperand(last_match_info_elements,
+ FixedArray::kLengthOffset));
+ __ Add(x11, number_of_capture_registers, RegExpImpl::kLastMatchOverhead);
+ __ Cmp(x11, x10);
+ __ B(gt, &runtime);
+
+ // Store the capture count.
+ __ SmiTag(x10, number_of_capture_registers);
+ __ Str(x10,
+ FieldMemOperand(last_match_info_elements,
+ RegExpImpl::kLastCaptureCountOffset));
+ // Store last subject and last input.
+ __ Str(subject,
+ FieldMemOperand(last_match_info_elements,
+ RegExpImpl::kLastSubjectOffset));
+ // Use x10 as the subject string in order to only need
+ // one RecordWriteStub.
+ __ Mov(x10, subject);
+ __ RecordWriteField(last_match_info_elements,
+ RegExpImpl::kLastSubjectOffset,
+ x10,
+ x11,
+ kLRHasNotBeenSaved,
+ kDontSaveFPRegs);
+ __ Str(subject,
+ FieldMemOperand(last_match_info_elements,
+ RegExpImpl::kLastInputOffset));
+ __ Mov(x10, subject);
+ __ RecordWriteField(last_match_info_elements,
+ RegExpImpl::kLastInputOffset,
+ x10,
+ x11,
+ kLRHasNotBeenSaved,
+ kDontSaveFPRegs);
+
+ Register last_match_offsets = x13;
+ Register offsets_vector_index = x14;
+ Register current_offset = x15;
+
+ // Get the static offsets vector filled by the native regexp code
+ // and fill the last match info.
+ ExternalReference address_of_static_offsets_vector =
+ ExternalReference::address_of_static_offsets_vector(isolate);
+ __ Mov(offsets_vector_index, Operand(address_of_static_offsets_vector));
+
+ Label next_capture, done;
+ // Capture register counter starts from number of capture registers and
+ // iterates down to zero (inclusive).
+ __ Add(last_match_offsets,
+ last_match_info_elements,
+ RegExpImpl::kFirstCaptureOffset - kHeapObjectTag);
+ __ Bind(&next_capture);
+ __ Subs(number_of_capture_registers, number_of_capture_registers, 2);
+ __ B(mi, &done);
+ // Read two 32 bit values from the static offsets vector buffer into
+ // an X register
+ __ Ldr(current_offset,
+ MemOperand(offsets_vector_index, kWRegSizeInBytes * 2, PostIndex));
+ // Store the smi values in the last match info.
+ __ SmiTag(x10, current_offset);
+ // Clearing the 32 bottom bits gives us a Smi.
+ STATIC_ASSERT(kSmiShift == 32);
+ __ And(x11, current_offset, ~kWRegMask);
+ __ Stp(x10,
+ x11,
+ MemOperand(last_match_offsets, kXRegSizeInBytes * 2, PostIndex));
+ __ B(&next_capture);
+ __ Bind(&done);
+
+ // Return last match info.
+ __ Peek(x0, kLastMatchInfoOffset);
+ __ PopCPURegList(used_callee_saved_registers);
+ // Drop the 4 arguments of the stub from the stack.
+ __ Drop(4);
+ __ Ret();
+
+ __ Bind(&exception);
+ Register exception_value = x0;
+ // A stack overflow (on the backtrack stack) may have occured
+ // in the RegExp code but no exception has been created yet.
+ // If there is no pending exception, handle that in the runtime system.
+ __ Mov(x10, Operand(isolate->factory()->the_hole_value()));
+ __ Mov(x11,
+ Operand(ExternalReference(Isolate::kPendingExceptionAddress,
+ isolate)));
+ __ Ldr(exception_value, MemOperand(x11));
+ __ Cmp(x10, exception_value);
+ __ B(eq, &runtime);
+
+ __ Str(x10, MemOperand(x11)); // Clear pending exception.
+
+ // Check if the exception is a termination. If so, throw as uncatchable.
+ Label termination_exception;
+ __ JumpIfRoot(exception_value,
+ Heap::kTerminationExceptionRootIndex,
+ &termination_exception);
+
+ __ Throw(exception_value, x10, x11, x12, x13);
+
+ __ Bind(&termination_exception);
+ __ ThrowUncatchable(exception_value, x10, x11, x12, x13);
+
+ __ Bind(&failure);
+ __ Mov(x0, Operand(masm->isolate()->factory()->null_value()));
+ __ PopCPURegList(used_callee_saved_registers);
+ // Drop the 4 arguments of the stub from the stack.
+ __ Drop(4);
+ __ Ret();
+
+ __ Bind(&runtime);
+ __ PopCPURegList(used_callee_saved_registers);
+ __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
+
+ // Deferred code for string handling.
+ // (6) Not a long external string? If yes, go to (8).
+ __ Bind(¬_seq_nor_cons);
+ // Compare flags are still set.
+ __ B(ne, ¬_long_external); // Go to (8).
+
+ // (7) External string. Make it, offset-wise, look like a sequential string.
+ __ Bind(&external_string);
+ if (masm->emit_debug_code()) {
+ // Assert that we do not have a cons or slice (indirect strings) here.
+ // Sequential strings have already been ruled out.
+ __ Ldr(x10, FieldMemOperand(subject, HeapObject::kMapOffset));
+ __ Ldrb(x10, FieldMemOperand(x10, Map::kInstanceTypeOffset));
+ __ Tst(x10, kIsIndirectStringMask);
+ __ Check(eq, kExternalStringExpectedButNotFound);
+ __ And(x10, x10, kStringRepresentationMask);
+ __ Cmp(x10, 0);
+ __ Check(ne, kExternalStringExpectedButNotFound);
+ }
+ __ Ldr(subject,
+ FieldMemOperand(subject, ExternalString::kResourceDataOffset));
+ // Move the pointer so that offset-wise, it looks like a sequential string.
+ STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
+ __ Sub(subject, subject, SeqTwoByteString::kHeaderSize - kHeapObjectTag);
+ __ B(&seq_string); // Go to (5).
+
+ // (8) If this is a short external string or not a string, bail out to
+ // runtime.
+ __ Bind(¬_long_external);
+ STATIC_ASSERT(kShortExternalStringTag != 0);
+ __ TestAndBranchIfAnySet(string_representation,
+ kShortExternalStringMask | kIsNotStringMask,
+ &runtime);
+
+ // (9) Sliced string. Replace subject with parent.
+ __ Ldr(sliced_string_offset,
+ UntagSmiFieldMemOperand(subject, SlicedString::kOffsetOffset));
+ __ Ldr(subject, FieldMemOperand(subject, SlicedString::kParentOffset));
+ __ B(&check_underlying); // Go to (4).
+#endif
+}
+
+
+// TODO(jbramley): Don't use static registers here, but take them as arguments.
+static void GenerateRecordCallTarget(MacroAssembler* masm) {
+ ASM_LOCATION("GenerateRecordCallTarget");
+ // Cache the called function in a feedback vector slot. Cache states are
+ // uninitialized, monomorphic (indicated by a JSFunction), and megamorphic.
+ // The slot holds one of:
+ //   the-hole:          uninitialized,
+ //   a JSFunction:      monomorphic call to that function,
+ //   an AllocationSite: monomorphic call to the Array() constructor,
+ //   undefined:         megamorphic.
+ //
+ // Register usage (all of x0-x3 are preserved; x4, x5 and x10 are clobbered):
+ // x0 : number of arguments to the construct function
+ // x1 : the function to call
+ // x2 : feedback vector
+ // x3 : slot in feedback vector (smi)
+ Label initialize, done, miss, megamorphic, not_array_function;
+
+ // The code below relies on the identity of the two sentinel values.
+ ASSERT_EQ(*TypeFeedbackInfo::MegamorphicSentinel(masm->isolate()),
+ masm->isolate()->heap()->undefined_value());
+ ASSERT_EQ(*TypeFeedbackInfo::UninitializedSentinel(masm->isolate()),
+ masm->isolate()->heap()->the_hole_value());
+
+ // Load the cache state into x4.
+ __ Add(x4, x2, Operand::UntagSmiAndScale(x3, kPointerSizeLog2));
+ __ Ldr(x4, FieldMemOperand(x4, FixedArray::kHeaderSize));
+
+ // A monomorphic cache hit or an already megamorphic state: invoke the
+ // function without changing the state.
+ __ Cmp(x4, x1);
+ __ B(eq, &done);
+
+ // If we came here, we need to see if we are the array function.
+ // If we didn't have a matching function, and we didn't find the megamorphic
+ // sentinel, then we have in the slot either some other function or an
+ // AllocationSite. Do a map check on the object in x4.
+ __ Ldr(x5, FieldMemOperand(x4, AllocationSite::kMapOffset));
+ __ JumpIfNotRoot(x5, Heap::kAllocationSiteMapRootIndex, &miss);
+
+ // Make sure the function is the Array() function
+ __ LoadArrayFunction(x4);
+ __ Cmp(x1, x4);
+ __ B(ne, &megamorphic);
+ __ B(&done);
+
+ __ Bind(&miss);
+
+ // A monomorphic miss (i.e, here the cache is not uninitialized) goes
+ // megamorphic.
+ __ JumpIfRoot(x4, Heap::kTheHoleValueRootIndex, &initialize);
+ // MegamorphicSentinel is an immortal immovable object (undefined) so no
+ // write-barrier is needed.
+ __ Bind(&megamorphic);
+ __ Add(x4, x2, Operand::UntagSmiAndScale(x3, kPointerSizeLog2));
+ __ LoadRoot(x10, Heap::kUndefinedValueRootIndex);
+ __ Str(x10, FieldMemOperand(x4, FixedArray::kHeaderSize))
+ __ B(&done);
+
+ // An uninitialized cache is patched with the function or sentinel to
+ // indicate the ElementsKind if function is the Array constructor.
+ __ Bind(&initialize);
+ // Make sure the function is the Array() function
+ __ LoadArrayFunction(x4);
+ __ Cmp(x1, x4);
+ __ B(ne, &not_array_function);
+
+ // The target function is the Array constructor.
+ // Create an AllocationSite if we don't already have it, store it in the slot.
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ CreateAllocationSiteStub create_stub;
+
+ // Arguments register must be smi-tagged to call out.
+ __ SmiTag(x0);
+ __ Push(x0, x1, x2, x3);
+
+ // CreateAllocationSiteStub reads the feedback vector and slot from the
+ // stack and stores the new AllocationSite into the slot itself.
+ __ CallStub(&create_stub);
+
+ __ Pop(x3, x2, x1, x0);
+ __ SmiUntag(x0);
+ }
+ __ B(&done);
+
+ __ Bind(&not_array_function);
+ // An uninitialized cache is patched with the function.
+
+ __ Add(x4, x2, Operand::UntagSmiAndScale(x3, kPointerSizeLog2));
+ // TODO(all): Does the value need to be left in x4? If not, FieldMemOperand
+ // could be used to avoid this add.
+ __ Add(x4, x4, FixedArray::kHeaderSize - kHeapObjectTag);
+ __ Str(x1, MemOperand(x4, 0));
+
+ // The function was written into the heap, so notify the write barrier.
+ // x1/x2/x4 are saved around RecordWrite because it may clobber its inputs.
+ __ Push(x4, x2, x1);
+ __ RecordWrite(x2, x4, x1, kLRHasNotBeenSaved, kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
+ __ Pop(x1, x2, x4);
+
+ // TODO(all): Are x4, x2 and x1 outputs? This isn't clear.
+
+ __ Bind(&done);
+}
+
+
+void CallFunctionStub::Generate(MacroAssembler* masm) {
+ ASM_LOCATION("CallFunctionStub::Generate");
+ // Calls x1 as a function with argc_ arguments already on the stack.
+ // Register usage:
+ // x1 : the function to call
+ // x2 : feedback vector
+ // x3 : slot in feedback vector (smi) (if x2 is not undefined)
+ Register function = x1;
+ Register cache_cell = x2;
+ Register slot = x3;
+ Register type = x4;
+ Label slow, non_function, wrap, cont;
+
+ // TODO(jbramley): This function has a lot of unnamed registers. Name them,
+ // and tidy things up a bit.
+
+ if (NeedsChecks()) {
+ // Check that the function is really a JavaScript function.
+ __ JumpIfSmi(function, &non_function);
+
+ // Goto slow case if we do not have a function. On success this leaves the
+ // instance type in 'type' (x4), which the slow path below relies on.
+ __ JumpIfNotObjectType(function, x10, type, JS_FUNCTION_TYPE, &slow);
+
+ if (RecordCallTarget()) {
+ GenerateRecordCallTarget(masm);
+ }
+ }
+
+ // Fast-case: Invoke the function now.
+ // x1 function pushed function
+ ParameterCount actual(argc_);
+
+ if (CallAsMethod()) {
+ if (NeedsChecks()) {
+ // Do not transform the receiver for strict mode functions.
+ __ Ldr(x3, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
+ __ Ldr(w4, FieldMemOperand(x3, SharedFunctionInfo::kCompilerHintsOffset));
+ __ Tbnz(w4, SharedFunctionInfo::kStrictModeFunction, &cont);
+
+ // Do not transform the receiver for native (Compilerhints already in x3).
+ __ Tbnz(w4, SharedFunctionInfo::kNative, &cont);
+ }
+
+ // Compute the receiver in non-strict mode. The receiver is the last
+ // argument pushed, so it sits argc_ slots below the stack pointer.
+ __ Peek(x3, argc_ * kPointerSize);
+
+ if (NeedsChecks()) {
+ // Smis and non-spec-objects must be boxed before the call (see &wrap).
+ __ JumpIfSmi(x3, &wrap);
+ __ JumpIfObjectType(x3, x10, type, FIRST_SPEC_OBJECT_TYPE, &wrap, lt);
+ } else {
+ __ B(&wrap);
+ }
+
+ __ Bind(&cont);
+ }
+ __ InvokeFunction(function,
+ actual,
+ JUMP_FUNCTION,
+ NullCallWrapper());
+
+ if (NeedsChecks()) {
+ // Slow-case: Non-function called.
+ __ Bind(&slow);
+ if (RecordCallTarget()) {
+ // If there is a call target cache, mark it megamorphic in the
+ // non-function case. MegamorphicSentinel is an immortal immovable object
+ // (undefined) so no write barrier is needed.
+ ASSERT_EQ(*TypeFeedbackInfo::MegamorphicSentinel(masm->isolate()),
+ masm->isolate()->heap()->undefined_value());
+ __ Add(x12, cache_cell, Operand::UntagSmiAndScale(slot,
+ kPointerSizeLog2));
+ __ LoadRoot(x11, Heap::kUndefinedValueRootIndex);
+ __ Str(x11, FieldMemOperand(x12, FixedArray::kHeaderSize));
+ }
+ // Check for function proxy.
+ // x4 (type) : instance type of the callee, set by JumpIfNotObjectType.
+ __ CompareAndBranch(type, JS_FUNCTION_PROXY_TYPE, ne, &non_function);
+ __ Push(function); // put proxy as additional argument
+ __ Mov(x0, argc_ + 1);
+ __ Mov(x2, 0);
+ __ GetBuiltinFunction(x1, Builtins::CALL_FUNCTION_PROXY);
+ {
+ Handle<Code> adaptor =
+ masm->isolate()->builtins()->ArgumentsAdaptorTrampoline();
+ __ Jump(adaptor, RelocInfo::CODE_TARGET);
+ }
+
+ // CALL_NON_FUNCTION expects the non-function callee as receiver (instead
+ // of the original receiver from the call site).
+ __ Bind(&non_function);
+ __ Poke(function, argc_ * kXRegSizeInBytes);
+ __ Mov(x0, argc_); // Set up the number of arguments.
+ __ Mov(x2, 0);
+ __ GetBuiltinFunction(function, Builtins::CALL_NON_FUNCTION);
+ __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
+ RelocInfo::CODE_TARGET);
+ }
+
+ if (CallAsMethod()) {
+ __ Bind(&wrap);
+ // Wrap the receiver and patch it back onto the stack.
+ { FrameScope frame_scope(masm, StackFrame::INTERNAL);
+ __ Push(x1, x3);
+ // TO_OBJECT leaves the wrapped receiver in x0.
+ __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
+ __ Pop(x1);
+ }
+ __ Poke(x0, argc_ * kPointerSize);
+ __ B(&cont);
+ }
+}
+
+
+void CallConstructStub::Generate(MacroAssembler* masm) {
+ ASM_LOCATION("CallConstructStub::Generate");
+ // Invokes x1 as a constructor, dispatching to its construct stub, or to
+ // the proxy/non-function builtins via the arguments adaptor.
+ // x0 : number of arguments
+ // x1 : the function to call
+ // x2 : feedback vector
+ // x3 : slot in feedback vector (smi) (if x2 is not undefined)
+ Register function = x1;
+ Label slow, non_function_call;
+
+ // Check that the function is not a smi.
+ __ JumpIfSmi(function, &non_function_call);
+ // Check that the function is a JSFunction. On failure the instance type is
+ // left in object_type for the proxy check in the slow path.
+ Register object_type = x10;
+ __ JumpIfNotObjectType(function, object_type, object_type, JS_FUNCTION_TYPE,
+ &slow);
+
+ if (RecordCallTarget()) {
+ GenerateRecordCallTarget(masm);
+ }
+
+ // Jump to the function-specific construct stub.
+ // The same register is aliased under several names to document each step of
+ // the SharedFunctionInfo -> construct stub -> code entry chain.
+ Register jump_reg = x4;
+ Register shared_func_info = jump_reg;
+ Register cons_stub = jump_reg;
+ Register cons_stub_code = jump_reg;
+ __ Ldr(shared_func_info,
+ FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
+ __ Ldr(cons_stub,
+ FieldMemOperand(shared_func_info,
+ SharedFunctionInfo::kConstructStubOffset));
+ // Skip the Code object header to reach the first instruction.
+ __ Add(cons_stub_code, cons_stub, Code::kHeaderSize - kHeapObjectTag);
+ __ Br(cons_stub_code);
+
+ Label do_call;
+ __ Bind(&slow);
+ __ Cmp(object_type, JS_FUNCTION_PROXY_TYPE);
+ __ B(ne, &non_function_call);
+ __ GetBuiltinFunction(x1, Builtins::CALL_FUNCTION_PROXY_AS_CONSTRUCTOR);
+ __ B(&do_call);
+
+ __ Bind(&non_function_call);
+ __ GetBuiltinFunction(x1, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
+
+ __ Bind(&do_call);
+ // Set expected number of arguments to zero (not changing x0).
+ __ Mov(x2, 0);
+ __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
+ RelocInfo::CODE_TARGET);
+}
+
+
+void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
+ // Fast path: on success, falls through with the character code as a smi in
+ // result_. Bails out to the labels configured on this generator
+ // (receiver_not_string_, index_not_smi_, index_out_of_range_,
+ // call_runtime_) for every non-trivial case; GenerateSlow handles those.
+
+ // If the receiver is a smi trigger the non-string case.
+ __ JumpIfSmi(object_, receiver_not_string_);
+
+ // Fetch the instance type of the receiver into result register.
+ __ Ldr(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
+ __ Ldrb(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
+
+ // If the receiver is not a string trigger the non-string case.
+ __ TestAndBranchIfAnySet(result_, kIsNotStringMask, receiver_not_string_);
+
+ // If the index is non-smi trigger the non-smi case.
+ __ JumpIfNotSmi(index_, &index_not_smi_);
+
+ // GenerateSlow jumps back here after converting a heap-number index.
+ __ Bind(&got_smi_index_);
+ // Check for index out of range. An unsigned 'ls' comparison also catches
+ // negative indices.
+ __ Ldrsw(result_, UntagSmiFieldMemOperand(object_, String::kLengthOffset));
+ __ Cmp(result_, Operand::UntagSmi(index_));
+ __ B(ls, index_out_of_range_);
+
+ // StringCharLoadGenerator expects an untagged index; GenerateSlow re-tags
+ // it before calling the runtime.
+ __ SmiUntag(index_);
+
+ StringCharLoadGenerator::Generate(masm,
+ object_,
+ index_,
+ result_,
+ &call_runtime_);
+ __ SmiTag(result_);
+ __ Bind(&exit_);
+}
+
+
+void StringCharCodeAtGenerator::GenerateSlow(
+ MacroAssembler* masm,
+ const RuntimeCallHelper& call_helper) {
+ // Slow-path counterpart of GenerateFast: converts a non-smi index via the
+ // runtime and retries the fast path, or computes the char code entirely in
+ // the runtime. This code is only reachable through the labels bound below,
+ // so a fall-through into it is a bug.
+ __ Abort(kUnexpectedFallthroughToCharCodeAtSlowCase);
+
+ __ Bind(&index_not_smi_);
+ // If index is a heap number, try converting it to an integer.
+ __ CheckMap(index_,
+ result_,
+ Heap::kHeapNumberMapRootIndex,
+ index_not_number_,
+ DONT_DO_SMI_CHECK);
+ call_helper.BeforeCall(masm);
+ // Save object_ on the stack and pass index_ as argument for runtime call.
+ __ Push(object_, index_);
+ if (index_flags_ == STRING_INDEX_IS_NUMBER) {
+ __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero, 1);
+ } else {
+ ASSERT(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX);
+ // NumberToSmi discards numbers that are not exact integers.
+ __ CallRuntime(Runtime::kNumberToSmi, 1);
+ }
+ // Save the conversion result before the pop instructions below
+ // have a chance to overwrite it.
+ __ Mov(index_, x0);
+ __ Pop(object_);
+ // Reload the instance type.
+ __ Ldr(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
+ __ Ldrb(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
+ call_helper.AfterCall(masm);
+
+ // If index is still not a smi, it must be out of range.
+ __ JumpIfNotSmi(index_, index_out_of_range_);
+ // Otherwise, return to the fast path.
+ __ B(&got_smi_index_);
+
+ // Call runtime. We get here when the receiver is a string and the
+ // index is a number, but the code of getting the actual character
+ // is too complex (e.g., when the string needs to be flattened).
+ __ Bind(&call_runtime_);
+ call_helper.BeforeCall(masm);
+ // GenerateFast untagged the index; re-tag it for the runtime call.
+ __ SmiTag(index_);
+ __ Push(object_, index_);
+ __ CallRuntime(Runtime::kStringCharCodeAt, 2);
+ __ Mov(result_, x0);
+ call_helper.AfterCall(masm);
+ __ B(&exit_);
+
+ __ Abort(kUnexpectedFallthroughFromCharCodeAtSlowCase);
+}
+
+
+void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
+ // Fast path: look up the single-character string for a one-byte char code
+ // (a smi in code_) in the single-character string cache. Bails out to
+ // slow_case_ for non-smi or multi-byte codes, or on a cache miss.
+ __ JumpIfNotSmi(code_, &slow_case_);
+ __ Cmp(code_, Operand(Smi::FromInt(String::kMaxOneByteCharCode)));
+ __ B(hi, &slow_case_);
+
+ __ LoadRoot(result_, Heap::kSingleCharacterStringCacheRootIndex);
+ // At this point code register contains smi tagged ASCII char code.
+ // Shifting the smi right by kSmiShift would untag it; shifting left by
+ // kPointerSizeLog2 would scale it to a FixedArray byte offset. Both are
+ // combined into a single right shift, which the assert keeps non-negative.
+ STATIC_ASSERT(kSmiShift > kPointerSizeLog2);
+ __ Add(result_, result_, Operand(code_, LSR, kSmiShift - kPointerSizeLog2));
+ __ Ldr(result_, FieldMemOperand(result_, FixedArray::kHeaderSize));
+ // An undefined entry means the string is not cached yet: fall back.
+ __ JumpIfRoot(result_, Heap::kUndefinedValueRootIndex, &slow_case_);
+ __ Bind(&exit_);
+}
+
+
+void StringCharFromCodeGenerator::GenerateSlow(
+ MacroAssembler* masm,
+ const RuntimeCallHelper& call_helper) {
+ // Slow-path counterpart of GenerateFast: builds the one-character string in
+ // the runtime. Only reachable via slow_case_, so a fall-through is a bug.
+ __ Abort(kUnexpectedFallthroughToCharFromCodeSlowCase);
+
+ __ Bind(&slow_case_);
+ call_helper.BeforeCall(masm);
+ __ Push(code_);
+ __ CallRuntime(Runtime::kCharFromCode, 1);
+ __ Mov(result_, x0);
+ call_helper.AfterCall(masm);
+ __ B(&exit_);
+
+ __ Abort(kUnexpectedFallthroughFromCharFromCodeSlowCase);
+}
+
+
+void ICCompareStub::GenerateSmis(MacroAssembler* masm) {
+ // Inputs are in x1 (lhs) and x0 (rhs), matching the register assignment in
+ // the other ICCompareStub generators below: the non-equality path computes
+ // untag(x1) - untag(x0), i.e. lhs - rhs, so its sign orders lhs vs rhs.
+ // Returns the (untagged) difference in x0.
+ ASSERT(state_ == CompareIC::SMI);
+ ASM_LOCATION("ICCompareStub[Smis]");
+ Label miss;
+ // Bail out (to 'miss') unless both x0 and x1 are smis.
+ __ JumpIfEitherNotSmi(x0, x1, &miss);
+
+ // TODO(jbramley): Why do we only set the flags for EQ?
+ if (GetCondition() == eq) {
+ // For equality we do not care about the sign of the result.
+ // The result is zero iff the smis are equal, tag bits included.
+ __ Subs(x0, x0, x1);
+ } else {
+ // Untag before subtracting to avoid handling overflow.
+ __ SmiUntag(x1);
+ __ Sub(x0, x1, Operand::UntagSmi(x0));
+ }
+ __ Ret();
+
+ __ Bind(&miss);
+ GenerateMiss(masm);
+}
+
+
+void ICCompareStub::GenerateNumbers(MacroAssembler* masm) {
+ // Compares two numbers (smis or heap numbers) in x1 (lhs) and x0 (rhs) and
+ // returns -1 (LESS), 0 (EQUAL) or 1 (GREATER) in x0. Unordered comparisons
+ // (NaN, or undefined for ordered relational ops) fall back to the generic
+ // stub; anything else that isn't a number goes to the miss handler.
+ ASSERT(state_ == CompareIC::NUMBER);
+ ASM_LOCATION("ICCompareStub[HeapNumbers]");
+
+ Label unordered, maybe_undefined1, maybe_undefined2;
+ Label miss, handle_lhs, values_in_d_regs;
+ Label untag_rhs, untag_lhs;
+
+ Register result = x0;
+ Register rhs = x0;
+ Register lhs = x1;
+ FPRegister rhs_d = d0;
+ FPRegister lhs_d = d1;
+
+ if (left_ == CompareIC::SMI) {
+ __ JumpIfNotSmi(lhs, &miss);
+ }
+ if (right_ == CompareIC::SMI) {
+ __ JumpIfNotSmi(rhs, &miss);
+ }
+
+ // Speculatively convert both inputs as if they were smis; the results are
+ // overwritten below for inputs that turn out to be heap numbers.
+ __ SmiUntagToDouble(rhs_d, rhs, kSpeculativeUntag);
+ __ SmiUntagToDouble(lhs_d, lhs, kSpeculativeUntag);
+
+ // Load rhs if it's a heap number.
+ __ JumpIfSmi(rhs, &handle_lhs);
+ __ CheckMap(rhs, x10, Heap::kHeapNumberMapRootIndex, &maybe_undefined1,
+ DONT_DO_SMI_CHECK);
+ __ Ldr(rhs_d, FieldMemOperand(rhs, HeapNumber::kValueOffset));
+
+ // Load lhs if it's a heap number.
+ __ Bind(&handle_lhs);
+ __ JumpIfSmi(lhs, &values_in_d_regs);
+ __ CheckMap(lhs, x10, Heap::kHeapNumberMapRootIndex, &maybe_undefined2,
+ DONT_DO_SMI_CHECK);
+ __ Ldr(lhs_d, FieldMemOperand(lhs, HeapNumber::kValueOffset));
+
+ __ Bind(&values_in_d_regs);
+ __ Fcmp(lhs_d, rhs_d);
+ __ B(vs, &unordered); // Overflow flag set if either is NaN.
+ STATIC_ASSERT((LESS == -1) && (EQUAL == 0) && (GREATER == 1));
+ // Encode the result in two conditional instructions:
+ __ Cset(result, gt); // gt => 1, otherwise (lt, eq) => 0 (EQUAL).
+ __ Csinv(result, result, xzr, ge); // lt => -1, gt => 1, eq => 0.
+ __ Ret();
+
+ __ Bind(&unordered);
+ ICCompareStub stub(op_, CompareIC::GENERIC, CompareIC::GENERIC,
+ CompareIC::GENERIC);
+ __ Jump(stub.GetCode(masm->isolate()), RelocInfo::CODE_TARGET);
+
+ __ Bind(&maybe_undefined1);
+ if (Token::IsOrderedRelationalCompareOp(op_)) {
+ // For ordered relational compares, undefined compares as unordered.
+ __ JumpIfNotRoot(rhs, Heap::kUndefinedValueRootIndex, &miss);
+ __ JumpIfSmi(lhs, &unordered);
+ __ JumpIfNotObjectType(lhs, x10, x10, HEAP_NUMBER_TYPE, &maybe_undefined2);
+ __ B(&unordered);
+ }
+
+ __ Bind(&maybe_undefined2);
+ if (Token::IsOrderedRelationalCompareOp(op_)) {
+ __ JumpIfRoot(lhs, Heap::kUndefinedValueRootIndex, &unordered);
+ }
+
+ __ Bind(&miss);
+ GenerateMiss(masm);
+}
+
+
+// Compare IC stub for two internalized strings. Internalized strings are
+// unique, so equality reduces to pointer identity. Returns 0 (EQUAL) in x0
+// when identical, non-zero otherwise; misses for any other operand types.
+void ICCompareStub::GenerateInternalizedStrings(MacroAssembler* masm) {
+  ASSERT(state_ == CompareIC::INTERNALIZED_STRING);
+  ASM_LOCATION("ICCompareStub[InternalizedStrings]");
+  Label miss;
+
+  // Note: rhs aliases the result register (x0).
+  Register result = x0;
+  Register rhs = x0;
+  Register lhs = x1;
+
+  // Check that both operands are heap objects.
+  __ JumpIfEitherSmi(lhs, rhs, &miss);
+
+  // Check that both operands are internalized strings.
+  // The *_map and *_type registers alias; the maps are dead once the
+  // instance types have been loaded.
+  Register rhs_map = x10;
+  Register lhs_map = x11;
+  Register rhs_type = x10;
+  Register lhs_type = x11;
+  __ Ldr(lhs_map, FieldMemOperand(lhs, HeapObject::kMapOffset));
+  __ Ldr(rhs_map, FieldMemOperand(rhs, HeapObject::kMapOffset));
+  __ Ldrb(lhs_type, FieldMemOperand(lhs_map, Map::kInstanceTypeOffset));
+  __ Ldrb(rhs_type, FieldMemOperand(rhs_map, Map::kInstanceTypeOffset));
+
+  // Both tags are zero, so one combined test covers both operands.
+  STATIC_ASSERT((kInternalizedTag == 0) && (kStringTag == 0));
+  __ Orr(x12, lhs_type, rhs_type);
+  __ TestAndBranchIfAnySet(
+      x12, kIsNotStringMask | kIsNotInternalizedMask, &miss);
+
+  // Internalized strings are compared by identity.
+  STATIC_ASSERT(EQUAL == 0);
+  __ Cmp(lhs, rhs);
+  __ Cset(result, ne);  // 0 (EQUAL) if identical, 1 otherwise.
+  __ Ret();
+
+  __ Bind(&miss);
+  GenerateMiss(masm);
+}
+
+
+// Compare IC stub for two unique names (internalized strings or symbols).
+// Unique names compare by identity; only equality (eq) is supported.
+// Returns 0 (EQUAL) in x0 when identical, non-zero otherwise.
+void ICCompareStub::GenerateUniqueNames(MacroAssembler* masm) {
+  ASSERT(state_ == CompareIC::UNIQUE_NAME);
+  ASM_LOCATION("ICCompareStub[UniqueNames]");
+  ASSERT(GetCondition() == eq);
+  Label miss;
+
+  // Note: rhs aliases the result register (x0).
+  Register result = x0;
+  Register rhs = x0;
+  Register lhs = x1;
+
+  Register lhs_instance_type = w2;
+  Register rhs_instance_type = w3;
+
+  // Check that both operands are heap objects.
+  __ JumpIfEitherSmi(lhs, rhs, &miss);
+
+  // Check that both operands are unique names. This leaves the instance
+  // types loaded in tmp1 and tmp2.
+  __ Ldr(x10, FieldMemOperand(lhs, HeapObject::kMapOffset));
+  __ Ldr(x11, FieldMemOperand(rhs, HeapObject::kMapOffset));
+  __ Ldrb(lhs_instance_type, FieldMemOperand(x10, Map::kInstanceTypeOffset));
+  __ Ldrb(rhs_instance_type, FieldMemOperand(x11, Map::kInstanceTypeOffset));
+
+  // To avoid a miss, each instance type should be either SYMBOL_TYPE or it
+  // should have kInternalizedTag set.
+  __ JumpIfNotUniqueName(lhs_instance_type, &miss);
+  __ JumpIfNotUniqueName(rhs_instance_type, &miss);
+
+  // Unique names are compared by identity.
+  STATIC_ASSERT(EQUAL == 0);
+  __ Cmp(lhs, rhs);
+  __ Cset(result, ne);  // 0 (EQUAL) if identical, 1 otherwise.
+  __ Ret();
+
+  __ Bind(&miss);
+  GenerateMiss(masm);
+}
+
+
+// Compare IC stub for the STRING state: compares two string operands in
+// x1 (lhs) and x0 (rhs). Handles identical strings inline, non-identical
+// internalized strings (equality only), and flat ASCII strings; everything
+// else is delegated to the runtime. Misses for non-string operands.
+void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
+  ASSERT(state_ == CompareIC::STRING);
+  ASM_LOCATION("ICCompareStub[Strings]");
+
+  Label miss;
+
+  bool equality = Token::IsEqualityOp(op_);
+
+  // Note: rhs aliases the result register (x0).
+  Register result = x0;
+  Register rhs = x0;
+  Register lhs = x1;
+
+  // Check that both operands are heap objects.
+  __ JumpIfEitherSmi(rhs, lhs, &miss);
+
+  // Check that both operands are strings.
+  // The *_map and *_type registers alias; the maps are dead once the
+  // instance types have been loaded.
+  Register rhs_map = x10;
+  Register lhs_map = x11;
+  Register rhs_type = x10;
+  Register lhs_type = x11;
+  __ Ldr(lhs_map, FieldMemOperand(lhs, HeapObject::kMapOffset));
+  __ Ldr(rhs_map, FieldMemOperand(rhs, HeapObject::kMapOffset));
+  __ Ldrb(lhs_type, FieldMemOperand(lhs_map, Map::kInstanceTypeOffset));
+  __ Ldrb(rhs_type, FieldMemOperand(rhs_map, Map::kInstanceTypeOffset));
+  STATIC_ASSERT(kNotStringTag != 0);
+  __ Orr(x12, lhs_type, rhs_type);
+  __ Tbnz(x12, MaskToBit(kIsNotStringMask), &miss);
+
+  // Fast check for identical strings.
+  Label not_equal;
+  __ Cmp(lhs, rhs);
+  __ B(ne, &not_equal);
+  __ Mov(result, EQUAL);
+  __ Ret();
+
+  __ Bind(&not_equal);
+  // Handle not identical strings
+
+  // Check that both strings are internalized strings. If they are, we're done
+  // because we already know they are not identical. We know they are both
+  // strings.
+  if (equality) {
+    ASSERT(GetCondition() == eq);
+    STATIC_ASSERT(kInternalizedTag == 0);
+    Label not_internalized_strings;
+    __ Orr(x12, lhs_type, rhs_type);
+    __ TestAndBranchIfAnySet(
+        x12, kIsNotInternalizedMask, &not_internalized_strings);
+    // Result is in rhs (x0), and not EQUAL, as rhs is not a smi.
+    __ Ret();
+    __ Bind(&not_internalized_strings);
+  }
+
+  // Check that both strings are sequential ASCII.
+  Label runtime;
+  __ JumpIfBothInstanceTypesAreNotSequentialAscii(
+      lhs_type, rhs_type, x12, x13, &runtime);
+
+  // Compare flat ASCII strings. Returns when done.
+  if (equality) {
+    StringCompareStub::GenerateFlatAsciiStringEquals(
+        masm, lhs, rhs, x10, x11, x12);
+  } else {
+    StringCompareStub::GenerateCompareFlatAsciiStrings(
+        masm, lhs, rhs, x10, x11, x12, x13);
+  }
+
+  // Handle more complex cases in runtime.
+  __ Bind(&runtime);
+  __ Push(lhs, rhs);
+  if (equality) {
+    __ TailCallRuntime(Runtime::kStringEquals, 2, 1);
+  } else {
+    __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
+  }
+
+  __ Bind(&miss);
+  GenerateMiss(masm);
+}
+
+
+// Compare IC stub for two JS_OBJECT_TYPE objects (equality only). Objects
+// compare by identity: x0 receives rhs - lhs, which is zero iff they are
+// the same object. Misses for smis or other instance types.
+void ICCompareStub::GenerateObjects(MacroAssembler* masm) {
+  ASSERT(state_ == CompareIC::OBJECT);
+  ASM_LOCATION("ICCompareStub[Objects]");
+
+  Label miss;
+
+  // Note: rhs aliases the result register (x0).
+  Register result = x0;
+  Register rhs = x0;
+  Register lhs = x1;
+
+  __ JumpIfEitherSmi(rhs, lhs, &miss);
+
+  __ JumpIfNotObjectType(rhs, x10, x10, JS_OBJECT_TYPE, &miss);
+  __ JumpIfNotObjectType(lhs, x10, x10, JS_OBJECT_TYPE, &miss);
+
+  ASSERT(GetCondition() == eq);
+  // Identity comparison: zero (EQUAL) iff rhs and lhs are the same object.
+  __ Sub(result, rhs, lhs);
+  __ Ret();
+
+  __ Bind(&miss);
+  GenerateMiss(masm);
+}
+
+
+// Compare IC stub for objects whose map is known at stub-generation time
+// (known_map_). Both operands must carry exactly that map; they then
+// compare by identity (x0 = rhs - lhs, zero iff same object).
+void ICCompareStub::GenerateKnownObjects(MacroAssembler* masm) {
+  ASM_LOCATION("ICCompareStub[KnownObjects]");
+
+  Label miss;
+
+  // Note: rhs aliases the result register (x0).
+  Register result = x0;
+  Register rhs = x0;
+  Register lhs = x1;
+
+  __ JumpIfEitherSmi(rhs, lhs, &miss);
+
+  Register rhs_map = x10;
+  Register lhs_map = x11;
+  __ Ldr(rhs_map, FieldMemOperand(rhs, HeapObject::kMapOffset));
+  __ Ldr(lhs_map, FieldMemOperand(lhs, HeapObject::kMapOffset));
+  __ Cmp(rhs_map, Operand(known_map_));
+  __ B(ne, &miss);
+  __ Cmp(lhs_map, Operand(known_map_));
+  __ B(ne, &miss);
+
+  // Identity comparison: zero (EQUAL) iff rhs and lhs are the same object.
+  __ Sub(result, rhs, lhs);
+  __ Ret();
+
+  __ Bind(&miss);
+  GenerateMiss(masm);
+}
+
+
+// This method handles the case where a compare stub had the wrong
+// implementation. It calls a miss handler, which re-writes the stub. All other
+// ICCompareStub::Generate* methods should fall back into this one if their
+// operands were not the expected types.
+void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
+  ASM_LOCATION("ICCompareStub[Miss]");
+
+  Register stub_entry = x11;
+  {
+    ExternalReference miss =
+      ExternalReference(IC_Utility(IC::kCompareIC_Miss), masm->isolate());
+
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    Register op = x10;
+    Register left = x1;
+    Register right = x0;
+    // Preserve some caller-saved registers.
+    __ Push(x1, x0, lr);
+    // Push the arguments.
+    __ Mov(op, Operand(Smi::FromInt(op_)));
+    __ Push(left, right, op);
+
+    // Call the miss handler. This also pops the arguments.
+    __ CallExternalReference(miss, 3);
+
+    // Compute the entry point of the rewritten stub.
+    // The miss handler returns the new stub's code object in x0.
+    __ Add(stub_entry, x0, Code::kHeaderSize - kHeapObjectTag);
+    // Restore caller-saved registers.
+    // Pop in reverse order of the Push above so each register gets its
+    // original value back (including the original operands in x0/x1).
+    __ Pop(lr, x0, x1);
+  }
+
+  // Tail-call to the new stub.
+  __ Jump(stub_entry);
+}
+
+
+// Starts a string-hash computation: seeds 'hash' from the heap's hash seed,
+// adds the first character, then applies one add/shift/xor mixing round.
+// 'hash' and 'character' must be distinct registers.
+void StringHelper::GenerateHashInit(MacroAssembler* masm,
+                                    Register hash,
+                                    Register character) {
+  ASSERT(!AreAliased(hash, character));
+
+  // hash = character + (character << 10);
+  __ LoadRoot(hash, Heap::kHashSeedRootIndex);
+  // Untag smi seed and add the character.
+  __ Add(hash, character, Operand(hash, LSR, kSmiShift));
+
+  // Compute hashes modulo 2^32 using a 32-bit W register.
+  Register hash_w = hash.W();
+
+  // hash += hash << 10;
+  __ Add(hash_w, hash_w, Operand(hash_w, LSL, 10));
+  // hash ^= hash >> 6;
+  __ Eor(hash_w, hash_w, Operand(hash_w, LSR, 6));
+}
+
+
+// Folds one more character into a running string hash: adds the character,
+// then applies the same add/shift/xor mixing round as GenerateHashInit.
+// 'hash' and 'character' must be distinct registers.
+void StringHelper::GenerateHashAddCharacter(MacroAssembler* masm,
+                                            Register hash,
+                                            Register character) {
+  ASSERT(!AreAliased(hash, character));
+
+  // hash += character;
+  __ Add(hash, hash, character);
+
+  // Compute hashes modulo 2^32 using a 32-bit W register.
+  Register hash_w = hash.W();
+
+  // hash += hash << 10;
+  __ Add(hash_w, hash_w, Operand(hash_w, LSL, 10));
+  // hash ^= hash >> 6;
+  __ Eor(hash_w, hash_w, Operand(hash_w, LSR, 6));
+}
+
+
+// Finalizes a running string hash: applies the final mixing rounds, masks
+// to String::kHashBitMask, and substitutes StringHasher::kZeroHash when the
+// masked result would be zero (zero is reserved as the "no hash" marker).
+void StringHelper::GenerateHashGetHash(MacroAssembler* masm,
+                                       Register hash,
+                                       Register scratch) {
+  // Compute hashes modulo 2^32 using a 32-bit W register.
+  Register hash_w = hash.W();
+  Register scratch_w = scratch.W();
+  ASSERT(!AreAliased(hash_w, scratch_w));
+
+  // hash += hash << 3;
+  __ Add(hash_w, hash_w, Operand(hash_w, LSL, 3));
+  // hash ^= hash >> 11;
+  __ Eor(hash_w, hash_w, Operand(hash_w, LSR, 11));
+  // hash += hash << 15;
+  __ Add(hash_w, hash_w, Operand(hash_w, LSL, 15));
+
+  // Ands sets the flags used by the Csel below.
+  __ Ands(hash_w, hash_w, String::kHashBitMask);
+
+  // if (hash == 0) hash = 27;
+  __ Mov(scratch_w, StringHasher::kZeroHash);
+  __ Csel(hash_w, scratch_w, hash_w, eq);
+}
+
+
+// Implements %_SubString(string, from, to). Validates the arguments, then
+// either returns the original string (trivial substring), creates a sliced
+// string (long enough result), or allocates and copies a fresh sequential
+// string. Falls back to the runtime for anything it cannot handle inline.
+void SubStringStub::Generate(MacroAssembler* masm) {
+  ASM_LOCATION("SubStringStub::Generate");
+  Label runtime;
+
+  // Stack frame on entry.
+  //  lr: return address
+  //  jssp[0]:  substring "to" offset
+  //  jssp[8]:  substring "from" offset
+  //  jssp[16]: pointer to string object
+
+  // This stub is called from the native-call %_SubString(...), so
+  // nothing can be assumed about the arguments. It is tested that:
+  //  "string" is a sequential string,
+  //  both "from" and "to" are smis, and
+  //  0 <= from <= to <= string.length (in debug mode.)
+  // If any of these assumptions fail, we call the runtime system.
+
+  static const int kToOffset = 0 * kPointerSize;
+  static const int kFromOffset = 1 * kPointerSize;
+  static const int kStringOffset = 2 * kPointerSize;
+
+  Register to = x0;
+  Register from = x15;
+  Register input_string = x10;
+  Register input_length = x11;
+  Register input_type = x12;
+  Register result_string = x0;
+  Register result_length = x1;
+  Register temp = x3;
+
+  __ Peek(to, kToOffset);
+  __ Peek(from, kFromOffset);
+
+  // Check that both from and to are smis. If not, jump to runtime.
+  __ JumpIfEitherNotSmi(from, to, &runtime);
+  __ SmiUntag(from);
+  __ SmiUntag(to);
+
+  // Calculate difference between from and to. If to < from, branch to runtime.
+  __ Subs(result_length, to, from);
+  __ B(mi, &runtime);
+
+  // Check from is positive.
+  __ Tbnz(from, kWSignBit, &runtime);
+
+  // Make sure first argument is a string.
+  __ Peek(input_string, kStringOffset);
+  __ JumpIfSmi(input_string, &runtime);
+  __ IsObjectJSStringType(input_string, input_type, &runtime);
+
+  Label single_char;
+  __ Cmp(result_length, 1);
+  __ B(eq, &single_char);
+
+  // Short-cut for the case of trivial substring.
+  Label return_x0;
+  __ Ldrsw(input_length,
+           UntagSmiFieldMemOperand(input_string, String::kLengthOffset));
+
+  __ Cmp(result_length, input_length);
+  __ CmovX(x0, input_string, eq);
+  //  Return original string.
+  __ B(eq, &return_x0);
+
+  // Longer than original string's length or negative: unsafe arguments.
+  __ B(hi, &runtime);
+
+  // Shorter than original string's length: an actual substring.
+
+  //   x0   to               substring end character offset
+  //   x1   result_length    length of substring result
+  //   x10  input_string     pointer to input string object
+  //   x10  unpacked_string  pointer to unpacked string object
+  //   x11  input_length     length of input string
+  //   x12  input_type       instance type of input string
+  //   x15  from             substring start character offset
+
+  // Deal with different string types: update the index if necessary and put
+  // the underlying string into register unpacked_string.
+  Label underlying_unpacked, sliced_string, seq_or_external_string;
+  Label update_instance_type;
+  // If the string is not indirect, it can only be sequential or external.
+  STATIC_ASSERT(kIsIndirectStringMask == (kSlicedStringTag & kConsStringTag));
+  STATIC_ASSERT(kIsIndirectStringMask != 0);
+
+  // Test for string types, and branch/fall through to appropriate unpacking
+  // code.
+  __ Tst(input_type, kIsIndirectStringMask);
+  __ B(eq, &seq_or_external_string);
+  __ Tst(input_type, kSlicedNotConsMask);
+  __ B(ne, &sliced_string);
+
+  Register unpacked_string = input_string;
+
+  // Cons string. Check whether it is flat, then fetch first part.
+  __ Ldr(temp, FieldMemOperand(input_string, ConsString::kSecondOffset));
+  __ JumpIfNotRoot(temp, Heap::kempty_stringRootIndex, &runtime);
+  __ Ldr(unpacked_string,
+         FieldMemOperand(input_string, ConsString::kFirstOffset));
+  __ B(&update_instance_type);
+
+  __ Bind(&sliced_string);
+  // Sliced string. Fetch parent and correct start index by offset.
+  __ Ldrsw(temp,
+           UntagSmiFieldMemOperand(input_string, SlicedString::kOffsetOffset));
+  __ Add(from, from, temp);
+  __ Ldr(unpacked_string,
+         FieldMemOperand(input_string, SlicedString::kParentOffset));
+
+  __ Bind(&update_instance_type);
+  __ Ldr(temp, FieldMemOperand(unpacked_string, HeapObject::kMapOffset));
+  __ Ldrb(input_type, FieldMemOperand(temp, Map::kInstanceTypeOffset));
+  // TODO(all): This generates "b #+0x4". Can these be optimised out?
+  __ B(&underlying_unpacked);
+
+  __ Bind(&seq_or_external_string);
+  // Sequential or external string. Registers unpacked_string and input_string
+  // alias, so there's nothing to do here.
+
+  //   x0   result_string    pointer to result string object (uninit)
+  //   x1   result_length    length of substring result
+  //   x10  unpacked_string  pointer to unpacked string object
+  //   x11  input_length     length of input string
+  //   x12  input_type       instance type of input string
+  //   x15  from             substring start character offset
+  __ Bind(&underlying_unpacked);
+
+  if (FLAG_string_slices) {
+    Label copy_routine;
+    __ Cmp(result_length, SlicedString::kMinLength);
+    // Short slice. Copy instead of slicing.
+    __ B(lt, &copy_routine);
+    // Allocate new sliced string. At this point we do not reload the instance
+    // type including the string encoding because we simply rely on the info
+    // provided by the original string. It does not matter if the original
+    // string's encoding is wrong because we always have to recheck encoding of
+    // the newly created string's parent anyway due to externalized strings.
+    Label two_byte_slice, set_slice_header;
+    STATIC_ASSERT((kStringEncodingMask & kOneByteStringTag) != 0);
+    STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
+    __ Tbz(input_type, MaskToBit(kStringEncodingMask), &two_byte_slice);
+    __ AllocateAsciiSlicedString(result_string, result_length, x3, x4,
+                                 &runtime);
+    __ B(&set_slice_header);
+
+    __ Bind(&two_byte_slice);
+    __ AllocateTwoByteSlicedString(result_string, result_length, x3, x4,
+                                   &runtime);
+
+    __ Bind(&set_slice_header);
+    __ SmiTag(from);
+    __ Str(from, FieldMemOperand(result_string, SlicedString::kOffsetOffset));
+    __ Str(unpacked_string,
+           FieldMemOperand(result_string, SlicedString::kParentOffset));
+    __ B(&return_x0);
+
+    __ Bind(&copy_routine);
+  }
+
+  //   x0   result_string    pointer to result string object (uninit)
+  //   x1   result_length    length of substring result
+  //   x10  unpacked_string  pointer to unpacked string object
+  //   x11  input_length     length of input string
+  //   x12  input_type       instance type of input string
+  //   x13  unpacked_char0   pointer to first char of unpacked string (uninit)
+  //   x13  substring_char0  pointer to first char of substring (uninit)
+  //   x14  result_char0     pointer to first char of result (uninit)
+  //   x15  from             substring start character offset
+  Register unpacked_char0 = x13;
+  Register substring_char0 = x13;
+  Register result_char0 = x14;
+  Label two_byte_sequential, sequential_string, allocate_result;
+  STATIC_ASSERT(kExternalStringTag != 0);
+  STATIC_ASSERT(kSeqStringTag == 0);
+
+  __ Tst(input_type, kExternalStringTag);
+  __ B(eq, &sequential_string);
+
+  __ Tst(input_type, kShortExternalStringTag);
+  __ B(ne, &runtime);
+  __ Ldr(unpacked_char0,
+         FieldMemOperand(unpacked_string, ExternalString::kResourceDataOffset));
+  // unpacked_char0 points to the first character of the underlying string.
+  __ B(&allocate_result);
+
+  __ Bind(&sequential_string);
+  // Locate first character of underlying subject string.
+  STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
+  __ Add(unpacked_char0, unpacked_string,
+         SeqOneByteString::kHeaderSize - kHeapObjectTag);
+
+  __ Bind(&allocate_result);
+  // Sequential ASCII string. Allocate the result.
+  STATIC_ASSERT((kOneByteStringTag & kStringEncodingMask) != 0);
+  __ Tbz(input_type, MaskToBit(kStringEncodingMask), &two_byte_sequential);
+
+  // Allocate and copy the resulting ASCII string.
+  __ AllocateAsciiString(result_string, result_length, x3, x4, x5, &runtime);
+
+  // Locate first character of substring to copy.
+  __ Add(substring_char0, unpacked_char0, from);
+
+  // Locate first character of result.
+  __ Add(result_char0, result_string,
+         SeqOneByteString::kHeaderSize - kHeapObjectTag);
+
+  STATIC_ASSERT((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
+  __ CopyBytes(result_char0, substring_char0, result_length, x3, kCopyLong);
+  __ B(&return_x0);
+
+  // Allocate and copy the resulting two-byte string.
+  __ Bind(&two_byte_sequential);
+  __ AllocateTwoByteString(result_string, result_length, x3, x4, x5, &runtime);
+
+  // Locate first character of substring to copy.
+  __ Add(substring_char0, unpacked_char0, Operand(from, LSL, 1));
+
+  // Locate first character of result.
+  __ Add(result_char0, result_string,
+         SeqTwoByteString::kHeaderSize - kHeapObjectTag);
+
+  STATIC_ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
+  // Double the length to get the byte count for the two-byte copy.
+  __ Add(result_length, result_length, result_length);
+  __ CopyBytes(result_char0, substring_char0, result_length, x3, kCopyLong);
+
+  __ Bind(&return_x0);
+  Counters* counters = masm->isolate()->counters();
+  __ IncrementCounter(counters->sub_string_native(), 1, x3, x4);
+  __ Drop(3);
+  __ Ret();
+
+  __ Bind(&runtime);
+  __ TailCallRuntime(Runtime::kSubString, 3, 1);
+
+  __ Bind(&single_char);
+  // x1: result_length
+  // x10: input_string
+  // x12: input_type
+  // x15: from (untagged)
+  __ SmiTag(from);
+  StringCharAtGenerator generator(
+      input_string, from, result_length, x0,
+      &runtime, &runtime, &runtime, STRING_INDEX_IS_NUMBER);
+  generator.GenerateFast(masm);
+  // TODO(jbramley): Why doesn't this jump to return_x0?
+  __ Drop(3);
+  __ Ret();
+  generator.SkipSlow(masm, &runtime);
+}
+
+
+// Emits an equality check for two flat ASCII strings. Returns (in x0) the
+// smi NOT_EQUAL if the lengths or any characters differ, otherwise the smi
+// EQUAL. Emits Ret() on every path; does not fall through.
+void StringCompareStub::GenerateFlatAsciiStringEquals(MacroAssembler* masm,
+                                                      Register left,
+                                                      Register right,
+                                                      Register scratch1,
+                                                      Register scratch2,
+                                                      Register scratch3) {
+  ASSERT(!AreAliased(left, right, scratch1, scratch2, scratch3));
+  Register result = x0;
+  Register left_length = scratch1;
+  Register right_length = scratch2;
+
+  // Compare lengths. If lengths differ, strings can't be equal. Lengths are
+  // smis, and don't need to be untagged.
+  Label strings_not_equal, check_zero_length;
+  __ Ldr(left_length, FieldMemOperand(left, String::kLengthOffset));
+  __ Ldr(right_length, FieldMemOperand(right, String::kLengthOffset));
+  __ Cmp(left_length, right_length);
+  __ B(eq, &check_zero_length);
+
+  __ Bind(&strings_not_equal);
+  __ Mov(result, Operand(Smi::FromInt(NOT_EQUAL)));
+  __ Ret();
+
+  // Check if the length is zero. If so, the strings must be equal (and empty.)
+  Label compare_chars;
+  __ Bind(&check_zero_length);
+  STATIC_ASSERT(kSmiTag == 0);
+  __ Cbnz(left_length, &compare_chars);
+  __ Mov(result, Operand(Smi::FromInt(EQUAL)));
+  __ Ret();
+
+  // Compare characters. Falls through if all characters are equal.
+  __ Bind(&compare_chars);
+  GenerateAsciiCharsCompareLoop(masm, left, right, left_length, scratch2,
+                                scratch3, &strings_not_equal);
+
+  // Characters in strings are equal.
+  __ Mov(result, Operand(Smi::FromInt(EQUAL)));
+  __ Ret();
+}
+
+
+// Emits a three-way comparison of two flat ASCII strings. Returns (in x0)
+// the smi LESS, EQUAL or GREATER. Compares characters up to the shorter
+// length, then falls back to comparing lengths. Emits Ret(); does not fall
+// through.
+void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
+                                                        Register left,
+                                                        Register right,
+                                                        Register scratch1,
+                                                        Register scratch2,
+                                                        Register scratch3,
+                                                        Register scratch4) {
+  ASSERT(!AreAliased(left, right, scratch1, scratch2, scratch3, scratch4));
+  Label result_not_equal, compare_lengths;
+
+  // Find minimum length and length difference.
+  Register length_delta = scratch3;
+  __ Ldr(scratch1, FieldMemOperand(left, String::kLengthOffset));
+  __ Ldr(scratch2, FieldMemOperand(right, String::kLengthOffset));
+  __ Subs(length_delta, scratch1, scratch2);
+
+  Register min_length = scratch1;
+  __ Csel(min_length, scratch2, scratch1, gt);
+  __ Cbz(min_length, &compare_lengths);
+
+  // Compare loop.
+  GenerateAsciiCharsCompareLoop(masm,
+                                left, right, min_length, scratch2, scratch4,
+                                &result_not_equal);
+
+  // Compare lengths - strings up to min-length are equal.
+  __ Bind(&compare_lengths);
+
+  ASSERT(Smi::FromInt(EQUAL) == static_cast<Smi*>(0));
+
+  // Use length_delta as result if it's zero.
+  Register result = x0;
+  // Subs (rather than Mov) so the flags reflect the sign of length_delta;
+  // they feed the CmovX pair below, just like the character Cmp does on the
+  // result_not_equal path.
+  __ Subs(result, length_delta, 0);
+
+  __ Bind(&result_not_equal);
+  Register greater = x10;
+  Register less = x11;
+  __ Mov(greater, Operand(Smi::FromInt(GREATER)));
+  __ Mov(less, Operand(Smi::FromInt(LESS)));
+  // Select the result based on the flags set by either the Subs above or
+  // the character comparison that branched here.
+  __ CmovX(result, greater, gt);
+  __ CmovX(result, less, lt);
+  __ Ret();
+}
+
+
+// Emits a byte-wise comparison loop over the first 'length' (smi) characters
+// of two sequential ASCII strings. Branches to 'chars_not_equal' at the
+// first difference (with the condition flags set by the failing Cmp), and
+// falls through when all compared characters match. Clobbers left, right
+// and length.
+void StringCompareStub::GenerateAsciiCharsCompareLoop(
+    MacroAssembler* masm,
+    Register left,
+    Register right,
+    Register length,
+    Register scratch1,
+    Register scratch2,
+    Label* chars_not_equal) {
+  ASSERT(!AreAliased(left, right, length, scratch1, scratch2));
+
+  // Change index to run from -length to -1 by adding length to string
+  // start. This means that loop ends when index reaches zero, which
+  // doesn't need an additional compare.
+  __ SmiUntag(length);
+  __ Add(scratch1, length, SeqOneByteString::kHeaderSize - kHeapObjectTag);
+  __ Add(left, left, scratch1);
+  __ Add(right, right, scratch1);
+
+  Register index = length;
+  __ Neg(index, length);  // index = -length;
+
+  // Compare loop
+  Label loop;
+  __ Bind(&loop);
+  __ Ldrb(scratch1, MemOperand(left, index));
+  __ Ldrb(scratch2, MemOperand(right, index));
+  __ Cmp(scratch1, scratch2);
+  __ B(ne, chars_not_equal);
+  __ Add(index, index, 1);
+  __ Cbnz(index, &loop);
+}
+
+
+// Three-way string comparison stub. Pops the two string arguments from the
+// stack, short-circuits the identical-pointer case, compares flat ASCII
+// strings natively, and tail-calls Runtime::kStringCompare for anything
+// else. Returns the smi LESS/EQUAL/GREATER in x0.
+void StringCompareStub::Generate(MacroAssembler* masm) {
+  Label runtime;
+
+  Counters* counters = masm->isolate()->counters();
+
+  // Stack frame on entry.
+  //  sp[0]: right string
+  //  sp[8]: left string
+  Register right = x10;
+  Register left = x11;
+  Register result = x0;
+  __ Pop(right, left);
+
+  Label not_same;
+  // Identical pointers compare equal; the zero difference doubles as the
+  // smi EQUAL result.
+  __ Subs(result, right, left);
+  __ B(ne, &not_same);
+  STATIC_ASSERT(EQUAL == 0);
+  __ IncrementCounter(counters->string_compare_native(), 1, x3, x4);
+  __ Ret();
+
+  __ Bind(&not_same);
+
+  // Check that both objects are sequential ASCII strings.
+  __ JumpIfEitherIsNotSequentialAsciiStrings(left, right, x12, x13, &runtime);
+
+  // Compare flat ASCII strings natively. Remove arguments from stack first,
+  // as this function will generate a return.
+  __ IncrementCounter(counters->string_compare_native(), 1, x3, x4);
+  GenerateCompareFlatAsciiStrings(masm, left, right, x12, x13, x14, x15);
+
+  __ Bind(&runtime);
+
+  // Push arguments back on to the stack.
+  //  sp[0] = right string
+  //  sp[8] = left string.
+  __ Push(left, right);
+
+  // Call the runtime.
+  // Returns -1 (less), 0 (equal), or 1 (greater) tagged as a small integer.
+  __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
+}
+
+
+// Array.prototype.push fast-path stub for a receiver in x0. Handles the
+// single-argument case inline (store into existing backing store, elements
+// transition with write barrier, or in-place new-space growth) and
+// delegates everything else to the c_ArrayPush builtin. Returns the new
+// length (smi) in x0.
+void ArrayPushStub::Generate(MacroAssembler* masm) {
+  Register receiver = x0;
+
+  int argc = arguments_count();
+
+  if (argc == 0) {
+    // Nothing to do, just return the length.
+    __ Ldr(x0, FieldMemOperand(receiver, JSArray::kLengthOffset));
+    __ Drop(argc + 1);
+    __ Ret();
+    return;
+  }
+
+  Isolate* isolate = masm->isolate();
+
+  if (argc != 1) {
+    // Multi-argument push: always delegate to the builtin.
+    __ TailCallExternalReference(
+        ExternalReference(Builtins::c_ArrayPush, isolate), argc + 1, 1);
+    return;
+  }
+
+  Label call_builtin, attempt_to_grow_elements, with_write_barrier;
+
+  Register elements_length = x8;
+  Register length = x7;
+  Register elements = x6;
+  Register end_elements = x5;
+  Register value = x4;
+  // Get the elements array of the object.
+  __ Ldr(elements, FieldMemOperand(receiver, JSArray::kElementsOffset));
+
+  if (IsFastSmiOrObjectElementsKind(elements_kind())) {
+    // Check that the elements are in fast mode and writable.
+    __ CheckMap(elements,
+                x10,
+                Heap::kFixedArrayMapRootIndex,
+                &call_builtin,
+                DONT_DO_SMI_CHECK);
+  }
+
+  // Get the array's length and calculate new length.
+  __ Ldr(length, FieldMemOperand(receiver, JSArray::kLengthOffset));
+  STATIC_ASSERT(kSmiTag == 0);
+  __ Add(length, length, Operand(Smi::FromInt(argc)));
+
+  // Check if we could survive without allocation.
+  __ Ldr(elements_length,
+         FieldMemOperand(elements, FixedArray::kLengthOffset));
+  __ Cmp(length, elements_length);
+
+  const int kEndElementsOffset =
+      FixedArray::kHeaderSize - kHeapObjectTag - argc * kPointerSize;
+
+  if (IsFastSmiOrObjectElementsKind(elements_kind())) {
+    __ B(gt, &attempt_to_grow_elements);
+
+    // Check if value is a smi.
+    __ Peek(value, (argc - 1) * kPointerSize);
+    __ JumpIfNotSmi(value, &with_write_barrier);
+
+    // Store the value.
+    // We may need a register containing the address end_elements below,
+    // so write back the value in end_elements.
+    __ Add(end_elements, elements,
+           Operand::UntagSmiAndScale(length, kPointerSizeLog2));
+    __ Str(value, MemOperand(end_elements, kEndElementsOffset, PreIndex));
+  } else {
+    // TODO(all): ARM has a redundant cmp here.
+    __ B(gt, &call_builtin);
+
+    __ Peek(value, (argc - 1) * kPointerSize);
+    __ StoreNumberToDoubleElements(value, length, elements, x10, d0, d1,
+                                   &call_builtin, argc * kDoubleSize);
+  }
+
+  // Save new length.
+  __ Str(length, FieldMemOperand(receiver, JSArray::kLengthOffset));
+
+  // Return length.
+  __ Drop(argc + 1);
+  __ Mov(x0, length);
+  __ Ret();
+
+  if (IsFastDoubleElementsKind(elements_kind())) {
+    __ Bind(&call_builtin);
+    __ TailCallExternalReference(
+        ExternalReference(Builtins::c_ArrayPush, isolate), argc + 1, 1);
+    return;
+  }
+
+  __ Bind(&with_write_barrier);
+
+  if (IsFastSmiElementsKind(elements_kind())) {
+    if (FLAG_trace_elements_transitions) {
+      __ B(&call_builtin);
+    }
+
+    __ Ldr(x10, FieldMemOperand(value, HeapObject::kMapOffset));
+    __ JumpIfHeapNumber(x10, &call_builtin);
+
+    ElementsKind target_kind = IsHoleyElementsKind(elements_kind())
+        ? FAST_HOLEY_ELEMENTS : FAST_ELEMENTS;
+    __ Ldr(x10, GlobalObjectMemOperand());
+    __ Ldr(x10, FieldMemOperand(x10, GlobalObject::kNativeContextOffset));
+    __ Ldr(x10, ContextMemOperand(x10, Context::JS_ARRAY_MAPS_INDEX));
+    const int header_size = FixedArrayBase::kHeaderSize;
+    // Verify that the object can be transitioned in place.
+    // Use the MacroAssembler Ldr/Cmp forms for consistency with the rest of
+    // this stub (the raw assembler forms were used here by mistake).
+    const int origin_offset = header_size + elements_kind() * kPointerSize;
+    __ Ldr(x11, FieldMemOperand(receiver, origin_offset));
+    __ Ldr(x12, FieldMemOperand(x10, HeapObject::kMapOffset));
+    __ Cmp(x11, x12);
+    __ B(ne, &call_builtin);
+
+    const int target_offset = header_size + target_kind * kPointerSize;
+    __ Ldr(x10, FieldMemOperand(x10, target_offset));
+    __ Mov(x11, receiver);
+    ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
+        masm, DONT_TRACK_ALLOCATION_SITE, NULL);
+  }
+
+  // Save new length.
+  __ Str(length, FieldMemOperand(receiver, JSArray::kLengthOffset));
+
+  // Store the value.
+  // We may need a register containing the address end_elements below,
+  // so write back the value in end_elements.
+  __ Add(end_elements, elements,
+         Operand::UntagSmiAndScale(length, kPointerSizeLog2));
+  __ Str(value, MemOperand(end_elements, kEndElementsOffset, PreIndex));
+
+  __ RecordWrite(elements,
+                 end_elements,
+                 value,
+                 kLRHasNotBeenSaved,
+                 kDontSaveFPRegs,
+                 EMIT_REMEMBERED_SET,
+                 OMIT_SMI_CHECK);
+  __ Drop(argc + 1);
+  __ Mov(x0, length);
+  __ Ret();
+
+  __ Bind(&attempt_to_grow_elements);
+
+  if (!FLAG_inline_new) {
+    __ B(&call_builtin);
+  }
+
+  Register argument = x2;
+  __ Peek(argument, (argc - 1) * kPointerSize);
+  // Growing elements that are SMI-only requires special handling in case
+  // the new element is non-Smi. For now, delegate to the builtin.
+  if (IsFastSmiElementsKind(elements_kind())) {
+    __ JumpIfNotSmi(argument, &call_builtin);
+  }
+
+  // We could be lucky and the elements array could be at the top of new-space.
+  // In this case we can just grow it in place by moving the allocation pointer
+  // up.
+  ExternalReference new_space_allocation_top =
+      ExternalReference::new_space_allocation_top_address(isolate);
+  ExternalReference new_space_allocation_limit =
+      ExternalReference::new_space_allocation_limit_address(isolate);
+
+  const int kAllocationDelta = 4;
+  ASSERT(kAllocationDelta >= argc);
+  Register allocation_top_addr = x5;
+  Register allocation_top = x9;
+  // Load top and check if it is the end of elements.
+  __ Add(end_elements, elements,
+         Operand::UntagSmiAndScale(length, kPointerSizeLog2));
+  __ Add(end_elements, end_elements, kEndElementsOffset);
+  __ Mov(allocation_top_addr, Operand(new_space_allocation_top));
+  __ Ldr(allocation_top, MemOperand(allocation_top_addr));
+  __ Cmp(end_elements, allocation_top);
+  __ B(ne, &call_builtin);
+
+  __ Mov(x10, Operand(new_space_allocation_limit));
+  __ Ldr(x10, MemOperand(x10));
+  __ Add(allocation_top, allocation_top, kAllocationDelta * kPointerSize);
+  __ Cmp(allocation_top, x10);
+  __ B(hi, &call_builtin);
+
+  // We fit and could grow elements.
+  // Update new_space_allocation_top.
+  __ Str(allocation_top, MemOperand(allocation_top_addr));
+  // Push the argument.
+  __ Str(argument, MemOperand(end_elements));
+  // Fill the rest with holes.
+  __ LoadRoot(x10, Heap::kTheHoleValueRootIndex);
+  for (int i = 1; i < kAllocationDelta; i++) {
+    // TODO(all): Try to use stp here.
+    __ Str(x10, MemOperand(end_elements, i * kPointerSize));
+  }
+
+  // Update elements' and array's sizes.
+  __ Str(length, FieldMemOperand(receiver, JSArray::kLengthOffset));
+  __ Add(elements_length,
+         elements_length,
+         Operand(Smi::FromInt(kAllocationDelta)));
+  __ Str(elements_length,
+         FieldMemOperand(elements, FixedArray::kLengthOffset));
+
+  // Elements are in new space, so write barrier is not required.
+  __ Drop(argc + 1);
+  __ Mov(x0, length);
+  __ Ret();
+
+  __ Bind(&call_builtin);
+  __ TailCallExternalReference(
+      ExternalReference(Builtins::c_ArrayPush, isolate), argc + 1, 1);
+}
+
+
+// Trampoline stub for binary op ICs that track allocation sites. Loads a
+// placeholder allocation site into x2 (patched to the real site when the
+// stub is copied from this template) and tail-calls the stub that performs
+// the actual binary operation.
+void BinaryOpICWithAllocationSiteStub::Generate(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- x1    : left
+  //  -- x0    : right
+  //  -- lr    : return address
+  // -----------------------------------
+  Isolate* isolate = masm->isolate();
+
+  // Load x2 with the allocation site.  We stick an undefined dummy value here
+  // and replace it with the real allocation site later when we instantiate this
+  // stub in BinaryOpICWithAllocationSiteStub::GetCodeCopyFromTemplate().
+  __ LoadObject(x2, handle(isolate->heap()->undefined_value()));
+
+  // Make sure that we actually patched the allocation site.
+  if (FLAG_debug_code) {
+    __ AssertNotSmi(x2, kExpectedAllocationSite);
+    __ Ldr(x10, FieldMemOperand(x2, HeapObject::kMapOffset));
+    __ AssertRegisterIsRoot(x10, Heap::kAllocationSiteMapRootIndex,
+                            kExpectedAllocationSite);
+  }
+
+  // Tail call into the stub that handles binary operations with allocation
+  // sites.
+  BinaryOpWithAllocationSiteStub stub(state_);
+  __ TailCallStub(&stub);
+}
+
+
+// Whether stubs may freely use FP/SIMD registers on this platform.
+bool CodeStub::CanUseFPRegisters() {
+  // FP registers always available on A64.
+  return true;
+}
+
+
+// Incremental-marking path of the record-write stub. Notifies the
+// incremental marker about the stored value if needed and, when
+// remembered_set_action_ is EMIT_REMEMBERED_SET, updates the remembered
+// set for new-space values on scan-on-scavenge pages.
+void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
+  // We need some extra registers for this stub, they have been allocated
+  // but we need to save them before using them.
+  regs_.Save(masm);
+
+  if (remembered_set_action_ == EMIT_REMEMBERED_SET) {
+    Label dont_need_remembered_set;
+
+    Register value = regs_.scratch0();
+    __ Ldr(value, MemOperand(regs_.address()));
+    __ JumpIfNotInNewSpace(value, &dont_need_remembered_set);
+
+    __ CheckPageFlagSet(regs_.object(),
+                        value,
+                        1 << MemoryChunk::SCAN_ON_SCAVENGE,
+                        &dont_need_remembered_set);
+
+    // First notify the incremental marker if necessary, then update the
+    // remembered set.
+    CheckNeedsToInformIncrementalMarker(
+        masm, kUpdateRememberedSetOnNoNeedToInformIncrementalMarker, mode);
+    InformIncrementalMarker(masm, mode);
+    regs_.Restore(masm);  // Restore the extra scratch registers we used.
+    // RememberedSetHelper with kReturnAtEnd emits the Ret for this path.
+    __ RememberedSetHelper(object_,
+                           address_,
+                           value_,
+                           save_fp_regs_mode_,
+                           MacroAssembler::kReturnAtEnd);
+
+    __ Bind(&dont_need_remembered_set);
+  }
+
+  CheckNeedsToInformIncrementalMarker(
+      masm, kReturnOnNoNeedToInformIncrementalMarker, mode);
+  InformIncrementalMarker(masm, mode);
+  regs_.Restore(masm);  // Restore the extra scratch registers we used.
+  __ Ret();
+}
+
+
+// Calls the C++ incremental-marking (or evacuation) record-write function,
+// preserving caller-saved registers around the external call.
+void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm, Mode mode) {
+ regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode_);
+ Register address =
+ x0.Is(regs_.address()) ? regs_.scratch0() : regs_.address();
+ ASSERT(!address.Is(regs_.object()));
+ ASSERT(!address.Is(x0));
+ // C arguments: object in x0, slot address in x1, isolate in x2.
+ __ Mov(address, regs_.address());
+ __ Mov(x0, regs_.object());
+ __ Mov(x1, address);
+ __ Mov(x2, Operand(ExternalReference::isolate_address(masm->isolate())));
+
+ AllowExternalCallThatCantCauseGC scope(masm);
+ ExternalReference function = (mode == INCREMENTAL_COMPACTION)
+ ? ExternalReference::incremental_evacuation_record_write_function(
+ masm->isolate())
+ : ExternalReference::incremental_marking_record_write_function(
+ masm->isolate());
+ __ CallCFunction(function, 3, 0);
+
+ regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode_);
+}
+
+
+// Decides whether the incremental marker must be informed about this write.
+// Falls through when it must; otherwise either updates the remembered set or
+// returns, as selected by 'on_no_need'.
+void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
+ MacroAssembler* masm,
+ OnNoNeedToInformIncrementalMarker on_no_need,
+ Mode mode) {
+ Label on_black;
+ Label need_incremental;
+ Label need_incremental_pop_scratch;
+
+ // Decrement the page's write barrier counter; once it goes negative the
+ // incremental marker must be informed.
+ Register mem_chunk = regs_.scratch0();
+ Register counter = regs_.scratch1();
+ __ Bic(mem_chunk, regs_.object(), Page::kPageAlignmentMask);
+ __ Ldr(counter,
+ MemOperand(mem_chunk, MemoryChunk::kWriteBarrierCounterOffset));
+ __ Subs(counter, counter, 1);
+ __ Str(counter,
+ MemOperand(mem_chunk, MemoryChunk::kWriteBarrierCounterOffset));
+ __ B(mi, &need_incremental);
+
+ // If the object is not black we don't have to inform the incremental marker.
+ __ JumpIfBlack(regs_.object(), regs_.scratch0(), regs_.scratch1(), &on_black);
+
+ regs_.Restore(masm); // Restore the extra scratch registers we used.
+ if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
+ __ RememberedSetHelper(object_,
+ address_,
+ value_,
+ save_fp_regs_mode_,
+ MacroAssembler::kReturnAtEnd);
+ } else {
+ __ Ret();
+ }
+
+ __ Bind(&on_black);
+ // Get the value from the slot.
+ Register value = regs_.scratch0();
+ __ Ldr(value, MemOperand(regs_.address()));
+
+ if (mode == INCREMENTAL_COMPACTION) {
+ Label ensure_not_white;
+
+ __ CheckPageFlagClear(value,
+ regs_.scratch1(),
+ MemoryChunk::kEvacuationCandidateMask,
+ &ensure_not_white);
+
+ __ CheckPageFlagClear(regs_.object(),
+ regs_.scratch1(),
+ MemoryChunk::kSkipEvacuationSlotsRecordingMask,
+ &need_incremental);
+
+ __ Bind(&ensure_not_white);
+ }
+
+ // We need extra registers for this, so we push the object and the address
+ // register temporarily.
+ __ Push(regs_.address(), regs_.object());
+ __ EnsureNotWhite(value,
+ regs_.scratch1(), // Scratch.
+ regs_.object(), // Scratch.
+ regs_.address(), // Scratch.
+ regs_.scratch2(), // Scratch.
+ &need_incremental_pop_scratch);
+ __ Pop(regs_.object(), regs_.address());
+
+ regs_.Restore(masm); // Restore the extra scratch registers we used.
+ if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
+ __ RememberedSetHelper(object_,
+ address_,
+ value_,
+ save_fp_regs_mode_,
+ MacroAssembler::kReturnAtEnd);
+ } else {
+ __ Ret();
+ }
+
+ __ Bind(&need_incremental_pop_scratch);
+ __ Pop(regs_.object(), regs_.address());
+
+ __ Bind(&need_incremental);
+ // Fall through when we need to inform the incremental marker.
+}
+
+
+// RecordWriteStub entry point. The stub starts in STORE_BUFFER_ONLY mode;
+// starting incremental marking patches the two leading instructions into
+// branches to the incremental paths below.
+void RecordWriteStub::Generate(MacroAssembler* masm) {
+ Label skip_to_incremental_noncompacting;
+ Label skip_to_incremental_compacting;
+
+ // We patch these two first instructions back and forth between a nop and
+ // real branch when we start and stop incremental heap marking.
+ // Initially the stub is expected to be in STORE_BUFFER_ONLY mode, so 2 nops
+ // are generated.
+ // See RecordWriteStub::Patch for details.
+ {
+ InstructionAccurateScope scope(masm, 2);
+ __ adr(xzr, &skip_to_incremental_noncompacting);
+ __ adr(xzr, &skip_to_incremental_compacting);
+ }
+
+ if (remembered_set_action_ == EMIT_REMEMBERED_SET) {
+ __ RememberedSetHelper(object_,
+ address_,
+ value_,
+ save_fp_regs_mode_,
+ MacroAssembler::kReturnAtEnd);
+ }
+ __ Ret();
+
+ __ Bind(&skip_to_incremental_noncompacting);
+ GenerateIncremental(masm, INCREMENTAL);
+
+ __ Bind(&skip_to_incremental_compacting);
+ GenerateIncremental(masm, INCREMENTAL_COMPACTION);
+}
+
+
+// Stores a value into an array literal's elements backing store, with
+// separate fast paths for object, smi and double elements kinds; an
+// elements transition falls back to the runtime.
+void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
+ // TODO(all): Possible optimisations in this function:
+ // 1. Merge CheckFastElements and CheckFastSmiElements, so that the map
+ // bitfield is loaded only once.
+ // 2. Refactor the Ldr/Add sequence at the start of fast_elements and
+ // smi_element.
+
+ // x0 value element value to store
+ // x3 index_smi element index as smi
+ // sp[0] array_index_smi array literal index in function as smi
+ // sp[1] array array literal
+
+ Register value = x0;
+ Register index_smi = x3;
+
+ Register array = x1;
+ Register array_map = x2;
+ Register array_index_smi = x4;
+ __ PeekPair(array_index_smi, array, 0);
+ __ Ldr(array_map, FieldMemOperand(array, JSObject::kMapOffset));
+
+ Label double_elements, smi_element, fast_elements, slow_elements;
+ __ CheckFastElements(array_map, x10, &double_elements);
+ __ JumpIfSmi(value, &smi_element);
+ __ CheckFastSmiElements(array_map, x10, &fast_elements);
+
+ // Store into the array literal requires an elements transition. Call into
+ // the runtime.
+ __ Bind(&slow_elements);
+ __ Push(array, index_smi, value);
+ __ Ldr(x10, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ __ Ldr(x11, FieldMemOperand(x10, JSFunction::kLiteralsOffset));
+ __ Push(x11, array_index_smi);
+ __ TailCallRuntime(Runtime::kStoreArrayLiteralElement, 5, 1);
+
+ // Array literal has ElementsKind of FAST_*_ELEMENTS and value is an object.
+ __ Bind(&fast_elements);
+ __ Ldr(x10, FieldMemOperand(array, JSObject::kElementsOffset));
+ __ Add(x11, x10, Operand::UntagSmiAndScale(index_smi, kPointerSizeLog2));
+ __ Add(x11, x11, FixedArray::kHeaderSize - kHeapObjectTag);
+ __ Str(value, MemOperand(x11));
+ // Update the write barrier for the array store.
+ __ RecordWrite(x10, x11, value, kLRHasNotBeenSaved, kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
+ __ Ret();
+
+ // Array literal has ElementsKind of FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS,
+ // and value is Smi.
+ __ Bind(&smi_element);
+ __ Ldr(x10, FieldMemOperand(array, JSObject::kElementsOffset));
+ __ Add(x11, x10, Operand::UntagSmiAndScale(index_smi, kPointerSizeLog2));
+ __ Str(value, FieldMemOperand(x11, FixedArray::kHeaderSize));
+ __ Ret();
+
+ __ Bind(&double_elements);
+ __ Ldr(x10, FieldMemOperand(array, JSObject::kElementsOffset));
+ __ StoreNumberToDoubleElements(value, index_smi, x10, x11, d0, d1,
+ &slow_elements);
+ __ Ret();
+}
+
+
+// Trampoline taken on stub failure: re-enters via CEntryStub (saving FP
+// registers), then drops the caller's stack parameters and returns to the
+// IC miss stub continuation still on the stack.
+void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
+ // TODO(jbramley): The ARM code leaves the (shifted) offset in r1. Why?
+ CEntryStub ces(1, kSaveFPRegs);
+ __ Call(ces.GetCode(masm->isolate()), RelocInfo::CODE_TARGET);
+ int parameter_count_offset =
+ StubFailureTrampolineFrame::kCallerStackParameterCountFrameOffset;
+ __ Ldr(x1, MemOperand(fp, parameter_count_offset));
+ if (function_mode_ == JS_FUNCTION_STUB_MODE) {
+ // In JS-function mode the receiver is an extra implicit argument.
+ __ Add(x1, x1, 1);
+ }
+ masm->LeaveFrame(StackFrame::STUB_FAILURE_TRAMPOLINE);
+ __ Drop(x1);
+ // Return to IC Miss stub, continuation still on stack.
+ __ Ret();
+}
+
+
+// Emits a call to ProfileEntryHookStub (preserving lr) when an entry hook
+// is installed for the isolate; emits nothing otherwise.
+void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
+ if (masm->isolate()->function_entry_hook() != NULL) {
+ // TODO(all): This needs to be reliably consistent with
+ // kReturnAddressDistanceFromFunctionStart in ::Generate.
+ Assembler::BlockConstPoolScope no_const_pools(masm);
+ ProfileEntryHookStub stub;
+ __ Push(lr);
+ __ CallStub(&stub);
+ __ Pop(lr);
+ }
+}
+
+
+// Body of the entry hook stub: calls the registered hook with the
+// instrumented function's address (x0) and the location of the caller's
+// return address (x1). Under the simulator the hook is reached through a
+// trampoline.
+void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
+ MacroAssembler::NoUseRealAbortsScope no_use_real_aborts(masm);
+ // The entry hook is a "BumpSystemStackPointer" instruction (sub), followed by
+ // a "Push lr" instruction, followed by a call.
+ // TODO(jbramley): Verify that this call is always made with relocation.
+ static const int kReturnAddressDistanceFromFunctionStart =
+ Assembler::kCallSizeWithRelocation + (2 * kInstructionSize);
+
+ // Save all kCallerSaved registers (including lr), since this can be called
+ // from anywhere.
+ // TODO(jbramley): What about FP registers?
+ __ PushCPURegList(kCallerSaved);
+ ASSERT(kCallerSaved.IncludesAliasOf(lr));
+ const int kNumSavedRegs = kCallerSaved.Count();
+
+ // Compute the function's address as the first argument.
+ __ Sub(x0, lr, kReturnAddressDistanceFromFunctionStart);
+
+#if V8_HOST_ARCH_A64
+ uintptr_t entry_hook =
+ reinterpret_cast<uintptr_t>(masm->isolate()->function_entry_hook());
+ __ Mov(x10, entry_hook);
+#else
+ // Under the simulator we need to indirect the entry hook through a trampoline
+ // function at a known address.
+ ApiFunction dispatcher(FUNCTION_ADDR(EntryHookTrampoline));
+ __ Mov(x10, Operand(ExternalReference(&dispatcher,
+ ExternalReference::BUILTIN_CALL,
+ masm->isolate())));
+ // It additionally takes an isolate as a third parameter
+ __ Mov(x2, Operand(ExternalReference::isolate_address(masm->isolate())));
+#endif
+
+ // The caller's return address is above the saved temporaries.
+ // Grab its location for the second argument to the hook.
+ __ Add(x1, __ StackPointer(), kNumSavedRegs * kPointerSize);
+
+ {
+ // Create a dummy frame, as CallCFunction requires this.
+ FrameScope frame(masm, StackFrame::MANUAL);
+ __ CallCFunction(x10, 2, 0);
+ }
+
+ __ PopCPURegList(kCallerSaved);
+ __ Ret();
+}
+
+
+// Calls a C++ function whose address is in x10, keeping the return address
+// on the stack so the GC can see it through the exit frame.
+void DirectCEntryStub::Generate(MacroAssembler* masm) {
+ // When calling into C++ code the stack pointer must be csp.
+ // Therefore this code must use csp for peek/poke operations when the
+ // stub is generated. When the stub is called
+ // (via DirectCEntryStub::GenerateCall), the caller must setup an ExitFrame
+ // and configure the stack pointer *before* doing the call.
+ const Register old_stack_pointer = __ StackPointer();
+ __ SetStackPointer(csp);
+
+ // Put return address on the stack (accessible to GC through exit frame pc).
+ __ Poke(lr, 0);
+ // Call the C++ function.
+ __ Blr(x10);
+ // Return to calling code.
+ __ Peek(lr, 0);
+ __ Ret();
+
+ __ SetStackPointer(old_stack_pointer);
+}
+
+// Branches to this stub's code object with the C++ target address in x10
+// (consumed by DirectCEntryStub::Generate above).
+void DirectCEntryStub::GenerateCall(MacroAssembler* masm,
+ Register target) {
+ // Make sure the caller configured the stack pointer (see comment in
+ // DirectCEntryStub::Generate).
+ ASSERT(csp.Is(__ StackPointer()));
+
+ intptr_t code =
+ reinterpret_cast<intptr_t>(GetCode(masm->isolate()).location());
+ __ Mov(lr, Operand(code, RelocInfo::CODE_TARGET));
+ __ Mov(x10, target);
+ // Branch to the stub.
+ __ Blr(lr);
+}
+
+
+// Probe the name dictionary in the 'elements' register.
+// Jump to the 'done' label if a property with the given name is found.
+// Jump to the 'miss' label otherwise.
+//
+// If lookup was successful 'scratch2' will be equal to elements + 4 * index.
+// 'elements' and 'name' registers are preserved on miss.
+void NameDictionaryLookupStub::GeneratePositiveLookup(
+ MacroAssembler* masm,
+ Label* miss,
+ Label* done,
+ Register elements,
+ Register name,
+ Register scratch1,
+ Register scratch2) {
+ ASSERT(!AreAliased(elements, name, scratch1, scratch2));
+
+ // Assert that name contains a string.
+ __ AssertName(name);
+
+ // Compute the capacity mask.
+ __ Ldrsw(scratch1, UntagSmiFieldMemOperand(elements, kCapacityOffset));
+ __ Sub(scratch1, scratch1, 1);
+
+ // Generate an unrolled loop that performs a few probes before giving up.
+ for (int i = 0; i < kInlinedProbes; i++) {
+ // Compute the masked index: (hash + i + i * i) & mask.
+ __ Ldr(scratch2, FieldMemOperand(name, Name::kHashFieldOffset));
+ if (i > 0) {
+ // Add the probe offset (i + i * i) left shifted to avoid right shifting
+ // the hash in a separate instruction. The value hash + i + i * i is right
+ // shifted in the following and instruction.
+ ASSERT(NameDictionary::GetProbeOffset(i) <
+ 1 << (32 - Name::kHashFieldOffset));
+ __ Add(scratch2, scratch2, Operand(
+ NameDictionary::GetProbeOffset(i) << Name::kHashShift));
+ }
+ __ And(scratch2, scratch1, Operand(scratch2, LSR, Name::kHashShift));
+
+ // Scale the index by multiplying by the element size.
+ ASSERT(NameDictionary::kEntrySize == 3);
+ __ Add(scratch2, scratch2, Operand(scratch2, LSL, 1));
+
+ // Check if the key is identical to the name.
+ __ Add(scratch2, elements, Operand(scratch2, LSL, kPointerSizeLog2));
+ // TODO(jbramley): We need another scratch here, but some callers can't
+ // provide a scratch3 so we have to use Tmp1(). We should find a clean way
+ // to make it unavailable to the MacroAssembler for a short time.
+ __ Ldr(__ Tmp1(), FieldMemOperand(scratch2, kElementsStartOffset));
+ __ Cmp(name, __ Tmp1());
+ __ B(eq, done);
+ }
+
+ // The inlined probes didn't find the entry.
+ // Call the complete stub to scan the whole dictionary.
+
+ CPURegList spill_list(CPURegister::kRegister, kXRegSize, 0, 6);
+ spill_list.Combine(lr);
+ spill_list.Remove(scratch1);
+ spill_list.Remove(scratch2);
+
+ __ PushCPURegList(spill_list);
+
+ if (name.is(x0)) {
+ ASSERT(!elements.is(x1));
+ __ Mov(x1, name);
+ __ Mov(x0, elements);
+ } else {
+ __ Mov(x0, elements);
+ __ Mov(x1, name);
+ }
+
+ Label not_found;
+ NameDictionaryLookupStub stub(POSITIVE_LOOKUP);
+ __ CallStub(&stub);
+ __ Cbz(x0, &not_found);
+ __ Mov(scratch2, x2); // Move entry index into scratch2.
+ __ PopCPURegList(spill_list);
+ __ B(done);
+
+ __ Bind(&not_found);
+ __ PopCPURegList(spill_list);
+ __ B(miss);
+}
+
+
+// Probes the property dictionary for a compile-time-known unique 'name':
+// jumps to 'done' when the dictionary provably does not contain it, and to
+// 'miss' when it does (directly, or via the full dictionary-lookup stub
+// when the inline probes are inconclusive).
+void NameDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
+ Label* miss,
+ Label* done,
+ Register receiver,
+ Register properties,
+ Handle<Name> name,
+ Register scratch0) {
+ ASSERT(!AreAliased(receiver, properties, scratch0));
+ ASSERT(name->IsUniqueName());
+ // If names of slots in range from 1 to kProbes - 1 for the hash value are
+ // not equal to the name and kProbes-th slot is not used (its name is the
+ // undefined value), it guarantees the hash table doesn't contain the
+ // property. It's true even if some slots represent deleted properties
+ // (their names are the hole value).
+ for (int i = 0; i < kInlinedProbes; i++) {
+ // scratch0 points to properties hash.
+ // Compute the masked index: (hash + i + i * i) & mask.
+ Register index = scratch0;
+ // Capacity is smi 2^n.
+ __ Ldrsw(index, UntagSmiFieldMemOperand(properties, kCapacityOffset));
+ __ Sub(index, index, 1);
+ __ And(index, index, name->Hash() + NameDictionary::GetProbeOffset(i));
+
+ // Scale the index by multiplying by the entry size.
+ ASSERT(NameDictionary::kEntrySize == 3);
+ __ Add(index, index, Operand(index, LSL, 1)); // index *= 3.
+
+ Register entity_name = scratch0;
+ // Having undefined at this place means the name is not contained.
+ Register tmp = index;
+ __ Add(tmp, properties, Operand(index, LSL, kPointerSizeLog2));
+ __ Ldr(entity_name, FieldMemOperand(tmp, kElementsStartOffset));
+
+ __ JumpIfRoot(entity_name, Heap::kUndefinedValueRootIndex, done);
+
+ // Stop if found the property.
+ __ Cmp(entity_name, Operand(name));
+ __ B(eq, miss);
+
+ Label good;
+ // A hole (deleted property) is fine; keep probing.
+ __ JumpIfRoot(entity_name, Heap::kTheHoleValueRootIndex, &good);
+
+ // Check if the entry name is not a unique name.
+ __ Ldr(entity_name, FieldMemOperand(entity_name, HeapObject::kMapOffset));
+ __ Ldrb(entity_name,
+ FieldMemOperand(entity_name, Map::kInstanceTypeOffset));
+ __ JumpIfNotUniqueName(entity_name, miss);
+ __ Bind(&good);
+ }
+
+ CPURegList spill_list(CPURegister::kRegister, kXRegSize, 0, 6);
+ spill_list.Combine(lr);
+ spill_list.Remove(scratch0); // Scratch registers don't need to be preserved.
+
+ __ PushCPURegList(spill_list);
+
+ __ Ldr(x0, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
+ __ Mov(x1, Operand(name));
+ NameDictionaryLookupStub stub(NEGATIVE_LOOKUP);
+ __ CallStub(&stub);
+ // Move stub return value to scratch0. Note that scratch0 is not included in
+ // spill_list and won't be clobbered by PopCPURegList.
+ __ Mov(scratch0, x0);
+ __ PopCPURegList(spill_list);
+
+ // Zero result means "not found" -> negative lookup succeeded.
+ __ Cbz(scratch0, done);
+ __ B(miss);
+}
+
+
+void NameDictionaryLookupStub::Generate(MacroAssembler* masm) {
+ // This stub overrides SometimesSetsUpAFrame() to return false. That means
+ // we cannot call anything that could cause a GC from this stub.
+ //
+ // Arguments are in x0 and x1:
+ // x0: property dictionary.
+ // x1: the name of the property we are looking for.
+ //
+ // Return value is in x0 and is zero if lookup failed, non zero otherwise.
+ // If the lookup is successful, x2 will contain the index of the entry.
+
+ Register result = x0;
+ Register dictionary = x0;
+ Register key = x1;
+ Register index = x2;
+ Register mask = x3;
+ Register hash = x4;
+ Register undefined = x5;
+ Register entry_key = x6;
+
+ Label in_dictionary, maybe_in_dictionary, not_in_dictionary;
+
+ __ Ldrsw(mask, UntagSmiFieldMemOperand(dictionary, kCapacityOffset));
+ __ Sub(mask, mask, 1);
+
+ __ Ldr(hash, FieldMemOperand(key, Name::kHashFieldOffset));
+ __ LoadRoot(undefined, Heap::kUndefinedValueRootIndex);
+
+ for (int i = kInlinedProbes; i < kTotalProbes; i++) {
+ // Compute the masked index: (hash + i + i * i) & mask.
+ // Capacity is smi 2^n.
+ if (i > 0) {
+ // Add the probe offset (i + i * i) left shifted to avoid right shifting
+ // the hash in a separate instruction. The value hash + i + i * i is right
+ // shifted in the following and instruction.
+ ASSERT(NameDictionary::GetProbeOffset(i) <
+ 1 << (32 - Name::kHashFieldOffset));
+ __ Add(index, hash,
+ NameDictionary::GetProbeOffset(i) << Name::kHashShift);
+ } else {
+ __ Mov(index, hash);
+ }
+ __ And(index, mask, Operand(index, LSR, Name::kHashShift));
+
+ // Scale the index by multiplying by the entry size.
+ ASSERT(NameDictionary::kEntrySize == 3);
+ __ Add(index, index, Operand(index, LSL, 1)); // index *= 3.
+
+ __ Add(index, dictionary, Operand(index, LSL, kPointerSizeLog2));
+ __ Ldr(entry_key, FieldMemOperand(index, kElementsStartOffset));
+
+ // Having undefined at this place means the name is not contained.
+ __ Cmp(entry_key, undefined);
+ __ B(eq, &not_in_dictionary);
+
+ // Stop if found the property.
+ __ Cmp(entry_key, key);
+ __ B(eq, &in_dictionary);
+
+ if (i != kTotalProbes - 1 && mode_ == NEGATIVE_LOOKUP) {
+ // Check if the entry name is not a unique name.
+ __ Ldr(entry_key, FieldMemOperand(entry_key, HeapObject::kMapOffset));
+ __ Ldrb(entry_key, FieldMemOperand(entry_key, Map::kInstanceTypeOffset));
+ __ JumpIfNotUniqueName(entry_key, &maybe_in_dictionary);
+ }
+ }
+
+ __ Bind(&maybe_in_dictionary);
+ // If we are doing negative lookup then probing failure should be
+ // treated as a lookup success. For positive lookup, probing failure
+ // should be treated as lookup failure.
+ if (mode_ == POSITIVE_LOOKUP) {
+ __ Mov(result, 0);
+ __ Ret();
+ }
+
+ __ Bind(&in_dictionary);
+ __ Mov(result, 1);
+ __ Ret();
+
+ __ Bind(&not_in_dictionary);
+ __ Mov(result, 0);
+ __ Ret();
+}
+
+
+// Tail-calls the T constructor stub matching the elements kind held in x3,
+// or the initial fast kind when allocation sites are disabled.
+template<class T>
+static void CreateArrayDispatch(MacroAssembler* masm,
+ AllocationSiteOverrideMode mode) {
+ ASM_LOCATION("CreateArrayDispatch");
+ if (mode == DISABLE_ALLOCATION_SITES) {
+ T stub(GetInitialFastElementsKind(), mode);
+ __ TailCallStub(&stub);
+
+ } else if (mode == DONT_OVERRIDE) {
+ Register kind = x3;
+ int last_index =
+ GetSequenceIndexFromFastElementsKind(TERMINAL_FAST_ELEMENTS_KIND);
+ for (int i = 0; i <= last_index; ++i) {
+ Label next;
+ ElementsKind candidate_kind = GetFastElementsKindFromSequenceIndex(i);
+ // TODO(jbramley): Is this the best way to handle this? Can we make the
+ // tail calls conditional, rather than hopping over each one?
+ __ CompareAndBranch(kind, candidate_kind, ne, &next);
+ T stub(candidate_kind);
+ __ TailCallStub(&stub);
+ __ Bind(&next);
+ }
+
+ // If we reached this point there is a problem.
+ __ Abort(kUnexpectedElementsKindInArrayConstructor);
+
+ } else {
+ UNREACHABLE();
+ }
+}
+
+
+// Single-argument Array constructor dispatch. May upgrade the elements kind
+// to its holey variant (recording the transition in the allocation site when
+// mode == DONT_OVERRIDE) before tail-calling the matching stub.
+// TODO(jbramley): If this needs to be a special case, make it a proper template
+// specialization, and not a separate function.
+static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
+ AllocationSiteOverrideMode mode) {
+ ASM_LOCATION("CreateArrayDispatchOneArgument");
+ // x0 - argc
+ // x1 - constructor?
+ // x2 - allocation site (if mode != DISABLE_ALLOCATION_SITES)
+ // x3 - kind (if mode != DISABLE_ALLOCATION_SITES)
+ // sp[0] - last argument
+
+ Register allocation_site = x2;
+ Register kind = x3;
+
+ Label normal_sequence;
+ if (mode == DONT_OVERRIDE) {
+ STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
+ STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
+ STATIC_ASSERT(FAST_ELEMENTS == 2);
+ STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
+ STATIC_ASSERT(FAST_DOUBLE_ELEMENTS == 4);
+ STATIC_ASSERT(FAST_HOLEY_DOUBLE_ELEMENTS == 5);
+
+ // Is the low bit set? If so, the array is holey.
+ __ Tbnz(kind, 0, &normal_sequence);
+ }
+
+ // Look at the last argument.
+ // TODO(jbramley): What does a 0 argument represent?
+ __ Peek(x10, 0);
+ __ Cbz(x10, &normal_sequence);
+
+ if (mode == DISABLE_ALLOCATION_SITES) {
+ ElementsKind initial = GetInitialFastElementsKind();
+ ElementsKind holey_initial = GetHoleyElementsKind(initial);
+
+ ArraySingleArgumentConstructorStub stub_holey(holey_initial,
+ DISABLE_ALLOCATION_SITES);
+ __ TailCallStub(&stub_holey);
+
+ __ Bind(&normal_sequence);
+ ArraySingleArgumentConstructorStub stub(initial,
+ DISABLE_ALLOCATION_SITES);
+ __ TailCallStub(&stub);
+ } else if (mode == DONT_OVERRIDE) {
+ // We are going to create a holey array, but our kind is non-holey.
+ // Fix kind and retry (only if we have an allocation site in the slot).
+ __ Orr(kind, kind, 1);
+
+ if (FLAG_debug_code) {
+ __ Ldr(x10, FieldMemOperand(allocation_site, 0));
+ __ JumpIfNotRoot(x10, Heap::kAllocationSiteMapRootIndex,
+ &normal_sequence);
+ __ Assert(eq, kExpectedAllocationSite);
+ }
+
+ // Save the resulting elements kind in type info. We can't just store 'kind'
+ // in the AllocationSite::transition_info field because elements kind is
+ // restricted to a portion of the field; upper bits need to be left alone.
+ STATIC_ASSERT(AllocationSite::ElementsKindBits::kShift == 0);
+ __ Ldr(x11, FieldMemOperand(allocation_site,
+ AllocationSite::kTransitionInfoOffset));
+ __ Add(x11, x11, Operand(Smi::FromInt(kFastElementsKindPackedToHoley)));
+ __ Str(x11, FieldMemOperand(allocation_site,
+ AllocationSite::kTransitionInfoOffset));
+
+ __ Bind(&normal_sequence);
+ int last_index =
+ GetSequenceIndexFromFastElementsKind(TERMINAL_FAST_ELEMENTS_KIND);
+ for (int i = 0; i <= last_index; ++i) {
+ Label next;
+ ElementsKind candidate_kind = GetFastElementsKindFromSequenceIndex(i);
+ // TODO(jbramley): Is this the best way to handle this? Can we make the
+ // tail calls conditional, rather than hopping over each one?
+ __ CompareAndBranch(kind, candidate_kind, ne, &next);
+ ArraySingleArgumentConstructorStub stub(candidate_kind);
+ __ TailCallStub(&stub);
+ __ Bind(&next);
+ }
+
+ // If we reached this point there is a problem.
+ __ Abort(kUnexpectedElementsKindInArrayConstructor);
+ } else {
+ UNREACHABLE();
+ }
+}
+
+
+// Pre-generates T stubs for every fast elements kind, plus the
+// DISABLE_ALLOCATION_SITES variant for kinds that track allocation sites.
+template<class T>
+static void ArrayConstructorStubAheadOfTimeHelper(Isolate* isolate) {
+ int to_index = GetSequenceIndexFromFastElementsKind(
+ TERMINAL_FAST_ELEMENTS_KIND);
+ for (int i = 0; i <= to_index; ++i) {
+ ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
+ T stub(kind);
+ stub.GetCode(isolate);
+ if (AllocationSite::GetMode(kind) != DONT_TRACK_ALLOCATION_SITE) {
+ T stub1(kind, DISABLE_ALLOCATION_SITES);
+ stub1.GetCode(isolate);
+ }
+ }
+}
+
+
+// Pre-generates the no-, single- and N-argument Array constructor stub
+// families.
+void ArrayConstructorStubBase::GenerateStubsAheadOfTime(Isolate* isolate) {
+ ArrayConstructorStubAheadOfTimeHelper<ArrayNoArgumentConstructorStub>(
+ isolate);
+ ArrayConstructorStubAheadOfTimeHelper<ArraySingleArgumentConstructorStub>(
+ isolate);
+ ArrayConstructorStubAheadOfTimeHelper<ArrayNArgumentsConstructorStub>(
+ isolate);
+}
+
+
+// Pre-generates internal-array constructor stubs for FAST_ELEMENTS and
+// FAST_HOLEY_ELEMENTS.
+void InternalArrayConstructorStubBase::GenerateStubsAheadOfTime(
+ Isolate* isolate) {
+ ElementsKind kinds[2] = { FAST_ELEMENTS, FAST_HOLEY_ELEMENTS };
+ for (int i = 0; i < 2; i++) {
+ // For internal arrays we only need a few things
+ InternalArrayNoArgumentConstructorStub stubh1(kinds[i]);
+ stubh1.GetCode(isolate);
+ InternalArraySingleArgumentConstructorStub stubh2(kinds[i]);
+ stubh2.GetCode(isolate);
+ InternalArrayNArgumentsConstructorStub stubh3(kinds[i]);
+ stubh3.GetCode(isolate);
+ }
+}
+
+
+// Dispatches on argc (x0) to the constructor stub family selected by
+// argument_count_ (zero-, one- or N-argument).
+void ArrayConstructorStub::GenerateDispatchToArrayStub(
+ MacroAssembler* masm,
+ AllocationSiteOverrideMode mode) {
+ Register argc = x0;
+ if (argument_count_ == ANY) {
+ Label zero_case, n_case;
+ __ Cbz(argc, &zero_case);
+ __ Cmp(argc, 1);
+ __ B(ne, &n_case);
+
+ // One argument.
+ CreateArrayDispatchOneArgument(masm, mode);
+
+ __ Bind(&zero_case);
+ // No arguments.
+ CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
+
+ __ Bind(&n_case);
+ // N arguments.
+ CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm, mode);
+
+ } else if (argument_count_ == NONE) {
+ CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
+ } else if (argument_count_ == ONE) {
+ CreateArrayDispatchOneArgument(masm, mode);
+ } else if (argument_count_ == MORE_THAN_ONE) {
+ CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm, mode);
+ } else {
+ UNREACHABLE();
+ }
+}
+
+
+// Array constructor entry: reads allocation-site feedback from the feedback
+// vector (when present and valid) and dispatches to the matching stub.
+void ArrayConstructorStub::Generate(MacroAssembler* masm) {
+ ASM_LOCATION("ArrayConstructorStub::Generate");
+ // ----------- S t a t e -------------
+ // -- x0 : argc (only if argument_count_ == ANY)
+ // -- x1 : constructor
+ // -- x2 : feedback vector (fixed array or undefined)
+ // -- x3 : slot index (if x2 is fixed array)
+ // -- sp[0] : return address
+ // -- sp[4] : last argument
+ // -----------------------------------
+ Register constructor = x1;
+ Register feedback_vector = x2;
+ Register slot_index = x3;
+
+ if (FLAG_debug_code) {
+ // The array construct code is only set for the global and natives
+ // builtin Array functions which always have maps.
+
+ Label unexpected_map, map_ok;
+ // Initial map for the builtin Array function should be a map.
+ __ Ldr(x10, FieldMemOperand(constructor,
+ JSFunction::kPrototypeOrInitialMapOffset));
+ // Will both indicate a NULL and a Smi.
+ __ JumpIfSmi(x10, &unexpected_map);
+ __ JumpIfObjectType(x10, x10, x11, MAP_TYPE, &map_ok);
+ __ Bind(&unexpected_map);
+ __ Abort(kUnexpectedInitialMapForArrayFunction);
+ __ Bind(&map_ok);
+
+ // In feedback_vector, we expect either undefined or a valid fixed array.
+ Label okay_here;
+ Handle<Map> fixed_array_map = masm->isolate()->factory()->fixed_array_map();
+ __ JumpIfRoot(feedback_vector, Heap::kUndefinedValueRootIndex, &okay_here);
+ __ Ldr(x10, FieldMemOperand(feedback_vector, FixedArray::kMapOffset));
+ __ Cmp(x10, Operand(fixed_array_map));
+ __ Assert(eq, kExpectedFixedArrayInFeedbackVector);
+
+ // slot_index should be a smi if we don't have undefined in feedback_vector.
+ __ AssertSmi(slot_index);
+
+ __ Bind(&okay_here);
+ }
+
+ Register allocation_site = x2; // Overwrites feedback_vector.
+ Register kind = x3;
+ Label no_info;
+ // Get the elements kind and case on that.
+ __ JumpIfRoot(feedback_vector, Heap::kUndefinedValueRootIndex, &no_info);
+ __ Add(feedback_vector, feedback_vector,
+ Operand::UntagSmiAndScale(slot_index, kPointerSizeLog2));
+ __ Ldr(allocation_site, FieldMemOperand(feedback_vector,
+ FixedArray::kHeaderSize));
+
+ // If the feedback vector is undefined, or contains anything other than an
+ // AllocationSite, call an array constructor that doesn't use AllocationSites.
+ __ Ldr(x10, FieldMemOperand(allocation_site, AllocationSite::kMapOffset));
+ __ JumpIfNotRoot(x10, Heap::kAllocationSiteMapRootIndex, &no_info);
+
+ // Extract the elements kind from the allocation site's transition info.
+ __ Ldrsw(kind,
+ UntagSmiFieldMemOperand(allocation_site,
+ AllocationSite::kTransitionInfoOffset));
+ __ And(kind, kind, AllocationSite::ElementsKindBits::kMask);
+ GenerateDispatchToArrayStub(masm, DONT_OVERRIDE);
+
+ __ Bind(&no_info);
+ GenerateDispatchToArrayStub(masm, DISABLE_ALLOCATION_SITES);
+}
+
+
+// Dispatches on argc (x0) to the internal-array stub for 'kind'; a packed
+// kind with one non-zero argument is upgraded to its holey variant.
+void InternalArrayConstructorStub::GenerateCase(
+ MacroAssembler* masm, ElementsKind kind) {
+ Label zero_case, n_case;
+ Register argc = x0;
+
+ __ Cbz(argc, &zero_case);
+ __ CompareAndBranch(argc, 1, ne, &n_case);
+
+ // One argument.
+ if (IsFastPackedElementsKind(kind)) {
+ Label packed_case;
+
+ // We might need to create a holey array; look at the first argument.
+ __ Peek(x10, 0);
+ __ Cbz(x10, &packed_case);
+
+ InternalArraySingleArgumentConstructorStub
+ stub1_holey(GetHoleyElementsKind(kind));
+ __ TailCallStub(&stub1_holey);
+
+ __ Bind(&packed_case);
+ }
+ InternalArraySingleArgumentConstructorStub stub1(kind);
+ __ TailCallStub(&stub1);
+
+ __ Bind(&zero_case);
+ // No arguments.
+ InternalArrayNoArgumentConstructorStub stub0(kind);
+ __ TailCallStub(&stub0);
+
+ __ Bind(&n_case);
+ // N arguments.
+ InternalArrayNArgumentsConstructorStub stubN(kind);
+ __ TailCallStub(&stubN);
+}
+
+
+// Internal Array constructor entry: derives the elements kind from the
+// constructor's initial map and dispatches to GenerateCase.
+void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- x0 : argc
+ // -- x1 : constructor
+ // -- sp[0] : return address
+ // -- sp[4] : last argument
+ // -----------------------------------
+ Handle<Object> undefined_sentinel(
+ masm->isolate()->heap()->undefined_value(), masm->isolate());
+
+ Register constructor = x1;
+
+ if (FLAG_debug_code) {
+ // The array construct code is only set for the global and natives
+ // builtin Array functions which always have maps.
+
+ Label unexpected_map, map_ok;
+ // Initial map for the builtin Array function should be a map.
+ __ Ldr(x10, FieldMemOperand(constructor,
+ JSFunction::kPrototypeOrInitialMapOffset));
+ // Will both indicate a NULL and a Smi.
+ __ JumpIfSmi(x10, &unexpected_map);
+ __ JumpIfObjectType(x10, x10, x11, MAP_TYPE, &map_ok);
+ __ Bind(&unexpected_map);
+ __ Abort(kUnexpectedInitialMapForArrayFunction);
+ __ Bind(&map_ok);
+ }
+
+ Register kind = w3;
+ // Figure out the right elements kind
+ __ Ldr(x10, FieldMemOperand(constructor,
+ JSFunction::kPrototypeOrInitialMapOffset));
+
+ // TODO(jbramley): Add a helper function to read elements kind from an
+ // existing map.
+ // Load the map's "bit field 2" into result.
+ __ Ldr(kind, FieldMemOperand(x10, Map::kBitField2Offset));
+ // Retrieve elements_kind from bit field 2.
+ __ Ubfx(kind, kind, Map::kElementsKindShift, Map::kElementsKindBitCount);
+
+ if (FLAG_debug_code) {
+ // NOTE(review): 'done' below is declared but never bound or branched to,
+ // and the comparisons use x3 directly where 'kind' is w3 — confirm this
+ // is intended.
+ Label done;
+ __ Cmp(x3, FAST_ELEMENTS);
+ __ Ccmp(x3, FAST_HOLEY_ELEMENTS, ZFlag, ne);
+ __ Assert(eq, kInvalidElementsKindForInternalArrayOrInternalPackedArray);
+ }
+
+ Label fast_elements_case;
+ __ CompareAndBranch(kind, FAST_ELEMENTS, eq, &fast_elements_case);
+ GenerateCase(masm, FAST_HOLEY_ELEMENTS);
+
+ __ Bind(&fast_elements_case);
+ GenerateCase(masm, FAST_ELEMENTS);
+}
+
+
+void CallApiFunctionStub::Generate(MacroAssembler* masm) {
+ // Builds a FunctionCallbackInfo on the stack and invokes the C++ API
+ // callback through CallApiFunctionAndReturn.
+ // ----------- S t a t e -------------
+ // -- x0 : callee
+ // -- x4 : call_data
+ // -- x2 : holder
+ // -- x1 : api_function_address
+ // -- cp : context
+ // --
+ // -- sp[0] : last argument
+ // -- ...
+ // -- sp[(argc - 1) * 8] : first argument
+ // -- sp[argc * 8] : receiver
+ // -----------------------------------
+
+ Register callee = x0;
+ Register call_data = x4;
+ Register holder = x2;
+ Register api_function_address = x1;
+ Register context = cp;
+
+ // Stub parameters are encoded in bit_field_.
+ int argc = ArgumentBits::decode(bit_field_);
+ bool restore_context = RestoreContextBits::decode(bit_field_);
+ bool call_data_undefined = CallDataUndefinedBits::decode(bit_field_);
+
+ typedef FunctionCallbackArguments FCA;
+
+ // The pushes below rely on this exact FCA stack layout.
+ STATIC_ASSERT(FCA::kContextSaveIndex == 6);
+ STATIC_ASSERT(FCA::kCalleeIndex == 5);
+ STATIC_ASSERT(FCA::kDataIndex == 4);
+ STATIC_ASSERT(FCA::kReturnValueOffset == 3);
+ STATIC_ASSERT(FCA::kReturnValueDefaultValueIndex == 2);
+ STATIC_ASSERT(FCA::kIsolateIndex == 1);
+ STATIC_ASSERT(FCA::kHolderIndex == 0);
+ STATIC_ASSERT(FCA::kArgsLength == 7);
+
+ Isolate* isolate = masm->isolate();
+
+ // FunctionCallbackArguments: context, callee and call data.
+ __ Push(context, callee, call_data);
+
+ // Load context from callee
+ __ Ldr(context, FieldMemOperand(callee, JSFunction::kContextOffset));
+
+ // call_data has been saved above, so the register is recycled to hold the
+ // undefined value that is pushed for the two return-value slots below.
+ // If the flag says call_data is already undefined, the register already
+ // holds it; otherwise load it explicitly. (The condition is deliberately
+ // negated — this is not an inverted test.)
+ if (!call_data_undefined) {
+ __ LoadRoot(call_data, Heap::kUndefinedValueRootIndex);
+ }
+ Register isolate_reg = x5;
+ __ Mov(isolate_reg, Operand(ExternalReference::isolate_address(isolate)));
+
+ // FunctionCallbackArguments:
+ // return value, return value default, isolate, holder.
+ __ Push(call_data, call_data, isolate_reg, holder);
+
+ // Prepare arguments.
+ Register args = x6;
+ __ Mov(args, masm->StackPointer());
+
+ // Allocate the v8::Arguments structure in the arguments' space, since it's
+ // not controlled by GC.
+ const int kApiStackSpace = 4;
+
+ // Allocate space so that CallApiFunctionAndReturn can store some scratch
+ // registers on the stack.
+ const int kCallApiFunctionSpillSpace = 4;
+
+ FrameScope frame_scope(masm, StackFrame::MANUAL);
+ __ EnterExitFrame(false, x10, kApiStackSpace + kCallApiFunctionSpillSpace);
+
+ // TODO(all): Optimize this with stp and suchlike.
+ ASSERT(!AreAliased(x0, api_function_address));
+ // x0 = FunctionCallbackInfo&
+ // Arguments is after the return address.
+ __ Add(x0, masm->StackPointer(), 1 * kPointerSize);
+ // FunctionCallbackInfo::implicit_args_
+ __ Str(args, MemOperand(x0, 0 * kPointerSize));
+ // FunctionCallbackInfo::values_
+ __ Add(x10, args, Operand((FCA::kArgsLength - 1 + argc) * kPointerSize));
+ __ Str(x10, MemOperand(x0, 1 * kPointerSize));
+ // FunctionCallbackInfo::length_ = argc
+ __ Mov(x10, argc);
+ __ Str(x10, MemOperand(x0, 2 * kPointerSize));
+ // FunctionCallbackInfo::is_construct_call = 0
+ __ Str(xzr, MemOperand(x0, 3 * kPointerSize));
+
+ // Unwind the FCA slots, the arguments and the receiver on return.
+ const int kStackUnwindSpace = argc + FCA::kArgsLength + 1;
+ Address thunk_address = FUNCTION_ADDR(&InvokeFunctionCallback);
+ ExternalReference::Type thunk_type = ExternalReference::PROFILING_API_CALL;
+ ApiFunction thunk_fun(thunk_address);
+ ExternalReference thunk_ref = ExternalReference(&thunk_fun, thunk_type,
+ masm->isolate());
+
+ AllowExternalCallThatCantCauseGC scope(masm);
+ // Saved context and return value live in the exit frame, above fp.
+ MemOperand context_restore_operand(
+ fp, (2 + FCA::kContextSaveIndex) * kPointerSize);
+ MemOperand return_value_operand(fp,
+ (2 + FCA::kReturnValueOffset) * kPointerSize);
+
+ const int spill_offset = 1 + kApiStackSpace;
+ __ CallApiFunctionAndReturn(api_function_address,
+ thunk_ref,
+ kStackUnwindSpace,
+ spill_offset,
+ return_value_operand,
+ restore_context ?
+ &context_restore_operand : NULL);
+}
+
+
+void CallApiGetterStub::Generate(MacroAssembler* masm) {
+ // Builds an AccessorInfo on the stack and invokes the C++ getter
+ // callback through CallApiFunctionAndReturn.
+ // ----------- S t a t e -------------
+ // -- sp[0] : name
+ // -- sp[8 - kArgsLength*8] : PropertyCallbackArguments object
+ // -- ...
+ // -- x2 : api_function_address
+ // -----------------------------------
+
+ Register api_function_address = x2;
+
+ __ Mov(x0, masm->StackPointer()); // x0 = Handle<Name>
+ __ Add(x1, x0, 1 * kPointerSize); // x1 = PCA
+
+ const int kApiStackSpace = 1;
+
+ // Allocate space so that CallApiFunctionAndReturn can store some scratch
+ // registers on the stack.
+ const int kCallApiFunctionSpillSpace = 4;
+
+ FrameScope frame_scope(masm, StackFrame::MANUAL);
+ __ EnterExitFrame(false, x10, kApiStackSpace + kCallApiFunctionSpillSpace);
+
+ // Create PropertyAccessorInfo instance on the stack above the exit frame with
+ // x1 (internal::Object** args_) as the data.
+ __ Poke(x1, 1 * kPointerSize);
+ __ Add(x1, masm->StackPointer(), 1 * kPointerSize); // x1 = AccessorInfo&
+
+ // Unwind the PCA slots and the name on return.
+ const int kStackUnwindSpace = PropertyCallbackArguments::kArgsLength + 1;
+
+ Address thunk_address = FUNCTION_ADDR(&InvokeAccessorGetterCallback);
+ ExternalReference::Type thunk_type =
+ ExternalReference::PROFILING_GETTER_CALL;
+ ApiFunction thunk_fun(thunk_address);
+ ExternalReference thunk_ref = ExternalReference(&thunk_fun, thunk_type,
+ masm->isolate());
+
+ const int spill_offset = 1 + kApiStackSpace;
+ // NOTE(review): the fp-relative offset 6 * kPointerSize is presumably the
+ // PropertyCallbackArguments return-value slot — confirm against PCA layout.
+ __ CallApiFunctionAndReturn(api_function_address,
+ thunk_ref,
+ kStackUnwindSpace,
+ spill_offset,
+ MemOperand(fp, 6 * kPointerSize),
+ NULL);
+}
+
+
+#undef __
+
+} } // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_A64
--- /dev/null
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_A64_CODE_STUBS_A64_H_
+#define V8_A64_CODE_STUBS_A64_H_
+
+#include "ic-inl.h"
+
+namespace v8 {
+namespace internal {
+
+
+void ArrayNativeCode(MacroAssembler* masm, Label* call_generic_code);
+
+
+// Stub invoked when the store buffer overflows. Whether FP registers are
+// preserved is selected by the constructor argument and encoded in the
+// minor key. Never sets up a frame.
+class StoreBufferOverflowStub: public PlatformCodeStub {
+ public:
+ explicit StoreBufferOverflowStub(SaveFPRegsMode save_fp)
+ : save_doubles_(save_fp) { }
+
+ void Generate(MacroAssembler* masm);
+
+ static void GenerateFixedRegStubsAheadOfTime(Isolate* isolate);
+ virtual bool SometimesSetsUpAFrame() { return false; }
+
+ private:
+ SaveFPRegsMode save_doubles_; // Whether Generate() preserves FP registers.
+
+ Major MajorKey() { return StoreBufferOverflow; }
+ int MinorKey() { return (save_doubles_ == kSaveFPRegs) ? 1 : 0; }
+};
+
+
+// Static helpers that emit string-hash computation code.
+class StringHelper : public AllStatic {
+ public:
+ // TODO(all): These don't seem to be used any more. Delete them.
+
+ // Generate string hash.
+ static void GenerateHashInit(MacroAssembler* masm,
+ Register hash,
+ Register character);
+
+ static void GenerateHashAddCharacter(MacroAssembler* masm,
+ Register hash,
+ Register character);
+
+ static void GenerateHashGetHash(MacroAssembler* masm,
+ Register hash,
+ Register scratch);
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(StringHelper);
+};
+
+
+// Write-barrier stub. Its first two instructions are patched back and forth
+// (see Patch()/GetMode()) to switch between store-buffer-only operation and
+// the two incremental-marking modes without regenerating the stub.
+class RecordWriteStub: public PlatformCodeStub {
+ public:
+ // Stub to record the write of 'value' at 'address' in 'object'.
+ // Typically 'address' = 'object' + <some offset>.
+ // See MacroAssembler::RecordWriteField() for example.
+ RecordWriteStub(Register object,
+ Register value,
+ Register address,
+ RememberedSetAction remembered_set_action,
+ SaveFPRegsMode fp_mode)
+ : object_(object),
+ value_(value),
+ address_(address),
+ remembered_set_action_(remembered_set_action),
+ save_fp_regs_mode_(fp_mode),
+ regs_(object, // An input reg.
+ address, // An input reg.
+ value) { // One scratch reg.
+ }
+
+ enum Mode {
+ STORE_BUFFER_ONLY,
+ INCREMENTAL,
+ INCREMENTAL_COMPACTION
+ };
+
+ virtual bool SometimesSetsUpAFrame() { return false; }
+
+ static Mode GetMode(Code* stub) {
+ // Find the mode depending on the first two instructions.
+ // The encoding is: (b, adr) -> INCREMENTAL; (adr, b) ->
+ // INCREMENTAL_COMPACTION; (adr, adr) -> STORE_BUFFER_ONLY.
+ Instruction* instr1 =
+ reinterpret_cast<Instruction*>(stub->instruction_start());
+ Instruction* instr2 = instr1->following();
+
+ if (instr1->IsUncondBranchImm()) {
+ ASSERT(instr2->IsPCRelAddressing() && (instr2->Rd() == xzr.code()));
+ return INCREMENTAL;
+ }
+
+ ASSERT(instr1->IsPCRelAddressing() && (instr1->Rd() == xzr.code()));
+
+ if (instr2->IsUncondBranchImm()) {
+ return INCREMENTAL_COMPACTION;
+ }
+
+ ASSERT(instr2->IsPCRelAddressing());
+
+ return STORE_BUFFER_ONLY;
+ }
+
+ // We patch the two first instructions of the stub back and forth between an
+ // adr and branch when we start and stop incremental heap marking.
+ // The branch is
+ // b label
+ // The adr is
+ // adr xzr label
+ // so effectively a nop.
+ static void Patch(Code* stub, Mode mode) {
+ // We are going to patch the two first instructions of the stub.
+ PatchingAssembler patcher(
+ reinterpret_cast<Instruction*>(stub->instruction_start()), 2);
+ Instruction* instr1 = patcher.InstructionAt(0);
+ Instruction* instr2 = patcher.InstructionAt(kInstructionSize);
+ // Instructions must be either 'adr' or 'b'.
+ ASSERT(instr1->IsPCRelAddressing() || instr1->IsUncondBranchImm());
+ ASSERT(instr2->IsPCRelAddressing() || instr2->IsUncondBranchImm());
+ // Retrieve the offsets to the labels.
+ int32_t offset_to_incremental_noncompacting = instr1->ImmPCOffset();
+ int32_t offset_to_incremental_compacting = instr2->ImmPCOffset();
+
+ switch (mode) {
+ case STORE_BUFFER_ONLY:
+ ASSERT(GetMode(stub) == INCREMENTAL ||
+ GetMode(stub) == INCREMENTAL_COMPACTION);
+ patcher.adr(xzr, offset_to_incremental_noncompacting);
+ patcher.adr(xzr, offset_to_incremental_compacting);
+ break;
+ case INCREMENTAL:
+ ASSERT(GetMode(stub) == STORE_BUFFER_ONLY);
+ patcher.b(offset_to_incremental_noncompacting >> kInstructionSizeLog2);
+ patcher.adr(xzr, offset_to_incremental_compacting);
+ break;
+ case INCREMENTAL_COMPACTION:
+ ASSERT(GetMode(stub) == STORE_BUFFER_ONLY);
+ patcher.adr(xzr, offset_to_incremental_noncompacting);
+ patcher.b(offset_to_incremental_compacting >> kInstructionSizeLog2);
+ break;
+ }
+ ASSERT(GetMode(stub) == mode);
+ }
+
+ private:
+ // This is a helper class to manage the registers associated with the stub.
+ // The 'object' and 'address' registers must be preserved.
+ class RegisterAllocation {
+ public:
+ RegisterAllocation(Register object,
+ Register address,
+ Register scratch)
+ : object_(object),
+ address_(address),
+ scratch0_(scratch),
+ saved_regs_(kCallerSaved) {
+ ASSERT(!AreAliased(scratch, object, address));
+
+ // We would like to require more scratch registers for this stub,
+ // but the number of registers comes down to the ones used in
+ // FullCodeGen::SetVar(), which is architecture independent.
+ // We allocate 2 extra scratch registers that we'll save on the stack.
+ CPURegList pool_available = GetValidRegistersForAllocation();
+ CPURegList used_regs(object, address, scratch);
+ pool_available.Remove(used_regs);
+ scratch1_ = Register(pool_available.PopLowestIndex());
+ scratch2_ = Register(pool_available.PopLowestIndex());
+
+ // SaveCallerRegisters method needs to save caller saved register, however
+ // we don't bother saving ip0 and ip1 because they are used as scratch
+ // registers by the MacroAssembler.
+ saved_regs_.Remove(ip0);
+ saved_regs_.Remove(ip1);
+
+ // The scratch registers will be restored by other means so we don't need
+ // to save them with the other caller saved registers.
+ saved_regs_.Remove(scratch0_);
+ saved_regs_.Remove(scratch1_);
+ saved_regs_.Remove(scratch2_);
+ }
+
+ void Save(MacroAssembler* masm) {
+ // We don't have to save scratch0_ because it was given to us as
+ // a scratch register.
+ masm->Push(scratch1_, scratch2_);
+ }
+
+ void Restore(MacroAssembler* masm) {
+ masm->Pop(scratch2_, scratch1_);
+ }
+
+ // If we have to call into C then we need to save and restore all caller-
+ // saved registers that were not already preserved.
+ void SaveCallerSaveRegisters(MacroAssembler* masm, SaveFPRegsMode mode) {
+ // TODO(all): This can be very expensive, and it is likely that not every
+ // register will need to be preserved. Can we improve this?
+ masm->PushCPURegList(saved_regs_);
+ if (mode == kSaveFPRegs) {
+ masm->PushCPURegList(kCallerSavedFP);
+ }
+ }
+
+ void RestoreCallerSaveRegisters(MacroAssembler*masm, SaveFPRegsMode mode) {
+ // TODO(all): This can be very expensive, and it is likely that not every
+ // register will need to be preserved. Can we improve this?
+ if (mode == kSaveFPRegs) {
+ masm->PopCPURegList(kCallerSavedFP);
+ }
+ masm->PopCPURegList(saved_regs_);
+ }
+
+ Register object() { return object_; }
+ Register address() { return address_; }
+ Register scratch0() { return scratch0_; }
+ Register scratch1() { return scratch1_; }
+ Register scratch2() { return scratch2_; }
+
+ private:
+ Register object_;
+ Register address_;
+ Register scratch0_;
+ Register scratch1_;
+ Register scratch2_;
+ CPURegList saved_regs_;
+
+ // TODO(all): We should consider moving this somewhere else.
+ static CPURegList GetValidRegistersForAllocation() {
+ // The list of valid registers for allocation is defined as all the
+ // registers without those with a special meaning.
+ //
+ // The default list excludes registers x26 to x31 because they are
+ // reserved for the following purpose:
+ // - x26 root register
+ // - x27 context pointer register
+ // - x28 jssp
+ // - x29 frame pointer
+ // - x30 link register(lr)
+ // - x31 xzr/stack pointer
+ CPURegList list(CPURegister::kRegister, kXRegSize, 0, 25);
+
+ // We also remove MacroAssembler's scratch registers.
+ list.Remove(ip0);
+ list.Remove(ip1);
+ list.Remove(x8);
+ list.Remove(x9);
+
+ return list;
+ }
+
+ friend class RecordWriteStub;
+ };
+
+ // A list of stub variants which are pregenerated.
+ // The variants are stored in the same format as the minor key, so
+ // MinorKeyFor() can be used to populate and check this list.
+ static const int kAheadOfTime[];
+
+ void Generate(MacroAssembler* masm);
+ void GenerateIncremental(MacroAssembler* masm, Mode mode);
+
+ enum OnNoNeedToInformIncrementalMarker {
+ kReturnOnNoNeedToInformIncrementalMarker,
+ kUpdateRememberedSetOnNoNeedToInformIncrementalMarker
+ };
+
+ void CheckNeedsToInformIncrementalMarker(
+ MacroAssembler* masm,
+ OnNoNeedToInformIncrementalMarker on_no_need,
+ Mode mode);
+ void InformIncrementalMarker(MacroAssembler* masm, Mode mode);
+
+ Major MajorKey() { return RecordWrite; }
+
+ int MinorKey() {
+ return MinorKeyFor(object_, value_, address_, remembered_set_action_,
+ save_fp_regs_mode_);
+ }
+
+ // Packs the three register codes and the two mode flags into the minor
+ // key, using the BitField classes declared below.
+ static int MinorKeyFor(Register object,
+ Register value,
+ Register address,
+ RememberedSetAction action,
+ SaveFPRegsMode fp_mode) {
+ ASSERT(object.Is64Bits());
+ ASSERT(value.Is64Bits());
+ ASSERT(address.Is64Bits());
+ return ObjectBits::encode(object.code()) |
+ ValueBits::encode(value.code()) |
+ AddressBits::encode(address.code()) |
+ RememberedSetActionBits::encode(action) |
+ SaveFPRegsModeBits::encode(fp_mode);
+ }
+
+ void Activate(Code* code) {
+ code->GetHeap()->incremental_marking()->ActivateGeneratedStub(code);
+ }
+
+ class ObjectBits: public BitField<int, 0, 5> {};
+ class ValueBits: public BitField<int, 5, 5> {};
+ class AddressBits: public BitField<int, 10, 5> {};
+ class RememberedSetActionBits: public BitField<RememberedSetAction, 15, 1> {};
+ class SaveFPRegsModeBits: public BitField<SaveFPRegsMode, 16, 1> {};
+
+ Register object_;
+ Register value_;
+ Register address_;
+ RememberedSetAction remembered_set_action_;
+ SaveFPRegsMode save_fp_regs_mode_;
+ Label slow_;
+ RegisterAllocation regs_;
+};
+
+
+// Helper to call C++ functions from generated code. The caller must prepare
+// the exit frame before doing the call with GenerateCall.
+class DirectCEntryStub: public PlatformCodeStub {
+ public:
+ DirectCEntryStub() {}
+ void Generate(MacroAssembler* masm);
+ void GenerateCall(MacroAssembler* masm, Register target);
+
+ private:
+ Major MajorKey() { return DirectCEntry; }
+ int MinorKey() { return 0; }
+
+ // NOTE(review): immovable presumably because the generated code embeds
+ // its own address (e.g. as a return address) — confirm against Generate().
+ bool NeedsImmovableCode() { return true; }
+};
+
+
+// Stub for NameDictionary probing; the lookup mode (positive or negative)
+// is the only parameter and is encoded in the minor key.
+class NameDictionaryLookupStub: public PlatformCodeStub {
+ public:
+ enum LookupMode { POSITIVE_LOOKUP, NEGATIVE_LOOKUP };
+
+ explicit NameDictionaryLookupStub(LookupMode mode) : mode_(mode) { }
+
+ void Generate(MacroAssembler* masm);
+
+ static void GenerateNegativeLookup(MacroAssembler* masm,
+ Label* miss,
+ Label* done,
+ Register receiver,
+ Register properties,
+ Handle<Name> name,
+ Register scratch0);
+
+ static void GeneratePositiveLookup(MacroAssembler* masm,
+ Label* miss,
+ Label* done,
+ Register elements,
+ Register name,
+ Register scratch1,
+ Register scratch2);
+
+ virtual bool SometimesSetsUpAFrame() { return false; }
+
+ private:
+ // Probe counts: the first kInlinedProbes are emitted inline; the stub
+ // gives up after kTotalProbes.
+ static const int kInlinedProbes = 4;
+ static const int kTotalProbes = 20;
+
+ // Byte offsets of the capacity and first-element fields within a
+ // NameDictionary, used for direct field access from generated code.
+ static const int kCapacityOffset =
+ NameDictionary::kHeaderSize +
+ NameDictionary::kCapacityIndex * kPointerSize;
+
+ static const int kElementsStartOffset =
+ NameDictionary::kHeaderSize +
+ NameDictionary::kElementsStartIndex * kPointerSize;
+
+ Major MajorKey() { return NameDictionaryLookup; }
+
+ int MinorKey() {
+ return LookupModeBits::encode(mode_);
+ }
+
+ class LookupModeBits: public BitField<LookupMode, 0, 1> {};
+
+ LookupMode mode_;
+};
+
+
+// Platform code stub for SubString; carries no parameters (minor key 0).
+class SubStringStub: public PlatformCodeStub {
+ public:
+ SubStringStub() {}
+
+ private:
+ Major MajorKey() { return SubString; }
+ int MinorKey() { return 0; }
+
+ void Generate(MacroAssembler* masm);
+};
+
+
+// Platform code stub for string comparison, with static helpers for flat
+// ASCII string comparison and equality.
+class StringCompareStub: public PlatformCodeStub {
+ public:
+ StringCompareStub() { }
+
+ // Compares two flat ASCII strings and returns result in x0.
+ static void GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
+ Register left,
+ Register right,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Register scratch4);
+
+ // Compare two flat ASCII strings for equality and returns result
+ // in x0.
+ static void GenerateFlatAsciiStringEquals(MacroAssembler* masm,
+ Register left,
+ Register right,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3);
+
+ private:
+ virtual Major MajorKey() { return StringCompare; }
+ virtual int MinorKey() { return 0; }
+ virtual void Generate(MacroAssembler* masm);
+
+ // Emits the character-by-character comparison loop shared by the two
+ // helpers above.
+ static void GenerateAsciiCharsCompareLoop(MacroAssembler* masm,
+ Register left,
+ Register right,
+ Register length,
+ Register scratch1,
+ Register scratch2,
+ Label* chars_not_equal);
+};
+
+
+// Platform-specific data attached to call interface descriptors: wraps the
+// TargetAddressStorageMode used on this architecture.
+struct PlatformCallInterfaceDescriptor {
+ explicit PlatformCallInterfaceDescriptor(
+ TargetAddressStorageMode storage_mode)
+ : storage_mode_(storage_mode) { }
+
+ TargetAddressStorageMode storage_mode() { return storage_mode_; }
+
+ private:
+ TargetAddressStorageMode storage_mode_;
+};
+
+
+} } // namespace v8::internal
+
+#endif // V8_A64_CODE_STUBS_A64_H_
--- /dev/null
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#if V8_TARGET_ARCH_A64
+
+#include "codegen.h"
+#include "macro-assembler.h"
+#include "simulator-a64.h"
+
+namespace v8 {
+namespace internal {
+
+#define __ ACCESS_MASM(masm)
+
+#if defined(USE_SIMULATOR)
+// Under the simulator the generated fast-exp machine code cannot be called
+// directly as a host function, so route calls through Simulator::CallDouble.
+byte* fast_exp_a64_machine_code = NULL;
+double fast_exp_simulator(double x) {
+ Simulator * simulator = Simulator::current(Isolate::Current());
+ return simulator->CallDouble(fast_exp_a64_machine_code,
+ Simulator::CallArgument(x),
+ Simulator::CallArgument::End());
+}
+#endif
+
+
+UnaryMathFunction CreateExpFunction() {
+ // Without --fast-math, just use the C library.
+ if (!FLAG_fast_math) return &std::exp;
+
+ // Use the Math.exp implementation in MathExpGenerator::EmitMathExp() to
+ // create an AAPCS64-compliant exp() function. This will be faster than the C
+ // library's exp() function, but probably less accurate.
+ size_t actual_size;
+ byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB, &actual_size, true));
+ // Fall back to the C library if the code buffer cannot be allocated.
+ if (buffer == NULL) return &std::exp;
+
+ ExternalReference::InitializeMathExpData();
+ MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
+ masm.SetStackPointer(csp);
+
+ // The argument will be in d0 on entry.
+ DoubleRegister input = d0;
+ // Use other caller-saved registers for all other values.
+ DoubleRegister result = d1;
+ DoubleRegister double_temp1 = d2;
+ DoubleRegister double_temp2 = d3;
+ Register temp1 = x10;
+ Register temp2 = x11;
+ Register temp3 = x12;
+
+ MathExpGenerator::EmitMathExp(&masm, input, result,
+ double_temp1, double_temp2,
+ temp1, temp2, temp3);
+ // Move the result to the return register.
+ masm.Fmov(d0, result);
+ masm.Ret();
+
+ CodeDesc desc;
+ masm.GetCode(&desc);
+ ASSERT(!RelocInfo::RequiresRelocation(desc));
+
+ // Make the freshly generated code executable and visible to the I-cache.
+ CPU::FlushICache(buffer, actual_size);
+ OS::ProtectCode(buffer, actual_size);
+
+#if !defined(USE_SIMULATOR)
+ return FUNCTION_CAST<UnaryMathFunction>(buffer);
+#else
+ fast_exp_a64_machine_code = buffer;
+ return &fast_exp_simulator;
+#endif
+}
+
+
+UnaryMathFunction CreateSqrtFunction() {
+ // No specialized sqrt code is generated on A64; use the C library directly.
+ return &std::sqrt;
+}
+
+
+// -------------------------------------------------------------------------
+// Platform-specific RuntimeCallHelper functions.
+
+void StubRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
+ // Enter an internal frame for a runtime call made from a stub and mark
+ // the assembler as having a frame (checked by frame-requiring code).
+ masm->EnterFrame(StackFrame::INTERNAL);
+ ASSERT(!masm->has_frame());
+ masm->set_has_frame(true);
+}
+
+
+void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
+ // Tear down the frame set up by BeforeCall and clear the frame flag.
+ masm->LeaveFrame(StackFrame::INTERNAL);
+ ASSERT(masm->has_frame());
+ masm->set_has_frame(false);
+}
+
+
+// -------------------------------------------------------------------------
+// Code generators
+
+void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
+ MacroAssembler* masm, AllocationSiteMode mode,
+ Label* allocation_memento_found) {
+ // Transition the receiver to the target map only — the backing store is
+ // left untouched — and emit the write barrier for the map field.
+ // ----------- S t a t e -------------
+ // -- x2 : receiver
+ // -- x3 : target map
+ // -----------------------------------
+ Register receiver = x2;
+ Register map = x3;
+
+ if (mode == TRACK_ALLOCATION_SITE) {
+ ASSERT(allocation_memento_found != NULL);
+ __ JumpIfJSArrayHasAllocationMemento(receiver, x10, x11,
+ allocation_memento_found);
+ }
+
+ // Set transitioned map.
+ __ Str(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
+ __ RecordWriteField(receiver,
+ HeapObject::kMapOffset,
+ map,
+ x10,
+ kLRHasNotBeenSaved,
+ kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
+}
+
+
+void ElementsTransitionGenerator::GenerateSmiToDouble(
+ MacroAssembler* masm, AllocationSiteMode mode, Label* fail) {
+ ASM_LOCATION("ElementsTransitionGenerator::GenerateSmiToDouble");
+ // ----------- S t a t e -------------
+ // -- lr : return address
+ // -- x0 : value
+ // -- x1 : key
+ // -- x2 : receiver
+ // -- x3 : target map, scratch for subsequent call
+ // -----------------------------------
+ Register receiver = x2;
+ Register target_map = x3;
+
+ Label gc_required, only_change_map;
+
+ if (mode == TRACK_ALLOCATION_SITE) {
+ __ JumpIfJSArrayHasAllocationMemento(receiver, x10, x11, fail);
+ }
+
+ // Check for empty arrays, which only require a map transition and no changes
+ // to the backing store.
+ Register elements = x4;
+ __ Ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
+ __ JumpIfRoot(elements, Heap::kEmptyFixedArrayRootIndex, &only_change_map);
+
+ __ Push(lr);
+ Register length = x5;
+ __ Ldrsw(length, UntagSmiFieldMemOperand(elements,
+ FixedArray::kLengthOffset));
+
+ // Allocate new FixedDoubleArray.
+ Register array_size = x6;
+ Register array = x7;
+ __ Lsl(array_size, length, kDoubleSizeLog2);
+ __ Add(array_size, array_size, FixedDoubleArray::kHeaderSize);
+ __ Allocate(array_size, array, x10, x11, &gc_required, DOUBLE_ALIGNMENT);
+ // Register array is non-tagged heap object.
+
+ // Set the destination FixedDoubleArray's length and map.
+ Register map_root = x6;
+ __ LoadRoot(map_root, Heap::kFixedDoubleArrayMapRootIndex);
+ __ SmiTag(x11, length);
+ __ Str(x11, MemOperand(array, FixedDoubleArray::kLengthOffset));
+ __ Str(map_root, MemOperand(array, HeapObject::kMapOffset));
+
+ __ Str(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
+ __ RecordWriteField(receiver, HeapObject::kMapOffset, target_map, x6,
+ kLRHasBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
+
+ // Replace receiver's backing store with newly created FixedDoubleArray.
+ __ Add(x10, array, kHeapObjectTag);
+ __ Str(x10, FieldMemOperand(receiver, JSObject::kElementsOffset));
+ __ RecordWriteField(receiver, JSObject::kElementsOffset, x10,
+ x6, kLRHasBeenSaved, kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
+
+ // Prepare for conversion loop.
+ Register src_elements = x10;
+ Register dst_elements = x11;
+ Register dst_end = x12;
+ __ Add(src_elements, elements, FixedArray::kHeaderSize - kHeapObjectTag);
+ __ Add(dst_elements, array, FixedDoubleArray::kHeaderSize);
+ __ Add(dst_end, dst_elements, Operand(length, LSL, kDoubleSizeLog2));
+
+ // Hole NaN used to mark non-smi elements in the destination.
+ FPRegister nan_d = d1;
+ __ Fmov(nan_d, rawbits_to_double(kHoleNanInt64));
+
+ Label entry, done;
+ __ B(&entry);
+
+ __ Bind(&only_change_map);
+ __ Str(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
+ __ RecordWriteField(receiver, HeapObject::kMapOffset, target_map, x6,
+ kLRHasNotBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
+ __ B(&done);
+
+ // Call into runtime if GC is required.
+ __ Bind(&gc_required);
+ __ Pop(lr);
+ __ B(fail);
+
+ // Iterate over the array, copying and converting smis to doubles. If an
+ // element is non-smi, write a hole to the destination.
+ {
+ Label loop;
+ __ Bind(&loop);
+ __ Ldr(x13, MemOperand(src_elements, kPointerSize, PostIndex));
+ // Speculatively untag-and-convert, then select the hole NaN instead if
+ // the element was not a smi (Tst sets eq when the smi tag bits are 0).
+ __ SmiUntagToDouble(d0, x13, kSpeculativeUntag);
+ __ Tst(x13, kSmiTagMask);
+ __ Fcsel(d0, d0, nan_d, eq);
+ __ Str(d0, MemOperand(dst_elements, kDoubleSize, PostIndex));
+
+ __ Bind(&entry);
+ __ Cmp(dst_elements, dst_end);
+ __ B(lt, &loop);
+ }
+
+ __ Pop(lr);
+ __ Bind(&done);
+}
+
+
+void ElementsTransitionGenerator::GenerateDoubleToObject(
+ MacroAssembler* masm, AllocationSiteMode mode, Label* fail) {
+ ASM_LOCATION("ElementsTransitionGenerator::GenerateDoubleToObject");
+ // ----------- S t a t e -------------
+ // -- x0 : value
+ // -- x1 : key
+ // -- x2 : receiver
+ // -- lr : return address
+ // -- x3 : target map, scratch for subsequent call
+ // -- x4 : scratch (elements)
+ // -----------------------------------
+ Register value = x0;
+ Register key = x1;
+ Register receiver = x2;
+ Register target_map = x3;
+
+ if (mode == TRACK_ALLOCATION_SITE) {
+ __ JumpIfJSArrayHasAllocationMemento(receiver, x10, x11, fail);
+ }
+
+ // Check for empty arrays, which only require a map transition and no changes
+ // to the backing store.
+ Label only_change_map;
+ Register elements = x4;
+ __ Ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
+ __ JumpIfRoot(elements, Heap::kEmptyFixedArrayRootIndex, &only_change_map);
+
+ __ Push(lr);
+ // TODO(all): These registers may not need to be pushed. Examine
+ // RecordWriteStub and check whether it's needed.
+ __ Push(target_map, receiver, key, value);
+ Register length = x5;
+ __ Ldrsw(length, UntagSmiFieldMemOperand(elements,
+ FixedArray::kLengthOffset));
+
+ // Allocate new FixedArray.
+ Register array_size = x6;
+ Register array = x7;
+ Label gc_required;
+ __ Mov(array_size, FixedDoubleArray::kHeaderSize);
+ __ Add(array_size, array_size, Operand(length, LSL, kPointerSizeLog2));
+ __ Allocate(array_size, array, x10, x11, &gc_required, NO_ALLOCATION_FLAGS);
+
+ // Set destination FixedDoubleArray's length and map.
+ Register map_root = x6;
+ __ LoadRoot(map_root, Heap::kFixedArrayMapRootIndex);
+ __ SmiTag(x11, length);
+ __ Str(x11, MemOperand(array, FixedDoubleArray::kLengthOffset));
+ __ Str(map_root, MemOperand(array, HeapObject::kMapOffset));
+
+ // Prepare for conversion loop.
+ Register src_elements = x10;
+ Register dst_elements = x11;
+ Register dst_end = x12;
+ __ Add(src_elements, elements,
+ FixedDoubleArray::kHeaderSize - kHeapObjectTag);
+ __ Add(dst_elements, array, FixedArray::kHeaderSize);
+ __ Add(array, array, kHeapObjectTag);
+ __ Add(dst_end, dst_elements, Operand(length, LSL, kPointerSizeLog2));
+
+ Register the_hole = x14;
+ Register heap_num_map = x15;
+ __ LoadRoot(the_hole, Heap::kTheHoleValueRootIndex);
+ __ LoadRoot(heap_num_map, Heap::kHeapNumberMapRootIndex);
+
+ Label entry;
+ __ B(&entry);
+
+ // Call into runtime if GC is required.
+ __ Bind(&gc_required);
+ __ Pop(value, key, receiver, target_map);
+ __ Pop(lr);
+ __ B(fail);
+
+ // Iterate over the array, boxing each double into a new heap number; the
+ // hole NaN bit pattern becomes the hole value.
+ {
+ Label loop, convert_hole;
+ __ Bind(&loop);
+ __ Ldr(x13, MemOperand(src_elements, kPointerSize, PostIndex));
+ __ Cmp(x13, kHoleNanInt64);
+ __ B(eq, &convert_hole);
+
+ // Non-hole double, copy value into a heap number.
+ Register heap_num = x5;
+ __ AllocateHeapNumber(heap_num, &gc_required, x6, x4, heap_num_map);
+ __ Str(x13, FieldMemOperand(heap_num, HeapNumber::kValueOffset));
+ // Remember the slot address for the write barrier before dst_elements
+ // is post-incremented by the store.
+ __ Mov(x13, dst_elements);
+ __ Str(heap_num, MemOperand(dst_elements, kPointerSize, PostIndex));
+ __ RecordWrite(array, x13, heap_num, kLRHasBeenSaved, kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
+
+ __ B(&entry);
+
+ // Replace the-hole NaN with the-hole pointer.
+ __ Bind(&convert_hole);
+ __ Str(the_hole, MemOperand(dst_elements, kPointerSize, PostIndex));
+
+ __ Bind(&entry);
+ __ Cmp(dst_elements, dst_end);
+ __ B(lt, &loop);
+ }
+
+ __ Pop(value, key, receiver, target_map);
+ // Replace receiver's backing store with newly created and filled FixedArray.
+ __ Str(array, FieldMemOperand(receiver, JSObject::kElementsOffset));
+ __ RecordWriteField(receiver, JSObject::kElementsOffset, array, x13,
+ kLRHasBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
+ __ Pop(lr);
+
+ __ Bind(&only_change_map);
+ __ Str(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
+ __ RecordWriteField(receiver, HeapObject::kMapOffset, target_map, x13,
+ kLRHasNotBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
+}
+
+
+bool Code::IsYoungSequence(byte* sequence) {
+ // Delegate the code-age check to the platform macro assembler.
+ return MacroAssembler::IsYoungSequence(sequence);
+}
+
+
+void Code::GetCodeAgeAndParity(byte* sequence, Age* age,
+ MarkingParity* parity) {
+ if (IsYoungSequence(sequence)) {
+ *age = kNoAgeCodeAge;
+ *parity = NO_MARKING_PARITY;
+ } else {
+ // Aged code embeds a pointer to its code-age stub at a fixed offset in
+ // the sequence; decode age and parity from that stub.
+ byte* target = sequence + kCodeAgeStubEntryOffset;
+ Code* stub = GetCodeFromTargetAddress(Memory::Address_at(target));
+ GetCodeAgeAndParity(stub, age, parity);
+ }
+}
+
+
+void Code::PatchPlatformCodeAge(Isolate* isolate,
+ byte* sequence,
+ Code::Age age,
+ MarkingParity parity) {
+ // Rewrite the code-age sequence in place: young code gets the normal
+ // frame-setup sequence, aged code gets a sequence referencing the
+ // appropriate code-age stub.
+ PatchingAssembler patcher(sequence, kCodeAgeSequenceSize / kInstructionSize);
+ if (age == kNoAgeCodeAge) {
+ MacroAssembler::EmitFrameSetupForCodeAgePatching(&patcher);
+ } else {
+ Code * stub = GetCodeAgeStub(isolate, age, parity);
+ MacroAssembler::EmitCodeAgeSequence(&patcher, stub);
+ }
+}
+
+
+// Loads the character at |index| of |string| into |result|, handling sliced,
+// cons, sequential and external string representations inline. Jumps to
+// |call_runtime| for cases that must be handled by the runtime (non-flat cons
+// strings and short external strings). |index| must be untagged; |result|
+// temporarily holds the string's instance type before receiving the character.
+void StringCharLoadGenerator::Generate(MacroAssembler* masm,
+                                       Register string,
+                                       Register index,
+                                       Register result,
+                                       Label* call_runtime) {
+  // Fetch the instance type of the receiver into result register.
+  __ Ldr(result, FieldMemOperand(string, HeapObject::kMapOffset));
+  __ Ldrb(result, FieldMemOperand(result, Map::kInstanceTypeOffset));
+
+  // We need special handling for indirect strings.
+  Label check_sequential;
+  __ TestAndBranchIfAllClear(result, kIsIndirectStringMask, &check_sequential);
+
+  // Dispatch on the indirect string shape: slice or cons.
+  Label cons_string;
+  __ TestAndBranchIfAllClear(result, kSlicedNotConsMask, &cons_string);
+
+  // Handle slices.
+  Label indirect_string_loaded;
+  // A slice is (parent, offset): redirect to the parent and bias the index
+  // by the slice offset (loaded as an untagged value).
+  __ Ldrsw(result,
+           UntagSmiFieldMemOperand(string, SlicedString::kOffsetOffset));
+  __ Ldr(string, FieldMemOperand(string, SlicedString::kParentOffset));
+  __ Add(index, index, result);
+  __ B(&indirect_string_loaded);
+
+  // Handle cons strings.
+  // Check whether the right hand side is the empty string (i.e. if
+  // this is really a flat string in a cons string). If that is not
+  // the case we would rather go to the runtime system now to flatten
+  // the string.
+  __ Bind(&cons_string);
+  __ Ldr(result, FieldMemOperand(string, ConsString::kSecondOffset));
+  __ JumpIfNotRoot(result, Heap::kempty_stringRootIndex, call_runtime);
+  // Get the first of the two strings and load its instance type.
+  __ Ldr(string, FieldMemOperand(string, ConsString::kFirstOffset));
+
+  // After following a slice parent or cons first child, re-load the (possibly
+  // different) instance type of the underlying string.
+  __ Bind(&indirect_string_loaded);
+  __ Ldr(result, FieldMemOperand(string, HeapObject::kMapOffset));
+  __ Ldrb(result, FieldMemOperand(result, Map::kInstanceTypeOffset));
+
+  // Distinguish sequential and external strings. Only these two string
+  // representations can reach here (slices and flat cons strings have been
+  // reduced to the underlying sequential or external string).
+  Label external_string, check_encoding;
+  __ Bind(&check_sequential);
+  STATIC_ASSERT(kSeqStringTag == 0);
+  __ TestAndBranchIfAnySet(result, kStringRepresentationMask, &external_string);
+
+  // Prepare sequential strings
+  STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
+  __ Add(string, string, SeqTwoByteString::kHeaderSize - kHeapObjectTag);
+  __ B(&check_encoding);
+
+  // Handle external strings.
+  __ Bind(&external_string);
+  if (FLAG_debug_code) {
+    // Assert that we do not have a cons or slice (indirect strings) here.
+    // Sequential strings have already been ruled out.
+    __ Tst(result, kIsIndirectStringMask);
+    __ Assert(eq, kExternalStringExpectedButNotFound);
+  }
+  // Rule out short external strings.
+  STATIC_CHECK(kShortExternalStringTag != 0);
+  // TestAndBranchIfAnySet can emit Tbnz. Do not use it because call_runtime
+  // can be bound far away in deferred code.
+  __ Tst(result, kShortExternalStringMask);
+  __ B(ne, call_runtime);
+  // For external strings, point directly at the backing character data.
+  __ Ldr(string, FieldMemOperand(string, ExternalString::kResourceDataOffset));
+
+  Label ascii, done;
+  __ Bind(&check_encoding);
+  STATIC_ASSERT(kTwoByteStringTag == 0);
+  __ TestAndBranchIfAnySet(result, kStringEncodingMask, &ascii);
+  // Two-byte string.
+  __ Ldrh(result, MemOperand(string, index, LSL, 1));
+  __ B(&done);
+  __ Bind(&ascii);
+  // Ascii string.
+  __ Ldrb(result, MemOperand(string, index));
+  __ Bind(&done);
+}
+
+
+// Returns the memory operand addressing the |index|-th double-sized constant
+// in the math-exp constants array, whose start address is held in |base|.
+static MemOperand ExpConstant(Register base, int index) {
+  return MemOperand(base, index * kDoubleSize);
+}
+
+
+// Emits code computing an approximation of exp(input) into |result|, using
+// the magic constants and lookup table prepared by
+// ExternalReference::InitializeMathExpData(). All temp registers are
+// clobbered; |result| is additionally reused as a third double scratch
+// (double_temp3 below), so it must not alias |input|.
+void MathExpGenerator::EmitMathExp(MacroAssembler* masm,
+                                   DoubleRegister input,
+                                   DoubleRegister result,
+                                   DoubleRegister double_temp1,
+                                   DoubleRegister double_temp2,
+                                   Register temp1,
+                                   Register temp2,
+                                   Register temp3) {
+  // TODO(jbramley): There are several instances where fnmsub could be used
+  // instead of fmul and fsub. Doing this changes the result, but since this is
+  // an estimation anyway, does it matter?
+
+  ASSERT(!AreAliased(input, result,
+                     double_temp1, double_temp2,
+                     temp1, temp2, temp3));
+  ASSERT(ExternalReference::math_exp_constants(0).address() != NULL);
+
+  Label done;
+  DoubleRegister double_temp3 = result;  // |result| doubles as scratch.
+  Register constants = temp3;
+
+  // The algorithm used relies on some magic constants which are initialized in
+  // ExternalReference::InitializeMathExpData().
+
+  // Load the address of the start of the array.
+  __ Mov(constants, Operand(ExternalReference::math_exp_constants(0)));
+
+  // We have to do a four-way split here:
+  // - If input <= about -708.4, the output always rounds to zero.
+  // - If input >= about 709.8, the output always rounds to +infinity.
+  // - If the input is NaN, the output is NaN.
+  // - Otherwise, the result needs to be calculated.
+  Label result_is_finite_non_zero;
+  // Assert that we can load offset 0 (the small input threshold) and offset 1
+  // (the large input threshold) with a single ldp.
+  ASSERT(kDRegSizeInBytes == (ExpConstant(constants, 1).offset() -
+                              ExpConstant(constants, 0).offset()));
+  __ Ldp(double_temp1, double_temp2, ExpConstant(constants, 0));
+
+  // Chain the two threshold comparisons through the flags: the Fccmp only
+  // performs the second compare when the first one produced 'hi'.
+  __ Fcmp(input, double_temp1);
+  __ Fccmp(input, double_temp2, NoFlag, hi);
+  // At this point, the condition flags can be in one of five states:
+  //   NZCV
+  //   1000      -708.4 < input < 709.8    result = exp(input)
+  //   0110      input == 709.8            result = +infinity
+  //   0010      input > 709.8             result = +infinity
+  //   0011      input is NaN              result = input
+  //   0000      input <= -708.4           result = +0.0
+
+  // Continue the common case first. 'mi' tests N == 1.
+  __ B(&result_is_finite_non_zero, mi);
+
+  // TODO(jbramley): Add (and use) a zero D register for A64.
+  // TODO(jbramley): Consider adding a +infinity register for A64.
+  __ Ldr(double_temp2, ExpConstant(constants, 2));    // Synthesize +infinity.
+  __ Fsub(double_temp1, double_temp1, double_temp1);  // Synthesize +0.0.
+
+  // Select between +0.0 and +infinity. 'lo' tests C == 0.
+  __ Fcsel(result, double_temp1, double_temp2, lo);
+  // Select between {+0.0 or +infinity} and input. 'vc' tests V == 0.
+  __ Fcsel(result, result, input, vc);
+  __ B(&done);
+
+  // The rest is magic, as described in InitializeMathExpData().
+  __ Bind(&result_is_finite_non_zero);
+
+  // Assert that we can load offset 3 and offset 4 with a single ldp.
+  ASSERT(kDRegSizeInBytes == (ExpConstant(constants, 4).offset() -
+                              ExpConstant(constants, 3).offset()));
+  __ Ldp(double_temp1, double_temp3, ExpConstant(constants, 3));
+  __ Fmadd(double_temp1, double_temp1, input, double_temp3);
+  // Capture the low 32 bits of the rounded intermediate for the table index
+  // and exponent computation below.
+  __ Fmov(temp2.W(), double_temp1.S());
+  __ Fsub(double_temp1, double_temp1, double_temp3);
+
+  // Assert that we can load offset 5 and offset 6 with a single ldp.
+  ASSERT(kDRegSizeInBytes == (ExpConstant(constants, 6).offset() -
+                              ExpConstant(constants, 5).offset()));
+  __ Ldp(double_temp2, double_temp3, ExpConstant(constants, 5));
+  // TODO(jbramley): Consider using Fnmsub here.
+  __ Fmul(double_temp1, double_temp1, double_temp2);
+  __ Fsub(double_temp1, double_temp1, input);
+
+  __ Fmul(double_temp2, double_temp1, double_temp1);
+  __ Fsub(double_temp3, double_temp3, double_temp1);
+  __ Fmul(double_temp3, double_temp3, double_temp2);
+
+  __ Mov(temp1.W(), Operand(temp2.W(), LSR, 11));
+
+  __ Ldr(double_temp2, ExpConstant(constants, 7));
+  // TODO(jbramley): Consider using Fnmsub here.
+  __ Fmul(double_temp3, double_temp3, double_temp2);
+  __ Fsub(double_temp3, double_temp3, double_temp1);
+
+  // The 8th constant is 1.0, so use an immediate move rather than a load.
+  // We can't generate a runtime assertion here as we would need to call Abort
+  // in the runtime and we don't have an Isolate when we generate this code.
+  __ Fmov(double_temp2, 1.0);
+  __ Fadd(double_temp3, double_temp3, double_temp2);
+
+  __ And(temp2, temp2, 0x7ff);
+  __ Add(temp1, temp1, 0x3ff);
+
+  // Do the final table lookup.
+  __ Mov(temp3, Operand(ExternalReference::math_exp_log_table()));
+
+  __ Add(temp3, temp3, Operand(temp2, LSL, kDRegSizeInBytesLog2));
+  __ Ldp(temp2.W(), temp3.W(), MemOperand(temp3));
+  // Splice the biased exponent (temp1) into the high word of the table value
+  // to build the final scaling factor as a raw double bit pattern.
+  __ Orr(temp1.W(), temp3.W(), Operand(temp1.W(), LSL, 20));
+  __ Bfi(temp2, temp1, 32, 32);
+  __ Fmov(double_temp1, temp2);
+
+  __ Fmul(result, double_temp3, double_temp1);
+
+  __ Bind(&done);
+}
+
+#undef __
+
+} } // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_A64
--- /dev/null
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_A64_CODEGEN_A64_H_
+#define V8_A64_CODEGEN_A64_H_
+
+#include "ast.h"
+#include "ic-inl.h"
+
+namespace v8 {
+namespace internal {
+
+class StringCharLoadGenerator : public AllStatic {
+ public:
+  // Generates the code for handling different string types and loading the
+  // indexed character into |result|. We expect |index| as untagged input and
+  // |result| as untagged output. Jumps to |call_runtime| for string shapes
+  // that cannot be handled inline (e.g. non-flat cons strings).
+  static void Generate(MacroAssembler* masm,
+                       Register string,
+                       Register index,
+                       Register result,
+                       Label* call_runtime);
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(StringCharLoadGenerator);
+};
+
+
+class MathExpGenerator : public AllStatic {
+ public:
+  // Generates code approximating exp(input) into |result|, using constants
+  // and a lookup table set up by ExternalReference::InitializeMathExpData().
+  // All scratch registers are clobbered; |result| must not alias |input|.
+  static void EmitMathExp(MacroAssembler* masm,
+                          DoubleRegister input,
+                          DoubleRegister result,
+                          DoubleRegister double_scratch1,
+                          DoubleRegister double_scratch2,
+                          Register temp1,
+                          Register temp2,
+                          Register temp3);
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(MathExpGenerator);
+};
+
+} } // namespace v8::internal
+
+#endif // V8_A64_CODEGEN_A64_H_
--- /dev/null
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_A64_CONSTANTS_A64_H_
+#define V8_A64_CONSTANTS_A64_H_
+
+
+// Assert that this is an LP64 system.
+STATIC_ASSERT(sizeof(int) == sizeof(int32_t)); // NOLINT(runtime/sizeof)
+STATIC_ASSERT(sizeof(long) == sizeof(int64_t)); // NOLINT(runtime/int)
+STATIC_ASSERT(sizeof(void *) == sizeof(int64_t)); // NOLINT(runtime/sizeof)
+STATIC_ASSERT(sizeof(1) == sizeof(int32_t)); // NOLINT(runtime/sizeof)
+STATIC_ASSERT(sizeof(1L) == sizeof(int64_t)); // NOLINT(runtime/sizeof)
+
+
+// Get the standard printf format macros for C99 stdint types.
+#define __STDC_FORMAT_MACROS
+#include <inttypes.h>
+
+
+namespace v8 {
+namespace internal {
+
+
+const unsigned kInstructionSize = 4;
+const unsigned kInstructionSizeLog2 = 2;
+const unsigned kLiteralEntrySize = 4;
+const unsigned kLiteralEntrySizeLog2 = 2;
+const unsigned kMaxLoadLiteralRange = 1 * MB;
+
+const unsigned kNumberOfRegisters = 32;
+const unsigned kNumberOfFPRegisters = 32;
+// Callee saved registers are x19-x30(lr).
+const int kNumberOfCalleeSavedRegisters = 11;
+const int kFirstCalleeSavedRegisterIndex = 19;
+// Callee saved FP registers are d8-d15.
+const int kNumberOfCalleeSavedFPRegisters = 8;
+const int kFirstCalleeSavedFPRegisterIndex = 8;
+// Callee saved registers with no specific purpose in JS are x19-x25.
+const unsigned kJSCalleeSavedRegList = 0x03f80000;
+// TODO(all): k<Y>RegSize should probably be k<Y>RegSizeInBits.
+const unsigned kWRegSize = 32;
+const unsigned kWRegSizeLog2 = 5;
+const unsigned kWRegSizeInBytes = kWRegSize >> 3;
+const unsigned kWRegSizeInBytesLog2 = kWRegSizeLog2 - 3;
+const unsigned kXRegSize = 64;
+const unsigned kXRegSizeLog2 = 6;
+const unsigned kXRegSizeInBytes = kXRegSize >> 3;
+const unsigned kXRegSizeInBytesLog2 = kXRegSizeLog2 - 3;
+const unsigned kSRegSize = 32;
+const unsigned kSRegSizeLog2 = 5;
+const unsigned kSRegSizeInBytes = kSRegSize >> 3;
+const unsigned kSRegSizeInBytesLog2 = kSRegSizeLog2 - 3;
+const unsigned kDRegSize = 64;
+const unsigned kDRegSizeLog2 = 6;
+const unsigned kDRegSizeInBytes = kDRegSize >> 3;
+const unsigned kDRegSizeInBytesLog2 = kDRegSizeLog2 - 3;
+const int64_t kWRegMask = 0x00000000ffffffffL;
+const int64_t kXRegMask = 0xffffffffffffffffL;
+const int64_t kSRegMask = 0x00000000ffffffffL;
+const int64_t kDRegMask = 0xffffffffffffffffL;
+// TODO(all) check if the expression below works on all compilers or if it
+// triggers an overflow error.
+const int64_t kDSignMask = 0x1L << 63;
+const int64_t kDSignBit = 63;
+const int64_t kXSignMask = 0x1L << 63;
+const int64_t kXSignBit = 63;
+const int64_t kWSignMask = 0x1L << 31;
+const int64_t kWSignBit = 31;
+const int64_t kByteMask = 0xffL;
+const int64_t kHalfWordMask = 0xffffL;
+const int64_t kWordMask = 0xffffffffL;
+const uint64_t kXMaxUInt = 0xffffffffffffffffUL;
+const uint64_t kWMaxUInt = 0xffffffffUL;
+const int64_t kXMaxInt = 0x7fffffffffffffffL;
+const int64_t kXMinInt = 0x8000000000000000L;
+const int32_t kWMaxInt = 0x7fffffff;
+const int32_t kWMinInt = 0x80000000;
+const unsigned kFramePointerRegCode = 29;
+const unsigned kLinkRegCode = 30;
+const unsigned kZeroRegCode = 31;
+const unsigned kJSSPCode = 28;
+const unsigned kSPRegInternalCode = 63;
+const unsigned kRegCodeMask = 0x1f;
+// Standard machine types defined by AAPCS64.
+const unsigned kByteSize = 8;
+const unsigned kByteSizeInBytes = kByteSize >> 3;
+const unsigned kHalfWordSize = 16;
+const unsigned kHalfWordSizeLog2 = 4;
+const unsigned kHalfWordSizeInBytes = kHalfWordSize >> 3;
+const unsigned kHalfWordSizeInBytesLog2 = kHalfWordSizeLog2 - 3;
+const unsigned kWordSize = 32;
+const unsigned kWordSizeLog2 = 5;
+const unsigned kWordSizeInBytes = kWordSize >> 3;
+const unsigned kWordSizeInBytesLog2 = kWordSizeLog2 - 3;
+const unsigned kDoubleWordSize = 64;
+const unsigned kDoubleWordSizeInBytes = kDoubleWordSize >> 3;
+const unsigned kQuadWordSize = 128;
+const unsigned kQuadWordSizeInBytes = kQuadWordSize >> 3;
+// AArch64 floating-point specifics. These match IEEE-754.
+const unsigned kDoubleMantissaBits = 52;
+const unsigned kDoubleExponentBits = 11;
+const unsigned kFloatMantissaBits = 23;
+const unsigned kFloatExponentBits = 8;
+
+#define REGISTER_CODE_LIST(R) \
+R(0) R(1) R(2) R(3) R(4) R(5) R(6) R(7) \
+R(8) R(9) R(10) R(11) R(12) R(13) R(14) R(15) \
+R(16) R(17) R(18) R(19) R(20) R(21) R(22) R(23) \
+R(24) R(25) R(26) R(27) R(28) R(29) R(30) R(31)
+
+#define INSTRUCTION_FIELDS_LIST(V_) \
+/* Register fields */ \
+V_(Rd, 4, 0, Bits) /* Destination register. */ \
+V_(Rn, 9, 5, Bits) /* First source register. */ \
+V_(Rm, 20, 16, Bits) /* Second source register. */ \
+V_(Ra, 14, 10, Bits) /* Third source register. */ \
+V_(Rt, 4, 0, Bits) /* Load dest / store source. */ \
+V_(Rt2, 14, 10, Bits) /* Load second dest / */ \
+ /* store second source. */ \
+V_(PrefetchMode, 4, 0, Bits) \
+ \
+/* Common bits */ \
+V_(SixtyFourBits, 31, 31, Bits) \
+V_(FlagsUpdate, 29, 29, Bits) \
+ \
+/* PC relative addressing */ \
+V_(ImmPCRelHi, 23, 5, SignedBits) \
+V_(ImmPCRelLo, 30, 29, Bits) \
+ \
+/* Add/subtract/logical shift register */ \
+V_(ShiftDP, 23, 22, Bits) \
+V_(ImmDPShift, 15, 10, Bits) \
+ \
+/* Add/subtract immediate */ \
+V_(ImmAddSub, 21, 10, Bits) \
+V_(ShiftAddSub, 23, 22, Bits) \
+ \
+/* Add/subtract extend */                                           \
+V_(ImmExtendShift, 12, 10, Bits) \
+V_(ExtendMode, 15, 13, Bits) \
+ \
+/* Move wide */ \
+V_(ImmMoveWide, 20, 5, Bits) \
+V_(ShiftMoveWide, 22, 21, Bits) \
+ \
+/* Logical immediate, bitfield and extract */ \
+V_(BitN, 22, 22, Bits) \
+V_(ImmRotate, 21, 16, Bits) \
+V_(ImmSetBits, 15, 10, Bits) \
+V_(ImmR, 21, 16, Bits) \
+V_(ImmS, 15, 10, Bits) \
+ \
+/* Test and branch immediate */ \
+V_(ImmTestBranch, 18, 5, SignedBits) \
+V_(ImmTestBranchBit40, 23, 19, Bits) \
+V_(ImmTestBranchBit5, 31, 31, Bits) \
+ \
+/* Conditionals */ \
+V_(Condition, 15, 12, Bits) \
+V_(ConditionBranch, 3, 0, Bits) \
+V_(Nzcv, 3, 0, Bits) \
+V_(ImmCondCmp, 20, 16, Bits) \
+V_(ImmCondBranch, 23, 5, SignedBits) \
+ \
+/* Floating point */ \
+V_(FPType, 23, 22, Bits) \
+V_(ImmFP, 20, 13, Bits) \
+V_(FPScale, 15, 10, Bits) \
+ \
+/* Load Store */ \
+V_(ImmLS, 20, 12, SignedBits) \
+V_(ImmLSUnsigned, 21, 10, Bits) \
+V_(ImmLSPair, 21, 15, SignedBits) \
+V_(SizeLS, 31, 30, Bits) \
+V_(ImmShiftLS, 12, 12, Bits) \
+ \
+/* Other immediates */ \
+V_(ImmUncondBranch, 25, 0, SignedBits) \
+V_(ImmCmpBranch, 23, 5, SignedBits) \
+V_(ImmLLiteral, 23, 5, SignedBits) \
+V_(ImmException, 20, 5, Bits) \
+V_(ImmHint, 11, 5, Bits) \
+V_(ImmBarrierDomain, 11, 10, Bits) \
+V_(ImmBarrierType, 9, 8, Bits) \
+ \
+/* System (MRS, MSR) */ \
+V_(ImmSystemRegister, 19, 5, Bits) \
+V_(SysO0, 19, 19, Bits) \
+V_(SysOp1, 18, 16, Bits) \
+V_(SysOp2, 7, 5, Bits) \
+V_(CRn, 15, 12, Bits) \
+V_(CRm, 11, 8, Bits) \
+
+
+#define SYSTEM_REGISTER_FIELDS_LIST(V_, M_) \
+/* NZCV */ \
+V_(Flags, 31, 28, Bits) \
+V_(N, 31, 31, Bits) \
+V_(Z, 30, 30, Bits) \
+V_(C, 29, 29, Bits) \
+V_(V, 28, 28, Bits) \
+M_(NZCV, Flags_mask) \
+ \
+/* FPCR */ \
+V_(AHP, 26, 26, Bits) \
+V_(DN, 25, 25, Bits) \
+V_(FZ, 24, 24, Bits) \
+V_(RMode, 23, 22, Bits) \
+M_(FPCR, AHP_mask | DN_mask | FZ_mask | RMode_mask)
+
+
+// Fields offsets.
+// For each field Foo listed above, define Foo_offset (position of its lowest
+// bit), Foo_width (size in bits) and Foo_mask (in-place extraction mask).
+#define DECLARE_FIELDS_OFFSETS(Name, HighBit, LowBit, X)           \
+const int Name##_offset = LowBit;                                  \
+const int Name##_width = HighBit - LowBit + 1;                     \
+const uint32_t Name##_mask = ((1 << Name##_width) - 1) << LowBit;
+#define NOTHING(A, B)
+INSTRUCTION_FIELDS_LIST(DECLARE_FIELDS_OFFSETS)
+SYSTEM_REGISTER_FIELDS_LIST(DECLARE_FIELDS_OFFSETS, NOTHING)
+#undef NOTHING
+#undef DECLARE_FIELDS_OFFSETS  // Was "DECLARE_FIELDS_BITS", a name that is
+                               // never defined; the helper macro leaked.
+
+// ImmPCRel is a compound field (not present in INSTRUCTION_FIELDS_LIST), formed
+// from ImmPCRelLo and ImmPCRelHi.
+const int ImmPCRel_mask = ImmPCRelLo_mask | ImmPCRelHi_mask;
+
+// Condition codes. The numeric values match the AArch64 condition encoding,
+// in which each condition at an even index is immediately followed by its
+// logical inverse (eq/ne, hs/lo, mi/pl, ...).
+enum Condition {
+  eq = 0,
+  ne = 1,
+  hs = 2,
+  lo = 3,
+  mi = 4,
+  pl = 5,
+  vs = 6,
+  vc = 7,
+  hi = 8,
+  ls = 9,
+  ge = 10,
+  lt = 11,
+  gt = 12,
+  le = 13,
+  al = 14,
+  nv = 15  // Behaves as always/al.
+};
+
+// Returns the logical inverse of |cond| by flipping the encoding's low bit
+// (see the pairing noted on the enum above).
+inline Condition InvertCondition(Condition cond) {
+  // Conditions al and nv behave identically, as "always true". They can't be
+  // inverted, because there is no never condition.
+  ASSERT((cond != al) && (cond != nv));
+  return static_cast<Condition>(cond ^ 1);
+}
+
+// Corresponds to transposing the operands of a comparison.
+// (E.g. "a < b" holds exactly when "b > a" does, so lt maps to gt.) Note this
+// is different from inverting the condition: eq maps to itself here.
+inline Condition ReverseConditionForCmp(Condition cond) {
+  switch (cond) {
+    case lo:
+      return hi;
+    case hi:
+      return lo;
+    case hs:
+      return ls;
+    case ls:
+      return hs;
+    case lt:
+      return gt;
+    case gt:
+      return lt;
+    case ge:
+      return le;
+    case le:
+      return ge;
+    case eq:
+      return eq;
+    default:
+      // In practice this function is only used with a condition coming from
+      // TokenToCondition in lithium-codegen-a64.cc. Any other condition is
+      // invalid as it doesn't necessary make sense to reverse it (consider
+      // 'mi' for instance).
+      UNREACHABLE();
+      return nv;
+  };
+}
+
+enum FlagsUpdate {
+ SetFlags = 1,
+ LeaveFlags = 0
+};
+
+enum StatusFlags {
+ NoFlag = 0,
+
+ // Derive the flag combinations from the system register bit descriptions.
+ NFlag = N_mask,
+ ZFlag = Z_mask,
+ CFlag = C_mask,
+ VFlag = V_mask,
+ NZFlag = NFlag | ZFlag,
+ NCFlag = NFlag | CFlag,
+ NVFlag = NFlag | VFlag,
+ ZCFlag = ZFlag | CFlag,
+ ZVFlag = ZFlag | VFlag,
+ CVFlag = CFlag | VFlag,
+ NZCFlag = NFlag | ZFlag | CFlag,
+ NZVFlag = NFlag | ZFlag | VFlag,
+ NCVFlag = NFlag | CFlag | VFlag,
+ ZCVFlag = ZFlag | CFlag | VFlag,
+ NZCVFlag = NFlag | ZFlag | CFlag | VFlag,
+
+ // Floating-point comparison results.
+ FPEqualFlag = ZCFlag,
+ FPLessThanFlag = NFlag,
+ FPGreaterThanFlag = CFlag,
+ FPUnorderedFlag = CVFlag
+};
+
+enum Shift {
+ NO_SHIFT = -1,
+ LSL = 0x0,
+ LSR = 0x1,
+ ASR = 0x2,
+ ROR = 0x3
+};
+
+enum Extend {
+ NO_EXTEND = -1,
+ UXTB = 0,
+ UXTH = 1,
+ UXTW = 2,
+ UXTX = 3,
+ SXTB = 4,
+ SXTH = 5,
+ SXTW = 6,
+ SXTX = 7
+};
+
+enum SystemHint {
+ NOP = 0,
+ YIELD = 1,
+ WFE = 2,
+ WFI = 3,
+ SEV = 4,
+ SEVL = 5
+};
+
+enum BarrierDomain {
+ OuterShareable = 0,
+ NonShareable = 1,
+ InnerShareable = 2,
+ FullSystem = 3
+};
+
+enum BarrierType {
+ BarrierOther = 0,
+ BarrierReads = 1,
+ BarrierWrites = 2,
+ BarrierAll = 3
+};
+
+// System/special register names.
+// This information is not encoded as one field but as the concatenation of
+// multiple fields (Op0<0>, Op1, Crn, Crm, Op2).
+enum SystemRegister {
+ NZCV = ((0x1 << SysO0_offset) |
+ (0x3 << SysOp1_offset) |
+ (0x4 << CRn_offset) |
+ (0x2 << CRm_offset) |
+ (0x0 << SysOp2_offset)) >> ImmSystemRegister_offset,
+ FPCR = ((0x1 << SysO0_offset) |
+ (0x3 << SysOp1_offset) |
+ (0x4 << CRn_offset) |
+ (0x4 << CRm_offset) |
+ (0x0 << SysOp2_offset)) >> ImmSystemRegister_offset
+};
+
+// Instruction enumerations.
+//
+// These are the masks that define a class of instructions, and the list of
+// instructions within each class. Each enumeration has a Fixed, FMask and
+// Mask value.
+//
+// Fixed: The fixed bits in this instruction class.
+// FMask: The mask used to extract the fixed bits in the class.
+// Mask: The mask used to identify the instructions within a class.
+//
+// The enumerations can be used like this:
+//
+// ASSERT(instr->Mask(PCRelAddressingFMask) == PCRelAddressingFixed);
+// switch(instr->Mask(PCRelAddressingMask)) {
+// case ADR: Format("adr 'Xd, 'AddrPCRelByte"); break;
+// case ADRP: Format("adrp 'Xd, 'AddrPCRelPage"); break;
+// default: printf("Unknown instruction\n");
+// }
+
+
+// Generic fields.
+enum GenericInstrField {
+ SixtyFourBits = 0x80000000,
+ ThirtyTwoBits = 0x00000000,
+ FP32 = 0x00000000,
+ FP64 = 0x00400000
+};
+
+// PC relative addressing.
+enum PCRelAddressingOp {
+ PCRelAddressingFixed = 0x10000000,
+ PCRelAddressingFMask = 0x1F000000,
+ PCRelAddressingMask = 0x9F000000,
+ ADR = PCRelAddressingFixed | 0x00000000,
+ ADRP = PCRelAddressingFixed | 0x80000000
+};
+
+// Add/sub (immediate, shifted and extended.)
+const int kSFOffset = 31;
+enum AddSubOp {
+ AddSubOpMask = 0x60000000,
+ AddSubSetFlagsBit = 0x20000000,
+ ADD = 0x00000000,
+ ADDS = ADD | AddSubSetFlagsBit,
+ SUB = 0x40000000,
+ SUBS = SUB | AddSubSetFlagsBit
+};
+
+#define ADD_SUB_OP_LIST(V) \
+ V(ADD), \
+ V(ADDS), \
+ V(SUB), \
+ V(SUBS)
+
+enum AddSubImmediateOp {
+ AddSubImmediateFixed = 0x11000000,
+ AddSubImmediateFMask = 0x1F000000,
+ AddSubImmediateMask = 0xFF000000,
+ #define ADD_SUB_IMMEDIATE(A) \
+ A##_w_imm = AddSubImmediateFixed | A, \
+ A##_x_imm = AddSubImmediateFixed | A | SixtyFourBits
+ ADD_SUB_OP_LIST(ADD_SUB_IMMEDIATE)
+ #undef ADD_SUB_IMMEDIATE
+};
+
+enum AddSubShiftedOp {
+ AddSubShiftedFixed = 0x0B000000,
+ AddSubShiftedFMask = 0x1F200000,
+ AddSubShiftedMask = 0xFF200000,
+ #define ADD_SUB_SHIFTED(A) \
+ A##_w_shift = AddSubShiftedFixed | A, \
+ A##_x_shift = AddSubShiftedFixed | A | SixtyFourBits
+ ADD_SUB_OP_LIST(ADD_SUB_SHIFTED)
+ #undef ADD_SUB_SHIFTED
+};
+
+enum AddSubExtendedOp {
+ AddSubExtendedFixed = 0x0B200000,
+ AddSubExtendedFMask = 0x1F200000,
+ AddSubExtendedMask = 0xFFE00000,
+ #define ADD_SUB_EXTENDED(A) \
+ A##_w_ext = AddSubExtendedFixed | A, \
+ A##_x_ext = AddSubExtendedFixed | A | SixtyFourBits
+ ADD_SUB_OP_LIST(ADD_SUB_EXTENDED)
+ #undef ADD_SUB_EXTENDED
+};
+
+// Add/sub with carry.
+enum AddSubWithCarryOp {
+ AddSubWithCarryFixed = 0x1A000000,
+ AddSubWithCarryFMask = 0x1FE00000,
+ AddSubWithCarryMask = 0xFFE0FC00,
+ ADC_w = AddSubWithCarryFixed | ADD,
+ ADC_x = AddSubWithCarryFixed | ADD | SixtyFourBits,
+ ADC = ADC_w,
+ ADCS_w = AddSubWithCarryFixed | ADDS,
+ ADCS_x = AddSubWithCarryFixed | ADDS | SixtyFourBits,
+ SBC_w = AddSubWithCarryFixed | SUB,
+ SBC_x = AddSubWithCarryFixed | SUB | SixtyFourBits,
+ SBC = SBC_w,
+ SBCS_w = AddSubWithCarryFixed | SUBS,
+ SBCS_x = AddSubWithCarryFixed | SUBS | SixtyFourBits
+};
+
+
+// Logical (immediate and shifted register).
+enum LogicalOp {
+ LogicalOpMask = 0x60200000,
+ NOT = 0x00200000,
+ AND = 0x00000000,
+ BIC = AND | NOT,
+ ORR = 0x20000000,
+ ORN = ORR | NOT,
+ EOR = 0x40000000,
+ EON = EOR | NOT,
+ ANDS = 0x60000000,
+ BICS = ANDS | NOT
+};
+
+// Logical immediate.
+enum LogicalImmediateOp {
+ LogicalImmediateFixed = 0x12000000,
+ LogicalImmediateFMask = 0x1F800000,
+ LogicalImmediateMask = 0xFF800000,
+ AND_w_imm = LogicalImmediateFixed | AND,
+ AND_x_imm = LogicalImmediateFixed | AND | SixtyFourBits,
+ ORR_w_imm = LogicalImmediateFixed | ORR,
+ ORR_x_imm = LogicalImmediateFixed | ORR | SixtyFourBits,
+ EOR_w_imm = LogicalImmediateFixed | EOR,
+ EOR_x_imm = LogicalImmediateFixed | EOR | SixtyFourBits,
+ ANDS_w_imm = LogicalImmediateFixed | ANDS,
+ ANDS_x_imm = LogicalImmediateFixed | ANDS | SixtyFourBits
+};
+
+// Logical shifted register.
+enum LogicalShiftedOp {
+ LogicalShiftedFixed = 0x0A000000,
+ LogicalShiftedFMask = 0x1F000000,
+ LogicalShiftedMask = 0xFF200000,
+ AND_w = LogicalShiftedFixed | AND,
+ AND_x = LogicalShiftedFixed | AND | SixtyFourBits,
+ AND_shift = AND_w,
+ BIC_w = LogicalShiftedFixed | BIC,
+ BIC_x = LogicalShiftedFixed | BIC | SixtyFourBits,
+ BIC_shift = BIC_w,
+ ORR_w = LogicalShiftedFixed | ORR,
+ ORR_x = LogicalShiftedFixed | ORR | SixtyFourBits,
+ ORR_shift = ORR_w,
+ ORN_w = LogicalShiftedFixed | ORN,
+ ORN_x = LogicalShiftedFixed | ORN | SixtyFourBits,
+ ORN_shift = ORN_w,
+ EOR_w = LogicalShiftedFixed | EOR,
+ EOR_x = LogicalShiftedFixed | EOR | SixtyFourBits,
+ EOR_shift = EOR_w,
+ EON_w = LogicalShiftedFixed | EON,
+ EON_x = LogicalShiftedFixed | EON | SixtyFourBits,
+ EON_shift = EON_w,
+ ANDS_w = LogicalShiftedFixed | ANDS,
+ ANDS_x = LogicalShiftedFixed | ANDS | SixtyFourBits,
+ ANDS_shift = ANDS_w,
+ BICS_w = LogicalShiftedFixed | BICS,
+ BICS_x = LogicalShiftedFixed | BICS | SixtyFourBits,
+ BICS_shift = BICS_w
+};
+
+// Move wide immediate.
+enum MoveWideImmediateOp {
+ MoveWideImmediateFixed = 0x12800000,
+ MoveWideImmediateFMask = 0x1F800000,
+ MoveWideImmediateMask = 0xFF800000,
+ MOVN = 0x00000000,
+ MOVZ = 0x40000000,
+ MOVK = 0x60000000,
+ MOVN_w = MoveWideImmediateFixed | MOVN,
+ MOVN_x = MoveWideImmediateFixed | MOVN | SixtyFourBits,
+ MOVZ_w = MoveWideImmediateFixed | MOVZ,
+ MOVZ_x = MoveWideImmediateFixed | MOVZ | SixtyFourBits,
+ MOVK_w = MoveWideImmediateFixed | MOVK,
+ MOVK_x = MoveWideImmediateFixed | MOVK | SixtyFourBits
+};
+
+// Bitfield.
+const int kBitfieldNOffset = 22;
+enum BitfieldOp {
+ BitfieldFixed = 0x13000000,
+ BitfieldFMask = 0x1F800000,
+ BitfieldMask = 0xFF800000,
+ SBFM_w = BitfieldFixed | 0x00000000,
+ SBFM_x = BitfieldFixed | 0x80000000,
+ SBFM = SBFM_w,
+ BFM_w = BitfieldFixed | 0x20000000,
+ BFM_x = BitfieldFixed | 0xA0000000,
+ BFM = BFM_w,
+ UBFM_w = BitfieldFixed | 0x40000000,
+ UBFM_x = BitfieldFixed | 0xC0000000,
+ UBFM = UBFM_w
+ // Bitfield N field.
+};
+
+// Extract.
+enum ExtractOp {
+ ExtractFixed = 0x13800000,
+ ExtractFMask = 0x1F800000,
+ ExtractMask = 0xFFA00000,
+ EXTR_w = ExtractFixed | 0x00000000,
+ EXTR_x = ExtractFixed | 0x80000000,
+ EXTR = EXTR_w
+};
+
+// Unconditional branch.
+enum UnconditionalBranchOp {
+ UnconditionalBranchFixed = 0x14000000,
+ UnconditionalBranchFMask = 0x7C000000,
+ UnconditionalBranchMask = 0xFC000000,
+ B = UnconditionalBranchFixed | 0x00000000,
+ BL = UnconditionalBranchFixed | 0x80000000
+};
+
+// Unconditional branch to register.
+enum UnconditionalBranchToRegisterOp {
+ UnconditionalBranchToRegisterFixed = 0xD6000000,
+ UnconditionalBranchToRegisterFMask = 0xFE000000,
+ UnconditionalBranchToRegisterMask = 0xFFFFFC1F,
+ BR = UnconditionalBranchToRegisterFixed | 0x001F0000,
+ BLR = UnconditionalBranchToRegisterFixed | 0x003F0000,
+ RET = UnconditionalBranchToRegisterFixed | 0x005F0000
+};
+
+// Compare and branch.
+enum CompareBranchOp {
+ CompareBranchFixed = 0x34000000,
+ CompareBranchFMask = 0x7E000000,
+ CompareBranchMask = 0xFF000000,
+ CBZ_w = CompareBranchFixed | 0x00000000,
+ CBZ_x = CompareBranchFixed | 0x80000000,
+ CBZ = CBZ_w,
+ CBNZ_w = CompareBranchFixed | 0x01000000,
+ CBNZ_x = CompareBranchFixed | 0x81000000,
+ CBNZ = CBNZ_w
+};
+
+// Test and branch.
+enum TestBranchOp {
+ TestBranchFixed = 0x36000000,
+ TestBranchFMask = 0x7E000000,
+ TestBranchMask = 0x7F000000,
+ TBZ = TestBranchFixed | 0x00000000,
+ TBNZ = TestBranchFixed | 0x01000000
+};
+
+// Conditional branch.
+enum ConditionalBranchOp {
+ ConditionalBranchFixed = 0x54000000,
+ ConditionalBranchFMask = 0xFE000000,
+ ConditionalBranchMask = 0xFF000010,
+ B_cond = ConditionalBranchFixed | 0x00000000
+};
+
+// System.
+// System instruction encoding is complicated because some instructions use op
+// and CR fields to encode parameters. To handle this cleanly, the system
+// instructions are split into more than one enum.
+
+enum SystemOp {
+ SystemFixed = 0xD5000000,
+ SystemFMask = 0xFFC00000
+};
+
+enum SystemSysRegOp {
+ SystemSysRegFixed = 0xD5100000,
+ SystemSysRegFMask = 0xFFD00000,
+ SystemSysRegMask = 0xFFF00000,
+ MRS = SystemSysRegFixed | 0x00200000,
+ MSR = SystemSysRegFixed | 0x00000000
+};
+
+enum SystemHintOp {
+ SystemHintFixed = 0xD503201F,
+ SystemHintFMask = 0xFFFFF01F,
+ SystemHintMask = 0xFFFFF01F,
+ HINT = SystemHintFixed | 0x00000000
+};
+
+// Exception.
+enum ExceptionOp {
+ ExceptionFixed = 0xD4000000,
+ ExceptionFMask = 0xFF000000,
+ ExceptionMask = 0xFFE0001F,
+ HLT = ExceptionFixed | 0x00400000,
+ BRK = ExceptionFixed | 0x00200000,
+ SVC = ExceptionFixed | 0x00000001,
+ HVC = ExceptionFixed | 0x00000002,
+ SMC = ExceptionFixed | 0x00000003,
+ DCPS1 = ExceptionFixed | 0x00A00001,
+ DCPS2 = ExceptionFixed | 0x00A00002,
+ DCPS3 = ExceptionFixed | 0x00A00003
+};
+// Code used to spot hlt instructions that should not be hit.
+const int kHltBadCode = 0xbad;
+
+enum MemBarrierOp {
+ MemBarrierFixed = 0xD503309F,
+ MemBarrierFMask = 0xFFFFF09F,
+ MemBarrierMask = 0xFFFFF0FF,
+ DSB = MemBarrierFixed | 0x00000000,
+ DMB = MemBarrierFixed | 0x00000020,
+ ISB = MemBarrierFixed | 0x00000040
+};
+
+// Any load or store (including pair).
+enum LoadStoreAnyOp {
+ LoadStoreAnyFMask = 0x0a000000,
+ LoadStoreAnyFixed = 0x08000000
+};
+
+// Any load pair or store pair.
+enum LoadStorePairAnyOp {
+ LoadStorePairAnyFMask = 0x3a000000,
+ LoadStorePairAnyFixed = 0x28000000
+};
+
+#define LOAD_STORE_PAIR_OP_LIST(V) \
+ V(STP, w, 0x00000000), \
+ V(LDP, w, 0x00400000), \
+ V(LDPSW, x, 0x40400000), \
+ V(STP, x, 0x80000000), \
+ V(LDP, x, 0x80400000), \
+ V(STP, s, 0x04000000), \
+ V(LDP, s, 0x04400000), \
+ V(STP, d, 0x44000000), \
+ V(LDP, d, 0x44400000)
+
+// Load/store pair (post, pre and offset.)
+enum LoadStorePairOp {
+ LoadStorePairMask = 0xC4400000,
+ LoadStorePairLBit = 1 << 22,
+ #define LOAD_STORE_PAIR(A, B, C) \
+ A##_##B = C
+ LOAD_STORE_PAIR_OP_LIST(LOAD_STORE_PAIR)
+ #undef LOAD_STORE_PAIR
+};
+
+enum LoadStorePairPostIndexOp {
+ LoadStorePairPostIndexFixed = 0x28800000,
+ LoadStorePairPostIndexFMask = 0x3B800000,
+ LoadStorePairPostIndexMask = 0xFFC00000,
+ #define LOAD_STORE_PAIR_POST_INDEX(A, B, C) \
+ A##_##B##_post = LoadStorePairPostIndexFixed | A##_##B
+ LOAD_STORE_PAIR_OP_LIST(LOAD_STORE_PAIR_POST_INDEX)
+ #undef LOAD_STORE_PAIR_POST_INDEX
+};
+
+enum LoadStorePairPreIndexOp {
+ LoadStorePairPreIndexFixed = 0x29800000,
+ LoadStorePairPreIndexFMask = 0x3B800000,
+ LoadStorePairPreIndexMask = 0xFFC00000,
+ #define LOAD_STORE_PAIR_PRE_INDEX(A, B, C) \
+ A##_##B##_pre = LoadStorePairPreIndexFixed | A##_##B
+ LOAD_STORE_PAIR_OP_LIST(LOAD_STORE_PAIR_PRE_INDEX)
+ #undef LOAD_STORE_PAIR_PRE_INDEX
+};
+
+enum LoadStorePairOffsetOp {
+ LoadStorePairOffsetFixed = 0x29000000,
+ LoadStorePairOffsetFMask = 0x3B800000,
+ LoadStorePairOffsetMask = 0xFFC00000,
+ #define LOAD_STORE_PAIR_OFFSET(A, B, C) \
+ A##_##B##_off = LoadStorePairOffsetFixed | A##_##B
+ LOAD_STORE_PAIR_OP_LIST(LOAD_STORE_PAIR_OFFSET)
+ #undef LOAD_STORE_PAIR_OFFSET
+};
+
+enum LoadStorePairNonTemporalOp {
+ LoadStorePairNonTemporalFixed = 0x28000000,
+ LoadStorePairNonTemporalFMask = 0x3B800000,
+ LoadStorePairNonTemporalMask = 0xFFC00000,
+ STNP_w = LoadStorePairNonTemporalFixed | STP_w,
+ LDNP_w = LoadStorePairNonTemporalFixed | LDP_w,
+ STNP_x = LoadStorePairNonTemporalFixed | STP_x,
+ LDNP_x = LoadStorePairNonTemporalFixed | LDP_x,
+ STNP_s = LoadStorePairNonTemporalFixed | STP_s,
+ LDNP_s = LoadStorePairNonTemporalFixed | LDP_s,
+ STNP_d = LoadStorePairNonTemporalFixed | STP_d,
+ LDNP_d = LoadStorePairNonTemporalFixed | LDP_d
+};
+
+// Load literal.
+enum LoadLiteralOp {
+ LoadLiteralFixed = 0x18000000,
+ LoadLiteralFMask = 0x3B000000,
+ LoadLiteralMask = 0xFF000000,
+ LDR_w_lit = LoadLiteralFixed | 0x00000000,
+ LDR_x_lit = LoadLiteralFixed | 0x40000000,
+ LDRSW_x_lit = LoadLiteralFixed | 0x80000000,
+ PRFM_lit = LoadLiteralFixed | 0xC0000000,
+ LDR_s_lit = LoadLiteralFixed | 0x04000000,
+ LDR_d_lit = LoadLiteralFixed | 0x44000000
+};
+
+#define LOAD_STORE_OP_LIST(V) \
+ V(ST, RB, w, 0x00000000), \
+ V(ST, RH, w, 0x40000000), \
+ V(ST, R, w, 0x80000000), \
+ V(ST, R, x, 0xC0000000), \
+ V(LD, RB, w, 0x00400000), \
+ V(LD, RH, w, 0x40400000), \
+ V(LD, R, w, 0x80400000), \
+ V(LD, R, x, 0xC0400000), \
+ V(LD, RSB, x, 0x00800000), \
+ V(LD, RSH, x, 0x40800000), \
+ V(LD, RSW, x, 0x80800000), \
+ V(LD, RSB, w, 0x00C00000), \
+ V(LD, RSH, w, 0x40C00000), \
+ V(ST, R, s, 0x84000000), \
+ V(ST, R, d, 0xC4000000), \
+ V(LD, R, s, 0x84400000), \
+ V(LD, R, d, 0xC4400000)
+
+
+// Load/store unscaled offset.
+enum LoadStoreUnscaledOffsetOp {
+ LoadStoreUnscaledOffsetFixed = 0x38000000,
+ LoadStoreUnscaledOffsetFMask = 0x3B200C00,
+ LoadStoreUnscaledOffsetMask = 0xFFE00C00,
+ #define LOAD_STORE_UNSCALED(A, B, C, D) \
+ A##U##B##_##C = LoadStoreUnscaledOffsetFixed | D
+ LOAD_STORE_OP_LIST(LOAD_STORE_UNSCALED)
+ #undef LOAD_STORE_UNSCALED
+};
+
+// Load/store (post, pre, offset and unsigned.)
+enum LoadStoreOp {
+ LoadStoreOpMask = 0xC4C00000,
+ #define LOAD_STORE(A, B, C, D) \
+ A##B##_##C = D
+ LOAD_STORE_OP_LIST(LOAD_STORE),
+ #undef LOAD_STORE
+ PRFM = 0xC0800000
+};
+
+// Load/store post index.
+enum LoadStorePostIndex {
+ LoadStorePostIndexFixed = 0x38000400,
+ LoadStorePostIndexFMask = 0x3B200C00,
+ LoadStorePostIndexMask = 0xFFE00C00,
+ #define LOAD_STORE_POST_INDEX(A, B, C, D) \
+ A##B##_##C##_post = LoadStorePostIndexFixed | D
+ LOAD_STORE_OP_LIST(LOAD_STORE_POST_INDEX)
+ #undef LOAD_STORE_POST_INDEX
+};
+
+// Load/store pre index.
+enum LoadStorePreIndex {
+ LoadStorePreIndexFixed = 0x38000C00,
+ LoadStorePreIndexFMask = 0x3B200C00,
+ LoadStorePreIndexMask = 0xFFE00C00,
+ #define LOAD_STORE_PRE_INDEX(A, B, C, D) \
+ A##B##_##C##_pre = LoadStorePreIndexFixed | D
+ LOAD_STORE_OP_LIST(LOAD_STORE_PRE_INDEX)
+ #undef LOAD_STORE_PRE_INDEX
+};
+
+// Load/store unsigned offset.
+enum LoadStoreUnsignedOffset {
+ LoadStoreUnsignedOffsetFixed = 0x39000000,
+ LoadStoreUnsignedOffsetFMask = 0x3B000000,
+ LoadStoreUnsignedOffsetMask = 0xFFC00000,
+ PRFM_unsigned = LoadStoreUnsignedOffsetFixed | PRFM,
+ #define LOAD_STORE_UNSIGNED_OFFSET(A, B, C, D) \
+ A##B##_##C##_unsigned = LoadStoreUnsignedOffsetFixed | D
+ LOAD_STORE_OP_LIST(LOAD_STORE_UNSIGNED_OFFSET)
+ #undef LOAD_STORE_UNSIGNED_OFFSET
+};
+
+// Load/store register offset.
+enum LoadStoreRegisterOffset {
+ LoadStoreRegisterOffsetFixed = 0x38200800,
+ LoadStoreRegisterOffsetFMask = 0x3B200C00,
+ LoadStoreRegisterOffsetMask = 0xFFE00C00,
+ PRFM_reg = LoadStoreRegisterOffsetFixed | PRFM,
+ #define LOAD_STORE_REGISTER_OFFSET(A, B, C, D) \
+ A##B##_##C##_reg = LoadStoreRegisterOffsetFixed | D
+ LOAD_STORE_OP_LIST(LOAD_STORE_REGISTER_OFFSET)
+ #undef LOAD_STORE_REGISTER_OFFSET
+};
+
+// Conditional compare.
+enum ConditionalCompareOp {
+ ConditionalCompareMask = 0x60000000,
+ CCMN = 0x20000000,
+ CCMP = 0x60000000
+};
+
+// Conditional compare register.
+enum ConditionalCompareRegisterOp {
+ ConditionalCompareRegisterFixed = 0x1A400000,
+ ConditionalCompareRegisterFMask = 0x1FE00800,
+ ConditionalCompareRegisterMask = 0xFFE00C10,
+ CCMN_w = ConditionalCompareRegisterFixed | CCMN,
+ CCMN_x = ConditionalCompareRegisterFixed | SixtyFourBits | CCMN,
+ CCMP_w = ConditionalCompareRegisterFixed | CCMP,
+ CCMP_x = ConditionalCompareRegisterFixed | SixtyFourBits | CCMP
+};
+
+// Conditional compare immediate.
+enum ConditionalCompareImmediateOp {
+ ConditionalCompareImmediateFixed = 0x1A400800,
+ ConditionalCompareImmediateFMask = 0x1FE00800,
+ ConditionalCompareImmediateMask = 0xFFE00C10,
+ CCMN_w_imm = ConditionalCompareImmediateFixed | CCMN,
+ CCMN_x_imm = ConditionalCompareImmediateFixed | SixtyFourBits | CCMN,
+ CCMP_w_imm = ConditionalCompareImmediateFixed | CCMP,
+ CCMP_x_imm = ConditionalCompareImmediateFixed | SixtyFourBits | CCMP
+};
+
+// Conditional select.
+enum ConditionalSelectOp {
+ ConditionalSelectFixed = 0x1A800000,
+ ConditionalSelectFMask = 0x1FE00000,
+ ConditionalSelectMask = 0xFFE00C00,
+ CSEL_w = ConditionalSelectFixed | 0x00000000,
+ CSEL_x = ConditionalSelectFixed | 0x80000000,
+ CSEL = CSEL_w,
+ CSINC_w = ConditionalSelectFixed | 0x00000400,
+ CSINC_x = ConditionalSelectFixed | 0x80000400,
+ CSINC = CSINC_w,
+ CSINV_w = ConditionalSelectFixed | 0x40000000,
+ CSINV_x = ConditionalSelectFixed | 0xC0000000,
+ CSINV = CSINV_w,
+ CSNEG_w = ConditionalSelectFixed | 0x40000400,
+ CSNEG_x = ConditionalSelectFixed | 0xC0000400,
+ CSNEG = CSNEG_w
+};
+
+// Data processing 1 source.
+enum DataProcessing1SourceOp {
+ DataProcessing1SourceFixed = 0x5AC00000,
+ DataProcessing1SourceFMask = 0x5FE00000,
+ DataProcessing1SourceMask = 0xFFFFFC00,
+ RBIT = DataProcessing1SourceFixed | 0x00000000,
+ RBIT_w = RBIT,
+ RBIT_x = RBIT | SixtyFourBits,
+ REV16 = DataProcessing1SourceFixed | 0x00000400,
+ REV16_w = REV16,
+ REV16_x = REV16 | SixtyFourBits,
+ REV = DataProcessing1SourceFixed | 0x00000800,
+ REV_w = REV,
+ REV32_x = REV | SixtyFourBits,
+ REV_x = DataProcessing1SourceFixed | SixtyFourBits | 0x00000C00,
+ CLZ = DataProcessing1SourceFixed | 0x00001000,
+ CLZ_w = CLZ,
+ CLZ_x = CLZ | SixtyFourBits,
+ CLS = DataProcessing1SourceFixed | 0x00001400,
+ CLS_w = CLS,
+ CLS_x = CLS | SixtyFourBits
+};
+
+// Data processing 2 source.
+enum DataProcessing2SourceOp {
+ DataProcessing2SourceFixed = 0x1AC00000,
+ DataProcessing2SourceFMask = 0x5FE00000,
+ DataProcessing2SourceMask = 0xFFE0FC00,
+ UDIV_w = DataProcessing2SourceFixed | 0x00000800,
+ UDIV_x = DataProcessing2SourceFixed | 0x80000800,
+ UDIV = UDIV_w,
+ SDIV_w = DataProcessing2SourceFixed | 0x00000C00,
+ SDIV_x = DataProcessing2SourceFixed | 0x80000C00,
+ SDIV = SDIV_w,
+ LSLV_w = DataProcessing2SourceFixed | 0x00002000,
+ LSLV_x = DataProcessing2SourceFixed | 0x80002000,
+ LSLV = LSLV_w,
+ LSRV_w = DataProcessing2SourceFixed | 0x00002400,
+ LSRV_x = DataProcessing2SourceFixed | 0x80002400,
+ LSRV = LSRV_w,
+ ASRV_w = DataProcessing2SourceFixed | 0x00002800,
+ ASRV_x = DataProcessing2SourceFixed | 0x80002800,
+ ASRV = ASRV_w,
+ RORV_w = DataProcessing2SourceFixed | 0x00002C00,
+ RORV_x = DataProcessing2SourceFixed | 0x80002C00,
+ RORV = RORV_w,
+ CRC32B = DataProcessing2SourceFixed | 0x00004000,
+ CRC32H = DataProcessing2SourceFixed | 0x00004400,
+ CRC32W = DataProcessing2SourceFixed | 0x00004800,
+ CRC32X = DataProcessing2SourceFixed | SixtyFourBits | 0x00004C00,
+ CRC32CB = DataProcessing2SourceFixed | 0x00005000,
+ CRC32CH = DataProcessing2SourceFixed | 0x00005400,
+ CRC32CW = DataProcessing2SourceFixed | 0x00005800,
+ CRC32CX = DataProcessing2SourceFixed | SixtyFourBits | 0x00005C00
+};
+
+// Data processing 3 source.
+enum DataProcessing3SourceOp {
+ DataProcessing3SourceFixed = 0x1B000000,
+ DataProcessing3SourceFMask = 0x1F000000,
+ DataProcessing3SourceMask = 0xFFE08000,
+ MADD_w = DataProcessing3SourceFixed | 0x00000000,
+ MADD_x = DataProcessing3SourceFixed | 0x80000000,
+ MADD = MADD_w,
+ MSUB_w = DataProcessing3SourceFixed | 0x00008000,
+ MSUB_x = DataProcessing3SourceFixed | 0x80008000,
+ MSUB = MSUB_w,
+ SMADDL_x = DataProcessing3SourceFixed | 0x80200000,
+ SMSUBL_x = DataProcessing3SourceFixed | 0x80208000,
+ SMULH_x = DataProcessing3SourceFixed | 0x80400000,
+ UMADDL_x = DataProcessing3SourceFixed | 0x80A00000,
+ UMSUBL_x = DataProcessing3SourceFixed | 0x80A08000,
+ UMULH_x = DataProcessing3SourceFixed | 0x80C00000
+};
+
+// Floating point compare.
+enum FPCompareOp {
+ FPCompareFixed = 0x1E202000,
+ FPCompareFMask = 0x5F203C00,
+ FPCompareMask = 0xFFE0FC1F,
+ FCMP_s = FPCompareFixed | 0x00000000,
+ FCMP_d = FPCompareFixed | FP64 | 0x00000000,
+ FCMP = FCMP_s,
+ FCMP_s_zero = FPCompareFixed | 0x00000008,
+ FCMP_d_zero = FPCompareFixed | FP64 | 0x00000008,
+ FCMP_zero = FCMP_s_zero,
+ FCMPE_s = FPCompareFixed | 0x00000010,
+ FCMPE_d = FPCompareFixed | FP64 | 0x00000010,
+ FCMPE_s_zero = FPCompareFixed | 0x00000018,
+ FCMPE_d_zero = FPCompareFixed | FP64 | 0x00000018
+};
+
+// Floating point conditional compare.
+enum FPConditionalCompareOp {
+ FPConditionalCompareFixed = 0x1E200400,
+ FPConditionalCompareFMask = 0x5F200C00,
+ FPConditionalCompareMask = 0xFFE00C10,
+ FCCMP_s = FPConditionalCompareFixed | 0x00000000,
+ FCCMP_d = FPConditionalCompareFixed | FP64 | 0x00000000,
+ FCCMP = FCCMP_s,
+ FCCMPE_s = FPConditionalCompareFixed | 0x00000010,
+ FCCMPE_d = FPConditionalCompareFixed | FP64 | 0x00000010,
+ FCCMPE = FCCMPE_s
+};
+
+// Floating point conditional select.
+enum FPConditionalSelectOp {
+ FPConditionalSelectFixed = 0x1E200C00,
+ FPConditionalSelectFMask = 0x5F200C00,
+ FPConditionalSelectMask = 0xFFE00C00,
+ FCSEL_s = FPConditionalSelectFixed | 0x00000000,
+ FCSEL_d = FPConditionalSelectFixed | FP64 | 0x00000000,
+ FCSEL = FCSEL_s
+};
+
+// Floating point immediate.
+enum FPImmediateOp {
+ FPImmediateFixed = 0x1E201000,
+ FPImmediateFMask = 0x5F201C00,
+ FPImmediateMask = 0xFFE01C00,
+ FMOV_s_imm = FPImmediateFixed | 0x00000000,
+ FMOV_d_imm = FPImmediateFixed | FP64 | 0x00000000
+};
+
+// Floating point data processing 1 source.
+enum FPDataProcessing1SourceOp {
+ FPDataProcessing1SourceFixed = 0x1E204000,
+ FPDataProcessing1SourceFMask = 0x5F207C00,
+ FPDataProcessing1SourceMask = 0xFFFFFC00,
+ FMOV_s = FPDataProcessing1SourceFixed | 0x00000000,
+ FMOV_d = FPDataProcessing1SourceFixed | FP64 | 0x00000000,
+ FMOV = FMOV_s,
+ FABS_s = FPDataProcessing1SourceFixed | 0x00008000,
+ FABS_d = FPDataProcessing1SourceFixed | FP64 | 0x00008000,
+ FABS = FABS_s,
+ FNEG_s = FPDataProcessing1SourceFixed | 0x00010000,
+ FNEG_d = FPDataProcessing1SourceFixed | FP64 | 0x00010000,
+ FNEG = FNEG_s,
+ FSQRT_s = FPDataProcessing1SourceFixed | 0x00018000,
+ FSQRT_d = FPDataProcessing1SourceFixed | FP64 | 0x00018000,
+ FSQRT = FSQRT_s,
+ FCVT_ds = FPDataProcessing1SourceFixed | 0x00028000,
+ FCVT_sd = FPDataProcessing1SourceFixed | FP64 | 0x00020000,
+ FRINTN_s = FPDataProcessing1SourceFixed | 0x00040000,
+ FRINTN_d = FPDataProcessing1SourceFixed | FP64 | 0x00040000,
+ FRINTN = FRINTN_s,
+ FRINTP_s = FPDataProcessing1SourceFixed | 0x00048000,
+ FRINTP_d = FPDataProcessing1SourceFixed | FP64 | 0x00048000,
+ FRINTP = FRINTP_s,
+ FRINTM_s = FPDataProcessing1SourceFixed | 0x00050000,
+ FRINTM_d = FPDataProcessing1SourceFixed | FP64 | 0x00050000,
+ FRINTM = FRINTM_s,
+ FRINTZ_s = FPDataProcessing1SourceFixed | 0x00058000,
+ FRINTZ_d = FPDataProcessing1SourceFixed | FP64 | 0x00058000,
+ FRINTZ = FRINTZ_s,
+ FRINTA_s = FPDataProcessing1SourceFixed | 0x00060000,
+ FRINTA_d = FPDataProcessing1SourceFixed | FP64 | 0x00060000,
+ FRINTA = FRINTA_s,
+ FRINTX_s = FPDataProcessing1SourceFixed | 0x00070000,
+ FRINTX_d = FPDataProcessing1SourceFixed | FP64 | 0x00070000,
+ FRINTX = FRINTX_s,
+ FRINTI_s = FPDataProcessing1SourceFixed | 0x00078000,
+ FRINTI_d = FPDataProcessing1SourceFixed | FP64 | 0x00078000,
+ FRINTI = FRINTI_s
+};
+
+// Floating point data processing 2 source.
+enum FPDataProcessing2SourceOp {
+ FPDataProcessing2SourceFixed = 0x1E200800,
+ FPDataProcessing2SourceFMask = 0x5F200C00,
+ FPDataProcessing2SourceMask = 0xFFE0FC00,
+ FMUL = FPDataProcessing2SourceFixed | 0x00000000,
+ FMUL_s = FMUL,
+ FMUL_d = FMUL | FP64,
+ FDIV = FPDataProcessing2SourceFixed | 0x00001000,
+ FDIV_s = FDIV,
+ FDIV_d = FDIV | FP64,
+ FADD = FPDataProcessing2SourceFixed | 0x00002000,
+ FADD_s = FADD,
+ FADD_d = FADD | FP64,
+ FSUB = FPDataProcessing2SourceFixed | 0x00003000,
+ FSUB_s = FSUB,
+ FSUB_d = FSUB | FP64,
+ FMAX = FPDataProcessing2SourceFixed | 0x00004000,
+ FMAX_s = FMAX,
+ FMAX_d = FMAX | FP64,
+ FMIN = FPDataProcessing2SourceFixed | 0x00005000,
+ FMIN_s = FMIN,
+ FMIN_d = FMIN | FP64,
+ FMAXNM = FPDataProcessing2SourceFixed | 0x00006000,
+ FMAXNM_s = FMAXNM,
+ FMAXNM_d = FMAXNM | FP64,
+ FMINNM = FPDataProcessing2SourceFixed | 0x00007000,
+ FMINNM_s = FMINNM,
+ FMINNM_d = FMINNM | FP64,
+ FNMUL = FPDataProcessing2SourceFixed | 0x00008000,
+ FNMUL_s = FNMUL,
+ FNMUL_d = FNMUL | FP64
+};
+
+// Floating point data processing 3 source.
+enum FPDataProcessing3SourceOp {
+ FPDataProcessing3SourceFixed = 0x1F000000,
+ FPDataProcessing3SourceFMask = 0x5F000000,
+ FPDataProcessing3SourceMask = 0xFFE08000,
+ FMADD_s = FPDataProcessing3SourceFixed | 0x00000000,
+ FMSUB_s = FPDataProcessing3SourceFixed | 0x00008000,
+ FNMADD_s = FPDataProcessing3SourceFixed | 0x00200000,
+ FNMSUB_s = FPDataProcessing3SourceFixed | 0x00208000,
+ FMADD_d = FPDataProcessing3SourceFixed | 0x00400000,
+ FMSUB_d = FPDataProcessing3SourceFixed | 0x00408000,
+ FNMADD_d = FPDataProcessing3SourceFixed | 0x00600000,
+ FNMSUB_d = FPDataProcessing3SourceFixed | 0x00608000
+};
+
+// Conversion between floating point and integer.
+enum FPIntegerConvertOp {
+ FPIntegerConvertFixed = 0x1E200000,
+ FPIntegerConvertFMask = 0x5F20FC00,
+ FPIntegerConvertMask = 0xFFFFFC00,
+ FCVTNS = FPIntegerConvertFixed | 0x00000000,
+ FCVTNS_ws = FCVTNS,
+ FCVTNS_xs = FCVTNS | SixtyFourBits,
+ FCVTNS_wd = FCVTNS | FP64,
+ FCVTNS_xd = FCVTNS | SixtyFourBits | FP64,
+ FCVTNU = FPIntegerConvertFixed | 0x00010000,
+ FCVTNU_ws = FCVTNU,
+ FCVTNU_xs = FCVTNU | SixtyFourBits,
+ FCVTNU_wd = FCVTNU | FP64,
+ FCVTNU_xd = FCVTNU | SixtyFourBits | FP64,
+ FCVTPS = FPIntegerConvertFixed | 0x00080000,
+ FCVTPS_ws = FCVTPS,
+ FCVTPS_xs = FCVTPS | SixtyFourBits,
+ FCVTPS_wd = FCVTPS | FP64,
+ FCVTPS_xd = FCVTPS | SixtyFourBits | FP64,
+ FCVTPU = FPIntegerConvertFixed | 0x00090000,
+ FCVTPU_ws = FCVTPU,
+ FCVTPU_xs = FCVTPU | SixtyFourBits,
+ FCVTPU_wd = FCVTPU | FP64,
+ FCVTPU_xd = FCVTPU | SixtyFourBits | FP64,
+ FCVTMS = FPIntegerConvertFixed | 0x00100000,
+ FCVTMS_ws = FCVTMS,
+ FCVTMS_xs = FCVTMS | SixtyFourBits,
+ FCVTMS_wd = FCVTMS | FP64,
+ FCVTMS_xd = FCVTMS | SixtyFourBits | FP64,
+ FCVTMU = FPIntegerConvertFixed | 0x00110000,
+ FCVTMU_ws = FCVTMU,
+ FCVTMU_xs = FCVTMU | SixtyFourBits,
+ FCVTMU_wd = FCVTMU | FP64,
+ FCVTMU_xd = FCVTMU | SixtyFourBits | FP64,
+ FCVTZS = FPIntegerConvertFixed | 0x00180000,
+ FCVTZS_ws = FCVTZS,
+ FCVTZS_xs = FCVTZS | SixtyFourBits,
+ FCVTZS_wd = FCVTZS | FP64,
+ FCVTZS_xd = FCVTZS | SixtyFourBits | FP64,
+ FCVTZU = FPIntegerConvertFixed | 0x00190000,
+ FCVTZU_ws = FCVTZU,
+ FCVTZU_xs = FCVTZU | SixtyFourBits,
+ FCVTZU_wd = FCVTZU | FP64,
+ FCVTZU_xd = FCVTZU | SixtyFourBits | FP64,
+ SCVTF = FPIntegerConvertFixed | 0x00020000,
+ SCVTF_sw = SCVTF,
+ SCVTF_sx = SCVTF | SixtyFourBits,
+ SCVTF_dw = SCVTF | FP64,
+ SCVTF_dx = SCVTF | SixtyFourBits | FP64,
+ UCVTF = FPIntegerConvertFixed | 0x00030000,
+ UCVTF_sw = UCVTF,
+ UCVTF_sx = UCVTF | SixtyFourBits,
+ UCVTF_dw = UCVTF | FP64,
+ UCVTF_dx = UCVTF | SixtyFourBits | FP64,
+ FCVTAS = FPIntegerConvertFixed | 0x00040000,
+ FCVTAS_ws = FCVTAS,
+ FCVTAS_xs = FCVTAS | SixtyFourBits,
+ FCVTAS_wd = FCVTAS | FP64,
+ FCVTAS_xd = FCVTAS | SixtyFourBits | FP64,
+ FCVTAU = FPIntegerConvertFixed | 0x00050000,
+ FCVTAU_ws = FCVTAU,
+ FCVTAU_xs = FCVTAU | SixtyFourBits,
+ FCVTAU_wd = FCVTAU | FP64,
+ FCVTAU_xd = FCVTAU | SixtyFourBits | FP64,
+ FMOV_ws = FPIntegerConvertFixed | 0x00060000,
+ FMOV_sw = FPIntegerConvertFixed | 0x00070000,
+ FMOV_xd = FMOV_ws | SixtyFourBits | FP64,
+ FMOV_dx = FMOV_sw | SixtyFourBits | FP64
+};
+
+// Conversion between fixed point and floating point.
+enum FPFixedPointConvertOp {
+ FPFixedPointConvertFixed = 0x1E000000,
+ FPFixedPointConvertFMask = 0x5F200000,
+ FPFixedPointConvertMask = 0xFFFF0000,
+ FCVTZS_fixed = FPFixedPointConvertFixed | 0x00180000,
+ FCVTZS_ws_fixed = FCVTZS_fixed,
+ FCVTZS_xs_fixed = FCVTZS_fixed | SixtyFourBits,
+ FCVTZS_wd_fixed = FCVTZS_fixed | FP64,
+ FCVTZS_xd_fixed = FCVTZS_fixed | SixtyFourBits | FP64,
+ FCVTZU_fixed = FPFixedPointConvertFixed | 0x00190000,
+ FCVTZU_ws_fixed = FCVTZU_fixed,
+ FCVTZU_xs_fixed = FCVTZU_fixed | SixtyFourBits,
+ FCVTZU_wd_fixed = FCVTZU_fixed | FP64,
+ FCVTZU_xd_fixed = FCVTZU_fixed | SixtyFourBits | FP64,
+ SCVTF_fixed = FPFixedPointConvertFixed | 0x00020000,
+ SCVTF_sw_fixed = SCVTF_fixed,
+ SCVTF_sx_fixed = SCVTF_fixed | SixtyFourBits,
+ SCVTF_dw_fixed = SCVTF_fixed | FP64,
+ SCVTF_dx_fixed = SCVTF_fixed | SixtyFourBits | FP64,
+ UCVTF_fixed = FPFixedPointConvertFixed | 0x00030000,
+ UCVTF_sw_fixed = UCVTF_fixed,
+ UCVTF_sx_fixed = UCVTF_fixed | SixtyFourBits,
+ UCVTF_dw_fixed = UCVTF_fixed | FP64,
+ UCVTF_dx_fixed = UCVTF_fixed | SixtyFourBits | FP64
+};
+
+// Unimplemented and unallocated instructions. These are defined to make fixed
+// bit assertion easier.
+enum UnimplementedOp {
+ UnimplementedFixed = 0x00000000,
+ UnimplementedFMask = 0x00000000
+};
+
+enum UnallocatedOp {
+ UnallocatedFixed = 0x00000000,
+ UnallocatedFMask = 0x00000000
+};
+
+} } // namespace v8::internal
+
+#endif // V8_A64_CONSTANTS_A64_H_
--- /dev/null
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// CPU specific code for a64 independent of OS goes here.
+
+#include "v8.h"
+
+#if V8_TARGET_ARCH_A64
+
+#include "a64/cpu-a64.h"
+#include "a64/utils-a64.h"
+
+namespace v8 {
+namespace internal {
+
+#ifdef DEBUG
+bool CpuFeatures::initialized_ = false;
+#endif
+unsigned CpuFeatures::supported_ = 0;
+unsigned CpuFeatures::found_by_runtime_probing_only_ = 0;
+unsigned CpuFeatures::cross_compile_ = 0;
+
+// Initialise to smallest possible cache size.
+unsigned CpuFeatures::dcache_line_size_ = 1;
+unsigned CpuFeatures::icache_line_size_ = 1;
+
+
+void CPU::SetUp() {
+ CpuFeatures::Probe();
+}
+
+
+bool CPU::SupportsCrankshaft() {
+ return true;
+}
+
+
+void CPU::FlushICache(void* address, size_t length) {
+ if (length == 0) {
+ return;
+ }
+
+#ifdef USE_SIMULATOR
+ // TODO(all): consider doing some cache simulation to ensure every address
+ // run has been synced.
+ USE(address);
+ USE(length);
+#else
+ // The code below assumes user space cache operations are allowed. The goal
+ // of this routine is to make sure the code generated is visible to the I
+ // side of the CPU.
+
+ uintptr_t start = reinterpret_cast<uintptr_t>(address);
+ // Sizes will be used to generate a mask big enough to cover a pointer.
+ uintptr_t dsize = static_cast<uintptr_t>(CpuFeatures::dcache_line_size());
+ uintptr_t isize = static_cast<uintptr_t>(CpuFeatures::icache_line_size());
+ // Cache line sizes are always a power of 2.
+ ASSERT(CountSetBits(dsize, 64) == 1);
+ ASSERT(CountSetBits(isize, 64) == 1);
+ uintptr_t dstart = start & ~(dsize - 1);
+ uintptr_t istart = start & ~(isize - 1);
+ uintptr_t end = start + length;
+
+ __asm__ __volatile__ ( // NOLINT
+ // Clean every line of the D cache containing the target data.
+ "0: \n\t"
+ // dc : Data Cache maintenance
+ // c : Clean
+ // va : by (Virtual) Address
+ // u : to the point of Unification
+ // The point of unification for a processor is the point by which the
+ // instruction and data caches are guaranteed to see the same copy of a
+ // memory location. See ARM DDI 0406B page B2-12 for more information.
+ "dc cvau, %[dline] \n\t"
+ "add %[dline], %[dline], %[dsize] \n\t"
+ "cmp %[dline], %[end] \n\t"
+ "b.lt 0b \n\t"
+ // Barrier to make sure the effect of the code above is visible to the rest
+ // of the world.
+ // dsb : Data Synchronisation Barrier
+ // ish : Inner SHareable domain
+ // The point of unification for an Inner Shareable shareability domain is
+ // the point by which the instruction and data caches of all the processors
+ // in that Inner Shareable shareability domain are guaranteed to see the
+ // same copy of a memory location. See ARM DDI 0406B page B2-12 for more
+ // information.
+ "dsb ish \n\t"
+ // Invalidate every line of the I cache containing the target data.
+ "1: \n\t"
+ // ic : instruction cache maintenance
+ // i : invalidate
+ // va : by address
+ // u : to the point of unification
+ "ic ivau, %[iline] \n\t"
+ "add %[iline], %[iline], %[isize] \n\t"
+ "cmp %[iline], %[end] \n\t"
+ "b.lt 1b \n\t"
+ // Barrier to make sure the effect of the code above is visible to the rest
+ // of the world.
+ "dsb ish \n\t"
+ // Barrier to ensure any prefetching which happened before this code is
+ // discarded.
+ // isb : Instruction Synchronisation Barrier
+ "isb \n\t"
+ : [dline] "+r" (dstart),
+ [iline] "+r" (istart)
+ : [dsize] "r" (dsize),
+ [isize] "r" (isize),
+ [end] "r" (end)
+ // This code does not write to memory but without the dependency gcc might
+ // move this code before the code is generated.
+ : "cc", "memory"
+ ); // NOLINT
+#endif
+}
+
+
+void CpuFeatures::Probe() {
+ // Compute I and D cache line size. The cache type register holds
+ // information about the caches.
+ uint32_t cache_type_register = GetCacheType();
+
+ static const int kDCacheLineSizeShift = 16;
+ static const int kICacheLineSizeShift = 0;
+ static const uint32_t kDCacheLineSizeMask = 0xf << kDCacheLineSizeShift;
+ static const uint32_t kICacheLineSizeMask = 0xf << kICacheLineSizeShift;
+
+ // The cache type register holds the size of the I and D caches as a power of
+ // two.
+ uint32_t dcache_line_size_power_of_two =
+ (cache_type_register & kDCacheLineSizeMask) >> kDCacheLineSizeShift;
+ uint32_t icache_line_size_power_of_two =
+ (cache_type_register & kICacheLineSizeMask) >> kICacheLineSizeShift;
+
+ dcache_line_size_ = 1 << dcache_line_size_power_of_two;
+ icache_line_size_ = 1 << icache_line_size_power_of_two;
+
+ // AArch64 has no configuration options, no further probing is required.
+ supported_ = 0;
+
+#ifdef DEBUG
+ initialized_ = true;
+#endif
+}
+
+
+unsigned CpuFeatures::dcache_line_size() {
+ ASSERT(initialized_);
+ return dcache_line_size_;
+}
+
+
+unsigned CpuFeatures::icache_line_size() {
+ ASSERT(initialized_);
+ return icache_line_size_;
+}
+
+
+uint32_t CpuFeatures::GetCacheType() {
+#ifdef USE_SIMULATOR
+ // This will lead to a cache with 1 byte long lines, which is fine since the
+ // simulator will not need this information.
+ return 0;
+#else
+ uint32_t cache_type_register;
+ // Copy the content of the cache type register to a core register.
+ __asm__ __volatile__ ("mrs %[ctr], ctr_el0" // NOLINT
+ : [ctr] "=r" (cache_type_register));
+ return cache_type_register;
+#endif
+}
+
+} } // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_A64
--- /dev/null
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_A64_CPU_A64_H_
+#define V8_A64_CPU_A64_H_
+
+#include <stdio.h>
+#include "serialize.h"
+#include "cpu.h"
+
+namespace v8 {
+namespace internal {
+
+
+// CpuFeatures keeps track of which features are supported by the target CPU.
+// Supported features must be enabled by a CpuFeatureScope before use.
+class CpuFeatures : public AllStatic {
+ public:
+ // Detect features of the target CPU. Set safe defaults if the serializer
+ // is enabled (snapshots must be portable).
+ static void Probe();
+
+ // Check whether a feature is supported by the target CPU.
+ static bool IsSupported(CpuFeature f) {
+ ASSERT(initialized_);
+ // There are no optional features for A64.
+ return false;
+ };
+
+ static bool IsFoundByRuntimeProbingOnly(CpuFeature f) {
+ ASSERT(initialized_);
+ // There are no optional features for A64.
+ return false;
+ }
+
+ static bool IsSafeForSnapshot(CpuFeature f) {
+ return (IsSupported(f) &&
+ (!Serializer::enabled() || !IsFoundByRuntimeProbingOnly(f)));
+ }
+
+ // I and D cache line size in bytes.
+ static unsigned dcache_line_size();
+ static unsigned icache_line_size();
+
+ static unsigned supported_;
+
+ static bool VerifyCrossCompiling() {
+ // There are no optional features for A64.
+ ASSERT(cross_compile_ == 0);
+ return true;
+ }
+
+ static bool VerifyCrossCompiling(CpuFeature f) {
+ // There are no optional features for A64.
+ USE(f);
+ ASSERT(cross_compile_ == 0);
+ return true;
+ }
+
+ private:
+ // Return the content of the cache type register.
+ static uint32_t GetCacheType();
+
+ // I and D cache line size in bytes.
+ static unsigned icache_line_size_;
+ static unsigned dcache_line_size_;
+
+#ifdef DEBUG
+ static bool initialized_;
+#endif
+
+ // This isn't used (and is always 0), but it is required by V8.
+ static unsigned found_by_runtime_probing_only_;
+
+ static unsigned cross_compile_;
+
+ friend class PlatformFeatureScope;
+ DISALLOW_COPY_AND_ASSIGN(CpuFeatures);
+};
+
+} } // namespace v8::internal
+
+#endif // V8_A64_CPU_A64_H_
--- /dev/null
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#if V8_TARGET_ARCH_A64
+
+#include "codegen.h"
+#include "debug.h"
+
+namespace v8 {
+namespace internal {
+
+
+#define __ ACCESS_MASM(masm)
+
+
+#ifdef ENABLE_DEBUGGER_SUPPORT
+bool BreakLocationIterator::IsDebugBreakAtReturn() {
+ return Debug::IsDebugBreakAtReturn(rinfo());
+}
+
+
+void BreakLocationIterator::SetDebugBreakAtReturn() {
+ // Patch the code emitted by FullCodeGenerator::EmitReturnSequence, changing
+ // the return from JS function sequence from
+ // mov sp, fp
+ // ldp fp, lr, [sp] #16
+ // ldr ip0, [pc, #(3 * kInstructionSize)]
+ // add sp, sp, ip0
+ // ret
+ // <number of parameters ...
+ // ... plus one (64 bits)>
+ // to a call to the debug break return code.
+ // ldr ip0, [pc, #(3 * kInstructionSize)]
+ // blr ip0
+ // hlt kHltBadCode @ code should not return, catch if it does.
+ // <debug break return code ...
+ // ... entry point address (64 bits)>
+
+ // The patching code must not overflow the space occupied by the return
+ // sequence.
+ STATIC_ASSERT(Assembler::kJSRetSequenceInstructions >= 5);
+ PatchingAssembler patcher(reinterpret_cast<Instruction*>(rinfo()->pc()), 5);
+ byte* entry =
+ debug_info_->GetIsolate()->debug()->debug_break_return()->entry();
+
+ // The first instruction of a patched return sequence must be a load literal
+ // loading the address of the debug break return code.
+ patcher.LoadLiteral(ip0, 3 * kInstructionSize);
+ // TODO(all): check the following is correct.
+ // The debug break return code will push a frame and call statically compiled
+ // code. By using blr, even though control will not return after the branch,
+ // this call site will be registered in the frame (lr being saved as the pc
+ // of the next instruction to execute for this frame). The debugger can now
+ // iterate on the frames to find call to debug break return code.
+ patcher.blr(ip0);
+ patcher.hlt(kHltBadCode);
+ patcher.dc64(reinterpret_cast<int64_t>(entry));
+}
+
+
+void BreakLocationIterator::ClearDebugBreakAtReturn() {
+ // Reset the code emitted by EmitReturnSequence to its original state.
+ rinfo()->PatchCode(original_rinfo()->pc(),
+ Assembler::kJSRetSequenceInstructions);
+}
+
+
+bool Debug::IsDebugBreakAtReturn(RelocInfo* rinfo) {
+ ASSERT(RelocInfo::IsJSReturn(rinfo->rmode()));
+ return rinfo->IsPatchedReturnSequence();
+}
+
+
+bool BreakLocationIterator::IsDebugBreakAtSlot() {
+ ASSERT(IsDebugBreakSlot());
+ // Check whether the debug break slot instructions have been patched.
+ return rinfo()->IsPatchedDebugBreakSlotSequence();
+}
+
+
+void BreakLocationIterator::SetDebugBreakAtSlot() {
+ // Patch the code emitted by Debug::GenerateSlots, changing the debug break
+ // slot code from
+ // mov x0, x0 @ nop DEBUG_BREAK_NOP
+ // mov x0, x0 @ nop DEBUG_BREAK_NOP
+ // mov x0, x0 @ nop DEBUG_BREAK_NOP
+ // mov x0, x0 @ nop DEBUG_BREAK_NOP
+ // to a call to the debug slot code.
+ // ldr ip0, [pc, #(2 * kInstructionSize)]
+ // blr ip0
+ // <debug break slot code ...
+ // ... entry point address (64 bits)>
+
+ // TODO(all): consider adding a hlt instruction after the blr as we don't
+ // expect control to return here. This implies increasing
+ // kDebugBreakSlotInstructions to 5 instructions.
+
+ // The patching code must not overflow the space occupied by the return
+ // sequence.
+ STATIC_ASSERT(Assembler::kDebugBreakSlotInstructions >= 4);
+ PatchingAssembler patcher(reinterpret_cast<Instruction*>(rinfo()->pc()), 4);
+ byte* entry =
+ debug_info_->GetIsolate()->debug()->debug_break_slot()->entry();
+
+ // The first instruction of a patched debug break slot must be a load literal
+ // loading the address of the debug break slot code.
+ patcher.LoadLiteral(ip0, 2 * kInstructionSize);
+ // TODO(all): check the following is correct.
+ // The debug break slot code will push a frame and call statically compiled
+ // code. By using blr, even though control will not return after the branch,
+ // this call site will be registered in the frame (lr being saved as the pc
+ // of the next instruction to execute for this frame). The debugger can now
+ // iterate on the frames to find call to debug break slot code.
+ patcher.blr(ip0);
+ patcher.dc64(reinterpret_cast<int64_t>(entry));
+}
+
+
+void BreakLocationIterator::ClearDebugBreakAtSlot() {
+ ASSERT(IsDebugBreakSlot());
+ rinfo()->PatchCode(original_rinfo()->pc(),
+ Assembler::kDebugBreakSlotInstructions);
+}
+
+const bool Debug::FramePaddingLayout::kIsSupported = false;
+
+static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
+ RegList object_regs,
+ RegList non_object_regs,
+ Register scratch) {
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+
+ // Any live values (object_regs and non_object_regs) in caller-saved
+ // registers (or lr) need to be stored on the stack so that their values are
+ // safely preserved for a call into C code.
+ //
+ // Also:
+ // * object_regs may be modified during the C code by the garbage
+ // collector. Every object register must be a valid tagged pointer or
+ // SMI.
+ //
+ // * non_object_regs will be converted to SMIs so that the garbage
+ // collector doesn't try to interpret them as pointers.
+ //
+ // TODO(jbramley): Why can't this handle callee-saved registers?
+ ASSERT((~kCallerSaved.list() & object_regs) == 0);
+ ASSERT((~kCallerSaved.list() & non_object_regs) == 0);
+ ASSERT((object_regs & non_object_regs) == 0);
+ ASSERT((scratch.Bit() & object_regs) == 0);
+ ASSERT((scratch.Bit() & non_object_regs) == 0);
+ ASSERT((ip0.Bit() & (object_regs | non_object_regs)) == 0);
+ ASSERT((ip1.Bit() & (object_regs | non_object_regs)) == 0);
+ STATIC_ASSERT(kSmiValueSize == 32);
+
+ CPURegList non_object_list =
+ CPURegList(CPURegister::kRegister, kXRegSize, non_object_regs);
+ while (!non_object_list.IsEmpty()) {
+ // Store each non-object register as two SMIs.
+ Register reg = Register(non_object_list.PopLowestIndex());
+ __ Push(reg);
+ __ Poke(wzr, 0);
+ __ Push(reg.W(), wzr);
+ // Stack:
+ // jssp[12]: reg[63:32]
+ // jssp[8]: 0x00000000 (SMI tag & padding)
+ // jssp[4]: reg[31:0]
+ // jssp[0]: 0x00000000 (SMI tag & padding)
+ STATIC_ASSERT((kSmiTag == 0) && (kSmiShift == 32));
+ }
+
+ if (object_regs != 0) {
+ __ PushXRegList(object_regs);
+ }
+
+#ifdef DEBUG
+ __ RecordComment("// Calling from debug break to runtime - come in - over");
+#endif
+ __ Mov(x0, 0); // No arguments.
+ __ Mov(x1, Operand(ExternalReference::debug_break(masm->isolate())));
+
+ CEntryStub stub(1);
+ __ CallStub(&stub);
+
+ // Restore the register values from the expression stack.
+ if (object_regs != 0) {
+ __ PopXRegList(object_regs);
+ }
+
+ non_object_list =
+ CPURegList(CPURegister::kRegister, kXRegSize, non_object_regs);
+ while (!non_object_list.IsEmpty()) {
+ // Load each non-object register from two SMIs.
+ // Stack:
+ // jssp[12]: reg[63:32]
+ // jssp[8]: 0x00000000 (SMI tag & padding)
+ // jssp[4]: reg[31:0]
+ // jssp[0]: 0x00000000 (SMI tag & padding)
+ Register reg = Register(non_object_list.PopHighestIndex());
+ __ Pop(scratch, reg);
+ __ Bfxil(reg, scratch, 32, 32);
+ }
+
+ // Leave the internal frame.
+ }
+
+ // Now that the break point has been handled, resume normal execution by
+ // jumping to the target address intended by the caller and that was
+ // overwritten by the address of DebugBreakXXX.
+ ExternalReference after_break_target(Debug_Address::AfterBreakTarget(),
+ masm->isolate());
+ __ Mov(scratch, Operand(after_break_target));
+ __ Ldr(scratch, MemOperand(scratch));
+ __ Br(scratch);
+}
+
+
+void Debug::GenerateLoadICDebugBreak(MacroAssembler* masm) {
+ // Calling convention for IC load (from ic-arm.cc).
+ // ----------- S t a t e -------------
+ // -- x2 : name
+ // -- lr : return address
+ // -- x0 : receiver
+ // -- [sp] : receiver
+ // -----------------------------------
+ // Registers x0 and x2 contain objects that need to be pushed on the
+ // expression stack of the fake JS frame.
+ Generate_DebugBreakCallHelper(masm, x0.Bit() | x2.Bit(), 0, x10);
+}
+
+
+void Debug::GenerateStoreICDebugBreak(MacroAssembler* masm) {
+ // Calling convention for IC store (from ic-arm.cc).
+ // ----------- S t a t e -------------
+ // -- x0 : value
+ // -- x1 : receiver
+ // -- x2 : name
+ // -- lr : return address
+ // -----------------------------------
+ // Registers x0, x1, and x2 contain objects that need to be pushed on the
+ // expression stack of the fake JS frame.
+ Generate_DebugBreakCallHelper(masm, x0.Bit() | x1.Bit() | x2.Bit(), 0, x10);
+}
+
+
+void Debug::GenerateKeyedLoadICDebugBreak(MacroAssembler* masm) {
+ // ---------- S t a t e --------------
+ // -- lr : return address
+ // -- x0 : key
+ // -- x1 : receiver
+ Generate_DebugBreakCallHelper(masm, x0.Bit() | x1.Bit(), 0, x10);
+}
+
+
+void Debug::GenerateKeyedStoreICDebugBreak(MacroAssembler* masm) {
+ // ---------- S t a t e --------------
+ // -- x0 : value
+ // -- x1 : key
+ // -- x2 : receiver
+ // -- lr : return address
+ Generate_DebugBreakCallHelper(masm, x0.Bit() | x1.Bit() | x2.Bit(), 0, x10);
+}
+
+
+void Debug::GenerateCompareNilICDebugBreak(MacroAssembler* masm) {
+ // Register state for CompareNil IC
+ // ----------- S t a t e -------------
+ // -- r0 : value
+ // -----------------------------------
+ Generate_DebugBreakCallHelper(masm, x0.Bit(), 0, x10);
+}
+
+
+void Debug::GenerateCallICDebugBreak(MacroAssembler* masm) {
+ // Calling convention for IC call (from ic-arm.cc)
+ // ----------- S t a t e -------------
+ // -- x2 : name
+ // -----------------------------------
+ Generate_DebugBreakCallHelper(masm, x2.Bit(), 0, x10);
+}
+
+
+void Debug::GenerateReturnDebugBreak(MacroAssembler* masm) {
+ // In places other than IC call sites it is expected that r0 is TOS which
+ // is an object - this is not generally the case so this should be used with
+ // care.
+ Generate_DebugBreakCallHelper(masm, x0.Bit(), 0, x10);
+}
+
+
+void Debug::GenerateCallFunctionStubDebugBreak(MacroAssembler* masm) {
+ // Register state for CallFunctionStub (from code-stubs-a64.cc).
+ // ----------- S t a t e -------------
+ // -- x1 : function
+ // -----------------------------------
+ Generate_DebugBreakCallHelper(masm, x1.Bit(), 0, x10);
+}
+
+
+void Debug::GenerateCallFunctionStubRecordDebugBreak(MacroAssembler* masm) {
+ // Register state for CallFunctionStub (from code-stubs-a64.cc).
+ // ----------- S t a t e -------------
+ // -- x1 : function
+ // -- x2 : feedback array
+ // -- x3 : slot in feedback array
+ // -----------------------------------
+ Generate_DebugBreakCallHelper(masm, x1.Bit() | x2.Bit() | x3.Bit(), 0, x10);
+}
+
+
+void Debug::GenerateCallConstructStubDebugBreak(MacroAssembler* masm) {
+ // Calling convention for CallConstructStub (from code-stubs-a64.cc).
+ // ----------- S t a t e -------------
+ // -- x0 : number of arguments (not smi)
+ // -- x1 : constructor function
+ // -----------------------------------
+ Generate_DebugBreakCallHelper(masm, x1.Bit(), x0.Bit(), x10);
+}
+
+
+void Debug::GenerateCallConstructStubRecordDebugBreak(MacroAssembler* masm) {
+ // Calling convention for CallConstructStub (from code-stubs-a64.cc).
+ // ----------- S t a t e -------------
+ // -- x0 : number of arguments (not smi)
+ // -- x1 : constructor function
+ // -- x2 : feedback array
+ // -- x3 : feedback slot (smi)
+ // -----------------------------------
+ Generate_DebugBreakCallHelper(
+ masm, x1.Bit() | x2.Bit() | x3.Bit(), x0.Bit(), x10);
+}
+
+
+void Debug::GenerateSlot(MacroAssembler* masm) {
+ // Generate enough nop's to make space for a call instruction. Avoid emitting
+ // the constant pool in the debug break slot code.
+ InstructionAccurateScope scope(masm, Assembler::kDebugBreakSlotInstructions);
+
+ __ RecordDebugBreakSlot();
+ for (int i = 0; i < Assembler::kDebugBreakSlotInstructions; i++) {
+ __ nop(Assembler::DEBUG_BREAK_NOP);
+ }
+}
+
+
+void Debug::GenerateSlotDebugBreak(MacroAssembler* masm) {
+ // In the places where a debug break slot is inserted no registers can contain
+ // object pointers.
+ Generate_DebugBreakCallHelper(masm, 0, 0, x10);
+}
+
+
+void Debug::GeneratePlainReturnLiveEdit(MacroAssembler* masm) {
+ masm->Abort(kLiveEditFrameDroppingIsNotSupportedOnA64);
+}
+
+
+void Debug::GenerateFrameDropperLiveEdit(MacroAssembler* masm) {
+ masm->Abort(kLiveEditFrameDroppingIsNotSupportedOnA64);
+}
+
+const bool Debug::kFrameDropperSupported = false;
+
+#endif // ENABLE_DEBUGGER_SUPPORT
+
+} } // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_A64
--- /dev/null
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#if V8_TARGET_ARCH_A64
+
+#if defined(USE_SIMULATOR)
+
+#include "a64/debugger-a64.h"
+
+namespace v8 {
+namespace internal {
+
+
+void Debugger::VisitException(Instruction* instr) {
+ switch (instr->Mask(ExceptionMask)) {
+ case HLT: {
+ if (instr->ImmException() == kImmExceptionIsDebug) {
+ // Read the arguments encoded inline in the instruction stream.
+ uint32_t code;
+ uint32_t parameters;
+ char const * message;
+
+ ASSERT(sizeof(*pc_) == 1);
+ memcpy(&code, pc_ + kDebugCodeOffset, sizeof(code));
+ memcpy(&parameters, pc_ + kDebugParamsOffset, sizeof(parameters));
+ message = reinterpret_cast<char const *>(pc_ + kDebugMessageOffset);
+
+ if (message[0] == '\0') {
+ fprintf(stream_, "Debugger hit %" PRIu32 ".\n", code);
+ } else {
+ fprintf(stream_, "Debugger hit %" PRIu32 ": %s\n", code, message);
+ }
+
+ // Other options.
+ switch (parameters & kDebuggerTracingDirectivesMask) {
+ case TRACE_ENABLE:
+ set_log_parameters(log_parameters() | parameters);
+ break;
+ case TRACE_DISABLE:
+ set_log_parameters(log_parameters() & ~parameters);
+ break;
+ case TRACE_OVERRIDE:
+ set_log_parameters(parameters);
+ break;
+ default:
+ // We don't support a one-shot LOG_DISASM.
+ ASSERT((parameters & LOG_DISASM) == 0);
+ // Don't print information that is already being traced.
+ parameters &= ~log_parameters();
+ // Print the requested information.
+ if (parameters & LOG_SYS_REGS) PrintSystemRegisters(true);
+ if (parameters & LOG_REGS) PrintRegisters(true);
+ if (parameters & LOG_FP_REGS) PrintFPRegisters(true);
+ }
+
+ // Check if the debugger should break.
+ if (parameters & BREAK) OS::DebugBreak();
+
+ // The stop parameters are inlined in the code. Skip them:
+ // - Skip to the end of the message string.
+ pc_ += kDebugMessageOffset + strlen(message) + 1;
+ // - Advance to the next aligned location.
+ pc_ = AlignUp(pc_, kInstructionSize);
+ // - Verify that the unreachable marker is present.
+ ASSERT(reinterpret_cast<Instruction*>(pc_)->Mask(ExceptionMask) == HLT);
+ ASSERT(reinterpret_cast<Instruction*>(pc_)->ImmException() ==
+ kImmExceptionIsUnreachable);
+ // - Skip past the unreachable marker.
+ pc_ += kInstructionSize;
+ pc_modified_ = true;
+ } else {
+ Simulator::VisitException(instr);
+ }
+ break;
+ }
+
+ default:
+ UNIMPLEMENTED();
+ }
+}
+
+
+} } // namespace v8::internal
+
+#endif // USE_SIMULATOR
+
+#endif // V8_TARGET_ARCH_A64
--- /dev/null
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_A64_DEBUGGER_A64_H_
+#define V8_A64_DEBUGGER_A64_H_
+
+#if defined(USE_SIMULATOR)
+
+#include "globals.h"
+#include "utils.h"
+#include "a64/constants-a64.h"
+#include "a64/simulator-a64.h"
+
+namespace v8 {
+namespace internal {
+
+
+class Debugger : public Simulator {
+ public:
+ Debugger(Decoder* decoder, FILE* stream = stderr)
+ : Simulator(decoder, NULL, stream) {}
+
+ // Functions overloading.
+ void VisitException(Instruction* instr);
+};
+
+
+} } // namespace v8::internal
+
+#endif // USE_SIMULATOR
+
+#endif // V8_A64_DEBUGGER_A64_H_
--- /dev/null
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#if V8_TARGET_ARCH_A64
+
+#include "globals.h"
+#include "utils.h"
+#include "a64/decoder-a64.h"
+
+
+namespace v8 {
+namespace internal {
+
+// Top-level instruction decode function.
+void Decoder::Decode(Instruction *instr) {
+ if (instr->Bits(28, 27) == 0) {
+ VisitUnallocated(instr);
+ } else {
+ switch (instr->Bits(27, 24)) {
+ // 0: PC relative addressing.
+ case 0x0: DecodePCRelAddressing(instr); break;
+
+ // 1: Add/sub immediate.
+ case 0x1: DecodeAddSubImmediate(instr); break;
+
+ // A: Logical shifted register.
+ // Add/sub with carry.
+ // Conditional compare register.
+ // Conditional compare immediate.
+ // Conditional select.
+ // Data processing 1 source.
+ // Data processing 2 source.
+ // B: Add/sub shifted register.
+ // Add/sub extended register.
+ // Data processing 3 source.
+ case 0xA:
+ case 0xB: DecodeDataProcessing(instr); break;
+
+ // 2: Logical immediate.
+ // Move wide immediate.
+ case 0x2: DecodeLogical(instr); break;
+
+ // 3: Bitfield.
+ // Extract.
+ case 0x3: DecodeBitfieldExtract(instr); break;
+
+ // 4: Unconditional branch immediate.
+ // Exception generation.
+ // Compare and branch immediate.
+ // 5: Compare and branch immediate.
+ // Conditional branch.
+ // System.
+ // 6,7: Unconditional branch.
+ // Test and branch immediate.
+ case 0x4:
+ case 0x5:
+ case 0x6:
+ case 0x7: DecodeBranchSystemException(instr); break;
+
+ // 8,9: Load/store register pair post-index.
+ // Load register literal.
+ // Load/store register unscaled immediate.
+ // Load/store register immediate post-index.
+ // Load/store register immediate pre-index.
+ // Load/store register offset.
+ // C,D: Load/store register pair offset.
+ // Load/store register pair pre-index.
+ // Load/store register unsigned immediate.
+ // Advanced SIMD.
+ case 0x8:
+ case 0x9:
+ case 0xC:
+ case 0xD: DecodeLoadStore(instr); break;
+
+ // E: FP fixed point conversion.
+ // FP integer conversion.
+ // FP data processing 1 source.
+ // FP compare.
+ // FP immediate.
+ // FP data processing 2 source.
+ // FP conditional compare.
+ // FP conditional select.
+ // Advanced SIMD.
+ // F: FP data processing 3 source.
+ // Advanced SIMD.
+ case 0xE:
+ case 0xF: DecodeFP(instr); break;
+ }
+ }
+}
+
+
+// Append `new_visitor` to the END of the visitor list (removing any existing
+// registration first so a visitor appears at most once).
+// BUG FIX: the original used push_front, which prepended — the opposite of
+// what "Append" promises and the mirror image of PrependVisitor below.
+void Decoder::AppendVisitor(DecoderVisitor* new_visitor) {
+  visitors_.remove(new_visitor);
+  visitors_.push_back(new_visitor);
+}
+
+
+// Prepend `new_visitor` to the FRONT of the visitor list (removing any
+// existing registration first so a visitor appears at most once).
+// BUG FIX: the original used push_back, which appended — the opposite of
+// what "Prepend" promises and the mirror image of AppendVisitor above.
+void Decoder::PrependVisitor(DecoderVisitor* new_visitor) {
+  visitors_.remove(new_visitor);
+  visitors_.push_front(new_visitor);
+}
+
+
+void Decoder::InsertVisitorBefore(DecoderVisitor* new_visitor,
+ DecoderVisitor* registered_visitor) {
+ visitors_.remove(new_visitor);
+ std::list<DecoderVisitor*>::iterator it;
+ for (it = visitors_.begin(); it != visitors_.end(); it++) {
+ if (*it == registered_visitor) {
+ visitors_.insert(it, new_visitor);
+ return;
+ }
+ }
+ // We reached the end of the list. The last element must be
+ // registered_visitor.
+ ASSERT(*it == registered_visitor);
+ visitors_.insert(it, new_visitor);
+}
+
+
+void Decoder::InsertVisitorAfter(DecoderVisitor* new_visitor,
+ DecoderVisitor* registered_visitor) {
+ visitors_.remove(new_visitor);
+ std::list<DecoderVisitor*>::iterator it;
+ for (it = visitors_.begin(); it != visitors_.end(); it++) {
+ if (*it == registered_visitor) {
+ it++;
+ visitors_.insert(it, new_visitor);
+ return;
+ }
+ }
+ // We reached the end of the list. The last element must be
+ // registered_visitor.
+ ASSERT(*it == registered_visitor);
+ visitors_.push_back(new_visitor);
+}
+
+
+void Decoder::RemoveVisitor(DecoderVisitor* visitor) {
+ visitors_.remove(visitor);
+}
+
+
+void Decoder::DecodePCRelAddressing(Instruction* instr) {
+ ASSERT(instr->Bits(27, 24) == 0x0);
+ // We know bit 28 is set, as <b28:b27> = 0 is filtered out at the top level
+ // decode.
+ ASSERT(instr->Bit(28) == 0x1);
+ VisitPCRelAddressing(instr);
+}
+
+
+void Decoder::DecodeBranchSystemException(Instruction* instr) {
+ ASSERT((instr->Bits(27, 24) == 0x4) ||
+ (instr->Bits(27, 24) == 0x5) ||
+ (instr->Bits(27, 24) == 0x6) ||
+ (instr->Bits(27, 24) == 0x7) );
+
+ switch (instr->Bits(31, 29)) {
+ case 0:
+ case 4: {
+ VisitUnconditionalBranch(instr);
+ break;
+ }
+ case 1:
+ case 5: {
+ if (instr->Bit(25) == 0) {
+ VisitCompareBranch(instr);
+ } else {
+ VisitTestBranch(instr);
+ }
+ break;
+ }
+ case 2: {
+ if (instr->Bit(25) == 0) {
+ if ((instr->Bit(24) == 0x1) ||
+ (instr->Mask(0x01000010) == 0x00000010)) {
+ VisitUnallocated(instr);
+ } else {
+ VisitConditionalBranch(instr);
+ }
+ } else {
+ VisitUnallocated(instr);
+ }
+ break;
+ }
+ case 6: {
+ if (instr->Bit(25) == 0) {
+ if (instr->Bit(24) == 0) {
+ if ((instr->Bits(4, 2) != 0) ||
+ (instr->Mask(0x00E0001D) == 0x00200001) ||
+ (instr->Mask(0x00E0001D) == 0x00400001) ||
+ (instr->Mask(0x00E0001E) == 0x00200002) ||
+ (instr->Mask(0x00E0001E) == 0x00400002) ||
+ (instr->Mask(0x00E0001C) == 0x00600000) ||
+ (instr->Mask(0x00E0001C) == 0x00800000) ||
+ (instr->Mask(0x00E0001F) == 0x00A00000) ||
+ (instr->Mask(0x00C0001C) == 0x00C00000)) {
+ VisitUnallocated(instr);
+ } else {
+ VisitException(instr);
+ }
+ } else {
+ if (instr->Bits(23, 22) == 0) {
+ const Instr masked_003FF0E0 = instr->Mask(0x003FF0E0);
+ if ((instr->Bits(21, 19) == 0x4) ||
+ (masked_003FF0E0 == 0x00033000) ||
+ (masked_003FF0E0 == 0x003FF020) ||
+ (masked_003FF0E0 == 0x003FF060) ||
+ (masked_003FF0E0 == 0x003FF0E0) ||
+ (instr->Mask(0x00388000) == 0x00008000) ||
+ (instr->Mask(0x0038E000) == 0x00000000) ||
+ (instr->Mask(0x0039E000) == 0x00002000) ||
+ (instr->Mask(0x003AE000) == 0x00002000) ||
+ (instr->Mask(0x003CE000) == 0x00042000) ||
+ (instr->Mask(0x003FFFC0) == 0x000320C0) ||
+ (instr->Mask(0x003FF100) == 0x00032100) ||
+ (instr->Mask(0x003FF200) == 0x00032200) ||
+ (instr->Mask(0x003FF400) == 0x00032400) ||
+ (instr->Mask(0x003FF800) == 0x00032800) ||
+ (instr->Mask(0x0038F000) == 0x00005000) ||
+ (instr->Mask(0x0038E000) == 0x00006000)) {
+ VisitUnallocated(instr);
+ } else {
+ VisitSystem(instr);
+ }
+ } else {
+ VisitUnallocated(instr);
+ }
+ }
+ } else {
+ if ((instr->Bit(24) == 0x1) ||
+ (instr->Bits(20, 16) != 0x1F) ||
+ (instr->Bits(15, 10) != 0) ||
+ (instr->Bits(4, 0) != 0) ||
+ (instr->Bits(24, 21) == 0x3) ||
+ (instr->Bits(24, 22) == 0x3)) {
+ VisitUnallocated(instr);
+ } else {
+ VisitUnconditionalBranchToRegister(instr);
+ }
+ }
+ break;
+ }
+ case 3:
+ case 7: {
+ VisitUnallocated(instr);
+ break;
+ }
+ }
+}
+
+
+void Decoder::DecodeLoadStore(Instruction* instr) {
+ ASSERT((instr->Bits(27, 24) == 0x8) ||
+ (instr->Bits(27, 24) == 0x9) ||
+ (instr->Bits(27, 24) == 0xC) ||
+ (instr->Bits(27, 24) == 0xD) );
+
+ if (instr->Bit(24) == 0) {
+ if (instr->Bit(28) == 0) {
+ if (instr->Bit(29) == 0) {
+ if (instr->Bit(26) == 0) {
+ // TODO(all): VisitLoadStoreExclusive.
+ VisitUnimplemented(instr);
+ } else {
+ DecodeAdvSIMDLoadStore(instr);
+ }
+ } else {
+ if ((instr->Bits(31, 30) == 0x3) ||
+ (instr->Mask(0xC4400000) == 0x40000000)) {
+ VisitUnallocated(instr);
+ } else {
+ if (instr->Bit(23) == 0) {
+ if (instr->Mask(0xC4400000) == 0xC0400000) {
+ VisitUnallocated(instr);
+ } else {
+ VisitLoadStorePairNonTemporal(instr);
+ }
+ } else {
+ VisitLoadStorePairPostIndex(instr);
+ }
+ }
+ }
+ } else {
+ if (instr->Bit(29) == 0) {
+ if (instr->Mask(0xC4000000) == 0xC4000000) {
+ VisitUnallocated(instr);
+ } else {
+ VisitLoadLiteral(instr);
+ }
+ } else {
+ if ((instr->Mask(0x84C00000) == 0x80C00000) ||
+ (instr->Mask(0x44800000) == 0x44800000) ||
+ (instr->Mask(0x84800000) == 0x84800000)) {
+ VisitUnallocated(instr);
+ } else {
+ if (instr->Bit(21) == 0) {
+ switch (instr->Bits(11, 10)) {
+ case 0: {
+ VisitLoadStoreUnscaledOffset(instr);
+ break;
+ }
+ case 1: {
+ if (instr->Mask(0xC4C00000) == 0xC0800000) {
+ VisitUnallocated(instr);
+ } else {
+ VisitLoadStorePostIndex(instr);
+ }
+ break;
+ }
+ case 2: {
+ // TODO(all): VisitLoadStoreRegisterOffsetUnpriv.
+ VisitUnimplemented(instr);
+ break;
+ }
+ case 3: {
+ if (instr->Mask(0xC4C00000) == 0xC0800000) {
+ VisitUnallocated(instr);
+ } else {
+ VisitLoadStorePreIndex(instr);
+ }
+ break;
+ }
+ }
+ } else {
+ if (instr->Bits(11, 10) == 0x2) {
+ if (instr->Bit(14) == 0) {
+ VisitUnallocated(instr);
+ } else {
+ VisitLoadStoreRegisterOffset(instr);
+ }
+ } else {
+ VisitUnallocated(instr);
+ }
+ }
+ }
+ }
+ }
+ } else {
+ if (instr->Bit(28) == 0) {
+ if (instr->Bit(29) == 0) {
+ VisitUnallocated(instr);
+ } else {
+ if ((instr->Bits(31, 30) == 0x3) ||
+ (instr->Mask(0xC4400000) == 0x40000000)) {
+ VisitUnallocated(instr);
+ } else {
+ if (instr->Bit(23) == 0) {
+ VisitLoadStorePairOffset(instr);
+ } else {
+ VisitLoadStorePairPreIndex(instr);
+ }
+ }
+ }
+ } else {
+ if (instr->Bit(29) == 0) {
+ VisitUnallocated(instr);
+ } else {
+ if ((instr->Mask(0x84C00000) == 0x80C00000) ||
+ (instr->Mask(0x44800000) == 0x44800000) ||
+ (instr->Mask(0x84800000) == 0x84800000)) {
+ VisitUnallocated(instr);
+ } else {
+ VisitLoadStoreUnsignedOffset(instr);
+ }
+ }
+ }
+ }
+}
+
+
+void Decoder::DecodeLogical(Instruction* instr) {
+ ASSERT(instr->Bits(27, 24) == 0x2);
+
+ if (instr->Mask(0x80400000) == 0x00400000) {
+ VisitUnallocated(instr);
+ } else {
+ if (instr->Bit(23) == 0) {
+ VisitLogicalImmediate(instr);
+ } else {
+ if (instr->Bits(30, 29) == 0x1) {
+ VisitUnallocated(instr);
+ } else {
+ VisitMoveWideImmediate(instr);
+ }
+ }
+ }
+}
+
+
+void Decoder::DecodeBitfieldExtract(Instruction* instr) {
+ ASSERT(instr->Bits(27, 24) == 0x3);
+
+ if ((instr->Mask(0x80400000) == 0x80000000) ||
+ (instr->Mask(0x80400000) == 0x00400000) ||
+ (instr->Mask(0x80008000) == 0x00008000)) {
+ VisitUnallocated(instr);
+ } else if (instr->Bit(23) == 0) {
+ if ((instr->Mask(0x80200000) == 0x00200000) ||
+ (instr->Mask(0x60000000) == 0x60000000)) {
+ VisitUnallocated(instr);
+ } else {
+ VisitBitfield(instr);
+ }
+ } else {
+ if ((instr->Mask(0x60200000) == 0x00200000) ||
+ (instr->Mask(0x60000000) != 0x00000000)) {
+ VisitUnallocated(instr);
+ } else {
+ VisitExtract(instr);
+ }
+ }
+}
+
+
+void Decoder::DecodeAddSubImmediate(Instruction* instr) {
+ ASSERT(instr->Bits(27, 24) == 0x1);
+ if (instr->Bit(23) == 1) {
+ VisitUnallocated(instr);
+ } else {
+ VisitAddSubImmediate(instr);
+ }
+}
+
+
+void Decoder::DecodeDataProcessing(Instruction* instr) {
+ ASSERT((instr->Bits(27, 24) == 0xA) ||
+ (instr->Bits(27, 24) == 0xB) );
+
+ if (instr->Bit(24) == 0) {
+ if (instr->Bit(28) == 0) {
+ if (instr->Mask(0x80008000) == 0x00008000) {
+ VisitUnallocated(instr);
+ } else {
+ VisitLogicalShifted(instr);
+ }
+ } else {
+ switch (instr->Bits(23, 21)) {
+ case 0: {
+ if (instr->Mask(0x0000FC00) != 0) {
+ VisitUnallocated(instr);
+ } else {
+ VisitAddSubWithCarry(instr);
+ }
+ break;
+ }
+ case 2: {
+ if ((instr->Bit(29) == 0) ||
+ (instr->Mask(0x00000410) != 0)) {
+ VisitUnallocated(instr);
+ } else {
+ if (instr->Bit(11) == 0) {
+ VisitConditionalCompareRegister(instr);
+ } else {
+ VisitConditionalCompareImmediate(instr);
+ }
+ }
+ break;
+ }
+ case 4: {
+ if (instr->Mask(0x20000800) != 0x00000000) {
+ VisitUnallocated(instr);
+ } else {
+ VisitConditionalSelect(instr);
+ }
+ break;
+ }
+ case 6: {
+ if (instr->Bit(29) == 0x1) {
+ VisitUnallocated(instr);
+ } else {
+ if (instr->Bit(30) == 0) {
+ if ((instr->Bit(15) == 0x1) ||
+ (instr->Bits(15, 11) == 0) ||
+ (instr->Bits(15, 12) == 0x1) ||
+ (instr->Bits(15, 12) == 0x3) ||
+ (instr->Bits(15, 13) == 0x3) ||
+ (instr->Mask(0x8000EC00) == 0x00004C00) ||
+ (instr->Mask(0x8000E800) == 0x80004000) ||
+ (instr->Mask(0x8000E400) == 0x80004000)) {
+ VisitUnallocated(instr);
+ } else {
+ VisitDataProcessing2Source(instr);
+ }
+ } else {
+ if ((instr->Bit(13) == 1) ||
+ (instr->Bits(20, 16) != 0) ||
+ (instr->Bits(15, 14) != 0) ||
+ (instr->Mask(0xA01FFC00) == 0x00000C00) ||
+ (instr->Mask(0x201FF800) == 0x00001800)) {
+ VisitUnallocated(instr);
+ } else {
+ VisitDataProcessing1Source(instr);
+ }
+ }
+          }
+          break;
+        }
+ case 1:
+ case 3:
+ case 5:
+ case 7: VisitUnallocated(instr); break;
+ }
+ }
+ } else {
+ if (instr->Bit(28) == 0) {
+ if (instr->Bit(21) == 0) {
+ if ((instr->Bits(23, 22) == 0x3) ||
+ (instr->Mask(0x80008000) == 0x00008000)) {
+ VisitUnallocated(instr);
+ } else {
+ VisitAddSubShifted(instr);
+ }
+ } else {
+ if ((instr->Mask(0x00C00000) != 0x00000000) ||
+ (instr->Mask(0x00001400) == 0x00001400) ||
+ (instr->Mask(0x00001800) == 0x00001800)) {
+ VisitUnallocated(instr);
+ } else {
+ VisitAddSubExtended(instr);
+ }
+ }
+ } else {
+ if ((instr->Bit(30) == 0x1) ||
+ (instr->Bits(30, 29) == 0x1) ||
+ (instr->Mask(0xE0600000) == 0x00200000) ||
+ (instr->Mask(0xE0608000) == 0x00400000) ||
+ (instr->Mask(0x60608000) == 0x00408000) ||
+ (instr->Mask(0x60E00000) == 0x00E00000) ||
+ (instr->Mask(0x60E00000) == 0x00800000) ||
+ (instr->Mask(0x60E00000) == 0x00600000)) {
+ VisitUnallocated(instr);
+ } else {
+ VisitDataProcessing3Source(instr);
+ }
+ }
+ }
+}
+
+
+// Decode the scalar floating point and top-level Advanced SIMD instruction
+// classes and dispatch to the matching visitor.
+// On entry, instruction bits 27:24 = {0xE, 0xF}.
+void Decoder::DecodeFP(Instruction* instr) {
+  ASSERT((instr->Bits(27, 24) == 0xE) ||
+         (instr->Bits(27, 24) == 0xF) );
+
+  if (instr->Bit(28) == 0) {
+    DecodeAdvSIMDDataProcessing(instr);
+  } else {
+    if (instr->Bit(29) == 1) {
+      VisitUnallocated(instr);
+    } else {
+      if (instr->Bits(31, 30) == 0x3) {
+        VisitUnallocated(instr);
+      } else if (instr->Bits(31, 30) == 0x1) {
+        DecodeAdvSIMDDataProcessing(instr);
+      } else {
+        if (instr->Bit(24) == 0) {
+          if (instr->Bit(21) == 0) {
+            // FP <-> fixed point conversions.
+            if ((instr->Bit(23) == 1) ||
+                (instr->Bit(18) == 1) ||
+                (instr->Mask(0x80008000) == 0x00000000) ||
+                (instr->Mask(0x000E0000) == 0x00000000) ||
+                (instr->Mask(0x000E0000) == 0x000A0000) ||
+                (instr->Mask(0x00160000) == 0x00000000) ||
+                (instr->Mask(0x00160000) == 0x00120000)) {
+              VisitUnallocated(instr);
+            } else {
+              VisitFPFixedPointConvert(instr);
+            }
+          } else {
+            // Bits 15:10 discriminate the remaining scalar FP classes.
+            if (instr->Bits(15, 10) == 32) {
+              VisitUnallocated(instr);
+            } else if (instr->Bits(15, 10) == 0) {
+              // FP <-> integer conversions.
+              if ((instr->Bits(23, 22) == 0x3) ||
+                  (instr->Mask(0x000E0000) == 0x000A0000) ||
+                  (instr->Mask(0x000E0000) == 0x000C0000) ||
+                  (instr->Mask(0x00160000) == 0x00120000) ||
+                  (instr->Mask(0x00160000) == 0x00140000) ||
+                  (instr->Mask(0x20C40000) == 0x00800000) ||
+                  (instr->Mask(0x20C60000) == 0x00840000) ||
+                  (instr->Mask(0xA0C60000) == 0x80060000) ||
+                  (instr->Mask(0xA0C60000) == 0x00860000) ||
+                  (instr->Mask(0xA0C60000) == 0x00460000) ||
+                  (instr->Mask(0xA0CE0000) == 0x80860000) ||
+                  (instr->Mask(0xA0CE0000) == 0x804E0000) ||
+                  (instr->Mask(0xA0CE0000) == 0x000E0000) ||
+                  (instr->Mask(0xA0D60000) == 0x00160000) ||
+                  (instr->Mask(0xA0D60000) == 0x80560000) ||
+                  (instr->Mask(0xA0D60000) == 0x80960000)) {
+                VisitUnallocated(instr);
+              } else {
+                VisitFPIntegerConvert(instr);
+              }
+            } else if (instr->Bits(14, 10) == 16) {
+              // FP data processing (1 source).
+              const Instr masked_A0DF8000 = instr->Mask(0xA0DF8000);
+              if ((instr->Mask(0x80180000) != 0) ||
+                  (masked_A0DF8000 == 0x00020000) ||
+                  (masked_A0DF8000 == 0x00030000) ||
+                  (masked_A0DF8000 == 0x00068000) ||
+                  (masked_A0DF8000 == 0x00428000) ||
+                  (masked_A0DF8000 == 0x00430000) ||
+                  (masked_A0DF8000 == 0x00468000) ||
+                  (instr->Mask(0xA0D80000) == 0x00800000) ||
+                  (instr->Mask(0xA0DE0000) == 0x00C00000) ||
+                  (instr->Mask(0xA0DF0000) == 0x00C30000) ||
+                  (instr->Mask(0xA0DC0000) == 0x00C40000)) {
+                VisitUnallocated(instr);
+              } else {
+                VisitFPDataProcessing1Source(instr);
+              }
+            } else if (instr->Bits(13, 10) == 8) {
+              // FP compare.
+              if ((instr->Bits(15, 14) != 0) ||
+                  (instr->Bits(2, 0) != 0) ||
+                  (instr->Mask(0x80800000) != 0x00000000)) {
+                VisitUnallocated(instr);
+              } else {
+                VisitFPCompare(instr);
+              }
+            } else if (instr->Bits(12, 10) == 4) {
+              // FP immediate.
+              if ((instr->Bits(9, 5) != 0) ||
+                  (instr->Mask(0x80800000) != 0x00000000)) {
+                VisitUnallocated(instr);
+              } else {
+                VisitFPImmediate(instr);
+              }
+            } else {
+              if (instr->Mask(0x80800000) != 0x00000000) {
+                VisitUnallocated(instr);
+              } else {
+                switch (instr->Bits(11, 10)) {
+                  case 1: {
+                    VisitFPConditionalCompare(instr);
+                    break;
+                  }
+                  case 2: {
+                    // FP data processing (2 source).
+                    if ((instr->Bits(15, 14) == 0x3) ||
+                        (instr->Mask(0x00009000) == 0x00009000) ||
+                        (instr->Mask(0x0000A000) == 0x0000A000)) {
+                      VisitUnallocated(instr);
+                    } else {
+                      VisitFPDataProcessing2Source(instr);
+                    }
+                    break;
+                  }
+                  case 3: {
+                    VisitFPConditionalSelect(instr);
+                    break;
+                  }
+                  // Bits(11, 10) == 0 cannot reach here: every such pattern
+                  // is consumed by the if/else chain on bits 15:10 above.
+                  default: UNREACHABLE();
+                }
+              }
+            }
+          }
+        } else {
+          // Bit 30 == 1 has been handled earlier.
+          ASSERT(instr->Bit(30) == 0);
+          // FP data processing (3 source).
+          if (instr->Mask(0xA0800000) != 0) {
+            VisitUnallocated(instr);
+          } else {
+            VisitFPDataProcessing3Source(instr);
+          }
+        }
+      }
+    }
+  }
+}
+
+
+// Placeholder decoder for the Advanced SIMD (NEON) load/store group; all
+// such encodings currently report as unimplemented.
+// On entry, instruction bits 29:25 = 0x6.
+void Decoder::DecodeAdvSIMDLoadStore(Instruction* instr) {
+  // TODO(all): Implement Advanced SIMD load/store instruction decode.
+  ASSERT(instr->Bits(29, 25) == 0x6);
+  VisitUnimplemented(instr);
+}
+
+
+// Placeholder decoder for the Advanced SIMD (NEON) data processing group;
+// all such encodings currently report as unimplemented.
+// On entry, instruction bits 27:25 = 0x7.
+void Decoder::DecodeAdvSIMDDataProcessing(Instruction* instr) {
+  // TODO(all): Implement Advanced SIMD data processing instruction decode.
+  ASSERT(instr->Bits(27, 25) == 0x7);
+  VisitUnimplemented(instr);
+}
+
+
+// Define Decoder::Visit<A> for every entry of VISITOR_LIST: check (in debug
+// builds) that the instruction's fixed bits match the class A, then forward
+// the instruction to each registered visitor in list order.
+// NOTE(review): the `if` wrapping the ASSERT is redundant — the ASSERT alone
+// has the same effect in debug builds and both compile away in release.
+#define DEFINE_VISITOR_CALLERS(A)                                \
+  void Decoder::Visit##A(Instruction *instr) {                   \
+    if (!(instr->Mask(A##FMask) == A##Fixed)) {                  \
+      ASSERT(instr->Mask(A##FMask) == A##Fixed);                 \
+    }                                                            \
+    std::list<DecoderVisitor*>::iterator it;                     \
+    for (it = visitors_.begin(); it != visitors_.end(); it++) {  \
+      (*it)->Visit##A(instr);                                    \
+    }                                                            \
+  }
+VISITOR_LIST(DEFINE_VISITOR_CALLERS)
+#undef DEFINE_VISITOR_CALLERS
+
+
+} } // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_A64
--- /dev/null
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_A64_DECODER_A64_H_
+#define V8_A64_DECODER_A64_H_
+
+#include <list>
+
+#include "globals.h"
+#include "a64/instructions-a64.h"
+
+namespace v8 {
+namespace internal {
+
+
+// List macro containing all visitors needed by the decoder class. The
+// decoder routes each decoded instruction to the Visit method of one of
+// these instruction classes; Unallocated and Unimplemented are the buckets
+// for invalid and not-yet-supported encodings respectively.
+
+#define VISITOR_LIST(V)             \
+  V(PCRelAddressing)                \
+  V(AddSubImmediate)                \
+  V(LogicalImmediate)               \
+  V(MoveWideImmediate)              \
+  V(Bitfield)                       \
+  V(Extract)                        \
+  V(UnconditionalBranch)            \
+  V(UnconditionalBranchToRegister)  \
+  V(CompareBranch)                  \
+  V(TestBranch)                     \
+  V(ConditionalBranch)              \
+  V(System)                         \
+  V(Exception)                      \
+  V(LoadStorePairPostIndex)         \
+  V(LoadStorePairOffset)            \
+  V(LoadStorePairPreIndex)          \
+  V(LoadStorePairNonTemporal)       \
+  V(LoadLiteral)                    \
+  V(LoadStoreUnscaledOffset)        \
+  V(LoadStorePostIndex)             \
+  V(LoadStorePreIndex)              \
+  V(LoadStoreRegisterOffset)        \
+  V(LoadStoreUnsignedOffset)        \
+  V(LogicalShifted)                 \
+  V(AddSubShifted)                  \
+  V(AddSubExtended)                 \
+  V(AddSubWithCarry)                \
+  V(ConditionalCompareRegister)     \
+  V(ConditionalCompareImmediate)    \
+  V(ConditionalSelect)              \
+  V(DataProcessing1Source)          \
+  V(DataProcessing2Source)          \
+  V(DataProcessing3Source)          \
+  V(FPCompare)                      \
+  V(FPConditionalCompare)           \
+  V(FPConditionalSelect)            \
+  V(FPImmediate)                    \
+  V(FPDataProcessing1Source)        \
+  V(FPDataProcessing2Source)        \
+  V(FPDataProcessing3Source)        \
+  V(FPIntegerConvert)               \
+  V(FPFixedPointConvert)            \
+  V(Unallocated)                    \
+  V(Unimplemented)
+
+// The Visitor interface. Disassembler and simulator (and other tools)
+// must provide implementations for all of these functions.
+class DecoderVisitor {
+ public:
+  // One pure-virtual hook per instruction class in VISITOR_LIST.
+  #define DECLARE(A) virtual void Visit##A(Instruction* instr) = 0;
+  VISITOR_LIST(DECLARE)
+  #undef DECLARE
+
+  virtual ~DecoderVisitor() {}
+
+ private:
+  // Visitors are registered in a list.
+  // NOTE(review): only the Decoder subclass (a friend, below) touches this
+  // member; it is inaccessible to ordinary visitor implementations.
+  std::list<DecoderVisitor*> visitors_;
+
+  friend class Decoder;
+};
+
+
+// The top-level A64 instruction decoder. It fans each decoded instruction
+// out to a registered, ordered list of DecoderVisitor implementations.
+class Decoder: public DecoderVisitor {
+ public:
+  // NOTE(review): `explicit` has no effect on a zero-argument constructor.
+  explicit Decoder() {}
+
+  // Top-level instruction decoder function. Decodes an instruction and calls
+  // the visitor functions registered with the Decoder class.
+  void Decode(Instruction *instr);
+
+  // Register a new visitor class with the decoder.
+  // Decode() will call the corresponding visitor method from all registered
+  // visitor classes when decoding reaches the leaf node of the instruction
+  // decode tree.
+  // Visitors are called in list order.
+  // A visitor can only be registered once.
+  // Registering an already registered visitor will update its position.
+  //
+  //   d.AppendVisitor(V1);
+  //   d.AppendVisitor(V2);
+  //   d.PrependVisitor(V2);        // Move V2 at the start of the list.
+  //   d.InsertVisitorBefore(V3, V2);
+  //   d.AppendVisitor(V4);
+  //   d.AppendVisitor(V4);         // No effect.
+  //
+  //   d.Decode(i);
+  //
+  // will call in order visitor methods in V3, V2, V1, V4.
+  void AppendVisitor(DecoderVisitor* visitor);
+  void PrependVisitor(DecoderVisitor* visitor);
+  void InsertVisitorBefore(DecoderVisitor* new_visitor,
+                           DecoderVisitor* registered_visitor);
+  void InsertVisitorAfter(DecoderVisitor* new_visitor,
+                          DecoderVisitor* registered_visitor);
+
+  // Remove a previously registered visitor class from the list of visitors
+  // stored by the decoder.
+  void RemoveVisitor(DecoderVisitor* visitor);
+
+  // Leaf dispatchers: one Visit method per instruction class; each forwards
+  // the instruction to every registered visitor.
+  #define DECLARE(A) void Visit##A(Instruction* instr);
+  VISITOR_LIST(DECLARE)
+  #undef DECLARE
+
+ private:
+  // Decode the PC relative addressing instruction, and call the corresponding
+  // visitors.
+  // On entry, instruction bits 27:24 = 0x0.
+  void DecodePCRelAddressing(Instruction* instr);
+
+  // Decode the add/subtract immediate instruction, and call the corresponding
+  // visitors.
+  // On entry, instruction bits 27:24 = 0x1.
+  void DecodeAddSubImmediate(Instruction* instr);
+
+  // Decode the branch, system command, and exception generation parts of
+  // the instruction tree, and call the corresponding visitors.
+  // On entry, instruction bits 27:24 = {0x4, 0x5, 0x6, 0x7}.
+  void DecodeBranchSystemException(Instruction* instr);
+
+  // Decode the load and store parts of the instruction tree, and call
+  // the corresponding visitors.
+  // On entry, instruction bits 27:24 = {0x8, 0x9, 0xC, 0xD}.
+  void DecodeLoadStore(Instruction* instr);
+
+  // Decode the logical immediate and move wide immediate parts of the
+  // instruction tree, and call the corresponding visitors.
+  // On entry, instruction bits 27:24 = 0x2.
+  void DecodeLogical(Instruction* instr);
+
+  // Decode the bitfield and extraction parts of the instruction tree,
+  // and call the corresponding visitors.
+  // On entry, instruction bits 27:24 = 0x3.
+  void DecodeBitfieldExtract(Instruction* instr);
+
+  // Decode the data processing parts of the instruction tree, and call the
+  // corresponding visitors.
+  // On entry, instruction bits 27:24 = {0x1, 0xA, 0xB}.
+  // NOTE(review): the implementation asserts only 0xA/0xB; 0x1 is routed via
+  // DecodeAddSubImmediate — confirm whether the 0x1 above is stale.
+  void DecodeDataProcessing(Instruction* instr);
+
+  // Decode the floating point parts of the instruction tree, and call the
+  // corresponding visitors.
+  // On entry, instruction bits 27:24 = {0xE, 0xF}.
+  void DecodeFP(Instruction* instr);
+
+  // Decode the Advanced SIMD (NEON) load/store part of the instruction tree,
+  // and call the corresponding visitors.
+  // On entry, instruction bits 29:25 = 0x6.
+  void DecodeAdvSIMDLoadStore(Instruction* instr);
+
+  // Decode the Advanced SIMD (NEON) data processing part of the instruction
+  // tree, and call the corresponding visitors.
+  // On entry, instruction bits 27:25 = 0x7.
+  void DecodeAdvSIMDDataProcessing(Instruction* instr);
+};
+
+
+} } // namespace v8::internal
+
+#endif // V8_A64_DECODER_A64_H_
--- /dev/null
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "codegen.h"
+#include "deoptimizer.h"
+#include "full-codegen.h"
+#include "safepoint-table.h"
+
+
+namespace v8 {
+namespace internal {
+
+
+int Deoptimizer::patch_size() {
+  // Size of the code used to patch lazy bailout points.
+  // Patching is done by Deoptimizer::DeoptimizeFunction.
+  // Four instruction slots: a literal load, a blr, and a 64-bit literal
+  // occupying two slots (see PatchCodeForDeoptimization).
+  return 4 * kInstructionSize;
+}
+
+
+
+void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) {
+  // Invalidate the relocation information, as it will become invalid by the
+  // code patching below, and is not needed any more.
+  code->InvalidateRelocation();
+
+  // For each LLazyBailout instruction insert a call to the corresponding
+  // deoptimization entry.
+  DeoptimizationInputData* deopt_data =
+      DeoptimizationInputData::cast(code->deoptimization_data());
+  Address code_start_address = code->instruction_start();
+#ifdef DEBUG
+  Address prev_call_address = NULL;
+#endif
+
+  for (int i = 0; i < deopt_data->DeoptCount(); i++) {
+    // An entry with a pc of -1 has no bailout site to patch; skip it.
+    if (deopt_data->Pc(i)->value() == -1) continue;
+
+    Address call_address = code_start_address + deopt_data->Pc(i)->value();
+    Address deopt_entry = GetDeoptimizationEntry(isolate, i, LAZY);
+
+    // Overwrite the bailout site with:
+    //   ldr ip0, <literal 2 instructions ahead>  ; entry address
+    //   blr ip0                                  ; call the deopt entry
+    //   <64-bit entry address>                   ; two instruction slots
+    PatchingAssembler patcher(call_address, patch_size() / kInstructionSize);
+    patcher.LoadLiteral(ip0, 2 * kInstructionSize);
+    patcher.blr(ip0);
+    patcher.dc64(reinterpret_cast<intptr_t>(deopt_entry));
+
+    // Patched regions must not overlap and must stay within the code object.
+    ASSERT((prev_call_address == NULL) ||
+           (call_address >= prev_call_address + patch_size()));
+    ASSERT(call_address + patch_size() <= code->instruction_end());
+#ifdef DEBUG
+    prev_call_address = call_address;
+#endif
+  }
+}
+
+
+void Deoptimizer::FillInputFrame(Address tos, JavaScriptFrame* frame) {
+  // Set the register values. The values are not important as there are no
+  // callee saved registers in JavaScript frames, so all registers are
+  // spilled. Registers fp and sp are set to the correct values though.
+  for (int i = 0; i < Register::NumRegisters(); i++) {
+    input_->SetRegister(i, 0);
+  }
+
+  // TODO(all): Do we also need to set a value to csp?
+  input_->SetRegister(jssp.code(), reinterpret_cast<intptr_t>(frame->sp()));
+  input_->SetRegister(fp.code(), reinterpret_cast<intptr_t>(frame->fp()));
+
+  // Zero all allocatable double registers as well.
+  for (int i = 0; i < DoubleRegister::NumAllocatableRegisters(); i++) {
+    input_->SetDoubleRegister(i, 0.0);
+  }
+
+  // Fill the frame content from the actual data on the frame.
+  for (unsigned i = 0; i < input_->GetFrameSize(); i += kPointerSize) {
+    input_->SetFrameSlot(i, Memory::uint64_at(tos + i));
+  }
+}
+
+
+bool Deoptimizer::HasAlignmentPadding(JSFunction* function) {
+  // Always false: there is no dynamic alignment padding on A64 in the input
+  // frame.
+  return false;
+}
+
+
+void Deoptimizer::SetPlatformCompiledStubRegisters(
+    FrameDescription* output_frame, CodeStubInterfaceDescriptor* descriptor) {
+  // Resolve the stub's C deoptimization handler to an external address.
+  ApiFunction function(descriptor->deoptimization_handler_);
+  ExternalReference xref(&function, ExternalReference::BUILTIN_CALL, isolate_);
+  intptr_t handler = reinterpret_cast<intptr_t>(xref.address());
+  int params = descriptor->GetHandlerParameterCount();
+  // Pass the parameter count in x0 and the handler address in x1.
+  output_frame->SetRegister(x0.code(), params);
+  output_frame->SetRegister(x1.code(), handler);
+}
+
+
+void Deoptimizer::CopyDoubleRegisters(FrameDescription* output_frame) {
+  // Copy every double register value from the input frame to the output
+  // frame unchanged.
+  for (int i = 0; i < DoubleRegister::kMaxNumRegisters; ++i) {
+    double double_value = input_->GetDoubleRegister(i);
+    output_frame->SetDoubleRegister(i, double_value);
+  }
+}
+
+
+Code* Deoptimizer::NotifyStubFailureBuiltin() {
+  // Stub failures on A64 use the double-register-saving builtin variant.
+  return isolate_->builtins()->builtin(Builtins::kNotifyStubFailureSaveDoubles);
+}
+
+
+#define __ masm()->
+
+// Generate the common deoptimization entry code: snapshot the register and
+// stack state into the Deoptimizer's input frame, compute the output frames
+// in C++, then rebuild the stack from them and jump to the continuation.
+void Deoptimizer::EntryGenerator::Generate() {
+  GeneratePrologue();
+
+  // TODO(all): This code needs to be revisited. We probably only need to save
+  // caller-saved registers here. Callee-saved registers can be stored directly
+  // in the input frame.
+
+  // Save all allocatable floating point registers.
+  CPURegList saved_fp_registers(CPURegister::kFPRegister, kDRegSize,
+                                0, FPRegister::NumAllocatableRegisters() - 1);
+  __ PushCPURegList(saved_fp_registers);
+
+  // We save all the registers except jssp, sp and lr.
+  CPURegList saved_registers(CPURegister::kRegister, kXRegSize, 0, 27);
+  saved_registers.Combine(fp);
+  __ PushCPURegList(saved_registers);
+
+  const int kSavedRegistersAreaSize =
+      (saved_registers.Count() * kXRegSizeInBytes) +
+      (saved_fp_registers.Count() * kDRegSizeInBytes);
+
+  // Floating point registers are saved on the stack above core registers.
+  const int kFPRegistersOffset = saved_registers.Count() * kXRegSizeInBytes;
+
+  // Get the bailout id from the stack (pushed by GeneratePrologue).
+  Register bailout_id = x2;
+  __ Peek(bailout_id, kSavedRegistersAreaSize);
+
+  Register code_object = x3;
+  Register fp_to_sp = x4;
+  // Get the address of the location in the code object. This is the return
+  // address for lazy deoptimization.
+  __ Mov(code_object, lr);
+  // Compute the fp-to-sp delta, and correct one word for bailout id.
+  __ Add(fp_to_sp, masm()->StackPointer(),
+         kSavedRegistersAreaSize + (1 * kPointerSize));
+  __ Sub(fp_to_sp, fp, fp_to_sp);
+
+  // Allocate a new deoptimizer object.
+  __ Ldr(x0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+  __ Mov(x1, type());
+  // Following arguments are already loaded:
+  //  - x2: bailout id
+  //  - x3: code object address
+  //  - x4: fp-to-sp delta
+  __ Mov(x5, Operand(ExternalReference::isolate_address(isolate())));
+
+  {
+    // Call Deoptimizer::New().
+    AllowExternalCallThatCantCauseGC scope(masm());
+    __ CallCFunction(ExternalReference::new_deoptimizer_function(isolate()), 6);
+  }
+
+  // Preserve "deoptimizer" object in register x0.
+  Register deoptimizer = x0;
+
+  // Get the input frame descriptor pointer.
+  __ Ldr(x1, MemOperand(deoptimizer, Deoptimizer::input_offset()));
+
+  // Copy core registers into the input frame.
+  CPURegList copy_to_input = saved_registers;
+  for (int i = 0; i < saved_registers.Count(); i++) {
+    // TODO(all): Look for opportunities to optimize this by using ldp/stp.
+    __ Peek(x2, i * kPointerSize);
+    CPURegister current_reg = copy_to_input.PopLowestIndex();
+    int offset = (current_reg.code() * kPointerSize) +
+        FrameDescription::registers_offset();
+    __ Str(x2, MemOperand(x1, offset));
+  }
+
+  // Copy FP registers to the input frame.
+  for (int i = 0; i < saved_fp_registers.Count(); i++) {
+    // TODO(all): Look for opportunities to optimize this by using ldp/stp.
+    int dst_offset = FrameDescription::double_registers_offset() +
+        (i * kDoubleSize);
+    int src_offset = kFPRegistersOffset + (i * kDoubleSize);
+    __ Peek(x2, src_offset);
+    __ Str(x2, MemOperand(x1, dst_offset));
+  }
+
+  // Remove the bailout id and the saved registers from the stack.
+  __ Drop(1 + (kSavedRegistersAreaSize / kXRegSizeInBytes));
+
+  // Compute a pointer to the unwinding limit in register x2; that is
+  // the first stack slot not part of the input frame.
+  Register unwind_limit = x2;
+  __ Ldr(unwind_limit, MemOperand(x1, FrameDescription::frame_size_offset()));
+  __ Add(unwind_limit, unwind_limit, __ StackPointer());
+
+  // Unwind the stack down to - but not including - the unwinding
+  // limit and copy the contents of the activation frame to the input
+  // frame description.
+  __ Add(x3, x1, FrameDescription::frame_content_offset());
+  Label pop_loop;
+  Label pop_loop_header;
+  __ B(&pop_loop_header);
+  __ Bind(&pop_loop);
+  __ Pop(x4);
+  __ Str(x4, MemOperand(x3, kPointerSize, PostIndex));
+  __ Bind(&pop_loop_header);
+  __ Cmp(unwind_limit, __ StackPointer());
+  __ B(ne, &pop_loop);
+
+  // Compute the output frame in the deoptimizer.
+  __ Push(x0);  // Preserve deoptimizer object across call.
+
+  {
+    // Call Deoptimizer::ComputeOutputFrames().
+    AllowExternalCallThatCantCauseGC scope(masm());
+    __ CallCFunction(
+        ExternalReference::compute_output_frames_function(isolate()), 1);
+  }
+  __ Pop(x4);  // Restore deoptimizer object (class Deoptimizer).
+
+  // Replace the current (input) frame with the output frames.
+  Label outer_push_loop, inner_push_loop,
+        outer_loop_header, inner_loop_header;
+  // x0 iterates over the output frame array; x1 marks its end.
+  __ Ldrsw(x1, MemOperand(x4, Deoptimizer::output_count_offset()));
+  __ Ldr(x0, MemOperand(x4, Deoptimizer::output_offset()));
+  __ Add(x1, x0, Operand(x1, LSL, kPointerSizeLog2));
+  __ B(&outer_loop_header);
+
+  __ Bind(&outer_push_loop);
+  Register current_frame = x2;
+  __ Ldr(current_frame, MemOperand(x0, 0));
+  __ Ldr(x3, MemOperand(current_frame, FrameDescription::frame_size_offset()));
+  __ B(&inner_loop_header);
+
+  // Push the current output frame's content, highest offset first.
+  __ Bind(&inner_push_loop);
+  __ Sub(x3, x3, kPointerSize);
+  __ Add(x6, current_frame, x3);
+  __ Ldr(x7, MemOperand(x6, FrameDescription::frame_content_offset()));
+  __ Push(x7);
+  __ Bind(&inner_loop_header);
+  __ Cbnz(x3, &inner_push_loop);
+
+  __ Add(x0, x0, kPointerSize);
+  __ Bind(&outer_loop_header);
+  __ Cmp(x0, x1);
+  __ B(lt, &outer_push_loop);
+
+  // Restore the allocatable double registers from the input frame.
+  __ Ldr(x1, MemOperand(x4, Deoptimizer::input_offset()));
+  ASSERT(!saved_fp_registers.IncludesAliasOf(crankshaft_fp_scratch) &&
+         !saved_fp_registers.IncludesAliasOf(fp_zero) &&
+         !saved_fp_registers.IncludesAliasOf(fp_scratch));
+  int src_offset = FrameDescription::double_registers_offset();
+  while (!saved_fp_registers.IsEmpty()) {
+    const CPURegister reg = saved_fp_registers.PopLowestIndex();
+    __ Ldr(reg, MemOperand(x1, src_offset));
+    src_offset += kDoubleSize;
+  }
+
+  // Push state from the last output frame.
+  __ Ldr(x6, MemOperand(current_frame, FrameDescription::state_offset()));
+  __ Push(x6);
+
+  // TODO(all): ARM copies a lot (if not all) of the last output frame onto the
+  // stack, then pops it all into registers. Here, we try to load it directly
+  // into the relevant registers. Is this correct? If so, we should improve the
+  // ARM code.
+
+  // TODO(all): This code needs to be revisited, We probably don't need to
+  // restore all the registers as fullcodegen does not keep live values in
+  // registers (note that at least fp must be restored though).
+
+  // Restore registers from the last output frame.
+  // Note that lr is not in the list of saved_registers and will be restored
+  // later. We can use it to hold the address of last output frame while
+  // reloading the other registers.
+  ASSERT(!saved_registers.IncludesAliasOf(lr));
+  Register last_output_frame = lr;
+  __ Mov(last_output_frame, current_frame);
+
+  // We don't need to restore x7 as it will be clobbered later to hold the
+  // continuation address.
+  Register continuation = x7;
+  saved_registers.Remove(continuation);
+
+  while (!saved_registers.IsEmpty()) {
+    // TODO(all): Look for opportunities to optimize this by using ldp.
+    CPURegister current_reg = saved_registers.PopLowestIndex();
+    int offset = (current_reg.code() * kPointerSize) +
+        FrameDescription::registers_offset();
+    __ Ldr(current_reg, MemOperand(last_output_frame, offset));
+  }
+
+  // Load the continuation address, reload lr from the frame's pc slot (this
+  // clobbers last_output_frame, which is its last use), and jump.
+  __ Ldr(continuation, MemOperand(last_output_frame,
+                                  FrameDescription::continuation_offset()));
+  __ Ldr(lr, MemOperand(last_output_frame, FrameDescription::pc_offset()));
+  __ InitializeRootRegister();
+  __ Br(continuation);
+}
+
+
+// Size of an entry of the second level deopt table.
+// This is the code size generated by GeneratePrologue for one entry:
+// a movz loading the entry id plus a branch to the shared tail.
+const int Deoptimizer::table_entry_size_ = 2 * kInstructionSize;
+
+
+void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
+  // Create a sequence of deoptimization entries.
+  // Note that registers are still live when jumping to an entry.
+  Label done;
+  {
+    InstructionAccurateScope scope(masm());
+
+    // The number of entries will never exceed kMaxNumberOfEntries.
+    // As long as kMaxNumberOfEntries is a valid 16 bits immediate you can use
+    // a movz instruction to load the entry id.
+    ASSERT(is_uint16(Deoptimizer::kMaxNumberOfEntries));
+
+    // Each entry loads its id into Tmp0() and branches to the common tail.
+    for (int i = 0; i < count(); i++) {
+      int start = masm()->pc_offset();
+      USE(start);
+      __ movz(masm()->Tmp0(), i);
+      __ b(&done);
+      // Every entry must occupy exactly table_entry_size_ bytes.
+      ASSERT(masm()->pc_offset() - start == table_entry_size_);
+    }
+  }
+  __ Bind(&done);
+  // TODO(all): We need to add some kind of assertion to verify that Tmp0()
+  // is not clobbered by Push.
+  __ Push(masm()->Tmp0());
+}
+
+
+void FrameDescription::SetCallerPc(unsigned offset, intptr_t value) {
+  // On A64 the caller's pc is stored as an ordinary frame slot.
+  SetFrameSlot(offset, value);
+}
+
+
+void FrameDescription::SetCallerFp(unsigned offset, intptr_t value) {
+  // On A64 the caller's fp is stored as an ordinary frame slot.
+  SetFrameSlot(offset, value);
+}
+
+
+#undef __
+
+} } // namespace v8::internal
--- /dev/null
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include <assert.h>
+#include <stdio.h>
+#include <stdarg.h>
+#include <string.h>
+
+#include "v8.h"
+
+#if V8_TARGET_ARCH_A64
+
+#include "disasm.h"
+#include "a64/disasm-a64.h"
+#include "macro-assembler.h"
+#include "platform.h"
+
+namespace v8 {
+namespace internal {
+
+
+Disassembler::Disassembler() {
+  // Allocate a 256-byte output buffer owned by this instance (freed in the
+  // destructor).
+  // NOTE(review): the malloc result is not NULL-checked — confirm allocation
+  // failure is acceptable to ignore here.
+  buffer_size_ = 256;
+  buffer_ = reinterpret_cast<char*>(malloc(buffer_size_));
+  buffer_pos_ = 0;
+  own_buffer_ = true;
+}
+
+
+Disassembler::Disassembler(char* text_buffer, int buffer_size) {
+  // Use a caller-provided output buffer; the caller retains ownership and is
+  // responsible for freeing it.
+  buffer_size_ = buffer_size;
+  buffer_ = text_buffer;
+  buffer_pos_ = 0;
+  own_buffer_ = false;
+}
+
+
+Disassembler::~Disassembler() {
+  // Only release the buffer if this instance allocated it.
+  if (own_buffer_) {
+    free(buffer_);
+  }
+}
+
+
+char* Disassembler::GetOutput() {
+  // Return the internal text buffer holding the formatted disassembly.
+  return buffer_;
+}
+
+
+// Disassemble an add/subtract (immediate) instruction, applying the standard
+// aliases: mov (add of #0 to/from sp/zr), cmn and cmp (flag-setting forms
+// writing to zr).
+void Disassembler::VisitAddSubImmediate(Instruction* instr) {
+  bool rd_is_zr = RdIsZROrSP(instr);
+  // An add of #0 involving sp or zr prints as the "mov" alias.
+  // NOTE(review): the trailing `? true : false` is redundant.
+  bool stack_op = (rd_is_zr || RnIsZROrSP(instr)) &&
+                  (instr->ImmAddSub() == 0) ? true : false;
+  const char *mnemonic = "";
+  const char *form = "'Rds, 'Rns, 'IAddSub";
+  const char *form_cmp = "'Rns, 'IAddSub";
+  const char *form_mov = "'Rds, 'Rns";
+
+  switch (instr->Mask(AddSubImmediateMask)) {
+    case ADD_w_imm:
+    case ADD_x_imm: {
+      mnemonic = "add";
+      if (stack_op) {
+        mnemonic = "mov";
+        form = form_mov;
+      }
+      break;
+    }
+    case ADDS_w_imm:
+    case ADDS_x_imm: {
+      mnemonic = "adds";
+      if (rd_is_zr) {
+        mnemonic = "cmn";
+        form = form_cmp;
+      }
+      break;
+    }
+    case SUB_w_imm:
+    case SUB_x_imm: mnemonic = "sub"; break;
+    case SUBS_w_imm:
+    case SUBS_x_imm: {
+      mnemonic = "subs";
+      if (rd_is_zr) {
+        mnemonic = "cmp";
+        form = form_cmp;
+      }
+      break;
+    }
+    default: UNREACHABLE();
+  }
+  Format(instr, mnemonic, form);
+}
+
+
+// Disassemble an add/subtract (shifted register) instruction, applying the
+// aliases cmn/cmp (destination is zr) and neg/negs (first source is zr).
+void Disassembler::VisitAddSubShifted(Instruction* instr) {
+  bool rd_is_zr = RdIsZROrSP(instr);
+  bool rn_is_zr = RnIsZROrSP(instr);
+  const char *mnemonic = "";
+  const char *form = "'Rd, 'Rn, 'Rm'HDP";
+  const char *form_cmp = "'Rn, 'Rm'HDP";
+  const char *form_neg = "'Rd, 'Rm'HDP";
+
+  switch (instr->Mask(AddSubShiftedMask)) {
+    case ADD_w_shift:
+    case ADD_x_shift: mnemonic = "add"; break;
+    case ADDS_w_shift:
+    case ADDS_x_shift: {
+      mnemonic = "adds";
+      if (rd_is_zr) {
+        mnemonic = "cmn";
+        form = form_cmp;
+      }
+      break;
+    }
+    case SUB_w_shift:
+    case SUB_x_shift: {
+      mnemonic = "sub";
+      if (rn_is_zr) {
+        mnemonic = "neg";
+        form = form_neg;
+      }
+      break;
+    }
+    case SUBS_w_shift:
+    case SUBS_x_shift: {
+      mnemonic = "subs";
+      if (rd_is_zr) {
+        // cmp takes precedence over negs when both rd and rn are zr.
+        mnemonic = "cmp";
+        form = form_cmp;
+      } else if (rn_is_zr) {
+        mnemonic = "negs";
+        form = form_neg;
+      }
+      break;
+    }
+    default: UNREACHABLE();
+  }
+  Format(instr, mnemonic, form);
+}
+
+
+// Disassemble an add/subtract (extended register) instruction. The printed
+// width of the second operand depends on the extend mode (X register for
+// UXTX/SXTX, W register otherwise); cmn/cmp aliases apply when writing zr.
+void Disassembler::VisitAddSubExtended(Instruction* instr) {
+  bool rd_is_zr = RdIsZROrSP(instr);
+  const char *mnemonic = "";
+  Extend mode = static_cast<Extend>(instr->ExtendMode());
+  const char *form = ((mode == UXTX) || (mode == SXTX)) ?
+                     "'Rds, 'Rns, 'Xm'Ext" : "'Rds, 'Rns, 'Wm'Ext";
+  const char *form_cmp = ((mode == UXTX) || (mode == SXTX)) ?
+                         "'Rns, 'Xm'Ext" : "'Rns, 'Wm'Ext";
+
+  switch (instr->Mask(AddSubExtendedMask)) {
+    case ADD_w_ext:
+    case ADD_x_ext: mnemonic = "add"; break;
+    case ADDS_w_ext:
+    case ADDS_x_ext: {
+      mnemonic = "adds";
+      if (rd_is_zr) {
+        mnemonic = "cmn";
+        form = form_cmp;
+      }
+      break;
+    }
+    case SUB_w_ext:
+    case SUB_x_ext: mnemonic = "sub"; break;
+    case SUBS_w_ext:
+    case SUBS_x_ext: {
+      mnemonic = "subs";
+      if (rd_is_zr) {
+        mnemonic = "cmp";
+        form = form_cmp;
+      }
+      break;
+    }
+    default: UNREACHABLE();
+  }
+  Format(instr, mnemonic, form);
+}
+
+
+// Disassemble an add/subtract with carry instruction, applying the ngc/ngcs
+// aliases when the first source register is zr.
+void Disassembler::VisitAddSubWithCarry(Instruction* instr) {
+  bool rn_is_zr = RnIsZROrSP(instr);
+  const char *mnemonic = "";
+  const char *form = "'Rd, 'Rn, 'Rm";
+  const char *form_neg = "'Rd, 'Rm";
+
+  switch (instr->Mask(AddSubWithCarryMask)) {
+    case ADC_w:
+    case ADC_x: mnemonic = "adc"; break;
+    case ADCS_w:
+    case ADCS_x: mnemonic = "adcs"; break;
+    case SBC_w:
+    case SBC_x: {
+      mnemonic = "sbc";
+      if (rn_is_zr) {
+        mnemonic = "ngc";
+        form = form_neg;
+      }
+      break;
+    }
+    case SBCS_w:
+    case SBCS_x: {
+      mnemonic = "sbcs";
+      if (rn_is_zr) {
+        mnemonic = "ngcs";
+        form = form_neg;
+      }
+      break;
+    }
+    default: UNREACHABLE();
+  }
+  Format(instr, mnemonic, form);
+}
+
+
+// Disassemble a logical (immediate) instruction, preferring the mov/tst
+// aliases where applicable.
+void Disassembler::VisitLogicalImmediate(Instruction* instr) {
+  bool rd_is_zr = RdIsZROrSP(instr);
+  bool rn_is_zr = RnIsZROrSP(instr);
+  const char *mnemonic = "";
+  const char *form = "'Rds, 'Rn, 'ITri";
+
+  if (instr->ImmLogical() == 0) {
+    // The immediate encoded in the instruction is not in the expected format.
+    Format(instr, "unallocated", "(LogicalImmediate)");
+    return;
+  }
+
+  switch (instr->Mask(LogicalImmediateMask)) {
+    case AND_w_imm:
+    case AND_x_imm: mnemonic = "and"; break;
+    case ORR_w_imm:
+    case ORR_x_imm: {
+      mnemonic = "orr";
+      unsigned reg_size = (instr->SixtyFourBits() == 1) ? kXRegSize
+                                                        : kWRegSize;
+      // orr with zr source is the mov (bitmask immediate) alias, unless a
+      // single movz/movn could load the immediate (in which case that form
+      // would be the preferred disassembly).
+      if (rn_is_zr && !IsMovzMovnImm(reg_size, instr->ImmLogical())) {
+        mnemonic = "mov";
+        form = "'Rds, 'ITri";
+      }
+      break;
+    }
+    case EOR_w_imm:
+    case EOR_x_imm: mnemonic = "eor"; break;
+    case ANDS_w_imm:
+    case ANDS_x_imm: {
+      mnemonic = "ands";
+      // ands with zr destination is the tst alias.
+      if (rd_is_zr) {
+        mnemonic = "tst";
+        form = "'Rn, 'ITri";
+      }
+      break;
+    }
+    default: UNREACHABLE();
+  }
+  Format(instr, mnemonic, form);
+}
+
+
+// Return true if `value` could be loaded with a single movz or movn, i.e. if
+// every bit outside one aligned 16-bit half-word is all-zero (movz) or, for
+// the movn case, all-one within the register width.
+bool Disassembler::IsMovzMovnImm(unsigned reg_size, uint64_t value) {
+  ASSERT((reg_size == kXRegSize) ||
+         ((reg_size == kWRegSize) && (value <= 0xffffffff)));
+
+  for (unsigned shift = 0; shift < 64; shift += 16) {
+    uint64_t halfword = static_cast<uint64_t>(0xffff) << shift;
+    // movz: all bits outside this half-word are clear.
+    if ((value & ~halfword) == 0) {
+      return true;
+    }
+    // movn (X registers): all bits outside this half-word are set.
+    if ((reg_size == kXRegSize) &&
+        ((value | halfword) == 0xffffffffffffffffUL)) {
+      return true;
+    }
+  }
+
+  // movn (W registers): only the low two half-word positions exist, and the
+  // other half-word must be all-one within 32 bits.
+  if ((reg_size == kWRegSize) &&
+      (((value & 0xffff0000) == 0xffff0000) ||
+       ((value & 0x0000ffff) == 0x0000ffff))) {
+    return true;
+  }
+  return false;
+}
+
+
+// Disassemble a logical (shifted register) instruction, preferring the
+// tst/mov/mvn aliases where applicable.
+void Disassembler::VisitLogicalShifted(Instruction* instr) {
+  bool rd_is_zr = RdIsZROrSP(instr);
+  bool rn_is_zr = RnIsZROrSP(instr);
+  const char *mnemonic = "";
+  const char *form = "'Rd, 'Rn, 'Rm'HLo";
+
+  switch (instr->Mask(LogicalShiftedMask)) {
+    case AND_w:
+    case AND_x: mnemonic = "and"; break;
+    case BIC_w:
+    case BIC_x: mnemonic = "bic"; break;
+    case EOR_w:
+    case EOR_x: mnemonic = "eor"; break;
+    case EON_w:
+    case EON_x: mnemonic = "eon"; break;
+    case BICS_w:
+    case BICS_x: mnemonic = "bics"; break;
+    case ANDS_w:
+    case ANDS_x: {
+      mnemonic = "ands";
+      // ands with zr destination is the tst alias.
+      if (rd_is_zr) {
+        mnemonic = "tst";
+        form = "'Rn, 'Rm'HLo";
+      }
+      break;
+    }
+    case ORR_w:
+    case ORR_x: {
+      mnemonic = "orr";
+      // orr with zr source and no shift is the mov (register) alias.
+      if (rn_is_zr && (instr->ImmDPShift() == 0) && (instr->ShiftDP() == LSL)) {
+        mnemonic = "mov";
+        form = "'Rd, 'Rm";
+      }
+      break;
+    }
+    case ORN_w:
+    case ORN_x: {
+      mnemonic = "orn";
+      // orn with zr source is the mvn alias (shift is still printed).
+      if (rn_is_zr) {
+        mnemonic = "mvn";
+        form = "'Rd, 'Rm'HLo";
+      }
+      break;
+    }
+    default: UNREACHABLE();
+  }
+
+  Format(instr, mnemonic, form);
+}
+
+
+// Disassemble a conditional compare (register) instruction: ccmn or ccmp.
+void Disassembler::VisitConditionalCompareRegister(Instruction* instr) {
+  const char *mnemonic = "";
+
+  switch (instr->Mask(ConditionalCompareRegisterMask)) {
+    case CCMN_w:
+    case CCMN_x:
+      mnemonic = "ccmn";
+      break;
+    case CCMP_w:
+    case CCMP_x:
+      mnemonic = "ccmp";
+      break;
+    default:
+      UNREACHABLE();
+  }
+  Format(instr, mnemonic, "'Rn, 'Rm, 'INzcv, 'Cond");
+}
+
+
+// Disassemble a conditional compare (immediate) instruction: ccmn or ccmp.
+void Disassembler::VisitConditionalCompareImmediate(Instruction* instr) {
+  const char *mnemonic = "";
+
+  switch (instr->Mask(ConditionalCompareImmediateMask)) {
+    case CCMN_w_imm:
+    case CCMN_x_imm:
+      mnemonic = "ccmn";
+      break;
+    case CCMP_w_imm:
+    case CCMP_x_imm:
+      mnemonic = "ccmp";
+      break;
+    default:
+      UNREACHABLE();
+  }
+  Format(instr, mnemonic, "'Rn, 'IP, 'INzcv, 'Cond");
+}
+
+
+// Disassemble a conditional select instruction, preferring the
+// cset/csetm/cinc/cinv/cneg aliases where applicable. The alias forms print
+// the inverted condition ('CInv) because the alias semantics are inverted
+// relative to the underlying csinc/csinv/csneg encoding.
+void Disassembler::VisitConditionalSelect(Instruction* instr) {
+  bool rnm_is_zr = (RnIsZROrSP(instr) && RmIsZROrSP(instr));
+  bool rn_is_rm = (instr->Rn() == instr->Rm());
+  const char *mnemonic = "";
+  const char *form = "'Rd, 'Rn, 'Rm, 'Cond";
+  const char *form_test = "'Rd, 'CInv";
+  const char *form_update = "'Rd, 'Rn, 'CInv";
+
+  Condition cond = static_cast<Condition>(instr->Condition());
+  // The aliases are only defined for conditions that can be inverted.
+  bool invertible_cond = (cond != al) && (cond != nv);
+
+  switch (instr->Mask(ConditionalSelectMask)) {
+    case CSEL_w:
+    case CSEL_x: mnemonic = "csel"; break;
+    case CSINC_w:
+    case CSINC_x: {
+      mnemonic = "csinc";
+      if (rnm_is_zr && invertible_cond) {
+        mnemonic = "cset";
+        form = form_test;
+      } else if (rn_is_rm && invertible_cond) {
+        mnemonic = "cinc";
+        form = form_update;
+      }
+      break;
+    }
+    case CSINV_w:
+    case CSINV_x: {
+      mnemonic = "csinv";
+      if (rnm_is_zr && invertible_cond) {
+        mnemonic = "csetm";
+        form = form_test;
+      } else if (rn_is_rm && invertible_cond) {
+        mnemonic = "cinv";
+        form = form_update;
+      }
+      break;
+    }
+    case CSNEG_w:
+    case CSNEG_x: {
+      mnemonic = "csneg";
+      if (rn_is_rm && invertible_cond) {
+        mnemonic = "cneg";
+        form = form_update;
+      }
+      break;
+    }
+    default: UNREACHABLE();
+  }
+  Format(instr, mnemonic, form);
+}
+
+
+// Disassemble a bitfield instruction (sbfm/ubfm/bfm), selecting the
+// preferred alias (sxt*/uxt*/asr/lsr/lsl/sbfiz/ubfiz/sbfx/ubfx/bfi/bfxil)
+// from the immr (r) and imms (s) fields.
+void Disassembler::VisitBitfield(Instruction* instr) {
+  unsigned s = instr->ImmS();
+  unsigned r = instr->ImmR();
+  unsigned rd_size_minus_1 =
+    ((instr->SixtyFourBits() == 1) ? kXRegSize : kWRegSize) - 1;
+  const char *mnemonic = "";
+  const char *form = "";
+  const char *form_shift_right = "'Rd, 'Rn, 'IBr";
+  const char *form_extend = "'Rd, 'Wn";
+  const char *form_bfiz = "'Rd, 'Rn, 'IBZ-r, 'IBs+1";
+  const char *form_bfx = "'Rd, 'Rn, 'IBr, 'IBs-r+1";
+  const char *form_lsl = "'Rd, 'Rn, 'IBZ-r";
+
+  switch (instr->Mask(BitfieldMask)) {
+    case SBFM_w:
+    case SBFM_x: {
+      mnemonic = "sbfx";
+      form = form_bfx;
+      if (r == 0) {
+        // r == 0 with s == 7/15/31 are the sign-extend aliases.
+        form = form_extend;
+        if (s == 7) {
+          mnemonic = "sxtb";
+        } else if (s == 15) {
+          mnemonic = "sxth";
+        } else if ((s == 31) && (instr->SixtyFourBits() == 1)) {
+          mnemonic = "sxtw";
+        } else {
+          form = form_bfx;
+        }
+      } else if (s == rd_size_minus_1) {
+        mnemonic = "asr";
+        form = form_shift_right;
+      } else if (s < r) {
+        mnemonic = "sbfiz";
+        form = form_bfiz;
+      }
+      break;
+    }
+    case UBFM_w:
+    case UBFM_x: {
+      mnemonic = "ubfx";
+      form = form_bfx;
+      if (r == 0) {
+        // r == 0 with s == 7/15 are the zero-extend aliases.
+        form = form_extend;
+        if (s == 7) {
+          mnemonic = "uxtb";
+        } else if (s == 15) {
+          mnemonic = "uxth";
+        } else {
+          form = form_bfx;
+        }
+      }
+      // Note: not chained to the r == 0 block above — lsr #0 (s == size-1,
+      // r == 0) takes precedence over the extend forms.
+      if (s == rd_size_minus_1) {
+        mnemonic = "lsr";
+        form = form_shift_right;
+      } else if (r == s + 1) {
+        // lsl #shift encodes as immr = size - shift, imms = size - 1 - shift,
+        // i.e. immr == imms + 1.
+        mnemonic = "lsl";
+        form = form_lsl;
+      } else if (s < r) {
+        mnemonic = "ubfiz";
+        form = form_bfiz;
+      }
+      break;
+    }
+    case BFM_w:
+    case BFM_x: {
+      mnemonic = "bfxil";
+      form = form_bfx;
+      if (s < r) {
+        mnemonic = "bfi";
+        form = form_bfiz;
+      }
+      // No break needed: this is the last case, and the switch has no
+      // default (the decoder only routes bitfield encodings here).
+    }
+  }
+  Format(instr, mnemonic, form);
+}
+
+
+// Disassemble an extract instruction (extr), using the ror (immediate)
+// alias when both source registers are the same.
+void Disassembler::VisitExtract(Instruction* instr) {
+  const char *mnemonic = "";
+  const char *form = "'Rd, 'Rn, 'Rm, 'IExtract";
+
+  switch (instr->Mask(ExtractMask)) {
+    case EXTR_w:
+    case EXTR_x: {
+      if (instr->Rn() == instr->Rm()) {
+        mnemonic = "ror";
+        form = "'Rd, 'Rn, 'IExtract";
+      } else {
+        mnemonic = "extr";
+      }
+      break;
+    }
+    default: UNREACHABLE();
+  }
+  Format(instr, mnemonic, form);
+}
+
+
+// Disassemble a PC-relative addressing instruction. Only adr is supported.
+void Disassembler::VisitPCRelAddressing(Instruction* instr) {
+  switch (instr->Mask(PCRelAddressingMask)) {
+    case ADR: Format(instr, "adr", "'Xd, 'AddrPCRelByte"); break;
+    // ADRP is not implemented.
+    default: Format(instr, "unimplemented", "(PCRelAddressing)");
+  }
+}
+
+
+// Disassemble a conditional branch; the condition is folded into the
+// mnemonic ("b.'CBrn").
+void Disassembler::VisitConditionalBranch(Instruction* instr) {
+  switch (instr->Mask(ConditionalBranchMask)) {
+    case B_cond: Format(instr, "b.'CBrn", "'BImmCond"); break;
+    default: UNREACHABLE();
+  }
+}
+
+
+// Disassemble an unconditional branch to register (br/blr/ret). "ret lr"
+// is printed as bare "ret".
+void Disassembler::VisitUnconditionalBranchToRegister(Instruction* instr) {
+  const char *mnemonic = "unimplemented";
+  const char *form = "'Xn";
+
+  switch (instr->Mask(UnconditionalBranchToRegisterMask)) {
+    case BR: mnemonic = "br"; break;
+    case BLR: mnemonic = "blr"; break;
+    case RET: {
+      mnemonic = "ret";
+      // Omit the operand for the default case (returning via lr).
+      if (instr->Rn() == kLinkRegCode) {
+        form = NULL;
+      }
+      break;
+    }
+    default: form = "(UnconditionalBranchToRegister)";
+  }
+  Format(instr, mnemonic, form);
+}
+
+
+// Disassemble a PC-relative unconditional branch: b or bl.
+void Disassembler::VisitUnconditionalBranch(Instruction* instr) {
+  const char *mnemonic = "";
+
+  switch (instr->Mask(UnconditionalBranchMask)) {
+    case B:
+      mnemonic = "b";
+      break;
+    case BL:
+      mnemonic = "bl";
+      break;
+    default:
+      UNREACHABLE();
+  }
+  Format(instr, mnemonic, "'BImmUncn");
+}
+
+
+// Disassemble a 1-source data-processing instruction (rbit/rev*/clz/cls).
+// The FORMAT macro expands both the W and X encodings for each mnemonic.
+void Disassembler::VisitDataProcessing1Source(Instruction* instr) {
+  const char *mnemonic = "";
+  const char *form = "'Rd, 'Rn";
+
+  switch (instr->Mask(DataProcessing1SourceMask)) {
+    #define FORMAT(A, B) \
+    case A##_w: \
+    case A##_x: mnemonic = B; break;
+    FORMAT(RBIT, "rbit");
+    FORMAT(REV16, "rev16");
+    FORMAT(REV, "rev");
+    FORMAT(CLZ, "clz");
+    FORMAT(CLS, "cls");
+    #undef FORMAT
+    // rev32 only exists for X registers.
+    case REV32_x: mnemonic = "rev32"; break;
+    default: UNREACHABLE();
+  }
+  Format(instr, mnemonic, form);
+}
+
+
+// Disassemble a 2-source data-processing instruction (udiv/sdiv and the
+// variable shifts, which disassemble as lsl/lsr/asr/ror).
+void Disassembler::VisitDataProcessing2Source(Instruction* instr) {
+  const char *mnemonic = "unimplemented";
+  const char *form = "'Rd, 'Rn, 'Rm";
+
+  switch (instr->Mask(DataProcessing2SourceMask)) {
+    #define FORMAT(A, B) \
+    case A##_w: \
+    case A##_x: mnemonic = B; break;
+    FORMAT(UDIV, "udiv");
+    FORMAT(SDIV, "sdiv");
+    FORMAT(LSLV, "lsl");
+    FORMAT(LSRV, "lsr");
+    FORMAT(ASRV, "asr");
+    FORMAT(RORV, "ror");
+    #undef FORMAT
+    default: form = "(DataProcessing2Source)";
+  }
+  Format(instr, mnemonic, form);
+}
+
+
+// Disassemble a 3-source data-processing instruction (multiply-add family),
+// preferring the mul/mneg/smull/umull/... aliases when Ra is the zero
+// register.
+void Disassembler::VisitDataProcessing3Source(Instruction* instr) {
+  bool ra_is_zr = RaIsZROrSP(instr);
+  const char *mnemonic = "";
+  // Default form: widening multiply-accumulate (X dest, W sources, X accum).
+  const char *form = "'Xd, 'Wn, 'Wm, 'Xa";
+  const char *form_rrr = "'Rd, 'Rn, 'Rm";
+  const char *form_rrrr = "'Rd, 'Rn, 'Rm, 'Ra";
+  const char *form_xww = "'Xd, 'Wn, 'Wm";
+  const char *form_xxx = "'Xd, 'Xn, 'Xm";
+
+  switch (instr->Mask(DataProcessing3SourceMask)) {
+    case MADD_w:
+    case MADD_x: {
+      mnemonic = "madd";
+      form = form_rrrr;
+      if (ra_is_zr) {
+        mnemonic = "mul";
+        form = form_rrr;
+      }
+      break;
+    }
+    case MSUB_w:
+    case MSUB_x: {
+      mnemonic = "msub";
+      form = form_rrrr;
+      if (ra_is_zr) {
+        mnemonic = "mneg";
+        form = form_rrr;
+      }
+      break;
+    }
+    case SMADDL_x: {
+      mnemonic = "smaddl";
+      if (ra_is_zr) {
+        mnemonic = "smull";
+        form = form_xww;
+      }
+      break;
+    }
+    case SMSUBL_x: {
+      mnemonic = "smsubl";
+      if (ra_is_zr) {
+        mnemonic = "smnegl";
+        form = form_xww;
+      }
+      break;
+    }
+    case UMADDL_x: {
+      mnemonic = "umaddl";
+      if (ra_is_zr) {
+        mnemonic = "umull";
+        form = form_xww;
+      }
+      break;
+    }
+    case UMSUBL_x: {
+      mnemonic = "umsubl";
+      if (ra_is_zr) {
+        mnemonic = "umnegl";
+        form = form_xww;
+      }
+      break;
+    }
+    case SMULH_x: {
+      mnemonic = "smulh";
+      form = form_xxx;
+      break;
+    }
+    case UMULH_x: {
+      mnemonic = "umulh";
+      form = form_xxx;
+      break;
+    }
+    default: UNREACHABLE();
+  }
+  Format(instr, mnemonic, form);
+}
+
+
+// Disassemble a compare-and-branch instruction: cbz or cbnz.
+void Disassembler::VisitCompareBranch(Instruction* instr) {
+  const char *mnemonic = "";
+
+  switch (instr->Mask(CompareBranchMask)) {
+    case CBZ_w:
+    case CBZ_x:
+      mnemonic = "cbz";
+      break;
+    case CBNZ_w:
+    case CBNZ_x:
+      mnemonic = "cbnz";
+      break;
+    default:
+      UNREACHABLE();
+  }
+  Format(instr, mnemonic, "'Rt, 'BImmCmpa");
+}
+
+
+// Disassemble a test-bit-and-branch instruction: tbz or tbnz.
+void Disassembler::VisitTestBranch(Instruction* instr) {
+  const char *mnemonic = "";
+  // If the top bit of the immediate is clear, the tested register is
+  // disassembled as Wt, otherwise Xt. As the top bit of the immediate is
+  // encoded in bit 31 of the instruction, we can reuse the Rt form, which
+  // uses bit 31 (normally "sf") to choose the register size.
+  const char *form = "'Rt, 'IS, 'BImmTest";
+
+  switch (instr->Mask(TestBranchMask)) {
+    case TBZ: mnemonic = "tbz"; break;
+    case TBNZ: mnemonic = "tbnz"; break;
+    default: UNREACHABLE();
+  }
+  Format(instr, mnemonic, form);
+}
+
+
+// Disassemble a move-wide-immediate instruction (movn/movz/movk).
+void Disassembler::VisitMoveWideImmediate(Instruction* instr) {
+  const char *mnemonic = "";
+  const char *form = "'Rd, 'IMoveImm";
+
+  // Print the shift separately for movk, to make it clear which half word will
+  // be overwritten. Movn and movz print the computed immediate, which includes
+  // shift calculation.
+  switch (instr->Mask(MoveWideImmediateMask)) {
+    case MOVN_w:
+    case MOVN_x: mnemonic = "movn"; break;
+    case MOVZ_w:
+    case MOVZ_x: mnemonic = "movz"; break;
+    case MOVK_w:
+    case MOVK_x: mnemonic = "movk"; form = "'Rd, 'IMoveLSL"; break;
+    default: UNREACHABLE();
+  }
+  Format(instr, mnemonic, form);
+}
+
+
+// X-macro list of single-register load/store opcodes: V(opcode stem,
+// mnemonic, register-operand format). Shared by the pre-index, post-index,
+// unsigned-offset and register-offset visitors below. (No comments inside
+// the macro: they would break the line continuations.)
+#define LOAD_STORE_LIST(V) \
+  V(STRB_w, "strb", "'Wt") \
+  V(STRH_w, "strh", "'Wt") \
+  V(STR_w, "str", "'Wt") \
+  V(STR_x, "str", "'Xt") \
+  V(LDRB_w, "ldrb", "'Wt") \
+  V(LDRH_w, "ldrh", "'Wt") \
+  V(LDR_w, "ldr", "'Wt") \
+  V(LDR_x, "ldr", "'Xt") \
+  V(LDRSB_x, "ldrsb", "'Xt") \
+  V(LDRSH_x, "ldrsh", "'Xt") \
+  V(LDRSW_x, "ldrsw", "'Xt") \
+  V(LDRSB_w, "ldrsb", "'Wt") \
+  V(LDRSH_w, "ldrsh", "'Wt") \
+  V(STR_s, "str", "'St") \
+  V(STR_d, "str", "'Dt") \
+  V(LDR_s, "ldr", "'St") \
+  V(LDR_d, "ldr", "'Dt")
+
+// Disassemble a pre-indexed load/store: "mnemonic Rt, [Xn|sp, #imm]!".
+void Disassembler::VisitLoadStorePreIndex(Instruction* instr) {
+  const char *mnemonic = "unimplemented";
+  const char *form = "(LoadStorePreIndex)";
+
+  switch (instr->Mask(LoadStorePreIndexMask)) {
+    #define LS_PREINDEX(A, B, C) \
+    case A##_pre: mnemonic = B; form = C ", ['Xns'ILS]!"; break;
+    LOAD_STORE_LIST(LS_PREINDEX)
+    #undef LS_PREINDEX
+  }
+  Format(instr, mnemonic, form);
+}
+
+
+// Disassemble a post-indexed load/store: "mnemonic Rt, [Xn|sp], #imm".
+void Disassembler::VisitLoadStorePostIndex(Instruction* instr) {
+  const char *mnemonic = "unimplemented";
+  const char *form = "(LoadStorePostIndex)";
+
+  switch (instr->Mask(LoadStorePostIndexMask)) {
+    #define LS_POSTINDEX(A, B, C) \
+    case A##_post: mnemonic = B; form = C ", ['Xns]'ILS"; break;
+    LOAD_STORE_LIST(LS_POSTINDEX)
+    #undef LS_POSTINDEX
+  }
+  Format(instr, mnemonic, form);
+}
+
+
+// Disassemble an unsigned-offset load/store or prefetch:
+// "mnemonic Rt, [Xn|sp, #imm]".
+void Disassembler::VisitLoadStoreUnsignedOffset(Instruction* instr) {
+  const char *mnemonic = "unimplemented";
+  const char *form = "(LoadStoreUnsignedOffset)";
+
+  switch (instr->Mask(LoadStoreUnsignedOffsetMask)) {
+    #define LS_UNSIGNEDOFFSET(A, B, C) \
+    case A##_unsigned: mnemonic = B; form = C ", ['Xns'ILU]"; break;
+    LOAD_STORE_LIST(LS_UNSIGNEDOFFSET)
+    #undef LS_UNSIGNEDOFFSET
+    // The base register is SP-capable, so use 'Xns (prints csp for x31),
+    // matching the LS_UNSIGNEDOFFSET cases above and PRFM_reg in
+    // VisitLoadStoreRegisterOffset. 'Xn would mis-print x31 as xzr.
+    case PRFM_unsigned: mnemonic = "prfm"; form = "'PrefOp, ['Xns'ILU]";
+  }
+  Format(instr, mnemonic, form);
+}
+
+
+// Disassemble a register-offset load/store or prefetch:
+// "mnemonic Rt, [Xn|sp, Rm{, ext/shift}]".
+void Disassembler::VisitLoadStoreRegisterOffset(Instruction* instr) {
+  const char *mnemonic = "unimplemented";
+  const char *form = "(LoadStoreRegisterOffset)";
+
+  switch (instr->Mask(LoadStoreRegisterOffsetMask)) {
+    #define LS_REGISTEROFFSET(A, B, C) \
+    case A##_reg: mnemonic = B; form = C ", ['Xns, 'Offsetreg]"; break;
+    LOAD_STORE_LIST(LS_REGISTEROFFSET)
+    #undef LS_REGISTEROFFSET
+    case PRFM_reg: mnemonic = "prfm"; form = "'PrefOp, ['Xns, 'Offsetreg]";
+  }
+  Format(instr, mnemonic, form);
+}
+
+
+// Disassemble an unscaled-offset (stur/ldur family) load/store:
+// "mnemonic Rt, [Xn|sp, #imm]". The default form uses Wt; wider or FP
+// transfer registers override it.
+void Disassembler::VisitLoadStoreUnscaledOffset(Instruction* instr) {
+  const char *mnemonic = "unimplemented";
+  const char *form = "'Wt, ['Xns'ILS]";
+  const char *form_x = "'Xt, ['Xns'ILS]";
+  const char *form_s = "'St, ['Xns'ILS]";
+  const char *form_d = "'Dt, ['Xns'ILS]";
+
+  switch (instr->Mask(LoadStoreUnscaledOffsetMask)) {
+    case STURB_w: mnemonic = "sturb"; break;
+    case STURH_w: mnemonic = "sturh"; break;
+    case STUR_w: mnemonic = "stur"; break;
+    case STUR_x: mnemonic = "stur"; form = form_x; break;
+    case STUR_s: mnemonic = "stur"; form = form_s; break;
+    case STUR_d: mnemonic = "stur"; form = form_d; break;
+    case LDURB_w: mnemonic = "ldurb"; break;
+    case LDURH_w: mnemonic = "ldurh"; break;
+    case LDUR_w: mnemonic = "ldur"; break;
+    case LDUR_x: mnemonic = "ldur"; form = form_x; break;
+    case LDUR_s: mnemonic = "ldur"; form = form_s; break;
+    case LDUR_d: mnemonic = "ldur"; form = form_d; break;
+    // The _x signed loads set the form, then fall through to pick up the
+    // mnemonic shared with the _w encoding.
+    case LDURSB_x: form = form_x;  // Fall through.
+    case LDURSB_w: mnemonic = "ldursb"; break;
+    case LDURSH_x: form = form_x;  // Fall through.
+    case LDURSH_w: mnemonic = "ldursh"; break;
+    case LDURSW_x: mnemonic = "ldursw"; form = form_x; break;
+    default: form = "(LoadStoreUnscaledOffset)";
+  }
+  Format(instr, mnemonic, form);
+}
+
+
+// Disassemble a load-literal (PC-relative ldr), printing the pc offset and
+// the literal pool value.
+void Disassembler::VisitLoadLiteral(Instruction* instr) {
+  const char *mnemonic = "ldr";
+  const char *form = "(LoadLiteral)";
+
+  switch (instr->Mask(LoadLiteralMask)) {
+    case LDR_w_lit: form = "'Wt, 'ILLiteral 'LValue"; break;
+    case LDR_x_lit: form = "'Xt, 'ILLiteral 'LValue"; break;
+    case LDR_s_lit: form = "'St, 'ILLiteral 'LValue"; break;
+    case LDR_d_lit: form = "'Dt, 'ILLiteral 'LValue"; break;
+    default: mnemonic = "unimplemented";
+  }
+  Format(instr, mnemonic, form);
+}
+
+
+// X-macro list of load/store-pair opcodes: V(opcode stem, mnemonic,
+// register-pair format, access-size scale in bytes). The scale is passed as
+// a string so it can be spliced into the 'ILPx immediate directive.
+#define LOAD_STORE_PAIR_LIST(V) \
+  V(STP_w, "stp", "'Wt, 'Wt2", "4") \
+  V(LDP_w, "ldp", "'Wt, 'Wt2", "4") \
+  V(LDPSW_x, "ldpsw", "'Xt, 'Xt2", "4") \
+  V(STP_x, "stp", "'Xt, 'Xt2", "8") \
+  V(LDP_x, "ldp", "'Xt, 'Xt2", "8") \
+  V(STP_s, "stp", "'St, 'St2", "4") \
+  V(LDP_s, "ldp", "'St, 'St2", "4") \
+  V(STP_d, "stp", "'Dt, 'Dt2", "8") \
+  V(LDP_d, "ldp", "'Dt, 'Dt2", "8")
+
+// Disassemble a post-indexed load/store pair: "mnemonic Rt, Rt2, [Xn|sp], #imm".
+void Disassembler::VisitLoadStorePairPostIndex(Instruction* instr) {
+  const char *mnemonic = "unimplemented";
+  const char *form = "(LoadStorePairPostIndex)";
+
+  switch (instr->Mask(LoadStorePairPostIndexMask)) {
+    #define LSP_POSTINDEX(A, B, C, D) \
+    case A##_post: mnemonic = B; form = C ", ['Xns]'ILP" D; break;
+    LOAD_STORE_PAIR_LIST(LSP_POSTINDEX)
+    #undef LSP_POSTINDEX
+  }
+  Format(instr, mnemonic, form);
+}
+
+
+// Disassemble a pre-indexed load/store pair: "mnemonic Rt, Rt2, [Xn|sp, #imm]!".
+void Disassembler::VisitLoadStorePairPreIndex(Instruction* instr) {
+  const char *mnemonic = "unimplemented";
+  const char *form = "(LoadStorePairPreIndex)";
+
+  switch (instr->Mask(LoadStorePairPreIndexMask)) {
+    #define LSP_PREINDEX(A, B, C, D) \
+    case A##_pre: mnemonic = B; form = C ", ['Xns'ILP" D "]!"; break;
+    LOAD_STORE_PAIR_LIST(LSP_PREINDEX)
+    #undef LSP_PREINDEX
+  }
+  Format(instr, mnemonic, form);
+}
+
+
+// Disassemble an offset load/store pair: "mnemonic Rt, Rt2, [Xn|sp, #imm]".
+void Disassembler::VisitLoadStorePairOffset(Instruction* instr) {
+  const char *mnemonic = "unimplemented";
+  const char *form = "(LoadStorePairOffset)";
+
+  switch (instr->Mask(LoadStorePairOffsetMask)) {
+    #define LSP_OFFSET(A, B, C, D) \
+    case A##_off: mnemonic = B; form = C ", ['Xns'ILP" D "]"; break;
+    LOAD_STORE_PAIR_LIST(LSP_OFFSET)
+    #undef LSP_OFFSET
+  }
+  Format(instr, mnemonic, form);
+}
+
+
+// Disassemble a non-temporal load/store pair (stnp/ldnp). The ILP scale
+// (4 or 8) matches the per-register access size.
+void Disassembler::VisitLoadStorePairNonTemporal(Instruction* instr) {
+  const char *mnemonic = "unimplemented";
+  const char *form;
+
+  switch (instr->Mask(LoadStorePairNonTemporalMask)) {
+    case STNP_w: mnemonic = "stnp"; form = "'Wt, 'Wt2, ['Xns'ILP4]"; break;
+    case LDNP_w: mnemonic = "ldnp"; form = "'Wt, 'Wt2, ['Xns'ILP4]"; break;
+    case STNP_x: mnemonic = "stnp"; form = "'Xt, 'Xt2, ['Xns'ILP8]"; break;
+    case LDNP_x: mnemonic = "ldnp"; form = "'Xt, 'Xt2, ['Xns'ILP8]"; break;
+    case STNP_s: mnemonic = "stnp"; form = "'St, 'St2, ['Xns'ILP4]"; break;
+    case LDNP_s: mnemonic = "ldnp"; form = "'St, 'St2, ['Xns'ILP4]"; break;
+    case STNP_d: mnemonic = "stnp"; form = "'Dt, 'Dt2, ['Xns'ILP8]"; break;
+    case LDNP_d: mnemonic = "ldnp"; form = "'Dt, 'Dt2, ['Xns'ILP8]"; break;
+    default: form = "(LoadStorePairNonTemporal)";
+  }
+  Format(instr, mnemonic, form);
+}
+
+
+// Disassemble an FP compare (fcmp), printing "#0.0" for the compare-with-
+// zero encodings.
+void Disassembler::VisitFPCompare(Instruction* instr) {
+  const char *mnemonic = "unimplemented";
+  const char *form = "'Fn, 'Fm";
+  const char *form_zero = "'Fn, #0.0";
+
+  switch (instr->Mask(FPCompareMask)) {
+    // The zero forms override the operand format, then fall through to pick
+    // up the shared mnemonic.
+    case FCMP_s_zero:
+    case FCMP_d_zero: form = form_zero;  // Fall through.
+    case FCMP_s:
+    case FCMP_d: mnemonic = "fcmp"; break;
+    default: form = "(FPCompare)";
+  }
+  Format(instr, mnemonic, form);
+}
+
+
+// Disassemble an FP conditional compare: fccmp or fccmpe.
+void Disassembler::VisitFPConditionalCompare(Instruction* instr) {
+  const char *mnemonic = "unimplemented";
+  const char *form = "'Fn, 'Fm, 'INzcv, 'Cond";
+
+  switch (instr->Mask(FPConditionalCompareMask)) {
+    case FCCMP_s:
+    case FCCMP_d: mnemonic = "fccmp"; break;
+    case FCCMPE_s:
+    case FCCMPE_d: mnemonic = "fccmpe"; break;
+    default: form = "(FPConditionalCompare)";
+  }
+  Format(instr, mnemonic, form);
+}
+
+
+// Disassemble an FP conditional select: fcsel.
+void Disassembler::VisitFPConditionalSelect(Instruction* instr) {
+  const char *mnemonic = "";
+
+  switch (instr->Mask(FPConditionalSelectMask)) {
+    case FCSEL_s:
+    case FCSEL_d:
+      mnemonic = "fcsel";
+      break;
+    default:
+      UNREACHABLE();
+  }
+  Format(instr, mnemonic, "'Fd, 'Fn, 'Fm, 'Cond");
+}
+
+
+// Disassemble a 1-source FP data-processing instruction (fmov/fabs/fneg/
+// fsqrt/frint*/fcvt). The FORMAT macro expands the S and D encodings.
+void Disassembler::VisitFPDataProcessing1Source(Instruction* instr) {
+  const char *mnemonic = "unimplemented";
+  const char *form = "'Fd, 'Fn";
+
+  switch (instr->Mask(FPDataProcessing1SourceMask)) {
+    #define FORMAT(A, B) \
+    case A##_s: \
+    case A##_d: mnemonic = B; break;
+    FORMAT(FMOV, "fmov");
+    FORMAT(FABS, "fabs");
+    FORMAT(FNEG, "fneg");
+    FORMAT(FSQRT, "fsqrt");
+    FORMAT(FRINTN, "frintn");
+    FORMAT(FRINTP, "frintp");
+    FORMAT(FRINTM, "frintm");
+    FORMAT(FRINTZ, "frintz");
+    FORMAT(FRINTA, "frinta");
+    FORMAT(FRINTX, "frintx");
+    FORMAT(FRINTI, "frinti");
+    #undef FORMAT
+    // Precision conversions use explicit mixed S/D operand forms.
+    case FCVT_ds: mnemonic = "fcvt"; form = "'Dd, 'Sn"; break;
+    case FCVT_sd: mnemonic = "fcvt"; form = "'Sd, 'Dn"; break;
+    default: form = "(FPDataProcessing1Source)";
+  }
+  Format(instr, mnemonic, form);
+}
+
+
+// Disassemble a 2-source FP data-processing instruction (fmul/fdiv/fadd/
+// fsub/fmax/fmin/fmaxnm/fminnm/fnmul).
+void Disassembler::VisitFPDataProcessing2Source(Instruction* instr) {
+  const char *mnemonic = "";
+  const char *form = "'Fd, 'Fn, 'Fm";
+
+  switch (instr->Mask(FPDataProcessing2SourceMask)) {
+    #define FORMAT(A, B) \
+    case A##_s: \
+    case A##_d: mnemonic = B; break;
+    FORMAT(FMUL, "fmul");
+    FORMAT(FDIV, "fdiv");
+    FORMAT(FADD, "fadd");
+    FORMAT(FSUB, "fsub");
+    FORMAT(FMAX, "fmax");
+    FORMAT(FMIN, "fmin");
+    FORMAT(FMAXNM, "fmaxnm");
+    FORMAT(FMINNM, "fminnm");
+    FORMAT(FNMUL, "fnmul");
+    #undef FORMAT
+    default: UNREACHABLE();
+  }
+  Format(instr, mnemonic, form);
+}
+
+
+// Disassemble a 3-source FP data-processing instruction (fused
+// multiply-add family: fmadd/fmsub/fnmadd/fnmsub).
+void Disassembler::VisitFPDataProcessing3Source(Instruction* instr) {
+  const char *mnemonic = "";
+  const char *form = "'Fd, 'Fn, 'Fm, 'Fa";
+
+  switch (instr->Mask(FPDataProcessing3SourceMask)) {
+    #define FORMAT(A, B) \
+    case A##_s: \
+    case A##_d: mnemonic = B; break;
+    FORMAT(FMADD, "fmadd");
+    FORMAT(FMSUB, "fmsub");
+    FORMAT(FNMADD, "fnmadd");
+    FORMAT(FNMSUB, "fnmsub");
+    #undef FORMAT
+    default: UNREACHABLE();
+  }
+  Format(instr, mnemonic, form);
+}
+
+
+// Disassemble an FP move-immediate (fmov with an encoded FP constant).
+void Disassembler::VisitFPImmediate(Instruction* instr) {
+  const char *mnemonic = "";
+  const char *form = "(FPImmediate)";
+
+  switch (instr->Mask(FPImmediateMask)) {
+    case FMOV_s_imm:
+      mnemonic = "fmov";
+      form = "'Sd, 'IFPSingle";
+      break;
+    case FMOV_d_imm:
+      mnemonic = "fmov";
+      form = "'Dd, 'IFPDouble";
+      break;
+    default:
+      UNREACHABLE();
+  }
+  Format(instr, mnemonic, form);
+}
+
+
+// Disassemble an FP<->integer conversion (fmov/fcvt*/scvtf/ucvtf).
+// form_rf converts FP to integer (Rd, Fn); form_fr the reverse (Fd, Rn).
+void Disassembler::VisitFPIntegerConvert(Instruction* instr) {
+  const char *mnemonic = "unimplemented";
+  const char *form = "(FPIntegerConvert)";
+  const char *form_rf = "'Rd, 'Fn";
+  const char *form_fr = "'Fd, 'Rn";
+
+  switch (instr->Mask(FPIntegerConvertMask)) {
+    case FMOV_ws:
+    case FMOV_xd: mnemonic = "fmov"; form = form_rf; break;
+    case FMOV_sw:
+    case FMOV_dx: mnemonic = "fmov"; form = form_fr; break;
+    case FCVTAS_ws:
+    case FCVTAS_xs:
+    case FCVTAS_wd:
+    case FCVTAS_xd: mnemonic = "fcvtas"; form = form_rf; break;
+    case FCVTAU_ws:
+    case FCVTAU_xs:
+    case FCVTAU_wd:
+    case FCVTAU_xd: mnemonic = "fcvtau"; form = form_rf; break;
+    case FCVTMS_ws:
+    case FCVTMS_xs:
+    case FCVTMS_wd:
+    case FCVTMS_xd: mnemonic = "fcvtms"; form = form_rf; break;
+    case FCVTMU_ws:
+    case FCVTMU_xs:
+    case FCVTMU_wd:
+    case FCVTMU_xd: mnemonic = "fcvtmu"; form = form_rf; break;
+    case FCVTNS_ws:
+    case FCVTNS_xs:
+    case FCVTNS_wd:
+    case FCVTNS_xd: mnemonic = "fcvtns"; form = form_rf; break;
+    case FCVTNU_ws:
+    case FCVTNU_xs:
+    case FCVTNU_wd:
+    case FCVTNU_xd: mnemonic = "fcvtnu"; form = form_rf; break;
+    case FCVTZU_xd:
+    case FCVTZU_ws:
+    case FCVTZU_wd:
+    case FCVTZU_xs: mnemonic = "fcvtzu"; form = form_rf; break;
+    case FCVTZS_xd:
+    case FCVTZS_wd:
+    case FCVTZS_xs:
+    case FCVTZS_ws: mnemonic = "fcvtzs"; form = form_rf; break;
+    case SCVTF_sw:
+    case SCVTF_sx:
+    case SCVTF_dw:
+    case SCVTF_dx: mnemonic = "scvtf"; form = form_fr; break;
+    case UCVTF_sw:
+    case UCVTF_sx:
+    case UCVTF_dw:
+    case UCVTF_dx: mnemonic = "ucvtf"; form = form_fr; break;
+  }
+  // Unmatched encodings keep the "unimplemented" defaults.
+  Format(instr, mnemonic, form);
+}
+
+
+// Disassemble an FP<->fixed-point conversion (fcvtzs/fcvtzu/scvtf/ucvtf
+// with an fbits operand).
+void Disassembler::VisitFPFixedPointConvert(Instruction* instr) {
+  const char *mnemonic = "";
+  const char *form = "'Rd, 'Fn, 'IFPFBits";
+  const char *form_fr = "'Fd, 'Rn, 'IFPFBits";
+
+  switch (instr->Mask(FPFixedPointConvertMask)) {
+    case FCVTZS_ws_fixed:
+    case FCVTZS_xs_fixed:
+    case FCVTZS_wd_fixed:
+    case FCVTZS_xd_fixed: mnemonic = "fcvtzs"; break;
+    case FCVTZU_ws_fixed:
+    case FCVTZU_xs_fixed:
+    case FCVTZU_wd_fixed:
+    case FCVTZU_xd_fixed: mnemonic = "fcvtzu"; break;
+    case SCVTF_sw_fixed:
+    case SCVTF_sx_fixed:
+    case SCVTF_dw_fixed:
+    case SCVTF_dx_fixed: mnemonic = "scvtf"; form = form_fr; break;
+    case UCVTF_sw_fixed:
+    case UCVTF_sx_fixed:
+    case UCVTF_dw_fixed:
+    case UCVTF_dx_fixed: mnemonic = "ucvtf"; form = form_fr; break;
+  }
+  Format(instr, mnemonic, form);
+}
+
+
+// Disassemble a system instruction: mrs/msr accesses to NZCV/FPCR, the nop
+// hint, and the dmb/dsb/isb barriers.
+void Disassembler::VisitSystem(Instruction* instr) {
+  // Some system instructions hijack their Op and Cp fields to represent a
+  // range of immediates instead of indicating a different instruction. This
+  // makes the decoding tricky.
+  const char *mnemonic = "unimplemented";
+  const char *form = "(System)";
+
+  if (instr->Mask(SystemSysRegFMask) == SystemSysRegFixed) {
+    switch (instr->Mask(SystemSysRegMask)) {
+      case MRS: {
+        mnemonic = "mrs";
+        switch (instr->ImmSystemRegister()) {
+          case NZCV: form = "'Xt, nzcv"; break;
+          case FPCR: form = "'Xt, fpcr"; break;
+          default: form = "'Xt, (unknown)"; break;
+        }
+        break;
+      }
+      case MSR: {
+        mnemonic = "msr";
+        switch (instr->ImmSystemRegister()) {
+          case NZCV: form = "nzcv, 'Xt"; break;
+          case FPCR: form = "fpcr, 'Xt"; break;
+          default: form = "(unknown), 'Xt"; break;
+        }
+        break;
+      }
+    }
+  } else if (instr->Mask(SystemHintFMask) == SystemHintFixed) {
+    ASSERT(instr->Mask(SystemHintMask) == HINT);
+    switch (instr->ImmHint()) {
+      case NOP: {
+        // nop takes no operands.
+        mnemonic = "nop";
+        form = NULL;
+        break;
+      }
+    }
+  } else if (instr->Mask(MemBarrierFMask) == MemBarrierFixed) {
+    switch (instr->Mask(MemBarrierMask)) {
+      case DMB: {
+        mnemonic = "dmb";
+        form = "'M";
+        break;
+      }
+      case DSB: {
+        mnemonic = "dsb";
+        form = "'M";
+        break;
+      }
+      case ISB: {
+        mnemonic = "isb";
+        form = NULL;
+        break;
+      }
+    }
+  }
+
+  Format(instr, mnemonic, form);
+}
+
+
+// Disassemble an exception-generating instruction (hlt/brk/svc/hvc/smc/
+// dcps1-3). The dcps forms wrap the immediate in braces.
+void Disassembler::VisitException(Instruction* instr) {
+  const char *mnemonic = "unimplemented";
+  const char *form = "'IDebug";
+
+  switch (instr->Mask(ExceptionMask)) {
+    case HLT: mnemonic = "hlt"; break;
+    case BRK: mnemonic = "brk"; break;
+    case SVC: mnemonic = "svc"; break;
+    case HVC: mnemonic = "hvc"; break;
+    case SMC: mnemonic = "smc"; break;
+    case DCPS1: mnemonic = "dcps1"; form = "{'IDebug}"; break;
+    case DCPS2: mnemonic = "dcps2"; form = "{'IDebug}"; break;
+    case DCPS3: mnemonic = "dcps3"; form = "{'IDebug}"; break;
+    default: form = "(Exception)";
+  }
+  Format(instr, mnemonic, form);
+}
+
+
+// Fallback visitor for instruction classes the disassembler does not
+// implement yet.
+void Disassembler::VisitUnimplemented(Instruction* instr) {
+  Format(instr, "unimplemented", "(Unimplemented)");
+}
+
+
+// Visitor for encodings that are architecturally unallocated.
+void Disassembler::VisitUnallocated(Instruction* instr) {
+  Format(instr, "unallocated", "(Unallocated)");
+}
+
+
+// Hook called by Format() after each instruction is rendered; subclasses
+// override this to consume the buffered text.
+void Disassembler::ProcessOutput(Instruction* /*instr*/) {
+  // The base disasm does nothing more than disassembling into a buffer.
+}
+
+
+// Render one instruction into buffer_: the mnemonic, then (if non-NULL) a
+// space and the substituted operand format string, then hand the result to
+// ProcessOutput().
+void Disassembler::Format(Instruction* instr, const char* mnemonic,
+                          const char* format) {
+  // TODO(mcapewel) don't think I can use the instr address here - there needs
+  // to be a base address too
+  ASSERT(mnemonic != NULL);
+  ResetOutput();
+  Substitute(instr, mnemonic);
+  if (format != NULL) {
+    buffer_[buffer_pos_++] = ' ';
+    Substitute(instr, format);
+  }
+  buffer_[buffer_pos_] = 0;  // NUL-terminate before publishing the buffer.
+  ProcessOutput(instr);
+}
+
+
+// Copy `string` into the output buffer, expanding each '-prefixed field
+// directive via SubstituteField (which returns the number of format
+// characters it consumed).
+void Disassembler::Substitute(Instruction* instr, const char* string) {
+  for (char c = *string++; c != '\0'; c = *string++) {
+    if (c == '\'') {
+      string += SubstituteField(instr, string);
+    } else {
+      buffer_[buffer_pos_++] = c;
+    }
+  }
+}
+
+
+// Dispatch a single format-field directive (the text after a ') to the
+// matching substitution helper. Returns the number of format characters
+// consumed so the caller can advance past the directive.
+int Disassembler::SubstituteField(Instruction* instr, const char* format) {
+  switch (format[0]) {
+    case 'R':  // Register. X or W, selected by sf bit.
+    case 'F':  // FP Register. S or D, selected by type field.
+    case 'W':
+    case 'X':
+    case 'S':
+    case 'D': return SubstituteRegisterField(instr, format);
+    case 'I': return SubstituteImmediateField(instr, format);
+    case 'L': return SubstituteLiteralField(instr, format);
+    case 'H': return SubstituteShiftField(instr, format);
+    case 'P': return SubstitutePrefetchField(instr, format);
+    case 'C': return SubstituteConditionField(instr, format);
+    case 'E': return SubstituteExtendField(instr, format);
+    case 'A': return SubstitutePCRelAddressField(instr, format);
+    case 'B': return SubstituteBranchTargetField(instr, format);
+    case 'O': return SubstituteLSRegOffsetField(instr, format);
+    case 'M': return SubstituteBarrierField(instr, format);
+    default: {
+      UNREACHABLE();
+      return 1;
+    }
+  }
+}
+
+
+// Print a register operand. format[1] selects which register field of the
+// instruction (d/n/m/a/t, with "t2" for the second transfer register);
+// format[0] selects the register type; a trailing 's' marks the operand as
+// SP-capable, so register 31 prints as csp/wcsp instead of xzr/wzr.
+// Returns the number of format characters consumed.
+int Disassembler::SubstituteRegisterField(Instruction* instr,
+                                          const char* format) {
+  unsigned reg_num = 0;
+  unsigned field_len = 2;
+  switch (format[1]) {
+    case 'd': reg_num = instr->Rd(); break;
+    case 'n': reg_num = instr->Rn(); break;
+    case 'm': reg_num = instr->Rm(); break;
+    case 'a': reg_num = instr->Ra(); break;
+    case 't': {
+      if (format[2] == '2') {
+        reg_num = instr->Rt2();
+        field_len = 3;
+      } else {
+        reg_num = instr->Rt();
+      }
+      break;
+    }
+    default: UNREACHABLE();
+  }
+
+  // Increase field length for registers tagged as stack.
+  if (format[2] == 's') {
+    field_len = 3;
+  }
+
+  char reg_type;
+  if (format[0] == 'R') {
+    // Register type is R: use sf bit to choose X and W.
+    reg_type = instr->SixtyFourBits() ? 'x' : 'w';
+  } else if (format[0] == 'F') {
+    // Floating-point register: use type field to choose S or D.
+    reg_type = ((instr->FPType() & 1) == 0) ? 's' : 'd';
+  } else {
+    // Register type is specified. Make it lower case.
+    reg_type = format[0] + 0x20;
+  }
+
+  if ((reg_num != kZeroRegCode) || (reg_type == 's') || (reg_type == 'd')) {
+    // A normal register: w0 - w30, x0 - x30, s0 - s31, d0 - d31.
+
+    // Filter special registers: print V8's symbolic names for the context
+    // pointer, JS stack pointer, frame pointer and link register.
+    if ((reg_type == 'x') && (reg_num == 27)) {
+      AppendToOutput("cp");
+    } else if ((reg_type == 'x') && (reg_num == 28)) {
+      AppendToOutput("jssp");
+    } else if ((reg_type == 'x') && (reg_num == 29)) {
+      AppendToOutput("fp");
+    } else if ((reg_type == 'x') && (reg_num == 30)) {
+      AppendToOutput("lr");
+    } else {
+      AppendToOutput("%c%d", reg_type, reg_num);
+    }
+  } else if (format[2] == 's') {
+    // Disassemble w31/x31 as stack pointer wcsp/csp.
+    AppendToOutput("%s", (reg_type == 'w') ? "wcsp" : "csp");
+  } else {
+    // Disassemble w31/x31 as zero register wzr/xzr.
+    AppendToOutput("%czr", reg_type);
+  }
+
+  return field_len;
+}
+
+
+// Print an immediate operand selected by the characters following 'I' in
+// the format directive. Returns the number of format characters consumed
+// (i.e. the length of the directive after the leading ').
+int Disassembler::SubstituteImmediateField(Instruction* instr,
+                                           const char* format) {
+  ASSERT(format[0] == 'I');
+
+  switch (format[1]) {
+    case 'M': {  // IMoveImm or IMoveLSL.
+      if (format[5] == 'I') {
+        // IMoveImm: print the fully shifted immediate (movz/movn).
+        uint64_t imm = instr->ImmMoveWide() << (16 * instr->ShiftMoveWide());
+        AppendToOutput("#0x%" PRIx64, imm);
+      } else {
+        // IMoveLSL: print the raw half-word and an explicit lsl (movk).
+        ASSERT(format[5] == 'L');
+        AppendToOutput("#0x%" PRIx64, instr->ImmMoveWide());
+        if (instr->ShiftMoveWide() > 0) {
+          AppendToOutput(", lsl #%d", 16 * instr->ShiftMoveWide());
+        }
+      }
+      return 8;
+    }
+    case 'L': {
+      switch (format[2]) {
+        case 'L': {  // ILLiteral - Immediate Load Literal.
+          AppendToOutput("pc%+" PRId64,
+                         instr->ImmLLiteral() << kLiteralEntrySizeLog2);
+          return 9;
+        }
+        case 'S': {  // ILS - Immediate Load/Store.
+          if (instr->ImmLS() != 0) {
+            AppendToOutput(", #%" PRId64, instr->ImmLS());
+          }
+          return 3;
+        }
+        case 'P': {  // ILPx - Immediate Load/Store Pair, x = access size.
+          if (instr->ImmLSPair() != 0) {
+            // format[3] is the scale value. Convert to a number.
+            int scale = format[3] - 0x30;
+            AppendToOutput(", #%" PRId64, instr->ImmLSPair() * scale);
+          }
+          return 4;
+        }
+        case 'U': {  // ILU - Immediate Load/Store Unsigned.
+          if (instr->ImmLSUnsigned() != 0) {
+            AppendToOutput(", #%" PRIu64,
+                           instr->ImmLSUnsigned() << instr->SizeLS());
+          }
+          return 3;
+        }
+      }
+      // NOTE(review): every inner case returns, but an unrecognised
+      // format[2] would fall through into case 'C' below — presumably
+      // unreachable for well-formed format strings; confirm.
+    }
+    case 'C': {  // ICondB - Immediate Conditional Branch.
+      int64_t offset = instr->ImmCondBranch() << 2;
+      char sign = (offset >= 0) ? '+' : '-';
+      AppendToOutput("#%c0x%" PRIx64, sign, offset);
+      return 6;
+    }
+    case 'A': {  // IAddSub.
+      ASSERT(instr->ShiftAddSub() <= 1);
+      int64_t imm = instr->ImmAddSub() << (12 * instr->ShiftAddSub());
+      AppendToOutput("#0x%" PRIx64 " (%" PRId64 ")", imm, imm);
+      return 7;
+    }
+    case 'F': {  // IFPSingle, IFPDouble or IFPFBits.
+      if (format[3] == 'F') {  // IFPFBits.
+        AppendToOutput("#%d", 64 - instr->FPScale());
+        return 8;
+      } else {
+        AppendToOutput("#0x%" PRIx64 " (%.4f)", instr->ImmFP(),
+                       format[3] == 'S' ? instr->ImmFP32() : instr->ImmFP64());
+        return 9;
+      }
+    }
+    case 'T': {  // ITri - Immediate Triangular Encoded.
+      AppendToOutput("#0x%" PRIx64, instr->ImmLogical());
+      return 4;
+    }
+    case 'N': {  // INzcv.
+      int nzcv = (instr->Nzcv() << Flags_offset);
+      // Upper case letters mark flags that will be set.
+      AppendToOutput("#%c%c%c%c", ((nzcv & NFlag) == 0) ? 'n' : 'N',
+                                  ((nzcv & ZFlag) == 0) ? 'z' : 'Z',
+                                  ((nzcv & CFlag) == 0) ? 'c' : 'C',
+                                  ((nzcv & VFlag) == 0) ? 'v' : 'V');
+      return 5;
+    }
+    case 'P': {  // IP - Conditional compare.
+      AppendToOutput("#%d", instr->ImmCondCmp());
+      return 2;
+    }
+    case 'B': {  // Bitfields.
+      return SubstituteBitfieldImmediateField(instr, format);
+    }
+    case 'E': {  // IExtract.
+      AppendToOutput("#%d", instr->ImmS());
+      return 8;
+    }
+    case 'S': {  // IS - Test and branch bit.
+      AppendToOutput("#%d", (instr->ImmTestBranchBit5() << 5) |
+                            instr->ImmTestBranchBit40());
+      return 2;
+    }
+    case 'D': {  // IDebug - HLT and BRK instructions.
+      AppendToOutput("#0x%x", instr->ImmException());
+      return 6;
+    }
+    default: {
+      UNIMPLEMENTED();
+      return 0;
+    }
+  }
+}
+
+
+// Substitute a bitfield immediate placeholder ("IBr", "IBs+1", "IBs-r+1" or
+// "IBZ-r") using the instruction's ImmR/ImmS fields.  Returns the number of
+// format characters consumed.
+int Disassembler::SubstituteBitfieldImmediateField(Instruction* instr,
+                                                   const char* format) {
+  ASSERT((format[0] == 'I') && (format[1] == 'B'));
+  unsigned r = instr->ImmR();
+  unsigned s = instr->ImmS();
+
+  switch (format[2]) {
+    case 'r': {  // IBr.
+      AppendToOutput("#%d", r);
+      return 3;
+    }
+    case 's': {  // IBs+1 or IBs-r+1.
+      if (format[3] == '+') {
+        AppendToOutput("#%d", s + 1);
+        return 5;
+      } else {
+        ASSERT(format[3] == '-');
+        AppendToOutput("#%d", s - r + 1);
+        return 7;
+      }
+    }
+    case 'Z': {  // IBZ-r.
+      ASSERT((format[3] == '-') && (format[4] == 'r'));
+      // Width operand of LSL-alias forms: register size minus shift.
+      unsigned reg_size = (instr->SixtyFourBits() == 1) ? kXRegSize : kWRegSize;
+      AppendToOutput("#%d", reg_size - r);
+      return 5;
+    }
+    default: {
+      UNREACHABLE();
+      return 0;
+    }
+  }
+}
+
+
+// Substitute "LValue" with the address of a load-literal's pool entry.
+// Always consumes the 6 characters of "LValue".
+int Disassembler::SubstituteLiteralField(Instruction* instr,
+                                         const char* format) {
+  ASSERT(strncmp(format, "LValue", 6) == 0);
+  USE(format);
+
+  switch (instr->Mask(LoadLiteralMask)) {
+    case LDR_w_lit:
+    case LDR_x_lit:
+    case LDR_s_lit:
+    case LDR_d_lit: AppendToOutput("(addr %p)", instr->LiteralAddress()); break;
+    default: UNREACHABLE();
+  }
+
+  return 6;
+}
+
+
+// Substitute a shift placeholder ("HDP" or "HLo") with ", <shift> #<amount>"
+// when the shift amount is non-zero.  Consumes 3 format characters.
+int Disassembler::SubstituteShiftField(Instruction* instr, const char* format) {
+  ASSERT(format[0] == 'H');
+  ASSERT(instr->ShiftDP() <= 0x3);
+
+  switch (format[1]) {
+    case 'D': {  // HDP.
+      // Data-processing (non-logical) forms never encode ROR.
+      ASSERT(instr->ShiftDP() != ROR);
+    }  // Fall through.
+    case 'L': {  // HLo.
+      if (instr->ImmDPShift() != 0) {
+        const char* shift_type[] = {"lsl", "lsr", "asr", "ror"};
+        AppendToOutput(", %s #%" PRId64, shift_type[instr->ShiftDP()],
+                       instr->ImmDPShift());
+      }
+      return 3;
+    }
+    default:
+      UNIMPLEMENTED();
+      return 0;
+  }
+}
+
+
+// Substitute a condition placeholder ("CBcond", "CIcond" or "Cond") with the
+// two-letter A64 condition mnemonic.  'I' inverts the encoded condition.
+// Always consumes 4 format characters.
+int Disassembler::SubstituteConditionField(Instruction* instr,
+                                           const char* format) {
+  ASSERT(format[0] == 'C');
+  const char* condition_code[] = { "eq", "ne", "hs", "lo",
+                                   "mi", "pl", "vs", "vc",
+                                   "hi", "ls", "ge", "lt",
+                                   "gt", "le", "al", "nv" };
+  int cond;
+  switch (format[1]) {
+    case 'B': cond = instr->ConditionBranch(); break;
+    case 'I': {
+      cond = InvertCondition(static_cast<Condition>(instr->Condition()));
+      break;
+    }
+    default: cond = instr->Condition();
+  }
+  AppendToOutput("%s", condition_code[cond]);
+  return 4;
+}
+
+
+// Substitute a PC-relative address placeholder; only ADR ("AddrPCRelByte",
+// 13 characters) is handled.  Prints the signed byte offset from pc.
+int Disassembler::SubstitutePCRelAddressField(Instruction* instr,
+                                              const char* format) {
+  USE(format);
+  ASSERT(strncmp(format, "AddrPCRel", 9) == 0);
+
+  int offset = instr->ImmPCRel();
+
+  // Only ADR (AddrPCRelByte) is supported.
+  ASSERT(strcmp(format, "AddrPCRelByte") == 0);
+
+  // Print magnitude with an explicit sign so negative offsets read naturally.
+  char sign = '+';
+  if (offset < 0) {
+    offset = -offset;
+    sign = '-';
+  }
+  // TODO(jbramley): Can we print the target address here?
+  AppendToOutput("#%c0x%x", sign, offset);
+  return 13;
+}
+
+
+// Substitute a branch-target placeholder ("BImmUncn", "BImmCond", "BImmCmpa"
+// or "BImmTest", all 8 characters) with the byte offset of the target.
+// format[5] is the first character that distinguishes the four names.
+int Disassembler::SubstituteBranchTargetField(Instruction* instr,
+                                              const char* format) {
+  ASSERT(strncmp(format, "BImm", 4) == 0);
+
+  int64_t offset = 0;
+  switch (format[5]) {
+    // BImmUncn - unconditional branch immediate.
+    case 'n': offset = instr->ImmUncondBranch(); break;
+    // BImmCond - conditional branch immediate.
+    case 'o': offset = instr->ImmCondBranch(); break;
+    // BImmCmpa - compare and branch immediate.
+    case 'm': offset = instr->ImmCmpBranch(); break;
+    // BImmTest - test and branch immediate.
+    case 'e': offset = instr->ImmTestBranch(); break;
+    default: UNIMPLEMENTED();
+  }
+  // Encoded offsets are in instructions; convert to bytes.
+  offset <<= kInstructionSizeLog2;
+  char sign = '+';
+  if (offset < 0) {
+    offset = -offset;
+    sign = '-';
+  }
+  // TODO(mcapewel): look up pc + offset in label table.
+  AppendToOutput("#%c0x%" PRIx64, sign, offset);
+  return 8;
+}
+
+
+// Substitute an extend placeholder ("Ext", 3 characters) with the extend
+// mode and optional shift for extended-register operands.
+int Disassembler::SubstituteExtendField(Instruction* instr,
+                                        const char* format) {
+  ASSERT(strncmp(format, "Ext", 3) == 0);
+  ASSERT(instr->ExtendMode() <= 7);
+  USE(format);
+
+  const char* extend_mode[] = { "uxtb", "uxth", "uxtw", "uxtx",
+                                "sxtb", "sxth", "sxtw", "sxtx" };
+
+  // If rd or rn is SP, uxtw on 32-bit registers and uxtx on 64-bit
+  // registers becomes lsl.
+  if (((instr->Rd() == kZeroRegCode) || (instr->Rn() == kZeroRegCode)) &&
+      (((instr->ExtendMode() == UXTW) && (instr->SixtyFourBits() == 0)) ||
+       (instr->ExtendMode() == UXTX))) {
+    if (instr->ImmExtendShift() > 0) {
+      AppendToOutput(", lsl #%d", instr->ImmExtendShift());
+    }
+  } else {
+    AppendToOutput(", %s", extend_mode[instr->ExtendMode()]);
+    if (instr->ImmExtendShift() > 0) {
+      AppendToOutput(" #%d", instr->ImmExtendShift());
+    }
+  }
+  return 3;
+}
+
+
+// Substitute "Offsetreg" (9 characters) with the register-offset operand of
+// a load/store: the offset register, then its extend mode and shift.
+int Disassembler::SubstituteLSRegOffsetField(Instruction* instr,
+                                             const char* format) {
+  ASSERT(strncmp(format, "Offsetreg", 9) == 0);
+  const char* extend_mode[] = { "undefined", "undefined", "uxtw", "lsl",
+                                "undefined", "undefined", "sxtw", "sxtx" };
+  USE(format);
+
+  unsigned shift = instr->ImmShiftLS();
+  Extend ext = static_cast<Extend>(instr->ExtendMode());
+  // W-extends (UXTW/SXTW) take a 32-bit offset register, others 64-bit.
+  char reg_type = ((ext == UXTW) || (ext == SXTW)) ? 'w' : 'x';
+
+  unsigned rm = instr->Rm();
+  if (rm == kZeroRegCode) {
+    AppendToOutput("%czr", reg_type);
+  } else {
+    AppendToOutput("%c%d", reg_type, rm);
+  }
+
+  // Extend mode UXTX is an alias for shift mode LSL here.
+  if (!((ext == UXTX) && (shift == 0))) {
+    AppendToOutput(", %s", extend_mode[ext]);
+    if (shift != 0) {
+      // NOTE(review): prints SizeLS() (the access-size log2) as the shift
+      // amount rather than 'shift' itself -- for register offsets the only
+      // legal non-zero shift equals the access size, so these presumably
+      // coincide; confirm against the encoding.
+      AppendToOutput(" #%d", instr->SizeLS());
+    }
+  }
+  return 9;
+}
+
+
+// Substitute a prefetch placeholder (format[0] == 'P') with the prefetch
+// operation name, e.g. "pldl1keep".  Consumes 6 format characters.
+int Disassembler::SubstitutePrefetchField(Instruction* instr,
+                                          const char* format) {
+  ASSERT(format[0] == 'P');
+  USE(format);
+
+  int prefetch_mode = instr->PrefetchMode();
+
+  // Bit 4 selects store vs load, bits 2:1 the cache level, bit 0 the policy.
+  const char* ls = (prefetch_mode & 0x10) ? "st" : "ld";
+  int level = (prefetch_mode >> 1) + 1;
+  const char* ks = (prefetch_mode & 1) ? "strm" : "keep";
+
+  AppendToOutput("p%sl%d%s", ls, level, ks);
+  return 6;
+}
+
+// Substitute a barrier-option placeholder (format[0] == 'M') with the
+// DMB/DSB option name, indexed by barrier domain (row) and type (column).
+// Consumes 1 format character.
+int Disassembler::SubstituteBarrierField(Instruction* instr,
+                                         const char* format) {
+  ASSERT(format[0] == 'M');
+  USE(format);
+
+  // Reserved type-0 encodings all mean "sy"; the raw bits are shown so the
+  // non-canonical encoding is still visible in the output.
+  static const char* options[4][4] = {
+    { "sy (0b0000)", "oshld", "oshst", "osh" },
+    { "sy (0b0100)", "nshld", "nshst", "nsh" },
+    { "sy (0b1000)", "ishld", "ishst", "ish" },
+    { "sy (0b1100)", "ld", "st", "sy" }
+  };
+  int domain = instr->ImmBarrierDomain();
+  int type = instr->ImmBarrierType();
+
+  AppendToOutput("%s", options[domain][type]);
+  return 1;
+}
+
+
+// Reset the output buffer to an empty, NUL-terminated string.
+void Disassembler::ResetOutput() {
+  buffer_pos_ = 0;
+  buffer_[buffer_pos_] = 0;
+}
+
+
+// Append printf-style formatted text to the output buffer, advancing
+// buffer_pos_ past the written text.
+void Disassembler::AppendToOutput(const char* format, ...) {
+  va_list args;
+  va_start(args, format);
+  // Bound the write by the space REMAINING in the buffer, not the total
+  // buffer size: passing buffer_size_ here would let any append made after
+  // the first one run past the end of buffer_ once buffer_pos_ > 0.
+  buffer_pos_ += vsnprintf(&buffer_[buffer_pos_], buffer_size_ - buffer_pos_,
+                           format, args);
+  va_end(args);
+}
+
+
+// Emit one disassembled line to the stream: address, raw encoding, text.
+void PrintDisassembler::ProcessOutput(Instruction* instr) {
+  fprintf(stream_, "0x%016" PRIx64 "  %08" PRIx32 "\t\t%s\n",
+          reinterpret_cast<uint64_t>(instr), instr->InstructionBits(),
+          GetOutput());
+}
+
+} } // namespace v8::internal
+
+
+namespace disasm {
+
+
+// Format an address as "%p" into the shared temporary buffer.  The returned
+// pointer is only valid until the next call using tmp_buffer_.
+const char* NameConverter::NameOfAddress(byte* addr) const {
+  v8::internal::OS::SNPrintF(tmp_buffer_, "%p", addr);
+  return tmp_buffer_.start();
+}
+
+
+// Constants are named the same way as plain addresses.
+const char* NameConverter::NameOfConstant(byte* addr) const {
+  return NameOfAddress(addr);
+}
+
+
+// Name a CPU register by code: out-of-range codes yield "noreg", code 31
+// (kZeroRegCode) is always printed as "xzr", everything else as "x<n>".
+const char* NameConverter::NameOfCPURegister(int reg) const {
+  unsigned ureg = reg;  // Avoid warnings about signed/unsigned comparisons.
+  if (ureg >= v8::internal::kNumberOfRegisters) {
+    return "noreg";
+  }
+  if (ureg == v8::internal::kZeroRegCode) {
+    return "xzr";
+  }
+  // Returned pointer aliases tmp_buffer_; valid until the next call.
+  v8::internal::OS::SNPrintF(tmp_buffer_, "x%u", ureg);
+  return tmp_buffer_.start();
+}
+
+
+// Not applicable on this target; callers should never reach here.
+const char* NameConverter::NameOfByteCPURegister(int reg) const {
+  UNREACHABLE();  // A64 does not have the concept of a byte register
+  return "nobytereg";
+}
+
+
+// Not applicable on this target; callers should never reach here.
+const char* NameConverter::NameOfXMMRegister(int reg) const {
+  UNREACHABLE();  // A64 does not have any XMM registers
+  return "noxmmreg";
+}
+
+
+// Return the in-code name for an address; empty by design (see comment).
+const char* NameConverter::NameInCode(byte* addr) const {
+  // The default name converter is called for unknown code, so we will not try
+  // to access any memory.
+  return "";
+}
+
+
+//------------------------------------------------------------------------------
+
+// Adapter that redirects the internal disassembler's per-instruction output
+// into a caller-provided character buffer instead of a FILE stream.
+class BufferDisassembler : public v8::internal::Disassembler {
+ public:
+  explicit BufferDisassembler(v8::internal::Vector<char> out_buffer)
+      : out_buffer_(out_buffer) { }
+
+  ~BufferDisassembler() { }
+
+  // Copy the current instruction's text into out_buffer_ (overwriting any
+  // previous content; 'instr' itself is not used here).
+  virtual void ProcessOutput(v8::internal::Instruction* instr) {
+    v8::internal::OS::SNPrintF(out_buffer_, "%s", GetOutput());
+  }
+
+ private:
+  v8::internal::Vector<char> out_buffer_;
+};
+
+// Public-API disassembler: only stores the name converter; all real work
+// happens per call in InstructionDecode/Disassemble.
+Disassembler::Disassembler(const NameConverter& converter)
+    : converter_(converter) {}
+
+
+Disassembler::~Disassembler() {}
+
+
+// Disassemble the single instruction at 'instr' into 'buffer'.  Always
+// returns kInstructionSize: A64 instructions are fixed-width.
+int Disassembler::InstructionDecode(v8::internal::Vector<char> buffer,
+                                    byte* instr) {
+  v8::internal::Decoder decoder;
+  BufferDisassembler disasm(buffer);
+  decoder.AppendVisitor(&disasm);
+
+  decoder.Decode(reinterpret_cast<v8::internal::Instruction*>(instr));
+  return v8::internal::kInstructionSize;
+}
+
+
+// Delegate constant-pool detection to the assembler so the disassembler can
+// skip over embedded pool data.
+int Disassembler::ConstantPoolSizeAt(byte* instr) {
+  return v8::internal::Assembler::ConstantPoolSizeAt(
+      reinterpret_cast<v8::internal::Instruction*>(instr));
+}
+
+
+// Disassemble every instruction in [start, end) to 'file', one per line,
+// stepping by the fixed A64 instruction size.
+void Disassembler::Disassemble(FILE* file, byte* start, byte* end) {
+  v8::internal::Decoder decoder;
+  v8::internal::PrintDisassembler disasm(file);
+  decoder.AppendVisitor(&disasm);
+
+  for (byte* pc = start; pc < end; pc += v8::internal::kInstructionSize) {
+    decoder.Decode(reinterpret_cast<v8::internal::Instruction*>(pc));
+  }
+}
+
+} // namespace disasm
+
+#endif // V8_TARGET_ARCH_A64
--- /dev/null
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_A64_DISASM_A64_H
+#define V8_A64_DISASM_A64_H
+
+#include "v8.h"
+
+#include "globals.h"
+#include "utils.h"
+#include "instructions-a64.h"
+#include "decoder-a64.h"
+
+namespace v8 {
+namespace internal {
+
+
+// Decoder visitor that turns each visited instruction into text.  Output is
+// accumulated via AppendToOutput() and handed to ProcessOutput(), which
+// subclasses override to direct it to a stream or buffer.
+class Disassembler: public DecoderVisitor {
+ public:
+  Disassembler();
+  // Use an externally owned buffer instead of allocating one.
+  Disassembler(char* text_buffer, int buffer_size);
+  virtual ~Disassembler();
+  char* GetOutput();
+
+  // Declare all Visitor functions.
+  #define DECLARE(A)  void Visit##A(Instruction* instr);
+  VISITOR_LIST(DECLARE)
+  #undef DECLARE
+
+ protected:
+  // Called once per instruction after its text is complete.
+  virtual void ProcessOutput(Instruction* instr);
+
+  // Expand 'format' (a template with placeholders such as 'Rd, 'IMoveImm)
+  // for 'instr'; the Substitute*Field helpers each consume one placeholder
+  // and return the number of format characters eaten.
+  void Format(Instruction* instr, const char* mnemonic, const char* format);
+  void Substitute(Instruction* instr, const char* string);
+  int SubstituteField(Instruction* instr, const char* format);
+  int SubstituteRegisterField(Instruction* instr, const char* format);
+  int SubstituteImmediateField(Instruction* instr, const char* format);
+  int SubstituteLiteralField(Instruction* instr, const char* format);
+  int SubstituteBitfieldImmediateField(Instruction* instr, const char* format);
+  int SubstituteShiftField(Instruction* instr, const char* format);
+  int SubstituteExtendField(Instruction* instr, const char* format);
+  int SubstituteConditionField(Instruction* instr, const char* format);
+  int SubstitutePCRelAddressField(Instruction* instr, const char* format);
+  int SubstituteBranchTargetField(Instruction* instr, const char* format);
+  int SubstituteLSRegOffsetField(Instruction* instr, const char* format);
+  int SubstitutePrefetchField(Instruction* instr, const char* format);
+  int SubstituteBarrierField(Instruction* instr, const char* format);
+
+  // Despite the names, these test only against kZeroRegCode; register 31
+  // means ZR or SP depending on the instruction context.
+  bool RdIsZROrSP(Instruction* instr) const {
+    return (instr->Rd() == kZeroRegCode);
+  }
+
+  bool RnIsZROrSP(Instruction* instr) const {
+    return (instr->Rn() == kZeroRegCode);
+  }
+
+  bool RmIsZROrSP(Instruction* instr) const {
+    return (instr->Rm() == kZeroRegCode);
+  }
+
+  bool RaIsZROrSP(Instruction* instr) const {
+    return (instr->Ra() == kZeroRegCode);
+  }
+
+  bool IsMovzMovnImm(unsigned reg_size, uint64_t value);
+
+  void ResetOutput();
+  void AppendToOutput(const char* string, ...);
+
+  char* buffer_;          // Output text (owned iff own_buffer_).
+  uint32_t buffer_pos_;   // Write cursor into buffer_.
+  uint32_t buffer_size_;  // Total capacity of buffer_.
+  bool own_buffer_;       // Whether the destructor should free buffer_.
+};
+
+
+// Disassembler that writes each decoded instruction to a FILE stream.
+// The stream is borrowed, not owned: it is never closed here.
+class PrintDisassembler: public Disassembler {
+ public:
+  explicit PrintDisassembler(FILE* stream) : stream_(stream) { }
+  ~PrintDisassembler() { }
+
+  virtual void ProcessOutput(Instruction* instr);
+
+ private:
+  FILE *stream_;
+};
+
+
+} } // namespace v8::internal
+
+#endif // V8_A64_DISASM_A64_H
--- /dev/null
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#if V8_TARGET_ARCH_A64
+
+#include "assembler.h"
+#include "assembler-a64.h"
+#include "assembler-a64-inl.h"
+#include "frames.h"
+
+namespace v8 {
+namespace internal {
+
+
+// Map the architecture-independent frame queries onto the A64 register
+// aliases: 'fp' is the frame pointer, 'cp' the JS context register.
+Register JavaScriptFrame::fp_register() { return v8::internal::fp; }
+Register JavaScriptFrame::context_register() { return cp; }
+
+
+Register StubFailureTrampolineFrame::fp_register() { return v8::internal::fp; }
+Register StubFailureTrampolineFrame::context_register() { return cp; }
+
+
+// A64 exit frames have no constant-pool slot; this must never be called.
+// The return statement only exists to satisfy the signature (UNREACHABLE()
+// aborts first in debug builds; the NULL deref is never meant to execute).
+Object*& ExitFrame::constant_pool_slot() const {
+  UNREACHABLE();
+  return Memory::Object_at(NULL);
+}
+
+
+} } // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_A64
--- /dev/null
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// The include guard must enclose the whole header, includes first: with the
+// guard after the #includes, every repeated inclusion re-processes both
+// headers before the guard short-circuits.
+#ifndef V8_A64_FRAMES_A64_H_
+#define V8_A64_FRAMES_A64_H_
+
+#include "a64/constants-a64.h"
+#include "a64/assembler-a64.h"
+
+namespace v8 {
+namespace internal {
+
+const int kNumRegs = kNumberOfRegisters;
+// Registers x0-x17 are caller-saved.
+const int kNumJSCallerSaved = 18;
+const RegList kJSCallerSaved = 0x3ffff;  // Bit mask for x0-x17.
+typedef Object* JSCallerSavedBuffer[kNumJSCallerSaved];
+
+// Number of registers for which space is reserved in safepoints. Must be a
+// multiple of eight.
+// TODO(all): Refine this number.
+const int kNumSafepointRegisters = 32;
+
+// Define the list of registers actually saved at safepoints.
+// Note that the number of saved registers may be smaller than the reserved
+// space, i.e. kNumSafepointSavedRegisters <= kNumSafepointRegisters.
+#define kSafepointSavedRegisters CPURegList::GetSafepointSavedRegisters().list()
+// NOTE(review): the trailing ';' is baked into this macro, so it only works
+// where a spare semicolon is harmless (statement context) -- confirm intended.
+#define kNumSafepointSavedRegisters \
+  CPURegList::GetSafepointSavedRegisters().Count();
+
+// FP-relative offsets for JS entry frames (built by JSEntryStub).
+class EntryFrameConstants : public AllStatic {
+ public:
+  // The caller's saved fp sits just below the fixed part of the frame.
+  static const int kCallerFPOffset =
+      -(StandardFrameConstants::kFixedFrameSizeFromFp + kPointerSize);
+};
+
+
+// Layout of exit frames (JS -> C++ transitions), offsets relative to fp.
+class ExitFrameConstants : public AllStatic {
+ public:
+  static const int kFrameSize            =  2 * kPointerSize;
+
+  static const int kCallerSPDisplacement =  2 * kPointerSize;
+  static const int kCallerPCOffset       =  1 * kPointerSize;
+  static const int kCallerFPOffset       =  0 * kPointerSize;   // <- fp
+  static const int kSPOffset             = -1 * kPointerSize;
+  static const int kCodeOffset           = -2 * kPointerSize;
+  static const int kLastExitFrameField   = kCodeOffset;
+};
+
+
+// FP-relative offsets into standard JavaScript frames.
+class JavaScriptFrameConstants : public AllStatic {
+ public:
+  // FP-relative.
+  static const int kLocal0Offset = StandardFrameConstants::kExpressionsOffset;
+
+  // There are two words on the stack (saved fp and saved lr) between fp and
+  // the arguments.
+  static const int kLastParameterOffset = 2 * kPointerSize;
+
+  static const int kFunctionOffset = StandardFrameConstants::kMarkerOffset;
+};
+
+
+// Offsets for arguments-adaptor frames (actual != formal argument count).
+class ArgumentsAdaptorFrameConstants : public AllStatic {
+ public:
+  // FP-relative.
+  static const int kLengthOffset = StandardFrameConstants::kExpressionsOffset;
+
+  // Fixed part plus one extra slot (the argument count).
+  static const int kFrameSize =
+      StandardFrameConstants::kFixedFrameSize + kPointerSize;
+};
+
+
+// Offsets for construct frames ('new' calls); four extra expression slots.
+class ConstructFrameConstants : public AllStatic {
+ public:
+  // FP-relative.
+  static const int kCodeOffset = StandardFrameConstants::kExpressionsOffset;
+  static const int kLengthOffset           = -4 * kPointerSize;
+  static const int kConstructorOffset      = -5 * kPointerSize;
+  static const int kImplicitReceiverOffset = -6 * kPointerSize;
+
+  static const int kFrameSize =
+      StandardFrameConstants::kFixedFrameSize + 4 * kPointerSize;
+};
+
+
+// Offsets for internal (runtime-generated) frames.
+class InternalFrameConstants : public AllStatic {
+ public:
+  // FP-relative.
+  static const int kCodeOffset = StandardFrameConstants::kExpressionsOffset;
+};
+
+
+// Load the JSFunction stored in this frame's function slot.
+inline Object* JavaScriptFrame::function_slot_object() const {
+  const int offset = JavaScriptFrameConstants::kFunctionOffset;
+  return Memory::Object_at(fp() + offset);
+}
+
+
+// Write a frame pointer into a stack-handler slot in memory.
+inline void StackHandler::SetFp(Address slot, Address fp) {
+  Memory::Address_at(slot) = fp;
+}
+
+
+} } // namespace v8::internal
+
+#endif // V8_A64_FRAMES_A64_H_
--- /dev/null
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#if V8_TARGET_ARCH_A64
+
+#include "code-stubs.h"
+#include "codegen.h"
+#include "compiler.h"
+#include "debug.h"
+#include "full-codegen.h"
+#include "isolate-inl.h"
+#include "parser.h"
+#include "scopes.h"
+#include "stub-cache.h"
+
+#include "a64/code-stubs-a64.h"
+#include "a64/macro-assembler-a64.h"
+
+namespace v8 {
+namespace internal {
+
+#define __ ACCESS_MASM(masm_)
+
+// Records the location of an inlined smi-check branch so the IC system can
+// later patch it (PatchInlinedSmiCode in ic-a64.cc).  The emitted tbz/tbnz
+// initially tests xzr bit 0 (always 0); patching rewrites it to test the
+// recorded register's tag bit.
+class JumpPatchSite BASE_EMBEDDED {
+ public:
+  explicit JumpPatchSite(MacroAssembler* masm) : masm_(masm), reg_(NoReg) {
+#ifdef DEBUG
+    info_emitted_ = false;
+#endif
+  }
+
+  ~JumpPatchSite() {
+    // Either the site was bound and its patch info emitted, or it was never
+    // used at all (reg_ still NoReg).
+    if (patch_site_.is_bound()) {
+      ASSERT(info_emitted_);
+    } else {
+      ASSERT(reg_.IsNone());
+    }
+  }
+
+  void EmitJumpIfNotSmi(Register reg, Label* target) {
+    // This code will be patched by PatchInlinedSmiCode, in ic-a64.cc.
+    InstructionAccurateScope scope(masm_, 1);
+    ASSERT(!info_emitted_);
+    ASSERT(reg.Is64Bits());
+    ASSERT(!reg.Is(csp));
+    reg_ = reg;
+    __ bind(&patch_site_);
+    __ tbz(xzr, 0, target);   // Always taken before patched.
+  }
+
+  void EmitJumpIfSmi(Register reg, Label* target) {
+    // This code will be patched by PatchInlinedSmiCode, in ic-a64.cc.
+    InstructionAccurateScope scope(masm_, 1);
+    ASSERT(!info_emitted_);
+    ASSERT(reg.Is64Bits());
+    ASSERT(!reg.Is(csp));
+    reg_ = reg;
+    __ bind(&patch_site_);
+    __ tbnz(xzr, 0, target);  // Never taken before patched.
+  }
+
+  void EmitJumpIfEitherNotSmi(Register reg1, Register reg2, Label* target) {
+    // OR the tags together: the result is a smi only if both are smis.
+    // We need to use ip0, so don't allow access to the MacroAssembler.
+    InstructionAccurateScope scope(masm_);
+    __ orr(ip0, reg1, reg2);
+    EmitJumpIfNotSmi(ip0, target);
+  }
+
+  void EmitPatchInfo() {
+    Assembler::BlockConstPoolScope scope(masm_);
+    InlineSmiCheckInfo::Emit(masm_, reg_, &patch_site_);
+#ifdef DEBUG
+    info_emitted_ = true;
+#endif
+  }
+
+ private:
+  MacroAssembler* masm_;
+  Label patch_site_;   // Position of the patchable tbz/tbnz.
+  Register reg_;       // Register whose smi tag the patched check will test.
+#ifdef DEBUG
+  bool info_emitted_;
+#endif
+};
+
+
+// Generate code for a JS function. On entry to the function the receiver
+// and arguments have been pushed on the stack left to right.  The actual
+// argument count matches the formal parameter count expected by the
+// function.
+//
+// The live registers are:
+//   - x1: the JS function object being called (i.e. ourselves).
+//   - cp: our context.
+//   - fp: our caller's frame pointer.
+//   - jssp: stack pointer.
+//   - lr: return address.
+//
+// The function builds a JS frame. See JavaScriptFrameConstants in
+// frames-a64.h for its layout.
+void FullCodeGenerator::Generate() {
+  CompilationInfo* info = info_;
+  handler_table_ =
+      isolate()->factory()->NewFixedArray(function()->handler_count(), TENURED);
+
+  InitializeFeedbackVector();
+
+  profiling_counter_ = isolate()->factory()->NewCell(
+      Handle<Smi>(Smi::FromInt(FLAG_interrupt_budget), isolate()));
+  SetFunctionPosition(function());
+  Comment cmnt(masm_, "[ Function compiled by full code generator");
+
+  ProfileEntryHookStub::MaybeCallEntryHook(masm_);
+
+#ifdef DEBUG
+  if (strlen(FLAG_stop_at) > 0 &&
+      info->function()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) {
+    __ Debug("stop-at", __LINE__, BREAK);
+  }
+#endif
+
+  // Classic mode functions and builtins need to replace the receiver with the
+  // global proxy when called as functions (without an explicit receiver
+  // object).
+  if (info->is_classic_mode() && !info->is_native()) {
+    Label ok;
+    int receiver_offset = info->scope()->num_parameters() * kXRegSizeInBytes;
+    __ Peek(x10, receiver_offset);
+    __ JumpIfNotRoot(x10, Heap::kUndefinedValueRootIndex, &ok);
+
+    __ Ldr(x10, GlobalObjectMemOperand());
+    __ Ldr(x10, FieldMemOperand(x10, GlobalObject::kGlobalReceiverOffset));
+    __ Poke(x10, receiver_offset);
+
+    __ Bind(&ok);
+  }
+
+
+  // Open a frame scope to indicate that there is a frame on the stack.
+  // The MANUAL indicates that the scope shouldn't actually generate code
+  // to set up the frame because we do it manually below.
+  FrameScope frame_scope(masm_, StackFrame::MANUAL);
+
+  // This call emits the following sequence in a way that can be patched for
+  // code ageing support:
+  //  Push(lr, fp, cp, x1);
+  //  Add(fp, jssp, 2 * kPointerSize);
+  info->set_prologue_offset(masm_->pc_offset());
+  __ Prologue(BUILD_FUNCTION_FRAME);
+  info->AddNoFrameRange(0, masm_->pc_offset());
+
+  // Reserve space on the stack for locals.
+  { Comment cmnt(masm_, "[ Allocate locals");
+    int locals_count = info->scope()->num_stack_slots();
+    // Generators allocate locals, if any, in context slots.
+    ASSERT(!info->function()->is_generator() || locals_count == 0);
+
+    if (locals_count > 0) {
+      __ LoadRoot(x10, Heap::kUndefinedValueRootIndex);
+      __ PushMultipleTimes(locals_count, x10);
+    }
+  }
+
+  // Tracks whether x1 still holds the function, so it need not be reloaded.
+  bool function_in_register_x1 = true;
+
+  int heap_slots = info->scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
+  if (heap_slots > 0) {
+    // Argument to NewContext is the function, which is still in x1.
+    Comment cmnt(masm_, "[ Allocate context");
+    if (FLAG_harmony_scoping && info->scope()->is_global_scope()) {
+      __ Mov(x10, Operand(info->scope()->GetScopeInfo()));
+      __ Push(x1, x10);
+      __ CallRuntime(Runtime::kNewGlobalContext, 2);
+    } else if (heap_slots <= FastNewContextStub::kMaximumSlots) {
+      FastNewContextStub stub(heap_slots);
+      __ CallStub(&stub);
+    } else {
+      __ Push(x1);
+      __ CallRuntime(Runtime::kNewFunctionContext, 1);
+    }
+    function_in_register_x1 = false;
+    // Context is returned in x0.  It replaces the context passed to us.
+    // It's saved in the stack and kept live in cp.
+    __ Mov(cp, x0);
+    __ Str(x0, MemOperand(fp, StandardFrameConstants::kContextOffset));
+    // Copy any necessary parameters into the context.
+    int num_parameters = info->scope()->num_parameters();
+    for (int i = 0; i < num_parameters; i++) {
+      Variable* var = scope()->parameter(i);
+      if (var->IsContextSlot()) {
+        int parameter_offset = StandardFrameConstants::kCallerSPOffset +
+            (num_parameters - 1 - i) * kPointerSize;
+        // Load parameter from stack.
+        __ Ldr(x10, MemOperand(fp, parameter_offset));
+        // Store it in the context.
+        MemOperand target = ContextMemOperand(cp, var->index());
+        __ Str(x10, target);
+
+        // Update the write barrier.
+        __ RecordWriteContextSlot(
+            cp, target.offset(), x10, x11, kLRHasBeenSaved, kDontSaveFPRegs);
+      }
+    }
+  }
+
+  Variable* arguments = scope()->arguments();
+  if (arguments != NULL) {
+    // Function uses arguments object.
+    Comment cmnt(masm_, "[ Allocate arguments object");
+    if (!function_in_register_x1) {
+      // Load this again, if it's used by the local context below.
+      __ Ldr(x3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+    } else {
+      __ Mov(x3, x1);
+    }
+    // Receiver is just before the parameters on the caller's stack.
+    int num_parameters = info->scope()->num_parameters();
+    int offset = num_parameters * kPointerSize;
+    __ Add(x2, fp, StandardFrameConstants::kCallerSPOffset + offset);
+    __ Mov(x1, Operand(Smi::FromInt(num_parameters)));
+    __ Push(x3, x2, x1);
+
+    // Arguments to ArgumentsAccessStub:
+    //   function, receiver address, parameter count.
+    // The stub will rewrite receiver and parameter count if the previous
+    // stack frame was an arguments adapter frame.
+    ArgumentsAccessStub::Type type;
+    if (!is_classic_mode()) {
+      type = ArgumentsAccessStub::NEW_STRICT;
+    } else if (function()->has_duplicate_parameters()) {
+      type = ArgumentsAccessStub::NEW_NON_STRICT_SLOW;
+    } else {
+      type = ArgumentsAccessStub::NEW_NON_STRICT_FAST;
+    }
+    ArgumentsAccessStub stub(type);
+    __ CallStub(&stub);
+
+    SetVar(arguments, x0, x1, x2);
+  }
+
+  if (FLAG_trace) {
+    __ CallRuntime(Runtime::kTraceEnter, 0);
+  }
+
+
+  // Visit the declarations and body unless there is an illegal
+  // redeclaration.
+  if (scope()->HasIllegalRedeclaration()) {
+    Comment cmnt(masm_, "[ Declarations");
+    scope()->VisitIllegalRedeclaration(this);
+
+  } else {
+    PrepareForBailoutForId(BailoutId::FunctionEntry(), NO_REGISTERS);
+    { Comment cmnt(masm_, "[ Declarations");
+      if (scope()->is_function_scope() && scope()->function() != NULL) {
+        VariableDeclaration* function = scope()->function();
+        ASSERT(function->proxy()->var()->mode() == CONST ||
+               function->proxy()->var()->mode() == CONST_HARMONY);
+        ASSERT(function->proxy()->var()->location() != Variable::UNALLOCATED);
+        VisitVariableDeclaration(function);
+      }
+      VisitDeclarations(scope()->declarations());
+    }
+  }
+
+  { Comment cmnt(masm_, "[ Stack check");
+    PrepareForBailoutForId(BailoutId::Declarations(), NO_REGISTERS);
+    Label ok;
+    ASSERT(jssp.Is(__ StackPointer()));
+    __ CompareRoot(jssp, Heap::kStackLimitRootIndex);
+    __ B(hs, &ok);
+    PredictableCodeSizeScope predictable(masm_,
+                                         Assembler::kCallSizeWithRelocation);
+    __ Call(isolate()->builtins()->StackCheck(), RelocInfo::CODE_TARGET);
+    __ Bind(&ok);
+  }
+
+  { Comment cmnt(masm_, "[ Body");
+    ASSERT(loop_depth() == 0);
+    VisitStatements(function()->body());
+    ASSERT(loop_depth() == 0);
+  }
+
+  // Always emit a 'return undefined' in case control fell off the end of
+  // the body.
+  { Comment cmnt(masm_, "[ return <undefined>;");
+    __ LoadRoot(x0, Heap::kUndefinedValueRootIndex);
+  }
+  EmitReturnSequence();
+
+  // Force emit the constant pool, so it doesn't get emitted in the middle
+  // of the back edge table.
+  masm()->CheckConstPool(true, false);
+}
+
+
+// Reset the accumulator register (x0) to Smi zero so no stale value can be
+// observed by subsequent code (e.g. at bailout points).
+void FullCodeGenerator::ClearAccumulator() {
+  __ Mov(x0, Operand(Smi::FromInt(0)));
+}
+
+
+// Subtract |delta| (as a Smi) from the profiling counter cell. Uses Subs so
+// the condition flags reflect the new value; callers branch on pl/mi to
+// detect when the counter has gone negative. Clobbers x2 and x3.
+void FullCodeGenerator::EmitProfilingCounterDecrement(int delta) {
+  __ Mov(x2, Operand(profiling_counter_));
+  __ Ldr(x3, FieldMemOperand(x2, Cell::kValueOffset));
+  __ Subs(x3, x3, Operand(Smi::FromInt(delta)));
+  __ Str(x3, FieldMemOperand(x2, Cell::kValueOffset));
+}
+
+
+// Reload the profiling counter cell with the interrupt budget (as a Smi).
+// Clobbers x2 and x3.
+void FullCodeGenerator::EmitProfilingCounterReset() {
+  int reset_value = FLAG_interrupt_budget;
+  if (isolate()->IsDebuggerActive()) {
+    // Detect debug break requests as soon as possible.
+    reset_value = FLAG_interrupt_budget >> 4;
+  }
+  __ Mov(x2, Operand(profiling_counter_));
+  __ Mov(x3, Operand(Smi::FromInt(reset_value)));
+  __ Str(x3, FieldMemOperand(x2, Cell::kValueOffset));
+}
+
+
+// Emitted at each loop back edge: decrement the profiling counter by a
+// weight proportional to the loop body's code size and, if it underflows,
+// call the InterruptCheck builtin (which may trigger OSR) before resetting
+// the counter. Also records bailout points for the loop entry and OSR id.
+void FullCodeGenerator::EmitBackEdgeBookkeeping(IterationStatement* stmt,
+                                                Label* back_edge_target) {
+  ASSERT(jssp.Is(__ StackPointer()));
+  Comment cmnt(masm_, "[ Back edge bookkeeping");
+  // Block literal pools whilst emitting back edge code.
+  Assembler::BlockConstPoolScope block_const_pool(masm_);
+  Label ok;
+
+  ASSERT(back_edge_target->is_bound());
+  // Weight scales with the distance back to the loop head, so bigger loop
+  // bodies consume the interrupt budget faster.
+  int distance = masm_->SizeOfCodeGeneratedSince(back_edge_target);
+  int weight = Min(kMaxBackEdgeWeight,
+                   Max(1, distance / kCodeSizeMultiplier));
+  EmitProfilingCounterDecrement(weight);
+  // pl: counter still non-negative, skip the interrupt check.
+  __ B(pl, &ok);
+  __ Call(isolate()->builtins()->InterruptCheck(), RelocInfo::CODE_TARGET);
+
+  // Record a mapping of this PC offset to the OSR id.  This is used to find
+  // the AST id from the unoptimized code in order to use it as a key into
+  // the deoptimization input data found in the optimized code.
+  RecordBackEdge(stmt->OsrEntryId());
+
+  EmitProfilingCounterReset();
+
+  __ Bind(&ok);
+  PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS);
+  // Record a mapping of the OSR id to this PC.  This is used if the OSR
+  // entry becomes the target of a bailout.  We don't expect it to be, but
+  // we want it to work if it is.
+  PrepareForBailoutForId(stmt->OsrEntryId(), NO_REGISTERS);
+}
+
+
+// Emit the function's return sequence. The first caller binds return_label_
+// and emits the real sequence; later callers just branch to it. The actual
+// exit sequence is emitted instruction-accurately because the debugger
+// patches it in place (see the comment below), so its size must be exactly
+// Assembler::kJSRetSequenceInstructions.
+void FullCodeGenerator::EmitReturnSequence() {
+  Comment cmnt(masm_, "[ Return sequence");
+
+  if (return_label_.is_bound()) {
+    __ B(&return_label_);
+
+  } else {
+    __ Bind(&return_label_);
+    if (FLAG_trace) {
+      // Push the return value on the stack as the parameter.
+      // Runtime::TraceExit returns its parameter in x0.
+      __ Push(result_register());
+      __ CallRuntime(Runtime::kTraceExit, 1);
+      ASSERT(x0.Is(result_register()));
+    }
+    // Pretend that the exit is a backwards jump to the entry.
+    int weight = 1;
+    if (info_->ShouldSelfOptimize()) {
+      weight = FLAG_interrupt_budget / FLAG_self_opt_count;
+    } else {
+      int distance = masm_->pc_offset();
+      weight = Min(kMaxBackEdgeWeight,
+                   Max(1, distance / kCodeSizeMultiplier));
+    }
+    EmitProfilingCounterDecrement(weight);
+    Label ok;
+    __ B(pl, &ok);
+    // Preserve the return value (x0) across the interrupt call.
+    __ Push(x0);
+    __ Call(isolate()->builtins()->InterruptCheck(),
+            RelocInfo::CODE_TARGET);
+    __ Pop(x0);
+    EmitProfilingCounterReset();
+    __ Bind(&ok);
+
+    // Make sure that the constant pool is not emitted inside of the return
+    // sequence. This sequence can get patched when the debugger is used. See
+    // debug-a64.cc:BreakLocationIterator::SetDebugBreakAtReturn().
+    {
+      InstructionAccurateScope scope(masm_,
+                                     Assembler::kJSRetSequenceInstructions);
+      CodeGenerator::RecordPositions(masm_, function()->end_position() - 1);
+      __ RecordJSReturn();
+      // This code is generated using Assembler methods rather than Macro
+      // Assembler methods because it will be patched later on, and so the size
+      // of the generated code must be consistent.
+      const Register& current_sp = __ StackPointer();
+      // Nothing ensures 16 bytes alignment here.
+      ASSERT(!current_sp.Is(csp));
+      __ mov(current_sp, fp);
+      int no_frame_start = masm_->pc_offset();
+      __ ldp(fp, lr, MemOperand(current_sp, 2 * kXRegSizeInBytes, PostIndex));
+      // Drop the arguments and receiver and return.
+      // TODO(all): This implementation is overkill as it supports 2**31+1
+      // arguments, consider how to improve it without creating a security
+      // hole.
+      // The argument count to drop is stored as a 64-bit literal immediately
+      // after the ret (3 instructions ahead: add, ret, then the dc64 below).
+      __ LoadLiteral(ip0, 3 * kInstructionSize);
+      __ add(current_sp, current_sp, ip0);
+      __ ret();
+      __ dc64(kXRegSizeInBytes * (info_->scope()->num_parameters() + 1));
+      info_->AddNoFrameRange(no_frame_start, masm_->pc_offset());
+    }
+  }
+}
+
+
+// Effect context: the variable's value is not needed, so emit nothing.
+void FullCodeGenerator::EffectContext::Plug(Variable* var) const {
+  ASSERT(var->IsStackAllocated() || var->IsContextSlot());
+}
+
+
+// Accumulator context: load the variable's value into the result register.
+void FullCodeGenerator::AccumulatorValueContext::Plug(Variable* var) const {
+  ASSERT(var->IsStackAllocated() || var->IsContextSlot());
+  codegen()->GetVar(result_register(), var);
+}
+
+
+// Stack context: load the variable's value and push it on the stack.
+void FullCodeGenerator::StackValueContext::Plug(Variable* var) const {
+  ASSERT(var->IsStackAllocated() || var->IsContextSlot());
+  codegen()->GetVar(result_register(), var);
+  __ Push(result_register());
+}
+
+
+// Test context: load the variable into the accumulator and branch on its
+// boolean value via DoTest.
+void FullCodeGenerator::TestContext::Plug(Variable* var) const {
+  ASSERT(var->IsStackAllocated() || var->IsContextSlot());
+  // For simplicity we always test the accumulator register.
+  codegen()->GetVar(result_register(), var);
+  codegen()->PrepareForBailoutBeforeSplit(condition(), false, NULL, NULL);
+  codegen()->DoTest(this);
+}
+
+
+// Effect context: a root constant has no side effects, so emit nothing.
+void FullCodeGenerator::EffectContext::Plug(Heap::RootListIndex index) const {
+  // Root values have no side effects.
+}
+
+
+// Accumulator context: load the root constant into the result register.
+void FullCodeGenerator::AccumulatorValueContext::Plug(
+    Heap::RootListIndex index) const {
+  __ LoadRoot(result_register(), index);
+}
+
+
+// Stack context: load the root constant and push it on the stack.
+void FullCodeGenerator::StackValueContext::Plug(
+    Heap::RootListIndex index) const {
+  __ LoadRoot(result_register(), index);
+  __ Push(result_register());
+}
+
+
+// Test context: branch directly on roots with a statically-known boolean
+// value (undefined/null/false are falsy, true is truthy); otherwise load
+// the value and fall back to the generic DoTest.
+void FullCodeGenerator::TestContext::Plug(Heap::RootListIndex index) const {
+  codegen()->PrepareForBailoutBeforeSplit(condition(), true, true_label_,
+                                          false_label_);
+  if (index == Heap::kUndefinedValueRootIndex ||
+      index == Heap::kNullValueRootIndex ||
+      index == Heap::kFalseValueRootIndex) {
+    if (false_label_ != fall_through_) __ B(false_label_);
+  } else if (index == Heap::kTrueValueRootIndex) {
+    if (true_label_ != fall_through_) __ B(true_label_);
+  } else {
+    __ LoadRoot(result_register(), index);
+    codegen()->DoTest(this);
+  }
+}
+
+
+// Effect context: a literal has no side effects, so emit nothing.
+void FullCodeGenerator::EffectContext::Plug(Handle<Object> lit) const {
+}
+
+
+// Accumulator context: materialize the literal into the result register.
+void FullCodeGenerator::AccumulatorValueContext::Plug(
+    Handle<Object> lit) const {
+  __ Mov(result_register(), Operand(lit));
+}
+
+
+// Stack context: materialize the literal via the result register, then push.
+void FullCodeGenerator::StackValueContext::Plug(Handle<Object> lit) const {
+  // Immediates cannot be pushed directly.
+  __ Mov(result_register(), Operand(lit));
+  __ Push(result_register());
+}
+
+
+// Test context: branch on literals whose boolean value is known at compile
+// time (undefined/null/false, true, JSObject, string length, Smi value);
+// only unknown cases (e.g. heap numbers) go through the generic DoTest.
+void FullCodeGenerator::TestContext::Plug(Handle<Object> lit) const {
+  codegen()->PrepareForBailoutBeforeSplit(condition(),
+                                          true,
+                                          true_label_,
+                                          false_label_);
+  ASSERT(!lit->IsUndetectableObject());  // There are no undetectable literals.
+  if (lit->IsUndefined() || lit->IsNull() || lit->IsFalse()) {
+    if (false_label_ != fall_through_) __ B(false_label_);
+  } else if (lit->IsTrue() || lit->IsJSObject()) {
+    if (true_label_ != fall_through_) __ B(true_label_);
+  } else if (lit->IsString()) {
+    // The empty string is falsy; any other string is truthy.
+    if (String::cast(*lit)->length() == 0) {
+      if (false_label_ != fall_through_) __ B(false_label_);
+    } else {
+      if (true_label_ != fall_through_) __ B(true_label_);
+    }
+  } else if (lit->IsSmi()) {
+    // Smi zero is falsy; any other Smi is truthy.
+    if (Smi::cast(*lit)->value() == 0) {
+      if (false_label_ != fall_through_) __ B(false_label_);
+    } else {
+      if (true_label_ != fall_through_) __ B(true_label_);
+    }
+  } else {
+    // For simplicity we always test the accumulator register.
+    __ Mov(result_register(), Operand(lit));
+    codegen()->DoTest(this);
+  }
+}
+
+
+// Effect context: discard |count| stack slots; the value in |reg| is unused.
+void FullCodeGenerator::EffectContext::DropAndPlug(int count,
+                                                   Register reg) const {
+  ASSERT(count > 0);
+  __ Drop(count);
+}
+
+
+// Accumulator context: discard |count| stack slots and move |reg| into the
+// result register.
+void FullCodeGenerator::AccumulatorValueContext::DropAndPlug(
+    int count,
+    Register reg) const {
+  ASSERT(count > 0);
+  __ Drop(count);
+  __ Move(result_register(), reg);
+}
+
+
+// Stack context: replace the top |count| stack slots with the value in
+// |reg| (drop count-1 slots, then overwrite the remaining top slot).
+void FullCodeGenerator::StackValueContext::DropAndPlug(int count,
+                                                       Register reg) const {
+  ASSERT(count > 0);
+  if (count > 1) __ Drop(count - 1);
+  __ Poke(reg, 0);
+}
+
+
+// Test context: discard |count| stack slots, move |reg| into the
+// accumulator, and branch on its boolean value via DoTest.
+void FullCodeGenerator::TestContext::DropAndPlug(int count,
+                                                 Register reg) const {
+  ASSERT(count > 0);
+  // For simplicity we always test the accumulator register.
+  __ Drop(count);
+  __ Mov(result_register(), reg);
+  codegen()->PrepareForBailoutBeforeSplit(condition(), false, NULL, NULL);
+  codegen()->DoTest(this);
+}
+
+
+// Effect context: no value to materialize; both labels must be the same
+// join point, which is simply bound here.
+void FullCodeGenerator::EffectContext::Plug(Label* materialize_true,
+                                            Label* materialize_false) const {
+  ASSERT(materialize_true == materialize_false);
+  __ Bind(materialize_true);
+}
+
+
+// Accumulator context: materialize true/false into the result register at
+// the respective incoming labels, joining at |done|.
+void FullCodeGenerator::AccumulatorValueContext::Plug(
+    Label* materialize_true,
+    Label* materialize_false) const {
+  Label done;
+  __ Bind(materialize_true);
+  __ LoadRoot(result_register(), Heap::kTrueValueRootIndex);
+  __ B(&done);
+  __ Bind(materialize_false);
+  __ LoadRoot(result_register(), Heap::kFalseValueRootIndex);
+  __ Bind(&done);
+}
+
+
+// Stack context: materialize true/false into a scratch register (x10) at
+// the respective incoming labels, then push the result at the join point.
+void FullCodeGenerator::StackValueContext::Plug(
+    Label* materialize_true,
+    Label* materialize_false) const {
+  Label done;
+  __ Bind(materialize_true);
+  __ LoadRoot(x10, Heap::kTrueValueRootIndex);
+  __ B(&done);
+  __ Bind(materialize_false);
+  __ LoadRoot(x10, Heap::kFalseValueRootIndex);
+  __ Bind(&done);
+  __ Push(x10);
+}
+
+
+// Test context: nothing to materialize — control flow already targets the
+// context's own true/false labels; just verify they match.
+void FullCodeGenerator::TestContext::Plug(Label* materialize_true,
+                                          Label* materialize_false) const {
+  ASSERT(materialize_true == true_label_);
+  ASSERT(materialize_false == false_label_);
+}
+
+
+// Effect context: a boolean constant has no side effects, so emit nothing.
+void FullCodeGenerator::EffectContext::Plug(bool flag) const {
+}
+
+
+// Accumulator context: load the true/false root for |flag| into the result
+// register.
+void FullCodeGenerator::AccumulatorValueContext::Plug(bool flag) const {
+  Heap::RootListIndex value_root_index =
+      flag ? Heap::kTrueValueRootIndex : Heap::kFalseValueRootIndex;
+  __ LoadRoot(result_register(), value_root_index);
+}
+
+
+// Stack context: load the true/false root for |flag| into x10 and push it.
+void FullCodeGenerator::StackValueContext::Plug(bool flag) const {
+  Heap::RootListIndex value_root_index =
+      flag ? Heap::kTrueValueRootIndex : Heap::kFalseValueRootIndex;
+  __ LoadRoot(x10, value_root_index);
+  __ Push(x10);
+}
+
+
+// Test context: the outcome is known statically, so branch straight to the
+// matching label (omitting the branch when it would just fall through).
+void FullCodeGenerator::TestContext::Plug(bool flag) const {
+  codegen()->PrepareForBailoutBeforeSplit(condition(),
+                                          true,
+                                          true_label_,
+                                          false_label_);
+  if (flag) {
+    if (true_label_ != fall_through_) {
+      __ B(true_label_);
+    }
+  } else {
+    if (false_label_ != fall_through_) {
+      __ B(false_label_);
+    }
+  }
+}
+
+
+// Convert the value in the accumulator to a boolean via the ToBoolean IC
+// and split control flow: the IC's result is non-zero for "true".
+void FullCodeGenerator::DoTest(Expression* condition,
+                               Label* if_true,
+                               Label* if_false,
+                               Label* fall_through) {
+  Handle<Code> ic = ToBooleanStub::GetUninitialized(isolate());
+  CallIC(ic, condition->test_id());
+  __ CompareAndSplit(result_register(), 0, ne, if_true, if_false, fall_through);
+}
+
+
+// If (cond), branch to if_true.
+// If (!cond), branch to if_false.
+// fall_through is used as an optimization in cases where only one branch
+// instruction is necessary: when one of the targets immediately follows
+// this code, the branch to it is omitted (possibly by inverting the
+// condition for the other target).
+void FullCodeGenerator::Split(Condition cond,
+                              Label* if_true,
+                              Label* if_false,
+                              Label* fall_through) {
+  if (if_false == fall_through) {
+    __ B(cond, if_true);
+  } else if (if_true == fall_through) {
+    ASSERT(if_false != fall_through);
+    __ B(InvertCondition(cond), if_false);
+  } else {
+    __ B(cond, if_true);
+    __ B(if_false);
+  }
+}
+
+
+// Return an fp-relative MemOperand addressing the stack slot of a
+// stack-allocated variable (parameter or local).
+MemOperand FullCodeGenerator::StackOperand(Variable* var) {
+  // Offset is negative because higher indexes are at lower addresses.
+  int offset = -var->index() * kXRegSizeInBytes;
+  // Adjust by a (parameter or local) base offset.
+  if (var->IsParameter()) {
+    // Parameters live above the frame; +1 accounts for the receiver.
+    offset += (info_->scope()->num_parameters() + 1) * kPointerSize;
+  } else {
+    offset += JavaScriptFrameConstants::kLocal0Offset;
+  }
+  return MemOperand(fp, offset);
+}
+
+
+// Return a MemOperand for |var|, walking the context chain into |scratch|
+// for context slots, or addressing the frame for stack-allocated variables.
+// |scratch| is only clobbered in the context-slot case.
+MemOperand FullCodeGenerator::VarOperand(Variable* var, Register scratch) {
+  ASSERT(var->IsContextSlot() || var->IsStackAllocated());
+  if (var->IsContextSlot()) {
+    int context_chain_length = scope()->ContextChainLength(var->scope());
+    __ LoadContext(scratch, context_chain_length);
+    return ContextMemOperand(scratch, var->index());
+  } else {
+    return StackOperand(var);
+  }
+}
+
+
+// Load the value of |var| into |dest|. |dest| doubles as the scratch
+// register for the context walk, which is safe because it is overwritten
+// by the final load anyway.
+void FullCodeGenerator::GetVar(Register dest, Variable* var) {
+  // Use destination as scratch.
+  MemOperand location = VarOperand(var, dest);
+  __ Ldr(dest, location);
+}
+
+
+// Store |src| into |var|. For context slots, also emit the write barrier
+// (the context object ends up in scratch0 via VarOperand). The three
+// registers must all be distinct.
+void FullCodeGenerator::SetVar(Variable* var,
+                               Register src,
+                               Register scratch0,
+                               Register scratch1) {
+  ASSERT(var->IsContextSlot() || var->IsStackAllocated());
+  ASSERT(!AreAliased(src, scratch0, scratch1));
+  MemOperand location = VarOperand(var, scratch0);
+  __ Str(src, location);
+
+  // Emit the write barrier code if the location is in the heap.
+  if (var->IsContextSlot()) {
+    // scratch0 contains the correct context.
+    __ RecordWriteContextSlot(scratch0,
+                              location.offset(),
+                              src,
+                              scratch1,
+                              kLRHasBeenSaved,
+                              kDontSaveFPRegs);
+  }
+}
+
+
+// Record a bailout point for |expr| before control flow splits in a test
+// context. When |should_normalize| is set, the bailout path normalizes the
+// value on top of stack to a branch (compare against the true root), while
+// the normal path skips over that code entirely.
+void FullCodeGenerator::PrepareForBailoutBeforeSplit(Expression* expr,
+                                                     bool should_normalize,
+                                                     Label* if_true,
+                                                     Label* if_false) {
+  // Only prepare for bailouts before splits if we're in a test
+  // context. Otherwise, we let the Visit function deal with the
+  // preparation to avoid preparing with the same AST id twice.
+  if (!context()->IsTest() || !info_->IsOptimizable()) return;
+
+  // TODO(all): Investigate to see if there is something to work on here.
+  Label skip;
+  if (should_normalize) {
+    __ B(&skip);
+  }
+  PrepareForBailout(expr, TOS_REG);
+  if (should_normalize) {
+    __ CompareRoot(x0, Heap::kTrueValueRootIndex);
+    Split(eq, if_true, if_false, NULL);
+    __ Bind(&skip);
+  }
+}
+
+
+// Debug-mode sanity check that |variable| is declared in the current
+// function context (cp), i.e. not inside a with or catch context. Emits
+// nothing in release builds without --debug-code.
+void FullCodeGenerator::EmitDebugCheckDeclarationContext(Variable* variable) {
+  // The variable in the declaration always resides in the current function
+  // context.
+  ASSERT_EQ(0, scope()->ContextChainLength(variable->scope()));
+  if (generate_debug_code_) {
+    // Check that we're not inside a with or catch context.
+    __ Ldr(x1, FieldMemOperand(cp, HeapObject::kMapOffset));
+    __ CompareRoot(x1, Heap::kWithContextMapRootIndex);
+    __ Check(ne, kDeclarationInWithContext);
+    __ CompareRoot(x1, Heap::kCatchContextMapRootIndex);
+    __ Check(ne, kDeclarationInCatchContext);
+  }
+}
+
+
+// Emit code (or record globals) for a variable declaration, dispatching on
+// where the variable was allocated. Hole-initialization applies to
+// let/const bindings so the TDZ can be detected at use sites.
+void FullCodeGenerator::VisitVariableDeclaration(
+    VariableDeclaration* declaration) {
+  // If it was not possible to allocate the variable at compile time, we
+  // need to "declare" it at runtime to make sure it actually exists in the
+  // local context.
+  VariableProxy* proxy = declaration->proxy();
+  VariableMode mode = declaration->mode();
+  Variable* variable = proxy->var();
+  bool hole_init = (mode == CONST) || (mode == CONST_HARMONY) || (mode == LET);
+
+  switch (variable->location()) {
+    case Variable::UNALLOCATED:
+      // Globals are declared in one batch later; record name/value pairs.
+      globals_->Add(variable->name(), zone());
+      globals_->Add(variable->binding_needs_init()
+                        ? isolate()->factory()->the_hole_value()
+                        : isolate()->factory()->undefined_value(),
+                    zone());
+      break;
+
+    case Variable::PARAMETER:
+    case Variable::LOCAL:
+      if (hole_init) {
+        Comment cmnt(masm_, "[ VariableDeclaration");
+        __ LoadRoot(x10, Heap::kTheHoleValueRootIndex);
+        __ Str(x10, StackOperand(variable));
+      }
+      break;
+
+    case Variable::CONTEXT:
+      if (hole_init) {
+        Comment cmnt(masm_, "[ VariableDeclaration");
+        EmitDebugCheckDeclarationContext(variable);
+        __ LoadRoot(x10, Heap::kTheHoleValueRootIndex);
+        __ Str(x10, ContextMemOperand(cp, variable->index()));
+        // No write barrier since the_hole_value is in old space.
+        PrepareForBailoutForId(proxy->id(), NO_REGISTERS);
+      }
+      break;
+
+    case Variable::LOOKUP: {
+      Comment cmnt(masm_, "[ VariableDeclaration");
+      __ Mov(x2, Operand(variable->name()));
+      // Declaration nodes are always introduced in one of four modes.
+      ASSERT(IsDeclaredVariableMode(mode));
+      PropertyAttributes attr = IsImmutableVariableMode(mode) ? READ_ONLY
+                                                              : NONE;
+      __ Mov(x1, Operand(Smi::FromInt(attr)));
+      // Push initial value, if any.
+      // Note: For variables we must not push an initial value (such as
+      // 'undefined') because we may have a (legal) redeclaration and we
+      // must not destroy the current value.
+      if (hole_init) {
+        __ LoadRoot(x0, Heap::kTheHoleValueRootIndex);
+        __ Push(cp, x2, x1, x0);
+      } else {
+        // Pushing 0 (xzr) indicates no initial value.
+        __ Push(cp, x2, x1, xzr);
+      }
+      __ CallRuntime(Runtime::kDeclareContextSlot, 4);
+      break;
+    }
+  }
+}
+
+
+// Emit code (or record globals) for a function declaration: compile the
+// function literal's SharedFunctionInfo and store the closure into the
+// variable's slot, with a write barrier for context slots.
+void FullCodeGenerator::VisitFunctionDeclaration(
+    FunctionDeclaration* declaration) {
+  VariableProxy* proxy = declaration->proxy();
+  Variable* variable = proxy->var();
+  switch (variable->location()) {
+    case Variable::UNALLOCATED: {
+      globals_->Add(variable->name(), zone());
+      Handle<SharedFunctionInfo> function =
+          Compiler::BuildFunctionInfo(declaration->fun(), script());
+      // Check for stack overflow exception.
+      if (function.is_null()) return SetStackOverflow();
+      globals_->Add(function, zone());
+      break;
+    }
+
+    case Variable::PARAMETER:
+    case Variable::LOCAL: {
+      Comment cmnt(masm_, "[ Function Declaration");
+      VisitForAccumulatorValue(declaration->fun());
+      __ Str(result_register(), StackOperand(variable));
+      break;
+    }
+
+    case Variable::CONTEXT: {
+      Comment cmnt(masm_, "[ Function Declaration");
+      EmitDebugCheckDeclarationContext(variable);
+      VisitForAccumulatorValue(declaration->fun());
+      __ Str(result_register(), ContextMemOperand(cp, variable->index()));
+      int offset = Context::SlotOffset(variable->index());
+      // We know that we have written a function, which is not a smi.
+      __ RecordWriteContextSlot(cp,
+                                offset,
+                                result_register(),
+                                x2,
+                                kLRHasBeenSaved,
+                                kDontSaveFPRegs,
+                                EMIT_REMEMBERED_SET,
+                                OMIT_SMI_CHECK);
+      PrepareForBailoutForId(proxy->id(), NO_REGISTERS);
+      break;
+    }
+
+    case Variable::LOOKUP: {
+      Comment cmnt(masm_, "[ Function Declaration");
+      __ Mov(x2, Operand(variable->name()));
+      __ Mov(x1, Operand(Smi::FromInt(NONE)));
+      __ Push(cp, x2, x1);
+      // Push initial value for function declaration.
+      // Together with cp, x2 and x1 above this makes the 4 arguments for
+      // kDeclareContextSlot.
+      VisitForStackValue(declaration->fun());
+      __ CallRuntime(Runtime::kDeclareContextSlot, 4);
+      break;
+    }
+  }
+}
+
+
+// Emit code for a module declaration: load the module's instance object
+// from the global scope's context, store it into the declared context
+// slot (with write barrier), then visit the module body.
+void FullCodeGenerator::VisitModuleDeclaration(ModuleDeclaration* declaration) {
+  Variable* variable = declaration->proxy()->var();
+  ASSERT(variable->location() == Variable::CONTEXT);
+  ASSERT(variable->interface()->IsFrozen());
+
+  Comment cmnt(masm_, "[ ModuleDeclaration");
+  EmitDebugCheckDeclarationContext(variable);
+
+  // Load instance object.
+  __ LoadContext(x1, scope_->ContextChainLength(scope_->GlobalScope()));
+  __ Ldr(x1, ContextMemOperand(x1, variable->interface()->Index()));
+  __ Ldr(x1, ContextMemOperand(x1, Context::EXTENSION_INDEX));
+
+  // Assign it.
+  __ Str(x1, ContextMemOperand(cp, variable->index()));
+  // We know that we have written a module, which is not a smi.
+  __ RecordWriteContextSlot(cp,
+                            Context::SlotOffset(variable->index()),
+                            x1,
+                            x3,
+                            kLRHasBeenSaved,
+                            kDontSaveFPRegs,
+                            EMIT_REMEMBERED_SET,
+                            OMIT_SMI_CHECK);
+  PrepareForBailoutForId(declaration->proxy()->id(), NO_REGISTERS);
+
+  // Traverse info body.
+  Visit(declaration->module());
+}
+
+
+// Import declarations: currently mostly unimplemented (see TODOs); only
+// the debug declaration-context check is emitted for context slots.
+void FullCodeGenerator::VisitImportDeclaration(ImportDeclaration* declaration) {
+  VariableProxy* proxy = declaration->proxy();
+  Variable* variable = proxy->var();
+  switch (variable->location()) {
+    case Variable::UNALLOCATED:
+      // TODO(rossberg)
+      break;
+
+    case Variable::CONTEXT: {
+      Comment cmnt(masm_, "[ ImportDeclaration");
+      EmitDebugCheckDeclarationContext(variable);
+      // TODO(rossberg)
+      break;
+    }
+
+    case Variable::PARAMETER:
+    case Variable::LOCAL:
+    case Variable::LOOKUP:
+      UNREACHABLE();
+  }
+}
+
+
+// Export declarations: not implemented yet (see TODO); emits nothing.
+void FullCodeGenerator::VisitExportDeclaration(ExportDeclaration* declaration) {
+  // TODO(rossberg)
+}
+
+
+// Declare all recorded global name/value pairs in one runtime call.
+void FullCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
+  // Call the runtime to declare the globals.
+  __ Mov(x11, Operand(pairs));
+  // When the flags Smi is zero we can push xzr directly and skip the Mov.
+  Register flags = xzr;
+  if (Smi::FromInt(DeclareGlobalsFlags())) {
+    flags = x10;
+    __ Mov(flags, Operand(Smi::FromInt(DeclareGlobalsFlags())));
+  }
+  __ Push(cp, x11, flags);
+  __ CallRuntime(Runtime::kDeclareGlobals, 3);
+  // Return value is ignored.
+}
+
+
+// Declare all modules described in |descriptions| via a single runtime call.
+void FullCodeGenerator::DeclareModules(Handle<FixedArray> descriptions) {
+  // Call the runtime to declare the modules.
+  __ Push(descriptions);
+  __ CallRuntime(Runtime::kDeclareModules, 1);
+  // Return value is ignored.
+}
+
+
+// Emit a switch statement as a linear sequence of strict-equality tests
+// (inline Smi fast path + CompareIC slow path) followed by the case
+// bodies. The switch value stays on the stack until a case matches.
+void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
+  ASM_LOCATION("FullCodeGenerator::VisitSwitchStatement");
+  Comment cmnt(masm_, "[ SwitchStatement");
+  Breakable nested_statement(this, stmt);
+  SetStatementPosition(stmt);
+
+  // Keep the switch value on the stack until a case matches.
+  VisitForStackValue(stmt->tag());
+  PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS);
+
+  ZoneList<CaseClause*>* clauses = stmt->cases();
+  CaseClause* default_clause = NULL;  // Can occur anywhere in the list.
+
+  Label next_test;  // Recycled for each test.
+  // Compile all the tests with branches to their bodies.
+  for (int i = 0; i < clauses->length(); i++) {
+    CaseClause* clause = clauses->at(i);
+    clause->body_target()->Unuse();
+
+    // The default is not a test, but remember it as final fall through.
+    if (clause->is_default()) {
+      default_clause = clause;
+      continue;
+    }
+
+    Comment cmnt(masm_, "[ Case comparison");
+    __ Bind(&next_test);
+    next_test.Unuse();
+
+    // Compile the label expression.
+    VisitForAccumulatorValue(clause->label());
+
+    // Perform the comparison as if via '==='.
+    __ Peek(x1, 0);  // Switch value.
+
+    JumpPatchSite patch_site(masm_);
+    if (ShouldInlineSmiCase(Token::EQ_STRICT)) {
+      // Fast path: both operands are Smis, compare directly.
+      Label slow_case;
+      patch_site.EmitJumpIfEitherNotSmi(x0, x1, &slow_case);
+      __ Cmp(x1, x0);
+      __ B(ne, &next_test);
+      __ Drop(1);  // Switch value is no longer needed.
+      __ B(clause->body_target());
+      __ Bind(&slow_case);
+    }
+
+    // Record position before stub call for type feedback.
+    SetSourcePosition(clause->position());
+    Handle<Code> ic = CompareIC::GetUninitialized(isolate(), Token::EQ_STRICT);
+    CallIC(ic, clause->CompareId());
+    patch_site.EmitPatchInfo();
+
+    // On a bailout resuming here, x0 holds the boxed comparison result
+    // rather than the IC's raw value; the skipped code normalizes it.
+    Label skip;
+    __ B(&skip);
+    PrepareForBailout(clause, TOS_REG);
+    __ JumpIfNotRoot(x0, Heap::kTrueValueRootIndex, &next_test);
+    __ Drop(1);
+    __ B(clause->body_target());
+    __ Bind(&skip);
+
+    // Normal path: the IC returns zero for equality.
+    __ Cbnz(x0, &next_test);
+    __ Drop(1);  // Switch value is no longer needed.
+    __ B(clause->body_target());
+  }
+
+  // Discard the test value and jump to the default if present, otherwise to
+  // the end of the statement.
+  __ Bind(&next_test);
+  __ Drop(1);  // Switch value is no longer needed.
+  if (default_clause == NULL) {
+    __ B(nested_statement.break_label());
+  } else {
+    __ B(default_clause->body_target());
+  }
+
+  // Compile all the case bodies.
+  for (int i = 0; i < clauses->length(); i++) {
+    Comment cmnt(masm_, "[ Case body");
+    CaseClause* clause = clauses->at(i);
+    __ Bind(clause->body_target());
+    PrepareForBailoutForId(clause->EntryId(), NO_REGISTERS);
+    VisitStatements(clause->statements());
+  }
+
+  __ Bind(nested_statement.break_label());
+  PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
+}
+
+
+// Emit a for-in loop. Fast path: use the enum cache from the enumerable's
+// map. Slow path: fetch the property names as a fixed array from the
+// runtime. During the loop the stack holds, top to bottom:
+//   [index (smi), length (smi), array, map-or-smi, enumerable]
+// where the fourth slot is the cached map (fast path) or a smi marker
+// (1 = slow check, 0 = proxy); all five are dropped at loop exit.
+void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
+  ASM_LOCATION("FullCodeGenerator::VisitForInStatement");
+  Comment cmnt(masm_, "[ ForInStatement");
+  int slot = stmt->ForInFeedbackSlot();
+  // TODO(all): This visitor probably needs better comments and a revisit.
+  SetStatementPosition(stmt);
+
+  Label loop, exit;
+  ForIn loop_statement(this, stmt);
+  increment_loop_depth();
+
+  // Get the object to enumerate over. If the object is null or undefined, skip
+  // over the loop.  See ECMA-262 version 5, section 12.6.4.
+  VisitForAccumulatorValue(stmt->enumerable());
+  __ JumpIfRoot(x0, Heap::kUndefinedValueRootIndex, &exit);
+  Register null_value = x15;
+  __ LoadRoot(null_value, Heap::kNullValueRootIndex);
+  __ Cmp(x0, null_value);
+  __ B(eq, &exit);
+
+  PrepareForBailoutForId(stmt->PrepareId(), TOS_REG);
+
+  // Convert the object to a JS object.
+  Label convert, done_convert;
+  __ JumpIfSmi(x0, &convert);
+  __ JumpIfObjectType(x0, x10, x11, FIRST_SPEC_OBJECT_TYPE, &done_convert, ge);
+  __ Bind(&convert);
+  __ Push(x0);
+  __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
+  __ Bind(&done_convert);
+  __ Push(x0);
+
+  // Check for proxies.
+  Label call_runtime;
+  STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
+  __ JumpIfObjectType(x0, x10, x11, LAST_JS_PROXY_TYPE, &call_runtime, le);
+
+  // Check cache validity in generated code. This is a fast case for
+  // the JSObject::IsSimpleEnum cache validity checks. If we cannot
+  // guarantee cache validity, call the runtime system to check cache
+  // validity or get the property names in a fixed array.
+  __ CheckEnumCache(x0, null_value, x10, x11, x12, x13, &call_runtime);
+
+  // The enum cache is valid.  Load the map of the object being
+  // iterated over and use the cache for the iteration.
+  Label use_cache;
+  __ Ldr(x0, FieldMemOperand(x0, HeapObject::kMapOffset));
+  __ B(&use_cache);
+
+  // Get the set of properties to enumerate.
+  __ Bind(&call_runtime);
+  __ Push(x0);  // Duplicate the enumerable object on the stack.
+  __ CallRuntime(Runtime::kGetPropertyNamesFast, 1);
+
+  // If we got a map from the runtime call, we can do a fast
+  // modification check. Otherwise, we got a fixed array, and we have
+  // to do a slow check.
+  Label fixed_array, no_descriptors;
+  __ Ldr(x2, FieldMemOperand(x0, HeapObject::kMapOffset));
+  __ JumpIfNotRoot(x2, Heap::kMetaMapRootIndex, &fixed_array);
+
+  // We got a map in register x0. Get the enumeration cache from it.
+  __ Bind(&use_cache);
+
+  __ EnumLengthUntagged(x1, x0);
+  __ Cbz(x1, &no_descriptors);
+
+  __ LoadInstanceDescriptors(x0, x2);
+  __ Ldr(x2, FieldMemOperand(x2, DescriptorArray::kEnumCacheOffset));
+  __ Ldr(x2,
+         FieldMemOperand(x2, DescriptorArray::kEnumCacheBridgeCacheOffset));
+
+  // Set up the four remaining stack slots.
+  __ Push(x0);  // Map.
+  __ Mov(x0, Operand(Smi::FromInt(0)));
+  // Push enumeration cache, enumeration cache length (as smi) and zero.
+  __ SmiTag(x1);
+  __ Push(x2, x1, x0);
+  __ B(&loop);
+
+  __ Bind(&no_descriptors);
+  __ Drop(1);
+  __ B(&exit);
+
+  // We got a fixed array in register x0. Iterate through that.
+  __ Bind(&fixed_array);
+
+  // Record slow-case feedback for this for-in site.
+  Handle<Object> feedback = Handle<Object>(
+      Smi::FromInt(TypeFeedbackInfo::kForInFastCaseMarker),
+      isolate());
+  StoreFeedbackVectorSlot(slot, feedback);
+  __ LoadObject(x1, FeedbackVector());
+  __ Mov(x10, Operand(Smi::FromInt(TypeFeedbackInfo::kForInSlowCaseMarker)));
+  __ Str(x10, FieldMemOperand(x1, FixedArray::OffsetOfElementAt(slot)));
+
+  __ Mov(x1, Operand(Smi::FromInt(1)));  // Smi indicates slow check.
+  __ Peek(x10, 0);  // Get enumerated object.
+  STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
+  // TODO(all): similar check was done already. Can we avoid it here?
+  __ CompareObjectType(x10, x11, x12, LAST_JS_PROXY_TYPE);
+  ASSERT(Smi::FromInt(0) == 0);
+  __ CzeroX(x1, le);  // Zero indicates proxy.
+  __ Push(x1, x0);  // Smi and array
+  __ Ldr(x1, FieldMemOperand(x0, FixedArray::kLengthOffset));
+  __ Push(x1, xzr);  // Fixed array length (as smi) and initial index.
+
+  // Generate code for doing the condition check.
+  PrepareForBailoutForId(stmt->BodyId(), NO_REGISTERS);
+  __ Bind(&loop);
+  // Load the current count to x0, load the length to x1.
+  __ PeekPair(x0, x1, 0);
+  __ Cmp(x0, x1);  // Compare to the array length.
+  __ B(hs, loop_statement.break_label());
+
+  // Get the current entry of the array into register r3.
+  __ Peek(x10, 2 * kXRegSizeInBytes);
+  __ Add(x10, x10, Operand::UntagSmiAndScale(x0, kPointerSizeLog2));
+  __ Ldr(x3, MemOperand(x10, FixedArray::kHeaderSize - kHeapObjectTag));
+
+  // Get the expected map from the stack or a smi in the
+  // permanent slow case into register x10.
+  __ Peek(x2, 3 * kXRegSizeInBytes);
+
+  // Check if the expected map still matches that of the enumerable.
+  // If not, we may have to filter the key.
+  Label update_each;
+  __ Peek(x1, 4 * kXRegSizeInBytes);
+  __ Ldr(x11, FieldMemOperand(x1, HeapObject::kMapOffset));
+  __ Cmp(x11, x2);
+  __ B(eq, &update_each);
+
+  // For proxies, no filtering is done.
+  // TODO(rossberg): What if only a prototype is a proxy? Not specified yet.
+  STATIC_ASSERT(kSmiTag == 0);
+  __ Cbz(x2, &update_each);
+
+  // Convert the entry to a string or (smi) 0 if it isn't a property
+  // any more. If the property has been removed while iterating, we
+  // just skip it.
+  __ Push(x1, x3);
+  __ InvokeBuiltin(Builtins::FILTER_KEY, CALL_FUNCTION);
+  __ Mov(x3, x0);
+  __ Cbz(x0, loop_statement.continue_label());
+
+  // Update the 'each' property or variable from the possibly filtered
+  // entry in register x3.
+  __ Bind(&update_each);
+  __ Mov(result_register(), x3);
+  // Perform the assignment as if via '='.
+  { EffectContext context(this);
+    EmitAssignment(stmt->each());
+  }
+
+  // Generate code for the body of the loop.
+  Visit(stmt->body());
+
+  // Generate code for going to the next element by incrementing
+  // the index (smi) stored on top of the stack.
+  __ Bind(loop_statement.continue_label());
+  // TODO(all): We could use a callee saved register to avoid popping.
+  __ Pop(x0);
+  __ Add(x0, x0, Operand(Smi::FromInt(1)));
+  __ Push(x0);
+
+  EmitBackEdgeBookkeeping(stmt, &loop);
+  __ B(&loop);
+
+  // Remove the pointers stored on the stack.
+  __ Bind(loop_statement.break_label());
+  __ Drop(5);
+
+  // Exit and decrement the loop depth.
+  PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
+  __ Bind(&exit);
+  decrement_loop_depth();
+}
+
+
+// Emit a for-of loop: obtain the iterator, convert it to a JS object, then
+// on each iteration call iterator.next(), break when result.done is true,
+// and assign result.value to the loop variable before the body.
+void FullCodeGenerator::VisitForOfStatement(ForOfStatement* stmt) {
+  Comment cmnt(masm_, "[ ForOfStatement");
+  SetStatementPosition(stmt);
+
+  Iteration loop_statement(this, stmt);
+  increment_loop_depth();
+
+  // var iterator = iterable[@@iterator]()
+  VisitForAccumulatorValue(stmt->assign_iterator());
+
+  // As with for-in, skip the loop if the iterator is null or undefined.
+  Register iterator = x0;
+  __ JumpIfRoot(iterator, Heap::kUndefinedValueRootIndex,
+                loop_statement.break_label());
+  __ JumpIfRoot(iterator, Heap::kNullValueRootIndex,
+                loop_statement.break_label());
+
+  // Convert the iterator to a JS object.
+  Label convert, done_convert;
+  __ JumpIfSmi(iterator, &convert);
+  __ CompareObjectType(iterator, x1, x1, FIRST_SPEC_OBJECT_TYPE);
+  __ B(ge, &done_convert);
+  __ Bind(&convert);
+  __ Push(iterator);
+  __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
+  __ Bind(&done_convert);
+  __ Push(iterator);
+
+  // Loop entry.
+  __ Bind(loop_statement.continue_label());
+
+  // result = iterator.next()
+  VisitForEffect(stmt->next_result());
+
+  // if (result.done) break;
+  Label result_not_done;
+  VisitForControl(stmt->result_done(),
+                  loop_statement.break_label(),
+                  &result_not_done,
+                  &result_not_done);
+  __ Bind(&result_not_done);
+
+  // each = result.value
+  VisitForEffect(stmt->assign_each());
+
+  // Generate code for the body of the loop.
+  Visit(stmt->body());
+
+  // Check stack before looping.
+  PrepareForBailoutForId(stmt->BackEdgeId(), NO_REGISTERS);
+  EmitBackEdgeBookkeeping(stmt, loop_statement.continue_label());
+  __ B(loop_statement.continue_label());
+
+  // Exit and decrement the loop depth.
+  PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
+  __ Bind(loop_statement.break_label());
+  decrement_loop_depth();
+}
+
+
+void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info,
+ bool pretenure) {
+ // Use the fast case closure allocation code that allocates in new space for
+ // nested functions that don't need literals cloning. If we're running with
+ // the --always-opt or the --prepare-always-opt flag, we need to use the
+ // runtime function so that the new function we are creating here gets a
+ // chance to have its code optimized and doesn't just get a copy of the
+ // existing unoptimized code.
+ if (!FLAG_always_opt &&
+ !FLAG_prepare_always_opt &&
+ !pretenure &&
+ scope()->is_function_scope() &&
+ info->num_literals() == 0) {
+ FastNewClosureStub stub(info->language_mode(), info->is_generator());
+ __ Mov(x2, Operand(info));
+ __ CallStub(&stub);
+ } else {
+ __ Mov(x11, Operand(info));
+ __ LoadRoot(x10, pretenure ? Heap::kTrueValueRootIndex
+ : Heap::kFalseValueRootIndex);
+ __ Push(cp, x11, x10);
+ __ CallRuntime(Runtime::kNewClosure, 3);
+ }
+ context()->Plug(x0);
+}
+
+
+void FullCodeGenerator::VisitVariableProxy(VariableProxy* expr) {
+ Comment cmnt(masm_, "[ VariableProxy");
+ EmitVariableLoad(expr);
+}
+
+
+void FullCodeGenerator::EmitLoadGlobalCheckExtensions(Variable* var,
+ TypeofState typeof_state,
+ Label* slow) {
+ Register current = cp;
+ Register next = x10;
+ Register temp = x11;
+
+ Scope* s = scope();
+ while (s != NULL) {
+ if (s->num_heap_slots() > 0) {
+ if (s->calls_non_strict_eval()) {
+ // Check that extension is NULL.
+ __ Ldr(temp, ContextMemOperand(current, Context::EXTENSION_INDEX));
+ __ Cbnz(temp, slow);
+ }
+ // Load next context in chain.
+ __ Ldr(next, ContextMemOperand(current, Context::PREVIOUS_INDEX));
+ // Walk the rest of the chain without clobbering cp.
+ current = next;
+ }
+ // If no outer scope calls eval, we do not need to check more
+ // context extensions.
+ if (!s->outer_scope_calls_non_strict_eval() || s->is_eval_scope()) break;
+ s = s->outer_scope();
+ }
+
+ if (s->is_eval_scope()) {
+ Label loop, fast;
+ __ Mov(next, current);
+
+ __ Bind(&loop);
+ // Terminate at native context.
+ __ Ldr(temp, FieldMemOperand(next, HeapObject::kMapOffset));
+ __ JumpIfRoot(temp, Heap::kNativeContextMapRootIndex, &fast);
+ // Check that extension is NULL.
+ __ Ldr(temp, ContextMemOperand(next, Context::EXTENSION_INDEX));
+ __ Cbnz(temp, slow);
+ // Load next context in chain.
+ __ Ldr(next, ContextMemOperand(next, Context::PREVIOUS_INDEX));
+ __ B(&loop);
+ __ Bind(&fast);
+ }
+
+ __ Ldr(x0, GlobalObjectMemOperand());
+ __ Mov(x2, Operand(var->name()));
+ ContextualMode mode = (typeof_state == INSIDE_TYPEOF) ? NOT_CONTEXTUAL
+ : CONTEXTUAL;
+ CallLoadIC(mode);
+}
+
+
+MemOperand FullCodeGenerator::ContextSlotOperandCheckExtensions(Variable* var,
+ Label* slow) {
+ ASSERT(var->IsContextSlot());
+ Register context = cp;
+ Register next = x10;
+ Register temp = x11;
+
+ for (Scope* s = scope(); s != var->scope(); s = s->outer_scope()) {
+ if (s->num_heap_slots() > 0) {
+ if (s->calls_non_strict_eval()) {
+ // Check that extension is NULL.
+ __ Ldr(temp, ContextMemOperand(context, Context::EXTENSION_INDEX));
+ __ Cbnz(temp, slow);
+ }
+ __ Ldr(next, ContextMemOperand(context, Context::PREVIOUS_INDEX));
+ // Walk the rest of the chain without clobbering cp.
+ context = next;
+ }
+ }
+ // Check that last extension is NULL.
+ __ Ldr(temp, ContextMemOperand(context, Context::EXTENSION_INDEX));
+ __ Cbnz(temp, slow);
+
+ // This function is used only for loads, not stores, so it's safe to
+ // return an cp-based operand (the write barrier cannot be allowed to
+ // destroy the cp register).
+ return ContextMemOperand(context, var->index());
+}
+
+
+void FullCodeGenerator::EmitDynamicLookupFastCase(Variable* var,
+ TypeofState typeof_state,
+ Label* slow,
+ Label* done) {
+ // Generate fast-case code for variables that might be shadowed by
+ // eval-introduced variables. Eval is used a lot without
+ // introducing variables. In those cases, we do not want to
+ // perform a runtime call for all variables in the scope
+ // containing the eval.
+ if (var->mode() == DYNAMIC_GLOBAL) {
+ EmitLoadGlobalCheckExtensions(var, typeof_state, slow);
+ __ B(done);
+ } else if (var->mode() == DYNAMIC_LOCAL) {
+ Variable* local = var->local_if_not_shadowed();
+ __ Ldr(x0, ContextSlotOperandCheckExtensions(local, slow));
+ if (local->mode() == LET ||
+ local->mode() == CONST ||
+ local->mode() == CONST_HARMONY) {
+ __ JumpIfNotRoot(x0, Heap::kTheHoleValueRootIndex, done);
+ if (local->mode() == CONST) {
+ __ LoadRoot(x0, Heap::kUndefinedValueRootIndex);
+ } else { // LET || CONST_HARMONY
+ __ Mov(x0, Operand(var->name()));
+ __ Push(x0);
+ __ CallRuntime(Runtime::kThrowReferenceError, 1);
+ }
+ }
+ __ B(done);
+ }
+}
+
+
+void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
+ // Record position before possible IC call.
+ SetSourcePosition(proxy->position());
+ Variable* var = proxy->var();
+
+ // Three cases: global variables, lookup variables, and all other types of
+ // variables.
+ switch (var->location()) {
+ case Variable::UNALLOCATED: {
+ Comment cmnt(masm_, "Global variable");
+ // Use inline caching. Variable name is passed in x2 and the global
+ // object (receiver) in x0.
+ __ Ldr(x0, GlobalObjectMemOperand());
+ __ Mov(x2, Operand(var->name()));
+ CallLoadIC(CONTEXTUAL);
+ context()->Plug(x0);
+ break;
+ }
+
+ case Variable::PARAMETER:
+ case Variable::LOCAL:
+ case Variable::CONTEXT: {
+ Comment cmnt(masm_, var->IsContextSlot()
+ ? "Context variable"
+ : "Stack variable");
+ if (var->binding_needs_init()) {
+ // var->scope() may be NULL when the proxy is located in eval code and
+ // refers to a potential outside binding. Currently those bindings are
+ // always looked up dynamically, i.e. in that case
+ // var->location() == LOOKUP.
+ // always holds.
+ ASSERT(var->scope() != NULL);
+
+ // Check if the binding really needs an initialization check. The check
+ // can be skipped in the following situation: we have a LET or CONST
+ // binding in harmony mode, both the Variable and the VariableProxy have
+ // the same declaration scope (i.e. they are both in global code, in the
+ // same function or in the same eval code) and the VariableProxy is in
+ // the source physically located after the initializer of the variable.
+ //
+ // We cannot skip any initialization checks for CONST in non-harmony
+ // mode because const variables may be declared but never initialized:
+ // if (false) { const x; }; var y = x;
+ //
+ // The condition on the declaration scopes is a conservative check for
+ // nested functions that access a binding and are called before the
+ // binding is initialized:
+ // function() { f(); let x = 1; function f() { x = 2; } }
+ //
+ bool skip_init_check;
+ if (var->scope()->DeclarationScope() != scope()->DeclarationScope()) {
+ skip_init_check = false;
+ } else {
+ // Check that we always have valid source position.
+ ASSERT(var->initializer_position() != RelocInfo::kNoPosition);
+ ASSERT(proxy->position() != RelocInfo::kNoPosition);
+ skip_init_check = var->mode() != CONST &&
+ var->initializer_position() < proxy->position();
+ }
+
+ if (!skip_init_check) {
+ // Let and const need a read barrier.
+ GetVar(x0, var);
+ Label done;
+ __ JumpIfNotRoot(x0, Heap::kTheHoleValueRootIndex, &done);
+ if (var->mode() == LET || var->mode() == CONST_HARMONY) {
+ // Throw a reference error when using an uninitialized let/const
+ // binding in harmony mode.
+ __ Mov(x0, Operand(var->name()));
+ __ Push(x0);
+ __ CallRuntime(Runtime::kThrowReferenceError, 1);
+ __ Bind(&done);
+ } else {
+ // Uninitalized const bindings outside of harmony mode are unholed.
+ ASSERT(var->mode() == CONST);
+ __ LoadRoot(x0, Heap::kUndefinedValueRootIndex);
+ __ Bind(&done);
+ }
+ context()->Plug(x0);
+ break;
+ }
+ }
+ context()->Plug(var);
+ break;
+ }
+
+ case Variable::LOOKUP: {
+ Label done, slow;
+ // Generate code for loading from variables potentially shadowed by
+ // eval-introduced variables.
+ EmitDynamicLookupFastCase(var, NOT_INSIDE_TYPEOF, &slow, &done);
+ __ Bind(&slow);
+ Comment cmnt(masm_, "Lookup variable");
+ __ Mov(x1, Operand(var->name()));
+ __ Push(cp, x1); // Context and name.
+ __ CallRuntime(Runtime::kLoadContextSlot, 2);
+ __ Bind(&done);
+ context()->Plug(x0);
+ break;
+ }
+ }
+}
+
+
+void FullCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
+ Comment cmnt(masm_, "[ RegExpLiteral");
+ Label materialized;
+ // Registers will be used as follows:
+ // x5 = materialized value (RegExp literal)
+ // x4 = JS function, literals array
+ // x3 = literal index
+ // x2 = RegExp pattern
+ // x1 = RegExp flags
+ // x0 = RegExp literal clone
+ __ Ldr(x10, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ __ Ldr(x4, FieldMemOperand(x10, JSFunction::kLiteralsOffset));
+ int literal_offset =
+ FixedArray::kHeaderSize + expr->literal_index() * kPointerSize;
+ __ Ldr(x5, FieldMemOperand(x4, literal_offset));
+ __ JumpIfNotRoot(x5, Heap::kUndefinedValueRootIndex, &materialized);
+
+ // Create regexp literal using runtime function.
+ // Result will be in x0.
+ __ Mov(x3, Operand(Smi::FromInt(expr->literal_index())));
+ __ Mov(x2, Operand(expr->pattern()));
+ __ Mov(x1, Operand(expr->flags()));
+ __ Push(x4, x3, x2, x1);
+ __ CallRuntime(Runtime::kMaterializeRegExpLiteral, 4);
+ __ Mov(x5, x0);
+
+ __ Bind(&materialized);
+ int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
+ Label allocated, runtime_allocate;
+ __ Allocate(size, x0, x2, x3, &runtime_allocate, TAG_OBJECT);
+ __ B(&allocated);
+
+ __ Bind(&runtime_allocate);
+ __ Mov(x10, Operand(Smi::FromInt(size)));
+ __ Push(x5, x10);
+ __ CallRuntime(Runtime::kAllocateInNewSpace, 1);
+ __ Pop(x5);
+
+ __ Bind(&allocated);
+ // After this, registers are used as follows:
+ // x0: Newly allocated regexp.
+ // x5: Materialized regexp.
+ // x10, x11, x12: temps.
+ __ CopyFields(x0, x5, CPURegList(x10, x11, x12), size / kPointerSize);
+ context()->Plug(x0);
+}
+
+
+void FullCodeGenerator::EmitAccessor(Expression* expression) {
+ if (expression == NULL) {
+ __ LoadRoot(x10, Heap::kNullValueRootIndex);
+ __ Push(x10);
+ } else {
+ VisitForStackValue(expression);
+ }
+}
+
+
+void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
+ Comment cmnt(masm_, "[ ObjectLiteral");
+
+ expr->BuildConstantProperties(isolate());
+ Handle<FixedArray> constant_properties = expr->constant_properties();
+ __ Ldr(x3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ __ Ldr(x3, FieldMemOperand(x3, JSFunction::kLiteralsOffset));
+ __ Mov(x2, Operand(Smi::FromInt(expr->literal_index())));
+ __ Mov(x1, Operand(constant_properties));
+ int flags = expr->fast_elements()
+ ? ObjectLiteral::kFastElements
+ : ObjectLiteral::kNoFlags;
+ flags |= expr->has_function()
+ ? ObjectLiteral::kHasFunction
+ : ObjectLiteral::kNoFlags;
+ __ Mov(x0, Operand(Smi::FromInt(flags)));
+ int properties_count = constant_properties->length() / 2;
+ const int max_cloned_properties =
+ FastCloneShallowObjectStub::kMaximumClonedProperties;
+ if ((FLAG_track_double_fields && expr->may_store_doubles()) ||
+ (expr->depth() > 1) || Serializer::enabled() ||
+ (flags != ObjectLiteral::kFastElements) ||
+ (properties_count > max_cloned_properties)) {
+ __ Push(x3, x2, x1, x0);
+ __ CallRuntime(Runtime::kCreateObjectLiteral, 4);
+ } else {
+ FastCloneShallowObjectStub stub(properties_count);
+ __ CallStub(&stub);
+ }
+
+ // If result_saved is true the result is on top of the stack. If
+ // result_saved is false the result is in x0.
+ bool result_saved = false;
+
+ // Mark all computed expressions that are bound to a key that
+ // is shadowed by a later occurrence of the same key. For the
+ // marked expressions, no store code is emitted.
+ expr->CalculateEmitStore(zone());
+
+ AccessorTable accessor_table(zone());
+ for (int i = 0; i < expr->properties()->length(); i++) {
+ ObjectLiteral::Property* property = expr->properties()->at(i);
+ if (property->IsCompileTimeValue()) continue;
+
+ Literal* key = property->key();
+ Expression* value = property->value();
+ if (!result_saved) {
+ __ Push(x0); // Save result on stack
+ result_saved = true;
+ }
+ switch (property->kind()) {
+ case ObjectLiteral::Property::CONSTANT:
+ UNREACHABLE();
+ case ObjectLiteral::Property::MATERIALIZED_LITERAL:
+ ASSERT(!CompileTimeValue::IsCompileTimeValue(property->value()));
+ // Fall through.
+ case ObjectLiteral::Property::COMPUTED:
+ if (key->value()->IsInternalizedString()) {
+ if (property->emit_store()) {
+ VisitForAccumulatorValue(value);
+ __ Mov(x2, Operand(key->value()));
+ __ Peek(x1, 0);
+ CallStoreIC(key->LiteralFeedbackId());
+ PrepareForBailoutForId(key->id(), NO_REGISTERS);
+ } else {
+ VisitForEffect(value);
+ }
+ break;
+ }
+ // Duplicate receiver on stack.
+ __ Peek(x0, 0);
+ __ Push(x0);
+ VisitForStackValue(key);
+ VisitForStackValue(value);
+ if (property->emit_store()) {
+ __ Mov(x0, Operand(Smi::FromInt(NONE))); // PropertyAttributes
+ __ Push(x0);
+ __ CallRuntime(Runtime::kSetProperty, 4);
+ } else {
+ __ Drop(3);
+ }
+ break;
+ case ObjectLiteral::Property::PROTOTYPE:
+ // Duplicate receiver on stack.
+ __ Peek(x0, 0);
+ // TODO(jbramley): This push shouldn't be necessary if we don't call the
+ // runtime below. In that case, skip it.
+ __ Push(x0);
+ VisitForStackValue(value);
+ if (property->emit_store()) {
+ __ CallRuntime(Runtime::kSetPrototype, 2);
+ } else {
+ __ Drop(2);
+ }
+ break;
+ case ObjectLiteral::Property::GETTER:
+ accessor_table.lookup(key)->second->getter = value;
+ break;
+ case ObjectLiteral::Property::SETTER:
+ accessor_table.lookup(key)->second->setter = value;
+ break;
+ }
+ }
+
+ // Emit code to define accessors, using only a single call to the runtime for
+ // each pair of corresponding getters and setters.
+ for (AccessorTable::Iterator it = accessor_table.begin();
+ it != accessor_table.end();
+ ++it) {
+ __ Peek(x10, 0); // Duplicate receiver.
+ __ Push(x10);
+ VisitForStackValue(it->first);
+ EmitAccessor(it->second->getter);
+ EmitAccessor(it->second->setter);
+ __ Mov(x10, Operand(Smi::FromInt(NONE)));
+ __ Push(x10);
+ __ CallRuntime(Runtime::kDefineOrRedefineAccessorProperty, 5);
+ }
+
+ if (expr->has_function()) {
+ ASSERT(result_saved);
+ __ Peek(x0, 0);
+ __ Push(x0);
+ __ CallRuntime(Runtime::kToFastProperties, 1);
+ }
+
+ if (result_saved) {
+ context()->PlugTOS();
+ } else {
+ context()->Plug(x0);
+ }
+}
+
+
+void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
+ Comment cmnt(masm_, "[ ArrayLiteral");
+
+ expr->BuildConstantElements(isolate());
+ int flags = (expr->depth() == 1) ? ArrayLiteral::kShallowElements
+ : ArrayLiteral::kNoFlags;
+
+ ZoneList<Expression*>* subexprs = expr->values();
+ int length = subexprs->length();
+ Handle<FixedArray> constant_elements = expr->constant_elements();
+ ASSERT_EQ(2, constant_elements->length());
+ ElementsKind constant_elements_kind =
+ static_cast<ElementsKind>(Smi::cast(constant_elements->get(0))->value());
+ bool has_fast_elements = IsFastObjectElementsKind(constant_elements_kind);
+ Handle<FixedArrayBase> constant_elements_values(
+ FixedArrayBase::cast(constant_elements->get(1)));
+
+ AllocationSiteMode allocation_site_mode = TRACK_ALLOCATION_SITE;
+ if (has_fast_elements && !FLAG_allocation_site_pretenuring) {
+ // If the only customer of allocation sites is transitioning, then
+ // we can turn it off if we don't have anywhere else to transition to.
+ allocation_site_mode = DONT_TRACK_ALLOCATION_SITE;
+ }
+
+ __ Ldr(x3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ __ Ldr(x3, FieldMemOperand(x3, JSFunction::kLiteralsOffset));
+ // TODO(jbramley): Can these Operand constructors be implicit?
+ __ Mov(x2, Operand(Smi::FromInt(expr->literal_index())));
+ __ Mov(x1, Operand(constant_elements));
+ if (has_fast_elements && constant_elements_values->map() ==
+ isolate()->heap()->fixed_cow_array_map()) {
+ FastCloneShallowArrayStub stub(
+ FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS,
+ allocation_site_mode,
+ length);
+ __ CallStub(&stub);
+ __ IncrementCounter(
+ isolate()->counters()->cow_arrays_created_stub(), 1, x10, x11);
+ } else if ((expr->depth() > 1) || Serializer::enabled() ||
+ length > FastCloneShallowArrayStub::kMaximumClonedLength) {
+ __ Mov(x0, Operand(Smi::FromInt(flags)));
+ __ Push(x3, x2, x1, x0);
+ __ CallRuntime(Runtime::kCreateArrayLiteral, 4);
+ } else {
+ ASSERT(IsFastSmiOrObjectElementsKind(constant_elements_kind) ||
+ FLAG_smi_only_arrays);
+ FastCloneShallowArrayStub::Mode mode =
+ FastCloneShallowArrayStub::CLONE_ANY_ELEMENTS;
+
+ if (has_fast_elements) {
+ mode = FastCloneShallowArrayStub::CLONE_ELEMENTS;
+ }
+
+ FastCloneShallowArrayStub stub(mode, allocation_site_mode, length);
+ __ CallStub(&stub);
+ }
+
+ bool result_saved = false; // Is the result saved to the stack?
+
+ // Emit code to evaluate all the non-constant subexpressions and to store
+ // them into the newly cloned array.
+ for (int i = 0; i < length; i++) {
+ Expression* subexpr = subexprs->at(i);
+ // If the subexpression is a literal or a simple materialized literal it
+ // is already set in the cloned array.
+ if (CompileTimeValue::IsCompileTimeValue(subexpr)) continue;
+
+ if (!result_saved) {
+ __ Push(x0);
+ __ Push(Smi::FromInt(expr->literal_index()));
+ result_saved = true;
+ }
+ VisitForAccumulatorValue(subexpr);
+
+ if (IsFastObjectElementsKind(constant_elements_kind)) {
+ int offset = FixedArray::kHeaderSize + (i * kPointerSize);
+ __ Peek(x6, kPointerSize); // Copy of array literal.
+ __ Ldr(x1, FieldMemOperand(x6, JSObject::kElementsOffset));
+ __ Str(result_register(), FieldMemOperand(x1, offset));
+ // Update the write barrier for the array store.
+ __ RecordWriteField(x1, offset, result_register(), x10,
+ kLRHasBeenSaved, kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET, INLINE_SMI_CHECK);
+ } else {
+ __ Mov(x3, Operand(Smi::FromInt(i)));
+ StoreArrayLiteralElementStub stub;
+ __ CallStub(&stub);
+ }
+
+ PrepareForBailoutForId(expr->GetIdForElement(i), NO_REGISTERS);
+ }
+
+ if (result_saved) {
+ __ Drop(1); // literal index
+ context()->PlugTOS();
+ } else {
+ context()->Plug(x0);
+ }
+}
+
+
+void FullCodeGenerator::VisitAssignment(Assignment* expr) {
+ Comment cmnt(masm_, "[ Assignment");
+ // Invalid left-hand sides are rewritten to have a 'throw ReferenceError'
+ // on the left-hand side.
+ if (!expr->target()->IsValidLeftHandSide()) {
+ VisitForEffect(expr->target());
+ return;
+ }
+
+ // Left-hand side can only be a property, a global or a (parameter or local)
+ // slot.
+ enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY };
+ LhsKind assign_type = VARIABLE;
+ Property* property = expr->target()->AsProperty();
+ if (property != NULL) {
+ assign_type = (property->key()->IsPropertyName())
+ ? NAMED_PROPERTY
+ : KEYED_PROPERTY;
+ }
+
+ // Evaluate LHS expression.
+ switch (assign_type) {
+ case VARIABLE:
+ // Nothing to do here.
+ break;
+ case NAMED_PROPERTY:
+ if (expr->is_compound()) {
+ // We need the receiver both on the stack and in the accumulator.
+ VisitForAccumulatorValue(property->obj());
+ __ Push(result_register());
+ } else {
+ VisitForStackValue(property->obj());
+ }
+ break;
+ case KEYED_PROPERTY:
+ if (expr->is_compound()) {
+ VisitForStackValue(property->obj());
+ VisitForAccumulatorValue(property->key());
+ __ Peek(x1, 0);
+ __ Push(x0);
+ } else {
+ VisitForStackValue(property->obj());
+ VisitForStackValue(property->key());
+ }
+ break;
+ }
+
+ // For compound assignments we need another deoptimization point after the
+ // variable/property load.
+ if (expr->is_compound()) {
+ { AccumulatorValueContext context(this);
+ switch (assign_type) {
+ case VARIABLE:
+ EmitVariableLoad(expr->target()->AsVariableProxy());
+ PrepareForBailout(expr->target(), TOS_REG);
+ break;
+ case NAMED_PROPERTY:
+ EmitNamedPropertyLoad(property);
+ PrepareForBailoutForId(property->LoadId(), TOS_REG);
+ break;
+ case KEYED_PROPERTY:
+ EmitKeyedPropertyLoad(property);
+ PrepareForBailoutForId(property->LoadId(), TOS_REG);
+ break;
+ }
+ }
+
+ Token::Value op = expr->binary_op();
+ __ Push(x0); // Left operand goes on the stack.
+ VisitForAccumulatorValue(expr->value());
+
+ OverwriteMode mode = expr->value()->ResultOverwriteAllowed()
+ ? OVERWRITE_RIGHT
+ : NO_OVERWRITE;
+ SetSourcePosition(expr->position() + 1);
+ AccumulatorValueContext context(this);
+ if (ShouldInlineSmiCase(op)) {
+ EmitInlineSmiBinaryOp(expr->binary_operation(),
+ op,
+ mode,
+ expr->target(),
+ expr->value());
+ } else {
+ EmitBinaryOp(expr->binary_operation(), op, mode);
+ }
+
+ // Deoptimization point in case the binary operation may have side effects.
+ PrepareForBailout(expr->binary_operation(), TOS_REG);
+ } else {
+ VisitForAccumulatorValue(expr->value());
+ }
+
+ // Record source position before possible IC call.
+ SetSourcePosition(expr->position());
+
+ // Store the value.
+ switch (assign_type) {
+ case VARIABLE:
+ EmitVariableAssignment(expr->target()->AsVariableProxy()->var(),
+ expr->op());
+ PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+ context()->Plug(x0);
+ break;
+ case NAMED_PROPERTY:
+ EmitNamedPropertyAssignment(expr);
+ break;
+ case KEYED_PROPERTY:
+ EmitKeyedPropertyAssignment(expr);
+ break;
+ }
+}
+
+
+void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
+ SetSourcePosition(prop->position());
+ Literal* key = prop->key()->AsLiteral();
+ __ Mov(x2, Operand(key->value()));
+ // Call load IC. It has arguments receiver and property name x0 and x2.
+ CallLoadIC(NOT_CONTEXTUAL, prop->PropertyFeedbackId());
+}
+
+
+void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
+ SetSourcePosition(prop->position());
+ // Call keyed load IC. It has arguments key and receiver in r0 and r1.
+ Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
+ CallIC(ic, prop->PropertyFeedbackId());
+}
+
+
+void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
+ Token::Value op,
+ OverwriteMode mode,
+ Expression* left_expr,
+ Expression* right_expr) {
+ Label done, both_smis, stub_call;
+
+ // Get the arguments.
+ Register left = x1;
+ Register right = x0;
+ Register result = x0;
+ __ Pop(left);
+
+ // Perform combined smi check on both operands.
+ __ Orr(x10, left, right);
+ JumpPatchSite patch_site(masm_);
+ patch_site.EmitJumpIfSmi(x10, &both_smis);
+
+ __ Bind(&stub_call);
+ BinaryOpICStub stub(op, mode);
+ {
+ Assembler::BlockConstPoolScope scope(masm_);
+ CallIC(stub.GetCode(isolate()), expr->BinaryOperationFeedbackId());
+ patch_site.EmitPatchInfo();
+ }
+ __ B(&done);
+
+ __ Bind(&both_smis);
+ // Smi case. This code works in the same way as the smi-smi case in the type
+ // recording binary operation stub, see
+ // BinaryOpStub::GenerateSmiSmiOperation for comments.
+ // TODO(all): That doesn't exist any more. Where are the comments?
+ //
+ // The set of operations that needs to be supported here is controlled by
+ // FullCodeGenerator::ShouldInlineSmiCase().
+ switch (op) {
+ case Token::SAR:
+ __ Ubfx(right, right, kSmiShift, 5);
+ __ Asr(result, left, right);
+ __ Bic(result, result, kSmiShiftMask);
+ break;
+ case Token::SHL:
+ __ Ubfx(right, right, kSmiShift, 5);
+ __ Lsl(result, left, right);
+ break;
+ case Token::SHR: {
+ Label right_not_zero;
+ __ Cbnz(right, &right_not_zero);
+ __ Tbnz(left, kXSignBit, &stub_call);
+ __ Bind(&right_not_zero);
+ __ Ubfx(right, right, kSmiShift, 5);
+ __ Lsr(result, left, right);
+ __ Bic(result, result, kSmiShiftMask);
+ break;
+ }
+ case Token::ADD:
+ __ Adds(x10, left, right);
+ __ B(vs, &stub_call);
+ __ Mov(result, x10);
+ break;
+ case Token::SUB:
+ __ Subs(x10, left, right);
+ __ B(vs, &stub_call);
+ __ Mov(result, x10);
+ break;
+ case Token::MUL: {
+ Label not_minus_zero, done;
+ __ Smulh(x10, left, right);
+ __ Cbnz(x10, &not_minus_zero);
+ __ Eor(x11, left, right);
+ __ Tbnz(x11, kXSignBit, &stub_call);
+ STATIC_ASSERT(kSmiTag == 0);
+ __ Mov(result, x10);
+ __ B(&done);
+ __ Bind(&not_minus_zero);
+ __ Cls(x11, x10);
+ __ Cmp(x11, kXRegSize - kSmiShift);
+ __ B(lt, &stub_call);
+ __ SmiTag(result, x10);
+ __ Bind(&done);
+ break;
+ }
+ case Token::BIT_OR:
+ __ Orr(result, left, right);
+ break;
+ case Token::BIT_AND:
+ __ And(result, left, right);
+ break;
+ case Token::BIT_XOR:
+ __ Eor(result, left, right);
+ break;
+ default:
+ UNREACHABLE();
+ }
+
+ __ Bind(&done);
+ context()->Plug(x0);
+}
+
+
+void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr,
+ Token::Value op,
+ OverwriteMode mode) {
+ __ Pop(x1);
+ BinaryOpICStub stub(op, mode);
+ JumpPatchSite patch_site(masm_); // Unbound, signals no inlined smi code.
+ {
+ Assembler::BlockConstPoolScope scope(masm_);
+ CallIC(stub.GetCode(isolate()), expr->BinaryOperationFeedbackId());
+ patch_site.EmitPatchInfo();
+ }
+ context()->Plug(x0);
+}
+
+
+void FullCodeGenerator::EmitAssignment(Expression* expr) {
+ // Invalid left-hand sides are rewritten to have a 'throw
+ // ReferenceError' on the left-hand side.
+ if (!expr->IsValidLeftHandSide()) {
+ VisitForEffect(expr);
+ return;
+ }
+
+ // Left-hand side can only be a property, a global or a (parameter or local)
+ // slot.
+ enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY };
+ LhsKind assign_type = VARIABLE;
+ Property* prop = expr->AsProperty();
+ if (prop != NULL) {
+ assign_type = (prop->key()->IsPropertyName())
+ ? NAMED_PROPERTY
+ : KEYED_PROPERTY;
+ }
+
+ switch (assign_type) {
+ case VARIABLE: {
+ Variable* var = expr->AsVariableProxy()->var();
+ EffectContext context(this);
+ EmitVariableAssignment(var, Token::ASSIGN);
+ break;
+ }
+ case NAMED_PROPERTY: {
+ __ Push(x0); // Preserve value.
+ VisitForAccumulatorValue(prop->obj());
+ // TODO(all): We could introduce a VisitForRegValue(reg, expr) to avoid
+ // this copy.
+ __ Mov(x1, x0);
+ __ Pop(x0); // Restore value.
+ __ Mov(x2, Operand(prop->key()->AsLiteral()->value()));
+ CallStoreIC();
+ break;
+ }
+ case KEYED_PROPERTY: {
+ __ Push(x0); // Preserve value.
+ VisitForStackValue(prop->obj());
+ VisitForAccumulatorValue(prop->key());
+ __ Mov(x1, x0);
+ __ Pop(x2, x0);
+ Handle<Code> ic = is_classic_mode()
+ ? isolate()->builtins()->KeyedStoreIC_Initialize()
+ : isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
+ CallIC(ic);
+ break;
+ }
+ }
+ context()->Plug(x0);
+}
+
+
+void FullCodeGenerator::EmitVariableAssignment(Variable* var,
+ Token::Value op) {
+ ASM_LOCATION("FullCodeGenerator::EmitVariableAssignment");
+ if (var->IsUnallocated()) {
+ // Global var, const, or let.
+ __ Mov(x2, Operand(var->name()));
+ __ Ldr(x1, GlobalObjectMemOperand());
+ CallStoreIC();
+
+ } else if (op == Token::INIT_CONST) {
+ // Const initializers need a write barrier.
+ ASSERT(!var->IsParameter()); // No const parameters.
+ if (var->IsStackLocal()) {
+ Label skip;
+ __ Ldr(x1, StackOperand(var));
+ __ JumpIfNotRoot(x1, Heap::kTheHoleValueRootIndex, &skip);
+ __ Str(result_register(), StackOperand(var));
+ __ Bind(&skip);
+ } else {
+ ASSERT(var->IsContextSlot() || var->IsLookupSlot());
+ // Like var declarations, const declarations are hoisted to function
+ // scope. However, unlike var initializers, const initializers are
+ // able to drill a hole to that function context, even from inside a
+ // 'with' context. We thus bypass the normal static scope lookup for
+ // var->IsContextSlot().
+ __ Push(x0);
+ __ Mov(x0, Operand(var->name()));
+ __ Push(cp, x0); // Context and name.
+ __ CallRuntime(Runtime::kInitializeConstContextSlot, 3);
+ }
+
+ } else if (var->mode() == LET && op != Token::INIT_LET) {
+ // Non-initializing assignment to let variable needs a write barrier.
+ if (var->IsLookupSlot()) {
+ __ Push(x0, cp); // Context, value.
+ __ Mov(x11, Operand(var->name()));
+ __ Mov(x10, Operand(Smi::FromInt(language_mode())));
+ __ Push(x11, x10); // Strict mode, name.
+ __ CallRuntime(Runtime::kStoreContextSlot, 4);
+ } else {
+ ASSERT(var->IsStackAllocated() || var->IsContextSlot());
+ Label assign;
+ MemOperand location = VarOperand(var, x1);
+ __ Ldr(x10, location);
+ __ JumpIfNotRoot(x10, Heap::kTheHoleValueRootIndex, &assign);
+ __ Mov(x10, Operand(var->name()));
+ __ Push(x10);
+ __ CallRuntime(Runtime::kThrowReferenceError, 1);
+ // Perform the assignment.
+ __ Bind(&assign);
+ __ Str(result_register(), location);
+ if (var->IsContextSlot()) {
+ // RecordWrite may destroy all its register arguments.
+ __ Mov(x10, result_register());
+ int offset = Context::SlotOffset(var->index());
+ __ RecordWriteContextSlot(
+ x1, offset, x10, x11, kLRHasBeenSaved, kDontSaveFPRegs);
+ }
+ }
+
+ } else if (!var->is_const_mode() || op == Token::INIT_CONST_HARMONY) {
+ // Assignment to var or initializing assignment to let/const
+ // in harmony mode.
+ if (var->IsStackAllocated() || var->IsContextSlot()) {
+ MemOperand location = VarOperand(var, x1);
+ if (FLAG_debug_code && op == Token::INIT_LET) {
+ __ Ldr(x10, location);
+ __ CompareRoot(x10, Heap::kTheHoleValueRootIndex);
+ __ Check(eq, kLetBindingReInitialization);
+ }
+ // Perform the assignment.
+ __ Str(x0, location);
+ if (var->IsContextSlot()) {
+ __ Mov(x10, x0);
+ int offset = Context::SlotOffset(var->index());
+ __ RecordWriteContextSlot(
+ x1, offset, x10, x11, kLRHasBeenSaved, kDontSaveFPRegs);
+ }
+ } else {
+ ASSERT(var->IsLookupSlot());
+ __ Mov(x11, Operand(var->name()));
+ __ Mov(x10, Operand(Smi::FromInt(language_mode())));
+ // jssp[0] : mode.
+ // jssp[8] : name.
+ // jssp[16] : context.
+ // jssp[24] : value.
+ __ Push(x0, cp, x11, x10);
+ __ CallRuntime(Runtime::kStoreContextSlot, 4);
+ }
+ }
+ // Non-initializing assignments to consts are ignored.
+}
+
+
+void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
+ ASM_LOCATION("FullCodeGenerator::EmitNamedPropertyAssignment");
+ // Assignment to a property, using a named store IC.
+ Property* prop = expr->target()->AsProperty();
+ ASSERT(prop != NULL);
+ ASSERT(prop->key()->AsLiteral() != NULL);
+
+ // Record source code position before IC call.
+ SetSourcePosition(expr->position());
+ __ Mov(x2, Operand(prop->key()->AsLiteral()->value()));
+ __ Pop(x1);
+
+ CallStoreIC(expr->AssignmentFeedbackId());
+
+ PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+ context()->Plug(x0);
+}
+
+
+void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
+ ASM_LOCATION("FullCodeGenerator::EmitKeyedPropertyAssignment");
+ // Assignment to a property, using a keyed store IC.
+
+ // Record source code position before IC call.
+ SetSourcePosition(expr->position());
+ // TODO(all): Could we pass this in registers rather than on the stack?
+ __ Pop(x1, x2); // Key and object holding the property.
+
+ Handle<Code> ic = is_classic_mode()
+ ? isolate()->builtins()->KeyedStoreIC_Initialize()
+ : isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
+ CallIC(ic, expr->AssignmentFeedbackId());
+
+ PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+ context()->Plug(x0);
+}
+
+
+void FullCodeGenerator::VisitProperty(Property* expr) {
+ Comment cmnt(masm_, "[ Property");
+ Expression* key = expr->key();
+
+ if (key->IsPropertyName()) {
+ VisitForAccumulatorValue(expr->obj());
+ EmitNamedPropertyLoad(expr);
+ PrepareForBailoutForId(expr->LoadId(), TOS_REG);
+ context()->Plug(x0);
+ } else {
+ VisitForStackValue(expr->obj());
+ VisitForAccumulatorValue(expr->key());
+ __ Pop(x1);
+ EmitKeyedPropertyLoad(expr);
+ context()->Plug(x0);
+ }
+}
+
+
+void FullCodeGenerator::CallIC(Handle<Code> code,
+ TypeFeedbackId ast_id) {
+ ic_total_count_++;
+ // All calls must have a predictable size in full-codegen code to ensure that
+ // the debugger can patch them correctly.
+ __ Call(code, RelocInfo::CODE_TARGET, ast_id);
+}
+
+
+// Code common for calls using the IC.
+// Handles calls where the callee is either a global variable proxy or a
+// named property; arranges [function, receiver, args...] on the stack and
+// invokes CallFunctionStub.
+void FullCodeGenerator::EmitCallWithIC(Call* expr) {
+  ASM_LOCATION("EmitCallWithIC");
+
+  Expression* callee = expr->expression();
+  ZoneList<Expression*>* args = expr->arguments();
+  int arg_count = args->length();
+
+  CallFunctionFlags flags;
+  // Get the target function.
+  if (callee->IsVariableProxy()) {
+    { StackValueContext context(this);
+      EmitVariableLoad(callee->AsVariableProxy());
+      PrepareForBailout(callee, NO_REGISTERS);
+    }
+    // Push undefined as receiver. This is patched in the method prologue if it
+    // is a classic mode method.
+    __ Push(isolate()->factory()->undefined_value());
+    flags = NO_CALL_FUNCTION_FLAGS;
+  } else {
+    // Load the function from the receiver.
+    ASSERT(callee->IsProperty());
+    // Receiver is on top of the stack; copy it to x0 for the named load IC.
+    __ Peek(x0, 0);
+    EmitNamedPropertyLoad(callee->AsProperty());
+    PrepareForBailoutForId(callee->AsProperty()->LoadId(), TOS_REG);
+    // Push the target function under the receiver.
+    __ Pop(x10);
+    __ Push(x0, x10);
+    flags = CALL_AS_METHOD;
+  }
+
+  // Load the arguments.
+  { PreservePositionScope scope(masm()->positions_recorder());
+    for (int i = 0; i < arg_count; i++) {
+      VisitForStackValue(args->at(i));
+    }
+  }
+
+  // Record source position for debugger.
+  SetSourcePosition(expr->position());
+  CallFunctionStub stub(arg_count, flags);
+  // NOTE(review): this uses kPointerSize while EmitCallWithStub uses
+  // kXRegSizeInBytes for the same stack-slot arithmetic; identical on A64
+  // (both 8 bytes) but worth unifying — TODO confirm intended constant.
+  __ Peek(x1, (arg_count + 1) * kPointerSize);
+  __ CallStub(&stub);
+
+  RecordJSReturnSite(expr);
+
+  // Restore context register.
+  __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+
+  // Drop the function left on the stack and plug the call result.
+  context()->DropAndPlug(1, x0);
+}
+
+
+// Code common for calls using the IC.
+// Handles calls of the form obj[key](...): loads the callee via the keyed
+// load IC, then calls it as a method with obj as the receiver.
+void FullCodeGenerator::EmitKeyedCallWithIC(Call* expr,
+                                            Expression* key) {
+  // Load the key.
+  VisitForAccumulatorValue(key);
+
+  Expression* callee = expr->expression();
+  ZoneList<Expression*>* args = expr->arguments();
+  int arg_count = args->length();
+
+  // Load the function from the receiver.
+  ASSERT(callee->IsProperty());
+  // Receiver into x1; key is already in x0 for the keyed load IC.
+  __ Peek(x1, 0);
+  EmitKeyedPropertyLoad(callee->AsProperty());
+  PrepareForBailoutForId(callee->AsProperty()->LoadId(), TOS_REG);
+
+  // Push the target function under the receiver.
+  __ Pop(x10);
+  __ Push(x0, x10);
+
+  { PreservePositionScope scope(masm()->positions_recorder());
+    for (int i = 0; i < arg_count; i++) {
+      VisitForStackValue(args->at(i));
+    }
+  }
+
+  // Record source position for debugger.
+  SetSourcePosition(expr->position());
+  CallFunctionStub stub(arg_count, CALL_AS_METHOD);
+  // Reload the function (below receiver and args) into x1 for the stub.
+  __ Peek(x1, (arg_count + 1) * kPointerSize);
+  __ CallStub(&stub);
+
+  RecordJSReturnSite(expr);
+  // Restore context register.
+  __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+
+  context()->DropAndPlug(1, x0);
+}
+
+
+// Emits a call through CallFunctionStub with call-target recording enabled.
+// Expects function and receiver already on the stack; pushes the arguments,
+// seeds the feedback vector slot, and invokes the stub.
+void FullCodeGenerator::EmitCallWithStub(Call* expr) {
+  // Code common for calls using the call stub.
+  ZoneList<Expression*>* args = expr->arguments();
+  int arg_count = args->length();
+  { PreservePositionScope scope(masm()->positions_recorder());
+    for (int i = 0; i < arg_count; i++) {
+      VisitForStackValue(args->at(i));
+    }
+  }
+  // Record source position for debugger.
+  SetSourcePosition(expr->position());
+
+  // Mark the feedback slot uninitialized so the stub can record the target.
+  Handle<Object> uninitialized =
+      TypeFeedbackInfo::UninitializedSentinel(isolate());
+  StoreFeedbackVectorSlot(expr->CallFeedbackSlot(), uninitialized);
+  __ LoadObject(x2, FeedbackVector());
+  __ Mov(x3, Operand(Smi::FromInt(expr->CallFeedbackSlot())));
+
+  // Record call targets in unoptimized code.
+  CallFunctionStub stub(arg_count, RECORD_CALL_TARGET);
+  __ Peek(x1, (arg_count + 1) * kXRegSizeInBytes);
+  __ CallStub(&stub);
+  RecordJSReturnSite(expr);
+  // Restore context register.
+  __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+  context()->DropAndPlug(1, x0);
+}
+
+
+// Pushes the four extra arguments needed by %ResolvePossiblyDirectEval
+// (first eval argument or undefined, enclosing receiver, language mode,
+// scope start position) and performs the runtime call.  The callee copy
+// pushed by the caller makes up the fifth runtime argument.
+void FullCodeGenerator::EmitResolvePossiblyDirectEval(int arg_count) {
+  ASM_LOCATION("FullCodeGenerator::EmitResolvePossiblyDirectEval");
+  // Prepare to push a copy of the first argument or undefined if it doesn't
+  // exist.
+  if (arg_count > 0) {
+    __ Peek(x10, arg_count * kXRegSizeInBytes);
+  } else {
+    __ LoadRoot(x10, Heap::kUndefinedValueRootIndex);
+  }
+
+  // Prepare to push the receiver of the enclosing function.
+  int receiver_offset = 2 + info_->scope()->num_parameters();
+  __ Ldr(x11, MemOperand(fp, receiver_offset * kPointerSize));
+
+  // Push.
+  __ Push(x10, x11);
+
+  // Prepare to push the language mode.
+  __ Mov(x10, Operand(Smi::FromInt(language_mode())));
+  // Prepare to push the start position of the scope the calls resides in.
+  __ Mov(x11, Operand(Smi::FromInt(scope()->start_position())));
+
+  // Push.
+  __ Push(x10, x11);
+
+  // Do the runtime call.
+  __ CallRuntime(Runtime::kResolvePossiblyDirectEval, 5);
+}
+
+
+// Dispatches a call expression to the appropriate calling sequence based on
+// the call type: possible direct eval, global, lookup slot (dynamically
+// scoped), property (named/keyed), or an arbitrary expression.
+void FullCodeGenerator::VisitCall(Call* expr) {
+#ifdef DEBUG
+  // We want to verify that RecordJSReturnSite gets called on all paths
+  // through this function. Avoid early returns.
+  expr->return_is_recorded_ = false;
+#endif
+
+  Comment cmnt(masm_, "[ Call");
+  Expression* callee = expr->expression();
+  Call::CallType call_type = expr->GetCallType(isolate());
+
+  if (call_type == Call::POSSIBLY_EVAL_CALL) {
+    // In a call to eval, we first call %ResolvePossiblyDirectEval to
+    // resolve the function we need to call and the receiver of the
+    // call. Then we call the resolved function using the given
+    // arguments.
+    ZoneList<Expression*>* args = expr->arguments();
+    int arg_count = args->length();
+
+    {
+      PreservePositionScope pos_scope(masm()->positions_recorder());
+      VisitForStackValue(callee);
+      __ LoadRoot(x10, Heap::kUndefinedValueRootIndex);
+      __ Push(x10);  // Reserved receiver slot.
+
+      // Push the arguments.
+      for (int i = 0; i < arg_count; i++) {
+        VisitForStackValue(args->at(i));
+      }
+
+      // Push a copy of the function (found below the arguments) and
+      // resolve eval.
+      __ Peek(x10, (arg_count + 1) * kPointerSize);
+      __ Push(x10);
+      EmitResolvePossiblyDirectEval(arg_count);
+
+      // The runtime call returns a pair of values in x0 (function) and
+      // x1 (receiver). Touch up the stack with the right values.
+      __ PokePair(x1, x0, arg_count * kPointerSize);
+    }
+
+    // Record source position for debugger.
+    SetSourcePosition(expr->position());
+
+    // Call the evaluated function.
+    CallFunctionStub stub(arg_count, NO_CALL_FUNCTION_FLAGS);
+    __ Peek(x1, (arg_count + 1) * kXRegSizeInBytes);
+    __ CallStub(&stub);
+    RecordJSReturnSite(expr);
+    // Restore context register.
+    __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+    context()->DropAndPlug(1, x0);
+
+  } else if (call_type == Call::GLOBAL_CALL) {
+    EmitCallWithIC(expr);
+
+  } else if (call_type == Call::LOOKUP_SLOT_CALL) {
+    // Call to a lookup slot (dynamically introduced variable).
+    VariableProxy* proxy = callee->AsVariableProxy();
+    Label slow, done;
+
+    { PreservePositionScope scope(masm()->positions_recorder());
+      // Generate code for loading from variables potentially shadowed
+      // by eval-introduced variables.
+      EmitDynamicLookupFastCase(proxy->var(), NOT_INSIDE_TYPEOF, &slow, &done);
+    }
+
+    __ Bind(&slow);
+    // Call the runtime to find the function to call (returned in x0)
+    // and the object holding it (returned in x1).
+    __ Push(context_register());
+    __ Mov(x10, Operand(proxy->name()));
+    __ Push(x10);
+    __ CallRuntime(Runtime::kLoadContextSlot, 2);
+    __ Push(x0, x1);  // Function, receiver.
+
+    // If fast case code has been generated, emit code to push the
+    // function and receiver and have the slow path jump around this
+    // code.
+    if (done.is_linked()) {
+      Label call;
+      __ B(&call);
+      __ Bind(&done);
+      // Push function.
+      __ Push(x0);
+      // The receiver is implicitly the global receiver. Indicate this
+      // by passing the undefined to the call function stub.
+      __ LoadRoot(x1, Heap::kUndefinedValueRootIndex);
+      __ Push(x1);
+      __ Bind(&call);
+    }
+
+    // The receiver is either the global receiver or an object found
+    // by LoadContextSlot.
+    EmitCallWithStub(expr);
+  } else if (call_type == Call::PROPERTY_CALL) {
+    Property* property = callee->AsProperty();
+    { PreservePositionScope scope(masm()->positions_recorder());
+      VisitForStackValue(property->obj());
+    }
+    if (property->key()->IsPropertyName()) {
+      EmitCallWithIC(expr);
+    } else {
+      EmitKeyedCallWithIC(expr, property->key());
+    }
+
+  } else {
+    ASSERT(call_type == Call::OTHER_CALL);
+    // Call to an arbitrary expression not handled specially above.
+    { PreservePositionScope scope(masm()->positions_recorder());
+      VisitForStackValue(callee);
+    }
+    __ LoadRoot(x1, Heap::kUndefinedValueRootIndex);
+    __ Push(x1);
+    // Emit function call.
+    EmitCallWithStub(expr);
+  }
+
+#ifdef DEBUG
+  // RecordJSReturnSite should have been called.
+  ASSERT(expr->return_is_recorded_);
+#endif
+}
+
+
+// Emits a `new` expression: evaluates constructor and arguments, seeds the
+// feedback vector slot, and invokes CallConstructStub which handles
+// allocation and constructor invocation.
+void FullCodeGenerator::VisitCallNew(CallNew* expr) {
+  Comment cmnt(masm_, "[ CallNew");
+  // According to ECMA-262, section 11.2.2, page 44, the function
+  // expression in new calls must be evaluated before the
+  // arguments.
+
+  // Push constructor on the stack.  If it's not a function it's used as
+  // receiver for CALL_NON_FUNCTION, otherwise the value on the stack is
+  // ignored.
+  VisitForStackValue(expr->expression());
+
+  // Push the arguments ("left-to-right") on the stack.
+  ZoneList<Expression*>* args = expr->arguments();
+  int arg_count = args->length();
+  for (int i = 0; i < arg_count; i++) {
+    VisitForStackValue(args->at(i));
+  }
+
+  // Call the construct call builtin that handles allocation and
+  // constructor invocation.
+  SetSourcePosition(expr->position());
+
+  // Load function and argument count into x1 and x0.
+  __ Mov(x0, arg_count);
+  __ Peek(x1, arg_count * kXRegSizeInBytes);
+
+  // Record call targets in unoptimized code.
+  Handle<Object> uninitialized =
+      TypeFeedbackInfo::UninitializedSentinel(isolate());
+  StoreFeedbackVectorSlot(expr->CallNewFeedbackSlot(), uninitialized);
+  __ LoadObject(x2, FeedbackVector());
+  __ Mov(x3, Operand(Smi::FromInt(expr->CallNewFeedbackSlot())));
+
+  CallConstructStub stub(RECORD_CALL_TARGET);
+  __ Call(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL);
+  PrepareForBailoutForId(expr->ReturnId(), TOS_REG);
+  context()->Plug(x0);
+}
+
+
+// Inlined %_IsSmi: tests the Smi tag bit of the single argument and plugs
+// the resulting true/false branch into the current test context.
+void FullCodeGenerator::EmitIsSmi(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
+  ASSERT(args->length() == 1);
+
+  VisitForAccumulatorValue(args->at(0));
+
+  Label materialize_true, materialize_false;
+  Label* if_true = NULL;
+  Label* if_false = NULL;
+  Label* fall_through = NULL;
+  context()->PrepareTest(&materialize_true, &materialize_false,
+                         &if_true, &if_false, &fall_through);
+
+  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+  // A clear tag bit means Smi.
+  __ TestAndSplit(x0, kSmiTagMask, if_true, if_false, fall_through);
+
+  context()->Plug(if_true, if_false);
+}
+
+
+// Inlined %_IsNonNegativeSmi: true iff the argument is a Smi whose sign bit
+// is clear.  Tests tag bit and sign bit of the Smi payload in one go.
+void FullCodeGenerator::EmitIsNonNegativeSmi(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
+  ASSERT(args->length() == 1);
+
+  VisitForAccumulatorValue(args->at(0));
+
+  Label materialize_true, materialize_false;
+  Label* if_true = NULL;
+  Label* if_false = NULL;
+  Label* fall_through = NULL;
+  context()->PrepareTest(&materialize_true, &materialize_false,
+                         &if_true, &if_false, &fall_through);
+
+  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+  // Mask covers the Smi tag plus the Smi sign bit (bit 31 shifted into the
+  // tagged representation); both must be clear.
+  __ TestAndSplit(x0, kSmiTagMask | (0x80000000UL << kSmiShift), if_true,
+                  if_false, fall_through);
+
+  context()->Plug(if_true, if_false);
+}
+
+
+// Inlined %_IsObject: true for null and for non-undetectable heap objects
+// whose instance type lies in the non-callable spec-object range.
+void FullCodeGenerator::EmitIsObject(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
+  ASSERT(args->length() == 1);
+
+  VisitForAccumulatorValue(args->at(0));
+
+  Label materialize_true, materialize_false;
+  Label* if_true = NULL;
+  Label* if_false = NULL;
+  Label* fall_through = NULL;
+  context()->PrepareTest(&materialize_true, &materialize_false,
+                         &if_true, &if_false, &fall_through);
+
+  __ JumpIfSmi(x0, if_false);
+  __ JumpIfRoot(x0, Heap::kNullValueRootIndex, if_true);
+  __ Ldr(x10, FieldMemOperand(x0, HeapObject::kMapOffset));
+  // Undetectable objects behave like undefined when tested with typeof.
+  __ Ldrb(x11, FieldMemOperand(x10, Map::kBitFieldOffset));
+  __ Tbnz(x11, Map::kIsUndetectable, if_false);
+  __ Ldrb(x12, FieldMemOperand(x10, Map::kInstanceTypeOffset));
+  // Range check: FIRST_ <= type <= LAST_NONCALLABLE_SPEC_OBJECT_TYPE.
+  __ Cmp(x12, FIRST_NONCALLABLE_SPEC_OBJECT_TYPE);
+  __ B(lt, if_false);
+  __ Cmp(x12, LAST_NONCALLABLE_SPEC_OBJECT_TYPE);
+  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+  Split(le, if_true, if_false, fall_through);
+
+  context()->Plug(if_true, if_false);
+}
+
+
+// Inlined %_IsSpecObject: true iff the argument is a heap object whose
+// instance type is at least FIRST_SPEC_OBJECT_TYPE.
+void FullCodeGenerator::EmitIsSpecObject(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
+  ASSERT(args->length() == 1);
+
+  VisitForAccumulatorValue(args->at(0));
+
+  Label materialize_true, materialize_false;
+  Label* if_true = NULL;
+  Label* if_false = NULL;
+  Label* fall_through = NULL;
+  context()->PrepareTest(&materialize_true, &materialize_false,
+                         &if_true, &if_false, &fall_through);
+
+  __ JumpIfSmi(x0, if_false);
+  __ CompareObjectType(x0, x10, x11, FIRST_SPEC_OBJECT_TYPE);
+  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+  Split(ge, if_true, if_false, fall_through);
+
+  context()->Plug(if_true, if_false);
+}
+
+
+// Inlined %_IsUndetectableObject: true iff the argument is a heap object
+// whose map has the undetectable bit set.
+void FullCodeGenerator::EmitIsUndetectableObject(CallRuntime* expr) {
+  ASM_LOCATION("FullCodeGenerator::EmitIsUndetectableObject");
+  ZoneList<Expression*>* args = expr->arguments();
+  ASSERT(args->length() == 1);
+
+  VisitForAccumulatorValue(args->at(0));
+
+  Label materialize_true, materialize_false;
+  Label* if_true = NULL;
+  Label* if_false = NULL;
+  Label* fall_through = NULL;
+  context()->PrepareTest(&materialize_true, &materialize_false,
+                         &if_true, &if_false, &fall_through);
+
+  __ JumpIfSmi(x0, if_false);
+  __ Ldr(x10, FieldMemOperand(x0, HeapObject::kMapOffset));
+  __ Ldrb(x11, FieldMemOperand(x10, Map::kBitFieldOffset));
+  __ Tst(x11, 1 << Map::kIsUndetectable);
+  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+  Split(ne, if_true, if_false, fall_through);
+
+  context()->Plug(if_true, if_false);
+}
+
+
+// Inlined check that a String wrapper object is safe for the default
+// [[DefaultValue]] behaviour: no own "valueOf" property and an unmodified
+// String.prototype.  Caches a positive result in the map's bitfield2 so the
+// descriptor scan only runs once per map.
+void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
+    CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
+  ASSERT(args->length() == 1);
+  VisitForAccumulatorValue(args->at(0));
+
+  Label materialize_true, materialize_false, skip_lookup;
+  Label* if_true = NULL;
+  Label* if_false = NULL;
+  Label* fall_through = NULL;
+  context()->PrepareTest(&materialize_true, &materialize_false,
+                         &if_true, &if_false, &fall_through);
+
+  Register object = x0;
+  __ AssertNotSmi(object);
+
+  Register map = x10;
+  Register bitfield2 = x11;
+  __ Ldr(map, FieldMemOperand(object, HeapObject::kMapOffset));
+  __ Ldrb(bitfield2, FieldMemOperand(map, Map::kBitField2Offset));
+  // Fast path: map already marked safe — skip the descriptor scan.
+  __ Tbnz(bitfield2, Map::kStringWrapperSafeForDefaultValueOf, &skip_lookup);
+
+  // Check for fast case object. Generate false result for slow case object.
+  Register props = x12;
+  Register props_map = x12;
+  Register hash_table_map = x13;
+  __ Ldr(props, FieldMemOperand(object, JSObject::kPropertiesOffset));
+  __ Ldr(props_map, FieldMemOperand(props, HeapObject::kMapOffset));
+  __ LoadRoot(hash_table_map, Heap::kHashTableMapRootIndex);
+  __ Cmp(props_map, hash_table_map);
+  __ B(eq, if_false);
+
+  // Look for valueOf name in the descriptor array, and indicate false if found.
+  // Since we omit an enumeration index check, if it is added via a transition
+  // that shares its descriptor array, this is a false positive.
+  Label loop, done;
+
+  // Skip loop if no descriptors are valid.
+  Register descriptors = x12;
+  Register descriptors_length = x13;
+  __ NumberOfOwnDescriptors(descriptors_length, map);
+  __ Cbz(descriptors_length, &done);
+
+  __ LoadInstanceDescriptors(map, descriptors);
+
+  // Calculate the end of the descriptor array.
+  Register descriptors_end = x14;
+  __ Mov(x15, DescriptorArray::kDescriptorSize);
+  __ Mul(descriptors_length, descriptors_length, x15);
+  // Calculate location of the first key name.
+  __ Add(descriptors, descriptors,
+         DescriptorArray::kFirstOffset - kHeapObjectTag);
+  // Calculate the end of the descriptor array.
+  __ Add(descriptors_end, descriptors,
+         Operand(descriptors_length, LSL, kPointerSizeLog2));
+
+  // Loop through all the keys in the descriptor array. If one of these is the
+  // string "valueOf" the result is false.
+  // TODO(all): optimise this loop to combine the add and ldr into an
+  // addressing mode.
+  Register valueof_string = x1;
+  __ Mov(valueof_string, Operand(isolate()->factory()->value_of_string()));
+  __ Bind(&loop);
+  __ Ldr(x15, MemOperand(descriptors));
+  __ Cmp(x15, valueof_string);
+  __ B(eq, if_false);
+  __ Add(descriptors, descriptors,
+         DescriptorArray::kDescriptorSize * kPointerSize);
+  __ Cmp(descriptors, descriptors_end);
+  __ B(ne, &loop);
+
+  __ Bind(&done);
+
+  // Set the bit in the map to indicate that there is no local valueOf field.
+  __ Ldrb(x2, FieldMemOperand(map, Map::kBitField2Offset));
+  __ Orr(x2, x2, 1 << Map::kStringWrapperSafeForDefaultValueOf);
+  __ Strb(x2, FieldMemOperand(map, Map::kBitField2Offset));
+
+  __ Bind(&skip_lookup);
+
+  // If a valueOf property is not found on the object check that its prototype
+  // is the unmodified String prototype. If not result is false.
+  Register prototype = x1;
+  Register global_idx = x2;
+  Register native_context = x2;
+  Register string_proto = x3;
+  Register proto_map = x4;
+  __ Ldr(prototype, FieldMemOperand(map, Map::kPrototypeOffset));
+  __ JumpIfSmi(prototype, if_false);
+  __ Ldr(proto_map, FieldMemOperand(prototype, HeapObject::kMapOffset));
+  __ Ldr(global_idx, GlobalObjectMemOperand());
+  __ Ldr(native_context,
+         FieldMemOperand(global_idx, GlobalObject::kNativeContextOffset));
+  __ Ldr(string_proto,
+         ContextMemOperand(native_context,
+                           Context::STRING_FUNCTION_PROTOTYPE_MAP_INDEX));
+  __ Cmp(proto_map, string_proto);
+
+  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+  Split(eq, if_true, if_false, fall_through);
+
+  context()->Plug(if_true, if_false);
+}
+
+
+// Inlined %_IsFunction: true iff the argument is a JSFunction heap object.
+void FullCodeGenerator::EmitIsFunction(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
+  ASSERT(args->length() == 1);
+
+  VisitForAccumulatorValue(args->at(0));
+
+  Label materialize_true, materialize_false;
+  Label* if_true = NULL;
+  Label* if_false = NULL;
+  Label* fall_through = NULL;
+  context()->PrepareTest(&materialize_true, &materialize_false,
+                         &if_true, &if_false, &fall_through);
+
+  __ JumpIfSmi(x0, if_false);
+  __ CompareObjectType(x0, x10, x11, JS_FUNCTION_TYPE);
+  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+  Split(eq, if_true, if_false, fall_through);
+
+  context()->Plug(if_true, if_false);
+}
+
+
+// Inlined %_IsMinusZero: true iff the argument is a HeapNumber whose bit
+// pattern is exactly 0x8000000000000000 (IEEE-754 -0.0).
+void FullCodeGenerator::EmitIsMinusZero(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
+  ASSERT(args->length() == 1);
+
+  VisitForAccumulatorValue(args->at(0));
+
+  Label materialize_true, materialize_false;
+  Label* if_true = NULL;
+  Label* if_false = NULL;
+  Label* fall_through = NULL;
+  context()->PrepareTest(&materialize_true, &materialize_false,
+                         &if_true, &if_false, &fall_through);
+
+  // Only a HeapNumber can be -0.0, so return false if we have something else.
+  __ CheckMap(x0, x1, Heap::kHeapNumberMapRootIndex, if_false, DO_SMI_CHECK);
+
+  // Test the bit pattern.
+  __ Ldr(x10, FieldMemOperand(x0, HeapNumber::kValueOffset));
+  // Subtracting 1 from INT64_MIN (0x8000000000000000) is the only case that
+  // sets the overflow flag, so a 'vs' branch detects exactly -0.0.
+  __ Cmp(x10, 1);  // Set V on 0x8000000000000000.
+
+  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+  Split(vs, if_true, if_false, fall_through);
+
+  context()->Plug(if_true, if_false);
+}
+
+
+// Inlined %_IsArray: true iff the argument is a JSArray heap object.
+void FullCodeGenerator::EmitIsArray(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
+  ASSERT(args->length() == 1);
+
+  VisitForAccumulatorValue(args->at(0));
+
+  Label materialize_true, materialize_false;
+  Label* if_true = NULL;
+  Label* if_false = NULL;
+  Label* fall_through = NULL;
+  context()->PrepareTest(&materialize_true, &materialize_false,
+                         &if_true, &if_false, &fall_through);
+
+  __ JumpIfSmi(x0, if_false);
+  __ CompareObjectType(x0, x10, x11, JS_ARRAY_TYPE);
+  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+  Split(eq, if_true, if_false, fall_through);
+
+  context()->Plug(if_true, if_false);
+}
+
+
+// Inlined %_IsRegExp: true iff the argument is a JSRegExp heap object.
+void FullCodeGenerator::EmitIsRegExp(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
+  ASSERT(args->length() == 1);
+
+  VisitForAccumulatorValue(args->at(0));
+
+  Label materialize_true, materialize_false;
+  Label* if_true = NULL;
+  Label* if_false = NULL;
+  Label* fall_through = NULL;
+  context()->PrepareTest(&materialize_true, &materialize_false,
+                         &if_true, &if_false, &fall_through);
+
+  __ JumpIfSmi(x0, if_false);
+  __ CompareObjectType(x0, x10, x11, JS_REGEXP_TYPE);
+  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+  Split(eq, if_true, if_false, fall_through);
+
+  context()->Plug(if_true, if_false);
+}
+
+
+
+// Inlined %_IsConstructCall: true iff the calling frame (skipping an
+// arguments adaptor frame if present) carries the CONSTRUCT marker.
+void FullCodeGenerator::EmitIsConstructCall(CallRuntime* expr) {
+  ASSERT(expr->arguments()->length() == 0);
+
+  Label materialize_true, materialize_false;
+  Label* if_true = NULL;
+  Label* if_false = NULL;
+  Label* fall_through = NULL;
+  context()->PrepareTest(&materialize_true, &materialize_false,
+                         &if_true, &if_false, &fall_through);
+
+  // Get the frame pointer for the calling frame.
+  __ Ldr(x2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+
+  // Skip the arguments adaptor frame if it exists.
+  Label check_frame_marker;
+  __ Ldr(x1, MemOperand(x2, StandardFrameConstants::kContextOffset));
+  __ Cmp(x1, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+  __ B(ne, &check_frame_marker);
+  __ Ldr(x2, MemOperand(x2, StandardFrameConstants::kCallerFPOffset));
+
+  // Check the marker in the calling frame.
+  __ Bind(&check_frame_marker);
+  __ Ldr(x1, MemOperand(x2, StandardFrameConstants::kMarkerOffset));
+  __ Cmp(x1, Operand(Smi::FromInt(StackFrame::CONSTRUCT)));
+  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+  Split(eq, if_true, if_false, fall_through);
+
+  context()->Plug(if_true, if_false);
+}
+
+
+// Inlined %_ObjectEquals: raw pointer/value identity comparison of the two
+// arguments (no type coercion).
+void FullCodeGenerator::EmitObjectEquals(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
+  ASSERT(args->length() == 2);
+
+  // Load the two objects into registers and perform the comparison.
+  VisitForStackValue(args->at(0));
+  VisitForAccumulatorValue(args->at(1));
+
+  Label materialize_true, materialize_false;
+  Label* if_true = NULL;
+  Label* if_false = NULL;
+  Label* fall_through = NULL;
+  context()->PrepareTest(&materialize_true, &materialize_false,
+                         &if_true, &if_false, &fall_through);
+
+  __ Pop(x1);
+  __ Cmp(x0, x1);
+  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+  Split(eq, if_true, if_false, fall_through);
+
+  context()->Plug(if_true, if_false);
+}
+
+
+// Inlined %_Arguments(index): reads one element of the current function's
+// arguments via ArgumentsAccessStub (key in x1, parameter count in x0).
+void FullCodeGenerator::EmitArguments(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
+  ASSERT(args->length() == 1);
+
+  // ArgumentsAccessStub expects the key in x1.
+  VisitForAccumulatorValue(args->at(0));
+  __ Mov(x1, x0)
+  __ Mov(x0, Operand(Smi::FromInt(info_->scope()->num_parameters())));
+  ArgumentsAccessStub stub(ArgumentsAccessStub::READ_ELEMENT);
+  __ CallStub(&stub);
+  context()->Plug(x0);
+}
+
+
+// Inlined %_ArgumentsLength: returns the formal parameter count, or the
+// actual argument count read from an arguments adaptor frame if one exists.
+void FullCodeGenerator::EmitArgumentsLength(CallRuntime* expr) {
+  ASSERT(expr->arguments()->length() == 0);
+  Label exit;
+  // Get the number of formal parameters.
+  __ Mov(x0, Operand(Smi::FromInt(info_->scope()->num_parameters())));
+
+  // Check if the calling frame is an arguments adaptor frame.
+  __ Ldr(x12, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+  __ Ldr(x13, MemOperand(x12, StandardFrameConstants::kContextOffset));
+  __ Cmp(x13, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+  __ B(ne, &exit);
+
+  // Arguments adaptor case: Read the arguments length from the
+  // adaptor frame.
+  __ Ldr(x0, MemOperand(x12, ArgumentsAdaptorFrameConstants::kLengthOffset));
+
+  __ Bind(&exit);
+  context()->Plug(x0);
+}
+
+
+// Inlined %_ClassOf: returns the class-name string of the argument — null
+// for non-objects, 'Function' for callables, the constructor's instance
+// class name for ordinary objects, and 'Object' when the constructor is not
+// a JSFunction.
+void FullCodeGenerator::EmitClassOf(CallRuntime* expr) {
+  ASM_LOCATION("FullCodeGenerator::EmitClassOf");
+  ZoneList<Expression*>* args = expr->arguments();
+  ASSERT(args->length() == 1);
+  Label done, null, function, non_function_constructor;
+
+  VisitForAccumulatorValue(args->at(0));
+
+  // If the object is a smi, we return null.
+  __ JumpIfSmi(x0, &null);
+
+  // Check that the object is a JS object but take special care of JS
+  // functions to make sure they have 'Function' as their class.
+  // Assume that there are only two callable types, and one of them is at
+  // either end of the type range for JS object types. Saves extra comparisons.
+  STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
+  __ CompareObjectType(x0, x10, x11, FIRST_SPEC_OBJECT_TYPE);
+  // x10: object's map.
+  // x11: object's type.
+  __ B(lt, &null);
+  STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE ==
+                FIRST_SPEC_OBJECT_TYPE + 1);
+  __ B(eq, &function);
+
+  __ Cmp(x11, LAST_SPEC_OBJECT_TYPE);
+  STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE ==
+                LAST_SPEC_OBJECT_TYPE - 1);
+  __ B(eq, &function);
+  // Assume that there is no larger type.
+  STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE == LAST_TYPE - 1);
+
+  // Check if the constructor in the map is a JS function.
+  __ Ldr(x12, FieldMemOperand(x10, Map::kConstructorOffset));
+  __ JumpIfNotObjectType(x12, x13, x14, JS_FUNCTION_TYPE,
+                         &non_function_constructor);
+
+  // x12 now contains the constructor function. Grab the
+  // instance class name from there.
+  __ Ldr(x13, FieldMemOperand(x12, JSFunction::kSharedFunctionInfoOffset));
+  __ Ldr(x0,
+         FieldMemOperand(x13, SharedFunctionInfo::kInstanceClassNameOffset));
+  __ B(&done);
+
+  // Functions have class 'Function'.
+  __ Bind(&function);
+  __ LoadRoot(x0, Heap::kfunction_class_stringRootIndex);
+  __ B(&done);
+
+  // Objects with a non-function constructor have class 'Object'.
+  __ Bind(&non_function_constructor);
+  __ LoadRoot(x0, Heap::kObject_stringRootIndex);
+  __ B(&done);
+
+  // Non-JS objects have class null.
+  __ Bind(&null);
+  __ LoadRoot(x0, Heap::kNullValueRootIndex);
+
+  // All done.
+  __ Bind(&done);
+
+  context()->Plug(x0);
+}
+
+
+// Inlined %_Log: conditionally emits a Runtime::kLog call depending on the
+// logging flags at compile time; always leaves undefined as the result.
+void FullCodeGenerator::EmitLog(CallRuntime* expr) {
+  // Conditionally generate a log call.
+  // Args:
+  //   0 (literal string): The type of logging (corresponds to the flags).
+  //     This is used to determine whether or not to generate the log call.
+  //   1 (string): Format string.  Access the string at argument index 2
+  //     with '%2s' (see Logger::LogRuntime for all the formats).
+  //   2 (array): Arguments to the format string.
+  ZoneList<Expression*>* args = expr->arguments();
+  ASSERT_EQ(args->length(), 3);
+  if (CodeGenerator::ShouldGenerateLog(isolate(), args->at(0))) {
+    VisitForStackValue(args->at(1));
+    VisitForStackValue(args->at(2));
+    __ CallRuntime(Runtime::kLog, 2);
+  }
+
+  // Finally, we're expected to leave a value on the top of the stack.
+  __ LoadRoot(x0, Heap::kUndefinedValueRootIndex);
+  context()->Plug(x0);
+}
+
+
+// Inlined %_SubString(string, from, to): delegates to SubStringStub with the
+// three arguments on the stack.
+void FullCodeGenerator::EmitSubString(CallRuntime* expr) {
+  // Load the arguments on the stack and call the stub.
+  SubStringStub stub;
+  ZoneList<Expression*>* args = expr->arguments();
+  ASSERT(args->length() == 3);
+  VisitForStackValue(args->at(0));
+  VisitForStackValue(args->at(1));
+  VisitForStackValue(args->at(2));
+  __ CallStub(&stub);
+  context()->Plug(x0);
+}
+
+
+// Inlined %_RegExpExec: delegates to RegExpExecStub with the four arguments
+// (regexp, subject, index, last-match-info) on the stack.
+void FullCodeGenerator::EmitRegExpExec(CallRuntime* expr) {
+  // Load the arguments on the stack and call the stub.
+  RegExpExecStub stub;
+  ZoneList<Expression*>* args = expr->arguments();
+  ASSERT(args->length() == 4);
+  VisitForStackValue(args->at(0));
+  VisitForStackValue(args->at(1));
+  VisitForStackValue(args->at(2));
+  VisitForStackValue(args->at(3));
+  __ CallStub(&stub);
+  context()->Plug(x0);
+}
+
+
+// Inlined %_ValueOf: unwraps a JSValue wrapper object to its boxed value;
+// any other argument (Smi or non-JSValue object) is returned unchanged.
+void FullCodeGenerator::EmitValueOf(CallRuntime* expr) {
+  ASM_LOCATION("FullCodeGenerator::EmitValueOf");
+  ZoneList<Expression*>* args = expr->arguments();
+  ASSERT(args->length() == 1);
+  VisitForAccumulatorValue(args->at(0));  // Load the object.
+
+  Label done;
+  // If the object is a smi return the object.
+  __ JumpIfSmi(x0, &done);
+  // If the object is not a value type, return the object.
+  __ JumpIfNotObjectType(x0, x10, x11, JS_VALUE_TYPE, &done);
+  __ Ldr(x0, FieldMemOperand(x0, JSValue::kValueOffset));
+
+  __ Bind(&done);
+  context()->Plug(x0);
+}
+
+
+// Inlined %_DateField(date, index): reads a field of a JSDate.  Index 0 (the
+// time value) is always read directly; cacheable fields are read inline when
+// the date cache stamp matches, otherwise fall back to a C function.  Throws
+// via the runtime when the argument is not a JSDate.
+void FullCodeGenerator::EmitDateField(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
+  ASSERT(args->length() == 2);
+  ASSERT_NE(NULL, args->at(1)->AsLiteral());
+  Smi* index = Smi::cast(*(args->at(1)->AsLiteral()->value()));
+
+  VisitForAccumulatorValue(args->at(0));  // Load the object.
+
+  Label runtime, done, not_date_object;
+  Register object = x0;
+  Register result = x0;
+  Register stamp_addr = x10;
+  Register stamp_cache = x11;
+
+  __ JumpIfSmi(object, &not_date_object);
+  __ JumpIfNotObjectType(object, x10, x10, JS_DATE_TYPE, &not_date_object);
+
+  if (index->value() == 0) {
+    __ Ldr(result, FieldMemOperand(object, JSDate::kValueOffset));
+    __ B(&done);
+  } else {
+    if (index->value() < JSDate::kFirstUncachedField) {
+      // Fast path: the cached field is valid while the global date cache
+      // stamp matches the stamp stored in the object.
+      ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
+      __ Mov(x10, Operand(stamp));
+      __ Ldr(stamp_addr, MemOperand(x10));
+      __ Ldr(stamp_cache, FieldMemOperand(object, JSDate::kCacheStampOffset));
+      __ Cmp(stamp_addr, stamp_cache);
+      __ B(ne, &runtime);
+      __ Ldr(result, FieldMemOperand(object, JSDate::kValueOffset +
+                                             kPointerSize * index->value()));
+      __ B(&done);
+    }
+
+    // Slow path: recompute via the C date-field function (object in x0,
+    // field index in x1).
+    __ Bind(&runtime);
+    __ Mov(x1, Operand(index));
+    __ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
+    __ B(&done);
+  }
+
+  __ Bind(&not_date_object);
+  __ CallRuntime(Runtime::kThrowNotDateError, 0);
+  __ Bind(&done);
+  context()->Plug(x0);
+}
+
+
+// Inlined %_OneByteSeqStringSetChar(string, index, value): stores a single
+// byte into a sequential one-byte string.  Index and value arrive as Smis;
+// debug builds verify the string's representation first.
+void FullCodeGenerator::EmitOneByteSeqStringSetChar(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
+  ASSERT_EQ(3, args->length());
+
+  Register string = x0;
+  Register index = x1;
+  Register value = x2;
+  Register scratch = x10;
+
+  VisitForStackValue(args->at(1));  // index
+  VisitForStackValue(args->at(2));  // value
+  VisitForAccumulatorValue(args->at(0));  // string
+  __ Pop(value, index);
+
+  if (FLAG_debug_code) {
+    __ AssertSmi(value, kNonSmiValue);
+    __ AssertSmi(index, kNonSmiIndex);
+    static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
+    __ EmitSeqStringSetCharCheck(string, index, kIndexIsSmi, scratch,
+                                 one_byte_seq_type);
+  }
+
+  __ Add(scratch, string, SeqOneByteString::kHeaderSize - kHeapObjectTag);
+  __ SmiUntag(value);
+  __ SmiUntag(index);
+  __ Strb(value, MemOperand(scratch, index));
+  context()->Plug(string);
+}
+
+
+// Inlined %_TwoByteSeqStringSetChar(string, index, value): stores a single
+// 16-bit code unit into a sequential two-byte string (index scaled by 2).
+void FullCodeGenerator::EmitTwoByteSeqStringSetChar(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
+  ASSERT_EQ(3, args->length());
+
+  Register string = x0;
+  Register index = x1;
+  Register value = x2;
+  Register scratch = x10;
+
+  VisitForStackValue(args->at(1));  // index
+  VisitForStackValue(args->at(2));  // value
+  VisitForAccumulatorValue(args->at(0));  // string
+  __ Pop(value, index);
+
+  if (FLAG_debug_code) {
+    __ AssertSmi(value, kNonSmiValue);
+    __ AssertSmi(index, kNonSmiIndex);
+    static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
+    __ EmitSeqStringSetCharCheck(string, index, kIndexIsSmi, scratch,
+                                 two_byte_seq_type);
+  }
+
+  __ Add(scratch, string, SeqTwoByteString::kHeaderSize - kHeapObjectTag);
+  __ SmiUntag(value);
+  __ SmiUntag(index);
+  __ Strh(value, MemOperand(scratch, index, LSL, 1));
+  context()->Plug(string);
+}
+
+
+// Inlined %_MathPow(base, exponent): delegates to MathPowStub with both
+// arguments on the stack.
+void FullCodeGenerator::EmitMathPow(CallRuntime* expr) {
+  // Load the arguments on the stack and call the MathPow stub.
+  ZoneList<Expression*>* args = expr->arguments();
+  ASSERT(args->length() == 2);
+  VisitForStackValue(args->at(0));
+  VisitForStackValue(args->at(1));
+  MathPowStub stub(MathPowStub::ON_STACK);
+  __ CallStub(&stub);
+  context()->Plug(x0);
+}
+
+
+// Inlined %_SetValueOf(object, value): stores value into a JSValue wrapper
+// (with write barrier); if object is not a JSValue, returns value untouched.
+void FullCodeGenerator::EmitSetValueOf(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
+  ASSERT(args->length() == 2);
+  VisitForStackValue(args->at(0));  // Load the object.
+  VisitForAccumulatorValue(args->at(1));  // Load the value.
+  __ Pop(x1);
+  // x0 = value.
+  // x1 = object.
+
+  Label done;
+  // If the object is a smi, return the value.
+  __ JumpIfSmi(x1, &done);
+
+  // If the object is not a value type, return the value.
+  __ JumpIfNotObjectType(x1, x10, x11, JS_VALUE_TYPE, &done);
+
+  // Store the value.
+  __ Str(x0, FieldMemOperand(x1, JSValue::kValueOffset));
+  // Update the write barrier.  Save the value as it will be
+  // overwritten by the write barrier code and is needed afterward.
+  __ Mov(x10, x0);
+  __ RecordWriteField(
+      x1, JSValue::kValueOffset, x10, x11, kLRHasBeenSaved, kDontSaveFPRegs);
+
+  __ Bind(&done);
+  context()->Plug(x0);
+}
+
+
+// Inline %_NumberToString(number): delegates to NumberToStringStub with the
+// argument in x0. Result string ends up in x0.
+void FullCodeGenerator::EmitNumberToString(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
+  ASSERT_EQ(args->length(), 1);
+
+  // Load the argument into x0 and call the stub.
+  VisitForAccumulatorValue(args->at(0));
+
+  NumberToStringStub stub;
+  __ CallStub(&stub);
+  context()->Plug(x0);
+}
+
+
+// Inline %_StringCharFromCode(code): converts a character code (x0) into a
+// one-character string (x1). The generator emits a fast path; its slow path
+// (non-smi or out-of-cache codes) is appended after the jump to 'done'.
+void FullCodeGenerator::EmitStringCharFromCode(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
+  ASSERT(args->length() == 1);
+
+  VisitForAccumulatorValue(args->at(0));
+
+  Label done;
+  Register code = x0;
+  Register result = x1;
+
+  StringCharFromCodeGenerator generator(code, result);
+  generator.GenerateFast(masm_);
+  __ B(&done);
+
+  NopRuntimeCallHelper call_helper;
+  generator.GenerateSlow(masm_, call_helper);
+
+  __ Bind(&done);
+  context()->Plug(result);
+}
+
+
+// Inline %_StringCharCodeAt(string, index): loads the character code at
+// 'index' into x3. Out-of-range indices yield NaN (per spec); a non-smi or
+// unconverted index loads undefined to trigger conversion in the slow path.
+void FullCodeGenerator::EmitStringCharCodeAt(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
+  ASSERT(args->length() == 2);
+
+  VisitForStackValue(args->at(0));
+  VisitForAccumulatorValue(args->at(1));
+
+  Register object = x1;
+  Register index = x0;
+  Register result = x3;
+
+  __ Pop(object);
+
+  Label need_conversion;
+  Label index_out_of_range;
+  Label done;
+  StringCharCodeAtGenerator generator(object,
+                                      index,
+                                      result,
+                                      &need_conversion,
+                                      &need_conversion,
+                                      &index_out_of_range,
+                                      STRING_INDEX_IS_NUMBER);
+  generator.GenerateFast(masm_);
+  __ B(&done);
+
+  __ Bind(&index_out_of_range);
+  // When the index is out of range, the spec requires us to return NaN.
+  __ LoadRoot(result, Heap::kNanValueRootIndex);
+  __ B(&done);
+
+  __ Bind(&need_conversion);
+  // Load the undefined value into the result register, which will
+  // trigger conversion.
+  __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
+  __ B(&done);
+
+  NopRuntimeCallHelper call_helper;
+  generator.GenerateSlow(masm_, call_helper);
+
+  __ Bind(&done);
+  context()->Plug(result);
+}
+
+
+// Inline %_StringCharAt(string, index): loads the one-character string at
+// 'index' into x0. Out-of-range indices yield the empty string (per spec);
+// an index needing conversion is replaced by smi zero to trigger the slow
+// path. x3 is the generator's intermediate char-code register.
+void FullCodeGenerator::EmitStringCharAt(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
+  ASSERT(args->length() == 2);
+
+  VisitForStackValue(args->at(0));
+  VisitForAccumulatorValue(args->at(1));
+
+  Register object = x1;
+  Register index = x0;
+  Register result = x0;
+
+  __ Pop(object);
+
+  Label need_conversion;
+  Label index_out_of_range;
+  Label done;
+  StringCharAtGenerator generator(object,
+                                  index,
+                                  x3,
+                                  result,
+                                  &need_conversion,
+                                  &need_conversion,
+                                  &index_out_of_range,
+                                  STRING_INDEX_IS_NUMBER);
+  generator.GenerateFast(masm_);
+  __ B(&done);
+
+  __ Bind(&index_out_of_range);
+  // When the index is out of range, the spec requires us to return
+  // the empty string.
+  __ LoadRoot(result, Heap::kempty_stringRootIndex);
+  __ B(&done);
+
+  __ Bind(&need_conversion);
+  // Move smi zero into the result register, which will trigger conversion.
+  __ Mov(result, Operand(Smi::FromInt(0)));
+  __ B(&done);
+
+  NopRuntimeCallHelper call_helper;
+  generator.GenerateSlow(masm_, call_helper);
+
+  __ Bind(&done);
+  context()->Plug(result);
+}
+
+
+// Inline %_StringAdd(left, right): concatenates two strings via StringAddStub
+// with left in x1 and right in x0; the stub checks both operands
+// (STRING_ADD_CHECK_BOTH) and allocates in new space (NOT_TENURED).
+void FullCodeGenerator::EmitStringAdd(CallRuntime* expr) {
+  ASM_LOCATION("FullCodeGenerator::EmitStringAdd");
+  ZoneList<Expression*>* args = expr->arguments();
+  ASSERT_EQ(2, args->length());
+
+  VisitForStackValue(args->at(0));
+  VisitForAccumulatorValue(args->at(1));
+
+  __ Pop(x1);
+  StringAddStub stub(STRING_ADD_CHECK_BOTH, NOT_TENURED);
+  __ CallStub(&stub);
+
+  context()->Plug(x0);
+}
+
+
+// Inline %_StringCompare(a, b): pushes both strings and delegates the
+// comparison to StringCompareStub. Result smi in x0.
+void FullCodeGenerator::EmitStringCompare(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
+  ASSERT_EQ(2, args->length());
+  VisitForStackValue(args->at(0));
+  VisitForStackValue(args->at(1));
+
+  StringCompareStub stub;
+  __ CallStub(&stub);
+  context()->Plug(x0);
+}
+
+
+// Inline %_MathLog(x): no fast path on this port — calls straight through to
+// the Runtime::kMath_log C function. Result in x0.
+void FullCodeGenerator::EmitMathLog(CallRuntime* expr) {
+  // Load the argument on the stack and call the runtime function.
+  ZoneList<Expression*>* args = expr->arguments();
+  ASSERT(args->length() == 1);
+  VisitForStackValue(args->at(0));
+  __ CallRuntime(Runtime::kMath_log, 1);
+  context()->Plug(x0);
+}
+
+
+// Inline %_MathSqrt(x): no fast path on this port — calls straight through to
+// the Runtime::kMath_sqrt C function. Result in x0.
+void FullCodeGenerator::EmitMathSqrt(CallRuntime* expr) {
+  // Load the argument on the stack and call the runtime function.
+  ZoneList<Expression*>* args = expr->arguments();
+  ASSERT(args->length() == 1);
+  VisitForStackValue(args->at(0));
+  __ CallRuntime(Runtime::kMath_sqrt, 1);
+  context()->Plug(x0);
+}
+
+
+// Inline %_CallFunction(receiver, arg1, ..., function): pushes receiver and
+// args, then invokes 'function' directly when it is a genuine JSFunction;
+// smis, proxies and other callables fall back to Runtime::kCall.
+void FullCodeGenerator::EmitCallFunction(CallRuntime* expr) {
+  ASM_LOCATION("FullCodeGenerator::EmitCallFunction");
+  ZoneList<Expression*>* args = expr->arguments();
+  ASSERT(args->length() >= 2);
+
+  int arg_count = args->length() - 2;  // 2 ~ receiver and function.
+  // Push receiver plus the arg_count call arguments.
+  for (int i = 0; i < arg_count + 1; i++) {
+    VisitForStackValue(args->at(i));
+  }
+  VisitForAccumulatorValue(args->last());  // Function.
+
+  Label runtime, done;
+  // Check for non-function argument (including proxy).
+  __ JumpIfSmi(x0, &runtime);
+  __ JumpIfNotObjectType(x0, x1, x1, JS_FUNCTION_TYPE, &runtime);
+
+  // InvokeFunction requires the function in x1. Move it in there.
+  __ Mov(x1, x0);
+  ParameterCount count(arg_count);
+  __ InvokeFunction(x1, count, CALL_FUNCTION, NullCallWrapper());
+  // Restore the context register after the call.
+  __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+  __ B(&done);
+
+  __ Bind(&runtime);
+  // Slow path: push the callee and let the runtime perform the call.
+  __ Push(x0);
+  __ CallRuntime(Runtime::kCall, args->length());
+  __ Bind(&done);
+
+  context()->Plug(x0);
+}
+
+
+// Inline %_RegExpConstructResult(length, index, input): builds a JSArray
+// regexp result via RegExpConstructResultStub with args in x1, x2, x0.
+void FullCodeGenerator::EmitRegExpConstructResult(CallRuntime* expr) {
+  RegExpConstructResultStub stub;
+  ZoneList<Expression*>* args = expr->arguments();
+  ASSERT(args->length() == 3);
+  VisitForStackValue(args->at(0));
+  VisitForStackValue(args->at(1));
+  VisitForAccumulatorValue(args->at(2));
+  __ Pop(x1, x2);
+  __ CallStub(&stub);
+  context()->Plug(x0);
+}
+
+
+// Inline %_GetFromCache(cache_id, key): looks 'key' up in the JSFunction
+// result cache identified by the literal smi 'cache_id'. The fast path only
+// probes the cache's finger entry; any miss goes to Runtime::kGetFromCache.
+void FullCodeGenerator::EmitGetFromCache(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
+  ASSERT_EQ(2, args->length());
+  ASSERT_NE(NULL, args->at(0)->AsLiteral());
+  int cache_id = Smi::cast(*(args->at(0)->AsLiteral()->value()))->value();
+
+  Handle<FixedArray> jsfunction_result_caches(
+      isolate()->native_context()->jsfunction_result_caches());
+  if (jsfunction_result_caches->length() <= cache_id) {
+    // Unknown cache id: emit an abort (debug) and produce undefined.
+    __ Abort(kAttemptToUseUndefinedCache);
+    __ LoadRoot(x0, Heap::kUndefinedValueRootIndex);
+    context()->Plug(x0);
+    return;
+  }
+
+  VisitForAccumulatorValue(args->at(1));
+
+  Register key = x0;
+  Register cache = x1;
+  // Chase global object -> native context -> caches array -> this cache.
+  __ Ldr(cache, GlobalObjectMemOperand());
+  __ Ldr(cache, FieldMemOperand(cache, GlobalObject::kNativeContextOffset));
+  __ Ldr(cache, ContextMemOperand(cache,
+                                  Context::JSFUNCTION_RESULT_CACHES_INDEX));
+  __ Ldr(cache,
+         FieldMemOperand(cache, FixedArray::OffsetOfElementAt(cache_id)));
+
+  Label done;
+  // x2 = untagged finger index; x3 = address of the finger's (key, value)
+  // pair inside the cache's FixedArray backing store.
+  __ Ldrsw(x2, UntagSmiFieldMemOperand(cache,
+                                       JSFunctionResultCache::kFingerOffset));
+  __ Add(x3, cache, FixedArray::kHeaderSize - kHeapObjectTag);
+  __ Add(x3, x3, Operand(x2, LSL, kPointerSizeLog2));
+
+  // Load the key and data from the cache.
+  __ Ldp(x2, x3, MemOperand(x3));
+
+  // On a key match, select the cached value into x0 and skip the runtime.
+  __ Cmp(key, x2);
+  __ CmovX(x0, x3, eq);
+  __ B(eq, &done);
+
+  // Call runtime to perform the lookup.
+  __ Push(cache, key);
+  __ CallRuntime(Runtime::kGetFromCache, 2);
+
+  __ Bind(&done);
+  context()->Plug(x0);
+}
+
+
+// Inline %_HasCachedArrayIndex(string): tests whether the string's hash
+// field contains a cached array index, producing a boolean in the current
+// test context. Branches on the cached-index mask being zero (eq => true).
+void FullCodeGenerator::EmitHasCachedArrayIndex(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
+  VisitForAccumulatorValue(args->at(0));
+
+  Label materialize_true, materialize_false;
+  Label* if_true = NULL;
+  Label* if_false = NULL;
+  Label* fall_through = NULL;
+  context()->PrepareTest(&materialize_true, &materialize_false,
+                         &if_true, &if_false, &fall_through);
+
+  __ Ldr(x10, FieldMemOperand(x0, String::kHashFieldOffset));
+  __ Tst(x10, String::kContainsCachedArrayIndexMask);
+  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+  Split(eq, if_true, if_false, fall_through);
+
+  context()->Plug(if_true, if_false);
+}
+
+
+// Inline %_GetCachedArrayIndex(string): extracts the array index cached in
+// the string's hash field into x0 as a smi.
+void FullCodeGenerator::EmitGetCachedArrayIndex(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
+  ASSERT(args->length() == 1);
+  VisitForAccumulatorValue(args->at(0));
+
+  __ AssertString(x0);
+
+  __ Ldr(x10, FieldMemOperand(x0, String::kHashFieldOffset));
+  __ IndexFromHash(x10, x0);
+
+  context()->Plug(x0);
+}
+
+
+// Inline %_FastAsciiArrayJoin(array, separator): joins an array of flat
+// ASCII strings with an ASCII separator without going through the runtime.
+// Fast-path requirements checked below (JSArray, fast elements, all elements
+// sequential ASCII strings, separator flat ASCII); any violation jumps to
+// 'bailout', which produces undefined so the caller falls back to the
+// generic (slower) join. Specializes on separator length 0 / 1 / >1.
+//
+// Fix: the labels at the size-one-array check were mis-encoded as
+// "¬_size_one_array" (HTML-entity mojibake of "&not_size_one_array");
+// restored the intended "&not_size_one_array" so the code compiles.
+void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
+  ASM_LOCATION("FullCodeGenerator::EmitFastAsciiArrayJoin");
+
+  ZoneList<Expression*>* args = expr->arguments();
+  ASSERT(args->length() == 2);
+  VisitForStackValue(args->at(1));
+  VisitForAccumulatorValue(args->at(0));
+
+  Register array = x0;
+  Register result = x0;
+  Register elements = x1;
+  Register element = x2;
+  Register separator = x3;
+  Register array_length = x4;
+  Register result_pos = x5;
+  Register map = x6;
+  Register string_length = x10;
+  Register elements_end = x11;
+  Register string = x12;
+  Register scratch1 = x13;
+  Register scratch2 = x14;
+  Register scratch3 = x7;
+  Register separator_length = x15;
+
+  Label bailout, done, one_char_separator, long_separator,
+      non_trivial_array, not_size_one_array, loop,
+      empty_separator_loop, one_char_separator_loop,
+      one_char_separator_loop_entry, long_separator_loop;
+
+  // The separator operand is on the stack.
+  __ Pop(separator);
+
+  // Check that the array is a JSArray.
+  __ JumpIfSmi(array, &bailout);
+  __ JumpIfNotObjectType(array, map, scratch1, JS_ARRAY_TYPE, &bailout);
+
+  // Check that the array has fast elements.
+  __ CheckFastElements(map, scratch1, &bailout);
+
+  // If the array has length zero, return the empty string.
+  // Load and untag the length of the array.
+  // It is an unsigned value, so we can skip sign extension.
+  // We assume little endianness.
+  __ Ldrsw(array_length,
+           UntagSmiFieldMemOperand(array, JSArray::kLengthOffset));
+  __ Cbnz(array_length, &non_trivial_array);
+  __ LoadRoot(result, Heap::kempty_stringRootIndex);
+  __ B(&done);
+
+  __ Bind(&non_trivial_array);
+  // Get the FixedArray containing array's elements.
+  __ Ldr(elements, FieldMemOperand(array, JSArray::kElementsOffset));
+
+  // Check that all array elements are sequential ASCII strings, and
+  // accumulate the sum of their lengths.
+  __ Mov(string_length, 0);
+  __ Add(element, elements, FixedArray::kHeaderSize - kHeapObjectTag);
+  __ Add(elements_end, element, Operand(array_length, LSL, kPointerSizeLog2));
+  // Loop condition: while (element < elements_end).
+  // Live values in registers:
+  //   elements: Fixed array of strings.
+  //   array_length: Length of the fixed array of strings (not smi)
+  //   separator: Separator string
+  //   string_length: Accumulated sum of string lengths (not smi).
+  //   element: Current array element.
+  //   elements_end: Array end.
+  if (FLAG_debug_code) {
+    __ Cmp(array_length, Operand(0));
+    __ Assert(gt, kNoEmptyArraysHereInEmitFastAsciiArrayJoin);
+  }
+  __ Bind(&loop);
+  __ Ldr(string, MemOperand(element, kPointerSize, PostIndex));
+  __ JumpIfSmi(string, &bailout);
+  __ Ldr(scratch1, FieldMemOperand(string, HeapObject::kMapOffset));
+  __ Ldrb(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
+  __ JumpIfInstanceTypeIsNotSequentialAscii(scratch1, scratch2, &bailout);
+  __ Ldrsw(scratch1,
+           UntagSmiFieldMemOperand(string, SeqOneByteString::kLengthOffset));
+  // Adds sets the overflow flag; an overflowing total length bails out.
+  __ Adds(string_length, string_length, scratch1);
+  __ B(vs, &bailout);
+  __ Cmp(element, elements_end);
+  __ B(lt, &loop);
+
+  // If array_length is 1, return elements[0], a string.
+  __ Cmp(array_length, 1);
+  __ B(ne, &not_size_one_array);
+  __ Ldr(result, FieldMemOperand(elements, FixedArray::kHeaderSize));
+  __ B(&done);
+
+  __ Bind(&not_size_one_array);
+
+  // Live values in registers:
+  //   separator: Separator string
+  //   array_length: Length of the array (not smi).
+  //   string_length: Sum of string lengths (not smi).
+  //   elements: FixedArray of strings.
+
+  // Check that the separator is a flat ASCII string.
+  __ JumpIfSmi(separator, &bailout);
+  __ Ldr(scratch1, FieldMemOperand(separator, HeapObject::kMapOffset));
+  __ Ldrb(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
+  __ JumpIfInstanceTypeIsNotSequentialAscii(scratch1, scratch2, &bailout);
+
+  // Add (separator length times array_length) - separator length to the
+  // string_length to get the length of the result string.
+  // Load the separator length as untagged.
+  // We assume little endianness, and that the length is positive.
+  __ Ldrsw(separator_length,
+           UntagSmiFieldMemOperand(separator,
+                                   SeqOneByteString::kLengthOffset));
+  __ Sub(string_length, string_length, separator_length);
+  __ Umaddl(string_length, array_length.W(), separator_length.W(),
+            string_length);
+
+  // Get first element in the array.
+  __ Add(element, elements, FixedArray::kHeaderSize - kHeapObjectTag);
+  // Live values in registers:
+  //   element: First array element
+  //   separator: Separator string
+  //   string_length: Length of result string (not smi)
+  //   array_length: Length of the array (not smi).
+  __ AllocateAsciiString(result, string_length, scratch1, scratch2, scratch3,
+                         &bailout);
+
+  // Prepare for looping. Set up elements_end to end of the array. Set
+  // result_pos to the position of the result where to write the first
+  // character.
+  // TODO(all): useless unless AllocateAsciiString trashes the register.
+  __ Add(elements_end, element, Operand(array_length, LSL, kPointerSizeLog2));
+  __ Add(result_pos, result, SeqOneByteString::kHeaderSize - kHeapObjectTag);
+
+  // Check the length of the separator.
+  __ Cmp(separator_length, 1);
+  __ B(eq, &one_char_separator);
+  __ B(gt, &long_separator);
+
+  // Empty separator case
+  __ Bind(&empty_separator_loop);
+  // Live values in registers:
+  //   result_pos: the position to which we are currently copying characters.
+  //   element: Current array element.
+  //   elements_end: Array end.
+
+  // Copy next array element to the result.
+  __ Ldr(string, MemOperand(element, kPointerSize, PostIndex));
+  __ Ldrsw(string_length,
+           UntagSmiFieldMemOperand(string, String::kLengthOffset));
+  __ Add(string, string, SeqOneByteString::kHeaderSize - kHeapObjectTag);
+  __ CopyBytes(result_pos, string, string_length, scratch1);
+  __ Cmp(element, elements_end);
+  __ B(lt, &empty_separator_loop);  // End while (element < elements_end).
+  __ B(&done);
+
+  // One-character separator case
+  __ Bind(&one_char_separator);
+  // Replace separator with its ASCII character value.
+  __ Ldrb(separator, FieldMemOperand(separator, SeqOneByteString::kHeaderSize));
+  // Jump into the loop after the code that copies the separator, so the first
+  // element is not preceded by a separator
+  __ B(&one_char_separator_loop_entry);
+
+  __ Bind(&one_char_separator_loop);
+  // Live values in registers:
+  //   result_pos: the position to which we are currently copying characters.
+  //   element: Current array element.
+  //   elements_end: Array end.
+  //   separator: Single separator ASCII char (in lower byte).
+
+  // Copy the separator character to the result.
+  __ Strb(separator, MemOperand(result_pos, 1, PostIndex));
+
+  // Copy next array element to the result.
+  __ Bind(&one_char_separator_loop_entry);
+  __ Ldr(string, MemOperand(element, kPointerSize, PostIndex));
+  __ Ldrsw(string_length,
+           UntagSmiFieldMemOperand(string, String::kLengthOffset));
+  __ Add(string, string, SeqOneByteString::kHeaderSize - kHeapObjectTag);
+  __ CopyBytes(result_pos, string, string_length, scratch1);
+  __ Cmp(element, elements_end);
+  __ B(lt, &one_char_separator_loop);  // End while (element < elements_end).
+  __ B(&done);
+
+  // Long separator case (separator is more than one character). Entry is at
+  // the label long_separator below.
+  __ Bind(&long_separator_loop);
+  // Live values in registers:
+  //   result_pos: the position to which we are currently copying characters.
+  //   element: Current array element.
+  //   elements_end: Array end.
+  //   separator: Separator string.
+
+  // Copy the separator to the result.
+  // TODO(all): hoist next two instructions.
+  __ Ldrsw(string_length,
+           UntagSmiFieldMemOperand(separator, String::kLengthOffset));
+  __ Add(string, separator, SeqOneByteString::kHeaderSize - kHeapObjectTag);
+  __ CopyBytes(result_pos, string, string_length, scratch1);
+
+  __ Bind(&long_separator);
+  __ Ldr(string, MemOperand(element, kPointerSize, PostIndex));
+  __ Ldrsw(string_length,
+           UntagSmiFieldMemOperand(string, String::kLengthOffset));
+  __ Add(string, string, SeqOneByteString::kHeaderSize - kHeapObjectTag);
+  __ CopyBytes(result_pos, string, string_length, scratch1);
+  __ Cmp(element, elements_end);
+  __ B(lt, &long_separator_loop);  // End while (element < elements_end).
+  __ B(&done);
+
+  __ Bind(&bailout);
+  // Returning undefined will force slower code to handle it.
+  __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
+  __ Bind(&done);
+  context()->Plug(result);
+}
+
+
+// Emits a runtime call. Names starting with '_' are inline runtime calls
+// dispatched to EmitInlineRuntimeCall. JS runtime calls
+// (expr->is_jsruntime()) load the function from the builtins object and
+// invoke it as an ordinary JS call; everything else calls the C runtime
+// directly.
+//
+// Fix: removed the inner redeclaration of 'arg_count' inside the
+// is_jsruntime() branch, which shadowed the identical outer variable.
+void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
+  Handle<String> name = expr->name();
+  if (name->length() > 0 && name->Get(0) == '_') {
+    Comment cmnt(masm_, "[ InlineRuntimeCall");
+    EmitInlineRuntimeCall(expr);
+    return;
+  }
+
+  Comment cmnt(masm_, "[ CallRunTime");
+  ZoneList<Expression*>* args = expr->arguments();
+  int arg_count = args->length();
+
+  if (expr->is_jsruntime()) {
+    // Push the builtins object as the receiver.
+    __ Ldr(x10, GlobalObjectMemOperand());
+    __ Ldr(x0, FieldMemOperand(x10, GlobalObject::kBuiltinsOffset));
+    __ Push(x0);
+
+    // Load the function from the receiver.
+    __ Mov(x2, Operand(name));
+    CallLoadIC(NOT_CONTEXTUAL, expr->CallRuntimeFeedbackId());
+
+    // Push the target function under the receiver.
+    __ Pop(x10);
+    __ Push(x0, x10);
+
+    // Push the call arguments ("left-to-right").
+    for (int i = 0; i < arg_count; i++) {
+      VisitForStackValue(args->at(i));
+    }
+
+    // Record source position of the IC call.
+    SetSourcePosition(expr->position());
+    CallFunctionStub stub(arg_count, NO_CALL_FUNCTION_FLAGS);
+    __ Peek(x1, (arg_count + 1) * kPointerSize);
+    __ CallStub(&stub);
+
+    // Restore context register.
+    __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+
+    context()->DropAndPlug(1, x0);
+  } else {
+    // Push the arguments ("left-to-right").
+    for (int i = 0; i < arg_count; i++) {
+      VisitForStackValue(args->at(i));
+    }
+
+    // Call the C runtime function.
+    __ CallRuntime(expr->function(), arg_count);
+    context()->Plug(x0);
+  }
+}
+
+
+// Code generation for unary operators: DELETE, VOID, NOT and TYPEOF.
+// Each case plugs its result into the current expression context.
+//
+// Fix: the DELETE case ended with a duplicated 'break; break;' — the second
+// break was unreachable dead code and has been removed.
+void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
+  switch (expr->op()) {
+    case Token::DELETE: {
+      Comment cmnt(masm_, "[ UnaryOperation (DELETE)");
+      Property* property = expr->expression()->AsProperty();
+      VariableProxy* proxy = expr->expression()->AsVariableProxy();
+
+      if (property != NULL) {
+        // delete obj[key]: push obj, key and the strict-mode flag, then let
+        // the DELETE builtin do the work.
+        VisitForStackValue(property->obj());
+        VisitForStackValue(property->key());
+        StrictModeFlag strict_mode_flag = (language_mode() == CLASSIC_MODE)
+            ? kNonStrictMode : kStrictMode;
+        __ Mov(x10, Operand(Smi::FromInt(strict_mode_flag)));
+        __ Push(x10);
+        __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION);
+        context()->Plug(x0);
+      } else if (proxy != NULL) {
+        Variable* var = proxy->var();
+        // Delete of an unqualified identifier is disallowed in strict mode
+        // but "delete this" is allowed.
+        ASSERT(language_mode() == CLASSIC_MODE || var->is_this());
+        if (var->IsUnallocated()) {
+          // Global variable: delete from the global object via the builtin.
+          __ Ldr(x12, GlobalObjectMemOperand());
+          __ Mov(x11, Operand(var->name()));
+          __ Mov(x10, Operand(Smi::FromInt(kNonStrictMode)));
+          __ Push(x12, x11, x10);
+          __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION);
+          context()->Plug(x0);
+        } else if (var->IsStackAllocated() || var->IsContextSlot()) {
+          // Result of deleting non-global, non-dynamic variables is false.
+          // The subexpression does not have side effects.
+          context()->Plug(var->is_this());
+        } else {
+          // Non-global variable. Call the runtime to try to delete from the
+          // context where the variable was introduced.
+          __ Mov(x2, Operand(var->name()));
+          __ Push(context_register(), x2);
+          __ CallRuntime(Runtime::kDeleteContextSlot, 2);
+          context()->Plug(x0);
+        }
+      } else {
+        // Result of deleting non-property, non-variable reference is true.
+        // The subexpression may have side effects.
+        VisitForEffect(expr->expression());
+        context()->Plug(true);
+      }
+      break;
+    }
+    case Token::VOID: {
+      Comment cmnt(masm_, "[ UnaryOperation (VOID)");
+      // Evaluate for side effects only; the result is always undefined.
+      VisitForEffect(expr->expression());
+      context()->Plug(Heap::kUndefinedValueRootIndex);
+      break;
+    }
+    case Token::NOT: {
+      Comment cmnt(masm_, "[ UnaryOperation (NOT)");
+      if (context()->IsEffect()) {
+        // Unary NOT has no side effects so it's only necessary to visit the
+        // subexpression.  Match the optimizing compiler by not branching.
+        VisitForEffect(expr->expression());
+      } else if (context()->IsTest()) {
+        const TestContext* test = TestContext::cast(context());
+        // The labels are swapped for the recursive call.
+        VisitForControl(expr->expression(),
+                        test->false_label(),
+                        test->true_label(),
+                        test->fall_through());
+        context()->Plug(test->true_label(), test->false_label());
+      } else {
+        ASSERT(context()->IsAccumulatorValue() || context()->IsStackValue());
+        // TODO(jbramley): This could be much more efficient using (for
+        // example) the CSEL instruction.
+        Label materialize_true, materialize_false, done;
+        // Control flow with swapped labels implements the negation.
+        VisitForControl(expr->expression(),
+                        &materialize_false,
+                        &materialize_true,
+                        &materialize_true);
+
+        __ Bind(&materialize_true);
+        PrepareForBailoutForId(expr->MaterializeTrueId(), NO_REGISTERS);
+        __ LoadRoot(result_register(), Heap::kTrueValueRootIndex);
+        __ B(&done);
+
+        __ Bind(&materialize_false);
+        PrepareForBailoutForId(expr->MaterializeFalseId(), NO_REGISTERS);
+        __ LoadRoot(result_register(), Heap::kFalseValueRootIndex);
+        __ B(&done);
+
+        __ Bind(&done);
+        if (context()->IsStackValue()) {
+          __ Push(result_register());
+        }
+      }
+      break;
+    }
+    case Token::TYPEOF: {
+      Comment cmnt(masm_, "[ UnaryOperation (TYPEOF)");
+      {
+        // Typeof must not throw a reference error for unresolved names,
+        // hence the dedicated VisitForTypeofValue path.
+        StackValueContext context(this);
+        VisitForTypeofValue(expr->expression());
+      }
+      __ CallRuntime(Runtime::kTypeof, 1);
+      context()->Plug(x0);
+      break;
+    }
+    default:
+      UNREACHABLE();
+  }
+}
+
+
+// Code generation for count operations (++/--, prefix and postfix) on
+// variables, named properties and keyed properties. Postfix expressions in
+// a value context keep the original value on the stack under the receiver
+// while the incremented value is computed and stored.
+void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
+  Comment cmnt(masm_, "[ CountOperation");
+  SetSourcePosition(expr->position());
+
+  // Invalid left-hand sides are rewritten to have a 'throw ReferenceError'
+  // as the left-hand side.
+  if (!expr->expression()->IsValidLeftHandSide()) {
+    VisitForEffect(expr->expression());
+    return;
+  }
+
+  // Expression can only be a property, a global or a (parameter or local)
+  // slot.
+  enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY };
+  LhsKind assign_type = VARIABLE;
+  Property* prop = expr->expression()->AsProperty();
+  // In case of a property we use the uninitialized expression context
+  // of the key to detect a named property.
+  if (prop != NULL) {
+    assign_type =
+        (prop->key()->IsPropertyName()) ? NAMED_PROPERTY : KEYED_PROPERTY;
+  }
+
+  // Evaluate expression and get value.
+  if (assign_type == VARIABLE) {
+    ASSERT(expr->expression()->AsVariableProxy()->var() != NULL);
+    AccumulatorValueContext context(this);
+    EmitVariableLoad(expr->expression()->AsVariableProxy());
+  } else {
+    // Reserve space for result of postfix operation.
+    if (expr->is_postfix() && !context()->IsEffect()) {
+      __ Push(xzr);
+    }
+    if (assign_type == NAMED_PROPERTY) {
+      // Put the object both on the stack and in the accumulator.
+      VisitForAccumulatorValue(prop->obj());
+      __ Push(x0);
+      EmitNamedPropertyLoad(prop);
+    } else {
+      // KEYED_PROPERTY
+      VisitForStackValue(prop->obj());
+      VisitForAccumulatorValue(prop->key());
+      __ Peek(x1, 0);
+      __ Push(x0);
+      EmitKeyedPropertyLoad(prop);
+    }
+  }
+
+  // We need a second deoptimization point after loading the value
+  // in case evaluating the property load may have a side effect.
+  if (assign_type == VARIABLE) {
+    PrepareForBailout(expr->expression(), TOS_REG);
+  } else {
+    PrepareForBailoutForId(prop->LoadId(), TOS_REG);
+  }
+
+  // Inline smi case if we are in a loop.
+  Label stub_call, done;
+  JumpPatchSite patch_site(masm_);
+
+  int count_value = expr->op() == Token::INC ? 1 : -1;
+  if (ShouldInlineSmiCase(expr->op())) {
+    Label slow;
+    patch_site.EmitJumpIfNotSmi(x0, &slow);
+
+    // Save result for postfix expressions.
+    if (expr->is_postfix()) {
+      if (!context()->IsEffect()) {
+        // Save the result on the stack. If we have a named or keyed property we
+        // store the result under the receiver that is currently on top of the
+        // stack.
+        // NOTE(review): offsets here use kPointerSize while the slow path
+        // below uses kXRegSizeInBytes for the same slots — on A64 both are
+        // 8 bytes, but the two sites should use the same constant.
+        switch (assign_type) {
+          case VARIABLE:
+            __ Push(x0);
+            break;
+          case NAMED_PROPERTY:
+            __ Poke(x0, kPointerSize);
+            break;
+          case KEYED_PROPERTY:
+            __ Poke(x0, kPointerSize * 2);
+            break;
+        }
+      }
+    }
+
+    // Inline smi add/sub; on signed overflow (vs), undo and fall through to
+    // the generic stub.
+    __ Adds(x0, x0, Operand(Smi::FromInt(count_value)));
+    __ B(vc, &done);
+    // Call stub. Undo operation first.
+    __ Sub(x0, x0, Operand(Smi::FromInt(count_value)));
+    __ B(&stub_call);
+    __ Bind(&slow);
+  }
+  // Non-smi operand: convert it to a number first.
+  ToNumberStub convert_stub;
+  __ CallStub(&convert_stub);
+
+  // Save result for postfix expressions.
+  if (expr->is_postfix()) {
+    if (!context()->IsEffect()) {
+      // Save the result on the stack. If we have a named or keyed property
+      // we store the result under the receiver that is currently on top
+      // of the stack.
+      switch (assign_type) {
+        case VARIABLE:
+          __ Push(x0);
+          break;
+        case NAMED_PROPERTY:
+          __ Poke(x0, kXRegSizeInBytes);
+          break;
+        case KEYED_PROPERTY:
+          __ Poke(x0, 2 * kXRegSizeInBytes);
+          break;
+      }
+    }
+  }
+
+  __ Bind(&stub_call);
+  __ Mov(x1, x0);
+  __ Mov(x0, Operand(Smi::FromInt(count_value)));
+
+  // Record position before stub call.
+  SetSourcePosition(expr->position());
+
+  {
+    // Keep the patchable call sequence contiguous for the patch site.
+    Assembler::BlockConstPoolScope scope(masm_);
+    BinaryOpICStub stub(Token::ADD, NO_OVERWRITE);
+    CallIC(stub.GetCode(isolate()), expr->CountBinOpFeedbackId());
+    patch_site.EmitPatchInfo();
+  }
+  __ Bind(&done);
+
+  // Store the value returned in x0.
+  switch (assign_type) {
+    case VARIABLE:
+      if (expr->is_postfix()) {
+        { EffectContext context(this);
+          EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(),
+                                 Token::ASSIGN);
+          PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+          context.Plug(x0);
+        }
+        // For all contexts except EffectConstant We have the result on
+        // top of the stack.
+        if (!context()->IsEffect()) {
+          context()->PlugTOS();
+        }
+      } else {
+        EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(),
+                               Token::ASSIGN);
+        PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+        context()->Plug(x0);
+      }
+      break;
+    case NAMED_PROPERTY: {
+      __ Mov(x2, Operand(prop->key()->AsLiteral()->value()));
+      __ Pop(x1);
+      CallStoreIC(expr->CountStoreFeedbackId());
+      PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+      if (expr->is_postfix()) {
+        if (!context()->IsEffect()) {
+          context()->PlugTOS();
+        }
+      } else {
+        context()->Plug(x0);
+      }
+      break;
+    }
+    case KEYED_PROPERTY: {
+      __ Pop(x1);  // Key.
+      __ Pop(x2);  // Receiver.
+      Handle<Code> ic = is_classic_mode()
+          ? isolate()->builtins()->KeyedStoreIC_Initialize()
+          : isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
+      CallIC(ic, expr->CountStoreFeedbackId());
+      PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+      if (expr->is_postfix()) {
+        if (!context()->IsEffect()) {
+          context()->PlugTOS();
+        }
+      } else {
+        context()->Plug(x0);
+      }
+      break;
+    }
+  }
+}
+
+
+// Loads a value for 'typeof' without ever raising a reference error:
+// unallocated (global) variables use a plain (non-contextual) load IC, and
+// lookup-slot variables use the NoReferenceError runtime slot load.
+void FullCodeGenerator::VisitForTypeofValue(Expression* expr) {
+  ASSERT(!context()->IsEffect());
+  ASSERT(!context()->IsTest());
+  VariableProxy* proxy = expr->AsVariableProxy();
+  if (proxy != NULL && proxy->var()->IsUnallocated()) {
+    Comment cmnt(masm_, "Global variable");
+    __ Ldr(x0, GlobalObjectMemOperand());
+    __ Mov(x2, Operand(proxy->name()));
+    // Use a regular load, not a contextual load, to avoid a reference
+    // error.
+    CallLoadIC(NOT_CONTEXTUAL);
+    PrepareForBailout(expr, TOS_REG);
+    context()->Plug(x0);
+  } else if (proxy != NULL && proxy->var()->IsLookupSlot()) {
+    Label done, slow;
+
+    // Generate code for loading from variables potentially shadowed
+    // by eval-introduced variables.
+    EmitDynamicLookupFastCase(proxy->var(), INSIDE_TYPEOF, &slow, &done);
+
+    __ Bind(&slow);
+    __ Mov(x0, Operand(proxy->name()));
+    __ Push(cp, x0);
+    __ CallRuntime(Runtime::kLoadContextSlotNoReferenceError, 2);
+    PrepareForBailout(expr, TOS_REG);
+    __ Bind(&done);
+
+    context()->Plug(x0);
+  } else {
+    // This expression cannot throw a reference error at the top level.
+    VisitInDuplicateContext(expr);
+  }
+}
+
+
+// Optimized code for 'typeof expr == "literal"' comparisons: instead of
+// materializing the typeof string, the value in x0 is classified directly
+// and control is split to if_true/if_false. Unknown literals always take
+// the false branch.
+void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
+                                                 Expression* sub_expr,
+                                                 Handle<String> check) {
+  ASM_LOCATION("FullCodeGenerator::EmitLiteralCompareTypeof");
+  Comment cmnt(masm_, "[ EmitLiteralCompareTypeof");
+  Label materialize_true, materialize_false;
+  Label* if_true = NULL;
+  Label* if_false = NULL;
+  Label* fall_through = NULL;
+  context()->PrepareTest(&materialize_true, &materialize_false,
+                         &if_true, &if_false, &fall_through);
+
+  { AccumulatorValueContext context(this);
+    VisitForTypeofValue(sub_expr);
+  }
+  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+
+  if (check->Equals(isolate()->heap()->number_string())) {
+    // "number": smis, or heap objects with the heap-number map.
+    ASM_LOCATION("FullCodeGenerator::EmitLiteralCompareTypeof number_string");
+    __ JumpIfSmi(x0, if_true);
+    __ Ldr(x0, FieldMemOperand(x0, HeapObject::kMapOffset));
+    __ CompareRoot(x0, Heap::kHeapNumberMapRootIndex);
+    Split(eq, if_true, if_false, fall_through);
+  } else if (check->Equals(isolate()->heap()->string_string())) {
+    ASM_LOCATION("FullCodeGenerator::EmitLiteralCompareTypeof string_string");
+    __ JumpIfSmi(x0, if_false);
+    // Check for undetectable objects => false.
+    __ JumpIfObjectType(x0, x0, x1, FIRST_NONSTRING_TYPE, if_false, ge);
+    __ Ldrb(x1, FieldMemOperand(x0, Map::kBitFieldOffset));
+    __ TestAndSplit(x1, 1 << Map::kIsUndetectable, if_true, if_false,
+                    fall_through);
+  } else if (check->Equals(isolate()->heap()->symbol_string())) {
+    ASM_LOCATION("FullCodeGenerator::EmitLiteralCompareTypeof symbol_string");
+    __ JumpIfSmi(x0, if_false);
+    __ CompareObjectType(x0, x0, x1, SYMBOL_TYPE);
+    Split(eq, if_true, if_false, fall_through);
+  } else if (check->Equals(isolate()->heap()->boolean_string())) {
+    ASM_LOCATION("FullCodeGenerator::EmitLiteralCompareTypeof boolean_string");
+    __ JumpIfRoot(x0, Heap::kTrueValueRootIndex, if_true);
+    __ CompareRoot(x0, Heap::kFalseValueRootIndex);
+    Split(eq, if_true, if_false, fall_through);
+  } else if (FLAG_harmony_typeof &&
+             check->Equals(isolate()->heap()->null_string())) {
+    // With harmony typeof, 'typeof null == "null"'.
+    ASM_LOCATION("FullCodeGenerator::EmitLiteralCompareTypeof null_string");
+    __ CompareRoot(x0, Heap::kNullValueRootIndex);
+    Split(eq, if_true, if_false, fall_through);
+  } else if (check->Equals(isolate()->heap()->undefined_string())) {
+    ASM_LOCATION(
+        "FullCodeGenerator::EmitLiteralCompareTypeof undefined_string");
+    __ JumpIfRoot(x0, Heap::kUndefinedValueRootIndex, if_true);
+    __ JumpIfSmi(x0, if_false);
+    // Check for undetectable objects => true.
+    __ Ldr(x0, FieldMemOperand(x0, HeapObject::kMapOffset));
+    __ Ldrb(x1, FieldMemOperand(x0, Map::kBitFieldOffset));
+    __ TestAndSplit(x1, 1 << Map::kIsUndetectable, if_false, if_true,
+                    fall_through);
+  } else if (check->Equals(isolate()->heap()->function_string())) {
+    // "function": JS functions and function proxies.
+    ASM_LOCATION("FullCodeGenerator::EmitLiteralCompareTypeof function_string");
+    __ JumpIfSmi(x0, if_false);
+    STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
+    __ JumpIfObjectType(x0, x10, x11, JS_FUNCTION_TYPE, if_true);
+    __ CompareAndSplit(x11, JS_FUNCTION_PROXY_TYPE, eq, if_true, if_false,
+                       fall_through);
+
+  } else if (check->Equals(isolate()->heap()->object_string())) {
+    ASM_LOCATION("FullCodeGenerator::EmitLiteralCompareTypeof object_string");
+    __ JumpIfSmi(x0, if_false);
+    if (!FLAG_harmony_typeof) {
+      // Classic mode: 'typeof null == "object"'.
+      __ JumpIfRoot(x0, Heap::kNullValueRootIndex, if_true);
+    }
+    // Check for JS objects => true.
+    Register map = x10;
+    __ JumpIfObjectType(x0, map, x11, FIRST_NONCALLABLE_SPEC_OBJECT_TYPE,
+                        if_false, lt);
+    __ CompareInstanceType(map, x11, LAST_NONCALLABLE_SPEC_OBJECT_TYPE);
+    __ B(gt, if_false);
+    // Check for undetectable objects => false.
+    __ Ldrb(x10, FieldMemOperand(map, Map::kBitFieldOffset));
+
+    __ TestAndSplit(x10, 1 << Map::kIsUndetectable, if_true, if_false,
+                    fall_through);
+
+  } else {
+    // Unknown typeof literal: the comparison is statically false.
+    ASM_LOCATION("FullCodeGenerator::EmitLiteralCompareTypeof other");
+    if (if_false != fall_through) __ B(if_false);
+  }
+  context()->Plug(if_true, if_false);
+}
+
+
+// Emit code for a comparison expression (e.g. ==, ===, <, in, instanceof).
+// Comparisons against common literals are handled by TryLiteralCompare;
+// everything else goes through a builtin, a stub, or the compare IC.
+void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
+  Comment cmnt(masm_, "[ CompareOperation");
+  SetSourcePosition(expr->position());
+
+  // Try to generate an optimized comparison with a literal value.
+  // TODO(jbramley): This only checks common values like NaN or undefined.
+  // Should it also handle A64 immediate operands?
+  if (TryLiteralCompare(expr)) {
+    return;
+  }
+
+  // Assign labels according to context()->PrepareTest.
+  Label materialize_true;
+  Label materialize_false;
+  Label* if_true = NULL;
+  Label* if_false = NULL;
+  Label* fall_through = NULL;
+  context()->PrepareTest(&materialize_true, &materialize_false,
+                         &if_true, &if_false, &fall_through);
+
+  Token::Value op = expr->op();
+  VisitForStackValue(expr->left());
+  switch (op) {
+    case Token::IN:
+      VisitForStackValue(expr->right());
+      __ InvokeBuiltin(Builtins::IN, CALL_FUNCTION);
+      PrepareForBailoutBeforeSplit(expr, false, NULL, NULL);
+      __ CompareRoot(x0, Heap::kTrueValueRootIndex);
+      Split(eq, if_true, if_false, fall_through);
+      break;
+
+    case Token::INSTANCEOF: {
+      VisitForStackValue(expr->right());
+      InstanceofStub stub(InstanceofStub::kNoFlags);
+      __ CallStub(&stub);
+      PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+      // The stub returns 0 for true.
+      __ CompareAndSplit(x0, 0, eq, if_true, if_false, fall_through);
+      break;
+    }
+
+    default: {
+      VisitForAccumulatorValue(expr->right());
+      Condition cond = CompareIC::ComputeCondition(op);
+
+      // Pop the stack value.
+      __ Pop(x1);
+
+      // Inline the common smi-smi case; the patch site lets the compare IC
+      // rewrite this check later if other types are seen.
+      JumpPatchSite patch_site(masm_);
+      if (ShouldInlineSmiCase(op)) {
+        Label slow_case;
+        patch_site.EmitJumpIfEitherNotSmi(x0, x1, &slow_case);
+        __ Cmp(x1, x0);
+        Split(cond, if_true, if_false, NULL);
+        __ Bind(&slow_case);
+      }
+
+      // Record position and call the compare IC.
+      SetSourcePosition(expr->position());
+      Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
+      CallIC(ic, expr->CompareOperationFeedbackId());
+      patch_site.EmitPatchInfo();
+      PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+      __ CompareAndSplit(x0, 0, cond, if_true, if_false, fall_through);
+    }
+  }
+
+  // Convert the result of the comparison into one expected for this
+  // expression's context.
+  context()->Plug(if_true, if_false);
+}
+
+
+// Emit an optimized comparison of sub_expr against null or undefined (nil).
+// For strict equality a direct root comparison suffices; otherwise the
+// CompareNilIC handles the full abstract-equality semantics.
+void FullCodeGenerator::EmitLiteralCompareNil(CompareOperation* expr,
+                                              Expression* sub_expr,
+                                              NilValue nil) {
+  ASM_LOCATION("FullCodeGenerator::EmitLiteralCompareNil");
+  Label materialize_true, materialize_false;
+  Label* if_true = NULL;
+  Label* if_false = NULL;
+  Label* fall_through = NULL;
+  context()->PrepareTest(&materialize_true, &materialize_false,
+                         &if_true, &if_false, &fall_through);
+
+  VisitForAccumulatorValue(sub_expr);
+  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+
+  if (expr->op() == Token::EQ_STRICT) {
+    Heap::RootListIndex nil_value = nil == kNullValue ?
+        Heap::kNullValueRootIndex :
+        Heap::kUndefinedValueRootIndex;
+    __ CompareRoot(x0, nil_value);
+    Split(eq, if_true, if_false, fall_through);
+  } else {
+    Handle<Code> ic = CompareNilICStub::GetUninitialized(isolate(), nil);
+    CallIC(ic, expr->CompareOperationFeedbackId());
+    __ CompareAndSplit(x0, 0, ne, if_true, if_false, fall_through);
+  }
+
+  context()->Plug(if_true, if_false);
+}
+
+
+// Load the current JSFunction from the standard frame slot and plug it into
+// the expression context.
+void FullCodeGenerator::VisitThisFunction(ThisFunction* expr) {
+  __ Ldr(x0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+  context()->Plug(x0);
+}
+
+
+// Emit code for a 'yield' expression inside a generator function. Handles the
+// three kinds of yield: INITIAL/SUSPEND (suspend the generator and return an
+// iterator result), FINAL (close the generator), and DELEGATING (yield*,
+// which drives a sub-iterator in a loop).
+void FullCodeGenerator::VisitYield(Yield* expr) {
+  Comment cmnt(masm_, "[ Yield");
+  // Evaluate yielded value first; the initial iterator definition depends on
+  // this. It stays on the stack while we update the iterator.
+  VisitForStackValue(expr->expression());
+
+  // TODO(jbramley): Tidy this up once the merge is done, using named registers
+  // and suchlike. The implementation changes a little by bleeding_edge so I
+  // don't want to spend too much time on it now.
+
+  switch (expr->yield_kind()) {
+    case Yield::SUSPEND:
+      // Pop value from top-of-stack slot; box result into result register.
+      EmitCreateIteratorResult(false);
+      __ Push(result_register());
+      // Fall through.
+    case Yield::INITIAL: {
+      Label suspend, continuation, post_runtime, resume;
+
+      __ B(&suspend);
+
+      // TODO(jbramley): This label is bound here because the following code
+      // looks at its pos(). Is it possible to do something more efficient here,
+      // perhaps using Adr?
+      __ Bind(&continuation);
+      __ B(&resume);
+
+      __ Bind(&suspend);
+      VisitForAccumulatorValue(expr->generator_object());
+      // Record the continuation offset (as a smi) in the generator object so
+      // that EmitGeneratorResume can jump back to the 'continuation' label.
+      ASSERT((continuation.pos() > 0) && Smi::IsValid(continuation.pos()));
+      __ Mov(x1, Operand(Smi::FromInt(continuation.pos())));
+      __ Str(x1, FieldMemOperand(x0, JSGeneratorObject::kContinuationOffset));
+      __ Str(cp, FieldMemOperand(x0, JSGeneratorObject::kContextOffset));
+      __ Mov(x1, cp);
+      __ RecordWriteField(x0, JSGeneratorObject::kContextOffset, x1, x2,
+                          kLRHasBeenSaved, kDontSaveFPRegs);
+      // If no operands are on the expression stack, the runtime call to copy
+      // the operand stack can be skipped.
+      __ Add(x1, fp, StandardFrameConstants::kExpressionsOffset);
+      __ Cmp(__ StackPointer(), x1);
+      __ B(eq, &post_runtime);
+      __ Push(x0);  // generator object
+      __ CallRuntime(Runtime::kSuspendJSGeneratorObject, 1);
+      __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+      __ Bind(&post_runtime);
+      __ Pop(result_register());
+      EmitReturnSequence();
+
+      __ Bind(&resume);
+      context()->Plug(result_register());
+      break;
+    }
+
+    case Yield::FINAL: {
+      VisitForAccumulatorValue(expr->generator_object());
+      // Mark the generator as closed; further resumes hit the closed state.
+      __ Mov(x1, Operand(Smi::FromInt(JSGeneratorObject::kGeneratorClosed)));
+      __ Str(x1, FieldMemOperand(result_register(),
+                                 JSGeneratorObject::kContinuationOffset));
+      // Pop value from top-of-stack slot, box result into result register.
+      EmitCreateIteratorResult(true);
+      EmitUnwindBeforeReturn();
+      EmitReturnSequence();
+      break;
+    }
+
+    case Yield::DELEGATING: {
+      VisitForStackValue(expr->generator_object());
+
+      // Initial stack layout is as follows:
+      // [sp + 1 * kPointerSize] iter
+      // [sp + 0 * kPointerSize] g
+
+      Label l_catch, l_try, l_suspend, l_continuation, l_resume;
+      Label l_next, l_call, l_loop;
+      // Initial send value is undefined.
+      __ LoadRoot(x0, Heap::kUndefinedValueRootIndex);
+      __ B(&l_next);
+
+      // catch (e) { receiver = iter; f = 'throw'; arg = e; goto l_call; }
+      __ Bind(&l_catch);
+      handler_table()->set(expr->index(), Smi::FromInt(l_catch.pos()));
+      __ LoadRoot(x2, Heap::kthrow_stringRootIndex);  // "throw"
+      __ Peek(x3, 1 * kPointerSize);                  // iter
+      __ Push(x2, x3, x0);                            // "throw", iter, except
+      __ B(&l_call);
+
+      // try { received = %yield result }
+      // Shuffle the received result above a try handler and yield it without
+      // re-boxing.
+      __ Bind(&l_try);
+      __ Pop(x0);                                        // result
+      __ PushTryHandler(StackHandler::CATCH, expr->index());
+      const int handler_size = StackHandlerConstants::kSize;
+      __ Push(x0);                                       // result
+      __ B(&l_suspend);
+
+      // TODO(jbramley): This label is bound here because the following code
+      // looks at its pos(). Is it possible to do something more efficient here,
+      // perhaps using Adr?
+      __ Bind(&l_continuation);
+      __ B(&l_resume);
+
+      __ Bind(&l_suspend);
+      const int generator_object_depth = kPointerSize + handler_size;
+      __ Peek(x0, generator_object_depth);
+      __ Push(x0);                                       // g
+      // As in the INITIAL case, record where to resume in the generator.
+      ASSERT((l_continuation.pos() > 0) && Smi::IsValid(l_continuation.pos()));
+      __ Mov(x1, Operand(Smi::FromInt(l_continuation.pos())));
+      __ Str(x1, FieldMemOperand(x0, JSGeneratorObject::kContinuationOffset));
+      __ Str(cp, FieldMemOperand(x0, JSGeneratorObject::kContextOffset));
+      __ Mov(x1, cp);
+      __ RecordWriteField(x0, JSGeneratorObject::kContextOffset, x1, x2,
+                          kLRHasBeenSaved, kDontSaveFPRegs);
+      __ CallRuntime(Runtime::kSuspendJSGeneratorObject, 1);
+      __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+      __ Pop(x0);                                        // result
+      EmitReturnSequence();
+      __ Bind(&l_resume);                                // received in x0
+      __ PopTryHandler();
+
+      // receiver = iter; f = 'next'; arg = received;
+      __ Bind(&l_next);
+      __ LoadRoot(x2, Heap::knext_stringRootIndex);  // "next"
+      __ Peek(x3, 1 * kPointerSize);                 // iter
+      __ Push(x2, x3, x0);                           // "next", iter, received
+
+      // result = receiver[f](arg);
+      __ Bind(&l_call);
+      __ Peek(x1, 1 * kPointerSize);
+      __ Peek(x0, 2 * kPointerSize);
+      Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
+      CallIC(ic, TypeFeedbackId::None());
+      __ Mov(x1, x0);
+      __ Poke(x1, 2 * kPointerSize);
+      CallFunctionStub stub(1, CALL_AS_METHOD);
+      __ CallStub(&stub);
+
+      __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+      __ Drop(1);  // The function is still on the stack; drop it.
+
+      // if (!result.done) goto l_try;
+      __ Bind(&l_loop);
+      __ Push(x0);                                       // save result
+      __ LoadRoot(x2, Heap::kdone_stringRootIndex);      // "done"
+      CallLoadIC(NOT_CONTEXTUAL);                        // result.done in x0
+      // The ToBooleanStub argument (result.done) is in x0.
+      Handle<Code> bool_ic = ToBooleanStub::GetUninitialized(isolate());
+      CallIC(bool_ic);
+      __ Cbz(x0, &l_try);
+
+      // result.value
+      __ Pop(x0);                                        // result
+      __ LoadRoot(x2, Heap::kvalue_stringRootIndex);     // "value"
+      CallLoadIC(NOT_CONTEXTUAL);                        // result.value in x0
+      context()->DropAndPlug(2, x0);                     // drop iter and g
+      break;
+    }
+  }
+}
+
+
+// Resume a suspended generator object, either sending it a value (NEXT) or
+// throwing a value into it (THROW). Rebuilds the generator's JS frame and
+// either jumps directly back to the continuation (fast path, no operand
+// stack) or calls the runtime to restore the operand stack and handlers.
+void FullCodeGenerator::EmitGeneratorResume(Expression *generator,
+    Expression *value,
+    JSGeneratorObject::ResumeMode resume_mode) {
+  ASM_LOCATION("FullCodeGenerator::EmitGeneratorResume");
+  Register value_reg = x0;
+  Register generator_object = x1;
+  Register the_hole = x2;
+  Register operand_stack_size = w3;
+  Register function = x4;
+
+  // The value stays in x0, and is ultimately read by the resumed generator, as
+  // if the CallRuntime(Runtime::kSuspendJSGeneratorObject) returned it. Or it
+  // is read to throw the value when the resumed generator is already closed. x1
+  // will hold the generator object until the activation has been resumed.
+  VisitForStackValue(generator);
+  VisitForAccumulatorValue(value);
+  __ Pop(generator_object);
+
+  // Check generator state. The continuation field encodes the state: negative
+  // means executing, zero means closed, positive is a resumable offset.
+  Label wrong_state, closed_state, done;
+  __ Ldr(x10, FieldMemOperand(generator_object,
+                              JSGeneratorObject::kContinuationOffset));
+  STATIC_ASSERT(JSGeneratorObject::kGeneratorExecuting < 0);
+  STATIC_ASSERT(JSGeneratorObject::kGeneratorClosed == 0);
+  __ CompareAndBranch(x10, Operand(Smi::FromInt(0)), eq, &closed_state);
+  __ CompareAndBranch(x10, Operand(Smi::FromInt(0)), lt, &wrong_state);
+
+  // Load suspended function and context.
+  __ Ldr(cp, FieldMemOperand(generator_object,
+                             JSGeneratorObject::kContextOffset));
+  __ Ldr(function, FieldMemOperand(generator_object,
+                                   JSGeneratorObject::kFunctionOffset));
+
+  // Load receiver and store as the first argument.
+  __ Ldr(x10, FieldMemOperand(generator_object,
+                              JSGeneratorObject::kReceiverOffset));
+  __ Push(x10);
+
+  // Push holes for the rest of the arguments to the generator function.
+  __ Ldr(x10, FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
+
+  // The number of arguments is stored as an int32_t, and -1 is a marker
+  // (SharedFunctionInfo::kDontAdaptArgumentsSentinel), so we need sign
+  // extension to correctly handle it. However, in this case, we operate on
+  // 32-bit W registers, so extension isn't required.
+  __ Ldr(w10, FieldMemOperand(x10,
+                              SharedFunctionInfo::kFormalParameterCountOffset));
+  __ LoadRoot(the_hole, Heap::kTheHoleValueRootIndex);
+
+  // TODO(jbramley): Write a variant of PushMultipleTimes which takes a register
+  // instead of a constant count, and use it to replace this loop.
+  Label push_argument_holes, push_frame;
+  __ Bind(&push_argument_holes);
+  __ Subs(w10, w10, 1);
+  __ B(mi, &push_frame);
+  __ Push(the_hole);
+  __ B(&push_argument_holes);
+
+  // Enter a new JavaScript frame, and initialize its slots as they were when
+  // the generator was suspended.
+  Label resume_frame;
+  __ Bind(&push_frame);
+  __ Bl(&resume_frame);
+  __ B(&done);
+
+  __ Bind(&resume_frame);
+  __ Push(lr,           // Return address.
+          fp,           // Caller's frame pointer.
+          cp,           // Callee's context.
+          function);    // Callee's JS Function.
+  __ Add(fp, __ StackPointer(), kPointerSize * 2);
+
+  // Load and untag the operand stack size.
+  __ Ldr(x10, FieldMemOperand(generator_object,
+                              JSGeneratorObject::kOperandStackOffset));
+  __ Ldr(operand_stack_size,
+         UntagSmiFieldMemOperand(x10, FixedArray::kLengthOffset));
+
+  // If we are sending a value and there is no operand stack, we can jump back
+  // in directly.
+  if (resume_mode == JSGeneratorObject::NEXT) {
+    Label slow_resume;
+    __ Cbnz(operand_stack_size, &slow_resume);
+    __ Ldr(x10, FieldMemOperand(function, JSFunction::kCodeEntryOffset));
+    __ Ldrsw(x11,
+             UntagSmiFieldMemOperand(generator_object,
+                                     JSGeneratorObject::kContinuationOffset));
+    __ Add(x10, x10, x11);
+    // Mark the generator as executing before jumping back in.
+    __ Mov(x12, Operand(Smi::FromInt(JSGeneratorObject::kGeneratorExecuting)));
+    __ Str(x12, FieldMemOperand(generator_object,
+                                JSGeneratorObject::kContinuationOffset));
+    __ Br(x10);
+
+    __ Bind(&slow_resume);
+  }
+
+  // Otherwise, we push holes for the operand stack and call the runtime to fix
+  // up the stack and the handlers.
+  // TODO(jbramley): Write a variant of PushMultipleTimes which takes a register
+  // instead of a constant count, and use it to replace this loop.
+  Label push_operand_holes, call_resume;
+  __ Bind(&push_operand_holes);
+  __ Subs(operand_stack_size, operand_stack_size, 1);
+  __ B(mi, &call_resume);
+  __ Push(the_hole);
+  __ B(&push_operand_holes);
+
+  __ Bind(&call_resume);
+  __ Mov(x10, Operand(Smi::FromInt(resume_mode)));
+  __ Push(generator_object, result_register(), x10);
+  __ CallRuntime(Runtime::kResumeJSGeneratorObject, 3);
+  // Not reached: the runtime call returns elsewhere.
+  __ Unreachable();
+
+  // Reach here when generator is closed.
+  __ Bind(&closed_state);
+  if (resume_mode == JSGeneratorObject::NEXT) {
+    // Return completed iterator result when generator is closed.
+    __ LoadRoot(x10, Heap::kUndefinedValueRootIndex);
+    __ Push(x10);
+    // Pop value from top-of-stack slot; box result into result register.
+    EmitCreateIteratorResult(true);
+  } else {
+    // Throw the provided value.
+    __ Push(value_reg);
+    __ CallRuntime(Runtime::kThrow, 1);
+  }
+  __ B(&done);
+
+  // Throw error if we attempt to operate on a running generator.
+  __ Bind(&wrong_state);
+  __ Push(generator_object);
+  __ CallRuntime(Runtime::kThrowGeneratorStateError, 1);
+
+  __ Bind(&done);
+  context()->Plug(result_register());
+}
+
+
+// Allocate a JSObject of the form { value: VAL, done: DONE }, where VAL is
+// popped from the top of the stack and DONE is the given literal. The result
+// object is left in x0.
+void FullCodeGenerator::EmitCreateIteratorResult(bool done) {
+  Label gc_required;
+  Label allocated;
+
+  Handle<Map> map(isolate()->native_context()->generator_result_map());
+
+  // Allocate and populate an object with this form: { value: VAL, done: DONE }
+
+  Register result = x0;
+  __ Allocate(map->instance_size(), result, x10, x11, &gc_required, TAG_OBJECT);
+  __ B(&allocated);
+
+  // Slow path: inline allocation failed, so ask the runtime for the space.
+  __ Bind(&gc_required);
+  __ Push(Smi::FromInt(map->instance_size()));
+  __ CallRuntime(Runtime::kAllocateInNewSpace, 1);
+  __ Ldr(context_register(),
+         MemOperand(fp, StandardFrameConstants::kContextOffset));
+
+  __ Bind(&allocated);
+  Register map_reg = x1;
+  Register result_value = x2;
+  Register boolean_done = x3;
+  Register empty_fixed_array = x4;
+  __ Mov(map_reg, Operand(map));
+  __ Pop(result_value);
+  __ Mov(boolean_done, Operand(isolate()->factory()->ToBoolean(done)));
+  __ Mov(empty_fixed_array, Operand(isolate()->factory()->empty_fixed_array()));
+  ASSERT_EQ(map->instance_size(), 5 * kPointerSize);
+  // TODO(jbramley): Use Stp if possible.
+  __ Str(map_reg, FieldMemOperand(result, HeapObject::kMapOffset));
+  __ Str(empty_fixed_array,
+         FieldMemOperand(result, JSObject::kPropertiesOffset));
+  __ Str(empty_fixed_array, FieldMemOperand(result, JSObject::kElementsOffset));
+  __ Str(result_value,
+         FieldMemOperand(result,
+                         JSGeneratorObject::kResultValuePropertyOffset));
+  __ Str(boolean_done,
+         FieldMemOperand(result,
+                         JSGeneratorObject::kResultDonePropertyOffset));
+
+  // Only the value field needs a write barrier, as the other values are in the
+  // root set.
+  __ RecordWriteField(result, JSGeneratorObject::kResultValuePropertyOffset,
+                      x10, x11, kLRHasBeenSaved, kDontSaveFPRegs);
+}
+
+
+// TODO(all): I don't like this method.
+// It seems to me that in too many places x0 is used in place of this.
+// Also, this function is not suitable for all places where x0 should be
+// abstracted (eg. when used as an argument). But some places assume that the
+// first argument register is x0, and use this function instead.
+// Considering that most of the register allocation is hard-coded in the
+// FullCodeGen, that it is unlikely we will need to change it extensively, and
+// that abstracting the allocation through functions would not yield any
+// performance benefit, I think the existence of this function is debatable.
+//
+// Returns the register conventionally holding an expression's result (x0).
+Register FullCodeGenerator::result_register() {
+  return x0;
+}
+
+
+// Returns the register holding the current context (cp).
+Register FullCodeGenerator::context_register() {
+  return cp;
+}
+
+
+// Store 'value' into the frame slot at the given (pointer-aligned) offset
+// from the frame pointer.
+void FullCodeGenerator::StoreToFrameField(int frame_offset, Register value) {
+  ASSERT(POINTER_SIZE_ALIGN(frame_offset) == frame_offset);
+  __ Str(value, MemOperand(fp, frame_offset));
+}
+
+
+// Load the context slot at 'context_index' from the current context into
+// 'dst'.
+void FullCodeGenerator::LoadContextField(Register dst, int context_index) {
+  __ Ldr(dst, ContextMemOperand(cp, context_index));
+}
+
+
+// Push the closure argument used when allocating a new context, which
+// depends on the kind of scope performing the allocation.
+void FullCodeGenerator::PushFunctionArgumentForContextAllocation() {
+  Scope* declaration_scope = scope()->DeclarationScope();
+  if (declaration_scope->is_global_scope() ||
+      declaration_scope->is_module_scope()) {
+    // Contexts nested in the native context have a canonical empty function
+    // as their closure, not the anonymous closure containing the global
+    // code. Pass a smi sentinel and let the runtime look up the empty
+    // function.
+    ASSERT(kSmiTag == 0);
+    __ Push(xzr);
+  } else if (declaration_scope->is_eval_scope()) {
+    // Contexts created by a call to eval have the same closure as the
+    // context calling eval, not the anonymous closure containing the eval
+    // code. Fetch it from the context.
+    __ Ldr(x10, ContextMemOperand(cp, Context::CLOSURE_INDEX));
+    __ Push(x10);
+  } else {
+    ASSERT(declaration_scope->is_function_scope());
+    __ Ldr(x10, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+    __ Push(x10);
+  }
+}
+
+
+// Save state needed across a finally block: the result register, the cooked
+// return address, and the isolate's pending-message state. ExitFinallyBlock
+// pops these in the reverse order.
+void FullCodeGenerator::EnterFinallyBlock() {
+  ASM_LOCATION("FullCodeGenerator::EnterFinallyBlock");
+  ASSERT(!result_register().is(x10));
+  // Preserve the result register while executing finally block.
+  // Also cook the return address in lr to the stack (smi encoded Code* delta).
+  __ Sub(x10, lr, Operand(masm_->CodeObject()));
+  __ SmiTag(x10);
+  __ Push(result_register(), x10);
+
+  // Store pending message while executing finally block.
+  ExternalReference pending_message_obj =
+      ExternalReference::address_of_pending_message_obj(isolate());
+  __ Mov(x10, Operand(pending_message_obj));
+  __ Ldr(x10, MemOperand(x10));
+
+  ExternalReference has_pending_message =
+      ExternalReference::address_of_has_pending_message(isolate());
+  __ Mov(x11, Operand(has_pending_message));
+  __ Ldr(x11, MemOperand(x11));
+  __ SmiTag(x11);
+
+  __ Push(x10, x11);
+
+  ExternalReference pending_message_script =
+      ExternalReference::address_of_pending_message_script(isolate());
+  __ Mov(x10, Operand(pending_message_script));
+  __ Ldr(x10, MemOperand(x10));
+  __ Push(x10);
+}
+
+
+// Restore the pending-message state and the result register saved by
+// EnterFinallyBlock, then jump to the uncooked return address.
+void FullCodeGenerator::ExitFinallyBlock() {
+  ASM_LOCATION("FullCodeGenerator::ExitFinallyBlock");
+  ASSERT(!result_register().is(x10));
+
+  // Restore pending message from stack.
+  __ Pop(x10, x11, x12);
+  ExternalReference pending_message_script =
+      ExternalReference::address_of_pending_message_script(isolate());
+  __ Mov(x13, Operand(pending_message_script));
+  __ Str(x10, MemOperand(x13));
+
+  __ SmiUntag(x11);
+  ExternalReference has_pending_message =
+      ExternalReference::address_of_has_pending_message(isolate());
+  __ Mov(x13, Operand(has_pending_message));
+  __ Str(x11, MemOperand(x13));
+
+  ExternalReference pending_message_obj =
+      ExternalReference::address_of_pending_message_obj(isolate());
+  __ Mov(x13, Operand(pending_message_obj));
+  __ Str(x12, MemOperand(x13));
+
+  // Restore result register and cooked return address from the stack.
+  __ Pop(x10, result_register());
+
+  // Uncook the return address (see EnterFinallyBlock).
+  __ SmiUntag(x10);
+  __ Add(x11, x10, Operand(masm_->CodeObject()));
+  __ Br(x11);
+}
+
+
+#undef __
+
+
+// Patch the back-edge sequence at 'pc' in unoptimized code: switch the
+// conditional branch between the interrupt check (b.pl over the stub call)
+// and a nop (so the stub call is always taken, for OSR), and replace the
+// literal stub address loaded by the preceding ldr.
+void BackEdgeTable::PatchAt(Code* unoptimized_code,
+                            Address pc,
+                            BackEdgeState target_state,
+                            Code* replacement_code) {
+  // The branch (or nop) to patch is three instructions before 'pc'.
+  Address branch_address = pc - 3 * kInstructionSize;
+  PatchingAssembler patcher(branch_address, 1);
+
+  switch (target_state) {
+    case INTERRUPT:
+      //  <decrement profiling counter>
+      //  .. .. .. ..  b.pl ok
+      //  .. .. .. ..  ldr x16, pc+<interrupt stub address>
+      //  .. .. .. ..  blr x16
+      //  ... more instructions.
+      //  ok-label
+      // Jump offset is 6 instructions.
+      ASSERT(Instruction::Cast(branch_address)
+                 ->IsNop(Assembler::INTERRUPT_CODE_NOP));
+      patcher.b(6, pl);
+      break;
+    case ON_STACK_REPLACEMENT:
+    case OSR_AFTER_STACK_CHECK:
+      //  <decrement profiling counter>
+      //  .. .. .. ..  mov x0, x0 (NOP)
+      //  .. .. .. ..  ldr x16, pc+<on-stack replacement address>
+      //  .. .. .. ..  blr x16
+      ASSERT(Instruction::Cast(branch_address)->IsCondBranchImm());
+      ASSERT(Instruction::Cast(branch_address)->ImmPCOffset() ==
+             6 * kInstructionSize);
+      patcher.nop(Assembler::INTERRUPT_CODE_NOP);
+      break;
+  }
+
+  // Replace the call address.
+  Instruction* load = Instruction::Cast(pc)->preceding(2);
+  Address interrupt_address_pointer =
+      reinterpret_cast<Address>(load) + load->ImmPCOffset();
+  // NOTE(review): the OnStackReplacement() clause appears twice below (first
+  // and last alternatives); the duplicate is redundant.
+  ASSERT((Memory::uint64_at(interrupt_address_pointer) ==
+          reinterpret_cast<uint64_t>(unoptimized_code->GetIsolate()
+                                         ->builtins()
+                                         ->OnStackReplacement()
+                                         ->entry())) ||
+         (Memory::uint64_at(interrupt_address_pointer) ==
+          reinterpret_cast<uint64_t>(unoptimized_code->GetIsolate()
+                                         ->builtins()
+                                         ->InterruptCheck()
+                                         ->entry())) ||
+         (Memory::uint64_at(interrupt_address_pointer) ==
+          reinterpret_cast<uint64_t>(unoptimized_code->GetIsolate()
+                                         ->builtins()
+                                         ->OsrAfterStackCheck()
+                                         ->entry())) ||
+         (Memory::uint64_at(interrupt_address_pointer) ==
+          reinterpret_cast<uint64_t>(unoptimized_code->GetIsolate()
+                                         ->builtins()
+                                         ->OnStackReplacement()
+                                         ->entry())));
+  Memory::uint64_at(interrupt_address_pointer) =
+      reinterpret_cast<uint64_t>(replacement_code->entry());
+
+  unoptimized_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
+      unoptimized_code, reinterpret_cast<Address>(load), replacement_code);
+}
+
+
+// Inspect the instructions at a back edge to determine its current state:
+// INTERRUPT (branch still active), or ON_STACK_REPLACEMENT /
+// OSR_AFTER_STACK_CHECK (branch nopped; distinguished by which builtin's
+// entry address is in the literal slot).
+BackEdgeTable::BackEdgeState BackEdgeTable::GetBackEdgeState(
+    Isolate* isolate,
+    Code* unoptimized_code,
+    Address pc) {
+  // TODO(jbramley): There should be some extra assertions here (as in the ARM
+  // back-end), but this function is gone in bleeding_edge so it might not
+  // matter anyway.
+  Instruction* jump_or_nop = Instruction::Cast(pc)->preceding(3);
+
+  if (jump_or_nop->IsNop(Assembler::INTERRUPT_CODE_NOP)) {
+    Instruction* load = Instruction::Cast(pc)->preceding(2);
+    uint64_t entry = Memory::uint64_at(reinterpret_cast<Address>(load) +
+                                       load->ImmPCOffset());
+    if (entry == reinterpret_cast<uint64_t>(
+        isolate->builtins()->OnStackReplacement()->entry())) {
+      return ON_STACK_REPLACEMENT;
+    } else if (entry == reinterpret_cast<uint64_t>(
+        isolate->builtins()->OsrAfterStackCheck()->entry())) {
+      return OSR_AFTER_STACK_CHECK;
+    } else {
+      UNREACHABLE();
+    }
+  }
+
+  return INTERRUPT;
+}
+
+
+#define __ ACCESS_MASM(masm())
+
+
+// Unwind a try-finally on a non-local exit: drop down to the handler block,
+// restore the context, pop the handler and call the finally code.
+FullCodeGenerator::NestedStatement* FullCodeGenerator::TryFinally::Exit(
+    int* stack_depth,
+    int* context_length) {
+  ASM_LOCATION("FullCodeGenerator::TryFinally::Exit");
+  // The macros used here must preserve the result register.
+
+  // Because the handler block contains the context of the finally
+  // code, we can restore it directly from there for the finally code
+  // rather than iteratively unwinding contexts via their previous
+  // links.
+  __ Drop(*stack_depth);  // Down to the handler block.
+  if (*context_length > 0) {
+    // Restore the context to its dedicated register and the stack.
+    __ Peek(cp, StackHandlerConstants::kContextOffset);
+    __ Str(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+  }
+  __ PopTryHandler();
+  __ Bl(finally_entry_);
+
+  *stack_depth = 0;
+  *context_length = 0;
+  return previous_;
+}
+
+
+#undef __
+
+
+} } // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_A64
--- /dev/null
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#if V8_TARGET_ARCH_A64
+
+#include "a64/assembler-a64.h"
+#include "code-stubs.h"
+#include "codegen.h"
+#include "disasm.h"
+#include "ic-inl.h"
+#include "runtime.h"
+#include "stub-cache.h"
+
+namespace v8 {
+namespace internal {
+
+
+#define __ ACCESS_MASM(masm)
+
+
+// "type" holds an instance type on entry and is not clobbered.
+// The generated code branches to "global_object" if "type" is any kind of
+// global JS object (global object, builtins object, or global proxy).
+static void GenerateGlobalInstanceTypeCheck(MacroAssembler* masm,
+                                            Register type,
+                                            Label* global_object) {
+  // Fold the three type comparisons into one branch using conditional
+  // compares: the final flags are 'eq' if any of the three types matched.
+  __ Cmp(type, JS_GLOBAL_OBJECT_TYPE);
+  __ Ccmp(type, JS_BUILTINS_OBJECT_TYPE, ZFlag, ne);
+  __ Ccmp(type, JS_GLOBAL_PROXY_TYPE, ZFlag, ne);
+  __ B(eq, global_object);
+}
+
+
+// Generated code falls through if the receiver is a regular non-global
+// JS object with slow properties and no interceptors.
+//
+// "receiver" holds the receiver on entry and is unchanged.
+// "elements" holds the property dictionary on fall through.
+static void GenerateNameDictionaryReceiverCheck(MacroAssembler* masm,
+                                                Register receiver,
+                                                Register elements,
+                                                Register scratch0,
+                                                Register scratch1,
+                                                Label* miss) {
+  ASSERT(!AreAliased(receiver, elements, scratch0, scratch1));
+
+  // Check that the receiver isn't a smi.
+  __ JumpIfSmi(receiver, miss);
+
+  // Check that the receiver is a valid JS object.
+  // Let t be the object instance type, we want:
+  //   FIRST_SPEC_OBJECT_TYPE <= t <= LAST_SPEC_OBJECT_TYPE.
+  // Since LAST_SPEC_OBJECT_TYPE is the last possible instance type we only
+  // check the lower bound.
+  STATIC_ASSERT(LAST_TYPE == LAST_SPEC_OBJECT_TYPE);
+
+  __ JumpIfObjectType(receiver, scratch0, scratch1, FIRST_SPEC_OBJECT_TYPE,
+                      miss, lt);
+
+  // scratch0 now contains the map of the receiver and scratch1 the object type.
+  Register map = scratch0;
+  Register type = scratch1;
+
+  // Check if the receiver is a global JS object.
+  GenerateGlobalInstanceTypeCheck(masm, type, miss);
+
+  // Check that the object does not require access checks.
+  __ Ldrb(scratch1, FieldMemOperand(map, Map::kBitFieldOffset));
+  __ Tbnz(scratch1, Map::kIsAccessCheckNeeded, miss);
+  // ... and that it has no named interceptor.
+  __ Tbnz(scratch1, Map::kHasNamedInterceptor, miss);
+
+  // Check that the properties dictionary is valid (i.e. the properties are
+  // actually stored as a dictionary, not a fast-mode array).
+  __ Ldr(elements, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
+  __ Ldr(scratch1, FieldMemOperand(elements, HeapObject::kMapOffset));
+  __ JumpIfNotRoot(scratch1, Heap::kHashTableMapRootIndex, miss);
+}
+
+
+// Helper function used from LoadIC GenerateNormal.
+//
+// elements: Property dictionary. It is not clobbered if a jump to the miss
+//           label is done.
+// name:     Property name. It is not clobbered if a jump to the miss label is
+//           done
+// result:   Register for the result. It is only updated if a jump to the miss
+//           label is not done.
+// The scratch registers need to be different from elements, name and result.
+// The generated code assumes that the receiver has slow properties,
+// is not a global object and does not have interceptors.
+static void GenerateDictionaryLoad(MacroAssembler* masm,
+                                   Label* miss,
+                                   Register elements,
+                                   Register name,
+                                   Register result,
+                                   Register scratch1,
+                                   Register scratch2) {
+  ASSERT(!AreAliased(elements, name, scratch1, scratch2));
+  ASSERT(!AreAliased(result, scratch1, scratch2));
+
+  Label done;
+
+  // Probe the dictionary.
+  NameDictionaryLookupStub::GeneratePositiveLookup(masm,
+                                                   miss,
+                                                   &done,
+                                                   elements,
+                                                   name,
+                                                   scratch1,
+                                                   scratch2);
+
+  // If probing finds an entry check that the value is a normal property.
+  // scratch2 is expected to point at the found entry (set by
+  // GeneratePositiveLookup) — TODO confirm against the stub's contract.
+  __ Bind(&done);
+
+  static const int kElementsStartOffset = NameDictionary::kHeaderSize +
+      NameDictionary::kElementsStartIndex * kPointerSize;
+  static const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
+  __ Ldr(scratch1, FieldMemOperand(scratch2, kDetailsOffset));
+  __ Tst(scratch1, Operand(Smi::FromInt(PropertyDetails::TypeField::kMask)));
+  __ B(ne, miss);
+
+  // Get the value at the masked, scaled index and return.
+  __ Ldr(result,
+         FieldMemOperand(scratch2, kElementsStartOffset + 1 * kPointerSize));
+}
+
+
+// Helper function used from StoreIC::GenerateNormal.
+//
+// elements: Property dictionary. It is not clobbered if a jump to the miss
+//           label is done.
+// name:     Property name. It is not clobbered if a jump to the miss label is
+//           done
+// value:    The value to store (never clobbered).
+//
+// The generated code assumes that the receiver has slow properties,
+// is not a global object and does not have interceptors.
+static void GenerateDictionaryStore(MacroAssembler* masm,
+                                    Label* miss,
+                                    Register elements,
+                                    Register name,
+                                    Register value,
+                                    Register scratch1,
+                                    Register scratch2) {
+  ASSERT(!AreAliased(elements, name, value, scratch1, scratch2));
+
+  Label done;
+
+  // Probe the dictionary.
+  NameDictionaryLookupStub::GeneratePositiveLookup(masm,
+                                                   miss,
+                                                   &done,
+                                                   elements,
+                                                   name,
+                                                   scratch1,
+                                                   scratch2);
+
+  // If probing finds an entry in the dictionary check that the value
+  // is a normal property that is not read only.
+  __ Bind(&done);
+
+  static const int kElementsStartOffset = NameDictionary::kHeaderSize +
+      NameDictionary::kElementsStartIndex * kPointerSize;
+  static const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
+  // The store is allowed only for a normal field that is not READ_ONLY.
+  static const int kTypeAndReadOnlyMask =
+      PropertyDetails::TypeField::kMask |
+      PropertyDetails::AttributesField::encode(READ_ONLY);
+  __ Ldrsw(scratch1, UntagSmiFieldMemOperand(scratch2, kDetailsOffset));
+  __ Tst(scratch1, kTypeAndReadOnlyMask);
+  __ B(ne, miss);
+
+  // Store the value at the masked, scaled index and return.
+  static const int kValueOffset = kElementsStartOffset + kPointerSize;
+  __ Add(scratch2, scratch2, kValueOffset - kHeapObjectTag);
+  __ Str(value, MemOperand(scratch2));
+
+  // Update the write barrier. Make sure not to clobber the value.
+  __ Mov(scratch1, value);
+  __ RecordWrite(
+      elements, scratch2, scratch1, kLRHasNotBeenSaved, kDontSaveFPRegs);
+}
+
+
+// Checks the receiver for special cases (value type, slow case bits).
+// Falls through for regular JS object and return the map of the
+// receiver in 'map_scratch' if the receiver is not a SMI.
+//
+// 'interceptor_bit' is the Map::kBitFieldOffset bit to test (for example
+// Map::kHasIndexedInterceptor); jumps to 'slow' if that bit is set.
+static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm,
+                                           Register receiver,
+                                           Register map_scratch,
+                                           Register scratch,
+                                           int interceptor_bit,
+                                           Label* slow) {
+  ASSERT(!AreAliased(map_scratch, scratch));
+
+  // Check that the object isn't a smi.
+  __ JumpIfSmi(receiver, slow);
+  // Get the map of the receiver.
+  __ Ldr(map_scratch, FieldMemOperand(receiver, HeapObject::kMapOffset));
+  // Check bit field: bail out to the slow path if access checks are needed or
+  // the requested interceptor bit is set.
+  __ Ldrb(scratch, FieldMemOperand(map_scratch, Map::kBitFieldOffset));
+  __ Tbnz(scratch, Map::kIsAccessCheckNeeded, slow);
+  __ Tbnz(scratch, interceptor_bit, slow);
+
+  // Check that the object is some kind of JS object EXCEPT JS Value type.
+  // In the case that the object is a value-wrapper object, we enter the
+  // runtime system to make sure that indexing into string objects work
+  // as intended.
+  STATIC_ASSERT(JS_OBJECT_TYPE > JS_VALUE_TYPE);
+  __ Ldrb(scratch, FieldMemOperand(map_scratch, Map::kInstanceTypeOffset));
+  __ Cmp(scratch, JS_OBJECT_TYPE);
+  __ B(lt, slow);
+}
+
+
+// Loads an indexed element from a fast case array.
+// If not_fast_array is NULL, doesn't perform the elements map check.
+//
+// receiver - holds the receiver on entry.
+//            Unchanged unless 'result' is the same register.
+//
+// key - holds the smi key on entry.
+//       Unchanged unless 'result' is the same register.
+//
+// elements - holds the elements of the receiver on exit.
+//
+// elements_map - holds the elements map on exit if the not_fast_array branch
+//                is taken. Otherwise, this is used as a scratch register.
+//
+// result - holds the result on exit if the load succeeded.
+//          Allowed to be the same as 'receiver' or 'key'.
+//          Unchanged on bailout so 'receiver' and 'key' can be safely
+//          used by further computation.
+static void GenerateFastArrayLoad(MacroAssembler* masm,
+                                  Register receiver,
+                                  Register key,
+                                  Register elements,
+                                  Register elements_map,
+                                  Register scratch2,
+                                  Register result,
+                                  Label* not_fast_array,
+                                  Label* slow) {
+  ASSERT(!AreAliased(receiver, key, elements, elements_map, scratch2));
+
+  // Check for fast array.
+  __ Ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
+  if (not_fast_array != NULL) {
+    // Check that the object is in fast mode and writable.
+    __ Ldr(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
+    __ JumpIfNotRoot(elements_map, Heap::kFixedArrayMapRootIndex,
+                     not_fast_array);
+  } else {
+    __ AssertFastElements(elements);
+  }
+
+  // The elements_map register is only used for the not_fast_array path, which
+  // was handled above. From this point onward it is a scratch register.
+  Register scratch1 = elements_map;
+
+  // Check that the key (index) is within bounds. Both the key and the length
+  // are smis, so the tagged unsigned comparison gives the right result.
+  __ Ldr(scratch1, FieldMemOperand(elements, FixedArray::kLengthOffset));
+  __ Cmp(key, scratch1);
+  __ B(hs, slow);
+
+  // Fast case: Do the load.
+  __ Add(scratch1, elements, FixedArray::kHeaderSize - kHeapObjectTag);
+  __ SmiUntag(scratch2, key);
+  __ Ldr(scratch2, MemOperand(scratch1, scratch2, LSL, kPointerSizeLog2));
+
+  // In case the loaded value is the_hole we have to consult GetProperty
+  // to ensure the prototype chain is searched.
+  __ JumpIfRoot(scratch2, Heap::kTheHoleValueRootIndex, slow);
+
+  // Move the value to the result register.
+  // 'result' can alias with 'receiver' or 'key' but these two must be
+  // preserved if we jump to 'slow'.
+  __ Mov(result, scratch2);
+}
+
+
+// Checks whether a key is an array index string or a unique name.
+// Falls through if a key is a unique name.
+// The map of the key is returned in 'map_scratch'.
+// If the jump to 'index_string' is done the hash of the key is left
+// in 'hash_scratch'.
+static void GenerateKeyNameCheck(MacroAssembler* masm,
+                                 Register key,
+                                 Register map_scratch,
+                                 Register hash_scratch,
+                                 Label* index_string,
+                                 Label* not_unique) {
+  ASSERT(!AreAliased(key, map_scratch, hash_scratch));
+
+  // Is the key a name? Instance types above LAST_UNIQUE_NAME_TYPE can never
+  // be unique names.
+  Label unique;
+  __ JumpIfObjectType(key, map_scratch, hash_scratch, LAST_UNIQUE_NAME_TYPE,
+                      not_unique, hi);
+  STATIC_ASSERT(LAST_UNIQUE_NAME_TYPE == FIRST_NONSTRING_TYPE);
+  __ B(eq, &unique);
+
+  // Is the string an array index with cached numeric value? The hash field is
+  // a 32-bit value, hence the W-register load.
+  __ Ldr(hash_scratch.W(), FieldMemOperand(key, Name::kHashFieldOffset));
+  __ TestAndBranchIfAllClear(hash_scratch,
+                             Name::kContainsCachedArrayIndexMask,
+                             index_string);
+
+  // Is the string internalized? We know it's a string, so a single bit test is
+  // enough.
+  __ Ldrb(hash_scratch, FieldMemOperand(map_scratch, Map::kInstanceTypeOffset));
+  STATIC_ASSERT(kInternalizedTag == 0);
+  __ TestAndBranchIfAnySet(hash_scratch, kIsNotInternalizedMask, not_unique);
+
+  __ Bind(&unique);
+  // Fall through if the key is a unique name.
+}
+
+
+// Neither 'object' nor 'key' are modified by this function.
+//
+// If the 'unmapped_case' or 'slow_case' exit is taken, the 'map' register is
+// left with the object's elements map. Otherwise, it is used as a scratch
+// register.
+//
+// Returns a MemOperand addressing the mapped argument slot in the context.
+// The returned operand is built from 'scratch1' and 'scratch2', which must
+// stay live while it is used.
+static MemOperand GenerateMappedArgumentsLookup(MacroAssembler* masm,
+                                                Register object,
+                                                Register key,
+                                                Register map,
+                                                Register scratch1,
+                                                Register scratch2,
+                                                Label* unmapped_case,
+                                                Label* slow_case) {
+  ASSERT(!AreAliased(object, key, map, scratch1, scratch2));
+
+  Heap* heap = masm->isolate()->heap();
+
+  // Check that the receiver is a JSObject. Because of the elements
+  // map check later, we do not need to check for interceptors or
+  // whether it requires access checks.
+  __ JumpIfSmi(object, slow_case);
+  // Check that the object is some kind of JSObject.
+  __ JumpIfObjectType(object, map, scratch1, FIRST_JS_RECEIVER_TYPE,
+                      slow_case, lt);
+
+  // Check that the key is a positive smi.
+  __ JumpIfNotSmi(key, slow_case);
+  __ Tbnz(key, kXSignBit, slow_case);
+
+  // Load the elements object and check its map.
+  Handle<Map> arguments_map(heap->non_strict_arguments_elements_map());
+  __ Ldr(map, FieldMemOperand(object, JSObject::kElementsOffset));
+  __ CheckMap(map, scratch1, arguments_map, slow_case, DONT_DO_SMI_CHECK);
+
+  // Check if element is in the range of mapped arguments. If not, jump
+  // to the unmapped lookup. The first two elements of the parameter map hold
+  // the context and the backing store, so mapped entries start at index 2 -
+  // hence the length adjustment by Smi 2 here and the index-2 offset below.
+  __ Ldr(scratch1, FieldMemOperand(map, FixedArray::kLengthOffset));
+  __ Sub(scratch1, scratch1, Operand(Smi::FromInt(2)));
+  __ Cmp(key, scratch1);
+  __ B(hs, unmapped_case);
+
+  // Load element index and check whether it is the hole.
+  static const int offset =
+      FixedArray::kHeaderSize + 2 * kPointerSize - kHeapObjectTag;
+
+  __ Add(scratch1, map, offset);
+  __ SmiUntag(scratch2, key);
+  __ Ldr(scratch1, MemOperand(scratch1, scratch2, LSL, kPointerSizeLog2));
+  __ JumpIfRoot(scratch1, Heap::kTheHoleValueRootIndex, unmapped_case);
+
+  // Load value from context and return it. Element 0 of the parameter map is
+  // the context; scratch1 holds the (smi) context slot index loaded above.
+  __ Ldr(scratch2, FieldMemOperand(map, FixedArray::kHeaderSize));
+  __ SmiUntag(scratch1);
+  __ Add(scratch2, scratch2, Context::kHeaderSize - kHeapObjectTag);
+  return MemOperand(scratch2, scratch1, LSL, kPointerSizeLog2);
+}
+
+
+// The 'parameter_map' register must be loaded with the parameter map of the
+// arguments object and is overwritten.
+//
+// Returns a MemOperand addressing the slot for 'key' (a smi) in the backing
+// store; 'parameter_map' and 'scratch' must stay live while it is used.
+static MemOperand GenerateUnmappedArgumentsLookup(MacroAssembler* masm,
+                                                  Register key,
+                                                  Register parameter_map,
+                                                  Register scratch,
+                                                  Label* slow_case) {
+  ASSERT(!AreAliased(key, parameter_map, scratch));
+
+  // Element is in arguments backing store, which is referenced by the
+  // second element of the parameter_map.
+  const int kBackingStoreOffset = FixedArray::kHeaderSize + kPointerSize;
+  Register backing_store = parameter_map;
+  __ Ldr(backing_store, FieldMemOperand(parameter_map, kBackingStoreOffset));
+  Handle<Map> fixed_array_map(masm->isolate()->heap()->fixed_array_map());
+  __ CheckMap(
+      backing_store, scratch, fixed_array_map, slow_case, DONT_DO_SMI_CHECK);
+  // Bounds check; both key and length are smis.
+  __ Ldr(scratch, FieldMemOperand(backing_store, FixedArray::kLengthOffset));
+  __ Cmp(key, scratch);
+  __ B(hs, slow_case);
+
+  __ Add(backing_store,
+         backing_store,
+         FixedArray::kHeaderSize - kHeapObjectTag);
+  __ SmiUntag(scratch, key);
+  return MemOperand(backing_store, scratch, LSL, kPointerSizeLog2);
+}
+
+
+// Megamorphic load: probe the stub cache for a matching handler, and fall
+// back to the generic miss handler if none is found.
+void LoadIC::GenerateMegamorphic(MacroAssembler* masm,
+                                 ExtraICState extra_state) {
+  // ----------- S t a t e -------------
+  //  -- x2    : name
+  //  -- lr    : return address
+  //  -- x0    : receiver
+  // -----------------------------------
+
+  // Probe the stub cache.
+  Code::Flags flags = Code::ComputeFlags(
+      Code::HANDLER, MONOMORPHIC, extra_state,
+      Code::NORMAL, Code::LOAD_IC);
+  masm->isolate()->stub_cache()->GenerateProbe(
+      masm, flags, x0, x2, x3, x4, x5, x6);
+
+  // Cache miss: Jump to runtime.
+  GenerateMiss(masm);
+}
+
+
+// Load from a receiver with slow (dictionary) properties: check the receiver,
+// then look the name up directly in its property dictionary.
+void LoadIC::GenerateNormal(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- x2    : name
+  //  -- lr    : return address
+  //  -- x0    : receiver
+  // -----------------------------------
+  Label miss;
+
+  GenerateNameDictionaryReceiverCheck(masm, x0, x1, x3, x4, &miss);
+
+  // x1 now holds the property dictionary. The loaded value is left in x0.
+  GenerateDictionaryLoad(masm, &miss, x1, x2, x0, x3, x4);
+  __ Ret();
+
+  // Cache miss: Jump to runtime.
+  __ Bind(&miss);
+  GenerateMiss(masm);
+}
+
+
+// Load miss handler: count the miss, then tail-call the runtime entry with
+// (receiver, name) on the stack.
+void LoadIC::GenerateMiss(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- x2    : name
+  //  -- lr    : return address
+  //  -- x0    : receiver
+  // -----------------------------------
+  Isolate* isolate = masm->isolate();
+  ASM_LOCATION("LoadIC::GenerateMiss");
+
+  __ IncrementCounter(isolate->counters()->load_miss(), 1, x3, x4);
+
+  // TODO(jbramley): Does the target actually expect an argument in x3, or is
+  // this inherited from ARM's push semantics?
+  __ Mov(x3, x0);
+  __ Push(x3, x2);
+
+  // Perform tail call to the entry.
+  ExternalReference ref =
+      ExternalReference(IC_Utility(kLoadIC_Miss), isolate);
+  __ TailCallExternalReference(ref, 2, 1);
+}
+
+
+// Tail-call Runtime::kGetProperty with (receiver, name) on the stack.
+void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
+  // ---------- S t a t e --------------
+  //  -- x2    : name
+  //  -- lr    : return address
+  //  -- x0    : receiver
+  // -----------------------------------
+
+  // TODO(jbramley): Does the target actually expect an argument in x3, or is
+  // this inherited from ARM's push semantics?
+  __ Mov(x3, x0);
+  __ Push(x3, x2);
+
+  __ TailCallRuntime(Runtime::kGetProperty, 2, 1);
+}
+
+
+// Keyed load from a non-strict arguments object: try the mapped (aliased)
+// parameters first, then the unmapped backing store, else miss.
+void KeyedLoadIC::GenerateNonStrictArguments(MacroAssembler* masm) {
+  // ---------- S t a t e --------------
+  //  -- lr    : return address
+  //  -- x0    : key
+  //  -- x1    : receiver
+  // -----------------------------------
+  // Note that 'result' aliases 'key' (both x0); the key is only overwritten
+  // once a load has succeeded, so x0 is preserved on the miss path.
+  Register result = x0;
+  Register key = x0;
+  Register receiver = x1;
+  Label miss, unmapped;
+
+  Register map_scratch = x2;
+  MemOperand mapped_location = GenerateMappedArgumentsLookup(
+      masm, receiver, key, map_scratch, x3, x4, &unmapped, &miss);
+  __ Ldr(result, mapped_location);
+  __ Ret();
+
+  __ Bind(&unmapped);
+  // Parameter map is left in map_scratch when a jump on unmapped is done.
+  MemOperand unmapped_location =
+      GenerateUnmappedArgumentsLookup(masm, key, map_scratch, x3, &miss);
+  __ Ldr(x2, unmapped_location);
+  __ JumpIfRoot(x2, Heap::kTheHoleValueRootIndex, &miss);
+  // Move the result in x0. x0 must be preserved on miss.
+  __ Mov(result, x2);
+  __ Ret();
+
+  __ Bind(&miss);
+  GenerateMiss(masm);
+}
+
+
+// Keyed store to a non-strict arguments object: store into the mapped
+// (aliased) parameter slot if there is one, else into the unmapped backing
+// store; emit a write barrier in both cases.
+void KeyedStoreIC::GenerateNonStrictArguments(MacroAssembler* masm) {
+  ASM_LOCATION("KeyedStoreIC::GenerateNonStrictArguments");
+  // ---------- S t a t e --------------
+  //  -- lr    : return address
+  //  -- x0    : value
+  //  -- x1    : key
+  //  -- x2    : receiver
+  // -----------------------------------
+
+  Label slow, notin;
+
+  Register value = x0;
+  Register key = x1;
+  Register receiver = x2;
+  Register map = x3;
+
+  // These registers are used by GenerateMappedArgumentsLookup to build a
+  // MemOperand. They are live for as long as the MemOperand is live.
+  Register mapped1 = x4;
+  Register mapped2 = x5;
+
+  MemOperand mapped =
+      GenerateMappedArgumentsLookup(masm, receiver, key, map,
+                                    mapped1, mapped2,
+                                    &notin, &slow);
+  Operand mapped_offset = mapped.OffsetAsOperand();
+  __ Str(value, mapped);
+  // Recompute the absolute slot address for the write barrier, and copy the
+  // value so it is not clobbered (x0 is the IC's return value).
+  __ Add(x10, mapped.base(), mapped_offset);
+  __ Mov(x11, value);
+  __ RecordWrite(mapped.base(), x10, x11, kLRHasNotBeenSaved, kDontSaveFPRegs);
+  __ Ret();
+
+  __ Bind(&notin);
+
+  // These registers are used by GenerateUnmappedArgumentsLookup to build a
+  // MemOperand. They are live for as long as the MemOperand is live.
+  Register unmapped1 = map;  // This is assumed to alias 'map'.
+  Register unmapped2 = x4;
+  MemOperand unmapped =
+      GenerateUnmappedArgumentsLookup(masm, key, unmapped1, unmapped2, &slow);
+  Operand unmapped_offset = unmapped.OffsetAsOperand();
+  __ Str(value, unmapped);
+  __ Add(x10, unmapped.base(), unmapped_offset);
+  __ Mov(x11, value);
+  __ RecordWrite(unmapped.base(), x10, x11,
+                 kLRHasNotBeenSaved, kDontSaveFPRegs);
+  __ Ret();
+
+  __ Bind(&slow);
+  GenerateMiss(masm);
+}
+
+
+// Keyed load miss handler: count the miss, then tail-call the runtime entry
+// with (receiver, key) on the stack.
+void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
+  // ---------- S t a t e --------------
+  //  -- lr    : return address
+  //  -- x0    : key
+  //  -- x1    : receiver
+  // -----------------------------------
+  Isolate* isolate = masm->isolate();
+
+  __ IncrementCounter(isolate->counters()->keyed_load_miss(), 1, x10, x11);
+
+  __ Push(x1, x0);
+
+  // Perform tail call to the entry.
+  ExternalReference ref =
+      ExternalReference(IC_Utility(kKeyedLoadIC_Miss), isolate);
+
+  __ TailCallExternalReference(ref, 2, 1);
+}
+
+
+// Tail-call Runtime::kKeyedGetProperty with (receiver, key) on the stack.
+void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
+  // ---------- S t a t e --------------
+  //  -- lr    : return address
+  //  -- x0    : key
+  //  -- x1    : receiver
+  // -----------------------------------
+  Register key = x0;
+  Register receiver = x1;
+
+  __ Push(receiver, key);
+  __ TailCallRuntime(Runtime::kKeyedGetProperty, 2, 1);
+}
+
+
+// Keyed load fast path for a smi key: try fast elements first, then a
+// number dictionary; jumps to 'slow' for everything else.
+static void GenerateKeyedLoadWithSmiKey(MacroAssembler* masm,
+                                        Register key,
+                                        Register receiver,
+                                        Register scratch1,
+                                        Register scratch2,
+                                        Register scratch3,
+                                        Register scratch4,
+                                        Register scratch5,
+                                        Label *slow) {
+  ASSERT(!AreAliased(
+      key, receiver, scratch1, scratch2, scratch3, scratch4, scratch5));
+
+  Isolate* isolate = masm->isolate();
+  Label check_number_dictionary;
+  // If we can load the value, it should be returned in x0.
+  // NOTE(review): x0 is not in the AreAliased check above; callers are
+  // expected to pass key/scratches that do not clash with x0 - confirm at
+  // call sites.
+  Register result = x0;
+
+  GenerateKeyedLoadReceiverCheck(
+      masm, receiver, scratch1, scratch2, Map::kHasIndexedInterceptor, slow);
+
+  // Check the receiver's map to see if it has fast elements.
+  __ CheckFastElements(scratch1, scratch2, &check_number_dictionary);
+
+  GenerateFastArrayLoad(
+      masm, receiver, key, scratch3, scratch2, scratch1, result, NULL, slow);
+  __ IncrementCounter(
+      isolate->counters()->keyed_load_generic_smi(), 1, scratch1, scratch2);
+  __ Ret();
+
+  __ Bind(&check_number_dictionary);
+  __ Ldr(scratch3, FieldMemOperand(receiver, JSObject::kElementsOffset));
+  __ Ldr(scratch2, FieldMemOperand(scratch3, JSObject::kMapOffset));
+
+  // Check whether we have a number dictionary.
+  __ JumpIfNotRoot(scratch2, Heap::kHashTableMapRootIndex, slow);
+
+  __ LoadFromNumberDictionary(
+      slow, scratch3, key, result, scratch1, scratch2, scratch4, scratch5);
+  __ Ret();
+}
+
+// Keyed load fast path for a unique-name key: probe the keyed lookup cache
+// for fast-case receivers, loading either an in-object property or a
+// property-array property on a hit; dictionary-mode receivers are probed
+// directly. Jumps to 'slow' on any failure.
+static void GenerateKeyedLoadWithNameKey(MacroAssembler* masm,
+                                         Register key,
+                                         Register receiver,
+                                         Register scratch1,
+                                         Register scratch2,
+                                         Register scratch3,
+                                         Register scratch4,
+                                         Register scratch5,
+                                         Label *slow) {
+  ASSERT(!AreAliased(
+      key, receiver, scratch1, scratch2, scratch3, scratch4, scratch5));
+
+  Isolate* isolate = masm->isolate();
+  Label probe_dictionary, property_array_property;
+  // If we can load the value, it should be returned in x0.
+  Register result = x0;
+
+  GenerateKeyedLoadReceiverCheck(
+      masm, receiver, scratch1, scratch2, Map::kHasNamedInterceptor, slow);
+
+  // If the receiver is a fast-case object, check the keyed lookup cache.
+  // Otherwise probe the dictionary.
+  __ Ldr(scratch2, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
+  __ Ldr(scratch3, FieldMemOperand(scratch2, HeapObject::kMapOffset));
+  __ JumpIfRoot(scratch3, Heap::kHashTableMapRootIndex, &probe_dictionary);
+
+  // We keep the map of the receiver in scratch1.
+  Register receiver_map = scratch1;
+
+  // Load the map of the receiver, compute the keyed lookup cache hash
+  // based on 32 bits of the map pointer and the name hash.
+  __ Ldr(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
+  __ Mov(scratch2, Operand(receiver_map, ASR, KeyedLookupCache::kMapHashShift));
+  __ Ldr(scratch3.W(), FieldMemOperand(key, Name::kHashFieldOffset));
+  __ Eor(scratch2, scratch2, Operand(scratch3, ASR, Name::kHashShift));
+  int mask = KeyedLookupCache::kCapacityMask & KeyedLookupCache::kHashMask;
+  __ And(scratch2, scratch2, mask);
+
+  // Load the key (consisting of map and unique name) from the cache and
+  // check for match. Each cache entry is a (map, name) pair, i.e. two
+  // pointers - hence the scaling by kPointerSizeLog2 + 1.
+  Label load_in_object_property;
+  static const int kEntriesPerBucket = KeyedLookupCache::kEntriesPerBucket;
+  Label hit_on_nth_entry[kEntriesPerBucket];
+  ExternalReference cache_keys =
+      ExternalReference::keyed_lookup_cache_keys(isolate);
+
+  __ Mov(scratch3, Operand(cache_keys));
+  __ Add(scratch3, scratch3, Operand(scratch2, LSL, kPointerSizeLog2 + 1));
+
+  for (int i = 0; i < kEntriesPerBucket - 1; i++) {
+    Label try_next_entry;
+    // Load map and make scratch3 pointing to the next entry.
+    __ Ldr(scratch4, MemOperand(scratch3, kPointerSize * 2, PostIndex));
+    __ Cmp(receiver_map, scratch4);
+    __ B(ne, &try_next_entry);
+    __ Ldr(scratch4, MemOperand(scratch3, -kPointerSize));  // Load name
+    __ Cmp(key, scratch4);
+    __ B(eq, &hit_on_nth_entry[i]);
+    __ Bind(&try_next_entry);
+  }
+
+  // Last entry.
+  __ Ldr(scratch4, MemOperand(scratch3, kPointerSize, PostIndex));
+  __ Cmp(receiver_map, scratch4);
+  __ B(ne, slow);
+  __ Ldr(scratch4, MemOperand(scratch3));
+  __ Cmp(key, scratch4);
+  __ B(ne, slow);
+
+  // Get field offset. The field offsets are 32-bit values, hence the
+  // W-register load below.
+  ExternalReference cache_field_offsets =
+      ExternalReference::keyed_lookup_cache_field_offsets(isolate);
+
+  // Hit on nth entry: entry i uses bucket index scratch2 + i.
+  for (int i = kEntriesPerBucket - 1; i >= 0; i--) {
+    __ Bind(&hit_on_nth_entry[i]);
+    __ Mov(scratch3, Operand(cache_field_offsets));
+    if (i != 0) {
+      __ Add(scratch2, scratch2, i);
+    }
+    __ Ldr(scratch4.W(), MemOperand(scratch3, scratch2, LSL, 2));
+    __ Ldrb(scratch5,
+            FieldMemOperand(receiver_map, Map::kInObjectPropertiesOffset));
+    __ Subs(scratch4, scratch4, scratch5);
+    __ B(ge, &property_array_property);
+    if (i != 0) {
+      __ B(&load_in_object_property);
+    }
+  }
+
+  // Load in-object property.
+  __ Bind(&load_in_object_property);
+  __ Ldrb(scratch5, FieldMemOperand(receiver_map, Map::kInstanceSizeOffset));
+  __ Add(scratch5, scratch5, scratch4);  // Index from start of object.
+  // Note: receiver is clobbered from here on; it is not needed again on this
+  // path.
+  __ Sub(receiver, receiver, kHeapObjectTag);  // Remove the heap tag.
+  __ Ldr(result, MemOperand(receiver, scratch5, LSL, kPointerSizeLog2));
+  __ IncrementCounter(isolate->counters()->keyed_load_generic_lookup_cache(),
+                      1, scratch1, scratch2);
+  __ Ret();
+
+  // Load property array property.
+  __ Bind(&property_array_property);
+  __ Ldr(scratch1, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
+  __ Add(scratch1, scratch1, FixedArray::kHeaderSize - kHeapObjectTag);
+  __ Ldr(result, MemOperand(scratch1, scratch4, LSL, kPointerSizeLog2));
+  __ IncrementCounter(isolate->counters()->keyed_load_generic_lookup_cache(),
+                      1, scratch1, scratch2);
+  __ Ret();
+
+  // Do a quick inline probe of the receiver's dictionary, if it exists.
+  __ Bind(&probe_dictionary);
+  __ Ldr(scratch1, FieldMemOperand(receiver, HeapObject::kMapOffset));
+  __ Ldrb(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
+  GenerateGlobalInstanceTypeCheck(masm, scratch1, slow);
+  // Load the property.
+  GenerateDictionaryLoad(masm, slow, scratch2, key, result, scratch1, scratch3);
+  __ IncrementCounter(isolate->counters()->keyed_load_generic_symbol(),
+                      1, scratch1, scratch2);
+  __ Ret();
+}
+
+
+// Generic keyed load: dispatch on the key kind (smi index, unique name, or
+// cached array-index string) to the corresponding fast-path generator.
+void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
+  // ---------- S t a t e --------------
+  //  -- lr    : return address
+  //  -- x0    : key
+  //  -- x1    : receiver
+  // -----------------------------------
+  Label slow, check_name, index_smi, index_name;
+
+  Register key = x0;
+  Register receiver = x1;
+
+  __ JumpIfNotSmi(key, &check_name);
+  __ Bind(&index_smi);
+  // Now the key is known to be a smi. This place is also jumped to from below
+  // where a numeric string is converted to a smi.
+  GenerateKeyedLoadWithSmiKey(masm, key, receiver, x2, x3, x4, x5, x6, &slow);
+
+  // Slow case, key and receiver still in x0 and x1.
+  __ Bind(&slow);
+  __ IncrementCounter(
+      masm->isolate()->counters()->keyed_load_generic_slow(), 1, x2, x3);
+  GenerateRuntimeGetProperty(masm);
+
+  __ Bind(&check_name);
+  GenerateKeyNameCheck(masm, key, x2, x3, &index_name, &slow);
+
+  GenerateKeyedLoadWithNameKey(masm, key, receiver, x2, x3, x4, x5, x6, &slow);
+
+  __ Bind(&index_name);
+  // The key is a string with a cached array index; extract the index into key.
+  __ IndexFromHash(x3, key);
+  // Now jump to the place where smi keys are handled.
+  __ B(&index_smi);
+}
+
+
+// Keyed load specialized for string receivers: return the one-character
+// string at the given index, falling back to the miss handler otherwise.
+void KeyedLoadIC::GenerateString(MacroAssembler* masm) {
+  // ---------- S t a t e --------------
+  //  -- lr    : return address
+  //  -- x0    : key (index)
+  //  -- x1    : receiver
+  // -----------------------------------
+  Label miss;
+
+  Register index = x0;
+  Register receiver = x1;
+  Register result = x0;
+  Register scratch = x3;
+
+  StringCharAtGenerator char_at_generator(receiver,
+                                          index,
+                                          scratch,
+                                          result,
+                                          &miss,  // When not a string.
+                                          &miss,  // When not a number.
+                                          &miss,  // When index out of range.
+                                          STRING_INDEX_IS_ARRAY_INDEX);
+  char_at_generator.GenerateFast(masm);
+  __ Ret();
+
+  StubRuntimeCallHelper call_helper;
+  char_at_generator.GenerateSlow(masm, call_helper);
+
+  __ Bind(&miss);
+  GenerateMiss(masm);
+}
+
+
+// Keyed load through an indexed interceptor: after validating the receiver
+// and key, tail-call the interceptor runtime entry.
+void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) {
+  // ---------- S t a t e --------------
+  //  -- lr    : return address
+  //  -- x0    : key
+  //  -- x1    : receiver
+  // -----------------------------------
+  Label slow;
+  Register key = x0;
+  Register receiver = x1;
+
+  // Check that the receiver isn't a smi.
+  __ JumpIfSmi(receiver, &slow);
+
+  // Check that the key is an array index, that is Uint32: it must be a smi
+  // with a clear sign bit.
+  __ TestAndBranchIfAnySet(key, kSmiTagMask | kSmiSignMask, &slow);
+
+  // Get the map of the receiver.
+  Register map = x2;
+  __ Ldr(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
+
+  // Check that it has indexed interceptor and access checks
+  // are not enabled for this object.
+  __ Ldrb(x3, FieldMemOperand(map, Map::kBitFieldOffset));
+  ASSERT(kSlowCaseBitFieldMask ==
+      ((1 << Map::kIsAccessCheckNeeded) | (1 << Map::kHasIndexedInterceptor)));
+  __ Tbnz(x3, Map::kIsAccessCheckNeeded, &slow);
+  __ Tbz(x3, Map::kHasIndexedInterceptor, &slow);
+
+  // Everything is fine, call runtime.
+  __ Push(receiver, key);
+  __ TailCallExternalReference(
+      ExternalReference(IC_Utility(kKeyedLoadPropertyWithInterceptor),
+                        masm->isolate()),
+      2,
+      1);
+
+  __ Bind(&slow);
+  GenerateMiss(masm);
+}
+
+
+// Keyed store miss handler: tail-call the runtime entry with
+// (receiver, key, value) on the stack.
+void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
+  ASM_LOCATION("KeyedStoreIC::GenerateMiss");
+  // ---------- S t a t e --------------
+  //  -- x0    : value
+  //  -- x1    : key
+  //  -- x2    : receiver
+  //  -- lr    : return address
+  // -----------------------------------
+
+  // Push receiver, key and value for runtime call.
+  __ Push(x2, x1, x0);
+
+  ExternalReference ref =
+      ExternalReference(IC_Utility(kKeyedStoreIC_Miss), masm->isolate());
+  __ TailCallExternalReference(ref, 3, 1);
+}
+
+
+// Keyed store slow handler: complete the store in the runtime without
+// recording an IC miss (so no transition to the generic stub occurs).
+void KeyedStoreIC::GenerateSlow(MacroAssembler* masm) {
+  ASM_LOCATION("KeyedStoreIC::GenerateSlow");
+  // ---------- S t a t e --------------
+  //  -- lr    : return address
+  //  -- x0    : value
+  //  -- x1    : key
+  //  -- x2    : receiver
+  // -----------------------------------
+
+  // Push receiver, key and value for runtime call.
+  __ Push(x2, x1, x0);
+
+  // The slow case calls into the runtime to complete the store without causing
+  // an IC miss that would otherwise cause a transition to the generic stub.
+  ExternalReference ref =
+      ExternalReference(IC_Utility(kKeyedStoreIC_Slow), masm->isolate());
+  __ TailCallExternalReference(ref, 3, 1);
+}
+
+
+// Tail-call Runtime::kSetProperty with (receiver, key, value, attributes,
+// strict_mode) on the stack.
+void KeyedStoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm,
+                                              StrictModeFlag strict_mode) {
+  ASM_LOCATION("KeyedStoreIC::GenerateRuntimeSetProperty");
+  // ---------- S t a t e --------------
+  //  -- x0    : value
+  //  -- x1    : key
+  //  -- x2    : receiver
+  //  -- lr    : return address
+  // -----------------------------------
+
+  // Push receiver, key and value for runtime call.
+  __ Push(x2, x1, x0);
+
+  // Push PropertyAttributes(NONE) and strict_mode for runtime call.
+  // Since NONE == 0, pushing xzr pushes Smi::FromInt(NONE).
+  STATIC_ASSERT(NONE == 0);
+  __ Mov(x10, Operand(Smi::FromInt(strict_mode)));
+  __ Push(xzr, x10);
+
+  __ TailCallRuntime(Runtime::kSetProperty, 5, 1);
+}
+
+
+// Fast-path element stores for KeyedStoreIC::GenerateGeneric.
+//
+// Binds 'fast_object' and 'fast_double' (the FixedArray and FixedDoubleArray
+// store paths). 'check_map' selects whether the elements map must still be
+// verified; 'increment_length' selects the array-grow variant, which bumps
+// the JSArray length by one (to key + 1). Elements-kind transitions
+// (smi -> double, smi -> object, double -> object) are handled in place;
+// anything else jumps to 'slow'. x10 and x11 are used as scratch registers.
+static void KeyedStoreGenerateGenericHelper(
+    MacroAssembler* masm,
+    Label* fast_object,
+    Label* fast_double,
+    Label* slow,
+    KeyedStoreCheckMap check_map,
+    KeyedStoreIncrementLength increment_length,
+    Register value,
+    Register key,
+    Register receiver,
+    Register receiver_map,
+    Register elements_map,
+    Register elements) {
+  ASSERT(!AreAliased(
+      value, key, receiver, receiver_map, elements_map, elements, x10, x11));
+
+  Label transition_smi_elements;
+  Label transition_double_elements;
+  Label fast_double_without_map_check;
+  Label non_double_value;
+  Label finish_store;
+
+  __ Bind(fast_object);
+  if (check_map == kCheckMap) {
+    __ Ldr(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
+    __ Cmp(elements_map,
+           Operand(masm->isolate()->factory()->fixed_array_map()));
+    __ B(ne, fast_double);
+  }
+
+  // HOLECHECK: guards "A[i] = V"
+  // We have to go to the runtime if the current value is the hole because
+  // there may be a callback on the element.
+  Label holecheck_passed;
+  // TODO(all): This address calculation is repeated later (for the store
+  // itself). We should keep the result to avoid doing the work twice.
+  __ Add(x10, elements, FixedArray::kHeaderSize - kHeapObjectTag);
+  __ Add(x10, x10, Operand::UntagSmiAndScale(key, kPointerSizeLog2));
+  __ Ldr(x11, MemOperand(x10));
+  __ JumpIfNotRoot(x11, Heap::kTheHoleValueRootIndex, &holecheck_passed);
+  __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, x10, slow);
+  // Use the macro-assembler Bind, consistent with every other label in this
+  // file (the previous raw assembler bind() bypassed the macro layer).
+  __ Bind(&holecheck_passed);
+
+  // Smi stores don't require further checks.
+  __ JumpIfSmi(value, &finish_store);
+
+  // Escape to elements kind transition case.
+  __ CheckFastObjectElements(receiver_map, x10, &transition_smi_elements);
+
+  __ Bind(&finish_store);
+  if (increment_length == kIncrementLength) {
+    // Add 1 to receiver->length.
+    __ Add(x10, key, Operand(Smi::FromInt(1)));
+    __ Str(x10, FieldMemOperand(receiver, JSArray::kLengthOffset));
+  }
+
+  Register address = x11;
+  __ Add(address, elements, FixedArray::kHeaderSize - kHeapObjectTag);
+  __ Add(address, address, Operand::UntagSmiAndScale(key, kPointerSizeLog2));
+  __ Str(value, MemOperand(address));
+
+  // Smis need no write barrier.
+  Label dont_record_write;
+  __ JumpIfSmi(value, &dont_record_write);
+
+  // Update write barrier for the elements array address.
+  __ Mov(x10, value);  // Preserve the value which is returned.
+  __ RecordWrite(elements,
+                 address,
+                 x10,
+                 kLRHasNotBeenSaved,
+                 kDontSaveFPRegs,
+                 EMIT_REMEMBERED_SET,
+                 OMIT_SMI_CHECK);
+
+  __ Bind(&dont_record_write);
+  __ Ret();
+
+
+  __ Bind(fast_double);
+  if (check_map == kCheckMap) {
+    // Check for fast double array case. If this fails, call through to the
+    // runtime.
+    __ JumpIfNotRoot(elements_map, Heap::kFixedDoubleArrayMapRootIndex, slow);
+  }
+
+  // HOLECHECK: guards "A[i] double hole?"
+  // We have to see if the double version of the hole is present. If so go to
+  // the runtime.
+  // TODO(all): This address calculation was done earlier. We should keep the
+  // result to avoid doing the work twice.
+  __ Add(x10, elements, FixedDoubleArray::kHeaderSize - kHeapObjectTag);
+  __ Add(x10, x10, Operand::UntagSmiAndScale(key, kPointerSizeLog2));
+  __ Ldr(x11, MemOperand(x10));
+  __ CompareAndBranch(x11, kHoleNanInt64, ne, &fast_double_without_map_check);
+  __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, x10, slow);
+
+  __ Bind(&fast_double_without_map_check);
+  __ StoreNumberToDoubleElements(value,
+                                 key,
+                                 elements,
+                                 x10,
+                                 d0,
+                                 d1,
+                                 &transition_double_elements);
+  if (increment_length == kIncrementLength) {
+    // Add 1 to receiver->length.
+    __ Add(x10, key, Operand(Smi::FromInt(1)));
+    __ Str(x10, FieldMemOperand(receiver, JSArray::kLengthOffset));
+  }
+  __ Ret();
+
+
+  __ Bind(&transition_smi_elements);
+  // Transition the array appropriately depending on the value type.
+  __ Ldr(x10, FieldMemOperand(value, HeapObject::kMapOffset));
+  __ JumpIfNotRoot(x10, Heap::kHeapNumberMapRootIndex, &non_double_value);
+
+  // Value is a double. Transition FAST_SMI_ELEMENTS ->
+  // FAST_DOUBLE_ELEMENTS and complete the store.
+  __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
+                                         FAST_DOUBLE_ELEMENTS,
+                                         receiver_map,
+                                         x10,
+                                         slow);
+  ASSERT(receiver_map.Is(x3));  // Transition code expects map in x3.
+  AllocationSiteMode mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS,
+                                                    FAST_DOUBLE_ELEMENTS);
+  ElementsTransitionGenerator::GenerateSmiToDouble(masm, mode, slow);
+  __ Ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
+  __ B(&fast_double_without_map_check);
+
+  __ Bind(&non_double_value);
+  // Value is not a double, FAST_SMI_ELEMENTS -> FAST_ELEMENTS.
+  __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
+                                         FAST_ELEMENTS,
+                                         receiver_map,
+                                         x10,
+                                         slow);
+  ASSERT(receiver_map.Is(x3));  // Transition code expects map in x3.
+  mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_ELEMENTS);
+  ElementsTransitionGenerator::GenerateMapChangeElementsTransition(masm, mode,
+                                                                   slow);
+  __ Ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
+  __ B(&finish_store);
+
+  __ Bind(&transition_double_elements);
+  // Elements are FAST_DOUBLE_ELEMENTS, but value is an Object that's not a
+  // HeapNumber. Make sure that the receiver is a Array with FAST_ELEMENTS and
+  // transition array from FAST_DOUBLE_ELEMENTS to FAST_ELEMENTS
+  __ LoadTransitionedArrayMapConditional(FAST_DOUBLE_ELEMENTS,
+                                         FAST_ELEMENTS,
+                                         receiver_map,
+                                         x10,
+                                         slow);
+  ASSERT(receiver_map.Is(x3));  // Transition code expects map in x3.
+  mode = AllocationSite::GetMode(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS);
+  ElementsTransitionGenerator::GenerateDoubleToObject(masm, mode, slow);
+  __ Ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
+  __ B(&finish_store);
+}
+
+
+// Generic keyed store stub: dispatches on the receiver type and elements
+// backing store, and falls back to the runtime for anything the fast paths
+// cannot handle (non-smi keys, access-checked/observed objects, etc.).
+void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
+                                   StrictModeFlag strict_mode) {
+  ASM_LOCATION("KeyedStoreIC::GenerateGeneric");
+  // ---------- S t a t e --------------
+  //  -- x0     : value
+  //  -- x1     : key
+  //  -- x2     : receiver
+  //  -- lr     : return address
+  // -----------------------------------
+  Label slow;
+  Label array;
+  Label fast_object;
+  Label extra;
+  Label fast_object_grow;
+  Label fast_double_grow;
+  Label fast_double;
+
+  Register value = x0;
+  Register key = x1;
+  Register receiver = x2;
+  Register receiver_map = x3;
+  Register elements = x4;
+  Register elements_map = x5;
+
+  // Only smi keys and heap-object receivers are handled here.
+  __ JumpIfNotSmi(key, &slow);
+  __ JumpIfSmi(receiver, &slow);
+  __ Ldr(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
+
+  // Check that the receiver does not require access checks and is not observed.
+  // The generic stub does not perform map checks or handle observed objects.
+  __ Ldrb(x10, FieldMemOperand(receiver_map, Map::kBitFieldOffset));
+  __ TestAndBranchIfAnySet(
+      x10, (1 << Map::kIsAccessCheckNeeded) | (1 << Map::kIsObserved), &slow);
+
+  // Check if the object is a JS array or not.
+  Register instance_type = x10;
+  __ CompareInstanceType(receiver_map, instance_type, JS_ARRAY_TYPE);
+  __ B(eq, &array);
+  // Check that the object is some kind of JSObject.
+  __ Cmp(instance_type, FIRST_JS_OBJECT_TYPE);
+  __ B(lt, &slow);
+
+  // Object case: Check key against length in the elements array.
+  __ Ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
+  // Check array bounds. Both the key and the length of FixedArray are smis.
+  __ Ldrsw(x10, UntagSmiFieldMemOperand(elements, FixedArray::kLengthOffset));
+  __ Cmp(x10, Operand::UntagSmi(key));
+  __ B(hi, &fast_object);
+  // Out-of-bounds stores on non-array objects fall through to the slow case;
+  // only JSArrays get the capacity-grow path below.
+
+
+  __ Bind(&slow);
+  // Slow case, handle jump to runtime.
+  // Live values:
+  //  x0: value
+  //  x1: key
+  //  x2: receiver
+  GenerateRuntimeSetProperty(masm, strict_mode);
+
+
+  __ Bind(&extra);
+  // Extra capacity case: Check if there is extra capacity to
+  // perform the store and update the length. Used for adding one
+  // element to the array by writing to array[array.length].
+
+  // Check for room in the elements backing store.
+  // Both the key and the length of FixedArray are smis.
+  __ Ldrsw(x10, UntagSmiFieldMemOperand(elements, FixedArray::kLengthOffset));
+  __ Cmp(x10, Operand::UntagSmi(key));
+  __ B(ls, &slow);
+
+  // Dispatch on the elements map: grow either a FixedArray or a
+  // FixedDoubleArray; any other backing store goes to the runtime.
+  __ Ldr(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
+  __ Cmp(elements_map, Operand(masm->isolate()->factory()->fixed_array_map()));
+  __ B(eq, &fast_object_grow);
+  __ Cmp(elements_map,
+         Operand(masm->isolate()->factory()->fixed_double_array_map()));
+  __ B(eq, &fast_double_grow);
+  __ B(&slow);
+
+
+  __ Bind(&array);
+  // Array case: Get the length and the elements array from the JS
+  // array. Check that the array is in fast mode (and writable); if it
+  // is the length is always a smi.
+
+  __ Ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
+
+  // Check the key against the length in the array.
+  __ Ldrsw(x10, UntagSmiFieldMemOperand(receiver, JSArray::kLengthOffset));
+  __ Cmp(x10, Operand::UntagSmi(key));
+  __ B(eq, &extra);  // We can handle the case where we are appending 1 element.
+  __ B(lo, &slow);
+
+  // Emit the fast-path store sequences. The helper presumably binds the
+  // fast_object/fast_double labels (with a map check) and the *_grow variants
+  // (kIncrementLength) -- confirm against KeyedStoreGenerateGenericHelper.
+  KeyedStoreGenerateGenericHelper(masm, &fast_object, &fast_double,
+                                  &slow, kCheckMap, kDontIncrementLength,
+                                  value, key, receiver, receiver_map,
+                                  elements_map, elements);
+  KeyedStoreGenerateGenericHelper(masm, &fast_object_grow, &fast_double_grow,
+                                  &slow, kDontCheckMap, kIncrementLength,
+                                  value, key, receiver, receiver_map,
+                                  elements_map, elements);
+}
+
+
+// Megamorphic named store: probe the stub cache for a handler matching the
+// (receiver map, name) pair; on a probe miss, fall through to GenerateMiss.
+void StoreIC::GenerateMegamorphic(MacroAssembler* masm,
+                                  ExtraICState extra_ic_state) {
+  // ----------- S t a t e -------------
+  //  -- x0    : value
+  //  -- x1    : receiver
+  //  -- x2    : name
+  //  -- lr    : return address
+  // -----------------------------------
+
+  // Probe the stub cache.
+  Code::Flags flags = Code::ComputeFlags(
+      Code::HANDLER, MONOMORPHIC, extra_ic_state,
+      Code::NORMAL, Code::STORE_IC);
+
+  // x1 = receiver, x2 = name; x3-x6 are presumably scratch registers for the
+  // probe -- confirm against StubCache::GenerateProbe.
+  masm->isolate()->stub_cache()->GenerateProbe(
+      masm, flags, x1, x2, x3, x4, x5, x6);
+
+  // Cache miss: Jump to runtime.
+  GenerateMiss(masm);
+}
+
+
+// IC miss handler: pushes (receiver, name, value) and tail-calls the
+// kStoreIC_Miss runtime entry, which will update the IC state.
+void StoreIC::GenerateMiss(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- x0    : value
+  //  -- x1    : receiver
+  //  -- x2    : name
+  //  -- lr    : return address
+  // -----------------------------------
+
+  __ Push(x1, x2, x0);
+
+  // Tail call to the entry.
+  ExternalReference ref =
+      ExternalReference(IC_Utility(kStoreIC_Miss), masm->isolate());
+  // 3 arguments on the stack, 1 result register.
+  __ TailCallExternalReference(ref, 3, 1);
+}
+
+
+// Store to a receiver whose properties are in dictionary ("normal") mode:
+// verify the receiver, then store directly into its name dictionary.
+void StoreIC::GenerateNormal(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- x0    : value
+  //  -- x1    : receiver
+  //  -- x2    : name
+  //  -- lr    : return address
+  // -----------------------------------
+  Label miss;
+  Register value = x0;
+  Register receiver = x1;
+  Register name = x2;
+  Register dictionary = x3;
+
+  // Loads the receiver's property dictionary into 'dictionary', or jumps to
+  // miss if the receiver is not suitable. x4/x5 are scratch.
+  GenerateNameDictionaryReceiverCheck(
+      masm, receiver, dictionary, x4, x5, &miss);
+
+  GenerateDictionaryStore(masm, &miss, dictionary, name, value, x4, x5);
+  Counters* counters = masm->isolate()->counters();
+  __ IncrementCounter(counters->store_normal_hit(), 1, x4, x5);
+  __ Ret();
+
+  // Cache miss: Jump to runtime.
+  __ Bind(&miss);
+  __ IncrementCounter(counters->store_normal_miss(), 1, x4, x5);
+  GenerateMiss(masm);
+}
+
+
+// Unoptimized store: forwards (receiver, name, value, attributes, strictness)
+// to Runtime::kSetProperty.
+void StoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm,
+                                         StrictModeFlag strict_mode) {
+  ASM_LOCATION("StoreIC::GenerateRuntimeSetProperty");
+  // ----------- S t a t e -------------
+  //  -- x0    : value
+  //  -- x1    : receiver
+  //  -- x2    : name
+  //  -- lr    : return address
+  // -----------------------------------
+
+  __ Push(x1, x2, x0);
+
+  // Runtime::kSetProperty takes two extra smi arguments: the property
+  // attributes (NONE here) and the language/strict mode.
+  __ Mov(x11, Operand(Smi::FromInt(NONE)));  // PropertyAttributes
+  __ Mov(x10, Operand(Smi::FromInt(strict_mode)));
+  __ Push(x11, x10);
+
+  // Do tail-call to runtime routine.
+  __ TailCallRuntime(Runtime::kSetProperty, 5, 1);
+}
+
+
+// Slow-path store: complete the store in the runtime without recording an IC
+// miss (so the IC does not transition to the generic stub).
+void StoreIC::GenerateSlow(MacroAssembler* masm) {
+  // ---------- S t a t e --------------
+  //  -- x0    : value
+  //  -- x1    : receiver
+  //  -- x2    : name
+  //  -- lr    : return address
+  // -----------------------------------
+
+  // Push receiver, name and value for runtime call.
+  __ Push(x1, x2, x0);
+
+  // The slow case calls into the runtime to complete the store without causing
+  // an IC miss that would otherwise cause a transition to the generic stub.
+  ExternalReference ref =
+      ExternalReference(IC_Utility(kStoreIC_Slow), masm->isolate());
+  __ TailCallExternalReference(ref, 3, 1);
+}
+
+
+// Maps a JavaScript comparison token to the A64 condition code that the
+// compare stub should branch on. Unsupported tokens are unreachable; 'al'
+// (always) is only returned to satisfy the compiler.
+Condition CompareIC::ComputeCondition(Token::Value op) {
+  switch (op) {
+    case Token::EQ_STRICT:
+    case Token::EQ:
+      return eq;
+    case Token::LT:
+      return lt;
+    case Token::GT:
+      return gt;
+    case Token::LTE:
+      return le;
+    case Token::GTE:
+      return ge;
+    default:
+      UNREACHABLE();
+      return al;
+  }
+}
+
+
+// Returns true if the call site at 'address' was emitted with JumpPatchSite
+// patch information (encoded as an inline-data marker directly after the
+// call), i.e. if an inlined smi check is available for patching.
+bool CompareIC::HasInlinedSmiCode(Address address) {
+  // The address of the instruction following the call.
+  Address info_address =
+      Assembler::return_address_from_call_start(address);
+
+  InstructionSequence* patch_info = InstructionSequence::At(info_address);
+  return patch_info->IsInlineData();
+}
+
+
+// Activate a SMI fast-path by patching the instructions generated by
+// JumpPatchSite::EmitJumpIf(Not)Smi(), using the information encoded by
+// JumpPatchSite::EmitPatchInfo().
+// Activate or deactivate an inlined SMI fast-path by patching the tbz/tbnz
+// emitted by JumpPatchSite::EmitJumpIf(Not)Smi(), using the information
+// encoded by JumpPatchSite::EmitPatchInfo().
+void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) {
+  // The patch information is encoded in the instruction stream using
+  // instructions which have no side effects, so we can safely execute them.
+  // The patch information is encoded directly after the call to the helper
+  // function which is requesting this patch operation.
+  Address info_address =
+      Assembler::return_address_from_call_start(address);
+  InlineSmiCheckInfo info(info_address);
+
+  // Check and decode the patch information instruction.
+  if (!info.HasSmiCheck()) {
+    return;
+  }
+
+  if (FLAG_trace_ic) {
+    PrintF("[  Patching ic at %p, marker=%p, SMI check=%p\n",
+           address, info_address, reinterpret_cast<void*>(info.SmiCheck()));
+  }
+
+  // Patch and activate code generated by JumpPatchSite::EmitJumpIfNotSmi()
+  // and JumpPatchSite::EmitJumpIfSmi().
+  // Changing
+  //   tb(n)z xzr, #0, <target>
+  // to
+  //   tb(!n)z test_reg, #0, <target>
+  Instruction* to_patch = info.SmiCheck();
+  PatchingAssembler patcher(to_patch, 1);
+  ASSERT(to_patch->IsTestBranch());
+  // The emitted placeholder always tests bit 0 (the smi tag bit).
+  ASSERT(to_patch->ImmTestBranchBit5() == 0);
+  ASSERT(to_patch->ImmTestBranchBit40() == 0);
+
+  STATIC_ASSERT(kSmiTag == 0);
+  STATIC_ASSERT(kSmiTagMask == 1);
+
+  int branch_imm = to_patch->ImmTestBranch();
+  Register smi_reg;
+  if (check == ENABLE_INLINED_SMI_CHECK) {
+    // Enabling: the placeholder tests xzr (never taken); substitute the real
+    // register recorded in the patch info.
+    ASSERT(to_patch->Rt() == xzr.code());
+    smi_reg = info.SmiRegister();
+  } else {
+    // Disabling: restore the inert xzr-testing form.
+    ASSERT(check == DISABLE_INLINED_SMI_CHECK);
+    ASSERT(to_patch->Rt() != xzr.code());
+    smi_reg = xzr;
+  }
+
+  // The branch sense is inverted when (de)activating, as described above.
+  if (to_patch->Mask(TestBranchMask) == TBZ) {
+    // This is JumpIfNotSmi(smi_reg, branch_imm).
+    patcher.tbnz(smi_reg, 0, branch_imm);
+  } else {
+    ASSERT(to_patch->Mask(TestBranchMask) == TBNZ);
+    // This is JumpIfSmi(smi_reg, branch_imm).
+    patcher.tbz(smi_reg, 0, branch_imm);
+  }
+}
+
+
+} } // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_A64
--- /dev/null
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#if V8_TARGET_ARCH_A64
+
+#define A64_DEFINE_FP_STATICS
+
+#include "a64/instructions-a64.h"
+#include "a64/assembler-a64-inl.h"
+
+namespace v8 {
+namespace internal {
+
+
+// Returns true if this instruction is any load: a load/store-pair with the L
+// bit set, or one of the single-register load opcodes (integer and FP).
+bool Instruction::IsLoad() const {
+  // Not a load/store instruction at all.
+  if (Mask(LoadStoreAnyFMask) != LoadStoreAnyFixed) {
+    return false;
+  }
+
+  if (Mask(LoadStorePairAnyFMask) == LoadStorePairAnyFixed) {
+    // Pair form: the L bit distinguishes LDP from STP.
+    return Mask(LoadStorePairLBit) != 0;
+  } else {
+    LoadStoreOp op = static_cast<LoadStoreOp>(Mask(LoadStoreOpMask));
+    switch (op) {
+      case LDRB_w:
+      case LDRH_w:
+      case LDR_w:
+      case LDR_x:
+      case LDRSB_w:
+      case LDRSB_x:
+      case LDRSH_w:
+      case LDRSH_x:
+      case LDRSW_x:
+      case LDR_s:
+      case LDR_d: return true;
+      default: return false;
+    }
+  }
+}
+
+
+// Returns true if this instruction is any store: a load/store-pair with the L
+// bit clear, or one of the single-register store opcodes (integer and FP).
+bool Instruction::IsStore() const {
+  // Not a load/store instruction at all.
+  if (Mask(LoadStoreAnyFMask) != LoadStoreAnyFixed) {
+    return false;
+  }
+
+  if (Mask(LoadStorePairAnyFMask) == LoadStorePairAnyFixed) {
+    // Pair form: the L bit clear means STP.
+    return Mask(LoadStorePairLBit) == 0;
+  } else {
+    LoadStoreOp op = static_cast<LoadStoreOp>(Mask(LoadStoreOpMask));
+    switch (op) {
+      case STRB_w:
+      case STRH_w:
+      case STR_w:
+      case STR_x:
+      case STR_s:
+      case STR_d: return true;
+      default: return false;
+    }
+  }
+}
+
+
+// Rotates the low 'width' bits of 'value' right by 'rotate' positions.
+// Used to materialize logical-immediate bit patterns.
+// NOTE(review): when rotate == 0 the left operand shifts by 'width'; for
+// width == 64 that is a shift by the full type width (formally undefined in
+// C++, though the masked operand is 0) -- confirm callers never hit this or
+// guard it.
+static uint64_t RotateRight(uint64_t value,
+                            unsigned int rotate,
+                            unsigned int width) {
+  ASSERT(width <= 64);
+  rotate &= 63;
+  return ((value & ((1UL << rotate) - 1UL)) << (width - rotate)) |
+         (value >> rotate);
+}
+
+
+// Replicates the low 'width' bits of 'value' across a register of
+// 'reg_size' bits, e.g. width=8 pattern 0xab -> 0xabababab... This is the
+// "repeat element" step of logical-immediate decoding.
+static uint64_t RepeatBitsAcrossReg(unsigned reg_size,
+                                    uint64_t value,
+                                    unsigned width) {
+  ASSERT((width == 2) || (width == 4) || (width == 8) || (width == 16) ||
+         (width == 32));
+  ASSERT((reg_size == kWRegSize) || (reg_size == kXRegSize));
+  uint64_t result = value & ((1UL << width) - 1UL);
+  // Double the pattern until it fills the register.
+  for (unsigned i = width; i < reg_size; i *= 2) {
+    result |= (result << i);
+  }
+  return result;
+}
+
+
+// Logical immediates can't encode zero, so a return value of zero is used to
+// indicate a failure case. Specifically, where the constraints on imm_s are not
+// met.
+// Decodes the N:immr:imms logical-immediate encoding into the 64-bit value it
+// represents (see the table below, from the ARMv8 ARM).
+// Logical immediates can't encode zero, so a return value of zero is used to
+// indicate a failure case. Specifically, where the constraints on imm_s are not
+// met.
+uint64_t Instruction::ImmLogical() {
+  unsigned reg_size = SixtyFourBits() ? kXRegSize : kWRegSize;
+  int64_t n = BitN();
+  int64_t imm_s = ImmSetBits();
+  int64_t imm_r = ImmRotate();
+
+  // An integer is constructed from the n, imm_s and imm_r bits according to
+  // the following table:
+  //
+  //  N   imms    immr    size        S             R
+  //  1  ssssss  rrrrrr    64    UInt(ssssss)  UInt(rrrrrr)
+  //  0  0sssss  xrrrrr    32    UInt(sssss)   UInt(rrrrr)
+  //  0  10ssss  xxrrrr    16    UInt(ssss)    UInt(rrrr)
+  //  0  110sss  xxxrrr     8    UInt(sss)     UInt(rrr)
+  //  0  1110ss  xxxxrr     4    UInt(ss)      UInt(rr)
+  //  0  11110s  xxxxxr     2    UInt(s)       UInt(r)
+  // (s bits must not be all set)
+  //
+  // A pattern is constructed of size bits, where the least significant S+1
+  // bits are set. The pattern is rotated right by R, and repeated across a
+  // 32 or 64-bit value, depending on destination register width.
+  //
+
+  if (n == 1) {
+    // 64-bit element: imm_s == 0x3F (all s bits set) is reserved.
+    if (imm_s == 0x3F) {
+      return 0;
+    }
+    uint64_t bits = (1UL << (imm_s + 1)) - 1;
+    return RotateRight(bits, imm_r, 64);
+  } else {
+    // Reserved: top two bits of imm_s may not both be set when N == 0.
+    if ((imm_s >> 1) == 0x1F) {
+      return 0;
+    }
+    // The leading-zero position of imm_s selects the element size (32..2).
+    for (int width = 0x20; width >= 0x2; width >>= 1) {
+      if ((imm_s & width) == 0) {
+        int mask = width - 1;
+        // All low s bits set within the element is reserved (can't encode 0).
+        if ((imm_s & mask) == mask) {
+          return 0;
+        }
+        uint64_t bits = (1UL << ((imm_s & mask) + 1)) - 1;
+        return RepeatBitsAcrossReg(reg_size,
+                                   RotateRight(bits, imm_r & mask, width),
+                                   width);
+      }
+    }
+  }
+  UNREACHABLE();
+  return 0;
+}
+
+
+// Expands the 8-bit FP immediate field into a 32-bit single-precision value
+// (the FPExpandImm rule from the ARMv8 ARM).
+float Instruction::ImmFP32() {
+  //  ImmFP: abcdefgh (8 bits)
+  // Single: aBbb.bbbc.defg.h000.0000.0000.0000.0000 (32 bits)
+  // where B is b ^ 1
+  uint32_t bits = ImmFP();
+  uint32_t bit7 = (bits >> 7) & 0x1;
+  uint32_t bit6 = (bits >> 6) & 0x1;
+  uint32_t bit5_to_0 = bits & 0x3f;
+  // (32 - bit6) yields B followed by five copies of b: bit6=1 -> 0b011111,
+  // bit6=0 -> 0b100000.
+  uint32_t result = (bit7 << 31) | ((32 - bit6) << 25) | (bit5_to_0 << 19);
+
+  return rawbits_to_float(result);
+}
+
+
+// Expands the 8-bit FP immediate field into a 64-bit double-precision value
+// (the FPExpandImm rule from the ARMv8 ARM).
+double Instruction::ImmFP64() {
+  //  ImmFP: abcdefgh (8 bits)
+  // Double: aBbb.bbbb.bbcd.efgh.0000.0000.0000.0000
+  //         0000.0000.0000.0000.0000.0000.0000.0000 (64 bits)
+  // where B is b ^ 1
+  uint32_t bits = ImmFP();
+  uint64_t bit7 = (bits >> 7) & 0x1;
+  uint64_t bit6 = (bits >> 6) & 0x1;
+  uint64_t bit5_to_0 = bits & 0x3f;
+  // (256 - bit6) yields B followed by eight copies of b: bit6=1 -> 0xff,
+  // bit6=0 -> 0x100.
+  uint64_t result = (bit7 << 63) | ((256 - bit6) << 54) | (bit5_to_0 << 48);
+
+  return rawbits_to_double(result);
+}
+
+
+// Returns the per-register access size of a load/store-pair opcode:
+// LSDoubleWord for the 64-bit forms (x and d registers), LSWord otherwise.
+LSDataSize CalcLSPairDataSize(LoadStorePairOp op) {
+  switch (op) {
+    case STP_x:
+    case LDP_x:
+    case STP_d:
+    case LDP_d: return LSDoubleWord;
+    default: return LSWord;
+  }
+}
+
+
+// Returns the PC-relative byte offset encoded in this instruction, for the
+// three instruction classes that carry one: ADR, the PC-relative branches,
+// and literal loads.
+ptrdiff_t Instruction::ImmPCOffset() {
+  ptrdiff_t offset;
+  if (IsPCRelAddressing()) {
+    // PC-relative addressing. Only ADR is supported (ADR encodes a plain
+    // byte offset; ADRP would need a page-scaled offset).
+    offset = ImmPCRel();
+  } else if (BranchType() != UnknownBranchType) {
+    // All PC-relative branches.
+    // Relative branch offsets are instruction-size-aligned.
+    offset = ImmBranch() << kInstructionSizeLog2;
+  } else {
+    // Load literal (offset from PC).
+    ASSERT(IsLdrLiteral());
+    // The offset is always shifted by 2 bits, even for loads to 64-bits
+    // registers.
+    // NOTE(review): this relies on kInstructionSizeLog2 == 2 -- confirm.
+    offset = ImmLLiteral() << kInstructionSizeLog2;
+  }
+  return offset;
+}
+
+
+// Returns the instruction this one refers to (its branch/ADR/literal target).
+// NOTE(review): pointer arithmetic here is byte-granular, which presumably
+// relies on sizeof(Instruction) == 1 (no data members) -- confirm.
+Instruction* Instruction::ImmPCOffsetTarget() {
+  return this + ImmPCOffset();
+}
+
+
+// Re-targets this instruction's PC-relative offset at 'target', dispatching
+// on the instruction class (ADR, branch, or literal load).
+void Instruction::SetImmPCOffsetTarget(Instruction* target) {
+  if (IsPCRelAddressing()) {
+    SetPCRelImmTarget(target);
+  } else if (BranchType() != UnknownBranchType) {
+    SetBranchImmTarget(target);
+  } else {
+    // Must be a literal load (asserted in SetImmLLiteral's offset scaling).
+    SetImmLLiteral(target);
+  }
+}
+
+
+// Rewrites the immediate of an ADR instruction so it addresses 'target'.
+void Instruction::SetPCRelImmTarget(Instruction* target) {
+  // ADRP is not supported, so 'this' must point to an ADR instruction.
+  ASSERT(Mask(PCRelAddressingMask) == ADR);
+
+  Instr imm = Assembler::ImmPCRelAddress(target - this);
+
+  // Splice the new immediate into the existing instruction bits.
+  SetInstructionBits(Mask(~ImmPCRel_mask) | imm);
+}
+
+
+// Rewrites the immediate of a PC-relative branch (b.cond, b/bl, cbz/cbnz,
+// tbz/tbnz) so it branches to 'target'. The target must be
+// instruction-aligned relative to this instruction.
+void Instruction::SetBranchImmTarget(Instruction* target) {
+  ASSERT(((target - this) & 3) == 0);
+  Instr branch_imm = 0;
+  uint32_t imm_mask = 0;
+  // Branch offsets are encoded in instruction-size units.
+  int offset = (target - this) >> kInstructionSizeLog2;
+  switch (BranchType()) {
+    case CondBranchType: {
+      branch_imm = Assembler::ImmCondBranch(offset);
+      imm_mask = ImmCondBranch_mask;
+      break;
+    }
+    case UncondBranchType: {
+      branch_imm = Assembler::ImmUncondBranch(offset);
+      imm_mask = ImmUncondBranch_mask;
+      break;
+    }
+    case CompareBranchType: {
+      branch_imm = Assembler::ImmCmpBranch(offset);
+      imm_mask = ImmCmpBranch_mask;
+      break;
+    }
+    case TestBranchType: {
+      branch_imm = Assembler::ImmTestBranch(offset);
+      imm_mask = ImmTestBranch_mask;
+      break;
+    }
+    default: UNREACHABLE();
+  }
+  // Splice the new immediate into the existing instruction bits.
+  SetInstructionBits(Mask(~imm_mask) | branch_imm);
+}
+
+
+// Rewrites the offset of a literal-load instruction so it loads from
+// 'source'. The offset is encoded in literal-pool-entry units.
+void Instruction::SetImmLLiteral(Instruction* source) {
+  ASSERT(((source - this) & 3) == 0);
+  int offset = (source - this) >> kLiteralEntrySizeLog2;
+  Instr imm = Assembler::ImmLLiteral(offset);
+  Instr mask = ImmLLiteral_mask;
+
+  // Splice the new offset into the existing instruction bits.
+  SetInstructionBits(Mask(~mask) | imm);
+}
+
+
+// TODO(jbramley): We can't put this inline in the class because things like
+// xzr and Register are not defined in that header. Consider adding
+// instructions-a64-inl.h to work around this.
+// Returns true if this instruction sequence is an inline-data marker emitted
+// by MacroAssembler::InlineData().
+bool InstructionSequence::IsInlineData() const {
+  // Inline data is encoded as a single movz instruction which writes to xzr
+  // (x31): a 64-bit movz to the zero register has no architectural effect,
+  // so it is a safe carrier for the payload.
+  return IsMovz() && SixtyFourBits() && (Rd() == xzr.code());
+  // TODO(all): If we extend ::InlineData() to support bigger data, we need
+  // to update this method too.
+}
+
+
+// TODO(jbramley): We can't put this inline in the class because things like
+// xzr and Register are not defined in that header. Consider adding
+// instructions-a64-inl.h to work around this.
+// Extracts the payload from an inline-data marker (the 16-bit movz immediate;
+// see IsInlineData for the encoding).
+uint64_t InstructionSequence::InlineData() const {
+  ASSERT(IsInlineData());
+  uint64_t payload = ImmMoveWide();
+  // TODO(all): If we extend ::InlineData() to support bigger data, we need
+  // to update this method too.
+  return payload;
+}
+
+
+} } // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_A64
--- /dev/null
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_A64_INSTRUCTIONS_A64_H_
+#define V8_A64_INSTRUCTIONS_A64_H_
+
+#include "globals.h"
+#include "utils.h"
+#include "a64/constants-a64.h"
+#include "a64/utils-a64.h"
+
+namespace v8 {
+namespace internal {
+
+
+// ISA constants. --------------------------------------------------------------
+
+// An A64 instruction is always a single 32-bit word.
+typedef uint32_t Instr;
+
+// The following macros initialize a float/double variable with a bit pattern
+// without using static initializers: If A64_DEFINE_FP_STATICS is defined, the
+// symbol is defined as uint32_t/uint64_t initialized with the desired bit
+// pattern. Otherwise, the same symbol is declared as an external float/double.
+// NOTE(review): the declared type (float/double) deliberately differs from
+// the defined type (uint32_t/uint64_t); the aliasing happens at link time.
+// Presumably intentional bit-pattern punning -- confirm it is safe on all
+// supported toolchains.
+#if defined(A64_DEFINE_FP_STATICS)
+#define DEFINE_FLOAT(name, value) extern const uint32_t name = value
+#define DEFINE_DOUBLE(name, value) extern const uint64_t name = value
+#else
+#define DEFINE_FLOAT(name, value) extern const float name
+#define DEFINE_DOUBLE(name, value) extern const double name
+#endif  // defined(A64_DEFINE_FP_STATICS)
+
+DEFINE_FLOAT(kFP32PositiveInfinity, 0x7f800000);
+DEFINE_FLOAT(kFP32NegativeInfinity, 0xff800000);
+DEFINE_DOUBLE(kFP64PositiveInfinity, 0x7ff0000000000000UL);
+DEFINE_DOUBLE(kFP64NegativeInfinity, 0xfff0000000000000UL);
+
+// This value is a signalling NaN as both a double and as a float (taking the
+// least-significant word).
+DEFINE_DOUBLE(kFP64SignallingNaN, 0x7ff000007f800001);
+DEFINE_FLOAT(kFP32SignallingNaN, 0x7f800001);
+
+// A similar value, but as a quiet NaN.
+DEFINE_DOUBLE(kFP64QuietNaN, 0x7ff800007fc00001);
+DEFINE_FLOAT(kFP32QuietNaN, 0x7fc00001);
+
+#undef DEFINE_FLOAT
+#undef DEFINE_DOUBLE
+
+
+// Load/store access size, encoded as log2 of the size in bytes
+// (byte=0, halfword=1, word=2, doubleword=3).
+enum LSDataSize {
+  LSByte = 0,
+  LSHalfword = 1,
+  LSWord = 2,
+  LSDoubleWord = 3
+};
+
+LSDataSize CalcLSPairDataSize(LoadStorePairOp op);
+
+// Classification of the PC-relative branch instruction families; used to
+// select the correct immediate field when reading or patching offsets.
+enum ImmBranchType {
+  UnknownBranchType = 0,
+  CondBranchType    = 1,
+  UncondBranchType  = 2,
+  CompareBranchType = 3,
+  TestBranchType    = 4
+};
+
+// Load/store addressing modes.
+enum AddrMode {
+  Offset,
+  PreIndex,
+  PostIndex
+};
+
+enum FPRounding {
+  // The first four values are encodable directly by FPCR<RMode>.
+  FPTieEven = 0x0,
+  FPPositiveInfinity = 0x1,
+  FPNegativeInfinity = 0x2,
+  FPZero = 0x3,
+
+  // The final rounding mode is only available when explicitly specified by the
+  // instruction (such as with fcvta). It cannot be set in FPCR.
+  FPTieAway
+};
+
+// How register number 31 should be interpreted in an operand position:
+// as the stack pointer (csp/wsp) or as the zero register (xzr/wzr).
+enum Reg31Mode {
+  Reg31IsStackPointer,
+  Reg31IsZeroRegister
+};
+
+// Instructions. ---------------------------------------------------------------
+
+// Wraps a 32-bit A64 instruction word. Instruction objects are overlaid
+// directly on the instruction stream (InstructionBits/SetInstructionBits
+// memcpy through 'this'); they are never constructed as ordinary objects.
+// NOTE(review): the byte-granular pointer arithmetic in following(),
+// preceding() and NextInstruction() presumably relies on
+// sizeof(Instruction) == 1 -- confirm.
+class Instruction {
+ public:
+  // Read the raw 32-bit instruction word.
+  Instr InstructionBits() const {
+    Instr bits;
+    memcpy(&bits, this, sizeof(bits));
+    return bits;
+  }
+
+  // Overwrite the raw 32-bit instruction word.
+  void SetInstructionBits(Instr new_instr) {
+    memcpy(this, &new_instr, sizeof(new_instr));
+  }
+
+  int Bit(int pos) const {
+    return (InstructionBits() >> pos) & 1;
+  }
+
+  uint32_t Bits(int msb, int lsb) const {
+    return unsigned_bitextract_32(msb, lsb, InstructionBits());
+  }
+
+  // Sign-extending bit-field extraction.
+  // NOTE(review): reads the word via reinterpret_cast rather than memcpy as
+  // the accessors above do -- presumably safe given alignment; confirm.
+  int32_t SignedBits(int msb, int lsb) const {
+    int32_t bits = *(reinterpret_cast<const int32_t*>(this));
+    return signed_bitextract_32(msb, lsb, bits);
+  }
+
+  Instr Mask(uint32_t mask) const {
+    return InstructionBits() & mask;
+  }
+
+  Instruction* following(int count = 1) {
+    return this + count * kInstructionSize;
+  }
+
+  Instruction* preceding(int count = 1) {
+    return this - count * kInstructionSize;
+  }
+
+  // Generate an accessor for each named instruction field.
+  #define DEFINE_GETTER(Name, HighBit, LowBit, Func)             \
+  int64_t Name() const { return Func(HighBit, LowBit); }
+  INSTRUCTION_FIELDS_LIST(DEFINE_GETTER)
+  #undef DEFINE_GETTER
+
+  // ImmPCRel is a compound field (not present in INSTRUCTION_FIELDS_LIST),
+  // formed from ImmPCRelLo and ImmPCRelHi.
+  int ImmPCRel() const {
+    int const offset = ((ImmPCRelHi() << ImmPCRelLo_width) | ImmPCRelLo());
+    int const width = ImmPCRelLo_width + ImmPCRelHi_width;
+    return signed_bitextract_32(width-1, 0, offset);
+  }
+
+  // Decoders for the non-trivial immediate encodings (see the .cc file).
+  uint64_t ImmLogical();
+  float ImmFP32();
+  double ImmFP64();
+
+  LSDataSize SizeLSPair() const {
+    return CalcLSPairDataSize(
+        static_cast<LoadStorePairOp>(Mask(LoadStorePairMask)));
+  }
+
+  // Helpers.
+  bool IsCondBranchImm() const {
+    return Mask(ConditionalBranchFMask) == ConditionalBranchFixed;
+  }
+
+  bool IsUncondBranchImm() const {
+    return Mask(UnconditionalBranchFMask) == UnconditionalBranchFixed;
+  }
+
+  bool IsCompareBranch() const {
+    return Mask(CompareBranchFMask) == CompareBranchFixed;
+  }
+
+  bool IsTestBranch() const {
+    return Mask(TestBranchFMask) == TestBranchFixed;
+  }
+
+  bool IsLdrLiteral() const {
+    return Mask(LoadLiteralFMask) == LoadLiteralFixed;
+  }
+
+  bool IsLdrLiteralX() const {
+    return Mask(LoadLiteralMask) == LDR_x_lit;
+  }
+
+  bool IsPCRelAddressing() const {
+    return Mask(PCRelAddressingFMask) == PCRelAddressingFixed;
+  }
+
+  bool IsLogicalImmediate() const {
+    return Mask(LogicalImmediateFMask) == LogicalImmediateFixed;
+  }
+
+  bool IsAddSubImmediate() const {
+    return Mask(AddSubImmediateFMask) == AddSubImmediateFixed;
+  }
+
+  bool IsAddSubExtended() const {
+    return Mask(AddSubExtendedFMask) == AddSubExtendedFixed;
+  }
+
+  // Match any loads or stores, including pairs.
+  bool IsLoadOrStore() const {
+    return Mask(LoadStoreAnyFMask) == LoadStoreAnyFixed;
+  }
+
+  // Match any loads, including pairs.
+  bool IsLoad() const;
+  // Match any stores, including pairs.
+  bool IsStore() const;
+
+  // Indicate whether Rd can be the stack pointer or the zero register. This
+  // does not check that the instruction actually has an Rd field.
+  Reg31Mode RdMode() const {
+    // The following instructions use csp or wsp as Rd:
+    //  Add/sub (immediate) when not setting the flags.
+    //  Add/sub (extended) when not setting the flags.
+    //  Logical (immediate) when not setting the flags.
+    // Otherwise, r31 is the zero register.
+    if (IsAddSubImmediate() || IsAddSubExtended()) {
+      if (Mask(AddSubSetFlagsBit)) {
+        return Reg31IsZeroRegister;
+      } else {
+        return Reg31IsStackPointer;
+      }
+    }
+    if (IsLogicalImmediate()) {
+      // Of the logical (immediate) instructions, only ANDS (and its aliases)
+      // can set the flags. The others can all write into csp.
+      // Note that some logical operations are not available to
+      // immediate-operand instructions, so we have to combine two masks here.
+      if (Mask(LogicalImmediateMask & LogicalOpMask) == ANDS) {
+        return Reg31IsZeroRegister;
+      } else {
+        return Reg31IsStackPointer;
+      }
+    }
+    return Reg31IsZeroRegister;
+  }
+
+  // Indicate whether Rn can be the stack pointer or the zero register. This
+  // does not check that the instruction actually has an Rn field.
+  Reg31Mode RnMode() const {
+    // The following instructions use csp or wsp as Rn:
+    //  All loads and stores.
+    //  Add/sub (immediate).
+    //  Add/sub (extended).
+    // Otherwise, r31 is the zero register.
+    if (IsLoadOrStore() || IsAddSubImmediate() || IsAddSubExtended()) {
+      return Reg31IsStackPointer;
+    }
+    return Reg31IsZeroRegister;
+  }
+
+  // Classify this instruction's PC-relative branch family (if any).
+  ImmBranchType BranchType() const {
+    if (IsCondBranchImm()) {
+      return CondBranchType;
+    } else if (IsUncondBranchImm()) {
+      return UncondBranchType;
+    } else if (IsCompareBranch()) {
+      return CompareBranchType;
+    } else if (IsTestBranch()) {
+      return TestBranchType;
+    } else {
+      return UnknownBranchType;
+    }
+  }
+
+  // Read the (unscaled) branch immediate for whichever branch family this
+  // instruction belongs to.
+  int ImmBranch() const {
+    switch (BranchType()) {
+      case CondBranchType: return ImmCondBranch();
+      case UncondBranchType: return ImmUncondBranch();
+      case CompareBranchType: return ImmCmpBranch();
+      case TestBranchType: return ImmTestBranch();
+      default: UNREACHABLE();
+    }
+    return 0;
+  }
+
+  bool IsBranchAndLinkToRegister() const {
+    return Mask(UnconditionalBranchToRegisterMask) == BLR;
+  }
+
+  bool IsMovz() const {
+    return (Mask(MoveWideImmediateMask) == MOVZ_x) ||
+           (Mask(MoveWideImmediateMask) == MOVZ_w);
+  }
+
+  bool IsMovk() const {
+    return (Mask(MoveWideImmediateMask) == MOVK_x) ||
+           (Mask(MoveWideImmediateMask) == MOVK_w);
+  }
+
+  bool IsMovn() const {
+    return (Mask(MoveWideImmediateMask) == MOVN_x) ||
+           (Mask(MoveWideImmediateMask) == MOVN_w);
+  }
+
+  bool IsNop(int n) {
+    // A marking nop is an instruction
+    //   mov r<n>,  r<n>
+    // which is encoded as
+    //   orr r<n>, xzr, r<n>
+    return (Mask(LogicalShiftedMask) == ORR_x) &&
+           (Rd() == Rm()) &&
+           (Rd() == n);
+  }
+
+  // Find the PC offset encoded in this instruction. 'this' may be a branch or
+  // a PC-relative addressing instruction.
+  // The offset returned is unscaled.
+  ptrdiff_t ImmPCOffset();
+
+  // Find the target of this instruction. 'this' may be a branch or a
+  // PC-relative addressing instruction.
+  Instruction* ImmPCOffsetTarget();
+
+  // Patch a PC-relative offset to refer to 'target'. 'this' may be a branch or
+  // a PC-relative addressing instruction.
+  void SetImmPCOffsetTarget(Instruction* target);
+  // Patch a literal load instruction to load from 'source'.
+  void SetImmLLiteral(Instruction* source);
+
+  // Address of the literal-pool entry referenced by this literal load.
+  uint8_t* LiteralAddress() {
+    int offset = ImmLLiteral() << kLiteralEntrySizeLog2;
+    return reinterpret_cast<uint8_t*>(this) + offset;
+  }
+
+  uint32_t Literal32() {
+    uint32_t literal;
+    memcpy(&literal, LiteralAddress(), sizeof(literal));
+
+    return literal;
+  }
+
+  uint64_t Literal64() {
+    uint64_t literal;
+    memcpy(&literal, LiteralAddress(), sizeof(literal));
+
+    return literal;
+  }
+
+  float LiteralFP32() {
+    return rawbits_to_float(Literal32());
+  }
+
+  double LiteralFP64() {
+    return rawbits_to_double(Literal64());
+  }
+
+  Instruction* NextInstruction() {
+    return this + kInstructionSize;
+  }
+
+  Instruction* InstructionAtOffset(int64_t offset) {
+    ASSERT(IsAligned(reinterpret_cast<uintptr_t>(this) + offset,
+                     kInstructionSize));
+    return this + offset;
+  }
+
+  template<typename T> static Instruction* Cast(T src) {
+    return reinterpret_cast<Instruction*>(src);
+  }
+
+
+  void SetPCRelImmTarget(Instruction* target);
+  void SetBranchImmTarget(Instruction* target);
+};
+
+
+// Where Instruction looks at instructions generated by the Assembler,
+// InstructionSequence looks at instructions sequences generated by the
+// MacroAssembler.
+// A view over one or more consecutive instructions emitted as a unit by the
+// MacroAssembler (as opposed to Instruction, which views a single word).
+class InstructionSequence : public Instruction {
+ public:
+  // Reinterpret a code address as an instruction sequence (no validation).
+  static InstructionSequence* At(Address address) {
+    return reinterpret_cast<InstructionSequence*>(address);
+  }
+
+  // Sequences generated by MacroAssembler::InlineData().
+  bool IsInlineData() const;
+  uint64_t InlineData() const;
+};
+
+
+// Simulator/Debugger debug instructions ---------------------------------------
+// Each debug marker is represented by a HLT instruction. The immediate comment
+// field in the instruction is used to identify the type of debug marker. Each
+// marker encodes arguments in a different way, as described below.
+
+// Indicate to the Debugger that the instruction is a redirected call.
+const Instr kImmExceptionIsRedirectedCall = 0xca11;
+
+// Represent unreachable code. This is used as a guard in parts of the code that
+// should not be reachable, such as in data encoded inline in the instructions.
+const Instr kImmExceptionIsUnreachable = 0xdebf;
+
+// A pseudo 'printf' instruction. The arguments will be passed to the platform
+// printf method.
+const Instr kImmExceptionIsPrintf = 0xdeb1;
+// Parameters are stored in A64 registers as if the printf pseudo-instruction
+// was a call to the real printf method:
+//
+// x0: The format string, then either of:
+// x1-x7: Optional arguments.
+// d0-d7: Optional arguments.
+//
+// Floating-point and integer arguments are passed in separate sets of
+// registers in AAPCS64 (even for varargs functions), so it is not possible to
+// determine the type of location of each arguments without some information
+// about the values that were passed in. This information could be retrieved
+// from the printf format string, but the format string is not trivial to
+// parse so we encode the relevant information with the HLT instruction.
+// - Type
+// Either kRegister or kFPRegister, but stored as a uint32_t because there's
+// no way to guarantee the size of the CPURegister::RegisterType enum.
+const unsigned kPrintfTypeOffset = 1 * kInstructionSize;
+const unsigned kPrintfLength = 2 * kInstructionSize;
+
+// A pseudo 'debug' instruction.
+const Instr kImmExceptionIsDebug = 0xdeb0;
+// Parameters are inlined in the code after a debug pseudo-instruction:
+// - Debug code.
+// - Debug parameters.
+// - Debug message string. This is a NULL-terminated ASCII string, padded to
+// kInstructionSize so that subsequent instructions are correctly aligned.
+// - A kImmExceptionIsUnreachable marker, to catch accidental execution of the
+// string data.
+const unsigned kDebugCodeOffset = 1 * kInstructionSize;
+const unsigned kDebugParamsOffset = 2 * kInstructionSize;
+const unsigned kDebugMessageOffset = 3 * kInstructionSize;
+
+// Debug parameters.
+// Used without a TRACE_ option, the Debugger will print the arguments only
+// once. Otherwise TRACE_ENABLE and TRACE_DISABLE will enable or disable tracing
+// before every instruction for the specified LOG_ parameters.
+//
+// TRACE_OVERRIDE enables the specified LOG_ parameters, and disables any
+// others that were not specified.
+//
+// For example:
+//
+// __ debug("print registers and fp registers", 0, LOG_REGS | LOG_FP_REGS);
+// will print the registers and fp registers only once.
+//
+// __ debug("trace disasm", 1, TRACE_ENABLE | LOG_DISASM);
+// starts disassembling the code.
+//
+// __ debug("trace rets", 2, TRACE_ENABLE | LOG_REGS);
+// adds the general purpose registers to the trace.
+//
+// __ debug("stop regs", 3, TRACE_DISABLE | LOG_REGS);
+// stops tracing the registers.
+const unsigned kDebuggerTracingDirectivesMask = 3 << 6;
+enum DebugParameters {
+ NO_PARAM = 0,
+ BREAK = 1 << 0,
+ LOG_DISASM = 1 << 1, // Use only with TRACE. Disassemble the code.
+ LOG_REGS = 1 << 2, // Log general purpose registers.
+ LOG_FP_REGS = 1 << 3, // Log floating-point registers.
+ LOG_SYS_REGS = 1 << 4, // Log the status flags.
+ LOG_WRITE = 1 << 5, // Log any memory write.
+
+ LOG_STATE = LOG_REGS | LOG_FP_REGS | LOG_SYS_REGS,
+ LOG_ALL = LOG_DISASM | LOG_STATE | LOG_WRITE,
+
+ // Trace control.
+ TRACE_ENABLE = 1 << 6,
+ TRACE_DISABLE = 2 << 6,
+ TRACE_OVERRIDE = 3 << 6
+};
+
+
+} } // namespace v8::internal
+
+
+#endif // V8_A64_INSTRUCTIONS_A64_H_
--- /dev/null
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "a64/instrument-a64.h"
+
+namespace v8 {
+namespace internal {
+
+// Creates a named counter of the given type. The counter starts at zero and
+// disabled; the name is copied (and truncated) into the fixed-size name_
+// buffer.
+Counter::Counter(const char* name, CounterType type)
+    : count_(0), enabled_(false), type_(type) {
+  ASSERT(name != NULL);
+  strncpy(name_, name, kCounterNameMaxLength);
+  // strncpy() does not NUL-terminate the destination when the source is at
+  // least kCounterNameMaxLength long; force termination so name() always
+  // returns a valid C string.
+  name_[kCounterNameMaxLength - 1] = '\0';
+}
+
+
+// Start recording increments.
+void Counter::Enable() {
+  enabled_ = true;
+}
+
+
+// Stop recording increments (Increment() becomes a no-op).
+void Counter::Disable() {
+  enabled_ = false;
+}
+
+
+bool Counter::IsEnabled() {
+  return enabled_;
+}
+
+
+// Bump the counter, but only while it is enabled.
+void Counter::Increment() {
+  if (enabled_) {
+    count_++;
+  }
+}
+
+
+// Returns the current count. NOTE: reading a Gauge counter is destructive —
+// it resets the count to zero.
+uint64_t Counter::count() {
+  uint64_t result = count_;
+  if (type_ == Gauge) {
+    // If the counter is a Gauge, reset the count after reading.
+    count_ = 0;
+  }
+  return result;
+}
+
+
+const char* Counter::name() {
+  return name_;
+}
+
+
+CounterType Counter::type() {
+  return type_;
+}
+
+
+// Name/type pair used to declare the static counter table below.
+typedef struct {
+  const char* name;
+  CounterType type;
+} CounterDescriptor;
+
+
+// The full set of counters created by the Instrument constructor. An
+// "Instruction" entry must be present: Update() and DumpEventMarker() look it
+// up by name and Update() asserts it is Cumulative.
+static const CounterDescriptor kCounterList[] = {
+  {"Instruction", Cumulative},
+
+  {"Move Immediate", Gauge},
+  {"Add/Sub DP", Gauge},
+  {"Logical DP", Gauge},
+  {"Other Int DP", Gauge},
+  {"FP DP", Gauge},
+
+  {"Conditional Select", Gauge},
+  {"Conditional Compare", Gauge},
+
+  {"Unconditional Branch", Gauge},
+  {"Compare and Branch", Gauge},
+  {"Test and Branch", Gauge},
+  {"Conditional Branch", Gauge},
+
+  {"Load Integer", Gauge},
+  {"Load FP", Gauge},
+  {"Load Pair", Gauge},
+  {"Load Literal", Gauge},
+
+  {"Store Integer", Gauge},
+  {"Store FP", Gauge},
+  {"Store Pair", Gauge},
+
+  {"PC Addressing", Gauge},
+  {"Other", Gauge},
+  {"SP Adjust", Gauge},
+};
+
+
+// Builds the counter set (all counters start disabled — see the Counter
+// constructor), opens the output stream and writes the two-line header plus
+// the counter-name row.
+Instrument::Instrument(const char* datafile, uint64_t sample_period)
+    : output_stream_(stderr), sample_period_(sample_period) {
+
+  // Set up the output stream. If datafile is non-NULL, use that file. If it
+  // can't be opened, or datafile is NULL, use stderr.
+  if (datafile != NULL) {
+    output_stream_ = fopen(datafile, "w");
+    if (output_stream_ == NULL) {
+      fprintf(stderr, "Can't open output file %s. Using stderr.\n", datafile);
+      output_stream_ = stderr;
+    }
+  }
+
+  static const int num_counters =
+    sizeof(kCounterList) / sizeof(CounterDescriptor);
+
+  // Dump an instrumentation description comment at the top of the file.
+  fprintf(output_stream_, "# counters=%d\n", num_counters);
+  fprintf(output_stream_, "# sample_period=%" PRIu64 "\n", sample_period_);
+
+  // Construct Counter objects from counter description array.
+  for (int i = 0; i < num_counters; i++) {
+    Counter* counter = new Counter(kCounterList[i].name, kCounterList[i].type);
+    counters_.push_back(counter);
+  }
+
+  DumpCounterNames();
+}
+
+
+// Flushes a final sample row, frees the counters and closes the output file
+// (unless it is stderr, which must stay open).
+Instrument::~Instrument() {
+  // Dump any remaining instruction data to the output file.
+  DumpCounters();
+
+  // Free all the counter objects.
+  std::list<Counter*>::iterator it;
+  for (it = counters_.begin(); it != counters_.end(); it++) {
+    delete *it;
+  }
+
+  if (output_stream_ != stderr) {
+    fclose(output_stream_);
+  }
+}
+
+
+// Called once per visited instruction, before the per-category counter is
+// bumped.
+void Instrument::Update() {
+  // Increment the instruction counter, and dump all counters if a sample period
+  // has elapsed.
+  // The lookup happens only once, on the first call (function-local static).
+  static Counter* counter = GetCounter("Instruction");
+  ASSERT(counter->type() == Cumulative);
+  counter->Increment();
+
+  // NOTE(review): a sample_period_ of 0 would divide by zero here — the
+  // default is 1 << 22; confirm callers never pass 0.
+  if (counter->IsEnabled() && (counter->count() % sample_period_) == 0) {
+    DumpCounters();
+  }
+}
+
+
+// Writes one comma-separated row of counter values. Note that reading a Gauge
+// counter via count() resets it, so each row holds per-period values for
+// gauges and running totals for cumulative counters.
+void Instrument::DumpCounters() {
+  // Iterate through the counter objects, dumping their values to the output
+  // stream.
+  std::list<Counter*>::const_iterator it;
+  for (it = counters_.begin(); it != counters_.end(); it++) {
+    fprintf(output_stream_, "%" PRIu64 ",", (*it)->count());
+  }
+  fprintf(output_stream_, "\n");
+  fflush(output_stream_);
+}
+
+
+// Writes the comma-separated header row of counter names, matching the column
+// order of DumpCounters().
+void Instrument::DumpCounterNames() {
+  // Iterate through the counter objects, dumping the counter names to the
+  // output stream.
+  std::list<Counter*>::const_iterator it;
+  for (it = counters_.begin(); it != counters_.end(); it++) {
+    fprintf(output_stream_, "%s,", (*it)->name());
+  }
+  fprintf(output_stream_, "\n");
+  fflush(output_stream_);
+}
+
+
+// Dispatches an instrumentation event embedded in the code (see
+// VisitMoveWideImmediate): enable/disable all counters, or dump an arbitrary
+// two-character event marker.
+void Instrument::HandleInstrumentationEvent(unsigned event) {
+  switch (event) {
+    case InstrumentStateEnable: Enable(); break;
+    case InstrumentStateDisable: Disable(); break;
+    default: DumpEventMarker(event);
+  }
+}
+
+
+void Instrument::DumpEventMarker(unsigned marker) {
+  // Dump an event marker to the output stream as a specially formatted comment
+  // line. The marker's low two bytes are printed as characters, followed by
+  // the current instruction count.
+  static Counter* counter = GetCounter("Instruction");
+
+  // counter->count() is a uint64_t, so the matching format macro is PRIu64
+  // (PRId64 would format the value as signed).
+  fprintf(output_stream_, "# %c%c @ %" PRIu64 "\n", marker & 0xff,
+          (marker >> 8) & 0xff, counter->count());
+}
+
+
+// Linear search of the counter list by name. An unknown name is a programming
+// error in this file's counter tables, so it terminates the process.
+Counter* Instrument::GetCounter(const char* name) {
+  // Get a Counter object by name from the counter list.
+  std::list<Counter*>::const_iterator it;
+  for (it = counters_.begin(); it != counters_.end(); it++) {
+    if (strcmp((*it)->name(), name) == 0) {
+      return *it;
+    }
+  }
+
+  // A Counter by that name does not exist: print an error message to stderr
+  // and the output file, and exit.
+  static const char* error_message =
+    "# Error: Unknown counter \"%s\". Exiting.\n";
+  fprintf(stderr, error_message, name);
+  fprintf(output_stream_, error_message, name);
+  exit(1);
+}
+
+
+// Enable every counter (entered via the InstrumentStateEnable event).
+void Instrument::Enable() {
+  std::list<Counter*>::iterator it;
+  for (it = counters_.begin(); it != counters_.end(); it++) {
+    (*it)->Enable();
+  }
+}
+
+
+// Disable every counter (entered via the InstrumentStateDisable event).
+void Instrument::Disable() {
+  std::list<Counter*>::iterator it;
+  for (it = counters_.begin(); it != counters_.end(); it++) {
+    (*it)->Disable();
+  }
+}
+
+
+// Each visitor below calls Update() (which bumps the cumulative "Instruction"
+// counter and may emit a sample row) and then attributes the instruction to
+// its category counter.
+void Instrument::VisitPCRelAddressing(Instruction* instr) {
+  Update();
+  static Counter* counter = GetCounter("PC Addressing");
+  counter->Increment();
+}
+
+
+void Instrument::VisitAddSubImmediate(Instruction* instr) {
+  Update();
+  static Counter* sp_counter = GetCounter("SP Adjust");
+  static Counter* add_sub_counter = GetCounter("Add/Sub DP");
+  if (((instr->Mask(AddSubOpMask) == SUB) ||
+       (instr->Mask(AddSubOpMask) == ADD)) &&
+      (instr->Rd() == 31) && (instr->Rn() == 31)) {
+    // Count adjustments to the C stack pointer caused by V8 needing two SPs.
+    sp_counter->Increment();
+  } else {
+    add_sub_counter->Increment();
+  }
+}
+
+
+void Instrument::VisitLogicalImmediate(Instruction* instr) {
+  Update();
+  static Counter* counter = GetCounter("Logical DP");
+  counter->Increment();
+}
+
+
+void Instrument::VisitMoveWideImmediate(Instruction* instr) {
+  Update();
+  static Counter* counter = GetCounter("Move Immediate");
+
+  // A MOVN writing to the zero register is dead code, so it is repurposed as
+  // an inline instrumentation event; the immediate carries the event payload.
+  if (instr->IsMovn() && (instr->Rd() == kZeroRegCode)) {
+    unsigned imm = instr->ImmMoveWide();
+    HandleInstrumentationEvent(imm);
+  } else {
+    counter->Increment();
+  }
+}
+
+
+void Instrument::VisitBitfield(Instruction* instr) {
+  Update();
+  static Counter* counter = GetCounter("Other Int DP");
+  counter->Increment();
+}
+
+
+void Instrument::VisitExtract(Instruction* instr) {
+  Update();
+  static Counter* counter = GetCounter("Other Int DP");
+  counter->Increment();
+}
+
+
+void Instrument::VisitUnconditionalBranch(Instruction* instr) {
+  Update();
+  static Counter* counter = GetCounter("Unconditional Branch");
+  counter->Increment();
+}
+
+
+void Instrument::VisitUnconditionalBranchToRegister(Instruction* instr) {
+  Update();
+  static Counter* counter = GetCounter("Unconditional Branch");
+  counter->Increment();
+}
+
+
+void Instrument::VisitCompareBranch(Instruction* instr) {
+  Update();
+  static Counter* counter = GetCounter("Compare and Branch");
+  counter->Increment();
+}
+
+
+void Instrument::VisitTestBranch(Instruction* instr) {
+  Update();
+  static Counter* counter = GetCounter("Test and Branch");
+  counter->Increment();
+}
+
+
+void Instrument::VisitConditionalBranch(Instruction* instr) {
+  Update();
+  static Counter* counter = GetCounter("Conditional Branch");
+  counter->Increment();
+}
+
+
+void Instrument::VisitSystem(Instruction* instr) {
+  Update();
+  static Counter* counter = GetCounter("Other");
+  counter->Increment();
+}
+
+
+void Instrument::VisitException(Instruction* instr) {
+  Update();
+  static Counter* counter = GetCounter("Other");
+  counter->Increment();
+}
+
+
+// Shared helper for the load/store-pair visitors: the L bit distinguishes a
+// pair load from a pair store.
+void Instrument::InstrumentLoadStorePair(Instruction* instr) {
+  static Counter* load_pair_counter = GetCounter("Load Pair");
+  static Counter* store_pair_counter = GetCounter("Store Pair");
+  if (instr->Mask(LoadStorePairLBit) != 0) {
+    load_pair_counter->Increment();
+  } else {
+    store_pair_counter->Increment();
+  }
+}
+
+
+void Instrument::VisitLoadStorePairPostIndex(Instruction* instr) {
+  Update();
+  InstrumentLoadStorePair(instr);
+}
+
+
+void Instrument::VisitLoadStorePairOffset(Instruction* instr) {
+  Update();
+  InstrumentLoadStorePair(instr);
+}
+
+
+void Instrument::VisitLoadStorePairPreIndex(Instruction* instr) {
+  Update();
+  InstrumentLoadStorePair(instr);
+}
+
+
+void Instrument::VisitLoadStorePairNonTemporal(Instruction* instr) {
+  Update();
+  InstrumentLoadStorePair(instr);
+}
+
+
+void Instrument::VisitLoadLiteral(Instruction* instr) {
+  Update();
+  static Counter* counter = GetCounter("Load Literal");
+  counter->Increment();
+}
+
+
+// Shared helper for the single-register load/store visitors: classifies the
+// opcode as an integer or FP load or store.
+void Instrument::InstrumentLoadStore(Instruction* instr) {
+  static Counter* load_int_counter = GetCounter("Load Integer");
+  static Counter* store_int_counter = GetCounter("Store Integer");
+  static Counter* load_fp_counter = GetCounter("Load FP");
+  static Counter* store_fp_counter = GetCounter("Store FP");
+
+  switch (instr->Mask(LoadStoreOpMask)) {
+    case STRB_w:    // Fall through.
+    case STRH_w:    // Fall through.
+    case STR_w:     // Fall through.
+    case STR_x:     store_int_counter->Increment(); break;
+    case STR_s:     // Fall through.
+    case STR_d:     store_fp_counter->Increment(); break;
+    case LDRB_w:    // Fall through.
+    case LDRH_w:    // Fall through.
+    case LDR_w:     // Fall through.
+    case LDR_x:     // Fall through.
+    case LDRSB_x:   // Fall through.
+    case LDRSH_x:   // Fall through.
+    case LDRSW_x:   // Fall through.
+    case LDRSB_w:   // Fall through.
+    case LDRSH_w:   load_int_counter->Increment(); break;
+    case LDR_s:     // Fall through.
+    case LDR_d:     load_fp_counter->Increment(); break;
+    default: UNREACHABLE();
+  }
+}
+
+
+void Instrument::VisitLoadStoreUnscaledOffset(Instruction* instr) {
+  Update();
+  InstrumentLoadStore(instr);
+}
+
+
+void Instrument::VisitLoadStorePostIndex(Instruction* instr) {
+  Update();
+  InstrumentLoadStore(instr);
+}
+
+
+void Instrument::VisitLoadStorePreIndex(Instruction* instr) {
+  Update();
+  InstrumentLoadStore(instr);
+}
+
+
+void Instrument::VisitLoadStoreRegisterOffset(Instruction* instr) {
+  Update();
+  InstrumentLoadStore(instr);
+}
+
+
+void Instrument::VisitLoadStoreUnsignedOffset(Instruction* instr) {
+  Update();
+  InstrumentLoadStore(instr);
+}
+
+
+// Data-processing and FP visitors: Update() then bump the category counter.
+void Instrument::VisitLogicalShifted(Instruction* instr) {
+  Update();
+  static Counter* counter = GetCounter("Logical DP");
+  counter->Increment();
+}
+
+
+void Instrument::VisitAddSubShifted(Instruction* instr) {
+  Update();
+  static Counter* counter = GetCounter("Add/Sub DP");
+  counter->Increment();
+}
+
+
+void Instrument::VisitAddSubExtended(Instruction* instr) {
+  Update();
+  static Counter* sp_counter = GetCounter("SP Adjust");
+  static Counter* add_sub_counter = GetCounter("Add/Sub DP");
+  if (((instr->Mask(AddSubOpMask) == SUB) ||
+       (instr->Mask(AddSubOpMask) == ADD)) &&
+      (instr->Rd() == 31) && (instr->Rn() == 31)) {
+    // Count adjustments to the C stack pointer caused by V8 needing two SPs.
+    sp_counter->Increment();
+  } else {
+    add_sub_counter->Increment();
+  }
+}
+
+
+void Instrument::VisitAddSubWithCarry(Instruction* instr) {
+  Update();
+  static Counter* counter = GetCounter("Add/Sub DP");
+  counter->Increment();
+}
+
+
+void Instrument::VisitConditionalCompareRegister(Instruction* instr) {
+  Update();
+  static Counter* counter = GetCounter("Conditional Compare");
+  counter->Increment();
+}
+
+
+void Instrument::VisitConditionalCompareImmediate(Instruction* instr) {
+  Update();
+  static Counter* counter = GetCounter("Conditional Compare");
+  counter->Increment();
+}
+
+
+void Instrument::VisitConditionalSelect(Instruction* instr) {
+  Update();
+  static Counter* counter = GetCounter("Conditional Select");
+  counter->Increment();
+}
+
+
+void Instrument::VisitDataProcessing1Source(Instruction* instr) {
+  Update();
+  static Counter* counter = GetCounter("Other Int DP");
+  counter->Increment();
+}
+
+
+void Instrument::VisitDataProcessing2Source(Instruction* instr) {
+  Update();
+  static Counter* counter = GetCounter("Other Int DP");
+  counter->Increment();
+}
+
+
+void Instrument::VisitDataProcessing3Source(Instruction* instr) {
+  Update();
+  static Counter* counter = GetCounter("Other Int DP");
+  counter->Increment();
+}
+
+
+void Instrument::VisitFPCompare(Instruction* instr) {
+  Update();
+  static Counter* counter = GetCounter("FP DP");
+  counter->Increment();
+}
+
+
+void Instrument::VisitFPConditionalCompare(Instruction* instr) {
+  Update();
+  static Counter* counter = GetCounter("Conditional Compare");
+  counter->Increment();
+}
+
+
+void Instrument::VisitFPConditionalSelect(Instruction* instr) {
+  Update();
+  static Counter* counter = GetCounter("Conditional Select");
+  counter->Increment();
+}
+
+
+void Instrument::VisitFPImmediate(Instruction* instr) {
+  Update();
+  static Counter* counter = GetCounter("FP DP");
+  counter->Increment();
+}
+
+
+void Instrument::VisitFPDataProcessing1Source(Instruction* instr) {
+  Update();
+  static Counter* counter = GetCounter("FP DP");
+  counter->Increment();
+}
+
+
+void Instrument::VisitFPDataProcessing2Source(Instruction* instr) {
+  Update();
+  static Counter* counter = GetCounter("FP DP");
+  counter->Increment();
+}
+
+
+void Instrument::VisitFPDataProcessing3Source(Instruction* instr) {
+  Update();
+  static Counter* counter = GetCounter("FP DP");
+  counter->Increment();
+}
+
+
+void Instrument::VisitFPIntegerConvert(Instruction* instr) {
+  Update();
+  static Counter* counter = GetCounter("FP DP");
+  counter->Increment();
+}
+
+
+void Instrument::VisitFPFixedPointConvert(Instruction* instr) {
+  Update();
+  static Counter* counter = GetCounter("FP DP");
+  counter->Increment();
+}
+
+
+void Instrument::VisitUnallocated(Instruction* instr) {
+  Update();
+  static Counter* counter = GetCounter("Other");
+  counter->Increment();
+}
+
+
+void Instrument::VisitUnimplemented(Instruction* instr) {
+  Update();
+  static Counter* counter = GetCounter("Other");
+  counter->Increment();
+}
+
+
+} } // namespace v8::internal
--- /dev/null
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_A64_INSTRUMENT_A64_H_
+#define V8_A64_INSTRUMENT_A64_H_
+
+#include "globals.h"
+#include "utils.h"
+// NOTE(review): removed the self-include of "a64/instrument-a64.h" — this IS
+// instrument-a64.h, so the include was a guarded no-op. Project includes are
+// kept alphabetically ordered.
+#include "a64/constants-a64.h"
+#include "a64/decoder-a64.h"
+
+namespace v8 {
+namespace internal {
+
+// Size of the fixed buffer that stores a counter's name (longer names are
+// truncated by the Counter constructor).
+const int kCounterNameMaxLength = 256;
+// Instructions between counter dumps when no explicit period is given.
+const uint64_t kDefaultInstrumentationSamplingPeriod = 1 << 22;
+
+
+// Payload values for the inline instrumentation events decoded by
+// Instrument::HandleInstrumentationEvent.
+enum InstrumentState {
+  InstrumentStateDisable = 0,
+  InstrumentStateEnable = 1
+};
+
+
+enum CounterType {
+  Gauge = 0,      // Gauge counters reset themselves after reading.
+  Cumulative = 1  // Cumulative counters keep their value after reading.
+};
+
+
+// A single named event counter. Increment() only has an effect while the
+// counter is enabled, and reading a Gauge counter via count() resets it.
+class Counter {
+ public:
+  Counter(const char* name, CounterType type = Gauge);
+
+  void Increment();
+  void Enable();
+  void Disable();
+  bool IsEnabled();
+  uint64_t count();
+  const char* name();
+  CounterType type();
+
+ private:
+  char name_[kCounterNameMaxLength];
+  uint64_t count_;
+  bool enabled_;
+  CounterType type_;
+};
+
+
+// A DecoderVisitor that counts decoded instructions per category and
+// periodically writes comma-separated sample rows to datafile (falling back
+// to stderr if the file cannot be opened).
+// NOTE(review): relies on <list> and <cstdio>/stdio being available
+// transitively (presumably via globals.h) — confirm.
+class Instrument: public DecoderVisitor {
+ public:
+  explicit Instrument(const char* datafile = NULL,
+    uint64_t sample_period = kDefaultInstrumentationSamplingPeriod);
+  ~Instrument();
+
+  // Declare all Visitor functions.
+  #define DECLARE(A) void Visit##A(Instruction* instr);
+  VISITOR_LIST(DECLARE)
+  #undef DECLARE
+
+ private:
+  void Update();
+  void Enable();
+  void Disable();
+  void DumpCounters();
+  void DumpCounterNames();
+  void DumpEventMarker(unsigned marker);
+  void HandleInstrumentationEvent(unsigned event);
+  Counter* GetCounter(const char* name);
+
+  void InstrumentLoadStore(Instruction* instr);
+  void InstrumentLoadStorePair(Instruction* instr);
+
+  std::list<Counter*> counters_;
+
+  FILE *output_stream_;
+  uint64_t sample_period_;
+};
+
+} } // namespace v8::internal
+
+#endif // V8_A64_INSTRUMENT_A64_H_
--- /dev/null
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "lithium-allocator-inl.h"
+#include "a64/lithium-a64.h"
+#include "a64/lithium-codegen-a64.h"
+#include "hydrogen-osr.h"
+
+namespace v8 {
+namespace internal {
+
+
+// Generates LFoo::CompileToNative for every concrete lithium instruction,
+// forwarding code generation to the matching LCodeGen::DoFoo method.
+#define DEFINE_COMPILE(type)                            \
+  void L##type::CompileToNative(LCodeGen* generator) {  \
+    generator->Do##type(this);                          \
+  }
+LITHIUM_CONCRETE_INSTRUCTION_LIST(DEFINE_COMPILE)
+#undef DEFINE_COMPILE
+
+#ifdef DEBUG
+// Debug-only sanity check run on instructions marked as calls (see
+// LChunkBuilder::MarkAsCall).
+void LInstruction::VerifyCall() {
+  // Call instructions can use only fixed registers as temporaries and
+  // outputs because all registers are blocked by the calling convention.
+  // Inputs operands must use a fixed register or use-at-start policy or
+  // a non-register policy.
+  ASSERT(Output() == NULL ||
+         LUnallocated::cast(Output())->HasFixedPolicy() ||
+         !LUnallocated::cast(Output())->HasRegisterPolicy());
+  for (UseIterator it(this); !it.Done(); it.Advance()) {
+    LUnallocated* operand = LUnallocated::cast(it.Current());
+    ASSERT(operand->HasFixedPolicy() ||
+           operand->IsUsedAtStart());
+  }
+  // Temporaries must be fixed registers or avoid registers entirely.
+  for (TempIterator it(this); !it.Done(); it.Advance()) {
+    LUnallocated* operand = LUnallocated::cast(it.Current());
+    ASSERT(operand->HasFixedPolicy() ||!operand->HasRegisterPolicy());
+  }
+}
+#endif
+
+
+// The PrintDataTo overrides below append a human-readable description of each
+// instruction's operands to the given StringStream, for debug dumps.
+void LLabel::PrintDataTo(StringStream* stream) {
+  LGap::PrintDataTo(stream);
+  LLabel* rep = replacement();
+  if (rep != NULL) {
+    stream->Add(" Dead block replaced with B%d", rep->block_id());
+  }
+}
+
+
+void LAccessArgumentsAt::PrintDataTo(StringStream* stream) {
+  arguments()->PrintTo(stream);
+  stream->Add(" length ");
+  length()->PrintTo(stream);
+  stream->Add(" index ");
+  index()->PrintTo(stream);
+}
+
+
+void LBranch::PrintDataTo(StringStream* stream) {
+  stream->Add("B%d | B%d on ", true_block_id(), false_block_id());
+  value()->PrintTo(stream);
+}
+
+
+void LCallJSFunction::PrintDataTo(StringStream* stream) {
+  stream->Add("= ");
+  function()->PrintTo(stream);
+  stream->Add("#%d / ", arity());
+}
+
+
+void LCallWithDescriptor::PrintDataTo(StringStream* stream) {
+  for (int i = 0; i < InputCount(); i++) {
+    InputAt(i)->PrintTo(stream);
+    stream->Add(" ");
+  }
+  stream->Add("#%d / ", arity());
+}
+
+
+void LCallNew::PrintDataTo(StringStream* stream) {
+  stream->Add("= ");
+  constructor()->PrintTo(stream);
+  stream->Add(" #%d / ", arity());
+}
+
+
+void LCallNewArray::PrintDataTo(StringStream* stream) {
+  stream->Add("= ");
+  constructor()->PrintTo(stream);
+  stream->Add(" #%d / ", arity());
+  ElementsKind kind = hydrogen()->elements_kind();
+  stream->Add(" (%s) ", ElementsKindToString(kind));
+}
+
+
+void LClassOfTestAndBranch::PrintDataTo(StringStream* stream) {
+  stream->Add("if class_of_test(");
+  value()->PrintTo(stream);
+  stream->Add(", \"%o\") then B%d else B%d",
+              *hydrogen()->class_name(),
+              true_block_id(),
+              false_block_id());
+}
+
+
+void LCompareNumericAndBranch::PrintDataTo(StringStream* stream) {
+  stream->Add("if ");
+  left()->PrintTo(stream);
+  stream->Add(" %s ", Token::String(op()));
+  right()->PrintTo(stream);
+  stream->Add(" then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
+void LHasCachedArrayIndexAndBranch::PrintDataTo(StringStream* stream) {
+  stream->Add("if has_cached_array_index(");
+  value()->PrintTo(stream);
+  stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
+// A goto is only worth a disassembly comment if it actually branches (i.e.
+// its target is not the next emitted block).
+bool LGoto::HasInterestingComment(LCodeGen* gen) const {
+  return !gen->IsNextEmittedBlock(block_id());
+}
+
+
+void LGoto::PrintDataTo(StringStream* stream) {
+  stream->Add("B%d", block_id());
+}
+
+
+void LInnerAllocatedObject::PrintDataTo(StringStream* stream) {
+  stream->Add(" = ");
+  base_object()->PrintTo(stream);
+  stream->Add(" + ");
+  offset()->PrintTo(stream);
+}
+
+
+void LInvokeFunction::PrintDataTo(StringStream* stream) {
+  stream->Add("= ");
+  function()->PrintTo(stream);
+  stream->Add(" #%d / ", arity());
+}
+
+
+// Prints the full instruction: mnemonic, output operand, data, then any
+// environment and pointer map.
+void LInstruction::PrintTo(StringStream* stream) {
+  stream->Add("%s ", this->Mnemonic());
+
+  PrintOutputOperandTo(stream);
+
+  PrintDataTo(stream);
+
+  if (HasEnvironment()) {
+    stream->Add(" ");
+    environment()->PrintTo(stream);
+  }
+
+  if (HasPointerMap()) {
+    stream->Add(" ");
+    pointer_map()->PrintTo(stream);
+  }
+}
+
+
+// Default operand printer: "= in0 in1 ...". Subclasses override this to show
+// operands with more specific labels.
+void LInstruction::PrintDataTo(StringStream* stream) {
+  stream->Add("= ");
+  for (int i = 0; i < InputCount(); i++) {
+    if (i > 0) stream->Add(" ");
+    if (InputAt(i) == NULL) {
+      stream->Add("NULL");
+    } else {
+      InputAt(i)->PrintTo(stream);
+    }
+  }
+}
+
+
+void LInstruction::PrintOutputOperandTo(StringStream* stream) {
+  if (HasResult()) result()->PrintTo(stream);
+}
+
+
+// Printers for the type-test-and-branch instructions; all follow the
+// "if test(value) then Bx else By" shape.
+void LHasInstanceTypeAndBranch::PrintDataTo(StringStream* stream) {
+  stream->Add("if has_instance_type(");
+  value()->PrintTo(stream);
+  stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
+void LIsObjectAndBranch::PrintDataTo(StringStream* stream) {
+  stream->Add("if is_object(");
+  value()->PrintTo(stream);
+  stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
+void LIsStringAndBranch::PrintDataTo(StringStream* stream) {
+  stream->Add("if is_string(");
+  value()->PrintTo(stream);
+  stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
+void LIsSmiAndBranch::PrintDataTo(StringStream* stream) {
+  stream->Add("if is_smi(");
+  value()->PrintTo(stream);
+  stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
+void LTypeofIsAndBranch::PrintDataTo(StringStream* stream) {
+  stream->Add("if typeof ");
+  value()->PrintTo(stream);
+  stream->Add(" == \"%s\" then B%d else B%d",
+              hydrogen()->type_literal()->ToCString().get(),
+              true_block_id(), false_block_id());
+}
+
+
+void LIsUndetectableAndBranch::PrintDataTo(StringStream* stream) {
+  stream->Add("if is_undetectable(");
+  value()->PrintTo(stream);
+  stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
+// A gap is redundant when all of its parallel moves are absent or redundant.
+// The literal 4 is the number of parallel-move positions per gap — presumably
+// the inner-position enum (before/start/end/after); confirm against LGap's
+// declaration.
+bool LGap::IsRedundant() const {
+  for (int i = 0; i < 4; i++) {
+    if ((parallel_moves_[i] != NULL) && !parallel_moves_[i]->IsRedundant()) {
+      return false;
+    }
+  }
+
+  return true;
+}
+
+
+// Prints all four parallel-move slots, empty ones as "()".
+void LGap::PrintDataTo(StringStream* stream) {
+  for (int i = 0; i < 4; i++) {
+    stream->Add("(");
+    if (parallel_moves_[i] != NULL) {
+      parallel_moves_[i]->PrintDataTo(stream);
+    }
+    stream->Add(") ");
+  }
+}
+
+
+// Printers for the context/field/keyed load-store instructions; "<-" marks
+// the value being stored.
+void LLoadContextSlot::PrintDataTo(StringStream* stream) {
+  context()->PrintTo(stream);
+  stream->Add("[%d]", slot_index());
+}
+
+
+void LStoreCodeEntry::PrintDataTo(StringStream* stream) {
+  stream->Add(" = ");
+  function()->PrintTo(stream);
+  stream->Add(".code_entry = ");
+  code_object()->PrintTo(stream);
+}
+
+
+void LStoreContextSlot::PrintDataTo(StringStream* stream) {
+  context()->PrintTo(stream);
+  stream->Add("[%d] <- ", slot_index());
+  value()->PrintTo(stream);
+}
+
+
+void LStoreKeyedGeneric::PrintDataTo(StringStream* stream) {
+  object()->PrintTo(stream);
+  stream->Add("[");
+  key()->PrintTo(stream);
+  stream->Add("] <- ");
+  value()->PrintTo(stream);
+}
+
+
+void LStoreNamedField::PrintDataTo(StringStream* stream) {
+  object()->PrintTo(stream);
+  hydrogen()->access().PrintTo(stream);
+  stream->Add(" <- ");
+  value()->PrintTo(stream);
+}
+
+
+void LStoreNamedGeneric::PrintDataTo(StringStream* stream) {
+  object()->PrintTo(stream);
+  stream->Add(".");
+  stream->Add(String::cast(*name())->ToCString().get());
+  stream->Add(" <- ");
+  value()->PrintTo(stream);
+}
+
+
+void LStringCompareAndBranch::PrintDataTo(StringStream* stream) {
+  stream->Add("if string_compare(");
+  left()->PrintTo(stream);
+  right()->PrintTo(stream);
+  stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
+void LTransitionElementsKind::PrintDataTo(StringStream* stream) {
+  object()->PrintTo(stream);
+  stream->Add("%p -> %p", *original_map(), *transitioned_map());
+}
+
+
+// Shared printer for all LUnaryMathOperation<T> instantiations.
+template<int T>
+void LUnaryMathOperation<T>::PrintDataTo(StringStream* stream) {
+  value()->PrintTo(stream);
+}
+
+
+// Mnemonic for a double (-d) arithmetic instruction, derived from its token.
+const char* LArithmeticD::Mnemonic() const {
+  switch (op()) {
+    case Token::ADD: return "add-d";
+    case Token::SUB: return "sub-d";
+    case Token::MUL: return "mul-d";
+    case Token::DIV: return "div-d";
+    case Token::MOD: return "mod-d";
+    default:
+      UNREACHABLE();
+      return NULL;
+  }
+}
+
+
+// Mnemonic for a tagged (-t) arithmetic instruction, derived from its token.
+const char* LArithmeticT::Mnemonic() const {
+  switch (op()) {
+    case Token::ADD: return "add-t";
+    case Token::SUB: return "sub-t";
+    case Token::MUL: return "mul-t";
+    case Token::MOD: return "mod-t";
+    case Token::DIV: return "div-t";
+    case Token::BIT_AND: return "bit-and-t";
+    case Token::BIT_OR: return "bit-or-t";
+    case Token::BIT_XOR: return "bit-xor-t";
+    case Token::ROR: return "ror-t";
+    case Token::SHL: return "shl-t";
+    case Token::SAR: return "sar-t";
+    case Token::SHR: return "shr-t";
+    default:
+      UNREACHABLE();
+      return NULL;
+  }
+}
+
+
+// Records the bailout reason and marks chunk building as aborted.
+void LChunkBuilder::Abort(BailoutReason reason) {
+  info()->set_bailout_reason(reason);
+  status_ = ABORTED;
+}
+
+
+// Wraps a fixed general-purpose register as an unallocated operand.
+LUnallocated* LChunkBuilder::ToUnallocated(Register reg) {
+  return new(zone()) LUnallocated(LUnallocated::FIXED_REGISTER,
+                                  Register::ToAllocationIndex(reg));
+}
+
+
+// Wraps a fixed double register as an unallocated operand.
+LUnallocated* LChunkBuilder::ToUnallocated(DoubleRegister reg) {
+  return new(zone()) LUnallocated(LUnallocated::FIXED_DOUBLE_REGISTER,
+                                  DoubleRegister::ToAllocationIndex(reg));
+}
+
+
+// Core use helper: if the value is only materialized at its uses, visit its
+// defining instruction now, then bind the operand to the value's virtual
+// register.
+LOperand* LChunkBuilder::Use(HValue* value, LUnallocated* operand) {
+  if (value->EmitAtUses()) {
+    HInstruction* instr = HInstruction::cast(value);
+    VisitInstruction(instr);
+  }
+  operand->set_virtual_register(value->id());
+  return operand;
+}
+
+
+// Use the value in a specific general-purpose register.
+LOperand* LChunkBuilder::UseFixed(HValue* value, Register fixed_register) {
+  return Use(value, ToUnallocated(fixed_register));
+}
+
+
+// Use the value in a specific double register.
+LOperand* LChunkBuilder::UseFixedDouble(HValue* value,
+                                        DoubleRegister fixed_register) {
+  return Use(value, ToUnallocated(fixed_register));
+}
+
+
+// Use the value in any register; the register must keep its value.
+LOperand* LChunkBuilder::UseRegister(HValue* value) {
+  return Use(value, new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER));
+}
+
+
+// Use the value in a register that the instruction is allowed to overwrite.
+LOperand* LChunkBuilder::UseRegisterAndClobber(HValue* value) {
+  return Use(value, new(zone()) LUnallocated(LUnallocated::WRITABLE_REGISTER));
+}
+
+
+// Use the value in a register, but only up to the start of the instruction,
+// so the register may be reused for the output.
+LOperand* LChunkBuilder::UseRegisterAtStart(HValue* value) {
+  return Use(value,
+             new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER,
+                                      LUnallocated::USED_AT_START));
+}
+
+
+LOperand* LChunkBuilder::UseRegisterOrConstant(HValue* value) {
+  return value->IsConstant() ? UseConstant(value) : UseRegister(value);
+}
+
+
+LOperand* LChunkBuilder::UseRegisterOrConstantAtStart(HValue* value) {
+  return value->IsConstant() ? UseConstant(value) : UseRegisterAtStart(value);
+}
+
+
+// Materialize the value as a constant operand in the chunk.
+LConstantOperand* LChunkBuilder::UseConstant(HValue* value) {
+  return chunk_->DefineConstantOperand(HConstant::cast(value));
+}
+
+
+// Use the value anywhere: constant, register or stack slot.
+LOperand* LChunkBuilder::UseAny(HValue* value) {
+  return value->IsConstant()
+      ? UseConstant(value)
+      : Use(value, new(zone()) LUnallocated(LUnallocated::ANY));
+}
+
+
+// Set |result| as the output operand of |instr|, tagged with the current
+// hydrogen instruction's virtual register.
+LInstruction* LChunkBuilder::Define(LTemplateResultInstruction<1>* instr,
+                                    LUnallocated* result) {
+  result->set_virtual_register(current_instruction_->id());
+  instr->set_result(result);
+  return instr;
+}
+
+
+// Define the result in any register chosen by the allocator.
+LInstruction* LChunkBuilder::DefineAsRegister(
+    LTemplateResultInstruction<1>* instr) {
+  return Define(instr,
+                new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER));
+}
+
+
+// Define the result in a fixed stack (spill) slot.
+LInstruction* LChunkBuilder::DefineAsSpilled(
+    LTemplateResultInstruction<1>* instr, int index) {
+  return Define(instr,
+                new(zone()) LUnallocated(LUnallocated::FIXED_SLOT, index));
+}
+
+
+// Define the result in the same register as the first input operand.
+LInstruction* LChunkBuilder::DefineSameAsFirst(
+    LTemplateResultInstruction<1>* instr) {
+  return Define(instr,
+                new(zone()) LUnallocated(LUnallocated::SAME_AS_FIRST_INPUT));
+}
+
+
+// Define the result in a specific general-purpose register.
+LInstruction* LChunkBuilder::DefineFixed(
+    LTemplateResultInstruction<1>* instr, Register reg) {
+  return Define(instr, ToUnallocated(reg));
+}
+
+
+// Define the result in a specific double register.
+LInstruction* LChunkBuilder::DefineFixedDouble(
+    LTemplateResultInstruction<1>* instr, DoubleRegister reg) {
+  return Define(instr, ToUnallocated(reg));
+}
+
+
+// Mark |instr| as a call: attach a pointer map for GC safepoints, record a
+// pending lazy-deoptimization environment for side-effecting calls, and
+// assign a deopt environment when the call sequence needs one.
+LInstruction* LChunkBuilder::MarkAsCall(LInstruction* instr,
+                                        HInstruction* hinstr,
+                                        CanDeoptimize can_deoptimize) {
+  info()->MarkAsNonDeferredCalling();
+#ifdef DEBUG
+  instr->VerifyCall();
+#endif
+  instr->MarkAsCall();
+  instr = AssignPointerMap(instr);
+
+  if (hinstr->HasObservableSideEffects()) {
+    // Side-effecting calls deoptimize lazily at the simulate that follows.
+    ASSERT(hinstr->next()->IsSimulate());
+    HSimulate* sim = HSimulate::cast(hinstr->next());
+    ASSERT(instruction_pending_deoptimization_environment_ == NULL);
+    ASSERT(pending_deoptimization_ast_id_.IsNone());
+    instruction_pending_deoptimization_environment_ = instr;
+    pending_deoptimization_ast_id_ = sim->ast_id();
+  }
+
+  // If instruction does not have side-effects lazy deoptimization
+  // after the call will try to deoptimize to the point before the call.
+  // Thus we still need to attach environment to this call even if
+  // call sequence can not deoptimize eagerly.
+  bool needs_environment =
+      (can_deoptimize == CAN_DEOPTIMIZE_EAGERLY) ||
+      !hinstr->HasObservableSideEffects();
+  if (needs_environment && !instr->HasEnvironment()) {
+    instr = AssignEnvironment(instr);
+  }
+
+  return instr;
+}
+
+
+// Attach an (initially empty) pointer map to |instr| for GC safepoints.
+LInstruction* LChunkBuilder::AssignPointerMap(LInstruction* instr) {
+  ASSERT(!instr->HasPointerMap());
+  instr->set_pointer_map(new(zone()) LPointerMap(zone()));
+  return instr;
+}
+
+
+// Allocate a fresh temporary register operand with its own virtual register.
+// On virtual-register exhaustion the build is aborted and vreg 0 is used as
+// a placeholder so callers can continue until the abort is observed.
+LUnallocated* LChunkBuilder::TempRegister() {
+  LUnallocated* operand =
+      new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER);
+  int vreg = allocator_->GetVirtualRegister();
+  if (!allocator_->AllocationOk()) {
+    Abort(kOutOfVirtualRegistersWhileTryingToAllocateTempRegister);
+    vreg = 0;
+  }
+  operand->set_virtual_register(vreg);
+  return operand;
+}
+
+
+// Reserve the next spill slot index in this chunk's frame.
+int LPlatformChunk::GetNextSpillIndex() {
+  return spill_slot_count_++;
+}
+
+
+// Reserve the next spill slot and wrap it as a stack-slot operand of the
+// requested register kind.
+LOperand* LPlatformChunk::GetNextSpillSlot(RegisterKind kind) {
+  int index = GetNextSpillIndex();
+  if (kind == DOUBLE_REGISTERS) {
+    return LDoubleStackSlot::Create(index, zone());
+  } else {
+    ASSERT(kind == GENERAL_REGISTERS);
+    return LStackSlot::Create(index, zone());
+  }
+}
+
+
+// Create a temp operand pinned to a specific double register.
+LOperand* LChunkBuilder::FixedTemp(DoubleRegister reg) {
+  LUnallocated* operand = ToUnallocated(reg);
+  ASSERT(operand->HasFixedPolicy());
+  return operand;
+}
+
+
+// Build the lithium chunk for the whole graph by lowering every basic block.
+// Returns NULL when building was aborted.
+LPlatformChunk* LChunkBuilder::Build() {
+  ASSERT(is_unused());
+  chunk_ = new(zone()) LPlatformChunk(info_, graph_);
+  LPhase phase("L_Building chunk", chunk_);
+  status_ = BUILDING;
+
+  // If compiling for OSR, reserve space for the unoptimized frame,
+  // which will be subsumed into this frame.
+  if (graph()->has_osr()) {
+    // TODO(all): GetNextSpillIndex just increments a field. It has no other
+    // side effects, so we should get rid of this loop.
+    for (int i = graph()->osr()->UnoptimizedFrameSlots(); i > 0; i--) {
+      chunk_->GetNextSpillIndex();
+    }
+  }
+
+  const ZoneList<HBasicBlock*>* blocks = graph_->blocks();
+  for (int i = 0; i < blocks->length(); i++) {
+    DoBasicBlock(blocks->at(i));
+    if (is_aborted()) return NULL;
+  }
+  status_ = DONE;
+  return chunk_;
+}
+
+
+// Set up the hydrogen environment for |block| (inheriting, copying or
+// merging from its predecessors), then lower its instructions to lithium
+// and record the block's instruction index range.
+void LChunkBuilder::DoBasicBlock(HBasicBlock* block) {
+  ASSERT(is_building());
+  current_block_ = block;
+
+  if (block->IsStartBlock()) {
+    block->UpdateEnvironment(graph_->start_environment());
+    argument_count_ = 0;
+  } else if (block->predecessors()->length() == 1) {
+    // We have a single predecessor => copy environment and outgoing
+    // argument count from the predecessor.
+    ASSERT(block->phis()->length() == 0);
+    HBasicBlock* pred = block->predecessors()->at(0);
+    HEnvironment* last_environment = pred->last_environment();
+    ASSERT(last_environment != NULL);
+
+    // Only copy the environment, if it is later used again.
+    if (pred->end()->SecondSuccessor() == NULL) {
+      ASSERT(pred->end()->FirstSuccessor() == block);
+    } else {
+      // Copy when either successor with a higher block id will still need
+      // the predecessor's environment after this block mutates it.
+      if ((pred->end()->FirstSuccessor()->block_id() > block->block_id()) ||
+          (pred->end()->SecondSuccessor()->block_id() > block->block_id())) {
+        last_environment = last_environment->Copy();
+      }
+    }
+    block->UpdateEnvironment(last_environment);
+    ASSERT(pred->argument_count() >= 0);
+    argument_count_ = pred->argument_count();
+  } else {
+    // We are at a state join => process phis.
+    HBasicBlock* pred = block->predecessors()->at(0);
+    // No need to copy the environment, it cannot be used later.
+    HEnvironment* last_environment = pred->last_environment();
+    for (int i = 0; i < block->phis()->length(); ++i) {
+      HPhi* phi = block->phis()->at(i);
+      if (phi->HasMergedIndex()) {
+        last_environment->SetValueAt(phi->merged_index(), phi);
+      }
+    }
+    // Deleted phis leave their environment slot as undefined.
+    for (int i = 0; i < block->deleted_phis()->length(); ++i) {
+      if (block->deleted_phis()->at(i) < last_environment->length()) {
+        last_environment->SetValueAt(block->deleted_phis()->at(i),
+                                     graph_->GetConstantUndefined());
+      }
+    }
+    block->UpdateEnvironment(last_environment);
+    // Pick up the outgoing argument count of one of the predecessors.
+    argument_count_ = pred->argument_count();
+  }
+
+  // Translate hydrogen instructions to lithium ones for the current block.
+  HInstruction* current = block->first();
+  int start = chunk_->instructions()->length();
+  while ((current != NULL) && !is_aborted()) {
+    // Code for constants in registers is generated lazily.
+    if (!current->EmitAtUses()) {
+      VisitInstruction(current);
+    }
+    current = current->next();
+  }
+  int end = chunk_->instructions()->length() - 1;
+  if (end >= start) {
+    block->set_first_instruction_index(start);
+    block->set_last_instruction_index(end);
+  }
+  block->set_argument_count(argument_count_);
+  current_block_ = NULL;
+}
+
+
+// Lower a single hydrogen instruction to lithium and append the result(s)
+// to the current block, updating the outgoing argument count and applying
+// any stress-testing pointer-map/environment flags.
+void LChunkBuilder::VisitInstruction(HInstruction* current) {
+  HInstruction* old_current = current_instruction_;
+  current_instruction_ = current;
+  if (current->has_position()) position_ = current->position();
+
+  LInstruction* instr = NULL;
+  if (current->CanReplaceWithDummyUses()) {
+    // The value is unused; emit dummy uses so operands stay live as needed.
+    if (current->OperandCount() == 0) {
+      instr = DefineAsRegister(new(zone()) LDummy());
+    } else {
+      ASSERT(!current->OperandAt(0)->IsControlInstruction());
+      instr = DefineAsRegister(new(zone())
+          LDummyUse(UseAny(current->OperandAt(0))));
+    }
+    for (int i = 1; i < current->OperandCount(); ++i) {
+      if (current->OperandAt(i)->IsControlInstruction()) continue;
+      LInstruction* dummy =
+          new(zone()) LDummyUse(UseAny(current->OperandAt(i)));
+      dummy->set_hydrogen_value(current);
+      chunk_->AddInstruction(dummy, current_block_);
+    }
+  } else {
+    instr = current->CompileToLithium(this);
+  }
+
+  argument_count_ += current->argument_delta();
+  ASSERT(argument_count_ >= 0);
+
+  if (instr != NULL) {
+    // Associate the hydrogen instruction first, since we may need it for
+    // the ClobbersRegisters() or ClobbersDoubleRegisters() calls below.
+    instr->set_hydrogen_value(current);
+
+// Use #ifdef to match the rest of this file (see MarkAsCall); "#if DEBUG"
+// breaks when DEBUG is defined with an empty value.
+#ifdef DEBUG
+    // Make sure that the lithium instruction has either no fixed register
+    // constraints in temps or the result OR no uses that are only used at
+    // start. If this invariant doesn't hold, the register allocator can decide
+    // to insert a split of a range immediately before the instruction due to an
+    // already allocated register needing to be used for the instruction's fixed
+    // register constraint. In this case, the register allocator won't see an
+    // interference between the split child and the use-at-start (it would if
+    // it was just a plain use), so it is free to move the split child into
+    // the same register that is used for the use-at-start.
+    // See https://code.google.com/p/chromium/issues/detail?id=201590
+    if (!(instr->ClobbersRegisters() && instr->ClobbersDoubleRegisters())) {
+      int fixed = 0;
+      int used_at_start = 0;
+      for (UseIterator it(instr); !it.Done(); it.Advance()) {
+        LUnallocated* operand = LUnallocated::cast(it.Current());
+        if (operand->IsUsedAtStart()) ++used_at_start;
+      }
+      if (instr->Output() != NULL) {
+        if (LUnallocated::cast(instr->Output())->HasFixedPolicy()) ++fixed;
+      }
+      for (TempIterator it(instr); !it.Done(); it.Advance()) {
+        LUnallocated* operand = LUnallocated::cast(it.Current());
+        if (operand->HasFixedPolicy()) ++fixed;
+      }
+      ASSERT(fixed == 0 || used_at_start == 0);
+    }
+#endif
+
+    if (FLAG_stress_pointer_maps && !instr->HasPointerMap()) {
+      instr = AssignPointerMap(instr);
+    }
+    if (FLAG_stress_environments && !instr->HasEnvironment()) {
+      instr = AssignEnvironment(instr);
+    }
+    chunk_->AddInstruction(instr, current_block_);
+  }
+  current_instruction_ = old_current;
+}
+
+
+// Capture the current hydrogen environment on |instr| for deoptimization.
+LInstruction* LChunkBuilder::AssignEnvironment(LInstruction* instr) {
+  HEnvironment* hydrogen_env = current_block_->last_environment();
+  int argument_index_accumulator = 0;
+  ZoneList<HValue*> objects_to_materialize(0, zone());
+  instr->set_environment(CreateEnvironment(hydrogen_env,
+                                           &argument_index_accumulator,
+                                           &objects_to_materialize));
+  return instr;
+}
+
+
+LInstruction* LChunkBuilder::DoAbnormalExit(HAbnormalExit* instr) {
+  // The control instruction marking the end of a block that completed
+  // abruptly (e.g., threw an exception). There is nothing specific to do.
+  return NULL;
+}
+
+
+// Lower a double-typed binary arithmetic op. MOD is lowered as a runtime
+// call with fixed d0/d1 inputs and a d0 result; every other op is a plain
+// register-to-register instruction.
+LInstruction* LChunkBuilder::DoArithmeticD(Token::Value op,
+                                           HArithmeticBinaryOperation* instr) {
+  ASSERT(instr->representation().IsDouble());
+  ASSERT(instr->left()->representation().IsDouble());
+  ASSERT(instr->right()->representation().IsDouble());
+
+  if (op == Token::MOD) {
+    LOperand* left = UseFixedDouble(instr->left(), d0);
+    LOperand* right = UseFixedDouble(instr->right(), d1);
+    LArithmeticD* result = new(zone()) LArithmeticD(Token::MOD, left, right);
+    return MarkAsCall(DefineFixedDouble(result, d0), instr);
+  } else {
+    LOperand* left = UseRegisterAtStart(instr->left());
+    LOperand* right = UseRegisterAtStart(instr->right());
+    LArithmeticD* result = new(zone()) LArithmeticD(op, left, right);
+    return DefineAsRegister(result);
+  }
+}
+
+
+// Lower a generic (smi/tagged) binary op as a stub call with the standard
+// calling convention: left in x1, right in x0, result in x0.
+LInstruction* LChunkBuilder::DoArithmeticT(Token::Value op,
+                                           HBinaryOperation* instr) {
+  ASSERT((op == Token::ADD) || (op == Token::SUB) || (op == Token::MUL) ||
+         (op == Token::DIV) || (op == Token::MOD) || (op == Token::SHR) ||
+         (op == Token::SHL) || (op == Token::SAR) || (op == Token::ROR) ||
+         (op == Token::BIT_OR) || (op == Token::BIT_AND) ||
+         (op == Token::BIT_XOR));
+  HValue* left = instr->left();
+  HValue* right = instr->right();
+
+  // TODO(jbramley): Once we've implemented smi support for all arithmetic
+  // operations, these assertions should check IsTagged().
+  ASSERT(instr->representation().IsSmiOrTagged());
+  ASSERT(left->representation().IsSmiOrTagged());
+  ASSERT(right->representation().IsSmiOrTagged());
+
+  LOperand* context = UseFixed(instr->context(), cp);
+  LOperand* left_operand = UseFixed(left, x1);
+  LOperand* right_operand = UseFixed(right, x0);
+  LArithmeticT* result =
+      new(zone()) LArithmeticT(op, context, left_operand, right_operand);
+  return MarkAsCall(DefineFixed(result, x0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoBoundsCheckBaseIndexInformation(
+    HBoundsCheckBaseIndexInformation* instr) {
+  // This instruction is only consumed by the bounds-check elimination pass
+  // and must never reach lithium lowering.
+  UNREACHABLE();
+  return NULL;
+}
+
+
+// Lower access to a (possibly adapted) stack argument. Fully constant
+// length/index need no temp; the general case needs one scratch register.
+LInstruction* LChunkBuilder::DoAccessArgumentsAt(HAccessArgumentsAt* instr) {
+  // TODO(all): Try to improve this, like ARM r17925.
+  info()->MarkAsRequiresFrame();
+  LOperand* args = NULL;
+  LOperand* length = NULL;
+  LOperand* index = NULL;
+  LOperand* temp = NULL;
+
+  if (instr->length()->IsConstant() && instr->index()->IsConstant()) {
+    args = UseRegisterAtStart(instr->arguments());
+    length = UseConstant(instr->length());
+    index = UseConstant(instr->index());
+  } else {
+    args = UseRegister(instr->arguments());
+    length = UseRegisterAtStart(instr->length());
+    index = UseRegisterOrConstantAtStart(instr->index());
+    temp = TempRegister();
+  }
+
+  return DefineAsRegister(
+      new(zone()) LAccessArgumentsAt(args, length, index, temp));
+}
+
+
+// Lower HAdd according to its representation: smi/int32 register adds (with
+// an environment when overflow can deopt), external-pointer adds, double
+// arithmetic, or a generic stub call for tagged values.
+LInstruction* LChunkBuilder::DoAdd(HAdd* instr) {
+  if (instr->representation().IsSmiOrInteger32()) {
+    ASSERT(instr->left()->representation().Equals(instr->representation()));
+    ASSERT(instr->right()->representation().Equals(instr->representation()));
+    LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
+    LOperand* right =
+        UseRegisterOrConstantAtStart(instr->BetterRightOperand());
+    LInstruction* result = instr->representation().IsSmi() ?
+        DefineAsRegister(new(zone()) LAddS(left, right)) :
+        DefineAsRegister(new(zone()) LAddI(left, right));
+    if (instr->CheckFlag(HValue::kCanOverflow)) {
+      result = AssignEnvironment(result);
+    }
+    return result;
+  } else if (instr->representation().IsExternal()) {
+    ASSERT(instr->left()->representation().IsExternal());
+    ASSERT(instr->right()->representation().IsInteger32());
+    ASSERT(!instr->CheckFlag(HValue::kCanOverflow));
+    LOperand* left = UseRegisterAtStart(instr->left());
+    LOperand* right = UseRegisterOrConstantAtStart(instr->right());
+    return DefineAsRegister(new(zone()) LAddE(left, right));
+  } else if (instr->representation().IsDouble()) {
+    return DoArithmeticD(Token::ADD, instr);
+  } else {
+    ASSERT(instr->representation().IsTagged());
+    return DoArithmeticT(Token::ADD, instr);
+  }
+}
+
+
+// Lower heap allocation; needs a pointer map because the slow path calls
+// the runtime.
+LInstruction* LChunkBuilder::DoAllocate(HAllocate* instr) {
+  info()->MarkAsDeferredCalling();
+  LOperand* context = UseAny(instr->context());
+  LOperand* size = UseRegisterOrConstant(instr->size());
+  LOperand* temp1 = TempRegister();
+  LOperand* temp2 = TempRegister();
+  LAllocate* result = new(zone()) LAllocate(context, size, temp1, temp2);
+  return AssignPointerMap(DefineAsRegister(result));
+}
+
+
+// Lower Function.prototype.apply; all operands are in fixed registers and
+// the call can deoptimize eagerly.
+LInstruction* LChunkBuilder::DoApplyArguments(HApplyArguments* instr) {
+  LOperand* function = UseFixed(instr->function(), x1);
+  LOperand* receiver = UseFixed(instr->receiver(), x0);
+  LOperand* length = UseFixed(instr->length(), x2);
+  LOperand* elements = UseFixed(instr->elements(), x3);
+  LApplyArguments* result = new(zone()) LApplyArguments(function,
+                                                        receiver,
+                                                        length,
+                                                        elements);
+  return MarkAsCall(DefineFixed(result, x0), instr, CAN_DEOPTIMIZE_EAGERLY);
+}
+
+
+// Materialize a pointer to the arguments area; inlined frames need no temp.
+LInstruction* LChunkBuilder::DoArgumentsElements(HArgumentsElements* instr) {
+  info()->MarkAsRequiresFrame();
+  LOperand* temp = instr->from_inlined() ? NULL : TempRegister();
+  return DefineAsRegister(new(zone()) LArgumentsElements(temp));
+}
+
+
+// Read the length of the (possibly adapted) arguments area.
+LInstruction* LChunkBuilder::DoArgumentsLength(HArgumentsLength* instr) {
+  info()->MarkAsRequiresFrame();
+  LOperand* value = UseRegisterAtStart(instr->value());
+  return DefineAsRegister(new(zone()) LArgumentsLength(value));
+}
+
+
+LInstruction* LChunkBuilder::DoArgumentsObject(HArgumentsObject* instr) {
+  // There are no real uses of the arguments object.
+  // arguments.length and element access are supported directly on
+  // stack arguments, and any real arguments object use causes a bailout.
+  // So this value is never used.
+  return NULL;
+}
+
+
+// Lower bitwise AND/OR/XOR: smi/int32 register ops, or a generic stub call
+// for tagged operands.
+LInstruction* LChunkBuilder::DoBitwise(HBitwise* instr) {
+  if (instr->representation().IsSmiOrInteger32()) {
+    ASSERT(instr->left()->representation().Equals(instr->representation()));
+    ASSERT(instr->right()->representation().Equals(instr->representation()));
+    ASSERT(instr->CheckFlag(HValue::kTruncatingToInt32));
+
+    LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
+    LOperand* right =
+        UseRegisterOrConstantAtStart(instr->BetterRightOperand());
+    return instr->representation().IsSmi() ?
+        DefineAsRegister(new(zone()) LBitS(left, right)) :
+        DefineAsRegister(new(zone()) LBitI(left, right));
+  } else {
+    return DoArithmeticT(instr->op(), instr);
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoBlockEntry(HBlockEntry* instr) {
+  // V8 expects a label to be generated for each basic block.
+  // This is used in some places like LAllocator::IsBlockBoundary
+  // in lithium-allocator.cc
+  return new(zone()) LLabel(instr->block());
+}
+
+
+// Lower an array bounds check; deoptimizes on failure, hence the environment.
+LInstruction* LChunkBuilder::DoBoundsCheck(HBoundsCheck* instr) {
+  LOperand* value = UseRegisterOrConstantAtStart(instr->index());
+  LOperand* length = UseRegister(instr->length());
+  return AssignEnvironment(new(zone()) LBoundsCheck(value, length));
+}
+
+
+// Lower HBranch. Simple representations and simply-typed tagged values
+// cannot deoptimize; generic tagged values need map temps and, unless every
+// possible input type is covered, an environment.
+LInstruction* LChunkBuilder::DoBranch(HBranch* instr) {
+  LInstruction* goto_instr = CheckElideControlInstruction(instr);
+  if (goto_instr != NULL) return goto_instr;
+
+  HValue* value = instr->value();
+  Representation r = value->representation();
+  HType type = value->type();
+
+  if (r.IsInteger32() || r.IsSmi() || r.IsDouble()) {
+    // These representations have simple checks that cannot deoptimize.
+    return new(zone()) LBranch(UseRegister(value), NULL, NULL);
+  } else {
+    ASSERT(r.IsTagged());
+    if (type.IsBoolean() || type.IsSmi() || type.IsJSArray() ||
+        type.IsHeapNumber()) {
+      // These types have simple checks that cannot deoptimize.
+      return new(zone()) LBranch(UseRegister(value), NULL, NULL);
+    }
+
+    if (type.IsString()) {
+      // This type cannot deoptimize, but needs a scratch register.
+      return new(zone()) LBranch(UseRegister(value), TempRegister(), NULL);
+    }
+
+    ToBooleanStub::Types expected = instr->expected_input_types();
+    bool needs_temps = expected.NeedsMap() || expected.IsEmpty();
+    LOperand* temp1 = needs_temps ? TempRegister() : NULL;
+    LOperand* temp2 = needs_temps ? TempRegister() : NULL;
+
+    if (expected.IsGeneric() || expected.IsEmpty()) {
+      // The generic case cannot deoptimize because it already supports every
+      // possible input type.
+      ASSERT(needs_temps);
+      return new(zone()) LBranch(UseRegister(value), temp1, temp2);
+    } else {
+      return AssignEnvironment(
+          new(zone()) LBranch(UseRegister(value), temp1, temp2));
+    }
+  }
+}
+
+
+// Call a known JS function: function object in x1, result in x0.
+LInstruction* LChunkBuilder::DoCallJSFunction(
+    HCallJSFunction* instr) {
+  LOperand* function = UseFixed(instr->function(), x1);
+
+  LCallJSFunction* result = new(zone()) LCallJSFunction(function);
+
+  return MarkAsCall(DefineFixed(result, x0), instr);
+}
+
+
+// Call through a CallInterfaceDescriptor; each argument is pinned to the
+// parameter register the descriptor prescribes.
+LInstruction* LChunkBuilder::DoCallWithDescriptor(
+    HCallWithDescriptor* instr) {
+  const CallInterfaceDescriptor* descriptor = instr->descriptor();
+
+  LOperand* target = UseRegisterOrConstantAtStart(instr->target());
+  ZoneList<LOperand*> ops(instr->OperandCount(), zone());
+  ops.Add(target, zone());
+  // Operand 0 is the target; descriptor parameters start at operand 1.
+  for (int i = 1; i < instr->OperandCount(); i++) {
+    LOperand* op = UseFixed(instr->OperandAt(i),
+        descriptor->GetParameterRegister(i - 1));
+    ops.Add(op, zone());
+  }
+
+  LCallWithDescriptor* result = new(zone()) LCallWithDescriptor(descriptor,
+                                                                ops,
+                                                                zone());
+  return MarkAsCall(DefineFixed(result, x0), instr);
+}
+
+
+// Call a possibly-unknown function value: function in x1, result in x0.
+LInstruction* LChunkBuilder::DoCallFunction(HCallFunction* instr) {
+  LOperand* context = UseFixed(instr->context(), cp);
+  LOperand* function = UseFixed(instr->function(), x1);
+  LCallFunction* call = new(zone()) LCallFunction(context, function);
+  return MarkAsCall(DefineFixed(call, x0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoCallNew(HCallNew* instr) {
+  LOperand* context = UseFixed(instr->context(), cp);
+  // The call to CallConstructStub will expect the constructor to be in x1.
+  LOperand* constructor = UseFixed(instr->constructor(), x1);
+  LCallNew* result = new(zone()) LCallNew(context, constructor);
+  return MarkAsCall(DefineFixed(result, x0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoCallNewArray(HCallNewArray* instr) {
+  LOperand* context = UseFixed(instr->context(), cp);
+  // The call to ArrayConstructCode will expect the constructor to be in x1.
+  LOperand* constructor = UseFixed(instr->constructor(), x1);
+  LCallNewArray* result = new(zone()) LCallNewArray(context, constructor);
+  return MarkAsCall(DefineFixed(result, x0), instr);
+}
+
+
+// Call into the runtime; result in x0.
+LInstruction* LChunkBuilder::DoCallRuntime(HCallRuntime* instr) {
+  LOperand* context = UseFixed(instr->context(), cp);
+  return MarkAsCall(DefineFixed(new(zone()) LCallRuntime(context), x0), instr);
+}
+
+
+// Call a code stub; result in x0.
+LInstruction* LChunkBuilder::DoCallStub(HCallStub* instr) {
+  LOperand* context = UseFixed(instr->context(), cp);
+  return MarkAsCall(DefineFixed(new(zone()) LCallStub(context), x0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoCapturedObject(HCapturedObject* instr) {
+  instr->ReplayEnvironment(current_block_->last_environment());
+
+  // There are no real uses of a captured object.
+  return NULL;
+}
+
+
+// Lower representation changes (HChange) between smi, tagged, double and
+// int32, assigning environments wherever the conversion can deoptimize.
+LInstruction* LChunkBuilder::DoChange(HChange* instr) {
+  Representation from = instr->from();
+  Representation to = instr->to();
+
+  if (from.IsSmi()) {
+    if (to.IsTagged()) {
+      // A smi is already a valid tagged value; no code needed.
+      LOperand* value = UseRegister(instr->value());
+      return DefineSameAsFirst(new(zone()) LDummyUse(value));
+    }
+    // Treat the remaining smi sources like tagged sources below.
+    from = Representation::Tagged();
+  }
+
+  if (from.IsTagged()) {
+    if (to.IsDouble()) {
+      LOperand* value = UseRegister(instr->value());
+      LOperand* temp = TempRegister();
+      LNumberUntagD* res = new(zone()) LNumberUntagD(value, temp);
+      return AssignEnvironment(DefineAsRegister(res));
+    } else if (to.IsSmi()) {
+      LOperand* value = UseRegister(instr->value());
+      if (instr->value()->type().IsSmi()) {
+        return DefineSameAsFirst(new(zone()) LDummyUse(value));
+      }
+      return AssignEnvironment(DefineSameAsFirst(new(zone()) LCheckSmi(value)));
+    } else {
+      ASSERT(to.IsInteger32());
+      LInstruction* res = NULL;
+
+      if (instr->value()->type().IsSmi() ||
+          instr->value()->representation().IsSmi()) {
+        // Known smi: a plain untag cannot deoptimize.
+        LOperand* value = UseRegisterAtStart(instr->value());
+        res = DefineAsRegister(new(zone()) LSmiUntag(value, false));
+      } else {
+        LOperand* value = UseRegister(instr->value());
+        LOperand* temp1 = TempRegister();
+        // The non-truncating path needs a double scratch register (d24).
+        LOperand* temp2 =
+            instr->CanTruncateToInt32() ? TempRegister() : FixedTemp(d24);
+        res = DefineAsRegister(new(zone()) LTaggedToI(value, temp1, temp2));
+        res = AssignEnvironment(res);
+      }
+
+      return res;
+    }
+  } else if (from.IsDouble()) {
+    if (to.IsTagged()) {
+      info()->MarkAsDeferredCalling();
+      LOperand* value = UseRegister(instr->value());
+      LOperand* temp1 = TempRegister();
+      LOperand* temp2 = TempRegister();
+
+      LNumberTagD* result = new(zone()) LNumberTagD(value, temp1, temp2);
+      return AssignPointerMap(DefineAsRegister(result));
+    } else {
+      ASSERT(to.IsSmi() || to.IsInteger32());
+      LOperand* value = UseRegister(instr->value());
+
+      if (instr->CanTruncateToInt32()) {
+        LOperand* temp1 = TempRegister();
+        LOperand* temp2 = TempRegister();
+        LTruncateDoubleToIntOrSmi* result =
+            new(zone()) LTruncateDoubleToIntOrSmi(value, temp1, temp2);
+        return DefineAsRegister(result);
+      } else {
+        LDoubleToIntOrSmi* result = new(zone()) LDoubleToIntOrSmi(value);
+        return AssignEnvironment(DefineAsRegister(result));
+      }
+    }
+  } else if (from.IsInteger32()) {
+    // NOTE(review): MarkAsDeferredCalling is set for every int32 source,
+    // including the non-allocating to-smi/to-double paths — confirm intended.
+    info()->MarkAsDeferredCalling();
+    if (to.IsTagged()) {
+      if (instr->value()->CheckFlag(HInstruction::kUint32)) {
+        LOperand* value = UseRegister(instr->value());
+        LNumberTagU* result = new(zone()) LNumberTagU(value,
+                                                      TempRegister(),
+                                                      TempRegister());
+        return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
+      } else {
+        STATIC_ASSERT((kMinInt == Smi::kMinValue) &&
+                      (kMaxInt == Smi::kMaxValue));
+        LOperand* value = UseRegisterAtStart(instr->value());
+        return DefineAsRegister(new(zone()) LSmiTag(value));
+      }
+    } else if (to.IsSmi()) {
+      LOperand* value = UseRegisterAtStart(instr->value());
+      if (instr->value()->CheckFlag(HInstruction::kUint32)) {
+        LUint32ToSmi* result = new(zone()) LUint32ToSmi(value);
+        return AssignEnvironment(DefineAsRegister(result));
+      } else {
+        // This cannot deoptimize because an A64 smi can represent any int32.
+        return DefineAsRegister(new(zone()) LInteger32ToSmi(value));
+      }
+    } else {
+      ASSERT(to.IsDouble());
+      if (instr->value()->CheckFlag(HInstruction::kUint32)) {
+        return DefineAsRegister(
+            new(zone()) LUint32ToDouble(UseRegisterAtStart(instr->value())));
+      } else {
+        return DefineAsRegister(
+            new(zone()) LInteger32ToDouble(UseRegisterAtStart(instr->value())));
+      }
+    }
+  }
+
+  UNREACHABLE();
+  return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoCheckValue(HCheckValue* instr) {
+  // We only need a temp register if the target is in new space, but we can't
+  // dereference the handle to test that here.
+  // TODO(all): Check these constraints. The temp register is not always used.
+  LOperand* value = UseRegister(instr->value());
+  LOperand* temp = TempRegister();
+  return AssignEnvironment(new(zone()) LCheckValue(value, temp));
+}
+
+
+// Check an object's instance type; deoptimizes on mismatch.
+LInstruction* LChunkBuilder::DoCheckInstanceType(HCheckInstanceType* instr) {
+  LOperand* value = UseRegisterAtStart(instr->value());
+  LOperand* temp = TempRegister();
+  LInstruction* result = new(zone()) LCheckInstanceType(value, temp);
+  return AssignEnvironment(result);
+}
+
+
+// Check an object's map; migration targets may call the runtime and so need
+// a pointer map as well as an environment.
+LInstruction* LChunkBuilder::DoCheckMaps(HCheckMaps* instr) {
+  if (instr->CanOmitMapChecks()) {
+    // LCheckMaps does nothing in this case.
+    return new(zone()) LCheckMaps(NULL);
+  } else {
+    LOperand* value = UseRegisterAtStart(instr->value());
+    LOperand* temp = TempRegister();
+
+    if (instr->has_migration_target()) {
+      info()->MarkAsDeferredCalling();
+      LInstruction* result = new(zone()) LCheckMaps(value, temp);
+      return AssignPointerMap(AssignEnvironment(result));
+    } else {
+      return AssignEnvironment(new(zone()) LCheckMaps(value, temp));
+    }
+  }
+}
+
+
+// Deoptimize if the value is a smi (i.e. require a heap object).
+LInstruction* LChunkBuilder::DoCheckHeapObject(HCheckHeapObject* instr) {
+  LOperand* value = UseRegisterAtStart(instr->value());
+  return AssignEnvironment(new(zone()) LCheckNonSmi(value));
+}
+
+
+// Deoptimize if the value is not a smi.
+LInstruction* LChunkBuilder::DoCheckSmi(HCheckSmi* instr) {
+  LOperand* value = UseRegisterAtStart(instr->value());
+  return AssignEnvironment(new(zone()) LCheckSmi(value));
+}
+
+
+// Clamp a double, int32 or tagged number to the uint8 range [0, 255]. Only
+// the tagged case can deoptimize; it needs a temp and double scratch d24.
+LInstruction* LChunkBuilder::DoClampToUint8(HClampToUint8* instr) {
+  HValue* value = instr->value();
+  Representation input_rep = value->representation();
+  LOperand* reg = UseRegister(value);
+  if (input_rep.IsDouble()) {
+    return DefineAsRegister(new(zone()) LClampDToUint8(reg));
+  } else if (input_rep.IsInteger32()) {
+    return DefineAsRegister(new(zone()) LClampIToUint8(reg));
+  } else {
+    ASSERT(input_rep.IsSmiOrTagged());
+    return AssignEnvironment(
+        DefineAsRegister(new(zone()) LClampTToUint8(reg,
+                                                    TempRegister(),
+                                                    FixedTemp(d24))));
+  }
+}
+
+
+// Branch on the class name of a tagged value; needs two scratch registers.
+LInstruction* LChunkBuilder::DoClassOfTestAndBranch(
+    HClassOfTestAndBranch* instr) {
+  ASSERT(instr->value()->representation().IsTagged());
+  LOperand* value = UseRegisterAtStart(instr->value());
+  return new(zone()) LClassOfTestAndBranch(value,
+                                           TempRegister(),
+                                           TempRegister());
+}
+
+
+// Branch on a numeric comparison of smi/int32 or double operands.
+LInstruction* LChunkBuilder::DoCompareNumericAndBranch(
+    HCompareNumericAndBranch* instr) {
+  Representation r = instr->representation();
+
+  // TODO(all): This instruction has been replaced by HCompareNumericAndBranch
+  // on bleeding_edge. We should update when we'll do the rebase.
+  if (r.IsSmiOrInteger32()) {
+    ASSERT(instr->left()->representation().Equals(r));
+    ASSERT(instr->right()->representation().Equals(r));
+    LOperand* left = UseRegisterOrConstantAtStart(instr->left());
+    LOperand* right = UseRegisterOrConstantAtStart(instr->right());
+    return new(zone()) LCompareNumericAndBranch(left, right);
+  } else {
+    ASSERT(r.IsDouble());
+    ASSERT(instr->left()->representation().IsDouble());
+    ASSERT(instr->right()->representation().IsDouble());
+    // TODO(all): In fact the only case that we can handle more efficiently is
+    // when one of the operands is the constant 0. Currently the MacroAssembler
+    // will be able to cope with any constant by loading it into an internal
+    // scratch register. This means that if the constant is used more than
+    // once, it will be loaded multiple times. Unfortunately crankshaft already
+    // duplicates constant loads, but we should modify the code below once this
+    // issue has been addressed in crankshaft.
+    LOperand* left = UseRegisterOrConstantAtStart(instr->left());
+    LOperand* right = UseRegisterOrConstantAtStart(instr->right());
+    return new(zone()) LCompareNumericAndBranch(left, right);
+  }
+}
+
+
+// Generic (tagged) comparison via stub call: left in x1, right in x0.
+LInstruction* LChunkBuilder::DoCompareGeneric(HCompareGeneric* instr) {
+  ASSERT(instr->left()->representation().IsTagged());
+  ASSERT(instr->right()->representation().IsTagged());
+  LOperand* context = UseFixed(instr->context(), cp);
+  LOperand* left = UseFixed(instr->left(), x1);
+  LOperand* right = UseFixed(instr->right(), x0);
+  LCmpT* result = new(zone()) LCmpT(context, left, right);
+  return MarkAsCall(DefineFixed(result, x0), instr);
+}
+
+
+// Branch on whether a value is the hole; the double variant needs a temp.
+LInstruction* LChunkBuilder::DoCompareHoleAndBranch(
+    HCompareHoleAndBranch* instr) {
+  LOperand* value = UseRegister(instr->value());
+  if (instr->representation().IsTagged()) {
+    return new(zone()) LCmpHoleAndBranchT(value);
+  } else {
+    LOperand* temp = TempRegister();
+    return new(zone()) LCmpHoleAndBranchD(value, temp);
+  }
+}
+
+
+// Branch on object identity.
+LInstruction* LChunkBuilder::DoCompareObjectEqAndBranch(
+    HCompareObjectEqAndBranch* instr) {
+  LInstruction* goto_instr = CheckElideControlInstruction(instr);
+  if (goto_instr != NULL) return goto_instr;
+
+  LOperand* left = UseRegisterAtStart(instr->left());
+  LOperand* right = UseRegisterAtStart(instr->right());
+  return new(zone()) LCmpObjectEqAndBranch(left, right);
+}
+
+
+// Branch on an object's map.
+LInstruction* LChunkBuilder::DoCompareMap(HCompareMap* instr) {
+  LInstruction* goto_instr = CheckElideControlInstruction(instr);
+  if (goto_instr != NULL) return goto_instr;
+
+  ASSERT(instr->value()->representation().IsTagged());
+  LOperand* value = UseRegisterAtStart(instr->value());
+  LOperand* temp = TempRegister();
+  return new(zone()) LCmpMapAndBranch(value, temp);
+}
+
+
+// Materialize a constant in a register, picking the lithium constant
+// flavour that matches its representation.
+LInstruction* LChunkBuilder::DoConstant(HConstant* instr) {
+  Representation r = instr->representation();
+  if (r.IsSmi()) {
+    return DefineAsRegister(new(zone()) LConstantS);
+  } else if (r.IsInteger32()) {
+    return DefineAsRegister(new(zone()) LConstantI);
+  } else if (r.IsDouble()) {
+    return DefineAsRegister(new(zone()) LConstantD);
+  } else if (r.IsExternal()) {
+    return DefineAsRegister(new(zone()) LConstantE);
+  } else if (r.IsTagged()) {
+    return DefineAsRegister(new(zone()) LConstantT);
+  } else {
+    UNREACHABLE();
+    return NULL;
+  }
+}
+
+
+// Provide the current context; stubs take it in the fixed cp register.
+LInstruction* LChunkBuilder::DoContext(HContext* instr) {
+  if (instr->HasNoUses()) return NULL;
+
+  if (info()->IsStub()) {
+    return DefineFixed(new(zone()) LContext, cp);
+  }
+
+  return DefineAsRegister(new(zone()) LContext);
+}
+
+
+// Read a Date field via runtime conventions: object in x0, result in x0.
+LInstruction* LChunkBuilder::DoDateField(HDateField* instr) {
+  LOperand* object = UseFixed(instr->value(), x0);
+  LDateField* result = new(zone()) LDateField(object, instr->index());
+  return MarkAsCall(DefineFixed(result, x0), instr, CAN_DEOPTIMIZE_EAGERLY);
+}
+
+
+LInstruction* LChunkBuilder::DoDebugBreak(HDebugBreak* instr) {
+  return new(zone()) LDebugBreak();
+}
+
+
+// Declare global variables/functions via a runtime call (no result).
+LInstruction* LChunkBuilder::DoDeclareGlobals(HDeclareGlobals* instr) {
+  LOperand* context = UseFixed(instr->context(), cp);
+  return MarkAsCall(new(zone()) LDeclareGlobals(context), instr);
+}
+
+
+// Unconditional deoptimization point.
+LInstruction* LChunkBuilder::DoDeoptimize(HDeoptimize* instr) {
+  return AssignEnvironment(new(zone()) LDeoptimize);
+}
+
+
+// Lower division: int32 register division (power-of-two divisors use a
+// constant operand; truncating uses skip the remainder-check temp), double
+// arithmetic, or a generic stub call for tagged operands.
+LInstruction* LChunkBuilder::DoDiv(HDiv* instr) {
+  if (instr->representation().IsInteger32()) {
+    // TODO(all): Update this case to support smi inputs.
+    ASSERT(instr->left()->representation().Equals(instr->representation()));
+    ASSERT(instr->right()->representation().Equals(instr->representation()));
+    if (instr->RightIsPowerOf2()) {
+      ASSERT(!instr->CheckFlag(HValue::kCanBeDivByZero));
+      LOperand* value = UseRegisterAtStart(instr->left());
+      LDivI* div = new(zone()) LDivI(value, UseConstant(instr->right()), NULL);
+      return AssignEnvironment(DefineAsRegister(div));
+    }
+    LOperand* dividend = UseRegister(instr->left());
+    LOperand* divisor = UseRegister(instr->right());
+    LOperand* temp = instr->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)
+        ? NULL : TempRegister();
+    LDivI* div = new(zone()) LDivI(dividend, divisor, temp);
+    return AssignEnvironment(DefineAsRegister(div));
+  } else if (instr->representation().IsDouble()) {
+    return DoArithmeticD(Token::DIV, instr);
+  } else {
+    return DoArithmeticT(Token::DIV, instr);
+  }
+}
+
+
+// Keep an otherwise-unused value alive for deoptimization purposes.
+LInstruction* LChunkBuilder::DoDummyUse(HDummyUse* instr) {
+  return DefineAsRegister(new(zone()) LDummyUse(UseAny(instr->value())));
+}
+
+
+// Enter an inlined function: build its environment from the caller's and
+// install it on the current block. Emits no lithium instruction.
+LInstruction* LChunkBuilder::DoEnterInlined(HEnterInlined* instr) {
+  HEnvironment* outer = current_block_->last_environment();
+  HConstant* undefined = graph()->GetConstantUndefined();
+  HEnvironment* inner = outer->CopyForInlining(instr->closure(),
+                                               instr->arguments_count(),
+                                               instr->function(),
+                                               undefined,
+                                               instr->inlining_kind());
+  // Only replay binding of arguments object if it wasn't removed from graph.
+  if ((instr->arguments_var() != NULL) &&
+      instr->arguments_object()->IsLinked()) {
+    inner->Bind(instr->arguments_var(), instr->arguments_object());
+  }
+  inner->set_entry(instr);
+  current_block_->UpdateEnvironment(inner);
+  chunk_->AddInlinedClosure(instr->closure());
+  return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoEnvironmentMarker(HEnvironmentMarker* instr) {
+  // Environment markers are consumed by earlier hydrogen passes and must
+  // never reach lithium lowering.
+  UNREACHABLE();
+  return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoForceRepresentation(
+ HForceRepresentation* instr) {
+ // All HForceRepresentation instructions should be eliminated in the
+ // representation change phase of Hydrogen.
+ UNREACHABLE();
+ return NULL;
+}
+
+
+// Creates a closure for a function literal; a runtime call returning in x0.
+LInstruction* LChunkBuilder::DoFunctionLiteral(HFunctionLiteral* instr) {
+  LOperand* context = UseFixed(instr->context(), cp);
+  return MarkAsCall(
+      DefineFixed(new(zone()) LFunctionLiteral(context), x0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoGetCachedArrayIndex(
+    HGetCachedArrayIndex* instr) {
+  ASSERT(instr->value()->representation().IsTagged());
+  LOperand* value = UseRegisterAtStart(instr->value());
+  return DefineAsRegister(new(zone()) LGetCachedArrayIndex(value));
+}
+
+
+// Unconditional jump to the successor block.
+LInstruction* LChunkBuilder::DoGoto(HGoto* instr) {
+  return new(zone()) LGoto(instr->FirstSuccessor());
+}
+
+
+LInstruction* LChunkBuilder::DoHasCachedArrayIndexAndBranch(
+    HHasCachedArrayIndexAndBranch* instr) {
+  ASSERT(instr->value()->representation().IsTagged());
+  return new(zone()) LHasCachedArrayIndexAndBranch(
+      UseRegisterAtStart(instr->value()), TempRegister());
+}
+
+
+// Branch on the instance type of a tagged value; needs a temp for the map.
+LInstruction* LChunkBuilder::DoHasInstanceTypeAndBranch(
+    HHasInstanceTypeAndBranch* instr) {
+  ASSERT(instr->value()->representation().IsTagged());
+  LOperand* value = UseRegisterAtStart(instr->value());
+  return new(zone()) LHasInstanceTypeAndBranch(value, TempRegister());
+}
+
+
+// Computes base_object + offset for an object allocated inside another one.
+LInstruction* LChunkBuilder::DoInnerAllocatedObject(
+    HInnerAllocatedObject* instr) {
+  LOperand* base_object = UseRegisterAtStart(instr->base_object());
+  LOperand* offset = UseRegisterOrConstantAtStart(instr->offset());
+  return DefineAsRegister(
+      new(zone()) LInnerAllocatedObject(base_object, offset));
+}
+
+
+// Full instanceof via InstanceofStub; operands live in the stub's fixed
+// registers and the result arrives in x0.
+LInstruction* LChunkBuilder::DoInstanceOf(HInstanceOf* instr) {
+  LOperand* context = UseFixed(instr->context(), cp);
+  LInstanceOf* result = new(zone()) LInstanceOf(
+      context,
+      UseFixed(instr->left(), InstanceofStub::left()),
+      UseFixed(instr->right(), InstanceofStub::right()));
+  return MarkAsCall(DefineFixed(result, x0), instr);
+}
+
+
+// instanceof against a known global function; only the left operand and the
+// context are needed at the call site.
+LInstruction* LChunkBuilder::DoInstanceOfKnownGlobal(
+    HInstanceOfKnownGlobal* instr) {
+  LInstanceOfKnownGlobal* result = new(zone()) LInstanceOfKnownGlobal(
+      UseFixed(instr->context(), cp),
+      UseFixed(instr->left(), InstanceofStub::left()));
+  return MarkAsCall(DefineFixed(result, x0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoInvokeFunction(HInvokeFunction* instr) {
+  LOperand* context = UseFixed(instr->context(), cp);
+  // The function is required (by MacroAssembler::InvokeFunction) to be in x1.
+  LOperand* function = UseFixed(instr->function(), x1);
+  LInvokeFunction* result = new(zone()) LInvokeFunction(context, function);
+  return MarkAsCall(DefineFixed(result, x0), instr, CANNOT_DEOPTIMIZE_EAGERLY);
+}
+
+
+LInstruction* LChunkBuilder::DoIsConstructCallAndBranch(
+    HIsConstructCallAndBranch* instr) {
+  return new(zone()) LIsConstructCallAndBranch(TempRegister(), TempRegister());
+}
+
+
+// Branch on whether a value is -0; elided when statically decidable.
+LInstruction* LChunkBuilder::DoCompareMinusZeroAndBranch(
+    HCompareMinusZeroAndBranch* instr) {
+  LInstruction* goto_instr = CheckElideControlInstruction(instr);
+  if (goto_instr != NULL) return goto_instr;
+  LOperand* value = UseRegister(instr->value());
+  LOperand* scratch = TempRegister();
+  return new(zone()) LCompareMinusZeroAndBranch(value, scratch);
+}
+
+
+LInstruction* LChunkBuilder::DoIsObjectAndBranch(HIsObjectAndBranch* instr) {
+  ASSERT(instr->value()->representation().IsTagged());
+  LOperand* value = UseRegisterAtStart(instr->value());
+  LOperand* temp1 = TempRegister();
+  LOperand* temp2 = TempRegister();
+  return new(zone()) LIsObjectAndBranch(value, temp1, temp2);
+}
+
+
+LInstruction* LChunkBuilder::DoIsStringAndBranch(HIsStringAndBranch* instr) {
+  ASSERT(instr->value()->representation().IsTagged());
+  LOperand* value = UseRegisterAtStart(instr->value());
+  LOperand* temp = TempRegister();
+  return new(zone()) LIsStringAndBranch(value, temp);
+}
+
+
+LInstruction* LChunkBuilder::DoIsSmiAndBranch(HIsSmiAndBranch* instr) {
+  ASSERT(instr->value()->representation().IsTagged());
+  return new(zone()) LIsSmiAndBranch(UseRegisterAtStart(instr->value()));
+}
+
+
+LInstruction* LChunkBuilder::DoIsUndetectableAndBranch(
+    HIsUndetectableAndBranch* instr) {
+  ASSERT(instr->value()->representation().IsTagged());
+  LOperand* value = UseRegisterAtStart(instr->value());
+  return new(zone()) LIsUndetectableAndBranch(value, TempRegister());
+}
+
+
+// Restores the outer environment when leaving an inlined function. May emit
+// an LDrop to pop arguments that were pushed by the inlined call.
+LInstruction* LChunkBuilder::DoLeaveInlined(HLeaveInlined* instr) {
+  LInstruction* pop = NULL;
+  HEnvironment* env = current_block_->last_environment();
+
+  if (env->entry()->arguments_pushed()) {
+    int argument_count = env->arguments_environment()->parameter_count();
+    pop = new(zone()) LDrop(argument_count);
+    ASSERT(instr->argument_delta() == -argument_count);
+  }
+
+  HEnvironment* outer =
+      current_block_->last_environment()->DiscardInlined(false);
+  current_block_->UpdateEnvironment(outer);
+
+  return pop;
+}
+
+
+// Loads a slot from a context; deopts on the hole when a check is required.
+LInstruction* LChunkBuilder::DoLoadContextSlot(HLoadContextSlot* instr) {
+  LOperand* context = UseRegisterAtStart(instr->value());
+  LInstruction* result =
+      DefineAsRegister(new(zone()) LLoadContextSlot(context));
+  return instr->RequiresHoleCheck() ? AssignEnvironment(result) : result;
+}
+
+
+// Loads a function's prototype; always gets an environment because the
+// lookup can deoptimize.
+LInstruction* LChunkBuilder::DoLoadFunctionPrototype(
+    HLoadFunctionPrototype* instr) {
+  LOperand* function = UseRegister(instr->function());
+  LOperand* temp = TempRegister();
+  return AssignEnvironment(DefineAsRegister(
+      new(zone()) LLoadFunctionPrototype(function, temp)));
+}
+
+
+LInstruction* LChunkBuilder::DoLoadGlobalCell(HLoadGlobalCell* instr) {
+  LLoadGlobalCell* result = new(zone()) LLoadGlobalCell();
+  return instr->RequiresHoleCheck()
+      ? AssignEnvironment(DefineAsRegister(result))
+      : DefineAsRegister(result);
+}
+
+
+// Generic (IC-based) global load; a call with fixed registers.
+LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) {
+  LOperand* context = UseFixed(instr->context(), cp);
+  LOperand* global_object = UseFixed(instr->global_object(), x0);
+  LLoadGlobalGeneric* result =
+      new(zone()) LLoadGlobalGeneric(context, global_object);
+  return MarkAsCall(DefineFixed(result, x0), instr);
+}
+
+
+// Lowers keyed element loads. Fixed (in-object) arrays split by element
+// representation; typed/external arrays use LLoadKeyedExternal. Environments
+// are attached wherever the load can deoptimize (hole checks, and uint32
+// loads that may not fit in an int32 result).
+LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) {
+  ASSERT(instr->key()->representation().IsSmiOrInteger32());
+  ElementsKind elements_kind = instr->elements_kind();
+  LOperand* elements = UseRegister(instr->elements());
+  LOperand* key = UseRegisterOrConstantAtStart(instr->key());
+
+  if (!instr->is_typed_elements()) {
+    if (instr->representation().IsDouble()) {
+      // A temp is needed to compute the address unless the key is constant,
+      // and for the hole check.
+      LOperand* temp = (!instr->key()->IsConstant() ||
+                        instr->RequiresHoleCheck())
+             ? TempRegister()
+             : NULL;
+
+      LLoadKeyedFixedDouble* result =
+          new(zone()) LLoadKeyedFixedDouble(elements, key, temp);
+      return instr->RequiresHoleCheck()
+          ? AssignEnvironment(DefineAsRegister(result))
+          : DefineAsRegister(result);
+    } else {
+      ASSERT(instr->representation().IsSmiOrTagged() ||
+             instr->representation().IsInteger32());
+      LOperand* temp = instr->key()->IsConstant() ? NULL : TempRegister();
+      LLoadKeyedFixed* result =
+          new(zone()) LLoadKeyedFixed(elements, key, temp);
+      return instr->RequiresHoleCheck()
+          ? AssignEnvironment(DefineAsRegister(result))
+          : DefineAsRegister(result);
+    }
+  } else {
+    ASSERT((instr->representation().IsInteger32() &&
+            !IsDoubleOrFloatElementsKind(instr->elements_kind())) ||
+           (instr->representation().IsDouble() &&
+            IsDoubleOrFloatElementsKind(instr->elements_kind())));
+
+    LOperand* temp = instr->key()->IsConstant() ? NULL : TempRegister();
+    LLoadKeyedExternal* result =
+        new(zone()) LLoadKeyedExternal(elements, key, temp);
+    // An unsigned int array load might overflow and cause a deopt. Make sure it
+    // has an environment.
+    if (instr->RequiresHoleCheck() ||
+        elements_kind == EXTERNAL_UINT32_ELEMENTS ||
+        elements_kind == UINT32_ELEMENTS) {
+      return AssignEnvironment(DefineAsRegister(result));
+    } else {
+      return DefineAsRegister(result);
+    }
+  }
+}
+
+
+// Generic keyed load via the KeyedLoadIC calling convention.
+LInstruction* LChunkBuilder::DoLoadKeyedGeneric(HLoadKeyedGeneric* instr) {
+  LOperand* context = UseFixed(instr->context(), cp);
+  LOperand* object = UseFixed(instr->object(), x1);
+  LOperand* key = UseFixed(instr->key(), x0);
+
+  LInstruction* result =
+      DefineFixed(new(zone()) LLoadKeyedGeneric(context, object, key), x0);
+  return MarkAsCall(result, instr);
+}
+
+
+LInstruction* LChunkBuilder::DoLoadNamedField(HLoadNamedField* instr) {
+  LOperand* object = UseRegisterAtStart(instr->object());
+  return DefineAsRegister(new(zone()) LLoadNamedField(object));
+}
+
+
+// Generic named load via the LoadIC calling convention.
+LInstruction* LChunkBuilder::DoLoadNamedGeneric(HLoadNamedGeneric* instr) {
+  LOperand* context = UseFixed(instr->context(), cp);
+  LOperand* object = UseFixed(instr->object(), x0);
+  LInstruction* result =
+      DefineFixed(new(zone()) LLoadNamedGeneric(context, object), x0);
+  return MarkAsCall(result, instr);
+}
+
+
+LInstruction* LChunkBuilder::DoLoadRoot(HLoadRoot* instr) {
+  return DefineAsRegister(new(zone()) LLoadRoot);
+}
+
+
+LInstruction* LChunkBuilder::DoMapEnumLength(HMapEnumLength* instr) {
+  LOperand* map = UseRegisterAtStart(instr->value());
+  return DefineAsRegister(new(zone()) LMapEnumLength(map));
+}
+
+
+// Flooring integer division; the remainder temp is used to adjust the
+// truncated quotient. Always deoptimizable, hence the environment.
+LInstruction* LChunkBuilder::DoMathFloorOfDiv(HMathFloorOfDiv* instr) {
+  HValue* right = instr->right();
+  LOperand* dividend = UseRegister(instr->left());
+  LOperand* divisor = UseRegister(right);
+  LOperand* remainder = TempRegister();
+  return AssignEnvironment(DefineAsRegister(
+      new(zone()) LMathFloorOfDiv(dividend, divisor, remainder)));
+}
+
+
+// Math.min / Math.max for int32/smi or double operands.
+LInstruction* LChunkBuilder::DoMathMinMax(HMathMinMax* instr) {
+  LOperand* left = NULL;
+  LOperand* right = NULL;
+  if (instr->representation().IsSmiOrInteger32()) {
+    ASSERT(instr->left()->representation().Equals(instr->representation()));
+    ASSERT(instr->right()->representation().Equals(instr->representation()));
+    left = UseRegisterAtStart(instr->BetterLeftOperand());
+    right = UseRegisterOrConstantAtStart(instr->BetterRightOperand());
+  } else {
+    ASSERT(instr->representation().IsDouble());
+    ASSERT(instr->left()->representation().IsDouble());
+    ASSERT(instr->right()->representation().IsDouble());
+    left = UseRegisterAtStart(instr->left());
+    right = UseRegisterAtStart(instr->right());
+  }
+  return DefineAsRegister(new(zone()) LMathMinMax(left, right));
+}
+
+
+// Lowers HMod. Integer32 operands use LModI, taking a cheaper constant
+// right-operand form when the divisor is a power of two. An environment is
+// attached only when the modulo can deoptimize: the divisor may be zero, or
+// a -0 result must bail out. Tagged operands go through the generic binary
+// op stub; doubles through the double arithmetic helper.
+LInstruction* LChunkBuilder::DoMod(HMod* hmod) {
+  HValue* hleft = hmod->left();
+  HValue* hright = hmod->right();
+
+  // TODO(jbramley): Add smi support.
+  if (hmod->representation().IsInteger32()) {
+    ASSERT(hleft->representation().IsInteger32());
+    // Fixed: this assertion previously re-checked hleft (copy-paste typo);
+    // it must validate the right operand's representation.
+    ASSERT(hright->representation().IsInteger32());
+    LOperand* left_op;
+    LOperand* right_op;
+
+    if (hmod->RightIsPowerOf2()) {
+      left_op = UseRegisterAtStart(hleft);
+      right_op = UseConstant(hright);
+    } else {
+      right_op = UseRegister(hright);
+      left_op = UseRegister(hleft);
+    }
+
+    LModI* lmod = new(zone()) LModI(left_op, right_op);
+
+    // Deopt is possible for a zero divisor, or when -0 must be distinguished
+    // (negative dividend with a result of zero).
+    if (hmod->right()->CanBeZero() ||
+        (hmod->CheckFlag(HValue::kBailoutOnMinusZero) &&
+         hmod->left()->CanBeNegative() && hmod->CanBeZero())) {
+      AssignEnvironment(lmod);
+    }
+    return DefineAsRegister(lmod);
+
+  } else if (hmod->representation().IsSmiOrTagged()) {
+    return DoArithmeticT(Token::MOD, hmod);
+  } else {
+    return DoArithmeticD(Token::MOD, hmod);
+  }
+}
+
+
+// Lowers HMul. For int32/smi, tries the constant-multiplier form first
+// (LMulConstIS handles a restricted constant set, see below), otherwise
+// falls back to LMulI/LMulS. Environments are attached when overflow or a
+// -0 result can deoptimize. Doubles/tagged use the arithmetic helpers.
+LInstruction* LChunkBuilder::DoMul(HMul* instr) {
+  if (instr->representation().IsSmiOrInteger32()) {
+    ASSERT(instr->left()->representation().Equals(instr->representation()));
+    ASSERT(instr->right()->representation().Equals(instr->representation()));
+
+    bool can_overflow = instr->CheckFlag(HValue::kCanOverflow);
+    bool bailout_on_minus_zero = instr->CheckFlag(HValue::kBailoutOnMinusZero);
+    bool needs_environment = can_overflow || bailout_on_minus_zero;
+
+    HValue* least_const = instr->BetterLeftOperand();
+    HValue* most_const = instr->BetterRightOperand();
+
+    LOperand* left = UseRegisterAtStart(least_const);
+
+    // LMulConstI can handle a subset of constants:
+    //  With support for overflow detection:
+    //    -1, 0, 1, 2
+    //  Without support for overflow detection:
+    //    2^n, -(2^n)
+    //    2^n + 1, -(2^n - 1)
+    if (most_const->IsConstant()) {
+      int32_t constant = HConstant::cast(most_const)->Integer32Value();
+      int32_t constant_abs = (constant >= 0) ? constant : -constant;
+
+      if (((constant >= -1) && (constant <= 2)) ||
+          (!can_overflow && (IsPowerOf2(constant_abs) ||
+                             IsPowerOf2(constant_abs + 1) ||
+                             IsPowerOf2(constant_abs - 1)))) {
+        LConstantOperand* right = UseConstant(most_const);
+        LMulConstIS* mul = new(zone()) LMulConstIS(left, right);
+        if (needs_environment) AssignEnvironment(mul);
+        return DefineAsRegister(mul);
+      }
+    }
+
+    // LMulI/S can handle all cases, but it requires that a register is
+    // allocated for the second operand.
+    LInstruction* result;
+    if (instr->representation().IsSmi()) {
+      // TODO(jbramley/rmcilroy): Fix LMulS so we can UseRegisterAtStart here.
+      LOperand* right = UseRegister(most_const);
+      result = DefineAsRegister(new(zone()) LMulS(left, right));
+    } else {
+      LOperand* right = UseRegisterAtStart(most_const);
+      result = DefineAsRegister(new(zone()) LMulI(left, right));
+    }
+    if (needs_environment) AssignEnvironment(result);
+    return result;
+  } else if (instr->representation().IsDouble()) {
+    return DoArithmeticD(Token::MUL, instr);
+  } else {
+    return DoArithmeticT(Token::MUL, instr);
+  }
+}
+
+
+// On-stack-replacement entry point; records the AST id for frame rebuild.
+LInstruction* LChunkBuilder::DoOsrEntry(HOsrEntry* instr) {
+  ASSERT(argument_count_ == 0);
+  allocator_->MarkAsOsrEntry();
+  current_block_->last_environment()->set_ast_id(instr->ast_id());
+  return AssignEnvironment(new(zone()) LOsrEntry);
+}
+
+
+// Function parameter: either a stack slot (normal functions) or a fixed
+// register dictated by the code stub's interface descriptor.
+LInstruction* LChunkBuilder::DoParameter(HParameter* instr) {
+  LParameter* result = new(zone()) LParameter;
+  if (instr->kind() == HParameter::STACK_PARAMETER) {
+    int spill_index = chunk_->GetParameterStackSlot(instr->index());
+    return DefineAsSpilled(result, spill_index);
+  } else {
+    ASSERT(info()->IsStub());
+    CodeStubInterfaceDescriptor* descriptor =
+        info()->code_stub()->GetInterfaceDescriptor(info()->isolate());
+    int index = static_cast<int>(instr->index());
+    Register reg = descriptor->GetParameterRegister(index);
+    return DefineFixed(result, reg);
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoPower(HPower* instr) {
+  ASSERT(instr->representation().IsDouble());
+  // We call a C function for double power. It can't trigger a GC.
+  // We need to use fixed result register for the call.
+  Representation exponent_type = instr->right()->representation();
+  ASSERT(instr->left()->representation().IsDouble());
+  LOperand* left = UseFixedDouble(instr->left(), d0);
+  // The exponent register depends on its representation: int32, double, or
+  // tagged each use a different fixed register expected by the stub.
+  LOperand* right = exponent_type.IsInteger32()
+      ? UseFixed(instr->right(), x12)
+      : exponent_type.IsDouble()
+          ? UseFixedDouble(instr->right(), d1)
+          : UseFixed(instr->right(), x11);
+  LPower* result = new(zone()) LPower(left, right);
+  return MarkAsCall(DefineFixedDouble(result, d0),
+                    instr,
+                    CAN_DEOPTIMIZE_EAGERLY);
+}
+
+
+LInstruction* LChunkBuilder::DoPushArgument(HPushArgument* instr) {
+  LOperand* argument = UseRegister(instr->argument());
+  return new(zone()) LPushArgument(argument);
+}
+
+
+// Materializes a RegExp literal; a runtime call returning in x0.
+LInstruction* LChunkBuilder::DoRegExpLiteral(HRegExpLiteral* instr) {
+  LOperand* context = UseFixed(instr->context(), cp);
+  return MarkAsCall(
+      DefineFixed(new(zone()) LRegExpLiteral(context), x0), instr);
+}
+
+
+// Function return; the value is fixed in x0, and stubs additionally need
+// the context in cp.
+LInstruction* LChunkBuilder::DoReturn(HReturn* instr) {
+  LOperand* context = info()->IsStub()
+      ? UseFixed(instr->context(), cp)
+      : NULL;
+  LOperand* parameter_count = UseRegisterOrConstant(instr->parameter_count());
+  return new(zone()) LReturn(UseFixed(instr->value(), x0), context,
+                             parameter_count);
+}
+
+
+LInstruction* LChunkBuilder::DoSeqStringGetChar(HSeqStringGetChar* instr) {
+  // TODO(all): Use UseRegisterAtStart and UseRegisterOrConstantAtStart here.
+  // We cannot do it now because the debug code in the implementation changes
+  // temp.
+  LOperand* string = UseRegister(instr->string());
+  LOperand* index = UseRegisterOrConstant(instr->index());
+  LOperand* temp = TempRegister();
+  LSeqStringGetChar* result =
+      new(zone()) LSeqStringGetChar(string, index, temp);
+  return DefineAsRegister(result);
+}
+
+
+// Writes a character into a sequential string. Debug builds need extra
+// operands (register index, fixed context) for the verification code.
+LInstruction* LChunkBuilder::DoSeqStringSetChar(HSeqStringSetChar* instr) {
+  LOperand* string = UseRegister(instr->string());
+  LOperand* index = FLAG_debug_code
+      ? UseRegister(instr->index())
+      : UseRegisterOrConstant(instr->index());
+  LOperand* value = UseRegister(instr->value());
+  LOperand* context = FLAG_debug_code ? UseFixed(instr->context(), cp) : NULL;
+  LOperand* temp = TempRegister();
+  LSeqStringSetChar* result =
+      new(zone()) LSeqStringSetChar(context, string, index, value, temp);
+  return DefineAsRegister(result);
+}
+
+
+// Shared lowering for ROR/SAR/SHL/SHR. Tagged operands go to the generic
+// stub. The only deoptimizing case is a logical shift right by 0 whose
+// uint32 result cannot be represented as an int32.
+LInstruction* LChunkBuilder::DoShift(Token::Value op,
+                                     HBitwiseBinaryOperation* instr) {
+  if (instr->representation().IsTagged()) {
+    return DoArithmeticT(op, instr);
+  }
+
+  ASSERT(instr->representation().IsInteger32() ||
+         instr->representation().IsSmi());
+  ASSERT(instr->left()->representation().Equals(instr->representation()));
+  ASSERT(instr->right()->representation().Equals(instr->representation()));
+
+  LOperand* left = instr->representation().IsSmi()
+      ? UseRegister(instr->left())
+      : UseRegisterAtStart(instr->left());
+
+  HValue* right_value = instr->right();
+  LOperand* right = NULL;
+  LOperand* temp = NULL;
+  // Shift amounts are taken modulo 32, matching the hardware behaviour.
+  int constant_value = 0;
+  if (right_value->IsConstant()) {
+    right = UseConstant(right_value);
+    HConstant* constant = HConstant::cast(right_value);
+    constant_value = constant->Integer32Value() & 0x1f;
+  } else {
+    right = UseRegisterAtStart(right_value);
+    if (op == Token::ROR) {
+      temp = TempRegister();
+    }
+  }
+
+  // Shift operations can only deoptimize if we do a logical shift by 0 and the
+  // result cannot be truncated to int32.
+  bool does_deopt = false;
+  if ((op == Token::SHR) && (constant_value == 0)) {
+    if (FLAG_opt_safe_uint32_operations) {
+      does_deopt = !instr->CheckFlag(HInstruction::kUint32);
+    } else {
+      does_deopt = !instr->CheckUsesForFlag(HValue::kTruncatingToInt32);
+    }
+  }
+
+  LInstruction* result;
+  if (instr->representation().IsInteger32()) {
+    result = DefineAsRegister(new(zone()) LShiftI(op, left, right, does_deopt));
+  } else {
+    ASSERT(instr->representation().IsSmi());
+    result = DefineAsRegister(
+        new(zone()) LShiftS(op, left, right, temp, does_deopt));
+  }
+
+  return does_deopt ? AssignEnvironment(result) : result;
+}
+
+
+LInstruction* LChunkBuilder::DoRor(HRor* instr) {
+  return DoShift(Token::ROR, instr);
+}
+
+
+LInstruction* LChunkBuilder::DoSar(HSar* instr) {
+  return DoShift(Token::SAR, instr);
+}
+
+
+LInstruction* LChunkBuilder::DoShl(HShl* instr) {
+  return DoShift(Token::SHL, instr);
+}
+
+
+LInstruction* LChunkBuilder::DoShr(HShr* instr) {
+  return DoShift(Token::SHR, instr);
+}
+
+
+// Replays the simulated environment; emits an LLazyBailout only when a
+// previous instruction is waiting for its lazy-deopt environment.
+LInstruction* LChunkBuilder::DoSimulate(HSimulate* instr) {
+  instr->ReplayEnvironment(current_block_->last_environment());
+
+  // If there is an instruction pending deoptimization environment create a
+  // lazy bailout instruction to capture the environment.
+  if (pending_deoptimization_ast_id_ == instr->ast_id()) {
+    LInstruction* result = new(zone()) LLazyBailout;
+    result = AssignEnvironment(result);
+    // Store the lazy deopt environment with the instruction if needed. Right
+    // now it is only used for LInstanceOfKnownGlobal.
+    instruction_pending_deoptimization_environment_->
+        SetDeferredLazyDeoptimizationEnvironment(result->environment());
+    instruction_pending_deoptimization_environment_ = NULL;
+    pending_deoptimization_ast_id_ = BailoutId::None();
+    return result;
+  }
+
+  return NULL;
+}
+
+
+// Stack overflow / interrupt check: a call at function entry, an
+// environment-carrying check on backward branches.
+LInstruction* LChunkBuilder::DoStackCheck(HStackCheck* instr) {
+  if (instr->is_function_entry()) {
+    LOperand* context = UseFixed(instr->context(), cp);
+    return MarkAsCall(new(zone()) LStackCheck(context), instr);
+  } else {
+    ASSERT(instr->is_backwards_branch());
+    LOperand* context = UseAny(instr->context());
+    return AssignEnvironment(
+        AssignPointerMap(new(zone()) LStackCheck(context)));
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoStoreCodeEntry(HStoreCodeEntry* instr) {
+  LOperand* function = UseRegister(instr->function());
+  LOperand* code_object = UseRegisterAtStart(instr->code_object());
+  LOperand* temp = TempRegister();
+  return new(zone()) LStoreCodeEntry(function, code_object, temp);
+}
+
+
+// Stores into a context slot. When a write barrier is needed, the barrier
+// stub clobbers its inputs, so use clobbering constraints.
+LInstruction* LChunkBuilder::DoStoreContextSlot(HStoreContextSlot* instr) {
+  LOperand* temp = TempRegister();
+  LOperand* context;
+  LOperand* value;
+  if (instr->NeedsWriteBarrier()) {
+    // TODO(all): Replace these constraints when RecordWriteStub has been
+    // rewritten.
+    context = UseRegisterAndClobber(instr->context());
+    value = UseRegisterAndClobber(instr->value());
+  } else {
+    context = UseRegister(instr->context());
+    value = UseRegister(instr->value());
+  }
+  LInstruction* result = new(zone()) LStoreContextSlot(context, value, temp);
+  return instr->RequiresHoleCheck() ? AssignEnvironment(result) : result;
+}
+
+
+// Stores to a global property cell; deopts on the hole when required.
+LInstruction* LChunkBuilder::DoStoreGlobalCell(HStoreGlobalCell* instr) {
+  LOperand* value = UseRegister(instr->value());
+  if (instr->RequiresHoleCheck()) {
+    return AssignEnvironment(new(zone()) LStoreGlobalCell(value,
+                                                          TempRegister(),
+                                                          TempRegister()));
+  } else {
+    return new(zone()) LStoreGlobalCell(value, TempRegister(), NULL);
+  }
+}
+
+
+// Lowers keyed element stores, split by target: external/typed arrays,
+// fixed double arrays, and fixed (tagged) arrays. A needed write barrier
+// forces clobbering operand constraints because RecordWrite clobbers its
+// registers.
+LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
+  LOperand* temp = NULL;
+  LOperand* elements = NULL;
+  LOperand* val = NULL;
+  LOperand* key = NULL;
+
+  if (!instr->is_typed_elements() &&
+      instr->value()->representation().IsTagged() &&
+      instr->NeedsWriteBarrier()) {
+    // RecordWrite() will clobber all registers.
+    elements = UseRegisterAndClobber(instr->elements());
+    val = UseRegisterAndClobber(instr->value());
+    key = UseRegisterAndClobber(instr->key());
+  } else {
+    elements = UseRegister(instr->elements());
+    val = UseRegister(instr->value());
+    key = UseRegisterOrConstantAtStart(instr->key());
+  }
+
+  if (instr->is_typed_elements()) {
+    ASSERT((instr->value()->representation().IsInteger32() &&
+            !IsDoubleOrFloatElementsKind(instr->elements_kind())) ||
+           (instr->value()->representation().IsDouble() &&
+            IsDoubleOrFloatElementsKind(instr->elements_kind())));
+    ASSERT((instr->is_fixed_typed_array() &&
+            instr->elements()->representation().IsTagged()) ||
+           (instr->is_external() &&
+            instr->elements()->representation().IsExternal()));
+    temp = instr->key()->IsConstant() ? NULL : TempRegister();
+    return new(zone()) LStoreKeyedExternal(elements, key, val, temp);
+
+  } else if (instr->value()->representation().IsDouble()) {
+    ASSERT(instr->elements()->representation().IsTagged());
+
+    // The constraint used here is UseRegister, even though the StoreKeyed
+    // instruction may canonicalize the value in the register if it is a NaN.
+    temp = TempRegister();
+    return new(zone()) LStoreKeyedFixedDouble(elements, key, val, temp);
+
+  } else {
+    ASSERT(instr->elements()->representation().IsTagged());
+    ASSERT(instr->value()->representation().IsSmiOrTagged() ||
+           instr->value()->representation().IsInteger32());
+
+    temp = TempRegister();
+    return new(zone()) LStoreKeyedFixed(elements, key, val, temp);
+  }
+}
+
+
+// Generic keyed store via the KeyedStoreIC calling convention.
+LInstruction* LChunkBuilder::DoStoreKeyedGeneric(HStoreKeyedGeneric* instr) {
+  LOperand* context = UseFixed(instr->context(), cp);
+  LOperand* object = UseFixed(instr->object(), x2);
+  LOperand* key = UseFixed(instr->key(), x1);
+  LOperand* value = UseFixed(instr->value(), x0);
+
+  ASSERT(instr->object()->representation().IsTagged());
+  ASSERT(instr->key()->representation().IsTagged());
+  ASSERT(instr->value()->representation().IsTagged());
+
+  return MarkAsCall(
+      new(zone()) LStoreKeyedGeneric(context, object, key, value), instr);
+}
+
+
+// Stores a named in-object/backing-store field. Deopts when heap-object
+// field tracking requires a type check the value's type can't prove.
+LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
+  // TODO(jbramley): Optimize register usage in this instruction. For now, it
+  // allocates everything that it might need because it keeps changing in the
+  // merge and keeping it valid is time-consuming.
+
+  // TODO(jbramley): It might be beneficial to allow value to be a constant in
+  // some cases. x64 makes use of this with FLAG_track_fields, for example.
+
+  LOperand* object = UseRegister(instr->object());
+  LOperand* value = UseRegisterAndClobber(instr->value());
+  LOperand* temp0 = TempRegister();
+  LOperand* temp1 = TempRegister();
+
+  LStoreNamedField* result =
+      new(zone()) LStoreNamedField(object, value, temp0, temp1);
+  if (FLAG_track_heap_object_fields &&
+      instr->field_representation().IsHeapObject() &&
+      !instr->value()->type().IsHeapObject()) {
+    return AssignEnvironment(result);
+  }
+  return result;
+}
+
+
+// Generic named store via the StoreIC calling convention.
+LInstruction* LChunkBuilder::DoStoreNamedGeneric(HStoreNamedGeneric* instr) {
+  LOperand* context = UseFixed(instr->context(), cp);
+  LOperand* object = UseFixed(instr->object(), x1);
+  LOperand* value = UseFixed(instr->value(), x0);
+  LInstruction* result = new(zone()) LStoreNamedGeneric(context, object, value);
+  return MarkAsCall(result, instr);
+}
+
+
+// String concatenation via the StringAdd stub; result in x0.
+LInstruction* LChunkBuilder::DoStringAdd(HStringAdd* instr) {
+  LOperand* context = UseFixed(instr->context(), cp);
+  LOperand* left = UseFixed(instr->left(), x1);
+  LOperand* right = UseFixed(instr->right(), x0);
+
+  LStringAdd* result = new(zone()) LStringAdd(context, left, right);
+  return MarkAsCall(DefineFixed(result, x0), instr);
+}
+
+
+// charCodeAt: may call the runtime (pointer map) and deoptimize, hence
+// both an environment and a pointer map are assigned.
+LInstruction* LChunkBuilder::DoStringCharCodeAt(HStringCharCodeAt* instr) {
+  LOperand* string = UseRegisterAndClobber(instr->string());
+  LOperand* index = UseRegisterAndClobber(instr->index());
+  LOperand* context = UseAny(instr->context());
+  LStringCharCodeAt* result =
+      new(zone()) LStringCharCodeAt(context, string, index);
+  return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
+}
+
+
+LInstruction* LChunkBuilder::DoStringCharFromCode(HStringCharFromCode* instr) {
+  // TODO(all) use at start and remove assert in codegen
+  LOperand* char_code = UseRegister(instr->value());
+  LOperand* context = UseAny(instr->context());
+  LStringCharFromCode* result =
+      new(zone()) LStringCharFromCode(context, char_code);
+  return AssignPointerMap(DefineAsRegister(result));
+}
+
+
+// Relational string comparison; implemented as a call with fixed registers.
+LInstruction* LChunkBuilder::DoStringCompareAndBranch(
+    HStringCompareAndBranch* instr) {
+  ASSERT(instr->left()->representation().IsTagged());
+  ASSERT(instr->right()->representation().IsTagged());
+  LOperand* context = UseFixed(instr->context(), cp);
+  LOperand* left = UseFixed(instr->left(), x1);
+  LOperand* right = UseFixed(instr->right(), x0);
+  LStringCompareAndBranch* result =
+      new(zone()) LStringCompareAndBranch(context, left, right);
+  return MarkAsCall(result, instr);
+}
+
+
+// Lowers HSub. A zero left operand is kept as a constant so codegen can use
+// a plain negate. Overflow attaches an environment; doubles and tagged
+// values use the arithmetic helpers.
+LInstruction* LChunkBuilder::DoSub(HSub* instr) {
+  if (instr->representation().IsSmiOrInteger32()) {
+    ASSERT(instr->left()->representation().Equals(instr->representation()));
+    ASSERT(instr->right()->representation().Equals(instr->representation()));
+    LOperand *left;
+    if (instr->left()->IsConstant() &&
+        (HConstant::cast(instr->left())->Integer32Value() == 0)) {
+      left = UseConstant(instr->left());
+    } else {
+      left = UseRegisterAtStart(instr->left());
+    }
+    LOperand* right = UseRegisterOrConstantAtStart(instr->right());
+    LInstruction* result = instr->representation().IsSmi() ?
+        DefineAsRegister(new(zone()) LSubS(left, right)) :
+        DefineAsRegister(new(zone()) LSubI(left, right));
+    if (instr->CheckFlag(HValue::kCanOverflow)) {
+      result = AssignEnvironment(result);
+    }
+    return result;
+  } else if (instr->representation().IsDouble()) {
+    return DoArithmeticD(Token::SUB, instr);
+  } else {
+    return DoArithmeticT(Token::SUB, instr);
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoThisFunction(HThisFunction* instr) {
+  if (instr->HasNoUses()) {
+    return NULL;
+  } else {
+    return DefineAsRegister(new(zone()) LThisFunction);
+  }
+}
+
+
+// Converts an object to fast-properties mode; a runtime call via x0.
+LInstruction* LChunkBuilder::DoToFastProperties(HToFastProperties* instr) {
+  LOperand* object = UseFixed(instr->value(), x0);
+  LToFastProperties* result = new(zone()) LToFastProperties(object);
+  return MarkAsCall(DefineFixed(result, x0), instr);
+}
+
+
+// Elements-kind transition: a simple map change needs only temps, while a
+// full transition may call the runtime (hence context and pointer map).
+LInstruction* LChunkBuilder::DoTransitionElementsKind(
+    HTransitionElementsKind* instr) {
+  LOperand* object = UseRegister(instr->object());
+  if (IsSimpleMapChangeTransition(instr->from_kind(), instr->to_kind())) {
+    LTransitionElementsKind* result =
+        new(zone()) LTransitionElementsKind(object, NULL,
+                                            TempRegister(), TempRegister());
+    return result;
+  } else {
+    LOperand* context = UseFixed(instr->context(), cp);
+    LTransitionElementsKind* result =
+        new(zone()) LTransitionElementsKind(object, context, TempRegister());
+    return AssignPointerMap(result);
+  }
+}
+
+
+// Deopts if an allocation memento follows the object (array transition
+// tracking); always needs an environment.
+LInstruction* LChunkBuilder::DoTrapAllocationMemento(
+    HTrapAllocationMemento* instr) {
+  LOperand* object = UseRegister(instr->object());
+  LOperand* temp1 = TempRegister();
+  LOperand* temp2 = TempRegister();
+  LTrapAllocationMemento* result =
+      new(zone()) LTrapAllocationMemento(object, temp1, temp2);
+  return AssignEnvironment(result);
+}
+
+
+LInstruction* LChunkBuilder::DoTypeof(HTypeof* instr) {
+  LOperand* context = UseFixed(instr->context(), cp);
+  // TODO(jbramley): In ARM, this uses UseFixed to force the input to x0.
+  // However, LCodeGen::DoTypeof just pushes it to the stack (for CallRuntime)
+  // anyway, so the input doesn't have to be in x0. We might be able to improve
+  // the ARM back-end a little by relaxing this restriction.
+  LTypeof* result =
+      new(zone()) LTypeof(context, UseRegisterAtStart(instr->value()));
+  return MarkAsCall(DefineFixed(result, x0), instr);
+}
+
+
+// Branch on `typeof value == "literal"`; elided when statically known.
+LInstruction* LChunkBuilder::DoTypeofIsAndBranch(HTypeofIsAndBranch* instr) {
+  LInstruction* goto_instr = CheckElideControlInstruction(instr);
+  if (goto_instr != NULL) return goto_instr;
+
+  // We only need temp registers in some cases, but we can't dereference the
+  // instr->type_literal() handle to test that here.
+  LOperand* temp1 = TempRegister();
+  LOperand* temp2 = TempRegister();
+
+  return new(zone()) LTypeofIsAndBranch(
+      UseRegister(instr->value()), temp1, temp2);
+}
+
+
+// Lowers HUnaryMathOperation, dispatching on the math operation kind.
+// Register constraints (fixed inputs, temps, environments) differ per case.
+LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) {
+  switch (instr->op()) {
+    case kMathAbs: {
+      Representation r = instr->representation();
+      if (r.IsTagged()) {
+        // The tagged case might need to allocate a HeapNumber for the result,
+        // so it is handled by a separate LInstruction.
+        LOperand* context = UseFixed(instr->context(), cp);
+        LOperand* input = UseRegister(instr->value());
+        LOperand* temp1 = TempRegister();
+        LOperand* temp2 = TempRegister();
+        LOperand* temp3 = TempRegister();
+        LMathAbsTagged* result =
+            new(zone()) LMathAbsTagged(context, input, temp1, temp2, temp3);
+        return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
+      } else {
+        LOperand* input = UseRegisterAtStart(instr->value());
+        LMathAbs* result = new(zone()) LMathAbs(input);
+        if (r.IsDouble()) {
+          // The Double case can never fail so it doesn't need an environment.
+          return DefineAsRegister(result);
+        } else {
+          ASSERT(r.IsInteger32() || r.IsSmi());
+          // The Integer32 and Smi cases need an environment because they can
+          // deoptimize on minimum representable number.
+          return AssignEnvironment(DefineAsRegister(result));
+        }
+      }
+    }
+    case kMathExp: {
+      ASSERT(instr->representation().IsDouble());
+      ASSERT(instr->value()->representation().IsDouble());
+      LOperand* input = UseRegister(instr->value());
+      // TODO(all): Implement TempFPRegister.
+      LOperand* double_temp1 = FixedTemp(d24);   // This was chosen arbitrarily.
+      LOperand* temp1 = TempRegister();
+      LOperand* temp2 = TempRegister();
+      LOperand* temp3 = TempRegister();
+      LMathExp* result = new(zone()) LMathExp(input, double_temp1,
+                                              temp1, temp2, temp3);
+      return DefineAsRegister(result);
+    }
+    case kMathFloor: {
+      ASSERT(instr->representation().IsInteger32());
+      ASSERT(instr->value()->representation().IsDouble());
+      // TODO(jbramley): A64 can easily handle a double argument with frintm,
+      // but we're never asked for it here. At the moment, we fall back to the
+      // runtime if the result doesn't fit, like the other architectures.
+      LOperand* input = UseRegisterAtStart(instr->value());
+      LMathFloor* result = new(zone()) LMathFloor(input);
+      return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
+    }
+    case kMathLog: {
+      ASSERT(instr->representation().IsDouble());
+      ASSERT(instr->value()->representation().IsDouble());
+      LOperand* input = UseFixedDouble(instr->value(), d0);
+      LMathLog* result = new(zone()) LMathLog(input);
+      return MarkAsCall(DefineFixedDouble(result, d0), instr);
+    }
+    case kMathPowHalf: {
+      ASSERT(instr->representation().IsDouble());
+      ASSERT(instr->value()->representation().IsDouble());
+      LOperand* input = UseRegister(instr->value());
+      return DefineAsRegister(new(zone()) LMathPowHalf(input));
+    }
+    case kMathRound: {
+      ASSERT(instr->representation().IsInteger32());
+      ASSERT(instr->value()->representation().IsDouble());
+      // TODO(jbramley): As with kMathFloor, we can probably handle double
+      // results fairly easily, but we are never asked for them.
+      LOperand* input = UseRegister(instr->value());
+      LOperand* temp = FixedTemp(d24); // Chosen arbitrarily.
+      LMathRound* result = new(zone()) LMathRound(input, temp);
+      return AssignEnvironment(DefineAsRegister(result));
+    }
+    case kMathSqrt: {
+      ASSERT(instr->representation().IsDouble());
+      ASSERT(instr->value()->representation().IsDouble());
+      LOperand* input = UseRegisterAtStart(instr->value());
+      return DefineAsRegister(new(zone()) LMathSqrt(input));
+    }
+    default:
+      UNREACHABLE();
+      return NULL;
+  }
+}
+
+
+// Lowers HUnknownOSRValue by pinning it to a spill slot that mirrors the
+// value's location in the unoptimized frame.
+LInstruction* LChunkBuilder::DoUnknownOSRValue(HUnknownOSRValue* instr) {
+  // Use an index that corresponds to the location in the unoptimized frame,
+  // which the optimized frame will subsume.
+  int env_index = instr->index();
+  int spill_index = 0;
+  if (instr->environment()->is_parameter_index(env_index)) {
+    spill_index = chunk_->GetParameterStackSlot(env_index);
+  } else {
+    spill_index = env_index - instr->environment()->first_local_index();
+    if (spill_index > LUnallocated::kMaxFixedSlotIndex) {
+      Abort(kTooManySpillSlotsNeededForOSR);
+      // Abort() has flagged the compilation as failed; continue with a safe
+      // dummy index so that chunk building can finish cleanly.
+      spill_index = 0;
+    }
+  }
+  return DefineAsSpilled(new(zone()) LUnknownOSRValue, spill_index);
+}
+
+
+// HUseConst produces no Lithium instruction; returning NULL emits nothing.
+LInstruction* LChunkBuilder::DoUseConst(HUseConst* instr) {
+  return NULL;
+}
+
+
+// Lowers HForInPrepareMap as a call (context in cp, result in x0) that may
+// deoptimize eagerly.
+LInstruction* LChunkBuilder::DoForInPrepareMap(HForInPrepareMap* instr) {
+  LOperand* context = UseFixed(instr->context(), cp);
+  // Assign object to a fixed register different from those already used in
+  // LForInPrepareMap.
+  LOperand* object = UseFixed(instr->enumerable(), x0);
+  LForInPrepareMap* result = new(zone()) LForInPrepareMap(context, object);
+  return MarkAsCall(DefineFixed(result, x0), instr, CAN_DEOPTIMIZE_EAGERLY);
+}
+
+
+// Lowers HForInCacheArray; the lookup can deoptimize, so an environment is
+// assigned.
+LInstruction* LChunkBuilder::DoForInCacheArray(HForInCacheArray* instr) {
+  LOperand* map = UseRegister(instr->map());
+  return AssignEnvironment(DefineAsRegister(new(zone()) LForInCacheArray(map)));
+}
+
+
+// Lowers HCheckMapValue; the check can deoptimize (environment assigned) and
+// needs one scratch register.
+LInstruction* LChunkBuilder::DoCheckMapValue(HCheckMapValue* instr) {
+  LOperand* value = UseRegisterAtStart(instr->value());
+  LOperand* map = UseRegister(instr->map());
+  LOperand* temp = TempRegister();
+  return AssignEnvironment(new(zone()) LCheckMapValue(value, map, temp));
+}
+
+
+// Lowers HLoadFieldByIndex into a register-defining LLoadFieldByIndex.
+LInstruction* LChunkBuilder::DoLoadFieldByIndex(HLoadFieldByIndex* instr) {
+  LOperand* object = UseRegisterAtStart(instr->object());
+  LOperand* index = UseRegister(instr->index());
+  return DefineAsRegister(new(zone()) LLoadFieldByIndex(object, index));
+}
+
+
+// Lowers HWrapReceiver; the wrap can deoptimize, so an environment is
+// assigned.
+LInstruction* LChunkBuilder::DoWrapReceiver(HWrapReceiver* instr) {
+  LOperand* receiver = UseRegister(instr->receiver());
+  LOperand* function = UseRegister(instr->function());
+  LWrapReceiver* result = new(zone()) LWrapReceiver(receiver, function);
+  return AssignEnvironment(DefineAsRegister(result));
+}
+
+
+} } // namespace v8::internal
--- /dev/null
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_A64_LITHIUM_A64_H_
+#define V8_A64_LITHIUM_A64_H_
+
+#include "hydrogen.h"
+#include "lithium-allocator.h"
+#include "lithium.h"
+#include "safepoint-table.h"
+#include "utils.h"
+
+namespace v8 {
+namespace internal {
+
+// Forward declarations.
+class LCodeGen;
+
+// Complete list of concrete Lithium instruction classes in this back-end.
+// Expanded below to generate opcode enum values, Is##type() predicates and
+// per-instruction dispatch.
+#define LITHIUM_CONCRETE_INSTRUCTION_LIST(V) \
+  V(AccessArgumentsAt)                       \
+  V(AddE)                                    \
+  V(AddI)                                    \
+  V(AddS)                                    \
+  V(Allocate)                                \
+  V(ApplyArguments)                          \
+  V(ArgumentsElements)                       \
+  V(ArgumentsLength)                         \
+  V(ArithmeticD)                             \
+  V(ArithmeticT)                             \
+  V(BitI)                                    \
+  V(BitS)                                    \
+  V(BoundsCheck)                             \
+  V(Branch)                                  \
+  V(CallFunction)                            \
+  V(CallJSFunction)                          \
+  V(CallNew)                                 \
+  V(CallNewArray)                            \
+  V(CallRuntime)                             \
+  V(CallStub)                                \
+  V(CallWithDescriptor)                      \
+  V(CheckInstanceType)                       \
+  V(CheckMapValue)                           \
+  V(CheckMaps)                               \
+  V(CheckNonSmi)                             \
+  V(CheckSmi)                                \
+  V(CheckValue)                              \
+  V(ClampDToUint8)                           \
+  V(ClampIToUint8)                           \
+  V(ClampTToUint8)                           \
+  V(ClassOfTestAndBranch)                    \
+  V(CmpHoleAndBranchD)                       \
+  V(CmpHoleAndBranchT)                       \
+  V(CmpMapAndBranch)                         \
+  V(CmpObjectEqAndBranch)                    \
+  V(CmpT)                                    \
+  V(CompareMinusZeroAndBranch)               \
+  V(CompareNumericAndBranch)                 \
+  V(ConstantD)                               \
+  V(ConstantE)                               \
+  V(ConstantI)                               \
+  V(ConstantS)                               \
+  V(ConstantT)                               \
+  V(Context)                                 \
+  V(DateField)                               \
+  V(DebugBreak)                              \
+  V(DeclareGlobals)                          \
+  V(Deoptimize)                              \
+  V(DivI)                                    \
+  V(DoubleToIntOrSmi)                        \
+  V(Drop)                                    \
+  V(Dummy)                                   \
+  V(DummyUse)                                \
+  V(ForInCacheArray)                         \
+  V(ForInPrepareMap)                         \
+  V(FunctionLiteral)                         \
+  V(GetCachedArrayIndex)                     \
+  V(Goto)                                    \
+  V(HasCachedArrayIndexAndBranch)            \
+  V(HasInstanceTypeAndBranch)                \
+  V(InnerAllocatedObject)                    \
+  V(InstanceOf)                              \
+  V(InstanceOfKnownGlobal)                   \
+  V(InstructionGap)                          \
+  V(Integer32ToDouble)                       \
+  V(Integer32ToSmi)                          \
+  V(InvokeFunction)                          \
+  V(IsConstructCallAndBranch)                \
+  V(IsObjectAndBranch)                       \
+  V(IsSmiAndBranch)                          \
+  V(IsStringAndBranch)                       \
+  V(IsUndetectableAndBranch)                 \
+  V(Label)                                   \
+  V(LazyBailout)                             \
+  V(LoadContextSlot)                         \
+  V(LoadFieldByIndex)                        \
+  V(LoadFunctionPrototype)                   \
+  V(LoadGlobalCell)                          \
+  V(LoadGlobalGeneric)                       \
+  V(LoadKeyedExternal)                       \
+  V(LoadKeyedFixed)                          \
+  V(LoadKeyedFixedDouble)                    \
+  V(LoadKeyedGeneric)                        \
+  V(LoadNamedField)                          \
+  V(LoadNamedGeneric)                        \
+  V(LoadRoot)                                \
+  V(MapEnumLength)                           \
+  V(MathAbs)                                 \
+  V(MathAbsTagged)                           \
+  V(MathExp)                                 \
+  V(MathFloor)                               \
+  V(MathFloorOfDiv)                          \
+  V(MathLog)                                 \
+  V(MathMinMax)                              \
+  V(MathPowHalf)                             \
+  V(MathRound)                               \
+  V(MathSqrt)                                \
+  V(ModI)                                    \
+  V(MulConstIS)                              \
+  V(MulI)                                    \
+  V(MulS)                                    \
+  V(NumberTagD)                              \
+  V(NumberTagU)                              \
+  V(NumberUntagD)                            \
+  V(OsrEntry)                                \
+  V(Parameter)                               \
+  V(Power)                                   \
+  V(PushArgument)                            \
+  V(RegExpLiteral)                           \
+  V(Return)                                  \
+  V(SeqStringGetChar)                        \
+  V(SeqStringSetChar)                        \
+  V(ShiftI)                                  \
+  V(ShiftS)                                  \
+  V(SmiTag)                                  \
+  V(SmiUntag)                                \
+  V(StackCheck)                              \
+  V(StoreCodeEntry)                          \
+  V(StoreContextSlot)                        \
+  V(StoreGlobalCell)                         \
+  V(StoreKeyedExternal)                      \
+  V(StoreKeyedFixed)                         \
+  V(StoreKeyedFixedDouble)                   \
+  V(StoreKeyedGeneric)                       \
+  V(StoreNamedField)                         \
+  V(StoreNamedGeneric)                       \
+  V(StringAdd)                               \
+  V(StringCharCodeAt)                        \
+  V(StringCharFromCode)                      \
+  V(StringCompareAndBranch)                  \
+  V(SubI)                                    \
+  V(SubS)                                    \
+  V(TaggedToI)                               \
+  V(ThisFunction)                            \
+  V(ToFastProperties)                        \
+  V(TransitionElementsKind)                  \
+  V(TrapAllocationMemento)                   \
+  V(TruncateDoubleToIntOrSmi)                \
+  V(Typeof)                                  \
+  V(TypeofIsAndBranch)                       \
+  V(Uint32ToDouble)                          \
+  V(Uint32ToSmi)                             \
+  V(UnknownOSRValue)                         \
+  V(WrapReceiver)
+
+
+// Defines the boilerplate every concrete instruction needs: its opcode,
+// codegen hook, mnemonic string and a checked downcast.
+#define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic)              \
+  virtual Opcode opcode() const V8_FINAL V8_OVERRIDE {            \
+    return LInstruction::k##type;                                 \
+  }                                                               \
+  virtual void CompileToNative(LCodeGen* generator) V8_FINAL V8_OVERRIDE; \
+  virtual const char* Mnemonic() const V8_FINAL V8_OVERRIDE {     \
+    return mnemonic;                                              \
+  }                                                               \
+  static L##type* cast(LInstruction* instr) {                     \
+    ASSERT(instr->Is##type());                                    \
+    return reinterpret_cast<L##type*>(instr);                     \
+  }
+
+
+// Provides a typed accessor to the hydrogen instruction this Lithium
+// instruction was built from.
+#define DECLARE_HYDROGEN_ACCESSOR(type)           \
+  H##type* hydrogen() const {                     \
+    return H##type::cast(this->hydrogen_value()); \
+  }
+
+
+// Abstract base class for all Lithium instructions in this back-end. Tracks
+// the deoptimization environment, the safepoint pointer map, the originating
+// hydrogen value and whether the instruction is a call.
+class LInstruction : public ZoneObject {
+ public:
+  LInstruction()
+      : environment_(NULL),
+        hydrogen_value_(NULL),
+        bit_field_(IsCallBits::encode(false)) { }
+
+  virtual ~LInstruction() { }
+
+  virtual void CompileToNative(LCodeGen* generator) = 0;
+  virtual const char* Mnemonic() const = 0;
+  virtual void PrintTo(StringStream* stream);
+  virtual void PrintDataTo(StringStream* stream);
+  virtual void PrintOutputOperandTo(StringStream* stream);
+
+  enum Opcode {
+    // Declare a unique enum value for each instruction.
+#define DECLARE_OPCODE(type) k##type,
+    LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_OPCODE)
+    kNumberOfInstructions
+#undef DECLARE_OPCODE
+  };
+
+  virtual Opcode opcode() const = 0;
+
+  // Declare non-virtual type testers for all leaf IR classes.
+#define DECLARE_PREDICATE(type) \
+  bool Is##type() const { return opcode() == k##type; }
+  LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_PREDICATE)
+#undef DECLARE_PREDICATE
+
+  // Declare virtual predicates for instructions that don't have
+  // an opcode.
+  virtual bool IsGap() const { return false; }
+
+  virtual bool IsControl() const { return false; }
+
+  void set_environment(LEnvironment* env) { environment_ = env; }
+  LEnvironment* environment() const { return environment_; }
+  bool HasEnvironment() const { return environment_ != NULL; }
+
+  void set_pointer_map(LPointerMap* p) { pointer_map_.set(p); }
+  LPointerMap* pointer_map() const { return pointer_map_.get(); }
+  bool HasPointerMap() const { return pointer_map_.is_set(); }
+
+  void set_hydrogen_value(HValue* value) { hydrogen_value_ = value; }
+  HValue* hydrogen_value() const { return hydrogen_value_; }
+
+  virtual void SetDeferredLazyDeoptimizationEnvironment(LEnvironment* env) { }
+
+  void MarkAsCall() { bit_field_ = IsCallBits::update(bit_field_, true); }
+  bool IsCall() const { return IsCallBits::decode(bit_field_); }
+
+  // Interface to the register allocator and iterators.
+  bool ClobbersTemps() const { return IsCall(); }
+  bool ClobbersRegisters() const { return IsCall(); }
+  virtual bool ClobbersDoubleRegisters() const { return IsCall(); }
+  bool IsMarkedAsCall() const { return IsCall(); }
+
+  virtual bool HasResult() const = 0;
+  virtual LOperand* result() const = 0;
+
+  virtual int InputCount() = 0;
+  virtual LOperand* InputAt(int i) = 0;
+  virtual int TempCount() = 0;
+  virtual LOperand* TempAt(int i) = 0;
+
+  LOperand* FirstInput() { return InputAt(0); }
+  LOperand* Output() { return HasResult() ? result() : NULL; }
+
+  virtual bool HasInterestingComment(LCodeGen* gen) const { return true; }
+
+#ifdef DEBUG
+  void VerifyCall();
+#endif
+
+ private:
+  class IsCallBits: public BitField<bool, 0, 1> {};
+
+  LEnvironment* environment_;
+  SetOncePointer<LPointerMap> pointer_map_;
+  HValue* hydrogen_value_;
+  // Packed boolean flags; currently only IsCallBits is used.
+  int32_t bit_field_;
+};
+
+
+// R = number of result operands (0 or 1).
+// Provides the storage and accessors for the optional result operand.
+template<int R>
+class LTemplateResultInstruction : public LInstruction {
+ public:
+  // Allow 0 or 1 output operands.
+  STATIC_ASSERT(R == 0 || R == 1);
+  virtual bool HasResult() const V8_FINAL V8_OVERRIDE {
+    return (R != 0) && (result() != NULL);
+  }
+  void set_result(LOperand* operand) { results_[0] = operand; }
+  LOperand* result() const { return results_[0]; }
+
+ protected:
+  EmbeddedContainer<LOperand*, R> results_;
+};
+
+
+// R = number of result operands (0 or 1).
+// I = number of input operands.
+// T = number of temporary operands.
+// Adds fixed-size storage for inputs and temps; the iterator interface is
+// final so subclasses only declare named accessors into inputs_/temps_.
+template<int R, int I, int T>
+class LTemplateInstruction : public LTemplateResultInstruction<R> {
+ protected:
+  EmbeddedContainer<LOperand*, I> inputs_;
+  EmbeddedContainer<LOperand*, T> temps_;
+
+ private:
+  // Iterator support.
+  virtual int InputCount() V8_FINAL V8_OVERRIDE { return I; }
+  virtual LOperand* InputAt(int i) V8_FINAL V8_OVERRIDE { return inputs_[i]; }
+
+  virtual int TempCount() V8_FINAL V8_OVERRIDE { return T; }
+  virtual LOperand* TempAt(int i) V8_FINAL V8_OVERRIDE { return temps_[i]; }
+};
+
+
+// Placeholder for a value located in the unoptimized frame at OSR entry;
+// never worth an assembly comment.
+class LUnknownOSRValue V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+ public:
+  virtual bool HasInterestingComment(LCodeGen* gen) const V8_OVERRIDE {
+    return false;
+  }
+  DECLARE_CONCRETE_INSTRUCTION(UnknownOSRValue, "unknown-osr-value")
+};
+
+
+// Base class for instructions that end a block with a two-way branch.
+// True/false assembly labels are resolved lazily from the successor blocks.
+template<int I, int T>
+class LControlInstruction : public LTemplateInstruction<0, I, T> {
+ public:
+  LControlInstruction() : false_label_(NULL), true_label_(NULL) { }
+
+  virtual bool IsControl() const V8_FINAL V8_OVERRIDE { return true; }
+
+  int SuccessorCount() { return hydrogen()->SuccessorCount(); }
+  HBasicBlock* SuccessorAt(int i) { return hydrogen()->SuccessorAt(i); }
+
+  int TrueDestination(LChunk* chunk) {
+    return chunk->LookupDestination(true_block_id());
+  }
+
+  int FalseDestination(LChunk* chunk) {
+    return chunk->LookupDestination(false_block_id());
+  }
+
+  Label* TrueLabel(LChunk* chunk) {
+    if (true_label_ == NULL) {
+      true_label_ = chunk->GetAssemblyLabel(TrueDestination(chunk));
+    }
+    return true_label_;
+  }
+
+  Label* FalseLabel(LChunk* chunk) {
+    if (false_label_ == NULL) {
+      false_label_ = chunk->GetAssemblyLabel(FalseDestination(chunk));
+    }
+    return false_label_;
+  }
+
+ protected:
+  int true_block_id() { return SuccessorAt(0)->block_id(); }
+  int false_block_id() { return SuccessorAt(1)->block_id(); }
+
+ private:
+  DECLARE_HYDROGEN_ACCESSOR(ControlInstruction);
+
+  // Lazily-resolved labels; see TrueLabel()/FalseLabel().
+  Label* false_label_;
+  Label* true_label_;
+};
+
+
+// Holds up to four parallel-move lists at the inner positions surrounding an
+// instruction (BEFORE/START/END/AFTER); moves are created on demand.
+class LGap : public LTemplateInstruction<0, 0, 0> {
+ public:
+  explicit LGap(HBasicBlock* block)
+      : block_(block) {
+    parallel_moves_[BEFORE] = NULL;
+    parallel_moves_[START] = NULL;
+    parallel_moves_[END] = NULL;
+    parallel_moves_[AFTER] = NULL;
+  }
+
+  // Can't use the DECLARE-macro here because of sub-classes.
+  virtual bool IsGap() const V8_OVERRIDE { return true; }
+  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+  static LGap* cast(LInstruction* instr) {
+    ASSERT(instr->IsGap());
+    return reinterpret_cast<LGap*>(instr);
+  }
+
+  bool IsRedundant() const;
+
+  HBasicBlock* block() const { return block_; }
+
+  enum InnerPosition {
+    BEFORE,
+    START,
+    END,
+    AFTER,
+    FIRST_INNER_POSITION = BEFORE,
+    LAST_INNER_POSITION = AFTER
+  };
+
+  LParallelMove* GetOrCreateParallelMove(InnerPosition pos, Zone* zone) {
+    if (parallel_moves_[pos] == NULL) {
+      parallel_moves_[pos] = new(zone) LParallelMove(zone);
+    }
+    return parallel_moves_[pos];
+  }
+
+  LParallelMove* GetParallelMove(InnerPosition pos) {
+    return parallel_moves_[pos];
+  }
+
+ private:
+  LParallelMove* parallel_moves_[LAST_INNER_POSITION + 1];
+  HBasicBlock* block_;
+};
+
+
+// Gap emitted between instructions; only interesting in the disassembly when
+// it actually contains moves.
+class LInstructionGap V8_FINAL : public LGap {
+ public:
+  explicit LInstructionGap(HBasicBlock* block) : LGap(block) { }
+
+  virtual bool HasInterestingComment(LCodeGen* gen) const V8_OVERRIDE {
+    return !IsRedundant();
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(InstructionGap, "gap")
+};
+
+
+// 'drop' instruction; carries the number of stack values to discard.
+class LDrop V8_FINAL : public LTemplateInstruction<0, 0, 0> {
+ public:
+  explicit LDrop(int count) : count_(count) { }
+
+  int count() const { return count_; }
+
+  DECLARE_CONCRETE_INSTRUCTION(Drop, "drop")
+
+ private:
+  int count_;
+};
+
+
+// Placeholder instruction with no inputs ("dummy").
+class LDummy V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+ public:
+  // 'explicit' removed: it is meaningless on a zero-argument constructor.
+  LDummy() { }
+  DECLARE_CONCRETE_INSTRUCTION(Dummy, "dummy")
+};
+
+
+// Placeholder use of a single value ("dummy-use").
+class LDummyUse V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LDummyUse(LOperand* value) {
+    inputs_[0] = value;
+  }
+  DECLARE_CONCRETE_INSTRUCTION(DummyUse, "dummy-use")
+};
+
+
+// Unconditional jump to |block_|; always a control instruction.
+class LGoto V8_FINAL : public LTemplateInstruction<0, 0, 0> {
+ public:
+  explicit LGoto(HBasicBlock* block) : block_(block) { }
+
+  virtual bool HasInterestingComment(LCodeGen* gen) const V8_OVERRIDE;
+  DECLARE_CONCRETE_INSTRUCTION(Goto, "goto")
+  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+  virtual bool IsControl() const V8_OVERRIDE { return true; }
+
+  int block_id() const { return block_->block_id(); }
+
+ private:
+  HBasicBlock* block_;
+};
+
+
+// 'lazy-bailout' point; records (via the setter) the size of the gap
+// instructions emitted before it.
+class LLazyBailout V8_FINAL : public LTemplateInstruction<0, 0, 0> {
+ public:
+  LLazyBailout() : gap_instructions_size_(0) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(LazyBailout, "lazy-bailout")
+
+  void set_gap_instructions_size(int gap_instructions_size) {
+    gap_instructions_size_ = gap_instructions_size;
+  }
+  int gap_instructions_size() { return gap_instructions_size_; }
+
+ private:
+  int gap_instructions_size_;
+};
+
+
+// Gap at a block entry carrying the assembler Label; replacement() supports
+// redirecting this label to another one.
+class LLabel V8_FINAL : public LGap {
+ public:
+  explicit LLabel(HBasicBlock* block)
+      : LGap(block), replacement_(NULL) { }
+
+  virtual bool HasInterestingComment(LCodeGen* gen) const V8_OVERRIDE {
+    return false;
+  }
+  DECLARE_CONCRETE_INSTRUCTION(Label, "label")
+
+  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+
+  int block_id() const { return block()->block_id(); }
+  bool is_loop_header() const { return block()->IsLoopHeader(); }
+  bool is_osr_entry() const { return block()->is_osr_entry(); }
+  Label* label() { return &label_; }
+  LLabel* replacement() const { return replacement_; }
+  void set_replacement(LLabel* label) { replacement_ = label; }
+  bool HasReplacement() const { return replacement_ != NULL; }
+
+ private:
+  Label label_;
+  LLabel* replacement_;
+};
+
+
+// Marks the on-stack-replacement entry point.
+class LOsrEntry V8_FINAL : public LTemplateInstruction<0, 0, 0> {
+ public:
+  LOsrEntry() {}
+
+  virtual bool HasInterestingComment(LCodeGen* gen) const V8_OVERRIDE {
+    return false;
+  }
+  DECLARE_CONCRETE_INSTRUCTION(OsrEntry, "osr-entry")
+};
+
+
+// Reads the argument at |index| from |arguments| of the given |length|;
+// needs one scratch register.
+class LAccessArgumentsAt V8_FINAL : public LTemplateInstruction<1, 3, 1> {
+ public:
+  LAccessArgumentsAt(LOperand* arguments,
+                     LOperand* length,
+                     LOperand* index,
+                     LOperand* temp) {
+    inputs_[0] = arguments;
+    inputs_[1] = length;
+    inputs_[2] = index;
+    temps_[0] = temp;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(AccessArgumentsAt, "access-arguments-at")
+
+  LOperand* arguments() { return inputs_[0]; }
+  LOperand* length() { return inputs_[1]; }
+  LOperand* index() { return inputs_[2]; }
+  LOperand* temp() { return temps_[0]; }
+
+  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+};
+
+
+// Addition, '-e' variant; wraps HAdd (compare LAddI / LAddS).
+class LAddE V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LAddE(LOperand* left, LOperand* right) {
+    inputs_[0] = left;
+    inputs_[1] = right;
+  }
+
+  LOperand* left() { return inputs_[0]; }
+  LOperand* right() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(AddE, "add-e")
+  DECLARE_HYDROGEN_ACCESSOR(Add)
+};
+
+
+// Addition, '-i' variant; wraps HAdd (compare LAddE / LAddS).
+class LAddI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LAddI(LOperand* left, LOperand* right) {
+    inputs_[0] = left;
+    inputs_[1] = right;
+  }
+
+  LOperand* left() { return inputs_[0]; }
+  LOperand* right() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(AddI, "add-i")
+  DECLARE_HYDROGEN_ACCESSOR(Add)
+};
+
+
+// Addition, '-s' variant; wraps HAdd (compare LAddE / LAddI).
+class LAddS V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LAddS(LOperand* left, LOperand* right) {
+    inputs_[0] = left;
+    inputs_[1] = right;
+  }
+
+  LOperand* left() { return inputs_[0]; }
+  LOperand* right() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(AddS, "add-s")
+  DECLARE_HYDROGEN_ACCESSOR(Add)
+};
+
+
+// Allocation of |size|; needs the context and two scratch registers; wraps
+// HAllocate.
+class LAllocate V8_FINAL : public LTemplateInstruction<1, 2, 2> {
+ public:
+  LAllocate(LOperand* context,
+            LOperand* size,
+            LOperand* temp1,
+            LOperand* temp2) {
+    inputs_[0] = context;
+    inputs_[1] = size;
+    temps_[0] = temp1;
+    temps_[1] = temp2;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* size() { return inputs_[1]; }
+  LOperand* temp1() { return temps_[0]; }
+  LOperand* temp2() { return temps_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(Allocate, "allocate")
+  DECLARE_HYDROGEN_ACCESSOR(Allocate)
+};
+
+
+// 'apply-arguments': function, receiver, arguments length and elements.
+class LApplyArguments V8_FINAL : public LTemplateInstruction<1, 4, 0> {
+ public:
+  LApplyArguments(LOperand* function,
+                  LOperand* receiver,
+                  LOperand* length,
+                  LOperand* elements) {
+    inputs_[0] = function;
+    inputs_[1] = receiver;
+    inputs_[2] = length;
+    inputs_[3] = elements;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(ApplyArguments, "apply-arguments")
+
+  LOperand* function() { return inputs_[0]; }
+  LOperand* receiver() { return inputs_[1]; }
+  LOperand* length() { return inputs_[2]; }
+  LOperand* elements() { return inputs_[3]; }
+};
+
+
+// 'arguments-elements': one scratch register; wraps HArgumentsElements.
+class LArgumentsElements V8_FINAL : public LTemplateInstruction<1, 0, 1> {
+ public:
+  explicit LArgumentsElements(LOperand* temp) {
+    temps_[0] = temp;
+  }
+
+  LOperand* temp() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ArgumentsElements, "arguments-elements")
+  DECLARE_HYDROGEN_ACCESSOR(ArgumentsElements)
+};
+
+
+// 'arguments-length': computes the length of the given arguments elements.
+class LArgumentsLength V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LArgumentsLength(LOperand* elements) {
+    inputs_[0] = elements;
+  }
+
+  LOperand* elements() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ArgumentsLength, "arguments-length")
+};
+
+
+// Generic binary arithmetic, '-d' variant; op() selects the Token. Declares
+// opcode()/Mnemonic() by hand because the mnemonic depends on op().
+class LArithmeticD V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LArithmeticD(Token::Value op,
+               LOperand* left,
+               LOperand* right)
+      : op_(op) {
+    inputs_[0] = left;
+    inputs_[1] = right;
+  }
+
+  Token::Value op() const { return op_; }
+  LOperand* left() { return inputs_[0]; }
+  LOperand* right() { return inputs_[1]; }
+
+  virtual Opcode opcode() const V8_OVERRIDE {
+    return LInstruction::kArithmeticD;
+  }
+  virtual void CompileToNative(LCodeGen* generator) V8_OVERRIDE;
+  virtual const char* Mnemonic() const V8_OVERRIDE;
+
+ private:
+  Token::Value op_;
+};
+
+
+// Generic binary arithmetic, '-t' variant (takes a context); op() selects
+// the Token. Declares opcode()/Mnemonic() by hand, like LArithmeticD.
+class LArithmeticT V8_FINAL : public LTemplateInstruction<1, 3, 0> {
+ public:
+  LArithmeticT(Token::Value op,
+               LOperand* context,
+               LOperand* left,
+               LOperand* right)
+      : op_(op) {
+    inputs_[0] = context;
+    inputs_[1] = left;
+    inputs_[2] = right;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* left() { return inputs_[1]; }
+  LOperand* right() { return inputs_[2]; }
+  Token::Value op() const { return op_; }
+
+  virtual Opcode opcode() const V8_OVERRIDE {
+    return LInstruction::kArithmeticT;
+  }
+  virtual void CompileToNative(LCodeGen* generator) V8_OVERRIDE;
+  virtual const char* Mnemonic() const V8_OVERRIDE;
+
+ private:
+  Token::Value op_;
+};
+
+
+// Checks |index| against |length|; wraps HBoundsCheck.
+class LBoundsCheck V8_FINAL : public LTemplateInstruction<0, 2, 0> {
+ public:
+  // 'explicit' removed: it has no effect on a two-argument constructor and
+  // the file's other two-operand classes (LAddI etc.) omit it.
+  LBoundsCheck(LOperand* index, LOperand* length) {
+    inputs_[0] = index;
+    inputs_[1] = length;
+  }
+
+  LOperand* index() { return inputs_[0]; }
+  LOperand* length() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(BoundsCheck, "bounds-check")
+  DECLARE_HYDROGEN_ACCESSOR(BoundsCheck)
+};
+
+
+// Bitwise operation, '-i' variant; op() comes from the hydrogen HBitwise.
+class LBitI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LBitI(LOperand* left, LOperand* right) {
+    inputs_[0] = left;
+    inputs_[1] = right;
+  }
+
+  LOperand* left() { return inputs_[0]; }
+  LOperand* right() { return inputs_[1]; }
+
+  Token::Value op() const { return hydrogen()->op(); }
+
+  DECLARE_CONCRETE_INSTRUCTION(BitI, "bit-i")
+  DECLARE_HYDROGEN_ACCESSOR(Bitwise)
+};
+
+
+// Bitwise operation, '-s' variant; op() comes from the hydrogen HBitwise.
+class LBitS V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LBitS(LOperand* left, LOperand* right) {
+    inputs_[0] = left;
+    inputs_[1] = right;
+  }
+
+  LOperand* left() { return inputs_[0]; }
+  LOperand* right() { return inputs_[1]; }
+
+  Token::Value op() const { return hydrogen()->op(); }
+
+  DECLARE_CONCRETE_INSTRUCTION(BitS, "bit-s")
+  DECLARE_HYDROGEN_ACCESSOR(Bitwise)
+};
+
+
+// Branches on |value|; needs two scratch registers; wraps HBranch.
+class LBranch V8_FINAL : public LControlInstruction<1, 2> {
+ public:
+  // 'explicit' removed (no effect on a multi-argument constructor) and the
+  // 'LOperand *x' star placement normalized to the file-wide 'LOperand* x'.
+  LBranch(LOperand* value, LOperand* temp1, LOperand* temp2) {
+    inputs_[0] = value;
+    temps_[0] = temp1;
+    temps_[1] = temp2;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+  LOperand* temp1() { return temps_[0]; }
+  LOperand* temp2() { return temps_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(Branch, "branch")
+  DECLARE_HYDROGEN_ACCESSOR(Branch)
+
+  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+};
+
+
+// Direct call to a known JS function; arity derived from the hydrogen
+// argument count.
+class LCallJSFunction V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LCallJSFunction(LOperand* function) {
+    inputs_[0] = function;
+  }
+
+  LOperand* function() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CallJSFunction, "call-js-function")
+  DECLARE_HYDROGEN_ACCESSOR(CallJSFunction)
+
+  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+
+  int arity() const { return hydrogen()->argument_count() - 1; }
+};
+
+
+// 'call-function': calls |function| with the given context.
+class LCallFunction V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LCallFunction(LOperand* context, LOperand* function) {
+    inputs_[0] = context;
+    inputs_[1] = function;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* function() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CallFunction, "call-function")
+  DECLARE_HYDROGEN_ACCESSOR(CallFunction)
+
+  int arity() const { return hydrogen()->argument_count() - 1; }
+};
+
+
+// 'call-new': constructor call through |constructor| with the given context.
+class LCallNew V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LCallNew(LOperand* context, LOperand* constructor) {
+    inputs_[0] = context;
+    inputs_[1] = constructor;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* constructor() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CallNew, "call-new")
+  DECLARE_HYDROGEN_ACCESSOR(CallNew)
+
+  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+
+  int arity() const { return hydrogen()->argument_count() - 1; }
+};
+
+
+// 'call-new-array': array constructor call; mirrors LCallNew.
+class LCallNewArray V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LCallNewArray(LOperand* context, LOperand* constructor) {
+    inputs_[0] = context;
+    inputs_[1] = constructor;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* constructor() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CallNewArray, "call-new-array")
+  DECLARE_HYDROGEN_ACCESSOR(CallNewArray)
+
+  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+
+  int arity() const { return hydrogen()->argument_count() - 1; }
+};
+
+
+// 'call-runtime': calls the runtime function described by the hydrogen
+// instruction.
+class LCallRuntime V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LCallRuntime(LOperand* context) {
+    inputs_[0] = context;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CallRuntime, "call-runtime")
+  DECLARE_HYDROGEN_ACCESSOR(CallRuntime)
+
+  // Double registers are only preserved when the runtime call saves FP
+  // registers; otherwise the call clobbers them.
+  virtual bool ClobbersDoubleRegisters() const V8_OVERRIDE {
+    return save_doubles() == kDontSaveFPRegs;
+  }
+
+  const Runtime::Function* function() const { return hydrogen()->function(); }
+  int arity() const { return hydrogen()->argument_count(); }
+  SaveFPRegsMode save_doubles() const { return hydrogen()->save_doubles(); }
+};
+
+
+// 'call-stub': calls the code stub described by the hydrogen instruction.
+class LCallStub V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LCallStub(LOperand* context) {
+    inputs_[0] = context;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CallStub, "call-stub")
+  DECLARE_HYDROGEN_ACCESSOR(CallStub)
+};
+
+
+// Checks |value|'s instance type; one scratch register; wraps
+// HCheckInstanceType.
+class LCheckInstanceType V8_FINAL : public LTemplateInstruction<0, 1, 1> {
+ public:
+  // 'explicit' removed: it has no effect on a two-argument constructor
+  // (contrast LCheckMaps, whose second argument is defaulted).
+  LCheckInstanceType(LOperand* value, LOperand* temp) {
+    inputs_[0] = value;
+    temps_[0] = temp;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+  LOperand* temp() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CheckInstanceType, "check-instance-type")
+  DECLARE_HYDROGEN_ACCESSOR(CheckInstanceType)
+};
+
+
+// Checks |value|'s map; the scratch register is optional ('explicit' is
+// meaningful here because the constructor is callable with one argument).
+class LCheckMaps V8_FINAL : public LTemplateInstruction<0, 1, 1> {
+ public:
+  explicit LCheckMaps(LOperand* value, LOperand* temp = NULL) {
+    inputs_[0] = value;
+    temps_[0] = temp;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+  LOperand* temp() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CheckMaps, "check-maps")
+  DECLARE_HYDROGEN_ACCESSOR(CheckMaps)
+};
+
+
+// Checks that |value| is not a smi; wraps HCheckHeapObject.
+class LCheckNonSmi V8_FINAL : public LTemplateInstruction<0, 1, 0> {
+ public:
+  explicit LCheckNonSmi(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CheckNonSmi, "check-non-smi")
+  DECLARE_HYDROGEN_ACCESSOR(CheckHeapObject)
+};
+
+
+// Checks that |value| is a smi; unlike LCheckNonSmi, it defines a result.
+class LCheckSmi V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LCheckSmi(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CheckSmi, "check-smi")
+};
+
+
+// Checks |value| against an expected object; one scratch register; wraps
+// HCheckValue.
+class LCheckValue V8_FINAL : public LTemplateInstruction<0, 1, 1> {
+ public:
+  LCheckValue(LOperand* value, LOperand* temp) {
+    inputs_[0] = value;
+    temps_[0] = temp;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+  LOperand* temp() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CheckValue, "check-value")
+  DECLARE_HYDROGEN_ACCESSOR(CheckValue)
+};
+
+
+// Clamps an unclamped value to uint8, '-d' variant.
+class LClampDToUint8 V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LClampDToUint8(LOperand* unclamped) {
+    inputs_[0] = unclamped;
+  }
+
+  LOperand* unclamped() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ClampDToUint8, "clamp-d-to-uint8")
+};
+
+
+// Clamps an unclamped value to uint8, '-i' variant.
+class LClampIToUint8 V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LClampIToUint8(LOperand* unclamped) {
+    inputs_[0] = unclamped;
+  }
+
+  LOperand* unclamped() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ClampIToUint8, "clamp-i-to-uint8")
+};
+
+
+// Clamps an unclamped value to uint8, '-t' variant; needs two scratch
+// registers.
+class LClampTToUint8 V8_FINAL : public LTemplateInstruction<1, 1, 2> {
+ public:
+  LClampTToUint8(LOperand* unclamped, LOperand* temp1, LOperand* temp2) {
+    inputs_[0] = unclamped;
+    temps_[0] = temp1;
+    temps_[1] = temp2;
+  }
+
+  LOperand* unclamped() { return inputs_[0]; }
+  LOperand* temp1() { return temps_[0]; }
+  LOperand* temp2() { return temps_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ClampTToUint8, "clamp-t-to-uint8")
+};
+
+
+// Branch on class-of test: 1 input (value), 2 temps.
+class LClassOfTestAndBranch V8_FINAL : public LControlInstruction<1, 2> {
+ public:
+  LClassOfTestAndBranch(LOperand* value, LOperand* temp1, LOperand* temp2) {
+    inputs_[0] = value;
+    temps_[0] = temp1;
+    temps_[1] = temp2;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+  LOperand* temp1() { return temps_[0]; }
+  LOperand* temp2() { return temps_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ClassOfTestAndBranch,
+                               "class-of-test-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(ClassOfTestAndBranch)
+
+  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+};
+
+
+// Branch if a double is the hole NaN: 1 input (object), 1 temp.
+// Dropped "explicit": it has no effect on a multi-argument constructor and
+// no other two-argument constructor in this file uses it.
+class LCmpHoleAndBranchD V8_FINAL : public LControlInstruction<1, 1> {
+ public:
+  LCmpHoleAndBranchD(LOperand* object, LOperand* temp) {
+    inputs_[0] = object;
+    temps_[0] = temp;
+  }
+
+  LOperand* object() { return inputs_[0]; }
+  LOperand* temp() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CmpHoleAndBranchD, "cmp-hole-and-branch-d")
+  DECLARE_HYDROGEN_ACCESSOR(CompareHoleAndBranch)
+};
+
+
+// Branch if a tagged value is the hole: 1 input (object), no temps.
+class LCmpHoleAndBranchT V8_FINAL : public LControlInstruction<1, 0> {
+ public:
+  explicit LCmpHoleAndBranchT(LOperand* object) {
+    inputs_[0] = object;
+  }
+
+  LOperand* object() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CmpHoleAndBranchT, "cmp-hole-and-branch-t")
+  DECLARE_HYDROGEN_ACCESSOR(CompareHoleAndBranch)
+};
+
+
+// Branch on map equality: 1 input (value), 1 temp; map comes from hydrogen.
+class LCmpMapAndBranch V8_FINAL : public LControlInstruction<1, 1> {
+ public:
+  LCmpMapAndBranch(LOperand* value, LOperand* temp) {
+    inputs_[0] = value;
+    temps_[0] = temp;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+  LOperand* temp() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CmpMapAndBranch, "cmp-map-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(CompareMap)
+
+  Handle<Map> map() const { return hydrogen()->map().handle(); }
+};
+
+
+// Branch on object identity: 2 inputs (left, right), no temps.
+class LCmpObjectEqAndBranch V8_FINAL : public LControlInstruction<2, 0> {
+ public:
+  LCmpObjectEqAndBranch(LOperand* left, LOperand* right) {
+    inputs_[0] = left;
+    inputs_[1] = right;
+  }
+
+  LOperand* left() { return inputs_[0]; }
+  LOperand* right() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CmpObjectEqAndBranch, "cmp-object-eq-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(CompareObjectEqAndBranch)
+};
+
+
+// Generic (tagged) comparison: 3 inputs (context, left, right); the
+// comparison token is taken from the hydrogen instruction.
+class LCmpT V8_FINAL : public LTemplateInstruction<1, 3, 0> {
+ public:
+  LCmpT(LOperand* context, LOperand* left, LOperand* right) {
+    inputs_[0] = context;
+    inputs_[1] = left;
+    inputs_[2] = right;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* left() { return inputs_[1]; }
+  LOperand* right() { return inputs_[2]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CmpT, "cmp-t")
+  DECLARE_HYDROGEN_ACCESSOR(CompareGeneric)
+
+  Token::Value op() const { return hydrogen()->token(); }
+};
+
+
+// Branch if value is -0: 1 input (value), 1 temp.
+class LCompareMinusZeroAndBranch V8_FINAL : public LControlInstruction<1, 1> {
+ public:
+  LCompareMinusZeroAndBranch(LOperand* value, LOperand* temp) {
+    inputs_[0] = value;
+    temps_[0] = temp;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+  LOperand* temp() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CompareMinusZeroAndBranch,
+                               "cmp-minus-zero-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(CompareMinusZeroAndBranch)
+};
+
+
+// Numeric comparison + branch: 2 inputs (left, right); token and the
+// int/double distinction come from the hydrogen instruction.
+class LCompareNumericAndBranch V8_FINAL : public LControlInstruction<2, 0> {
+ public:
+  LCompareNumericAndBranch(LOperand* left, LOperand* right) {
+    inputs_[0] = left;
+    inputs_[1] = right;
+  }
+
+  LOperand* left() { return inputs_[0]; }
+  LOperand* right() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CompareNumericAndBranch,
+                               "compare-numeric-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(CompareNumericAndBranch)
+
+  Token::Value op() const { return hydrogen()->token(); }
+  bool is_double() const {
+    return hydrogen()->representation().IsDouble();
+  }
+
+  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+};
+
+
+// Double constant materialization; value is read from the hydrogen constant.
+class LConstantD V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(ConstantD, "constant-d")
+  DECLARE_HYDROGEN_ACCESSOR(Constant)
+
+  double value() const { return hydrogen()->DoubleValue(); }
+};
+
+
+// External-reference constant materialization.
+class LConstantE V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(ConstantE, "constant-e")
+  DECLARE_HYDROGEN_ACCESSOR(Constant)
+
+  ExternalReference value() const {
+    return hydrogen()->ExternalReferenceValue();
+  }
+};
+
+
+// Int32 constant materialization.
+class LConstantI V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(ConstantI, "constant-i")
+  DECLARE_HYDROGEN_ACCESSOR(Constant)
+
+  int32_t value() const { return hydrogen()->Integer32Value(); }
+};
+
+
+// Smi constant materialization (tagged from the int32 hydrogen value).
+class LConstantS V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(ConstantS, "constant-s")
+  DECLARE_HYDROGEN_ACCESSOR(Constant)
+
+  Smi* value() const { return Smi::FromInt(hydrogen()->Integer32Value()); }
+};
+
+
+// Tagged (handle) constant materialization.
+class LConstantT V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(ConstantT, "constant-t")
+  DECLARE_HYDROGEN_ACCESSOR(Constant)
+
+  Handle<Object> value(Isolate* isolate) const {
+    return hydrogen()->handle(isolate);
+  }
+};
+
+
+// Produces the current context; no inputs or temps.
+class LContext V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(Context, "context")
+  DECLARE_HYDROGEN_ACCESSOR(Context)
+};
+
+
+// Date field access: 1 input (date object); the field index is captured
+// as an instruction-local Smi rather than an operand.
+class LDateField V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+  LDateField(LOperand* date, Smi* index) : index_(index) {
+    inputs_[0] = date;
+  }
+
+  LOperand* date() { return inputs_[0]; }
+  Smi* index() const { return index_; }
+
+  DECLARE_CONCRETE_INSTRUCTION(DateField, "date-field")
+  DECLARE_HYDROGEN_ACCESSOR(DateField)
+
+ private:
+  Smi* index_;
+};
+
+
+// Debugger breakpoint; no operands.
+class LDebugBreak V8_FINAL : public LTemplateInstruction<0, 0, 0> {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(DebugBreak, "break")
+};
+
+
+// Global declarations: 1 input (context), no result consumed.
+class LDeclareGlobals V8_FINAL : public LTemplateInstruction<0, 1, 0> {
+ public:
+  explicit LDeclareGlobals(LOperand* context) {
+    inputs_[0] = context;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(DeclareGlobals, "declare-globals")
+  DECLARE_HYDROGEN_ACCESSOR(DeclareGlobals)
+};
+
+
+// Unconditional deoptimization; no operands.
+class LDeoptimize V8_FINAL : public LTemplateInstruction<0, 0, 0> {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(Deoptimize, "deoptimize")
+  DECLARE_HYDROGEN_ACCESSOR(Deoptimize)
+};
+
+
+// Integer division: 2 inputs (left, right), 1 temp.
+class LDivI V8_FINAL : public LTemplateInstruction<1, 2, 1> {
+ public:
+  LDivI(LOperand* left, LOperand* right, LOperand* temp) {
+    inputs_[0] = left;
+    inputs_[1] = right;
+    temps_[0] = temp;
+  }
+
+  LOperand* left() { return inputs_[0]; }
+  LOperand* right() { return inputs_[1]; }
+  LOperand* temp() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(DivI, "div-i")
+  DECLARE_HYDROGEN_ACCESSOR(Div)
+};
+
+
+// Double -> int32 or smi conversion: 1 input (value); tag_result() tells
+// codegen whether the hydrogen representation requests a smi.
+class LDoubleToIntOrSmi V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LDoubleToIntOrSmi(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(DoubleToIntOrSmi, "double-to-int-or-smi")
+  DECLARE_HYDROGEN_ACCESSOR(UnaryOperation)
+
+  bool tag_result() { return hydrogen()->representation().IsSmi(); }
+};
+
+
+// for-in enum cache lookup: 1 input (map); idx() is fetched by casting the
+// generic hydrogen_value() since no typed accessor macro is used here.
+class LForInCacheArray V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LForInCacheArray(LOperand* map) {
+    inputs_[0] = map;
+  }
+
+  LOperand* map() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ForInCacheArray, "for-in-cache-array")
+
+  int idx() {
+    return HForInCacheArray::cast(this->hydrogen_value())->idx();
+  }
+};
+
+
+// for-in map preparation: 2 inputs (context, object).
+class LForInPrepareMap V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LForInPrepareMap(LOperand* context, LOperand* object) {
+    inputs_[0] = context;
+    inputs_[1] = object;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* object() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ForInPrepareMap, "for-in-prepare-map")
+};
+
+
+// Read the cached array index from a string hash: 1 input (value).
+class LGetCachedArrayIndex V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LGetCachedArrayIndex(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(GetCachedArrayIndex, "get-cached-array-index")
+  DECLARE_HYDROGEN_ACCESSOR(GetCachedArrayIndex)
+};
+
+
+// Branch if the string has a cached array index: 1 input (value), 1 temp.
+class LHasCachedArrayIndexAndBranch V8_FINAL
+    : public LControlInstruction<1, 1> {
+ public:
+  LHasCachedArrayIndexAndBranch(LOperand* value, LOperand* temp) {
+    inputs_[0] = value;
+    temps_[0] = temp;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+  LOperand* temp() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(HasCachedArrayIndexAndBranch,
+                               "has-cached-array-index-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(HasCachedArrayIndexAndBranch)
+
+  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+};
+
+
+// Branch on instance type: 1 input (value), 1 temp.
+class LHasInstanceTypeAndBranch V8_FINAL : public LControlInstruction<1, 1> {
+ public:
+  LHasInstanceTypeAndBranch(LOperand* value, LOperand* temp) {
+    inputs_[0] = value;
+    temps_[0] = temp;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+  LOperand* temp() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(HasInstanceTypeAndBranch,
+                               "has-instance-type-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(HasInstanceTypeAndBranch)
+
+  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+};
+
+
+// Pointer into a previously allocated object: 2 inputs (base, offset).
+class LInnerAllocatedObject V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LInnerAllocatedObject(LOperand* base_object, LOperand* offset) {
+    inputs_[0] = base_object;
+    inputs_[1] = offset;
+  }
+
+  LOperand* base_object() const { return inputs_[0]; }
+  LOperand* offset() const { return inputs_[1]; }
+
+  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+
+  DECLARE_CONCRETE_INSTRUCTION(InnerAllocatedObject, "inner-allocated-object")
+};
+
+
+// Generic instanceof: 3 inputs (context, left, right).
+class LInstanceOf V8_FINAL : public LTemplateInstruction<1, 3, 0> {
+ public:
+  LInstanceOf(LOperand* context, LOperand* left, LOperand* right) {
+    inputs_[0] = context;
+    inputs_[1] = left;
+    inputs_[2] = right;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* left() { return inputs_[1]; }
+  LOperand* right() { return inputs_[2]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(InstanceOf, "instance-of")
+};
+
+
+// instanceof against a known global function: 2 inputs (context, value).
+// Also carries the lazy-deopt environment set up by the chunk builder.
+class LInstanceOfKnownGlobal V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LInstanceOfKnownGlobal(LOperand* context, LOperand* value) {
+    inputs_[0] = context;
+    inputs_[1] = value;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* value() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(InstanceOfKnownGlobal,
+                               "instance-of-known-global")
+  DECLARE_HYDROGEN_ACCESSOR(InstanceOfKnownGlobal)
+
+  Handle<JSFunction> function() const { return hydrogen()->function(); }
+  // NOTE(review): lazy_deopt_env_ is not initialized by the constructor;
+  // presumably the setter below always runs first — confirm.
+  LEnvironment* GetDeferredLazyDeoptimizationEnvironment() {
+    return lazy_deopt_env_;
+  }
+  virtual void SetDeferredLazyDeoptimizationEnvironment(
+      LEnvironment* env) V8_OVERRIDE {
+    lazy_deopt_env_ = env;
+  }
+
+ private:
+  LEnvironment* lazy_deopt_env_;
+};
+
+
+// Int32 -> double conversion: 1 input (value).
+class LInteger32ToDouble V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LInteger32ToDouble(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(Integer32ToDouble, "int32-to-double")
+};
+
+
+// Int32 -> smi conversion: 1 input (value).
+// Fixed copy-paste bug: the macro argument was Integer32ToDouble, which
+// would give this class LInteger32ToDouble's opcode/DoInteger32ToDouble
+// dispatch while the mnemonic said "int32-to-smi".
+class LInteger32ToSmi V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LInteger32ToSmi(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(Integer32ToSmi, "int32-to-smi")
+  DECLARE_HYDROGEN_ACCESSOR(Change)
+};
+
+
+// Call through a CallInterfaceDescriptor. Unlike the fixed-arity templates,
+// operands live in a ZoneList (target at index 0, then the environment
+// values), so InputCount/InputAt/TempCount/TempAt are overridden manually.
+class LCallWithDescriptor V8_FINAL : public LTemplateResultInstruction<1> {
+ public:
+  LCallWithDescriptor(const CallInterfaceDescriptor* descriptor,
+                      ZoneList<LOperand*>& operands,
+                      Zone* zone)
+    : descriptor_(descriptor),
+      inputs_(descriptor->environment_length() + 1, zone) {
+    ASSERT(descriptor->environment_length() + 1 == operands.length());
+    inputs_.AddAll(operands, zone);
+  }
+
+  LOperand* target() const { return inputs_[0]; }
+
+  const CallInterfaceDescriptor* descriptor() { return descriptor_; }
+
+ private:
+  DECLARE_CONCRETE_INSTRUCTION(CallWithDescriptor, "call-with-descriptor")
+  DECLARE_HYDROGEN_ACCESSOR(CallWithDescriptor)
+
+  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+
+  int arity() const { return hydrogen()->argument_count() - 1; }
+
+  const CallInterfaceDescriptor* descriptor_;
+  ZoneList<LOperand*> inputs_;
+
+  // Iterator support.
+  virtual int InputCount() V8_FINAL V8_OVERRIDE { return inputs_.length(); }
+  virtual LOperand* InputAt(int i) V8_FINAL V8_OVERRIDE { return inputs_[i]; }
+
+  virtual int TempCount() V8_FINAL V8_OVERRIDE { return 0; }
+  virtual LOperand* TempAt(int i) V8_FINAL V8_OVERRIDE { return NULL; }
+};
+
+
+// Function invocation: 2 inputs (context, function); arity from hydrogen.
+class LInvokeFunction V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LInvokeFunction(LOperand* context, LOperand* function) {
+    inputs_[0] = context;
+    inputs_[1] = function;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* function() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(InvokeFunction, "invoke-function")
+  DECLARE_HYDROGEN_ACCESSOR(InvokeFunction)
+
+  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+
+  int arity() const { return hydrogen()->argument_count() - 1; }
+};
+
+
+// Branch if the current call is a construct call: no inputs, 2 temps.
+class LIsConstructCallAndBranch V8_FINAL : public LControlInstruction<0, 2> {
+ public:
+  LIsConstructCallAndBranch(LOperand* temp1, LOperand* temp2) {
+    temps_[0] = temp1;
+    temps_[1] = temp2;
+  }
+
+  LOperand* temp1() { return temps_[0]; }
+  LOperand* temp2() { return temps_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(IsConstructCallAndBranch,
+                               "is-construct-call-and-branch")
+};
+
+
+// Branch if value is a JS object: 1 input (value), 2 temps.
+class LIsObjectAndBranch V8_FINAL : public LControlInstruction<1, 2> {
+ public:
+  LIsObjectAndBranch(LOperand* value, LOperand* temp1, LOperand* temp2) {
+    inputs_[0] = value;
+    temps_[0] = temp1;
+    temps_[1] = temp2;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+  LOperand* temp1() { return temps_[0]; }
+  LOperand* temp2() { return temps_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(IsObjectAndBranch, "is-object-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(IsObjectAndBranch)
+
+  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+};
+
+
+// Branch if value is a string: 1 input (value), 1 temp.
+class LIsStringAndBranch V8_FINAL : public LControlInstruction<1, 1> {
+ public:
+  LIsStringAndBranch(LOperand* value, LOperand* temp) {
+    inputs_[0] = value;
+    temps_[0] = temp;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+  LOperand* temp() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(IsStringAndBranch, "is-string-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(IsStringAndBranch)
+
+  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+};
+
+
+// Branch if value is a smi: 1 input (value), no temps.
+class LIsSmiAndBranch V8_FINAL : public LControlInstruction<1, 0> {
+ public:
+  explicit LIsSmiAndBranch(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(IsSmiAndBranch, "is-smi-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(IsSmiAndBranch)
+
+  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+};
+
+
+// Branch if value is undetectable: 1 input (value), 1 temp.
+// Dropped "explicit": it has no effect on a two-argument constructor and
+// no other two-argument constructor in this file uses it.
+class LIsUndetectableAndBranch V8_FINAL : public LControlInstruction<1, 1> {
+ public:
+  LIsUndetectableAndBranch(LOperand* value, LOperand* temp) {
+    inputs_[0] = value;
+    temps_[0] = temp;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+  LOperand* temp() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(IsUndetectableAndBranch,
+                               "is-undetectable-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(IsUndetectableAndBranch)
+
+  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+};
+
+
+// Load a slot from a context chain: 1 input (context); slot from hydrogen.
+class LLoadContextSlot V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LLoadContextSlot(LOperand* context) {
+    inputs_[0] = context;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(LoadContextSlot, "load-context-slot")
+  DECLARE_HYDROGEN_ACCESSOR(LoadContextSlot)
+
+  int slot_index() const { return hydrogen()->slot_index(); }
+
+  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+};
+
+
+// In-object/property field load: 1 input (object).
+class LLoadNamedField V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LLoadNamedField(LOperand* object) {
+    inputs_[0] = object;
+  }
+
+  LOperand* object() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(LoadNamedField, "load-named-field")
+  DECLARE_HYDROGEN_ACCESSOR(LoadNamedField)
+};
+
+
+// Function literal instantiation: 1 input (context).
+class LFunctionLiteral V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LFunctionLiteral(LOperand* context) {
+    inputs_[0] = context;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(FunctionLiteral, "function-literal")
+  DECLARE_HYDROGEN_ACCESSOR(FunctionLiteral)
+};
+
+
+// Load a function's prototype: 1 input (function), 1 temp.
+class LLoadFunctionPrototype V8_FINAL : public LTemplateInstruction<1, 1, 1> {
+ public:
+  LLoadFunctionPrototype(LOperand* function, LOperand* temp) {
+    inputs_[0] = function;
+    temps_[0] = temp;
+  }
+
+  LOperand* function() { return inputs_[0]; }
+  LOperand* temp() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(LoadFunctionPrototype, "load-function-prototype")
+  DECLARE_HYDROGEN_ACCESSOR(LoadFunctionPrototype)
+};
+
+
+// Load from a global property cell; no operands.
+class LLoadGlobalCell V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(LoadGlobalCell, "load-global-cell")
+  DECLARE_HYDROGEN_ACCESSOR(LoadGlobalCell)
+};
+
+
+// Generic (IC) global load: 2 inputs (context, global_object).
+class LLoadGlobalGeneric V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LLoadGlobalGeneric(LOperand* context, LOperand* global_object) {
+    inputs_[0] = context;
+    inputs_[1] = global_object;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* global_object() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(LoadGlobalGeneric, "load-global-generic")
+  DECLARE_HYDROGEN_ACCESSOR(LoadGlobalGeneric)
+
+  Handle<Object> name() const { return hydrogen()->name(); }
+  bool for_typeof() const { return hydrogen()->for_typeof(); }
+};
+
+
+// Templated base for keyed loads: 2 inputs (elements, key) plus T temps
+// supplied by the concrete subclasses. Members of the dependent base are
+// accessed through this->; is_fixed_typed_array() now does the same for
+// consistency with every other accessor in this template.
+template<int T>
+class LLoadKeyed : public LTemplateInstruction<1, 2, T> {
+ public:
+  LLoadKeyed(LOperand* elements, LOperand* key) {
+    this->inputs_[0] = elements;
+    this->inputs_[1] = key;
+  }
+
+  LOperand* elements() { return this->inputs_[0]; }
+  LOperand* key() { return this->inputs_[1]; }
+  ElementsKind elements_kind() const {
+    return this->hydrogen()->elements_kind();
+  }
+  bool is_external() const {
+    return this->hydrogen()->is_external();
+  }
+  bool is_fixed_typed_array() const {
+    return this->hydrogen()->is_fixed_typed_array();
+  }
+  bool is_typed_elements() const {
+    return is_external() || is_fixed_typed_array();
+  }
+  uint32_t additional_index() const {
+    return this->hydrogen()->index_offset();
+  }
+  void PrintDataTo(StringStream* stream) V8_OVERRIDE {
+    this->elements()->PrintTo(stream);
+    stream->Add("[");
+    this->key()->PrintTo(stream);
+    if (this->hydrogen()->IsDehoisted()) {
+      stream->Add(" + %d]", this->additional_index());
+    } else {
+      stream->Add("]");
+    }
+  }
+
+  DECLARE_HYDROGEN_ACCESSOR(LoadKeyed)
+};
+
+
+// Keyed load from external/typed-array backing; adds one temp register.
+// Removed the stray ';' after DECLARE_CONCRETE_INSTRUCTION: the macro is
+// used without a trailing semicolon everywhere else in this file, and the
+// extra class-scope semicolon is ill-formed before C++11.
+class LLoadKeyedExternal: public LLoadKeyed<1> {
+ public:
+  LLoadKeyedExternal(LOperand* elements, LOperand* key, LOperand* temp) :
+      LLoadKeyed<1>(elements, key) {
+    temps_[0] = temp;
+  }
+
+  LOperand* temp() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(LoadKeyedExternal, "load-keyed-external")
+};
+
+
+// Keyed load from a fixed array; adds one temp register.
+// Removed the stray ';' after DECLARE_CONCRETE_INSTRUCTION (file convention
+// is no trailing semicolon; the extra one is ill-formed before C++11).
+class LLoadKeyedFixed: public LLoadKeyed<1> {
+ public:
+  LLoadKeyedFixed(LOperand* elements, LOperand* key, LOperand* temp) :
+      LLoadKeyed<1>(elements, key) {
+    temps_[0] = temp;
+  }
+
+  LOperand* temp() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(LoadKeyedFixed, "load-keyed-fixed")
+};
+
+
+// Keyed load from a fixed double array; adds one temp register.
+// Removed the stray ';' after DECLARE_CONCRETE_INSTRUCTION (file convention
+// is no trailing semicolon; the extra one is ill-formed before C++11).
+class LLoadKeyedFixedDouble: public LLoadKeyed<1> {
+ public:
+  LLoadKeyedFixedDouble(LOperand* elements, LOperand* key, LOperand* temp) :
+      LLoadKeyed<1>(elements, key) {
+    temps_[0] = temp;
+  }
+
+  LOperand* temp() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(LoadKeyedFixedDouble, "load-keyed-fixed-double")
+};
+
+
+// Generic (IC) keyed load: 3 inputs (context, object, key).
+class LLoadKeyedGeneric V8_FINAL : public LTemplateInstruction<1, 3, 0> {
+ public:
+  LLoadKeyedGeneric(LOperand* context, LOperand* object, LOperand* key) {
+    inputs_[0] = context;
+    inputs_[1] = object;
+    inputs_[2] = key;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* object() { return inputs_[1]; }
+  LOperand* key() { return inputs_[2]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(LoadKeyedGeneric, "load-keyed-generic")
+};
+
+
+// Generic (IC) named load: 2 inputs (context, object); name from hydrogen.
+class LLoadNamedGeneric V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LLoadNamedGeneric(LOperand* context, LOperand* object) {
+    inputs_[0] = context;
+    inputs_[1] = object;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* object() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(LoadNamedGeneric, "load-named-generic")
+  DECLARE_HYDROGEN_ACCESSOR(LoadNamedGeneric)
+
+  Handle<Object> name() const { return hydrogen()->name(); }
+};
+
+
+// Load a value from the root list; index comes from hydrogen.
+class LLoadRoot V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(LoadRoot, "load-root")
+  DECLARE_HYDROGEN_ACCESSOR(LoadRoot)
+
+  Heap::RootListIndex index() const { return hydrogen()->index(); }
+};
+
+
+// Read the enum-cache length of a map: 1 input (value).
+class LMapEnumLength V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LMapEnumLength(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(MapEnumLength, "map-enum-length")
+};
+
+
+// Templated base for unary math ops: 1 input (value), T temps supplied by
+// the concrete subclasses; op id comes from the hydrogen instruction.
+template<int T>
+class LUnaryMathOperation : public LTemplateInstruction<1, 1, T> {
+ public:
+  explicit LUnaryMathOperation(LOperand* value) {
+    this->inputs_[0] = value;
+  }
+
+  LOperand* value() { return this->inputs_[0]; }
+  BuiltinFunctionId op() const { return this->hydrogen()->op(); }
+
+  void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+
+  DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation)
+};
+
+
+// Math.abs on an untagged value; no extra temps.
+class LMathAbs V8_FINAL : public LUnaryMathOperation<0> {
+ public:
+  explicit LMathAbs(LOperand* value) : LUnaryMathOperation<0>(value) {}
+
+  DECLARE_CONCRETE_INSTRUCTION(MathAbs, "math-abs")
+};
+
+
+// Math.abs on a tagged value: 2 inputs (context, value), 3 temps.
+// NOTE(review): unlike sibling classes this one is not V8_FINAL — confirm
+// whether that is intentional.
+class LMathAbsTagged: public LTemplateInstruction<1, 2, 3> {
+ public:
+  LMathAbsTagged(LOperand* context, LOperand* value,
+                 LOperand* temp1, LOperand* temp2, LOperand* temp3) {
+    inputs_[0] = context;
+    inputs_[1] = value;
+    temps_[0] = temp1;
+    temps_[1] = temp2;
+    temps_[2] = temp3;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* value() { return inputs_[1]; }
+  LOperand* temp1() { return temps_[0]; }
+  LOperand* temp2() { return temps_[1]; }
+  LOperand* temp3() { return temps_[2]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(MathAbsTagged, "math-abs-tagged")
+  DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation)
+};
+
+
+// Math.exp: 4 temps (one double); the constructor eagerly initializes the
+// shared exp lookup table via ExternalReference::InitializeMathExpData().
+class LMathExp V8_FINAL : public LUnaryMathOperation<4> {
+ public:
+  LMathExp(LOperand* value,
+           LOperand* double_temp1,
+           LOperand* temp1,
+           LOperand* temp2,
+           LOperand* temp3)
+      : LUnaryMathOperation<4>(value) {
+    temps_[0] = double_temp1;
+    temps_[1] = temp1;
+    temps_[2] = temp2;
+    temps_[3] = temp3;
+    ExternalReference::InitializeMathExpData();
+  }
+
+  LOperand* double_temp1() { return temps_[0]; }
+  LOperand* temp1() { return temps_[1]; }
+  LOperand* temp2() { return temps_[2]; }
+  LOperand* temp3() { return temps_[3]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(MathExp, "math-exp")
+};
+
+
+// Math.floor; no extra temps.
+class LMathFloor V8_FINAL : public LUnaryMathOperation<0> {
+ public:
+  explicit LMathFloor(LOperand* value) : LUnaryMathOperation<0>(value) { }
+  DECLARE_CONCRETE_INSTRUCTION(MathFloor, "math-floor")
+};
+
+
+// Flooring division: 2 inputs (left, right), optional temp (may be NULL).
+class LMathFloorOfDiv V8_FINAL : public LTemplateInstruction<1, 2, 1> {
+ public:
+  LMathFloorOfDiv(LOperand* left,
+                  LOperand* right,
+                  LOperand* temp = NULL) {
+    inputs_[0] = left;
+    inputs_[1] = right;
+    temps_[0] = temp;
+  }
+
+  LOperand* left() { return inputs_[0]; }
+  LOperand* right() { return inputs_[1]; }
+  LOperand* temp() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(MathFloorOfDiv, "math-floor-of-div")
+  DECLARE_HYDROGEN_ACCESSOR(MathFloorOfDiv)
+};
+
+
+// Math.log; no extra temps.
+class LMathLog V8_FINAL : public LUnaryMathOperation<0> {
+ public:
+  explicit LMathLog(LOperand* value) : LUnaryMathOperation<0>(value) { }
+  DECLARE_CONCRETE_INSTRUCTION(MathLog, "math-log")
+};
+
+
+// Math.min/Math.max: 2 inputs (left, right); which op comes from hydrogen.
+class LMathMinMax V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LMathMinMax(LOperand* left, LOperand* right) {
+    inputs_[0] = left;
+    inputs_[1] = right;
+  }
+
+  LOperand* left() { return inputs_[0]; }
+  LOperand* right() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(MathMinMax, "math-min-max")
+  DECLARE_HYDROGEN_ACCESSOR(MathMinMax)
+};
+
+
+// Math.pow(x, 0.5); no extra temps.
+class LMathPowHalf V8_FINAL : public LUnaryMathOperation<0> {
+ public:
+  explicit LMathPowHalf(LOperand* value) : LUnaryMathOperation<0>(value) { }
+  DECLARE_CONCRETE_INSTRUCTION(MathPowHalf, "math-pow-half")
+};
+
+
+// Math.round; needs one temp register.
+class LMathRound V8_FINAL : public LUnaryMathOperation<1> {
+ public:
+  LMathRound(LOperand* value, LOperand* temp1)
+      : LUnaryMathOperation<1>(value) {
+    temps_[0] = temp1;
+  }
+
+  LOperand* temp1() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(MathRound, "math-round")
+};
+
+
+// Math.sqrt; no extra temps.
+class LMathSqrt V8_FINAL : public LUnaryMathOperation<0> {
+ public:
+  explicit LMathSqrt(LOperand* value) : LUnaryMathOperation<0>(value) { }
+  DECLARE_CONCRETE_INSTRUCTION(MathSqrt, "math-sqrt")
+};
+
+
+// Integer modulus: 2 inputs (left, right), no temps.
+class LModI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LModI(LOperand* left, LOperand* right) {
+    inputs_[0] = left;
+    inputs_[1] = right;
+  }
+
+  LOperand* left() { return inputs_[0]; }
+  LOperand* right() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ModI, "mod-i")
+  DECLARE_HYDROGEN_ACCESSOR(Mod)
+};
+
+
+// Multiply by a constant (int or smi): left operand plus a constant right
+// operand, exposed via a checked LConstantOperand cast.
+class LMulConstIS V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LMulConstIS(LOperand* left, LConstantOperand* right) {
+    inputs_[0] = left;
+    inputs_[1] = right;
+  }
+
+  LOperand* left() { return inputs_[0]; }
+  LConstantOperand* right() { return LConstantOperand::cast(inputs_[1]); }
+
+  DECLARE_CONCRETE_INSTRUCTION(MulConstIS, "mul-const-i-s")
+  DECLARE_HYDROGEN_ACCESSOR(Mul)
+};
+
+
+// Integer multiply: 2 inputs (left, right), no temps.
+class LMulI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LMulI(LOperand* left, LOperand* right) {
+    inputs_[0] = left;
+    inputs_[1] = right;
+  }
+
+  LOperand* left() { return inputs_[0]; }
+  LOperand* right() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(MulI, "mul-i")
+  DECLARE_HYDROGEN_ACCESSOR(Mul)
+};
+
+
+// Smi multiply: 2 inputs (left, right), no temps.
+// Fixed copy-paste bug: the macro argument was MulI, which would give this
+// class LMulI's opcode and codegen dispatch (clashing with LMulI above)
+// while the mnemonic said "mul-s".
+class LMulS V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LMulS(LOperand* left, LOperand* right) {
+    inputs_[0] = left;
+    inputs_[1] = right;
+  }
+
+  LOperand* left() { return inputs_[0]; }
+  LOperand* right() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(MulS, "mul-s")
+  DECLARE_HYDROGEN_ACCESSOR(Mul)
+};
+
+
+// Box a double into a HeapNumber: 1 input (value), 2 temps.
+class LNumberTagD V8_FINAL : public LTemplateInstruction<1, 1, 2> {
+ public:
+  LNumberTagD(LOperand* value, LOperand* temp1, LOperand* temp2) {
+    inputs_[0] = value;
+    temps_[0] = temp1;
+    temps_[1] = temp2;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+  LOperand* temp1() { return temps_[0]; }
+  LOperand* temp2() { return temps_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(NumberTagD, "number-tag-d")
+  DECLARE_HYDROGEN_ACCESSOR(Change)
+};
+
+
+// Tag a uint32 as a Number: 1 input (value), 2 temps.
+// Dropped "explicit": it has no effect on a three-argument constructor and
+// no other multi-argument constructor in this file uses it.
+class LNumberTagU V8_FINAL : public LTemplateInstruction<1, 1, 2> {
+ public:
+  LNumberTagU(LOperand* value,
+              LOperand* temp1,
+              LOperand* temp2) {
+    inputs_[0] = value;
+    temps_[0] = temp1;
+    temps_[1] = temp2;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+  LOperand* temp1() { return temps_[0]; }
+  LOperand* temp2() { return temps_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(NumberTagU, "number-tag-u")
+};
+
+
+// Unbox a tagged number to a double: 1 input (value), 1 temp.
+class LNumberUntagD V8_FINAL : public LTemplateInstruction<1, 1, 1> {
+ public:
+  LNumberUntagD(LOperand* value, LOperand* temp) {
+    inputs_[0] = value;
+    temps_[0] = temp;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+
+  LOperand* temp() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(NumberUntagD, "double-untag")
+  DECLARE_HYDROGEN_ACCESSOR(Change)
+};
+
+
+// Incoming function parameter; no operands.
+class LParameter V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+ public:
+  // NOTE(review): lacks V8_OVERRIDE unlike other overrides here — confirm.
+  virtual bool HasInterestingComment(LCodeGen* gen) const { return false; }
+  DECLARE_CONCRETE_INSTRUCTION(Parameter, "parameter")
+};
+
+
+// Math.pow: 2 inputs (left, right), no temps.
+class LPower V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LPower(LOperand* left, LOperand* right) {
+    inputs_[0] = left;
+    inputs_[1] = right;
+  }
+
+  LOperand* left() { return inputs_[0]; }
+  LOperand* right() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(Power, "power")
+  DECLARE_HYDROGEN_ACCESSOR(Power)
+};
+
+
+// Push a call argument: 1 input (value), no result.
+class LPushArgument V8_FINAL : public LTemplateInstruction<0, 1, 0> {
+ public:
+  explicit LPushArgument(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(PushArgument, "push-argument")
+};
+
+
+// RegExp literal instantiation: 1 input (context).
+class LRegExpLiteral V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LRegExpLiteral(LOperand* context) {
+    inputs_[0] = context;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(RegExpLiteral, "regexp-literal")
+  DECLARE_HYDROGEN_ACCESSOR(RegExpLiteral)
+};
+
+
+// Function return: 3 inputs (value, context, parameter_count); the count
+// may be a constant operand, checked via has_constant_parameter_count().
+class LReturn V8_FINAL : public LTemplateInstruction<0, 3, 0> {
+ public:
+  LReturn(LOperand* value, LOperand* context, LOperand* parameter_count) {
+    inputs_[0] = value;
+    inputs_[1] = context;
+    inputs_[2] = parameter_count;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+  LOperand* parameter_count() { return inputs_[2]; }
+
+  bool has_constant_parameter_count() {
+    return parameter_count()->IsConstantOperand();
+  }
+  LConstantOperand* constant_parameter_count() {
+    ASSERT(has_constant_parameter_count());
+    return LConstantOperand::cast(parameter_count());
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(Return, "return")
+};
+
+
+// Read a char from a sequential string: 2 inputs (string, index), 1 temp.
+class LSeqStringGetChar V8_FINAL : public LTemplateInstruction<1, 2, 1> {
+ public:
+  LSeqStringGetChar(LOperand* string,
+                    LOperand* index,
+                    LOperand* temp) {
+    inputs_[0] = string;
+    inputs_[1] = index;
+    temps_[0] = temp;
+  }
+
+  LOperand* string() { return inputs_[0]; }
+  LOperand* index() { return inputs_[1]; }
+  LOperand* temp() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(SeqStringGetChar, "seq-string-get-char")
+  DECLARE_HYDROGEN_ACCESSOR(SeqStringGetChar)
+};
+
+
+// Write a char into a sequential string: 4 inputs (context, string, index,
+// value), 1 temp.
+class LSeqStringSetChar V8_FINAL : public LTemplateInstruction<1, 4, 1> {
+ public:
+  LSeqStringSetChar(LOperand* context,
+                    LOperand* string,
+                    LOperand* index,
+                    LOperand* value,
+                    LOperand* temp) {
+    inputs_[0] = context;
+    inputs_[1] = string;
+    inputs_[2] = index;
+    inputs_[3] = value;
+    temps_[0] = temp;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* string() { return inputs_[1]; }
+  LOperand* index() { return inputs_[2]; }
+  LOperand* value() { return inputs_[3]; }
+  LOperand* temp() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(SeqStringSetChar, "seq-string-set-char")
+  DECLARE_HYDROGEN_ACCESSOR(SeqStringSetChar)
+};
+
+
+// Tag an integer as a smi: 1 input (value).
+class LSmiTag V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LSmiTag(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(SmiTag, "smi-tag")
+};
+
+
+// Untag a smi to an integer: 1 input (value); needs_check indicates the
+// input may not actually be a smi and must be verified first.
+class LSmiUntag V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+  LSmiUntag(LOperand* value, bool needs_check)
+      : needs_check_(needs_check) {
+    inputs_[0] = value;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+  bool needs_check() const { return needs_check_; }
+
+  DECLARE_CONCRETE_INSTRUCTION(SmiUntag, "smi-untag")
+
+ private:
+  bool needs_check_;
+};
+
+
+// Stack-overflow/interrupt check: 1 input (context); carries a label that
+// the deferred code jumps back to.
+class LStackCheck V8_FINAL : public LTemplateInstruction<0, 1, 0> {
+ public:
+  explicit LStackCheck(LOperand* context) {
+    inputs_[0] = context;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(StackCheck, "stack-check")
+  DECLARE_HYDROGEN_ACCESSOR(StackCheck)
+
+  Label* done_label() { return &done_label_; }
+
+ private:
+  Label done_label_;
+};
+
+
+// Templated base for keyed stores: 3 inputs (elements, key, value) plus T
+// temps supplied by the concrete subclasses. Members of the dependent base
+// are accessed through this->; is_fixed_typed_array() and the ASSERT now do
+// the same for consistency with the template's other accessors.
+template<int T>
+class LStoreKeyed : public LTemplateInstruction<0, 3, T> {
+ public:
+  LStoreKeyed(LOperand* elements, LOperand* key, LOperand* value) {
+    this->inputs_[0] = elements;
+    this->inputs_[1] = key;
+    this->inputs_[2] = value;
+  }
+
+  bool is_external() const { return this->hydrogen()->is_external(); }
+  bool is_fixed_typed_array() const {
+    return this->hydrogen()->is_fixed_typed_array();
+  }
+  bool is_typed_elements() const {
+    return is_external() || is_fixed_typed_array();
+  }
+  LOperand* elements() { return this->inputs_[0]; }
+  LOperand* key() { return this->inputs_[1]; }
+  LOperand* value() { return this->inputs_[2]; }
+  ElementsKind elements_kind() const {
+    return this->hydrogen()->elements_kind();
+  }
+
+  bool NeedsCanonicalization() {
+    return this->hydrogen()->NeedsCanonicalization();
+  }
+  uint32_t additional_index() const { return this->hydrogen()->index_offset(); }
+
+  void PrintDataTo(StringStream* stream) V8_OVERRIDE {
+    this->elements()->PrintTo(stream);
+    stream->Add("[");
+    this->key()->PrintTo(stream);
+    if (this->hydrogen()->IsDehoisted()) {
+      stream->Add(" + %d] <-", this->additional_index());
+    } else {
+      stream->Add("] <- ");
+    }
+
+    // A NULL value operand encodes a constant hole store (double NaN hole).
+    if (this->value() == NULL) {
+      ASSERT(this->hydrogen()->IsConstantHoleStore() &&
+             this->hydrogen()->value()->representation().IsDouble());
+      stream->Add("<the hole(nan)>");
+    } else {
+      this->value()->PrintTo(stream);
+    }
+  }
+
+  DECLARE_HYDROGEN_ACCESSOR(StoreKeyed)
+};
+
+
+// Keyed store into external (off-heap) backing store; one temp register.
+// NOTE(review): the `;` after the constructor body is redundant.
+class LStoreKeyedExternal V8_FINAL : public LStoreKeyed<1> {
+ public:
+  LStoreKeyedExternal(LOperand* elements, LOperand* key, LOperand* value,
+                      LOperand* temp) :
+      LStoreKeyed<1>(elements, key, value) {
+    temps_[0] = temp;
+  };
+
+  LOperand* temp() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(StoreKeyedExternal, "store-keyed-external")
+};
+
+
+// Keyed store into a fixed (in-heap) elements array; one temp register.
+// NOTE(review): redundant `;` after the constructor body, as in the
+// sibling classes.
+class LStoreKeyedFixed V8_FINAL : public LStoreKeyed<1> {
+ public:
+  LStoreKeyedFixed(LOperand* elements, LOperand* key, LOperand* value,
+                   LOperand* temp) :
+      LStoreKeyed<1>(elements, key, value) {
+    temps_[0] = temp;
+  };
+
+  LOperand* temp() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(StoreKeyedFixed, "store-keyed-fixed")
+};
+
+
+// Keyed store into a fixed double elements array; one temp register.
+// NOTE(review): redundant `;` after the constructor body.
+class LStoreKeyedFixedDouble V8_FINAL : public LStoreKeyed<1> {
+ public:
+  LStoreKeyedFixedDouble(LOperand* elements, LOperand* key, LOperand* value,
+                         LOperand* temp) :
+      LStoreKeyed<1>(elements, key, value) {
+    temps_[0] = temp;
+  };
+
+  LOperand* temp() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(StoreKeyedFixedDouble,
+                               "store-keyed-fixed-double")
+};
+
+
+// Generic keyed store performed through an inline cache / runtime call.
+// Inputs: context, receiver object, key, value.
+class LStoreKeyedGeneric V8_FINAL : public LTemplateInstruction<0, 4, 0> {
+ public:
+  LStoreKeyedGeneric(LOperand* context,
+                     LOperand* obj,
+                     LOperand* key,
+                     LOperand* value) {
+    inputs_[0] = context;
+    inputs_[1] = obj;
+    inputs_[2] = key;
+    inputs_[3] = value;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* object() { return inputs_[1]; }
+  LOperand* key() { return inputs_[2]; }
+  LOperand* value() { return inputs_[3]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(StoreKeyedGeneric, "store-keyed-generic")
+  DECLARE_HYDROGEN_ACCESSOR(StoreKeyedGeneric)
+
+  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+
+  // Strict-mode flag comes from the Hydrogen instruction.
+  StrictModeFlag strict_mode_flag() { return hydrogen()->strict_mode_flag(); }
+};
+
+
+// Stores a value into a named in-object or backing-store field.
+// Two temps are provided (e.g. for write barriers / map transitions —
+// exact use is decided by the code generator).
+class LStoreNamedField V8_FINAL : public LTemplateInstruction<0, 2, 2> {
+ public:
+  LStoreNamedField(LOperand* object, LOperand* value,
+                   LOperand* temp0, LOperand* temp1) {
+    inputs_[0] = object;
+    inputs_[1] = value;
+    temps_[0] = temp0;
+    temps_[1] = temp1;
+  }
+
+  LOperand* object() { return inputs_[0]; }
+  LOperand* value() { return inputs_[1]; }
+  LOperand* temp0() { return temps_[0]; }
+  LOperand* temp1() { return temps_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(StoreNamedField, "store-named-field")
+  DECLARE_HYDROGEN_ACCESSOR(StoreNamedField)
+
+  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+
+  // Map to transition to (if any) and the field's representation, both
+  // taken from the Hydrogen instruction.
+  Handle<Map> transition() const { return hydrogen()->transition_map(); }
+  Representation representation() const {
+    return hydrogen()->field_representation();
+  }
+};
+
+
+// Generic named-property store through an inline cache.
+// NOTE(review): missing space before ':' in "V8_FINAL:" — inconsistent
+// with the other class declarations in this file.
+class LStoreNamedGeneric V8_FINAL: public LTemplateInstruction<0, 3, 0> {
+ public:
+  LStoreNamedGeneric(LOperand* context, LOperand* object, LOperand* value) {
+    inputs_[0] = context;
+    inputs_[1] = object;
+    inputs_[2] = value;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* object() { return inputs_[1]; }
+  LOperand* value() { return inputs_[2]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(StoreNamedGeneric, "store-named-generic")
+  DECLARE_HYDROGEN_ACCESSOR(StoreNamedGeneric)
+
+  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+
+  Handle<Object> name() const { return hydrogen()->name(); }
+  StrictModeFlag strict_mode_flag() { return hydrogen()->strict_mode_flag(); }
+};
+
+
+// String concatenation: result = left + right (requires context).
+class LStringAdd V8_FINAL : public LTemplateInstruction<1, 3, 0> {
+ public:
+  LStringAdd(LOperand* context, LOperand* left, LOperand* right) {
+    inputs_[0] = context;
+    inputs_[1] = left;
+    inputs_[2] = right;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* left() { return inputs_[1]; }
+  LOperand* right() { return inputs_[2]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(StringAdd, "string-add")
+  DECLARE_HYDROGEN_ACCESSOR(StringAdd)
+};
+
+
+
+// Loads the character code at `index` of `string` (String.charCodeAt).
+class LStringCharCodeAt V8_FINAL : public LTemplateInstruction<1, 3, 0> {
+ public:
+  LStringCharCodeAt(LOperand* context, LOperand* string, LOperand* index) {
+    inputs_[0] = context;
+    inputs_[1] = string;
+    inputs_[2] = index;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* string() { return inputs_[1]; }
+  LOperand* index() { return inputs_[2]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(StringCharCodeAt, "string-char-code-at")
+  DECLARE_HYDROGEN_ACCESSOR(StringCharCodeAt)
+};
+
+
+// Creates a one-character string from a character code
+// (String.fromCharCode).
+class LStringCharFromCode V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LStringCharFromCode(LOperand* context, LOperand* char_code) {
+    inputs_[0] = context;
+    inputs_[1] = char_code;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* char_code() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(StringCharFromCode, "string-char-from-code")
+  DECLARE_HYDROGEN_ACCESSOR(StringCharFromCode)
+};
+
+
+// Compares two strings with the token's comparison operator and branches
+// on the result. This is a control instruction (3 inputs, 0 temps).
+class LStringCompareAndBranch V8_FINAL : public LControlInstruction<3, 0> {
+ public:
+  LStringCompareAndBranch(LOperand* context, LOperand* left, LOperand* right) {
+    inputs_[0] = context;
+    inputs_[1] = left;
+    inputs_[2] = right;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* left() { return inputs_[1]; }
+  LOperand* right() { return inputs_[2]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(StringCompareAndBranch,
+                               "string-compare-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(StringCompareAndBranch)
+
+  // Comparison operator (e.g. ==, <) from the Hydrogen instruction.
+  Token::Value op() const { return hydrogen()->token(); }
+
+  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+};
+
+
+// Truncating conversion from a tagged value to an int32.
+// NOTE(review): `explicit` on a three-argument constructor has no effect
+// (pre-C++11 semantics) and should be dropped for consistency.
+class LTaggedToI V8_FINAL : public LTemplateInstruction<1, 1, 2> {
+ public:
+  explicit LTaggedToI(LOperand* value, LOperand* temp1, LOperand* temp2) {
+    inputs_[0] = value;
+    temps_[0] = temp1;
+    temps_[1] = temp2;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+  LOperand* temp1() { return temps_[0]; }
+  LOperand* temp2() { return temps_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(TaggedToI, "tagged-to-i")
+  DECLARE_HYDROGEN_ACCESSOR(Change)
+
+  // True when the Hydrogen change allows truncation (no deopt on loss).
+  bool truncating() { return hydrogen()->CanTruncateToInt32(); }
+};
+
+
+// Integer shift (op is the shift token). can_deopt indicates the result
+// may require deoptimization (e.g. logical shift right producing a value
+// outside int32 range — exact condition decided at codegen).
+class LShiftI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LShiftI(Token::Value op, LOperand* left, LOperand* right, bool can_deopt)
+      : op_(op), can_deopt_(can_deopt) {
+    inputs_[0] = left;
+    inputs_[1] = right;
+  }
+
+  Token::Value op() const { return op_; }
+  LOperand* left() { return inputs_[0]; }
+  LOperand* right() { return inputs_[1]; }
+  bool can_deopt() const { return can_deopt_; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ShiftI, "shift-i")
+
+ private:
+  Token::Value op_;
+  bool can_deopt_;
+};
+
+
+// Smi shift: like LShiftI but operating on Smi-tagged values, with one
+// extra temp register.
+class LShiftS V8_FINAL : public LTemplateInstruction<1, 2, 1> {
+ public:
+  LShiftS(Token::Value op, LOperand* left, LOperand* right, LOperand* temp,
+          bool can_deopt) : op_(op), can_deopt_(can_deopt) {
+    inputs_[0] = left;
+    inputs_[1] = right;
+    temps_[0] = temp;
+  }
+
+  Token::Value op() const { return op_; }
+  LOperand* left() { return inputs_[0]; }
+  LOperand* right() { return inputs_[1]; }
+  LOperand* temp() { return temps_[0]; }
+  bool can_deopt() const { return can_deopt_; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ShiftS, "shift-s")
+
+ private:
+  Token::Value op_;
+  bool can_deopt_;
+};
+
+
+// Stores a code object's entry address into a function object.
+// NOTE(review): "V8_FINAL:" is missing a space before ':' — same
+// inconsistency as LStoreNamedGeneric.
+class LStoreCodeEntry V8_FINAL: public LTemplateInstruction<0, 2, 1> {
+ public:
+  LStoreCodeEntry(LOperand* function, LOperand* code_object,
+                  LOperand* temp) {
+    inputs_[0] = function;
+    inputs_[1] = code_object;
+    temps_[0] = temp;
+  }
+
+  LOperand* function() { return inputs_[0]; }
+  LOperand* code_object() { return inputs_[1]; }
+  LOperand* temp() { return temps_[0]; }
+
+  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+
+  DECLARE_CONCRETE_INSTRUCTION(StoreCodeEntry, "store-code-entry")
+  DECLARE_HYDROGEN_ACCESSOR(StoreCodeEntry)
+};
+
+
+// Stores a value into a slot of a context object; one temp register
+// (presumably for the write barrier — confirm in codegen).
+class LStoreContextSlot V8_FINAL : public LTemplateInstruction<0, 2, 1> {
+ public:
+  LStoreContextSlot(LOperand* context, LOperand* value, LOperand* temp) {
+    inputs_[0] = context;
+    inputs_[1] = value;
+    temps_[0] = temp;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* value() { return inputs_[1]; }
+  LOperand* temp() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(StoreContextSlot, "store-context-slot")
+  DECLARE_HYDROGEN_ACCESSOR(StoreContextSlot)
+
+  int slot_index() { return hydrogen()->slot_index(); }
+
+  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+};
+
+
+// Stores a value into a global property cell; two temp registers.
+class LStoreGlobalCell V8_FINAL : public LTemplateInstruction<0, 1, 2> {
+ public:
+  LStoreGlobalCell(LOperand* value, LOperand* temp1, LOperand* temp2) {
+    inputs_[0] = value;
+    temps_[0] = temp1;
+    temps_[1] = temp2;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+  LOperand* temp1() { return temps_[0]; }
+  LOperand* temp2() { return temps_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(StoreGlobalCell, "store-global-cell")
+  DECLARE_HYDROGEN_ACCESSOR(StoreGlobalCell)
+};
+
+
+// Integer subtraction: result = left - right.
+class LSubI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LSubI(LOperand* left, LOperand* right) {
+    inputs_[0] = left;
+    inputs_[1] = right;
+  }
+
+  LOperand* left() { return inputs_[0]; }
+  LOperand* right() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(SubI, "sub-i")
+  DECLARE_HYDROGEN_ACCESSOR(Sub)
+};
+
+
+// Smi subtraction: result = left - right on Smi-tagged values.
+// NOTE(review): unlike its sibling LSubI this class is not marked
+// V8_FINAL — likely an oversight.
+class LSubS: public LTemplateInstruction<1, 2, 0> {
+ public:
+  LSubS(LOperand* left, LOperand* right) {
+    inputs_[0] = left;
+    inputs_[1] = right;
+  }
+
+  LOperand* left() { return inputs_[0]; }
+  LOperand* right() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(SubS, "sub-s")
+  DECLARE_HYDROGEN_ACCESSOR(Sub)
+};
+
+
+// Materializes the current function (no inputs, no temps).
+class LThisFunction V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(ThisFunction, "this-function")
+  DECLARE_HYDROGEN_ACCESSOR(ThisFunction)
+};
+
+
+// Transforms an object's properties to fast (non-dictionary) mode.
+class LToFastProperties V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LToFastProperties(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ToFastProperties, "to-fast-properties")
+  DECLARE_HYDROGEN_ACCESSOR(ToFastProperties)
+};
+
+
+// Transitions an object's elements array from one ElementsKind to
+// another (e.g. Smi -> double). temp2 defaults to NULL: not every
+// transition needs a second scratch register.
+class LTransitionElementsKind V8_FINAL : public LTemplateInstruction<0, 2, 2> {
+ public:
+  LTransitionElementsKind(LOperand* object,
+                          LOperand* context,
+                          LOperand* temp1,
+                          LOperand* temp2 = NULL) {
+    inputs_[0] = object;
+    inputs_[1] = context;
+    temps_[0] = temp1;
+    temps_[1] = temp2;
+  }
+
+  LOperand* object() { return inputs_[0]; }
+  LOperand* context() { return inputs_[1]; }
+  LOperand* temp1() { return temps_[0]; }
+  LOperand* temp2() { return temps_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(TransitionElementsKind,
+                               "transition-elements-kind")
+  DECLARE_HYDROGEN_ACCESSOR(TransitionElementsKind)
+
+  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+
+  // Source/target maps and kinds from the Hydrogen instruction.
+  Handle<Map> original_map() { return hydrogen()->original_map().handle(); }
+  Handle<Map> transitioned_map() {
+    return hydrogen()->transitioned_map().handle();
+  }
+  ElementsKind from_kind() const { return hydrogen()->from_kind(); }
+  ElementsKind to_kind() const { return hydrogen()->to_kind(); }
+};
+
+
+// Deoptimization trap for objects carrying an allocation memento;
+// two temp registers, no Hydrogen accessor.
+class LTrapAllocationMemento V8_FINAL : public LTemplateInstruction<0, 1, 2> {
+ public:
+  LTrapAllocationMemento(LOperand* object, LOperand* temp1, LOperand* temp2) {
+    inputs_[0] = object;
+    temps_[0] = temp1;
+    temps_[1] = temp2;
+  }
+
+  LOperand* object() { return inputs_[0]; }
+  LOperand* temp1() { return temps_[0]; }
+  LOperand* temp2() { return temps_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(TrapAllocationMemento, "trap-allocation-memento")
+};
+
+
+// Truncates a double to an int32, optionally re-tagging the result as a
+// Smi when the Hydrogen representation is Smi (see tag_result()).
+class LTruncateDoubleToIntOrSmi V8_FINAL
+    : public LTemplateInstruction<1, 1, 2> {
+ public:
+  LTruncateDoubleToIntOrSmi(LOperand* value, LOperand* temp1, LOperand* temp2) {
+    inputs_[0] = value;
+    temps_[0] = temp1;
+    temps_[1] = temp2;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+  LOperand* temp1() { return temps_[0]; }
+  LOperand* temp2() { return temps_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(TruncateDoubleToIntOrSmi,
+                               "truncate-double-to-int-or-smi")
+  DECLARE_HYDROGEN_ACCESSOR(UnaryOperation)
+
+  // True when the result should be Smi-tagged rather than a raw int32.
+  bool tag_result() { return hydrogen()->representation().IsSmi(); }
+};
+
+
+// Implements the JavaScript `typeof` operator.
+class LTypeof V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LTypeof(LOperand* context, LOperand* value) {
+    inputs_[0] = context;
+    inputs_[1] = value;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* value() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(Typeof, "typeof")
+};
+
+
+// Branches on whether `typeof value` equals a literal type string.
+class LTypeofIsAndBranch V8_FINAL : public LControlInstruction<1, 2> {
+ public:
+  LTypeofIsAndBranch(LOperand* value, LOperand* temp1, LOperand* temp2) {
+    inputs_[0] = value;
+    temps_[0] = temp1;
+    temps_[1] = temp2;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+  LOperand* temp1() { return temps_[0]; }
+  LOperand* temp2() { return temps_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(TypeofIsAndBranch, "typeof-is-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(TypeofIsAndBranch)
+
+  // The type string being compared against (e.g. "number").
+  Handle<String> type_literal() const { return hydrogen()->type_literal(); }
+
+  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+};
+
+
+// Converts an unsigned 32-bit integer to a double.
+class LUint32ToDouble V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LUint32ToDouble(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(Uint32ToDouble, "uint32-to-double")
+};
+
+
+// Converts an unsigned 32-bit integer to a Smi.
+class LUint32ToSmi V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LUint32ToSmi(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(Uint32ToSmi, "uint32-to-smi")
+  DECLARE_HYDROGEN_ACCESSOR(Change)
+};
+
+
+// Deoptimizes unless `value`'s map equals `map` (used by for-in).
+class LCheckMapValue V8_FINAL : public LTemplateInstruction<0, 2, 1> {
+ public:
+  LCheckMapValue(LOperand* value, LOperand* map, LOperand* temp) {
+    inputs_[0] = value;
+    inputs_[1] = map;
+    temps_[0] = temp;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+  LOperand* map() { return inputs_[1]; }
+  LOperand* temp() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CheckMapValue, "check-map-value")
+};
+
+
+// Loads an object field selected by a dynamic index (for-in support).
+class LLoadFieldByIndex V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LLoadFieldByIndex(LOperand* object, LOperand* index) {
+    inputs_[0] = object;
+    inputs_[1] = index;
+  }
+
+  LOperand* object() { return inputs_[0]; }
+  LOperand* index() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(LoadFieldByIndex, "load-field-by-index")
+};
+
+
+// Wraps the receiver for a non-strict function call (e.g. replaces
+// undefined/null with the global receiver — exact rule in codegen).
+class LWrapReceiver V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LWrapReceiver(LOperand* receiver, LOperand* function) {
+    inputs_[0] = receiver;
+    inputs_[1] = function;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(WrapReceiver, "wrap-receiver")
+  DECLARE_HYDROGEN_ACCESSOR(WrapReceiver)
+
+  LOperand* receiver() { return inputs_[0]; }
+  LOperand* function() { return inputs_[1]; }
+};
+
+
+class LChunkBuilder;
+// A64-specific LChunk: adds platform spill-slot allocation.
+class LPlatformChunk V8_FINAL : public LChunk {
+ public:
+  LPlatformChunk(CompilationInfo* info, HGraph* graph)
+      : LChunk(info, graph) { }
+
+  int GetNextSpillIndex();
+  LOperand* GetNextSpillSlot(RegisterKind kind);
+};
+
+
+// Translates the Hydrogen graph into a Lithium chunk for A64. One Do*
+// method per Hydrogen instruction kind; the Use*/Define*/Temp* helpers
+// express register-allocation constraints to the LAllocator.
+class LChunkBuilder V8_FINAL : public LChunkBuilderBase {
+ public:
+  LChunkBuilder(CompilationInfo* info, HGraph* graph, LAllocator* allocator)
+      : LChunkBuilderBase(graph->zone()),
+        chunk_(NULL),
+        info_(info),
+        graph_(graph),
+        status_(UNUSED),
+        current_instruction_(NULL),
+        current_block_(NULL),
+        allocator_(allocator),
+        position_(RelocInfo::kNoPosition),
+        instruction_pending_deoptimization_environment_(NULL),
+        pending_deoptimization_ast_id_(BailoutId::None()) { }
+
+  // Build the sequence for the graph.
+  LPlatformChunk* Build();
+
+  LInstruction* CheckElideControlInstruction(HControlInstruction* instr);
+
+  // Declare methods that deal with the individual node types.
+#define DECLARE_DO(type) LInstruction* Do##type(H##type* node);
+  HYDROGEN_CONCRETE_INSTRUCTION_LIST(DECLARE_DO)
+#undef DECLARE_DO
+
+  static bool HasMagicNumberForDivision(int32_t divisor);
+
+ private:
+  // Build lifecycle state.
+  enum Status {
+    UNUSED,
+    BUILDING,
+    DONE,
+    ABORTED
+  };
+
+  HGraph* graph() const { return graph_; }
+  Isolate* isolate() const { return info_->isolate(); }
+
+  bool is_unused() const { return status_ == UNUSED; }
+  bool is_building() const { return status_ == BUILDING; }
+  bool is_done() const { return status_ == DONE; }
+  bool is_aborted() const { return status_ == ABORTED; }
+
+  int argument_count() const { return argument_count_; }
+  CompilationInfo* info() const { return info_; }
+  Heap* heap() const { return isolate()->heap(); }
+
+  void Abort(BailoutReason reason);
+
+  // Methods for getting operands for Use / Define / Temp.
+  LUnallocated* ToUnallocated(Register reg);
+  LUnallocated* ToUnallocated(DoubleRegister reg);
+
+  // Methods for setting up define-use relationships.
+  MUST_USE_RESULT LOperand* Use(HValue* value, LUnallocated* operand);
+  MUST_USE_RESULT LOperand* UseFixed(HValue* value, Register fixed_register);
+  MUST_USE_RESULT LOperand* UseFixedDouble(HValue* value,
+                                           DoubleRegister fixed_register);
+
+  // A value that is guaranteed to be allocated to a register.
+  // The operand created by UseRegister is guaranteed to be live until the end
+  // of the instruction. This means that register allocator will not reuse its
+  // register for any other operand inside instruction.
+  MUST_USE_RESULT LOperand* UseRegister(HValue* value);
+
+  // The operand created by UseRegisterAndClobber is guaranteed to be live until
+  // the end of the end of the instruction, and it may also be used as a scratch
+  // register by the instruction implementation.
+  //
+  // This behaves identically to ARM's UseTempRegister. However, it is renamed
+  // to discourage its use in A64, since in most cases it is better to allocate
+  // a temporary register for the Lithium instruction.
+  MUST_USE_RESULT LOperand* UseRegisterAndClobber(HValue* value);
+
+  // The operand created by UseRegisterAtStart is guaranteed to be live only at
+  // instruction start. The register allocator is free to assign the same
+  // register to some other operand used inside instruction (i.e. temporary or
+  // output).
+  MUST_USE_RESULT LOperand* UseRegisterAtStart(HValue* value);
+
+  // An input operand in a register or a constant operand.
+  MUST_USE_RESULT LOperand* UseRegisterOrConstant(HValue* value);
+  MUST_USE_RESULT LOperand* UseRegisterOrConstantAtStart(HValue* value);
+
+  // A constant operand.
+  MUST_USE_RESULT LConstantOperand* UseConstant(HValue* value);
+
+  // An input operand in register, stack slot or a constant operand.
+  // Will not be moved to a register even if one is freely available.
+  virtual MUST_USE_RESULT LOperand* UseAny(HValue* value);
+
+  // Temporary operand that must be in a register.
+  MUST_USE_RESULT LUnallocated* TempRegister();
+
+  // Temporary operand that must be in a fixed double register.
+  MUST_USE_RESULT LOperand* FixedTemp(DoubleRegister reg);
+
+  // Methods for setting up define-use relationships.
+  // Return the same instruction that they are passed.
+  LInstruction* Define(LTemplateResultInstruction<1>* instr,
+                       LUnallocated* result);
+  LInstruction* DefineAsRegister(LTemplateResultInstruction<1>* instr);
+  LInstruction* DefineAsSpilled(LTemplateResultInstruction<1>* instr,
+                                int index);
+
+  LInstruction* DefineSameAsFirst(LTemplateResultInstruction<1>* instr);
+  LInstruction* DefineFixed(LTemplateResultInstruction<1>* instr,
+                            Register reg);
+  LInstruction* DefineFixedDouble(LTemplateResultInstruction<1>* instr,
+                                  DoubleRegister reg);
+
+  enum CanDeoptimize { CAN_DEOPTIMIZE_EAGERLY, CANNOT_DEOPTIMIZE_EAGERLY };
+
+  // By default we assume that instruction sequences generated for calls
+  // cannot deoptimize eagerly and we do not attach environment to this
+  // instruction.
+  LInstruction* MarkAsCall(
+      LInstruction* instr,
+      HInstruction* hinstr,
+      CanDeoptimize can_deoptimize = CANNOT_DEOPTIMIZE_EAGERLY);
+
+  LInstruction* AssignPointerMap(LInstruction* instr);
+  LInstruction* AssignEnvironment(LInstruction* instr);
+
+  void VisitInstruction(HInstruction* current);
+  void DoBasicBlock(HBasicBlock* block);
+
+  LInstruction* DoShift(Token::Value op, HBitwiseBinaryOperation* instr);
+  LInstruction* DoArithmeticD(Token::Value op,
+                              HArithmeticBinaryOperation* instr);
+  LInstruction* DoArithmeticT(Token::Value op,
+                              HBinaryOperation* instr);
+
+  LPlatformChunk* chunk_;
+  CompilationInfo* info_;
+  HGraph* const graph_;
+  Status status_;
+  HInstruction* current_instruction_;
+  HBasicBlock* current_block_;
+  LAllocator* allocator_;
+  int position_;
+  LInstruction* instruction_pending_deoptimization_environment_;
+  BailoutId pending_deoptimization_ast_id_;
+
+  DISALLOW_COPY_AND_ASSIGN(LChunkBuilder);
+};
+
+#undef DECLARE_HYDROGEN_ACCESSOR
+#undef DECLARE_CONCRETE_INSTRUCTION
+
+} } // namespace v8::internal
+
+#endif // V8_A64_LITHIUM_A64_H_
--- /dev/null
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "a64/lithium-codegen-a64.h"
+#include "a64/lithium-gap-resolver-a64.h"
+#include "code-stubs.h"
+#include "stub-cache.h"
+#include "hydrogen-osr.h"
+
+namespace v8 {
+namespace internal {
+
+
+// CallWrapper that records a safepoint (with the given pointer map and
+// deopt mode) immediately after a generated call returns.
+class SafepointGenerator V8_FINAL : public CallWrapper {
+ public:
+  SafepointGenerator(LCodeGen* codegen,
+                     LPointerMap* pointers,
+                     Safepoint::DeoptMode mode)
+      : codegen_(codegen),
+        pointers_(pointers),
+        deopt_mode_(mode) { }
+  virtual ~SafepointGenerator() { }
+
+  virtual void BeforeCall(int call_size) const { }
+
+  virtual void AfterCall() const {
+    codegen_->RecordSafepoint(pointers_, deopt_mode_);
+  }
+
+ private:
+  LCodeGen* codegen_;
+  LPointerMap* pointers_;
+  Safepoint::DeoptMode deopt_mode_;
+};
+
+
+#define __ masm()->
+
+// Emit code to branch if the given condition holds.
+// The code generated here doesn't modify the flags and they must have
+// been set by some prior instructions.
+//
+// The EmitInverted function simply inverts the condition.
+class BranchOnCondition : public BranchGenerator {
+ public:
+  BranchOnCondition(LCodeGen* codegen, Condition cond)
+    : BranchGenerator(codegen),
+      cond_(cond) { }
+
+  virtual void Emit(Label* label) const {
+    __ B(cond_, label);
+  }
+
+  virtual void EmitInverted(Label* label) const {
+    // Inverting 'always' would mean never branching, so emit nothing.
+    if (cond_ != al) {
+      __ B(InvertCondition(cond_), label);
+    }
+  }
+
+ private:
+  Condition cond_;
+};
+
+
+// Emit code to compare lhs and rhs and branch if the condition holds.
+// This uses MacroAssembler's CompareAndBranch function so it will handle
+// converting the comparison to Cbz/Cbnz if the right-hand side is 0.
+//
+// EmitInverted still compares the two operands but inverts the condition.
+class CompareAndBranch : public BranchGenerator {
+ public:
+  CompareAndBranch(LCodeGen* codegen,
+                   Condition cond,
+                   const Register& lhs,
+                   const Operand& rhs)
+    : BranchGenerator(codegen),
+      cond_(cond),
+      lhs_(lhs),
+      rhs_(rhs) { }
+
+  virtual void Emit(Label* label) const {
+    __ CompareAndBranch(lhs_, rhs_, cond_, label);
+  }
+
+  virtual void EmitInverted(Label* label) const {
+    __ CompareAndBranch(lhs_, rhs_, InvertCondition(cond_), label);
+  }
+
+ private:
+  Condition cond_;
+  // NOTE(review): reference members require the referenced Register /
+  // Operand to outlive this generator object — safe only because these
+  // generators are short-lived locals in the code generator.
+  const Register& lhs_;
+  const Operand& rhs_;
+};
+
+
+// Test the input with the given mask and branch if the condition holds.
+// If the condition is 'eq' or 'ne' this will use MacroAssembler's
+// TestAndBranchIfAllClear and TestAndBranchIfAnySet so it will handle the
+// conversion to Tbz/Tbnz when possible.
+class TestAndBranch : public BranchGenerator {
+ public:
+  TestAndBranch(LCodeGen* codegen,
+                Condition cond,
+                const Register& value,
+                uint64_t mask)
+    : BranchGenerator(codegen),
+      cond_(cond),
+      value_(value),
+      mask_(mask) { }
+
+  virtual void Emit(Label* label) const {
+    switch (cond_) {
+      case eq:
+        // (value & mask) == 0.
+        __ TestAndBranchIfAllClear(value_, mask_, label);
+        break;
+      case ne:
+        // (value & mask) != 0.
+        __ TestAndBranchIfAnySet(value_, mask_, label);
+        break;
+      default:
+        // Other conditions need the flags from an explicit Tst.
+        __ Tst(value_, mask_);
+        __ B(cond_, label);
+    }
+  }
+
+  virtual void EmitInverted(Label* label) const {
+    // The inverse of "all clear" is "any set" and vice versa.
+    switch (cond_) {
+      case eq:
+        __ TestAndBranchIfAnySet(value_, mask_, label);
+        break;
+      case ne:
+        __ TestAndBranchIfAllClear(value_, mask_, label);
+        break;
+      default:
+        __ Tst(value_, mask_);
+        __ B(InvertCondition(cond_), label);
+    }
+  }
+
+ private:
+  Condition cond_;
+  const Register& value_;
+  uint64_t mask_;
+};
+
+
+// Test the input and branch if it is non-zero and not a NaN.
+class BranchIfNonZeroNumber : public BranchGenerator {
+ public:
+ BranchIfNonZeroNumber(LCodeGen* codegen, const FPRegister& value,
+ const FPRegister& scratch)
+ : BranchGenerator(codegen), value_(value), scratch_(scratch) { }
+
+ virtual void Emit(Label* label) const {
+ __ Fabs(scratch_, value_);
+ // Compare with 0.0. Because scratch_ is positive, the result can be one of
+ // nZCv (equal), nzCv (greater) or nzCV (unordered).
+ __ Fcmp(scratch_, 0.0);
+ __ B(gt, label);
+ }
+
+ virtual void EmitInverted(Label* label) const {
+ __ Fabs(scratch_, value_);
+ __ Fcmp(scratch_, 0.0);
+ __ B(le, label);
+ }
+
+ private:
+ const FPRegister& value_;
+ const FPRegister& scratch_;
+};
+
+
+// Test the input and branch if it is a heap number.
+class BranchIfHeapNumber : public BranchGenerator {
+ public:
+ BranchIfHeapNumber(LCodeGen* codegen, const Register& value)
+ : BranchGenerator(codegen), value_(value) { }
+
+ virtual void Emit(Label* label) const {
+ __ JumpIfHeapNumber(value_, label);
+ }
+
+ virtual void EmitInverted(Label* label) const {
+ __ JumpIfNotHeapNumber(value_, label);
+ }
+
+ private:
+ const Register& value_;
+};
+
+
+// Test the input and branch if it is the specified root value.
+class BranchIfRoot : public BranchGenerator {
+ public:
+ BranchIfRoot(LCodeGen* codegen, const Register& value,
+ Heap::RootListIndex index)
+ : BranchGenerator(codegen), value_(value), index_(index) { }
+
+ virtual void Emit(Label* label) const {
+ __ JumpIfRoot(value_, index_, label);
+ }
+
+ virtual void EmitInverted(Label* label) const {
+ __ JumpIfNotRoot(value_, index_, label);
+ }
+
+ private:
+ const Register& value_;
+ const Heap::RootListIndex index_;
+};
+
+
+// Serializes a deoptimization environment (and, recursively, its outer
+// environments) into a Translation: a frame-begin command followed by
+// one command per environment value.
+void LCodeGen::WriteTranslation(LEnvironment* environment,
+                                Translation* translation) {
+  if (environment == NULL) return;
+
+  // The translation includes one command per value in the environment.
+  int translation_size = environment->translation_size();
+  // The output frame height does not include the parameters.
+  int height = translation_size - environment->parameter_count();
+
+  // Outer frames are written first so the translation is ordered
+  // outermost-to-innermost.
+  WriteTranslation(environment->outer(), translation);
+  bool has_closure_id = !info()->closure().is_null() &&
+      !info()->closure().is_identical_to(environment->closure());
+  int closure_id = has_closure_id
+      ? DefineDeoptimizationLiteral(environment->closure())
+      : Translation::kSelfLiteralId;
+
+  switch (environment->frame_type()) {
+    case JS_FUNCTION:
+      translation->BeginJSFrame(environment->ast_id(), closure_id, height);
+      break;
+    case JS_CONSTRUCT:
+      translation->BeginConstructStubFrame(closure_id, translation_size);
+      break;
+    case JS_GETTER:
+      // Getter stub frames carry exactly one value: the receiver.
+      ASSERT(translation_size == 1);
+      ASSERT(height == 0);
+      translation->BeginGetterStubFrame(closure_id);
+      break;
+    case JS_SETTER:
+      // Setter stub frames carry two values: receiver and stored value.
+      ASSERT(translation_size == 2);
+      ASSERT(height == 0);
+      translation->BeginSetterStubFrame(closure_id);
+      break;
+    case STUB:
+      translation->BeginCompiledStubFrame();
+      break;
+    case ARGUMENTS_ADAPTOR:
+      translation->BeginArgumentsAdaptorFrame(closure_id, translation_size);
+      break;
+    default:
+      UNREACHABLE();
+  }
+
+  // Emit one translation command per environment value; captured-object
+  // bookkeeping is threaded through AddToTranslation.
+  int object_index = 0;
+  int dematerialized_index = 0;
+  for (int i = 0; i < translation_size; ++i) {
+    LOperand* value = environment->values()->at(i);
+
+    AddToTranslation(environment,
+                     translation,
+                     value,
+                     environment->HasTaggedValueAt(i),
+                     environment->HasUint32ValueAt(i),
+                     &object_index,
+                     &dematerialized_index);
+  }
+}
+
+
+// Emits the translation command for a single environment value: a
+// materialization marker expands (recursively) into a captured/arguments
+// object, otherwise the operand's location (stack slot, register,
+// constant) is recorded with its representation (tagged/uint32/int32).
+void LCodeGen::AddToTranslation(LEnvironment* environment,
+                                Translation* translation,
+                                LOperand* op,
+                                bool is_tagged,
+                                bool is_uint32,
+                                int* object_index_pointer,
+                                int* dematerialized_index_pointer) {
+  if (op == LEnvironment::materialization_marker()) {
+    int object_index = (*object_index_pointer)++;
+    // A duplicate refers back to an earlier object instead of repeating
+    // its field values.
+    if (environment->ObjectIsDuplicateAt(object_index)) {
+      int dupe_of = environment->ObjectDuplicateOfAt(object_index);
+      translation->DuplicateObject(dupe_of);
+      return;
+    }
+    int object_length = environment->ObjectLengthAt(object_index);
+    if (environment->ObjectIsArgumentsAt(object_index)) {
+      translation->BeginArgumentsObject(object_length);
+    } else {
+      translation->BeginCapturedObject(object_length);
+    }
+    // The object's field values live past the regular translation values,
+    // at offset translation_size + dematerialized_index.
+    int dematerialized_index = *dematerialized_index_pointer;
+    int env_offset = environment->translation_size() + dematerialized_index;
+    *dematerialized_index_pointer += object_length;
+    for (int i = 0; i < object_length; ++i) {
+      LOperand* value = environment->values()->at(env_offset + i);
+      AddToTranslation(environment,
+                       translation,
+                       value,
+                       environment->HasTaggedValueAt(env_offset + i),
+                       environment->HasUint32ValueAt(env_offset + i),
+                       object_index_pointer,
+                       dematerialized_index_pointer);
+    }
+    return;
+  }
+
+  if (op->IsStackSlot()) {
+    if (is_tagged) {
+      translation->StoreStackSlot(op->index());
+    } else if (is_uint32) {
+      translation->StoreUint32StackSlot(op->index());
+    } else {
+      translation->StoreInt32StackSlot(op->index());
+    }
+  } else if (op->IsDoubleStackSlot()) {
+    translation->StoreDoubleStackSlot(op->index());
+  } else if (op->IsArgument()) {
+    ASSERT(is_tagged);
+    int src_index = GetStackSlotCount() + op->index();
+    translation->StoreStackSlot(src_index);
+  } else if (op->IsRegister()) {
+    Register reg = ToRegister(op);
+    if (is_tagged) {
+      translation->StoreRegister(reg);
+    } else if (is_uint32) {
+      translation->StoreUint32Register(reg);
+    } else {
+      translation->StoreInt32Register(reg);
+    }
+  } else if (op->IsDoubleRegister()) {
+    DoubleRegister reg = ToDoubleRegister(op);
+    translation->StoreDoubleRegister(reg);
+  } else if (op->IsConstantOperand()) {
+    HConstant* constant = chunk()->LookupConstant(LConstantOperand::cast(op));
+    int src_index = DefineDeoptimizationLiteral(constant->handle(isolate()));
+    translation->StoreLiteral(src_index);
+  } else {
+    UNREACHABLE();
+  }
+}
+
+
+// Return the index of |literal| in the deoptimization literal list,
+// appending it first if not already present (deduplicated by identity).
+int LCodeGen::DefineDeoptimizationLiteral(Handle<Object> literal) {
+  int result = deoptimization_literals_.length();
+  for (int i = 0; i < deoptimization_literals_.length(); ++i) {
+    if (deoptimization_literals_[i].is_identical_to(literal)) return i;
+  }
+  deoptimization_literals_.Add(literal, zone());
+  return result;
+}
+
+
+// Register |environment| as a deopt point: count the (possibly inlined)
+// frames, write its translation, and remember the deopt index. A lazy-deopt
+// safepoint additionally records the current pc for patching. Idempotent:
+// does nothing if the environment was registered before.
+void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment,
+                                                    Safepoint::DeoptMode mode) {
+  if (!environment->HasBeenRegistered()) {
+    int frame_count = 0;
+    int jsframe_count = 0;
+    for (LEnvironment* e = environment; e != NULL; e = e->outer()) {
+      ++frame_count;
+      if (e->frame_type() == JS_FUNCTION) {
+        ++jsframe_count;
+      }
+    }
+    Translation translation(&translations_, frame_count, jsframe_count, zone());
+    WriteTranslation(environment, &translation);
+    int deoptimization_index = deoptimizations_.length();
+    int pc_offset = masm()->pc_offset();
+    environment->Register(deoptimization_index,
+                          translation.index(),
+                          (mode == Safepoint::kLazyDeopt) ? pc_offset : -1);
+    deoptimizations_.Add(environment, zone());
+  }
+}
+
+
+// Convenience wrapper: call |code| recording a simple safepoint.
+void LCodeGen::CallCode(Handle<Code> code,
+                        RelocInfo::Mode mode,
+                        LInstruction* instr) {
+  CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT);
+}
+
+
+// Emit a call to |code| and record the safepoint required for lazy deopt.
+// The constant pool is blocked so the safepoint pc stays adjacent to the
+// call instruction.
+void LCodeGen::CallCodeGeneric(Handle<Code> code,
+                               RelocInfo::Mode mode,
+                               LInstruction* instr,
+                               SafepointMode safepoint_mode) {
+  ASSERT(instr != NULL);
+
+  Assembler::BlockConstPoolScope scope(masm_);
+  __ Call(code, mode);
+  RecordSafepointWithLazyDeopt(instr, safepoint_mode);
+
+  if ((code->kind() == Code::BINARY_OP_IC) ||
+      (code->kind() == Code::COMPARE_IC)) {
+    // Signal that we don't inline smi code before these stubs in the
+    // optimizing code generator.
+    InlineSmiCheckInfo::EmitNotInlined(masm());
+  }
+}
+
+
+// Call a JS function value via CallFunctionStub. Fixed register contract:
+// function in x1, context in cp, result in x0.
+void LCodeGen::DoCallFunction(LCallFunction* instr) {
+  ASSERT(ToRegister(instr->context()).is(cp));
+  ASSERT(ToRegister(instr->function()).Is(x1));
+  ASSERT(ToRegister(instr->result()).Is(x0));
+
+  int arity = instr->arity();
+  CallFunctionStub stub(arity, instr->hydrogen()->function_flags());
+  CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
+}
+
+
+// Implement `new f(...)` via CallConstructStub. Constructor in x1, argument
+// count in x0, result in x0; x2 carries undefined since optimized code has
+// no type-feedback cell for construct sites.
+void LCodeGen::DoCallNew(LCallNew* instr) {
+  ASSERT(ToRegister(instr->context()).is(cp));
+  ASSERT(instr->IsMarkedAsCall());
+  ASSERT(ToRegister(instr->constructor()).is(x1));
+
+  __ Mov(x0, instr->arity());
+  // No cell in x2 for construct type feedback in optimized code.
+  Handle<Object> undefined_value(isolate()->factory()->undefined_value());
+  __ Mov(x2, Operand(undefined_value));
+
+  CallConstructStub stub(NO_CALL_FUNCTION_FLAGS);
+  CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
+
+  ASSERT(ToRegister(instr->result()).is(x0));
+}
+
+
+// Implement `new Array(...)` with specialized constructor stubs chosen by
+// argument count. With exactly one argument and a packed elements kind, the
+// array may need to become holey (a nonzero length argument creates holes),
+// so the first argument is inspected at run time to pick the stub.
+void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
+  ASSERT(instr->IsMarkedAsCall());
+  ASSERT(ToRegister(instr->context()).is(cp));
+  ASSERT(ToRegister(instr->constructor()).is(x1));
+
+  __ Mov(x0, Operand(instr->arity()));
+  __ Mov(x2, Operand(factory()->undefined_value()));
+
+  ElementsKind kind = instr->hydrogen()->elements_kind();
+  AllocationSiteOverrideMode override_mode =
+      (AllocationSite::GetMode(kind) == TRACK_ALLOCATION_SITE)
+          ? DISABLE_ALLOCATION_SITES
+          : DONT_OVERRIDE;
+
+  if (instr->arity() == 0) {
+    ArrayNoArgumentConstructorStub stub(kind, override_mode);
+    CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
+  } else if (instr->arity() == 1) {
+    Label done;
+    if (IsFastPackedElementsKind(kind)) {
+      Label packed_case;
+
+      // We might need to create a holey array; look at the first argument.
+      __ Peek(x10, 0);
+      __ Cbz(x10, &packed_case);
+
+      // Nonzero length: construct with the holey variant of |kind|.
+      ElementsKind holey_kind = GetHoleyElementsKind(kind);
+      ArraySingleArgumentConstructorStub stub(holey_kind, override_mode);
+      CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
+      __ B(&done);
+      __ Bind(&packed_case);
+    }
+
+    ArraySingleArgumentConstructorStub stub(kind, override_mode);
+    CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
+    __ Bind(&done);
+  } else {
+    ArrayNArgumentsConstructorStub stub(kind, override_mode);
+    CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
+  }
+
+  ASSERT(ToRegister(instr->result()).is(x0));
+}
+
+
+// Call a runtime |function| and record the simple safepoint needed for
+// lazy deoptimization.
+void LCodeGen::CallRuntime(const Runtime::Function* function,
+                           int num_arguments,
+                           LInstruction* instr,
+                           SaveFPRegsMode save_doubles) {
+  ASSERT(instr != NULL);
+
+  __ CallRuntime(function, num_arguments, save_doubles);
+
+  RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
+}
+
+
+// Materialize the |context| operand into cp for use in deferred code,
+// handling register, stack-slot, and constant (heap object) locations.
+void LCodeGen::LoadContextFromDeferred(LOperand* context) {
+  if (context->IsRegister()) {
+    __ Mov(cp, ToRegister(context));
+  } else if (context->IsStackSlot()) {
+    __ Ldr(cp, ToMemOperand(context));
+  } else if (context->IsConstantOperand()) {
+    HConstant* constant =
+        chunk_->LookupConstant(LConstantOperand::cast(context));
+    __ LoadHeapObject(cp,
+                      Handle<HeapObject>::cast(constant->handle(isolate())));
+  } else {
+    UNREACHABLE();
+  }
+}
+
+
+// Call runtime function |id| from deferred code: restore cp from |context|,
+// call with doubles saved, and record a with-registers safepoint (deferred
+// code runs with all registers live).
+void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id,
+                                       int argc,
+                                       LInstruction* instr,
+                                       LOperand* context) {
+  LoadContextFromDeferred(context);
+  __ CallRuntimeSaveDoubles(id);
+  RecordSafepointWithRegisters(
+      instr->pointer_map(), argc, Safepoint::kNoLazyDeopt);
+}
+
+
+// Record a source |position| for the current pc (no-op for kNoPosition).
+void LCodeGen::RecordAndWritePosition(int position) {
+  if (position == RelocInfo::kNoPosition) return;
+  masm()->positions_recorder()->RecordPosition(position);
+  masm()->positions_recorder()->WriteRecordedPositions();
+}
+
+
+// Record the lazy-deopt safepoint for |instr|, either as a simple safepoint
+// or (for calls made with registers live) a with-registers safepoint.
+void LCodeGen::RecordSafepointWithLazyDeopt(LInstruction* instr,
+                                            SafepointMode safepoint_mode) {
+  if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
+    RecordSafepoint(instr->pointer_map(), Safepoint::kLazyDeopt);
+  } else {
+    ASSERT(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
+    RecordSafepointWithRegisters(
+        instr->pointer_map(), 0, Safepoint::kLazyDeopt);
+  }
+}
+
+
+// Core safepoint recorder: define a safepoint of |kind| at the current pc
+// and mark every live tagged pointer (stack slots always; registers only
+// for with-registers safepoints) so the GC can find and update them.
+void LCodeGen::RecordSafepoint(LPointerMap* pointers,
+                               Safepoint::Kind kind,
+                               int arguments,
+                               Safepoint::DeoptMode deopt_mode) {
+  ASSERT(expected_safepoint_kind_ == kind);
+
+  const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands();
+  Safepoint safepoint = safepoints_.DefineSafepoint(
+      masm(), kind, arguments, deopt_mode);
+
+  for (int i = 0; i < operands->length(); i++) {
+    LOperand* pointer = operands->at(i);
+    if (pointer->IsStackSlot()) {
+      safepoint.DefinePointerSlot(pointer->index(), zone());
+    } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) {
+      safepoint.DefinePointerRegister(ToRegister(pointer), zone());
+    }
+  }
+
+  if (kind & Safepoint::kWithRegisters) {
+    // Register cp always contains a pointer to the context.
+    safepoint.DefinePointerRegister(cp, zone());
+  }
+}
+
+// Simple safepoint with an explicit pointer map.
+void LCodeGen::RecordSafepoint(LPointerMap* pointers,
+                               Safepoint::DeoptMode deopt_mode) {
+  RecordSafepoint(pointers, Safepoint::kSimple, 0, deopt_mode);
+}
+
+
+// Simple safepoint with no live pointers (empty map).
+void LCodeGen::RecordSafepoint(Safepoint::DeoptMode deopt_mode) {
+  LPointerMap empty_pointers(zone());
+  RecordSafepoint(&empty_pointers, deopt_mode);
+}
+
+
+// Safepoint for a call made with all general registers live.
+void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
+                                            int arguments,
+                                            Safepoint::DeoptMode deopt_mode) {
+  RecordSafepoint(pointers, Safepoint::kWithRegisters, arguments, deopt_mode);
+}
+
+
+// Safepoint for a call made with general and double registers live.
+void LCodeGen::RecordSafepointWithRegistersAndDoubles(
+    LPointerMap* pointers, int arguments, Safepoint::DeoptMode deopt_mode) {
+  RecordSafepoint(
+      pointers, Safepoint::kWithRegistersAndDoubles, arguments, deopt_mode);
+}
+
+
+// Top-level code generation driver: prologue, instruction bodies, deferred
+// code, deopt jump table, then safepoint table. Returns false on abort.
+bool LCodeGen::GenerateCode() {
+  LPhase phase("Z_Code generation", chunk());
+  ASSERT(is_unused());
+  status_ = GENERATING;
+
+  // Open a frame scope to indicate that there is a frame on the stack. The
+  // NONE indicates that the scope shouldn't actually generate code to set up
+  // the frame (that is done in GeneratePrologue).
+  FrameScope frame_scope(masm_, StackFrame::NONE);
+
+  return GeneratePrologue() &&
+      GenerateBody() &&
+      GenerateDeferredCode() &&
+      GenerateDeoptJumpTable() &&
+      GenerateSafepointTable();
+}
+
+
+// Spill every allocated double register to the stack slots reserved in the
+// eager frame (used by stubs that must preserve caller doubles).
+// Counterpart of RestoreCallerDoubles; the slot order must match.
+void LCodeGen::SaveCallerDoubles() {
+  ASSERT(info()->saves_caller_doubles());
+  ASSERT(NeedsEagerFrame());
+  Comment(";;; Save clobbered callee double registers");
+  BitVector* doubles = chunk()->allocated_double_registers();
+  BitVector::Iterator iterator(doubles);
+  int count = 0;
+  while (!iterator.Done()) {
+    // TODO(all): Is this supposed to save just the callee-saved doubles? It
+    // looks like it's saving all of them.
+    FPRegister value = FPRegister::FromAllocationIndex(iterator.Current());
+    __ Poke(value, count * kDoubleSize);
+    iterator.Advance();
+    count++;
+  }
+}
+
+
+// Reload every allocated double register from the stack slots written by
+// SaveCallerDoubles (iteration order is identical, so slots line up).
+void LCodeGen::RestoreCallerDoubles() {
+  ASSERT(info()->saves_caller_doubles());
+  ASSERT(NeedsEagerFrame());
+  Comment(";;; Restore clobbered callee double registers");
+  BitVector* doubles = chunk()->allocated_double_registers();
+  BitVector::Iterator iterator(doubles);
+  int count = 0;
+  while (!iterator.Done()) {
+    // TODO(all): Is this supposed to restore just the callee-saved doubles? It
+    // looks like it's restoring all of them.
+    FPRegister value = FPRegister::FromAllocationIndex(iterator.Current());
+    __ Peek(value, count * kDoubleSize);
+    iterator.Advance();
+    count++;
+  }
+}
+
+
+// Emit the function prologue: optional receiver fix-up for classic-mode
+// functions, frame construction, stack-slot reservation, caller-double
+// saving, local-context allocation (with parameter copying and write
+// barriers), and optional call tracing. Returns false if code generation
+// aborted.
+bool LCodeGen::GeneratePrologue() {
+  ASSERT(is_generating());
+
+  if (info()->IsOptimizing()) {
+    ProfileEntryHookStub::MaybeCallEntryHook(masm_);
+
+    // TODO(all): Add support for stop_t FLAG in DEBUG mode.
+
+    // Classic mode functions and builtins need to replace the receiver with the
+    // global proxy when called as functions (without an explicit receiver
+    // object).
+    if (info_->this_has_uses() &&
+        info_->is_classic_mode() &&
+        !info_->is_native()) {
+      Label ok;
+      int receiver_offset = info_->scope()->num_parameters() * kXRegSizeInBytes;
+      __ Peek(x10, receiver_offset);
+      __ JumpIfNotRoot(x10, Heap::kUndefinedValueRootIndex, &ok);
+
+      // Receiver is undefined: substitute the global receiver object.
+      __ Ldr(x10, GlobalObjectMemOperand());
+      __ Ldr(x10, FieldMemOperand(x10, GlobalObject::kGlobalReceiverOffset));
+      __ Poke(x10, receiver_offset);
+
+      __ Bind(&ok);
+    }
+  }
+
+  ASSERT(__ StackPointer().Is(jssp));
+  info()->set_prologue_offset(masm_->pc_offset());
+  if (NeedsEagerFrame()) {
+    __ Prologue(info()->IsStub() ? BUILD_STUB_FRAME : BUILD_FUNCTION_FRAME);
+    frame_is_built_ = true;
+    info_->AddNoFrameRange(0, masm_->pc_offset());
+  }
+
+  // Reserve space for the stack slots needed by the code.
+  int slots = GetStackSlotCount();
+  if (slots > 0) {
+    __ Claim(slots, kPointerSize);
+  }
+
+  if (info()->saves_caller_doubles()) {
+    SaveCallerDoubles();
+  }
+
+  // Allocate a local context if needed.
+  int heap_slots = info()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
+  if (heap_slots > 0) {
+    Comment(";;; Allocate local context");
+    // Argument to NewContext is the function, which is in x1.
+    if (heap_slots <= FastNewContextStub::kMaximumSlots) {
+      FastNewContextStub stub(heap_slots);
+      __ CallStub(&stub);
+    } else {
+      __ Push(x1);
+      __ CallRuntime(Runtime::kNewFunctionContext, 1);
+    }
+    RecordSafepoint(Safepoint::kNoLazyDeopt);
+    // Context is returned in x0. It replaces the context passed to us. It's
+    // saved in the stack and kept live in cp.
+    __ Mov(cp, x0);
+    __ Str(x0, MemOperand(fp, StandardFrameConstants::kContextOffset));
+    // Copy any necessary parameters into the context.
+    int num_parameters = scope()->num_parameters();
+    for (int i = 0; i < num_parameters; i++) {
+      Variable* var = scope()->parameter(i);
+      if (var->IsContextSlot()) {
+        Register value = x0;
+        Register scratch = x3;
+
+        int parameter_offset = StandardFrameConstants::kCallerSPOffset +
+            (num_parameters - 1 - i) * kPointerSize;
+        // Load parameter from stack.
+        __ Ldr(value, MemOperand(fp, parameter_offset));
+        // Store it in the context.
+        MemOperand target = ContextMemOperand(cp, var->index());
+        __ Str(value, target);
+        // Update the write barrier. This clobbers value and scratch.
+        __ RecordWriteContextSlot(cp, target.offset(), value, scratch,
+                                  GetLinkRegisterState(), kSaveFPRegs);
+      }
+    }
+    Comment(";;; End allocate local context");
+  }
+
+  // Trace the call.
+  if (FLAG_trace && info()->IsOptimizing()) {
+    // We have not executed any compiled code yet, so cp still holds the
+    // incoming context.
+    __ CallRuntime(Runtime::kTraceEnter, 0);
+  }
+
+  return !is_aborted();
+}
+
+
+void LCodeGen::GenerateOsrPrologue() {
+  // Generate the OSR entry prologue at the first unknown OSR value, or if there
+  // are none, at the OSR entrypoint instruction.
+  if (osr_pc_offset_ >= 0) return;
+
+  osr_pc_offset_ = masm()->pc_offset();
+
+  // Adjust the frame size, subsuming the unoptimized frame into the
+  // optimized frame.
+  int slots = GetStackSlotCount() - graph()->osr()->UnoptimizedFrameSlots();
+  ASSERT(slots >= 0);
+  // Claim only the additional slots the optimized frame needs.
+  __ Claim(slots);
+}
+
+
+// Emit the out-of-line bodies of all deferred operations after the main
+// instruction stream. Stub code without an eager frame builds a minimal
+// STUB frame around each deferred body and tears it down afterwards.
+bool LCodeGen::GenerateDeferredCode() {
+  ASSERT(is_generating());
+  if (deferred_.length() > 0) {
+    for (int i = 0; !is_aborted() && (i < deferred_.length()); i++) {
+      LDeferredCode* code = deferred_[i];
+
+      HValue* value =
+          instructions_->at(code->instruction_index())->hydrogen_value();
+      RecordAndWritePosition(value->position());
+
+      Comment(";;; <@%d,#%d> "
+              "-------------------- Deferred %s --------------------",
+              code->instruction_index(),
+              code->instr()->hydrogen_value()->id(),
+              code->instr()->Mnemonic());
+
+      __ Bind(code->entry());
+
+      if (NeedsDeferredFrame()) {
+        Comment(";;; Build frame");
+        ASSERT(!frame_is_built_);
+        ASSERT(info()->IsStub());
+        frame_is_built_ = true;
+        __ Push(lr, fp, cp);
+        // fp is used as a scratch register for the STUB marker here; it is
+        // reset to the frame pointer by the following Add.
+        __ Mov(fp, Operand(Smi::FromInt(StackFrame::STUB)));
+        __ Push(fp);
+        __ Add(fp, __ StackPointer(),
+               StandardFrameConstants::kFixedFrameSizeFromFp);
+        Comment(";;; Deferred code");
+      }
+
+      code->Generate();
+
+      if (NeedsDeferredFrame()) {
+        Comment(";;; Destroy frame");
+        ASSERT(frame_is_built_);
+        // Discard the STUB marker into xzr, then restore cp, fp and lr.
+        __ Pop(xzr, cp, fp, lr);
+        frame_is_built_ = false;
+      }
+
+      __ B(code->exit());
+    }
+  }
+
+  // Force constant pool emission at the end of the deferred code to make
+  // sure that no constant pools are emitted after deferred code because
+  // deferred code generation is the last step which generates code. The two
+  // following steps will only output data used by crankshaft.
+  masm()->CheckConstPool(true, false);
+
+  return !is_aborted();
+}
+
+
+// Emit the shared deopt jump table at the end of the code. Entries that
+// need a frame (stub code) funnel through a single frame-building sequence;
+// others restore caller doubles if needed and call the deopt entry directly.
+bool LCodeGen::GenerateDeoptJumpTable() {
+  if (deopt_jump_table_.length() > 0) {
+    Comment(";;; -------------------- Jump table --------------------");
+  }
+  Label table_start;
+  __ bind(&table_start);
+  Label needs_frame;
+  for (int i = 0; i < deopt_jump_table_.length(); i++) {
+    __ Bind(&deopt_jump_table_[i].label);
+    Address entry = deopt_jump_table_[i].address;
+    Deoptimizer::BailoutType type = deopt_jump_table_[i].bailout_type;
+    int id = Deoptimizer::GetDeoptimizationId(isolate(), entry, type);
+    if (id == Deoptimizer::kNotDeoptimizationEntry) {
+      Comment(";;; jump table entry %d.", i);
+    } else {
+      Comment(";;; jump table entry %d: deoptimization bailout %d.", i, id);
+    }
+    if (deopt_jump_table_[i].needs_frame) {
+      ASSERT(!info()->saves_caller_doubles());
+      __ Mov(__ Tmp0(), Operand(ExternalReference::ForDeoptEntry(entry)));
+      if (needs_frame.is_bound()) {
+        // The frame-building sequence was already emitted; reuse it.
+        __ B(&needs_frame);
+      } else {
+        __ Bind(&needs_frame);
+        // This variant of deopt can only be used with stubs. Since we don't
+        // have a function pointer to install in the stack frame that we're
+        // building, install a special marker there instead.
+        // TODO(jochen): Revisit the use of TmpX().
+        ASSERT(info()->IsStub());
+        __ Mov(__ Tmp1(), Operand(Smi::FromInt(StackFrame::STUB)));
+        __ Push(lr, fp, cp, __ Tmp1());
+        __ Add(fp, __ StackPointer(), 2 * kPointerSize);
+        __ Call(__ Tmp0());
+      }
+    } else {
+      if (info()->saves_caller_doubles()) {
+        ASSERT(info()->IsStub());
+        RestoreCallerDoubles();
+      }
+      __ Call(entry, RelocInfo::RUNTIME_ENTRY);
+    }
+    masm()->CheckConstPool(false, false);
+  }
+
+  // Force constant pool emission at the end of the deopt jump table to make
+  // sure that no constant pools are emitted after.
+  masm()->CheckConstPool(true, false);
+
+  // The deoptimization jump table is the last part of the instruction
+  // sequence. Mark the generated code as done unless we bailed out.
+  if (!is_aborted()) status_ = DONE;
+  return !is_aborted();
+}
+
+
+// Emit the accumulated safepoint table after all code has been generated.
+bool LCodeGen::GenerateSafepointTable() {
+  ASSERT(is_done());
+  safepoints_.Emit(masm(), GetStackSlotCount());
+  return !is_aborted();
+}
+
+
+// Attach generated metadata (stack slots, safepoint table offset, deopt
+// data, map dependencies) to the finished Code object.
+void LCodeGen::FinishCode(Handle<Code> code) {
+  ASSERT(is_done());
+  code->set_stack_slots(GetStackSlotCount());
+  code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
+  RegisterDependentCodeForEmbeddedMaps(code);
+  PopulateDeoptimizationData(code);
+  info()->CommitDependencies(code);
+}
+
+
+// Abort code generation, recording |reason| for the bailout.
+void LCodeGen::Abort(BailoutReason reason) {
+  info()->set_bailout_reason(reason);
+  status_ = ABORTED;
+}
+
+
+// Build the DeoptimizationInputData for |code|: translation byte array,
+// literal array, OSR info, and one entry (ast id, translation index,
+// arguments height, pc) per registered deopt environment.
+void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
+  int length = deoptimizations_.length();
+  if (length == 0) return;
+
+  Handle<DeoptimizationInputData> data =
+      factory()->NewDeoptimizationInputData(length, TENURED);
+
+  Handle<ByteArray> translations =
+      translations_.CreateByteArray(isolate()->factory());
+  data->SetTranslationByteArray(*translations);
+  data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_));
+
+  Handle<FixedArray> literals =
+      factory()->NewFixedArray(deoptimization_literals_.length(), TENURED);
+  { AllowDeferredHandleDereference copy_handles;
+    for (int i = 0; i < deoptimization_literals_.length(); i++) {
+      literals->set(i, *deoptimization_literals_[i]);
+    }
+    data->SetLiteralArray(*literals);
+  }
+
+  data->SetOsrAstId(Smi::FromInt(info_->osr_ast_id().ToInt()));
+  data->SetOsrPcOffset(Smi::FromInt(osr_pc_offset_));
+
+  // Populate the deoptimization entries.
+  for (int i = 0; i < length; i++) {
+    LEnvironment* env = deoptimizations_[i];
+    data->SetAstId(i, env->ast_id());
+    data->SetTranslationIndex(i, Smi::FromInt(env->translation_index()));
+    data->SetArgumentsStackHeight(i,
+                                  Smi::FromInt(env->arguments_stack_height()));
+    data->SetPc(i, Smi::FromInt(env->pc_offset()));
+  }
+
+  code->set_deoptimization_data(*data);
+}
+
+
+// Seed the deopt literal list with the inlined closures so their indices
+// come first; remember how many there are for the deopt data.
+void LCodeGen::PopulateDeoptimizationLiteralsWithInlinedFunctions() {
+  ASSERT(deoptimization_literals_.length() == 0);
+
+  const ZoneList<Handle<JSFunction> >* inlined_closures =
+      chunk()->inlined_closures();
+
+  for (int i = 0, length = inlined_closures->length(); i < length; i++) {
+    DefineDeoptimizationLiteral(inlined_closures->at(i));
+  }
+
+  inlined_function_count_ = deoptimization_literals_.length();
+}
+
+
+// Common preamble for all Deoptimize* helpers: register the environment,
+// resolve the bailout type (LAZY for stubs, EAGER otherwise, unless
+// overridden), validate the deopt entry, and optionally emit the
+// --deopt-every-n-times stress counter. Returns the resolved bailout type.
+Deoptimizer::BailoutType LCodeGen::DeoptimizeHeader(
+    LEnvironment* environment,
+    Deoptimizer::BailoutType* override_bailout_type) {
+  RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
+  ASSERT(environment->HasBeenRegistered());
+  ASSERT(info()->IsOptimizing() || info()->IsStub());
+  int id = environment->deoptimization_index();
+  Deoptimizer::BailoutType bailout_type =
+      info()->IsStub() ? Deoptimizer::LAZY : Deoptimizer::EAGER;
+  if (override_bailout_type) bailout_type = *override_bailout_type;
+  Address entry =
+      Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type);
+
+  if (entry == NULL) {
+    Abort(kBailoutWasNotPrepared);
+    return bailout_type;
+  }
+
+  if (FLAG_deopt_every_n_times != 0 && !info()->IsStub()) {
+    Label not_zero;
+    ExternalReference count = ExternalReference::stress_deopt_count(isolate());
+
+    // Preserve x0-x2 and the flags while decrementing the stress counter.
+    __ Push(x0, x1, x2);
+    __ Mrs(x2, NZCV);
+    __ Mov(x0, Operand(count));
+    __ Ldr(w1, MemOperand(x0));
+    __ Subs(x1, x1, 1);
+    __ B(gt, &not_zero);
+    // Counter reached zero: reset it and force an eager deopt.
+    __ Mov(w1, FLAG_deopt_every_n_times);
+    __ Str(w1, MemOperand(x0));
+    // Pop in reverse order of the Push above so each register gets its own
+    // value back (Push(a, b, c) pairs with Pop(c, b, a)).
+    __ Pop(x2, x1, x0);
+    ASSERT(frame_is_built_);
+    __ Call(entry, RelocInfo::RUNTIME_ENTRY);
+    __ Unreachable();
+
+    __ Bind(&not_zero);
+    // Store the decremented counter, restore the flags and registers.
+    __ Str(w1, MemOperand(x0));
+    __ Msr(NZCV, x2);
+    __ Pop(x2, x1, x0);
+  }
+
+  return bailout_type;
+}
+
+
+// Emit an unconditional deopt to the entry for |environment|. If a frame is
+// built and no caller doubles need restoring, call the entry directly;
+// otherwise branch through the shared deopt jump table (reusing the last
+// entry when it matches, since consecutive deopts often share a target).
+void LCodeGen::Deoptimize(LEnvironment* environment,
+                          Deoptimizer::BailoutType bailout_type) {
+  ASSERT(environment->HasBeenRegistered());
+  ASSERT(info()->IsOptimizing() || info()->IsStub());
+  int id = environment->deoptimization_index();
+  Address entry =
+      Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type);
+
+  if (info()->ShouldTrapOnDeopt()) {
+    __ Debug("trap_on_deopt", __LINE__, BREAK);
+  }
+
+  ASSERT(info()->IsStub() || frame_is_built_);
+  // Go through jump table if we need to build frame, or restore caller doubles.
+  if (frame_is_built_ && !info()->saves_caller_doubles()) {
+    __ Call(entry, RelocInfo::RUNTIME_ENTRY);
+  } else {
+    // We often have several deopts to the same entry, reuse the last
+    // jump entry if this is the case.
+    if (deopt_jump_table_.is_empty() ||
+        (deopt_jump_table_.last().address != entry) ||
+        (deopt_jump_table_.last().bailout_type != bailout_type) ||
+        (deopt_jump_table_.last().needs_frame != !frame_is_built_)) {
+      Deoptimizer::JumpTableEntry table_entry(entry,
+                                              bailout_type,
+                                              !frame_is_built_);
+      deopt_jump_table_.Add(table_entry, zone());
+    }
+    __ B(&deopt_jump_table_.last().label);
+  }
+}
+
+
+// Unconditional deopt with the default bailout type for this code kind.
+void LCodeGen::Deoptimize(LEnvironment* environment) {
+  Deoptimizer::BailoutType bailout_type = DeoptimizeHeader(environment, NULL);
+  Deoptimize(environment, bailout_type);
+}
+
+
+// Conditional deopt helpers. Each emits the deopt header, then branches
+// around the deopt when the condition does NOT hold (the inverted test
+// jumps to dont_deopt), falling through into Deoptimize otherwise.
+
+// Deopt when condition |cond| holds.
+void LCodeGen::DeoptimizeIf(Condition cond, LEnvironment* environment) {
+  Label dont_deopt;
+  Deoptimizer::BailoutType bailout_type = DeoptimizeHeader(environment, NULL);
+  __ B(InvertCondition(cond), &dont_deopt);
+  Deoptimize(environment, bailout_type);
+  __ Bind(&dont_deopt);
+}
+
+
+// Deopt when |rt| is zero.
+void LCodeGen::DeoptimizeIfZero(Register rt, LEnvironment* environment) {
+  Label dont_deopt;
+  Deoptimizer::BailoutType bailout_type = DeoptimizeHeader(environment, NULL);
+  __ Cbnz(rt, &dont_deopt);
+  Deoptimize(environment, bailout_type);
+  __ Bind(&dont_deopt);
+}
+
+
+// Deopt when |rt| is negative (sign bit set, width-aware).
+void LCodeGen::DeoptimizeIfNegative(Register rt, LEnvironment* environment) {
+  Label dont_deopt;
+  Deoptimizer::BailoutType bailout_type = DeoptimizeHeader(environment, NULL);
+  __ Tbz(rt, rt.Is64Bits() ? kXSignBit : kWSignBit, &dont_deopt);
+  Deoptimize(environment, bailout_type);
+  __ Bind(&dont_deopt);
+}
+
+
+// Deopt when |rt| holds a smi.
+void LCodeGen::DeoptimizeIfSmi(Register rt,
+                               LEnvironment* environment) {
+  Label dont_deopt;
+  Deoptimizer::BailoutType bailout_type = DeoptimizeHeader(environment, NULL);
+  __ JumpIfNotSmi(rt, &dont_deopt);
+  Deoptimize(environment, bailout_type);
+  __ Bind(&dont_deopt);
+}
+
+
+// Deopt when |rt| does not hold a smi.
+void LCodeGen::DeoptimizeIfNotSmi(Register rt, LEnvironment* environment) {
+  Label dont_deopt;
+  Deoptimizer::BailoutType bailout_type = DeoptimizeHeader(environment, NULL);
+  __ JumpIfSmi(rt, &dont_deopt);
+  Deoptimize(environment, bailout_type);
+  __ Bind(&dont_deopt);
+}
+
+
+// Deopt when |rt| equals the root at |index|.
+void LCodeGen::DeoptimizeIfRoot(Register rt,
+                                Heap::RootListIndex index,
+                                LEnvironment* environment) {
+  Label dont_deopt;
+  Deoptimizer::BailoutType bailout_type = DeoptimizeHeader(environment, NULL);
+  __ JumpIfNotRoot(rt, index, &dont_deopt);
+  Deoptimize(environment, bailout_type);
+  __ Bind(&dont_deopt);
+}
+
+
+// Deopt when |rt| does not equal the root at |index|.
+void LCodeGen::DeoptimizeIfNotRoot(Register rt,
+                                   Heap::RootListIndex index,
+                                   LEnvironment* environment) {
+  Label dont_deopt;
+  Deoptimizer::BailoutType bailout_type = DeoptimizeHeader(environment, NULL);
+  __ JumpIfRoot(rt, index, &dont_deopt);
+  Deoptimize(environment, bailout_type);
+  __ Bind(&dont_deopt);
+}
+
+
+// Pad with nops so at least |space_needed| bytes separate this point from
+// the previous lazy-deopt site, guaranteeing room to patch in a call later.
+void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) {
+  if (!info()->IsStub()) {
+    // Ensure that we have enough space after the previous lazy-bailout
+    // instruction for patching the code here.
+    intptr_t current_pc = masm()->pc_offset();
+
+    if (current_pc < (last_lazy_deopt_pc_ + space_needed)) {
+      ptrdiff_t padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
+      ASSERT((padding_size % kInstructionSize) == 0);
+      InstructionAccurateScope instruction_accurate(
+          masm(), padding_size / kInstructionSize);
+
+      while (padding_size > 0) {
+        __ nop();
+        padding_size -= kInstructionSize;
+      }
+    }
+  }
+  last_lazy_deopt_pc_ = masm()->pc_offset();
+}
+
+
+// Map a register LOperand to its allocated X register.
+Register LCodeGen::ToRegister(LOperand* op) const {
+  // TODO(all): support zero register results, as ToRegister32.
+  ASSERT((op != NULL) && op->IsRegister());
+  return Register::FromAllocationIndex(op->index());
+}
+
+
+// 32-bit view of an operand's register; a zero constant maps to wzr.
+Register LCodeGen::ToRegister32(LOperand* op) const {
+  ASSERT(op != NULL);
+  if (op->IsConstantOperand()) {
+    // If this is a constant operand, the result must be the zero register.
+    ASSERT(ToInteger32(LConstantOperand::cast(op)) == 0);
+    return wzr;
+  } else {
+    return ToRegister(op).W();
+  }
+}
+
+
+// Constant operand as a smi.
+Smi* LCodeGen::ToSmi(LConstantOperand* op) const {
+  HConstant* constant = chunk_->LookupConstant(op);
+  return Smi::FromInt(constant->Integer32Value());
+}
+
+
+// Map a double-register LOperand to its allocated D register.
+DoubleRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
+  ASSERT((op != NULL) && op->IsDoubleRegister());
+  return DoubleRegister::FromAllocationIndex(op->index());
+}
+
+
+// Convert an LOperand to an assembler Operand: constants become immediates
+// (smi, int32, or tagged handle; doubles are unsupported), registers wrap
+// directly. Stack slots are not representable here — use ToMemOperand.
+Operand LCodeGen::ToOperand(LOperand* op) {
+  ASSERT(op != NULL);
+  if (op->IsConstantOperand()) {
+    LConstantOperand* const_op = LConstantOperand::cast(op);
+    HConstant* constant = chunk()->LookupConstant(const_op);
+    Representation r = chunk_->LookupLiteralRepresentation(const_op);
+    if (r.IsSmi()) {
+      ASSERT(constant->HasSmiValue());
+      return Operand(Smi::FromInt(constant->Integer32Value()));
+    } else if (r.IsInteger32()) {
+      ASSERT(constant->HasInteger32Value());
+      return Operand(constant->Integer32Value());
+    } else if (r.IsDouble()) {
+      Abort(kToOperandUnsupportedDoubleImmediate);
+    }
+    ASSERT(r.IsTagged());
+    return Operand(constant->handle(isolate()));
+  } else if (op->IsRegister()) {
+    return Operand(ToRegister(op));
+  } else if (op->IsDoubleRegister()) {
+    Abort(kToOperandIsDoubleRegisterUnimplemented);
+    return Operand(0);
+  }
+  // Stack slots not implemented, use ToMemOperand instead.
+  UNREACHABLE();
+  return Operand(0);
+}
+
+
+// 32-bit operand, constants interpreted as signed int32.
+Operand LCodeGen::ToOperand32I(LOperand* op) {
+  return ToOperand32(op, SIGNED_INT32);
+}
+
+
+// 32-bit operand, constants interpreted as unsigned int32.
+Operand LCodeGen::ToOperand32U(LOperand* op) {
+  return ToOperand32(op, UNSIGNED_INT32);
+}
+
+
+// Convert an LOperand to a 32-bit assembler Operand: registers as their W
+// view, int32 constants as immediates with the requested signedness. Other
+// operand kinds are unsupported.
+Operand LCodeGen::ToOperand32(LOperand* op, IntegerSignedness signedness) {
+  ASSERT(op != NULL);
+  if (op->IsRegister()) {
+    return Operand(ToRegister32(op));
+  } else if (op->IsConstantOperand()) {
+    LConstantOperand* const_op = LConstantOperand::cast(op);
+    HConstant* constant = chunk()->LookupConstant(const_op);
+    Representation r = chunk_->LookupLiteralRepresentation(const_op);
+    if (r.IsInteger32()) {
+      ASSERT(constant->HasInteger32Value());
+      return Operand(signedness == SIGNED_INT32
+                     ? constant->Integer32Value()
+                     : static_cast<uint32_t>(constant->Integer32Value()));
+    } else {
+      // Other constants not implemented.
+      Abort(kToOperand32UnsupportedImmediate);
+    }
+  }
+  // Other cases are not implemented.
+  UNREACHABLE();
+  return Operand(0);
+}
+
+
+// Offset of a (negative-indexed) parameter slot relative to the stack
+// pointer when no eager frame has been built.
+static ptrdiff_t ArgumentsOffsetWithoutFrame(ptrdiff_t index) {
+  ASSERT(index < 0);
+  return -(index + 1) * kPointerSize;
+}
+
+
+// Memory location of a stack-slot operand: fp-relative when an eager frame
+// exists, otherwise sp-relative (parameters only).
+MemOperand LCodeGen::ToMemOperand(LOperand* op) const {
+  ASSERT(op != NULL);
+  ASSERT(!op->IsRegister());
+  ASSERT(!op->IsDoubleRegister());
+  ASSERT(op->IsStackSlot() || op->IsDoubleStackSlot());
+  if (NeedsEagerFrame()) {
+    return MemOperand(fp, StackSlotOffset(op->index()));
+  } else {
+    // Retrieve parameter without eager stack-frame relative to the
+    // stack-pointer.
+    return MemOperand(masm()->StackPointer(),
+                      ArgumentsOffsetWithoutFrame(op->index()));
+  }
+}
+
+
+// Constant operand as a handle (must be smi or tagged).
+Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
+  HConstant* constant = chunk_->LookupConstant(op);
+  ASSERT(chunk_->LookupLiteralRepresentation(op).IsSmiOrTagged());
+  return constant->handle(isolate());
+}
+
+
+// True if the constant operand has smi representation.
+bool LCodeGen::IsSmi(LConstantOperand* op) const {
+  return chunk_->LookupLiteralRepresentation(op).IsSmi();
+}
+
+
+// True if |op| is a constant representable as a 32-bit integer.
+bool LCodeGen::IsInteger32Constant(LConstantOperand* op) const {
+  return op->IsConstantOperand() &&
+      chunk_->LookupLiteralRepresentation(op).IsSmiOrInteger32();
+}
+
+
+// Constant operand's int32 value.
+int32_t LCodeGen::ToInteger32(LConstantOperand* op) const {
+  HConstant* constant = chunk_->LookupConstant(op);
+  return constant->Integer32Value();
+}
+
+
+// Constant operand's double value.
+double LCodeGen::ToDouble(LConstantOperand* op) const {
+  HConstant* constant = chunk_->LookupConstant(op);
+  ASSERT(constant->HasDoubleValue());
+  return constant->DoubleValue();
+}
+
+
+// Map a comparison token to the ARM condition code, choosing the unsigned
+// variant (lo/hi/ls/hs) when |is_unsigned| is set.
+Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
+  Condition cond = nv;
+  switch (op) {
+    case Token::EQ:
+    case Token::EQ_STRICT:
+      cond = eq;
+      break;
+    case Token::NE:
+    case Token::NE_STRICT:
+      cond = ne;
+      break;
+    case Token::LT:
+      cond = is_unsigned ? lo : lt;
+      break;
+    case Token::GT:
+      cond = is_unsigned ? hi : gt;
+      break;
+    case Token::LTE:
+      cond = is_unsigned ? ls : le;
+      break;
+    case Token::GTE:
+      cond = is_unsigned ? hs : ge;
+      break;
+    case Token::IN:
+    case Token::INSTANCEOF:
+    default:
+      UNREACHABLE();
+  }
+  return cond;
+}
+
+
+// Core control-flow emitter: branch to the instruction's true/false blocks
+// via the |branch| strategy, skipping the branch that would fall through to
+// the next emitted block (and collapsing to a plain goto when both targets
+// coincide).
+template<class InstrType>
+void LCodeGen::EmitBranchGeneric(InstrType instr,
+                                 const BranchGenerator& branch) {
+  int left_block = instr->TrueDestination(chunk_);
+  int right_block = instr->FalseDestination(chunk_);
+
+  int next_block = GetNextEmittedBlock();
+
+  if (right_block == left_block) {
+    EmitGoto(left_block);
+  } else if (left_block == next_block) {
+    // True target falls through; branch on the inverted condition.
+    branch.EmitInverted(chunk_->GetAssemblyLabel(right_block));
+  } else if (right_block == next_block) {
+    branch.Emit(chunk_->GetAssemblyLabel(left_block));
+  } else {
+    branch.Emit(chunk_->GetAssemblyLabel(left_block));
+    __ B(chunk_->GetAssemblyLabel(right_block));
+  }
+}
+
+
+// Branch on a condition code already set by a prior comparison.
+template<class InstrType>
+void LCodeGen::EmitBranch(InstrType instr, Condition condition) {
+  ASSERT((condition != al) && (condition != nv));
+  BranchOnCondition branch(this, condition);
+  EmitBranchGeneric(instr, branch);
+}
+
+
+// Compare |lhs| with |rhs| and branch on |condition|.
+template<class InstrType>
+void LCodeGen::EmitCompareAndBranch(InstrType instr,
+                                    Condition condition,
+                                    const Register& lhs,
+                                    const Operand& rhs) {
+  ASSERT((condition != al) && (condition != nv));
+  CompareAndBranch branch(this, condition, lhs, rhs);
+  EmitBranchGeneric(instr, branch);
+}
+
+
+// Test |value| against |mask| and branch on |condition|.
+template<class InstrType>
+void LCodeGen::EmitTestAndBranch(InstrType instr,
+                                 Condition condition,
+                                 const Register& value,
+                                 uint64_t mask) {
+  ASSERT((condition != al) && (condition != nv));
+  TestAndBranch branch(this, condition, value, mask);
+  EmitBranchGeneric(instr, branch);
+}
+
+
+// Branch when the FP |value| is a non-zero number (not NaN, not +/-0).
+template<class InstrType>
+void LCodeGen::EmitBranchIfNonZeroNumber(InstrType instr,
+                                         const FPRegister& value,
+                                         const FPRegister& scratch) {
+  BranchIfNonZeroNumber branch(this, value, scratch);
+  EmitBranchGeneric(instr, branch);
+}
+
+
+// Branch when |value| is a heap number.
+template<class InstrType>
+void LCodeGen::EmitBranchIfHeapNumber(InstrType instr,
+                                      const Register& value) {
+  BranchIfHeapNumber branch(this, value);
+  EmitBranchGeneric(instr, branch);
+}
+
+
+// Branch when |value| equals the root at |index|.
+template<class InstrType>
+void LCodeGen::EmitBranchIfRoot(InstrType instr,
+                                const Register& value,
+                                Heap::RootListIndex index) {
+  BranchIfRoot branch(this, value, index);
+  EmitBranchGeneric(instr, branch);
+}
+
+
+// Resolve and emit the parallel moves attached to each inner position of a
+// gap instruction.
+void LCodeGen::DoGap(LGap* gap) {
+  for (int i = LGap::FIRST_INNER_POSITION;
+       i <= LGap::LAST_INNER_POSITION;
+       i++) {
+    LGap::InnerPosition inner_pos = static_cast<LGap::InnerPosition>(i);
+    LParallelMove* move = gap->GetParallelMove(inner_pos);
+    if (move != NULL) {
+      resolver_.Resolve(move);
+    }
+  }
+}
+
+
+// Load element |index| of the arguments object addressed by |arguments|.
+// Constant index/length folds the address; otherwise the slot is computed
+// as (length - index + 1) to skip the two words between fp and the last
+// argument.
+void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
+  // TODO(all): Try to improve this, like ARM r17925.
+  Register arguments = ToRegister(instr->arguments());
+  Register result = ToRegister(instr->result());
+
+  if (instr->length()->IsConstantOperand() &&
+      instr->index()->IsConstantOperand()) {
+    ASSERT(instr->temp() == NULL);
+    int index = ToInteger32(LConstantOperand::cast(instr->index()));
+    int length = ToInteger32(LConstantOperand::cast(instr->length()));
+    int offset = ((length - index) + 1) * kPointerSize;
+    __ Ldr(result, MemOperand(arguments, offset));
+  } else {
+    ASSERT(instr->temp() != NULL);
+    Register temp = ToRegister32(instr->temp());
+    Register length = ToRegister32(instr->length());
+    Operand index = ToOperand32I(instr->index());
+    // There are two words between the frame pointer and the last arguments.
+    // Subtracting from length accounts for only one, so we add one more.
+    __ Sub(temp, length, index);
+    __ Add(temp, temp, 1);
+    __ Ldr(result, MemOperand(arguments, temp, UXTW, kPointerSizeLog2));
+  }
+}
+
+
+// Add with a sign-extended 32-bit right operand (64-bit result register).
+// Overflow checking is not supported for this instruction.
+void LCodeGen::DoAddE(LAddE* instr) {
+  Register result = ToRegister(instr->result());
+  Register left = ToRegister(instr->left());
+  // A constant right operand is used directly; a register operand is
+  // sign-extended from 32 bits (SXTW).
+  Operand right = (instr->right()->IsConstantOperand())
+      ? ToInteger32(LConstantOperand::cast(instr->right()))
+      : Operand(ToRegister32(instr->right()), SXTW);
+
+  ASSERT(!instr->hydrogen()->CheckFlag(HValue::kCanOverflow));
+  __ Add(result, left, right);
+}
+
+
+// 32-bit integer addition. When the hydrogen instruction can overflow, use
+// the flag-setting Adds and deoptimize on signed overflow (V flag set).
+void LCodeGen::DoAddI(LAddI* instr) {
+  bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
+  Register result = ToRegister32(instr->result());
+  Register left = ToRegister32(instr->left());
+  Operand right = ToOperand32I(instr->right());
+  if (can_overflow) {
+    __ Adds(result, left, right);
+    DeoptimizeIf(vs, instr->environment());
+  } else {
+    __ Add(result, left, right);
+  }
+}
+
+
+// Smi addition (full-width registers). Same overflow strategy as DoAddI.
+void LCodeGen::DoAddS(LAddS* instr) {
+  bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
+  Register result = ToRegister(instr->result());
+  Register left = ToRegister(instr->left());
+  Operand right = ToOperand(instr->right());
+  if (can_overflow) {
+    __ Adds(result, left, right);
+    DeoptimizeIf(vs, instr->environment());
+  } else {
+    __ Add(result, left, right);
+  }
+}
+
+
+// Inline allocation fast path. Attempts a bump-pointer allocation; on
+// failure, falls through to DeferredAllocate, which calls the runtime.
+void LCodeGen::DoAllocate(LAllocate* instr) {
+  // Deferred slow path: performs the allocation via the runtime.
+  class DeferredAllocate: public LDeferredCode {
+   public:
+    DeferredAllocate(LCodeGen* codegen, LAllocate* instr)
+        : LDeferredCode(codegen), instr_(instr) { }
+    virtual void Generate() { codegen()->DoDeferredAllocate(instr_); }
+    virtual LInstruction* instr() { return instr_; }
+   private:
+    LAllocate* instr_;
+  };
+
+  DeferredAllocate* deferred = new(zone()) DeferredAllocate(this, instr);
+
+  Register result = ToRegister(instr->result());
+  Register temp1 = ToRegister(instr->temp1());
+  Register temp2 = ToRegister(instr->temp2());
+
+  // Allocate memory for the object. The result is always tagged.
+  AllocationFlags flags = TAG_OBJECT;
+  if (instr->hydrogen()->MustAllocateDoubleAligned()) {
+    flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
+  }
+
+  // Select the target space. The old-pointer / old-data / new-space choices
+  // are mutually exclusive (asserted below).
+  if (instr->hydrogen()->IsOldPointerSpaceAllocation()) {
+    ASSERT(!instr->hydrogen()->IsOldDataSpaceAllocation());
+    ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
+    flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_POINTER_SPACE);
+  } else if (instr->hydrogen()->IsOldDataSpaceAllocation()) {
+    ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
+    flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_DATA_SPACE);
+  }
+
+  if (instr->size()->IsConstantOperand()) {
+    int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
+    __ Allocate(size, result, temp1, temp2, deferred->entry(), flags);
+  } else {
+    Register size = ToRegister(instr->size());
+    __ Allocate(size, result, temp1, temp2, deferred->entry(), flags);
+  }
+
+  __ Bind(deferred->exit());
+
+  if (instr->hydrogen()->MustPrefillWithFiller()) {
+    // Fill the allocated object with one-pointer filler maps, writing
+    // backwards from the last word (temp1 is the byte offset of the word
+    // currently being filled).
+    if (instr->size()->IsConstantOperand()) {
+      int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
+      __ Mov(temp1, size - kPointerSize);
+    } else {
+      __ Sub(temp1, ToRegister(instr->size()), kPointerSize);
+    }
+    // Untag so that MemOperand(result, temp1) addresses raw words.
+    __ Sub(result, result, kHeapObjectTag);
+
+    // TODO(jbramley): Optimize this loop using stp.
+    Label loop;
+    __ Bind(&loop);
+    __ Mov(temp2, Operand(isolate()->factory()->one_pointer_filler_map()));
+    __ Str(temp2, MemOperand(result, temp1));
+    __ Subs(temp1, temp1, kPointerSize);
+    __ B(ge, &loop);
+
+    // Re-tag the result.
+    __ Add(result, result, kHeapObjectTag);
+  }
+}
+
+
+// Slow path for DoAllocate: allocate via Runtime::kAllocateInTargetSpace.
+// The size is passed as a smi, together with smi-encoded allocation flags.
+void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
+  // TODO(3095996): Get rid of this. For now, we need to make the
+  // result register contain a valid pointer because it is already
+  // contained in the register pointer map.
+  __ Mov(ToRegister(instr->result()), Operand(Smi::FromInt(0)));
+
+  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
+  // We're in a SafepointRegistersScope so we can use any scratch registers.
+  Register size = x0;
+  if (instr->size()->IsConstantOperand()) {
+    __ Mov(size, Operand(ToSmi(LConstantOperand::cast(instr->size()))));
+  } else {
+    __ SmiTag(size, ToRegister(instr->size()));
+  }
+  // Encode alignment and target space into the runtime call's flags arg.
+  int flags = AllocateDoubleAlignFlag::encode(
+      instr->hydrogen()->MustAllocateDoubleAligned());
+  if (instr->hydrogen()->IsOldPointerSpaceAllocation()) {
+    ASSERT(!instr->hydrogen()->IsOldDataSpaceAllocation());
+    ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
+    flags = AllocateTargetSpace::update(flags, OLD_POINTER_SPACE);
+  } else if (instr->hydrogen()->IsOldDataSpaceAllocation()) {
+    ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
+    flags = AllocateTargetSpace::update(flags, OLD_DATA_SPACE);
+  } else {
+    flags = AllocateTargetSpace::update(flags, NEW_SPACE);
+  }
+  __ Mov(x10, Operand(Smi::FromInt(flags)));
+  __ Push(size, x10);
+
+  CallRuntimeFromDeferred(
+      Runtime::kAllocateInTargetSpace, 2, instr, instr->context());
+  // The runtime result (in x0) becomes the instruction's result.
+  __ StoreToSafepointRegisterSlot(x0, ToRegister(instr->result()));
+}
+
+
+// Implements Function.prototype.apply: push the receiver and `length`
+// arguments from `elements` onto the stack, then invoke `function`.
+void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
+  Register receiver = ToRegister(instr->receiver());
+  Register function = ToRegister(instr->function());
+  Register length = ToRegister(instr->length());
+  Register elements = ToRegister(instr->elements());
+  Register scratch = x5;
+  ASSERT(receiver.Is(x0));  // Used for parameter count.
+  ASSERT(function.Is(x1));  // Required by InvokeFunction.
+  ASSERT(ToRegister(instr->result()).Is(x0));
+  ASSERT(instr->IsMarkedAsCall());
+
+  // Copy the arguments to this function possibly from the
+  // adaptor frame below it.
+  const uint32_t kArgumentsLimit = 1 * KB;
+  // Deoptimize for excessively long argument lists.
+  __ Cmp(length, kArgumentsLimit);
+  DeoptimizeIf(hi, instr->environment());
+
+  // Push the receiver and use the register to keep the original
+  // number of arguments.
+  __ Push(receiver);
+  Register argc = receiver;
+  receiver = NoReg;
+  __ Mov(argc, length);
+  // The arguments are at a one pointer size offset from elements.
+  __ Add(elements, elements, 1 * kPointerSize);
+
+  // Loop through the arguments pushing them onto the execution
+  // stack. Arguments are pushed from the highest index downwards.
+  Label invoke, loop;
+  // length is a small non-negative integer, due to the test above.
+  __ Cbz(length, &invoke);
+  __ Bind(&loop);
+  __ Ldr(scratch, MemOperand(elements, length, LSL, kPointerSizeLog2));
+  __ Push(scratch);
+  __ Subs(length, length, 1);
+  __ B(ne, &loop);
+
+  __ Bind(&invoke);
+  ASSERT(instr->HasPointerMap());
+  LPointerMap* pointers = instr->pointer_map();
+  SafepointGenerator safepoint_generator(this, pointers, Safepoint::kLazyDeopt);
+  // The number of arguments is stored in argc (receiver) which is x0, as
+  // expected by InvokeFunction.
+  ParameterCount actual(argc);
+  __ InvokeFunction(function, actual, CALL_FUNCTION, safepoint_generator);
+}
+
+
+// Compute a pointer to the arguments area, suitable for LAccessArgumentsAt.
+void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
+  Register result = ToRegister(instr->result());
+
+  if (instr->hydrogen()->from_inlined()) {
+    // When we are inside an inlined function, the arguments are the last things
+    // that have been pushed on the stack. Therefore the arguments array can be
+    // accessed directly from jssp.
+    // However in the normal case, it is accessed via fp but there are two words
+    // on the stack between fp and the arguments (the saved lr and fp) and the
+    // LAccessArgumentsAt implementation take that into account.
+    // In the inlined case we need to subtract the size of 2 words to jssp to
+    // get a pointer which will work well with LAccessArgumentsAt.
+    ASSERT(masm()->StackPointer().Is(jssp));
+    __ Sub(result, jssp, 2 * kPointerSize);
+  } else {
+    ASSERT(instr->temp() != NULL);
+    Register previous_fp = ToRegister(instr->temp());
+
+    // Check whether the caller frame is an arguments adaptor frame by
+    // comparing its context slot against the adaptor sentinel.
+    __ Ldr(previous_fp,
+           MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+    __ Ldr(result,
+           MemOperand(previous_fp, StandardFrameConstants::kContextOffset));
+    __ Cmp(result, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+    // result = fp unless the caller is an adaptor frame, in which case use
+    // the adaptor frame's fp.
+    __ Csel(result, fp, previous_fp, ne);
+  }
+}
+
+
+// Determine the number of arguments: either the static parameter count, or
+// the dynamic length stored in the arguments adaptor frame.
+void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
+  Register elements = ToRegister(instr->elements());
+  Register result = ToRegister(instr->result());
+  Label done;
+
+  // If no arguments adaptor frame the number of arguments is fixed.
+  // (elements == fp indicates there is no adaptor frame; see
+  // DoArgumentsElements, which selects fp in that case.)
+  __ Cmp(fp, elements);
+  __ Mov(result, scope()->num_parameters());
+  __ B(eq, &done);
+
+  // Arguments adaptor frame present. Get argument length from there.
+  __ Ldr(result, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+  __ Ldrsw(result,
+           UntagSmiMemOperand(result,
+                              ArgumentsAdaptorFrameConstants::kLengthOffset));
+
+  // Argument length is in result register.
+  __ Bind(&done);
+}
+
+
+// Double-precision arithmetic. ADD/SUB/MUL/DIV are emitted inline; MOD is
+// routed through a C helper because of its truncating-division semantics.
+void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
+  DoubleRegister left = ToDoubleRegister(instr->left());
+  DoubleRegister right = ToDoubleRegister(instr->right());
+  DoubleRegister result = ToDoubleRegister(instr->result());
+
+  switch (instr->op()) {
+    case Token::ADD: __ Fadd(result, left, right); break;
+    case Token::SUB: __ Fsub(result, left, right); break;
+    case Token::MUL: __ Fmul(result, left, right); break;
+    case Token::DIV: __ Fdiv(result, left, right); break;
+    case Token::MOD: {
+      // The ECMA-262 remainder operator is the remainder from a truncating
+      // (round-towards-zero) division. Note that this differs from IEEE-754.
+      //
+      // TODO(jbramley): See if it's possible to do this inline, rather than by
+      // calling a helper function. With frintz (to produce the intermediate
+      // quotient) and fmsub (to calculate the remainder without loss of
+      // precision), it should be possible. However, we would need support for
+      // fdiv in round-towards-zero mode, and the A64 simulator doesn't support
+      // that yet.
+      // The C calling convention fixes the operand/result registers (d0, d1).
+      ASSERT(left.Is(d0));
+      ASSERT(right.Is(d1));
+      __ CallCFunction(
+          ExternalReference::mod_two_doubles_operation(isolate()),
+          0, 2);
+      ASSERT(result.Is(d0));
+      break;
+    }
+    default:
+      UNREACHABLE();
+      break;
+  }
+}
+
+
+// Generic (tagged) binary arithmetic: delegate to the BinaryOpIC stub.
+// The stub's calling convention fixes the operands in x1/x0 and the result
+// in x0 (asserted below).
+void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
+  ASSERT(ToRegister(instr->context()).is(cp));
+  ASSERT(ToRegister(instr->left()).is(x1));
+  ASSERT(ToRegister(instr->right()).is(x0));
+  ASSERT(ToRegister(instr->result()).is(x0));
+
+  BinaryOpICStub stub(instr->op(), NO_OVERWRITE);
+  CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
+}
+
+
+// 32-bit bitwise AND/OR/XOR.
+void LCodeGen::DoBitI(LBitI* instr) {
+  Register result = ToRegister32(instr->result());
+  Register left = ToRegister32(instr->left());
+  Operand right = ToOperand32U(instr->right());
+
+  switch (instr->op()) {
+    case Token::BIT_AND: __ And(result, left, right); break;
+    case Token::BIT_OR:  __ Orr(result, left, right); break;
+    case Token::BIT_XOR: __ Eor(result, left, right); break;
+    default:
+      UNREACHABLE();
+      break;
+  }
+}
+
+
+// Smi bitwise AND/OR/XOR (full-width registers; the tag bits combine
+// consistently under these operations).
+void LCodeGen::DoBitS(LBitS* instr) {
+  Register result = ToRegister(instr->result());
+  Register left = ToRegister(instr->left());
+  Operand right = ToOperand(instr->right());
+
+  switch (instr->op()) {
+    case Token::BIT_AND: __ And(result, left, right); break;
+    case Token::BIT_OR:  __ Orr(result, left, right); break;
+    case Token::BIT_XOR: __ Eor(result, left, right); break;
+    default:
+      UNREACHABLE();
+      break;
+  }
+}
+
+
+// Deoptimize on `cc`; but if the bounds check was statically elided and
+// --debug-code is enabled, emit an assertion instead so elision bugs are
+// caught in debug builds.
+void LCodeGen::ApplyCheckIf(Condition cc, LBoundsCheck* check) {
+  if (FLAG_debug_code && check->hydrogen()->skip_check()) {
+    __ Assert(InvertCondition(cc), kEliminatedBoundsCheckFailed);
+  } else {
+    DeoptimizeIf(cc, check->environment());
+  }
+}
+
+
+// Compare an index against a length and deoptimize when out of bounds.
+void LCodeGen::DoBoundsCheck(LBoundsCheck *instr) {
+  if (instr->hydrogen()->skip_check()) return;
+
+  Register length = ToRegister(instr->length());
+
+  if (instr->index()->IsConstantOperand()) {
+    int constant_index =
+        ToInteger32(LConstantOperand::cast(instr->index()));
+
+    // Match the representation of `length`: compare against a smi-encoded
+    // constant when the length is a smi.
+    if (instr->hydrogen()->length()->representation().IsSmi()) {
+      __ Cmp(length, Operand(Smi::FromInt(constant_index)));
+    } else {
+      __ Cmp(length, Operand(constant_index));
+    }
+  } else {
+    __ Cmp(length, ToRegister(instr->index()));
+  }
+  // After Cmp(length, index): deopt when length < index if index == length
+  // is allowed, otherwise when length <= index.
+  Condition condition = instr->hydrogen()->allow_equality() ? lo : ls;
+  ApplyCheckIf(condition, instr);
+}
+
+
+// Branch on the truthiness of a value. Specialized fast paths exist for
+// int32, smi, double, and several statically-known tagged types; the
+// general tagged case tests each type the ToBooleanStub has recorded and
+// deoptimizes on anything not yet seen.
+void LCodeGen::DoBranch(LBranch* instr) {
+  Representation r = instr->hydrogen()->value()->representation();
+  Label* true_label = instr->TrueLabel(chunk_);
+  Label* false_label = instr->FalseLabel(chunk_);
+
+  if (r.IsInteger32()) {
+    // int32: non-zero is true.
+    ASSERT(!info()->IsStub());
+    EmitCompareAndBranch(instr, ne, ToRegister32(instr->value()), 0);
+  } else if (r.IsSmi()) {
+    // Smi: non-zero is true (Smi zero has the same bit pattern as 0).
+    ASSERT(!info()->IsStub());
+    STATIC_ASSERT(kSmiTag == 0);
+    EmitCompareAndBranch(instr, ne, ToRegister(instr->value()), 0);
+  } else if (r.IsDouble()) {
+    DoubleRegister value = ToDoubleRegister(instr->value());
+    // Test the double value. Zero and NaN are false.
+    EmitBranchIfNonZeroNumber(instr, value, double_scratch());
+  } else {
+    ASSERT(r.IsTagged());
+    Register value = ToRegister(instr->value());
+    HType type = instr->hydrogen()->value()->type();
+
+    if (type.IsBoolean()) {
+      // Known boolean: true iff it is the `true` oddball.
+      ASSERT(!info()->IsStub());
+      __ CompareRoot(value, Heap::kTrueValueRootIndex);
+      EmitBranch(instr, eq);
+    } else if (type.IsSmi()) {
+      ASSERT(!info()->IsStub());
+      EmitCompareAndBranch(instr, ne, value, Operand(Smi::FromInt(0)));
+    } else if (type.IsJSArray()) {
+      // JSArrays are always truthy.
+      ASSERT(!info()->IsStub());
+      EmitGoto(instr->TrueDestination(chunk()));
+    } else if (type.IsHeapNumber()) {
+      ASSERT(!info()->IsStub());
+      __ Ldr(double_scratch(), FieldMemOperand(value,
+                                               HeapNumber::kValueOffset));
+      // Test the double value. Zero and NaN are false.
+      EmitBranchIfNonZeroNumber(instr, double_scratch(), double_scratch());
+    } else if (type.IsString()) {
+      // Strings are true iff non-empty.
+      ASSERT(!info()->IsStub());
+      Register temp = ToRegister(instr->temp1());
+      __ Ldr(temp, FieldMemOperand(value, String::kLengthOffset));
+      EmitCompareAndBranch(instr, ne, temp, 0);
+    } else {
+      ToBooleanStub::Types expected = instr->hydrogen()->expected_input_types();
+      // Avoid deopts in the case where we've never executed this path before.
+      if (expected.IsEmpty()) expected = ToBooleanStub::Types::Generic();
+
+      if (expected.Contains(ToBooleanStub::UNDEFINED)) {
+        // undefined -> false.
+        __ JumpIfRoot(
+            value, Heap::kUndefinedValueRootIndex, false_label);
+      }
+
+      if (expected.Contains(ToBooleanStub::BOOLEAN)) {
+        // Boolean -> its value.
+        __ JumpIfRoot(
+            value, Heap::kTrueValueRootIndex, true_label);
+        __ JumpIfRoot(
+            value, Heap::kFalseValueRootIndex, false_label);
+      }
+
+      if (expected.Contains(ToBooleanStub::NULL_TYPE)) {
+        // 'null' -> false.
+        __ JumpIfRoot(
+            value, Heap::kNullValueRootIndex, false_label);
+      }
+
+      if (expected.Contains(ToBooleanStub::SMI)) {
+        // Smis: 0 -> false, all other -> true.
+        ASSERT(Smi::FromInt(0) == 0);
+        __ Cbz(value, false_label);
+        __ JumpIfSmi(value, true_label);
+      } else if (expected.NeedsMap()) {
+        // If we need a map later and have a smi, deopt.
+        DeoptimizeIfSmi(value, instr->environment());
+      }
+
+      Register map = NoReg;
+      Register scratch = NoReg;
+
+      if (expected.NeedsMap()) {
+        ASSERT((instr->temp1() != NULL) && (instr->temp2() != NULL));
+        map = ToRegister(instr->temp1());
+        scratch = ToRegister(instr->temp2());
+
+        __ Ldr(map, FieldMemOperand(value, HeapObject::kMapOffset));
+
+        if (expected.CanBeUndetectable()) {
+          // Undetectable -> false.
+          __ Ldrb(scratch, FieldMemOperand(map, Map::kBitFieldOffset));
+          __ TestAndBranchIfAnySet(
+              scratch, 1 << Map::kIsUndetectable, false_label);
+        }
+      }
+
+      if (expected.Contains(ToBooleanStub::SPEC_OBJECT)) {
+        // spec object -> true.
+        __ CompareInstanceType(map, scratch, FIRST_SPEC_OBJECT_TYPE);
+        __ B(ge, true_label);
+      }
+
+      if (expected.Contains(ToBooleanStub::STRING)) {
+        // String value -> false iff empty.
+        Label not_string;
+        __ CompareInstanceType(map, scratch, FIRST_NONSTRING_TYPE);
+        __ B(ge, &not_string);
+        __ Ldr(scratch, FieldMemOperand(value, String::kLengthOffset));
+        __ Cbz(scratch, false_label);
+        __ B(true_label);
+        __ Bind(&not_string);
+      }
+
+      if (expected.Contains(ToBooleanStub::SYMBOL)) {
+        // Symbol value -> true.
+        __ CompareInstanceType(map, scratch, SYMBOL_TYPE);
+        __ B(eq, true_label);
+      }
+
+      if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) {
+        // Heap number -> false iff +0, -0 or NaN.
+        Label not_heap_number;
+        __ JumpIfNotRoot(map, Heap::kHeapNumberMapRootIndex, &not_heap_number);
+
+        __ Ldr(double_scratch(),
+               FieldMemOperand(value, HeapNumber::kValueOffset));
+        __ Fcmp(double_scratch(), 0.0);
+        // If we got a NaN (overflow bit is set), jump to the false branch.
+        __ B(vs, false_label);
+        __ B(eq, false_label);
+        __ B(true_label);
+        __ Bind(&not_heap_number);
+      }
+
+      if (!expected.IsGeneric()) {
+        // We've seen something for the first time -> deopt.
+        // This can only happen if we are not generic already.
+        Deoptimize(instr->environment());
+      }
+    }
+  }
+}
+
+
+// Call a statically-known JSFunction. When the declared parameter count
+// matches the call arity (or adaption is disabled), the function's code
+// entry is called directly; otherwise the call goes through InvokeFunction,
+// which handles argument adaption.
+void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
+                                 int formal_parameter_count,
+                                 int arity,
+                                 LInstruction* instr,
+                                 Register function_reg) {
+  bool dont_adapt_arguments =
+      formal_parameter_count == SharedFunctionInfo::kDontAdaptArgumentsSentinel;
+  bool can_invoke_directly =
+      dont_adapt_arguments || formal_parameter_count == arity;
+
+  // The function interface relies on the following register assignments.
+  ASSERT(function_reg.Is(x1) || function_reg.IsNone());
+  Register arity_reg = x0;
+
+  LPointerMap* pointers = instr->pointer_map();
+
+  // If necessary, load the function object.
+  if (function_reg.IsNone()) {
+    function_reg = x1;
+    __ LoadObject(function_reg, function);
+  }
+
+  if (FLAG_debug_code) {
+    Label is_not_smi;
+    // Try to confirm that function_reg (x1) is a tagged pointer.
+    __ JumpIfNotSmi(function_reg, &is_not_smi);
+    __ Abort(kExpectedFunctionObject);
+    __ Bind(&is_not_smi);
+  }
+
+  if (can_invoke_directly) {
+    // Change context.
+    __ Ldr(cp, FieldMemOperand(function_reg, JSFunction::kContextOffset));
+
+    // Set the arguments count if adaption is not needed. Assumes that x0 is
+    // available to write to at this point.
+    if (dont_adapt_arguments) {
+      __ Mov(arity_reg, arity);
+    }
+
+    // Invoke function.
+    __ Ldr(x10, FieldMemOperand(function_reg, JSFunction::kCodeEntryOffset));
+    __ Call(x10);
+
+    // Set up deoptimization.
+    RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
+  } else {
+    // Arity mismatch: go through the generic invoke path, which adapts
+    // the arguments.
+    SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
+    ParameterCount count(arity);
+    ParameterCount expected(formal_parameter_count);
+    __ InvokeFunction(function_reg, expected, count, CALL_FUNCTION, generator);
+  }
+}
+
+
+// Call a code object given either as a constant handle or as a register
+// holding a Code object (whose entry point is the header-adjusted address).
+void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
+  ASSERT(instr->IsMarkedAsCall());
+  ASSERT(ToRegister(instr->result()).Is(x0));
+
+  LPointerMap* pointers = instr->pointer_map();
+  SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
+
+  if (instr->target()->IsConstantOperand()) {
+    LConstantOperand* target = LConstantOperand::cast(instr->target());
+    Handle<Code> code = Handle<Code>::cast(ToHandle(target));
+    generator.BeforeCall(__ CallSize(code, RelocInfo::CODE_TARGET));
+    // TODO(all): on ARM we use a call descriptor to specify a storage mode
+    // but on A64 we only have one storage mode so it isn't necessary. Check
+    // this understanding is correct.
+    __ Call(code, RelocInfo::CODE_TARGET, TypeFeedbackId::None());
+  } else {
+    ASSERT(instr->target()->IsRegister());
+    Register target = ToRegister(instr->target());
+    generator.BeforeCall(__ CallSize(target));
+    // Skip the Code object header to reach the first instruction.
+    __ Add(target, target, Code::kHeaderSize - kHeapObjectTag);
+    __ Call(target);
+  }
+  generator.AfterCall();
+}
+
+
+// Call the JSFunction in x1 directly via its code entry, optionally passing
+// the argument count in x0.
+void LCodeGen::DoCallJSFunction(LCallJSFunction* instr) {
+  ASSERT(instr->IsMarkedAsCall());
+  ASSERT(ToRegister(instr->function()).is(x1));
+
+  if (instr->hydrogen()->pass_argument_count()) {
+    __ Mov(x0, Operand(instr->arity()));
+  }
+
+  // Change context.
+  __ Ldr(cp, FieldMemOperand(x1, JSFunction::kContextOffset));
+
+  // Load the code entry address
+  __ Ldr(x10, FieldMemOperand(x1, JSFunction::kCodeEntryOffset));
+  __ Call(x10);
+
+  RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
+}
+
+
+// Call a runtime function with the instruction's arity.
+void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
+  CallRuntime(instr->function(), instr->arity(), instr);
+}
+
+
+// Dispatch to one of the supported code stubs (RegExpExec, SubString,
+// StringCompare); any other major key is a compiler bug.
+void LCodeGen::DoCallStub(LCallStub* instr) {
+  ASSERT(ToRegister(instr->context()).is(cp));
+  ASSERT(ToRegister(instr->result()).is(x0));
+  switch (instr->hydrogen()->major_key()) {
+    case CodeStub::RegExpExec: {
+      RegExpExecStub stub;
+      CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
+      break;
+    }
+    case CodeStub::SubString: {
+      SubStringStub stub;
+      CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
+      break;
+    }
+    case CodeStub::StringCompare: {
+      StringCompareStub stub;
+      CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
+      break;
+    }
+    default:
+      UNREACHABLE();
+  }
+}
+
+
+// OSR entry values need no code; just emit the (one-time) OSR prologue.
+void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
+  GenerateOsrPrologue();
+}
+
+
+// Deferred path for DoCheckMaps: try to migrate a (possibly deprecated)
+// object instance via the runtime, then deoptimize if migration failed
+// (the runtime returned a smi).
+void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
+  Register temp = ToRegister(instr->temp());
+  {
+    PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
+    __ Push(object);
+    // NOTE(review): cp is zeroed before the call — presumably
+    // kTryMigrateInstance needs no context; confirm against the runtime.
+    __ Mov(cp, 0);
+    __ CallRuntimeSaveDoubles(Runtime::kTryMigrateInstance);
+    RecordSafepointWithRegisters(
+        instr->pointer_map(), 1, Safepoint::kNoLazyDeopt);
+    __ StoreToSafepointRegisterSlot(x0, temp);
+  }
+  // A smi result means the migration did not produce a usable object.
+  DeoptimizeIfSmi(temp, instr->environment());
+}
+
+
+// Check that the object's map is one of the allowed maps. If a migration
+// target exists, a failed check first attempts instance migration (deferred
+// path) and re-checks; otherwise it deoptimizes immediately.
+void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
+  // Deferred code that tries to migrate the instance and then jumps back
+  // to the map check (its exit is the check_maps label).
+  class DeferredCheckMaps: public LDeferredCode {
+   public:
+    DeferredCheckMaps(LCodeGen* codegen, LCheckMaps* instr, Register object)
+        : LDeferredCode(codegen), instr_(instr), object_(object) {
+      SetExit(check_maps());
+    }
+    virtual void Generate() {
+      codegen()->DoDeferredInstanceMigration(instr_, object_);
+    }
+    Label* check_maps() { return &check_maps_; }
+    virtual LInstruction* instr() { return instr_; }
+   private:
+    LCheckMaps* instr_;
+    Label check_maps_;
+    Register object_;
+  };
+
+  if (instr->hydrogen()->CanOmitMapChecks()) {
+    // The check was statically proven unnecessary; no operands were
+    // allocated in that case.
+    ASSERT(instr->value() == NULL);
+    ASSERT(instr->temp() == NULL);
+    return;
+  }
+
+  Register object = ToRegister(instr->value());
+  Register map_reg = ToRegister(instr->temp());
+
+  __ Ldr(map_reg, FieldMemOperand(object, HeapObject::kMapOffset));
+
+  DeferredCheckMaps* deferred = NULL;
+  if (instr->hydrogen()->has_migration_target()) {
+    deferred = new(zone()) DeferredCheckMaps(this, instr, object);
+    // Re-entry point after a successful migration.
+    __ Bind(deferred->check_maps());
+  }
+
+  // Compare against each allowed map; branch to success on the first match.
+  UniqueSet<Map> map_set = instr->hydrogen()->map_set();
+  Label success;
+  for (int i = 0; i < map_set.size(); i++) {
+    Handle<Map> map = map_set.at(i).handle();
+    __ CompareMap(map_reg, map, &success);
+    __ B(eq, &success);
+  }
+
+  // We didn't match a map.
+  if (instr->hydrogen()->has_migration_target()) {
+    __ B(deferred->entry());
+  } else {
+    Deoptimize(instr->environment());
+  }
+
+  __ Bind(&success);
+}
+
+
+// Deoptimize if the value is a smi (unless it is statically known to be a
+// heap object, in which case no code is needed).
+void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
+  if (!instr->hydrogen()->value()->IsHeapObject()) {
+    // TODO(all): Depending of how we chose to implement the deopt, if we could
+    // guarantee that we have a deopt handler reachable by a tbz instruction,
+    // we could use tbz here and produce less code to support this instruction.
+    DeoptimizeIfSmi(ToRegister(instr->value()), instr->environment());
+  }
+}
+
+
+// Deoptimize if the value is not a smi.
+void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
+  Register value = ToRegister(instr->value());
+  // The value passes through unchanged: result (if any) aliases the input.
+  ASSERT(!instr->result() || ToRegister(instr->result()).Is(value));
+  // TODO(all): See DoCheckNonSmi for comments on use of tbz.
+  DeoptimizeIfNotSmi(value, instr->environment());
+}
+
+
+// Deoptimize unless the object's instance type satisfies the check: either
+// an [first, last] interval check or a mask/tag bit-pattern check.
+void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
+  Register input = ToRegister(instr->value());
+  Register scratch = ToRegister(instr->temp());
+
+  // Load the instance type from the object's map.
+  __ Ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
+  __ Ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
+
+  if (instr->hydrogen()->is_interval_check()) {
+    InstanceType first, last;
+    instr->hydrogen()->GetCheckInterval(&first, &last);
+
+    __ Cmp(scratch, first);
+    if (first == last) {
+      // If there is only one type in the interval check for equality.
+      DeoptimizeIf(ne, instr->environment());
+    } else if (last == LAST_TYPE) {
+      // We don't need to compare with the higher bound of the interval.
+      DeoptimizeIf(lo, instr->environment());
+    } else {
+      // If we are below the lower bound, set the C flag and clear the Z flag
+      // to force a deopt.
+      __ Ccmp(scratch, last, CFlag, hs);
+      DeoptimizeIf(hi, instr->environment());
+    }
+  } else {
+    uint8_t mask;
+    uint8_t tag;
+    instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag);
+
+    if (IsPowerOf2(mask)) {
+      // Single-bit mask: the tag is either 0 (bit must be clear) or equal
+      // to the mask (bit must be set).
+      ASSERT((tag == 0) || (tag == mask));
+      // TODO(all): We might be able to use tbz/tbnz if we can guarantee that
+      // the deopt handler is reachable by a tbz instruction.
+      __ Tst(scratch, mask);
+      DeoptimizeIf(tag == 0 ? ne : eq, instr->environment());
+    } else {
+      if (tag == 0) {
+        __ Tst(scratch, mask);
+      } else {
+        __ And(scratch, scratch, mask);
+        __ Cmp(scratch, tag);
+      }
+      DeoptimizeIf(ne, instr->environment());
+    }
+  }
+}
+
+
+// Clamp a double to the uint8 range [0, 255].
+void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
+  DoubleRegister input = ToDoubleRegister(instr->unclamped());
+  Register result = ToRegister(instr->result());
+  __ ClampDoubleToUint8(result, input, double_scratch());
+}
+
+
+// Clamp an int32 to the uint8 range [0, 255].
+void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
+  Register input = ToRegister32(instr->unclamped());
+  Register result = ToRegister32(instr->result());
+  __ ClampInt32ToUint8(result, input);
+}
+
+
+// Clamp a tagged value to the uint8 range. Smis and heap numbers are
+// clamped numerically; undefined clamps to 0; anything else deoptimizes.
+void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
+  Register input = ToRegister(instr->unclamped());
+  Register result = ToRegister(instr->result());
+  Register scratch = ToRegister(instr->temp1());
+  Label done;
+
+  // Both smi and heap number cases are handled.
+  Label is_not_smi;
+  __ JumpIfNotSmi(input, &is_not_smi);
+  __ SmiUntag(result, input);
+  __ ClampInt32ToUint8(result);
+  __ B(&done);
+
+  __ Bind(&is_not_smi);
+
+  // Check for heap number.
+  Label is_heap_number;
+  __ Ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
+  __ JumpIfRoot(scratch, Heap::kHeapNumberMapRootIndex, &is_heap_number);
+
+  // Check for undefined. Undefined is converted to zero for clamping
+  // conversion; any other non-number value deoptimizes.
+  DeoptimizeIfNotRoot(input, Heap::kUndefinedValueRootIndex,
+                      instr->environment());
+  __ Mov(result, 0);
+  __ B(&done);
+
+  // Heap number case.
+  __ Bind(&is_heap_number);
+  DoubleRegister dbl_scratch = double_scratch();
+  DoubleRegister dbl_scratch2 = ToDoubleRegister(instr->temp2());
+  __ Ldr(dbl_scratch, FieldMemOperand(input, HeapNumber::kValueOffset));
+  __ ClampDoubleToUint8(result, dbl_scratch, dbl_scratch2);
+
+  __ Bind(&done);
+}
+
+
+// Branch on whether the object's [[Class]] equals `class_name`. The class
+// is derived from the instance class name of the constructor function
+// stored in the object's map.
+void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
+  Handle<String> class_name = instr->hydrogen()->class_name();
+  Label* true_label = instr->TrueLabel(chunk_);
+  Label* false_label = instr->FalseLabel(chunk_);
+  Register input = ToRegister(instr->value());
+  Register scratch1 = ToRegister(instr->temp1());
+  Register scratch2 = ToRegister(instr->temp2());
+
+  // Smis never match any class name.
+  __ JumpIfSmi(input, false_label);
+
+  Register map = scratch2;
+  if (class_name->IsUtf8EqualTo(CStrVector("Function"))) {
+    // Assuming the following assertions, we can use the same compares to test
+    // for both being a function type and being in the object type range.
+    STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
+    STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE ==
+                  FIRST_SPEC_OBJECT_TYPE + 1);
+    STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE ==
+                  LAST_SPEC_OBJECT_TYPE - 1);
+    STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
+
+    // We expect CompareObjectType to load the object instance type in scratch1.
+    __ CompareObjectType(input, map, scratch1, FIRST_SPEC_OBJECT_TYPE);
+    __ B(lt, false_label);
+    __ B(eq, true_label);
+    __ Cmp(scratch1, LAST_SPEC_OBJECT_TYPE);
+    __ B(eq, true_label);
+  } else {
+    __ IsObjectJSObjectType(input, map, scratch1, false_label);
+  }
+
+  // Now we are in the FIRST-LAST_NONCALLABLE_SPEC_OBJECT_TYPE range.
+  // Check if the constructor in the map is a function.
+  __ Ldr(scratch1, FieldMemOperand(map, Map::kConstructorOffset));
+
+  // Objects with a non-function constructor have class 'Object'.
+  if (class_name->IsUtf8EqualTo(CStrVector("Object"))) {
+    __ JumpIfNotObjectType(
+        scratch1, scratch2, scratch2, JS_FUNCTION_TYPE, true_label);
+  } else {
+    __ JumpIfNotObjectType(
+        scratch1, scratch2, scratch2, JS_FUNCTION_TYPE, false_label);
+  }
+
+  // The constructor function is in scratch1. Get its instance class name.
+  __ Ldr(scratch1,
+         FieldMemOperand(scratch1, JSFunction::kSharedFunctionInfoOffset));
+  __ Ldr(scratch1,
+         FieldMemOperand(scratch1,
+                         SharedFunctionInfo::kInstanceClassNameOffset));
+
+  // The class name we are testing against is internalized since it's a literal.
+  // The name in the constructor is internalized because of the way the context
+  // is booted. This routine isn't expected to work for random API-created
+  // classes and it doesn't have to because you can't access it with natives
+  // syntax. Since both sides are internalized it is sufficient to use an
+  // identity comparison.
+  EmitCompareAndBranch(instr, eq, scratch1, Operand(class_name));
+}
+
+
+// Branch on whether a double value is the hole NaN (the specific NaN bit
+// pattern kHoleNanInt64 used to mark holes in double arrays).
+void LCodeGen::DoCmpHoleAndBranchD(LCmpHoleAndBranchD* instr) {
+  ASSERT(instr->hydrogen()->representation().IsDouble());
+  FPRegister object = ToDoubleRegister(instr->object());
+  Register temp = ToRegister(instr->temp());
+
+  // If we don't have a NaN, we don't have the hole, so branch now to avoid the
+  // (relatively expensive) hole-NaN check.
+  __ Fcmp(object, object);
+  __ B(vc, instr->FalseLabel(chunk_));
+
+  // We have a NaN, but is it the hole? Compare the raw bit pattern.
+  __ Fmov(temp, object);
+  EmitCompareAndBranch(instr, eq, temp, kHoleNanInt64);
+}
+
+
+// Branch on whether a tagged value is the hole sentinel object.
+void LCodeGen::DoCmpHoleAndBranchT(LCmpHoleAndBranchT* instr) {
+  ASSERT(instr->hydrogen()->representation().IsTagged());
+  Register object = ToRegister(instr->object());
+
+  EmitBranchIfRoot(instr, object, Heap::kTheHoleValueRootIndex);
+}
+
+
+// Branch on whether the value's map equals the expected map.
+void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
+  Register value = ToRegister(instr->value());
+  Register map = ToRegister(instr->temp());
+
+  __ Ldr(map, FieldMemOperand(value, HeapObject::kMapOffset));
+  EmitCompareAndBranch(instr, eq, map, Operand(instr->map()));
+}
+
+
+// Branch on whether the value is -0.0. Accepts a double directly, or a
+// tagged value that must be a heap number (anything else goes to false).
+void LCodeGen::DoCompareMinusZeroAndBranch(LCompareMinusZeroAndBranch* instr) {
+  Representation rep = instr->hydrogen()->value()->representation();
+  // Integer32 values can never be -0.0, so that representation is excluded.
+  ASSERT(!rep.IsInteger32());
+  Register scratch = ToRegister(instr->temp());
+
+  if (rep.IsDouble()) {
+    __ JumpIfMinusZero(ToDoubleRegister(instr->value()),
+                       instr->TrueLabel(chunk()));
+  } else {
+    Register value = ToRegister(instr->value());
+    // Non-heap-numbers (including smis, via DO_SMI_CHECK) go to false.
+    __ CheckMap(value, scratch, Heap::kHeapNumberMapRootIndex,
+                instr->FalseLabel(chunk()), DO_SMI_CHECK);
+    __ Ldr(double_scratch(), FieldMemOperand(value, HeapNumber::kValueOffset));
+    __ JumpIfMinusZero(double_scratch(), instr->TrueLabel(chunk()));
+  }
+  EmitGoto(instr->FalseDestination(chunk()));
+}
+
+
+// Numeric comparison with branch. Constant/constant comparisons fold to an
+// unconditional goto; otherwise doubles use Fcmp (NaN -> false) and
+// integers/smis use compare-and-branch. When only the left operand is a
+// constant, operands are transposed and the condition reversed so the
+// constant can be used as an immediate.
+void LCodeGen::DoCompareNumericAndBranch(LCompareNumericAndBranch* instr) {
+  LOperand* left = instr->left();
+  LOperand* right = instr->right();
+  Condition cond = TokenToCondition(instr->op(), false);
+
+  if (left->IsConstantOperand() && right->IsConstantOperand()) {
+    // We can statically evaluate the comparison.
+    double left_val = ToDouble(LConstantOperand::cast(left));
+    double right_val = ToDouble(LConstantOperand::cast(right));
+    int next_block = EvalComparison(instr->op(), left_val, right_val) ?
+        instr->TrueDestination(chunk_) : instr->FalseDestination(chunk_);
+    EmitGoto(next_block);
+  } else {
+    if (instr->is_double()) {
+      if (right->IsConstantOperand()) {
+        __ Fcmp(ToDoubleRegister(left),
+                ToDouble(LConstantOperand::cast(right)));
+      } else if (left->IsConstantOperand()) {
+        // Transpose the operands and reverse the condition.
+        __ Fcmp(ToDoubleRegister(right),
+                ToDouble(LConstantOperand::cast(left)));
+        cond = ReverseConditionForCmp(cond);
+      } else {
+        __ Fcmp(ToDoubleRegister(left), ToDoubleRegister(right));
+      }
+
+      // If a NaN is involved, i.e. the result is unordered (V set),
+      // jump to false block label.
+      __ B(vs, instr->FalseLabel(chunk_));
+      EmitBranch(instr, cond);
+    } else {
+      if (instr->hydrogen_value()->representation().IsInteger32()) {
+        if (right->IsConstantOperand()) {
+          EmitCompareAndBranch(instr,
+                               cond,
+                               ToRegister32(left),
+                               ToOperand32I(right));
+        } else {
+          // Transpose the operands and reverse the condition.
+          EmitCompareAndBranch(instr,
+                               ReverseConditionForCmp(cond),
+                               ToRegister32(right),
+                               ToOperand32I(left));
+        }
+      } else {
+        ASSERT(instr->hydrogen_value()->representation().IsSmi());
+        if (right->IsConstantOperand()) {
+          int32_t value = ToInteger32(LConstantOperand::cast(right));
+          EmitCompareAndBranch(instr,
+                               cond,
+                               ToRegister(left),
+                               Operand(Smi::FromInt(value)));
+        } else if (left->IsConstantOperand()) {
+          // Transpose the operands and reverse the condition.
+          int32_t value = ToInteger32(LConstantOperand::cast(left));
+          EmitCompareAndBranch(instr,
+                               ReverseConditionForCmp(cond),
+                               ToRegister(right),
+                               Operand(Smi::FromInt(value)));
+        } else {
+          EmitCompareAndBranch(instr,
+                               cond,
+                               ToRegister(left),
+                               ToRegister(right));
+        }
+      }
+    }
+  }
+}
+
+
+// Branch on pointer (reference) equality of two tagged values.
+void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
+ Register left = ToRegister(instr->left());
+ Register right = ToRegister(instr->right());
+ EmitCompareAndBranch(instr, eq, left, right);
+}
+
+
+// Generic (tagged) comparison via the CompareIC stub. The stub returns its
+// result in x0; the final Csel materializes true/false from the condition
+// applied to that result compared against zero.
+void LCodeGen::DoCmpT(LCmpT* instr) {
+ ASSERT(ToRegister(instr->context()).is(cp));
+ Token::Value op = instr->op();
+ Condition cond = TokenToCondition(op, false);
+
+ ASSERT(ToRegister(instr->left()).Is(x1));
+ ASSERT(ToRegister(instr->right()).Is(x0));
+ Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
+ CallCode(ic, RelocInfo::CODE_TARGET, instr);
+ // Signal that we don't inline smi code before this stub.
+ InlineSmiCheckInfo::EmitNotInlined(masm());
+
+ // Return true or false depending on CompareIC result.
+ // This instruction is marked as call. We can clobber any register.
+ ASSERT(instr->IsMarkedAsCall());
+ __ LoadTrueFalseRoots(x1, x2);
+ __ Cmp(x0, 0);
+ __ Csel(ToRegister(instr->result()), x1, x2, cond);
+}
+
+
+// Materialize a double constant into an FP register.
+void LCodeGen::DoConstantD(LConstantD* instr) {
+ ASSERT(instr->result()->IsDoubleRegister());
+ DoubleRegister result = ToDoubleRegister(instr->result());
+ __ Fmov(result, instr->value());
+}
+
+
+// Materialize an external-reference constant into a register.
+void LCodeGen::DoConstantE(LConstantE* instr) {
+ __ Mov(ToRegister(instr->result()), Operand(instr->value()));
+}
+
+
+// Materialize an integer constant into a register.
+void LCodeGen::DoConstantI(LConstantI* instr) {
+ __ Mov(ToRegister(instr->result()), instr->value());
+}
+
+
+// Materialize a smi constant into a register.
+void LCodeGen::DoConstantS(LConstantS* instr) {
+ __ Mov(ToRegister(instr->result()), Operand(instr->value()));
+}
+
+
+// Materialize a tagged (heap object or smi) constant into a register. The
+// deferred-handle-dereference scope permits inspecting the handle here.
+void LCodeGen::DoConstantT(LConstantT* instr) {
+ Handle<Object> value = instr->value(isolate());
+ AllowDeferredHandleDereference smi_check;
+ __ LoadObject(ToRegister(instr->result()), value);
+}
+
+
+// Load the current context into the result register: from the frame slot when
+// optimizing (a frame exists), otherwise the context is already in cp.
+void LCodeGen::DoContext(LContext* instr) {
+ // If there is a non-return use, the context must be moved to a register.
+ Register result = ToRegister(instr->result());
+ if (info()->IsOptimizing()) {
+ __ Ldr(result, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ } else {
+ // If there is no frame, the context must be in cp.
+ ASSERT(result.is(cp));
+ }
+}
+
+
+// Deoptimize unless the register holds exactly the expected object. Objects
+// in new space may move under GC, so the comparison goes through a cell that
+// the GC keeps up to date instead of embedding the raw pointer.
+void LCodeGen::DoCheckValue(LCheckValue* instr) {
+ Register reg = ToRegister(instr->value());
+ Handle<HeapObject> object = instr->hydrogen()->object().handle();
+ AllowDeferredHandleDereference smi_check;
+ if (isolate()->heap()->InNewSpace(*object)) {
+ Register temp = ToRegister(instr->temp());
+ Handle<Cell> cell = isolate()->factory()->NewCell(object);
+ __ Mov(temp, Operand(Handle<Object>(cell)));
+ __ Ldr(temp, FieldMemOperand(temp, Cell::kValueOffset));
+ __ Cmp(reg, temp);
+ } else {
+ __ Cmp(reg, Operand(object));
+ }
+ DeoptimizeIf(ne, instr->environment());
+}
+
+
+// Record a lazy-deoptimization point: reserve patchable space and register
+// the environment so the deoptimizer can reconstruct the frame here.
+void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
+ EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
+ ASSERT(instr->HasEnvironment());
+ LEnvironment* env = instr->environment();
+ RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
+ safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
+}
+
+
+// Load a field of a JSDate. Deoptimizes if the input is not a JSDate.
+// Field 0 (the value) is always loaded directly; other cached fields are
+// loaded inline when the date cache stamp is still valid, otherwise (and for
+// uncached fields) the C++ get-date-field function is called.
+void LCodeGen::DoDateField(LDateField* instr) {
+ Register object = ToRegister(instr->date());
+ Register result = ToRegister(instr->result());
+ Register temp1 = x10;
+ Register temp2 = x11;
+ Smi* index = instr->index();
+ Label runtime, done, deopt, obj_ok;
+
+ ASSERT(object.is(result) && object.Is(x0));
+ ASSERT(instr->IsMarkedAsCall());
+
+ __ JumpIfSmi(object, &deopt);
+ __ CompareObjectType(object, temp1, temp1, JS_DATE_TYPE);
+ __ B(eq, &obj_ok);
+
+ __ Bind(&deopt);
+ Deoptimize(instr->environment());
+
+ __ Bind(&obj_ok);
+ if (index->value() == 0) {
+ __ Ldr(result, FieldMemOperand(object, JSDate::kValueOffset));
+ } else {
+ if (index->value() < JSDate::kFirstUncachedField) {
+ // Fast path: the cache is valid iff the global stamp matches the
+ // object's stamp.
+ ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
+ __ Mov(temp1, Operand(stamp));
+ __ Ldr(temp1, MemOperand(temp1));
+ __ Ldr(temp2, FieldMemOperand(object, JSDate::kCacheStampOffset));
+ __ Cmp(temp1, temp2);
+ __ B(ne, &runtime);
+ __ Ldr(result, FieldMemOperand(object, JSDate::kValueOffset +
+ kPointerSize * index->value()));
+ __ B(&done);
+ }
+
+ // Slow path: ask the runtime for the field value.
+ __ Bind(&runtime);
+ __ Mov(x1, Operand(index));
+ __ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
+ }
+
+ __ Bind(&done);
+}
+
+
+// Unconditional deoptimization. For stubs, EAGER deopts are promoted to LAZY
+// (see TODO below) before emitting the bailout.
+void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
+ Deoptimizer::BailoutType type = instr->hydrogen()->type();
+ // TODO(danno): Stubs expect all deopts to be lazy for historical reasons (the
+ // needed return address), even though the implementation of LAZY and EAGER is
+ // now identical. When LAZY is eventually completely folded into EAGER, remove
+ // the special case below.
+ if (info()->IsStub() && (type == Deoptimizer::EAGER)) {
+ type = Deoptimizer::LAZY;
+ }
+
+ Comment(";;; deoptimize: %s", instr->hydrogen()->reason());
+ DeoptimizeHeader(instr->environment(), &type);
+ Deoptimize(instr->environment(), type);
+}
+
+
+// Integer division. Two code paths:
+//  - power-of-2 constant divisor: shift-based, with inline handling of
+//    negative dividends, -0 bailout, kMinInt/-1 overflow, and a
+//    remainder-must-be-zero deopt when uses don't truncate;
+//  - general divisor: Sdiv, followed by deopt checks (div-by-zero, -0,
+//    kMinInt/-1, nonzero remainder) unless all uses truncate to int32.
+void LCodeGen::DoDivI(LDivI* instr) {
+ Register dividend = ToRegister32(instr->left());
+ Register result = ToRegister32(instr->result());
+
+ bool has_power_of_2_divisor = instr->hydrogen()->RightIsPowerOf2();
+ bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
+ bool bailout_on_minus_zero =
+ instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero);
+ bool can_be_div_by_zero =
+ instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero);
+ bool all_uses_truncating_to_int32 =
+ instr->hydrogen()->CheckFlag(HInstruction::kAllUsesTruncatingToInt32);
+
+ if (has_power_of_2_divisor) {
+ ASSERT(instr->temp() == NULL);
+ int32_t divisor = ToInteger32(LConstantOperand::cast(instr->right()));
+ int32_t power;
+ int32_t power_mask;
+ Label deopt, done;
+
+ ASSERT(divisor != 0);
+ if (divisor > 0) {
+ power = WhichPowerOf2(divisor);
+ power_mask = divisor - 1;
+ } else {
+ // Check for (0 / -x) as that will produce negative zero.
+ if (bailout_on_minus_zero) {
+ if (all_uses_truncating_to_int32) {
+ // If all uses truncate, and the dividend is zero, the truncated
+ // result is zero.
+ __ Mov(result, 0);
+ __ Cbz(dividend, &done);
+ } else {
+ __ Cbz(dividend, &deopt);
+ }
+ }
+ // Check for (kMinInt / -1).
+ if ((divisor == -1) && can_overflow && !all_uses_truncating_to_int32) {
+ // Check for kMinInt by subtracting one and checking for overflow.
+ __ Cmp(dividend, 1);
+ __ B(vs, &deopt);
+ }
+ power = WhichPowerOf2(-divisor);
+ power_mask = -divisor - 1;
+ }
+
+ if (power_mask != 0) {
+ if (all_uses_truncating_to_int32) {
+ // Truncating arithmetic shift of |dividend|, re-negated as needed.
+ __ Cmp(dividend, 0);
+ __ Cneg(result, dividend, lt);
+ __ Asr(result, result, power);
+ if (divisor > 0) __ Cneg(result, result, lt);
+ if (divisor < 0) __ Cneg(result, result, gt);
+ return; // Don't fall through to negation below.
+ } else {
+ // Deoptimize if remainder is not 0. If the least-significant
+ // power bits aren't 0, it's not a multiple of 2^power, and
+ // therefore, there will be a remainder.
+ __ TestAndBranchIfAnySet(dividend, power_mask, &deopt);
+ __ Asr(result, dividend, power);
+ if (divisor < 0) __ Neg(result, result);
+ }
+ } else {
+ ASSERT((divisor == 1) || (divisor == -1));
+ if (divisor < 0) {
+ __ Neg(result, dividend);
+ } else {
+ __ Mov(result, dividend);
+ }
+ }
+ __ B(&done);
+ __ Bind(&deopt);
+ Deoptimize(instr->environment());
+ __ Bind(&done);
+ } else {
+ Register divisor = ToRegister32(instr->right());
+
+ // Issue the division first, and then check for any deopt cases whilst the
+ // result is computed.
+ __ Sdiv(result, dividend, divisor);
+
+ if (!all_uses_truncating_to_int32) {
+ Label deopt;
+ // Check for x / 0.
+ if (can_be_div_by_zero) {
+ __ Cbz(divisor, &deopt);
+ }
+
+ // Check for (0 / -x) as that will produce negative zero.
+ if (bailout_on_minus_zero) {
+ __ Cmp(divisor, 0);
+
+ // If the divisor < 0 (mi), compare the dividend, and deopt if it is
+ // zero, ie. zero dividend with negative divisor deopts.
+ // If the divisor >= 0 (pl, the opposite of mi) set the flags to
+ // condition ne, so we don't deopt, ie. positive divisor doesn't deopt.
+ __ Ccmp(dividend, 0, NoFlag, mi);
+ __ B(eq, &deopt);
+ }
+
+ // Check for (kMinInt / -1).
+ if (can_overflow) {
+ // Test dividend for kMinInt by subtracting one (cmp) and checking for
+ // overflow.
+ __ Cmp(dividend, 1);
+ // If overflow is set, ie. dividend = kMinInt, compare the divisor with
+ // -1. If overflow is clear, set the flags for condition ne, as the
+ // dividend isn't -1, and thus we shouldn't deopt.
+ __ Ccmp(divisor, -1, NoFlag, vs);
+ __ B(eq, &deopt);
+ }
+
+ // Compute remainder and deopt if it's not zero.
+ Register remainder = ToRegister32(instr->temp());
+ __ Msub(remainder, result, divisor, dividend);
+ __ Cbnz(remainder, &deopt);
+
+ Label div_ok;
+ __ B(&div_ok);
+ __ Bind(&deopt);
+ Deoptimize(instr->environment());
+ __ Bind(&div_ok);
+ } else {
+ ASSERT(instr->temp() == NULL);
+ }
+ }
+}
+
+
+// Convert a double to int32 (optionally smi-tagging the result), deoptimizing
+// on minus zero (when requested) or when the value is not exactly
+// representable as an int32.
+void LCodeGen::DoDoubleToIntOrSmi(LDoubleToIntOrSmi* instr) {
+ DoubleRegister input = ToDoubleRegister(instr->value());
+ Register result = ToRegister32(instr->result());
+ Label done, deopt;
+
+ if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ __ JumpIfMinusZero(input, &deopt);
+ }
+
+ // Falls through to the deopt label when the conversion is lossy.
+ __ TryConvertDoubleToInt32(result, input, double_scratch(), &done);
+ __ Bind(&deopt);
+ Deoptimize(instr->environment());
+ __ Bind(&done);
+
+ if (instr->tag_result()) {
+ __ SmiTag(result.X());
+ }
+}
+
+
+// Pop instr->count() slots from the stack.
+void LCodeGen::DoDrop(LDrop* instr) {
+ __ Drop(instr->count());
+}
+
+
+// Intentionally emits no code.
+void LCodeGen::DoDummy(LDummy* instr) {
+ // Nothing to see here, move on!
+}
+
+
+// Intentionally emits no code.
+void LCodeGen::DoDummyUse(LDummyUse* instr) {
+ // Nothing to see here, move on!
+}
+
+
+// Create a closure for a function literal: the fast stub for non-pretenured
+// functions without literals, otherwise the Runtime::kNewClosure call.
+void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
+ ASSERT(ToRegister(instr->context()).is(cp));
+ // FunctionLiteral instruction is marked as call, we can trash any register.
+ ASSERT(instr->IsMarkedAsCall());
+
+ // Use the fast case closure allocation code that allocates in new
+ // space for nested functions that don't need literals cloning.
+ bool pretenure = instr->hydrogen()->pretenure();
+ if (!pretenure && instr->hydrogen()->has_no_literals()) {
+ FastNewClosureStub stub(instr->hydrogen()->language_mode(),
+ instr->hydrogen()->is_generator());
+ __ Mov(x2, Operand(instr->hydrogen()->shared_info()));
+ CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
+ } else {
+ __ Mov(x2, Operand(instr->hydrogen()->shared_info()));
+ __ Mov(x1, Operand(pretenure ? factory()->true_value()
+ : factory()->false_value()));
+ __ Push(cp, x2, x1);
+ CallRuntime(Runtime::kNewClosure, 3, instr);
+ }
+}
+
+
+// For-in support: produce the enumeration cache array for a map, or the empty
+// fixed array when the enum length is zero. Deoptimizes if the requested
+// cache entry is absent (zero).
+void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
+ Register map = ToRegister(instr->map());
+ Register result = ToRegister(instr->result());
+ Label load_cache, done;
+
+ __ EnumLengthUntagged(result, map);
+ __ Cbnz(result, &load_cache);
+
+ __ Mov(result, Operand(isolate()->factory()->empty_fixed_array()));
+ __ B(&done);
+
+ __ Bind(&load_cache);
+ __ LoadInstanceDescriptors(map, result);
+ __ Ldr(result, FieldMemOperand(result, DescriptorArray::kEnumCacheOffset));
+ __ Ldr(result, FieldMemOperand(result, FixedArray::SizeFor(instr->idx())));
+ DeoptimizeIfZero(result, instr->environment());
+
+ __ Bind(&done);
+}
+
+
+// For-in support: validate the object to enumerate (deopting for undefined,
+// null, smis, and proxies), then either use the map's enum cache or call the
+// runtime to collect property names. The runtime result must be a fixed array
+// headed by the meta-map, otherwise we deoptimize.
+void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
+ Register object = ToRegister(instr->object());
+ Register null_value = x5;
+
+ ASSERT(instr->IsMarkedAsCall());
+ ASSERT(object.Is(x0));
+
+ Label deopt;
+
+ __ JumpIfRoot(object, Heap::kUndefinedValueRootIndex, &deopt);
+
+ __ LoadRoot(null_value, Heap::kNullValueRootIndex);
+ __ Cmp(object, null_value);
+ __ B(eq, &deopt);
+
+ __ JumpIfSmi(object, &deopt);
+
+ STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
+ __ CompareObjectType(object, x1, x1, LAST_JS_PROXY_TYPE);
+ __ B(le, &deopt);
+
+ Label use_cache, call_runtime;
+ __ CheckEnumCache(object, null_value, x1, x2, x3, x4, &call_runtime);
+
+ __ Ldr(object, FieldMemOperand(object, HeapObject::kMapOffset));
+ __ B(&use_cache);
+
+ __ Bind(&deopt);
+ Deoptimize(instr->environment());
+
+ // Get the set of properties to enumerate.
+ __ Bind(&call_runtime);
+ __ Push(object);
+ CallRuntime(Runtime::kGetPropertyNamesFast, 1, instr);
+
+ // Runtime result must carry the meta-map, i.e. be a map of property names.
+ __ Ldr(x1, FieldMemOperand(object, HeapObject::kMapOffset));
+ __ JumpIfNotRoot(x1, Heap::kMetaMapRootIndex, &deopt);
+
+ __ Bind(&use_cache);
+}
+
+
+// Extract the cached array index from a string's hash field.
+void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) {
+ Register input = ToRegister(instr->value());
+ Register result = ToRegister(instr->result());
+
+ __ AssertString(input);
+
+ // Assert that we can use a W register load to get the hash.
+ ASSERT((String::kHashShift + String::kArrayIndexValueBits) < kWRegSize);
+ __ Ldr(result.W(), FieldMemOperand(input, String::kHashFieldOffset));
+ __ IndexFromHash(result, result);
+}
+
+
+// Emit an unconditional branch to the given block, eliding the branch when
+// the target is the next block to be emitted.
+void LCodeGen::EmitGoto(int block) {
+ // Do not emit jump if we are emitting a goto to the next block.
+ if (!IsNextEmittedBlock(block)) {
+ __ B(chunk_->GetAssemblyLabel(LookupDestination(block)));
+ }
+}
+
+
+// Unconditional jump to the instruction's target block.
+void LCodeGen::DoGoto(LGoto* instr) {
+ EmitGoto(instr->block_id());
+}
+
+
+// Branch on whether a string's hash field indicates a cached array index
+// (the "contains cached array index" mask bits are all clear).
+void LCodeGen::DoHasCachedArrayIndexAndBranch(
+ LHasCachedArrayIndexAndBranch* instr) {
+ Register input = ToRegister(instr->value());
+ Register temp = ToRegister32(instr->temp());
+
+ // Assert that the cache status bits fit in a W register.
+ ASSERT(is_uint32(String::kContainsCachedArrayIndexMask));
+ __ Ldr(temp, FieldMemOperand(input, String::kHashFieldOffset));
+ __ Tst(temp, String::kContainsCachedArrayIndexMask);
+ EmitBranch(instr, eq);
+}
+
+
+// HHasInstanceTypeAndBranch instruction is built with an interval of type
+// to test but is only used in very restricted ways. The only possible kinds
+// of intervals are:
+// - [ FIRST_TYPE, instr->to() ]
+// - [ instr->from(), LAST_TYPE ]
+// - instr->from() == instr->to()
+//
+// These kinds of intervals can be checked with only one compare instruction
+// provided the correct value and test condition are used.
+//
+// TestType() will return the value to use in the compare instruction and
+// BranchCondition() will return the condition to use depending on the kind
+// of interval actually specified in the instruction.
+static InstanceType TestType(HHasInstanceTypeAndBranch* instr) {
+ InstanceType from = instr->from();
+ InstanceType to = instr->to();
+ if (from == FIRST_TYPE) return to;
+ ASSERT((from == to) || (to == LAST_TYPE));
+ return from;
+}
+
+
+// See comment above TestType function for what this function does.
+// Returns the branch condition matching the interval shape: eq for a single
+// type, hs (unsigned >=) for [from, LAST_TYPE], ls (unsigned <=) for
+// [FIRST_TYPE, to].
+static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) {
+ InstanceType from = instr->from();
+ InstanceType to = instr->to();
+ if (from == to) return eq;
+ if (to == LAST_TYPE) return hs;
+ if (from == FIRST_TYPE) return ls;
+ UNREACHABLE();
+ return eq;
+}
+
+
+// Branch on whether the value's instance type falls in the interval encoded
+// by the hydrogen instruction (see TestType/BranchCondition above).
+void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
+ Register input = ToRegister(instr->value());
+ Register scratch = ToRegister(instr->temp());
+
+ if (!instr->hydrogen()->value()->IsHeapObject()) {
+ __ JumpIfSmi(input, instr->FalseLabel(chunk_));
+ }
+ __ CompareObjectType(input, scratch, scratch, TestType(instr->hydrogen()));
+ EmitBranch(instr, BranchCondition(instr->hydrogen()));
+}
+
+
+// Compute the address of an object nested inside another allocation:
+// result = base + offset.
+void LCodeGen::DoInnerAllocatedObject(LInnerAllocatedObject* instr) {
+ Register result = ToRegister(instr->result());
+ Register base = ToRegister(instr->base_object());
+ __ Add(result, base, ToOperand(instr->offset()));
+}
+
+
+// Generic instanceof via InstanceofStub; converts the stub's 0/1 result in x0
+// into the true/false heap objects.
+void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
+ ASSERT(ToRegister(instr->context()).is(cp));
+ // Assert that the arguments are in the registers expected by InstanceofStub.
+ ASSERT(ToRegister(instr->left()).Is(InstanceofStub::left()));
+ ASSERT(ToRegister(instr->right()).Is(InstanceofStub::right()));
+
+ InstanceofStub stub(InstanceofStub::kArgsInRegisters);
+ CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
+
+ // InstanceofStub returns a result in x0:
+ // 0 => not an instance
+ // smi 1 => instance.
+ __ Cmp(x0, 0);
+ __ LoadTrueFalseRoots(x0, x1);
+ __ Csel(x0, x0, x1, eq);
+}
+
+
+// instanceof against a known global function, with an inline patchable
+// map/result cache. Fast path: if the object's map matches the cached map,
+// load the cached result. Slow path (cache miss): null and strings are never
+// instances; everything else goes to the deferred InstanceofStub call, which
+// patches the cache via the map-check address in x4.
+void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
+ class DeferredInstanceOfKnownGlobal: public LDeferredCode {
+ public:
+ DeferredInstanceOfKnownGlobal(LCodeGen* codegen,
+ LInstanceOfKnownGlobal* instr)
+ : LDeferredCode(codegen), instr_(instr) { }
+ virtual void Generate() {
+ codegen()->DoDeferredInstanceOfKnownGlobal(instr_);
+ }
+ virtual LInstruction* instr() { return instr_; }
+ private:
+ LInstanceOfKnownGlobal* instr_;
+ };
+
+ DeferredInstanceOfKnownGlobal* deferred =
+ new(zone()) DeferredInstanceOfKnownGlobal(this, instr);
+
+ Label map_check, return_false, cache_miss, done;
+ Register object = ToRegister(instr->value());
+ Register result = ToRegister(instr->result());
+ // x4 is expected in the associated deferred code and stub.
+ Register map_check_site = x4;
+ Register map = x5;
+
+ // This instruction is marked as call. We can clobber any register.
+ ASSERT(instr->IsMarkedAsCall());
+
+ // We must take into account that object is in x11.
+ ASSERT(object.Is(x11));
+ Register scratch = x10;
+
+ // A Smi is not instance of anything.
+ __ JumpIfSmi(object, &return_false);
+
+ // This is the inlined call site instanceof cache. The two occurrences of the
+ // hole value will be patched to the last map/result pair generated by the
+ // instanceof stub.
+ __ Ldr(map, FieldMemOperand(object, HeapObject::kMapOffset));
+ {
+ // Below we use Factory::the_hole_value() on purpose instead of loading from
+ // the root array to force relocation and later be able to patch with a
+ // custom value.
+ InstructionAccurateScope scope(masm(), 5);
+ __ bind(&map_check);
+ // Will be patched with the cached map.
+ Handle<Cell> cell = factory()->NewCell(factory()->the_hole_value());
+ __ LoadRelocated(scratch, Operand(Handle<Object>(cell)));
+ __ ldr(scratch, FieldMemOperand(scratch, PropertyCell::kValueOffset));
+ __ cmp(map, Operand(scratch));
+ __ b(&cache_miss, ne);
+ // The address of this instruction is computed relative to the map check
+ // above, so check the size of the code generated.
+ ASSERT(masm()->InstructionsGeneratedSince(&map_check) == 4);
+ // Will be patched with the cached result.
+ __ LoadRelocated(result, Operand(factory()->the_hole_value()));
+ }
+ __ B(&done);
+
+ // The inlined call site cache did not match.
+ // Check null and string before calling the deferred code.
+ __ Bind(&cache_miss);
+ // Compute the address of the map check. It must not be clobbered until the
+ // InstanceOfStub has used it.
+ __ Adr(map_check_site, &map_check);
+ // Null is not instance of anything.
+ __ JumpIfRoot(object, Heap::kNullValueRootIndex, &return_false);
+
+ // String values are not instances of anything.
+ // Return false if the object is a string. Otherwise, jump to the deferred
+ // code.
+ // Note that we can't jump directly to deferred code from
+ // IsObjectJSStringType, because it uses tbz for the jump and the deferred
+ // code can be out of range.
+ __ IsObjectJSStringType(object, scratch, NULL, &return_false);
+ __ B(deferred->entry());
+
+ __ Bind(&return_false);
+ __ LoadRoot(result, Heap::kFalseValueRootIndex);
+
+ // Here result is either true or false.
+ __ Bind(deferred->exit());
+ __ Bind(&done);
+}
+
+
+// Deferred slow path for DoInstanceOfKnownGlobal: calls InstanceofStub with
+// the inline-cache flags set so the stub patches the call-site cache, then
+// stores the true/false result into the safepoint register slot.
+void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
+ Register result = ToRegister(instr->result());
+ ASSERT(result.Is(x0)); // InstanceofStub returns its result in x0.
+ InstanceofStub::Flags flags = InstanceofStub::kNoFlags;
+ flags = static_cast<InstanceofStub::Flags>(
+ flags | InstanceofStub::kArgsInRegisters);
+ flags = static_cast<InstanceofStub::Flags>(
+ flags | InstanceofStub::kReturnTrueFalseObject);
+ flags = static_cast<InstanceofStub::Flags>(
+ flags | InstanceofStub::kCallSiteInlineCheck);
+
+ PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
+ LoadContextFromDeferred(instr->context());
+
+ // Prepare InstanceofStub arguments.
+ ASSERT(ToRegister(instr->value()).Is(InstanceofStub::left()));
+ __ LoadObject(InstanceofStub::right(), instr->function());
+
+ InstanceofStub stub(flags);
+ CallCodeGeneric(stub.GetCode(isolate()),
+ RelocInfo::CODE_TARGET,
+ instr,
+ RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
+ LEnvironment* env = instr->GetDeferredLazyDeoptimizationEnvironment();
+ safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
+
+ // Put the result value into the result register slot.
+ __ StoreToSafepointRegisterSlot(result, result);
+}
+
+
+// An instruction gap is handled exactly like a gap (parallel moves).
+void LCodeGen::DoInstructionGap(LInstructionGap* instr) {
+ DoGap(instr);
+}
+
+
+// Convert a signed 32-bit integer to a double (Scvtf).
+void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
+ Register value = ToRegister32(instr->value());
+ DoubleRegister result = ToDoubleRegister(instr->result());
+ __ Scvtf(result, value);
+}
+
+
+// Smi-tag an int32 value.
+void LCodeGen::DoInteger32ToSmi(LInteger32ToSmi* instr) {
+ // A64 smis can represent all Integer32 values, so this cannot deoptimize.
+ ASSERT(!instr->hydrogen()->value()->HasRange() ||
+ instr->hydrogen()->value()->range()->IsInSmiRange());
+
+ Register value = ToRegister(instr->value());
+ Register result = ToRegister(instr->result());
+ __ SmiTag(result, value);
+}
+
+
+// Invoke a JS function in x1: through the generic InvokeFunction path when
+// the target is unknown at compile time, otherwise through the direct
+// known-function call.
+void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
+ ASSERT(ToRegister(instr->context()).is(cp));
+ // The function is required to be in x1.
+ ASSERT(ToRegister(instr->function()).is(x1));
+ ASSERT(instr->HasPointerMap());
+
+ Handle<JSFunction> known_function = instr->hydrogen()->known_function();
+ if (known_function.is_null()) {
+ LPointerMap* pointers = instr->pointer_map();
+ SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
+ ParameterCount count(instr->arity());
+ __ InvokeFunction(x1, count, CALL_FUNCTION, generator);
+ } else {
+ CallKnownFunction(known_function,
+ instr->hydrogen()->formal_parameter_count(),
+ instr->arity(),
+ instr,
+ x1);
+ }
+}
+
+
+// Branch on whether the calling frame is a construct frame, skipping over an
+// arguments-adaptor frame if one is present.
+void LCodeGen::DoIsConstructCallAndBranch(LIsConstructCallAndBranch* instr) {
+ Register temp1 = ToRegister(instr->temp1());
+ Register temp2 = ToRegister(instr->temp2());
+
+ // Get the frame pointer for the calling frame.
+ __ Ldr(temp1, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+
+ // Skip the arguments adaptor frame if it exists.
+ Label check_frame_marker;
+ __ Ldr(temp2, MemOperand(temp1, StandardFrameConstants::kContextOffset));
+ __ Cmp(temp2, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+ __ B(ne, &check_frame_marker);
+ __ Ldr(temp1, MemOperand(temp1, StandardFrameConstants::kCallerFPOffset));
+
+ // Check the marker in the calling frame.
+ __ Bind(&check_frame_marker);
+ __ Ldr(temp1, MemOperand(temp1, StandardFrameConstants::kMarkerOffset));
+
+ EmitCompareAndBranch(
+ instr, eq, temp1, Operand(Smi::FromInt(StackFrame::CONSTRUCT)));
+}
+
+
+// Branch on whether the value is a JS "object" in the ES sense: null counts
+// as an object, smis and undetectable objects do not, and otherwise the
+// instance type must lie in the JS object range.
+void LCodeGen::DoIsObjectAndBranch(LIsObjectAndBranch* instr) {
+ Label* is_object = instr->TrueLabel(chunk_);
+ Label* is_not_object = instr->FalseLabel(chunk_);
+ Register value = ToRegister(instr->value());
+ Register map = ToRegister(instr->temp1());
+ Register scratch = ToRegister(instr->temp2());
+
+ __ JumpIfSmi(value, is_not_object);
+ __ JumpIfRoot(value, Heap::kNullValueRootIndex, is_object);
+
+ __ Ldr(map, FieldMemOperand(value, HeapObject::kMapOffset));
+
+ // Check for undetectable objects.
+ __ Ldrb(scratch, FieldMemOperand(map, Map::kBitFieldOffset));
+ __ TestAndBranchIfAnySet(scratch, 1 << Map::kIsUndetectable, is_not_object);
+
+ // Check that instance type is in object type range.
+ __ IsInstanceJSObjectType(map, scratch, NULL);
+ // Flags have been updated by IsInstanceJSObjectType. We can now test the
+ // flags for "le" condition to check if the object's type is a valid
+ // JS object type.
+ EmitBranch(instr, le);
+}
+
+
+// Emit a string check on |input|. Smis (when checked) jump to is_not_string;
+// otherwise the instance type is compared against FIRST_NONSTRING_TYPE and
+// the returned condition (lt) holds iff the object is a string.
+Condition LCodeGen::EmitIsString(Register input,
+ Register temp1,
+ Label* is_not_string,
+ SmiCheck check_needed = INLINE_SMI_CHECK) {
+ if (check_needed == INLINE_SMI_CHECK) {
+ __ JumpIfSmi(input, is_not_string);
+ }
+ __ CompareObjectType(input, temp1, temp1, FIRST_NONSTRING_TYPE);
+
+ return lt;
+}
+
+
+// Branch on whether the value is a string; the smi check is elided when
+// hydrogen already proved the value is a heap object.
+void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) {
+ Register val = ToRegister(instr->value());
+ Register scratch = ToRegister(instr->temp());
+
+ SmiCheck check_needed =
+ instr->hydrogen()->value()->IsHeapObject()
+ ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
+ Condition true_cond =
+ EmitIsString(val, scratch, instr->FalseLabel(chunk_), check_needed);
+
+ EmitBranch(instr, true_cond);
+}
+
+
+// Branch on whether the value is a smi (tag bits clear).
+void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
+ Register value = ToRegister(instr->value());
+ STATIC_ASSERT(kSmiTag == 0);
+ EmitTestAndBranch(instr, eq, value, kSmiTagMask);
+}
+
+
+// Branch on whether the value is an undetectable object (map bit-field's
+// kIsUndetectable bit set). Smis take the false branch.
+void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
+ Register input = ToRegister(instr->value());
+ Register temp = ToRegister(instr->temp());
+
+ if (!instr->hydrogen()->value()->IsHeapObject()) {
+ __ JumpIfSmi(input, instr->FalseLabel(chunk_));
+ }
+ __ Ldr(temp, FieldMemOperand(input, HeapObject::kMapOffset));
+ __ Ldrb(temp, FieldMemOperand(temp, Map::kBitFieldOffset));
+
+ EmitTestAndBranch(instr, ne, temp, 1 << Map::kIsUndetectable);
+}
+
+
+// Returns a human-readable annotation for a label's disassembly comment.
+static const char* LabelType(LLabel* label) {
+ if (label->is_loop_header()) return " (loop header)";
+ if (label->is_osr_entry()) return " (OSR entry)";
+ return "";
+}
+
+
+// Bind a block label, emit its disassembly comment, record the current block,
+// and process any gap moves attached to the label.
+void LCodeGen::DoLabel(LLabel* label) {
+ Comment(";;; <@%d,#%d> -------------------- B%d%s --------------------",
+ current_instruction_,
+ label->hydrogen_value()->id(),
+ label->block_id(),
+ LabelType(label));
+
+ __ Bind(label->label());
+ current_block_ = label->block_id();
+ DoGap(label);
+}
+
+
+// Load a slot from a context object. When a hole check is required, either
+// deoptimize on the hole or replace it with undefined, depending on the
+// hydrogen instruction's DeoptimizesOnHole flag.
+// Fix: the label references below were mojibake-garbled ("&not" had been
+// turned into U+00AC by HTML-entity mangling), producing invalid C++; they
+// are restored to take the address of not_the_hole.
+void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
+ Register context = ToRegister(instr->context());
+ Register result = ToRegister(instr->result());
+ __ Ldr(result, ContextMemOperand(context, instr->slot_index()));
+ if (instr->hydrogen()->RequiresHoleCheck()) {
+ if (instr->hydrogen()->DeoptimizesOnHole()) {
+ DeoptimizeIfRoot(result, Heap::kTheHoleValueRootIndex,
+ instr->environment());
+ } else {
+ Label not_the_hole;
+ __ JumpIfNotRoot(result, Heap::kTheHoleValueRootIndex, &not_the_hole);
+ __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
+ __ Bind(&not_the_hole);
+ }
+ }
+}
+
+
+// Load a JSFunction's prototype: deopt if the input is not a function or has
+// a the-hole prototype; follow the initial map to its prototype when present;
+// for non-instance prototypes, read the constructor field from the map.
+void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
+ Register function = ToRegister(instr->function());
+ Register result = ToRegister(instr->result());
+ Register temp = ToRegister(instr->temp());
+ Label deopt;
+
+ // Check that the function really is a function. Leaves map in the result
+ // register.
+ __ JumpIfNotObjectType(function, result, temp, JS_FUNCTION_TYPE, &deopt);
+
+ // Make sure that the function has an instance prototype.
+ Label non_instance;
+ __ Ldrb(temp, FieldMemOperand(result, Map::kBitFieldOffset));
+ __ Tbnz(temp, Map::kHasNonInstancePrototype, &non_instance);
+
+ // Get the prototype or initial map from the function.
+ __ Ldr(result, FieldMemOperand(function,
+ JSFunction::kPrototypeOrInitialMapOffset));
+
+ // Check that the function has a prototype or an initial map.
+ __ JumpIfRoot(result, Heap::kTheHoleValueRootIndex, &deopt);
+
+ // If the function does not have an initial map, we're done.
+ Label done;
+ __ CompareObjectType(result, temp, temp, MAP_TYPE);
+ __ B(ne, &done);
+
+ // Get the prototype from the initial map.
+ __ Ldr(result, FieldMemOperand(result, Map::kPrototypeOffset));
+ __ B(&done);
+
+ // Non-instance prototype: fetch prototype from constructor field in initial
+ // map.
+ __ Bind(&non_instance);
+ __ Ldr(result, FieldMemOperand(result, Map::kConstructorOffset));
+ __ B(&done);
+
+ // Deoptimize case.
+ __ Bind(&deopt);
+ Deoptimize(instr->environment());
+
+ // All done.
+ __ Bind(&done);
+}
+
+
+// Load a global variable's value from its property cell; deopt on the hole
+// when a hole check is required (e.g. for deletable globals).
+void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
+ Register result = ToRegister(instr->result());
+ __ Mov(result, Operand(Handle<Object>(instr->hydrogen()->cell().handle())));
+ __ Ldr(result, FieldMemOperand(result, Cell::kValueOffset));
+ if (instr->hydrogen()->RequiresHoleCheck()) {
+ DeoptimizeIfRoot(
+ result, Heap::kTheHoleValueRootIndex, instr->environment());
+ }
+}
+
+
+// Generic global load through the LoadIC; name goes in x2, receiver/result
+// use x0. typeof loads use the non-contextual mode.
+void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
+ ASSERT(ToRegister(instr->context()).is(cp));
+ ASSERT(ToRegister(instr->global_object()).Is(x0));
+ ASSERT(ToRegister(instr->result()).Is(x0));
+ __ Mov(x2, Operand(instr->name()));
+ ContextualMode mode = instr->for_typeof() ? NOT_CONTEXTUAL : CONTEXTUAL;
+ Handle<Code> ic = LoadIC::initialize_stub(isolate(), mode);
+ CallCode(ic, RelocInfo::CODE_TARGET, instr);
+}
+
+
+// Build a MemOperand addressing element (key + additional_index) of an
+// external/fixed typed array, scaling by the element size and adding the
+// fixed-typed-array data offset when applicable. Constant keys fold entirely
+// into an immediate offset; otherwise |scratch| may be used to combine the
+// untagged/scaled key with the base or the extra index.
+MemOperand LCodeGen::PrepareKeyedExternalArrayOperand(
+ Register key,
+ Register base,
+ Register scratch,
+ bool key_is_smi,
+ bool key_is_constant,
+ int constant_key,
+ ElementsKind elements_kind,
+ int additional_index) {
+ int element_size_shift = ElementsKindToShiftSize(elements_kind);
+ int additional_offset = IsFixedTypedArrayElementsKind(elements_kind)
+ ? FixedTypedArrayBase::kDataOffset - kHeapObjectTag
+ : 0;
+
+ if (key_is_constant) {
+ int base_offset = ((constant_key + additional_index) << element_size_shift);
+ return MemOperand(base, base_offset + additional_offset);
+ }
+
+ if (additional_index == 0) {
+ if (key_is_smi) {
+ // Key is smi: untag, and scale by element size.
+ __ Add(scratch, base, Operand::UntagSmiAndScale(key, element_size_shift));
+ return MemOperand(scratch, additional_offset);
+ } else {
+ // Key is not smi, and element size is not byte: scale by element size.
+ if (additional_offset == 0) {
+ return MemOperand(base, key, LSL, element_size_shift);
+ } else {
+ __ Add(scratch, base, Operand(key, LSL, element_size_shift));
+ return MemOperand(scratch, additional_offset);
+ }
+ }
+ } else {
+ // TODO(all): Try to combine these cases a bit more intelligently.
+ if (additional_offset == 0) {
+ if (key_is_smi) {
+ __ SmiUntag(scratch, key);
+ __ Add(scratch, scratch, additional_index);
+ } else {
+ __ Add(scratch, key, additional_index);
+ }
+ return MemOperand(base, scratch, LSL, element_size_shift);
+ } else {
+ if (key_is_smi) {
+ __ Add(scratch, base,
+ Operand::UntagSmiAndScale(key, element_size_shift));
+ } else {
+ __ Add(scratch, base, Operand(key, LSL, element_size_shift));
+ }
+ return MemOperand(
+ scratch,
+ (additional_index << element_size_shift) + additional_offset);
+ }
+ }
+}
+
+
+// Load one element from an external or fixed typed array. FLOAT32 elements
+// are widened to a double result; UINT32 elements deoptimize when they do
+// not fit in a signed int32, unless the instruction carries the kUint32 flag.
+void LCodeGen::DoLoadKeyedExternal(LLoadKeyedExternal* instr) {
+ Register ext_ptr = ToRegister(instr->elements());
+ Register scratch;
+ ElementsKind elements_kind = instr->elements_kind();
+
+ bool key_is_smi = instr->hydrogen()->key()->representation().IsSmi();
+ bool key_is_constant = instr->key()->IsConstantOperand();
+ Register key = no_reg;
+ int constant_key = 0;
+ if (key_is_constant) {
+ ASSERT(instr->temp() == NULL);
+ constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
+ if (constant_key & 0xf0000000) {
+ Abort(kArrayIndexConstantValueTooBig);
+ }
+ } else {
+ // A register key needs the temp as scratch for address computation.
+ scratch = ToRegister(instr->temp());
+ key = ToRegister(instr->key());
+ }
+
+ MemOperand mem_op =
+ PrepareKeyedExternalArrayOperand(key, ext_ptr, scratch, key_is_smi,
+ key_is_constant, constant_key,
+ elements_kind,
+ instr->additional_index());
+
+ if ((elements_kind == EXTERNAL_FLOAT32_ELEMENTS) ||
+ (elements_kind == FLOAT32_ELEMENTS)) {
+ // Load as single precision, then convert to the double result register.
+ DoubleRegister result = ToDoubleRegister(instr->result());
+ __ Ldr(result.S(), mem_op);
+ __ Fcvt(result, result.S());
+ } else if ((elements_kind == EXTERNAL_FLOAT64_ELEMENTS) ||
+ (elements_kind == FLOAT64_ELEMENTS)) {
+ DoubleRegister result = ToDoubleRegister(instr->result());
+ __ Ldr(result, mem_op);
+ } else {
+ Register result = ToRegister(instr->result());
+
+ switch (elements_kind) {
+ case EXTERNAL_INT8_ELEMENTS:
+ case INT8_ELEMENTS:
+ __ Ldrsb(result, mem_op);
+ break;
+ case EXTERNAL_UINT8_CLAMPED_ELEMENTS:
+ case EXTERNAL_UINT8_ELEMENTS:
+ case UINT8_ELEMENTS:
+ case UINT8_CLAMPED_ELEMENTS:
+ __ Ldrb(result, mem_op);
+ break;
+ case EXTERNAL_INT16_ELEMENTS:
+ case INT16_ELEMENTS:
+ __ Ldrsh(result, mem_op);
+ break;
+ case EXTERNAL_UINT16_ELEMENTS:
+ case UINT16_ELEMENTS:
+ __ Ldrh(result, mem_op);
+ break;
+ case EXTERNAL_INT32_ELEMENTS:
+ case INT32_ELEMENTS:
+ __ Ldrsw(result, mem_op);
+ break;
+ case EXTERNAL_UINT32_ELEMENTS:
+ case UINT32_ELEMENTS:
+ __ Ldr(result.W(), mem_op);
+ if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
+ // Deopt if value >= 0x80000000 (bit 31 set): the value is not
+ // representable as a signed int32.
+ __ Tst(result, 0xFFFFFFFF80000000);
+ DeoptimizeIf(ne, instr->environment());
+ }
+ break;
+ case FLOAT32_ELEMENTS:
+ case FLOAT64_ELEMENTS:
+ case EXTERNAL_FLOAT32_ELEMENTS:
+ case EXTERNAL_FLOAT64_ELEMENTS:
+ case FAST_HOLEY_DOUBLE_ELEMENTS:
+ case FAST_HOLEY_ELEMENTS:
+ case FAST_HOLEY_SMI_ELEMENTS:
+ case FAST_DOUBLE_ELEMENTS:
+ case FAST_ELEMENTS:
+ case FAST_SMI_ELEMENTS:
+ case DICTIONARY_ELEMENTS:
+ case NON_STRICT_ARGUMENTS_ELEMENTS:
+ // Float kinds were handled above; the remaining kinds never reach
+ // this instruction.
+ UNREACHABLE();
+ break;
+ }
+ }
+}
+
+
+// Compute base = elements + (key << log2(element size)) for a keyed array
+// access, untagging the key first when it is a smi.
+void LCodeGen::CalcKeyedArrayBaseRegister(Register base,
+ Register elements,
+ Register key,
+ bool key_is_tagged,
+ ElementsKind elements_kind) {
+ int element_size_shift = ElementsKindToShiftSize(elements_kind);
+
+ // Even though the HLoad/StoreKeyed instructions force the input
+ // representation for the key to be an integer, the input gets replaced during
+ // bounds check elimination with the index argument to the bounds check, which
+ // can be tagged, so that case must be handled here, too.
+ if (key_is_tagged) {
+ __ Add(base, elements, Operand::UntagSmiAndScale(key, element_size_shift));
+ } else {
+ // Sign extend key because it could be a 32-bit negative value and the
+ // address computation happens in 64-bit.
+ ASSERT((element_size_shift >= 0) && (element_size_shift <= 4));
+ __ Add(base, elements, Operand(key, SXTW, element_size_shift));
+ }
+}
+
+
+// Load a double element from a FixedDoubleArray, optionally deoptimizing if
+// the loaded bit pattern is the hole NaN.
+void LCodeGen::DoLoadKeyedFixedDouble(LLoadKeyedFixedDouble* instr) {
+ Register elements = ToRegister(instr->elements());
+ DoubleRegister result = ToDoubleRegister(instr->result());
+ Register load_base;
+ int offset = 0;
+
+ if (instr->key()->IsConstantOperand()) {
+ // With a constant key the temp is only needed for the hole check below.
+ ASSERT(instr->hydrogen()->RequiresHoleCheck() ||
+ (instr->temp() == NULL));
+
+ int constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
+ if (constant_key & 0xf0000000) {
+ Abort(kArrayIndexConstantValueTooBig);
+ }
+ offset = FixedDoubleArray::OffsetOfElementAt(constant_key +
+ instr->additional_index());
+ load_base = elements;
+ } else {
+ load_base = ToRegister(instr->temp());
+ Register key = ToRegister(instr->key());
+ bool key_is_tagged = instr->hydrogen()->key()->representation().IsSmi();
+ CalcKeyedArrayBaseRegister(load_base, elements, key, key_is_tagged,
+ instr->hydrogen()->elements_kind());
+ offset = FixedDoubleArray::OffsetOfElementAt(instr->additional_index());
+ }
+ __ Ldr(result, FieldMemOperand(load_base, offset));
+
+ if (instr->hydrogen()->RequiresHoleCheck()) {
+ Register scratch = ToRegister(instr->temp());
+
+ // TODO(all): Is it faster to reload this value to an integer register, or
+ // move from fp to integer?
+ __ Fmov(scratch, result);
+ __ Cmp(scratch, kHoleNanInt64)<
+ DeoptimizeIf(eq, instr->environment());
+ }
+}
+
+
+// Load a (tagged or untagged-smi) element from a FixedArray. Smi elements
+// with an Integer32 representation are read directly from the upper half of
+// the smi word. Optionally deoptimizes on the hole (or non-smi, for fast smi
+// kinds).
+void LCodeGen::DoLoadKeyedFixed(LLoadKeyedFixed* instr) {
+ Register elements = ToRegister(instr->elements());
+ Register result = ToRegister(instr->result());
+ Register load_base;
+ int offset = 0;
+
+ if (instr->key()->IsConstantOperand()) {
+ ASSERT(instr->temp() == NULL);
+ LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
+ offset = FixedArray::OffsetOfElementAt(ToInteger32(const_operand) +
+ instr->additional_index());
+ load_base = elements;
+ } else {
+ load_base = ToRegister(instr->temp());
+ Register key = ToRegister(instr->key());
+ bool key_is_tagged = instr->hydrogen()->key()->representation().IsSmi();
+ CalcKeyedArrayBaseRegister(load_base, elements, key, key_is_tagged,
+ instr->hydrogen()->elements_kind());
+ offset = FixedArray::OffsetOfElementAt(instr->additional_index());
+ }
+ Representation representation = instr->hydrogen()->representation();
+
+ if (representation.IsInteger32() &&
+ instr->hydrogen()->elements_kind() == FAST_SMI_ELEMENTS) {
+ // Read int value directly from the upper half of the smi.
+ STATIC_ASSERT(kSmiValueSize == 32 && kSmiShift == 32 && kSmiTag == 0);
+ __ Load(result, UntagSmiFieldMemOperand(load_base, offset),
+ Representation::Integer32());
+ } else {
+ __ Load(result, FieldMemOperand(load_base, offset),
+ representation);
+ }
+
+ if (instr->hydrogen()->RequiresHoleCheck()) {
+ if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) {
+ DeoptimizeIfNotSmi(result, instr->environment());
+ } else {
+ DeoptimizeIfRoot(result, Heap::kTheHoleValueRootIndex,
+ instr->environment());
+ }
+ }
+}
+
+
+// Generic keyed load: call the KeyedLoadIC stub. The calling convention
+// fixes the receiver in x1, the key in x0 and the result in x0.
+void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
+ ASSERT(ToRegister(instr->context()).is(cp));
+ ASSERT(ToRegister(instr->object()).Is(x1));
+ ASSERT(ToRegister(instr->key()).Is(x0));
+
+ Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
+ CallCode(ic, RelocInfo::CODE_TARGET, instr);
+
+ ASSERT(ToRegister(instr->result()).Is(x0));
+}
+
+
+// Load a named field, handling external memory, double fields, in-object
+// fields and out-of-object (properties array) fields. Smi fields with an
+// Integer32 representation are read from the upper half of the smi word.
+void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
+ HObjectAccess access = instr->hydrogen()->access();
+ int offset = access.offset();
+ Register object = ToRegister(instr->object());
+
+ if (access.IsExternalMemory()) {
+ // External memory: plain (untagged) address, no field tag adjustment.
+ Register result = ToRegister(instr->result());
+ __ Load(result, MemOperand(object, offset), access.representation());
+ return;
+ }
+
+ if (instr->hydrogen()->representation().IsDouble()) {
+ FPRegister result = ToDoubleRegister(instr->result());
+ __ Ldr(result, FieldMemOperand(object, offset));
+ return;
+ }
+
+ Register result = ToRegister(instr->result());
+ Register source;
+ if (access.IsInobject()) {
+ source = object;
+ } else {
+ // Load the properties array, using result as a scratch register.
+ __ Ldr(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
+ source = result;
+ }
+
+ if (access.representation().IsSmi() &&
+ instr->hydrogen()->representation().IsInteger32()) {
+ // Read int value directly from upper half of the smi.
+ STATIC_ASSERT(kSmiValueSize == 32 && kSmiShift == 32 && kSmiTag == 0);
+ __ Load(result, UntagSmiFieldMemOperand(source, offset),
+ Representation::Integer32());
+ } else {
+ __ Load(result, FieldMemOperand(source, offset), access.representation());
+ }
+}
+
+
+// Generic named load: call the (non-contextual) LoadIC stub.
+void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
+ ASSERT(ToRegister(instr->context()).is(cp));
+ // LoadIC expects x2 to hold the name, and x0 to hold the receiver.
+ ASSERT(ToRegister(instr->object()).is(x0));
+ __ Mov(x2, Operand(instr->name()));
+
+ Handle<Code> ic = LoadIC::initialize_stub(isolate(), NOT_CONTEXTUAL);
+ CallCode(ic, RelocInfo::CODE_TARGET, instr);
+
+ ASSERT(ToRegister(instr->result()).is(x0));
+}
+
+
+// Load a value from the root list into the result register.
+void LCodeGen::DoLoadRoot(LLoadRoot* instr) {
+ Register result = ToRegister(instr->result());
+ __ LoadRoot(result, instr->index());
+}
+
+
+// Extract a map's enum-cache length as a smi.
+void LCodeGen::DoMapEnumLength(LMapEnumLength* instr) {
+ Register result = ToRegister(instr->result());
+ Register map = ToRegister(instr->value());
+ __ EnumLengthSmi(result, map);
+}
+
+
+// Math.abs for double, smi and int32 representations (tagged inputs are
+// handled by LMathAbsTagged). Integer abs deoptimizes when negating the
+// most negative value would overflow.
+void LCodeGen::DoMathAbs(LMathAbs* instr) {
+ Representation r = instr->hydrogen()->value()->representation();
+ if (r.IsDouble()) {
+ DoubleRegister input = ToDoubleRegister(instr->value());
+ DoubleRegister result = ToDoubleRegister(instr->result());
+ __ Fabs(result, input);
+ } else if (r.IsSmi() || r.IsInteger32()) {
+ // Smis use full X registers; int32 values use W registers.
+ Register input = r.IsSmi() ? ToRegister(instr->value())
+ : ToRegister32(instr->value());
+ Register result = r.IsSmi() ? ToRegister(instr->result())
+ : ToRegister32(instr->result());
+ Label done;
+ // Abs branches to 'done' on success; on overflow it falls through to the
+ // deopt below.
+ __ Abs(result, input, NULL, &done);
+ Deoptimize(instr->environment());
+ __ Bind(&done);
+ }
+}
+
+
+// Deferred (out-of-line) part of LMathAbsTagged, entered for non-smi inputs
+// or when the inline code needs a freshly allocated HeapNumber.
+void LCodeGen::DoDeferredMathAbsTagged(LMathAbsTagged* instr,
+ Label* exit,
+ Label* allocation_entry) {
+ // Handle the tricky cases of MathAbsTagged:
+ // - HeapNumber inputs.
+ // - Negative inputs produce a positive result, so a new HeapNumber is
+ // allocated to hold it.
+ // - Positive inputs are returned as-is, since there is no need to allocate
+ // a new HeapNumber for the result.
+ // - The (smi) input -0x80000000, produces +0x80000000, which does not fit
+ // a smi. In this case, the inline code sets the result and jumps directly
+ // to the allocation_entry label.
+ ASSERT(instr->context() != NULL);
+ ASSERT(ToRegister(instr->context()).is(cp));
+ Register input = ToRegister(instr->value());
+ Register temp1 = ToRegister(instr->temp1());
+ Register temp2 = ToRegister(instr->temp2());
+ Register result_bits = ToRegister(instr->temp3());
+ Register result = ToRegister(instr->result());
+
+ Label runtime_allocation;
+
+ // Deoptimize if the input is not a HeapNumber.
+ __ Ldr(temp1, FieldMemOperand(input, HeapObject::kMapOffset));
+ DeoptimizeIfNotRoot(temp1, Heap::kHeapNumberMapRootIndex,
+ instr->environment());
+
+ // If the argument is positive, we can return it as-is, without any need to
+ // allocate a new HeapNumber for the result. We have to do this in integer
+ // registers (rather than with fabs) because we need to be able to distinguish
+ // the two zeroes.
+ __ Ldr(result_bits, FieldMemOperand(input, HeapNumber::kValueOffset));
+ __ Mov(result, input);
+ __ Tbz(result_bits, kXSignBit, exit);
+
+ // Calculate abs(input) by clearing the sign bit.
+ __ Bic(result_bits, result_bits, kXSignMask);
+
+ // Allocate a new HeapNumber to hold the result.
+ // result_bits The bit representation of the (double) result.
+ __ Bind(allocation_entry);
+ __ AllocateHeapNumber(result, &runtime_allocation, temp1, temp2);
+ // The inline (non-deferred) code will store result_bits into result.
+ __ B(exit);
+
+ __ Bind(&runtime_allocation);
+ if (FLAG_debug_code) {
+ // Because result is in the pointer map, we need to make sure it has a valid
+ // tagged value before we call the runtime. We speculatively set it to the
+ // input (for abs(+x)) or to a smi (for abs(-SMI_MIN)), so it should already
+ // be valid.
+ Label result_ok;
+ Register input = ToRegister(instr->value());
+ __ JumpIfSmi(result, &result_ok);
+ __ Cmp(input, result);
+ // TODO(all): Shouldn't we assert here?
+ DeoptimizeIf(ne, instr->environment());
+ __ Bind(&result_ok);
+ }
+
+ { PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
+ CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr,
+ instr->context());
+ __ StoreToSafepointRegisterSlot(x0, result);
+ }
+ // The inline (non-deferred) code will store result_bits into result.
+}
+
+
+// Math.abs for tagged inputs. Smis are handled inline (with a special path
+// for -SMI_MIN, whose magnitude does not fit a smi); everything else goes
+// through the deferred code in DoDeferredMathAbsTagged.
+void LCodeGen::DoMathAbsTagged(LMathAbsTagged* instr) {
+ // Class for deferred case.
+ class DeferredMathAbsTagged: public LDeferredCode {
+ public:
+ DeferredMathAbsTagged(LCodeGen* codegen, LMathAbsTagged* instr)
+ : LDeferredCode(codegen), instr_(instr) { }
+ virtual void Generate() {
+ codegen()->DoDeferredMathAbsTagged(instr_, exit(),
+ allocation_entry());
+ }
+ virtual LInstruction* instr() { return instr_; }
+ Label* allocation_entry() { return &allocation; }
+ private:
+ LMathAbsTagged* instr_;
+ Label allocation;
+ };
+
+ // TODO(jbramley): The early-exit mechanism would skip the new frame handling
+ // in GenerateDeferredCode. Tidy this up.
+ ASSERT(!NeedsDeferredFrame());
+
+ DeferredMathAbsTagged* deferred =
+ new(zone()) DeferredMathAbsTagged(this, instr);
+
+ ASSERT(instr->hydrogen()->value()->representation().IsTagged() ||
+ instr->hydrogen()->value()->representation().IsSmi());
+ Register input = ToRegister(instr->value());
+ Register result_bits = ToRegister(instr->temp3());
+ Register result = ToRegister(instr->result());
+ Label done;
+
+ // Handle smis inline.
+ // We can treat smis as 64-bit integers, since the (low-order) tag bits will
+ // never get set by the negation. This is therefore the same as the Integer32
+ // case in DoMathAbs, except that it operates on 64-bit values.
+ STATIC_ASSERT((kSmiValueSize == 32) && (kSmiShift == 32) && (kSmiTag == 0));
+
+ // TODO(jbramley): We can't use JumpIfNotSmi here because the tbz it uses
+ // doesn't always have enough range. Consider making a variant of it, or a
+ // TestIsSmi helper.
+ STATIC_ASSERT(kSmiTag == 0);
+ __ Tst(input, kSmiTagMask);
+ __ B(ne, deferred->entry());
+
+ // Abs branches to 'done' on success; overflow (abs of the most negative
+ // smi) falls through to the HeapNumber allocation below.
+ __ Abs(result, input, NULL, &done);
+
+ // The result is the magnitude (abs) of the smallest value a smi can
+ // represent, encoded as a double.
+ __ Mov(result_bits, double_to_rawbits(0x80000000));
+ __ B(deferred->allocation_entry());
+
+ __ Bind(deferred->exit());
+ __ Str(result_bits, FieldMemOperand(result, HeapNumber::kValueOffset));
+
+ __ Bind(&done);
+}
+
+
+// Math.exp: delegate to the shared MathExpGenerator, supplying the scratch
+// registers it requires.
+void LCodeGen::DoMathExp(LMathExp* instr) {
+ DoubleRegister input = ToDoubleRegister(instr->value());
+ DoubleRegister result = ToDoubleRegister(instr->result());
+ DoubleRegister double_temp1 = ToDoubleRegister(instr->double_temp1());
+ DoubleRegister double_temp2 = double_scratch();
+ Register temp1 = ToRegister(instr->temp1());
+ Register temp2 = ToRegister(instr->temp2());
+ Register temp3 = ToRegister(instr->temp3());
+
+ MathExpGenerator::EmitMathExp(masm(), input, result,
+ double_temp1, double_temp2,
+ temp1, temp2, temp3);
+}
+
+
+// Math.floor with an int32 result. Deoptimizes on -0 (when required), NaN,
+// or a result that does not fit in 32 bits.
+void LCodeGen::DoMathFloor(LMathFloor* instr) {
+ // TODO(jbramley): If we could provide a double result, we could use frintm
+ // and produce a valid double result in a single instruction.
+ DoubleRegister input = ToDoubleRegister(instr->value());
+ Register result = ToRegister(instr->result());
+ Label deopt;
+ Label done;
+
+ if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ __ JumpIfMinusZero(input, &deopt);
+ }
+
+ // Convert to integer, rounding towards minus infinity.
+ __ Fcvtms(result, input);
+
+ // Check that the result fits into a 32-bit integer.
+ // - The result did not overflow.
+ __ Cmp(result, Operand(result, SXTW));
+ // - The input was not NaN.
+ __ Fccmp(input, input, NoFlag, eq);
+ __ B(&done, eq);
+
+ __ Bind(&deopt);
+ Deoptimize(instr->environment());
+
+ __ Bind(&done);
+}
+
+
+// Flooring integer division: sdiv truncates towards zero, so when the
+// operands have different signs and the division is inexact, the quotient is
+// decremented by one. Deoptimizes on division by zero, kMinInt / -1 overflow
+// and (when required) a -0 result.
+void LCodeGen::DoMathFloorOfDiv(LMathFloorOfDiv* instr) {
+ Register result = ToRegister32(instr->result());
+ Register left = ToRegister32(instr->left());
+ Register right = ToRegister32(instr->right());
+ Register remainder = ToRegister32(instr->temp());
+
+ // This can't cause an exception on ARM, so we can speculatively
+ // execute it already now.
+ __ Sdiv(result, left, right);
+
+ // Check for x / 0.
+ DeoptimizeIfZero(right, instr->environment());
+
+ // Check for (kMinInt / -1).
+ if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
+ // The V flag will be set iff left == kMinInt.
+ __ Cmp(left, 1);
+ __ Ccmp(right, -1, NoFlag, vs);
+ DeoptimizeIf(eq, instr->environment());
+ }
+
+ // Check for (0 / -x) that will produce negative zero.
+ if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ __ Cmp(right, 0);
+ __ Ccmp(left, 0, ZFlag, mi);
+ // "right" can't be null because the code would have already been
+ // deoptimized. The Z flag is set only if (right < 0) and (left == 0).
+ // In this case we need to deoptimize to produce a -0.
+ DeoptimizeIf(eq, instr->environment());
+ }
+
+ Label done;
+ // If both operands have the same sign then we are done.
+ __ Eor(remainder, left, right);
+ __ Tbz(remainder, kWSignBit, &done);
+
+ // Check if the result needs to be corrected.
+ __ Msub(remainder, result, right, left);
+ __ Cbz(remainder, &done);
+ __ Sub(result, result, 1);
+
+ __ Bind(&done);
+}
+
+
+// Math.log: call the C math_log function. The input and result both live in
+// d0 per the C calling convention (0 integer args, 1 double arg).
+void LCodeGen::DoMathLog(LMathLog* instr) {
+ ASSERT(instr->IsMarkedAsCall());
+ ASSERT(ToDoubleRegister(instr->value()).is(d0));
+ __ CallCFunction(ExternalReference::math_log_double_function(isolate()),
+ 0, 1);
+ ASSERT(ToDoubleRegister(instr->result()).Is(d0));
+}
+
+
+// Math.pow(x, 0.5), which is sqrt(x) except for the two special inputs
+// documented below.
+void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) {
+ DoubleRegister input = ToDoubleRegister(instr->value());
+ DoubleRegister result = ToDoubleRegister(instr->result());
+ Label done;
+
+ // Math.pow(x, 0.5) differs from fsqrt(x) in the following cases:
+ // Math.pow(-Infinity, 0.5) == +Infinity
+ // Math.pow(-0.0, 0.5) == +0.0
+
+ // Catch -infinity inputs first.
+ // TODO(jbramley): A constant infinity register would be helpful here.
+ __ Fmov(double_scratch(), kFP64NegativeInfinity);
+ __ Fcmp(double_scratch(), input);
+ // Fabs turns -Infinity into the required +Infinity result.
+ __ Fabs(result, input);
+ __ B(&done, eq);
+
+ // Add +0.0 to convert -0.0 to +0.0.
+ // TODO(jbramley): A constant zero register would be helpful here.
+ __ Fmov(double_scratch(), 0.0);
+ __ Fadd(double_scratch(), input, double_scratch());
+ __ Fsqrt(result, double_scratch());
+
+ __ Bind(&done);
+}
+
+
+// Math.pow: dispatch to the MathPowStub variant matching the exponent's
+// representation. Tagged non-smi exponents must be HeapNumbers, otherwise we
+// deoptimize before calling the stub.
+void LCodeGen::DoPower(LPower* instr) {
+ Representation exponent_type = instr->hydrogen()->right()->representation();
+ // Having marked this as a call, we can use any registers.
+ // Just make sure that the input/output registers are the expected ones.
+ ASSERT(!instr->right()->IsDoubleRegister() ||
+ ToDoubleRegister(instr->right()).is(d1));
+ ASSERT(exponent_type.IsInteger32() || !instr->right()->IsRegister() ||
+ ToRegister(instr->right()).is(x11));
+ ASSERT(!exponent_type.IsInteger32() || ToRegister(instr->right()).is(x12));
+ ASSERT(ToDoubleRegister(instr->left()).is(d0));
+ ASSERT(ToDoubleRegister(instr->result()).is(d0));
+
+ if (exponent_type.IsSmi()) {
+ MathPowStub stub(MathPowStub::TAGGED);
+ __ CallStub(&stub);
+ } else if (exponent_type.IsTagged()) {
+ // A tagged exponent must be a smi or a HeapNumber; deopt otherwise.
+ Label no_deopt;
+ __ JumpIfSmi(x11, &no_deopt);
+ __ Ldr(x0, FieldMemOperand(x11, HeapObject::kMapOffset));
+ DeoptimizeIfNotRoot(x0, Heap::kHeapNumberMapRootIndex,
+ instr->environment());
+ __ Bind(&no_deopt);
+ MathPowStub stub(MathPowStub::TAGGED);
+ __ CallStub(&stub);
+ } else if (exponent_type.IsInteger32()) {
+ MathPowStub stub(MathPowStub::INTEGER);
+ __ CallStub(&stub);
+ } else {
+ ASSERT(exponent_type.IsDouble());
+ MathPowStub stub(MathPowStub::DOUBLE);
+ __ CallStub(&stub);
+ }
+}
+
+
+// Math.round with an int32 result: round to nearest, ties towards +infinity.
+// Deoptimizes on NaN, on results outside int32 range, and (when required) on
+// inputs in [-0.5, -0] that would produce -0.
+void LCodeGen::DoMathRound(LMathRound* instr) {
+ // TODO(jbramley): We could provide a double result here using frint.
+ DoubleRegister input = ToDoubleRegister(instr->value());
+ DoubleRegister temp1 = ToDoubleRegister(instr->temp1());
+ Register result = ToRegister(instr->result());
+ Label try_rounding;
+ Label deopt;
+ Label done;
+
+ // Math.round() rounds to the nearest integer, with ties going towards
+ // +infinity. This does not match any IEEE-754 rounding mode.
+ // - Infinities and NaNs are propagated unchanged, but cause deopts because
+ // they can't be represented as integers.
+ // - The sign of the result is the same as the sign of the input. This means
+ // that -0.0 rounds to itself, and values -0.5 <= input < 0 also produce a
+ // result of -0.0.
+
+ DoubleRegister dot_five = double_scratch();
+ __ Fmov(dot_five, 0.5);
+ __ Fabs(temp1, input);
+ __ Fcmp(temp1, dot_five);
+ // If input is in [-0.5, -0], the result is -0.
+ // If input is in [+0, +0.5[, the result is +0.
+ // If the input is +0.5, the result is 1.
+ __ B(hi, &try_rounding); // hi so NaN will also branch.
+
+ if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ // Inspect the sign via the raw bits of the input.
+ __ Fmov(result, input);
+ __ Cmp(result, 0);
+ DeoptimizeIf(mi, instr->environment()); // [-0.5, -0.0].
+ }
+ __ Fcmp(input, dot_five);
+ __ Mov(result, 1); // +0.5.
+ // Remaining cases: [+0, +0.5[ or [-0.5, +0.5[, depending on
+ // flag kBailoutOnMinusZero, will return 0 (xzr).
+ __ Csel(result, result, xzr, eq);
+ __ B(&done);
+
+ __ Bind(&deopt);
+ Deoptimize(instr->environment());
+
+ __ Bind(&try_rounding);
+ // Since we're providing a 32-bit result, we can implement ties-to-infinity by
+ // adding 0.5 to the input, then taking the floor of the result. This does not
+ // work for very large positive doubles because adding 0.5 would cause an
+ // intermediate rounding stage, so a different approach will be necessary if a
+ // double result is needed.
+ __ Fadd(temp1, input, dot_five);
+ __ Fcvtms(result, temp1);
+
+ // Deopt if
+ // * the input was NaN
+ // * the result is not representable using a 32-bit integer.
+ __ Fcmp(input, 0.0);
+ __ Ccmp(result, Operand(result.W(), SXTW), NoFlag, vc);
+ __ B(ne, &deopt);
+
+ __ Bind(&done);
+}
+
+
+// Math.sqrt: a single fsqrt instruction.
+void LCodeGen::DoMathSqrt(LMathSqrt* instr) {
+ DoubleRegister input = ToDoubleRegister(instr->value());
+ DoubleRegister result = ToDoubleRegister(instr->result());
+ __ Fsqrt(result, input);
+}
+
+
+// Math.min / Math.max for int32, smi and double representations. Integer
+// cases use a compare + conditional select; double cases use fmin/fmax.
+void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
+ HMathMinMax::Operation op = instr->hydrogen()->operation();
+ if (instr->hydrogen()->representation().IsInteger32()) {
+ Register result = ToRegister32(instr->result());
+ Register left = ToRegister32(instr->left());
+ Operand right = ToOperand32I(instr->right());
+
+ __ Cmp(left, right);
+ __ Csel(result, left, right, (op == HMathMinMax::kMathMax) ? ge : le);
+ } else if (instr->hydrogen()->representation().IsSmi()) {
+ Register result = ToRegister(instr->result());
+ Register left = ToRegister(instr->left());
+ Operand right = ToOperand(instr->right());
+
+ __ Cmp(left, right);
+ __ Csel(result, left, right, (op == HMathMinMax::kMathMax) ? ge : le);
+ } else {
+ ASSERT(instr->hydrogen()->representation().IsDouble());
+ DoubleRegister result = ToDoubleRegister(instr->result());
+ DoubleRegister left = ToDoubleRegister(instr->left());
+ DoubleRegister right = ToDoubleRegister(instr->right());
+
+ if (op == HMathMinMax::kMathMax) {
+ __ Fmax(result, left, right);
+ } else {
+ ASSERT(op == HMathMinMax::kMathMin);
+ __ Fmin(result, left, right);
+ }
+ }
+}
+
+
+// Integer modulus. Power-of-two divisors are handled with masking; otherwise
+// modulo = dividend - (dividend / divisor) * divisor via sdiv + msub.
+// Deoptimizes on a zero divisor (when possible) and on a -0 result (when the
+// instruction bails out on minus zero).
+void LCodeGen::DoModI(LModI* instr) {
+ HMod* hmod = instr->hydrogen();
+ HValue* hleft = hmod->left();
+ HValue* hright = hmod->right();
+
+ Label done;
+ Register result = ToRegister32(instr->result());
+ Register dividend = ToRegister32(instr->left());
+
+ bool need_minus_zero_check = (hmod->CheckFlag(HValue::kBailoutOnMinusZero) &&
+ hleft->CanBeNegative() && hmod->CanBeZero());
+
+ if (hmod->RightIsPowerOf2()) {
+ // Note: The code below even works when right contains kMinInt.
+ int32_t divisor = Abs(hright->GetInteger32Constant());
+
+ if (hleft->CanBeNegative()) {
+ // Take abs(dividend), mask, then restore the sign, so the result has
+ // the sign of the dividend.
+ __ Cmp(dividend, 0);
+ __ Cneg(result, dividend, mi);
+ __ And(result, result, divisor - 1);
+ __ Cneg(result, result, mi);
+ if (need_minus_zero_check) {
+ __ Cbnz(result, &done);
+ // The result is 0. Deoptimize if the dividend was negative.
+ // (The mi condition still reflects the Cmp above.)
+ DeoptimizeIf(mi, instr->environment());
+ }
+ } else {
+ __ And(result, dividend, divisor - 1);
+ }
+
+ } else {
+ Label deopt;
+ Register divisor = ToRegister32(instr->right());
+ // Compute:
+ // modulo = dividend - quotient * divisor
+ __ Sdiv(result, dividend, divisor);
+ if (hright->CanBeZero()) {
+ // Combine the deoptimization sites.
+ Label ok;
+ __ Cbnz(divisor, &ok);
+ __ Bind(&deopt);
+ Deoptimize(instr->environment());
+ __ Bind(&ok);
+ }
+ __ Msub(result, result, divisor, dividend);
+ if (need_minus_zero_check) {
+ __ Cbnz(result, &done);
+ // Result is 0: deopt if the dividend was negative (-0 case), reusing
+ // the shared deopt site when one was emitted above.
+ if (deopt.is_bound()) {
+ __ Tbnz(dividend, kWSignBit, &deopt);
+ } else {
+ DeoptimizeIfNegative(dividend, instr->environment());
+ }
+ }
+ }
+ __ Bind(&done);
+}
+
+
+// Multiply a smi or int32 value by a compile-time constant. Small constants
+// (-1, 0, 1, 2) get dedicated sequences with overflow checks; other
+// constants must be powers of two (or one off a power of two) and are
+// strength-reduced to shifts and adds, with no overflow support.
+void LCodeGen::DoMulConstIS(LMulConstIS* instr) {
+ ASSERT(instr->hydrogen()->representation().IsSmiOrInteger32());
+ bool is_smi = instr->hydrogen()->representation().IsSmi();
+ Register result =
+ is_smi ? ToRegister(instr->result()) : ToRegister32(instr->result());
+ Register left =
+ is_smi ? ToRegister(instr->left()) : ToRegister32(instr->left()) ;
+ int32_t right = ToInteger32(instr->right());
+
+ bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
+ bool bailout_on_minus_zero =
+ instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero);
+
+ if (bailout_on_minus_zero) {
+ if (right < 0) {
+ // The result is -0 if right is negative and left is zero.
+ DeoptimizeIfZero(left, instr->environment());
+ } else if (right == 0) {
+ // The result is -0 if the right is zero and the left is negative.
+ DeoptimizeIfNegative(left, instr->environment());
+ }
+ }
+
+ switch (right) {
+ // Cases which can detect overflow.
+ case -1:
+ if (can_overflow) {
+ // Only 0x80000000 can overflow here.
+ __ Negs(result, left);
+ DeoptimizeIf(vs, instr->environment());
+ } else {
+ __ Neg(result, left);
+ }
+ break;
+ case 0:
+ // This case can never overflow.
+ __ Mov(result, 0);
+ break;
+ case 1:
+ // This case can never overflow.
+ __ Mov(result, left, kDiscardForSameWReg);
+ break;
+ case 2:
+ if (can_overflow) {
+ __ Adds(result, left, left);
+ DeoptimizeIf(vs, instr->environment());
+ } else {
+ __ Add(result, left, left);
+ }
+ break;
+
+ // All other cases cannot detect overflow, because it would probably be no
+ // faster than using the smull method in LMulI.
+ // TODO(jbramley): Investigate this, and add overflow support if it would
+ // be useful.
+ default:
+ ASSERT(!can_overflow);
+
+ // Multiplication by constant powers of two (and some related values)
+ // can be done efficiently with shifted operands.
+ if (right >= 0) {
+ if (IsPowerOf2(right)) {
+ // result = left << log2(right)
+ __ Lsl(result, left, WhichPowerOf2(right));
+ } else if (IsPowerOf2(right - 1)) {
+ // result = left + (left << log2(right - 1))
+ __ Add(result, left, Operand(left, LSL, WhichPowerOf2(right - 1)));
+ } else if (IsPowerOf2(right + 1)) {
+ // result = (left << log2(right + 1)) - left
+ __ Sub(result, left, Operand(left, LSL, WhichPowerOf2(right + 1)));
+ __ Neg(result, result);
+ } else {
+ UNREACHABLE();
+ }
+ } else {
+ if (IsPowerOf2(-right)) {
+ // result = -left << log2(-right)
+ __ Neg(result, Operand(left, LSL, WhichPowerOf2(-right)));
+ } else if (IsPowerOf2(-right + 1)) {
+ // result = left - (left << log2(-right + 1))
+ __ Sub(result, left, Operand(left, LSL, WhichPowerOf2(-right + 1)));
+ } else if (IsPowerOf2(-right - 1)) {
+ // result = -(left + (left << log2(-right - 1)))
+ __ Add(result, left, Operand(left, LSL, WhichPowerOf2(-right - 1)));
+ __ Neg(result, result);
+ } else {
+ UNREACHABLE();
+ }
+ }
+ break;
+ }
+}
+
+
+// Int32 multiply. Overflow is detected by computing the full 64-bit product
+// with smull and comparing it against its own sign-extended low 32 bits.
+void LCodeGen::DoMulI(LMulI* instr) {
+ Register result = ToRegister32(instr->result());
+ Register left = ToRegister32(instr->left());
+ Register right = ToRegister32(instr->right());
+
+ bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
+ bool bailout_on_minus_zero =
+ instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero);
+
+ if (bailout_on_minus_zero) {
+ // If one operand is zero and the other is negative, the result is -0.
+ // - Set Z (eq) if either left or right, or both, are 0.
+ __ Cmp(left, 0);
+ __ Ccmp(right, 0, ZFlag, ne);
+ // - If so (eq), set N (mi) if left + right is negative.
+ // - Otherwise, clear N.
+ __ Ccmn(left, right, NoFlag, eq);
+ DeoptimizeIf(mi, instr->environment());
+ }
+
+ if (can_overflow) {
+ __ Smull(result.X(), left, right);
+ __ Cmp(result.X(), Operand(result, SXTW));
+ DeoptimizeIf(ne, instr->environment());
+ } else {
+ __ Mul(result, left, right);
+ }
+}
+
+
+// Smi multiply. With overflow checking, smulh of the two tagged values
+// yields the (untagged) product whose high bits reveal overflow; without it,
+// one operand is untagged and a plain multiply keeps the result tagged.
+void LCodeGen::DoMulS(LMulS* instr) {
+ Register result = ToRegister(instr->result());
+ Register left = ToRegister(instr->left());
+ Register right = ToRegister(instr->right());
+
+ bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
+ bool bailout_on_minus_zero =
+ instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero);
+
+ if (bailout_on_minus_zero) {
+ // If one operand is zero and the other is negative, the result is -0.
+ // - Set Z (eq) if either left or right, or both, are 0.
+ __ Cmp(left, 0);
+ __ Ccmp(right, 0, ZFlag, ne);
+ // - If so (eq), set N (mi) if left + right is negative.
+ // - Otherwise, clear N.
+ __ Ccmn(left, right, NoFlag, eq);
+ DeoptimizeIf(mi, instr->environment());
+ }
+
+ STATIC_ASSERT((kSmiShift == 32) && (kSmiTag == 0));
+ if (can_overflow) {
+ __ Smulh(result, left, right);
+ __ Cmp(result, Operand(result.W(), SXTW));
+ // SmiTag does not affect the flags set by the Cmp above.
+ __ SmiTag(result);
+ DeoptimizeIf(ne, instr->environment());
+ } else {
+ // TODO(jbramley): This could be rewritten to support UseRegisterAtStart.
+ ASSERT(!AreAliased(result, right));
+ __ SmiUntag(result, left);
+ __ Mul(result, result, right);
+ }
+}
+
+
+// Deferred part of LNumberTagD: allocate a HeapNumber via the runtime when
+// inline allocation failed (or is disabled).
+void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
+ // TODO(3095996): Get rid of this. For now, we need to make the
+ // result register contain a valid pointer because it is already
+ // contained in the register pointer map.
+ Register result = ToRegister(instr->result());
+ __ Mov(result, 0);
+
+ PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
+ // NumberTagU and NumberTagD use the context from the frame, rather than
+ // the environment's HContext or HInlinedContext value.
+ // They only call Runtime::kAllocateHeapNumber.
+ // The corresponding HChange instructions are added in a phase that does
+ // not have easy access to the local context.
+ __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber)
+ RecordSafepointWithRegisters(
+ instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
+ __ StoreToSafepointRegisterSlot(x0, result);
+}
+
+
+// Box a double into a new HeapNumber. Inline allocation is attempted first
+// (when enabled); the deferred path calls the runtime allocator.
+void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
+ class DeferredNumberTagD: public LDeferredCode {
+ public:
+ DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
+ : LDeferredCode(codegen), instr_(instr) { }
+ virtual void Generate() { codegen()->DoDeferredNumberTagD(instr_); }
+ virtual LInstruction* instr() { return instr_; }
+ private:
+ LNumberTagD* instr_;
+ };
+
+ DoubleRegister input = ToDoubleRegister(instr->value());
+ Register result = ToRegister(instr->result());
+ Register temp1 = ToRegister(instr->temp1());
+ Register temp2 = ToRegister(instr->temp2());
+
+ DeferredNumberTagD* deferred = new(zone()) DeferredNumberTagD(this, instr);
+ if (FLAG_inline_new) {
+ __ AllocateHeapNumber(result, deferred->entry(), temp1, temp2);
+ } else {
+ __ B(deferred->entry());
+ }
+
+ __ Bind(deferred->exit());
+ // Both paths end with the double value stored into the new HeapNumber.
+ __ Str(input, FieldMemOperand(result, HeapNumber::kValueOffset));
+}
+
+
+// Deferred part of LNumberTagU: box a uint32 value that does not fit in a
+// smi into a HeapNumber, allocating inline when possible and via the runtime
+// otherwise.
+void LCodeGen::DoDeferredNumberTagU(LInstruction* instr,
+ LOperand* value,
+ LOperand* temp1,
+ LOperand* temp2) {
+ Label slow, convert_and_store;
+ Register src = ToRegister32(value);
+ Register dst = ToRegister(instr->result());
+ Register scratch1 = ToRegister(temp1);
+
+ if (FLAG_inline_new) {
+ Register scratch2 = ToRegister(temp2);
+ __ AllocateHeapNumber(dst, &slow, scratch1, scratch2);
+ __ B(&convert_and_store);
+ }
+
+ // Slow case: call the runtime system to do the number allocation.
+ __ Bind(&slow);
+ // TODO(3095996): Put a valid pointer value in the stack slot where the result
+ // register is stored, as this register is in the pointer map, but contains an
+ // integer value.
+ __ Mov(dst, 0);
+ {
+ // Preserve the value of all registers.
+ PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
+
+ // NumberTagU and NumberTagD use the context from the frame, rather than
+ // the environment's HContext or HInlinedContext value.
+ // They only call Runtime::kAllocateHeapNumber.
+ // The corresponding HChange instructions are added in a phase that does
+ // not have easy access to the local context.
+ __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
+ RecordSafepointWithRegisters(
+ instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
+ __ StoreToSafepointRegisterSlot(x0, dst);
+ }
+
+ // Convert number to floating point and store in the newly allocated heap
+ // number.
+ __ Bind(&convert_and_store);
+ DoubleRegister dbl_scratch = double_scratch();
+ // Ucvtf: unsigned int to double conversion.
+ __ Ucvtf(dbl_scratch, src);
+ __ Str(dbl_scratch, FieldMemOperand(dst, HeapNumber::kValueOffset));
+}
+
+
+// Tags a uint32 value: values that fit in a Smi are tagged inline; larger
+// values take the deferred HeapNumber-allocation path.
+void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
+ class DeferredNumberTagU: public LDeferredCode {
+ public:
+ DeferredNumberTagU(LCodeGen* codegen, LNumberTagU* instr)
+ : LDeferredCode(codegen), instr_(instr) { }
+ virtual void Generate() {
+ codegen()->DoDeferredNumberTagU(instr_,
+ instr_->value(),
+ instr_->temp1(),
+ instr_->temp2());
+ }
+ virtual LInstruction* instr() { return instr_; }
+ private:
+ LNumberTagU* instr_;
+ };
+
+ Register value = ToRegister(instr->value());
+ Register result = ToRegister(instr->result());
+
+ DeferredNumberTagU* deferred = new(zone()) DeferredNumberTagU(this, instr);
+ // Unsigned compare ('hi'): anything above Smi::kMaxValue cannot be a Smi.
+ __ Cmp(value, Smi::kMaxValue);
+ __ B(hi, deferred->entry());
+ __ SmiTag(result, value);
+ __ Bind(deferred->exit());
+}
+
+
+// Converts a tagged value (Smi or HeapNumber, optionally undefined) to a
+// double register. Deopts on non-numbers, and optionally on -0 or when
+// undefined cannot be converted to NaN.
+void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
+ Register input = ToRegister(instr->value());
+ Register scratch = ToRegister(instr->temp());
+ DoubleRegister result = ToDoubleRegister(instr->result());
+ bool can_convert_undefined_to_nan =
+ instr->hydrogen()->can_convert_undefined_to_nan();
+
+ Label done, load_smi;
+
+ // Work out what untag mode we're working with.
+ HValue* value = instr->hydrogen()->value();
+ NumberUntagDMode mode = value->representation().IsSmi()
+ ? NUMBER_CANDIDATE_IS_SMI : NUMBER_CANDIDATE_IS_ANY_TAGGED;
+
+ if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
+ __ JumpIfSmi(input, &load_smi);
+
+ Label convert_undefined, deopt;
+
+ // Heap number map check. Non-heap-numbers either get a chance to be
+ // undefined (-> NaN) or deopt immediately.
+ Label* not_heap_number = can_convert_undefined_to_nan ? &convert_undefined
+ : &deopt;
+ __ Ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
+ __ JumpIfNotRoot(scratch, Heap::kHeapNumberMapRootIndex, not_heap_number);
+
+ // Load heap number.
+ __ Ldr(result, FieldMemOperand(input, HeapNumber::kValueOffset));
+ if (instr->hydrogen()->deoptimize_on_minus_zero()) {
+ __ JumpIfMinusZero(result, &deopt);
+ }
+ __ B(&done);
+
+ if (can_convert_undefined_to_nan) {
+ // Only 'undefined' may be converted; any other non-number deopts.
+ __ Bind(&convert_undefined);
+ __ JumpIfNotRoot(input, Heap::kUndefinedValueRootIndex, &deopt);
+
+ __ LoadRoot(scratch, Heap::kNanValueRootIndex);
+ __ Ldr(result, FieldMemOperand(scratch, HeapNumber::kValueOffset));
+ __ B(&done);
+ }
+
+ __ Bind(&deopt);
+ Deoptimize(instr->environment());
+ } else {
+ ASSERT(mode == NUMBER_CANDIDATE_IS_SMI);
+ // Fall through to load_smi.
+ }
+
+ // Smi to double register conversion.
+ __ Bind(&load_smi);
+ __ SmiUntagToDouble(result, input);
+
+ __ Bind(&done);
+}
+
+
+// On-stack-replacement entry point: registers the environment for deopt and
+// emits the OSR prologue.
+void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
+ // This is a pseudo-instruction that ensures that the environment here is
+ // properly registered for deoptimization and records the assembler's PC
+ // offset.
+ LEnvironment* environment = instr->environment();
+
+ // If the environment were already registered, we would have no way of
+ // backpatching it with the spill slot operands.
+ ASSERT(!environment->HasBeenRegistered());
+ RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
+
+ GenerateOsrPrologue();
+}
+
+
+// Parameters are already in their slots on entry; no code is emitted.
+void LCodeGen::DoParameter(LParameter* instr) {
+ // Nothing to do.
+}
+
+
+// Pushes a tagged argument onto the stack. Double arguments are not
+// supported here and abort code generation.
+void LCodeGen::DoPushArgument(LPushArgument* instr) {
+ LOperand* argument = instr->value();
+ if (argument->IsDoubleRegister() || argument->IsDoubleStackSlot()) {
+ Abort(kDoPushArgumentNotImplementedForDoubleType);
+ } else {
+ __ Push(ToRegister(argument));
+ }
+}
+
+
+// Emits the function epilogue: optional trace call, restore of caller-saved
+// doubles, frame teardown, dropping of the arguments (constant or dynamic
+// count), and the return itself.
+void LCodeGen::DoReturn(LReturn* instr) {
+ if (FLAG_trace && info()->IsOptimizing()) {
+ // Push the return value on the stack as the parameter.
+ // Runtime::TraceExit returns its parameter in x0. We're leaving the code
+ // managed by the register allocator and tearing down the frame, it's
+ // safe to write to the context register.
+ __ Push(x0);
+ __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ __ CallRuntime(Runtime::kTraceExit, 1);
+ }
+
+ if (info()->saves_caller_doubles()) {
+ RestoreCallerDoubles();
+ }
+
+ // Record the code range with no frame, for the crash-reporting machinery.
+ int no_frame_start = -1;
+ if (NeedsEagerFrame()) {
+ Register stack_pointer = masm()->StackPointer();
+ __ Mov(stack_pointer, fp);
+ no_frame_start = masm_->pc_offset();
+ __ Pop(fp, lr);
+ }
+
+ if (instr->has_constant_parameter_count()) {
+ // +1 drops the receiver along with the parameters.
+ int parameter_count = ToInteger32(instr->constant_parameter_count());
+ __ Drop(parameter_count + 1);
+ } else {
+ Register parameter_count = ToRegister(instr->parameter_count());
+ __ DropBySMI(parameter_count);
+ }
+ __ Ret();
+
+ if (no_frame_start != -1) {
+ info_->AddNoFrameRange(no_frame_start, masm_->pc_offset());
+ }
+}
+
+
+// Builds a MemOperand addressing the character at 'index' inside a
+// sequential string. Constant indices address relative to 'string' directly;
+// variable indices compute string+index (scaled for two-byte) into 'temp'.
+MemOperand LCodeGen::BuildSeqStringOperand(Register string,
+ Register temp,
+ LOperand* index,
+ String::Encoding encoding) {
+ if (index->IsConstantOperand()) {
+ int offset = ToInteger32(LConstantOperand::cast(index));
+ if (encoding == String::TWO_BYTE_ENCODING) {
+ offset *= kUC16Size;
+ }
+ STATIC_ASSERT(kCharSize == 1);
+ return FieldMemOperand(string, SeqString::kHeaderSize + offset);
+ }
+ ASSERT(!temp.is(string));
+ ASSERT(!temp.is(ToRegister(index)));
+ if (encoding == String::ONE_BYTE_ENCODING) {
+ __ Add(temp, string, Operand(ToRegister(index)));
+ } else {
+ // Two-byte characters: scale the index by 2 (LSL #1).
+ STATIC_ASSERT(kUC16Size == 2);
+ __ Add(temp, string, Operand(ToRegister(index), LSL, 1));
+ }
+ return FieldMemOperand(temp, SeqString::kHeaderSize);
+}
+
+
+// Loads one character from a sequential string (byte or half-word load
+// depending on the encoding). In debug mode, verifies the string's
+// representation and encoding bits first.
+void LCodeGen::DoSeqStringGetChar(LSeqStringGetChar* instr) {
+ String::Encoding encoding = instr->hydrogen()->encoding();
+ Register string = ToRegister(instr->string());
+ Register result = ToRegister(instr->result());
+ Register temp = ToRegister(instr->temp());
+
+ if (FLAG_debug_code) {
+ __ Ldr(temp, FieldMemOperand(string, HeapObject::kMapOffset));
+ __ Ldrb(temp, FieldMemOperand(temp, Map::kInstanceTypeOffset));
+
+ __ And(temp, temp,
+ Operand(kStringRepresentationMask | kStringEncodingMask));
+ static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
+ static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
+ __ Cmp(temp, Operand(encoding == String::ONE_BYTE_ENCODING
+ ? one_byte_seq_type : two_byte_seq_type));
+ __ Check(eq, kUnexpectedStringType);
+ }
+
+ MemOperand operand =
+ BuildSeqStringOperand(string, temp, instr->index(), encoding);
+ if (encoding == String::ONE_BYTE_ENCODING) {
+ __ Ldrb(result, operand);
+ } else {
+ __ Ldrh(result, operand);
+ }
+}
+
+
+// Stores one character into a sequential string (byte or half-word store
+// depending on the encoding). In debug mode, verifies the string type and
+// index bounds first.
+void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) {
+ String::Encoding encoding = instr->hydrogen()->encoding();
+ Register string = ToRegister(instr->string());
+ Register value = ToRegister(instr->value());
+ Register temp = ToRegister(instr->temp());
+
+ if (FLAG_debug_code) {
+ ASSERT(ToRegister(instr->context()).is(cp));
+ Register index = ToRegister(instr->index());
+ static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
+ static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
+ int encoding_mask =
+ instr->hydrogen()->encoding() == String::ONE_BYTE_ENCODING
+ ? one_byte_seq_type : two_byte_seq_type;
+ __ EmitSeqStringSetCharCheck(string, index, kIndexIsInteger32, temp,
+ encoding_mask);
+ }
+ MemOperand operand =
+ BuildSeqStringOperand(string, temp, instr->index(), encoding);
+ if (encoding == String::ONE_BYTE_ENCODING) {
+ __ Strb(value, operand);
+ } else {
+ __ Strh(value, operand);
+ }
+}
+
+
+// Tags an integer as a Smi. Overflow must have been ruled out upstream.
+void LCodeGen::DoSmiTag(LSmiTag* instr) {
+ ASSERT(!instr->hydrogen_value()->CheckFlag(HValue::kCanOverflow));
+ __ SmiTag(ToRegister(instr->result()), ToRegister(instr->value()));
+}
+
+
+// Untags a Smi, optionally deopting first when the input may not be a Smi.
+void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
+ Register input = ToRegister(instr->value());
+ Register result = ToRegister(instr->result());
+ // NOTE(review): 'done' and 'untag' are bound but never branched to in this
+ // function — they look vestigial; confirm whether they can be removed.
+ Label done, untag;
+
+ if (instr->needs_check()) {
+ DeoptimizeIfNotSmi(input, instr->environment());
+ }
+
+ __ Bind(&untag);
+ __ SmiUntag(result, input);
+ __ Bind(&done);
+}
+
+
+// 32-bit integer shift/rotate (ROR/SAR/SHL/SHR) with either a register or a
+// constant shift amount. SHR by zero of a negative value would not fit an
+// int32 result, so that case can deoptimize.
+void LCodeGen::DoShiftI(LShiftI* instr) {
+ LOperand* right_op = instr->right();
+ Register left = ToRegister32(instr->left());
+ Register result = ToRegister32(instr->result());
+
+ if (right_op->IsRegister()) {
+ Register right = ToRegister32(instr->right());
+ switch (instr->op()) {
+ case Token::ROR: __ Ror(result, left, right); break;
+ case Token::SAR: __ Asr(result, left, right); break;
+ case Token::SHL: __ Lsl(result, left, right); break;
+ case Token::SHR:
+ if (instr->can_deopt()) {
+ // A zero shift leaves the sign bit intact; deopt if it is set.
+ Label right_not_zero;
+ __ Cbnz(right, &right_not_zero);
+ DeoptimizeIfNegative(left, instr->environment());
+ __ Bind(&right_not_zero);
+ }
+ __ Lsr(result, left, right);
+ break;
+ default: UNREACHABLE();
+ }
+ } else {
+ ASSERT(right_op->IsConstantOperand());
+ // Only the low 5 bits of the shift count are meaningful for 32-bit shifts.
+ int shift_count = ToInteger32(LConstantOperand::cast(right_op)) & 0x1f;
+ if (shift_count == 0) {
+ if ((instr->op() == Token::SHR) && instr->can_deopt()) {
+ DeoptimizeIfNegative(left, instr->environment());
+ }
+ __ Mov(result, left, kDiscardForSameWReg);
+ } else {
+ switch (instr->op()) {
+ case Token::ROR: __ Ror(result, left, shift_count); break;
+ case Token::SAR: __ Asr(result, left, shift_count); break;
+ case Token::SHL: __ Lsl(result, left, shift_count); break;
+ case Token::SHR: __ Lsr(result, left, shift_count); break;
+ default: UNREACHABLE();
+ }
+ }
+ }
+}
+
+
+// Smi shift/rotate: operands and result stay Smi-tagged. Register shift
+// amounts are extracted from the Smi payload (Ubfx of 5 bits above
+// kSmiShift); SAR/SHR clear the low tag bits afterwards with Bic.
+void LCodeGen::DoShiftS(LShiftS* instr) {
+ LOperand* right_op = instr->right();
+ Register left = ToRegister(instr->left());
+ Register result = ToRegister(instr->result());
+
+ // Only ROR by register needs a temp.
+ ASSERT(((instr->op() == Token::ROR) && right_op->IsRegister()) ||
+ (instr->temp() == NULL));
+
+ if (right_op->IsRegister()) {
+ Register right = ToRegister(instr->right());
+ switch (instr->op()) {
+ case Token::ROR: {
+ // ROR must rotate the 32-bit payload, so untag, rotate as W reg,
+ // then re-tag.
+ Register temp = ToRegister(instr->temp());
+ __ Ubfx(temp, right, kSmiShift, 5);
+ __ SmiUntag(result, left);
+ __ Ror(result.W(), result.W(), temp.W());
+ __ SmiTag(result);
+ break;
+ }
+ case Token::SAR:
+ __ Ubfx(result, right, kSmiShift, 5);
+ __ Asr(result, left, result);
+ __ Bic(result, result, kSmiShiftMask);
+ break;
+ case Token::SHL:
+ __ Ubfx(result, right, kSmiShift, 5);
+ __ Lsl(result, left, result);
+ break;
+ case Token::SHR:
+ if (instr->can_deopt()) {
+ // A zero shift of a negative value cannot produce a valid result.
+ Label right_not_zero;
+ __ Cbnz(right, &right_not_zero);
+ DeoptimizeIfNegative(left, instr->environment());
+ __ Bind(&right_not_zero);
+ }
+ __ Ubfx(result, right, kSmiShift, 5);
+ __ Lsr(result, left, result);
+ __ Bic(result, result, kSmiShiftMask);
+ break;
+ default: UNREACHABLE();
+ }
+ } else {
+ ASSERT(right_op->IsConstantOperand());
+ int shift_count = ToInteger32(LConstantOperand::cast(right_op)) & 0x1f;
+ if (shift_count == 0) {
+ if ((instr->op() == Token::SHR) && instr->can_deopt()) {
+ DeoptimizeIfNegative(left, instr->environment());
+ }
+ __ Mov(result, left);
+ } else {
+ switch (instr->op()) {
+ case Token::ROR:
+ __ SmiUntag(result, left);
+ __ Ror(result.W(), result.W(), shift_count);
+ __ SmiTag(result);
+ break;
+ case Token::SAR:
+ __ Asr(result, left, shift_count);
+ __ Bic(result, result, kSmiShiftMask);
+ break;
+ case Token::SHL:
+ __ Lsl(result, left, shift_count);
+ break;
+ case Token::SHR:
+ __ Lsr(result, left, shift_count);
+ __ Bic(result, result, kSmiShiftMask);
+ break;
+ default: UNREACHABLE();
+ }
+ }
+ }
+}
+
+
+// Emits a debugger break instruction.
+void LCodeGen::DoDebugBreak(LDebugBreak* instr) {
+ __ Debug("LDebugBreak", 0, BREAK);
+}
+
+
+// Declares global variables by calling Runtime::kDeclareGlobals with the
+// context, the declaration pairs, and the flags.
+void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
+ ASSERT(ToRegister(instr->context()).is(cp));
+ Register scratch1 = x5;
+ Register scratch2 = x6;
+ ASSERT(instr->IsMarkedAsCall());
+
+ // NOTE(review): this break is emitted even though a seemingly complete
+ // implementation follows — confirm whether it is a leftover placeholder
+ // that should be removed.
+ ASM_UNIMPLEMENTED_BREAK("DoDeclareGlobals");
+ // TODO(all): if Mov could handle object in new space then it could be used
+ // here.
+ __ LoadHeapObject(scratch1, instr->hydrogen()->pairs());
+ __ Mov(scratch2, Operand(Smi::FromInt(instr->hydrogen()->flags())));
+ __ Push(cp, scratch1, scratch2); // The context is the first argument.
+ CallRuntime(Runtime::kDeclareGlobals, 3, instr);
+}
+
+
+// Deferred slow path for a stack check: calls Runtime::kStackGuard with all
+// registers preserved and records the lazy-deopt safepoint.
+void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
+ PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
+ LoadContextFromDeferred(instr->context());
+ __ CallRuntimeSaveDoubles(Runtime::kStackGuard);
+ RecordSafepointWithLazyDeopt(
+ instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
+ ASSERT(instr->HasEnvironment());
+ LEnvironment* env = instr->environment();
+ safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
+}
+
+
+// Emits a stack-overflow check. Function-entry checks call the StackCheck
+// builtin inline; backwards-branch checks jump to deferred code that calls
+// the runtime stack guard.
+void LCodeGen::DoStackCheck(LStackCheck* instr) {
+ class DeferredStackCheck: public LDeferredCode {
+ public:
+ DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
+ : LDeferredCode(codegen), instr_(instr) { }
+ virtual void Generate() { codegen()->DoDeferredStackCheck(instr_); }
+ virtual LInstruction* instr() { return instr_; }
+ private:
+ LStackCheck* instr_;
+ };
+
+ ASSERT(instr->HasEnvironment());
+ LEnvironment* env = instr->environment();
+ // There is no LLazyBailout instruction for stack-checks. We have to
+ // prepare for lazy deoptimization explicitly here.
+ if (instr->hydrogen()->is_function_entry()) {
+ // Perform stack overflow check.
+ Label done;
+ __ CompareRoot(masm()->StackPointer(), Heap::kStackLimitRootIndex);
+ __ B(hs, &done);
+
+ PredictableCodeSizeScope predictable(masm_,
+ Assembler::kCallSizeWithRelocation);
+ ASSERT(instr->context()->IsRegister());
+ ASSERT(ToRegister(instr->context()).is(cp));
+ CallCode(isolate()->builtins()->StackCheck(),
+ RelocInfo::CODE_TARGET,
+ instr);
+ EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
+
+ __ Bind(&done);
+ RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
+ safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
+ } else {
+ ASSERT(instr->hydrogen()->is_backwards_branch());
+ // Perform stack overflow check if this goto needs it before jumping.
+ DeferredStackCheck* deferred_stack_check =
+ new(zone()) DeferredStackCheck(this, instr);
+ __ CompareRoot(masm()->StackPointer(), Heap::kStackLimitRootIndex);
+ __ B(lo, deferred_stack_check->entry());
+
+ EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
+ __ Bind(instr->done_label());
+ deferred_stack_check->SetExit(instr->done_label());
+ RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
+ // Don't record a deoptimization index for the safepoint here.
+ // This will be done explicitly when emitting call and the safepoint in
+ // the deferred code.
+ }
+}
+
+
+// Stores a code object's entry address (past the Code header) into a
+// JSFunction's code-entry field.
+void LCodeGen::DoStoreCodeEntry(LStoreCodeEntry* instr) {
+ Register function = ToRegister(instr->function());
+ Register code_object = ToRegister(instr->code_object());
+ Register temp = ToRegister(instr->temp());
+ __ Add(temp, code_object, Code::kHeaderSize - kHeapObjectTag);
+ __ Str(temp, FieldMemOperand(function, JSFunction::kCodeEntryOffset));
+}
+
+
+// Stores a value into a context slot, with an optional hole check (deopt or
+// skip the store) and a write barrier when required.
+void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
+ Register context = ToRegister(instr->context());
+ Register value = ToRegister(instr->value());
+ Register scratch = ToRegister(instr->temp());
+ MemOperand target = ContextMemOperand(context, instr->slot_index());
+
+ Label skip_assignment;
+
+ if (instr->hydrogen()->RequiresHoleCheck()) {
+ __ Ldr(scratch, target);
+ if (instr->hydrogen()->DeoptimizesOnHole()) {
+ DeoptimizeIfRoot(scratch, Heap::kTheHoleValueRootIndex,
+ instr->environment());
+ } else {
+ __ JumpIfNotRoot(scratch, Heap::kTheHoleValueRootIndex, &skip_assignment);
+ }
+ }
+
+ __ Str(value, target);
+ if (instr->hydrogen()->NeedsWriteBarrier()) {
+ // Known heap objects never need the inline Smi check.
+ SmiCheck check_needed =
+ instr->hydrogen()->value()->IsHeapObject()
+ ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
+ __ RecordWriteContextSlot(context,
+ target.offset(),
+ value,
+ scratch,
+ GetLinkRegisterState(),
+ kSaveFPRegs,
+ EMIT_REMEMBERED_SET,
+ check_needed);
+ }
+ __ Bind(&skip_assignment);
+}
+
+
+// Stores a value into a global property cell, deopting if the cell holds
+// the hole (the property was deleted).
+void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
+ Register value = ToRegister(instr->value());
+ Register cell = ToRegister(instr->temp1());
+
+ // Load the cell.
+ __ Mov(cell, Operand(instr->hydrogen()->cell().handle()));
+
+ // If the cell we are storing to contains the hole it could have
+ // been deleted from the property dictionary. In that case, we need
+ // to update the property details in the property dictionary to mark
+ // it as no longer deleted. We deoptimize in that case.
+ if (instr->hydrogen()->RequiresHoleCheck()) {
+ Register payload = ToRegister(instr->temp2());
+ __ Ldr(payload, FieldMemOperand(cell, Cell::kValueOffset));
+ DeoptimizeIfRoot(
+ payload, Heap::kTheHoleValueRootIndex, instr->environment());
+ }
+
+ // Store the value.
+ __ Str(value, FieldMemOperand(cell, Cell::kValueOffset));
+ // Cells are always rescanned, so no write barrier here.
+}
+
+
+// Stores an element into an external (typed) array, choosing the store
+// width/type from the elements kind: float32 via a down-converted scratch,
+// float64 directly, and integer kinds with Strb/Strh/Str.W as appropriate.
+void LCodeGen::DoStoreKeyedExternal(LStoreKeyedExternal* instr) {
+ Register ext_ptr = ToRegister(instr->elements());
+ Register key = no_reg;
+ Register scratch;
+ ElementsKind elements_kind = instr->elements_kind();
+
+ bool key_is_smi = instr->hydrogen()->key()->representation().IsSmi();
+ bool key_is_constant = instr->key()->IsConstantOperand();
+ int constant_key = 0;
+ if (key_is_constant) {
+ ASSERT(instr->temp() == NULL);
+ constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
+ // Reject indices too large to encode safely.
+ if (constant_key & 0xf0000000) {
+ Abort(kArrayIndexConstantValueTooBig);
+ }
+ } else {
+ key = ToRegister(instr->key());
+ scratch = ToRegister(instr->temp());
+ }
+
+ MemOperand dst =
+ PrepareKeyedExternalArrayOperand(key, ext_ptr, scratch, key_is_smi,
+ key_is_constant, constant_key,
+ elements_kind,
+ instr->additional_index());
+
+ if ((elements_kind == EXTERNAL_FLOAT32_ELEMENTS) ||
+ (elements_kind == FLOAT32_ELEMENTS)) {
+ // Narrow the double to single precision before storing.
+ DoubleRegister value = ToDoubleRegister(instr->value());
+ DoubleRegister dbl_scratch = double_scratch();
+ __ Fcvt(dbl_scratch.S(), value);
+ __ Str(dbl_scratch.S(), dst);
+ } else if ((elements_kind == EXTERNAL_FLOAT64_ELEMENTS) ||
+ (elements_kind == FLOAT64_ELEMENTS)) {
+ DoubleRegister value = ToDoubleRegister(instr->value());
+ __ Str(value, dst);
+ } else {
+ Register value = ToRegister(instr->value());
+
+ switch (elements_kind) {
+ case EXTERNAL_UINT8_CLAMPED_ELEMENTS:
+ case EXTERNAL_INT8_ELEMENTS:
+ case EXTERNAL_UINT8_ELEMENTS:
+ case UINT8_ELEMENTS:
+ case UINT8_CLAMPED_ELEMENTS:
+ case INT8_ELEMENTS:
+ __ Strb(value, dst);
+ break;
+ case EXTERNAL_INT16_ELEMENTS:
+ case EXTERNAL_UINT16_ELEMENTS:
+ case INT16_ELEMENTS:
+ case UINT16_ELEMENTS:
+ __ Strh(value, dst);
+ break;
+ case EXTERNAL_INT32_ELEMENTS:
+ case EXTERNAL_UINT32_ELEMENTS:
+ case INT32_ELEMENTS:
+ case UINT32_ELEMENTS:
+ __ Str(value.W(), dst);
+ break;
+ case FLOAT32_ELEMENTS:
+ case FLOAT64_ELEMENTS:
+ case EXTERNAL_FLOAT32_ELEMENTS:
+ case EXTERNAL_FLOAT64_ELEMENTS:
+ case FAST_DOUBLE_ELEMENTS:
+ case FAST_ELEMENTS:
+ case FAST_SMI_ELEMENTS:
+ case FAST_HOLEY_DOUBLE_ELEMENTS:
+ case FAST_HOLEY_ELEMENTS:
+ case FAST_HOLEY_SMI_ELEMENTS:
+ case DICTIONARY_ELEMENTS:
+ case NON_STRICT_ARGUMENTS_ELEMENTS:
+ UNREACHABLE();
+ break;
+ }
+ }
+}
+
+
+// Stores a double into a FixedDoubleArray, optionally canonicalizing NaNs
+// so stored bit patterns never alias the hole representation.
+void LCodeGen::DoStoreKeyedFixedDouble(LStoreKeyedFixedDouble* instr) {
+ Register elements = ToRegister(instr->elements());
+ DoubleRegister value = ToDoubleRegister(instr->value());
+ Register store_base = ToRegister(instr->temp());
+ int offset = 0;
+
+ if (instr->key()->IsConstantOperand()) {
+ int constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
+ if (constant_key & 0xf0000000) {
+ Abort(kArrayIndexConstantValueTooBig);
+ }
+ offset = FixedDoubleArray::OffsetOfElementAt(constant_key +
+ instr->additional_index());
+ store_base = elements;
+ } else {
+ Register key = ToRegister(instr->key());
+ bool key_is_tagged = instr->hydrogen()->key()->representation().IsSmi();
+ CalcKeyedArrayBaseRegister(store_base, elements, key, key_is_tagged,
+ instr->hydrogen()->elements_kind());
+ offset = FixedDoubleArray::OffsetOfElementAt(instr->additional_index());
+ }
+
+ if (instr->NeedsCanonicalization()) {
+ // Fmaxnm with the canonical NaN: presumably NaN inputs are replaced by
+ // the canonical NaN while ordinary numbers pass through — TODO confirm
+ // against the Fmaxnm NaN-handling rules.
+ DoubleRegister dbl_scratch = double_scratch();
+ __ Fmov(dbl_scratch,
+ FixedDoubleArray::canonical_not_the_hole_nan_as_double());
+ __ Fmaxnm(dbl_scratch, dbl_scratch, value);
+ __ Str(dbl_scratch, FieldMemOperand(store_base, offset));
+ } else {
+ __ Str(value, FieldMemOperand(store_base, offset));
+ }
+}
+
+
+// Stores a tagged (or int32-into-Smi-slot) value into a FixedArray element,
+// emitting a write barrier when required.
+void LCodeGen::DoStoreKeyedFixed(LStoreKeyedFixed* instr) {
+ Register value = ToRegister(instr->value());
+ Register elements = ToRegister(instr->elements());
+ Register store_base = ToRegister(instr->temp());
+ Register key = no_reg;
+ int offset = 0;
+
+ if (instr->key()->IsConstantOperand()) {
+ ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
+ LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
+ offset = FixedArray::OffsetOfElementAt(ToInteger32(const_operand) +
+ instr->additional_index());
+ store_base = elements;
+ } else {
+ key = ToRegister(instr->key());
+ bool key_is_tagged = instr->hydrogen()->key()->representation().IsSmi();
+ CalcKeyedArrayBaseRegister(store_base, elements, key, key_is_tagged,
+ instr->hydrogen()->elements_kind());
+ offset = FixedArray::OffsetOfElementAt(instr->additional_index());
+ }
+ Representation representation = instr->hydrogen()->value()->representation();
+ if (representation.IsInteger32()) {
+ // With 32-bit Smis in the upper word, an int32 can be written directly
+ // into the payload half of an initialized Smi slot.
+ ASSERT(instr->hydrogen()->store_mode() == STORE_TO_INITIALIZED_ENTRY);
+ ASSERT(instr->hydrogen()->elements_kind() == FAST_SMI_ELEMENTS);
+ STATIC_ASSERT(kSmiValueSize == 32 && kSmiShift == 32 && kSmiTag == 0);
+ __ Store(value, UntagSmiFieldMemOperand(store_base, offset),
+ Representation::Integer32());
+ } else {
+ __ Store(value, FieldMemOperand(store_base, offset), representation);
+ }
+
+ if (instr->hydrogen()->NeedsWriteBarrier()) {
+ SmiCheck check_needed =
+ instr->hydrogen()->value()->IsHeapObject()
+ ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
+ // Compute address of modified element and store it into key register.
+ __ Add(key, store_base, offset - kHeapObjectTag);
+ __ RecordWrite(elements, key, value, GetLinkRegisterState(), kSaveFPRegs,
+ EMIT_REMEMBERED_SET, check_needed);
+ }
+}
+
+
+// Generic keyed store: calls the KeyedStoreIC stub (strict or sloppy
+// variant) with object/key/value pinned to x2/x1/x0 by the calling
+// convention.
+void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
+ ASSERT(ToRegister(instr->context()).is(cp));
+ ASSERT(ToRegister(instr->object()).Is(x2));
+ ASSERT(ToRegister(instr->key()).Is(x1));
+ ASSERT(ToRegister(instr->value()).Is(x0));
+
+ Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode)
+ ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
+ : isolate()->builtins()->KeyedStoreIC_Initialize();
+ CallCode(ic, RelocInfo::CODE_TARGET, instr);
+}
+
+
+// TODO(jbramley): Once the merge is done and we're tracking bleeding_edge, try
+// to tidy up this function.
+// TODO(jbramley): Once the merge is done and we're tracking bleeding_edge, try
+// to tidy up this function.
+// Stores a value into a named field: handles external-memory accesses,
+// heap-object checks, in-object double fields, map transitions (with map
+// write barrier), in-object vs. properties-backing-store targets, int32
+// stores into Smi fields, and the field write barrier.
+void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
+ Representation representation = instr->representation();
+
+ Register object = ToRegister(instr->object());
+ Register temp0 = ToRegister(instr->temp0());
+ Register temp1 = ToRegister(instr->temp1());
+ HObjectAccess access = instr->hydrogen()->access();
+ int offset = access.offset();
+
+ if (access.IsExternalMemory()) {
+ // Raw (untagged) memory: store and return, no barriers needed.
+ Register value = ToRegister(instr->value());
+ __ Store(value, MemOperand(object, offset), representation);
+ return;
+ }
+
+ Handle<Map> transition = instr->transition();
+
+ if (FLAG_track_heap_object_fields && representation.IsHeapObject()) {
+ Register value = ToRegister(instr->value());
+ if (!instr->hydrogen()->value()->type().IsHeapObject()) {
+ DeoptimizeIfSmi(value, instr->environment());
+ }
+ } else if (representation.IsDouble()) {
+ // Unboxed double field: store the raw double and return early.
+ ASSERT(transition.is_null());
+ ASSERT(access.IsInobject());
+ ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
+ FPRegister value = ToDoubleRegister(instr->value());
+ __ Str(value, FieldMemOperand(object, offset));
+ return;
+ }
+
+ if (!transition.is_null()) {
+ // Store the new map value.
+ Register new_map_value = temp0;
+ __ Mov(new_map_value, Operand(transition));
+ __ Str(new_map_value, FieldMemOperand(object, HeapObject::kMapOffset));
+ if (instr->hydrogen()->NeedsWriteBarrierForMap()) {
+ // Update the write barrier for the map field.
+ __ RecordWriteField(object,
+ HeapObject::kMapOffset,
+ new_map_value,
+ temp1,
+ GetLinkRegisterState(),
+ kSaveFPRegs,
+ OMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
+ }
+ }
+
+ // Do the store.
+ Register value = ToRegister(instr->value());
+ Register destination;
+ SmiCheck check_needed =
+ instr->hydrogen()->value()->IsHeapObject()
+ ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
+ if (access.IsInobject()) {
+ destination = object;
+ } else {
+ // Out-of-object property: store into the properties backing store.
+ __ Ldr(temp0, FieldMemOperand(object, JSObject::kPropertiesOffset));
+ destination = temp0;
+ }
+
+ if (representation.IsSmi() &&
+ instr->hydrogen()->value()->representation().IsInteger32()) {
+ // Write the int32 directly into the payload half of the Smi slot.
+ ASSERT(instr->hydrogen()->store_mode() == STORE_TO_INITIALIZED_ENTRY);
+ STATIC_ASSERT(kSmiValueSize == 32 && kSmiShift == 32 && kSmiTag == 0);
+ __ Store(value, UntagSmiFieldMemOperand(destination, offset),
+ Representation::Integer32());
+ } else {
+ __ Store(value, FieldMemOperand(destination, offset), representation);
+ }
+ if (instr->hydrogen()->NeedsWriteBarrier()) {
+ __ RecordWriteField(destination,
+ offset,
+ value, // Clobbered.
+ temp1, // Clobbered.
+ GetLinkRegisterState(),
+ kSaveFPRegs,
+ EMIT_REMEMBERED_SET,
+ check_needed);
+ }
+}
+
+
+// Generic named store: loads the property name into x2 and calls the
+// StoreIC stub; value/object are pinned to x0/x1 by convention.
+void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
+ ASSERT(ToRegister(instr->context()).is(cp));
+ ASSERT(ToRegister(instr->value()).is(x0));
+ ASSERT(ToRegister(instr->object()).is(x1));
+
+ // Name must be in x2.
+ __ Mov(x2, Operand(instr->name()));
+ Handle<Code> ic = StoreIC::initialize_stub(isolate(),
+ instr->strict_mode_flag());
+ CallCode(ic, RelocInfo::CODE_TARGET, instr);
+}
+
+
+// String concatenation via the StringAddStub; left/right are pinned to
+// x1/x0 by the stub's calling convention.
+void LCodeGen::DoStringAdd(LStringAdd* instr) {
+ ASSERT(ToRegister(instr->context()).is(cp));
+ ASSERT(ToRegister(instr->left()).Is(x1));
+ ASSERT(ToRegister(instr->right()).Is(x0));
+ StringAddStub stub(instr->hydrogen()->flags(),
+ instr->hydrogen()->pretenure_flag());
+ CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
+}
+
+
+// Loads the character code at an index of a string via the inline
+// StringCharLoadGenerator, deferring to the runtime for the hard cases.
+void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
+ class DeferredStringCharCodeAt: public LDeferredCode {
+ public:
+ DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr)
+ : LDeferredCode(codegen), instr_(instr) { }
+ virtual void Generate() { codegen()->DoDeferredStringCharCodeAt(instr_); }
+ virtual LInstruction* instr() { return instr_; }
+ private:
+ LStringCharCodeAt* instr_;
+ };
+
+ DeferredStringCharCodeAt* deferred =
+ new(zone()) DeferredStringCharCodeAt(this, instr);
+
+ StringCharLoadGenerator::Generate(masm(),
+ ToRegister(instr->string()),
+ ToRegister(instr->index()),
+ ToRegister(instr->result()),
+ deferred->entry());
+ __ Bind(deferred->exit());
+}
+
+
+// Deferred path for DoStringCharCodeAt: calls Runtime::kStringCharCodeAt
+// with the string and Smi-tagged index, then untags the Smi result.
+void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
+ Register string = ToRegister(instr->string());
+ Register result = ToRegister(instr->result());
+
+ // TODO(3095996): Get rid of this. For now, we need to make the
+ // result register contain a valid pointer because it is already
+ // contained in the register pointer map.
+ __ Mov(result, 0);
+
+ PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
+ __ Push(string);
+ // Push the index as a smi. This is safe because of the checks in
+ // DoStringCharCodeAt above.
+ Register index = ToRegister(instr->index());
+ __ SmiTag(index);
+ __ Push(index);
+
+ CallRuntimeFromDeferred(Runtime::kStringCharCodeAt, 2, instr,
+ instr->context());
+ __ AssertSmi(x0);
+ __ SmiUntag(x0);
+ __ StoreToSafepointRegisterSlot(x0, result);
+}
+
+
+// Converts a character code to a single-character string using the
+// single-character string cache; cache misses and codes above the one-byte
+// range fall back to the deferred runtime path.
+void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
+ class DeferredStringCharFromCode: public LDeferredCode {
+ public:
+ DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr)
+ : LDeferredCode(codegen), instr_(instr) { }
+ virtual void Generate() { codegen()->DoDeferredStringCharFromCode(instr_); }
+ virtual LInstruction* instr() { return instr_; }
+ private:
+ LStringCharFromCode* instr_;
+ };
+
+ DeferredStringCharFromCode* deferred =
+ new(zone()) DeferredStringCharFromCode(this, instr);
+
+ ASSERT(instr->hydrogen()->value()->representation().IsInteger32());
+ Register char_code = ToRegister(instr->char_code());
+ Register result = ToRegister(instr->result());
+
+ __ Cmp(char_code, Operand(String::kMaxOneByteCharCode));
+ __ B(hi, deferred->entry());
+ __ LoadRoot(result, Heap::kSingleCharacterStringCacheRootIndex);
+ __ Add(result, result, Operand(char_code, LSL, kPointerSizeLog2));
+ __ Ldr(result, FieldMemOperand(result, FixedArray::kHeaderSize));
+ // An undefined cache entry means the string is not cached: take the
+ // deferred path.
+ __ CompareRoot(result, Heap::kUndefinedValueRootIndex);
+ __ B(eq, deferred->entry());
+ __ Bind(deferred->exit());
+}
+
+
+// Deferred path for DoStringCharFromCode: calls Runtime::kCharFromCode with
+// the Smi-tagged character code.
+void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
+ Register char_code = ToRegister(instr->char_code());
+ Register result = ToRegister(instr->result());
+
+ // TODO(3095996): Get rid of this. For now, we need to make the
+ // result register contain a valid pointer because it is already
+ // contained in the register pointer map.
+ __ Mov(result, 0);
+
+ PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
+ __ SmiTag(char_code);
+ __ Push(char_code);
+ CallRuntimeFromDeferred(Runtime::kCharFromCode, 1, instr, instr->context());
+ __ StoreToSafepointRegisterSlot(x0, result);
+}
+
+
+// String comparison branch: calls the CompareIC for the token operation and
+// branches on the result in x0.
+void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
+ ASSERT(ToRegister(instr->context()).is(cp));
+ Token::Value op = instr->op();
+
+ Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
+ CallCode(ic, RelocInfo::CODE_TARGET, instr);
+ InlineSmiCheckInfo::EmitNotInlined(masm());
+
+ Condition condition = TokenToCondition(op, false);
+
+ EmitCompareAndBranch(instr, condition, x0, 0);
+}
+
+
+// 32-bit integer subtraction; uses the flag-setting form and deopts on
+// signed overflow (V flag) when the hydrogen value can overflow.
+void LCodeGen::DoSubI(LSubI* instr) {
+ bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
+ Register result = ToRegister32(instr->result());
+ Register left = ToRegister32(instr->left());
+ Operand right = ToOperand32I(instr->right());
+ if (can_overflow) {
+ __ Subs(result, left, right);
+ DeoptimizeIf(vs, instr->environment());
+ } else {
+ __ Sub(result, left, right);
+ }
+}
+
+
+// Smi subtraction (full-width registers); deopts on signed overflow when
+// the hydrogen value can overflow.
+void LCodeGen::DoSubS(LSubS* instr) {
+ bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
+ Register result = ToRegister(instr->result());
+ Register left = ToRegister(instr->left());
+ Operand right = ToOperand(instr->right());
+ if (can_overflow) {
+ __ Subs(result, left, right);
+ DeoptimizeIf(vs, instr->environment());
+ } else {
+ __ Sub(result, left, right);
+ }
+}
+
+
+// Deferred tagged-to-int32 conversion for non-Smi inputs. Truncating mode
+// accepts heap numbers (ECMA-262 ToInt32), true/false (1/0) and undefined
+// (0); non-truncating mode accepts only heap numbers whose value converts
+// exactly, with an optional -0 deopt.
+void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr,
+ LOperand* value,
+ LOperand* temp1,
+ LOperand* temp2) {
+ Register input = ToRegister(value);
+ Register scratch1 = ToRegister(temp1);
+ DoubleRegister dbl_scratch1 = double_scratch();
+
+ Label done;
+
+ // Load heap object map.
+ __ Ldr(scratch1, FieldMemOperand(input, HeapObject::kMapOffset));
+
+ if (instr->truncating()) {
+ Register output = ToRegister(instr->result());
+ Register scratch2 = ToRegister(temp2);
+ Label check_bools;
+
+ // If it's not a heap number, jump to undefined check.
+ __ JumpIfNotRoot(scratch1, Heap::kHeapNumberMapRootIndex, &check_bools);
+
+ // A heap number: load value and convert to int32 using truncating function.
+ __ Ldr(dbl_scratch1, FieldMemOperand(input, HeapNumber::kValueOffset));
+ __ ECMA262ToInt32(output, dbl_scratch1, scratch1, scratch2);
+ __ B(&done);
+
+ __ Bind(&check_bools);
+
+ // true -> 1, false -> 0: compare against both roots with a conditional
+ // compare so a single branch covers them.
+ Register true_root = output;
+ Register false_root = scratch2;
+ __ LoadTrueFalseRoots(true_root, false_root);
+ __ Cmp(scratch1, true_root);
+ __ Cset(output, eq);
+ __ Ccmp(scratch1, false_root, ZFlag, ne);
+ __ B(eq, &done);
+
+ // Output contains zero, undefined is converted to zero for truncating
+ // conversions.
+ DeoptimizeIfNotRoot(input, Heap::kUndefinedValueRootIndex,
+ instr->environment());
+ } else {
+ Register output = ToRegister32(instr->result());
+
+ DoubleRegister dbl_scratch2 = ToDoubleRegister(temp2);
+ Label converted;
+
+ // Deoptimized if it's not a heap number.
+ DeoptimizeIfNotRoot(scratch1, Heap::kHeapNumberMapRootIndex,
+ instr->environment());
+
+ // A heap number: load value and convert to int32 using non-truncating
+ // function. If the result is out of range, branch to deoptimize.
+ __ Ldr(dbl_scratch1, FieldMemOperand(input, HeapNumber::kValueOffset));
+ __ TryConvertDoubleToInt32(output, dbl_scratch1, dbl_scratch2, &converted);
+ Deoptimize(instr->environment());
+
+ __ Bind(&converted);
+
+ if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ // Result 0 may stem from -0.0: inspect the sign bit of the original
+ // double and deopt if it is set.
+ __ Cmp(output, 0);
+ __ B(ne, &done);
+ __ Fmov(scratch1, dbl_scratch1);
+ DeoptimizeIfNegative(scratch1, instr->environment());
+ }
+ }
+ __ Bind(&done);
+}
+
+
+// Fast path for tagged -> int32: untag in place when the value is statically
+// known to be a smi, otherwise test the smi tag and fall back to the deferred
+// slow path (DoDeferredTaggedToI) for heap objects.
+void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
+ class DeferredTaggedToI: public LDeferredCode {
+ public:
+ DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
+ : LDeferredCode(codegen), instr_(instr) { }
+ virtual void Generate() {
+ codegen()->DoDeferredTaggedToI(instr_, instr_->value(), instr_->temp1(),
+ instr_->temp2());
+ }
+
+ virtual LInstruction* instr() { return instr_; }
+ private:
+ LTaggedToI* instr_;
+ };
+
+ Register input = ToRegister(instr->value());
+ Register output = ToRegister(instr->result());
+
+ if (instr->hydrogen()->value()->representation().IsSmi()) {
+ // NOTE(review): untags input in place rather than writing to output —
+ // presumably the lithium instruction constrains result == value here;
+ // TODO confirm against LTaggedToI's register allocation in lithium-a64.
+ __ SmiUntag(input);
+ } else {
+ DeferredTaggedToI* deferred = new(zone()) DeferredTaggedToI(this, instr);
+
+ // TODO(jbramley): We can't use JumpIfNotSmi here because the tbz it uses
+ // doesn't always have enough range. Consider making a variant of it, or a
+ // TestIsSmi helper.
+ STATIC_ASSERT(kSmiTag == 0);
+ __ Tst(input, kSmiTagMask);
+ __ B(ne, deferred->entry());
+
+ __ SmiUntag(output, input);
+ __ Bind(deferred->exit());
+ }
+}
+
+
+// Load the current JSFunction from its fixed slot in the stack frame.
+void LCodeGen::DoThisFunction(LThisFunction* instr) {
+ Register result = ToRegister(instr->result());
+ __ Ldr(result, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+}
+
+
+// Convert an object's properties to fast mode via the runtime. The object is
+// expected in x0 and the runtime call also returns its result in x0.
+void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
+ ASSERT(ToRegister(instr->value()).Is(x0));
+ ASSERT(ToRegister(instr->result()).Is(x0));
+ __ Push(x0);
+ CallRuntime(Runtime::kToFastProperties, 1, instr);
+}
+
+
+// Materialize a regexp literal: fetch the cached literal from the literals
+// array (creating it via the runtime on first use), then allocate a fresh
+// clone of it (boilerplate copy), falling back to runtime allocation when
+// new-space allocation fails.
+void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
+ ASSERT(ToRegister(instr->context()).is(cp));
+ Label materialized;
+ // Registers will be used as follows:
+ // x7 = literals array.
+ // x1 = regexp literal.
+ // x0 = regexp literal clone.
+ // x10-x12 are used as temporaries.
+ int literal_offset =
+ FixedArray::OffsetOfElementAt(instr->hydrogen()->literal_index());
+ __ LoadObject(x7, instr->hydrogen()->literals());
+ __ Ldr(x1, FieldMemOperand(x7, literal_offset));
+ __ JumpIfNotRoot(x1, Heap::kUndefinedValueRootIndex, &materialized);
+
+ // Create regexp literal using runtime function
+ // Result will be in x0.
+ __ Mov(x12, Operand(Smi::FromInt(instr->hydrogen()->literal_index())));
+ __ Mov(x11, Operand(instr->hydrogen()->pattern()));
+ __ Mov(x10, Operand(instr->hydrogen()->flags()));
+ __ Push(x7, x12, x11, x10);
+ CallRuntime(Runtime::kMaterializeRegExpLiteral, 4, instr);
+ __ Mov(x1, x0);
+
+ __ Bind(&materialized);
+ int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
+ Label allocated, runtime_allocate;
+
+ __ Allocate(size, x0, x10, x11, &runtime_allocate, TAG_OBJECT);
+ __ B(&allocated);
+
+ __ Bind(&runtime_allocate);
+ // Slow path: allocate through the runtime; size is passed as a smi.
+ __ Mov(x0, Operand(Smi::FromInt(size)));
+ __ Push(x1, x0);
+ CallRuntime(Runtime::kAllocateInNewSpace, 1, instr);
+ __ Pop(x1);
+
+ __ Bind(&allocated);
+ // Copy the content into the newly allocated memory.
+ __ CopyFields(x0, x1, CPURegList(x10, x11, x12), size / kPointerSize);
+}
+
+
+// Transition an object's elements kind by installing the target map. Simple
+// transitions just store the new map (with a write barrier); anything else
+// goes through TransitionElementsKindStub. Objects with a different map are
+// skipped (not applicable).
+void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
+ Register object = ToRegister(instr->object());
+ Register temp1 = ToRegister(instr->temp1());
+
+ Handle<Map> from_map = instr->original_map();
+ Handle<Map> to_map = instr->transitioned_map();
+ ElementsKind from_kind = instr->from_kind();
+ ElementsKind to_kind = instr->to_kind();
+
+ Label not_applicable;
+ // Fix: "&not_applicable" had been corrupted to "¬_applicable" (the
+ // "&not" prefix was mis-decoded as the HTML entity for "¬"); restore the
+ // address-of operator so the declared label is actually referenced.
+ __ CheckMap(object, temp1, from_map, &not_applicable, DONT_DO_SMI_CHECK);
+
+ if (IsSimpleMapChangeTransition(from_kind, to_kind)) {
+ Register new_map = ToRegister(instr->temp2());
+ __ Mov(new_map, Operand(to_map));
+ __ Str(new_map, FieldMemOperand(object, HeapObject::kMapOffset));
+ // Write barrier.
+ __ RecordWriteField(object, HeapObject::kMapOffset, new_map, temp1,
+ GetLinkRegisterState(), kDontSaveFPRegs);
+ } else {
+ ASSERT(ToRegister(instr->context()).is(cp));
+ PushSafepointRegistersScope scope(
+ this, Safepoint::kWithRegistersAndDoubles);
+ __ Mov(x0, object);
+ __ Mov(x1, Operand(to_map));
+ TransitionElementsKindStub stub(from_kind, to_kind);
+ __ CallStub(&stub);
+ RecordSafepointWithRegistersAndDoubles(
+ instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
+ }
+ __ Bind(&not_applicable);
+}
+
+
+// Deoptimize if the JSArray is followed by an AllocationMemento — such
+// arrays must not be handled by optimized code that assumed no memento.
+void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
+ Register object = ToRegister(instr->object());
+ Register temp1 = ToRegister(instr->temp1());
+ Register temp2 = ToRegister(instr->temp2());
+
+ Label no_memento_found;
+ __ JumpIfJSArrayHasAllocationMemento(object, temp1, temp2, &no_memento_found);
+ Deoptimize(instr->environment());
+ __ Bind(&no_memento_found);
+}
+
+
+// Truncate a double to int32 with ECMA-262 ToInt32 semantics. The result is
+// either tagged as a smi or left as a raw int32 in the W register, depending
+// on the instruction's tag_result() flag.
+void LCodeGen::DoTruncateDoubleToIntOrSmi(LTruncateDoubleToIntOrSmi* instr) {
+ DoubleRegister input = ToDoubleRegister(instr->value());
+ Register result = ToRegister(instr->result());
+ __ ECMA262ToInt32(result, input,
+ ToRegister(instr->temp1()),
+ ToRegister(instr->temp2()),
+ instr->tag_result()
+ ? MacroAssembler::SMI
+ : MacroAssembler::INT32_IN_W);
+}
+
+
+// Generic typeof: delegate to the runtime with the value on the stack.
+void LCodeGen::DoTypeof(LTypeof* instr) {
+ Register input = ToRegister(instr->value());
+ __ Push(input);
+ CallRuntime(Runtime::kTypeof, 1, instr);
+}
+
+
+// Branch on the result of comparing typeof(value) against a literal type
+// name, with a specialized inline sequence per type name. Unknown names
+// always take the false branch.
+void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
+ Handle<String> type_name = instr->type_literal();
+ Label* true_label = instr->TrueLabel(chunk_);
+ Label* false_label = instr->FalseLabel(chunk_);
+ Register value = ToRegister(instr->value());
+
+ if (type_name->Equals(heap()->number_string())) {
+ ASSERT(instr->temp1() != NULL);
+ Register map = ToRegister(instr->temp1());
+
+ __ JumpIfSmi(value, true_label);
+ __ Ldr(map, FieldMemOperand(value, HeapObject::kMapOffset));
+ __ CompareRoot(map, Heap::kHeapNumberMapRootIndex);
+ EmitBranch(instr, eq);
+
+ } else if (type_name->Equals(heap()->string_string())) {
+ ASSERT((instr->temp1() != NULL) && (instr->temp2() != NULL));
+ Register map = ToRegister(instr->temp1());
+ Register scratch = ToRegister(instr->temp2());
+
+ __ JumpIfSmi(value, false_label);
+ __ JumpIfObjectType(
+ value, map, scratch, FIRST_NONSTRING_TYPE, false_label, ge);
+ // Undetectable strings report typeof "undefined", not "string".
+ __ Ldrb(scratch, FieldMemOperand(map, Map::kBitFieldOffset));
+ EmitTestAndBranch(instr, eq, scratch, 1 << Map::kIsUndetectable);
+
+ } else if (type_name->Equals(heap()->symbol_string())) {
+ ASSERT((instr->temp1() != NULL) && (instr->temp2() != NULL));
+ Register map = ToRegister(instr->temp1());
+ Register scratch = ToRegister(instr->temp2());
+
+ __ JumpIfSmi(value, false_label);
+ __ CompareObjectType(value, map, scratch, SYMBOL_TYPE);
+ EmitBranch(instr, eq);
+
+ } else if (type_name->Equals(heap()->boolean_string())) {
+ __ JumpIfRoot(value, Heap::kTrueValueRootIndex, true_label);
+ __ CompareRoot(value, Heap::kFalseValueRootIndex);
+ EmitBranch(instr, eq);
+
+ } else if (FLAG_harmony_typeof && type_name->Equals(heap()->null_string())) {
+ __ CompareRoot(value, Heap::kNullValueRootIndex);
+ EmitBranch(instr, eq);
+
+ } else if (type_name->Equals(heap()->undefined_string())) {
+ ASSERT(instr->temp1() != NULL);
+ Register scratch = ToRegister(instr->temp1());
+
+ __ JumpIfRoot(value, Heap::kUndefinedValueRootIndex, true_label);
+ __ JumpIfSmi(value, false_label);
+ // Check for undetectable objects and jump to the true branch in this case.
+ __ Ldr(scratch, FieldMemOperand(value, HeapObject::kMapOffset));
+ __ Ldrb(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
+ EmitTestAndBranch(instr, ne, scratch, 1 << Map::kIsUndetectable);
+
+ } else if (type_name->Equals(heap()->function_string())) {
+ STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
+ ASSERT(instr->temp1() != NULL);
+ Register type = ToRegister(instr->temp1());
+
+ __ JumpIfSmi(value, false_label);
+ __ JumpIfObjectType(value, type, type, JS_FUNCTION_TYPE, true_label);
+ // HeapObject's type has been loaded into type register by JumpIfObjectType.
+ EmitCompareAndBranch(instr, eq, type, JS_FUNCTION_PROXY_TYPE);
+
+ } else if (type_name->Equals(heap()->object_string())) {
+ ASSERT((instr->temp1() != NULL) && (instr->temp2() != NULL));
+ Register map = ToRegister(instr->temp1());
+ Register scratch = ToRegister(instr->temp2());
+
+ __ JumpIfSmi(value, false_label);
+ if (!FLAG_harmony_typeof) {
+ __ JumpIfRoot(value, Heap::kNullValueRootIndex, true_label);
+ }
+ __ JumpIfObjectType(value, map, scratch,
+ FIRST_NONCALLABLE_SPEC_OBJECT_TYPE, false_label, lt);
+ __ CompareInstanceType(map, scratch, LAST_NONCALLABLE_SPEC_OBJECT_TYPE);
+ __ B(gt, false_label);
+ // Check for undetectable objects => false.
+ // Fix: the bit field lives in the map, not in the object itself; read it
+ // from the map register (already loaded by JumpIfObjectType), matching
+ // the string_string branch above.
+ __ Ldrb(scratch, FieldMemOperand(map, Map::kBitFieldOffset));
+ EmitTestAndBranch(instr, eq, scratch, 1 << Map::kIsUndetectable);
+
+ } else {
+ __ B(false_label);
+ }
+}
+
+
+// Convert an unsigned 32-bit integer to a double (Ucvtf = unsigned convert).
+void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
+ __ Ucvtf(ToDoubleRegister(instr->result()), ToRegister32(instr->value()));
+}
+
+
+// Tag an unsigned 32-bit integer as a smi, deoptimizing when the value may
+// exceed the positive smi range (i.e. its top bit may be set).
+void LCodeGen::DoUint32ToSmi(LUint32ToSmi* instr) {
+ Register value = ToRegister(instr->value());
+ Register result = ToRegister(instr->result());
+
+ if (!instr->hydrogen()->value()->HasRange() ||
+ !instr->hydrogen()->value()->range()->IsInSmiRange() ||
+ instr->hydrogen()->value()->range()->upper() == kMaxInt) {
+ // The Range class can't express upper bounds in the (kMaxInt, kMaxUint32]
+ // interval, so we treat kMaxInt as a sentinel for this entire interval.
+ DeoptimizeIfNegative(value.W(), instr->environment());
+ }
+ __ SmiTag(result, value);
+}
+
+
+// Deoptimize if the object's map is not the expected map (dynamic check
+// against a map held in a register, used by for-in).
+void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
+ Register object = ToRegister(instr->value());
+ Register map = ToRegister(instr->map());
+ Register temp = ToRegister(instr->temp());
+ __ Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset));
+ __ Cmp(map, temp);
+ DeoptimizeIf(ne, instr->environment());
+}
+
+
+// Wrap the receiver for a function call: for normal (non-strict, non-native)
+// functions, null/undefined receivers are replaced by the global receiver,
+// and non-object receivers cause a deopt. Strict-mode functions and builtins
+// receive the value unchanged.
+void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
+ Register receiver = ToRegister(instr->receiver());
+ Register function = ToRegister(instr->function());
+ Register result = ToRegister(instr->result());
+
+ // If the receiver is null or undefined, we have to pass the global object as
+ // a receiver to normal functions. Values have to be passed unchanged to
+ // builtins and strict-mode functions.
+ Label global_object, done, deopt;
+
+ if (!instr->hydrogen()->known_function()) {
+ // The function's mode is unknown statically; inspect its SharedFunctionInfo
+ // compiler hints at runtime. result is used as a scratch register here.
+ __ Ldr(result, FieldMemOperand(function,
+ JSFunction::kSharedFunctionInfoOffset));
+
+ // CompilerHints is an int32 field. See objects.h.
+ __ Ldr(result.W(),
+ FieldMemOperand(result, SharedFunctionInfo::kCompilerHintsOffset));
+
+ // Do not transform the receiver to object for strict mode functions.
+ __ Tbnz(result, SharedFunctionInfo::kStrictModeFunction, &done);
+
+ // Do not transform the receiver to object for builtins.
+ __ Tbnz(result, SharedFunctionInfo::kNative, &done);
+ }
+
+ // Normal function. Replace undefined or null with global receiver.
+ __ JumpIfRoot(receiver, Heap::kNullValueRootIndex, &global_object);
+ __ JumpIfRoot(receiver, Heap::kUndefinedValueRootIndex, &global_object);
+
+ // Deoptimize if the receiver is not a JS object.
+ __ JumpIfSmi(receiver, &deopt);
+ __ CompareObjectType(receiver, result, result, FIRST_SPEC_OBJECT_TYPE);
+ __ B(ge, &done);
+ // Otherwise, fall through to deopt.
+
+ __ Bind(&deopt);
+ Deoptimize(instr->environment());
+
+ __ Bind(&global_object);
+ // We could load directly into the result register here, but the additional
+ // branches required are likely to be more time consuming than one additional
+ // move.
+ __ Ldr(receiver, FieldMemOperand(function, JSFunction::kContextOffset));
+ __ Ldr(receiver, ContextMemOperand(receiver, Context::GLOBAL_OBJECT_INDEX));
+ __ Ldr(receiver,
+ FieldMemOperand(receiver, GlobalObject::kGlobalReceiverOffset));
+
+ __ Bind(&done);
+ __ Mov(result, receiver);
+}
+
+
+// Load a property by its smi-encoded field index: non-negative indices are
+// in-object fields; negative indices address the out-of-object properties
+// backing store (index is the negated property index plus one).
+void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
+ Register object = ToRegister(instr->object());
+ Register index = ToRegister(instr->index());
+ Register result = ToRegister(instr->result());
+
+ __ AssertSmi(index);
+
+ Label out_of_object, done;
+ __ Cmp(index, Operand(Smi::FromInt(0)));
+ __ B(lt, &out_of_object);
+
+ STATIC_ASSERT(kPointerSizeLog2 > kSmiTagSize);
+ __ Add(result, object, Operand::UntagSmiAndScale(index, kPointerSizeLog2));
+ __ Ldr(result, FieldMemOperand(result, JSObject::kHeaderSize));
+
+ __ B(&done);
+
+ __ Bind(&out_of_object);
+ __ Ldr(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
+ // Index is equal to negated out of object property index plus 1.
+ __ Sub(result, result, Operand::UntagSmiAndScale(index, kPointerSizeLog2))
;
+ __ Ldr(result, FieldMemOperand(result,
+ FixedArray::kHeaderSize - kPointerSize));
+ __ Bind(&done);
+}
+
+} } // namespace v8::internal
--- /dev/null
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_A64_LITHIUM_CODEGEN_A64_H_
+#define V8_A64_LITHIUM_CODEGEN_A64_H_
+
+#include "a64/lithium-a64.h"
+
+#include "a64/lithium-gap-resolver-a64.h"
+#include "deoptimizer.h"
+#include "lithium-codegen.h"
+#include "safepoint-table.h"
+#include "scopes.h"
+#include "v8utils.h"
+
+namespace v8 {
+namespace internal {
+
+// Forward declarations.
+class LDeferredCode;
+class SafepointGenerator;
+class BranchGenerator;
+
+// Lithium (Crankshaft) code generator for the A64 backend: walks an LChunk
+// and emits native code via the MacroAssembler, recording deoptimization and
+// safepoint metadata as it goes.
+class LCodeGen: public LCodeGenBase {
+ public:
+ LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info)
+ : LCodeGenBase(chunk, assembler, info),
+ deoptimizations_(4, info->zone()),
+ deopt_jump_table_(4, info->zone()),
+ deoptimization_literals_(8, info->zone()),
+ inlined_function_count_(0),
+ scope_(info->scope()),
+ translations_(info->zone()),
+ deferred_(8, info->zone()),
+ osr_pc_offset_(-1),
+ frame_is_built_(false),
+ safepoints_(info->zone()),
+ resolver_(this),
+ expected_safepoint_kind_(Safepoint::kSimple) {
+ PopulateDeoptimizationLiteralsWithInlinedFunctions();
+ }
+
+ // Simple accessors.
+ Scope* scope() const { return scope_; }
+
+ int LookupDestination(int block_id) const {
+ return chunk()->LookupDestination(block_id);
+ }
+
+ bool IsNextEmittedBlock(int block_id) const {
+ return LookupDestination(block_id) == GetNextEmittedBlock();
+ }
+
+ bool NeedsEagerFrame() const {
+ return GetStackSlotCount() > 0 ||
+ info()->is_non_deferred_calling() ||
+ !info()->IsStub() ||
+ info()->requires_frame();
+ }
+ bool NeedsDeferredFrame() const {
+ return !NeedsEagerFrame() && info()->is_deferred_calling();
+ }
+
+ LinkRegisterStatus GetLinkRegisterState() const {
+ return frame_is_built_ ? kLRHasBeenSaved : kLRHasNotBeenSaved;
+ }
+
+ // Try to generate code for the entire chunk, but it may fail if the
+ // chunk contains constructs we cannot handle. Returns true if the
+ // code generation attempt succeeded.
+ bool GenerateCode();
+
+ // Finish the code by setting stack height, safepoint, and bailout
+ // information on it.
+ void FinishCode(Handle<Code> code);
+
+ // Support for converting LOperands to assembler types.
+ // LOperand must be a register.
+ Register ToRegister(LOperand* op) const;
+ Register ToRegister32(LOperand* op) const;
+ Operand ToOperand(LOperand* op);
+ Operand ToOperand32I(LOperand* op);
+ Operand ToOperand32U(LOperand* op);
+ MemOperand ToMemOperand(LOperand* op) const;
+ Handle<Object> ToHandle(LConstantOperand* op) const;
+
+ // TODO(jbramley): Examine these helpers and check that they make sense.
+ // IsInteger32Constant returns true for smi constants, for example.
+ bool IsInteger32Constant(LConstantOperand* op) const;
+ bool IsSmi(LConstantOperand* op) const;
+
+ int32_t ToInteger32(LConstantOperand* op) const;
+ Smi* ToSmi(LConstantOperand* op) const;
+ double ToDouble(LConstantOperand* op) const;
+ DoubleRegister ToDoubleRegister(LOperand* op) const;
+
+ // Declare methods that deal with the individual node types.
+#define DECLARE_DO(type) void Do##type(L##type* node);
+ LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_DO)
+#undef DECLARE_DO
+
+ private:
+ // Return a double scratch register which can be used locally
+ // when generating code for a lithium instruction.
+ DoubleRegister double_scratch() { return crankshaft_fp_scratch; }
+
+ // Deferred code support.
+ void DoDeferredNumberTagD(LNumberTagD* instr);
+ void DoDeferredStackCheck(LStackCheck* instr);
+ void DoDeferredStringCharCodeAt(LStringCharCodeAt* instr);
+ void DoDeferredStringCharFromCode(LStringCharFromCode* instr);
+ void DoDeferredMathAbsTagged(LMathAbsTagged* instr,
+ Label* exit,
+ Label* allocation_entry);
+
+ enum IntegerSignedness { SIGNED_INT32, UNSIGNED_INT32 };
+ void DoDeferredNumberTagU(LInstruction* instr,
+ LOperand* value,
+ LOperand* temp1,
+ LOperand* temp2);
+ void DoDeferredTaggedToI(LTaggedToI* instr,
+ LOperand* value,
+ LOperand* temp1,
+ LOperand* temp2);
+ void DoDeferredAllocate(LAllocate* instr);
+ void DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr);
+ void DoDeferredInstanceMigration(LCheckMaps* instr, Register object);
+
+ Operand ToOperand32(LOperand* op, IntegerSignedness signedness);
+
+ static Condition TokenToCondition(Token::Value op, bool is_unsigned);
+ void EmitGoto(int block);
+ void DoGap(LGap* instr);
+
+ // Generic version of EmitBranch. It contains some code to avoid emitting a
+ // branch on the next emitted basic block where we could just fall-through.
+ // You shouldn't use that directly but rather consider one of the helper like
+ // LCodeGen::EmitBranch, LCodeGen::EmitCompareAndBranch...
+ template<class InstrType>
+ void EmitBranchGeneric(InstrType instr,
+ const BranchGenerator& branch);
+
+ template<class InstrType>
+ void EmitBranch(InstrType instr, Condition condition);
+
+ template<class InstrType>
+ void EmitCompareAndBranch(InstrType instr,
+ Condition condition,
+ const Register& lhs,
+ const Operand& rhs);
+
+ template<class InstrType>
+ void EmitTestAndBranch(InstrType instr,
+ Condition condition,
+ const Register& value,
+ uint64_t mask);
+
+ template<class InstrType>
+ void EmitBranchIfNonZeroNumber(InstrType instr,
+ const FPRegister& value,
+ const FPRegister& scratch);
+
+ template<class InstrType>
+ void EmitBranchIfHeapNumber(InstrType instr,
+ const Register& value);
+
+ template<class InstrType>
+ void EmitBranchIfRoot(InstrType instr,
+ const Register& value,
+ Heap::RootListIndex index);
+
+ // Emits optimized code to deep-copy the contents of statically known object
+ // graphs (e.g. object literal boilerplate). Expects a pointer to the
+ // allocated destination object in the result register, and a pointer to the
+ // source object in the source register.
+ void EmitDeepCopy(Handle<JSObject> object,
+ Register result,
+ Register source,
+ Register scratch,
+ int* offset,
+ AllocationSiteMode mode);
+
+ // Emits optimized code for %_IsString(x). Preserves input register.
+ // Returns the condition on which a final split to
+ // true and false label should be made, to optimize fallthrough.
+ Condition EmitIsString(Register input, Register temp1, Label* is_not_string,
+ SmiCheck check_needed);
+
+ int DefineDeoptimizationLiteral(Handle<Object> literal);
+ void PopulateDeoptimizationData(Handle<Code> code);
+ void PopulateDeoptimizationLiteralsWithInlinedFunctions();
+
+ MemOperand BuildSeqStringOperand(Register string,
+ Register temp,
+ LOperand* index,
+ String::Encoding encoding);
+ // Deoptimization helpers: each registers the environment and emits a
+ // (possibly conditional) jump to the deoptimizer entry.
+ Deoptimizer::BailoutType DeoptimizeHeader(
+ LEnvironment* environment,
+ Deoptimizer::BailoutType* override_bailout_type);
+ void Deoptimize(LEnvironment* environment);
+ void Deoptimize(LEnvironment* environment,
+ Deoptimizer::BailoutType bailout_type);
+ void DeoptimizeIf(Condition cc, LEnvironment* environment);
+ void DeoptimizeIfZero(Register rt, LEnvironment* environment);
+ void DeoptimizeIfNegative(Register rt, LEnvironment* environment);
+ void DeoptimizeIfSmi(Register rt, LEnvironment* environment);
+ void DeoptimizeIfNotSmi(Register rt, LEnvironment* environment);
+ void DeoptimizeIfRoot(Register rt,
+ Heap::RootListIndex index,
+ LEnvironment* environment);
+ void DeoptimizeIfNotRoot(Register rt,
+ Heap::RootListIndex index,
+ LEnvironment* environment);
+ void ApplyCheckIf(Condition cc, LBoundsCheck* check);
+
+ MemOperand PrepareKeyedExternalArrayOperand(Register key,
+ Register base,
+ Register scratch,
+ bool key_is_smi,
+ bool key_is_constant,
+ int constant_key,
+ ElementsKind elements_kind,
+ int additional_index);
+ void CalcKeyedArrayBaseRegister(Register base,
+ Register elements,
+ Register key,
+ bool key_is_tagged,
+ ElementsKind elements_kind);
+
+ void RegisterEnvironmentForDeoptimization(LEnvironment* environment,
+ Safepoint::DeoptMode mode);
+
+ int GetStackSlotCount() const { return chunk()->spill_slot_count(); }
+
+ void Abort(BailoutReason reason);
+
+ void AddDeferredCode(LDeferredCode* code) { deferred_.Add(code, zone()); }
+
+ // Emit frame translation commands for an environment.
+ void WriteTranslation(LEnvironment* environment, Translation* translation);
+
+ void AddToTranslation(LEnvironment* environment,
+ Translation* translation,
+ LOperand* op,
+ bool is_tagged,
+ bool is_uint32,
+ int* object_index_pointer,
+ int* dematerialized_index_pointer);
+
+ void SaveCallerDoubles();
+ void RestoreCallerDoubles();
+
+ // Code generation steps. Returns true if code generation should continue.
+ bool GeneratePrologue();
+ bool GenerateDeferredCode();
+ bool GenerateDeoptJumpTable();
+ bool GenerateSafepointTable();
+
+ // Generates the custom OSR entrypoint and sets the osr_pc_offset.
+ void GenerateOsrPrologue();
+
+ enum SafepointMode {
+ RECORD_SIMPLE_SAFEPOINT,
+ RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS
+ };
+
+ void CallCode(Handle<Code> code,
+ RelocInfo::Mode mode,
+ LInstruction* instr);
+
+ void CallCodeGeneric(Handle<Code> code,
+ RelocInfo::Mode mode,
+ LInstruction* instr,
+ SafepointMode safepoint_mode);
+
+ void CallRuntime(const Runtime::Function* function,
+ int num_arguments,
+ LInstruction* instr,
+ SaveFPRegsMode save_doubles = kDontSaveFPRegs);
+
+ void CallRuntime(Runtime::FunctionId id,
+ int num_arguments,
+ LInstruction* instr) {
+ const Runtime::Function* function = Runtime::FunctionForId(id);
+ CallRuntime(function, num_arguments, instr);
+ }
+
+ void LoadContextFromDeferred(LOperand* context);
+ void CallRuntimeFromDeferred(Runtime::FunctionId id,
+ int argc,
+ LInstruction* instr,
+ LOperand* context);
+
+ // Generate a direct call to a known function.
+ // If the function is already loaded into x1 by the caller, function_reg may
+ // be set to x1. Otherwise, it must be NoReg, and CallKnownFunction will
+ // automatically load it.
+ void CallKnownFunction(Handle<JSFunction> function,
+ int formal_parameter_count,
+ int arity,
+ LInstruction* instr,
+ Register function_reg = NoReg);
+
+ // Support for recording safepoint and position information.
+ void RecordAndWritePosition(int position) V8_OVERRIDE;
+ void RecordSafepoint(LPointerMap* pointers,
+ Safepoint::Kind kind,
+ int arguments,
+ Safepoint::DeoptMode mode);
+ void RecordSafepoint(LPointerMap* pointers, Safepoint::DeoptMode mode);
+ void RecordSafepoint(Safepoint::DeoptMode mode);
+ void RecordSafepointWithRegisters(LPointerMap* pointers,
+ int arguments,
+ Safepoint::DeoptMode mode);
+ void RecordSafepointWithRegistersAndDoubles(LPointerMap* pointers,
+ int arguments,
+ Safepoint::DeoptMode mode);
+ void RecordSafepointWithLazyDeopt(LInstruction* instr,
+ SafepointMode safepoint_mode);
+
+ void EnsureSpaceForLazyDeopt(int space_needed) V8_OVERRIDE;
+
+ ZoneList<LEnvironment*> deoptimizations_;
+ ZoneList<Deoptimizer::JumpTableEntry> deopt_jump_table_;
+ ZoneList<Handle<Object> > deoptimization_literals_;
+ int inlined_function_count_;
+ Scope* const scope_;
+ TranslationBuffer translations_;
+ ZoneList<LDeferredCode*> deferred_;
+ int osr_pc_offset_;
+ bool frame_is_built_;
+
+ // Builder that keeps track of safepoints in the code. The table itself is
+ // emitted at the end of the generated code.
+ SafepointTableBuilder safepoints_;
+
+ // Compiler from a set of parallel moves to a sequential list of moves.
+ LGapResolver resolver_;
+
+ Safepoint::Kind expected_safepoint_kind_;
+
+ int old_position_;
+
+ // RAII scope that pushes the safepoint registers (and optionally the FP
+ // registers) on construction and pops them on destruction, updating
+ // expected_safepoint_kind_ so safepoint recording matches.
+ class PushSafepointRegistersScope BASE_EMBEDDED {
+ public:
+ PushSafepointRegistersScope(LCodeGen* codegen,
+ Safepoint::Kind kind)
+ : codegen_(codegen) {
+ ASSERT(codegen_->info()->is_calling());
+ ASSERT(codegen_->expected_safepoint_kind_ == Safepoint::kSimple);
+ codegen_->expected_safepoint_kind_ = kind;
+
+ switch (codegen_->expected_safepoint_kind_) {
+ case Safepoint::kWithRegisters:
+ codegen_->masm_->PushSafepointRegisters();
+ break;
+ case Safepoint::kWithRegistersAndDoubles:
+ codegen_->masm_->PushSafepointRegisters();
+ codegen_->masm_->PushSafepointFPRegisters();
+ break;
+ default:
+ UNREACHABLE();
+ }
+ }
+
+ ~PushSafepointRegistersScope() {
+ Safepoint::Kind kind = codegen_->expected_safepoint_kind_;
+ ASSERT((kind & Safepoint::kWithRegisters) != 0);
+ switch (kind) {
+ case Safepoint::kWithRegisters:
+ codegen_->masm_->PopSafepointRegisters();
+ break;
+ case Safepoint::kWithRegistersAndDoubles:
+ codegen_->masm_->PopSafepointFPRegisters();
+ codegen_->masm_->PopSafepointRegisters();
+ break;
+ default:
+ UNREACHABLE();
+ }
+ codegen_->expected_safepoint_kind_ = Safepoint::kSimple;
+ }
+
+ private:
+ LCodeGen* codegen_;
+ };
+
+ friend class LDeferredCode;
+ friend class SafepointGenerator;
+ DISALLOW_COPY_AND_ASSIGN(LCodeGen);
+};
+
+
+// Base class for out-of-line (slow-path) code emitted after the main body.
+// Subclasses implement Generate(); construction registers the deferred code
+// with the code generator, which emits it in GenerateDeferredCode().
+class LDeferredCode: public ZoneObject {
+ public:
+ explicit LDeferredCode(LCodeGen* codegen)
+ : codegen_(codegen),
+ external_exit_(NULL),
+ instruction_index_(codegen->current_instruction_) {
+ codegen->AddDeferredCode(this);
+ }
+
+ virtual ~LDeferredCode() { }
+ virtual void Generate() = 0;
+ virtual LInstruction* instr() = 0;
+
+ // An externally provided exit label overrides the built-in exit_.
+ void SetExit(Label* exit) { external_exit_ = exit; }
+ Label* entry() { return &entry_; }
+ Label* exit() { return (external_exit_ != NULL) ? external_exit_ : &exit_; }
+ int instruction_index() const { return instruction_index_; }
+
+ protected:
+ LCodeGen* codegen() const { return codegen_; }
+ MacroAssembler* masm() const { return codegen_->masm(); }
+
+ private:
+ LCodeGen* codegen_;
+ Label entry_;
+ Label exit_;
+ Label* external_exit_;
+ int instruction_index_;
+};
+
+
+// This is the abstract class used by EmitBranchGeneric.
+// It is used to emit code for conditional branching. The Emit() function
+// emits code to branch when the condition holds and EmitInverted() emits
+// the branch when the inverted condition is verified.
+//
+// For actual examples of condition see the concrete implementation in
+// lithium-codegen-a64.cc (e.g. BranchOnCondition, CompareAndBranch).
+// Abstract strategy used by EmitBranchGeneric: Emit() branches when the
+// condition holds, EmitInverted() when its inverse holds (used so the code
+// generator can fall through to the next emitted block when possible).
+class BranchGenerator BASE_EMBEDDED {
+ public:
+ explicit BranchGenerator(LCodeGen* codegen)
+ : codegen_(codegen) { }
+
+ virtual ~BranchGenerator() { }
+
+ virtual void Emit(Label* label) const = 0;
+ virtual void EmitInverted(Label* label) const = 0;
+
+ protected:
+ MacroAssembler* masm() const { return codegen_->masm(); }
+
+ LCodeGen* codegen_;
+};
+
+} } // namespace v8::internal
+
+#endif // V8_A64_LITHIUM_CODEGEN_A64_H_
--- /dev/null
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "a64/lithium-gap-resolver-a64.h"
+#include "a64/lithium-codegen-a64.h"
+
+namespace v8 {
+namespace internal {
+
+// We use the root register to spill a value while breaking a cycle in parallel
+// moves. We don't need access to roots while resolving the move list and using
+// the root register has two advantages:
+// - It is not in crankshaft allocatable registers list, so it can't interfere
+// with any of the moves we are resolving.
+// - We don't need to push it on the stack, as we can reload it with its value
+// once we have resolved a cycle.
+#define kSavedValue root
+
+// Initial state: empty move worklist, no cycle in progress, and no need to
+// restore the root register (used as the cycle-breaking scratch, see above).
+LGapResolver::LGapResolver(LCodeGen* owner)
+ : cgen_(owner), moves_(32, owner->zone()), root_index_(0), in_cycle_(false),
+ saved_destination_(NULL), need_to_restore_root_(false) { }
+
+
+#define __ ACCESS_MASM(cgen_->masm())
+
+// Resolve a parallel move into a sequential list of moves: first perform all
+// register/stack moves (breaking cycles via the root register), then the
+// constant-source moves, and finally restore the root register if it was
+// clobbered as the cycle scratch.
+void LGapResolver::Resolve(LParallelMove* parallel_move) {
+ ASSERT(moves_.is_empty());
+
+ // Build up a worklist of moves.
+ BuildInitialMoveList(parallel_move);
+
+ for (int i = 0; i < moves_.length(); ++i) {
+ LMoveOperands move = moves_[i];
+
+ // Skip constants to perform them last. They don't block other moves
+ // and skipping such moves with register destinations keeps those
+ // registers free for the whole algorithm.
+ if (!move.IsEliminated() && !move.source()->IsConstantOperand()) {
+ root_index_ = i; // Any cycle is found when we reach this move again.
+ PerformMove(i);
+ if (in_cycle_) RestoreValue();
+ }
+ }
+
+ // Perform the moves with constant sources.
+ for (int i = 0; i < moves_.length(); ++i) {
+ LMoveOperands move = moves_[i];
+
+ if (!move.IsEliminated()) {
+ ASSERT(move.source()->IsConstantOperand());
+ EmitMove(i);
+ }
+ }
+
+ // The root register was used as the cycle-breaking scratch; reload it.
+ if (need_to_restore_root_) {
+ ASSERT(kSavedValue.Is(root));
+ __ InitializeRootRegister();
+ }
+
+ moves_.Rewind(0);
+}
+
+
+void LGapResolver::BuildInitialMoveList(LParallelMove* parallel_move) {
+ // Perform a linear sweep of the moves to add them to the initial list of
+ // moves to perform, ignoring any move that is redundant (the source is
+ // the same as the destination, the destination is ignored and
+ // unallocated, or the move was already eliminated).
+ const ZoneList<LMoveOperands>* moves = parallel_move->move_operands();
+ for (int i = 0; i < moves->length(); ++i) {
+ LMoveOperands move = moves->at(i);
+ if (!move.IsRedundant()) moves_.Add(move, cgen_->zone());
+ }
+ Verify();
+}
+
+
+void LGapResolver::PerformMove(int index) {
+  // Each call to this function performs a move and deletes it from the move
+  // graph.  We first recursively perform any move blocking this one.  We
+  // mark a move as "pending" on entry to PerformMove in order to detect
+  // cycles in the move graph.
+  LMoveOperands& current_move = moves_[index];
+
+  ASSERT(!current_move.IsPending());
+  ASSERT(!current_move.IsRedundant());
+
+  // Clear this move's destination to indicate a pending move.  The actual
+  // destination is saved in a stack allocated local.  Multiple moves can
+  // be pending because this function is recursive.
+  ASSERT(current_move.source() != NULL);  // Otherwise it will look eliminated.
+  LOperand* destination = current_move.destination();
+  current_move.set_destination(NULL);
+
+  // Perform a depth-first traversal of the move graph to resolve
+  // dependencies.  Any unperformed, unpending move with a source the same
+  // as this one's destination blocks this one so recursively perform all
+  // such moves.
+  for (int i = 0; i < moves_.length(); ++i) {
+    LMoveOperands other_move = moves_[i];
+    if (other_move.Blocks(destination) && !other_move.IsPending()) {
+      PerformMove(i);
+      // If there is a blocking, pending move it must be moves_[root_index_]
+      // and all other moves with the same source as moves_[root_index_] are
+      // successfully executed (because they are cycle-free) by this loop.
+    }
+  }
+
+  // We are about to resolve this move and don't need it marked as
+  // pending, so restore its destination.
+  current_move.set_destination(destination);
+
+  // The move may be blocked on a pending move, which must be the starting move.
+  // In this case, we have a cycle, and we save the source of this move to
+  // a scratch register to break it.
+  LMoveOperands other_move = moves_[root_index_];
+  if (other_move.Blocks(destination)) {
+    ASSERT(other_move.IsPending());
+    BreakCycle(index);
+    return;
+  }
+
+  // This move is no longer blocked.
+  EmitMove(index);
+}
+
+
+void LGapResolver::Verify() {
+#ifdef ENABLE_SLOW_ASSERTS
+ // No operand should be the destination for more than one move.
+ for (int i = 0; i < moves_.length(); ++i) {
+ LOperand* destination = moves_[i].destination();
+ for (int j = i + 1; j < moves_.length(); ++j) {
+ SLOW_ASSERT(!destination->Equals(moves_[j].destination()));
+ }
+ }
+#endif
+}
+
+
+void LGapResolver::BreakCycle(int index) {
+ ASSERT(moves_[index].destination()->Equals(moves_[root_index_].source()));
+ ASSERT(!in_cycle_);
+
+ // We use a register which is not allocatable by crankshaft to break the cycle
+ // to be sure it doesn't interfere with the moves we are resolving.
+ ASSERT(!kSavedValue.IsAllocatable());
+ need_to_restore_root_ = true;
+
+ // We save in a register the source of that move and we remember its
+ // destination. Then we mark this move as resolved so the cycle is
+ // broken and we can perform the other moves.
+ in_cycle_ = true;
+ LOperand* source = moves_[index].source();
+ saved_destination_ = moves_[index].destination();
+
+ if (source->IsRegister()) {
+ __ Mov(kSavedValue, cgen_->ToRegister(source));
+ } else if (source->IsStackSlot()) {
+ __ Ldr(kSavedValue, cgen_->ToMemOperand(source));
+ } else if (source->IsDoubleRegister()) {
+ // TODO(all): We should use a double register to store the value to avoid
+ // the penalty of the mov across register banks. We are going to reserve
+ // d31 to hold 0.0 value. We could clobber this register while breaking the
+ // cycle and restore it after like we do with the root register.
+ // LGapResolver::RestoreValue() will need to be updated as well when we'll
+ // do that.
+ __ Fmov(kSavedValue, cgen_->ToDoubleRegister(source));
+ } else if (source->IsDoubleStackSlot()) {
+ __ Ldr(kSavedValue, cgen_->ToMemOperand(source));
+ } else {
+ UNREACHABLE();
+ }
+
+ // Mark this move as resolved.
+ // This move will be actually performed by moving the saved value to this
+ // move's destination in LGapResolver::RestoreValue().
+ moves_[index].Eliminate();
+}
+
+
+void LGapResolver::RestoreValue() {
+ ASSERT(in_cycle_);
+ ASSERT(saved_destination_ != NULL);
+
+ if (saved_destination_->IsRegister()) {
+ __ Mov(cgen_->ToRegister(saved_destination_), kSavedValue);
+ } else if (saved_destination_->IsStackSlot()) {
+ __ Str(kSavedValue, cgen_->ToMemOperand(saved_destination_));
+ } else if (saved_destination_->IsDoubleRegister()) {
+ __ Fmov(cgen_->ToDoubleRegister(saved_destination_), kSavedValue);
+ } else if (saved_destination_->IsDoubleStackSlot()) {
+ __ Str(kSavedValue, cgen_->ToMemOperand(saved_destination_));
+ } else {
+ UNREACHABLE();
+ }
+
+ in_cycle_ = false;
+ saved_destination_ = NULL;
+}
+
+
+void LGapResolver::EmitMove(int index) {
+ LOperand* source = moves_[index].source();
+ LOperand* destination = moves_[index].destination();
+
+ // Dispatch on the source and destination operand kinds. Not all
+ // combinations are possible.
+
+ if (source->IsRegister()) {
+ Register source_register = cgen_->ToRegister(source);
+ if (destination->IsRegister()) {
+ __ Mov(cgen_->ToRegister(destination), source_register);
+ } else {
+ ASSERT(destination->IsStackSlot());
+ __ Str(source_register, cgen_->ToMemOperand(destination));
+ }
+
+ } else if (source->IsStackSlot()) {
+ MemOperand source_operand = cgen_->ToMemOperand(source);
+ if (destination->IsRegister()) {
+ __ Ldr(cgen_->ToRegister(destination), source_operand);
+ } else {
+ ASSERT(destination->IsStackSlot());
+ EmitStackSlotMove(index);
+ }
+
+ } else if (source->IsConstantOperand()) {
+ LConstantOperand* constant_source = LConstantOperand::cast(source);
+ if (destination->IsRegister()) {
+ Register dst = cgen_->ToRegister(destination);
+ if (cgen_->IsSmi(constant_source)) {
+ __ Mov(dst, Operand(cgen_->ToSmi(constant_source)));
+ } else if (cgen_->IsInteger32Constant(constant_source)) {
+ __ Mov(dst, cgen_->ToInteger32(constant_source));
+ } else {
+ __ LoadObject(dst, cgen_->ToHandle(constant_source));
+ }
+ } else if (destination->IsDoubleRegister()) {
+ DoubleRegister result = cgen_->ToDoubleRegister(destination);
+ __ Fmov(result, cgen_->ToDouble(constant_source));
+ } else {
+ ASSERT(destination->IsStackSlot());
+ ASSERT(!in_cycle_); // Constant moves happen after all cycles are gone.
+ need_to_restore_root_ = true;
+ if (cgen_->IsSmi(constant_source)) {
+ __ Mov(kSavedValue, Operand(cgen_->ToSmi(constant_source)));
+ } else if (cgen_->IsInteger32Constant(constant_source)) {
+ __ Mov(kSavedValue, cgen_->ToInteger32(constant_source));
+ } else {
+ __ LoadObject(kSavedValue, cgen_->ToHandle(constant_source));
+ }
+ __ Str(kSavedValue, cgen_->ToMemOperand(destination));
+ }
+
+ } else if (source->IsDoubleRegister()) {
+ DoubleRegister src = cgen_->ToDoubleRegister(source);
+ if (destination->IsDoubleRegister()) {
+ __ Fmov(cgen_->ToDoubleRegister(destination), src);
+ } else {
+ ASSERT(destination->IsDoubleStackSlot());
+ __ Str(src, cgen_->ToMemOperand(destination));
+ }
+
+ } else if (source->IsDoubleStackSlot()) {
+ MemOperand src = cgen_->ToMemOperand(source);
+ if (destination->IsDoubleRegister()) {
+ __ Ldr(cgen_->ToDoubleRegister(destination), src);
+ } else {
+ ASSERT(destination->IsDoubleStackSlot());
+ EmitStackSlotMove(index);
+ }
+
+ } else {
+ UNREACHABLE();
+ }
+
+ // The move has been emitted, we can eliminate it.
+ moves_[index].Eliminate();
+}
+
+
+void LGapResolver::EmitStackSlotMove(int index) {
+ // We need a temp register to perform a stack slot to stack slot move, and
+ // the register must not be involved in breaking cycles.
+
+ // Use the Crankshaft double scratch register as the temporary.
+ DoubleRegister temp = crankshaft_fp_scratch;
+
+ LOperand* src = moves_[index].source();
+ LOperand* dst = moves_[index].destination();
+
+ ASSERT(src->IsStackSlot());
+ ASSERT(dst->IsStackSlot());
+ __ Ldr(temp, cgen_->ToMemOperand(src));
+ __ Str(temp, cgen_->ToMemOperand(dst));
+}
+
+} } // namespace v8::internal
--- /dev/null
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_A64_LITHIUM_GAP_RESOLVER_A64_H_
+#define V8_A64_LITHIUM_GAP_RESOLVER_A64_H_
+
+#include "v8.h"
+
+#include "lithium.h"
+
+namespace v8 {
+namespace internal {
+
+class LCodeGen;
+class LGapResolver;
+
+class LGapResolver BASE_EMBEDDED {
+ public:
+ explicit LGapResolver(LCodeGen* owner);
+
+ // Resolve a set of parallel moves, emitting assembler instructions.
+ void Resolve(LParallelMove* parallel_move);
+
+ private:
+ // Build the initial list of moves.
+ void BuildInitialMoveList(LParallelMove* parallel_move);
+
+ // Perform the move at the moves_ index in question (possibly requiring
+ // other moves to satisfy dependencies).
+ void PerformMove(int index);
+
+ // If a cycle is found in the series of moves, save the blocking value to
+ // a scratch register. The cycle must be found by hitting the root of the
+ // depth-first search.
+ void BreakCycle(int index);
+
+ // After a cycle has been resolved, restore the value from the scratch
+ // register to its proper destination.
+ void RestoreValue();
+
+ // Emit a move and remove it from the move graph.
+ void EmitMove(int index);
+
+ // Emit a move from one stack slot to another.
+ void EmitStackSlotMove(int index);
+
+ // Verify the move list before performing moves.
+ void Verify();
+
+ LCodeGen* cgen_;
+
+ // List of moves not yet resolved.
+ ZoneList<LMoveOperands> moves_;
+
+ int root_index_;
+ bool in_cycle_;
+ LOperand* saved_destination_;
+
+ // We use the root register as a scratch in a few places. When that happens,
+ // this flag is set to indicate that it needs to be restored.
+ bool need_to_restore_root_;
+};
+
+} } // namespace v8::internal
+
+#endif // V8_A64_LITHIUM_GAP_RESOLVER_A64_H_
--- /dev/null
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_A64_MACRO_ASSEMBLER_A64_INL_H_
+#define V8_A64_MACRO_ASSEMBLER_A64_INL_H_
+
+#include <ctype.h>
+
+#include "v8globals.h"
+#include "globals.h"
+
+#include "a64/assembler-a64.h"
+#include "a64/assembler-a64-inl.h"
+#include "a64/macro-assembler-a64.h"
+#include "a64/instrument-a64.h"
+
+
+namespace v8 {
+namespace internal {
+
+
+MemOperand FieldMemOperand(Register object, int offset) {
+ return MemOperand(object, offset - kHeapObjectTag);
+}
+
+
+MemOperand UntagSmiFieldMemOperand(Register object, int offset) {
+ return UntagSmiMemOperand(object, offset - kHeapObjectTag);
+}
+
+
+MemOperand UntagSmiMemOperand(Register object, int offset) {
+ // Assumes that Smis are shifted by 32 bits and little endianness.
+ STATIC_ASSERT(kSmiShift == 32);
+ return MemOperand(object, offset + (kSmiShift / kBitsPerByte));
+}
+
+
+Handle<Object> MacroAssembler::CodeObject() {
+ ASSERT(!code_object_.is_null());
+ return code_object_;
+}
+
+
+void MacroAssembler::And(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ LogicalMacro(rd, rn, operand, AND);
+}
+
+
+void MacroAssembler::Ands(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ LogicalMacro(rd, rn, operand, ANDS);
+}
+
+
+void MacroAssembler::Tst(const Register& rn,
+ const Operand& operand) {
+ ASSERT(allow_macro_instructions_);
+ LogicalMacro(AppropriateZeroRegFor(rn), rn, operand, ANDS);
+}
+
+
+void MacroAssembler::Bic(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ LogicalMacro(rd, rn, operand, BIC);
+}
+
+
+void MacroAssembler::Bics(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ LogicalMacro(rd, rn, operand, BICS);
+}
+
+
+void MacroAssembler::Orr(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ LogicalMacro(rd, rn, operand, ORR);
+}
+
+
+void MacroAssembler::Orn(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ LogicalMacro(rd, rn, operand, ORN);
+}
+
+
+void MacroAssembler::Eor(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ LogicalMacro(rd, rn, operand, EOR);
+}
+
+
+void MacroAssembler::Eon(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ LogicalMacro(rd, rn, operand, EON);
+}
+
+
+void MacroAssembler::Ccmp(const Register& rn,
+ const Operand& operand,
+ StatusFlags nzcv,
+ Condition cond) {
+ ASSERT(allow_macro_instructions_);
+ if (operand.IsImmediate() && (operand.immediate() < 0)) {
+ ConditionalCompareMacro(rn, -operand.immediate(), nzcv, cond, CCMN);
+ } else {
+ ConditionalCompareMacro(rn, operand, nzcv, cond, CCMP);
+ }
+}
+
+
+void MacroAssembler::Ccmn(const Register& rn,
+ const Operand& operand,
+ StatusFlags nzcv,
+ Condition cond) {
+ ASSERT(allow_macro_instructions_);
+ if (operand.IsImmediate() && (operand.immediate() < 0)) {
+ ConditionalCompareMacro(rn, -operand.immediate(), nzcv, cond, CCMP);
+ } else {
+ ConditionalCompareMacro(rn, operand, nzcv, cond, CCMN);
+ }
+}
+
+
+void MacroAssembler::Add(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ ASSERT(allow_macro_instructions_);
+ if (operand.IsImmediate() && (operand.immediate() < 0)) {
+ AddSubMacro(rd, rn, -operand.immediate(), LeaveFlags, SUB);
+ } else {
+ AddSubMacro(rd, rn, operand, LeaveFlags, ADD);
+ }
+}
+
+void MacroAssembler::Adds(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ ASSERT(allow_macro_instructions_);
+ if (operand.IsImmediate() && (operand.immediate() < 0)) {
+ AddSubMacro(rd, rn, -operand.immediate(), SetFlags, SUB);
+ } else {
+ AddSubMacro(rd, rn, operand, SetFlags, ADD);
+ }
+}
+
+
+void MacroAssembler::Sub(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ ASSERT(allow_macro_instructions_);
+ if (operand.IsImmediate() && (operand.immediate() < 0)) {
+ AddSubMacro(rd, rn, -operand.immediate(), LeaveFlags, ADD);
+ } else {
+ AddSubMacro(rd, rn, operand, LeaveFlags, SUB);
+ }
+}
+
+
+void MacroAssembler::Subs(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ ASSERT(allow_macro_instructions_);
+ if (operand.IsImmediate() && (operand.immediate() < 0)) {
+ AddSubMacro(rd, rn, -operand.immediate(), SetFlags, ADD);
+ } else {
+ AddSubMacro(rd, rn, operand, SetFlags, SUB);
+ }
+}
+
+
+void MacroAssembler::Cmn(const Register& rn, const Operand& operand) {
+ ASSERT(allow_macro_instructions_);
+ Adds(AppropriateZeroRegFor(rn), rn, operand);
+}
+
+
+void MacroAssembler::Cmp(const Register& rn, const Operand& operand) {
+ ASSERT(allow_macro_instructions_);
+ Subs(AppropriateZeroRegFor(rn), rn, operand);
+}
+
+
+void MacroAssembler::Neg(const Register& rd,
+ const Operand& operand) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ if (operand.IsImmediate()) {
+ Mov(rd, -operand.immediate());
+ } else {
+ Sub(rd, AppropriateZeroRegFor(rd), operand);
+ }
+}
+
+
+void MacroAssembler::Negs(const Register& rd,
+ const Operand& operand) {
+ ASSERT(allow_macro_instructions_);
+ Subs(rd, AppropriateZeroRegFor(rd), operand);
+}
+
+
+void MacroAssembler::Adc(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ AddSubWithCarryMacro(rd, rn, operand, LeaveFlags, ADC);
+}
+
+
+void MacroAssembler::Adcs(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ AddSubWithCarryMacro(rd, rn, operand, SetFlags, ADC);
+}
+
+
+void MacroAssembler::Sbc(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ AddSubWithCarryMacro(rd, rn, operand, LeaveFlags, SBC);
+}
+
+
+void MacroAssembler::Sbcs(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ AddSubWithCarryMacro(rd, rn, operand, SetFlags, SBC);
+}
+
+
+void MacroAssembler::Ngc(const Register& rd,
+ const Operand& operand) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ Register zr = AppropriateZeroRegFor(rd);
+ Sbc(rd, zr, operand);
+}
+
+
+void MacroAssembler::Ngcs(const Register& rd,
+ const Operand& operand) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ Register zr = AppropriateZeroRegFor(rd);
+ Sbcs(rd, zr, operand);
+}
+
+
+void MacroAssembler::Mvn(const Register& rd, uint64_t imm) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ Mov(rd, ~imm);
+}
+
+
+#define DEFINE_FUNCTION(FN, REGTYPE, REG, OP) \
+void MacroAssembler::FN(const REGTYPE REG, const MemOperand& addr) { \
+ ASSERT(allow_macro_instructions_); \
+ LoadStoreMacro(REG, addr, OP); \
+}
+LS_MACRO_LIST(DEFINE_FUNCTION)
+#undef DEFINE_FUNCTION
+
+
+void MacroAssembler::Adr(const Register& rd, Label* label) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ adr(rd, label);
+}
+
+
+void MacroAssembler::Asr(const Register& rd,
+ const Register& rn,
+ unsigned shift) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ asr(rd, rn, shift);
+}
+
+
+void MacroAssembler::Asr(const Register& rd,
+ const Register& rn,
+ const Register& rm) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ asrv(rd, rn, rm);
+}
+
+
+void MacroAssembler::B(Label* label) {
+ b(label);
+}
+
+
+void MacroAssembler::B(Condition cond, Label* label) {
+ ASSERT(allow_macro_instructions_);
+ B(label, cond);
+}
+
+
+void MacroAssembler::B(Label* label, Condition cond) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT((cond != al) && (cond != nv));
+ b(label, cond);
+}
+
+
+void MacroAssembler::Bfi(const Register& rd,
+ const Register& rn,
+ unsigned lsb,
+ unsigned width) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ bfi(rd, rn, lsb, width);
+}
+
+
+void MacroAssembler::Bfxil(const Register& rd,
+ const Register& rn,
+ unsigned lsb,
+ unsigned width) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ bfxil(rd, rn, lsb, width);
+}
+
+
+void MacroAssembler::Bind(Label* label) {
+ ASSERT(allow_macro_instructions_);
+ bind(label);
+}
+
+
+void MacroAssembler::Bl(Label* label) {
+ ASSERT(allow_macro_instructions_);
+ bl(label);
+}
+
+
+void MacroAssembler::Blr(const Register& xn) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!xn.IsZero());
+ blr(xn);
+}
+
+
+void MacroAssembler::Br(const Register& xn) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!xn.IsZero());
+ br(xn);
+}
+
+
+void MacroAssembler::Brk(int code) {
+ ASSERT(allow_macro_instructions_);
+ brk(code);
+}
+
+
+void MacroAssembler::Cbnz(const Register& rt, Label* label) {
+ ASSERT(allow_macro_instructions_);
+ cbnz(rt, label);
+}
+
+
+void MacroAssembler::Cbz(const Register& rt, Label* label) {
+ ASSERT(allow_macro_instructions_);
+ cbz(rt, label);
+}
+
+
+void MacroAssembler::Cinc(const Register& rd,
+ const Register& rn,
+ Condition cond) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ ASSERT((cond != al) && (cond != nv));
+ cinc(rd, rn, cond);
+}
+
+
+void MacroAssembler::Cinv(const Register& rd,
+ const Register& rn,
+ Condition cond) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ ASSERT((cond != al) && (cond != nv));
+ cinv(rd, rn, cond);
+}
+
+
+void MacroAssembler::Cls(const Register& rd, const Register& rn) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ cls(rd, rn);
+}
+
+
+void MacroAssembler::Clz(const Register& rd, const Register& rn) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ clz(rd, rn);
+}
+
+
+void MacroAssembler::Cneg(const Register& rd,
+ const Register& rn,
+ Condition cond) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ ASSERT((cond != al) && (cond != nv));
+ cneg(rd, rn, cond);
+}
+
+
+// Conditionally zero the destination register. Only X registers are supported
+// due to the truncation side-effect when used on W registers.
+void MacroAssembler::CzeroX(const Register& rd,
+ Condition cond) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsSP() && rd.Is64Bits());
+ ASSERT((cond != al) && (cond != nv));
+ csel(rd, xzr, rd, cond);
+}
+
+
+// Conditionally move a value into the destination register. Only X registers
+// are supported due to the truncation side-effect when used on W registers.
+void MacroAssembler::CmovX(const Register& rd,
+ const Register& rn,
+ Condition cond) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsSP());
+ ASSERT(rd.Is64Bits() && rn.Is64Bits());
+ ASSERT((cond != al) && (cond != nv));
+ if (!rd.is(rn)) {
+ csel(rd, rn, rd, cond);
+ }
+}
+
+
+void MacroAssembler::Cset(const Register& rd, Condition cond) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ ASSERT((cond != al) && (cond != nv));
+ cset(rd, cond);
+}
+
+
+void MacroAssembler::Csetm(const Register& rd, Condition cond) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ ASSERT((cond != al) && (cond != nv));
+ csetm(rd, cond);
+}
+
+
+void MacroAssembler::Csinc(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ Condition cond) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ ASSERT((cond != al) && (cond != nv));
+ csinc(rd, rn, rm, cond);
+}
+
+
+void MacroAssembler::Csinv(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ Condition cond) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ ASSERT((cond != al) && (cond != nv));
+ csinv(rd, rn, rm, cond);
+}
+
+
+void MacroAssembler::Csneg(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ Condition cond) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ ASSERT((cond != al) && (cond != nv));
+ csneg(rd, rn, rm, cond);
+}
+
+
+void MacroAssembler::Dmb(BarrierDomain domain, BarrierType type) {
+ ASSERT(allow_macro_instructions_);
+ dmb(domain, type);
+}
+
+
+void MacroAssembler::Dsb(BarrierDomain domain, BarrierType type) {
+ ASSERT(allow_macro_instructions_);
+ dsb(domain, type);
+}
+
+
+void MacroAssembler::Debug(const char* message, uint32_t code, Instr params) {
+ ASSERT(allow_macro_instructions_);
+ debug(message, code, params);
+}
+
+
+void MacroAssembler::Extr(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ unsigned lsb) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ extr(rd, rn, rm, lsb);
+}
+
+
+void MacroAssembler::Fabs(const FPRegister& fd, const FPRegister& fn) {
+ ASSERT(allow_macro_instructions_);
+ fabs(fd, fn);
+}
+
+
+void MacroAssembler::Fadd(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm) {
+ ASSERT(allow_macro_instructions_);
+ fadd(fd, fn, fm);
+}
+
+
+void MacroAssembler::Fccmp(const FPRegister& fn,
+ const FPRegister& fm,
+ StatusFlags nzcv,
+ Condition cond) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT((cond != al) && (cond != nv));
+ fccmp(fn, fm, nzcv, cond);
+}
+
+
+void MacroAssembler::Fcmp(const FPRegister& fn, const FPRegister& fm) {
+ ASSERT(allow_macro_instructions_);
+ fcmp(fn, fm);
+}
+
+
+void MacroAssembler::Fcmp(const FPRegister& fn, double value) {
+ ASSERT(allow_macro_instructions_);
+ if (value != 0.0) {
+ FPRegister tmp = AppropriateTempFor(fn);
+ Fmov(tmp, value);
+ fcmp(fn, tmp);
+ } else {
+ fcmp(fn, value);
+ }
+}
+
+
+void MacroAssembler::Fcsel(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm,
+ Condition cond) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT((cond != al) && (cond != nv));
+ fcsel(fd, fn, fm, cond);
+}
+
+
+void MacroAssembler::Fcvt(const FPRegister& fd, const FPRegister& fn) {
+ ASSERT(allow_macro_instructions_);
+ fcvt(fd, fn);
+}
+
+
+void MacroAssembler::Fcvtas(const Register& rd, const FPRegister& fn) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ fcvtas(rd, fn);
+}
+
+
+void MacroAssembler::Fcvtau(const Register& rd, const FPRegister& fn) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ fcvtau(rd, fn);
+}
+
+
+void MacroAssembler::Fcvtms(const Register& rd, const FPRegister& fn) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ fcvtms(rd, fn);
+}
+
+
+void MacroAssembler::Fcvtmu(const Register& rd, const FPRegister& fn) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ fcvtmu(rd, fn);
+}
+
+
+void MacroAssembler::Fcvtns(const Register& rd, const FPRegister& fn) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ fcvtns(rd, fn);
+}
+
+
+void MacroAssembler::Fcvtnu(const Register& rd, const FPRegister& fn) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ fcvtnu(rd, fn);
+}
+
+
+void MacroAssembler::Fcvtzs(const Register& rd, const FPRegister& fn) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ fcvtzs(rd, fn);
+}
+void MacroAssembler::Fcvtzu(const Register& rd, const FPRegister& fn) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ fcvtzu(rd, fn);
+}
+
+
+void MacroAssembler::Fdiv(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm) {
+ ASSERT(allow_macro_instructions_);
+ fdiv(fd, fn, fm);
+}
+
+
+void MacroAssembler::Fmadd(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm,
+ const FPRegister& fa) {
+ ASSERT(allow_macro_instructions_);
+ fmadd(fd, fn, fm, fa);
+}
+
+
+void MacroAssembler::Fmax(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm) {
+ ASSERT(allow_macro_instructions_);
+ fmax(fd, fn, fm);
+}
+
+
+void MacroAssembler::Fmaxnm(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm) {
+ ASSERT(allow_macro_instructions_);
+ fmaxnm(fd, fn, fm);
+}
+
+
+void MacroAssembler::Fmin(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm) {
+ ASSERT(allow_macro_instructions_);
+ fmin(fd, fn, fm);
+}
+
+
+void MacroAssembler::Fminnm(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm) {
+ ASSERT(allow_macro_instructions_);
+ fminnm(fd, fn, fm);
+}
+
+
+void MacroAssembler::Fmov(FPRegister fd, FPRegister fn) {
+ ASSERT(allow_macro_instructions_);
+ // Only emit an instruction if fd and fn are different, and they are both D
+ // registers. fmov(s0, s0) is not a no-op because it clears the top word of
+ // d0. Technically, fmov(d0, d0) is not a no-op either because it clears the
+ // top of q0, but FPRegister does not currently support Q registers.
+ if (!fd.Is(fn) || !fd.Is64Bits()) {
+ fmov(fd, fn);
+ }
+}
+
+
+void MacroAssembler::Fmov(FPRegister fd, Register rn) {
+ ASSERT(allow_macro_instructions_);
+ fmov(fd, rn);
+}
+
+
+void MacroAssembler::Fmov(FPRegister fd, double imm) {
+ ASSERT(allow_macro_instructions_);
+ if ((fd.Is64Bits() && IsImmFP64(imm)) ||
+ (fd.Is32Bits() && IsImmFP32(imm)) ||
+ ((imm == 0.0) && (copysign(1.0, imm) == 1.0))) {
+ // These cases can be handled by the Assembler.
+ fmov(fd, imm);
+ } else {
+ // TODO(all): The Assembler would try to relocate the immediate with
+ // Assembler::ldr(const FPRegister& ft, double imm) but it is not
+ // implemented yet.
+ if (fd.SizeInBits() == kDRegSize) {
+ Mov(Tmp0(), double_to_rawbits(imm));
+ Fmov(fd, Tmp0());
+ } else {
+ ASSERT(fd.SizeInBits() == kSRegSize);
+ Mov(WTmp0(), float_to_rawbits(static_cast<float>(imm)));
+ Fmov(fd, WTmp0());
+ }
+ }
+}
+
+
+void MacroAssembler::Fmov(Register rd, FPRegister fn) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ fmov(rd, fn);
+}
+
+
+void MacroAssembler::Fmsub(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm,
+ const FPRegister& fa) {
+ ASSERT(allow_macro_instructions_);
+ fmsub(fd, fn, fm, fa);
+}
+
+
+void MacroAssembler::Fmul(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm) {
+ ASSERT(allow_macro_instructions_);
+ fmul(fd, fn, fm);
+}
+
+
+void MacroAssembler::Fneg(const FPRegister& fd, const FPRegister& fn) {
+ ASSERT(allow_macro_instructions_);
+ fneg(fd, fn);
+}
+
+
+void MacroAssembler::Fnmadd(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm,
+ const FPRegister& fa) {
+ ASSERT(allow_macro_instructions_);
+ fnmadd(fd, fn, fm, fa);
+}
+
+
+void MacroAssembler::Fnmsub(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm,
+ const FPRegister& fa) {
+ ASSERT(allow_macro_instructions_);
+ fnmsub(fd, fn, fm, fa);
+}
+
+
+void MacroAssembler::Frinta(const FPRegister& fd, const FPRegister& fn) {
+ ASSERT(allow_macro_instructions_);
+ frinta(fd, fn);
+}
+
+
+void MacroAssembler::Frintn(const FPRegister& fd, const FPRegister& fn) {
+ ASSERT(allow_macro_instructions_);
+ frintn(fd, fn);
+}
+
+
+void MacroAssembler::Frintz(const FPRegister& fd, const FPRegister& fn) {
+ ASSERT(allow_macro_instructions_);
+ frintz(fd, fn);
+}
+
+
+void MacroAssembler::Fsqrt(const FPRegister& fd, const FPRegister& fn) {
+ ASSERT(allow_macro_instructions_);
+ fsqrt(fd, fn);
+}
+
+
+void MacroAssembler::Fsub(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm) {
+ ASSERT(allow_macro_instructions_);
+ fsub(fd, fn, fm);
+}
+
+
+void MacroAssembler::Hint(SystemHint code) {
+ ASSERT(allow_macro_instructions_);
+ hint(code);
+}
+
+
+void MacroAssembler::Hlt(int code) {
+ ASSERT(allow_macro_instructions_);
+ hlt(code);
+}
+
+
+void MacroAssembler::Isb() {
+ ASSERT(allow_macro_instructions_);
+ isb();
+}
+
+
+void MacroAssembler::Ldnp(const CPURegister& rt,
+ const CPURegister& rt2,
+ const MemOperand& src) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!AreAliased(rt, rt2));
+ ldnp(rt, rt2, src);
+}
+
+
+void MacroAssembler::Ldp(const CPURegister& rt,
+ const CPURegister& rt2,
+ const MemOperand& src) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!AreAliased(rt, rt2));
+ ldp(rt, rt2, src);
+}
+
+
+void MacroAssembler::Ldpsw(const Register& rt,
+ const Register& rt2,
+ const MemOperand& src) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rt.IsZero());
+ ASSERT(!rt2.IsZero());
+ ldpsw(rt, rt2, src);
+}
+
+
+void MacroAssembler::Ldr(const FPRegister& ft, double imm) {
+ ASSERT(allow_macro_instructions_);
+ ldr(ft, imm);
+}
+
+
+void MacroAssembler::Ldr(const Register& rt, uint64_t imm) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rt.IsZero());
+ ldr(rt, imm);
+}
+
+
+void MacroAssembler::Lsl(const Register& rd,
+ const Register& rn,
+ unsigned shift) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ lsl(rd, rn, shift);
+}
+
+
+void MacroAssembler::Lsl(const Register& rd,
+ const Register& rn,
+ const Register& rm) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ lslv(rd, rn, rm);
+}
+
+
+void MacroAssembler::Lsr(const Register& rd,
+ const Register& rn,
+ unsigned shift) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ lsr(rd, rn, shift);
+}
+
+
+void MacroAssembler::Lsr(const Register& rd,
+ const Register& rn,
+ const Register& rm) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ lsrv(rd, rn, rm);
+}
+
+
+void MacroAssembler::Madd(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ const Register& ra) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ madd(rd, rn, rm, ra);
+}
+
+
+void MacroAssembler::Mneg(const Register& rd,
+ const Register& rn,
+ const Register& rm) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ mneg(rd, rn, rm);
+}
+
+
+void MacroAssembler::Mov(const Register& rd, const Register& rn) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ // Emit a register move only if the registers are distinct, or if they are
+ // not X registers. Note that mov(w0, w0) is not a no-op because it clears
+ // the top word of x0.
+ if (!rd.Is(rn) || !rd.Is64Bits()) {
+ Assembler::mov(rd, rn);
+ }
+}
+
+
+void MacroAssembler::Movk(const Register& rd, uint64_t imm, int shift) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ movk(rd, imm, shift);
+}
+
+
+void MacroAssembler::Mrs(const Register& rt, SystemRegister sysreg) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rt.IsZero());
+ mrs(rt, sysreg);
+}
+
+
+void MacroAssembler::Msr(SystemRegister sysreg, const Register& rt) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rt.IsZero());
+ msr(sysreg, rt);
+}
+
+
+void MacroAssembler::Msub(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ const Register& ra) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ msub(rd, rn, rm, ra);
+}
+
+
+void MacroAssembler::Mul(const Register& rd,
+ const Register& rn,
+ const Register& rm) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ mul(rd, rn, rm);
+}
+
+
+void MacroAssembler::Rbit(const Register& rd, const Register& rn) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ rbit(rd, rn);
+}
+
+
+void MacroAssembler::Ret(const Register& xn) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!xn.IsZero());
+ ret(xn);
+}
+
+
+void MacroAssembler::Rev(const Register& rd, const Register& rn) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ rev(rd, rn);
+}
+
+
+void MacroAssembler::Rev16(const Register& rd, const Register& rn) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ rev16(rd, rn);
+}
+
+
+void MacroAssembler::Rev32(const Register& rd, const Register& rn) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ rev32(rd, rn);
+}
+
+
+void MacroAssembler::Ror(const Register& rd,
+ const Register& rs,
+ unsigned shift) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ ror(rd, rs, shift);
+}
+
+
+void MacroAssembler::Ror(const Register& rd,
+ const Register& rn,
+ const Register& rm) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ rorv(rd, rn, rm);
+}
+
+
+void MacroAssembler::Sbfiz(const Register& rd,
+ const Register& rn,
+ unsigned lsb,
+ unsigned width) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ sbfiz(rd, rn, lsb, width);
+}
+
+
+void MacroAssembler::Sbfx(const Register& rd,
+ const Register& rn,
+ unsigned lsb,
+ unsigned width) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ sbfx(rd, rn, lsb, width);
+}
+
+
+void MacroAssembler::Scvtf(const FPRegister& fd,
+ const Register& rn,
+ unsigned fbits) {
+ ASSERT(allow_macro_instructions_);
+ scvtf(fd, rn, fbits);
+}
+
+
+void MacroAssembler::Sdiv(const Register& rd,
+ const Register& rn,
+ const Register& rm) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ sdiv(rd, rn, rm);
+}
+
+
+void MacroAssembler::Smaddl(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ const Register& ra) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ smaddl(rd, rn, rm, ra);
+}
+
+
+void MacroAssembler::Smsubl(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ const Register& ra) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ smsubl(rd, rn, rm, ra);
+}
+
+
+void MacroAssembler::Smull(const Register& rd,
+ const Register& rn,
+ const Register& rm) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ smull(rd, rn, rm);
+}
+
+
+void MacroAssembler::Smulh(const Register& rd,
+ const Register& rn,
+ const Register& rm) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ smulh(rd, rn, rm);
+}
+
+
+void MacroAssembler::Stnp(const CPURegister& rt,
+ const CPURegister& rt2,
+ const MemOperand& dst) {
+ ASSERT(allow_macro_instructions_);
+ stnp(rt, rt2, dst);
+}
+
+
+void MacroAssembler::Stp(const CPURegister& rt,
+ const CPURegister& rt2,
+ const MemOperand& dst) {
+ ASSERT(allow_macro_instructions_);
+ stp(rt, rt2, dst);
+}
+
+
+void MacroAssembler::Sxtb(const Register& rd, const Register& rn) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ sxtb(rd, rn);
+}
+
+
+void MacroAssembler::Sxth(const Register& rd, const Register& rn) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ sxth(rd, rn);
+}
+
+
+void MacroAssembler::Sxtw(const Register& rd, const Register& rn) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ sxtw(rd, rn);
+}
+
+
+void MacroAssembler::Tbnz(const Register& rt, unsigned bit_pos, Label* label) {
+ ASSERT(allow_macro_instructions_);
+ tbnz(rt, bit_pos, label);
+}
+
+
+void MacroAssembler::Tbz(const Register& rt, unsigned bit_pos, Label* label) {
+ ASSERT(allow_macro_instructions_);
+ tbz(rt, bit_pos, label);
+}
+
+
+void MacroAssembler::Ubfiz(const Register& rd,
+ const Register& rn,
+ unsigned lsb,
+ unsigned width) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ ubfiz(rd, rn, lsb, width);
+}
+
+
+void MacroAssembler::Ubfx(const Register& rd,
+ const Register& rn,
+ unsigned lsb,
+ unsigned width) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ ubfx(rd, rn, lsb, width);
+}
+
+
+void MacroAssembler::Ucvtf(const FPRegister& fd,
+ const Register& rn,
+ unsigned fbits) {
+ ASSERT(allow_macro_instructions_);
+ ucvtf(fd, rn, fbits);
+}
+
+
+void MacroAssembler::Udiv(const Register& rd,
+ const Register& rn,
+ const Register& rm) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ udiv(rd, rn, rm);
+}
+
+
+void MacroAssembler::Umaddl(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ const Register& ra) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ umaddl(rd, rn, rm, ra);
+}
+
+
+void MacroAssembler::Umsubl(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ const Register& ra) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ umsubl(rd, rn, rm, ra);
+}
+
+
+void MacroAssembler::Unreachable() {
+ ASSERT(allow_macro_instructions_);
+ hlt(kImmExceptionIsUnreachable);
+}
+
+
+void MacroAssembler::Uxtb(const Register& rd, const Register& rn) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ uxtb(rd, rn);
+}
+
+
+void MacroAssembler::Uxth(const Register& rd, const Register& rn) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ uxth(rd, rn);
+}
+
+
+void MacroAssembler::Uxtw(const Register& rd, const Register& rn) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ uxtw(rd, rn);
+}
+
+
+void MacroAssembler::BumpSystemStackPointer(const Operand& space) {
+ ASSERT(!csp.Is(sp_));
+ // TODO(jbramley): Several callers rely on this not using scratch registers,
+ // so we use the assembler directly here. However, this means that large
+ // immediate values of 'space' cannot be handled. Once we merge with V8, we
+ // should try to use the new scope that controls scratch register usage.
+ InstructionAccurateScope scope(this);
+ if ((space.IsImmediate()) && !is_uint12(space.immediate())) {
+ // The subtract instruction supports a 12-bit immediate, shifted left by
+ // zero or 12 bits. So, in two instructions, we can subtract any immediate
+ // between zero and (1 << 24) - 1.
+ int64_t imm = space.immediate();
+ ASSERT(is_uint24(imm));
+
+ int64_t imm_top_12_bits = imm >> 12;
+ sub(csp, StackPointer(), imm_top_12_bits << 12);
+ imm -= imm_top_12_bits << 12;
+ if (imm > 0) {
+ sub(csp, csp, imm);
+ }
+ } else {
+ sub(csp, StackPointer(), space);
+ }
+}
+
+
+void MacroAssembler::InitializeRootRegister() {
+ ExternalReference roots_array_start =
+ ExternalReference::roots_array_start(isolate());
+ Mov(root, Operand(roots_array_start));
+}
+
+
+void MacroAssembler::SmiTag(Register dst, Register src) {
+ ASSERT(dst.Is64Bits() && src.Is64Bits());
+ Lsl(dst, src, kSmiShift);
+}
+
+
+void MacroAssembler::SmiTag(Register smi) { SmiTag(smi, smi); }
+
+
+void MacroAssembler::SmiUntag(Register dst, Register src) {
+ ASSERT(dst.Is64Bits() && src.Is64Bits());
+ if (FLAG_enable_slow_asserts) {
+ AssertSmi(src);
+ }
+ Asr(dst, src, kSmiShift);
+}
+
+
+void MacroAssembler::SmiUntag(Register smi) { SmiUntag(smi, smi); }
+
+
+void MacroAssembler::SmiUntagToDouble(FPRegister dst,
+ Register src,
+ UntagMode mode) {
+ ASSERT(dst.Is64Bits() && src.Is64Bits());
+ if (FLAG_enable_slow_asserts && (mode == kNotSpeculativeUntag)) {
+ AssertSmi(src);
+ }
+ Scvtf(dst, src, kSmiShift);
+}
+
+
+void MacroAssembler::SmiUntagToFloat(FPRegister dst,
+ Register src,
+ UntagMode mode) {
+ ASSERT(dst.Is32Bits() && src.Is64Bits());
+ if (FLAG_enable_slow_asserts && (mode == kNotSpeculativeUntag)) {
+ AssertSmi(src);
+ }
+ Scvtf(dst, src, kSmiShift);
+}
+
+
+void MacroAssembler::JumpIfSmi(Register value,
+ Label* smi_label,
+ Label* not_smi_label) {
+ STATIC_ASSERT((kSmiTagSize == 1) && (kSmiTag == 0));
+ // Check if the tag bit is set.
+ if (smi_label) {
+ Tbz(value, 0, smi_label);
+ if (not_smi_label) {
+ B(not_smi_label);
+ }
+ } else {
+ ASSERT(not_smi_label);
+ Tbnz(value, 0, not_smi_label);
+ }
+}
+
+
+void MacroAssembler::JumpIfNotSmi(Register value, Label* not_smi_label) {
+ JumpIfSmi(value, NULL, not_smi_label);
+}
+
+
+void MacroAssembler::JumpIfBothSmi(Register value1,
+ Register value2,
+ Label* both_smi_label,
+ Label* not_smi_label) {
+ STATIC_ASSERT((kSmiTagSize == 1) && (kSmiTag == 0));
+ // Check if both tag bits are clear.
+ Orr(Tmp0(), value1, value2);
+ JumpIfSmi(Tmp0(), both_smi_label, not_smi_label);
+}
+
+
+void MacroAssembler::JumpIfEitherSmi(Register value1,
+ Register value2,
+ Label* either_smi_label,
+ Label* not_smi_label) {
+ STATIC_ASSERT((kSmiTagSize == 1) && (kSmiTag == 0));
+ // Check if either tag bit is clear.
+ And(Tmp0(), value1, value2);
+ JumpIfSmi(Tmp0(), either_smi_label, not_smi_label);
+}
+
+
+void MacroAssembler::JumpIfEitherNotSmi(Register value1,
+ Register value2,
+ Label* not_smi_label) {
+ JumpIfBothSmi(value1, value2, NULL, not_smi_label);
+}
+
+
+void MacroAssembler::JumpIfBothNotSmi(Register value1,
+ Register value2,
+ Label* not_smi_label) {
+ JumpIfEitherSmi(value1, value2, NULL, not_smi_label);
+}
+
+
+void MacroAssembler::IsObjectNameType(Register object,
+ Register type,
+ Label* fail) {
+ CompareObjectType(object, type, type, LAST_NAME_TYPE);
+ B(hi, fail);
+}
+
+
+void MacroAssembler::IsObjectJSObjectType(Register heap_object,
+ Register map,
+ Register scratch,
+ Label* fail) {
+ Ldr(map, FieldMemOperand(heap_object, HeapObject::kMapOffset));
+ IsInstanceJSObjectType(map, scratch, fail);
+}
+
+
+void MacroAssembler::IsInstanceJSObjectType(Register map,
+ Register scratch,
+ Label* fail) {
+ Ldrb(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
+ // If cmp result is lt, the following ccmp will clear all flags.
+ // Z == 0, N == V implies gt condition.
+ Cmp(scratch, FIRST_NONCALLABLE_SPEC_OBJECT_TYPE);
+ Ccmp(scratch, LAST_NONCALLABLE_SPEC_OBJECT_TYPE, NoFlag, ge);
+
+ // If we didn't get a valid label object just fall through and leave the
+ // flags updated.
+ if (fail != NULL) {
+ B(gt, fail);
+ }
+}
+
+
+void MacroAssembler::IsObjectJSStringType(Register object,
+ Register type,
+ Label* not_string,
+ Label* string) {
+ Ldr(type, FieldMemOperand(object, HeapObject::kMapOffset));
+ Ldrb(type.W(), FieldMemOperand(type, Map::kInstanceTypeOffset));
+
+ STATIC_ASSERT(kStringTag == 0);
+ ASSERT((string != NULL) || (not_string != NULL));
+ if (string == NULL) {
+ TestAndBranchIfAnySet(type.W(), kIsNotStringMask, not_string);
+ } else if (not_string == NULL) {
+ TestAndBranchIfAllClear(type.W(), kIsNotStringMask, string);
+ } else {
+ TestAndBranchIfAnySet(type.W(), kIsNotStringMask, not_string);
+ B(string);
+ }
+}
+
+
+void MacroAssembler::Push(Handle<Object> handle) {
+ Mov(Tmp0(), Operand(handle));
+ Push(Tmp0());
+}
+
+
+void MacroAssembler::Claim(uint64_t count, uint64_t unit_size) {
+ uint64_t size = count * unit_size;
+
+ if (size == 0) {
+ return;
+ }
+
+ if (csp.Is(StackPointer())) {
+ ASSERT(size % 16 == 0);
+ } else {
+ BumpSystemStackPointer(size);
+ }
+
+ Sub(StackPointer(), StackPointer(), size);
+}
+
+
+void MacroAssembler::Claim(const Register& count, uint64_t unit_size) {
+ ASSERT(IsPowerOf2(unit_size));
+
+ if (unit_size == 0) {
+ return;
+ }
+
+ const int shift = CountTrailingZeros(unit_size, kXRegSize);
+ const Operand size(count, LSL, shift);
+
+ if (size.IsZero()) {
+ return;
+ }
+
+ if (!csp.Is(StackPointer())) {
+ BumpSystemStackPointer(size);
+ }
+
+ Sub(StackPointer(), StackPointer(), size);
+}
+
+
+void MacroAssembler::ClaimBySMI(const Register& count_smi, uint64_t unit_size) {
+ ASSERT(IsPowerOf2(unit_size));
+ const int shift = CountTrailingZeros(unit_size, kXRegSize) - kSmiShift;
+ const Operand size(count_smi,
+ (shift >= 0) ? (LSL) : (LSR),
+ (shift >= 0) ? (shift) : (-shift));
+
+ if (size.IsZero()) {
+ return;
+ }
+
+ if (!csp.Is(StackPointer())) {
+ BumpSystemStackPointer(size);
+ }
+
+ Sub(StackPointer(), StackPointer(), size);
+}
+
+
+void MacroAssembler::Drop(uint64_t count, uint64_t unit_size) {
+ uint64_t size = count * unit_size;
+
+ if (size == 0) {
+ return;
+ }
+
+ Add(StackPointer(), StackPointer(), size);
+
+ if (csp.Is(StackPointer())) {
+ ASSERT(size % 16 == 0);
+ } else if (emit_debug_code()) {
+ // It is safe to leave csp where it is when unwinding the JavaScript stack,
+ // but if we keep it matching StackPointer, the simulator can detect memory
+ // accesses in the now-free part of the stack.
+ Mov(csp, StackPointer());
+ }
+}
+
+
+void MacroAssembler::Drop(const Register& count, uint64_t unit_size) {
+ ASSERT(IsPowerOf2(unit_size));
+
+ if (unit_size == 0) {
+ return;
+ }
+
+ const int shift = CountTrailingZeros(unit_size, kXRegSize);
+ const Operand size(count, LSL, shift);
+
+ if (size.IsZero()) {
+ return;
+ }
+
+ Add(StackPointer(), StackPointer(), size);
+
+ if (!csp.Is(StackPointer()) && emit_debug_code()) {
+ // It is safe to leave csp where it is when unwinding the JavaScript stack,
+ // but if we keep it matching StackPointer, the simulator can detect memory
+ // accesses in the now-free part of the stack.
+ Mov(csp, StackPointer());
+ }
+}
+
+
+void MacroAssembler::DropBySMI(const Register& count_smi, uint64_t unit_size) {
+ ASSERT(IsPowerOf2(unit_size));
+ const int shift = CountTrailingZeros(unit_size, kXRegSize) - kSmiShift;
+ const Operand size(count_smi,
+ (shift >= 0) ? (LSL) : (LSR),
+ (shift >= 0) ? (shift) : (-shift));
+
+ if (size.IsZero()) {
+ return;
+ }
+
+ Add(StackPointer(), StackPointer(), size);
+
+ if (!csp.Is(StackPointer()) && emit_debug_code()) {
+ // It is safe to leave csp where it is when unwinding the JavaScript stack,
+ // but if we keep it matching StackPointer, the simulator can detect memory
+ // accesses in the now-free part of the stack.
+ Mov(csp, StackPointer());
+ }
+}
+
+
+void MacroAssembler::CompareAndBranch(const Register& lhs,
+ const Operand& rhs,
+ Condition cond,
+ Label* label) {
+ if (rhs.IsImmediate() && (rhs.immediate() == 0) &&
+ ((cond == eq) || (cond == ne))) {
+ if (cond == eq) {
+ Cbz(lhs, label);
+ } else {
+ Cbnz(lhs, label);
+ }
+ } else {
+ Cmp(lhs, rhs);
+ B(cond, label);
+ }
+}
+
+
+void MacroAssembler::TestAndBranchIfAnySet(const Register& reg,
+ const uint64_t bit_pattern,
+ Label* label) {
+ int bits = reg.SizeInBits();
+ ASSERT(CountSetBits(bit_pattern, bits) > 0);
+ if (CountSetBits(bit_pattern, bits) == 1) {
+ Tbnz(reg, MaskToBit(bit_pattern), label);
+ } else {
+ Tst(reg, bit_pattern);
+ B(ne, label);
+ }
+}
+
+
+void MacroAssembler::TestAndBranchIfAllClear(const Register& reg,
+ const uint64_t bit_pattern,
+ Label* label) {
+ int bits = reg.SizeInBits();
+ ASSERT(CountSetBits(bit_pattern, bits) > 0);
+ if (CountSetBits(bit_pattern, bits) == 1) {
+ Tbz(reg, MaskToBit(bit_pattern), label);
+ } else {
+ Tst(reg, bit_pattern);
+ B(eq, label);
+ }
+}
+
+
+void MacroAssembler::InlineData(uint64_t data) {
+ ASSERT(is_uint16(data));
+ InstructionAccurateScope scope(this, 1);
+ movz(xzr, data);
+}
+
+
+void MacroAssembler::EnableInstrumentation() {
+ InstructionAccurateScope scope(this, 1);
+ movn(xzr, InstrumentStateEnable);
+}
+
+
+void MacroAssembler::DisableInstrumentation() {
+ InstructionAccurateScope scope(this, 1);
+ movn(xzr, InstrumentStateDisable);
+}
+
+
+void MacroAssembler::AnnotateInstrumentation(const char* marker_name) {
+ ASSERT(strlen(marker_name) == 2);
+
+ // We allow only printable characters in the marker names. Unprintable
+ // characters are reserved for controlling features of the instrumentation.
+ ASSERT(isprint(marker_name[0]) && isprint(marker_name[1]));
+
+ InstructionAccurateScope scope(this, 1);
+ movn(xzr, (marker_name[1] << 8) | marker_name[0]);
+}
+
+} } // namespace v8::internal
+
+#endif // V8_A64_MACRO_ASSEMBLER_A64_INL_H_
--- /dev/null
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#if V8_TARGET_ARCH_A64
+
+#include "bootstrapper.h"
+#include "codegen.h"
+#include "cpu-profiler.h"
+#include "debug.h"
+#include "isolate-inl.h"
+#include "runtime.h"
+
+namespace v8 {
+namespace internal {
+
+// Define a fake double underscore to use with the ASM_UNIMPLEMENTED macros.
+#define __
+
+
+MacroAssembler::MacroAssembler(Isolate* arg_isolate,
+ byte * buffer,
+ unsigned buffer_size)
+ : Assembler(arg_isolate, buffer, buffer_size),
+ generating_stub_(false),
+#if DEBUG
+ allow_macro_instructions_(true),
+#endif
+ has_frame_(false),
+ use_real_aborts_(true),
+ sp_(jssp), tmp0_(ip0), tmp1_(ip1), fptmp0_(fp_scratch) {
+ if (isolate() != NULL) {
+ code_object_ = Handle<Object>(isolate()->heap()->undefined_value(),
+ isolate());
+ }
+}
+
+
+void MacroAssembler::LogicalMacro(const Register& rd,
+ const Register& rn,
+ const Operand& operand,
+ LogicalOp op) {
+ if (operand.NeedsRelocation()) {
+ LoadRelocated(Tmp0(), operand);
+ Logical(rd, rn, Tmp0(), op);
+
+ } else if (operand.IsImmediate()) {
+ int64_t immediate = operand.immediate();
+ unsigned reg_size = rd.SizeInBits();
+ ASSERT(rd.Is64Bits() || is_uint32(immediate));
+
+ // If the operation is NOT, invert the operation and immediate.
+ if ((op & NOT) == NOT) {
+ op = static_cast<LogicalOp>(op & ~NOT);
+ immediate = ~immediate;
+ if (rd.Is32Bits()) {
+ immediate &= kWRegMask;
+ }
+ }
+
+ // Special cases for all set or all clear immediates.
+ if (immediate == 0) {
+ switch (op) {
+ case AND:
+ Mov(rd, 0);
+ return;
+ case ORR: // Fall through.
+ case EOR:
+ Mov(rd, rn);
+ return;
+ case ANDS: // Fall through.
+ case BICS:
+ break;
+ default:
+ UNREACHABLE();
+ }
+ } else if ((rd.Is64Bits() && (immediate == -1L)) ||
+ (rd.Is32Bits() && (immediate == 0xffffffffL))) {
+ switch (op) {
+ case AND:
+ Mov(rd, rn);
+ return;
+ case ORR:
+ Mov(rd, immediate);
+ return;
+ case EOR:
+ Mvn(rd, rn);
+ return;
+ case ANDS: // Fall through.
+ case BICS:
+ break;
+ default:
+ UNREACHABLE();
+ }
+ }
+
+ unsigned n, imm_s, imm_r;
+ if (IsImmLogical(immediate, reg_size, &n, &imm_s, &imm_r)) {
+ // Immediate can be encoded in the instruction.
+ LogicalImmediate(rd, rn, n, imm_s, imm_r, op);
+ } else {
+ // Immediate can't be encoded: synthesize using move immediate.
+ Register temp = AppropriateTempFor(rn);
+ Mov(temp, immediate);
+ if (rd.Is(csp)) {
+ // If rd is the stack pointer we cannot use it as the destination
+ // register so we use the temp register as an intermediate again.
+ Logical(temp, rn, temp, op);
+ Mov(csp, temp);
+ } else {
+ Logical(rd, rn, temp, op);
+ }
+ }
+
+ } else if (operand.IsExtendedRegister()) {
+ ASSERT(operand.reg().SizeInBits() <= rd.SizeInBits());
+ // Add/sub extended supports shift <= 4. We want to support exactly the
+ // same modes here.
+ ASSERT(operand.shift_amount() <= 4);
+ ASSERT(operand.reg().Is64Bits() ||
+ ((operand.extend() != UXTX) && (operand.extend() != SXTX)));
+ Register temp = AppropriateTempFor(rn, operand.reg());
+ EmitExtendShift(temp, operand.reg(), operand.extend(),
+ operand.shift_amount());
+ Logical(rd, rn, temp, op);
+
+ } else {
+ // The operand can be encoded in the instruction.
+ ASSERT(operand.IsShiftedRegister());
+ Logical(rd, rn, operand, op);
+ }
+}
+
+
+void MacroAssembler::Mov(const Register& rd, uint64_t imm) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(is_uint32(imm) || is_int32(imm) || rd.Is64Bits());
+ ASSERT(!rd.IsZero());
+
+ // TODO(all) extend to support more immediates.
+ //
+ // Immediates on Aarch64 can be produced using an initial value, and zero to
+ // three move keep operations.
+ //
+ // Initial values can be generated with:
+ // 1. 64-bit move zero (movz).
+ // 2. 32-bit move inverted (movn).
+ // 3. 64-bit move inverted.
+ // 4. 32-bit orr immediate.
+ // 5. 64-bit orr immediate.
+ // Move-keep may then be used to modify each of the 16-bit half-words.
+ //
+ // The code below supports all five initial value generators, and
+ // applying move-keep operations to move-zero and move-inverted initial
+ // values.
+
+ unsigned reg_size = rd.SizeInBits();
+ unsigned n, imm_s, imm_r;
+ if (IsImmMovz(imm, reg_size) && !rd.IsSP()) {
+ // Immediate can be represented in a move zero instruction. Movz can't
+ // write to the stack pointer.
+ movz(rd, imm);
+ } else if (IsImmMovn(imm, reg_size) && !rd.IsSP()) {
+ // Immediate can be represented in a move inverted instruction. Movn can't
+ // write to the stack pointer.
+ movn(rd, rd.Is64Bits() ? ~imm : (~imm & kWRegMask));
+ } else if (IsImmLogical(imm, reg_size, &n, &imm_s, &imm_r)) {
+ // Immediate can be represented in a logical orr instruction.
+ LogicalImmediate(rd, AppropriateZeroRegFor(rd), n, imm_s, imm_r, ORR);
+ } else {
+ // Generic immediate case. Imm will be represented by
+ // [imm3, imm2, imm1, imm0], where each imm is 16 bits.
+ // A move-zero or move-inverted is generated for the first non-zero or
+ // non-0xffff immX, and a move-keep for subsequent non-zero immX.
+
+ uint64_t ignored_halfword = 0;
+ bool invert_move = false;
+ // If the number of 0xffff halfwords is greater than the number of 0x0000
+ // halfwords, it's more efficient to use move-inverted.
+ if (CountClearHalfWords(~imm, reg_size) >
+ CountClearHalfWords(imm, reg_size)) {
+ ignored_halfword = 0xffffL;
+ invert_move = true;
+ }
+
+ // Mov instructions can't move value into the stack pointer, so set up a
+ // temporary register, if needed.
+ Register temp = rd.IsSP() ? AppropriateTempFor(rd) : rd;
+
+ // Iterate through the halfwords. Use movn/movz for the first non-ignored
+ // halfword, and movk for subsequent halfwords.
+ ASSERT((reg_size % 16) == 0);
+ bool first_mov_done = false;
+ for (unsigned i = 0; i < (rd.SizeInBits() / 16); i++) {
+ uint64_t imm16 = (imm >> (16 * i)) & 0xffffL;
+ if (imm16 != ignored_halfword) {
+ if (!first_mov_done) {
+ if (invert_move) {
+ movn(temp, (~imm16) & 0xffffL, 16 * i);
+ } else {
+ movz(temp, imm16, 16 * i);
+ }
+ first_mov_done = true;
+ } else {
+ // Construct a wider constant.
+ movk(temp, imm16, 16 * i);
+ }
+ }
+ }
+ ASSERT(first_mov_done);
+
+ // Move the temporary if the original destination register was the stack
+ // pointer.
+ if (rd.IsSP()) {
+ mov(rd, temp);
+ }
+ }
+}
+
+
+void MacroAssembler::Mov(const Register& rd,
+ const Operand& operand,
+ DiscardMoveMode discard_mode) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ // Provide a swap register for instructions that need to write into the
+ // system stack pointer (and can't do this inherently).
+ Register dst = (rd.Is(csp)) ? (Tmp1()) : (rd);
+
+ if (operand.NeedsRelocation()) {
+ LoadRelocated(dst, operand);
+
+ } else if (operand.IsImmediate()) {
+ // Call the macro assembler for generic immediates.
+ Mov(dst, operand.immediate());
+
+ } else if (operand.IsShiftedRegister() && (operand.shift_amount() != 0)) {
+ // Emit a shift instruction if moving a shifted register. This operation
+ // could also be achieved using an orr instruction (like orn used by Mvn),
+ // but using a shift instruction makes the disassembly clearer.
+ EmitShift(dst, operand.reg(), operand.shift(), operand.shift_amount());
+
+ } else if (operand.IsExtendedRegister()) {
+ // Emit an extend instruction if moving an extended register. This handles
+ // extend with post-shift operations, too.
+ EmitExtendShift(dst, operand.reg(), operand.extend(),
+ operand.shift_amount());
+
+ } else {
+ // Otherwise, emit a register move only if the registers are distinct, or
+ // if they are not X registers.
+ //
+ // Note that mov(w0, w0) is not a no-op because it clears the top word of
+ // x0. A flag is provided (kDiscardForSameWReg) if a move between the same W
+ // registers is not required to clear the top word of the X register. In
+ // this case, the instruction is discarded.
+ //
+ // If csp is an operand, add #0 is emitted, otherwise, orr #0.
+ if (!rd.Is(operand.reg()) || (rd.Is32Bits() &&
+ (discard_mode == kDontDiscardForSameWReg))) {
+ Assembler::mov(rd, operand.reg());
+ }
+ // This case can handle writes into the system stack pointer directly.
+ dst = rd;
+ }
+
+ // Copy the result to the system stack pointer.
+ if (!dst.Is(rd)) {
+ ASSERT(rd.IsZero());
+ ASSERT(dst.Is(Tmp1()));
+ Assembler::mov(rd, dst);
+ }
+}
+
+
+// Move-invert: writes ~operand into rd.
+void MacroAssembler::Mvn(const Register& rd, const Operand& operand) {
+ ASSERT(allow_macro_instructions_);
+
+ if (operand.NeedsRelocation()) {
+ // Relocatable immediates must be materialised in a scratch register first.
+ LoadRelocated(Tmp0(), operand);
+ Mvn(rd, Tmp0());
+
+ } else if (operand.IsImmediate()) {
+ // Call the macro assembler for generic immediates.
+ Mov(rd, ~operand.immediate());
+
+ } else if (operand.IsExtendedRegister()) {
+ // Emit two instructions for the extend case. This differs from Mov, as
+ // the extend and invert can't be achieved in one instruction.
+ Register temp = AppropriateTempFor(rd, operand.reg());
+ EmitExtendShift(temp, operand.reg(), operand.extend(),
+ operand.shift_amount());
+ mvn(rd, temp);
+
+ } else {
+ // Plain or shifted register: the mvn instruction handles this directly.
+ mvn(rd, operand);
+ }
+}
+
+
+// Returns the number of 16-bit half-words of imm (viewed at reg_size bits)
+// that are zero.
+// NOTE(review): the assert only requires a multiple of 8 bits, but the loop
+// counts 16-bit half-words; presumably only 32/64-bit sizes are passed --
+// confirm.
+unsigned MacroAssembler::CountClearHalfWords(uint64_t imm, unsigned reg_size) {
+ ASSERT((reg_size % 8) == 0);
+ int count = 0;
+ for (unsigned i = 0; i < (reg_size / 16); i++) {
+ if ((imm & 0xffff) == 0) {
+ count++;
+ }
+ imm >>= 16;
+ }
+ return count;
+}
+
+
+// The movz instruction can generate immediates containing an arbitrary 16-bit
+// half-word, with remaining bits clear, eg. 0x00001234, 0x0000123400000000.
+bool MacroAssembler::IsImmMovz(uint64_t imm, unsigned reg_size) {
+ ASSERT((reg_size == kXRegSize) || (reg_size == kWRegSize));
+ return CountClearHalfWords(imm, reg_size) >= ((reg_size / 16) - 1);
+}
+
+
+// The movn instruction can generate immediates containing an arbitrary 16-bit
+// half-word, with remaining bits set, eg. 0xffff1234, 0xffff1234ffffffff.
+bool MacroAssembler::IsImmMovn(uint64_t imm, unsigned reg_size) {
+ return IsImmMovz(~imm, reg_size);
+}
+
+
+// Conditional compare (ccmp/ccmn) with support for operands that the
+// instruction cannot encode directly.
+void MacroAssembler::ConditionalCompareMacro(const Register& rn,
+ const Operand& operand,
+ StatusFlags nzcv,
+ Condition cond,
+ ConditionalCompareOp op) {
+ ASSERT((cond != al) && (cond != nv));
+ if (operand.NeedsRelocation()) {
+ // Materialise relocatable immediates in a scratch register first.
+ LoadRelocated(Tmp0(), operand);
+ ConditionalCompareMacro(rn, Tmp0(), nzcv, cond, op);
+
+ } else if ((operand.IsShiftedRegister() && (operand.shift_amount() == 0)) ||
+ (operand.IsImmediate() && IsImmConditionalCompare(operand.immediate()))) {
+ // The immediate can be encoded in the instruction, or the operand is an
+ // unshifted register: call the assembler.
+ ConditionalCompare(rn, operand, nzcv, cond, op);
+
+ } else {
+ // The operand isn't directly supported by the instruction: perform the
+ // operation on a temporary register.
+ Register temp = AppropriateTempFor(rn);
+ Mov(temp, operand);
+ ConditionalCompare(rn, temp, nzcv, cond, op);
+ }
+}
+
+
+// Conditional select: rd = cond ? rn : operand. Immediates 0, 1 and -1 are
+// synthesised with csel/csinc/csinv against the zero register, avoiding a
+// temporary.
+void MacroAssembler::Csel(const Register& rd,
+ const Register& rn,
+ const Operand& operand,
+ Condition cond) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ ASSERT((cond != al) && (cond != nv));
+ if (operand.IsImmediate()) {
+ // Immediate argument. Handle special cases of 0, 1 and -1 using zero
+ // register.
+ int64_t imm = operand.immediate();
+ Register zr = AppropriateZeroRegFor(rn);
+ if (imm == 0) {
+ csel(rd, rn, zr, cond);
+ } else if (imm == 1) {
+ csinc(rd, rn, zr, cond);
+ } else if (imm == -1) {
+ csinv(rd, rn, zr, cond);
+ } else {
+ Register temp = AppropriateTempFor(rn);
+ Mov(temp, operand.immediate());
+ csel(rd, rn, temp, cond);
+ }
+ } else if (operand.IsShiftedRegister() && (operand.shift_amount() == 0)) {
+ // Unshifted register argument.
+ csel(rd, rn, operand.reg(), cond);
+ } else {
+ // All other arguments.
+ Register temp = AppropriateTempFor(rn);
+ Mov(temp, operand);
+ csel(rd, rn, temp, cond);
+ }
+}
+
+
+// Add/sub with operand legalisation: operands that add/sub cannot encode
+// (out-of-range immediates, ROR-shifted registers, or a non-shifted-register
+// operand combined with a zero rn) are first moved into a temporary.
+void MacroAssembler::AddSubMacro(const Register& rd,
+ const Register& rn,
+ const Operand& operand,
+ FlagsUpdate S,
+ AddSubOp op) {
+ // NOTE(review): rd.Is(rn) presumably already implies matching sizes, which
+ // would make the explicit 64-bit checks partly redundant -- confirm against
+ // CPURegister::Is.
+ if (operand.IsZero() && rd.Is(rn) && rd.Is64Bits() && rn.Is64Bits() &&
+ !operand.NeedsRelocation() && (S == LeaveFlags)) {
+ // The instruction would be a nop. Avoid generating useless code.
+ return;
+ }
+
+ if (operand.NeedsRelocation()) {
+ // Materialise relocatable immediates in a scratch register first.
+ LoadRelocated(Tmp0(), operand);
+ AddSubMacro(rd, rn, Tmp0(), S, op);
+ } else if ((operand.IsImmediate() && !IsImmAddSub(operand.immediate())) ||
+ (rn.IsZero() && !operand.IsShiftedRegister()) ||
+ (operand.IsShiftedRegister() && (operand.shift() == ROR))) {
+ // Unencodable operand: evaluate it into a temporary register.
+ Register temp = AppropriateTempFor(rn);
+ Mov(temp, operand);
+ AddSub(rd, rn, temp, S, op);
+ } else {
+ // The operand is directly supported by the instruction.
+ AddSub(rd, rn, operand, S, op);
+ }
+}
+
+
+// Add/sub with carry (adc/sbc) with operand legalisation: adc/sbc accept
+// plain registers only, so immediates, shifted and extended registers are
+// pre-computed into a temporary.
+void MacroAssembler::AddSubWithCarryMacro(const Register& rd,
+ const Register& rn,
+ const Operand& operand,
+ FlagsUpdate S,
+ AddSubWithCarryOp op) {
+ ASSERT(rd.SizeInBits() == rn.SizeInBits());
+
+ if (operand.NeedsRelocation()) {
+ LoadRelocated(Tmp0(), operand);
+ AddSubWithCarryMacro(rd, rn, Tmp0(), S, op);
+
+ } else if (operand.IsImmediate() ||
+ (operand.IsShiftedRegister() && (operand.shift() == ROR))) {
+ // Add/sub with carry (immediate or ROR shifted register).
+ Register temp = AppropriateTempFor(rn);
+ Mov(temp, operand);
+ AddSubWithCarry(rd, rn, temp, S, op);
+ } else if (operand.IsShiftedRegister() && (operand.shift_amount() != 0)) {
+ // Add/sub with carry (shifted register).
+ ASSERT(operand.reg().SizeInBits() == rd.SizeInBits());
+ ASSERT(operand.shift() != ROR);
+ ASSERT(is_uintn(operand.shift_amount(),
+ rd.SizeInBits() == kXRegSize ? kXRegSizeLog2 : kWRegSizeLog2));
+ Register temp = AppropriateTempFor(rn, operand.reg());
+ EmitShift(temp, operand.reg(), operand.shift(), operand.shift_amount());
+ AddSubWithCarry(rd, rn, temp, S, op);
+
+ } else if (operand.IsExtendedRegister()) {
+ // Add/sub with carry (extended register).
+ ASSERT(operand.reg().SizeInBits() <= rd.SizeInBits());
+ // Add/sub extended supports a shift <= 4. We want to support exactly the
+ // same modes.
+ ASSERT(operand.shift_amount() <= 4);
+ ASSERT(operand.reg().Is64Bits() ||
+ ((operand.extend() != UXTX) && (operand.extend() != SXTX)));
+ Register temp = AppropriateTempFor(rn, operand.reg());
+ EmitExtendShift(temp, operand.reg(), operand.extend(),
+ operand.shift_amount());
+ AddSubWithCarry(rd, rn, temp, S, op);
+
+ } else {
+ // The addressing mode is directly supported by the instruction.
+ AddSubWithCarry(rd, rn, operand, S, op);
+ }
+}
+
+
+// Emits a load or store, legalising addressing modes the instruction cannot
+// encode. For the out-of-range pre/post-index cases the base register is
+// updated with an explicit 'add', preserving the requested writeback
+// semantics.
+void MacroAssembler::LoadStoreMacro(const CPURegister& rt,
+ const MemOperand& addr,
+ LoadStoreOp op) {
+ int64_t offset = addr.offset();
+ LSDataSize size = CalcLSDataSize(op);
+
+ // Check if an immediate offset fits in the immediate field of the
+ // appropriate instruction. If not, emit two instructions to perform
+ // the operation.
+ if (addr.IsImmediateOffset() && !IsImmLSScaled(offset, size) &&
+ !IsImmLSUnscaled(offset)) {
+ // Immediate offset that can't be encoded using unsigned or unscaled
+ // addressing modes.
+ Register temp = AppropriateTempFor(addr.base());
+ Mov(temp, addr.offset());
+ LoadStore(rt, MemOperand(addr.base(), temp), op);
+ } else if (addr.IsPostIndex() && !IsImmLSUnscaled(offset)) {
+ // Post-index beyond unscaled addressing range.
+ LoadStore(rt, MemOperand(addr.base()), op);
+ add(addr.base(), addr.base(), offset);
+ } else if (addr.IsPreIndex() && !IsImmLSUnscaled(offset)) {
+ // Pre-index beyond unscaled addressing range.
+ add(addr.base(), addr.base(), offset);
+ LoadStore(rt, MemOperand(addr.base()), op);
+ } else {
+ // Encodable in one load/store instruction.
+ LoadStore(rt, addr, op);
+ }
+}
+
+
+// Loads rt from addr with the width and signedness implied by
+// representation r. Anything wider than 32 bits requires a 64-bit rt.
+void MacroAssembler::Load(const Register& rt,
+ const MemOperand& addr,
+ Representation r) {
+ ASSERT(!r.IsDouble());
+
+ if (r.IsInteger8()) {
+ Ldrsb(rt, addr);
+ } else if (r.IsUInteger8()) {
+ Ldrb(rt, addr);
+ } else if (r.IsInteger16()) {
+ Ldrsh(rt, addr);
+ } else if (r.IsUInteger16()) {
+ Ldrh(rt, addr);
+ } else if (r.IsInteger32()) {
+ Ldr(rt.W(), addr);
+ } else {
+ ASSERT(rt.Is64Bits());
+ Ldr(rt, addr);
+ }
+}
+
+
+// Stores rt to addr with the width implied by representation r.
+void MacroAssembler::Store(const Register& rt,
+ const MemOperand& addr,
+ Representation r) {
+ ASSERT(!r.IsDouble());
+
+ if (r.IsInteger8() || r.IsUInteger8()) {
+ Strb(rt, addr);
+ } else if (r.IsInteger16() || r.IsUInteger16()) {
+ Strh(rt, addr);
+ } else if (r.IsInteger32()) {
+ Str(rt.W(), addr);
+ } else {
+ ASSERT(rt.Is64Bits());
+ Str(rt, addr);
+ }
+}
+
+
+// Pseudo-instructions.
+
+
+// Computes rd = |rm|. If rm is the most negative representable value, the
+// result is not representable in two's complement; the V flag set by the Cmp
+// below distinguishes that case and drives the optional branches.
+void MacroAssembler::Abs(const Register& rd, const Register& rm,
+ Label* is_not_representable,
+ Label* is_representable) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(AreSameSizeAndType(rd, rm));
+
+ // Cmp sets flags for (rm - 1); Cneg then negates rm when rm < 1. Negating
+ // zero leaves it unchanged.
+ Cmp(rm, 1);
+ Cneg(rd, rm, lt);
+
+ // If the comparison sets the v flag, the input was the smallest value
+ // representable by rm, and the mathematical result of abs(rm) is not
+ // representable using two's complement.
+ if ((is_not_representable != NULL) && (is_representable != NULL)) {
+ B(is_not_representable, vs);
+ B(is_representable);
+ } else if (is_not_representable != NULL) {
+ B(is_not_representable, vs);
+ } else if (is_representable != NULL) {
+ B(is_representable, vc);
+ }
+}
+
+
+// Abstracted stack operations.
+
+
+// Pushes up to four same-sized registers onto the current stack pointer,
+// after reserving/aligning space via PrepareForPush.
+void MacroAssembler::Push(const CPURegister& src0, const CPURegister& src1,
+ const CPURegister& src2, const CPURegister& src3) {
+ ASSERT(AreSameSizeAndType(src0, src1, src2, src3));
+ ASSERT(src0.IsValid());
+
+ int count = 1 + src1.IsValid() + src2.IsValid() + src3.IsValid();
+ int size = src0.SizeInBytes();
+
+ PrepareForPush(count, size);
+ PushHelper(count, size, src0, src1, src2, src3);
+}
+
+
+// Pops up to four same-sized registers; the inverse of Push above.
+void MacroAssembler::Pop(const CPURegister& dst0, const CPURegister& dst1,
+ const CPURegister& dst2, const CPURegister& dst3) {
+ // It is not valid to pop into the same register more than once in one
+ // instruction, not even into the zero register.
+ ASSERT(!AreAliased(dst0, dst1, dst2, dst3));
+ ASSERT(AreSameSizeAndType(dst0, dst1, dst2, dst3));
+ ASSERT(dst0.IsValid());
+
+ int count = 1 + dst1.IsValid() + dst2.IsValid() + dst3.IsValid();
+ int size = dst0.SizeInBytes();
+
+ PrepareForPop(count, size);
+ PopHelper(count, size, dst0, dst1, dst2, dst3);
+
+ if (!csp.Is(StackPointer()) && emit_debug_code()) {
+ // It is safe to leave csp where it is when unwinding the JavaScript stack,
+ // but if we keep it matching StackPointer, the simulator can detect memory
+ // accesses in the now-free part of the stack.
+ Mov(csp, StackPointer());
+ }
+}
+
+
+// Pushes every register in the list, highest-numbered first.
+void MacroAssembler::PushCPURegList(CPURegList registers) {
+ int size = registers.RegisterSizeInBytes();
+
+ PrepareForPush(registers.Count(), size);
+ // Push up to four registers at a time because if the current stack pointer is
+ // csp and reg_size is 32, registers must be pushed in blocks of four in order
+ // to maintain the 16-byte alignment for csp.
+ while (!registers.IsEmpty()) {
+ int count_before = registers.Count();
+ const CPURegister& src0 = registers.PopHighestIndex();
+ const CPURegister& src1 = registers.PopHighestIndex();
+ const CPURegister& src2 = registers.PopHighestIndex();
+ const CPURegister& src3 = registers.PopHighestIndex();
+ int count = count_before - registers.Count();
+ PushHelper(count, size, src0, src1, src2, src3);
+ }
+}
+
+
+// Pops every register in the list, lowest-numbered first; the inverse of
+// PushCPURegList.
+void MacroAssembler::PopCPURegList(CPURegList registers) {
+ int size = registers.RegisterSizeInBytes();
+
+ PrepareForPop(registers.Count(), size);
+ // Pop up to four registers at a time because if the current stack pointer is
+ // csp and reg_size is 32, registers must be popped in blocks of four in
+ // order to maintain the 16-byte alignment for csp.
+ while (!registers.IsEmpty()) {
+ int count_before = registers.Count();
+ const CPURegister& dst0 = registers.PopLowestIndex();
+ const CPURegister& dst1 = registers.PopLowestIndex();
+ const CPURegister& dst2 = registers.PopLowestIndex();
+ const CPURegister& dst3 = registers.PopLowestIndex();
+ int count = count_before - registers.Count();
+ PopHelper(count, size, dst0, dst1, dst2, dst3);
+ }
+
+ if (!csp.Is(StackPointer()) && emit_debug_code()) {
+ // It is safe to leave csp where it is when unwinding the JavaScript stack,
+ // but if we keep it matching StackPointer, the simulator can detect memory
+ // accesses in the now-free part of the stack.
+ Mov(csp, StackPointer());
+ }
+}
+
+
+// Pushes 'count' copies of the same register.
+void MacroAssembler::PushMultipleTimes(int count, Register src) {
+ int size = src.SizeInBytes();
+
+ PrepareForPush(count, size);
+
+ // When optimizing for size, emit a loop pushing two copies per iteration
+ // instead of fully unrolling.
+ // NOTE(review): use of the '__' macro inside a MacroAssembler method is
+ // unusual; confirm it expands to this assembler here.
+ if (FLAG_optimize_for_size && count > 8) {
+ Label loop;
+ __ Mov(Tmp0(), count / 2);
+ __ Bind(&loop);
+ PushHelper(2, size, src, src, NoReg, NoReg);
+ __ Subs(Tmp0(), Tmp0(), 1);
+ __ B(ne, &loop);
+
+ count %= 2;
+ }
+
+ // Push up to four registers at a time if possible because if the current
+ // stack pointer is csp and the register size is 32, registers must be pushed
+ // in blocks of four in order to maintain the 16-byte alignment for csp.
+ while (count >= 4) {
+ PushHelper(4, size, src, src, src, src);
+ count -= 4;
+ }
+ if (count >= 2) {
+ PushHelper(2, size, src, src, NoReg, NoReg);
+ count -= 2;
+ }
+ if (count == 1) {
+ PushHelper(1, size, src, NoReg, NoReg, NoReg);
+ count -= 1;
+ }
+ ASSERT(count == 0);
+}
+
+
+// Emits the raw stores for a push of 'count' registers of 'size' bytes each.
+void MacroAssembler::PushHelper(int count, int size,
+ const CPURegister& src0,
+ const CPURegister& src1,
+ const CPURegister& src2,
+ const CPURegister& src3) {
+ // Ensure that we don't unintentionally modify scratch or debug registers.
+ InstructionAccurateScope scope(this);
+
+ ASSERT(AreSameSizeAndType(src0, src1, src2, src3));
+ ASSERT(size == src0.SizeInBytes());
+
+ // When pushing multiple registers, the store order is chosen such that
+ // Push(a, b) is equivalent to Push(a) followed by Push(b).
+ switch (count) {
+ case 1:
+ ASSERT(src1.IsNone() && src2.IsNone() && src3.IsNone());
+ str(src0, MemOperand(StackPointer(), -1 * size, PreIndex));
+ break;
+ case 2:
+ ASSERT(src2.IsNone() && src3.IsNone());
+ stp(src1, src0, MemOperand(StackPointer(), -2 * size, PreIndex));
+ break;
+ case 3:
+ ASSERT(src3.IsNone());
+ stp(src2, src1, MemOperand(StackPointer(), -3 * size, PreIndex));
+ str(src0, MemOperand(StackPointer(), 2 * size));
+ break;
+ case 4:
+ // Skip over 4 * size, then fill in the gap. This allows four W registers
+ // to be pushed using csp, whilst maintaining 16-byte alignment for csp
+ // at all times.
+ stp(src3, src2, MemOperand(StackPointer(), -4 * size, PreIndex));
+ stp(src1, src0, MemOperand(StackPointer(), 2 * size));
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
+
+
+// Emits the raw loads for a pop of 'count' registers; the inverse of
+// PushHelper.
+void MacroAssembler::PopHelper(int count, int size,
+ const CPURegister& dst0,
+ const CPURegister& dst1,
+ const CPURegister& dst2,
+ const CPURegister& dst3) {
+ // Ensure that we don't unintentionally modify scratch or debug registers.
+ InstructionAccurateScope scope(this);
+
+ ASSERT(AreSameSizeAndType(dst0, dst1, dst2, dst3));
+ ASSERT(size == dst0.SizeInBytes());
+
+ // When popping multiple registers, the load order is chosen such that
+ // Pop(a, b) is equivalent to Pop(a) followed by Pop(b).
+ switch (count) {
+ case 1:
+ ASSERT(dst1.IsNone() && dst2.IsNone() && dst3.IsNone());
+ ldr(dst0, MemOperand(StackPointer(), 1 * size, PostIndex));
+ break;
+ case 2:
+ ASSERT(dst2.IsNone() && dst3.IsNone());
+ ldp(dst0, dst1, MemOperand(StackPointer(), 2 * size, PostIndex));
+ break;
+ case 3:
+ ASSERT(dst3.IsNone());
+ ldr(dst2, MemOperand(StackPointer(), 2 * size));
+ ldp(dst0, dst1, MemOperand(StackPointer(), 3 * size, PostIndex));
+ break;
+ case 4:
+ // Load the higher addresses first, then load the lower addresses and
+ // skip the whole block in the second instruction. This allows four W
+ // registers to be popped using csp, whilst maintaining 16-byte alignment
+ // for csp at all times.
+ ldp(dst2, dst3, MemOperand(StackPointer(), 2 * size));
+ ldp(dst0, dst1, MemOperand(StackPointer(), 4 * size, PostIndex));
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
+
+
+// Prepares the stack before pushing 'count' registers of 'size' bytes:
+// checks csp alignment, or bumps csp below the JavaScript stack pointer.
+void MacroAssembler::PrepareForPush(int count, int size) {
+ // TODO(jbramley): Use AssertStackConsistency here, if possible. See the
+ // AssertStackConsistency for details of why we can't at the moment.
+ if (csp.Is(StackPointer())) {
+ // If the current stack pointer is csp, then it must be aligned to 16 bytes
+ // on entry and the total size of the specified registers must also be a
+ // multiple of 16 bytes.
+ ASSERT((count * size) % 16 == 0);
+ } else {
+ // Even if the current stack pointer is not the system stack pointer (csp),
+ // the system stack pointer will still be modified in order to comply with
+ // ABI rules about accessing memory below the system stack pointer.
+ BumpSystemStackPointer(count * size);
+ }
+}
+
+
+// Checks stack invariants before popping 'count' registers of 'size' bytes.
+void MacroAssembler::PrepareForPop(int count, int size) {
+ AssertStackConsistency();
+ if (csp.Is(StackPointer())) {
+ // If the current stack pointer is csp, then it must be aligned to 16 bytes
+ // on entry and the total size of the specified registers must also be a
+ // multiple of 16 bytes.
+ ASSERT((count * size) % 16 == 0);
+ }
+}
+
+
+// Stores src at the given byte offset above the stack pointer, without
+// moving the stack pointer itself.
+void MacroAssembler::Poke(const CPURegister& src, const Operand& offset) {
+ if (offset.IsImmediate()) {
+ ASSERT(offset.immediate() >= 0);
+ } else if (emit_debug_code()) {
+ // Cmp(xzr, offset) followed by Check(le) asserts 0 <= offset.
+ Cmp(xzr, offset);
+ Check(le, kStackAccessBelowStackPointer);
+ }
+
+ Str(src, MemOperand(StackPointer(), offset));
+}
+
+
+// Loads dst from the given byte offset above the stack pointer.
+void MacroAssembler::Peek(const CPURegister& dst, const Operand& offset) {
+ if (offset.IsImmediate()) {
+ ASSERT(offset.immediate() >= 0);
+ } else if (emit_debug_code()) {
+ // Cmp(xzr, offset) followed by Check(le) asserts 0 <= offset.
+ Cmp(xzr, offset);
+ Check(le, kStackAccessBelowStackPointer);
+ }
+
+ Ldr(dst, MemOperand(StackPointer(), offset));
+}
+
+
+// Stores a register pair at the given stack offset with a single stp.
+void MacroAssembler::PokePair(const CPURegister& src1,
+ const CPURegister& src2,
+ int offset) {
+ ASSERT(AreSameSizeAndType(src1, src2));
+ ASSERT((offset >= 0) && ((offset % src1.SizeInBytes()) == 0));
+ Stp(src1, src2, MemOperand(StackPointer(), offset));
+}
+
+
+// Loads a register pair from the given stack offset with a single ldp.
+void MacroAssembler::PeekPair(const CPURegister& dst1,
+ const CPURegister& dst2,
+ int offset) {
+ ASSERT(AreSameSizeAndType(dst1, dst2));
+ ASSERT((offset >= 0) && ((offset % dst1.SizeInBytes()) == 0));
+ Ldp(dst1, dst2, MemOperand(StackPointer(), offset));
+}
+
+
+// Saves the callee-saved registers used here (x19-x30 and d8-d15) on csp.
+// The store order is mirrored exactly by PopCalleeSavedRegisters.
+void MacroAssembler::PushCalleeSavedRegisters() {
+ // Ensure that the macro-assembler doesn't use any scratch registers.
+ InstructionAccurateScope scope(this);
+
+ // This method must not be called unless the current stack pointer is the
+ // system stack pointer (csp).
+ ASSERT(csp.Is(StackPointer()));
+
+ // Pre-index writeback: each stp claims 16 bytes below the previous pair.
+ MemOperand tos(csp, -2 * kXRegSizeInBytes, PreIndex);
+
+ stp(d14, d15, tos);
+ stp(d12, d13, tos);
+ stp(d10, d11, tos);
+ stp(d8, d9, tos);
+
+ stp(x29, x30, tos);
+ stp(x27, x28, tos); // x28 = jssp
+ stp(x25, x26, tos);
+ stp(x23, x24, tos);
+ stp(x21, x22, tos);
+ stp(x19, x20, tos);
+}
+
+
+// Restores the registers saved by PushCalleeSavedRegisters, in reverse order.
+void MacroAssembler::PopCalleeSavedRegisters() {
+ // Ensure that the macro-assembler doesn't use any scratch registers.
+ InstructionAccurateScope scope(this);
+
+ // This method must not be called unless the current stack pointer is the
+ // system stack pointer (csp).
+ ASSERT(csp.Is(StackPointer()));
+
+ // Post-index writeback: each ldp releases 16 bytes.
+ MemOperand tos(csp, 2 * kXRegSizeInBytes, PostIndex);
+
+ ldp(x19, x20, tos);
+ ldp(x21, x22, tos);
+ ldp(x23, x24, tos);
+ ldp(x25, x26, tos);
+ ldp(x27, x28, tos); // x28 = jssp
+ ldp(x29, x30, tos);
+
+ ldp(d8, d9, tos);
+ ldp(d10, d11, tos);
+ ldp(d12, d13, tos);
+ ldp(d14, d15, tos);
+}
+
+
+// Debug-mode check that the stack pointers are consistent: when a register
+// other than csp is the stack pointer, csp must not be above it, because
+// memory below csp may be clobbered (e.g. by signal handlers).
+// Fix: the outer condition previously also required !csp.Is(StackPointer()),
+// which made the inner csp branch unreachable dead code. Behaviour is
+// unchanged because that branch is an empty TODO.
+void MacroAssembler::AssertStackConsistency() {
+ if (emit_debug_code()) {
+ if (csp.Is(StackPointer())) {
+ // TODO(jbramley): Check for csp alignment if it is the stack pointer.
+ } else {
+ // TODO(jbramley): Currently we cannot use this assertion in Push because
+ // some calling code assumes that the flags are preserved. For an example,
+ // look at Builtins::Generate_ArgumentsAdaptorTrampoline.
+ Cmp(csp, StackPointer());
+ Check(ls, kTheCurrentStackPointerIsBelowCsp);
+ }
+ }
+}
+
+
+// Loads a root value from the root array (addressed by the 'root' register).
+void MacroAssembler::LoadRoot(Register destination,
+ Heap::RootListIndex index) {
+ // TODO(jbramley): Most root values are constants, and can be synthesized
+ // without a load. Refer to the ARM back end for details.
+ Ldr(destination, MemOperand(root, index << kPointerSizeLog2));
+}
+
+
+// Stores a value into the root array slot for 'index'.
+void MacroAssembler::StoreRoot(Register source,
+ Heap::RootListIndex index) {
+ Str(source, MemOperand(root, index << kPointerSizeLog2));
+}
+
+
+// Loads the true and false roots as a pair; relies on their adjacency in the
+// root list (checked by the STATIC_ASSERT).
+void MacroAssembler::LoadTrueFalseRoots(Register true_root,
+ Register false_root) {
+ STATIC_ASSERT((Heap::kTrueValueRootIndex + 1) == Heap::kFalseValueRootIndex);
+ Ldp(true_root, false_root,
+ MemOperand(root, Heap::kTrueValueRootIndex << kPointerSizeLog2));
+}
+
+
+// Loads a heap object, indirecting through a cell for new-space objects so
+// the GC can relocate them.
+void MacroAssembler::LoadHeapObject(Register result,
+ Handle<HeapObject> object) {
+ AllowDeferredHandleDereference using_raw_address;
+ if (isolate()->heap()->InNewSpace(*object)) {
+ Handle<Cell> cell = isolate()->factory()->NewCell(object);
+ Mov(result, Operand(cell));
+ Ldr(result, FieldMemOperand(result, Cell::kValueOffset));
+ } else {
+ Mov(result, Operand(object));
+ }
+}
+
+
+// Loads the descriptor array of 'map' into 'descriptors'.
+void MacroAssembler::LoadInstanceDescriptors(Register map,
+ Register descriptors) {
+ Ldr(descriptors, FieldMemOperand(map, Map::kDescriptorsOffset));
+}
+
+
+// Extracts the number-of-own-descriptors field from a map's bit field 3.
+void MacroAssembler::NumberOfOwnDescriptors(Register dst, Register map) {
+ Ldr(dst, FieldMemOperand(map, Map::kBitField3Offset));
+ DecodeField<Map::NumberOfOwnDescriptorsBits>(dst);
+}
+
+
+// Loads the enum-cache length of 'map' as an untagged integer.
+void MacroAssembler::EnumLengthUntagged(Register dst, Register map) {
+ STATIC_ASSERT(Map::EnumLengthBits::kShift == 0);
+ Ldrsw(dst, UntagSmiFieldMemOperand(map, Map::kBitField3Offset));
+ And(dst, dst, Map::EnumLengthBits::kMask);
+}
+
+
+// Loads the enum-cache length of 'map' as a smi.
+void MacroAssembler::EnumLengthSmi(Register dst, Register map) {
+ STATIC_ASSERT(Map::EnumLengthBits::kShift == 0);
+ Ldr(dst, FieldMemOperand(map, Map::kBitField3Offset));
+ And(dst, dst, Operand(Smi::FromInt(Map::EnumLengthBits::kMask)));
+}
+
+
+// Walks the prototype chain from 'object' to 'null_value', checking that
+// every map has a valid enum cache (empty beyond the receiver) and that no
+// object on the chain has elements. Jumps to call_runtime if any check
+// fails, i.e. when for-in must take the slow path.
+void MacroAssembler::CheckEnumCache(Register object,
+ Register null_value,
+ Register scratch0,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Label* call_runtime) {
+ ASSERT(!AreAliased(object, null_value, scratch0, scratch1, scratch2,
+ scratch3));
+
+ Register empty_fixed_array_value = scratch0;
+ Register current_object = scratch1;
+
+ LoadRoot(empty_fixed_array_value, Heap::kEmptyFixedArrayRootIndex);
+ Label next, start;
+
+ Mov(current_object, object);
+
+ // Check if the enum length field is properly initialized, indicating that
+ // there is an enum cache.
+ Register map = scratch2;
+ Register enum_length = scratch3;
+ Ldr(map, FieldMemOperand(current_object, HeapObject::kMapOffset));
+
+ EnumLengthUntagged(enum_length, map);
+ Cmp(enum_length, kInvalidEnumCacheSentinel);
+ B(eq, call_runtime);
+
+ B(&start);
+
+ Bind(&next);
+ Ldr(map, FieldMemOperand(current_object, HeapObject::kMapOffset));
+
+ // For all objects but the receiver, check that the cache is empty.
+ EnumLengthUntagged(enum_length, map);
+ Cbnz(enum_length, call_runtime);
+
+ Bind(&start);
+
+ // Check that there are no elements. Register current_object contains the
+ // current JS object we've reached through the prototype chain.
+ Label no_elements;
+ Ldr(current_object, FieldMemOperand(current_object,
+ JSObject::kElementsOffset));
+ Cmp(current_object, empty_fixed_array_value);
+ B(eq, &no_elements);
+
+ // Second chance, the object may be using the empty slow element dictionary.
+ CompareRoot(current_object, Heap::kEmptySlowElementDictionaryRootIndex);
+ B(ne, call_runtime);
+
+ Bind(&no_elements);
+ // Advance to the prototype; stop when the chain terminates at null.
+ Ldr(current_object, FieldMemOperand(map, Map::kPrototypeOffset));
+ Cmp(current_object, null_value);
+ B(ne, &next);
+}
+
+
+// Tests whether an AllocationMemento immediately follows 'receiver' (a
+// JSArray) in new space. Jumps to no_memento_found when the candidate
+// address lies outside the new-space allocation window; otherwise leaves the
+// flags of the final map comparison (eq when a memento map is present) for
+// the caller to branch on.
+// NOTE(review): the address comparisons use signed lt/gt; unsigned lo/hi
+// would be more robust -- confirm new-space addresses can never set the sign
+// bit on this platform.
+void MacroAssembler::TestJSArrayForAllocationMemento(Register receiver,
+ Register scratch1,
+ Register scratch2,
+ Label* no_memento_found) {
+ ExternalReference new_space_start =
+ ExternalReference::new_space_start(isolate());
+ ExternalReference new_space_allocation_top =
+ ExternalReference::new_space_allocation_top_address(isolate());
+
+ // Candidate address of the word just past the memento.
+ Add(scratch1, receiver,
+ JSArray::kSize + AllocationMemento::kSize - kHeapObjectTag);
+ Cmp(scratch1, Operand(new_space_start));
+ B(lt, no_memento_found);
+
+ Mov(scratch2, Operand(new_space_allocation_top));
+ Ldr(scratch2, MemOperand(scratch2));
+ Cmp(scratch1, scratch2);
+ B(gt, no_memento_found);
+
+ // Compare the map of the candidate memento; flags are left for the caller.
+ Ldr(scratch1, MemOperand(scratch1, -AllocationMemento::kSize));
+ Cmp(scratch1,
+ Operand(isolate()->factory()->allocation_memento_map()));
+}
+
+
+// Computes the handler entry point from a StackHandler's code object and
+// state word, then jumps to it. The exception value must already be in x0.
+void MacroAssembler::JumpToHandlerEntry(Register exception,
+ Register object,
+ Register state,
+ Register scratch1,
+ Register scratch2) {
+ // Handler expects argument in x0.
+ ASSERT(exception.Is(x0));
+
+ // Compute the handler entry address and jump to it. The handler table is
+ // a fixed array of (smi-tagged) code offsets.
+ Ldr(scratch1, FieldMemOperand(object, Code::kHandlerTableOffset));
+ Add(scratch1, scratch1, FixedArray::kHeaderSize - kHeapObjectTag);
+ STATIC_ASSERT(StackHandler::kKindWidth < kPointerSizeLog2);
+ Lsr(scratch2, state, StackHandler::kKindWidth);
+ Ldr(scratch2, MemOperand(scratch1, scratch2, LSL, kPointerSizeLog2));
+ Add(scratch1, object, Code::kHeaderSize - kHeapObjectTag);
+ Add(scratch1, scratch1, Operand::UntagSmi(scratch2));
+ Br(scratch1);
+}
+
+
+// Branches to 'branch' if 'object' is (cond == eq), or is not (cond == ne),
+// in the new space.
+void MacroAssembler::InNewSpace(Register object,
+ Condition cond,
+ Label* branch) {
+ ASSERT(cond == eq || cond == ne);
+ // Use Tmp1() to have a different destination register, as Tmp0() will be used
+ // for relocation.
+ And(Tmp1(), object, Operand(ExternalReference::new_space_mask(isolate())));
+ Cmp(Tmp1(), Operand(ExternalReference::new_space_start(isolate())));
+ B(cond, branch);
+}
+
+
+// Unwinds jssp to the topmost stack handler, restores the handler's context
+// and frame pointer, and jumps to its handler entry with the exception in
+// x0.
+// Fix: the label references were mis-encoded as U+00AC (mojibake of the
+// '&not' HTML entity); restored to '&not_js_frame'.
+void MacroAssembler::Throw(Register value,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Register scratch4) {
+ // Adjust this code if not the case.
+ STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
+ STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
+
+ // The handler expects the exception in x0.
+ ASSERT(value.Is(x0));
+
+ // Drop the stack pointer to the top of the top handler.
+ ASSERT(jssp.Is(StackPointer()));
+ Mov(scratch1, Operand(ExternalReference(Isolate::kHandlerAddress,
+ isolate())));
+ Ldr(jssp, MemOperand(scratch1));
+ // Restore the next handler.
+ Pop(scratch2);
+ Str(scratch2, MemOperand(scratch1));
+
+ // Get the code object and state. Restore the context and frame pointer.
+ Register object = scratch1;
+ Register state = scratch2;
+ Pop(object, state, cp, fp);
+
+ // If the handler is a JS frame, restore the context to the frame.
+ // (kind == ENTRY) == (fp == 0) == (cp == 0), so we could test either fp
+ // or cp.
+ Label not_js_frame;
+ Cbz(cp, &not_js_frame);
+ Str(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ Bind(&not_js_frame);
+
+ JumpToHandlerEntry(value, object, state, scratch3, scratch4);
+}
+
+
+// Unwinds the handler chain to the topmost JS_ENTRY handler, skipping any
+// intermediate handlers, and jumps to it with the exception in x0. Used for
+// exceptions that JavaScript code must not catch (e.g. termination).
+void MacroAssembler::ThrowUncatchable(Register value,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Register scratch4) {
+ // Adjust this code if not the case.
+ STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
+
+ // The handler expects the exception in x0.
+ ASSERT(value.Is(x0));
+
+ // Drop the stack pointer to the top of the top stack handler.
+ ASSERT(jssp.Is(StackPointer()));
+ Mov(scratch1, Operand(ExternalReference(Isolate::kHandlerAddress,
+ isolate())));
+ Ldr(jssp, MemOperand(scratch1));
+
+ // Unwind the handlers until the ENTRY handler is found.
+ Label fetch_next, check_kind;
+ B(&check_kind);
+ Bind(&fetch_next);
+ Peek(jssp, StackHandlerConstants::kNextOffset);
+
+ Bind(&check_kind);
+ STATIC_ASSERT(StackHandler::JS_ENTRY == 0);
+ Peek(scratch2, StackHandlerConstants::kStateOffset);
+ TestAndBranchIfAnySet(scratch2, StackHandler::KindField::kMask, &fetch_next);
+
+ // Set the top handler address to next handler past the top ENTRY handler.
+ Pop(scratch2);
+ Str(scratch2, MemOperand(scratch1));
+
+ // Get the code object and state. Clear the context and frame pointer (0 was
+ // saved in the handler).
+ Register object = scratch1;
+ Register state = scratch2;
+ Pop(object, state, cp, fp);
+
+ JumpToHandlerEntry(value, object, state, scratch3, scratch4);
+}
+
+
+// Throws a bailout reason as a JS exception via Runtime::kThrowMessage.
+// Does not return.
+void MacroAssembler::Throw(BailoutReason reason) {
+ Label throw_start;
+ Bind(&throw_start);
+#ifdef DEBUG
+ const char* msg = GetBailoutReason(reason);
+ RecordComment("Throw message: ");
+ RecordComment((msg != NULL) ? msg : "UNKNOWN");
+#endif
+
+ Mov(x0, Operand(Smi::FromInt(reason)));
+ Push(x0);
+
+ // Disable stub call restrictions to always allow calls to throw.
+ if (!has_frame_) {
+ // We don't actually want to generate a pile of code for this, so just
+ // claim there is a stack frame, without generating one.
+ FrameScope scope(this, StackFrame::NONE);
+ CallRuntime(Runtime::kThrowMessage, 1);
+ } else {
+ CallRuntime(Runtime::kThrowMessage, 1);
+ }
+ // ThrowMessage should not return here.
+ Unreachable();
+}
+
+
+// Throws 'reason' if condition cc holds.
+void MacroAssembler::ThrowIf(Condition cc, BailoutReason reason) {
+ Label ok;
+ B(InvertCondition(cc), &ok);
+ Throw(reason);
+ Bind(&ok);
+}
+
+
+// Throws 'reason' if 'value' is a smi.
+void MacroAssembler::ThrowIfSmi(const Register& value, BailoutReason reason) {
+ Label ok;
+ JumpIfNotSmi(value, &ok);
+ Throw(reason);
+ Bind(&ok);
+}
+
+
+// In-place absolute value of a smi; branches to 'slow' when the result is
+// not representable (see Abs).
+void MacroAssembler::SmiAbs(const Register& smi, Label* slow) {
+ ASSERT(smi.Is64Bits());
+ Abs(smi, smi, slow);
+}
+
+
+// Debug-mode check that 'object' is a smi; aborts with 'reason' otherwise.
+void MacroAssembler::AssertSmi(Register object, BailoutReason reason) {
+ if (emit_debug_code()) {
+ STATIC_ASSERT(kSmiTag == 0);
+ Tst(object, kSmiTagMask);
+ Check(eq, reason);
+ }
+}
+
+
+// Debug-mode check that 'object' is not a smi; aborts with 'reason'
+// otherwise.
+void MacroAssembler::AssertNotSmi(Register object, BailoutReason reason) {
+ if (emit_debug_code()) {
+ STATIC_ASSERT(kSmiTag == 0);
+ Tst(object, kSmiTagMask);
+ Check(ne, reason);
+ }
+}
+
+
+// Debug-mode check that 'object' is a name (instance type <= LAST_NAME_TYPE
+// and not a smi).
+// Fix: the label references were mis-encoded as U+00AC (mojibake of the
+// '&not' HTML entity); restored to '&not_smi'.
+void MacroAssembler::AssertName(Register object) {
+ if (emit_debug_code()) {
+ STATIC_ASSERT(kSmiTag == 0);
+ // TODO(jbramley): Add AbortIfSmi and related functions.
+ Label not_smi;
+ JumpIfNotSmi(object, &not_smi);
+ Abort(kOperandIsASmiAndNotAName);
+ Bind(&not_smi);
+
+ Ldr(Tmp1(), FieldMemOperand(object, HeapObject::kMapOffset));
+ CompareInstanceType(Tmp1(), Tmp1(), LAST_NAME_TYPE);
+ Check(ls, kOperandIsNotAName);
+ }
+}
+
+
+// Debug-mode check that 'object' is a string (instance type below
+// FIRST_NONSTRING_TYPE and not a smi).
+void MacroAssembler::AssertString(Register object) {
+ if (emit_debug_code()) {
+ Register temp = Tmp1();
+ STATIC_ASSERT(kSmiTag == 0);
+ Tst(object, kSmiTagMask);
+ Check(ne, kOperandIsASmiAndNotAString);
+ Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset));
+ CompareInstanceType(temp, temp, FIRST_NONSTRING_TYPE);
+ Check(lo, kOperandIsNotAString);
+ }
+}
+
+
+// Calls a code stub, recording 'ast_id' for type feedback.
+void MacroAssembler::CallStub(CodeStub* stub, TypeFeedbackId ast_id) {
+ ASSERT(AllowThisStubCall(stub)); // Stub calls are not allowed in some stubs.
+ Call(stub->GetCode(isolate()), RelocInfo::CODE_TARGET, ast_id);
+}
+
+
+// Tail-calls a code stub (jump; no return address is pushed).
+void MacroAssembler::TailCallStub(CodeStub* stub) {
+ Jump(stub->GetCode(isolate()), RelocInfo::CODE_TARGET);
+}
+
+
+// Calls a C++ runtime function through the CEntryStub.
+void MacroAssembler::CallRuntime(const Runtime::Function* f,
+ int num_arguments,
+ SaveFPRegsMode save_doubles) {
+ // All arguments must be on the stack before this function is called.
+ // x0 holds the return value after the call.
+
+ // Check that the number of arguments matches what the function expects.
+ // If f->nargs is -1, the function can accept a variable number of arguments.
+ if (f->nargs >= 0 && f->nargs != num_arguments) {
+ // Illegal operation: drop the stack arguments and return undefined.
+ if (num_arguments > 0) {
+ Drop(num_arguments);
+ }
+ LoadRoot(x0, Heap::kUndefinedValueRootIndex);
+ return;
+ }
+
+ // Place the necessary arguments.
+ Mov(x0, num_arguments);
+ Mov(x1, Operand(ExternalReference(f, isolate())));
+
+ CEntryStub stub(1, save_doubles);
+ CallStub(&stub);
+}
+
+
+// Returns the byte distance between two external references.
+static int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
+ return ref0.address() - ref1.address();
+}
+
+
+// Calls an API function through the DirectCEntry stub and returns to the
+// JS caller, dropping 'stack_space' stack slots. A HandleScope level is
+// entered before the call and restored afterwards; handle-scope extensions
+// and scheduled exceptions are dealt with on the slow paths at the bottom.
+// The result is loaded from 'return_value_operand'; the context is restored
+// from '*context_restore_operand' when it is non-NULL.
+void MacroAssembler::CallApiFunctionAndReturn(
+    Register function_address,
+    ExternalReference thunk_ref,
+    int stack_space,
+    int spill_offset,
+    MemOperand return_value_operand,
+    MemOperand* context_restore_operand) {
+  ASM_LOCATION("CallApiFunctionAndReturn");
+  ExternalReference next_address =
+      ExternalReference::handle_scope_next_address(isolate());
+  // The limit and level fields are addressed relative to the 'next' field,
+  // so a single base register (handle_scope_base) suffices.
+  const int kNextOffset = 0;
+  const int kLimitOffset = AddressOffset(
+      ExternalReference::handle_scope_limit_address(isolate()),
+      next_address);
+  const int kLevelOffset = AddressOffset(
+      ExternalReference::handle_scope_level_address(isolate()),
+      next_address);
+
+  ASSERT(function_address.is(x1) || function_address.is(x2));
+
+  // If the CPU profiler is active, call through the profiling thunk (x3)
+  // instead of calling the API function directly.
+  Label profiler_disabled;
+  Label end_profiler_check;
+  bool* is_profiling_flag = isolate()->cpu_profiler()->is_profiling_address();
+  STATIC_ASSERT(sizeof(*is_profiling_flag) == 1);
+  Mov(x10, reinterpret_cast<uintptr_t>(is_profiling_flag));
+  Ldrb(w10, MemOperand(x10));
+  Cbz(w10, &profiler_disabled);
+  Mov(x3, Operand(thunk_ref));
+  B(&end_profiler_check);
+
+  Bind(&profiler_disabled);
+  Mov(x3, function_address);
+  Bind(&end_profiler_check);
+
+  // Save the callee-save registers we are going to use.
+  // TODO(all): Is this necessary? ARM doesn't do it.
+  STATIC_ASSERT(kCallApiFunctionSpillSpace == 4);
+  Poke(x19, (spill_offset + 0) * kXRegSizeInBytes);
+  Poke(x20, (spill_offset + 1) * kXRegSizeInBytes);
+  Poke(x21, (spill_offset + 2) * kXRegSizeInBytes);
+  Poke(x22, (spill_offset + 3) * kXRegSizeInBytes);
+
+  // Allocate HandleScope in callee-save registers.
+  // We will need to restore the HandleScope after the call to the API function,
+  // by allocating it in callee-save registers they will be preserved by C code.
+  Register handle_scope_base = x22;
+  Register next_address_reg = x19;
+  Register limit_reg = x20;
+  Register level_reg = w21;
+
+  Mov(handle_scope_base, Operand(next_address));
+  Ldr(next_address_reg, MemOperand(handle_scope_base, kNextOffset));
+  Ldr(limit_reg, MemOperand(handle_scope_base, kLimitOffset));
+  Ldr(level_reg, MemOperand(handle_scope_base, kLevelOffset));
+  Add(level_reg, level_reg, 1);
+  Str(level_reg, MemOperand(handle_scope_base, kLevelOffset));
+
+  if (FLAG_log_timer_events) {
+    FrameScope frame(this, StackFrame::MANUAL);
+    PushSafepointRegisters();
+    Mov(x0, Operand(ExternalReference::isolate_address(isolate())));
+    CallCFunction(ExternalReference::log_enter_external_function(isolate()), 1);
+    PopSafepointRegisters();
+  }
+
+  // Native call returns to the DirectCEntry stub which redirects to the
+  // return address pushed on stack (could have moved after GC).
+  // DirectCEntry stub itself is generated early and never moves.
+  DirectCEntryStub stub;
+  stub.GenerateCall(this, x3);
+
+  if (FLAG_log_timer_events) {
+    FrameScope frame(this, StackFrame::MANUAL);
+    PushSafepointRegisters();
+    Mov(x0, Operand(ExternalReference::isolate_address(isolate())));
+    CallCFunction(ExternalReference::log_leave_external_function(isolate()), 1);
+    PopSafepointRegisters();
+  }
+
+  Label promote_scheduled_exception;
+  Label exception_handled;
+  Label delete_allocated_handles;
+  Label leave_exit_frame;
+  Label return_value_loaded;
+
+  // Load value from ReturnValue.
+  Ldr(x0, return_value_operand);
+  Bind(&return_value_loaded);
+  // No more valid handles (the result handle was the last one). Restore
+  // previous handle scope.
+  Str(next_address_reg, MemOperand(handle_scope_base, kNextOffset));
+  if (emit_debug_code()) {
+    Ldr(w1, MemOperand(handle_scope_base, kLevelOffset));
+    Cmp(w1, level_reg);
+    Check(eq, kUnexpectedLevelAfterReturnFromApiCall);
+  }
+  Sub(level_reg, level_reg, 1);
+  Str(level_reg, MemOperand(handle_scope_base, kLevelOffset));
+  Ldr(x1, MemOperand(handle_scope_base, kLimitOffset));
+  Cmp(limit_reg, x1);
+  // If the limit moved, the API call allocated handle scope extensions that
+  // must be deleted before leaving the exit frame.
+  B(ne, &delete_allocated_handles);
+
+  Bind(&leave_exit_frame);
+  // Restore callee-saved registers.
+  Peek(x19, (spill_offset + 0) * kXRegSizeInBytes);
+  Peek(x20, (spill_offset + 1) * kXRegSizeInBytes);
+  Peek(x21, (spill_offset + 2) * kXRegSizeInBytes);
+  Peek(x22, (spill_offset + 3) * kXRegSizeInBytes);
+
+  // Check if the function scheduled an exception.
+  Mov(x5, Operand(ExternalReference::scheduled_exception_address(isolate())));
+  Ldr(x5, MemOperand(x5));
+  JumpIfNotRoot(x5, Heap::kTheHoleValueRootIndex, &promote_scheduled_exception);
+  Bind(&exception_handled);
+
+  bool restore_context = context_restore_operand != NULL;
+  if (restore_context) {
+    Ldr(cp, *context_restore_operand);
+  }
+
+  LeaveExitFrame(false, x1, !restore_context);
+  Drop(stack_space);
+  Ret();
+
+  Bind(&promote_scheduled_exception);
+  {
+    FrameScope frame(this, StackFrame::INTERNAL);
+    CallExternalReference(
+        ExternalReference(Runtime::kPromoteScheduledException, isolate()), 0);
+  }
+  B(&exception_handled);
+
+  // HandleScope limit has changed. Delete allocated extensions.
+  Bind(&delete_allocated_handles);
+  Str(limit_reg, MemOperand(handle_scope_base, kLimitOffset));
+  // Save the return value in a callee-save register.
+  Register saved_result = x19;
+  Mov(saved_result, x0);
+  Mov(x0, Operand(ExternalReference::isolate_address(isolate())));
+  CallCFunction(
+      ExternalReference::delete_handle_scope_extensions(isolate()), 1);
+  Mov(x0, saved_result);
+  B(&leave_exit_frame);
+}
+
+
+// Calls the C++ runtime function described by 'ext' through CEntryStub.
+// Per the CEntryStub contract: x0 holds the (stack) argument count and x1
+// the entry address of the runtime function.
+void MacroAssembler::CallExternalReference(const ExternalReference& ext,
+                                           int num_arguments) {
+  Mov(x0, num_arguments);
+  Mov(x1, Operand(ext));
+
+  CEntryStub stub(1);
+  CallStub(&stub);
+}
+
+
+// Tail-calls into the runtime: jumps to CEntryStub with x1 holding the
+// builtin's entry address. Callers must have set up x0 (argument count)
+// beforehand (see TailCallExternalReference).
+void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin) {
+  Mov(x1, Operand(builtin));
+  CEntryStub stub(1);
+  Jump(stub.GetCode(isolate()), RelocInfo::CODE_TARGET);
+}
+
+
+// Loads the JSFunction for the given JavaScript builtin into 'target'.
+void MacroAssembler::GetBuiltinFunction(Register target,
+                                        Builtins::JavaScript id) {
+  // Load the builtins object into target register.
+  Ldr(target, GlobalObjectMemOperand());
+  Ldr(target, FieldMemOperand(target, GlobalObject::kBuiltinsOffset));
+  // Load the JavaScript builtin function from the builtins object.
+  Ldr(target, FieldMemOperand(target,
+                              JSBuiltinsObject::OffsetOfFunctionWithId(id)));
+}
+
+
+// Loads the code entry point of the given builtin into 'target'. Clobbers
+// x1 with the builtin's JSFunction, so 'target' must not be x1.
+void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) {
+  ASSERT(!target.is(x1));
+  GetBuiltinFunction(x1, id);
+  // Load the code entry point from the builtins object.
+  Ldr(target, FieldMemOperand(x1, JSFunction::kCodeEntryOffset));
+}
+
+
+// Calls or tail-calls (per 'flag') a JavaScript builtin. Clobbers x1 and x2
+// (the builtin's function and code entry, via GetBuiltinEntry).
+void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
+                                   InvokeFlag flag,
+                                   const CallWrapper& call_wrapper) {
+  ASM_LOCATION("MacroAssembler::InvokeBuiltin");
+  // You can't call a builtin without a valid frame.
+  ASSERT(flag == JUMP_FUNCTION || has_frame());
+
+  GetBuiltinEntry(x2, id);
+  if (flag == CALL_FUNCTION) {
+    call_wrapper.BeforeCall(CallSize(x2));
+    Call(x2);
+    call_wrapper.AfterCall();
+  } else {
+    ASSERT(flag == JUMP_FUNCTION);
+    Jump(x2);
+  }
+}
+
+
+// Tail-calls a runtime routine with 'num_arguments' arguments already on
+// the stack. 'result_size' is accepted for interface parity but not used
+// in the code below.
+void MacroAssembler::TailCallExternalReference(const ExternalReference& ext,
+                                               int num_arguments,
+                                               int result_size) {
+  // TODO(1236192): Most runtime routines don't need the number of
+  // arguments passed in because it is constant. At some point we
+  // should remove this need and make the runtime routine entry code
+  // smarter.
+  Mov(x0, num_arguments);
+  JumpToExternalReference(ext);
+}
+
+
+// Convenience wrapper: resolves a Runtime::FunctionId to its
+// ExternalReference and tail-calls it.
+void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid,
+                                     int num_arguments,
+                                     int result_size) {
+  TailCallExternalReference(ExternalReference(fid, isolate()),
+                            num_arguments,
+                            result_size);
+}
+
+
+// Initializes the map, length and hash field of a freshly allocated string
+// object. 'length' is untagged; it is smi-tagged into scratch1 before being
+// stored into the length field.
+void MacroAssembler::InitializeNewString(Register string,
+                                         Register length,
+                                         Heap::RootListIndex map_index,
+                                         Register scratch1,
+                                         Register scratch2) {
+  ASSERT(!AreAliased(string, length, scratch1, scratch2));
+  LoadRoot(scratch2, map_index);
+  SmiTag(scratch1, length);
+  Str(scratch2, FieldMemOperand(string, HeapObject::kMapOffset));
+
+  // scratch2 is reused for the empty hash field once the map is stored.
+  Mov(scratch2, String::kEmptyHashField);
+  Str(scratch1, FieldMemOperand(string, String::kLengthOffset));
+  Str(scratch2, FieldMemOperand(string, String::kHashFieldOffset));
+}
+
+
+// Returns the stack alignment, in bytes, required when calling C functions.
+int MacroAssembler::ActivationFrameAlignment() {
+#if V8_HOST_ARCH_A64
+  // Running on the real platform. Use the alignment as mandated by the local
+  // environment.
+  // Note: This will break if we ever start generating snapshots on one ARM
+  // platform for another ARM platform with a different alignment.
+  return OS::ActivationFrameAlignment();
+#else  // V8_HOST_ARCH_A64
+  // If we are using the simulator then we should always align to the expected
+  // alignment. As the simulator is used to generate snapshots we do not know
+  // if the target platform will need alignment, so this is controlled from a
+  // flag.
+  return FLAG_sim_stack_alignment;
+#endif  // V8_HOST_ARCH_A64
+}
+
+
+// Calls a C function that takes only integer register arguments.
+void MacroAssembler::CallCFunction(ExternalReference function,
+                                   int num_of_reg_args) {
+  CallCFunction(function, num_of_reg_args, 0);
+}
+
+
+// Calls a C function identified by an ExternalReference. The address is
+// materialized into Tmp0() and the register variant does the actual call.
+void MacroAssembler::CallCFunction(ExternalReference function,
+                                   int num_of_reg_args,
+                                   int num_of_double_args) {
+  Mov(Tmp0(), Operand(function));
+  CallCFunction(Tmp0(), num_of_reg_args, num_of_double_args);
+}
+
+
+// Calls a C function whose address is in 'function'. Arguments must already
+// be placed per the AAPCS64 calling convention. If the current stack pointer
+// is not csp, an aligned csp is derived from it for the duration of the call
+// and the previous stack pointer is reinstated afterwards.
+void MacroAssembler::CallCFunction(Register function,
+                                   int num_of_reg_args,
+                                   int num_of_double_args) {
+  ASSERT(has_frame());
+  // We can pass 8 integer arguments in registers. If we need to pass more than
+  // that, we'll need to implement support for passing them on the stack.
+  ASSERT(num_of_reg_args <= 8);
+
+  // If we're passing doubles, we're limited to the following prototypes
+  // (defined by ExternalReference::Type):
+  //  BUILTIN_COMPARE_CALL: int f(double, double)
+  //  BUILTIN_FP_FP_CALL:   double f(double, double)
+  //  BUILTIN_FP_CALL:      double f(double)
+  //  BUILTIN_FP_INT_CALL:  double f(double, int)
+  if (num_of_double_args > 0) {
+    ASSERT(num_of_reg_args <= 1);
+    ASSERT((num_of_double_args + num_of_reg_args) <= 2);
+  }
+
+
+  // If the stack pointer is not csp, we need to derive an aligned csp from the
+  // current stack pointer.
+  const Register old_stack_pointer = StackPointer();
+  if (!csp.Is(old_stack_pointer)) {
+    AssertStackConsistency();
+
+    int sp_alignment = ActivationFrameAlignment();
+    // The ABI mandates at least 16-byte alignment.
+    ASSERT(sp_alignment >= 16);
+    ASSERT(IsPowerOf2(sp_alignment));
+
+    // The current stack pointer is a callee saved register, and is preserved
+    // across the call.
+    ASSERT(kCalleeSaved.IncludesAliasOf(old_stack_pointer));
+
+    // Align and synchronize the system stack pointer with jssp.
+    Bic(csp, old_stack_pointer, sp_alignment - 1);
+    SetStackPointer(csp);
+  }
+
+  // Call directly. The function called cannot cause a GC, or allow preemption,
+  // so the return address in the link register stays correct.
+  Call(function);
+
+  if (!csp.Is(old_stack_pointer)) {
+    if (emit_debug_code()) {
+      // Because the stack pointer must be aligned on a 16-byte boundary, the
+      // aligned csp can be up to 12 bytes below the jssp. This is the case
+      // where we only pushed one W register on top of an aligned jssp.
+      Register temp = Tmp1();
+      ASSERT(ActivationFrameAlignment() == 16);
+      Sub(temp, csp, old_stack_pointer);
+      // We want temp <= 0 && temp >= -12.
+      Cmp(temp, 0);
+      Ccmp(temp, -12, NFlag, le);
+      Check(ge, kTheStackWasCorruptedByMacroAssemblerCall);
+    }
+    SetStackPointer(old_stack_pointer);
+  }
+}
+
+
+// Unconditional jump helpers. The address-based variants materialize the
+// target into Tmp0() before branching through it.
+void MacroAssembler::Jump(Register target) {
+  Br(target);
+}
+
+
+void MacroAssembler::Jump(intptr_t target, RelocInfo::Mode rmode) {
+  Mov(Tmp0(), Operand(target, rmode));
+  Br(Tmp0());
+}
+
+
+void MacroAssembler::Jump(Address target, RelocInfo::Mode rmode) {
+  ASSERT(!RelocInfo::IsCodeTarget(rmode));
+  Jump(reinterpret_cast<intptr_t>(target), rmode);
+}
+
+
+void MacroAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode) {
+  ASSERT(RelocInfo::IsCodeTarget(rmode));
+  AllowDeferredHandleDereference embedding_raw_address;
+  Jump(reinterpret_cast<intptr_t>(code.location()), rmode);
+}
+
+
+// Call helpers. The constant pool is blocked for the duration of each call
+// sequence so its size matches the corresponding CallSize() exactly; debug
+// builds verify this with AssertSizeOfCodeGeneratedSince.
+void MacroAssembler::Call(Register target) {
+  BlockConstPoolScope scope(this);
+#ifdef DEBUG
+  Label start_call;
+  Bind(&start_call);
+#endif
+
+  Blr(target);
+
+#ifdef DEBUG
+  AssertSizeOfCodeGeneratedSince(&start_call, CallSize(target));
+#endif
+}
+
+
+void MacroAssembler::Call(Label* target) {
+  BlockConstPoolScope scope(this);
+#ifdef DEBUG
+  Label start_call;
+  Bind(&start_call);
+#endif
+
+  Bl(target);
+
+#ifdef DEBUG
+  AssertSizeOfCodeGeneratedSince(&start_call, CallSize(target));
+#endif
+}
+
+
+// MacroAssembler::CallSize is sensitive to changes in this function, as it
+// requires to know how many instructions are used to branch to the target.
+void MacroAssembler::Call(Address target, RelocInfo::Mode rmode) {
+  BlockConstPoolScope scope(this);
+#ifdef DEBUG
+  Label start_call;
+  Bind(&start_call);
+#endif
+  // Statement positions are expected to be recorded when the target
+  // address is loaded.
+  positions_recorder()->WriteRecordedPositions();
+
+  // Addresses always have 64 bits, so we shouldn't encounter NONE32.
+  ASSERT(rmode != RelocInfo::NONE32);
+
+  if (rmode == RelocInfo::NONE64) {
+    // No relocation needed: build the address with a fixed four-instruction
+    // movz/movk sequence so the call size is constant.
+    uint64_t imm = reinterpret_cast<uint64_t>(target);
+    movz(Tmp0(), (imm >> 0) & 0xffff, 0);
+    movk(Tmp0(), (imm >> 16) & 0xffff, 16);
+    movk(Tmp0(), (imm >> 32) & 0xffff, 32);
+    movk(Tmp0(), (imm >> 48) & 0xffff, 48);
+  } else {
+    LoadRelocated(Tmp0(), Operand(reinterpret_cast<intptr_t>(target), rmode));
+  }
+  Blr(Tmp0());
+#ifdef DEBUG
+  AssertSizeOfCodeGeneratedSince(&start_call, CallSize(target, rmode));
+#endif
+}
+
+
+// Calls a code object. If an AST id is supplied with a CODE_TARGET mode,
+// the id is recorded and the mode upgraded to CODE_TARGET_WITH_ID so type
+// feedback can be associated with the call site.
+void MacroAssembler::Call(Handle<Code> code,
+                          RelocInfo::Mode rmode,
+                          TypeFeedbackId ast_id) {
+#ifdef DEBUG
+  Label start_call;
+  Bind(&start_call);
+#endif
+
+  if ((rmode == RelocInfo::CODE_TARGET) && (!ast_id.IsNone())) {
+    SetRecordedAstId(ast_id);
+    rmode = RelocInfo::CODE_TARGET_WITH_ID;
+  }
+
+  AllowDeferredHandleDereference embedding_raw_address;
+  Call(reinterpret_cast<Address>(code.location()), rmode);
+
+#ifdef DEBUG
+  // Check the size of the code generated.
+  AssertSizeOfCodeGeneratedSince(&start_call, CallSize(code, rmode, ast_id));
+#endif
+}
+
+
+// CallSize overloads return the exact number of bytes emitted by the
+// corresponding Call() overload; debug builds assert the two stay in sync.
+int MacroAssembler::CallSize(Register target) {
+  USE(target);
+  return kInstructionSize;
+}
+
+
+int MacroAssembler::CallSize(Label* target) {
+  USE(target);
+  return kInstructionSize;
+}
+
+
+int MacroAssembler::CallSize(Address target, RelocInfo::Mode rmode) {
+  USE(target);
+
+  // Addresses always have 64 bits, so we shouldn't encounter NONE32.
+  ASSERT(rmode != RelocInfo::NONE32);
+
+  if (rmode == RelocInfo::NONE64) {
+    return kCallSizeWithoutRelocation;
+  } else {
+    return kCallSizeWithRelocation;
+  }
+}
+
+
+int MacroAssembler::CallSize(Handle<Code> code,
+                             RelocInfo::Mode rmode,
+                             TypeFeedbackId ast_id) {
+  USE(code);
+  USE(ast_id);
+
+  // Addresses always have 64 bits, so we shouldn't encounter NONE32.
+  ASSERT(rmode != RelocInfo::NONE32);
+
+  if (rmode == RelocInfo::NONE64) {
+    return kCallSizeWithoutRelocation;
+  } else {
+    return kCallSizeWithRelocation;
+  }
+}
+
+
+
+
+
+// Compares 'object's map against the HeapNumber map and branches to
+// 'on_heap_number' and/or 'on_not_heap_number' (either may be NULL, but not
+// both). 'object' must not be a smi. Clobbers Tmp0(), and Tmp1() when no
+// heap_number_map register is supplied.
+void MacroAssembler::JumpForHeapNumber(Register object,
+                                       Register heap_number_map,
+                                       Label* on_heap_number,
+                                       Label* on_not_heap_number) {
+  ASSERT(on_heap_number || on_not_heap_number);
+  // Tmp0() is used as a scratch register.
+  ASSERT(!AreAliased(Tmp0(), heap_number_map));
+  AssertNotSmi(object);
+
+  // Load the HeapNumber map if it is not passed.
+  if (heap_number_map.Is(NoReg)) {
+    heap_number_map = Tmp1();
+    LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
+  } else {
+    // This assert clobbers Tmp0(), so do it before loading Tmp0() with the map.
+    AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
+  }
+
+  Ldr(Tmp0(), FieldMemOperand(object, HeapObject::kMapOffset));
+  Cmp(Tmp0(), heap_number_map);
+
+  if (on_heap_number) {
+    B(eq, on_heap_number);
+  }
+  if (on_not_heap_number) {
+    B(ne, on_not_heap_number);
+  }
+}
+
+
+// One-sided wrappers around JumpForHeapNumber.
+void MacroAssembler::JumpIfHeapNumber(Register object,
+                                      Label* on_heap_number,
+                                      Register heap_number_map) {
+  JumpForHeapNumber(object,
+                    heap_number_map,
+                    on_heap_number,
+                    NULL);
+}
+
+
+void MacroAssembler::JumpIfNotHeapNumber(Register object,
+                                         Label* on_not_heap_number,
+                                         Register heap_number_map) {
+  JumpForHeapNumber(object,
+                    heap_number_map,
+                    NULL,
+                    on_not_heap_number);
+}
+
+
+// Looks up 'object' (a smi or heap number) in the isolate's number-string
+// cache. On a hit, the cached string is left in 'result'; on a miss (or a
+// non-number input) control branches to 'not_found' with 'result' holding
+// the cache array, mirroring Heap::GetNumberStringCache's hashing scheme.
+void MacroAssembler::LookupNumberStringCache(Register object,
+                                             Register result,
+                                             Register scratch1,
+                                             Register scratch2,
+                                             Register scratch3,
+                                             Label* not_found) {
+  ASSERT(!AreAliased(object, result, scratch1, scratch2, scratch3));
+
+  // Use of registers. Register result is used as a temporary.
+  Register number_string_cache = result;
+  Register mask = scratch3;
+
+  // Load the number string cache.
+  LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex);
+
+  // Make the hash mask from the length of the number string cache. It
+  // contains two elements (number and string) for each cache entry.
+  Ldrsw(mask, UntagSmiFieldMemOperand(number_string_cache,
+                                      FixedArray::kLengthOffset));
+  Asr(mask, mask, 1);  // Divide length by two.
+  Sub(mask, mask, 1);  // Make mask.
+
+  // Calculate the entry in the number string cache. The hash value in the
+  // number string cache for smis is just the smi value, and the hash for
+  // doubles is the xor of the upper and lower words. See
+  // Heap::GetNumberStringCache.
+  Label is_smi;
+  Label load_result_from_cache;
+
+  JumpIfSmi(object, &is_smi);
+  CheckMap(object, scratch1, Heap::kHeapNumberMapRootIndex, not_found,
+           DONT_DO_SMI_CHECK);
+
+  STATIC_ASSERT(kDoubleSize == (kWRegSizeInBytes * 2));
+  Add(scratch1, object, HeapNumber::kValueOffset - kHeapObjectTag);
+  Ldp(scratch1.W(), scratch2.W(), MemOperand(scratch1));
+  Eor(scratch1, scratch1, scratch2);
+  And(scratch1, scratch1, mask);
+
+  // Calculate address of entry in string cache: each entry consists of two
+  // pointer sized fields.
+  Add(scratch1, number_string_cache,
+      Operand(scratch1, LSL, kPointerSizeLog2 + 1));
+
+  Register probe = mask;
+  Ldr(probe, FieldMemOperand(scratch1, FixedArray::kHeaderSize));
+  JumpIfSmi(probe, not_found);
+  // Compare the double values bit-exactly via the FPU; a mismatch means a
+  // cache miss.
+  Ldr(d0, FieldMemOperand(object, HeapNumber::kValueOffset));
+  Ldr(d1, FieldMemOperand(probe, HeapNumber::kValueOffset));
+  Fcmp(d0, d1);
+  B(ne, not_found);
+  B(&load_result_from_cache);
+
+  Bind(&is_smi);
+  Register scratch = scratch1;
+  And(scratch, mask, Operand::UntagSmi(object));
+  // Calculate address of entry in string cache: each entry consists
+  // of two pointer sized fields.
+  Add(scratch, number_string_cache,
+      Operand(scratch, LSL, kPointerSizeLog2 + 1));
+
+  // Check if the entry is the smi we are looking for.
+  Ldr(probe, FieldMemOperand(scratch, FixedArray::kHeaderSize));
+  Cmp(object, probe);
+  B(ne, not_found);
+
+  // Get the result from the cache.
+  Bind(&load_result_from_cache);
+  Ldr(result, FieldMemOperand(scratch, FixedArray::kHeaderSize + kPointerSize));
+  IncrementCounter(isolate()->counters()->number_to_string_native(), 1,
+                   scratch1, scratch2);
+}
+
+
+// Converts 'value' to an integer (round towards zero) in 'as_int' and
+// branches on whether the round-trip is exact. Either label may be NULL.
+// NOTE(review): -0.0 converts to 0, which compares equal to -0.0, so -0.0
+// takes the 'successful' path here; callers that must distinguish -0.0
+// presumably use JumpIfMinusZero separately — confirm at call sites.
+void MacroAssembler::TryConvertDoubleToInt(Register as_int,
+                                           FPRegister value,
+                                           FPRegister scratch_d,
+                                           Label* on_successful_conversion,
+                                           Label* on_failed_conversion) {
+  // Convert to an int and back again, then compare with the original value.
+  Fcvtzs(as_int, value);
+  Scvtf(scratch_d, as_int);
+  Fcmp(value, scratch_d);
+
+  if (on_successful_conversion) {
+    B(on_successful_conversion, eq);
+  }
+  if (on_failed_conversion) {
+    B(on_failed_conversion, ne);
+  }
+}
+
+
+// Branches to 'on_negative_zero' iff 'input' is exactly -0.0. Clobbers
+// Tmp0() with the raw bit pattern of the double.
+void MacroAssembler::JumpIfMinusZero(DoubleRegister input,
+                                     Label* on_negative_zero) {
+  // Floating point -0.0 is kMinInt as an integer, so subtracting 1 (cmp) will
+  // cause overflow.
+  Fmov(Tmp0(), input);
+  Cmp(Tmp0(), 1);
+  B(vs, on_negative_zero);
+}
+
+
+// Clamps a signed 32-bit value to the range [0..255] using two conditional
+// selects, avoiding branches. Clobbers WTmp0().
+void MacroAssembler::ClampInt32ToUint8(Register output, Register input) {
+  // Clamp the value to [0..255].
+  Cmp(input.W(), Operand(input.W(), UXTB));
+  // If input < input & 0xff, it must be < 0, so saturate to 0.
+  Csel(output.W(), wzr, input.W(), lt);
+  // Create a constant 0xff.
+  Mov(WTmp0(), 255);
+  // If input > input & 0xff, it must be > 255, so saturate to 255.
+  Csel(output.W(), WTmp0(), output.W(), gt);
+}
+
+
+// In-place variant of the clamp above.
+void MacroAssembler::ClampInt32ToUint8(Register in_out) {
+  ClampInt32ToUint8(in_out, in_out);
+}
+
+
+// Clamps a double to [0..255] with round-to-nearest, per the WebIDL
+// "[Clamp]" rules for PIXEL types.
+void MacroAssembler::ClampDoubleToUint8(Register output,
+                                        DoubleRegister input,
+                                        DoubleRegister dbl_scratch) {
+  // This conversion follows the WebIDL "[Clamp]" rules for PIXEL types:
+  // - Inputs lower than 0 (including -infinity) produce 0.
+  // - Inputs higher than 255 (including +infinity) produce 255.
+  // Also, it seems that PIXEL types use round-to-nearest rather than
+  // round-towards-zero.
+
+  // Squash +infinity before the conversion, since Fcvtnu will normally
+  // convert it to 0.
+  // NOTE(review): per the ARM ARM, FCVTNU saturates +infinity to the maximum
+  // unsigned value rather than 0 — the Fmin clamp below is needed either way,
+  // but confirm the rationale stated in the comment above.
+  Fmov(dbl_scratch, 255);
+  Fmin(dbl_scratch, dbl_scratch, input);
+
+  // Convert double to unsigned integer. Values less than zero become zero.
+  // Values greater than 255 have already been clamped to 255.
+  Fcvtnu(output, dbl_scratch);
+}
+
+
+// CopyFields helper: copies 'count' tagged fields from 'src' to 'dst' using
+// a tight ldp/stp loop. Requires count >= 2 and three scratch registers;
+// clobbers Tmp0() and Tmp1().
+void MacroAssembler::CopyFieldsLoopPairsHelper(Register dst,
+                                               Register src,
+                                               unsigned count,
+                                               Register scratch1,
+                                               Register scratch2,
+                                               Register scratch3) {
+  // Untag src and dst into scratch registers.
+  // Copy src->dst in a tight loop.
+  ASSERT(!AreAliased(dst, src, scratch1, scratch2, scratch3, Tmp0(), Tmp1()));
+  ASSERT(count >= 2);
+
+  const Register& remaining = scratch3;
+  Mov(remaining, count / 2);
+
+  // Only use the Assembler, so we can use Tmp0() and Tmp1().
+  InstructionAccurateScope scope(this);
+
+  const Register& dst_untagged = scratch1;
+  const Register& src_untagged = scratch2;
+  sub(dst_untagged, dst, kHeapObjectTag);
+  sub(src_untagged, src, kHeapObjectTag);
+
+  // Copy fields in pairs.
+  Label loop;
+  bind(&loop);
+  ldp(Tmp0(), Tmp1(), MemOperand(src_untagged, kXRegSizeInBytes * 2,
+                                 PostIndex));
+  stp(Tmp0(), Tmp1(), MemOperand(dst_untagged, kXRegSizeInBytes * 2,
+                                 PostIndex));
+  sub(remaining, remaining, 1);
+  cbnz(remaining, &loop);
+
+  // Handle the leftovers.
+  if (count & 1) {
+    ldr(Tmp0(), MemOperand(src_untagged));
+    str(Tmp0(), MemOperand(dst_untagged));
+  }
+}
+
+
+// CopyFields helper: copies 'count' tagged fields with a fully unrolled
+// sequence of ldp/stp pairs. Needs two scratch registers; clobbers Tmp0()
+// and Tmp1().
+void MacroAssembler::CopyFieldsUnrolledPairsHelper(Register dst,
+                                                   Register src,
+                                                   unsigned count,
+                                                   Register scratch1,
+                                                   Register scratch2) {
+  // Untag src and dst into scratch registers.
+  // Copy src->dst in an unrolled loop.
+  ASSERT(!AreAliased(dst, src, scratch1, scratch2, Tmp0(), Tmp1()));
+
+  // Only use the Assembler, so we can use Tmp0() and Tmp1().
+  InstructionAccurateScope scope(this);
+
+  const Register& dst_untagged = scratch1;
+  const Register& src_untagged = scratch2;
+  sub(dst_untagged, dst, kHeapObjectTag);
+  sub(src_untagged, src, kHeapObjectTag);
+
+  // Copy fields in pairs.
+  for (unsigned i = 0; i < count / 2; i++) {
+    ldp(Tmp0(), Tmp1(), MemOperand(src_untagged, kXRegSizeInBytes * 2,
+                                   PostIndex));
+    stp(Tmp0(), Tmp1(), MemOperand(dst_untagged, kXRegSizeInBytes * 2,
+                                   PostIndex));
+  }
+
+  // Handle the leftovers.
+  if (count & 1) {
+    ldr(Tmp0(), MemOperand(src_untagged));
+    str(Tmp0(), MemOperand(dst_untagged));
+  }
+}
+
+
+// CopyFields helper for when only one scratch register is available: copies
+// fields one at a time with single ldr/str pairs; Tmp1() doubles as the
+// untagged source pointer. Clobbers Tmp0() and Tmp1().
+void MacroAssembler::CopyFieldsUnrolledHelper(Register dst,
+                                              Register src,
+                                              unsigned count,
+                                              Register scratch1) {
+  // Untag src and dst into scratch registers.
+  // Copy src->dst in an unrolled loop.
+  ASSERT(!AreAliased(dst, src, scratch1, Tmp0(), Tmp1()));
+
+  // Only use the Assembler, so we can use Tmp0() and Tmp1().
+  InstructionAccurateScope scope(this);
+
+  const Register& dst_untagged = scratch1;
+  const Register& src_untagged = Tmp1();
+  sub(dst_untagged, dst, kHeapObjectTag);
+  sub(src_untagged, src, kHeapObjectTag);
+
+  // Copy fields one by one.
+  for (unsigned i = 0; i < count; i++) {
+    ldr(Tmp0(), MemOperand(src_untagged, kXRegSizeInBytes, PostIndex));
+    str(Tmp0(), MemOperand(dst_untagged, kXRegSizeInBytes, PostIndex));
+  }
+}
+
+
+// Copies 'count' tagged fields from 'src' to 'dst', choosing a loop or an
+// unrolled helper depending on 'count' and how many scratch registers
+// 'temps' provides (at least one is required). Clobbers Tmp0() and Tmp1().
+void MacroAssembler::CopyFields(Register dst, Register src, CPURegList temps,
+                                unsigned count) {
+  // One of two methods is used:
+  //
+  // For high 'count' values where many scratch registers are available:
+  //    Untag src and dst into scratch registers.
+  //    Copy src->dst in a tight loop.
+  //
+  // For low 'count' values or where few scratch registers are available:
+  //    Untag src and dst into scratch registers.
+  //    Copy src->dst in an unrolled loop.
+  //
+  // In both cases, fields are copied in pairs if possible, and left-overs are
+  // handled separately.
+  ASSERT(!temps.IncludesAliasOf(dst));
+  ASSERT(!temps.IncludesAliasOf(src));
+  ASSERT(!temps.IncludesAliasOf(Tmp0()));
+  ASSERT(!temps.IncludesAliasOf(Tmp1()));
+  ASSERT(!temps.IncludesAliasOf(xzr));
+  ASSERT(!AreAliased(dst, src, Tmp0(), Tmp1()));
+
+  if (emit_debug_code()) {
+    Cmp(dst, src);
+    Check(ne, kTheSourceAndDestinationAreTheSame);
+  }
+
+  // The value of 'count' at which a loop will be generated (if there are
+  // enough scratch registers).
+  static const unsigned kLoopThreshold = 8;
+
+  // Pop up to three scratch registers; invalid ones select a cheaper helper.
+  ASSERT(!temps.IsEmpty());
+  Register scratch1 = Register(temps.PopLowestIndex());
+  Register scratch2 = Register(temps.PopLowestIndex());
+  Register scratch3 = Register(temps.PopLowestIndex());
+
+  if (scratch3.IsValid() && (count >= kLoopThreshold)) {
+    CopyFieldsLoopPairsHelper(dst, src, count, scratch1, scratch2, scratch3);
+  } else if (scratch2.IsValid()) {
+    CopyFieldsUnrolledPairsHelper(dst, src, count, scratch1, scratch2);
+  } else if (scratch1.IsValid()) {
+    CopyFieldsUnrolledHelper(dst, src, count, scratch1);
+  } else {
+    UNREACHABLE();
+  }
+}
+
+
+// Copies 'length' bytes from 'src' to 'dst' one byte at a time. The buffers
+// must not overlap (debug-checked). 'src', 'dst' and 'length' are clobbered
+// (post-indexed / decremented); 'scratch' holds each byte in transit.
+void MacroAssembler::CopyBytes(Register dst,
+                               Register src,
+                               Register length,
+                               Register scratch,
+                               CopyHint hint) {
+  ASSERT(!AreAliased(src, dst, length, scratch));
+
+  // TODO(all): Implement a faster copy function, and use hint to determine
+  // which algorithm to use for copies.
+  if (emit_debug_code()) {
+    // Check copy length.
+    Cmp(length, 0);
+    Assert(ge, kUnexpectedNegativeValue);
+
+    // Check src and dst buffers don't overlap.
+    Add(scratch, src, length);  // Calculate end of src buffer.
+    Cmp(scratch, dst);
+    Add(scratch, dst, length);  // Calculate end of dst buffer.
+    Ccmp(scratch, src, ZFlag, gt);
+    Assert(le, kCopyBuffersOverlap);
+  }
+
+  Label loop, done;
+  Cbz(length, &done);
+
+  Bind(&loop);
+  Sub(length, length, 1);
+  Ldrb(scratch, MemOperand(src, 1, PostIndex));
+  Strb(scratch, MemOperand(dst, 1, PostIndex));
+  Cbnz(length, &loop);
+  Bind(&done);
+}
+
+
+// Stores 'filler' into every pointer-sized slot in [start_offset,
+// end_offset). 'start_offset' is advanced to 'end_offset' by the loop.
+void MacroAssembler::InitializeFieldsWithFiller(Register start_offset,
+                                                Register end_offset,
+                                                Register filler) {
+  Label loop, entry;
+  B(&entry);
+  Bind(&loop);
+  // TODO(all): consider using stp here.
+  Str(filler, MemOperand(start_offset, kPointerSize, PostIndex));
+  Bind(&entry);
+  Cmp(start_offset, end_offset);
+  B(lt, &loop);
+}
+
+
+// Branches to 'failure' unless both 'first' and 'second' are sequential
+// ASCII strings. Optionally smi-checks the inputs first (or, in debug code,
+// verifies the caller's claim that neither is a smi). Clobbers scratch1 and
+// scratch2 with the instance types.
+// Fix: the label references below were corrupted by HTML-entity decoding —
+// "&not_smi" had become "\xC2\xAC_smi" (U+00AC NOT SIGN), which does not
+// compile. Restored the intended "&not_smi".
+void MacroAssembler::JumpIfEitherIsNotSequentialAsciiStrings(
+    Register first,
+    Register second,
+    Register scratch1,
+    Register scratch2,
+    Label* failure,
+    SmiCheckType smi_check) {
+
+  if (smi_check == DO_SMI_CHECK) {
+    JumpIfEitherSmi(first, second, failure);
+  } else if (emit_debug_code()) {
+    ASSERT(smi_check == DONT_DO_SMI_CHECK);
+    Label not_smi;
+    JumpIfEitherSmi(first, second, NULL, &not_smi);
+
+    // At least one input is a smi, but the flags indicated a smi check wasn't
+    // needed.
+    Abort(kUnexpectedSmi);
+
+    Bind(&not_smi);
+  }
+
+  // Test that both first and second are sequential ASCII strings.
+  Ldr(scratch1, FieldMemOperand(first, HeapObject::kMapOffset));
+  Ldr(scratch2, FieldMemOperand(second, HeapObject::kMapOffset));
+  Ldrb(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
+  Ldrb(scratch2, FieldMemOperand(scratch2, Map::kInstanceTypeOffset));
+
+  JumpIfEitherInstanceTypeIsNotSequentialAscii(scratch1,
+                                               scratch2,
+                                               scratch1,
+                                               scratch2,
+                                               failure);
+}
+
+
+// Given two instance-type values, branches to 'failure' unless both denote
+// sequential ASCII strings. The two comparisons are fused with Ccmp so a
+// single conditional branch suffices.
+// NOTE(review): this uses ASCII_STRING_TYPE where the sibling helpers below
+// spell out kStringTag | kOneByteStringTag | kSeqStringTag — presumably the
+// same value; confirm against the type definitions.
+void MacroAssembler::JumpIfEitherInstanceTypeIsNotSequentialAscii(
+    Register first,
+    Register second,
+    Register scratch1,
+    Register scratch2,
+    Label* failure) {
+  ASSERT(!AreAliased(scratch1, second));
+  ASSERT(!AreAliased(scratch1, scratch2));
+  static const int kFlatAsciiStringMask =
+      kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
+  static const int kFlatAsciiStringTag = ASCII_STRING_TYPE;
+  And(scratch1, first, kFlatAsciiStringMask);
+  And(scratch2, second, kFlatAsciiStringMask);
+  Cmp(scratch1, kFlatAsciiStringTag);
+  Ccmp(scratch2, kFlatAsciiStringTag, NoFlag, eq);
+  B(ne, failure);
+}
+
+
+// Branches to 'failure' unless the instance type in 'type' denotes a
+// sequential ASCII string. Clobbers 'scratch'.
+void MacroAssembler::JumpIfInstanceTypeIsNotSequentialAscii(Register type,
+                                                            Register scratch,
+                                                            Label* failure) {
+  const int kFlatAsciiStringMask =
+      kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
+  const int kFlatAsciiStringTag =
+      kStringTag | kOneByteStringTag | kSeqStringTag;
+  And(scratch, type, kFlatAsciiStringMask);
+  Cmp(scratch, kFlatAsciiStringTag);
+  B(ne, failure);
+}
+
+
+// Branches to 'failure' unless both instance-type values denote sequential
+// ASCII strings; the two tests are fused with Ccmp. Clobbers scratch1 and
+// scratch2.
+void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialAscii(
+    Register first,
+    Register second,
+    Register scratch1,
+    Register scratch2,
+    Label* failure) {
+  ASSERT(!AreAliased(first, second, scratch1, scratch2));
+  const int kFlatAsciiStringMask =
+      kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
+  const int kFlatAsciiStringTag =
+      kStringTag | kOneByteStringTag | kSeqStringTag;
+  And(scratch1, first, kFlatAsciiStringMask);
+  And(scratch2, second, kFlatAsciiStringMask);
+  Cmp(scratch1, kFlatAsciiStringTag);
+  Ccmp(scratch2, kFlatAsciiStringTag, NoFlag, eq);
+  B(ne, failure);
+}
+
+
+// Branches to 'not_unique_name' unless 'type' is an internalized string or
+// a symbol (the two kinds of unique names).
+void MacroAssembler::JumpIfNotUniqueName(Register type,
+                                         Label* not_unique_name) {
+  STATIC_ASSERT((kInternalizedTag == 0) && (kStringTag == 0));
+  // if ((type is string && type is internalized) || type == SYMBOL_TYPE) {
+  //   continue
+  // } else {
+  //   goto not_unique_name
+  // }
+  Tst(type, kIsNotStringMask | kIsNotInternalizedMask);
+  Ccmp(type, SYMBOL_TYPE, ZFlag, ne);
+  B(ne, not_unique_name);
+}
+
+
+// Shared prologue for JS function invocation: checks whether the expected
+// and actual argument counts match and, when they may differ, calls (or
+// jumps to) the ArgumentsAdaptorTrampoline. Sets *definitely_mismatches
+// when the counts are statically known to differ, in which case the adaptor
+// handles the whole call and no fall-through code is needed.
+// Fix: the label references below were corrupted by HTML-entity decoding —
+// "&regular_invoke" had become "\xC2\xAE" (U+00AE REGISTERED SIGN) followed
+// by "ular_invoke", which does not compile. Restored "&regular_invoke".
+void MacroAssembler::InvokePrologue(const ParameterCount& expected,
+                                    const ParameterCount& actual,
+                                    Handle<Code> code_constant,
+                                    Register code_reg,
+                                    Label* done,
+                                    InvokeFlag flag,
+                                    bool* definitely_mismatches,
+                                    const CallWrapper& call_wrapper) {
+  bool definitely_matches = false;
+  *definitely_mismatches = false;
+  Label regular_invoke;
+
+  // Check whether the expected and actual arguments count match. If not,
+  // setup registers according to contract with ArgumentsAdaptorTrampoline:
+  //   x0: actual arguments count.
+  //   x1: function (passed through to callee).
+  //   x2: expected arguments count.
+
+  // The code below is made a lot easier because the calling code already sets
+  // up actual and expected registers according to the contract if values are
+  // passed in registers.
+  ASSERT(actual.is_immediate() || actual.reg().is(x0));
+  ASSERT(expected.is_immediate() || expected.reg().is(x2));
+  ASSERT((!code_constant.is_null() && code_reg.is(no_reg)) || code_reg.is(x3));
+
+  if (expected.is_immediate()) {
+    ASSERT(actual.is_immediate());
+    if (expected.immediate() == actual.immediate()) {
+      definitely_matches = true;
+
+    } else {
+      Mov(x0, actual.immediate());
+      if (expected.immediate() ==
+          SharedFunctionInfo::kDontAdaptArgumentsSentinel) {
+        // Don't worry about adapting arguments for builtins that
+        // don't want that done. Skip adaption code by making it look
+        // like we have a match between expected and actual number of
+        // arguments.
+        definitely_matches = true;
+      } else {
+        *definitely_mismatches = true;
+        // Set up x2 for the argument adaptor.
+        Mov(x2, expected.immediate());
+      }
+    }
+
+  } else {  // expected is a register.
+    Operand actual_op = actual.is_immediate() ? Operand(actual.immediate())
+                                              : Operand(actual.reg());
+    // If actual == expected perform a regular invocation.
+    Cmp(expected.reg(), actual_op);
+    B(eq, &regular_invoke);
+    // Otherwise set up x0 for the argument adaptor.
+    Mov(x0, actual_op);
+  }
+
+  // If the argument counts may mismatch, generate a call to the argument
+  // adaptor.
+  if (!definitely_matches) {
+    if (!code_constant.is_null()) {
+      Mov(x3, Operand(code_constant));
+      Add(x3, x3, Code::kHeaderSize - kHeapObjectTag);
+    }
+
+    Handle<Code> adaptor =
+        isolate()->builtins()->ArgumentsAdaptorTrampoline();
+    if (flag == CALL_FUNCTION) {
+      call_wrapper.BeforeCall(CallSize(adaptor));
+      Call(adaptor);
+      call_wrapper.AfterCall();
+      if (!*definitely_mismatches) {
+        // If the arg counts don't match, no extra code is emitted by
+        // MAsm::InvokeCode and we can just fall through.
+        B(done);
+      }
+    } else {
+      Jump(adaptor, RelocInfo::CODE_TARGET);
+    }
+  }
+  Bind(&regular_invoke);
+}
+
+
+// Invokes code at 'code' after running the argument-count prologue. When
+// InvokePrologue determined a definite mismatch, the argument adaptor has
+// already performed the call and no direct call is emitted here.
+void MacroAssembler::InvokeCode(Register code,
+                                const ParameterCount& expected,
+                                const ParameterCount& actual,
+                                InvokeFlag flag,
+                                const CallWrapper& call_wrapper) {
+  // You can't call a function without a valid frame.
+  ASSERT(flag == JUMP_FUNCTION || has_frame());
+
+  Label done;
+
+  bool definitely_mismatches = false;
+  InvokePrologue(expected, actual, Handle<Code>::null(), code, &done, flag,
+                 &definitely_mismatches, call_wrapper);
+
+  // If we are certain that actual != expected, then we know InvokePrologue will
+  // have handled the call through the argument adaptor mechanism.
+  // The called function expects the call kind in x5.
+  if (!definitely_mismatches) {
+    if (flag == CALL_FUNCTION) {
+      call_wrapper.BeforeCall(CallSize(code));
+      Call(code);
+      call_wrapper.AfterCall();
+    } else {
+      ASSERT(flag == JUMP_FUNCTION);
+      Jump(code);
+    }
+  }
+
+  // Continue here if InvokePrologue does handle the invocation due to
+  // mismatched parameter counts.
+  Bind(&done);
+}
+
+
+// Invoke the JSFunction held in x1, loading its expected argument count and
+// code entry from the function object itself.
+void MacroAssembler::InvokeFunction(Register function,
+                                    const ParameterCount& actual,
+                                    InvokeFlag flag,
+                                    const CallWrapper& call_wrapper) {
+  // You can't call a function without a valid frame.
+  ASSERT(flag == JUMP_FUNCTION || has_frame());
+
+  // Contract with called JS functions requires that function is passed in x1.
+  // (See FullCodeGenerator::Generate().)
+  // Use the A64 CPURegister predicate 'Is'; lowercase 'is' is the ARM port's
+  // API and is not defined here (the Register/expected/actual overload below
+  // already uses 'Is').
+  ASSERT(function.Is(x1));
+
+  Register expected_reg = x2;
+  Register code_reg = x3;
+
+  Ldr(cp, FieldMemOperand(function, JSFunction::kContextOffset));
+  // The number of arguments is stored as an int32_t, and -1 is a marker
+  // (SharedFunctionInfo::kDontAdaptArgumentsSentinel), so we need sign
+  // extension to correctly handle it.
+  Ldr(expected_reg, FieldMemOperand(function,
+                                    JSFunction::kSharedFunctionInfoOffset));
+  Ldrsw(expected_reg,
+        FieldMemOperand(expected_reg,
+                        SharedFunctionInfo::kFormalParameterCountOffset));
+  Ldr(code_reg,
+      FieldMemOperand(function, JSFunction::kCodeEntryOffset));
+
+  ParameterCount expected(expected_reg);
+  InvokeCode(code_reg, expected, actual, flag, call_wrapper);
+}
+
+
+// Invoke the JSFunction held in x1 with a caller-supplied expected count,
+// loading only the code entry from the function object.
+void MacroAssembler::InvokeFunction(Register function,
+                                    const ParameterCount& expected,
+                                    const ParameterCount& actual,
+                                    InvokeFlag flag,
+                                    const CallWrapper& call_wrapper) {
+  // You can't call a function without a valid frame.
+  ASSERT(flag == JUMP_FUNCTION || has_frame());
+
+  // Contract with called JS functions requires that function is passed in x1.
+  // (See FullCodeGenerator::Generate().)
+  ASSERT(function.Is(x1));
+
+  Register code_reg = x3;
+
+  // Set up the context.
+  Ldr(cp, FieldMemOperand(function, JSFunction::kContextOffset));
+
+  // We call indirectly through the code field in the function to
+  // allow recompilation to take effect without changing any of the
+  // call sites.
+  Ldr(code_reg, FieldMemOperand(function, JSFunction::kCodeEntryOffset));
+  InvokeCode(code_reg, expected, actual, flag, call_wrapper);
+}
+
+
+// Invoke a statically-known JSFunction: materialize the handle into x1 and
+// delegate to the register-based overload above.
+void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
+                                    const ParameterCount& expected,
+                                    const ParameterCount& actual,
+                                    InvokeFlag flag,
+                                    const CallWrapper& call_wrapper) {
+  // Contract with called JS functions requires that function is passed in x1.
+  // (See FullCodeGenerator::Generate().)
+  // NOTE(review): the '__' prefix is unusual inside a MacroAssembler member
+  // (other methods in this patch call helpers directly) — confirm the macro
+  // expands benignly in this file.
+  __ LoadObject(x1, function);
+  InvokeFunction(x1, expected, actual, flag, call_wrapper);
+}
+
+
+// Convert 'input' to an int32 using ECMA-262 ToInt32 semantics (modulo 2^32),
+// leaving the result in 'result' in the representation selected by 'format'
+// (untagged in W or X, or as a smi). NaN/infinity convert to 0.
+void MacroAssembler::ECMA262ToInt32(Register result,
+                                    DoubleRegister input,
+                                    Register scratch1,
+                                    Register scratch2,
+                                    ECMA262ToInt32Result format) {
+  ASSERT(!AreAliased(result, scratch1, scratch2));
+  ASSERT(result.Is64Bits() && scratch1.Is64Bits() && scratch2.Is64Bits());
+  STATIC_ASSERT(kSmiTag == 0);
+  STATIC_ASSERT(kSmiValueSize == 32);
+
+  Label done, tag, manual_conversion;
+
+  // 1. Try to convert with a FPU convert instruction. It's trivial to compute
+  // the modulo operation on an integer register so we convert to a 64-bit
+  // integer, then find the 32-bit result from that.
+  //
+  // Fcvtzs will saturate to INT64_MIN (0x800...00) or INT64_MAX (0x7ff...ff)
+  // when the double is out of range. NaNs and infinities will be converted to 0
+  // (as ECMA-262 requires).
+  Fcvtzs(result, input);
+
+  // The values INT64_MIN (0x800...00) or INT64_MAX (0x7ff...ff) are not
+  // representable using a double, so if the result is one of those then we know
+  // that saturation occurred, and we need to manually handle the conversion.
+  //
+  // It is easy to detect INT64_MIN and INT64_MAX because adding or subtracting
+  // 1 will cause signed overflow.
+  Cmp(result, 1);
+  Ccmp(result, -1, VFlag, vc);
+  B(vc, &tag);
+
+  // 2. Manually convert the input to an int32.
+  // Move the raw bit pattern of the double into 'result' for bit extraction.
+  Fmov(result, input);
+
+  // Extract the exponent.
+  Register exponent = scratch1;
+  Ubfx(exponent, result, HeapNumber::kMantissaBits, HeapNumber::kExponentBits);
+
+  // If the exponent is >= 84 (kMantissaBits + 32), the result is always 0 since
+  // the mantissa gets shifted completely out of the int32_t result.
+  Cmp(exponent, HeapNumber::kExponentBias + HeapNumber::kMantissaBits + 32);
+  CzeroX(result, ge);
+  B(ge, &done);
+
+  // The Fcvtzs sequence handles all cases except where the conversion causes
+  // signed overflow in the int64_t target. Since we've already handled
+  // exponents >= 84, we can guarantee that 63 <= exponent < 84.
+
+  if (emit_debug_code()) {
+    Cmp(exponent, HeapNumber::kExponentBias + 63);
+    // Exponents less than this should have been handled by the Fcvt case.
+    Check(ge, kUnexpectedValue);
+  }
+
+  // Isolate the mantissa bits, and set the implicit '1'.
+  Register mantissa = scratch2;
+  Ubfx(mantissa, result, 0, HeapNumber::kMantissaBits);
+  Orr(mantissa, mantissa, 1UL << HeapNumber::kMantissaBits);
+
+  // Negate the mantissa if necessary.
+  Tst(result, kXSignMask);
+  Cneg(mantissa, mantissa, ne);
+
+  // Shift the mantissa bits in the correct place. We know that we have to shift
+  // it left here, because exponent >= 63 >= kMantissaBits.
+  Sub(exponent, exponent,
+      HeapNumber::kExponentBias + HeapNumber::kMantissaBits);
+  Lsl(result, mantissa, exponent);
+
+  Bind(&tag);
+  // Put the int32 into the requested representation.
+  switch (format) {
+    case INT32_IN_W:
+      // There is nothing to do; the upper 32 bits are undefined.
+      if (emit_debug_code()) {
+        // Poison the upper 32 bits so accidental uses of them are visible.
+        __ Mov(scratch1, 0x55555555);
+        __ Bfi(result, scratch1, 32, 32);
+      }
+      break;
+    case INT32_IN_X:
+      Sxtw(result, result);
+      break;
+    case SMI:
+      SmiTag(result);
+      break;
+  }
+
+  Bind(&done);
+}
+
+
+// Load the double value out of a HeapNumber and convert it with
+// ECMA262ToInt32 (see above). 'heap_number' must hold a tagged HeapNumber.
+void MacroAssembler::HeapNumberECMA262ToInt32(Register result,
+                                              Register heap_number,
+                                              Register scratch1,
+                                              Register scratch2,
+                                              DoubleRegister double_scratch,
+                                              ECMA262ToInt32Result format) {
+  if (emit_debug_code()) {
+    // Verify we indeed have a HeapNumber.
+    Label ok;
+    JumpIfHeapNumber(heap_number, &ok);
+    Abort(kExpectedHeapNumber);
+    Bind(&ok);
+  }
+
+  Ldr(double_scratch, FieldMemOperand(heap_number, HeapNumber::kValueOffset));
+  ECMA262ToInt32(result, double_scratch, scratch1, scratch2, format);
+}
+
+
+// Emit the function prologue: either a stub frame, or (for regular code) the
+// code-age sequence / patchable frame setup depending on pre-aging.
+void MacroAssembler::Prologue(PrologueFrameMode frame_mode) {
+  if (frame_mode == BUILD_STUB_FRAME) {
+    ASSERT(StackPointer().Is(jssp));
+    // TODO(jbramley): Does x1 contain a JSFunction here, or does it already
+    // have the special STUB smi?
+    __ Mov(Tmp0(), Operand(Smi::FromInt(StackFrame::STUB)));
+    // Compiled stubs don't age, and so they don't need the predictable code
+    // ageing sequence.
+    __ Push(lr, fp, cp, Tmp0());
+    __ Add(fp, jssp, StandardFrameConstants::kFixedFrameSizeFromFp);
+  } else {
+    if (isolate()->IsCodePreAgingActive()) {
+      // Pre-age the code: emit the aged stub call instead of a fresh prologue.
+      Code* stub = Code::GetPreAgedCodeAgeStub(isolate());
+      __ EmitCodeAgeSequence(stub);
+    } else {
+      __ EmitFrameSetupForCodeAgePatching();
+    }
+  }
+}
+
+
+// Build a standard internal frame of the given type on jssp.
+void MacroAssembler::EnterFrame(StackFrame::Type type) {
+  ASSERT(jssp.Is(StackPointer()));
+  Push(lr, fp, cp);
+  Mov(Tmp1(), Operand(Smi::FromInt(type)));
+  Mov(Tmp0(), Operand(CodeObject()));
+  Push(Tmp1(), Tmp0());
+  // jssp[4] : lr
+  // jssp[3] : fp
+  // jssp[2] : cp
+  // jssp[1] : type
+  // jssp[0] : code object
+
+  // Adjust FP to point to saved FP.
+  // Use the macro-assembler 'Add' (as the rest of this file does, e.g. in
+  // Prologue above) rather than the raw assembler 'add', so unencodable
+  // immediates are handled uniformly.
+  Add(fp, jssp, StandardFrameConstants::kFixedFrameSizeFromFp + kPointerSize);
+}
+
+
+// Tear down a frame built by EnterFrame: unwind jssp to fp, then restore the
+// caller's fp and lr. The 'type' argument is not consulted here.
+void MacroAssembler::LeaveFrame(StackFrame::Type type) {
+  ASSERT(jssp.Is(StackPointer()));
+  // Drop the execution stack down to the frame pointer and restore
+  // the caller frame pointer and return address.
+  Mov(jssp, fp);
+  AssertStackConsistency();
+  Pop(fp, lr);
+}
+
+
+// Push the caller-saved FP registers onto the stack for an exit frame.
+void MacroAssembler::ExitFramePreserveFPRegs() {
+  PushCPURegList(kCallerSavedFP);
+}
+
+
+// Reload the caller-saved FP registers saved by ExitFramePreserveFPRegs,
+// addressing them fp-relative in pairs (highest index first, matching the
+// push order).
+void MacroAssembler::ExitFrameRestoreFPRegs() {
+  // Read the registers from the stack without popping them. The stack pointer
+  // will be reset as part of the unwinding process.
+  CPURegList saved_fp_regs = kCallerSavedFP;
+  // The Ldp below loads two registers at a time, so the count must be even.
+  ASSERT(saved_fp_regs.Count() % 2 == 0);
+
+  int offset = ExitFrameConstants::kLastExitFrameField;
+  while (!saved_fp_regs.IsEmpty()) {
+    const CPURegister& dst0 = saved_fp_regs.PopHighestIndex();
+    const CPURegister& dst1 = saved_fp_regs.PopHighestIndex();
+    offset -= 2 * kDRegSizeInBytes;
+    Ldp(dst1, dst0, MemOperand(fp, offset));
+  }
+}
+
+
+// TODO(jbramley): Check that we're handling the frame pointer correctly.
+// Build an exit frame for calling out to C++: save lr/fp/code object, record
+// fp and cp in the isolate's top-frame slots, optionally save FP registers,
+// reserve 'extra_space' slots plus a return-address slot, then switch the
+// stack pointer from jssp to an aligned csp.
+void MacroAssembler::EnterExitFrame(bool save_doubles,
+                                    const Register& scratch,
+                                    int extra_space) {
+  ASSERT(jssp.Is(StackPointer()));
+
+  // Set up the new stack frame.
+  Mov(scratch, Operand(CodeObject()));
+  Push(lr, fp);
+  Mov(fp, StackPointer());
+  Push(xzr, scratch);
+  //         fp[8]: CallerPC (lr)
+  //   fp -> fp[0]: CallerFP (old fp)
+  //         fp[-8]: Space reserved for SPOffset.
+  // jssp -> fp[-16]: CodeObject()
+  STATIC_ASSERT((2 * kPointerSize) ==
+                ExitFrameConstants::kCallerSPDisplacement);
+  STATIC_ASSERT((1 * kPointerSize) == ExitFrameConstants::kCallerPCOffset);
+  STATIC_ASSERT((0 * kPointerSize) == ExitFrameConstants::kCallerFPOffset);
+  STATIC_ASSERT((-1 * kPointerSize) == ExitFrameConstants::kSPOffset);
+  STATIC_ASSERT((-2 * kPointerSize) == ExitFrameConstants::kCodeOffset);
+
+  // Save the frame pointer and context pointer in the top frame.
+  Mov(scratch, Operand(ExternalReference(Isolate::kCEntryFPAddress,
+                                         isolate())));
+  Str(fp, MemOperand(scratch));
+  Mov(scratch, Operand(ExternalReference(Isolate::kContextAddress,
+                                         isolate())));
+  Str(cp, MemOperand(scratch));
+
+  STATIC_ASSERT((-2 * kPointerSize) ==
+                ExitFrameConstants::kLastExitFrameField);
+  if (save_doubles) {
+    ExitFramePreserveFPRegs();
+  }
+
+  // Reserve space for the return address and for user requested memory.
+  // We do this before aligning to make sure that we end up correctly
+  // aligned with the minimum of wasted space.
+  Claim(extra_space + 1, kXRegSizeInBytes);
+  //         fp[8]: CallerPC (lr)
+  //   fp -> fp[0]: CallerFP (old fp)
+  //         fp[-8]: Space reserved for SPOffset.
+  //         fp[-16]: CodeObject()
+  //         jssp[-16 - fp_size]: Saved doubles (if save_doubles is true).
+  //         jssp[8]: Extra space reserved for caller (if extra_space != 0).
+  // jssp -> jssp[0]: Space reserved for the return address.
+
+  // Align and synchronize the system stack pointer with jssp.
+  AlignAndSetCSPForFrame();
+  ASSERT(csp.Is(StackPointer()));
+
+  //         fp[8]: CallerPC (lr)
+  //   fp -> fp[0]: CallerFP (old fp)
+  //         fp[-8]: Space reserved for SPOffset.
+  //         fp[-16]: CodeObject()
+  //         csp[...]: Saved doubles, if saved_doubles is true.
+  //         csp[8]: Memory reserved for the caller if extra_space != 0.
+  //                 Alignment padding, if necessary.
+  //  csp -> csp[0]: Space reserved for the return address.
+
+  // ExitFrame::GetStateForFramePointer expects to find the return address at
+  // the memory address immediately below the pointer stored in SPOffset.
+  // It is not safe to derive much else from SPOffset, because the size of the
+  // padding can vary.
+  Add(scratch, csp, kXRegSizeInBytes);
+  Str(scratch, MemOperand(fp, ExitFrameConstants::kSPOffset));
+}
+
+
+// Leave the current exit frame: optionally restore FP registers and the
+// context, clear the isolate's top-frame slots, switch back to jssp, and
+// restore the caller's fp and lr.
+void MacroAssembler::LeaveExitFrame(bool restore_doubles,
+                                    const Register& scratch,
+                                    bool restore_context) {
+  ASSERT(csp.Is(StackPointer()));
+
+  if (restore_doubles) {
+    ExitFrameRestoreFPRegs();
+  }
+
+  // Restore the context pointer from the top frame.
+  if (restore_context) {
+    Mov(scratch, Operand(ExternalReference(Isolate::kContextAddress,
+                                           isolate())));
+    Ldr(cp, MemOperand(scratch));
+  }
+
+  if (emit_debug_code()) {
+    // Also emit debug code to clear the cp in the top frame.
+    Mov(scratch, Operand(ExternalReference(Isolate::kContextAddress,
+                                           isolate())));
+    Str(xzr, MemOperand(scratch));
+  }
+  // Clear the frame pointer from the top frame.
+  Mov(scratch, Operand(ExternalReference(Isolate::kCEntryFPAddress,
+                                         isolate())));
+  Str(xzr, MemOperand(scratch));
+
+  // Pop the exit frame.
+  //         fp[8]: CallerPC (lr)
+  //   fp -> fp[0]: CallerFP (old fp)
+  //         fp[...]: The rest of the frame.
+  Mov(jssp, fp);
+  SetStackPointer(jssp);
+  AssertStackConsistency();
+  Pop(fp, lr);
+}
+
+
+// Store 'value' into a stats counter, if native code counters are enabled.
+void MacroAssembler::SetCounter(StatsCounter* counter, int value,
+                                Register scratch1, Register scratch2) {
+  if (FLAG_native_code_counters && counter->Enabled()) {
+    Mov(scratch1, value);
+    Mov(scratch2, Operand(ExternalReference(counter)));
+    Str(scratch1, MemOperand(scratch2));
+  }
+}
+
+
+// Add 'value' (non-zero; may be negative — see DecrementCounter) to a stats
+// counter, if native code counters are enabled.
+void MacroAssembler::IncrementCounter(StatsCounter* counter, int value,
+                                      Register scratch1, Register scratch2) {
+  ASSERT(value != 0);
+  if (FLAG_native_code_counters && counter->Enabled()) {
+    Mov(scratch2, Operand(ExternalReference(counter)));
+    Ldr(scratch1, MemOperand(scratch2));
+    Add(scratch1, scratch1, value);
+    Str(scratch1, MemOperand(scratch2));
+  }
+}
+
+
+// Subtract 'value' from a stats counter by negating and delegating.
+void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
+                                      Register scratch1, Register scratch2) {
+  IncrementCounter(counter, -value, scratch1, scratch2);
+}
+
+
+// Walk 'context_chain_length' links up the context chain starting at cp and
+// leave the resulting context in 'dst'.
+void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
+  if (context_chain_length > 0) {
+    // Move up the chain of contexts to the context containing the slot.
+    Ldr(dst, MemOperand(cp, Context::SlotOffset(Context::PREVIOUS_INDEX)));
+    for (int i = 1; i < context_chain_length; i++) {
+      Ldr(dst, MemOperand(dst, Context::SlotOffset(Context::PREVIOUS_INDEX)));
+    }
+  } else {
+    // Slot is in the current function context. Move it into the
+    // destination register in case we store into it (the write barrier
+    // cannot be allowed to destroy the context in cp).
+    Mov(dst, cp);
+  }
+}
+
+
+#ifdef ENABLE_DEBUGGER_SUPPORT
+// Call the Runtime::kDebugBreak entry through CEntryStub with zero arguments.
+void MacroAssembler::DebugBreak() {
+  Mov(x0, 0);  // No arguments.
+  Mov(x1, Operand(ExternalReference(Runtime::kDebugBreak, isolate())));
+  CEntryStub ces(1);
+  ASSERT(AllowThisStubCall(&ces));
+  Call(ces.GetCode(isolate()), RelocInfo::DEBUG_BREAK);
+}
+#endif
+
+
+// Push a new try-handler frame (next, code, state, context, fp) onto jssp and
+// link it as the isolate's current handler.
+void MacroAssembler::PushTryHandler(StackHandler::Kind kind,
+                                    int handler_index) {
+  ASSERT(jssp.Is(StackPointer()));
+  // Adjust this code if the asserts don't hold.
+  STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
+
+  // For the JSEntry handler, we must preserve the live registers x0-x4.
+  // (See JSEntryStub::GenerateBody().)
+
+  unsigned state =
+      StackHandler::IndexField::encode(handler_index) |
+      StackHandler::KindField::encode(kind);
+
+  // Set up the code object and the state for pushing.
+  Mov(x10, Operand(CodeObject()));
+  Mov(x11, state);
+
+  // Push the frame pointer, context, state, and code object.
+  if (kind == StackHandler::JS_ENTRY) {
+    // JS entry frames have no context or frame pointer to record; push zeros
+    // (which also read back as Smi 0).
+    ASSERT(Smi::FromInt(0) == 0);
+    Push(xzr, xzr, x11, x10);
+  } else {
+    Push(fp, cp, x11, x10);
+  }
+
+  // Link the current handler as the next handler.
+  Mov(x11, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
+  Ldr(x10, MemOperand(x11));
+  Push(x10);
+  // Set this new handler as the current one.
+  Str(jssp, MemOperand(x11));
+}
+
+
+// Unlink the current try-handler: pop its 'next' pointer, drop the rest of
+// the handler frame, and store 'next' back as the isolate's current handler.
+void MacroAssembler::PopTryHandler() {
+  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
+  Pop(x10);
+  Mov(x11, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
+  Drop(StackHandlerConstants::kSize - kXRegSizeInBytes, kByteSizeInBytes);
+  Str(x10, MemOperand(x11));
+}
+
+
+// Allocate 'object_size' bytes (or words, per SIZE_IN_WORDS) of heap memory,
+// leaving the (optionally tagged) result in 'result'. Jumps to 'gc_required'
+// on overflow or when the allocation limit is exceeded.
+void MacroAssembler::Allocate(int object_size,
+                              Register result,
+                              Register scratch1,
+                              Register scratch2,
+                              Label* gc_required,
+                              AllocationFlags flags) {
+  ASSERT(object_size <= Page::kMaxRegularHeapObjectSize);
+  if (!FLAG_inline_new) {
+    if (emit_debug_code()) {
+      // Trash the registers to simulate an allocation failure.
+      // We apply salt to the original zap value to easily spot the values.
+      Mov(result, (kDebugZapValue & ~0xffL) | 0x11L);
+      Mov(scratch1, (kDebugZapValue & ~0xffL) | 0x21L);
+      Mov(scratch2, (kDebugZapValue & ~0xffL) | 0x21L);
+    }
+    B(gc_required);
+    return;
+  }
+
+  ASSERT(!AreAliased(result, scratch1, scratch2, Tmp0(), Tmp1()));
+  ASSERT(result.Is64Bits() && scratch1.Is64Bits() && scratch2.Is64Bits() &&
+         Tmp0().Is64Bits() && Tmp1().Is64Bits());
+
+  // Make object size into bytes.
+  if ((flags & SIZE_IN_WORDS) != 0) {
+    object_size *= kPointerSize;
+  }
+  ASSERT(0 == (object_size & kObjectAlignmentMask));
+
+  // Check relative positions of allocation top and limit addresses.
+  // The values must be adjacent in memory to allow the use of LDP.
+  ExternalReference heap_allocation_top =
+      AllocationUtils::GetAllocationTopReference(isolate(), flags);
+  ExternalReference heap_allocation_limit =
+      AllocationUtils::GetAllocationLimitReference(isolate(), flags);
+  intptr_t top = reinterpret_cast<intptr_t>(heap_allocation_top.address());
+  intptr_t limit = reinterpret_cast<intptr_t>(heap_allocation_limit.address());
+  ASSERT((limit - top) == kPointerSize);
+
+  // Set up allocation top address and object size registers.
+  Register top_address = scratch1;
+  Register allocation_limit = scratch2;
+  Mov(top_address, Operand(heap_allocation_top));
+
+  if ((flags & RESULT_CONTAINS_TOP) == 0) {
+    // Load allocation top into result and the allocation limit.
+    Ldp(result, allocation_limit, MemOperand(top_address));
+  } else {
+    if (emit_debug_code()) {
+      // Assert that result actually contains top on entry.
+      Ldr(Tmp0(), MemOperand(top_address));
+      Cmp(result, Tmp0());
+      Check(eq, kUnexpectedAllocationTop);
+    }
+    // Load the allocation limit. 'result' already contains the allocation top.
+    Ldr(allocation_limit, MemOperand(top_address, limit - top));
+  }
+
+  // We can ignore DOUBLE_ALIGNMENT flags here because doubles and pointers have
+  // the same alignment on A64.
+  STATIC_ASSERT(kPointerAlignment == kDoubleAlignment);
+
+  // Calculate new top and bail out if new space is exhausted.
+  // Adds sets V on signed overflow, which the B(vs) immediately consumes.
+  Adds(Tmp1(), result, object_size);
+  B(vs, gc_required);
+  Cmp(Tmp1(), allocation_limit);
+  B(hi, gc_required);
+  Str(Tmp1(), MemOperand(top_address));
+
+  // Tag the object if requested.
+  if ((flags & TAG_OBJECT) != 0) {
+    Orr(result, result, kHeapObjectTag);
+  }
+}
+
+
+// Allocate a dynamically-sized block of heap memory (size in 'object_size',
+// bytes or words per SIZE_IN_WORDS), leaving the (optionally tagged) result
+// in 'result'. Jumps to 'gc_required' on overflow or when the allocation
+// limit is exceeded.
+void MacroAssembler::Allocate(Register object_size,
+                              Register result,
+                              Register scratch1,
+                              Register scratch2,
+                              Label* gc_required,
+                              AllocationFlags flags) {
+  if (!FLAG_inline_new) {
+    if (emit_debug_code()) {
+      // Trash the registers to simulate an allocation failure.
+      // We apply salt to the original zap value to easily spot the values.
+      Mov(result, (kDebugZapValue & ~0xffL) | 0x11L);
+      Mov(scratch1, (kDebugZapValue & ~0xffL) | 0x21L);
+      Mov(scratch2, (kDebugZapValue & ~0xffL) | 0x21L);
+    }
+    B(gc_required);
+    return;
+  }
+
+  ASSERT(!AreAliased(object_size, result, scratch1, scratch2, Tmp0(), Tmp1()));
+  ASSERT(object_size.Is64Bits() && result.Is64Bits() && scratch1.Is64Bits() &&
+         scratch2.Is64Bits() && Tmp0().Is64Bits() && Tmp1().Is64Bits());
+
+  // Check relative positions of allocation top and limit addresses.
+  // The values must be adjacent in memory to allow the use of LDP.
+  ExternalReference heap_allocation_top =
+      AllocationUtils::GetAllocationTopReference(isolate(), flags);
+  ExternalReference heap_allocation_limit =
+      AllocationUtils::GetAllocationLimitReference(isolate(), flags);
+  intptr_t top = reinterpret_cast<intptr_t>(heap_allocation_top.address());
+  intptr_t limit = reinterpret_cast<intptr_t>(heap_allocation_limit.address());
+  ASSERT((limit - top) == kPointerSize);
+
+  // Set up allocation top address and object size registers.
+  Register top_address = scratch1;
+  Register allocation_limit = scratch2;
+  Mov(top_address, Operand(heap_allocation_top));
+
+  if ((flags & RESULT_CONTAINS_TOP) == 0) {
+    // Load allocation top into result and the allocation limit.
+    Ldp(result, allocation_limit, MemOperand(top_address));
+  } else {
+    if (emit_debug_code()) {
+      // Assert that result actually contains top on entry.
+      Ldr(Tmp0(), MemOperand(top_address));
+      Cmp(result, Tmp0());
+      Check(eq, kUnexpectedAllocationTop);
+    }
+    // Load the allocation limit. 'result' already contains the allocation top.
+    Ldr(allocation_limit, MemOperand(top_address, limit - top));
+  }
+
+  // We can ignore DOUBLE_ALIGNMENT flags here because doubles and pointers have
+  // the same alignment on A64.
+  STATIC_ASSERT(kPointerAlignment == kDoubleAlignment);
+
+  // Calculate new top and bail out if new space is exhausted.
+  if ((flags & SIZE_IN_WORDS) != 0) {
+    Adds(Tmp1(), result, Operand(object_size, LSL, kPointerSizeLog2));
+  } else {
+    Adds(Tmp1(), result, object_size);
+  }
+
+  // Check the overflow flag immediately after the Adds: the debug-mode Tst
+  // below is an ANDS, which rewrites NZCV (clearing V), so testing 'vs' after
+  // it would never branch and the overflow bailout would be silently lost.
+  B(vs, gc_required);
+
+  if (emit_debug_code()) {
+    Tst(Tmp1(), kObjectAlignmentMask);
+    Check(eq, kUnalignedAllocationInNewSpace);
+  }
+
+  Cmp(Tmp1(), allocation_limit);
+  B(hi, gc_required);
+  Str(Tmp1(), MemOperand(top_address));
+
+  // Tag the object if requested.
+  if ((flags & TAG_OBJECT) != 0) {
+    Orr(result, result, kHeapObjectTag);
+  }
+}
+
+
+// Roll back the most recent new-space allocation by resetting the allocation
+// top to 'object' (untagged in place as a side effect).
+void MacroAssembler::UndoAllocationInNewSpace(Register object,
+                                              Register scratch) {
+  ExternalReference new_space_allocation_top =
+      ExternalReference::new_space_allocation_top_address(isolate());
+
+  // Make sure the object has no tag before resetting top.
+  Bic(object, object, kHeapObjectTagMask);
+#ifdef DEBUG
+  // Check that the object un-allocated is below the current top.
+  Mov(scratch, Operand(new_space_allocation_top));
+  Ldr(scratch, MemOperand(scratch));
+  Cmp(object, scratch);
+  Check(lt, kUndoAllocationOfNonAllocatedMemory);
+#endif
+  // Write the address of the object to un-allocate as the current top.
+  Mov(scratch, Operand(new_space_allocation_top));
+  Str(object, MemOperand(scratch));
+}
+
+
+// Allocate a sequential two-byte string of 'length' characters in new space,
+// then initialize its map, length and hash field.
+void MacroAssembler::AllocateTwoByteString(Register result,
+                                           Register length,
+                                           Register scratch1,
+                                           Register scratch2,
+                                           Register scratch3,
+                                           Label* gc_required) {
+  ASSERT(!AreAliased(result, length, scratch1, scratch2, scratch3));
+  // Calculate the number of bytes needed for the characters in the string while
+  // observing object alignment.
+  STATIC_ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
+  Add(scratch1, length, length);  // Length in bytes, not chars.
+  Add(scratch1, scratch1, kObjectAlignmentMask + SeqTwoByteString::kHeaderSize);
+  // Round the size down to the alignment boundary (the slack was added above).
+  Bic(scratch1, scratch1, kObjectAlignmentMask);
+
+  // Allocate two-byte string in new space.
+  Allocate(scratch1,
+           result,
+           scratch2,
+           scratch3,
+           gc_required,
+           TAG_OBJECT);
+
+  // Set the map, length and hash field.
+  InitializeNewString(result,
+                      length,
+                      Heap::kStringMapRootIndex,
+                      scratch1,
+                      scratch2);
+}
+
+
+// Allocate a sequential one-byte (ASCII) string of 'length' characters in new
+// space, then initialize its map, length and hash field.
+void MacroAssembler::AllocateAsciiString(Register result,
+                                         Register length,
+                                         Register scratch1,
+                                         Register scratch2,
+                                         Register scratch3,
+                                         Label* gc_required) {
+  ASSERT(!AreAliased(result, length, scratch1, scratch2, scratch3));
+  // Calculate the number of bytes needed for the characters in the string while
+  // observing object alignment.
+  STATIC_ASSERT((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
+  STATIC_ASSERT(kCharSize == 1);
+  Add(scratch1, length, kObjectAlignmentMask + SeqOneByteString::kHeaderSize);
+  // Round the size down to the alignment boundary (the slack was added above).
+  Bic(scratch1, scratch1, kObjectAlignmentMask);
+
+  // Allocate ASCII string in new space.
+  Allocate(scratch1,
+           result,
+           scratch2,
+           scratch3,
+           gc_required,
+           TAG_OBJECT);
+
+  // Set the map, length and hash field.
+  InitializeNewString(result,
+                      length,
+                      Heap::kAsciiStringMapRootIndex,
+                      scratch1,
+                      scratch2);
+}
+
+
+// Allocate a two-byte cons string (fixed ConsString::kSize) and initialize
+// its map, length and hash field.
+void MacroAssembler::AllocateTwoByteConsString(Register result,
+                                               Register length,
+                                               Register scratch1,
+                                               Register scratch2,
+                                               Label* gc_required) {
+  Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
+           TAG_OBJECT);
+
+  InitializeNewString(result,
+                      length,
+                      Heap::kConsStringMapRootIndex,
+                      scratch1,
+                      scratch2);
+}
+
+
+// Allocate an ASCII cons string. If the high-promotion mode flag is set in
+// the isolate, pretenure into old pointer space; otherwise use new space.
+void MacroAssembler::AllocateAsciiConsString(Register result,
+                                             Register length,
+                                             Register scratch1,
+                                             Register scratch2,
+                                             Label* gc_required) {
+  Label allocate_new_space, install_map;
+  AllocationFlags flags = TAG_OBJECT;
+
+  // Non-zero at the high-promotion address means pretenuring is active.
+  ExternalReference high_promotion_mode = ExternalReference::
+      new_space_high_promotion_mode_active_address(isolate());
+  Mov(scratch1, Operand(high_promotion_mode));
+  Ldr(scratch1, MemOperand(scratch1));
+  Cbz(scratch1, &allocate_new_space);
+
+  Allocate(ConsString::kSize,
+           result,
+           scratch1,
+           scratch2,
+           gc_required,
+           static_cast<AllocationFlags>(flags | PRETENURE_OLD_POINTER_SPACE));
+
+  B(&install_map);
+
+  Bind(&allocate_new_space);
+  Allocate(ConsString::kSize,
+           result,
+           scratch1,
+           scratch2,
+           gc_required,
+           flags);
+
+  Bind(&install_map);
+
+  InitializeNewString(result,
+                      length,
+                      Heap::kConsAsciiStringMapRootIndex,
+                      scratch1,
+                      scratch2);
+}
+
+
+// Allocate a two-byte sliced string (fixed SlicedString::kSize) and
+// initialize its map, length and hash field.
+void MacroAssembler::AllocateTwoByteSlicedString(Register result,
+                                                 Register length,
+                                                 Register scratch1,
+                                                 Register scratch2,
+                                                 Label* gc_required) {
+  ASSERT(!AreAliased(result, length, scratch1, scratch2));
+  Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
+           TAG_OBJECT);
+
+  InitializeNewString(result,
+                      length,
+                      Heap::kSlicedStringMapRootIndex,
+                      scratch1,
+                      scratch2);
+}
+
+
+// Allocate an ASCII sliced string (fixed SlicedString::kSize) and initialize
+// its map, length and hash field.
+void MacroAssembler::AllocateAsciiSlicedString(Register result,
+                                               Register length,
+                                               Register scratch1,
+                                               Register scratch2,
+                                               Label* gc_required) {
+  ASSERT(!AreAliased(result, length, scratch1, scratch2));
+  Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
+           TAG_OBJECT);
+
+  InitializeNewString(result,
+                      length,
+                      Heap::kSlicedAsciiStringMapRootIndex,
+                      scratch1,
+                      scratch2);
+}
+
+
+// Allocates a heap number or jumps to the need_gc label if the young space
+// is full and a scavenge is needed.
+void MacroAssembler::AllocateHeapNumber(Register result,
+                                        Label* gc_required,
+                                        Register scratch1,
+                                        Register scratch2,
+                                        Register heap_number_map) {
+  // Allocate an object in the heap for the heap number and tag it as a heap
+  // object.
+  Allocate(HeapNumber::kSize, result, scratch1, scratch2, gc_required,
+           TAG_OBJECT);
+
+  // Store heap number map in the allocated object.
+  // If the caller didn't provide the map, load it from the root list into
+  // scratch1.
+  if (heap_number_map.Is(NoReg)) {
+    heap_number_map = scratch1;
+    LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
+  }
+  AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
+  Str(heap_number_map, FieldMemOperand(result, HeapObject::kMapOffset));
+}
+
+
+// Allocate a heap number and store 'value' into its value field.
+void MacroAssembler::AllocateHeapNumberWithValue(Register result,
+                                                 DoubleRegister value,
+                                                 Label* gc_required,
+                                                 Register scratch1,
+                                                 Register scratch2,
+                                                 Register heap_number_map) {
+  // TODO(all): Check if it would be more efficient to use STP to store both
+  // the map and the value.
+  AllocateHeapNumber(result, gc_required, scratch1, scratch2, heap_number_map);
+  Str(value, FieldMemOperand(result, HeapNumber::kValueOffset));
+}
+
+
+// Compare the instance type of 'object' against 'type' and branch to
+// 'if_cond_pass' when 'cond' holds. Clobbers 'map' and 'type_reg'.
+void MacroAssembler::JumpIfObjectType(Register object,
+                                      Register map,
+                                      Register type_reg,
+                                      InstanceType type,
+                                      Label* if_cond_pass,
+                                      Condition cond) {
+  CompareObjectType(object, map, type_reg, type);
+  B(cond, if_cond_pass);
+}
+
+
+// Branch to 'if_not_object' when 'object' is not of instance type 'type'.
+void MacroAssembler::JumpIfNotObjectType(Register object,
+                                         Register map,
+                                         Register type_reg,
+                                         InstanceType type,
+                                         Label* if_not_object) {
+  JumpIfObjectType(object, map, type_reg, type, if_not_object, ne);
+}
+
+
+// Sets condition flags based on comparison, and returns type in type_reg.
+// Loads the object's map into 'map' as a side effect.
+void MacroAssembler::CompareObjectType(Register object,
+                                       Register map,
+                                       Register type_reg,
+                                       InstanceType type) {
+  Ldr(map, FieldMemOperand(object, HeapObject::kMapOffset));
+  CompareInstanceType(map, type_reg, type);
+}
+
+
+// Sets condition flags based on comparison, and returns type in type_reg.
+// Reads the instance type byte out of 'map'.
+void MacroAssembler::CompareInstanceType(Register map,
+                                         Register type_reg,
+                                         InstanceType type) {
+  Ldrb(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
+  Cmp(type_reg, type);
+}
+
+
+// Load the map of 'obj' into 'scratch' and compare it against the handle
+// 'map', setting the condition flags.
+void MacroAssembler::CompareMap(Register obj,
+                                Register scratch,
+                                Handle<Map> map,
+                                Label* early_success) {
+  // TODO(jbramley): The early_success label isn't used. Remove it.
+  Ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
+  CompareMap(scratch, map, early_success);
+}
+
+
+// Compare a map already held in 'obj_map' against the handle 'map', setting
+// the condition flags.
+void MacroAssembler::CompareMap(Register obj_map,
+                                Handle<Map> map,
+                                Label* early_success) {
+  // TODO(jbramley): The early_success label isn't used. Remove it.
+  Cmp(obj_map, Operand(map));
+}
+
+
+// Branch to 'fail' unless the map of 'obj' equals 'map' (with an optional
+// preceding smi check). Clobbers 'scratch'.
+void MacroAssembler::CheckMap(Register obj,
+                              Register scratch,
+                              Handle<Map> map,
+                              Label* fail,
+                              SmiCheckType smi_check_type) {
+  if (smi_check_type == DO_SMI_CHECK) {
+    JumpIfSmi(obj, fail);
+  }
+
+  Label success;
+  CompareMap(obj, scratch, map, &success);
+  B(ne, fail);
+  Bind(&success);
+}
+
+
+// Branch to 'fail' unless the map of 'obj' equals the root-list map at
+// 'index' (with an optional preceding smi check). Clobbers 'scratch'.
+void MacroAssembler::CheckMap(Register obj,
+                              Register scratch,
+                              Heap::RootListIndex index,
+                              Label* fail,
+                              SmiCheckType smi_check_type) {
+  if (smi_check_type == DO_SMI_CHECK) {
+    JumpIfSmi(obj, fail);
+  }
+  Ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
+  JumpIfNotRoot(scratch, index, fail);
+}
+
+
+// Branch to 'fail' unless the map already held in 'obj_map' equals 'map'
+// (with an optional preceding smi check on the register itself).
+void MacroAssembler::CheckMap(Register obj_map,
+                              Handle<Map> map,
+                              Label* fail,
+                              SmiCheckType smi_check_type) {
+  if (smi_check_type == DO_SMI_CHECK) {
+    JumpIfSmi(obj_map, fail);
+  }
+  Label success;
+  CompareMap(obj_map, map, &success);
+  B(ne, fail);
+  Bind(&success);
+}
+
+
+// If the map of 'obj' equals 'map', tail-jump to the 'success' code object;
+// otherwise fall through. Clobbers 'scratch'.
+void MacroAssembler::DispatchMap(Register obj,
+                                 Register scratch,
+                                 Handle<Map> map,
+                                 Handle<Code> success,
+                                 SmiCheckType smi_check_type) {
+  Label fail;
+  if (smi_check_type == DO_SMI_CHECK) {
+    JumpIfSmi(obj, &fail);
+  }
+  Ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
+  Cmp(scratch, Operand(map));
+  B(ne, &fail);
+  Jump(success, RelocInfo::CODE_TARGET);
+  Bind(&fail);
+}
+
+
+// AND the bit field of 'object''s map with 'mask', setting the condition
+// flags. Clobbers Tmp0().
+void MacroAssembler::TestMapBitfield(Register object, uint64_t mask) {
+  Ldr(Tmp0(), FieldMemOperand(object, HeapObject::kMapOffset));
+  Ldrb(Tmp0(), FieldMemOperand(Tmp0(), Map::kBitFieldOffset));
+  Tst(Tmp0(), mask);
+}
+
+
+// Extract the elements kind of 'object' (from its map's bit field 2) into
+// 'result'.
+// NOTE(review): the '__' prefix is unusual inside a MacroAssembler member
+// (siblings here call helpers directly) — confirm the macro expands benignly
+// in this file.
+void MacroAssembler::LoadElementsKind(Register result, Register object) {
+  // Load map.
+  __ Ldr(result, FieldMemOperand(object, HeapObject::kMapOffset));
+  // Load the map's "bit field 2".
+  __ Ldrb(result, FieldMemOperand(result, Map::kBitField2Offset));
+  // Retrieve elements_kind from bit field 2.
+  __ Ubfx(result, result, Map::kElementsKindShift, Map::kElementsKindBitCount);
+}
+
+
+// Load the prototype of 'function' into 'result', going to 'miss' when the
+// receiver is not a function, is a bound function (if requested), or has no
+// materialized prototype. Handles both the initial-map and non-instance
+// prototype cases.
+void MacroAssembler::TryGetFunctionPrototype(Register function,
+                                             Register result,
+                                             Register scratch,
+                                             Label* miss,
+                                             BoundFunctionAction action) {
+  ASSERT(!AreAliased(function, result, scratch));
+
+  // Check that the receiver isn't a smi.
+  JumpIfSmi(function, miss);
+
+  // Check that the function really is a function. Load map into result reg.
+  JumpIfNotObjectType(function, result, scratch, JS_FUNCTION_TYPE, miss);
+
+  if (action == kMissOnBoundFunction) {
+    Register scratch_w = scratch.W();
+    Ldr(scratch,
+        FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
+    // On 64-bit platforms, compiler hints field is not a smi. See definition of
+    // kCompilerHintsOffset in src/objects.h.
+    Ldr(scratch_w,
+        FieldMemOperand(scratch, SharedFunctionInfo::kCompilerHintsOffset));
+    Tbnz(scratch, SharedFunctionInfo::kBoundFunction, miss);
+  }
+
+  // Make sure that the function has an instance prototype.
+  Label non_instance;
+  Ldrb(scratch, FieldMemOperand(result, Map::kBitFieldOffset));
+  Tbnz(scratch, Map::kHasNonInstancePrototype, &non_instance);
+
+  // Get the prototype or initial map from the function.
+  Ldr(result,
+      FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
+
+  // If the prototype or initial map is the hole, don't return it and simply
+  // miss the cache instead. This will allow us to allocate a prototype object
+  // on-demand in the runtime system.
+  JumpIfRoot(result, Heap::kTheHoleValueRootIndex, miss);
+
+  // If the function does not have an initial map, we're done.
+  Label done;
+  JumpIfNotObjectType(result, scratch, scratch, MAP_TYPE, &done);
+
+  // Get the prototype from the initial map.
+  Ldr(result, FieldMemOperand(result, Map::kPrototypeOffset));
+  B(&done);
+
+  // Non-instance prototype: fetch prototype from constructor field in initial
+  // map.
+  Bind(&non_instance);
+  Ldr(result, FieldMemOperand(result, Map::kConstructorOffset));
+
+  // All done.
+  Bind(&done);
+}
+
+
+// Compare 'obj' against the root-list entry at 'index', setting the
+// condition flags. Clobbers Tmp0().
+void MacroAssembler::CompareRoot(const Register& obj,
+                                 Heap::RootListIndex index) {
+  ASSERT(!AreAliased(obj, Tmp0()));
+  LoadRoot(Tmp0(), index);
+  Cmp(obj, Tmp0());
+}
+
+
+// Branch to 'if_equal' when 'obj' equals the root-list entry at 'index'.
+void MacroAssembler::JumpIfRoot(const Register& obj,
+                                Heap::RootListIndex index,
+                                Label* if_equal) {
+  CompareRoot(obj, index);
+  B(eq, if_equal);
+}
+
+
+// Branch to 'if_not_equal' when 'obj' differs from the root-list entry at
+// 'index'.
+void MacroAssembler::JumpIfNotRoot(const Register& obj,
+                                   Heap::RootListIndex index,
+                                   Label* if_not_equal) {
+  CompareRoot(obj, index);
+  B(ne, if_not_equal);
+}
+
+
+// Compare 'lhs' with 'rhs' and branch to 'if_true' when 'cond' holds,
+// 'if_false' otherwise. Either label may equal 'fall_through'; branches to a
+// fall-through label are elided, so at most one branch is emitted.
+void MacroAssembler::CompareAndSplit(const Register& lhs,
+ const Operand& rhs,
+ Condition cond,
+ Label* if_true,
+ Label* if_false,
+ Label* fall_through) {
+ if ((if_true == if_false) && (if_false == fall_through)) {
+ // Fall through.
+ } else if (if_true == if_false) {
+ // Both outcomes go to the same place; no comparison needed.
+ B(if_true);
+ } else if (if_false == fall_through) {
+ CompareAndBranch(lhs, rhs, cond, if_true);
+ } else if (if_true == fall_through) {
+ // Branch on the inverted condition so the true case falls through.
+ CompareAndBranch(lhs, rhs, InvertCondition(cond), if_false);
+ } else {
+ CompareAndBranch(lhs, rhs, cond, if_true);
+ B(if_false);
+ }
+}
+
+
+// Test 'reg' against 'bit_pattern' and branch to 'if_any_set' when any of the
+// bits is set, 'if_all_clear' otherwise. As in CompareAndSplit, branches to a
+// label equal to 'fall_through' are elided.
+void MacroAssembler::TestAndSplit(const Register& reg,
+ uint64_t bit_pattern,
+ Label* if_all_clear,
+ Label* if_any_set,
+ Label* fall_through) {
+ if ((if_all_clear == if_any_set) && (if_any_set == fall_through)) {
+ // Fall through.
+ } else if (if_all_clear == if_any_set) {
+ // Both outcomes go to the same place; no test needed.
+ B(if_all_clear);
+ } else if (if_all_clear == fall_through) {
+ TestAndBranchIfAnySet(reg, bit_pattern, if_any_set);
+ } else if (if_any_set == fall_through) {
+ TestAndBranchIfAllClear(reg, bit_pattern, if_all_clear);
+ } else {
+ TestAndBranchIfAnySet(reg, bit_pattern, if_any_set);
+ B(if_all_clear);
+ }
+}
+
+
+// Branch to 'fail' unless 'map' describes one of the fast element kinds
+// (smi-only, object, holey or not). Relies on the fast kinds occupying the
+// lowest ElementsKind values, as asserted below.
+void MacroAssembler::CheckFastElements(Register map,
+ Register scratch,
+ Label* fail) {
+ STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
+ STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
+ STATIC_ASSERT(FAST_ELEMENTS == 2);
+ STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
+ Ldrb(scratch, FieldMemOperand(map, Map::kBitField2Offset));
+ Cmp(scratch, Map::kMaximumBitField2FastHoleyElementValue);
+ B(hi, fail);
+}
+
+
+// Branch to 'fail' unless 'map' has fast *object* elements, i.e. an elements
+// kind strictly above the smi-only kinds but within the fast range.
+void MacroAssembler::CheckFastObjectElements(Register map,
+ Register scratch,
+ Label* fail) {
+ STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
+ STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
+ STATIC_ASSERT(FAST_ELEMENTS == 2);
+ STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
+ Ldrb(scratch, FieldMemOperand(map, Map::kBitField2Offset));
+ Cmp(scratch, Operand(Map::kMaximumBitField2FastHoleySmiElementValue));
+ // If cond==ls, set cond=hi, otherwise compare.
+ // Fails (hi) when the kind is <= the smi maximum, or > the fast maximum.
+ Ccmp(scratch,
+ Operand(Map::kMaximumBitField2FastHoleyElementValue), CFlag, hi);
+ B(hi, fail);
+}
+
+
+// Branch to 'fail' unless 'map' has fast smi-only elements (holey or not).
+void MacroAssembler::CheckFastSmiElements(Register map,
+ Register scratch,
+ Label* fail) {
+ STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
+ STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
+ Ldrb(scratch, FieldMemOperand(map, Map::kBitField2Offset));
+ Cmp(scratch, Map::kMaximumBitField2FastHoleySmiElementValue);
+ B(hi, fail);
+}
+
+
+// Note: The ARM version of this clobbers elements_reg, but this version does
+// not. Some uses of this in A64 assume that elements_reg will be preserved.
+//
+// Store the number in 'value_reg' (a smi or heap number) into the
+// FixedDoubleArray at 'elements_reg', at the index given by smi 'key_reg'.
+// Jumps to 'fail' if 'value_reg' is neither a smi nor a heap number.
+// NaNs are canonicalized before being stored.
+void MacroAssembler::StoreNumberToDoubleElements(Register value_reg,
+ Register key_reg,
+ Register elements_reg,
+ Register scratch1,
+ FPRegister fpscratch1,
+ FPRegister fpscratch2,
+ Label* fail,
+ int elements_offset) {
+ ASSERT(!AreAliased(value_reg, key_reg, elements_reg, scratch1));
+ Label store_num;
+
+ // Speculatively convert the smi to a double - all smis can be exactly
+ // represented as a double.
+ SmiUntagToDouble(fpscratch1, value_reg, kSpeculativeUntag);
+
+ // If value_reg is a smi, we're done.
+ JumpIfSmi(value_reg, &store_num);
+
+ // Ensure that the object is a heap number.
+ CheckMap(value_reg, scratch1, isolate()->factory()->heap_number_map(),
+ fail, DONT_DO_SMI_CHECK);
+
+ Ldr(fpscratch1, FieldMemOperand(value_reg, HeapNumber::kValueOffset));
+ Fmov(fpscratch2, FixedDoubleArray::canonical_not_the_hole_nan_as_double());
+
+ // Check for NaN by comparing the number to itself: NaN comparison will
+ // report unordered, indicated by the overflow flag being set.
+ Fcmp(fpscratch1, fpscratch1);
+ // Select the canonical NaN when unordered, the original value otherwise.
+ Fcsel(fpscratch1, fpscratch2, fpscratch1, vs);
+
+ // Store the result.
+ Bind(&store_num);
+ Add(scratch1, elements_reg,
+ Operand::UntagSmiAndScale(key_reg, kDoubleSizeLog2));
+ Str(fpscratch1,
+ FieldMemOperand(scratch1,
+ FixedDoubleArray::kHeaderSize - elements_offset));
+}
+
+
+// A stub call is allowed when a frame has been set up, or when the stub
+// never needs to set one up itself.
+bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
+ if (has_frame_) return true;
+ return !stub->SometimesSetsUpAFrame();
+}
+
+
+// Extract the cached array index from the hash field in 'hash' and produce it
+// as a smi in 'index'. The caller is expected to have already checked that
+// the hash field contains a cached index.
+void MacroAssembler::IndexFromHash(Register hash, Register index) {
+ // If the hash field contains an array index pick it out. The assert checks
+ // that the constants for the maximum number of digits for an array index
+ // cached in the hash field and the number of bits reserved for it does not
+ // conflict.
+ ASSERT(TenToThe(String::kMaxCachedArrayIndexLength) <
+ (1 << String::kArrayIndexValueBits));
+ // We want the smi-tagged index in key. kArrayIndexValueMask has zeros in
+ // the low kHashShift bits.
+ STATIC_ASSERT(kSmiTag == 0);
+ Ubfx(hash, hash, String::kHashShift, String::kArrayIndexValueBits);
+ SmiTag(index, hash);
+}
+
+
+// Debug-mode checks for a sequential-string SetChar: verifies that 'string'
+// is a non-smi with the expected representation/encoding ('encoding_mask'),
+// and that 'index' (smi or untagged, per 'index_type') is within bounds and
+// non-negative. Aborts on any failure; emits nothing useful in release mode
+// beyond the checks themselves.
+void MacroAssembler::EmitSeqStringSetCharCheck(
+ Register string,
+ Register index,
+ SeqStringSetCharCheckIndexType index_type,
+ Register scratch,
+ uint32_t encoding_mask) {
+ ASSERT(!AreAliased(string, index, scratch));
+
+ if (index_type == kIndexIsSmi) {
+ AssertSmi(index);
+ }
+
+ // Check that string is an object.
+ AssertNotSmi(string, kNonObject);
+
+ // Check that string has an appropriate map.
+ Ldr(scratch, FieldMemOperand(string, HeapObject::kMapOffset));
+ Ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
+
+ And(scratch, scratch, kStringRepresentationMask | kStringEncodingMask);
+ Cmp(scratch, encoding_mask);
+ Check(eq, kUnexpectedStringType);
+
+ // Compare in the index's own domain: smi vs smi length, or untagged vs
+ // untagged length.
+ Ldr(scratch, FieldMemOperand(string, String::kLengthOffset));
+ Cmp(index, index_type == kIndexIsSmi ? scratch : Operand::UntagSmi(scratch));
+ Check(lt, kIndexIsTooLarge);
+
+ ASSERT_EQ(0, Smi::FromInt(0));
+ Cmp(index, 0);
+ Check(ge, kIndexIsNegative);
+}
+
+
+// Security check for accesses through a global proxy: jump to 'miss' unless
+// the current lexical context and the native context of 'holder_reg' are the
+// same, or carry compatible security tokens. Clobbers 'scratch', Tmp0() and
+// (in debug code) Tmp1().
+void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
+ Register scratch,
+ Label* miss) {
+ // TODO(jbramley): Sort out the uses of Tmp0() and Tmp1() in this function.
+ // The ARM version takes two scratch registers, and that should be enough for
+ // all of the checks.
+
+ Label same_contexts;
+
+ ASSERT(!AreAliased(holder_reg, scratch));
+
+ // Load current lexical context from the stack frame.
+ Ldr(scratch, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ // In debug mode, make sure the lexical context is set.
+#ifdef DEBUG
+ Cmp(scratch, 0);
+ Check(ne, kWeShouldNotHaveAnEmptyLexicalContext);
+#endif
+
+ // Load the native context of the current context.
+ int offset =
+ Context::kHeaderSize + Context::GLOBAL_OBJECT_INDEX * kPointerSize;
+ Ldr(scratch, FieldMemOperand(scratch, offset));
+ Ldr(scratch, FieldMemOperand(scratch, GlobalObject::kNativeContextOffset));
+
+ // Check the context is a native context.
+ if (emit_debug_code()) {
+ // Read the first word and compare to the global_context_map.
+ Register temp = Tmp1();
+ Ldr(temp, FieldMemOperand(scratch, HeapObject::kMapOffset));
+ CompareRoot(temp, Heap::kNativeContextMapRootIndex);
+ Check(eq, kExpectedNativeContext);
+ }
+
+ // Check if both contexts are the same.
+ ldr(Tmp0(), FieldMemOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
+ cmp(scratch, Tmp0());
+ b(&same_contexts, eq);
+
+ // Check the context is a native context.
+ if (emit_debug_code()) {
+ // Move Tmp0() into a different register, as CompareRoot will use it.
+ Register temp = Tmp1();
+ mov(temp, Tmp0());
+ CompareRoot(temp, Heap::kNullValueRootIndex);
+ Check(ne, kExpectedNonNullContext);
+
+ Ldr(temp, FieldMemOperand(temp, HeapObject::kMapOffset));
+ CompareRoot(temp, Heap::kNativeContextMapRootIndex);
+ Check(eq, kExpectedNativeContext);
+
+ // Let's consider that Tmp0() has been clobbered by the MacroAssembler.
+ // We reload it with its value.
+ ldr(Tmp0(), FieldMemOperand(holder_reg,
+ JSGlobalProxy::kNativeContextOffset));
+ }
+
+ // Check that the security token in the calling global object is
+ // compatible with the security token in the receiving global
+ // object.
+ int token_offset = Context::kHeaderSize +
+ Context::SECURITY_TOKEN_INDEX * kPointerSize;
+
+ ldr(scratch, FieldMemOperand(scratch, token_offset));
+ ldr(Tmp0(), FieldMemOperand(Tmp0(), token_offset));
+ cmp(scratch, Tmp0());
+ b(miss, ne);
+
+ bind(&same_contexts);
+}
+
+
+// Compute the hash code from the untagged key. This must be kept in sync with
+// ComputeIntegerHash in utils.h and KeyedLoadGenericElementStub in
+// code-stub-hydrogen.cc
+//
+// On entry 'key' holds the untagged key; on exit its W view holds the 32-bit
+// hash. 'scratch' is clobbered.
+void MacroAssembler::GetNumberHash(Register key, Register scratch) {
+ ASSERT(!AreAliased(key, scratch));
+
+ // Xor original key with a seed.
+ LoadRoot(scratch, Heap::kHashSeedRootIndex);
+ Eor(key, key, Operand::UntagSmi(scratch));
+
+ // The algorithm uses 32-bit integer values.
+ key = key.W();
+ scratch = scratch.W();
+
+ // Compute the hash code from the untagged key. This must be kept in sync
+ // with ComputeIntegerHash in utils.h.
+ //
+ // hash = ~hash + (hash << 15);
+ Mvn(scratch, key);
+ Add(key, scratch, Operand(key, LSL, 15));
+ // hash = hash ^ (hash >> 12);
+ Eor(key, key, Operand(key, LSR, 12));
+ // hash = hash + (hash << 2);
+ Add(key, key, Operand(key, LSL, 2));
+ // hash = hash ^ (hash >> 4);
+ Eor(key, key, Operand(key, LSR, 4));
+ // hash = hash * 2057; (i.e. hash + (hash << 3) + (hash << 11))
+ Mov(scratch, Operand(key, LSL, 11));
+ Add(key, key, Operand(key, LSL, 3));
+ Add(key, key, scratch);
+ // hash = hash ^ (hash >> 16);
+ Eor(key, key, Operand(key, LSR, 16));
+}
+
+
+// Probe the SeededNumberDictionary in 'elements' for smi 'key'. On success
+// the value is left in 'result'; on failure (key absent after the unrolled
+// probes, or the entry is not a normal property) jumps to 'miss'.
+void MacroAssembler::LoadFromNumberDictionary(Label* miss,
+ Register elements,
+ Register key,
+ Register result,
+ Register scratch0,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3) {
+ ASSERT(!AreAliased(elements, key, scratch0, scratch1, scratch2, scratch3));
+
+ Label done;
+
+ SmiUntag(scratch0, key);
+ GetNumberHash(scratch0, scratch1);
+
+ // Compute the capacity mask.
+ Ldrsw(scratch1,
+ UntagSmiFieldMemOperand(elements,
+ SeededNumberDictionary::kCapacityOffset));
+ Sub(scratch1, scratch1, 1);
+
+ // Generate an unrolled loop that performs a few probes before giving up.
+ for (int i = 0; i < kNumberDictionaryProbes; i++) {
+ // Compute the masked index: (hash + i + i * i) & mask.
+ if (i > 0) {
+ Add(scratch2, scratch0, SeededNumberDictionary::GetProbeOffset(i));
+ } else {
+ Mov(scratch2, scratch0);
+ }
+ And(scratch2, scratch2, scratch1);
+
+ // Scale the index by multiplying by the element size.
+ ASSERT(SeededNumberDictionary::kEntrySize == 3);
+ Add(scratch2, scratch2, Operand(scratch2, LSL, 1));
+
+ // Check if the key is identical to the name.
+ Add(scratch2, elements, Operand(scratch2, LSL, kPointerSizeLog2));
+ Ldr(scratch3,
+ FieldMemOperand(scratch2,
+ SeededNumberDictionary::kElementsStartOffset));
+ Cmp(key, scratch3);
+ // The last probe branches to 'miss' on mismatch instead of continuing.
+ if (i != (kNumberDictionaryProbes - 1)) {
+ B(eq, &done);
+ } else {
+ B(ne, miss);
+ }
+ }
+
+ Bind(&done);
+ // Check that the value is a normal property.
+ const int kDetailsOffset =
+ SeededNumberDictionary::kElementsStartOffset + 2 * kPointerSize;
+ Ldrsw(scratch1, UntagSmiFieldMemOperand(scratch2, kDetailsOffset));
+ TestAndBranchIfAnySet(scratch1, PropertyDetails::TypeField::kMask, miss);
+
+ // Get the value at the masked, scaled index and return.
+ const int kValueOffset =
+ SeededNumberDictionary::kElementsStartOffset + kPointerSize;
+ Ldr(result, FieldMemOperand(scratch2, kValueOffset));
+}
+
+
+// Record 'address' in the store buffer. On overflow, calls the
+// StoreBufferOverflowStub; otherwise either falls through or returns,
+// depending on 'and_then'. Clobbers 'scratch' and Tmp0().
+void MacroAssembler::RememberedSetHelper(Register object, // For debug tests.
+ Register address,
+ Register scratch,
+ SaveFPRegsMode fp_mode,
+ RememberedSetFinalAction and_then) {
+ ASSERT(!AreAliased(object, address, scratch));
+ Label done, store_buffer_overflow;
+ if (emit_debug_code()) {
+ Label ok;
+ JumpIfNotInNewSpace(object, &ok);
+ Abort(kRememberedSetPointerInNewSpace);
+ bind(&ok);
+ }
+ // Load store buffer top.
+ Mov(Tmp0(), Operand(ExternalReference::store_buffer_top(isolate())));
+ Ldr(scratch, MemOperand(Tmp0()));
+ // Store pointer to buffer and increment buffer top.
+ Str(address, MemOperand(scratch, kPointerSize, PostIndex));
+ // Write back new top of buffer.
+ Str(scratch, MemOperand(Tmp0()));
+ // Call stub on end of buffer.
+ // Check for end of buffer.
+ // The overflow bit is tested directly via Tbz/Tbnz below.
+ ASSERT(StoreBuffer::kStoreBufferOverflowBit ==
+ (1 << (14 + kPointerSizeLog2)));
+ if (and_then == kFallThroughAtEnd) {
+ Tbz(scratch, (14 + kPointerSizeLog2), &done);
+ } else {
+ ASSERT(and_then == kReturnAtEnd);
+ Tbnz(scratch, (14 + kPointerSizeLog2), &store_buffer_overflow);
+ Ret();
+ }
+
+ Bind(&store_buffer_overflow);
+ Push(lr);
+ StoreBufferOverflowStub store_buffer_overflow_stub =
+ StoreBufferOverflowStub(fp_mode);
+ CallStub(&store_buffer_overflow_stub);
+ Pop(lr);
+
+ Bind(&done);
+ if (and_then == kReturnAtEnd) {
+ Ret();
+ }
+}
+
+
+// Restore the registers saved by PushSafepointRegisters and drop the padding
+// slots reserved for the unsaved ones.
+void MacroAssembler::PopSafepointRegisters() {
+ const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
+ PopXRegList(kSafepointSavedRegisters);
+ Drop(num_unsaved);
+}
+
+
+// Push the safepoint-saved registers, padding the stack so that a full
+// kNumSafepointRegisters block is occupied (as the safepoint tables expect).
+void MacroAssembler::PushSafepointRegisters() {
+ // Safepoints expect a block of kNumSafepointRegisters values on the stack, so
+ // adjust the stack for unsaved registers.
+ const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
+ ASSERT(num_unsaved >= 0);
+ Claim(num_unsaved);
+ PushXRegList(kSafepointSavedRegisters);
+}
+
+
+// Push all allocatable FP registers (as D registers) for a safepoint.
+void MacroAssembler::PushSafepointFPRegisters() {
+ PushCPURegList(CPURegList(CPURegister::kFPRegister, kDRegSize,
+ FPRegister::kAllocatableFPRegisters));
+}
+
+
+// Pop the FP registers pushed by PushSafepointFPRegisters.
+void MacroAssembler::PopSafepointFPRegisters() {
+ PopCPURegList(CPURegList(CPURegister::kFPRegister, kDRegSize,
+ FPRegister::kAllocatableFPRegisters));
+}
+
+
+// Map a register code to its slot index within the block of safepoint-saved
+// registers on the stack. Register codes with no slot hit UNREACHABLE().
+int MacroAssembler::SafepointRegisterStackIndex(int reg_code) {
+ // Make sure the safepoint registers list is what we expect.
+ ASSERT(CPURegList::GetSafepointSavedRegisters().list() == 0x6ffcffff);
+
+ // Safepoint registers are stored contiguously on the stack, but not all the
+ // registers are saved. The following registers are excluded:
+ // - x16 and x17 (ip0 and ip1) because they shouldn't be preserved outside of
+ // the macro assembler.
+ // - x28 (jssp) because JS stack pointer doesn't need to be included in
+ // safepoint registers.
+ // - x31 (csp) because the system stack pointer doesn't need to be included
+ // in safepoint registers.
+ //
+ // This function implements the mapping of register code to index into the
+ // safepoint register slots.
+ if ((0 <= reg_code) && (reg_code <= 15)) {
+ // x0-x15 map directly.
+ return reg_code;
+ }
+ if ((18 <= reg_code) && (reg_code <= 27)) {
+ // Skip ip0 and ip1.
+ return reg_code - 2;
+ }
+ if ((reg_code == 29) || (reg_code == 30)) {
+ // Also skip jssp.
+ return reg_code - 3;
+ }
+ // This register has no safepoint register slot.
+ UNREACHABLE();
+ return -1;
+}
+
+
+// Branch to 'if_any_set' if any of the bits in 'mask' are set in the flags
+// word of the memory page containing 'object'. Clobbers 'scratch'.
+void MacroAssembler::CheckPageFlagSet(const Register& object,
+ const Register& scratch,
+ int mask,
+ Label* if_any_set) {
+ // Mask off the low bits to get the page start.
+ And(scratch, object, ~Page::kPageAlignmentMask);
+ Ldr(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset));
+ TestAndBranchIfAnySet(scratch, mask, if_any_set);
+}
+
+
+// Branch to 'if_all_clear' if none of the bits in 'mask' are set in the flags
+// word of the memory page containing 'object'. Clobbers 'scratch'.
+void MacroAssembler::CheckPageFlagClear(const Register& object,
+ const Register& scratch,
+ int mask,
+ Label* if_all_clear) {
+ // Mask off the low bits to get the page start.
+ And(scratch, object, ~Page::kPageAlignmentMask);
+ Ldr(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset));
+ TestAndBranchIfAllClear(scratch, mask, if_all_clear);
+}
+
+
+// Write-barrier helper for a store of 'value' into the field at 'offset'
+// within 'object'. Computes the field address into 'scratch' and delegates to
+// RecordWrite. The smi check here is skipped in RecordWrite (OMIT_SMI_CHECK).
+void MacroAssembler::RecordWriteField(
+ Register object,
+ int offset,
+ Register value,
+ Register scratch,
+ LinkRegisterStatus lr_status,
+ SaveFPRegsMode save_fp,
+ RememberedSetAction remembered_set_action,
+ SmiCheck smi_check) {
+ // First, check if a write barrier is even needed. The tests below
+ // catch stores of Smis.
+ Label done;
+
+ // Skip the barrier if writing a smi.
+ if (smi_check == INLINE_SMI_CHECK) {
+ JumpIfSmi(value, &done);
+ }
+
+ // Although the object register is tagged, the offset is relative to the start
+ // of the object, so offset must be a multiple of kPointerSize.
+ ASSERT(IsAligned(offset, kPointerSize));
+
+ Add(scratch, object, offset - kHeapObjectTag);
+ if (emit_debug_code()) {
+ Label ok;
+ Tst(scratch, (1 << kPointerSizeLog2) - 1);
+ B(eq, &ok);
+ Abort(kUnalignedCellInWriteBarrier);
+ Bind(&ok);
+ }
+
+ RecordWrite(object,
+ scratch,
+ value,
+ lr_status,
+ save_fp,
+ remembered_set_action,
+ OMIT_SMI_CHECK);
+
+ Bind(&done);
+
+ // Clobber clobbered input registers when running with the debug-code flag
+ // turned on to provoke errors.
+ if (emit_debug_code()) {
+ Mov(value, Operand(BitCast<int64_t>(kZapValue + 4)));
+ Mov(scratch, Operand(BitCast<int64_t>(kZapValue + 8)));
+ }
+}
+
+
+// Will clobber: object, address, value, Tmp0(), Tmp1().
+// If lr_status is kLRHasBeenSaved, lr will also be clobbered.
+//
+// The register 'object' contains a heap object pointer. The heap object tag is
+// shifted away.
+//
+// Full write barrier: skips work when the stored value is a smi (optional) or
+// when neither page is "interesting" to the GC, otherwise calls the
+// RecordWriteStub.
+void MacroAssembler::RecordWrite(Register object,
+ Register address,
+ Register value,
+ LinkRegisterStatus lr_status,
+ SaveFPRegsMode fp_mode,
+ RememberedSetAction remembered_set_action,
+ SmiCheck smi_check) {
+ ASM_LOCATION("MacroAssembler::RecordWrite");
+ ASSERT(!AreAliased(object, value));
+
+ if (emit_debug_code()) {
+ // Verify that 'address' really points at 'value'.
+ Ldr(Tmp0(), MemOperand(address));
+ Cmp(Tmp0(), value);
+ Check(eq, kWrongAddressOrValuePassedToRecordWrite);
+ }
+
+ // Count number of write barriers in generated code.
+ isolate()->counters()->write_barriers_static()->Increment();
+ // TODO(mstarzinger): Dynamic counter missing.
+
+ // First, check if a write barrier is even needed. The tests below
+ // catch stores of smis and stores into the young generation.
+ Label done;
+
+ if (smi_check == INLINE_SMI_CHECK) {
+ ASSERT_EQ(0, kSmiTag);
+ JumpIfSmi(value, &done);
+ }
+
+ CheckPageFlagClear(value,
+ value, // Used as scratch.
+ MemoryChunk::kPointersToHereAreInterestingMask,
+ &done);
+ CheckPageFlagClear(object,
+ value, // Used as scratch.
+ MemoryChunk::kPointersFromHereAreInterestingMask,
+ &done);
+
+ // Record the actual write.
+ if (lr_status == kLRHasNotBeenSaved) {
+ Push(lr);
+ }
+ RecordWriteStub stub(object, value, address, remembered_set_action, fp_mode);
+ CallStub(&stub);
+ if (lr_status == kLRHasNotBeenSaved) {
+ Pop(lr);
+ }
+
+ Bind(&done);
+
+ // Clobber clobbered registers when running with the debug-code flag
+ // turned on to provoke errors.
+ if (emit_debug_code()) {
+ Mov(address, Operand(BitCast<int64_t>(kZapValue + 12)));
+ Mov(value, Operand(BitCast<int64_t>(kZapValue + 16)));
+ }
+}
+
+
+// Debug check that the two low bits of 'reg' do not form the impossible
+// marking bit pattern ("01", i.e. bit0 clear and bit1 set).
+void MacroAssembler::AssertHasValidColor(const Register& reg) {
+ if (emit_debug_code()) {
+ // The bit sequence is backward. The first character in the string
+ // represents the least significant bit.
+ ASSERT(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
+
+ Label color_is_valid;
+ Tbnz(reg, 0, &color_is_valid);
+ Tbz(reg, 1, &color_is_valid);
+ Abort(kUnexpectedColorFound);
+ Bind(&color_is_valid);
+ }
+}
+
+
+// Compute, for the object at 'addr_reg', the address of the mark-bitmap cell
+// ('bitmap_reg') and the bit offset of its color within that cell
+// ('shift_reg'). Clobbers Tmp0().
+void MacroAssembler::GetMarkBits(Register addr_reg,
+ Register bitmap_reg,
+ Register shift_reg) {
+ ASSERT(!AreAliased(addr_reg, bitmap_reg, shift_reg, no_reg));
+ // addr_reg is divided into fields:
+ // |63 page base 20|19 high 8|7 shift 3|2 0|
+ // 'high' gives the index of the cell holding color bits for the object.
+ // 'shift' gives the offset in the cell for this object's color.
+ const int kShiftBits = kPointerSizeLog2 + Bitmap::kBitsPerCellLog2;
+ Ubfx(Tmp0(), addr_reg, kShiftBits, kPageSizeBits - kShiftBits);
+ Bic(bitmap_reg, addr_reg, Page::kPageAlignmentMask);
+ Add(bitmap_reg, bitmap_reg, Operand(Tmp0(), LSL, Bitmap::kBytesPerCellLog2));
+ // bitmap_reg:
+ // |63 page base 20|19 zeros 15|14 high 3|2 0|
+ Ubfx(shift_reg, addr_reg, kPointerSizeLog2, Bitmap::kBitsPerCellLog2);
+}
+
+
+// Branch to 'has_color' if the mark bits of 'object' match the color given by
+// (first_bit, second_bit); fall through otherwise. Clobbers both scratches.
+void MacroAssembler::HasColor(Register object,
+ Register bitmap_scratch,
+ Register shift_scratch,
+ Label* has_color,
+ int first_bit,
+ int second_bit) {
+ // See mark-compact.h for color definitions.
+ ASSERT(!AreAliased(object, bitmap_scratch, shift_scratch));
+
+ GetMarkBits(object, bitmap_scratch, shift_scratch);
+ Ldr(bitmap_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
+ // Shift the bitmap down to get the color of the object in bits [1:0].
+ Lsr(bitmap_scratch, bitmap_scratch, shift_scratch);
+
+ AssertHasValidColor(bitmap_scratch);
+
+ // These bit sequences are backwards. The first character in the string
+ // represents the least significant bit.
+ ASSERT(strcmp(Marking::kWhiteBitPattern, "00") == 0);
+ ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
+ ASSERT(strcmp(Marking::kGreyBitPattern, "11") == 0);
+
+ // Check for the color.
+ if (first_bit == 0) {
+ // Checking for white.
+ ASSERT(second_bit == 0);
+ // We only need to test the first bit.
+ Tbz(bitmap_scratch, 0, has_color);
+ } else {
+ Label other_color;
+ // Checking for grey or black.
+ Tbz(bitmap_scratch, 0, &other_color);
+ if (second_bit == 0) {
+ Tbz(bitmap_scratch, 1, has_color);
+ } else {
+ Tbnz(bitmap_scratch, 1, has_color);
+ }
+ Bind(&other_color);
+ }
+
+ // Fall through if it does not have the right color.
+}
+
+
+// Branch to 'if_deprecated' if 'map' is marked deprecated. Emits nothing when
+// the map can never be deprecated. Clobbers 'scratch'.
+void MacroAssembler::CheckMapDeprecated(Handle<Map> map,
+ Register scratch,
+ Label* if_deprecated) {
+ if (map->CanBeDeprecated()) {
+ Mov(scratch, Operand(map));
+ Ldrsw(scratch, UntagSmiFieldMemOperand(scratch, Map::kBitField3Offset));
+ TestAndBranchIfAnySet(scratch, Map::Deprecated::kMask, if_deprecated);
+ }
+}
+
+
+// Branch to 'on_black' if 'object' is marked black ("10"); fall through
+// otherwise. Clobbers both scratches.
+void MacroAssembler::JumpIfBlack(Register object,
+ Register scratch0,
+ Register scratch1,
+ Label* on_black) {
+ ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
+ HasColor(object, scratch0, scratch1, on_black, 1, 0); // kBlackBitPattern.
+}
+
+
+// Walk the prototype chain of 'object' and branch to 'found' if any object on
+// it (including 'object' itself) uses dictionary-mode elements. Terminates at
+// the null prototype. Clobbers both scratches.
+void MacroAssembler::JumpIfDictionaryInPrototypeChain(
+ Register object,
+ Register scratch0,
+ Register scratch1,
+ Label* found) {
+ ASSERT(!AreAliased(object, scratch0, scratch1));
+ Factory* factory = isolate()->factory();
+ Register current = scratch0;
+ Label loop_again;
+
+ // Scratch contains elements pointer.
+ Mov(current, object);
+
+ // Loop based on the map going up the prototype chain.
+ Bind(&loop_again);
+ Ldr(current, FieldMemOperand(current, HeapObject::kMapOffset));
+ Ldrb(scratch1, FieldMemOperand(current, Map::kBitField2Offset));
+ Ubfx(scratch1, scratch1, Map::kElementsKindShift, Map::kElementsKindBitCount);
+ CompareAndBranch(scratch1, DICTIONARY_ELEMENTS, eq, found);
+ Ldr(current, FieldMemOperand(current, Map::kPrototypeOffset));
+ CompareAndBranch(current, Operand(factory->null_value()), ne, &loop_again);
+}
+
+
+// Given 'ldr_location' pointing at a PC-relative LDR-literal instruction,
+// compute into 'result' the address of the literal that instruction loads
+// (ldr_location + sign-extended 19-bit offset, scaled to words).
+void MacroAssembler::GetRelocatedValueLocation(Register ldr_location,
+ Register result) {
+ ASSERT(!result.Is(ldr_location));
+ // Bit position and width of the LDR-literal immediate offset field.
+ const uint32_t kLdrLitOffset_lsb = 5;
+ const uint32_t kLdrLitOffset_width = 19;
+ Ldr(result, MemOperand(ldr_location));
+ if (emit_debug_code()) {
+ And(result, result, LoadLiteralFMask);
+ Cmp(result, LoadLiteralFixed);
+ Check(eq, kTheInstructionToPatchShouldBeAnLdrLiteral);
+ // The instruction was clobbered. Reload it.
+ Ldr(result, MemOperand(ldr_location));
+ }
+ Sbfx(result, result, kLdrLitOffset_lsb, kLdrLitOffset_width);
+ Add(result, ldr_location, Operand(result, LSL, kWordSizeInBytesLog2));
+}
+
+
+// Incremental-marking helper: if 'value' is white and is a data object (heap
+// number or non-cons/non-slice string), mark it black and bump the page's
+// live-bytes count. If it is white but may contain pointers, branch to
+// 'value_is_white_and_not_data'. Black/grey objects fall straight through.
+void MacroAssembler::EnsureNotWhite(
+ Register value,
+ Register bitmap_scratch,
+ Register shift_scratch,
+ Register load_scratch,
+ Register length_scratch,
+ Label* value_is_white_and_not_data) {
+ ASSERT(!AreAliased(
+ value, bitmap_scratch, shift_scratch, load_scratch, length_scratch));
+
+ // These bit sequences are backwards. The first character in the string
+ // represents the least significant bit.
+ ASSERT(strcmp(Marking::kWhiteBitPattern, "00") == 0);
+ ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
+ ASSERT(strcmp(Marking::kGreyBitPattern, "11") == 0);
+
+ GetMarkBits(value, bitmap_scratch, shift_scratch);
+ Ldr(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
+ Lsr(load_scratch, load_scratch, shift_scratch);
+
+ AssertHasValidColor(load_scratch);
+
+ // If the value is black or grey we don't need to do anything.
+ // Since both black and grey have a 1 in the first position and white does
+ // not have a 1 there we only need to check one bit.
+ Label done;
+ Tbnz(load_scratch, 0, &done);
+
+ // Value is white. We check whether it is data that doesn't need scanning.
+ Register map = load_scratch; // Holds map while checking type.
+ Label is_data_object;
+
+ // Check for heap-number.
+ Ldr(map, FieldMemOperand(value, HeapObject::kMapOffset));
+ Mov(length_scratch, HeapNumber::kSize);
+ JumpIfRoot(map, Heap::kHeapNumberMapRootIndex, &is_data_object);
+
+ // Check for strings.
+ ASSERT(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
+ ASSERT(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
+ // If it's a string and it's not a cons string then it's an object containing
+ // no GC pointers.
+ Register instance_type = load_scratch;
+ Ldrb(instance_type, FieldMemOperand(map, Map::kInstanceTypeOffset));
+ TestAndBranchIfAnySet(instance_type,
+ kIsIndirectStringMask | kIsNotStringMask,
+ value_is_white_and_not_data);
+
+ // It's a non-indirect (non-cons and non-slice) string.
+ // If it's external, the length is just ExternalString::kSize.
+ // Otherwise it's String::kHeaderSize + string->length() * (1 or 2).
+ // External strings are the only ones with the kExternalStringTag bit
+ // set.
+ ASSERT_EQ(0, kSeqStringTag & kExternalStringTag)
+ ASSERT_EQ(0, kConsStringTag & kExternalStringTag);
+ Mov(length_scratch, ExternalString::kSize);
+ TestAndBranchIfAnySet(instance_type, kExternalStringTag, &is_data_object);
+
+ // Sequential string, either ASCII or UC16.
+ // For ASCII (char-size of 1) we shift the smi tag away to get the length.
+ // For UC16 (char-size of 2) we just leave the smi tag in place, thereby
+ // getting the length multiplied by 2.
+ ASSERT(kOneByteStringTag == 4 && kStringEncodingMask == 4);
+ Ldrsw(length_scratch, UntagSmiFieldMemOperand(value,
+ String::kLengthOffset));
+ Tst(instance_type, kStringEncodingMask);
+ Cset(load_scratch, eq);
+ Lsl(length_scratch, length_scratch, load_scratch);
+ // Round the byte length up to the object alignment.
+ Add(length_scratch,
+ length_scratch,
+ SeqString::kHeaderSize + kObjectAlignmentMask);
+ Bic(length_scratch, length_scratch, kObjectAlignmentMask);
+
+ Bind(&is_data_object);
+ // Value is a data object, and it is white. Mark it black. Since we know
+ // that the object is white we can make it black by flipping one bit.
+ Register mask = shift_scratch;
+ Mov(load_scratch, 1);
+ Lsl(mask, load_scratch, shift_scratch);
+
+ Ldr(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
+ Orr(load_scratch, load_scratch, mask);
+ Str(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
+
+ // Account for the object's size in the page's live-bytes counter.
+ Bic(bitmap_scratch, bitmap_scratch, Page::kPageAlignmentMask);
+ Ldr(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));
+ Add(load_scratch, load_scratch, length_scratch);
+ Str(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));
+
+ Bind(&done);
+}
+
+
+// Emit a Check (abort unless 'cond' holds) only when debug code is enabled.
+void MacroAssembler::Assert(Condition cond, BailoutReason reason) {
+ if (!emit_debug_code()) return;
+ Check(cond, reason);
+}
+
+
+
+// Emit a zero-check on 'reg' (abort if non-zero) only when debug code is
+// enabled.
+void MacroAssembler::AssertRegisterIsClear(Register reg, BailoutReason reason) {
+ if (!emit_debug_code()) return;
+ CheckRegisterIsClear(reg, reason);
+}
+
+
+// Debug check that 'reg' holds the root value at 'index'. The alias ASSERT
+// runs unconditionally because CompareRoot clobbers Tmp0().
+void MacroAssembler::AssertRegisterIsRoot(Register reg,
+ Heap::RootListIndex index,
+ BailoutReason reason) {
+ // CompareRoot uses Tmp0().
+ ASSERT(!reg.Is(Tmp0()));
+ if (emit_debug_code()) {
+ CompareRoot(reg, index);
+ Check(eq, reason);
+ }
+}
+
+
+// Debug check that 'elements' has one of the fast backing-store maps
+// (FixedArray, FixedDoubleArray or COW array). Clobbers Tmp1().
+void MacroAssembler::AssertFastElements(Register elements) {
+ if (emit_debug_code()) {
+ Register temp = Tmp1();
+ Label ok;
+ Ldr(temp, FieldMemOperand(elements, HeapObject::kMapOffset));
+ JumpIfRoot(temp, Heap::kFixedArrayMapRootIndex, &ok);
+ JumpIfRoot(temp, Heap::kFixedDoubleArrayMapRootIndex, &ok);
+ JumpIfRoot(temp, Heap::kFixedCOWArrayMapRootIndex, &ok);
+ Abort(kJSObjectWithFastElementsMapHasSlowElements);
+ Bind(&ok);
+ }
+}
+
+
+// Debug check that 'object' is a non-smi whose instance type is below
+// FIRST_NONSTRING_TYPE, i.e. a string. Clobbers Tmp1().
+void MacroAssembler::AssertIsString(const Register& object) {
+ if (emit_debug_code()) {
+ Register temp = Tmp1();
+ STATIC_ASSERT(kSmiTag == 0);
+ Tst(object, Operand(kSmiTagMask));
+ Check(ne, kOperandIsNotAString);
+ Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset));
+ CompareInstanceType(temp, temp, FIRST_NONSTRING_TYPE);
+ Check(lo, kOperandIsNotAString);
+ }
+}
+
+
+// Abort with 'reason' unless condition 'cond' currently holds.
+// Always emitted, unlike Assert which is debug-code only.
+void MacroAssembler::Check(Condition cond, BailoutReason reason) {
+ Label ok;
+ B(cond, &ok);
+ Abort(reason);
+ // Will not return here.
+ Bind(&ok);
+}
+
+
+// Abort with 'reason' unless 'reg' is zero. Always emitted.
+void MacroAssembler::CheckRegisterIsClear(Register reg, BailoutReason reason) {
+ Label ok;
+ Cbz(reg, &ok);
+ Abort(reason);
+ // Will not return here.
+ Bind(&ok);
+}
+
+
+// Emit code that aborts execution with the given reason. In a real-aborts
+// build the message pointer is smi-split and passed to Runtime::kAbort;
+// otherwise the message is printed directly via CallPrintf. The message
+// string itself is embedded in the instruction stream after the abort code.
+void MacroAssembler::Abort(BailoutReason reason) {
+#ifdef DEBUG
+ RecordComment("Abort message: ");
+ RecordComment(GetBailoutReason(reason));
+
+ if (FLAG_trap_on_abort) {
+ Brk(0);
+ return;
+ }
+#endif
+
+ Label msg_address;
+ Adr(x0, &msg_address);
+
+ if (use_real_aborts()) {
+ // Split the message pointer into two SMI to avoid the GC
+ // trying to scan the string.
+ STATIC_ASSERT((kSmiShift == 32) && (kSmiTag == 0));
+ SmiTag(x1, x0);
+ Bic(x0, x0, kSmiShiftMask);
+
+ Push(x0, x1);
+
+ if (!has_frame_) {
+ // We don't actually want to generate a pile of code for this, so just
+ // claim there is a stack frame, without generating one.
+ FrameScope scope(this, StackFrame::NONE);
+ CallRuntime(Runtime::kAbort, 2);
+ } else {
+ CallRuntime(Runtime::kAbort, 2);
+ }
+ } else {
+ // Call Printf directly, to report the error. The message is in x0, which is
+ // the first argument to Printf.
+ if (!csp.Is(StackPointer())) {
+ // Printf requires a 16-byte-aligned csp.
+ Bic(csp, StackPointer(), 0xf);
+ }
+ CallPrintf();
+
+ // The CallPrintf will return, so this point is actually reachable in this
+ // context. However:
+ // - We're already executing an abort (which shouldn't be reachable in
+ // valid code).
+ // - We need a way to stop execution on both the simulator and real
+ // hardware, and Unreachable() is the best option.
+ Unreachable();
+ }
+
+ // Emit the message string directly in the instruction stream.
+ {
+ BlockConstPoolScope scope(this);
+ Bind(&msg_address);
+ // TODO(jbramley): Since the reason is an enum, why do we still encode the
+ // string (and a pointer to it) in the instruction stream?
+ EmitStringData(GetBailoutReason(reason));
+ }
+}
+
+
+// If 'map_in_out' is the native context's cached JSArray map for
+// 'expected_kind', replace it with the cached map for 'transitioned_kind';
+// otherwise branch to 'no_map_match'. Clobbers 'scratch' and Tmp0().
+void MacroAssembler::LoadTransitionedArrayMapConditional(
+ ElementsKind expected_kind,
+ ElementsKind transitioned_kind,
+ Register map_in_out,
+ Register scratch,
+ Label* no_map_match) {
+ // Load the global or builtins object from the current context.
+ Ldr(scratch, GlobalObjectMemOperand());
+ Ldr(scratch, FieldMemOperand(scratch, GlobalObject::kNativeContextOffset));
+
+ // Check that the function's map is the same as the expected cached map.
+ Ldr(scratch, ContextMemOperand(scratch, Context::JS_ARRAY_MAPS_INDEX));
+ size_t offset = (expected_kind * kPointerSize) + FixedArrayBase::kHeaderSize;
+ Ldr(Tmp0(), FieldMemOperand(scratch, offset));
+ Cmp(map_in_out, Tmp0());
+ B(ne, no_map_match);
+
+ // Use the transitioned cached map.
+ offset = (transitioned_kind * kPointerSize) + FixedArrayBase::kHeaderSize;
+ Ldr(map_in_out, FieldMemOperand(scratch, offset));
+}
+
+
+// Load into 'map_out' the initial map for arrays created by 'function_in',
+// transitioning away from smi-only elements where flags and 'holes' permit.
+void MacroAssembler::LoadInitialArrayMap(Register function_in,
+ Register scratch,
+ Register map_out,
+ ArrayHasHoles holes) {
+ ASSERT(!AreAliased(function_in, scratch, map_out));
+ Label done;
+ Ldr(map_out, FieldMemOperand(function_in,
+ JSFunction::kPrototypeOrInitialMapOffset));
+
+ if (!FLAG_smi_only_arrays) {
+ ElementsKind kind = (holes == kArrayCanHaveHoles) ? FAST_HOLEY_ELEMENTS
+ : FAST_ELEMENTS;
+ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS, kind, map_out,
+ scratch, &done);
+ } else if (holes == kArrayCanHaveHoles) {
+ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
+ FAST_HOLEY_SMI_ELEMENTS, map_out,
+ scratch, &done);
+ }
+ // On no-map-match the original initial map in 'map_out' is kept.
+ Bind(&done);
+}
+
+
+// Load the Array function from the current context into 'function'.
+void MacroAssembler::LoadArrayFunction(Register function) {
+ // Load the global or builtins object from the current context.
+ Ldr(function, GlobalObjectMemOperand());
+ // Load the global context from the global or builtins object.
+ Ldr(function,
+ FieldMemOperand(function, GlobalObject::kGlobalContextOffset));
+ // Load the array function from the native context.
+ Ldr(function, ContextMemOperand(function, Context::ARRAY_FUNCTION_INDEX));
+}
+
+
+// Load the native-context function at slot |index| into |function|, using
+// |function| itself as the only scratch register.
+void MacroAssembler::LoadGlobalFunction(int index, Register function) {
+  // Load the global or builtins object from the current context.
+  Ldr(function, GlobalObjectMemOperand());
+  // Load the native context from the global or builtins object.
+  Ldr(function, FieldMemOperand(function,
+                                GlobalObject::kNativeContextOffset));
+  // Load the function from the native context.
+  Ldr(function, ContextMemOperand(function, index));
+}
+
+
+// Load the initial map of global function |function| into |map|. In debug
+// code, verify the loaded value really is a map (its map is the meta map),
+// aborting otherwise; |scratch| is only used for that check.
+void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
+                                                  Register map,
+                                                  Register scratch) {
+  // Load the initial map. The global functions all have initial maps.
+  Ldr(map, FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
+  if (emit_debug_code()) {
+    Label ok, fail;
+    CheckMap(map, scratch, Heap::kMetaMapRootIndex, &fail, DO_SMI_CHECK);
+    B(&ok);
+    Bind(&fail);
+    Abort(kGlobalFunctionsMustHaveInitialMap);
+    Bind(&ok);
+  }
+}
+
+
+// This is the main Printf implementation. All other Printf variants call
+// PrintfNoPreserve after setting up one or more PreserveRegisterScopes.
+//
+// Contract: marshals up to four X/D arguments into the PCS argument
+// registers (x1..x4 or d0..d3), places the format string address in x0, and
+// calls printf (via CallPrintf). Caller-saved registers and NZCV are NOT
+// preserved; arguments must not alias Tmp0(), Tmp1() or the stack pointer.
+void MacroAssembler::PrintfNoPreserve(const char * format,
+                                      const CPURegister& arg0,
+                                      const CPURegister& arg1,
+                                      const CPURegister& arg2,
+                                      const CPURegister& arg3) {
+  // We cannot handle a caller-saved stack pointer. It doesn't make much sense
+  // in most cases anyway, so this restriction shouldn't be too serious.
+  ASSERT(!kCallerSaved.IncludesAliasOf(__ StackPointer()));
+
+  // We cannot print Tmp0() or Tmp1() as they're used internally by the macro
+  // assembler. We cannot print the stack pointer because it is typically used
+  // to preserve caller-saved registers (using other Printf variants which
+  // depend on this helper).
+  ASSERT(!AreAliased(Tmp0(), Tmp1(), StackPointer(), arg0));
+  ASSERT(!AreAliased(Tmp0(), Tmp1(), StackPointer(), arg1));
+  ASSERT(!AreAliased(Tmp0(), Tmp1(), StackPointer(), arg2));
+  ASSERT(!AreAliased(Tmp0(), Tmp1(), StackPointer(), arg3));
+
+  static const int kMaxArgCount = 4;
+  // Assume that we have the maximum number of arguments until we know
+  // otherwise.
+  int arg_count = kMaxArgCount;
+
+  // The provided arguments.
+  CPURegister args[kMaxArgCount] = {arg0, arg1, arg2, arg3};
+
+  // The PCS registers where the arguments need to end up.
+  CPURegister pcs[kMaxArgCount] = {NoCPUReg, NoCPUReg, NoCPUReg, NoCPUReg};
+
+  // Promote FP arguments to doubles, and integer arguments to X registers.
+  // Note that FP and integer arguments cannot be mixed, but we'll check
+  // AreSameSizeAndType once we've processed these promotions.
+  for (int i = 0; i < kMaxArgCount; i++) {
+    if (args[i].IsRegister()) {
+      // Note that we use x1 onwards, because x0 will hold the format string.
+      pcs[i] = Register::XRegFromCode(i + 1);
+      // For simplicity, we handle all integer arguments as X registers. An X
+      // register argument takes the same space as a W register argument in the
+      // PCS anyway. The only limitation is that we must explicitly clear the
+      // top word for W register arguments as the callee will expect it to be
+      // clear.
+      if (!args[i].Is64Bits()) {
+        const Register& as_x = args[i].X();
+        And(as_x, as_x, 0x00000000ffffffff);
+        args[i] = as_x;
+      }
+    } else if (args[i].IsFPRegister()) {
+      pcs[i] = FPRegister::DRegFromCode(i);
+      // C and C++ varargs functions (such as printf) implicitly promote float
+      // arguments to doubles.
+      if (!args[i].Is64Bits()) {
+        FPRegister s(args[i]);
+        const FPRegister& as_d = args[i].D();
+        Fcvt(as_d, s);
+        args[i] = as_d;
+      }
+    } else {
+      // This is the first empty (NoCPUReg) argument, so use it to set the
+      // argument count and bail out.
+      arg_count = i;
+      break;
+    }
+  }
+  ASSERT((arg_count >= 0) && (arg_count <= kMaxArgCount));
+  // Check that every remaining argument is NoCPUReg.
+  for (int i = arg_count; i < kMaxArgCount; i++) {
+    ASSERT(args[i].IsNone());
+  }
+  ASSERT((arg_count == 0) || AreSameSizeAndType(args[0], args[1],
+                                                args[2], args[3],
+                                                pcs[0], pcs[1],
+                                                pcs[2], pcs[3]));
+
+  // Move the arguments into the appropriate PCS registers.
+  //
+  // Arranging an arbitrary list of registers into x1-x4 (or d0-d3) is
+  // surprisingly complicated.
+  //
+  //  * For even numbers of registers, we push the arguments and then pop them
+  //    into their final registers. This maintains 16-byte stack alignment in
+  //    case csp is the stack pointer, since we're only handling X or D
+  //    registers at this point.
+  //
+  //  * For odd numbers of registers, we push and pop all but one register in
+  //    the same way, but the left-over register is moved directly, since we
+  //    can always safely move one register without clobbering any source.
+  if (arg_count >= 4) {
+    Push(args[3], args[2], args[1], args[0]);
+  } else if (arg_count >= 2) {
+    Push(args[1], args[0]);
+  }
+
+  if ((arg_count % 2) != 0) {
+    // Move the left-over register directly.
+    const CPURegister& leftover_arg = args[arg_count - 1];
+    const CPURegister& leftover_pcs = pcs[arg_count - 1];
+    if (leftover_arg.IsRegister()) {
+      Mov(Register(leftover_pcs), Register(leftover_arg));
+    } else {
+      Fmov(FPRegister(leftover_pcs), FPRegister(leftover_arg));
+    }
+  }
+
+  if (arg_count >= 4) {
+    Pop(pcs[0], pcs[1], pcs[2], pcs[3]);
+  } else if (arg_count >= 2) {
+    Pop(pcs[0], pcs[1]);
+  }
+
+  // Load the format string into x0, as per the procedure-call standard.
+  //
+  // To make the code as portable as possible, the format string is encoded
+  // directly in the instruction stream. It might be cleaner to encode it in a
+  // literal pool, but since Printf is usually used for debugging, it is
+  // beneficial for it to be minimally dependent on other features.
+  Label format_address;
+  Adr(x0, &format_address);
+
+  // Emit the format string directly in the instruction stream.
+  { BlockConstPoolScope scope(this);
+    Label after_data;
+    // Branch over the inlined string data so it is never executed; the
+    // Unreachable() marker guards against falling into it.
+    B(&after_data);
+    Bind(&format_address);
+    EmitStringData(format);
+    Unreachable();
+    Bind(&after_data);
+  }
+
+  // We don't pass any arguments on the stack, but we still need to align the C
+  // stack pointer to a 16-byte boundary for PCS compliance.
+  if (!csp.Is(StackPointer())) {
+    Bic(csp, StackPointer(), 0xf);
+  }
+
+  CallPrintf(pcs[0].type());
+}
+
+
+// Emit the actual call to printf. |type| tells the simulator whether the
+// arguments were marshalled into integer (X) or FP (D) registers.
+void MacroAssembler::CallPrintf(CPURegister::RegisterType type) {
+  // A call to printf needs special handling for the simulator, since the system
+  // printf function will use a different instruction set and the procedure-call
+  // standard will not be compatible.
+#ifdef USE_SIMULATOR
+  { InstructionAccurateScope scope(this, kPrintfLength / kInstructionSize);
+    // The simulator traps on this hlt and reads the register type from the
+    // following 32-bit data word.
+    hlt(kImmExceptionIsPrintf);
+    dc32(type);
+  }
+#else
+  Call(FUNCTION_ADDR(printf), RelocInfo::EXTERNAL_REFERENCE);
+#endif
+}
+
+
+// Like PrintfNoPreserve, but saves and restores all caller-saved registers
+// (integer and FP) plus the NZCV flags around the call, so it is safe to
+// sprinkle into generated code for debugging.
+void MacroAssembler::Printf(const char * format,
+                            const CPURegister& arg0,
+                            const CPURegister& arg1,
+                            const CPURegister& arg2,
+                            const CPURegister& arg3) {
+  // Preserve all caller-saved registers as well as NZCV.
+  // If csp is the stack pointer, PushCPURegList asserts that the size of each
+  // list is a multiple of 16 bytes.
+  PushCPURegList(kCallerSaved);
+  PushCPURegList(kCallerSavedFP);
+  // Use Tmp0() as a scratch register. It is not accepted by Printf so it will
+  // never overlap an argument register.
+  Mrs(Tmp0(), NZCV);
+  // Push NZCV paired with xzr to keep the slot count even (16-byte aligned).
+  Push(Tmp0(), xzr);
+
+  PrintfNoPreserve(format, arg0, arg1, arg2, arg3);
+
+  // Restore NZCV and all caller-saved registers in reverse order.
+  Pop(xzr, Tmp0());
+  Msr(NZCV, Tmp0());
+  PopCPURegList(kCallerSavedFP);
+  PopCPURegList(kCallerSaved);
+}
+
+
+// Emit the patchable "young" frame-setup sequence into this assembler.
+// Delegates to the static overload below so the same code can be generated
+// into a PatchingAssembler for comparison (see IsYoungSequence).
+void MacroAssembler::EmitFrameSetupForCodeAgePatching() {
+  // TODO(jbramley): Other architectures use the internal memcpy to copy the
+  // sequence. If this is a performance bottleneck, we should consider caching
+  // the sequence and copying it in the same way.
+  InstructionAccurateScope scope(this, kCodeAgeSequenceSize / kInstructionSize);
+  ASSERT(jssp.Is(StackPointer()));
+  EmitFrameSetupForCodeAgePatching(this);
+}
+
+
+
+// Emit the "old" (aged) code sequence that jumps to the code-age |stub|.
+// Delegates to the static overload below; the InstructionAccurateScope pins
+// the sequence to exactly kCodeAgeSequenceSize bytes so it can be patched.
+void MacroAssembler::EmitCodeAgeSequence(Code* stub) {
+  InstructionAccurateScope scope(this, kCodeAgeSequenceSize / kInstructionSize);
+  ASSERT(jssp.Is(StackPointer()));
+  EmitCodeAgeSequence(this, stub);
+}
+
+
+#undef __
+#define __ assm->
+
+
+// Static worker: emit the young frame-setup sequence (push x1, cp, fp, lr
+// and set up fp) into |assm|. Must be exactly kCodeAgeSequenceSize bytes so
+// that the aged sequence can overwrite it in place.
+void MacroAssembler::EmitFrameSetupForCodeAgePatching(Assembler * assm) {
+  Label start;
+  __ bind(&start);
+
+  // We can do this sequence using four instructions, but the code ageing
+  // sequence that patches it needs five, so we use the extra space to try to
+  // simplify some addressing modes and remove some dependencies (compared to
+  // using two stp instructions with write-back).
+  __ sub(jssp, jssp, 4 * kXRegSizeInBytes);
+  // Keep csp in sync with jssp (ABI requires memory accesses stay above csp).
+  __ sub(csp, csp, 4 * kXRegSizeInBytes);
+  __ stp(x1, cp, MemOperand(jssp, 0 * kXRegSizeInBytes));
+  __ stp(fp, lr, MemOperand(jssp, 2 * kXRegSizeInBytes));
+  __ add(fp, jssp, StandardFrameConstants::kFixedFrameSizeFromFp);
+
+  __ AssertSizeOfCodeGeneratedSince(&start, kCodeAgeSequenceSize);
+}
+
+
+// Static worker: emit the aged sequence into |assm|. With |stub| == NULL only
+// the fixed prefix (up to kCodeAgeStubEntryOffset) is emitted, which is what
+// IsCodeAgeSequence compares against.
+void MacroAssembler::EmitCodeAgeSequence(Assembler * assm,
+                                         Code * stub) {
+  Label start;
+  __ bind(&start);
+  // When the stub is called, the sequence is replaced with the young sequence
+  // (as in EmitFrameSetupForCodeAgePatching). After the code is replaced, the
+  // stub jumps to &start, stored in x0. The young sequence does not call the
+  // stub so there is no infinite loop here.
+  //
+  // A branch (br) is used rather than a call (blr) because this code replaces
+  // the frame setup code that would normally preserve lr.
+  __ LoadLiteral(ip0, kCodeAgeStubEntryOffset);
+  __ adr(x0, &start);
+  __ br(ip0);
+  // IsCodeAgeSequence in codegen-a64.cc assumes that the code generated up
+  // until now (kCodeAgeStubEntryOffset) is the same for all code age sequences.
+  __ AssertSizeOfCodeGeneratedSince(&start, kCodeAgeStubEntryOffset);
+  if (stub) {
+    // Inline the stub entry address as literal data after the branch.
+    __ dc64(reinterpret_cast<uint64_t>(stub->instruction_start()));
+    __ AssertSizeOfCodeGeneratedSince(&start, kCodeAgeSequenceSize);
+  }
+}
+
+
+// Return true if |sequence| is byte-identical to the young frame-setup
+// sequence. In debug builds, a non-young sequence must be a valid aged
+// sequence.
+bool MacroAssembler::IsYoungSequence(byte* sequence) {
+  // Generate a young sequence to compare with.
+  const int length = kCodeAgeSequenceSize / kInstructionSize;
+  // NOTE(review): lazy static initialisation here is not thread-safe —
+  // confirm this is only reached from a single thread.
+  static bool initialized = false;
+  static byte young[kCodeAgeSequenceSize];
+  if (!initialized) {
+    PatchingAssembler patcher(young, length);
+    // The young sequence is the frame setup code for FUNCTION code types. It is
+    // generated by FullCodeGenerator::Generate.
+    MacroAssembler::EmitFrameSetupForCodeAgePatching(&patcher);
+    initialized = true;
+  }
+
+  bool is_young = (memcmp(sequence, young, kCodeAgeSequenceSize) == 0);
+  ASSERT(is_young || IsCodeAgeSequence(sequence));
+  return is_young;
+}
+
+
+#ifdef DEBUG
+// Debug-only check that |sequence| starts with the fixed prefix of an aged
+// code sequence (the part before the inlined stub address).
+bool MacroAssembler::IsCodeAgeSequence(byte* sequence) {
+  // The old sequence varies depending on the code age. However, the code up
+  // until kCodeAgeStubEntryOffset does not change, so we can check that part to
+  // get a reasonable level of verification.
+  const int length = kCodeAgeStubEntryOffset / kInstructionSize;
+  // NOTE(review): lazy static initialisation, same caveat as IsYoungSequence.
+  static bool initialized = false;
+  static byte old[kCodeAgeStubEntryOffset];
+  if (!initialized) {
+    PatchingAssembler patcher(old, length);
+    MacroAssembler::EmitCodeAgeSequence(&patcher, NULL);
+    initialized = true;
+  }
+  return memcmp(sequence, old, kCodeAgeStubEntryOffset) == 0;
+}
+#endif
+
+
+#undef __
+#define __ masm->
+
+
+// Emit the inline-data word describing a patchable smi check: which X
+// register was tested (|reg|) and how far back the check is (|smi_check|).
+// An invalid |reg| records "no patch site" (payload 0).
+void InlineSmiCheckInfo::Emit(MacroAssembler* masm, const Register& reg,
+                       const Label* smi_check) {
+  Assembler::BlockConstPoolScope scope(masm);
+  if (reg.IsValid()) {
+    ASSERT(smi_check->is_bound());
+    ASSERT(reg.Is64Bits());
+
+    // Encode the register (x0-x30) in the lowest 5 bits, then the offset to
+    // 'check' in the other bits. The possible offset is limited in that we
+    // use BitField to pack the data, and the underlying data type is a
+    // uint32_t.
+    uint32_t delta = __ InstructionsGeneratedSince(smi_check);
+    __ InlineData(RegisterBits::encode(reg.code()) | DeltaBits::encode(delta));
+  } else {
+    ASSERT(!smi_check->is_bound());
+
+    // An offset of 0 indicates that there is no patch site.
+    __ InlineData(0);
+  }
+}
+
+
+// Decode the inline-data word emitted by InlineSmiCheckInfo::Emit at |info|.
+// Leaves reg_ == NoReg and smi_check_ == NULL when no patch site was
+// recorded (payload 0).
+InlineSmiCheckInfo::InlineSmiCheckInfo(Address info)
+    : reg_(NoReg), smi_check_(NULL) {
+  InstructionSequence* inline_data = InstructionSequence::At(info);
+  ASSERT(inline_data->IsInlineData());
+  if (inline_data->IsInlineData()) {
+    uint64_t payload = inline_data->InlineData();
+    // We use BitField to decode the payload, and BitField can only handle
+    // 32-bit values.
+    ASSERT(is_uint32(payload));
+    if (payload != 0) {
+      int reg_code = RegisterBits::decode(payload);
+      reg_ = Register::XRegFromCode(reg_code);
+      uint64_t smi_check_delta = DeltaBits::decode(payload);
+      ASSERT(smi_check_delta != 0);
+      // The smi check lies |delta| instructions before the inline data word.
+      smi_check_ = inline_data - (smi_check_delta * kInstructionSize);
+    }
+  }
+}
+
+
+#undef __
+
+
+} } // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_A64
--- /dev/null
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_A64_MACRO_ASSEMBLER_A64_H_
+#define V8_A64_MACRO_ASSEMBLER_A64_H_
+
+#include "v8globals.h"
+#include "globals.h"
+
+#include "a64/assembler-a64-inl.h"
+
+namespace v8 {
+namespace internal {
+
+// X-macro list of load/store macro instructions. Columns are:
+// (macro name, register parameter type, parameter name, load/store opcode
+// expression — which may depend on the register's size via |rt|).
+#define LS_MACRO_LIST(V)                                     \
+  V(Ldrb, Register&, rt, LDRB_w)                             \
+  V(Strb, Register&, rt, STRB_w)                             \
+  V(Ldrsb, Register&, rt, rt.Is64Bits() ? LDRSB_x : LDRSB_w) \
+  V(Ldrh, Register&, rt, LDRH_w)                             \
+  V(Strh, Register&, rt, STRH_w)                             \
+  V(Ldrsh, Register&, rt, rt.Is64Bits() ? LDRSH_x : LDRSH_w) \
+  V(Ldr, CPURegister&, rt, LoadOpFor(rt))                    \
+  V(Str, CPURegister&, rt, StoreOpFor(rt))                   \
+  V(Ldrsw, Register&, rt, LDRSW_x)
+
+
+// ----------------------------------------------------------------------------
+// Static helper functions
+
+// Generate a MemOperand for loading a field from an object.
+inline MemOperand FieldMemOperand(Register object, int offset);
+inline MemOperand UntagSmiFieldMemOperand(Register object, int offset);
+
+// Generate a MemOperand for loading a SMI from memory.
+inline MemOperand UntagSmiMemOperand(Register object, int offset);
+
+
+// ----------------------------------------------------------------------------
+// MacroAssembler
+
+// Option enums consumed by MacroAssembler entry points.
+enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET };
+enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK };
+// Whether lr has already been saved by the caller (affects write barriers).
+enum LinkRegisterStatus { kLRHasNotBeenSaved, kLRHasBeenSaved };
+enum TargetAddressStorageMode {
+  CAN_INLINE_TARGET_ADDRESS,
+  NEVER_INLINE_TARGET_ADDRESS
+};
+enum UntagMode { kNotSpeculativeUntag, kSpeculativeUntag };
+// Whether an array may contain holes (see LoadInitialArrayMap).
+enum ArrayHasHoles { kArrayCantHaveHoles, kArrayCanHaveHoles };
+// Size hint for memory copies.
+enum CopyHint { kCopyUnknown, kCopyShort, kCopyLong };
+enum DiscardMoveMode { kDontDiscardForSameWReg, kDiscardForSameWReg };
+enum SeqStringSetCharCheckIndexType { kIndexIsSmi, kIndexIsInteger32 };
+
+class MacroAssembler : public Assembler {
+ public:
+ MacroAssembler(Isolate* isolate, byte * buffer, unsigned buffer_size);
+
+ inline Handle<Object> CodeObject();
+
+ // Instruction set functions ------------------------------------------------
+ // Logical macros.
+ inline void And(const Register& rd,
+ const Register& rn,
+ const Operand& operand);
+ inline void Ands(const Register& rd,
+ const Register& rn,
+ const Operand& operand);
+ inline void Bic(const Register& rd,
+ const Register& rn,
+ const Operand& operand);
+ inline void Bics(const Register& rd,
+ const Register& rn,
+ const Operand& operand);
+ inline void Orr(const Register& rd,
+ const Register& rn,
+ const Operand& operand);
+ inline void Orn(const Register& rd,
+ const Register& rn,
+ const Operand& operand);
+ inline void Eor(const Register& rd,
+ const Register& rn,
+ const Operand& operand);
+ inline void Eon(const Register& rd,
+ const Register& rn,
+ const Operand& operand);
+ inline void Tst(const Register& rn, const Operand& operand);
+ void LogicalMacro(const Register& rd,
+ const Register& rn,
+ const Operand& operand,
+ LogicalOp op);
+
+ // Add and sub macros.
+ inline void Add(const Register& rd,
+ const Register& rn,
+ const Operand& operand);
+ inline void Adds(const Register& rd,
+ const Register& rn,
+ const Operand& operand);
+ inline void Sub(const Register& rd,
+ const Register& rn,
+ const Operand& operand);
+ inline void Subs(const Register& rd,
+ const Register& rn,
+ const Operand& operand);
+ inline void Cmn(const Register& rn, const Operand& operand);
+ inline void Cmp(const Register& rn, const Operand& operand);
+ inline void Neg(const Register& rd,
+ const Operand& operand);
+ inline void Negs(const Register& rd,
+ const Operand& operand);
+
+ void AddSubMacro(const Register& rd,
+ const Register& rn,
+ const Operand& operand,
+ FlagsUpdate S,
+ AddSubOp op);
+
+ // Add/sub with carry macros.
+ inline void Adc(const Register& rd,
+ const Register& rn,
+ const Operand& operand);
+ inline void Adcs(const Register& rd,
+ const Register& rn,
+ const Operand& operand);
+ inline void Sbc(const Register& rd,
+ const Register& rn,
+ const Operand& operand);
+ inline void Sbcs(const Register& rd,
+ const Register& rn,
+ const Operand& operand);
+ inline void Ngc(const Register& rd,
+ const Operand& operand);
+ inline void Ngcs(const Register& rd,
+ const Operand& operand);
+ void AddSubWithCarryMacro(const Register& rd,
+ const Register& rn,
+ const Operand& operand,
+ FlagsUpdate S,
+ AddSubWithCarryOp op);
+
+ // Move macros.
+ void Mov(const Register& rd,
+ const Operand& operand,
+ DiscardMoveMode discard_mode = kDontDiscardForSameWReg);
+ void Mov(const Register& rd, uint64_t imm);
+ inline void Mvn(const Register& rd, uint64_t imm);
+ void Mvn(const Register& rd, const Operand& operand);
+ static bool IsImmMovn(uint64_t imm, unsigned reg_size);
+ static bool IsImmMovz(uint64_t imm, unsigned reg_size);
+ static unsigned CountClearHalfWords(uint64_t imm, unsigned reg_size);
+
+ // Conditional macros.
+ inline void Ccmp(const Register& rn,
+ const Operand& operand,
+ StatusFlags nzcv,
+ Condition cond);
+ inline void Ccmn(const Register& rn,
+ const Operand& operand,
+ StatusFlags nzcv,
+ Condition cond);
+ void ConditionalCompareMacro(const Register& rn,
+ const Operand& operand,
+ StatusFlags nzcv,
+ Condition cond,
+ ConditionalCompareOp op);
+ void Csel(const Register& rd,
+ const Register& rn,
+ const Operand& operand,
+ Condition cond);
+
+ // Load/store macros.
+#define DECLARE_FUNCTION(FN, REGTYPE, REG, OP) \
+ inline void FN(const REGTYPE REG, const MemOperand& addr);
+ LS_MACRO_LIST(DECLARE_FUNCTION)
+#undef DECLARE_FUNCTION
+
+ void LoadStoreMacro(const CPURegister& rt,
+ const MemOperand& addr,
+ LoadStoreOp op);
+
+ // V8-specific load/store helpers.
+ void Load(const Register& rt, const MemOperand& addr, Representation r);
+ void Store(const Register& rt, const MemOperand& addr, Representation r);
+
+ // Remaining instructions are simple pass-through calls to the assembler.
+ inline void Adr(const Register& rd, Label* label);
+ inline void Asr(const Register& rd, const Register& rn, unsigned shift);
+ inline void Asr(const Register& rd, const Register& rn, const Register& rm);
+ inline void B(Label* label);
+ inline void B(Condition cond, Label* label);
+ inline void B(Label* label, Condition cond);
+ inline void Bfi(const Register& rd,
+ const Register& rn,
+ unsigned lsb,
+ unsigned width);
+ inline void Bfxil(const Register& rd,
+ const Register& rn,
+ unsigned lsb,
+ unsigned width);
+ inline void Bind(Label* label);
+ inline void Bl(Label* label);
+ inline void Blr(const Register& xn);
+ inline void Br(const Register& xn);
+ inline void Brk(int code);
+ inline void Cbnz(const Register& rt, Label* label);
+ inline void Cbz(const Register& rt, Label* label);
+ inline void Cinc(const Register& rd, const Register& rn, Condition cond);
+ inline void Cinv(const Register& rd, const Register& rn, Condition cond);
+ inline void Cls(const Register& rd, const Register& rn);
+ inline void Clz(const Register& rd, const Register& rn);
+ inline void Cneg(const Register& rd, const Register& rn, Condition cond);
+ inline void CzeroX(const Register& rd, Condition cond);
+ inline void CmovX(const Register& rd, const Register& rn, Condition cond);
+ inline void Cset(const Register& rd, Condition cond);
+ inline void Csetm(const Register& rd, Condition cond);
+ inline void Csinc(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ Condition cond);
+ inline void Csinv(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ Condition cond);
+ inline void Csneg(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ Condition cond);
+ inline void Dmb(BarrierDomain domain, BarrierType type);
+ inline void Dsb(BarrierDomain domain, BarrierType type);
+ inline void Debug(const char* message, uint32_t code, Instr params = BREAK);
+ inline void Extr(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ unsigned lsb);
+ inline void Fabs(const FPRegister& fd, const FPRegister& fn);
+ inline void Fadd(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm);
+ inline void Fccmp(const FPRegister& fn,
+ const FPRegister& fm,
+ StatusFlags nzcv,
+ Condition cond);
+ inline void Fcmp(const FPRegister& fn, const FPRegister& fm);
+ inline void Fcmp(const FPRegister& fn, double value);
+ inline void Fcsel(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm,
+ Condition cond);
+ inline void Fcvt(const FPRegister& fd, const FPRegister& fn);
+ inline void Fcvtas(const Register& rd, const FPRegister& fn);
+ inline void Fcvtau(const Register& rd, const FPRegister& fn);
+ inline void Fcvtms(const Register& rd, const FPRegister& fn);
+ inline void Fcvtmu(const Register& rd, const FPRegister& fn);
+ inline void Fcvtns(const Register& rd, const FPRegister& fn);
+ inline void Fcvtnu(const Register& rd, const FPRegister& fn);
+ inline void Fcvtzs(const Register& rd, const FPRegister& fn);
+ inline void Fcvtzu(const Register& rd, const FPRegister& fn);
+ inline void Fdiv(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm);
+ inline void Fmadd(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm,
+ const FPRegister& fa);
+ inline void Fmax(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm);
+ inline void Fmaxnm(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm);
+ inline void Fmin(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm);
+ inline void Fminnm(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm);
+ inline void Fmov(FPRegister fd, FPRegister fn);
+ inline void Fmov(FPRegister fd, Register rn);
+ inline void Fmov(FPRegister fd, double imm);
+ inline void Fmov(Register rd, FPRegister fn);
+ inline void Fmsub(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm,
+ const FPRegister& fa);
+ inline void Fmul(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm);
+ inline void Fneg(const FPRegister& fd, const FPRegister& fn);
+ inline void Fnmadd(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm,
+ const FPRegister& fa);
+ inline void Fnmsub(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm,
+ const FPRegister& fa);
+ inline void Frinta(const FPRegister& fd, const FPRegister& fn);
+ inline void Frintn(const FPRegister& fd, const FPRegister& fn);
+ inline void Frintz(const FPRegister& fd, const FPRegister& fn);
+ inline void Fsqrt(const FPRegister& fd, const FPRegister& fn);
+ inline void Fsub(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm);
+ inline void Hint(SystemHint code);
+ inline void Hlt(int code);
+ inline void Isb();
+ inline void Ldnp(const CPURegister& rt,
+ const CPURegister& rt2,
+ const MemOperand& src);
+ inline void Ldp(const CPURegister& rt,
+ const CPURegister& rt2,
+ const MemOperand& src);
+ inline void Ldpsw(const Register& rt,
+ const Register& rt2,
+ const MemOperand& src);
+ inline void Ldr(const FPRegister& ft, double imm);
+ inline void Ldr(const Register& rt, uint64_t imm);
+ inline void Lsl(const Register& rd, const Register& rn, unsigned shift);
+ inline void Lsl(const Register& rd, const Register& rn, const Register& rm);
+ inline void Lsr(const Register& rd, const Register& rn, unsigned shift);
+ inline void Lsr(const Register& rd, const Register& rn, const Register& rm);
+ inline void Madd(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ const Register& ra);
+ inline void Mneg(const Register& rd, const Register& rn, const Register& rm);
+ inline void Mov(const Register& rd, const Register& rm);
+ inline void Movk(const Register& rd, uint64_t imm, int shift = -1);
+ inline void Mrs(const Register& rt, SystemRegister sysreg);
+ inline void Msr(SystemRegister sysreg, const Register& rt);
+ inline void Msub(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ const Register& ra);
+ inline void Mul(const Register& rd, const Register& rn, const Register& rm);
+ inline void Nop() { nop(); }
+ inline void Rbit(const Register& rd, const Register& rn);
+ inline void Ret(const Register& xn = lr);
+ inline void Rev(const Register& rd, const Register& rn);
+ inline void Rev16(const Register& rd, const Register& rn);
+ inline void Rev32(const Register& rd, const Register& rn);
+ inline void Ror(const Register& rd, const Register& rs, unsigned shift);
+ inline void Ror(const Register& rd, const Register& rn, const Register& rm);
+ inline void Sbfiz(const Register& rd,
+ const Register& rn,
+ unsigned lsb,
+ unsigned width);
+ inline void Sbfx(const Register& rd,
+ const Register& rn,
+ unsigned lsb,
+ unsigned width);
+ inline void Scvtf(const FPRegister& fd,
+ const Register& rn,
+ unsigned fbits = 0);
+ inline void Sdiv(const Register& rd, const Register& rn, const Register& rm);
+ inline void Smaddl(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ const Register& ra);
+ inline void Smsubl(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ const Register& ra);
+ inline void Smull(const Register& rd,
+ const Register& rn,
+ const Register& rm);
+ inline void Smulh(const Register& rd,
+ const Register& rn,
+ const Register& rm);
+ inline void Stnp(const CPURegister& rt,
+ const CPURegister& rt2,
+ const MemOperand& dst);
+ inline void Stp(const CPURegister& rt,
+ const CPURegister& rt2,
+ const MemOperand& dst);
+ inline void Sxtb(const Register& rd, const Register& rn);
+ inline void Sxth(const Register& rd, const Register& rn);
+ inline void Sxtw(const Register& rd, const Register& rn);
+ inline void Tbnz(const Register& rt, unsigned bit_pos, Label* label);
+ inline void Tbz(const Register& rt, unsigned bit_pos, Label* label);
+ inline void Ubfiz(const Register& rd,
+ const Register& rn,
+ unsigned lsb,
+ unsigned width);
+ inline void Ubfx(const Register& rd,
+ const Register& rn,
+ unsigned lsb,
+ unsigned width);
+ inline void Ucvtf(const FPRegister& fd,
+ const Register& rn,
+ unsigned fbits = 0);
+ inline void Udiv(const Register& rd, const Register& rn, const Register& rm);
+ inline void Umaddl(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ const Register& ra);
+ inline void Umsubl(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ const Register& ra);
+ inline void Unreachable();
+ inline void Uxtb(const Register& rd, const Register& rn);
+ inline void Uxth(const Register& rd, const Register& rn);
+ inline void Uxtw(const Register& rd, const Register& rn);
+
+ // Pseudo-instructions ------------------------------------------------------
+
+ // Compute rd = abs(rm).
+ // This function clobbers the condition flags.
+ //
+ // If rm is the minimum representable value, the result is not representable.
+ // Handlers for each case can be specified using the relevant labels.
+ void Abs(const Register& rd, const Register& rm,
+ Label * is_not_representable = NULL,
+ Label * is_representable = NULL);
+
+ // Push or pop up to 4 registers of the same width to or from the stack,
+ // using the current stack pointer as set by SetStackPointer.
+ //
+ // If an argument register is 'NoReg', all further arguments are also assumed
+ // to be 'NoReg', and are thus not pushed or popped.
+ //
+ // Arguments are ordered such that "Push(a, b);" is functionally equivalent
+ // to "Push(a); Push(b);".
+ //
+ // It is valid to push the same register more than once, and there is no
+ // restriction on the order in which registers are specified.
+ //
+ // It is not valid to pop into the same register more than once in one
+ // operation, not even into the zero register.
+ //
+ // If the current stack pointer (as set by SetStackPointer) is csp, then it
+ // must be aligned to 16 bytes on entry and the total size of the specified
+ // registers must also be a multiple of 16 bytes.
+ //
+ // Even if the current stack pointer is not the system stack pointer (csp),
+ // Push (and derived methods) will still modify the system stack pointer in
+ // order to comply with ABI rules about accessing memory below the system
+ // stack pointer.
+ //
+ // Other than the registers passed into Pop, the stack pointer and (possibly)
+ // the system stack pointer, these methods do not modify any other registers.
+ // Scratch registers such as Tmp0() and Tmp1() are preserved.
+ void Push(const CPURegister& src0, const CPURegister& src1 = NoReg,
+ const CPURegister& src2 = NoReg, const CPURegister& src3 = NoReg);
+ void Pop(const CPURegister& dst0, const CPURegister& dst1 = NoReg,
+ const CPURegister& dst2 = NoReg, const CPURegister& dst3 = NoReg);
+
+ // Alternative forms of Push and Pop, taking a RegList or CPURegList that
+ // specifies the registers that are to be pushed or popped. Higher-numbered
+ // registers are associated with higher memory addresses (as in the A32 push
+ // and pop instructions).
+ //
+ // (Push|Pop)SizeRegList allow you to specify the register size as a
+ // parameter. Only kXRegSize, kWRegSize, kDRegSize and kSRegSize are
+ // supported.
+ //
+ // Otherwise, (Push|Pop)(CPU|X|W|D|S)RegList is preferred.
+ void PushCPURegList(CPURegList registers);
+ void PopCPURegList(CPURegList registers);
+
+ inline void PushSizeRegList(RegList registers, unsigned reg_size,
+ CPURegister::RegisterType type = CPURegister::kRegister) {
+ PushCPURegList(CPURegList(type, reg_size, registers));
+ }
+ inline void PopSizeRegList(RegList registers, unsigned reg_size,
+ CPURegister::RegisterType type = CPURegister::kRegister) {
+ PopCPURegList(CPURegList(type, reg_size, registers));
+ }
+ inline void PushXRegList(RegList regs) {
+ PushSizeRegList(regs, kXRegSize);
+ }
+ inline void PopXRegList(RegList regs) {
+ PopSizeRegList(regs, kXRegSize);
+ }
+ inline void PushWRegList(RegList regs) {
+ PushSizeRegList(regs, kWRegSize);
+ }
+ inline void PopWRegList(RegList regs) {
+ PopSizeRegList(regs, kWRegSize);
+ }
+ inline void PushDRegList(RegList regs) {
+ PushSizeRegList(regs, kDRegSize, CPURegister::kFPRegister);
+ }
+ inline void PopDRegList(RegList regs) {
+ PopSizeRegList(regs, kDRegSize, CPURegister::kFPRegister);
+ }
+ inline void PushSRegList(RegList regs) {
+ PushSizeRegList(regs, kSRegSize, CPURegister::kFPRegister);
+ }
+ inline void PopSRegList(RegList regs) {
+ PopSizeRegList(regs, kSRegSize, CPURegister::kFPRegister);
+ }
+
+ // Push the specified register 'count' times.
+ void PushMultipleTimes(int count, Register src);
+
+ // This is a convenience method for pushing a single Handle<Object>.
+ inline void Push(Handle<Object> handle);
+ void Push(Smi* smi) { Push(Handle<Smi>(smi, isolate())); }
+
+ // Aliases of Push and Pop, required for V8 compatibility.
+ inline void push(Register src) {
+ Push(src);
+ }
+ inline void pop(Register dst) {
+ Pop(dst);
+ }
+
+ // Poke 'src' onto the stack. The offset is in bytes.
+ //
+ // If the current stack pointer (according to StackPointer()) is csp, then
+ // csp must be aligned to 16 bytes.
+ void Poke(const CPURegister& src, const Operand& offset);
+
+ // Peek at a value on the stack, and put it in 'dst'. The offset is in bytes.
+ //
+ // If the current stack pointer (according to StackPointer()) is csp, then
+ // csp must be aligned to 16 bytes.
+ void Peek(const CPURegister& dst, const Operand& offset);
+
+ // Poke 'src1' and 'src2' onto the stack. The values written will be adjacent
+ // with 'src2' at a higher address than 'src1'. The offset is in bytes.
+ //
+ // If the current stack pointer (according to StackPointer()) is csp, then
+ // csp must be aligned to 16 bytes.
+ void PokePair(const CPURegister& src1, const CPURegister& src2, int offset);
+
+ // Peek at two values on the stack, and put them in 'dst1' and 'dst2'. The
+ // values peeked will be adjacent, with the value in 'dst2' being from a
+ // higher address than 'dst1'. The offset is in bytes.
+ //
+ // If the current stack pointer (according to StackPointer()) is csp, then
+ // csp must be aligned to 16 bytes.
+ void PeekPair(const CPURegister& dst1, const CPURegister& dst2, int offset);
+
+ // Claim or drop stack space without actually accessing memory.
+ //
+ // In debug mode, both of these will write invalid data into the claimed or
+ // dropped space.
+ //
+ // If the current stack pointer (according to StackPointer()) is csp, then it
+ // must be aligned to 16 bytes and the size claimed or dropped must be a
+ // multiple of 16 bytes.
+ //
+ // Note that unit_size must be specified in bytes. For variants which take a
+ // Register count, the unit size must be a power of two.
+ inline void Claim(uint64_t count, uint64_t unit_size = kXRegSizeInBytes);
+ inline void Claim(const Register& count,
+ uint64_t unit_size = kXRegSizeInBytes);
+ inline void Drop(uint64_t count, uint64_t unit_size = kXRegSizeInBytes);
+ inline void Drop(const Register& count,
+ uint64_t unit_size = kXRegSizeInBytes);
+
+ // Variants of Claim and Drop, where the 'count' parameter is a SMI held in a
+ // register.
+ inline void ClaimBySMI(const Register& count_smi,
+ uint64_t unit_size = kXRegSizeInBytes);
+ inline void DropBySMI(const Register& count_smi,
+ uint64_t unit_size = kXRegSizeInBytes);
+
+ // Compare a register with an operand, and branch to label depending on the
+ // condition. May corrupt the status flags.
+ inline void CompareAndBranch(const Register& lhs,
+ const Operand& rhs,
+ Condition cond,
+ Label* label);
+
+ // Test the bits of register defined by bit_pattern, and branch if ANY of
+ // those bits are set. May corrupt the status flags.
+ inline void TestAndBranchIfAnySet(const Register& reg,
+ const uint64_t bit_pattern,
+ Label* label);
+
+ // Test the bits of register defined by bit_pattern, and branch if ALL of
+ // those bits are clear (i.e. not set). May corrupt the status flags.
+ inline void TestAndBranchIfAllClear(const Register& reg,
+ const uint64_t bit_pattern,
+ Label* label);
+
+ // Insert one or more instructions into the instruction stream that encode
+ // some caller-defined data. The instructions used will be executable with no
+ // side effects.
+ inline void InlineData(uint64_t data);
+
+ // Insert an instrumentation enable marker into the instruction stream.
+ inline void EnableInstrumentation();
+
+ // Insert an instrumentation disable marker into the instruction stream.
+ inline void DisableInstrumentation();
+
+ // Insert an instrumentation event marker into the instruction stream. These
+ // will be picked up by the instrumentation system to annotate an instruction
+ // profile. The argument marker_name must be a printable two character string;
+ // it will be encoded in the event marker.
+ inline void AnnotateInstrumentation(const char* marker_name);
+
+ // If emit_debug_code() is true, emit a run-time check to ensure that
+ // StackPointer() does not point below the system stack pointer.
+ //
+ // Whilst it is architecturally legal for StackPointer() to point below csp,
+ // it can be evidence of a potential bug because the ABI forbids accesses
+ // below csp.
+ //
+ // If emit_debug_code() is false, this emits no code.
+ //
+ // If StackPointer() is the system stack pointer, this emits no code.
+ void AssertStackConsistency();
+
+ // Preserve the callee-saved registers (as defined by AAPCS64).
+ //
+ // Higher-numbered registers are pushed before lower-numbered registers, and
+ // thus get higher addresses.
+ // Floating-point registers are pushed before general-purpose registers, and
+ // thus get higher addresses.
+ //
+ // Note that registers are not checked for invalid values. Use this method
+ // only if you know that the GC won't try to examine the values on the stack.
+ //
+ // This method must not be called unless the current stack pointer (as set by
+ // SetStackPointer) is the system stack pointer (csp), and is aligned to
+ // ActivationFrameAlignment().
+ void PushCalleeSavedRegisters();
+
+ // Restore the callee-saved registers (as defined by AAPCS64).
+ //
+ // Higher-numbered registers are popped after lower-numbered registers, and
+ // thus come from higher addresses.
+ // Floating-point registers are popped after general-purpose registers, and
+ // thus come from higher addresses.
+ //
+ // This method must not be called unless the current stack pointer (as set by
+ // SetStackPointer) is the system stack pointer (csp), and is aligned to
+ // ActivationFrameAlignment().
+ void PopCalleeSavedRegisters();
+
+ // Set the current stack pointer, but don't generate any code.
+ inline void SetStackPointer(const Register& stack_pointer) {
+ ASSERT(!AreAliased(stack_pointer, Tmp0(), Tmp1()));
+ sp_ = stack_pointer;
+ }
+
+ // Return the current stack pointer, as set by SetStackPointer.
+ inline const Register& StackPointer() const {
+ return sp_;
+ }
+
+ // Align csp for a frame, as per ActivationFrameAlignment, and make it the
+ // current stack pointer.
+ inline void AlignAndSetCSPForFrame() {
+ int sp_alignment = ActivationFrameAlignment();
+ // AAPCS64 mandates at least 16-byte alignment.
+ ASSERT(sp_alignment >= 16);
+ ASSERT(IsPowerOf2(sp_alignment));
+ Bic(csp, StackPointer(), sp_alignment - 1);
+ SetStackPointer(csp);
+ }
+
+ // Push the system stack pointer (csp) down to allow the same to be done to
+ // the current stack pointer (according to StackPointer()). This must be
+ // called _before_ accessing the memory.
+ //
+ // This is necessary when pushing or otherwise adding things to the stack, to
+ // satisfy the AAPCS64 constraint that the memory below the system stack
+ // pointer is not accessed.
+ //
+ // This method asserts that StackPointer() is not csp, since the call does
+ // not make sense in that context.
+ //
+ // TODO(jbramley): Currently, this method can only accept values of 'space'
+ // that can be encoded in one instruction. Refer to the implementation for
+ // details.
+ inline void BumpSystemStackPointer(const Operand& space);
+
+ // Helpers ------------------------------------------------------------------
+ // Root register.
+ inline void InitializeRootRegister();
+
+ // Load an object from the root table.
+ void LoadRoot(Register destination,
+ Heap::RootListIndex index);
+ // Store an object to the root table.
+ void StoreRoot(Register source,
+ Heap::RootListIndex index);
+
+ // Load both TrueValue and FalseValue roots.
+ void LoadTrueFalseRoots(Register true_root, Register false_root);
+
+ void LoadHeapObject(Register dst, Handle<HeapObject> object);
+
+ // Load 'object' into 'result'. Heap objects are loaded via LoadHeapObject
+ // (so that deferred handle dereferencing rules are respected); smis are
+ // loaded directly as immediates.
+ void LoadObject(Register result, Handle<Object> object) {
+ AllowDeferredHandleDereference heap_object_check;
+ if (object->IsHeapObject()) {
+ LoadHeapObject(result, Handle<HeapObject>::cast(object));
+ } else {
+ // Not a heap object, so it must be a smi: materialize it directly.
+ ASSERT(object->IsSmi());
+ Mov(result, Operand(object));
+ }
+ }
+
+ static int SafepointRegisterStackIndex(int reg_code);
+
+ // This is required for compatibility with architecture independent code.
+ // Remove if not needed.
+ inline void Move(Register dst, Register src) { Mov(dst, src); }
+
+ void LoadInstanceDescriptors(Register map,
+ Register descriptors);
+ void EnumLengthUntagged(Register dst, Register map);
+ void EnumLengthSmi(Register dst, Register map);
+ void NumberOfOwnDescriptors(Register dst, Register map);
+
+ // Extract the bit field described by 'Field' from the value in 'reg',
+ // leaving the unshifted field value in 'reg'. The field is located at
+ // Field::kShift + kSmiShift, so the input is expected to be a smi-tagged
+ // value whose untagged payload carries the bitfield — TODO confirm with
+ // callers.
+ template<typename Field>
+ void DecodeField(Register reg) {
+ static const uint64_t shift = Field::kShift + kSmiShift;
+ // Field::kMask is a 32-bit mask; its population count is the field width.
+ static const uint64_t setbits = CountSetBits(Field::kMask, 32);
+ Ubfx(reg, reg, shift, setbits);
+ }
+
+ // ---- SMI and Number Utilities ----
+
+ inline void SmiTag(Register dst, Register src);
+ inline void SmiTag(Register smi);
+ inline void SmiUntag(Register dst, Register src);
+ inline void SmiUntag(Register smi);
+ inline void SmiUntagToDouble(FPRegister dst,
+ Register src,
+ UntagMode mode = kNotSpeculativeUntag);
+ inline void SmiUntagToFloat(FPRegister dst,
+ Register src,
+ UntagMode mode = kNotSpeculativeUntag);
+
+ // Compute the absolute value of 'smi' and leave the result in 'smi'
+ // register. If 'smi' is the most negative SMI, the absolute value cannot
+ // be represented as a SMI and a jump to 'slow' is done.
+ void SmiAbs(const Register& smi, Label* slow);
+
+ inline void JumpIfSmi(Register value,
+ Label* smi_label,
+ Label* not_smi_label = NULL);
+ inline void JumpIfNotSmi(Register value, Label* not_smi_label);
+ inline void JumpIfBothSmi(Register value1,
+ Register value2,
+ Label* both_smi_label,
+ Label* not_smi_label = NULL);
+ inline void JumpIfEitherSmi(Register value1,
+ Register value2,
+ Label* either_smi_label,
+ Label* not_smi_label = NULL);
+ inline void JumpIfEitherNotSmi(Register value1,
+ Register value2,
+ Label* not_smi_label);
+ inline void JumpIfBothNotSmi(Register value1,
+ Register value2,
+ Label* not_smi_label);
+
+ // Abort execution if argument is a smi, enabled via --debug-code.
+ void AssertNotSmi(Register object, BailoutReason reason = kOperandIsASmi);
+ void AssertSmi(Register object, BailoutReason reason = kOperandIsNotASmi);
+
+ // Abort execution if argument is not a name, enabled via --debug-code.
+ void AssertName(Register object);
+
+ // Abort execution if argument is not a string, enabled via --debug-code.
+ void AssertString(Register object);
+
+ void JumpForHeapNumber(Register object,
+ Register heap_number_map,
+ Label* on_heap_number,
+ Label* on_not_heap_number = NULL);
+ void JumpIfHeapNumber(Register object,
+ Label* on_heap_number,
+ Register heap_number_map = NoReg);
+ void JumpIfNotHeapNumber(Register object,
+ Label* on_not_heap_number,
+ Register heap_number_map = NoReg);
+
+ // Jump to label if the input double register contains -0.0.
+ void JumpIfMinusZero(DoubleRegister input, Label* on_negative_zero);
+
+ // Generate code to do a lookup in the number string cache. If the number in
+ // the register object is found in the cache the generated code falls through
+ // with the result in the result register. The object and the result register
+ // can be the same. If the number is not found in the cache the code jumps to
+ // the label not_found with only the content of register object unchanged.
+ void LookupNumberStringCache(Register object,
+ Register result,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Label* not_found);
+
+ // Saturate a signed 32-bit integer in input to an unsigned 8-bit integer in
+ // output.
+ void ClampInt32ToUint8(Register in_out);
+ void ClampInt32ToUint8(Register output, Register input);
+
+ // Saturate a double in input to an unsigned 8-bit integer in output.
+ void ClampDoubleToUint8(Register output,
+ DoubleRegister input,
+ DoubleRegister dbl_scratch);
+
+ // Try to convert a double to a signed 32-bit int.
+ // This succeeds if the result compares equal to the input, so inputs of -0.0
+ // are converted to 0 and handled as a success.
+ void TryConvertDoubleToInt32(Register as_int,
+ FPRegister value,
+ FPRegister scratch_d,
+ Label* on_successful_conversion,
+ Label* on_failed_conversion = NULL) {
+ ASSERT(as_int.Is32Bits());
+ TryConvertDoubleToInt(as_int, value, scratch_d, on_successful_conversion,
+ on_failed_conversion);
+ }
+
+ // Try to convert a double to a signed 64-bit int.
+ // This succeeds if the result compares equal to the input, so inputs of -0.0
+ // are converted to 0 and handled as a success.
+ void TryConvertDoubleToInt64(Register as_int,
+ FPRegister value,
+ FPRegister scratch_d,
+ Label* on_successful_conversion,
+ Label* on_failed_conversion = NULL) {
+ ASSERT(as_int.Is64Bits());
+ TryConvertDoubleToInt(as_int, value, scratch_d, on_successful_conversion,
+ on_failed_conversion);
+ }
+
+ // ---- Object Utilities ----
+
+ // Copy fields from 'src' to 'dst', where both are tagged objects.
+ // The 'temps' list is a list of X registers which can be used for scratch
+ // values. The temps list must include at least one register, and it must not
+ // contain Tmp0() or Tmp1().
+ //
+ // Currently, CopyFields cannot make use of more than three registers from
+ // the 'temps' list.
+ //
+ // As with several MacroAssembler methods, Tmp0() and Tmp1() will be used.
+ void CopyFields(Register dst, Register src, CPURegList temps, unsigned count);
+
+ // Copies a number of bytes from src to dst. All passed registers are
+ // clobbered. On exit src and dst will point to the place just after where the
+ // last byte was read or written and length will be zero. Hint may be used to
+ // determine which is the most efficient algorithm to use for copying.
+ void CopyBytes(Register dst,
+ Register src,
+ Register length,
+ Register scratch,
+ CopyHint hint = kCopyUnknown);
+
+ // Initialize fields with filler values. Fields starting at start_offset not
+ // including end_offset are overwritten with the value in filler. At the end
+ // of the loop, start_offset takes the value of end_offset.
+ void InitializeFieldsWithFiller(Register start_offset,
+ Register end_offset,
+ Register filler);
+
+ // ---- String Utilities ----
+
+
+ // Jump to label if either object is not a sequential ASCII string.
+ // Optionally perform a smi check on the objects first.
+ void JumpIfEitherIsNotSequentialAsciiStrings(
+ Register first,
+ Register second,
+ Register scratch1,
+ Register scratch2,
+ Label* failure,
+ SmiCheckType smi_check = DO_SMI_CHECK);
+
+ // Check if instance type is sequential ASCII string and jump to label if
+ // it is not.
+ void JumpIfInstanceTypeIsNotSequentialAscii(Register type,
+ Register scratch,
+ Label* failure);
+
+ // Checks if both instance types are sequential ASCII strings and jumps to
+ // label if either is not.
+ void JumpIfEitherInstanceTypeIsNotSequentialAscii(
+ Register first_object_instance_type,
+ Register second_object_instance_type,
+ Register scratch1,
+ Register scratch2,
+ Label* failure);
+
+ // Checks if both instance types are sequential ASCII strings and jumps to
+ // label if either is not.
+ void JumpIfBothInstanceTypesAreNotSequentialAscii(
+ Register first_object_instance_type,
+ Register second_object_instance_type,
+ Register scratch1,
+ Register scratch2,
+ Label* failure);
+
+ void JumpIfNotUniqueName(Register type, Label* not_unique_name);
+
+ // ---- Calling / Jumping helpers ----
+
+ // This is required for compatibility in architecture independent code.
+ inline void jmp(Label* L) { B(L); }
+
+ // Passes thrown value to the handler of top of the try handler chain.
+ // Register value must be x0.
+ void Throw(Register value,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Register scratch4);
+
+ // Propagates an uncatchable exception to the top of the current JS stack's
+ // handler chain. Register value must be x0.
+ void ThrowUncatchable(Register value,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Register scratch4);
+
+ // Throw a message string as an exception.
+ void Throw(BailoutReason reason);
+
+ // Throw a message string as an exception if a condition is not true.
+ void ThrowIf(Condition cc, BailoutReason reason);
+
+ // Throw a message string as an exception if the value is a smi.
+ void ThrowIfSmi(const Register& value, BailoutReason reason);
+
+ void CallStub(CodeStub* stub, TypeFeedbackId ast_id = TypeFeedbackId::None());
+ void TailCallStub(CodeStub* stub);
+
+ void CallRuntime(const Runtime::Function* f,
+ int num_arguments,
+ SaveFPRegsMode save_doubles = kDontSaveFPRegs);
+
+ void CallRuntime(Runtime::FunctionId id,
+ int num_arguments,
+ SaveFPRegsMode save_doubles = kDontSaveFPRegs) {
+ CallRuntime(Runtime::FunctionForId(id), num_arguments, save_doubles);
+ }
+
+ // TODO(all): Why does this variant save FP regs unconditionally?
+ void CallRuntimeSaveDoubles(Runtime::FunctionId id) {
+ const Runtime::Function* function = Runtime::FunctionForId(id);
+ CallRuntime(function, function->nargs, kSaveFPRegs);
+ }
+
+ void TailCallRuntime(Runtime::FunctionId fid,
+ int num_arguments,
+ int result_size);
+
+ int ActivationFrameAlignment();
+
+ // Calls a C function.
+ // The called function is not allowed to trigger a
+ // garbage collection, since that might move the code and invalidate the
+ // return address (unless this is somehow accounted for by the called
+ // function).
+ void CallCFunction(ExternalReference function,
+ int num_reg_arguments);
+ void CallCFunction(ExternalReference function,
+ int num_reg_arguments,
+ int num_double_arguments);
+ void CallCFunction(Register function,
+ int num_reg_arguments,
+ int num_double_arguments);
+
+ // Calls an API function. Allocates HandleScope, extracts returned value
+ // from handle and propagates exceptions.
+ // 'stack_space' is the space to be unwound on exit (includes the call JS
+ // arguments space and the additional space allocated for the fast call).
+ // 'spill_offset' is the offset from the stack pointer where
+ // CallApiFunctionAndReturn can spill registers.
+ void CallApiFunctionAndReturn(Register function_address,
+ ExternalReference thunk_ref,
+ int stack_space,
+ int spill_offset,
+ MemOperand return_value_operand,
+ MemOperand* context_restore_operand);
+
+ // The number of registers that CallApiFunctionAndReturn will need to save on
+ // the stack. The space for these registers needs to be allocated in the
+ // ExitFrame before calling CallApiFunctionAndReturn.
+ static const int kCallApiFunctionSpillSpace = 4;
+
+ // Jump to a runtime routine.
+ void JumpToExternalReference(const ExternalReference& builtin);
+ // Tail call of a runtime routine (jump).
+ // Like JumpToExternalReference, but also takes care of passing the number
+ // of parameters.
+ void TailCallExternalReference(const ExternalReference& ext,
+ int num_arguments,
+ int result_size);
+ void CallExternalReference(const ExternalReference& ext,
+ int num_arguments);
+
+
+ // Invoke specified builtin JavaScript function. Adds an entry to
+ // the unresolved list if the name does not resolve.
+ void InvokeBuiltin(Builtins::JavaScript id,
+ InvokeFlag flag,
+ const CallWrapper& call_wrapper = NullCallWrapper());
+
+ // Store the code object for the given builtin in the target register and
+ // setup the function in x1.
+ // TODO(all): Can we use another register than x1?
+ void GetBuiltinEntry(Register target, Builtins::JavaScript id);
+
+ // Store the function for the given builtin in the target register.
+ void GetBuiltinFunction(Register target, Builtins::JavaScript id);
+
+ void Jump(Register target);
+ void Jump(Address target, RelocInfo::Mode rmode);
+ void Jump(Handle<Code> code, RelocInfo::Mode rmode);
+ void Jump(intptr_t target, RelocInfo::Mode rmode);
+
+ void Call(Register target);
+ void Call(Label* target);
+ void Call(Address target, RelocInfo::Mode rmode);
+ void Call(Handle<Code> code,
+ RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
+ TypeFeedbackId ast_id = TypeFeedbackId::None());
+
+ // For every Call variant, there is a matching CallSize function that returns
+ // the size (in bytes) of the call sequence.
+ static int CallSize(Register target);
+ static int CallSize(Label* target);
+ static int CallSize(Address target, RelocInfo::Mode rmode);
+ static int CallSize(Handle<Code> code,
+ RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
+ TypeFeedbackId ast_id = TypeFeedbackId::None());
+
+ // Registers used through the invocation chain are hard-coded.
+ // We force passing the parameters to ensure the contracts are correctly
+ // honoured by the caller.
+ // 'function' must be x1.
+ // 'actual' must use an immediate or x0.
+ // 'expected' must use an immediate or x2.
+ // 'call_kind' must be x5.
+ void InvokePrologue(const ParameterCount& expected,
+ const ParameterCount& actual,
+ Handle<Code> code_constant,
+ Register code_reg,
+ Label* done,
+ InvokeFlag flag,
+ bool* definitely_mismatches,
+ const CallWrapper& call_wrapper);
+ void InvokeCode(Register code,
+ const ParameterCount& expected,
+ const ParameterCount& actual,
+ InvokeFlag flag,
+ const CallWrapper& call_wrapper);
+ // Invoke the JavaScript function in the given register.
+ // Changes the current context to the context in the function before invoking.
+ void InvokeFunction(Register function,
+ const ParameterCount& actual,
+ InvokeFlag flag,
+ const CallWrapper& call_wrapper);
+ void InvokeFunction(Register function,
+ const ParameterCount& expected,
+ const ParameterCount& actual,
+ InvokeFlag flag,
+ const CallWrapper& call_wrapper);
+ void InvokeFunction(Handle<JSFunction> function,
+ const ParameterCount& expected,
+ const ParameterCount& actual,
+ InvokeFlag flag,
+ const CallWrapper& call_wrapper);
+
+
+ // ---- Floating point helpers ----
+
+ enum ECMA262ToInt32Result {
+ // Provide an untagged int32_t which can be read using result.W(). That is,
+ // the upper 32 bits of result are undefined.
+ INT32_IN_W,
+
+ // Provide an untagged int32_t which can be read using the 64-bit result
+ // register. The int32_t result is sign-extended.
+ INT32_IN_X,
+
+ // Tag the int32_t result as a smi.
+ SMI
+ };
+
+ // Applies ECMA-262 ToInt32 (see section 9.5) to a double value.
+ void ECMA262ToInt32(Register result,
+ DoubleRegister input,
+ Register scratch1,
+ Register scratch2,
+ ECMA262ToInt32Result format = INT32_IN_X);
+
+ // As ECMA262ToInt32, but operate on a HeapNumber.
+ void HeapNumberECMA262ToInt32(Register result,
+ Register heap_number,
+ Register scratch1,
+ Register scratch2,
+ DoubleRegister double_scratch,
+ ECMA262ToInt32Result format = INT32_IN_X);
+
+ // ---- Code generation helpers ----
+
+ void set_generating_stub(bool value) { generating_stub_ = value; }
+ bool generating_stub() const { return generating_stub_; }
+#ifdef DEBUG
+ // Debug-only switch used to catch raw-instruction emission inside
+ // sequences where macro instructions are forbidden (and vice versa).
+ void set_allow_macro_instructions(bool value) {
+ allow_macro_instructions_ = value;
+ }
+ bool allow_macro_instructions() const { return allow_macro_instructions_; }
+#endif
+ bool use_real_aborts() const { return use_real_aborts_; }
+ void set_has_frame(bool value) { has_frame_ = value; }
+ bool has_frame() const { return has_frame_; }
+ bool AllowThisStubCall(CodeStub* stub);
+
+ // RAII scope that clears use_real_aborts_ on the given MacroAssembler for
+ // its lifetime, restoring the previous value on destruction.
+ class NoUseRealAbortsScope {
+ public:
+ // Save the current setting, then disable real aborts.
+ explicit NoUseRealAbortsScope(MacroAssembler* masm) :
+ saved_(masm->use_real_aborts_), masm_(masm) {
+ masm_->use_real_aborts_ = false;
+ }
+ // Restore the setting that was active when the scope was entered.
+ ~NoUseRealAbortsScope() {
+ masm_->use_real_aborts_ = saved_;
+ }
+ private:
+ bool saved_; // Value of use_real_aborts_ before this scope.
+ MacroAssembler* masm_;
+ };
+
+#ifdef ENABLE_DEBUGGER_SUPPORT
+ // ---------------------------------------------------------------------------
+ // Debugger Support
+
+ void DebugBreak();
+#endif
+ // ---------------------------------------------------------------------------
+ // Exception handling
+
+ // Push a new try handler and link into try handler chain.
+ void PushTryHandler(StackHandler::Kind kind, int handler_index);
+
+ // Unlink the stack handler on top of the stack from the try handler chain.
+ // Must preserve the result register.
+ void PopTryHandler();
+
+
+ // ---------------------------------------------------------------------------
+ // Allocation support
+
+ // Allocate an object in new space or old pointer space. The object_size is
+ // specified either in bytes or in words if the allocation flag SIZE_IN_WORDS
+ // is passed. The allocated object is returned in result.
+ //
+ // If the new space is exhausted control continues at the gc_required label.
+ // In this case, the result and scratch registers may still be clobbered.
+ // If flags includes TAG_OBJECT, the result is tagged as a heap object.
+ void Allocate(Register object_size,
+ Register result,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required,
+ AllocationFlags flags);
+
+ void Allocate(int object_size,
+ Register result,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required,
+ AllocationFlags flags);
+
+ // Undo allocation in new space. The object passed and objects allocated after
+ // it will no longer be allocated. The caller must make sure that no pointers
+ // are left to the object(s) no longer allocated as they would be invalid when
+ // allocation is undone.
+ void UndoAllocationInNewSpace(Register object, Register scratch);
+
+ void AllocateTwoByteString(Register result,
+ Register length,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Label* gc_required);
+ void AllocateAsciiString(Register result,
+ Register length,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Label* gc_required);
+ void AllocateTwoByteConsString(Register result,
+ Register length,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required);
+ void AllocateAsciiConsString(Register result,
+ Register length,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required);
+ void AllocateTwoByteSlicedString(Register result,
+ Register length,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required);
+ void AllocateAsciiSlicedString(Register result,
+ Register length,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required);
+
+ // Allocates a heap number or jumps to the gc_required label if the young
+ // space is full and a scavenge is needed.
+ // All registers are clobbered.
+ // If no heap_number_map register is provided, the function will take care of
+ // loading it.
+ void AllocateHeapNumber(Register result,
+ Label* gc_required,
+ Register scratch1,
+ Register scratch2,
+ Register heap_number_map = NoReg);
+ void AllocateHeapNumberWithValue(Register result,
+ DoubleRegister value,
+ Label* gc_required,
+ Register scratch1,
+ Register scratch2,
+ Register heap_number_map = NoReg);
+
+ // ---------------------------------------------------------------------------
+ // Support functions.
+
+ // Try to get function prototype of a function and puts the value in the
+ // result register. Checks that the function really is a function and jumps
+ // to the miss label if the fast checks fail. The function register will be
+ // untouched; the other registers may be clobbered.
+ enum BoundFunctionAction {
+ kMissOnBoundFunction,
+ kDontMissOnBoundFunction
+ };
+
+ void TryGetFunctionPrototype(Register function,
+ Register result,
+ Register scratch,
+ Label* miss,
+ BoundFunctionAction action =
+ kDontMissOnBoundFunction);
+
+ // Compare object type for heap object. heap_object contains a non-Smi
+ // whose object type should be compared with the given type. This both
+ // sets the flags and leaves the object type in the type_reg register.
+ // It leaves the map in the map register (unless the type_reg and map register
+ // are the same register). It leaves the heap object in the heap_object
+ // register unless the heap_object register is the same register as one of the
+ // other registers.
+ void CompareObjectType(Register heap_object,
+ Register map,
+ Register type_reg,
+ InstanceType type);
+
+
+ // Compare object type for heap object, and branch if equal (or not.)
+ // heap_object contains a non-Smi whose object type should be compared with
+ // the given type. This both sets the flags and leaves the object type in
+ // the type_reg register. It leaves the map in the map register (unless the
+ // type_reg and map register are the same register). It leaves the heap
+ // object in the heap_object register unless the heap_object register is the
+ // same register as one of the other registers.
+ void JumpIfObjectType(Register object,
+ Register map,
+ Register type_reg,
+ InstanceType type,
+ Label* if_cond_pass,
+ Condition cond = eq);
+
+ void JumpIfNotObjectType(Register object,
+ Register map,
+ Register type_reg,
+ InstanceType type,
+ Label* if_not_object);
+
+ // Compare instance type in a map. map contains a valid map object whose
+ // object type should be compared with the given type. This both
+ // sets the flags and leaves the object type in the type_reg register.
+ void CompareInstanceType(Register map,
+ Register type_reg,
+ InstanceType type);
+
+ // Compare an object's map with the specified map and its transitioned
+ // elements maps if mode is ALLOW_ELEMENT_TRANSITION_MAPS. Condition flags are
+ // set with result of map compare. If multiple map compares are required, the
+ // compare sequences branches to early_success.
+ void CompareMap(Register obj,
+ Register scratch,
+ Handle<Map> map,
+ Label* early_success = NULL);
+
+ // As above, but the map of the object is already loaded into the register
+ // which is preserved by the code generated.
+ void CompareMap(Register obj_map,
+ Handle<Map> map,
+ Label* early_success = NULL);
+
+ // Check if the map of an object is equal to a specified map and branch to
+ // label if not. Skip the smi check if not required (object is known to be a
+ // heap object). If mode is ALLOW_ELEMENT_TRANSITION_MAPS, then also match
+ // against maps that are ElementsKind transition maps of the specified map.
+ void CheckMap(Register obj,
+ Register scratch,
+ Handle<Map> map,
+ Label* fail,
+ SmiCheckType smi_check_type);
+
+
+ void CheckMap(Register obj,
+ Register scratch,
+ Heap::RootListIndex index,
+ Label* fail,
+ SmiCheckType smi_check_type);
+
+ // As above, but the map of the object is already loaded into obj_map, and is
+ // preserved.
+ void CheckMap(Register obj_map,
+ Handle<Map> map,
+ Label* fail,
+ SmiCheckType smi_check_type);
+
+ // Check if the map of an object is equal to a specified map and branch to a
+ // specified target if equal. Skip the smi check if not required (object is
+ // known to be a heap object)
+ void DispatchMap(Register obj,
+ Register scratch,
+ Handle<Map> map,
+ Handle<Code> success,
+ SmiCheckType smi_check_type);
+
+ // Test the bitfield of the heap object map with mask and set the condition
+ // flags. The object register is preserved.
+ void TestMapBitfield(Register object, uint64_t mask);
+
+ // Load the elements kind field of an object, and return it in the result
+ // register.
+ void LoadElementsKind(Register result, Register object);
+
+ // Compare the object in a register to a value from the root list.
+ // Uses the Tmp0() register as scratch.
+ void CompareRoot(const Register& obj, Heap::RootListIndex index);
+
+ // Compare the object in a register to a value and jump if they are equal.
+ void JumpIfRoot(const Register& obj,
+ Heap::RootListIndex index,
+ Label* if_equal);
+
+ // Compare the object in a register to a value and jump if they are not equal.
+ void JumpIfNotRoot(const Register& obj,
+ Heap::RootListIndex index,
+ Label* if_not_equal);
+
+ // Load and check the instance type of an object for being a unique name.
+ // Loads the type into the second argument register.
+ // The object and type arguments can be the same register; in that case it
+ // will be overwritten with the type.
+ // Fall-through if the object was a string and jump on fail otherwise.
+ inline void IsObjectNameType(Register object, Register type, Label* fail);
+
+ inline void IsObjectJSObjectType(Register heap_object,
+ Register map,
+ Register scratch,
+ Label* fail);
+
+ // Check the instance type in the given map to see if it corresponds to a
+ // JS object type. Jump to the fail label if this is not the case and fall
+ // through otherwise. However if fail label is NULL, no branch will be
+ // performed and the flag will be updated. You can test the flag for "le"
+ // condition to test if it is a valid JS object type.
+ inline void IsInstanceJSObjectType(Register map,
+ Register scratch,
+ Label* fail);
+
+ // Load and check the instance type of an object for being a string.
+ // Loads the type into the second argument register.
+ // The object and type arguments can be the same register; in that case it
+ // will be overwritten with the type.
+  // Jumps to not_string or string as appropriate. If the appropriate label is
+  // NULL, fall through.
+ inline void IsObjectJSStringType(Register object, Register type,
+ Label* not_string, Label* string = NULL);
+
+ // Compare the contents of a register with an operand, and branch to true,
+ // false or fall through, depending on condition.
+ void CompareAndSplit(const Register& lhs,
+ const Operand& rhs,
+ Condition cond,
+ Label* if_true,
+ Label* if_false,
+ Label* fall_through);
+
+ // Test the bits of register defined by bit_pattern, and branch to
+ // if_any_set, if_all_clear or fall_through accordingly.
+ void TestAndSplit(const Register& reg,
+ uint64_t bit_pattern,
+ Label* if_all_clear,
+ Label* if_any_set,
+ Label* fall_through);
+
+ // Check if a map for a JSObject indicates that the object has fast elements.
+ // Jump to the specified label if it does not.
+ void CheckFastElements(Register map,
+ Register scratch,
+ Label* fail);
+
+ // Check if a map for a JSObject indicates that the object can have both smi
+ // and HeapObject elements. Jump to the specified label if it does not.
+ void CheckFastObjectElements(Register map,
+ Register scratch,
+ Label* fail);
+
+ // Check if a map for a JSObject indicates that the object has fast smi only
+ // elements. Jump to the specified label if it does not.
+ void CheckFastSmiElements(Register map, Register scratch, Label* fail);
+
+ // Check to see if number can be stored as a double in FastDoubleElements.
+ // If it can, store it at the index specified by key_reg in the array,
+ // otherwise jump to fail.
+ void StoreNumberToDoubleElements(Register value_reg,
+ Register key_reg,
+ Register elements_reg,
+ Register scratch1,
+ FPRegister fpscratch1,
+ FPRegister fpscratch2,
+ Label* fail,
+ int elements_offset = 0);
+
+ // Picks out an array index from the hash field.
+ // Register use:
+ // hash - holds the index's hash. Clobbered.
+ // index - holds the overwritten index on exit.
+ void IndexFromHash(Register hash, Register index);
+
+ // ---------------------------------------------------------------------------
+ // Inline caching support.
+
+ void EmitSeqStringSetCharCheck(Register string,
+ Register index,
+ SeqStringSetCharCheckIndexType index_type,
+ Register scratch,
+ uint32_t encoding_mask);
+
+ // Generate code for checking access rights - used for security checks
+ // on access to global objects across environments. The holder register
+ // is left untouched, whereas both scratch registers are clobbered.
+ void CheckAccessGlobalProxy(Register holder_reg,
+ Register scratch,
+ Label* miss);
+
+  // Hash the integer value in 'key' register.
+  // It uses the same algorithm as ComputeIntegerHash in utils.h.
+ void GetNumberHash(Register key, Register scratch);
+
+ // Load value from the dictionary.
+ //
+ // elements - holds the slow-case elements of the receiver on entry.
+ // Unchanged unless 'result' is the same register.
+ //
+ // key - holds the smi key on entry.
+ // Unchanged unless 'result' is the same register.
+ //
+ // result - holds the result on exit if the load succeeded.
+ // Allowed to be the same as 'key' or 'result'.
+ // Unchanged on bailout so 'key' or 'result' can be used
+ // in further computation.
+ void LoadFromNumberDictionary(Label* miss,
+ Register elements,
+ Register key,
+ Register result,
+ Register scratch0,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3);
+
+ // ---------------------------------------------------------------------------
+ // Frames.
+
+ // Activation support.
+ // Note that Tmp0() and Tmp1() are used as a scratch registers. This is safe
+ // because these methods are not used in Crankshaft.
+ void EnterFrame(StackFrame::Type type);
+ void LeaveFrame(StackFrame::Type type);
+
+ // Returns map with validated enum cache in object register.
+ void CheckEnumCache(Register object,
+ Register null_value,
+ Register scratch0,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Label* call_runtime);
+
+ // AllocationMemento support. Arrays may have an associated
+ // AllocationMemento object that can be checked for in order to pretransition
+ // to another type.
+ // On entry, receiver should point to the array object.
+ // If allocation info is present, the Z flag is set (so that the eq
+ // condition will pass).
+ void TestJSArrayForAllocationMemento(Register receiver,
+ Register scratch1,
+ Register scratch2,
+ Label* no_memento_found);
+
+  // As TestJSArrayForAllocationMemento, but jumps to 'memento_found' if a
+  // memento is present, and falls through otherwise.
+  void JumpIfJSArrayHasAllocationMemento(Register receiver,
+                                         Register scratch1,
+                                         Register scratch2,
+                                         Label* memento_found) {
+    Label no_memento_found;
+    TestJSArrayForAllocationMemento(receiver, scratch1, scratch2,
+                                    &no_memento_found);
+    // TestJSArrayForAllocationMemento sets the Z flag when allocation info is
+    // present, so 'eq' passes exactly when a memento was found.
+    B(eq, memento_found);
+    Bind(&no_memento_found);
+  }
+
+ // The stack pointer has to switch between csp and jssp when setting up and
+ // destroying the exit frame. Hence preserving/restoring the registers is
+ // slightly more complicated than simple push/pop operations.
+ void ExitFramePreserveFPRegs();
+ void ExitFrameRestoreFPRegs();
+
+ // Generates function and stub prologue code.
+ void Prologue(PrologueFrameMode frame_mode);
+
+ // Enter exit frame. Exit frames are used when calling C code from generated
+ // (JavaScript) code.
+ //
+ // The stack pointer must be jssp on entry, and will be set to csp by this
+ // function. The frame pointer is also configured, but the only other
+ // registers modified by this function are the provided scratch register, and
+ // jssp.
+ //
+ // The 'extra_space' argument can be used to allocate some space in the exit
+ // frame that will be ignored by the GC. This space will be reserved in the
+ // bottom of the frame immediately above the return address slot.
+ //
+ // Set up a stack frame and registers as follows:
+ // fp[8]: CallerPC (lr)
+ // fp -> fp[0]: CallerFP (old fp)
+ // fp[-8]: SPOffset (new csp)
+ // fp[-16]: CodeObject()
+ // fp[-16 - fp-size]: Saved doubles, if saved_doubles is true.
+ // csp[8]: Memory reserved for the caller if extra_space != 0.
+ // Alignment padding, if necessary.
+ // csp -> csp[0]: Space reserved for the return address.
+ //
+ // This function also stores the new frame information in the top frame, so
+ // that the new frame becomes the current frame.
+ void EnterExitFrame(bool save_doubles,
+ const Register& scratch,
+ int extra_space = 0);
+
+ // Leave the current exit frame, after a C function has returned to generated
+ // (JavaScript) code.
+ //
+ // This effectively unwinds the operation of EnterExitFrame:
+ // * Preserved doubles are restored (if restore_doubles is true).
+ // * The frame information is removed from the top frame.
+ // * The exit frame is dropped.
+ // * The stack pointer is reset to jssp.
+ //
+ // The stack pointer must be csp on entry.
+ void LeaveExitFrame(bool save_doubles,
+ const Register& scratch,
+ bool restore_context);
+
+ void LoadContext(Register dst, int context_chain_length);
+
+ // ---------------------------------------------------------------------------
+ // StatsCounter support
+
+ void SetCounter(StatsCounter* counter, int value, Register scratch1,
+ Register scratch2);
+ void IncrementCounter(StatsCounter* counter, int value, Register scratch1,
+ Register scratch2);
+ void DecrementCounter(StatsCounter* counter, int value, Register scratch1,
+ Register scratch2);
+
+ // ---------------------------------------------------------------------------
+ // Garbage collector support (GC).
+
+ enum RememberedSetFinalAction {
+ kReturnAtEnd,
+ kFallThroughAtEnd
+ };
+
+ // Record in the remembered set the fact that we have a pointer to new space
+ // at the address pointed to by the addr register. Only works if addr is not
+ // in new space.
+ void RememberedSetHelper(Register object, // Used for debug code.
+ Register addr,
+ Register scratch,
+ SaveFPRegsMode save_fp,
+ RememberedSetFinalAction and_then);
+
+ // Push and pop the registers that can hold pointers, as defined by the
+ // RegList constant kSafepointSavedRegisters.
+ void PushSafepointRegisters();
+ void PopSafepointRegisters();
+
+ void PushSafepointFPRegisters();
+ void PopSafepointFPRegisters();
+
+  // Store value in register src in the safepoint stack slot for register dst.
+  void StoreToSafepointRegisterSlot(Register src, Register dst) {
+    Poke(src, SafepointRegisterStackIndex(dst.code()) * kPointerSize);
+  }
+
+  // Load the value of the src register from its safepoint stack slot
+  // into register dst.
+  void LoadFromSafepointRegisterSlot(Register dst, Register src) {
+    // Read the slot indexed by 'src' and write the result into 'dst', as the
+    // comment above (and the ARM port's ldr(dst, SafepointRegisterSlot(src)))
+    // specifies. The arguments were previously transposed: Peek targeted
+    // 'src' using the slot index of 'dst'.
+    Peek(dst, SafepointRegisterStackIndex(src.code()) * kPointerSize);
+  }
+
+ void CheckPageFlagSet(const Register& object,
+ const Register& scratch,
+ int mask,
+ Label* if_any_set);
+
+ void CheckPageFlagClear(const Register& object,
+ const Register& scratch,
+ int mask,
+ Label* if_all_clear);
+
+ void CheckMapDeprecated(Handle<Map> map,
+ Register scratch,
+ Label* if_deprecated);
+
+  // Check if object is in new space and jump accordingly.
+  // Register 'object' is preserved.
+  void JumpIfNotInNewSpace(Register object,
+                           Label* branch) {
+    InNewSpace(object, ne, branch);
+  }
+
+  // As above, but jump when 'object' IS in new space.
+  void JumpIfInNewSpace(Register object,
+                        Label* branch) {
+    InNewSpace(object, eq, branch);
+  }
+
+ // Notify the garbage collector that we wrote a pointer into an object.
+ // |object| is the object being stored into, |value| is the object being
+ // stored. value and scratch registers are clobbered by the operation.
+ // The offset is the offset from the start of the object, not the offset from
+ // the tagged HeapObject pointer. For use with FieldOperand(reg, off).
+ void RecordWriteField(
+ Register object,
+ int offset,
+ Register value,
+ Register scratch,
+ LinkRegisterStatus lr_status,
+ SaveFPRegsMode save_fp,
+ RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
+ SmiCheck smi_check = INLINE_SMI_CHECK);
+
+  // As above, but the offset has the tag presubtracted. For use with
+  // MemOperand(reg, off).
+  inline void RecordWriteContextSlot(
+      Register context,
+      int offset,
+      Register value,
+      Register scratch,
+      LinkRegisterStatus lr_status,
+      SaveFPRegsMode save_fp,
+      RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
+      SmiCheck smi_check = INLINE_SMI_CHECK) {
+    // 'offset' is relative to the tagged pointer (MemOperand style); adding
+    // kHeapObjectTag converts it to the from-object-start offset that
+    // RecordWriteField expects.
+    RecordWriteField(context,
+                     offset + kHeapObjectTag,
+                     value,
+                     scratch,
+                     lr_status,
+                     save_fp,
+                     remembered_set_action,
+                     smi_check);
+  }
+
+ // For a given |object| notify the garbage collector that the slot |address|
+ // has been written. |value| is the object being stored. The value and
+ // address registers are clobbered by the operation.
+ void RecordWrite(
+ Register object,
+ Register address,
+ Register value,
+ LinkRegisterStatus lr_status,
+ SaveFPRegsMode save_fp,
+ RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
+ SmiCheck smi_check = INLINE_SMI_CHECK);
+
+ // Checks the color of an object. If the object is already grey or black
+ // then we just fall through, since it is already live. If it is white and
+ // we can determine that it doesn't need to be scanned, then we just mark it
+ // black and fall through. For the rest we jump to the label so the
+ // incremental marker can fix its assumptions.
+ void EnsureNotWhite(Register object,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Register scratch4,
+ Label* object_is_white_and_not_data);
+
+  // Detects conservatively whether an object is data-only, i.e. it does not
+  // need to be scanned by the garbage collector.
+ void JumpIfDataObject(Register value,
+ Register scratch,
+ Label* not_data_object);
+
+ // Helper for finding the mark bits for an address.
+ // Note that the behaviour slightly differs from other architectures.
+ // On exit:
+ // - addr_reg is unchanged.
+ // - The bitmap register points at the word with the mark bits.
+ // - The shift register contains the index of the first color bit for this
+ // object in the bitmap.
+ inline void GetMarkBits(Register addr_reg,
+ Register bitmap_reg,
+ Register shift_reg);
+
+ // Check if an object has a given incremental marking color.
+ void HasColor(Register object,
+ Register scratch0,
+ Register scratch1,
+ Label* has_color,
+ int first_bit,
+ int second_bit);
+
+ void JumpIfBlack(Register object,
+ Register scratch0,
+ Register scratch1,
+ Label* on_black);
+
+
+ // Get the location of a relocated constant (its address in the constant pool)
+ // from its load site.
+ void GetRelocatedValueLocation(Register ldr_location,
+ Register result);
+
+
+ // ---------------------------------------------------------------------------
+ // Debugging.
+
+ // Calls Abort(msg) if the condition cond is not satisfied.
+ // Use --debug_code to enable.
+ void Assert(Condition cond, BailoutReason reason);
+ void AssertRegisterIsClear(Register reg, BailoutReason reason);
+ void AssertRegisterIsRoot(
+ Register reg,
+ Heap::RootListIndex index,
+ BailoutReason reason = kRegisterDidNotMatchExpectedRoot);
+ void AssertFastElements(Register elements);
+
+ // Abort if the specified register contains the invalid color bit pattern.
+ // The pattern must be in bits [1:0] of 'reg' register.
+ //
+ // If emit_debug_code() is false, this emits no code.
+ void AssertHasValidColor(const Register& reg);
+
+ // Abort if 'object' register doesn't point to a string object.
+ //
+ // If emit_debug_code() is false, this emits no code.
+ void AssertIsString(const Register& object);
+
+ // Like Assert(), but always enabled.
+ void Check(Condition cond, BailoutReason reason);
+ void CheckRegisterIsClear(Register reg, BailoutReason reason);
+
+ // Print a message to stderr and abort execution.
+ void Abort(BailoutReason reason);
+
+ // Conditionally load the cached Array transitioned map of type
+ // transitioned_kind from the native context if the map in register
+ // map_in_out is the cached Array map in the native context of
+ // expected_kind.
+ void LoadTransitionedArrayMapConditional(
+ ElementsKind expected_kind,
+ ElementsKind transitioned_kind,
+ Register map_in_out,
+ Register scratch,
+ Label* no_map_match);
+
+ // Load the initial map for new Arrays from a JSFunction.
+ void LoadInitialArrayMap(Register function_in,
+ Register scratch,
+ Register map_out,
+ ArrayHasHoles holes);
+
+ void LoadArrayFunction(Register function);
+ void LoadGlobalFunction(int index, Register function);
+
+ // Load the initial map from the global function. The registers function and
+ // map can be the same, function is then overwritten.
+ void LoadGlobalFunctionInitialMap(Register function,
+ Register map,
+ Register scratch);
+
+  // --------------------------------------------------------------------------
+  // Set the registers used internally by the MacroAssembler as scratch
+  // registers. These registers are used to implement behaviours which are not
+  // directly supported by A64, and where an intermediate result is required.
+  //
+  // Both tmp0 and tmp1 may be set to any X register except for xzr, sp,
+  // and StackPointer(). Also, they must not be the same register (though they
+  // may both be NoReg).
+  //
+  // NOTE(review): the asserts below are stricter than the text above — they
+  // only accept ip0/ip1 (or NoReg) for tmp0/tmp1 respectively; confirm which
+  // contract is intended.
+  //
+  // It is valid to set either or both of these registers to NoReg if you don't
+  // want the MacroAssembler to use any scratch registers. In a debug build, the
+  // Assembler will assert that any registers it uses are valid. Be aware that
+  // this check is not present in release builds. If this is a problem, use the
+  // Assembler directly.
+  void SetScratchRegisters(const Register& tmp0, const Register& tmp1) {
+    // V8 assumes the macro assembler uses ip0 and ip1 as temp registers.
+    ASSERT(tmp0.IsNone() || tmp0.Is(ip0));
+    ASSERT(tmp1.IsNone() || tmp1.Is(ip1));
+
+    // The scratch registers must not alias each other, the zero/stack
+    // registers, or the current stack pointer.
+    ASSERT(!AreAliased(xzr, csp, tmp0, tmp1));
+    ASSERT(!AreAliased(StackPointer(), tmp0, tmp1));
+    tmp0_ = tmp0;
+    tmp1_ = tmp1;
+  }
+
+  // Accessors for the current scratch registers (X-sized views).
+  const Register& Tmp0() const {
+    return tmp0_;
+  }
+
+  const Register& Tmp1() const {
+    return tmp1_;
+  }
+
+  // W (32-bit) register views of the same scratch registers.
+  const Register WTmp0() const {
+    return Register::Create(tmp0_.code(), kWRegSize);
+  }
+
+  const Register WTmp1() const {
+    return Register::Create(tmp1_.code(), kWRegSize);
+  }
+
+  // Set/get the FP scratch register used internally by the MacroAssembler.
+  void SetFPScratchRegister(const FPRegister& fptmp0) {
+    fptmp0_ = fptmp0;
+  }
+
+  const FPRegister& FPTmp0() const {
+    return fptmp0_;
+  }
+
+  // Return a scratch register sized to match 'target' that is not 'forbidden'.
+  // The chosen register is also asserted to differ from 'target'.
+  const Register AppropriateTempFor(
+      const Register& target,
+      const CPURegister& forbidden = NoCPUReg) const {
+    // Prefer Tmp0(); fall back to Tmp1() when Tmp0() is the forbidden one.
+    Register candidate = forbidden.Is(Tmp0()) ? Tmp1() : Tmp0();
+    ASSERT(!candidate.Is(target));
+    return Register::Create(candidate.code(), target.SizeInBits());
+  }
+
+  // FP variant: only one FP scratch register exists, so 'forbidden' cannot be
+  // avoided — it is merely asserted against.
+  const FPRegister AppropriateTempFor(
+      const FPRegister& target,
+      const CPURegister& forbidden = NoCPUReg) const {
+    USE(forbidden);
+    FPRegister candidate = FPTmp0();
+    ASSERT(!candidate.Is(forbidden));
+    ASSERT(!candidate.Is(target));
+    return FPRegister::Create(candidate.code(), target.SizeInBits());
+  }
+
+ // Like printf, but print at run-time from generated code.
+ //
+ // The caller must ensure that arguments for floating-point placeholders
+ // (such as %e, %f or %g) are FPRegisters, and that arguments for integer
+ // placeholders are Registers.
+ //
+ // A maximum of four arguments may be given to any single Printf call. The
+ // arguments must be of the same type, but they do not need to have the same
+ // size.
+ //
+ // The following registers cannot be printed:
+ // Tmp0(), Tmp1(), StackPointer(), csp.
+ //
+ // This function automatically preserves caller-saved registers so that
+ // calling code can use Printf at any point without having to worry about
+ // corruption. The preservation mechanism generates a lot of code. If this is
+ // a problem, preserve the important registers manually and then call
+ // PrintfNoPreserve. Callee-saved registers are not used by Printf, and are
+ // implicitly preserved.
+ //
+ // Unlike many MacroAssembler functions, x8 and x9 are guaranteed to be
+ // preserved, and can be printed. This allows Printf to be used during debug
+ // code.
+ //
+ // This function assumes (and asserts) that the current stack pointer is
+ // callee-saved, not caller-saved. This is most likely the case anyway, as a
+ // caller-saved stack pointer doesn't make a lot of sense.
+ void Printf(const char * format,
+ const CPURegister& arg0 = NoCPUReg,
+ const CPURegister& arg1 = NoCPUReg,
+ const CPURegister& arg2 = NoCPUReg,
+ const CPURegister& arg3 = NoCPUReg);
+
+ // Like Printf, but don't preserve any caller-saved registers, not even 'lr'.
+ //
+ // The return code from the system printf call will be returned in x0.
+ void PrintfNoPreserve(const char * format,
+ const CPURegister& arg0 = NoCPUReg,
+ const CPURegister& arg1 = NoCPUReg,
+ const CPURegister& arg2 = NoCPUReg,
+ const CPURegister& arg3 = NoCPUReg);
+
+ // Code ageing support functions.
+
+ // Code ageing on A64 works similarly to on ARM. When V8 wants to mark a
+ // function as old, it replaces some of the function prologue (generated by
+ // FullCodeGenerator::Generate) with a call to a special stub (ultimately
+ // generated by GenerateMakeCodeYoungAgainCommon). The stub restores the
+ // function prologue to its initial young state (indicating that it has been
+ // recently run) and continues. A young function is therefore one which has a
+ // normal frame setup sequence, and an old function has a code age sequence
+ // which calls a code ageing stub.
+
+ // Set up a basic stack frame for young code (or code exempt from ageing) with
+ // type FUNCTION. It may be patched later for code ageing support. This is
+ // done by to Code::PatchPlatformCodeAge and EmitCodeAgeSequence.
+ //
+ // This function takes an Assembler so it can be called from either a
+ // MacroAssembler or a PatchingAssembler context.
+ static void EmitFrameSetupForCodeAgePatching(Assembler* assm);
+
+ // Call EmitFrameSetupForCodeAgePatching from a MacroAssembler context.
+ void EmitFrameSetupForCodeAgePatching();
+
+ // Emit a code age sequence that calls the relevant code age stub. The code
+ // generated by this sequence is expected to replace the code generated by
+ // EmitFrameSetupForCodeAgePatching, and represents an old function.
+ //
+ // If stub is NULL, this function generates the code age sequence but omits
+ // the stub address that is normally embedded in the instruction stream. This
+ // can be used by debug code to verify code age sequences.
+ static void EmitCodeAgeSequence(Assembler* assm, Code* stub);
+
+ // Call EmitCodeAgeSequence from a MacroAssembler context.
+ void EmitCodeAgeSequence(Code* stub);
+
+  // Return true if the sequence is a young sequence generated by
+  // EmitFrameSetupForCodeAgePatching. Otherwise, this method asserts that the
+  // sequence is a code age sequence (emitted by EmitCodeAgeSequence).
+ static bool IsYoungSequence(byte* sequence);
+
+#ifdef DEBUG
+ // Return true if the sequence is a code age sequence generated by
+ // EmitCodeAgeSequence.
+ static bool IsCodeAgeSequence(byte* sequence);
+#endif
+
+ // Jumps to found label if a prototype map has dictionary elements.
+ void JumpIfDictionaryInPrototypeChain(Register object, Register scratch0,
+ Register scratch1, Label* found);
+
+ private:
+ // Helpers for CopyFields.
+ // These each implement CopyFields in a different way.
+ void CopyFieldsLoopPairsHelper(Register dst, Register src, unsigned count,
+ Register scratch1, Register scratch2,
+ Register scratch3);
+ void CopyFieldsUnrolledPairsHelper(Register dst, Register src, unsigned count,
+ Register scratch1, Register scratch2);
+ void CopyFieldsUnrolledHelper(Register dst, Register src, unsigned count,
+ Register scratch1);
+
+ // The actual Push and Pop implementations. These don't generate any code
+ // other than that required for the push or pop. This allows
+ // (Push|Pop)CPURegList to bundle together run-time assertions for a large
+ // block of registers.
+ //
+ // Note that size is per register, and is specified in bytes.
+ void PushHelper(int count, int size,
+ const CPURegister& src0, const CPURegister& src1,
+ const CPURegister& src2, const CPURegister& src3);
+ void PopHelper(int count, int size,
+ const CPURegister& dst0, const CPURegister& dst1,
+ const CPURegister& dst2, const CPURegister& dst3);
+
+ // Perform necessary maintenance operations before a push or pop.
+ //
+ // Note that size is per register, and is specified in bytes.
+ void PrepareForPush(int count, int size);
+ void PrepareForPop(int count, int size);
+
+ // Call Printf. On a native build, a simple call will be generated, but if the
+ // simulator is being used then a suitable pseudo-instruction is used. The
+ // arguments and stack (csp) must be prepared by the caller as for a normal
+ // AAPCS64 call to 'printf'.
+ //
+ // The 'type' argument specifies the type of the optional arguments.
+ void CallPrintf(CPURegister::RegisterType type = CPURegister::kNoRegister);
+
+ // Helper for throwing exceptions. Compute a handler address and jump to
+ // it. See the implementation for register usage.
+ void JumpToHandlerEntry(Register exception,
+ Register object,
+ Register state,
+ Register scratch1,
+ Register scratch2);
+
+ // Helper for implementing JumpIfNotInNewSpace and JumpIfInNewSpace.
+ void InNewSpace(Register object,
+ Condition cond, // eq for new space, ne otherwise.
+ Label* branch);
+
+ // Try to convert a double to an int so that integer fast-paths may be
+ // used. Not every valid integer value is guaranteed to be caught.
+ // It supports both 32-bit and 64-bit integers depending whether 'as_int'
+ // is a W or X register.
+ //
+ // This does not distinguish between +0 and -0, so if this distinction is
+ // important it must be checked separately.
+ void TryConvertDoubleToInt(Register as_int,
+ FPRegister value,
+ FPRegister scratch_d,
+ Label* on_successful_conversion,
+ Label* on_failed_conversion = NULL);
+
+  bool generating_stub_;
+#ifdef DEBUG
+  // Tell whether any of the macro instruction can be used. When false the
+  // MacroAssembler will assert if a method which can emit a variable number
+  // of instructions is called.
+  // Guarded with #ifdef DEBUG (not #if DEBUG) to match every other DEBUG
+  // guard in this file (e.g. IsCodeAgeSequence, InstructionAccurateScope):
+  // "#if DEBUG" breaks when DEBUG is defined with no value (-DDEBUG), and a
+  // mismatched guard would desynchronise this member from its accessors.
+  bool allow_macro_instructions_;
+#endif
+  bool has_frame_;
+
+ // The Abort method should call a V8 runtime function, but the CallRuntime
+ // mechanism depends on CEntryStub. If use_real_aborts is false, Abort will
+ // use a simpler abort mechanism that doesn't depend on CEntryStub.
+ //
+ // The purpose of this is to allow Aborts to be compiled whilst CEntryStub is
+ // being generated.
+ bool use_real_aborts_;
+
+ // This handle will be patched with the code object on installation.
+ Handle<Object> code_object_;
+
+ // The register to use as a stack pointer for stack operations.
+ Register sp_;
+
+ // Scratch registers used internally by the MacroAssembler.
+ Register tmp0_;
+ Register tmp1_;
+ FPRegister fptmp0_;
+
+ void InitializeNewString(Register string,
+ Register length,
+ Heap::RootListIndex map_index,
+ Register scratch1,
+ Register scratch2);
+};
+
+
+// Use this scope when you need a one-to-one mapping between methods and
+// instructions. This scope prevents the MacroAssembler from being called and
+// literal pools from being emitted. It also asserts the number of instructions
+// emitted is what you specified when creating the scope.
+class InstructionAccurateScope BASE_EMBEDDED {
+ public:
+  // Scope without a size check: macro instructions and constant-pool emission
+  // are still blocked, but no instruction count is verified on destruction.
+  explicit InstructionAccurateScope(MacroAssembler* masm)
+      : masm_(masm), size_(0) {
+    masm_->StartBlockConstPool();
+#ifdef DEBUG
+    previous_allow_macro_instructions_ = masm_->allow_macro_instructions();
+    masm_->set_allow_macro_instructions(false);
+#endif
+  }
+
+  // Scope that additionally checks (debug builds only) that exactly 'count'
+  // instructions are emitted before the scope is destroyed.
+  InstructionAccurateScope(MacroAssembler* masm, size_t count)
+      : masm_(masm), size_(count * kInstructionSize) {
+    masm_->StartBlockConstPool();
+#ifdef DEBUG
+    masm_->bind(&start_);
+    previous_allow_macro_instructions_ = masm_->allow_macro_instructions();
+    masm_->set_allow_macro_instructions(false);
+#endif
+  }
+
+  ~InstructionAccurateScope() {
+    masm_->EndBlockConstPool();
+#ifdef DEBUG
+    // start_ is only bound by the size-checking constructor, so the size
+    // assertion is skipped for the unchecked variant.
+    if (start_.is_bound()) {
+      ASSERT(masm_->SizeOfCodeGeneratedSince(&start_) == size_);
+    }
+    masm_->set_allow_macro_instructions(previous_allow_macro_instructions_);
+#endif
+  }
+
+ private:
+  MacroAssembler* masm_;
+  size_t size_;  // Expected code size in bytes; 0 means "unchecked".
+#ifdef DEBUG
+  Label start_;
+  bool previous_allow_macro_instructions_;
+#endif
+};
+
+
+// Memory operand addressing slot 'index' of the given context register.
+inline MemOperand ContextMemOperand(Register context, int index) {
+  return MemOperand(context, Context::SlotOffset(index));
+}
+
+// Memory operand addressing the global object slot of the current context
+// (held in cp).
+inline MemOperand GlobalObjectMemOperand() {
+  return ContextMemOperand(cp, Context::GLOBAL_OBJECT_INDEX);
+}
+
+
+// Encode and decode information about patchable inline SMI checks.
+class InlineSmiCheckInfo {
+ public:
+  // Decode the patch information located at address 'info'.
+  explicit InlineSmiCheckInfo(Address info);
+
+  bool HasSmiCheck() const {
+    return smi_check_ != NULL;
+  }
+
+  // The register that the inline check tests for a SMI tag.
+  const Register& SmiRegister() const {
+    return reg_;
+  }
+
+  // The patchable check instruction, or NULL if there is no inline check.
+  Instruction* SmiCheck() const {
+    return smi_check_;
+  }
+
+  // Use MacroAssembler::InlineData to emit information about patchable inline
+  // SMI checks. The caller may specify 'reg' as NoReg and an unbound 'site' to
+  // indicate that there is no inline SMI check. Note that 'reg' cannot be csp.
+  //
+  // The generated patch information can be read using the InlineSMICheckInfo
+  // class.
+  static void Emit(MacroAssembler* masm, const Register& reg,
+                   const Label* smi_check);
+
+  // Emit information to indicate that there is no inline SMI check.
+  static void EmitNotInlined(MacroAssembler* masm) {
+    Label unbound;
+    Emit(masm, NoReg, &unbound);
+  }
+
+ private:
+  Register reg_;
+  Instruction* smi_check_;
+
+  // Fields in the data encoded by InlineData.
+
+  // A width of 5 (Rd_width) for the SMI register precludes the use of csp,
+  // since kSPRegInternalCode is 63. However, csp should never hold a SMI or be
+  // used in a patchable check. The Emit() method checks this.
+  //
+  // Note that the total size of the fields is restricted by the underlying
+  // storage size handled by the BitField class, which is a uint32_t.
+  class RegisterBits : public BitField<unsigned, 0, 5> {};
+  class DeltaBits : public BitField<uint32_t, 5, 32-5> {};
+};
+
+} } // namespace v8::internal
+
+#ifdef GENERATED_CODE_COVERAGE
+#error "Unsupported option"
+#define CODE_COVERAGE_STRINGIFY(x) #x
+#define CODE_COVERAGE_TOSTRING(x) CODE_COVERAGE_STRINGIFY(x)
+#define __FILE_LINE__ __FILE__ ":" CODE_COVERAGE_TOSTRING(__LINE__)
+#define ACCESS_MASM(masm) masm->stop(__FILE_LINE__); masm->
+#else
+#define ACCESS_MASM(masm) masm->
+#endif
+
+#endif // V8_A64_MACRO_ASSEMBLER_A64_H_
--- /dev/null
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#if V8_TARGET_ARCH_A64
+
+#include "cpu-profiler.h"
+#include "unicode.h"
+#include "log.h"
+#include "code-stubs.h"
+#include "regexp-stack.h"
+#include "macro-assembler.h"
+#include "regexp-macro-assembler.h"
+#include "a64/regexp-macro-assembler-a64.h"
+
+namespace v8 {
+namespace internal {
+
+#ifndef V8_INTERPRETED_REGEXP
+/*
+ * This assembler uses the following register assignment convention:
+ * - w19     : Used to temporarily store a value before a call to C code.
+ * See CheckNotBackReferenceIgnoreCase.
+ * - x20 : Pointer to the current code object (Code*),
+ * it includes the heap object tag.
+ * - w21 : Current position in input, as negative offset from
+ * the end of the string. Please notice that this is
+ * the byte offset, not the character offset!
+ * - w22 : Currently loaded character. Must be loaded using
+ * LoadCurrentCharacter before using any of the dispatch methods.
+ * - x23 : Points to tip of backtrack stack.
+ * - w24 : Position of the first character minus one: non_position_value.
+ * Used to initialize capture registers.
+ * - x25 : Address at the end of the input string: input_end.
+ * Points to byte after last character in input.
+ * - x26 : Address at the start of the input string: input_start.
+ * - w27 : Where to start in the input string.
+ * - x28 : Output array pointer.
+ * - x29/fp : Frame pointer. Used to access arguments, local variables and
+ * RegExp registers.
+ * - x16/x17 : IP registers, used by assembler. Very volatile.
+ * - csp : Points to tip of C stack.
+ *
+ * - x0-x7 : Used as a cache to store 32 bit capture registers. These
+ * registers need to be retained every time a call to C code
+ * is done.
+ *
+ * The remaining registers are free for computations.
+ * Each call to a public method should retain this convention.
+ *
+ * The stack will have the following structure:
+ *
+ * Location Name Description
+ * (as referred to in
+ * the code)
+ *
+ * - fp[104] isolate Address of the current isolate.
+ * - fp[96] return_address Secondary link/return address
+ * used by an exit frame if this is a
+ * native call.
+ * ^^^ csp when called ^^^
+ * - fp[88] lr Return from the RegExp code.
+ * - fp[80] r29 Old frame pointer (CalleeSaved).
+ * - fp[0..72] r19-r28 Backup of CalleeSaved registers.
+ * - fp[-8] direct_call 1 => Direct call from JavaScript code.
+ * 0 => Call through the runtime system.
+ * - fp[-16] stack_base High end of the memory area to use as
+ * the backtracking stack.
+ * - fp[-24] output_size Output may fit multiple sets of matches.
+ * - fp[-32] input Handle containing the input string.
+ * - fp[-40] success_counter
+ * ^^^^^^^^^^^^^ From here and downwards we store 32 bit values ^^^^^^^^^^^^^
+ * - fp[-44] register N Capture registers initialized with
+ * - fp[-48] register N + 1 non_position_value.
+ * ... The first kNumCachedRegisters (N) registers
+ * ... are cached in x0 to x7.
+ * ... Only positions must be stored in the first
+ * - ... num_saved_registers_ registers.
+ * - ...
+ * - register N + num_registers - 1
+ * ^^^^^^^^^ csp ^^^^^^^^^
+ *
+ * The first num_saved_registers_ registers are initialized to point to
+ * "character -1" in the string (i.e., char_size() bytes before the first
+ * character of the string). The remaining registers start out as garbage.
+ *
+ * The data up to the return address must be placed there by the calling
+ * code and the remaining arguments are passed in registers, e.g. by calling the
+ * code entry as cast to a function with the signature:
+ * int (*match)(String* input,
+ * int start_offset,
+ * Address input_start,
+ * Address input_end,
+ * int* output,
+ * int output_size,
+ * Address stack_base,
+ * bool direct_call = false,
+ * Address secondary_return_address, // Only used by native call.
+ * Isolate* isolate)
+ * The call is performed by NativeRegExpMacroAssembler::Execute()
+ * (in regexp-macro-assembler.cc) via the CALL_GENERATED_REGEXP_CODE macro
+ * in a64/simulator-a64.h.
+ * When calling as a non-direct call (i.e., from C++ code), the return address
+ * area is overwritten with the LR register by the RegExp code. When doing a
+ * direct call from generated code, the return address is placed there by
+ * the calling code, as in a normal exit frame.
+ */
+
+#define __ ACCESS_MASM(masm_)
+
+// Creates a regexp code generator for the given mode (ASCII or UC16) and
+// number of capture registers. Code is emitted into a freshly allocated
+// MacroAssembler owned by this object (freed in the destructor).
+RegExpMacroAssemblerA64::RegExpMacroAssemblerA64(
+    Mode mode,
+    int registers_to_save,
+    Zone* zone)
+    : NativeRegExpMacroAssembler(zone),
+      masm_(new MacroAssembler(zone->isolate(), NULL, kRegExpCodeSize)),
+      mode_(mode),
+      num_registers_(registers_to_save),
+      num_saved_registers_(registers_to_save),
+      entry_label_(),
+      start_label_(),
+      success_label_(),
+      backtrack_label_(),
+      exit_label_() {
+  // Generated regexp code runs on the C stack (csp), not jssp.
+  __ SetStackPointer(csp);
+  // Capture registers always come in (start, end) pairs.
+  ASSERT_EQ(0, registers_to_save % 2);
+  // We can cache at most 16 W registers in x0-x7.
+  STATIC_ASSERT(kNumCachedRegisters <= 16);
+  STATIC_ASSERT((kNumCachedRegisters % 2) == 0);
+  __ B(&entry_label_);   // We'll write the entry code later.
+  __ Bind(&start_label_);  // And then continue from here.
+}
+
+
+// Releases the owned MacroAssembler and detaches all labels.
+RegExpMacroAssemblerA64::~RegExpMacroAssemblerA64() {
+  delete masm_;
+  // Unuse labels in case we throw away the assembler without calling GetCode.
+  entry_label_.Unuse();
+  start_label_.Unuse();
+  success_label_.Unuse();
+  backtrack_label_.Unuse();
+  exit_label_.Unuse();
+  check_preempt_label_.Unuse();
+  stack_overflow_label_.Unuse();
+}
+
+// Number of backtrack-stack slots the generated code may use between
+// explicit stack-limit checks.
+int RegExpMacroAssemblerA64::stack_limit_slack() {
+  return RegExpStack::kStackLimitSlack;
+}
+
+
+// Moves the current input position by `by` characters (scaled to bytes).
+// A zero advance emits no code.
+void RegExpMacroAssemblerA64::AdvanceCurrentPosition(int by) {
+  if (by != 0) {
+    __ Add(current_input_offset(),
+           current_input_offset(), by * char_size());
+  }
+}
+
+
+// Adds `by` to capture register `reg`, wherever it currently lives:
+// on the stack frame, or in the low/high half of a cached X register.
+void RegExpMacroAssemblerA64::AdvanceRegister(int reg, int by) {
+  ASSERT((reg >= 0) && (reg < num_registers_));
+  if (by != 0) {
+    Register to_advance;
+    RegisterState register_state = GetRegisterState(reg);
+    switch (register_state) {
+      case STACKED:
+        // Read-modify-write the 32-bit slot in the frame.
+        __ Ldr(w10, register_location(reg));
+        __ Add(w10, w10, by);
+        __ Str(w10, register_location(reg));
+        break;
+      case CACHED_LSW:
+        // Register lives in the low 32 bits of a cached X register; a plain
+        // 32-bit overflow cannot leak into the high half here because Add on
+        // the X register with a sign-extended immediate is not used.
+        to_advance = GetCachedRegister(reg);
+        __ Add(to_advance, to_advance, by);
+        break;
+      case CACHED_MSW:
+        // Register lives in the high 32 bits: shift the increment up first.
+        to_advance = GetCachedRegister(reg);
+        __ Add(to_advance, to_advance, static_cast<int64_t>(by) << kWRegSize);
+        break;
+      default:
+        UNREACHABLE();
+        break;
+    }
+  }
+}
+
+
+// Pops a code offset from the backtrack stack and jumps to it
+// (relative to the code object pointer). Checks for preemption first.
+void RegExpMacroAssemblerA64::Backtrack() {
+  CheckPreemption();
+  // Pop the 32-bit code offset and form an absolute target address.
+  Pop(w10);
+  __ Add(x10, code_pointer(), Operand(w10, UXTW));
+  __ Br(x10);
+}
+
+
+// Binds `label` to the current code position.
+void RegExpMacroAssemblerA64::Bind(Label* label) {
+  __ Bind(label);
+}
+
+
+// Branches to on_equal (or backtracks) if the current character equals c.
+void RegExpMacroAssemblerA64::CheckCharacter(uint32_t c, Label* on_equal) {
+  CompareAndBranchOrBacktrack(current_character(), c, eq, on_equal);
+}
+
+
+// Branches to on_greater if the current character is (unsigned) above limit.
+void RegExpMacroAssemblerA64::CheckCharacterGT(uc16 limit, Label* on_greater) {
+  CompareAndBranchOrBacktrack(current_character(), limit, hi, on_greater);
+}
+
+
+// Branches to on_at_start if the current position is at the very start of
+// the input string. Only possible when the match itself started at offset 0.
+// Note: the label references below were mangled to "¬_at_start" by an
+// HTML-entity decoding pass ("&not" -> U+00AC); restored to &not_at_start.
+void RegExpMacroAssemblerA64::CheckAtStart(Label* on_at_start) {
+  Label not_at_start;
+  // Did we start the match at the start of the input string?
+  CompareAndBranchOrBacktrack(start_offset(), 0, ne, &not_at_start);
+  // If we did, are we still at the start of the input string?
+  __ Add(x10, input_end(), Operand(current_input_offset(), SXTW));
+  __ Cmp(x10, input_start());
+  BranchOrBacktrack(eq, on_at_start);
+  __ Bind(&not_at_start);
+}
+
+
+// Branches to on_not_at_start if the current position is NOT at the start
+// of the input string (mirror of CheckAtStart).
+void RegExpMacroAssemblerA64::CheckNotAtStart(Label* on_not_at_start) {
+  // Did we start the match at the start of the input string?
+  CompareAndBranchOrBacktrack(start_offset(), 0, ne, on_not_at_start);
+  // If we did, are we still at the start of the input string?
+  __ Add(x10, input_end(), Operand(current_input_offset(), SXTW));
+  __ Cmp(x10, input_start());
+  BranchOrBacktrack(ne, on_not_at_start);
+}
+
+
+// Branches to on_less if the current character is (unsigned) below limit.
+void RegExpMacroAssemblerA64::CheckCharacterLT(uc16 limit, Label* on_less) {
+  CompareAndBranchOrBacktrack(current_character(), limit, lo, on_less);
+}
+
+
+// Compares the characters at cp_offset from the current position against
+// the literal string `str`, character by character, branching to on_failure
+// on the first mismatch (or if the string would run past the input end).
+void RegExpMacroAssemblerA64::CheckCharacters(Vector<const uc16> str,
+                                              int cp_offset,
+                                              Label* on_failure,
+                                              bool check_end_of_string) {
+  // This method is only ever called from the cctests.
+
+  if (check_end_of_string) {
+    // Is last character of required match inside string.
+    CheckPosition(cp_offset + str.length() - 1, on_failure);
+  }
+
+  Register characters_address = x11;
+
+  // Absolute address of the first character to compare.
+  __ Add(characters_address,
+         input_end(),
+         Operand(current_input_offset(), SXTW));
+  if (cp_offset != 0) {
+    __ Add(characters_address, characters_address, cp_offset * char_size());
+  }
+
+  for (int i = 0; i < str.length(); i++) {
+    // Load one character with post-increment, width depending on mode.
+    if (mode_ == ASCII) {
+      __ Ldrb(w10, MemOperand(characters_address, 1, PostIndex));
+      ASSERT(str[i] <= String::kMaxOneByteCharCode);
+    } else {
+      __ Ldrh(w10, MemOperand(characters_address, 2, PostIndex));
+    }
+    CompareAndBranchOrBacktrack(w10, str[i], ne, on_failure);
+  }
+}
+
+
+// If the top of the backtrack stack equals the current input offset, pops
+// it (branch-free, via Cset + scaled Add) and branches to on_equal. Used to
+// break out of greedy loops that fail to make progress.
+void RegExpMacroAssemblerA64::CheckGreedyLoop(Label* on_equal) {
+  __ Ldr(w10, MemOperand(backtrack_stackpointer()));
+  __ Cmp(current_input_offset(), w10);
+  // x11 = 1 on equality, so the stack pointer advances by one W slot
+  // exactly when we are about to take the branch.
+  __ Cset(x11, eq);
+  __ Add(backtrack_stackpointer(),
+         backtrack_stackpointer(), Operand(x11, LSL, kWRegSizeInBytesLog2));
+  BranchOrBacktrack(eq, on_equal);
+}
+
+// Checks the capture group starting at register `start_reg` against the
+// input at the current position, ignoring letter case. Branches to
+// on_no_match (or backtracks) on mismatch; on match, advances the current
+// position past the matched text. ASCII mode compares inline; UC16 mode
+// calls the C helper re_case_insensitive_compare_uc16.
+// (Local `capture_end_addresss` renamed to `capture_end_address` for
+// consistency with CheckNotBackReference; no behavior change.)
+void RegExpMacroAssemblerA64::CheckNotBackReferenceIgnoreCase(
+    int start_reg,
+    Label* on_no_match) {
+  Label fallthrough;
+
+  Register capture_start_offset = w10;
+  // Save the capture length in a callee-saved register so it will
+  // be preserved if we call a C helper.
+  Register capture_length = w19;
+  ASSERT(kCalleeSaved.IncludesAliasOf(capture_length));
+
+  // Find length of back-referenced capture.
+  ASSERT((start_reg % 2) == 0);
+  if (start_reg < kNumCachedRegisters) {
+    __ Mov(capture_start_offset.X(), GetCachedRegister(start_reg));
+    __ Lsr(x11, GetCachedRegister(start_reg), kWRegSize);
+  } else {
+    __ Ldp(w11, capture_start_offset, capture_location(start_reg, x10));
+  }
+  __ Sub(capture_length, w11, capture_start_offset);  // Length to check.
+  // Succeed on empty capture (including no capture).
+  __ Cbz(capture_length, &fallthrough);
+
+  // Check that there are enough characters left in the input.
+  __ Cmn(capture_length, current_input_offset());
+  BranchOrBacktrack(gt, on_no_match);
+
+  if (mode_ == ASCII) {
+    Label success;
+    Label fail;
+    Label loop_check;
+
+    Register capture_start_address = x12;
+    Register capture_end_address = x13;
+    Register current_position_address = x14;
+
+    __ Add(capture_start_address,
+           input_end(),
+           Operand(capture_start_offset, SXTW));
+    __ Add(capture_end_address,
+           capture_start_address,
+           Operand(capture_length, SXTW));
+    __ Add(current_position_address,
+           input_end(),
+           Operand(current_input_offset(), SXTW));
+
+    Label loop;
+    __ Bind(&loop);
+    __ Ldrb(w10, MemOperand(capture_start_address, 1, PostIndex));
+    __ Ldrb(w11, MemOperand(current_position_address, 1, PostIndex));
+    __ Cmp(w10, w11);
+    __ B(eq, &loop_check);
+
+    // Mismatch, try case-insensitive match (converting letters to lower-case).
+    __ Orr(w10, w10, 0x20);  // Convert capture character to lower-case.
+    __ Orr(w11, w11, 0x20);  // Also convert input character.
+    __ Cmp(w11, w10);
+    __ B(ne, &fail);
+    __ Sub(w10, w10, 'a');
+    __ Cmp(w10, 'z' - 'a');  // Is w10 a lowercase letter?
+    __ B(ls, &loop_check);  // In range 'a'-'z'.
+    // Latin-1: Check for values in range [224,254] but not 247.
+    __ Sub(w10, w10, 224 - 'a');
+    // TODO(jbramley): Use Ccmp here.
+    __ Cmp(w10, 254 - 224);
+    __ B(hi, &fail);  // Weren't Latin-1 letters.
+    __ Cmp(w10, 247 - 224);  // Check for 247.
+    __ B(eq, &fail);
+
+    __ Bind(&loop_check);
+    __ Cmp(capture_start_address, capture_end_address);
+    __ B(lt, &loop);
+    __ B(&success);
+
+    __ Bind(&fail);
+    BranchOrBacktrack(al, on_no_match);
+
+    __ Bind(&success);
+    // Compute new value of character position after the matched part.
+    __ Sub(current_input_offset().X(), current_position_address, input_end());
+    if (masm_->emit_debug_code()) {
+      __ Cmp(current_input_offset().X(), Operand(current_input_offset(), SXTW));
+      __ Ccmp(current_input_offset(), 0, NoFlag, eq);
+      // The current input offset should be <= 0, and fit in a W register.
+      __ Check(le, kOffsetOutOfRange);
+    }
+  } else {
+    ASSERT(mode_ == UC16);
+    int argument_count = 4;
+
+    // The cached registers need to be retained.
+    CPURegList cached_registers(CPURegister::kRegister, kXRegSize, 0, 7);
+    ASSERT((cached_registers.Count() * 2) == kNumCachedRegisters);
+    __ PushCPURegList(cached_registers);
+
+    // Put arguments into arguments registers.
+    // Parameters are
+    //   x0: Address byte_offset1 - Address captured substring's start.
+    //   x1: Address byte_offset2 - Address of current character position.
+    //   w2: size_t byte_length - length of capture in bytes(!)
+    //   x3: Isolate* isolate
+
+    // Address of start of capture.
+    __ Add(x0, input_end(), Operand(capture_start_offset, SXTW));
+    // Length of capture.
+    __ Mov(w2, capture_length);
+    // Address of current input position.
+    __ Add(x1, input_end(), Operand(current_input_offset(), SXTW));
+    // Isolate.
+    __ Mov(x3, Operand(ExternalReference::isolate_address(isolate())));
+
+    {
+      AllowExternalCallThatCantCauseGC scope(masm_);
+      ExternalReference function =
+          ExternalReference::re_case_insensitive_compare_uc16(isolate());
+      __ CallCFunction(function, argument_count);
+    }
+
+    // Check if function returned non-zero for success or zero for failure.
+    CompareAndBranchOrBacktrack(x0, 0, eq, on_no_match);
+    // On success, increment position by length of capture.
+    __ Add(current_input_offset(), current_input_offset(), capture_length);
+    // Reset the cached registers.
+    __ PopCPURegList(cached_registers);
+  }
+
+  __ Bind(&fallthrough);
+}
+
+// Checks the capture group starting at register `start_reg` against the
+// input at the current position (case-sensitive). Branches to on_no_match
+// on mismatch; on match, advances the current position past the match.
+void RegExpMacroAssemblerA64::CheckNotBackReference(
+    int start_reg,
+    Label* on_no_match) {
+  Label fallthrough;
+
+  Register capture_start_address = x12;
+  Register capture_end_address = x13;
+  Register current_position_address = x14;
+  Register capture_length = w15;
+
+  // Find length of back-referenced capture.
+  ASSERT((start_reg % 2) == 0);
+  if (start_reg < kNumCachedRegisters) {
+    // Cached pair: start in the low half, end in the high half.
+    __ Mov(x10, GetCachedRegister(start_reg));
+    __ Lsr(x11, GetCachedRegister(start_reg), kWRegSize);
+  } else {
+    __ Ldp(w11, w10, capture_location(start_reg, x10));
+  }
+  __ Sub(capture_length, w11, w10);  // Length to check.
+  // Succeed on empty capture (including no capture).
+  __ Cbz(capture_length, &fallthrough);
+
+  // Check that there are enough characters left in the input.
+  __ Cmn(capture_length, current_input_offset());
+  BranchOrBacktrack(gt, on_no_match);
+
+  // Compute pointers to match string and capture string
+  __ Add(capture_start_address, input_end(), Operand(w10, SXTW));
+  __ Add(capture_end_address,
+         capture_start_address,
+         Operand(capture_length, SXTW));
+  __ Add(current_position_address,
+         input_end(),
+         Operand(current_input_offset(), SXTW));
+
+  Label loop;
+  __ Bind(&loop);
+  // Load one character from each side, width depending on mode.
+  if (mode_ == ASCII) {
+    __ Ldrb(w10, MemOperand(capture_start_address, 1, PostIndex));
+    __ Ldrb(w11, MemOperand(current_position_address, 1, PostIndex));
+  } else {
+    ASSERT(mode_ == UC16);
+    __ Ldrh(w10, MemOperand(capture_start_address, 2, PostIndex));
+    __ Ldrh(w11, MemOperand(current_position_address, 2, PostIndex));
+  }
+  __ Cmp(w10, w11);
+  BranchOrBacktrack(ne, on_no_match);
+  __ Cmp(capture_start_address, capture_end_address);
+  __ B(lt, &loop);
+
+  // Move current character position to position after match.
+  __ Sub(current_input_offset().X(), current_position_address, input_end());
+  if (masm_->emit_debug_code()) {
+    __ Cmp(current_input_offset().X(), Operand(current_input_offset(), SXTW));
+    __ Ccmp(current_input_offset(), 0, NoFlag, eq);
+    // The current input offset should be <= 0, and fit in a W register.
+    __ Check(le, kOffsetOutOfRange);
+  }
+  __ Bind(&fallthrough);
+}
+
+
+// Branches to on_not_equal if the current character differs from c.
+void RegExpMacroAssemblerA64::CheckNotCharacter(unsigned c,
+                                                Label* on_not_equal) {
+  CompareAndBranchOrBacktrack(current_character(), c, ne, on_not_equal);
+}
+
+
+// Branches to on_equal if (current character & mask) == c.
+void RegExpMacroAssemblerA64::CheckCharacterAfterAnd(uint32_t c,
+                                                     uint32_t mask,
+                                                     Label* on_equal) {
+  __ And(w10, current_character(), mask);
+  CompareAndBranchOrBacktrack(w10, c, eq, on_equal);
+}
+
+
+// Branches to on_not_equal if (current character & mask) != c.
+void RegExpMacroAssemblerA64::CheckNotCharacterAfterAnd(unsigned c,
+                                                        unsigned mask,
+                                                        Label* on_not_equal) {
+  __ And(w10, current_character(), mask);
+  CompareAndBranchOrBacktrack(w10, c, ne, on_not_equal);
+}
+
+
+// Branches to on_not_equal if ((current character - minus) & mask) != c.
+void RegExpMacroAssemblerA64::CheckNotCharacterAfterMinusAnd(
+    uc16 c,
+    uc16 minus,
+    uc16 mask,
+    Label* on_not_equal) {
+  ASSERT(minus < String::kMaxUtf16CodeUnit);
+  __ Sub(w10, current_character(), minus);
+  __ And(w10, w10, mask);
+  CompareAndBranchOrBacktrack(w10, c, ne, on_not_equal);
+}
+
+
+// Branches to on_in_range if from <= current character <= to, using the
+// standard unsigned (c - from) <= (to - from) range trick.
+void RegExpMacroAssemblerA64::CheckCharacterInRange(
+    uc16 from,
+    uc16 to,
+    Label* on_in_range) {
+  __ Sub(w10, current_character(), from);
+  // Unsigned lower-or-same condition.
+  CompareAndBranchOrBacktrack(w10, to - from, ls, on_in_range);
+}
+
+
+// Branches to on_not_in_range if the current character lies outside
+// [from, to] (inverse of CheckCharacterInRange).
+void RegExpMacroAssemblerA64::CheckCharacterNotInRange(
+    uc16 from,
+    uc16 to,
+    Label* on_not_in_range) {
+  __ Sub(w10, current_character(), from);
+  // Unsigned higher condition.
+  CompareAndBranchOrBacktrack(w10, to - from, hi, on_not_in_range);
+}
+
+
+// Branches to on_bit_set if the byte indexed by the (masked) current
+// character in `table` is non-zero.
+void RegExpMacroAssemblerA64::CheckBitInTable(
+    Handle<ByteArray> table,
+    Label* on_bit_set) {
+  __ Mov(x11, Operand(table));
+  // The mask can be skipped only for ASCII input with a full-width table.
+  if ((mode_ != ASCII) || (kTableMask != String::kMaxOneByteCharCode)) {
+    __ And(w10, current_character(), kTableMask);
+    __ Add(w10, w10, ByteArray::kHeaderSize - kHeapObjectTag);
+  } else {
+    __ Add(w10, current_character(), ByteArray::kHeaderSize - kHeapObjectTag);
+  }
+  __ Ldrb(w11, MemOperand(x11, w10, UXTW));
+  CompareAndBranchOrBacktrack(w11, 0, ne, on_bit_set);
+}
+
+
+// Emits a fast-path test for the standard character classes (\s, \d, \w,
+// '.', '\n', '*', and their negations). Returns true if a specialized
+// sequence was emitted, false to fall back to the generic class matcher.
+bool RegExpMacroAssemblerA64::CheckSpecialCharacterClass(uc16 type,
+                                                         Label* on_no_match) {
+  // Range checks (c in min..max) are generally implemented by an unsigned
+  // (c - min) <= (max - min) check
+  switch (type) {
+  case 's':
+    // Match space-characters
+    if (mode_ == ASCII) {
+      // One byte space characters are '\t'..'\r', ' ' and \u00a0.
+      Label success;
+      // Check for ' ' or 0x00a0.
+      __ Cmp(current_character(), ' ');
+      __ Ccmp(current_character(), 0x00a0, ZFlag, ne);
+      __ B(eq, &success);
+      // Check range 0x09..0x0d.
+      __ Sub(w10, current_character(), '\t');
+      CompareAndBranchOrBacktrack(w10, '\r' - '\t', hi, on_no_match);
+      __ Bind(&success);
+      return true;
+    }
+    return false;
+  case 'S':
+    // The emitted code for generic character classes is good enough.
+    return false;
+  case 'd':
+    // Match ASCII digits ('0'..'9').
+    __ Sub(w10, current_character(), '0');
+    CompareAndBranchOrBacktrack(w10, '9' - '0', hi, on_no_match);
+    return true;
+  case 'D':
+    // Match ASCII non-digits.
+    __ Sub(w10, current_character(), '0');
+    CompareAndBranchOrBacktrack(w10, '9' - '0', ls, on_no_match);
+    return true;
+  case '.': {
+    // Match non-newlines (not 0x0a('\n'), 0x0d('\r'), 0x2028 and 0x2029)
+    // Here we emit the conditional branch only once at the end to make branch
+    // prediction more efficient, even though we could branch out of here
+    // as soon as a character matches.
+    __ Cmp(current_character(), 0x0a);
+    __ Ccmp(current_character(), 0x0d, ZFlag, ne);
+    if (mode_ == UC16) {
+      __ Sub(w10, current_character(), 0x2028);
+      // If the Z flag was set we clear the flags to force a branch.
+      __ Ccmp(w10, 0x2029 - 0x2028, NoFlag, ne);
+      // ls -> !((C==1) && (Z==0))
+      BranchOrBacktrack(ls, on_no_match);
+    } else {
+      BranchOrBacktrack(eq, on_no_match);
+    }
+    return true;
+  }
+  case 'n': {
+    // Match newlines (0x0a('\n'), 0x0d('\r'), 0x2028 and 0x2029)
+    // We have to check all 4 newline characters before emitting
+    // the conditional branch.
+    __ Cmp(current_character(), 0x0a);
+    __ Ccmp(current_character(), 0x0d, ZFlag, ne);
+    if (mode_ == UC16) {
+      __ Sub(w10, current_character(), 0x2028);
+      // If the Z flag was set we clear the flags to force a fall-through.
+      __ Ccmp(w10, 0x2029 - 0x2028, NoFlag, ne);
+      // hi -> (C==1) && (Z==0)
+      BranchOrBacktrack(hi, on_no_match);
+    } else {
+      BranchOrBacktrack(ne, on_no_match);
+    }
+    return true;
+  }
+  case 'w': {
+    if (mode_ != ASCII) {
+      // Table is 128 entries, so all ASCII characters can be tested.
+      CompareAndBranchOrBacktrack(current_character(), 'z', hi, on_no_match);
+    }
+    ExternalReference map = ExternalReference::re_word_character_map();
+    __ Mov(x10, Operand(map));
+    __ Ldrb(w10, MemOperand(x10, current_character(), UXTW));
+    CompareAndBranchOrBacktrack(w10, 0, eq, on_no_match);
+    return true;
+  }
+  case 'W': {
+    Label done;
+    if (mode_ != ASCII) {
+      // Table is 128 entries, so all ASCII characters can be tested.
+      __ Cmp(current_character(), 'z');
+      __ B(hi, &done);
+    }
+    ExternalReference map = ExternalReference::re_word_character_map();
+    __ Mov(x10, Operand(map));
+    __ Ldrb(w10, MemOperand(x10, current_character(), UXTW));
+    CompareAndBranchOrBacktrack(w10, 0, ne, on_no_match);
+    __ Bind(&done);
+    return true;
+  }
+  case '*':
+    // Match any character.
+    return true;
+  // No custom implementation (yet): s(UC16), S(UC16).
+  default:
+    return false;
+  }
+}
+
+
+// Sets the FAILURE result code in w0 and jumps to the common exit.
+void RegExpMacroAssemblerA64::Fail() {
+  __ Mov(w0, FAILURE);
+  __ B(&exit_label_);
+}
+
+
+// Finalizes code generation: emits the entry sequence (frame setup, stack
+// check, register initialization), the success/exit/global-restart paths,
+// and the out-of-line preemption and stack-overflow handlers, then wraps
+// everything into a Code object.
+Handle<HeapObject> RegExpMacroAssemblerA64::GetCode(Handle<String> source) {
+  Label return_w0;
+  // Finalize code - write the entry point code now we know how many
+  // registers we need.
+
+  // Entry code:
+  __ Bind(&entry_label_);
+
+  // Arguments on entry:
+  // x0:  String*  input
+  // x1:  int      start_offset
+  // x2:  byte*    input_start
+  // x3:  byte*    input_end
+  // x4:  int*     output array
+  // x5:  int      output array size
+  // x6:  Address  stack_base
+  // x7:  int      direct_call
+
+  // The stack pointer should be csp on entry.
+  //  csp[8]:  address of the current isolate
+  //  csp[0]:  secondary link/return address used by native call
+
+  // Tell the system that we have a stack frame.  Because the type is MANUAL,
+  // no code is generated.
+  FrameScope scope(masm_, StackFrame::MANUAL);
+
+  // Push registers on the stack, only push the argument registers that we
+  // need (the others are read into callee-saved registers below).
+  CPURegList argument_registers(x0, x5, x6, x7);
+
+  CPURegList registers_to_retain = kCalleeSaved;
+  ASSERT(kCalleeSaved.Count() == 11);
+  registers_to_retain.Combine(lr);
+
+  ASSERT(csp.Is(__ StackPointer()));
+  __ PushCPURegList(registers_to_retain);
+  __ PushCPURegList(argument_registers);
+
+  // Set frame pointer in place.
+  __ Add(frame_pointer(), csp, argument_registers.Count() * kPointerSize);
+
+  // Initialize callee-saved registers.
+  __ Mov(start_offset(), w1);
+  __ Mov(input_start(), x2);
+  __ Mov(input_end(), x3);
+  __ Mov(output_array(), x4);
+
+  // Set the number of registers we will need to allocate, that is:
+  //   - success_counter (X register)
+  //   - (num_registers_ - kNumCachedRegisters) (W registers)
+  int num_wreg_to_allocate = num_registers_ - kNumCachedRegisters;
+  // Do not allocate registers on the stack if they can all be cached.
+  if (num_wreg_to_allocate < 0) { num_wreg_to_allocate = 0; }
+  // Make room for the success_counter.
+  num_wreg_to_allocate += 2;
+
+  // Make sure the stack alignment will be respected.
+  int alignment = masm_->ActivationFrameAlignment();
+  ASSERT_EQ(alignment % 16, 0);
+  int align_mask = (alignment / kWRegSizeInBytes) - 1;
+  num_wreg_to_allocate = (num_wreg_to_allocate + align_mask) & ~align_mask;
+
+  // Check if we have space on the stack.
+  Label stack_limit_hit;
+  Label stack_ok;
+
+  ExternalReference stack_limit =
+      ExternalReference::address_of_stack_limit(isolate());
+  __ Mov(x10, Operand(stack_limit));
+  __ Ldr(x10, MemOperand(x10));
+  __ Subs(x10, csp, x10);
+
+  // Handle it if the stack pointer is already below the stack limit.
+  __ B(ls, &stack_limit_hit);
+
+  // Check if there is room for the variable number of registers above
+  // the stack limit.
+  __ Cmp(x10, num_wreg_to_allocate * kWRegSizeInBytes);
+  __ B(hs, &stack_ok);
+
+  // Exit with OutOfMemory exception. There is not enough space on the stack
+  // for our working registers.
+  __ Mov(w0, EXCEPTION);
+  __ B(&return_w0);
+
+  __ Bind(&stack_limit_hit);
+  CallCheckStackGuardState(x10);
+  // If returned value is non-zero, we exit with the returned value as result.
+  __ Cbnz(w0, &return_w0);
+
+  __ Bind(&stack_ok);
+
+  // Allocate space on stack.
+  __ Claim(num_wreg_to_allocate, kWRegSizeInBytes);
+
+  // Initialize success_counter with 0.
+  __ Str(wzr, MemOperand(frame_pointer(), kSuccessCounter));
+
+  // Find negative length (offset of start relative to end).
+  __ Sub(x10, input_start(), input_end());
+  if (masm_->emit_debug_code()) {
+    // Check that the input string length is < 2^30.
+    __ Neg(x11, x10);
+    __ Cmp(x11, (1<<30) - 1);
+    __ Check(ls, kInputStringTooLong);
+  }
+  __ Mov(current_input_offset(), w10);
+
+  // The non-position value is used as a clearing value for the
+  // capture registers, it corresponds to the position of the first character
+  // minus one.
+  __ Sub(non_position_value(), current_input_offset(), char_size());
+  __ Sub(non_position_value(), non_position_value(),
+         Operand(start_offset(), LSL, (mode_ == UC16) ? 1 : 0));
+  // We can store this value twice in an X register for initializing
+  // on-stack registers later.
+  __ Orr(twice_non_position_value(),
+         non_position_value().X(),
+         Operand(non_position_value().X(), LSL, kWRegSize));
+
+  // Initialize code pointer register.
+  __ Mov(code_pointer(), Operand(masm_->CodeObject()));
+
+  Label load_char_start_regexp, start_regexp;
+  // Load newline if index is at start, previous character otherwise.
+  __ Cbnz(start_offset(), &load_char_start_regexp);
+  __ Mov(current_character(), '\n');
+  __ B(&start_regexp);
+
+  // Global regexp restarts matching here.
+  __ Bind(&load_char_start_regexp);
+  // Load previous char as initial value of current character register.
+  LoadCurrentCharacterUnchecked(-1, 1);
+  __ Bind(&start_regexp);
+  // Initialize on-stack registers.
+  if (num_saved_registers_ > 0) {
+    ClearRegisters(0, num_saved_registers_ - 1);
+  }
+
+  // Initialize backtrack stack pointer.
+  __ Ldr(backtrack_stackpointer(), MemOperand(frame_pointer(), kStackBase));
+
+  // Execute
+  __ B(&start_label_);
+
+  if (backtrack_label_.is_linked()) {
+    __ Bind(&backtrack_label_);
+    Backtrack();
+  }
+
+  if (success_label_.is_linked()) {
+    Register first_capture_start = w15;
+
+    // Save captures when successful.
+    __ Bind(&success_label_);
+
+    if (num_saved_registers_ > 0) {
+      // V8 expects the output to be an int32_t array.
+      Register capture_start = w12;
+      Register capture_end = w13;
+      Register input_length = w14;
+
+      // Copy captures to output.
+
+      // Get string length.
+      __ Sub(x10, input_end(), input_start());
+      if (masm_->emit_debug_code()) {
+        // Check that the input string length is < 2^30.
+        __ Cmp(x10, (1<<30) - 1);
+        __ Check(ls, kInputStringTooLong);
+      }
+      // input_start has a start_offset offset on entry. We need to include
+      // it when computing the length of the whole string.
+      if (mode_ == UC16) {
+        __ Add(input_length, start_offset(), Operand(w10, LSR, 1));
+      } else {
+        __ Add(input_length, start_offset(), w10);
+      }
+
+      // Copy the results to the output array from the cached registers first.
+      for (int i = 0;
+           (i < num_saved_registers_) && (i < kNumCachedRegisters);
+           i += 2) {
+        __ Mov(capture_start.X(), GetCachedRegister(i));
+        __ Lsr(capture_end.X(), capture_start.X(), kWRegSize);
+        if ((i == 0) && global_with_zero_length_check()) {
+          // Keep capture start for the zero-length check later.
+          __ Mov(first_capture_start, capture_start);
+        }
+        // Offsets need to be relative to the start of the string.
+        if (mode_ == UC16) {
+          __ Add(capture_start, input_length, Operand(capture_start, ASR, 1));
+          __ Add(capture_end, input_length, Operand(capture_end, ASR, 1));
+        } else {
+          __ Add(capture_start, input_length, capture_start);
+          __ Add(capture_end, input_length, capture_end);
+        }
+        // The output pointer advances for a possible global match.
+        __ Stp(capture_start,
+               capture_end,
+               MemOperand(output_array(), kPointerSize, PostIndex));
+      }
+
+      // Only carry on if there are more than kNumCachedRegisters capture
+      // registers.
+      int num_registers_left_on_stack =
+          num_saved_registers_ - kNumCachedRegisters;
+      if (num_registers_left_on_stack > 0) {
+        Register base = x10;
+        // There are always an even number of capture registers. A couple of
+        // registers determine one match with two offsets.
+        ASSERT_EQ(0, num_registers_left_on_stack % 2);
+        __ Add(base, frame_pointer(), kFirstCaptureOnStack);
+
+        // We can unroll the loop here, we should not unroll for less than 2
+        // registers.
+        STATIC_ASSERT(kNumRegistersToUnroll > 2);
+        if (num_registers_left_on_stack <= kNumRegistersToUnroll) {
+          for (int i = 0; i < num_registers_left_on_stack / 2; i++) {
+            __ Ldp(capture_end,
+                   capture_start,
+                   MemOperand(base, -kPointerSize, PostIndex));
+            if ((i == 0) && global_with_zero_length_check()) {
+              // Keep capture start for the zero-length check later.
+              __ Mov(first_capture_start, capture_start);
+            }
+            // Offsets need to be relative to the start of the string.
+            if (mode_ == UC16) {
+              __ Add(capture_start,
+                     input_length,
+                     Operand(capture_start, ASR, 1));
+              __ Add(capture_end, input_length, Operand(capture_end, ASR, 1));
+            } else {
+              __ Add(capture_start, input_length, capture_start);
+              __ Add(capture_end, input_length, capture_end);
+            }
+            // The output pointer advances for a possible global match.
+            __ Stp(capture_start,
+                   capture_end,
+                   MemOperand(output_array(), kPointerSize, PostIndex));
+          }
+        } else {
+          // Rolled loop over the remaining on-stack capture registers.
+          Label loop, start;
+          __ Mov(x11, num_registers_left_on_stack);
+
+          __ Ldp(capture_end,
+                 capture_start,
+                 MemOperand(base, -kPointerSize, PostIndex));
+          if (global_with_zero_length_check()) {
+            __ Mov(first_capture_start, capture_start);
+          }
+          __ B(&start);
+
+          __ Bind(&loop);
+          __ Ldp(capture_end,
+                 capture_start,
+                 MemOperand(base, -kPointerSize, PostIndex));
+          __ Bind(&start);
+          if (mode_ == UC16) {
+            __ Add(capture_start, input_length, Operand(capture_start, ASR, 1));
+            __ Add(capture_end, input_length, Operand(capture_end, ASR, 1));
+          } else {
+            __ Add(capture_start, input_length, capture_start);
+            __ Add(capture_end, input_length, capture_end);
+          }
+          // The output pointer advances for a possible global match.
+          __ Stp(capture_start,
+                 capture_end,
+                 MemOperand(output_array(), kPointerSize, PostIndex));
+          __ Sub(x11, x11, 2);
+          __ Cbnz(x11, &loop);
+        }
+      }
+    }
+
+    if (global()) {
+      Register success_counter = w0;
+      Register output_size = x10;
+      // Restart matching if the regular expression is flagged as global.
+
+      // Increment success counter.
+      __ Ldr(success_counter, MemOperand(frame_pointer(), kSuccessCounter));
+      __ Add(success_counter, success_counter, 1);
+      __ Str(success_counter, MemOperand(frame_pointer(), kSuccessCounter));
+
+      // Capture results have been stored, so the number of remaining global
+      // output registers is reduced by the number of stored captures.
+      __ Ldr(output_size, MemOperand(frame_pointer(), kOutputSize));
+      __ Sub(output_size, output_size, num_saved_registers_);
+      // Check whether we have enough room for another set of capture results.
+      __ Cmp(output_size, num_saved_registers_);
+      __ B(lt, &return_w0);
+
+      // The output pointer is already set to the next field in the output
+      // array.
+      // Update output size on the frame before we restart matching.
+      __ Str(output_size, MemOperand(frame_pointer(), kOutputSize));
+
+      if (global_with_zero_length_check()) {
+        // Special case for zero-length matches.
+        __ Cmp(current_input_offset(), first_capture_start);
+        // Not a zero-length match, restart.
+        __ B(ne, &load_char_start_regexp);
+        // Offset from the end is zero if we already reached the end.
+        __ Cbz(current_input_offset(), &return_w0);
+        // Advance current position after a zero-length match.
+        __ Add(current_input_offset(),
+               current_input_offset(),
+               Operand((mode_ == UC16) ? 2 : 1));
+      }
+
+      __ B(&load_char_start_regexp);
+    } else {
+      __ Mov(w0, SUCCESS);
+    }
+  }
+
+  if (exit_label_.is_linked()) {
+    // Exit and return w0
+    __ Bind(&exit_label_);
+    if (global()) {
+      // For global regexps the result is the number of successful matches.
+      __ Ldr(w0, MemOperand(frame_pointer(), kSuccessCounter));
+    }
+  }
+
+  __ Bind(&return_w0);
+
+  // Set stack pointer back to first register to retain
+  ASSERT(csp.Is(__ StackPointer()));
+  __ Mov(csp, fp);
+
+  // Restore registers.
+  __ PopCPURegList(registers_to_retain);
+
+  __ Ret();
+
+  Label exit_with_exception;
+  // Registers x0 to x7 are used to store the first captures, they need to be
+  // retained over calls to C++ code.
+  CPURegList cached_registers(CPURegister::kRegister, kXRegSize, 0, 7);
+  ASSERT((cached_registers.Count() * 2) == kNumCachedRegisters);
+
+  if (check_preempt_label_.is_linked()) {
+    // Out-of-line handler: preemption / interrupt requested.
+    __ Bind(&check_preempt_label_);
+    SaveLinkRegister();
+    // The cached registers need to be retained.
+    __ PushCPURegList(cached_registers);
+    CallCheckStackGuardState(x10);
+    // Returning from the regexp code restores the stack (csp <- fp)
+    // so we don't need to drop the link register from it before exiting.
+    __ Cbnz(w0, &return_w0);
+    // Reset the cached registers.
+    __ PopCPURegList(cached_registers);
+    RestoreLinkRegister();
+    __ Ret();
+  }
+
+  if (stack_overflow_label_.is_linked()) {
+    // Out-of-line handler: backtrack stack exhausted; try to grow it.
+    __ Bind(&stack_overflow_label_);
+    SaveLinkRegister();
+    // The cached registers need to be retained.
+    __ PushCPURegList(cached_registers);
+    // Call GrowStack(backtrack_stackpointer(), &stack_base)
+    __ Mov(x2, Operand(ExternalReference::isolate_address(isolate())));
+    __ Add(x1, frame_pointer(), kStackBase);
+    __ Mov(x0, backtrack_stackpointer());
+    ExternalReference grow_stack =
+        ExternalReference::re_grow_stack(isolate());
+    __ CallCFunction(grow_stack, 3);
+    // If return NULL, we have failed to grow the stack, and
+    // must exit with a stack-overflow exception.
+    // Returning from the regexp code restores the stack (csp <- fp)
+    // so we don't need to drop the link register from it before exiting.
+    __ Cbz(w0, &exit_with_exception);
+    // Otherwise use return value as new stack pointer.
+    __ Mov(backtrack_stackpointer(), x0);
+    // Reset the cached registers.
+    __ PopCPURegList(cached_registers);
+    RestoreLinkRegister();
+    __ Ret();
+  }
+
+  if (exit_with_exception.is_linked()) {
+    __ Bind(&exit_with_exception);
+    __ Mov(w0, EXCEPTION);
+    __ B(&return_w0);
+  }
+
+  CodeDesc code_desc;
+  masm_->GetCode(&code_desc);
+  Handle<Code> code = isolate()->factory()->NewCode(
+      code_desc, Code::ComputeFlags(Code::REGEXP), masm_->CodeObject());
+  PROFILE(masm_->isolate(), RegExpCodeCreateEvent(*code, *source));
+  return Handle<HeapObject>::cast(code);
+}
+
+
+// Unconditional jump to the given label; a NULL label means Backtrack().
+void RegExpMacroAssemblerA64::GoTo(Label* to) {
+  BranchOrBacktrack(al, to);
+}
+
+// Branch to if_ge (or backtrack if NULL) when capture register 'reg' holds a
+// value >= comparand.
+void RegExpMacroAssemblerA64::IfRegisterGE(int reg,
+                                           int comparand,
+                                           Label* if_ge) {
+  Register to_compare = GetRegister(reg, w10);
+  CompareAndBranchOrBacktrack(to_compare, comparand, ge, if_ge);
+}
+
+
+// Branch to if_lt (or backtrack if NULL) when capture register 'reg' holds a
+// value < comparand.
+void RegExpMacroAssemblerA64::IfRegisterLT(int reg,
+                                           int comparand,
+                                           Label* if_lt) {
+  Register to_compare = GetRegister(reg, w10);
+  CompareAndBranchOrBacktrack(to_compare, comparand, lt, if_lt);
+}
+
+
+// Branch to if_eq (or backtrack if NULL) when capture register 'reg' equals
+// the current input position.
+void RegExpMacroAssemblerA64::IfRegisterEqPos(int reg,
+                                              Label* if_eq) {
+  Register to_compare = GetRegister(reg, w10);
+  __ Cmp(to_compare, current_input_offset());
+  BranchOrBacktrack(eq, if_eq);
+}
+
+// Identifies this backend to the generic Irregexp machinery.
+RegExpMacroAssembler::IrregexpImplementation
+    RegExpMacroAssemblerA64::Implementation() {
+  return kA64Implementation;
+}
+
+
+// Load 'characters' characters starting at cp_offset from the current
+// position into current_character(), optionally checking that the read stays
+// inside the input; jumps to on_end_of_input when out of bounds.
+void RegExpMacroAssemblerA64::LoadCurrentCharacter(int cp_offset,
+                                                   Label* on_end_of_input,
+                                                   bool check_bounds,
+                                                   int characters) {
+  // TODO(pielan): Make sure long strings are caught before this, and not
+  // just asserted in debug mode.
+  ASSERT(cp_offset >= -1); // ^ and \b can look behind one character.
+  // Be sane! (And ensure that an int32_t can be used to index the string)
+  ASSERT(cp_offset < (1<<30));
+  if (check_bounds) {
+    // Bounds-check the last character of the run, which is the furthest read.
+    CheckPosition(cp_offset + characters - 1, on_end_of_input);
+  }
+  LoadCurrentCharacterUnchecked(cp_offset, characters);
+}
+
+
+// Restore the current input position from the backtrack stack.
+void RegExpMacroAssemblerA64::PopCurrentPosition() {
+  Pop(current_input_offset());
+}
+
+
+// Pop a value from the backtrack stack into the given capture register.
+void RegExpMacroAssemblerA64::PopRegister(int register_index) {
+  Pop(w10);
+  StoreRegister(register_index, w10);
+}
+
+
+// Push the code offset of 'label' on the backtrack stack, so Backtrack() can
+// later jump to it. The offset is relative to the start of the code object.
+void RegExpMacroAssemblerA64::PushBacktrack(Label* label) {
+  if (label->is_bound()) {
+    // The label position is known; compute the offset directly.
+    int target = label->pos();
+    __ Mov(w10, target + Code::kHeaderSize - kHeapObjectTag);
+  } else {
+    // Label not bound yet: take the address with Adr and subtract the code
+    // pointer to get a relative offset.
+    __ Adr(x10, label);
+    __ Sub(x10, x10, code_pointer());
+    if (masm_->emit_debug_code()) {
+      __ Cmp(x10, kWRegMask);
+      // The code offset has to fit in a W register.
+      __ Check(ls, kOffsetOutOfRange);
+    }
+  }
+  Push(w10);
+  CheckStackLimit();
+}
+
+
+// Save the current input position on the backtrack stack.
+void RegExpMacroAssemblerA64::PushCurrentPosition() {
+  Push(current_input_offset());
+}
+
+
+// Push the value of a capture register on the backtrack stack, optionally
+// checking the backtrack-stack limit afterwards.
+void RegExpMacroAssemblerA64::PushRegister(int register_index,
+                                           StackCheckFlag check_stack_limit) {
+  Register to_push = GetRegister(register_index, w10);
+  Push(to_push);
+  if (check_stack_limit) CheckStackLimit();
+}
+
+
+// Set the current input position from capture register 'reg'. The register
+// may live on the stack frame or be cached in the low/high half of one of
+// the x0-x7 cache registers.
+void RegExpMacroAssemblerA64::ReadCurrentPositionFromRegister(int reg) {
+  Register cached_register;
+  RegisterState register_state = GetRegisterState(reg);
+  switch (register_state) {
+    case STACKED:
+      __ Ldr(current_input_offset(), register_location(reg));
+      break;
+    case CACHED_LSW:
+      // Value is in the low 32 bits of the cache register.
+      cached_register = GetCachedRegister(reg);
+      __ Mov(current_input_offset(), cached_register.W());
+      break;
+    case CACHED_MSW:
+      // Value is in the high 32 bits; shift it down.
+      cached_register = GetCachedRegister(reg);
+      __ Lsr(current_input_offset().X(), cached_register, kWRegSize);
+      break;
+    default:
+      UNREACHABLE();
+      break;
+  }
+}
+
+
+// Restore the backtrack stack pointer from capture register 'reg', which
+// holds a (signed 32-bit) offset relative to the stack base.
+void RegExpMacroAssemblerA64::ReadStackPointerFromRegister(int reg) {
+  Register read_from = GetRegister(reg, w10);
+  __ Ldr(x11, MemOperand(frame_pointer(), kStackBase));
+  __ Add(backtrack_stackpointer(), x11, Operand(read_from, SXTW));
+}
+
+
+// Move the current position to 'by' characters back from the end of the
+// input, unless we are already closer to the end than that.
+void RegExpMacroAssemblerA64::SetCurrentPositionFromEnd(int by) {
+  Label after_position;
+  // current_input_offset() is a negative offset from the end of the string.
+  __ Cmp(current_input_offset(), -by * char_size());
+  __ B(ge, &after_position);
+  __ Mov(current_input_offset(), -by * char_size());
+  // On RegExp code entry (where this operation is used), the character before
+  // the current position is expected to be already loaded.
+  // We have advanced the position, so it's safe to read backwards.
+  LoadCurrentCharacterUnchecked(-1, 1);
+  __ Bind(&after_position);
+}
+
+
+// Store the immediate 'to' into a (non-capture) register. Uses wzr directly
+// when storing zero to avoid a Mov.
+void RegExpMacroAssemblerA64::SetRegister(int register_index, int to) {
+  ASSERT(register_index >= num_saved_registers_);  // Reserved for positions!
+  Register set_to = wzr;
+  if (to != 0) {
+    set_to = w10;
+    __ Mov(set_to, to);
+  }
+  StoreRegister(register_index, set_to);
+}
+
+
+// Jump to the success handler. Returns whether the regexp is global, i.e.
+// whether matching restarts after a successful match.
+bool RegExpMacroAssemblerA64::Succeed() {
+  __ B(&success_label_);
+  return global();
+}
+
+
+// Store the current input position (optionally displaced by cp_offset
+// characters) into capture register 'reg'.
+void RegExpMacroAssemblerA64::WriteCurrentPositionToRegister(int reg,
+                                                             int cp_offset) {
+  Register position = current_input_offset();
+  if (cp_offset != 0) {
+    position = w10;
+    __ Add(position, current_input_offset(), cp_offset * char_size());
+  }
+  StoreRegister(reg, position);
+}
+
+
+// Reset registers reg_from..reg_to (inclusive) to the non-position value.
+// Cached registers (pairs packed in x0-x7) are cleared a pair at a time;
+// remaining stacked registers are cleared via 64-bit stores, either unrolled
+// or in a loop depending on how many there are.
+void RegExpMacroAssemblerA64::ClearRegisters(int reg_from, int reg_to) {
+  ASSERT(reg_from <= reg_to);
+  int num_registers = reg_to - reg_from + 1;
+
+  // If the first capture register is cached in a hardware register but not
+  // aligned on a 64-bit one, we need to clear the first one specifically.
+  if ((reg_from < kNumCachedRegisters) && ((reg_from % 2) != 0)) {
+    StoreRegister(reg_from, non_position_value());
+    num_registers--;
+    reg_from++;
+  }
+
+  // Clear cached registers in pairs as far as possible.
+  while ((num_registers >= 2) && (reg_from < kNumCachedRegisters)) {
+    ASSERT(GetRegisterState(reg_from) == CACHED_LSW);
+    __ Mov(GetCachedRegister(reg_from), twice_non_position_value());
+    reg_from += 2;
+    num_registers -= 2;
+  }
+
+  // Clear one register singly so the remaining count is even (pairs only).
+  if ((num_registers % 2) == 1) {
+    StoreRegister(reg_from, non_position_value());
+    num_registers--;
+    reg_from++;
+  }
+
+  if (num_registers > 0) {
+    // If there are some remaining registers, they are stored on the stack.
+    ASSERT(reg_from >= kNumCachedRegisters);
+
+    // Move down the indexes of the registers on stack to get the correct offset
+    // in memory.
+    reg_from -= kNumCachedRegisters;
+    reg_to -= kNumCachedRegisters;
+    // We should not unroll the loop for less than 2 registers.
+    STATIC_ASSERT(kNumRegistersToUnroll > 2);
+    // We position the base pointer to (reg_from + 1).
+    int base_offset = kFirstRegisterOnStack -
+        kWRegSizeInBytes - (kWRegSizeInBytes * reg_from);
+    if (num_registers > kNumRegistersToUnroll) {
+      // Too many registers: clear them in a loop, two (one X store) per
+      // iteration, walking down through the frame.
+      Register base = x10;
+      __ Add(base, frame_pointer(), base_offset);
+
+      Label loop;
+      __ Mov(x11, num_registers);
+      __ Bind(&loop);
+      __ Str(twice_non_position_value(),
+             MemOperand(base, -kPointerSize, PostIndex));
+      __ Sub(x11, x11, 2);
+      __ Cbnz(x11, &loop);
+    } else {
+      // Few enough registers: emit the stores directly.
+      for (int i = reg_from; i <= reg_to; i += 2) {
+        __ Str(twice_non_position_value(),
+               MemOperand(frame_pointer(), base_offset));
+        base_offset -= kWRegSizeInBytes * 2;
+      }
+    }
+  }
+}
+
+
+// Save the backtrack stack pointer into capture register 'reg' as an offset
+// relative to the stack base, so it survives stack reallocation (GrowStack).
+void RegExpMacroAssemblerA64::WriteStackPointerToRegister(int reg) {
+  __ Ldr(x10, MemOperand(frame_pointer(), kStackBase));
+  __ Sub(x10, backtrack_stackpointer(), x10);
+  if (masm_->emit_debug_code()) {
+    __ Cmp(x10, Operand(w10, SXTW));
+    // The stack offset needs to fit in a W register.
+    __ Check(eq, kOffsetOutOfRange);
+  }
+  StoreRegister(reg, w10);
+}
+
+
+// Helper function for reading a value out of a stack frame.
+template <typename T>
+static T& frame_entry(Address re_frame, int frame_offset) {
+ return *reinterpret_cast<T*>(re_frame + frame_offset);
+}
+
+
+// Called (via generated code) when the stack guard triggers during regexp
+// execution. Handles real stack overflow, interrupts (which may GC and move
+// the subject string or the code object), and updates the return address and
+// the input pointers if anything moved. Returns 0 to continue matching, or
+// EXCEPTION / RETRY.
+int RegExpMacroAssemblerA64::CheckStackGuardState(Address* return_address,
+                                                  Code* re_code,
+                                                  Address re_frame,
+                                                  int start_offset,
+                                                  const byte** input_start,
+                                                  const byte** input_end) {
+  Isolate* isolate = frame_entry<Isolate*>(re_frame, kIsolate);
+  if (isolate->stack_guard()->IsStackOverflow()) {
+    isolate->StackOverflow();
+    return EXCEPTION;
+  }
+
+  // If not real stack overflow the stack guard was used to interrupt
+  // execution for another purpose.
+
+  // If this is a direct call from JavaScript retry the RegExp forcing the call
+  // through the runtime system. Currently the direct call cannot handle a GC.
+  if (frame_entry<int>(re_frame, kDirectCall) == 1) {
+    return RETRY;
+  }
+
+  // Prepare for possible GC.
+  HandleScope handles(isolate);
+  Handle<Code> code_handle(re_code);
+
+  Handle<String> subject(frame_entry<String*>(re_frame, kInput));
+
+  // Current string.
+  bool is_ascii = subject->IsOneByteRepresentationUnderneath();
+
+  ASSERT(re_code->instruction_start() <= *return_address);
+  ASSERT(*return_address <=
+      re_code->instruction_start() + re_code->instruction_size());
+
+  // May trigger a GC: everything below must cope with moved objects.
+  MaybeObject* result = Execution::HandleStackGuardInterrupt(isolate);
+
+  if (*code_handle != re_code) {  // Return address no longer valid
+    int delta = code_handle->address() - re_code->address();
+    // Overwrite the return address on the stack.
+    *return_address += delta;
+  }
+
+  if (result->IsException()) {
+    return EXCEPTION;
+  }
+
+  Handle<String> subject_tmp = subject;
+  int slice_offset = 0;
+
+  // Extract the underlying string and the slice offset.
+  if (StringShape(*subject_tmp).IsCons()) {
+    subject_tmp = Handle<String>(ConsString::cast(*subject_tmp)->first());
+  } else if (StringShape(*subject_tmp).IsSliced()) {
+    SlicedString* slice = SlicedString::cast(*subject_tmp);
+    subject_tmp = Handle<String>(slice->parent());
+    slice_offset = slice->offset();
+  }
+
+  // String might have changed.
+  if (subject_tmp->IsOneByteRepresentation() != is_ascii) {
+    // If we changed between an ASCII and an UC16 string, the specialized
+    // code cannot be used, and we need to restart regexp matching from
+    // scratch (including, potentially, compiling a new version of the code).
+    return RETRY;
+  }
+
+  // Otherwise, the content of the string might have moved. It must still
+  // be a sequential or external string with the same content.
+  // Update the start and end pointers in the stack frame to the current
+  // location (whether it has actually moved or not).
+  ASSERT(StringShape(*subject_tmp).IsSequential() ||
+         StringShape(*subject_tmp).IsExternal());
+
+  // The original start address of the characters to match.
+  const byte* start_address = *input_start;
+
+  // Find the current start address of the same character at the current string
+  // position.
+  const byte* new_address = StringCharacterPosition(*subject_tmp,
+      start_offset + slice_offset);
+
+  if (start_address != new_address) {
+    // If there is a difference, update the object pointer and start and end
+    // addresses in the RegExp stack frame to match the new value.
+    const byte* end_address = *input_end;
+    int byte_length = static_cast<int>(end_address - start_address);
+    frame_entry<const String*>(re_frame, kInput) = *subject;
+    *input_start = new_address;
+    *input_end = new_address + byte_length;
+  } else if (frame_entry<const String*>(re_frame, kInput) != *subject) {
+    // Subject string might have been a ConsString that underwent
+    // short-circuiting during GC. That will not change start_address but
+    // will change pointer inside the subject handle.
+    frame_entry<const String*>(re_frame, kInput) = *subject;
+  }
+
+  return 0;
+}
+
+
+// Branch to on_outside_input if reading at cp_offset would go past the end
+// of the input. The current offset is negative (relative to the string end),
+// so position >= -cp_offset * char_size means out of bounds.
+void RegExpMacroAssemblerA64::CheckPosition(int cp_offset,
+                                            Label* on_outside_input) {
+  CompareAndBranchOrBacktrack(current_input_offset(),
+                              -cp_offset * char_size(),
+                              ge,
+                              on_outside_input);
+}
+
+
+// Whether multi-character unaligned loads may be emitted. Disabled when
+// running in slow-safe (simulator/debug) mode.
+bool RegExpMacroAssemblerA64::CanReadUnaligned() {
+  // TODO(pielan): See whether or not we should disable unaligned accesses.
+  return !slow_safe();
+}
+
+
+// Private methods:
+
+// Emits a call to the C++ CheckStackGuardState() above, marshalling its six
+// arguments per AAPCS64. input_start/input_end are passed by pointer (in
+// stack slots) because the C++ side may rewrite them after a GC.
+void RegExpMacroAssemblerA64::CallCheckStackGuardState(Register scratch) {
+  // Allocate space on the stack to store the return address. The
+  // CheckStackGuardState C++ function will override it if the code
+  // moved. Allocate extra space for 2 arguments passed by pointers.
+  // AAPCS64 requires the stack to be 16 byte aligned.
+  int alignment = masm_->ActivationFrameAlignment();
+  ASSERT_EQ(alignment % 16, 0);
+  int align_mask = (alignment / kXRegSizeInBytes) - 1;
+  int xreg_to_claim = (3 + align_mask) & ~align_mask;
+
+  ASSERT(csp.Is(__ StackPointer()));
+  __ Claim(xreg_to_claim);
+
+  // CheckStackGuardState needs the end and start addresses of the input string.
+  __ Poke(input_end(), 2 * kPointerSize);
+  __ Add(x5, csp, 2 * kPointerSize);
+  __ Poke(input_start(), kPointerSize);
+  __ Add(x4, csp, kPointerSize);
+
+  __ Mov(w3, start_offset());
+  // RegExp code frame pointer.
+  __ Mov(x2, frame_pointer());
+  // Code* of self.
+  __ Mov(x1, Operand(masm_->CodeObject()));
+
+  // We need to pass a pointer to the return address as first argument.
+  // The DirectCEntry stub will place the return address on the stack before
+  // calling so the stack pointer will point to it.
+  __ Mov(x0, csp);
+
+  ExternalReference check_stack_guard_state =
+      ExternalReference::re_check_stack_guard_state(isolate());
+  __ Mov(scratch, Operand(check_stack_guard_state));
+  DirectCEntryStub stub;
+  stub.GenerateCall(masm_, scratch);
+
+  // The input string may have been moved in memory, we need to reload it.
+  __ Peek(input_start(), kPointerSize);
+  __ Peek(input_end(), 2 * kPointerSize);
+
+  ASSERT(csp.Is(__ StackPointer()));
+  __ Drop(xreg_to_claim);
+
+  // Reload the Code pointer.
+  __ Mov(code_pointer(), Operand(masm_->CodeObject()));
+}
+
+// Conditionally branch to 'to'; a NULL label means branch to the backtrack
+// code. 'al' makes the branch unconditional. The branch is emitted as an
+// inverted short branch around an unconditional one, because the target may
+// be out of conditional-branch (imm19) range.
+void RegExpMacroAssemblerA64::BranchOrBacktrack(Condition condition,
+                                                Label* to) {
+  if (condition == al) {  // Unconditional.
+    if (to == NULL) {
+      Backtrack();
+      return;
+    }
+    __ B(to);
+    return;
+  }
+  if (to == NULL) {
+    to = &backtrack_label_;
+  }
+  // TODO(ulan): do direct jump when jump distance is known and fits in imm19.
+  Condition inverted_condition = InvertCondition(condition);
+  Label no_branch;
+  __ B(inverted_condition, &no_branch);
+  __ B(to);
+  __ Bind(&no_branch);
+}
+
+// Compare reg against an immediate and BranchOrBacktrack on 'condition'.
+// Comparisons against zero for eq/ne use Cbz/Cbnz, avoiding the Cmp.
+void RegExpMacroAssemblerA64::CompareAndBranchOrBacktrack(Register reg,
+                                                          int immediate,
+                                                          Condition condition,
+                                                          Label* to) {
+  if ((immediate == 0) && ((condition == eq) || (condition == ne))) {
+    if (to == NULL) {
+      to = &backtrack_label_;
+    }
+    // TODO(ulan): do direct jump when jump distance is known and fits in imm19.
+    Label no_branch;
+    // Branch around the jump using the inverted zero-test.
+    if (condition == eq) {
+      __ Cbnz(reg, &no_branch);
+    } else {
+      __ Cbz(reg, &no_branch);
+    }
+    __ B(to);
+    __ Bind(&no_branch);
+  } else {
+    __ Cmp(reg, immediate);
+    BranchOrBacktrack(condition, to);
+  }
+}
+
+
+// Call the preemption handler if the C stack pointer is at or below the
+// isolate's stack limit.
+void RegExpMacroAssemblerA64::CheckPreemption() {
+  // Check for preemption.
+  ExternalReference stack_limit =
+      ExternalReference::address_of_stack_limit(isolate());
+  __ Mov(x10, Operand(stack_limit));
+  __ Ldr(x10, MemOperand(x10));
+  ASSERT(csp.Is(__ StackPointer()));
+  __ Cmp(csp, x10);
+  CallIf(&check_preempt_label_, ls);
+}
+
+
+// Call the stack-overflow handler if the backtrack stack pointer is at or
+// below the regexp-stack limit.
+void RegExpMacroAssemblerA64::CheckStackLimit() {
+  ExternalReference stack_limit =
+      ExternalReference::address_of_regexp_stack_limit(isolate());
+  __ Mov(x10, Operand(stack_limit));
+  __ Ldr(x10, MemOperand(x10));
+  __ Cmp(backtrack_stackpointer(), x10);
+  CallIf(&stack_overflow_label_, ls);
+}
+
+
+// Push a 32-bit value on the (downward-growing) backtrack stack using a
+// pre-indexed store.
+void RegExpMacroAssemblerA64::Push(Register source) {
+  ASSERT(source.Is32Bits());
+  ASSERT(!source.is(backtrack_stackpointer()));
+  __ Str(source,
+         MemOperand(backtrack_stackpointer(),
+                    -static_cast<int>(kWRegSizeInBytes),
+                    PreIndex));
+}
+
+
+// Pop a 32-bit value off the backtrack stack using a post-indexed load.
+void RegExpMacroAssemblerA64::Pop(Register target) {
+  ASSERT(target.Is32Bits());
+  ASSERT(!target.is(backtrack_stackpointer()));
+  __ Ldr(target,
+         MemOperand(backtrack_stackpointer(), kWRegSizeInBytes, PostIndex));
+}
+
+
+// Return the X register (x0-x7) that caches the given capture register;
+// each X register packs two consecutive 32-bit capture registers.
+Register RegExpMacroAssemblerA64::GetCachedRegister(int register_index) {
+  ASSERT(register_index < kNumCachedRegisters);
+  return Register::Create(register_index / 2, kXRegSize);
+}
+
+
+// Return a W register holding the value of capture register
+// 'register_index'. Uses the cache register directly for CACHED_LSW;
+// otherwise materializes the value into maybe_result. Callers must not
+// write to the result unless it is maybe_result.
+Register RegExpMacroAssemblerA64::GetRegister(int register_index,
+                                              Register maybe_result) {
+  ASSERT(maybe_result.Is32Bits());
+  ASSERT(register_index >= 0);
+  // Track the highest register index used, for frame sizing in GetCode.
+  if (num_registers_ <= register_index) {
+    num_registers_ = register_index + 1;
+  }
+  Register result;
+  RegisterState register_state = GetRegisterState(register_index);
+  switch (register_state) {
+    case STACKED:
+      __ Ldr(maybe_result, register_location(register_index));
+      result = maybe_result;
+      break;
+    case CACHED_LSW:
+      // The value is the low word of the cache register; no code needed.
+      result = GetCachedRegister(register_index).W();
+      break;
+    case CACHED_MSW:
+      // Shift the high word of the cache register down into maybe_result.
+      __ Lsr(maybe_result.X(), GetCachedRegister(register_index), kWRegSize);
+      result = maybe_result;
+      break;
+    default:
+      UNREACHABLE();
+      break;
+  }
+  ASSERT(result.Is32Bits());
+  return result;
+}
+
+
+// Store 'source' into capture register 'register_index', writing either the
+// stack slot or the appropriate half of the x0-x7 cache register (via Bfi,
+// which preserves the other half).
+void RegExpMacroAssemblerA64::StoreRegister(int register_index,
+                                            Register source) {
+  ASSERT(source.Is32Bits());
+  ASSERT(register_index >= 0);
+  // Track the highest register index used, for frame sizing in GetCode.
+  if (num_registers_ <= register_index) {
+    num_registers_ = register_index + 1;
+  }
+
+  Register cached_register;
+  RegisterState register_state = GetRegisterState(register_index);
+  switch (register_state) {
+    case STACKED:
+      __ Str(source, register_location(register_index));
+      break;
+    case CACHED_LSW:
+      cached_register = GetCachedRegister(register_index);
+      // If source already is the low word of the cache register, skip the Bfi.
+      if (!source.Is(cached_register.W())) {
+        __ Bfi(cached_register, source.X(), 0, kWRegSize);
+      }
+      break;
+    case CACHED_MSW:
+      cached_register = GetCachedRegister(register_index);
+      __ Bfi(cached_register, source.X(), kWRegSize, kWRegSize);
+      break;
+    default:
+      UNREACHABLE();
+      break;
+  }
+}
+
+
+// Emit a conditional Bl to 'to': branch around the call when the (inverted)
+// condition does not hold.
+void RegExpMacroAssemblerA64::CallIf(Label* to, Condition condition) {
+  Label skip_call;
+  if (condition != al) __ B(&skip_call, InvertCondition(condition));
+  __ Bl(to);
+  __ Bind(&skip_call);
+}
+
+
+// Pop lr saved by SaveLinkRegister and rebase it on the code object, undoing
+// the offset encoding that made it GC-safe.
+void RegExpMacroAssemblerA64::RestoreLinkRegister() {
+  ASSERT(csp.Is(__ StackPointer()));
+  __ Pop(lr, xzr);
+  __ Add(lr, lr, Operand(masm_->CodeObject()));
+}
+
+
+// Save lr on the C stack as an offset from the code object rather than a raw
+// address, so a GC that moves the code does not leave a stale pointer.
+// The xzr slot keeps the stack 16-byte aligned.
+void RegExpMacroAssemblerA64::SaveLinkRegister() {
+  ASSERT(csp.Is(__ StackPointer()));
+  __ Sub(lr, lr, Operand(masm_->CodeObject()));
+  __ Push(xzr, lr);
+}
+
+
+// Frame address of a stacked (non-cached) 32-bit capture register. Registers
+// are laid out downwards from kFirstRegisterOnStack.
+MemOperand RegExpMacroAssemblerA64::register_location(int register_index) {
+  ASSERT(register_index < (1<<30));
+  ASSERT(register_index >= kNumCachedRegisters);
+  // Track the highest register index used, for frame sizing in GetCode.
+  if (num_registers_ <= register_index) {
+    num_registers_ = register_index + 1;
+  }
+  register_index -= kNumCachedRegisters;
+  int offset = kFirstRegisterOnStack - register_index * kWRegSizeInBytes;
+  return MemOperand(frame_pointer(), offset);
+}
+
+// Frame address of a 64-bit capture (a pair of position registers) on the
+// stack. Falls back to computing the address in 'scratch' when the offset
+// does not fit the Stp/Ldp immediate field.
+MemOperand RegExpMacroAssemblerA64::capture_location(int register_index,
+                                                     Register scratch) {
+  ASSERT(register_index < (1<<30));
+  ASSERT(register_index < num_saved_registers_);
+  ASSERT(register_index >= kNumCachedRegisters);
+  ASSERT_EQ(register_index % 2, 0);
+  register_index -= kNumCachedRegisters;
+  int offset = kFirstCaptureOnStack - register_index * kWRegSizeInBytes;
+  // capture_location is used with Stp instructions to load/store 2 registers.
+  // The immediate field in the encoding is limited to 7 bits (signed).
+  if (is_int7(offset)) {
+    return MemOperand(frame_pointer(), offset);
+  } else {
+    __ Add(scratch, frame_pointer(), offset);
+    return MemOperand(scratch);
+  }
+}
+
+// Load 'characters' characters at cp_offset from the current position into
+// current_character(), without bounds checks. Multi-character loads require
+// unaligned access support (see CanReadUnaligned).
+void RegExpMacroAssemblerA64::LoadCurrentCharacterUnchecked(int cp_offset,
+                                                            int characters) {
+  Register offset = current_input_offset();
+
+  // The ldr, str, ldrh, strh instructions can do unaligned accesses, if the CPU
+  // and the operating system running on the target allow it.
+  // If unaligned load/stores are not supported then this function must only
+  // be used to load a single character at a time.
+
+  // ARMv8 supports unaligned accesses but V8 or the kernel can decide to
+  // disable it.
+  // TODO(pielan): See whether or not we should disable unaligned accesses.
+  if (!CanReadUnaligned()) {
+    ASSERT(characters == 1);
+  }
+
+  if (cp_offset != 0) {
+    if (masm_->emit_debug_code()) {
+      // Compute the offset in 64 bits and verify it fits in a W register.
+      __ Mov(x10, cp_offset * char_size());
+      __ Add(x10, x10, Operand(current_input_offset(), SXTW));
+      __ Cmp(x10, Operand(w10, SXTW));
+      // The offset needs to fit in a W register.
+      __ Check(eq, kOffsetOutOfRange);
+    } else {
+      __ Add(w10, current_input_offset(), cp_offset * char_size());
+    }
+    offset = w10;
+  }
+
+  // The load width depends on both the char size (mode_) and how many
+  // characters are fetched at once.
+  if (mode_ == ASCII) {
+    if (characters == 4) {
+      __ Ldr(current_character(), MemOperand(input_end(), offset, SXTW));
+    } else if (characters == 2) {
+      __ Ldrh(current_character(), MemOperand(input_end(), offset, SXTW));
+    } else {
+      ASSERT(characters == 1);
+      __ Ldrb(current_character(), MemOperand(input_end(), offset, SXTW));
+    }
+  } else {
+    ASSERT(mode_ == UC16);
+    if (characters == 2) {
+      __ Ldr(current_character(), MemOperand(input_end(), offset, SXTW));
+    } else {
+      ASSERT(characters == 1);
+      __ Ldrh(current_character(), MemOperand(input_end(), offset, SXTW));
+    }
+  }
+}
+
+#endif // V8_INTERPRETED_REGEXP
+
+}} // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_A64
--- /dev/null
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_A64_REGEXP_MACRO_ASSEMBLER_A64_H_
+#define V8_A64_REGEXP_MACRO_ASSEMBLER_A64_H_
+
+#include "a64/assembler-a64.h"
+#include "a64/assembler-a64-inl.h"
+#include "macro-assembler.h"
+
+namespace v8 {
+namespace internal {
+
+
+#ifndef V8_INTERPRETED_REGEXP
+class RegExpMacroAssemblerA64: public NativeRegExpMacroAssembler {
+ public:
+ RegExpMacroAssemblerA64(Mode mode, int registers_to_save, Zone* zone);
+ virtual ~RegExpMacroAssemblerA64();
+ virtual int stack_limit_slack();
+ virtual void AdvanceCurrentPosition(int by);
+ virtual void AdvanceRegister(int reg, int by);
+ virtual void Backtrack();
+ virtual void Bind(Label* label);
+ virtual void CheckAtStart(Label* on_at_start);
+ virtual void CheckCharacter(unsigned c, Label* on_equal);
+ virtual void CheckCharacterAfterAnd(unsigned c,
+ unsigned mask,
+ Label* on_equal);
+ virtual void CheckCharacterGT(uc16 limit, Label* on_greater);
+ virtual void CheckCharacterLT(uc16 limit, Label* on_less);
+ virtual void CheckCharacters(Vector<const uc16> str,
+ int cp_offset,
+ Label* on_failure,
+ bool check_end_of_string);
+ // A "greedy loop" is a loop that is both greedy and with a simple
+ // body. It has a particularly simple implementation.
+ virtual void CheckGreedyLoop(Label* on_tos_equals_current_position);
+ virtual void CheckNotAtStart(Label* on_not_at_start);
+ virtual void CheckNotBackReference(int start_reg, Label* on_no_match);
+ virtual void CheckNotBackReferenceIgnoreCase(int start_reg,
+ Label* on_no_match);
+ virtual void CheckNotCharacter(unsigned c, Label* on_not_equal);
+ virtual void CheckNotCharacterAfterAnd(unsigned c,
+ unsigned mask,
+ Label* on_not_equal);
+ virtual void CheckNotCharacterAfterMinusAnd(uc16 c,
+ uc16 minus,
+ uc16 mask,
+ Label* on_not_equal);
+ virtual void CheckCharacterInRange(uc16 from,
+ uc16 to,
+ Label* on_in_range);
+ virtual void CheckCharacterNotInRange(uc16 from,
+ uc16 to,
+ Label* on_not_in_range);
+ virtual void CheckBitInTable(Handle<ByteArray> table, Label* on_bit_set);
+
+ // Checks whether the given offset from the current position is before
+ // the end of the string.
+ virtual void CheckPosition(int cp_offset, Label* on_outside_input);
+ virtual bool CheckSpecialCharacterClass(uc16 type,
+ Label* on_no_match);
+ virtual void Fail();
+ virtual Handle<HeapObject> GetCode(Handle<String> source);
+ virtual void GoTo(Label* label);
+ virtual void IfRegisterGE(int reg, int comparand, Label* if_ge);
+ virtual void IfRegisterLT(int reg, int comparand, Label* if_lt);
+ virtual void IfRegisterEqPos(int reg, Label* if_eq);
+ virtual IrregexpImplementation Implementation();
+ virtual void LoadCurrentCharacter(int cp_offset,
+ Label* on_end_of_input,
+ bool check_bounds = true,
+ int characters = 1);
+ virtual void PopCurrentPosition();
+ virtual void PopRegister(int register_index);
+ virtual void PushBacktrack(Label* label);
+ virtual void PushCurrentPosition();
+ virtual void PushRegister(int register_index,
+ StackCheckFlag check_stack_limit);
+ virtual void ReadCurrentPositionFromRegister(int reg);
+ virtual void ReadStackPointerFromRegister(int reg);
+ virtual void SetCurrentPositionFromEnd(int by);
+ virtual void SetRegister(int register_index, int to);
+ virtual bool Succeed();
+ virtual void WriteCurrentPositionToRegister(int reg, int cp_offset);
+ virtual void ClearRegisters(int reg_from, int reg_to);
+ virtual void WriteStackPointerToRegister(int reg);
+ virtual bool CanReadUnaligned();
+
+ // Called from RegExp if the stack-guard is triggered.
+ // If the code object is relocated, the return address is fixed before
+ // returning.
+ static int CheckStackGuardState(Address* return_address,
+ Code* re_code,
+ Address re_frame,
+ int start_offset,
+ const byte** input_start,
+ const byte** input_end);
+
+ private:
+ // Above the frame pointer - Stored registers and stack passed parameters.
+ // Callee-saved registers x19-x29, where x29 is the old frame pointer.
+ static const int kCalleeSavedRegisters = 0;
+ // Return address.
+ // It is placed above the 11 callee-saved registers.
+ static const int kReturnAddress = kCalleeSavedRegisters + 11 * kPointerSize;
+ static const int kSecondaryReturnAddress = kReturnAddress + kPointerSize;
+ // Stack parameter placed by caller.
+ static const int kIsolate = kSecondaryReturnAddress + kPointerSize;
+
+ // Below the frame pointer.
+ // Register parameters stored by setup code.
+ static const int kDirectCall = kCalleeSavedRegisters - kPointerSize;
+ static const int kStackBase = kDirectCall - kPointerSize;
+ static const int kOutputSize = kStackBase - kPointerSize;
+ static const int kInput = kOutputSize - kPointerSize;
+ // When adding local variables remember to push space for them in
+ // the frame in GetCode.
+ static const int kSuccessCounter = kInput - kPointerSize;
+ // First position register address on the stack. Following positions are
+ // below it. A position is a 32 bit value.
+ static const int kFirstRegisterOnStack = kSuccessCounter - kWRegSizeInBytes;
+ // A capture is a 64 bit value holding two position.
+ static const int kFirstCaptureOnStack = kSuccessCounter - kXRegSizeInBytes;
+
+ // Initial size of code buffer.
+ static const size_t kRegExpCodeSize = 1024;
+
+ // When initializing registers to a non-position value we can unroll
+ // the loop. Set the limit of registers to unroll.
+ static const int kNumRegistersToUnroll = 16;
+
+ // We are using x0 to x7 as a register cache. Each hardware register must
+ // contain one capture, that is two 32 bit registers. We can cache at most
+ // 16 registers.
+ static const int kNumCachedRegisters = 16;
+
+ // Load a number of characters at the given offset from the
+ // current position, into the current-character register.
+ void LoadCurrentCharacterUnchecked(int cp_offset, int character_count);
+
+ // Check whether preemption has been requested.
+ void CheckPreemption();
+
+ // Check whether we are exceeding the stack limit on the backtrack stack.
+ void CheckStackLimit();
+
+ // Generate a call to CheckStackGuardState.
+ void CallCheckStackGuardState(Register scratch);
+
+ // Location of a 32 bit position register.
+ MemOperand register_location(int register_index);
+
+ // Location of a 64 bit capture, combining two position registers.
+ MemOperand capture_location(int register_index, Register scratch);
+
+ // Register holding the current input position as negative offset from
+ // the end of the string.
+ Register current_input_offset() { return w21; }
+
+ // The register containing the current character after LoadCurrentCharacter.
+ Register current_character() { return w22; }
+
+ // Register holding address of the end of the input string.
+ Register input_end() { return x25; }
+
+ // Register holding address of the start of the input string.
+ Register input_start() { return x26; }
+
+ // Register holding the offset from the start of the string where we should
+ // start matching.
+ Register start_offset() { return w27; }
+
+ // Pointer to the output array's first element.
+ Register output_array() { return x28; }
+
+ // Register holding the frame address. Local variables, parameters and
+ // regexp registers are addressed relative to this.
+ Register frame_pointer() { return fp; }
+
+ // The register containing the backtrack stack top. Provides a meaningful
+ // name to the register.
+ Register backtrack_stackpointer() { return x23; }
+
+ // Register holding pointer to the current code object.
+ Register code_pointer() { return x20; }
+
+ // Register holding the value used for clearing capture registers.
+ Register non_position_value() { return w24; }
+ // The top 32 bit of this register is used to store this value
+ // twice. This is used for clearing more than one register at a time.
+ Register twice_non_position_value() { return x24; }
+
+ // Byte size of chars in the string to match (decided by the Mode argument)
+ int char_size() { return static_cast<int>(mode_); }
+
+ // Equivalent to a conditional branch to the label, unless the label
+ // is NULL, in which case it is a conditional Backtrack.
+ void BranchOrBacktrack(Condition condition, Label* to);
+
+ // Compares reg against immmediate before calling BranchOrBacktrack.
+ // It makes use of the Cbz and Cbnz instructions.
+ void CompareAndBranchOrBacktrack(Register reg,
+ int immediate,
+ Condition condition,
+ Label* to);
+
+ inline void CallIf(Label* to, Condition condition);
+
+ // Save and restore the link register on the stack in a way that
+ // is GC-safe.
+ inline void SaveLinkRegister();
+ inline void RestoreLinkRegister();
+
+ // Pushes the value of a register on the backtrack stack. Decrements the
+ // stack pointer by a word size and stores the register's value there.
+ inline void Push(Register source);
+
+ // Pops a value from the backtrack stack. Reads the word at the stack pointer
+ // and increments it by a word size.
+ inline void Pop(Register target);
+
+ // This state indicates where the register actually is.
+ enum RegisterState {
+ STACKED, // Resides in memory.
+ CACHED_LSW, // Least Significant Word of a 64 bit hardware register.
+ CACHED_MSW // Most Significant Word of a 64 bit hardware register.
+ };
+
+ RegisterState GetRegisterState(int register_index) {
+ ASSERT(register_index >= 0);
+ if (register_index >= kNumCachedRegisters) {
+ return STACKED;
+ } else {
+ if ((register_index % 2) == 0) {
+ return CACHED_LSW;
+ } else {
+ return CACHED_MSW;
+ }
+ }
+ }
+
+ // Store helper that takes the state of the register into account.
+ inline void StoreRegister(int register_index, Register source);
+
+ // Returns a hardware W register that holds the value of the capture
+ // register.
+ //
+ // This function will try to use an existing cache register (w0-w7) for the
+ // result. Otherwise, it will load the value into maybe_result.
+ //
+ // If the returned register is anything other than maybe_result, calling code
+ // must not write to it.
+ inline Register GetRegister(int register_index, Register maybe_result);
+
+ // Returns the hardware register (x0-x7) holding the value of the capture
+ // register.
+ // This assumes that the state of the register is not STACKED.
+ inline Register GetCachedRegister(int register_index);
+
+ Isolate* isolate() const { return masm_->isolate(); }
+
+ MacroAssembler* masm_;
+
+ // Which mode to generate code for (ASCII or UC16).
+ Mode mode_;
+
+ // One greater than maximal register index actually used.
+ int num_registers_;
+
+ // Number of registers to output at the end (the saved registers
+ // are always 0..num_saved_registers_-1)
+ int num_saved_registers_;
+
+ // Labels used internally.
+ Label entry_label_;
+ Label start_label_;
+ Label success_label_;
+ Label backtrack_label_;
+ Label exit_label_;
+ Label check_preempt_label_;
+ Label stack_overflow_label_;
+};
+
+#endif // V8_INTERPRETED_REGEXP
+
+
+}} // namespace v8::internal
+
+#endif // V8_A64_REGEXP_MACRO_ASSEMBLER_A64_H_
--- /dev/null
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include <stdlib.h>
+#include <cmath>
+#include <cstdarg>
+#include "v8.h"
+
+#if V8_TARGET_ARCH_A64
+
+#include "disasm.h"
+#include "assembler.h"
+#include "a64/simulator-a64.h"
+#include "macro-assembler.h"
+
+namespace v8 {
+namespace internal {
+
+#if defined(USE_SIMULATOR)
+
+
+// This macro provides a platform independent use of sscanf. The reason for
+// SScanF not being implemented in a platform independent way through
+// ::v8::internal::OS in the same way as SNPrintF is that the
+// Windows C Run-Time Library does not provide vsscanf.
+#define SScanF sscanf // NOLINT
+
+
+// This is basically the same as PrintF, with a guard for FLAG_trace_sim.
+void PRINTF_CHECKING TraceSim(const char* format, ...) {
+ if (FLAG_trace_sim) {
+ va_list arguments;
+ va_start(arguments, format);
+ OS::VPrint(format, arguments);
+ va_end(arguments);
+ }
+}
+
+
+const Instruction* Simulator::kEndOfSimAddress = NULL;
+
+
+void SimSystemRegister::SetBits(int msb, int lsb, uint32_t bits) {
+ int width = msb - lsb + 1;
+ ASSERT(is_uintn(bits, width) || is_intn(bits, width));
+
+ bits <<= lsb;
+ uint32_t mask = ((1 << width) - 1) << lsb;
+ ASSERT((mask & write_ignore_mask_) == 0);
+
+ value_ = (value_ & ~mask) | (bits & mask);
+}
+
+
+SimSystemRegister SimSystemRegister::DefaultValueFor(SystemRegister id) {
+ switch (id) {
+ case NZCV:
+ return SimSystemRegister(0x00000000, NZCVWriteIgnoreMask);
+ case FPCR:
+ return SimSystemRegister(0x00000000, FPCRWriteIgnoreMask);
+ default:
+ UNREACHABLE();
+ return SimSystemRegister();
+ }
+}
+
+
+void Simulator::Initialize(Isolate* isolate) {
+ if (isolate->simulator_initialized()) return;
+ isolate->set_simulator_initialized(true);
+ ExternalReference::set_redirector(isolate, &RedirectExternalReference);
+}
+
+
+// Get the active Simulator for the current thread.
+Simulator* Simulator::current(Isolate* isolate) {
+ Isolate::PerIsolateThreadData* isolate_data =
+ isolate->FindOrAllocatePerThreadDataForThisThread();
+ ASSERT(isolate_data != NULL);
+
+ Simulator* sim = isolate_data->simulator();
+ if (sim == NULL) {
+ // TODO(146): delete the simulator object when a thread/isolate goes away.
+ sim = new Simulator(new Decoder(), isolate);
+ isolate_data->set_simulator(sim);
+ }
+ return sim;
+}
+
+
+void Simulator::CallVoid(byte* entry, va_list args) {
+ int index_x = 0;
+ int index_d = 0;
+
+ // At this point, we don't know how much stack space we need (for arguments
+ // that don't fit into registers). We can only do one pass through the
+ // va_list, so we store the extra arguments in a vector, then copy them to
+ // their proper locations later.
+ std::vector<int64_t> stack_args(0);
+
+ // Process register arguments.
+ CallArgument arg = va_arg(args, CallArgument);
+ while (!arg.IsEnd()) {
+ if (arg.IsX() && (index_x < 8)) {
+ set_xreg(index_x++, arg.bits());
+ } else if (arg.IsD() && (index_d < 8)) {
+ set_dreg_bits(index_d++, arg.bits());
+ } else {
+ ASSERT(arg.IsD() || arg.IsX());
+ stack_args.push_back(arg.bits());
+ }
+ arg = va_arg(args, CallArgument);
+ }
+
+ // Process stack arguments, and make sure the stack is suitably aligned.
+ uintptr_t original_stack = sp();
+ uintptr_t entry_stack = original_stack -
+ stack_args.size() * sizeof(stack_args[0]);
+ if (OS::ActivationFrameAlignment() != 0) {
+ entry_stack &= -OS::ActivationFrameAlignment();
+ }
+ char * stack = reinterpret_cast<char*>(entry_stack);
+ std::vector<int64_t>::const_iterator it;
+ for (it = stack_args.begin(); it != stack_args.end(); it++) {
+ memcpy(stack, &(*it), sizeof(*it));
+ stack += sizeof(*it);
+ }
+
+ ASSERT(reinterpret_cast<uintptr_t>(stack) <= original_stack);
+ set_sp(entry_stack);
+
+ // Call the generated code.
+ set_pc(entry);
+ set_lr(kEndOfSimAddress);
+ CheckPCSComplianceAndRun();
+
+ set_sp(original_stack);
+}
+
+
+
+void Simulator::CallVoid(byte* entry, ...) {
+ va_list args;
+ va_start(args, entry);
+ CallVoid(entry, args);
+ va_end(args);
+}
+
+
+int64_t Simulator::CallInt64(byte* entry, ...) {
+ va_list args;
+ va_start(args, entry);
+ CallVoid(entry, args);
+ va_end(args);
+
+ return xreg(0);
+}
+
+
+double Simulator::CallDouble(byte* entry, ...) {
+ va_list args;
+ va_start(args, entry);
+ CallVoid(entry, args);
+ va_end(args);
+
+ return dreg(0);
+}
+
+
+int64_t Simulator::CallJS(byte* entry,
+ byte* function_entry,
+ JSFunction* func,
+ Object* revc,
+ int64_t argc,
+ Object*** argv) {
+ return CallInt64(entry,
+ CallArgument(function_entry),
+ CallArgument(func),
+ CallArgument(revc),
+ CallArgument(argc),
+ CallArgument(argv),
+ CallArgument::End());
+}
+
+int64_t Simulator::CallRegExp(byte* entry,
+ String* input,
+ int64_t start_offset,
+ const byte* input_start,
+ const byte* input_end,
+ int* output,
+ int64_t output_size,
+ Address stack_base,
+ int64_t direct_call,
+ void* return_address,
+ Isolate* isolate) {
+ return CallInt64(entry,
+ CallArgument(input),
+ CallArgument(start_offset),
+ CallArgument(input_start),
+ CallArgument(input_end),
+ CallArgument(output),
+ CallArgument(output_size),
+ CallArgument(stack_base),
+ CallArgument(direct_call),
+ CallArgument(return_address),
+ CallArgument(isolate),
+ CallArgument::End());
+}
+
+
+void Simulator::CheckPCSComplianceAndRun() {
+#ifdef DEBUG
+ CHECK_EQ(kNumberOfCalleeSavedRegisters, kCalleeSaved.Count());
+ CHECK_EQ(kNumberOfCalleeSavedFPRegisters, kCalleeSavedFP.Count());
+
+ int64_t saved_registers[kNumberOfCalleeSavedRegisters];
+ uint64_t saved_fpregisters[kNumberOfCalleeSavedFPRegisters];
+
+ CPURegList register_list = kCalleeSaved;
+ CPURegList fpregister_list = kCalleeSavedFP;
+
+ for (int i = 0; i < kNumberOfCalleeSavedRegisters; i++) {
+ // x31 is not a caller saved register, so no need to specify if we want
+ // the stack or zero.
+ saved_registers[i] = xreg(register_list.PopLowestIndex().code());
+ }
+ for (int i = 0; i < kNumberOfCalleeSavedFPRegisters; i++) {
+ saved_fpregisters[i] =
+ dreg_bits(fpregister_list.PopLowestIndex().code());
+ }
+ int64_t original_stack = sp();
+#endif
+ // Start the simulation!
+ Run();
+#ifdef DEBUG
+ CHECK_EQ(original_stack, sp());
+ // Check that callee-saved registers have been preserved.
+ register_list = kCalleeSaved;
+ fpregister_list = kCalleeSavedFP;
+ for (int i = 0; i < kNumberOfCalleeSavedRegisters; i++) {
+ CHECK_EQ(saved_registers[i], xreg(register_list.PopLowestIndex().code()));
+ }
+ for (int i = 0; i < kNumberOfCalleeSavedFPRegisters; i++) {
+ ASSERT(saved_fpregisters[i] ==
+ dreg_bits(fpregister_list.PopLowestIndex().code()));
+ }
+
+ // Corrupt caller-saved registers minus the return registers.
+
+ // In theory x0 to x7 can be used for return values, but V8 only uses x0, x1
+ // for now.
+ register_list = kCallerSaved;
+ register_list.Remove(x0);
+ register_list.Remove(x1);
+
+ // In theory d0 to d7 can be used for return values, but V8 only uses d0
+ // for now.
+ fpregister_list = kCallerSavedFP;
+ fpregister_list.Remove(d0);
+
+ CorruptRegisters(®ister_list, kCallerSavedRegisterCorruptionValue);
+ CorruptRegisters(&fpregister_list, kCallerSavedFPRegisterCorruptionValue);
+#endif
+}
+
+
+#ifdef DEBUG
+ // The least significant byte of the corruption value holds the corresponding
+// register's code.
+void Simulator::CorruptRegisters(CPURegList* list, uint64_t value) {
+ if (list->type() == CPURegister::kRegister) {
+ while (!list->IsEmpty()) {
+ unsigned code = list->PopLowestIndex().code();
+ set_xreg(code, value | code);
+ }
+ } else {
+ ASSERT(list->type() == CPURegister::kFPRegister);
+ while (!list->IsEmpty()) {
+ unsigned code = list->PopLowestIndex().code();
+ set_dreg_bits(code, value | code);
+ }
+ }
+}
+
+
+void Simulator::CorruptAllCallerSavedCPURegisters() {
+ // CorruptRegisters alters its parameters, so copy them first.
+ CPURegList register_list = kCallerSaved;
+ CPURegList fpregister_list = kCallerSavedFP;
+
+ CorruptRegisters(®ister_list, kCallerSavedRegisterCorruptionValue);
+ CorruptRegisters(&fpregister_list, kCallerSavedFPRegisterCorruptionValue);
+}
+#endif
+
+
+// Extending the stack by 2 * 64 bits is required for stack alignment purposes.
+// TODO(all): Insert a marker in the extra space allocated on the stack.
+uintptr_t Simulator::PushAddress(uintptr_t address) {
+ ASSERT(sizeof(uintptr_t) < 2 * kXRegSizeInBytes);
+ intptr_t new_sp = sp() - 2 * kXRegSizeInBytes;
+ uintptr_t* stack_slot = reinterpret_cast<uintptr_t*>(new_sp);
+ *stack_slot = address;
+ set_sp(new_sp);
+ return new_sp;
+}
+
+
+uintptr_t Simulator::PopAddress() {
+ intptr_t current_sp = sp();
+ uintptr_t* stack_slot = reinterpret_cast<uintptr_t*>(current_sp);
+ uintptr_t address = *stack_slot;
+ ASSERT(sizeof(uintptr_t) < 2 * kXRegSizeInBytes);
+ set_sp(current_sp + 2 * kXRegSizeInBytes);
+ return address;
+}
+
+
+// Returns the limit of the stack area to enable checking for stack overflows.
+uintptr_t Simulator::StackLimit() const {
+ // Leave a safety margin of 1024 bytes to prevent overrunning the stack when
+ // pushing values.
+ // TODO(all): Increase the stack limit protection.
+
+ // The margin was decreased to 256 bytes, because we are intensively using
+ // the stack. The stack usage should decrease when our code improves. Then
+ // we can set it to 1024 again.
+ return reinterpret_cast<uintptr_t>(stack_limit_) + 256;
+}
+
+
+Simulator::Simulator(Decoder* decoder, Isolate* isolate, FILE* stream)
+ : decoder_(decoder), last_debugger_input_(NULL), log_parameters_(NO_PARAM),
+ isolate_(isolate) {
+ // Setup the decoder.
+ decoder_->AppendVisitor(this);
+
+ ResetState();
+
+ // Allocate and setup the simulator stack.
+ stack_size_ = (FLAG_sim_stack_size * KB) + (2 * stack_protection_size_);
+ stack_ = new byte[stack_size_];
+ stack_limit_ = stack_ + stack_protection_size_;
+ byte* tos = stack_ + stack_size_ - stack_protection_size_;
+ // The stack pointer must be 16 bytes aligned.
+ set_sp(reinterpret_cast<int64_t>(tos) & ~0xfUL);
+
+ stream_ = stream;
+ print_disasm_ = new PrintDisassembler(stream_);
+
+ if (FLAG_trace_sim) {
+ decoder_->InsertVisitorBefore(print_disasm_, this);
+ log_parameters_ = LOG_ALL;
+ }
+
+ // The debugger needs to disassemble code without the simulator executing an
+ // instruction, so we create a dedicated decoder.
+ disassembler_decoder_ = new Decoder();
+ disassembler_decoder_->AppendVisitor(print_disasm_);
+
+ if (FLAG_log_instruction_stats) {
+ instrument_ = new Instrument(FLAG_log_instruction_file,
+ FLAG_log_instruction_period);
+ decoder_->AppendVisitor(instrument_);
+ }
+}
+
+
+void Simulator::ResetState() {
+ // Reset the system registers.
+ nzcv_ = SimSystemRegister::DefaultValueFor(NZCV);
+ fpcr_ = SimSystemRegister::DefaultValueFor(FPCR);
+
+ // Reset registers to 0.
+ pc_ = NULL;
+ for (unsigned i = 0; i < kNumberOfRegisters; i++) {
+ set_xreg(i, 0xbadbeef);
+ }
+ for (unsigned i = 0; i < kNumberOfFPRegisters; i++) {
+ // Set FP registers to a value that is NaN in both 32-bit and 64-bit FP.
+ set_dreg_bits(i, 0x7ff000007f800001UL);
+ }
+ // Returning to address 0 exits the Simulator.
+ set_lr(kEndOfSimAddress);
+
+ // Reset debug helpers.
+ breakpoints_.empty();
+ break_on_next_= false;
+}
+
+
+Simulator::~Simulator() {
+ delete[] stack_;
+ if (FLAG_log_instruction_stats) {
+ delete instrument_;
+ }
+ delete disassembler_decoder_;
+ delete print_disasm_;
+ DeleteArray(last_debugger_input_);
+}
+
+
+void Simulator::Run() {
+ pc_modified_ = false;
+ while (pc_ != kEndOfSimAddress) {
+ ExecuteInstruction();
+ }
+}
+
+
+void Simulator::RunFrom(Instruction* start) {
+ set_pc(start);
+ Run();
+}
+
+
+void Simulator::CheckStackAlignment() {
+ // TODO(aleram): The sp alignment check to perform depends on the processor
+ // state. Check the specifications for more details.
+}
+
+
+// When the generated code calls an external reference we need to catch that in
+// the simulator. The external reference will be a function compiled for the
+// host architecture. We need to call that function instead of trying to
+// execute it with the simulator. We do that by redirecting the external
+// reference to a svc (Supervisor Call) instruction that is handled by
+// the simulator. We write the original destination of the jump just at a known
+// offset from the svc instruction so the simulator knows what to call.
+class Redirection {
+ public:
+ Redirection(void* external_function, ExternalReference::Type type)
+ : external_function_(external_function),
+ type_(type),
+ next_(NULL) {
+ redirect_call_.SetInstructionBits(
+ HLT | Assembler::ImmException(kImmExceptionIsRedirectedCall));
+ Isolate* isolate = Isolate::Current();
+ next_ = isolate->simulator_redirection();
+ // TODO(all): Simulator flush I cache
+ isolate->set_simulator_redirection(this);
+ }
+
+ void* address_of_redirect_call() {
+ return reinterpret_cast<void*>(&redirect_call_);
+ }
+
+ void* external_function() { return external_function_; }
+ ExternalReference::Type type() { return type_; }
+
+ static Redirection* Get(void* external_function,
+ ExternalReference::Type type) {
+ Isolate* isolate = Isolate::Current();
+ Redirection* current = isolate->simulator_redirection();
+ for (; current != NULL; current = current->next_) {
+ if (current->external_function_ == external_function) {
+ ASSERT_EQ(current->type(), type);
+ return current;
+ }
+ }
+ return new Redirection(external_function, type);
+ }
+
+ static Redirection* FromHltInstruction(Instruction* redirect_call) {
+ char* addr_of_hlt = reinterpret_cast<char*>(redirect_call);
+ char* addr_of_redirection =
+ addr_of_hlt - OFFSET_OF(Redirection, redirect_call_);
+ return reinterpret_cast<Redirection*>(addr_of_redirection);
+ }
+
+ static void* ReverseRedirection(int64_t reg) {
+ Redirection* redirection =
+ FromHltInstruction(reinterpret_cast<Instruction*>(reg));
+ return redirection->external_function();
+ }
+
+ private:
+ void* external_function_;
+ Instruction redirect_call_;
+ ExternalReference::Type type_;
+ Redirection* next_;
+};
+
+
+void* Simulator::RedirectExternalReference(void* external_function,
+ ExternalReference::Type type) {
+ Redirection* redirection = Redirection::Get(external_function, type);
+ return redirection->address_of_redirect_call();
+}
+
+
+const char* Simulator::xreg_names[] = {
+"x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7",
+"x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15",
+"ip0", "ip1", "x18", "x19", "x20", "x21", "x22", "x23",
+"x24", "x25", "x26", "cp", "jssp", "fp", "lr", "xzr", "csp"};
+
+const char* Simulator::wreg_names[] = {
+"w0", "w1", "w2", "w3", "w4", "w5", "w6", "w7",
+"w8", "w9", "w10", "w11", "w12", "w13", "w14", "w15",
+"w16", "w17", "w18", "w19", "w20", "w21", "w22", "w23",
+"w24", "w25", "w26", "wcp", "wjssp", "wfp", "wlr", "wzr", "wcsp"};
+
+const char* Simulator::sreg_names[] = {
+"s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7",
+"s8", "s9", "s10", "s11", "s12", "s13", "s14", "s15",
+"s16", "s17", "s18", "s19", "s20", "s21", "s22", "s23",
+"s24", "s25", "s26", "s27", "s28", "s29", "s30", "s31"};
+
+const char* Simulator::dreg_names[] = {
+"d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7",
+"d8", "d9", "d10", "d11", "d12", "d13", "d14", "d15",
+"d16", "d17", "d18", "d19", "d20", "d21", "d22", "d23",
+"d24", "d25", "d26", "d27", "d28", "d29", "d30", "d31"};
+
+const char* Simulator::vreg_names[] = {
+"v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7",
+"v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15",
+"v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23",
+"v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"};
+
+
+const char* Simulator::WRegNameForCode(unsigned code, Reg31Mode mode) {
+ ASSERT(code < kNumberOfRegisters);
+ // If the code represents the stack pointer, index the name after zr.
+ if ((code == kZeroRegCode) && (mode == Reg31IsStackPointer)) {
+ code = kZeroRegCode + 1;
+ }
+ return wreg_names[code];
+}
+
+
+const char* Simulator::XRegNameForCode(unsigned code, Reg31Mode mode) {
+ ASSERT(code < kNumberOfRegisters);
+ // If the code represents the stack pointer, index the name after zr.
+ if ((code == kZeroRegCode) && (mode == Reg31IsStackPointer)) {
+ code = kZeroRegCode + 1;
+ }
+ return xreg_names[code];
+}
+
+
+const char* Simulator::SRegNameForCode(unsigned code) {
+ ASSERT(code < kNumberOfFPRegisters);
+ return sreg_names[code];
+}
+
+
+const char* Simulator::DRegNameForCode(unsigned code) {
+ ASSERT(code < kNumberOfFPRegisters);
+ return dreg_names[code];
+}
+
+
+const char* Simulator::VRegNameForCode(unsigned code) {
+ ASSERT(code < kNumberOfFPRegisters);
+ return vreg_names[code];
+}
+
+
+int Simulator::CodeFromName(const char* name) {
+ for (unsigned i = 0; i < kNumberOfRegisters; i++) {
+ if ((strcmp(xreg_names[i], name) == 0) ||
+ (strcmp(wreg_names[i], name) == 0)) {
+ return i;
+ }
+ }
+ for (unsigned i = 0; i < kNumberOfFPRegisters; i++) {
+ if ((strcmp(vreg_names[i], name) == 0) ||
+ (strcmp(dreg_names[i], name) == 0) ||
+ (strcmp(sreg_names[i], name) == 0)) {
+ return i;
+ }
+ }
+ if ((strcmp("csp", name) == 0) || (strcmp("wcsp", name) == 0)) {
+ return kSPRegInternalCode;
+ }
+ return -1;
+}
+
+
+// Helpers ---------------------------------------------------------------------
+int64_t Simulator::AddWithCarry(unsigned reg_size,
+ bool set_flags,
+ int64_t src1,
+ int64_t src2,
+ int64_t carry_in) {
+ ASSERT((carry_in == 0) || (carry_in == 1));
+ ASSERT((reg_size == kXRegSize) || (reg_size == kWRegSize));
+
+ uint64_t u1, u2;
+ int64_t result;
+ int64_t signed_sum = src1 + src2 + carry_in;
+
+ uint32_t N, Z, C, V;
+
+ if (reg_size == kWRegSize) {
+ u1 = static_cast<uint64_t>(src1) & kWRegMask;
+ u2 = static_cast<uint64_t>(src2) & kWRegMask;
+
+ result = signed_sum & kWRegMask;
+ // Compute the C flag by comparing the sum to the max unsigned integer.
+ C = ((kWMaxUInt - u1) < (u2 + carry_in)) ||
+ ((kWMaxUInt - u1 - carry_in) < u2);
+ // Overflow iff the sign bit is the same for the two inputs and different
+ // for the result.
+ int64_t s_src1 = src1 << (kXRegSize - kWRegSize);
+ int64_t s_src2 = src2 << (kXRegSize - kWRegSize);
+ int64_t s_result = result << (kXRegSize - kWRegSize);
+ V = ((s_src1 ^ s_src2) >= 0) && ((s_src1 ^ s_result) < 0);
+
+ } else {
+ u1 = static_cast<uint64_t>(src1);
+ u2 = static_cast<uint64_t>(src2);
+
+ result = signed_sum;
+ // Compute the C flag by comparing the sum to the max unsigned integer.
+ C = ((kXMaxUInt - u1) < (u2 + carry_in)) ||
+ ((kXMaxUInt - u1 - carry_in) < u2);
+ // Overflow iff the sign bit is the same for the two inputs and different
+ // for the result.
+ V = ((src1 ^ src2) >= 0) && ((src1 ^ result) < 0);
+ }
+
+ N = CalcNFlag(result, reg_size);
+ Z = CalcZFlag(result);
+
+ if (set_flags) {
+ nzcv().SetN(N);
+ nzcv().SetZ(Z);
+ nzcv().SetC(C);
+ nzcv().SetV(V);
+ }
+ return result;
+}
+
+
+int64_t Simulator::ShiftOperand(unsigned reg_size,
+ int64_t value,
+ Shift shift_type,
+ unsigned amount) {
+ if (amount == 0) {
+ return value;
+ }
+ int64_t mask = reg_size == kXRegSize ? kXRegMask : kWRegMask;
+ switch (shift_type) {
+ case LSL:
+ return (value << amount) & mask;
+ case LSR:
+ return static_cast<uint64_t>(value) >> amount;
+ case ASR: {
+ // Shift used to restore the sign.
+ unsigned s_shift = kXRegSize - reg_size;
+ // Value with its sign restored.
+ int64_t s_value = (value << s_shift) >> s_shift;
+ return (s_value >> amount) & mask;
+ }
+ case ROR: {
+ if (reg_size == kWRegSize) {
+ value &= kWRegMask;
+ }
+ return (static_cast<uint64_t>(value) >> amount) |
+ ((value & ((1L << amount) - 1L)) << (reg_size - amount));
+ }
+ default:
+ UNIMPLEMENTED();
+ return 0;
+ }
+}
+
+
+int64_t Simulator::ExtendValue(unsigned reg_size,
+ int64_t value,
+ Extend extend_type,
+ unsigned left_shift) {
+ switch (extend_type) {
+ case UXTB:
+ value &= kByteMask;
+ break;
+ case UXTH:
+ value &= kHalfWordMask;
+ break;
+ case UXTW:
+ value &= kWordMask;
+ break;
+ case SXTB:
+ value = (value << 56) >> 56;
+ break;
+ case SXTH:
+ value = (value << 48) >> 48;
+ break;
+ case SXTW:
+ value = (value << 32) >> 32;
+ break;
+ case UXTX:
+ case SXTX:
+ break;
+ default:
+ UNREACHABLE();
+ }
+ int64_t mask = (reg_size == kXRegSize) ? kXRegMask : kWRegMask;
+ return (value << left_shift) & mask;
+}
+
+
+void Simulator::FPCompare(double val0, double val1) {
+ AssertSupportedFPCR();
+
+ // TODO(jbramley): This assumes that the C++ implementation handles
+ // comparisons in the way that we expect (as per AssertSupportedFPCR()).
+ if ((std::isnan(val0) != 0) || (std::isnan(val1) != 0)) {
+ nzcv().SetRawValue(FPUnorderedFlag);
+ } else if (val0 < val1) {
+ nzcv().SetRawValue(FPLessThanFlag);
+ } else if (val0 > val1) {
+ nzcv().SetRawValue(FPGreaterThanFlag);
+ } else if (val0 == val1) {
+ nzcv().SetRawValue(FPEqualFlag);
+ } else {
+ UNREACHABLE();
+ }
+}
+
+
+void Simulator::SetBreakpoint(Instruction* location) {
+ for (unsigned i = 0; i < breakpoints_.size(); i++) {
+ if (breakpoints_.at(i).location == location) {
+ PrintF("Existing breakpoint at %p was %s\n",
+ reinterpret_cast<void*>(location),
+ breakpoints_.at(i).enabled ? "disabled" : "enabled");
+ breakpoints_.at(i).enabled = !breakpoints_.at(i).enabled;
+ return;
+ }
+ }
+ Breakpoint new_breakpoint = {location, true};
+ breakpoints_.push_back(new_breakpoint);
+ PrintF("Set a breakpoint at %p\n", reinterpret_cast<void*>(location));
+}
+
+
+void Simulator::ListBreakpoints() {
+ PrintF("Breakpoints:\n");
+ for (unsigned i = 0; i < breakpoints_.size(); i++) {
+ PrintF("%p : %s\n",
+ reinterpret_cast<void*>(breakpoints_.at(i).location),
+ breakpoints_.at(i).enabled ? "enabled" : "disabled");
+ }
+}
+
+
+void Simulator::CheckBreakpoints() {
+ bool hit_a_breakpoint = false;
+ for (unsigned i = 0; i < breakpoints_.size(); i++) {
+ if ((breakpoints_.at(i).location == pc_) &&
+ breakpoints_.at(i).enabled) {
+ hit_a_breakpoint = true;
+ // Disable this breakpoint.
+ breakpoints_.at(i).enabled = false;
+ }
+ }
+ if (hit_a_breakpoint) {
+ PrintF("Hit and disabled a breakpoint at %p.\n",
+ reinterpret_cast<void*>(pc_));
+ Debug();
+ }
+}
+
+
+void Simulator::CheckBreakNext() {
+ // If the current instruction is a BL, insert a breakpoint just after it.
+ if (break_on_next_ && pc_->IsBranchAndLinkToRegister()) {
+ SetBreakpoint(pc_->NextInstruction());
+ break_on_next_ = false;
+ }
+}
+
+
+void Simulator::PrintInstructionsAt(Instruction* start, uint64_t count) {
+ Instruction* end = start->InstructionAtOffset(count * kInstructionSize);
+ for (Instruction* pc = start; pc < end; pc = pc->NextInstruction()) {
+ disassembler_decoder_->Decode(pc);
+ }
+}
+
+
+void Simulator::PrintSystemRegisters(bool print_all) {
+ static bool first_run = true;
+
+ // Define some colour codes to use for the register dump.
+ // TODO(jbramley): Find a more elegant way of defining these.
+ char const * const clr_normal = (FLAG_log_colour) ? ("\033[m") : ("");
+ char const * const clr_flag_name = (FLAG_log_colour) ? ("\033[1;30m") : ("");
+ char const * const clr_flag_value = (FLAG_log_colour) ? ("\033[1;37m") : ("");
+
+ static SimSystemRegister last_nzcv;
+ if (print_all || first_run || (last_nzcv.RawValue() != nzcv().RawValue())) {
+ fprintf(stream_, "# %sFLAGS: %sN:%d Z:%d C:%d V:%d%s\n",
+ clr_flag_name,
+ clr_flag_value,
+ N(), Z(), C(), V(),
+ clr_normal);
+ }
+ last_nzcv = nzcv();
+
+ static SimSystemRegister last_fpcr;
+ if (print_all || first_run || (last_fpcr.RawValue() != fpcr().RawValue())) {
+ static const char * rmode[] = {
+ "0b00 (Round to Nearest)",
+ "0b01 (Round towards Plus Infinity)",
+ "0b10 (Round towards Minus Infinity)",
+ "0b11 (Round towards Zero)"
+ };
+ ASSERT(fpcr().RMode() <= (sizeof(rmode) / sizeof(rmode[0])));
+ fprintf(stream_, "# %sFPCR: %sAHP:%d DN:%d FZ:%d RMode:%s%s\n",
+ clr_flag_name,
+ clr_flag_value,
+ fpcr().AHP(), fpcr().DN(), fpcr().FZ(), rmode[fpcr().RMode()],
+ clr_normal);
+ }
+ last_fpcr = fpcr();
+
+ first_run = false;
+}
+
+
+void Simulator::PrintRegisters(bool print_all_regs) {
+ static bool first_run = true;
+ static int64_t last_regs[kNumberOfRegisters];
+
+ // Define some colour codes to use for the register dump.
+ // TODO(jbramley): Find a more elegant way of defining these.
+ char const * const clr_normal = (FLAG_log_colour) ? ("\033[m") : ("");
+ char const * const clr_reg_name = (FLAG_log_colour) ? ("\033[1;34m") : ("");
+ char const * const clr_reg_value = (FLAG_log_colour) ? ("\033[1;36m") : ("");
+
+ for (unsigned i = 0; i < kNumberOfRegisters; i++) {
+ if (print_all_regs || first_run ||
+ (last_regs[i] != xreg(i, Reg31IsStackPointer))) {
+ fprintf(stream_,
+ "# %s%4s:%s 0x%016" PRIx64 "%s\n",
+ clr_reg_name,
+ XRegNameForCode(i, Reg31IsStackPointer),
+ clr_reg_value,
+ xreg(i, Reg31IsStackPointer),
+ clr_normal);
+ }
+ // Cache the new register value so the next run can detect any changes.
+ last_regs[i] = xreg(i, Reg31IsStackPointer);
+ }
+ first_run = false;
+}
+
+
+void Simulator::PrintFPRegisters(bool print_all_regs) {
+ static bool first_run = true;
+ static uint64_t last_regs[kNumberOfFPRegisters];
+
+ // Define some colour codes to use for the register dump.
+ // TODO(jbramley): Find a more elegant way of defining these.
+ char const * const clr_normal = (FLAG_log_colour) ? ("\033[m") : ("");
+ char const * const clr_reg_name = (FLAG_log_colour) ? ("\033[1;33m") : ("");
+ char const * const clr_reg_value = (FLAG_log_colour) ? ("\033[1;35m") : ("");
+
+ // Print as many rows of registers as necessary, keeping each individual
+ // register in the same column each time (to make it easy to visually scan
+ // for changes).
+ for (unsigned i = 0; i < kNumberOfFPRegisters; i++) {
+ if (print_all_regs || first_run || (last_regs[i] != dreg_bits(i))) {
+ fprintf(stream_,
+ "# %s %4s:%s 0x%016" PRIx64 "%s (%s%s:%s %g%s %s:%s %g%s)\n",
+ clr_reg_name,
+ VRegNameForCode(i),
+ clr_reg_value,
+ dreg_bits(i),
+ clr_normal,
+ clr_reg_name,
+ DRegNameForCode(i),
+ clr_reg_value,
+ dreg(i),
+ clr_reg_name,
+ SRegNameForCode(i),
+ clr_reg_value,
+ sreg(i),
+ clr_normal);
+ }
+ // Cache the new register value so the next run can detect any changes.
+ last_regs[i] = dreg_bits(i);
+ }
+ first_run = false;
+}
+
+
+void Simulator::PrintProcessorState() {
+ PrintSystemRegisters();
+ PrintRegisters();
+ PrintFPRegisters();
+}
+
+
+void Simulator::PrintWrite(uint8_t* address,
+ uint64_t value,
+ unsigned num_bytes) {
+ // Define some color codes to use for memory logging.
+ const char* const clr_normal = (FLAG_log_colour) ? ("\033[m")
+ : ("");
+ const char* const clr_memory_value = (FLAG_log_colour) ? ("\033[1;32m")
+ : ("");
+ const char* const clr_memory_address = (FLAG_log_colour) ? ("\033[32m")
+ : ("");
+
+ // The template is "# value -> address". The template is not directly used
+ // in the printf since compilers tend to struggle with the parametrized
+ // width (%0*).
+ const char* format = "# %s0x%0*" PRIx64 "%s -> %s0x%016" PRIx64 "%s\n";
+ fprintf(stream_,
+ format,
+ clr_memory_value,
+ num_bytes * 2, // The width in hexa characters.
+ value,
+ clr_normal,
+ clr_memory_address,
+ address,
+ clr_normal);
+}
+
+
+// Visitors---------------------------------------------------------------------
+
+void Simulator::VisitUnimplemented(Instruction* instr) {
+ fprintf(stream_, "Unimplemented instruction at %p: 0x%08" PRIx32 "\n",
+ reinterpret_cast<void*>(instr), instr->InstructionBits());
+ UNIMPLEMENTED();
+}
+
+
+void Simulator::VisitUnallocated(Instruction* instr) {
+ fprintf(stream_, "Unallocated instruction at %p: 0x%08" PRIx32 "\n",
+ reinterpret_cast<void*>(instr), instr->InstructionBits());
+ UNIMPLEMENTED();
+}
+
+
+void Simulator::VisitPCRelAddressing(Instruction* instr) {
+ switch (instr->Mask(PCRelAddressingMask)) {
+ case ADR:
+ set_reg(instr->Rd(), instr->ImmPCOffsetTarget());
+ break;
+ case ADRP: // Not implemented in the assembler.
+ UNIMPLEMENTED();
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+}
+
+
+void Simulator::VisitUnconditionalBranch(Instruction* instr) {
+ switch (instr->Mask(UnconditionalBranchMask)) {
+ case BL:
+ set_lr(instr->NextInstruction());
+ // Fall through.
+ case B:
+ set_pc(instr->ImmPCOffsetTarget());
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
+
+
+void Simulator::VisitConditionalBranch(Instruction* instr) {
+ ASSERT(instr->Mask(ConditionalBranchMask) == B_cond);
+ if (ConditionPassed(static_cast<Condition>(instr->ConditionBranch()))) {
+ set_pc(instr->ImmPCOffsetTarget());
+ }
+}
+
+
// BR, BLR and RET: indirect branch to the address held in Rn. BLR also
// writes the return address to the link register before branching.
void Simulator::VisitUnconditionalBranchToRegister(Instruction* instr) {
  Instruction* target = reg<Instruction*>(instr->Rn());
  switch (instr->Mask(UnconditionalBranchToRegisterMask)) {
    case BLR: {
      set_lr(instr->NextInstruction());
      // Fall through.
    }
    case BR:
    case RET: set_pc(target); break;
    default: UNIMPLEMENTED();
  }
}
+
+
+void Simulator::VisitTestBranch(Instruction* instr) {
+ unsigned bit_pos = (instr->ImmTestBranchBit5() << 5) |
+ instr->ImmTestBranchBit40();
+ bool take_branch = ((xreg(instr->Rt()) & (1UL << bit_pos)) == 0);
+ switch (instr->Mask(TestBranchMask)) {
+ case TBZ: break;
+ case TBNZ: take_branch = !take_branch; break;
+ default: UNIMPLEMENTED();
+ }
+ if (take_branch) {
+ set_pc(instr->ImmPCOffsetTarget());
+ }
+}
+
+
// CBZ/CBNZ: compare the W or X view of Rt against zero and branch when the
// comparison succeeds.
void Simulator::VisitCompareBranch(Instruction* instr) {
  unsigned rt = instr->Rt();
  bool take_branch = false;
  switch (instr->Mask(CompareBranchMask)) {
    case CBZ_w: take_branch = (wreg(rt) == 0); break;
    case CBZ_x: take_branch = (xreg(rt) == 0); break;
    case CBNZ_w: take_branch = (wreg(rt) != 0); break;
    case CBNZ_x: take_branch = (xreg(rt) != 0); break;
    default: UNIMPLEMENTED();
  }
  if (take_branch) {
    set_pc(instr->ImmPCOffsetTarget());
  }
}
+
+
// Common tail for all add/subtract forms. 'op2' has already been shifted or
// extended by the caller. Subtraction is implemented as Rn + ~op2 + 1, so a
// single AddWithCarry path computes both directions; flags are updated only
// when the instruction's S bit is set.
void Simulator::AddSubHelper(Instruction* instr, int64_t op2) {
  unsigned reg_size = instr->SixtyFourBits() ? kXRegSize : kWRegSize;
  bool set_flags = instr->FlagsUpdate();
  int64_t new_val = 0;
  Instr operation = instr->Mask(AddSubOpMask);

  switch (operation) {
    case ADD:
    case ADDS: {
      new_val = AddWithCarry(reg_size,
                             set_flags,
                             reg(reg_size, instr->Rn(), instr->RnMode()),
                             op2);
      break;
    }
    case SUB:
    case SUBS: {
      // Two's-complement subtraction: invert op2 and add a carry-in of 1.
      new_val = AddWithCarry(reg_size,
                             set_flags,
                             reg(reg_size, instr->Rn(), instr->RnMode()),
                             ~op2,
                             1);
      break;
    }
    default: UNREACHABLE();
  }

  set_reg(reg_size, instr->Rd(), new_val, instr->RdMode());
}
+
+
// Add/subtract (shifted register): op2 is Rm shifted by an immediate amount.
void Simulator::VisitAddSubShifted(Instruction* instr) {
  unsigned reg_size = instr->SixtyFourBits() ? kXRegSize : kWRegSize;
  int64_t op2 = ShiftOperand(reg_size,
                             reg(reg_size, instr->Rm()),
                             static_cast<Shift>(instr->ShiftDP()),
                             instr->ImmDPShift());
  AddSubHelper(instr, op2);
}


// Add/subtract (immediate): op2 is a 12-bit immediate, optionally shifted
// left by 12.
void Simulator::VisitAddSubImmediate(Instruction* instr) {
  int64_t op2 = instr->ImmAddSub() << ((instr->ShiftAddSub() == 1) ? 12 : 0);
  AddSubHelper(instr, op2);
}


// Add/subtract (extended register): op2 is Rm sign- or zero-extended and then
// shifted left by the immediate amount.
void Simulator::VisitAddSubExtended(Instruction* instr) {
  unsigned reg_size = instr->SixtyFourBits() ? kXRegSize : kWRegSize;
  int64_t op2 = ExtendValue(reg_size,
                            reg(reg_size, instr->Rm()),
                            static_cast<Extend>(instr->ExtendMode()),
                            instr->ImmExtendShift());
  AddSubHelper(instr, op2);
}
+
+
// ADC/ADCS/SBC/SBCS: add or subtract with the current carry flag as carry-in.
// The subtract forms invert op2, computing Rn + ~op2 + C.
void Simulator::VisitAddSubWithCarry(Instruction* instr) {
  unsigned reg_size = instr->SixtyFourBits() ? kXRegSize : kWRegSize;
  int64_t op2 = reg(reg_size, instr->Rm());
  int64_t new_val;

  if ((instr->Mask(AddSubOpMask) == SUB) || instr->Mask(AddSubOpMask) == SUBS) {
    op2 = ~op2;
  }

  new_val = AddWithCarry(reg_size,
                         instr->FlagsUpdate(),
                         reg(reg_size, instr->Rn()),
                         op2,
                         C());

  set_reg(reg_size, instr->Rd(), new_val);
}
+
+
// Logical (shifted register): op2 is Rm shifted by an immediate. The N bit
// inverts op2, producing the BIC/ORN/EON variants.
void Simulator::VisitLogicalShifted(Instruction* instr) {
  unsigned reg_size = instr->SixtyFourBits() ? kXRegSize : kWRegSize;
  Shift shift_type = static_cast<Shift>(instr->ShiftDP());
  unsigned shift_amount = instr->ImmDPShift();
  int64_t op2 = ShiftOperand(reg_size, reg(reg_size, instr->Rm()), shift_type,
                             shift_amount);
  if (instr->Mask(NOT) == NOT) {
    op2 = ~op2;
  }
  LogicalHelper(instr, op2);
}


// Logical (immediate): op2 is the decoded bitmask immediate.
void Simulator::VisitLogicalImmediate(Instruction* instr) {
  LogicalHelper(instr, instr->ImmLogical());
}
+
+
// Common tail for logical operations; op2 is fully prepared by the caller.
// ANDS sets N and Z from the result and clears C and V.
void Simulator::LogicalHelper(Instruction* instr, int64_t op2) {
  unsigned reg_size = instr->SixtyFourBits() ? kXRegSize : kWRegSize;
  int64_t op1 = reg(reg_size, instr->Rn());
  int64_t result = 0;
  bool update_flags = false;

  // Switch on the logical operation, stripping out the NOT bit, as it has a
  // different meaning for logical immediate instructions.
  switch (instr->Mask(LogicalOpMask & ~NOT)) {
    case ANDS: update_flags = true;  // Fall through.
    case AND: result = op1 & op2; break;
    case ORR: result = op1 | op2; break;
    case EOR: result = op1 ^ op2; break;
    default:
      UNIMPLEMENTED();
  }

  if (update_flags) {
    nzcv().SetN(CalcNFlag(result, reg_size));
    nzcv().SetZ(CalcZFlag(result));
    nzcv().SetC(0);
    nzcv().SetV(0);
  }

  set_reg(reg_size, instr->Rd(), result, instr->RdMode());
}
+
+
// CCMP/CCMN (register): op2 is the W or X view of Rm.
void Simulator::VisitConditionalCompareRegister(Instruction* instr) {
  unsigned reg_size = instr->SixtyFourBits() ? kXRegSize : kWRegSize;
  ConditionalCompareHelper(instr, reg(reg_size, instr->Rm()));
}


// CCMP/CCMN (immediate): op2 is a small unsigned immediate.
void Simulator::VisitConditionalCompareImmediate(Instruction* instr) {
  ConditionalCompareHelper(instr, instr->ImmCondCmp());
}
+
+
// Conditional compare: when the condition passes, set the flags from
// comparing Rn with op2 (CCMP subtracts via Rn + ~op2 + 1, CCMN adds);
// otherwise load the flags directly from the instruction's nzcv field.
void Simulator::ConditionalCompareHelper(Instruction* instr, int64_t op2) {
  unsigned reg_size = instr->SixtyFourBits() ? kXRegSize : kWRegSize;
  int64_t op1 = reg(reg_size, instr->Rn());

  if (ConditionPassed(static_cast<Condition>(instr->Condition()))) {
    // If the condition passes, set the status flags to the result of comparing
    // the operands.
    if (instr->Mask(ConditionalCompareMask) == CCMP) {
      AddWithCarry(reg_size, true, op1, ~op2, 1);
    } else {
      ASSERT(instr->Mask(ConditionalCompareMask) == CCMN);
      AddWithCarry(reg_size, true, op1, op2, 0);
    }
  } else {
    // If the condition fails, set the status flags to the nzcv immediate.
    nzcv().SetFlags(instr->Nzcv());
  }
}
+
+
// Load/store (unsigned immediate): the offset is scaled by the access size.
void Simulator::VisitLoadStoreUnsignedOffset(Instruction* instr) {
  int offset = instr->ImmLSUnsigned() << instr->SizeLS();
  LoadStoreHelper(instr, offset, Offset);
}


// Load/store (unscaled immediate): the 9-bit offset is used as-is.
void Simulator::VisitLoadStoreUnscaledOffset(Instruction* instr) {
  LoadStoreHelper(instr, instr->ImmLS(), Offset);
}


// Load/store (pre-indexed): base register updated before the access.
void Simulator::VisitLoadStorePreIndex(Instruction* instr) {
  LoadStoreHelper(instr, instr->ImmLS(), PreIndex);
}


// Load/store (post-indexed): base register updated after the access.
void Simulator::VisitLoadStorePostIndex(Instruction* instr) {
  LoadStoreHelper(instr, instr->ImmLS(), PostIndex);
}


// Load/store (register offset): the offset is Rm, extended and optionally
// scaled by the access size.
void Simulator::VisitLoadStoreRegisterOffset(Instruction* instr) {
  Extend ext = static_cast<Extend>(instr->ExtendMode());
  ASSERT((ext == UXTW) || (ext == UXTX) || (ext == SXTW) || (ext == SXTX));
  unsigned shift_amount = instr->ImmShiftLS() * instr->SizeLS();

  int64_t offset = ExtendValue(kXRegSize, xreg(instr->Rm()), ext,
                               shift_amount);
  LoadStoreHelper(instr, offset, Offset);
}
+
+
// Common implementation for single-register loads and stores. 'offset' has
// already been decoded by the caller; 'addrmode' selects plain offset,
// pre-index or post-index write-back. The ordering of write-back relative to
// the memory access is deliberate -- see the comments below.
void Simulator::LoadStoreHelper(Instruction* instr,
                                int64_t offset,
                                AddrMode addrmode) {
  unsigned srcdst = instr->Rt();
  unsigned addr_reg = instr->Rn();
  uint8_t* address = LoadStoreAddress(addr_reg, offset, addrmode);
  int num_bytes = 1 << instr->SizeLS();
  uint8_t* stack = NULL;

  // Handle the writeback for stores before the store. On a CPU the writeback
  // and the store are atomic, but when running on the simulator it is possible
  // to be interrupted in between. The simulator is not thread safe and V8 does
  // not require it to be to run JavaScript therefore the profiler may sample
  // the "simulated" CPU in the middle of load/store with writeback. The code
  // below ensures that push operations are safe even when interrupted: the
  // stack pointer will be decremented before adding an element to the stack.
  if (instr->IsStore()) {
    LoadStoreWriteBack(addr_reg, offset, addrmode);

    // For store the address post writeback is used to check access below the
    // stack.
    stack = reinterpret_cast<uint8_t*>(sp());
  }

  LoadStoreOp op = static_cast<LoadStoreOp>(instr->Mask(LoadStoreOpMask));
  switch (op) {
    // Zero-extending loads and plain stores share the sized MemoryRead/
    // MemoryWrite paths; num_bytes carries the access width.
    case LDRB_w:
    case LDRH_w:
    case LDR_w:
    case LDR_x: set_xreg(srcdst, MemoryRead(address, num_bytes)); break;
    case STRB_w:
    case STRH_w:
    case STR_w:
    case STR_x: MemoryWrite(address, xreg(srcdst), num_bytes); break;
    // Sign-extending loads go through ExtendValue for the sign extension.
    case LDRSB_w: {
      set_wreg(srcdst, ExtendValue(kWRegSize, MemoryRead8(address), SXTB));
      break;
    }
    case LDRSB_x: {
      set_xreg(srcdst, ExtendValue(kXRegSize, MemoryRead8(address), SXTB));
      break;
    }
    case LDRSH_w: {
      set_wreg(srcdst, ExtendValue(kWRegSize, MemoryRead16(address), SXTH));
      break;
    }
    case LDRSH_x: {
      set_xreg(srcdst, ExtendValue(kXRegSize, MemoryRead16(address), SXTH));
      break;
    }
    case LDRSW_x: {
      set_xreg(srcdst, ExtendValue(kXRegSize, MemoryRead32(address), SXTW));
      break;
    }
    case LDR_s: set_sreg(srcdst, MemoryReadFP32(address)); break;
    case LDR_d: set_dreg(srcdst, MemoryReadFP64(address)); break;
    case STR_s: MemoryWriteFP32(address, sreg(srcdst)); break;
    case STR_d: MemoryWriteFP64(address, dreg(srcdst)); break;
    default: UNIMPLEMENTED();
  }

  // Handle the writeback for loads after the load to ensure safe pop
  // operation even when interrupted in the middle of it. The stack pointer
  // is only updated after the load so pop(fp) will never break the invariant
  // sp <= fp expected while walking the stack in the sampler.
  if (instr->IsLoad()) {
    // For loads the address pre writeback is used to check access below the
    // stack.
    stack = reinterpret_cast<uint8_t*>(sp());

    LoadStoreWriteBack(addr_reg, offset, addrmode);
  }

  // Accesses below the stack pointer (but above the platform stack limit) are
  // not allowed in the ABI.
  CheckMemoryAccess(address, stack);
}
+
+
// Load/store pair (offset addressing).
void Simulator::VisitLoadStorePairOffset(Instruction* instr) {
  LoadStorePairHelper(instr, Offset);
}


// Load/store pair (pre-indexed): base updated before the access.
void Simulator::VisitLoadStorePairPreIndex(Instruction* instr) {
  LoadStorePairHelper(instr, PreIndex);
}


// Load/store pair (post-indexed): base updated after the access.
void Simulator::VisitLoadStorePairPostIndex(Instruction* instr) {
  LoadStorePairHelper(instr, PostIndex);
}


// Load/store pair (non-temporal): the cache hint has no effect in the
// simulator, so this behaves exactly like the offset form.
void Simulator::VisitLoadStorePairNonTemporal(Instruction* instr) {
  LoadStorePairHelper(instr, Offset);
}
+
+
// Common implementation for register-pair loads and stores. The second
// register accesses the slot one register-size above 'address'. Write-back
// ordering mirrors LoadStoreHelper -- see the comments below.
void Simulator::LoadStorePairHelper(Instruction* instr,
                                    AddrMode addrmode) {
  unsigned rt = instr->Rt();
  unsigned rt2 = instr->Rt2();
  unsigned addr_reg = instr->Rn();
  // The pair offset is scaled by the size of a single element.
  int offset = instr->ImmLSPair() << instr->SizeLSPair();
  uint8_t* address = LoadStoreAddress(addr_reg, offset, addrmode);
  uint8_t* stack = NULL;

  // Handle the writeback for stores before the store. On a CPU the writeback
  // and the store are atomic, but when running on the simulator it is possible
  // to be interrupted in between. The simulator is not thread safe and V8 does
  // not require it to be to run JavaScript therefore the profiler may sample
  // the "simulated" CPU in the middle of load/store with writeback. The code
  // below ensures that push operations are safe even when interrupted: the
  // stack pointer will be decremented before adding an element to the stack.
  if (instr->IsStore()) {
    LoadStoreWriteBack(addr_reg, offset, addrmode);

    // For store the address post writeback is used to check access below the
    // stack.
    stack = reinterpret_cast<uint8_t*>(sp());
  }

  LoadStorePairOp op =
    static_cast<LoadStorePairOp>(instr->Mask(LoadStorePairMask));

  // 'rt' and 'rt2' can only be aliased for stores.
  ASSERT(((op & LoadStorePairLBit) == 0) || (rt != rt2));

  switch (op) {
    case LDP_w: {
      set_wreg(rt, MemoryRead32(address));
      set_wreg(rt2, MemoryRead32(address + kWRegSizeInBytes));
      break;
    }
    case LDP_s: {
      set_sreg(rt, MemoryReadFP32(address));
      set_sreg(rt2, MemoryReadFP32(address + kSRegSizeInBytes));
      break;
    }
    case LDP_x: {
      set_xreg(rt, MemoryRead64(address));
      set_xreg(rt2, MemoryRead64(address + kXRegSizeInBytes));
      break;
    }
    case LDP_d: {
      set_dreg(rt, MemoryReadFP64(address));
      set_dreg(rt2, MemoryReadFP64(address + kDRegSizeInBytes));
      break;
    }
    case LDPSW_x: {
      // Sign-extending 32-bit pair load.
      set_xreg(rt, ExtendValue(kXRegSize, MemoryRead32(address), SXTW));
      set_xreg(rt2, ExtendValue(kXRegSize,
               MemoryRead32(address + kWRegSizeInBytes), SXTW));
      break;
    }
    case STP_w: {
      MemoryWrite32(address, wreg(rt));
      MemoryWrite32(address + kWRegSizeInBytes, wreg(rt2));
      break;
    }
    case STP_s: {
      MemoryWriteFP32(address, sreg(rt));
      MemoryWriteFP32(address + kSRegSizeInBytes, sreg(rt2));
      break;
    }
    case STP_x: {
      MemoryWrite64(address, xreg(rt));
      MemoryWrite64(address + kXRegSizeInBytes, xreg(rt2));
      break;
    }
    case STP_d: {
      MemoryWriteFP64(address, dreg(rt));
      MemoryWriteFP64(address + kDRegSizeInBytes, dreg(rt2));
      break;
    }
    default: UNREACHABLE();
  }

  // Handle the writeback for loads after the load to ensure safe pop
  // operation even when interrupted in the middle of it. The stack pointer
  // is only updated after the load so pop(fp) will never break the invariant
  // sp <= fp expected while walking the stack in the sampler.
  if (instr->IsLoad()) {
    // For loads the address pre writeback is used to check access below the
    // stack.
    stack = reinterpret_cast<uint8_t*>(sp());

    LoadStoreWriteBack(addr_reg, offset, addrmode);
  }

  // Accesses below the stack pointer (but above the platform stack limit) are
  // not allowed in the ABI.
  CheckMemoryAccess(address, stack);
}
+
+
// LDR (literal): load a W, X, S or D register from a PC-relative literal
// pool address.
void Simulator::VisitLoadLiteral(Instruction* instr) {
  uint8_t* address = instr->LiteralAddress();
  unsigned rt = instr->Rt();

  switch (instr->Mask(LoadLiteralMask)) {
    case LDR_w_lit: set_wreg(rt, MemoryRead32(address)); break;
    case LDR_x_lit: set_xreg(rt, MemoryRead64(address)); break;
    case LDR_s_lit: set_sreg(rt, MemoryReadFP32(address)); break;
    case LDR_d_lit: set_dreg(rt, MemoryReadFP64(address)); break;
    default: UNREACHABLE();
  }
}
+
+
// Compute the effective address for a load/store. Offset and pre-index
// addressing add the offset now; post-index returns the unmodified base
// (the offset is applied later by LoadStoreWriteBack). Using the stack
// pointer as base requires 16-byte alignment.
uint8_t* Simulator::LoadStoreAddress(unsigned addr_reg,
                                     int64_t offset,
                                     AddrMode addrmode) {
  const unsigned kSPRegCode = kSPRegInternalCode & kRegCodeMask;
  int64_t address = xreg(addr_reg, Reg31IsStackPointer);
  if ((addr_reg == kSPRegCode) && ((address % 16) != 0)) {
    // When the base register is SP the stack pointer is required to be
    // quadword aligned prior to the address calculation and write-backs.
    // Misalignment will cause a stack alignment fault.
    ALIGNMENT_EXCEPTION();
  }

  if ((addrmode == Offset) || (addrmode == PreIndex)) {
    address += offset;
  }

  return reinterpret_cast<uint8_t*>(address);
}
+
+
// Apply pre-/post-index write-back to the base register, treating register
// 31 as the stack pointer. A no-op for plain offset addressing.
void Simulator::LoadStoreWriteBack(unsigned addr_reg,
                                   int64_t offset,
                                   AddrMode addrmode) {
  if ((addrmode == PreIndex) || (addrmode == PostIndex)) {
    ASSERT(offset != 0);
    uint64_t address = xreg(addr_reg, Reg31IsStackPointer);
    set_reg(addr_reg, address + offset, Reg31IsStackPointer);
  }
}
+
+
// Abort the simulation if an access landed below the stack pointer but above
// the platform stack limit: the ABI forbids using memory below sp.
void Simulator::CheckMemoryAccess(uint8_t* address, uint8_t* stack) {
  if ((address >= stack_limit_) && (address < stack)) {
    fprintf(stream_, "ACCESS BELOW STACK POINTER:\n");
    fprintf(stream_, "  sp is here:          0x%16p\n", stack);
    fprintf(stream_, "  access was here:     0x%16p\n", address);
    fprintf(stream_, "  stack limit is here: 0x%16p\n", stack_limit_);
    fprintf(stream_, "\n");
    ABORT();
  }
}
+
+
// Read num_bytes (1..8) from simulated memory, zero-extended to 64 bits.
// memcpy is used so unaligned accesses are safe on the host.
uint64_t Simulator::MemoryRead(uint8_t* address, unsigned num_bytes) {
  ASSERT(address != NULL);
  ASSERT((num_bytes > 0) && (num_bytes <= sizeof(uint64_t)));
  uint64_t read = 0;
  memcpy(&read, address, num_bytes);
  return read;
}


// Sized convenience wrappers over MemoryRead.
uint8_t Simulator::MemoryRead8(uint8_t* address) {
  return MemoryRead(address, sizeof(uint8_t));
}


uint16_t Simulator::MemoryRead16(uint8_t* address) {
  return MemoryRead(address, sizeof(uint16_t));
}


uint32_t Simulator::MemoryRead32(uint8_t* address) {
  return MemoryRead(address, sizeof(uint32_t));
}


// Read a 32-bit value and reinterpret its bits as a float.
float Simulator::MemoryReadFP32(uint8_t* address) {
  return rawbits_to_float(MemoryRead32(address));
}


uint64_t Simulator::MemoryRead64(uint8_t* address) {
  return MemoryRead(address, sizeof(uint64_t));
}


// Read a 64-bit value and reinterpret its bits as a double.
double Simulator::MemoryReadFP64(uint8_t* address) {
  return rawbits_to_double(MemoryRead64(address));
}
+
+
// Write the low num_bytes (1..8) of 'value' to simulated memory, logging the
// access first. memcpy keeps unaligned accesses safe on the host.
void Simulator::MemoryWrite(uint8_t* address,
                            uint64_t value,
                            unsigned num_bytes) {
  ASSERT(address != NULL);
  ASSERT((num_bytes > 0) && (num_bytes <= sizeof(uint64_t)));

  LogWrite(address, value, num_bytes);
  memcpy(address, &value, num_bytes);
}


// Sized convenience wrappers over MemoryWrite.
void Simulator::MemoryWrite32(uint8_t* address, uint32_t value) {
  MemoryWrite(address, value, sizeof(uint32_t));
}


// Store a float by its raw bit pattern.
void Simulator::MemoryWriteFP32(uint8_t* address, float value) {
  MemoryWrite32(address, float_to_rawbits(value));
}


void Simulator::MemoryWrite64(uint8_t* address, uint64_t value) {
  MemoryWrite(address, value, sizeof(uint64_t));
}


// Store a double by its raw bit pattern.
void Simulator::MemoryWriteFP64(uint8_t* address, double value) {
  MemoryWrite64(address, double_to_rawbits(value));
}
+
+
+void Simulator::VisitMoveWideImmediate(Instruction* instr) {
+ MoveWideImmediateOp mov_op =
+ static_cast<MoveWideImmediateOp>(instr->Mask(MoveWideImmediateMask));
+ int64_t new_xn_val = 0;
+
+ bool is_64_bits = instr->SixtyFourBits() == 1;
+ // Shift is limited for W operations.
+ ASSERT(is_64_bits || (instr->ShiftMoveWide() < 2));
+
+ // Get the shifted immediate.
+ int64_t shift = instr->ShiftMoveWide() * 16;
+ int64_t shifted_imm16 = instr->ImmMoveWide() << shift;
+
+ // Compute the new value.
+ switch (mov_op) {
+ case MOVN_w:
+ case MOVN_x: {
+ new_xn_val = ~shifted_imm16;
+ if (!is_64_bits) new_xn_val &= kWRegMask;
+ break;
+ }
+ case MOVK_w:
+ case MOVK_x: {
+ unsigned reg_code = instr->Rd();
+ int64_t prev_xn_val = is_64_bits ? xreg(reg_code)
+ : wreg(reg_code);
+ new_xn_val = (prev_xn_val & ~(0xffffL << shift)) | shifted_imm16;
+ break;
+ }
+ case MOVZ_w:
+ case MOVZ_x: {
+ new_xn_val = shifted_imm16;
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
+
+ // Update the destination register.
+ set_xreg(instr->Rd(), new_xn_val);
+}
+
+
// CSEL/CSINC/CSINV/CSNEG: write Rn when the condition passes; otherwise
// write Rm, optionally incremented, inverted or negated.
void Simulator::VisitConditionalSelect(Instruction* instr) {
  uint64_t new_val = xreg(instr->Rn());

  if (ConditionFailed(static_cast<Condition>(instr->Condition()))) {
    new_val = xreg(instr->Rm());
    switch (instr->Mask(ConditionalSelectMask)) {
      case CSEL_w:
      case CSEL_x: break;
      case CSINC_w:
      case CSINC_x: new_val++; break;
      case CSINV_w:
      case CSINV_x: new_val = ~new_val; break;
      case CSNEG_w:
      case CSNEG_x: new_val = -new_val; break;
      default: UNIMPLEMENTED();
    }
  }
  unsigned reg_size = instr->SixtyFourBits() ? kXRegSize : kWRegSize;
  set_reg(reg_size, instr->Rd(), new_val);
}
+
+
// One-source data processing: bit reverse (RBIT), byte reverse (REV16/
// REV32/REV), count leading zeros (CLZ) and count leading sign bits (CLS),
// each in W and X forms.
void Simulator::VisitDataProcessing1Source(Instruction* instr) {
  unsigned dst = instr->Rd();
  unsigned src = instr->Rn();

  switch (instr->Mask(DataProcessing1SourceMask)) {
    case RBIT_w: set_wreg(dst, ReverseBits(wreg(src), kWRegSize)); break;
    case RBIT_x: set_xreg(dst, ReverseBits(xreg(src), kXRegSize)); break;
    case REV16_w: set_wreg(dst, ReverseBytes(wreg(src), Reverse16)); break;
    case REV16_x: set_xreg(dst, ReverseBytes(xreg(src), Reverse16)); break;
    case REV_w: set_wreg(dst, ReverseBytes(wreg(src), Reverse32)); break;
    case REV32_x: set_xreg(dst, ReverseBytes(xreg(src), Reverse32)); break;
    case REV_x: set_xreg(dst, ReverseBytes(xreg(src), Reverse64)); break;
    case CLZ_w: set_wreg(dst, CountLeadingZeros(wreg(src), kWRegSize)); break;
    case CLZ_x: set_xreg(dst, CountLeadingZeros(xreg(src), kXRegSize)); break;
    case CLS_w: {
      set_wreg(dst, CountLeadingSignBits(wreg(src), kWRegSize));
      break;
    }
    case CLS_x: {
      set_xreg(dst, CountLeadingSignBits(xreg(src), kXRegSize));
      break;
    }
    default: UNIMPLEMENTED();
  }
}
+
+
+uint64_t Simulator::ReverseBits(uint64_t value, unsigned num_bits) {
+ ASSERT((num_bits == kWRegSize) || (num_bits == kXRegSize));
+ uint64_t result = 0;
+ for (unsigned i = 0; i < num_bits; i++) {
+ result = (result << 1) | (value & 1);
+ value >>= 1;
+ }
+ return result;
+}
+
+
// Permute the bytes of 'value' to implement REV16, REV32/REV (W) and
// REV (X), selected by 'mode'.
uint64_t Simulator::ReverseBytes(uint64_t value, ReverseByteMode mode) {
  // Split the 64-bit value into an 8-bit array, where b[0] is the least
  // significant byte, and b[7] is the most significant.
  uint8_t bytes[8];
  uint64_t mask = 0xff00000000000000UL;
  for (int i = 7; i >= 0; i--) {
    bytes[i] = (value & mask) >> (i * 8);
    mask >>= 8;
  }

  // Permutation tables for REV instructions.
  //  permute_table[Reverse16] is used by REV16_x, REV16_w
  //  permute_table[Reverse32] is used by REV32_x, REV_w
  //  permute_table[Reverse64] is used by REV_x
  ASSERT((Reverse16 == 0) && (Reverse32 == 1) && (Reverse64 == 2));
  static const uint8_t permute_table[3][8] = { {6, 7, 4, 5, 2, 3, 0, 1},
                                               {4, 5, 6, 7, 0, 1, 2, 3},
                                               {0, 1, 2, 3, 4, 5, 6, 7} };
  uint64_t result = 0;
  for (int i = 0; i < 8; i++) {
    result <<= 8;
    result |= bytes[permute_table[mode][i]];
  }
  return result;
}
+
+
// Two-source data processing: SDIV/UDIV and the variable shifts
// (LSLV/LSRV/ASRV/RORV). Division by zero yields 0 and MinInt / -1 yields
// MinInt, matching the non-trapping A-class behaviour noted below.
void Simulator::VisitDataProcessing2Source(Instruction* instr) {
  // TODO(mcapewel) move these to a higher level file, as they are global
  // assumptions.
  ASSERT((static_cast<int32_t>(-1) >> 1) == -1);
  ASSERT((static_cast<uint32_t>(-1) >> 1) == 0x7FFFFFFF);

  Shift shift_op = NO_SHIFT;
  int64_t result = 0;
  switch (instr->Mask(DataProcessing2SourceMask)) {
    case SDIV_w: {
      int32_t rn = wreg(instr->Rn());
      int32_t rm = wreg(instr->Rm());
      // Guard MinInt / -1 explicitly: in C++ it is undefined behaviour.
      if ((rn == kWMinInt) && (rm == -1)) {
        result = kWMinInt;
      } else if (rm == 0) {
        // Division by zero can be trapped, but not on A-class processors.
        result = 0;
      } else {
        result = rn / rm;
      }
      break;
    }
    case SDIV_x: {
      int64_t rn = xreg(instr->Rn());
      int64_t rm = xreg(instr->Rm());
      if ((rn == kXMinInt) && (rm == -1)) {
        result = kXMinInt;
      } else if (rm == 0) {
        // Division by zero can be trapped, but not on A-class processors.
        result = 0;
      } else {
        result = rn / rm;
      }
      break;
    }
    case UDIV_w: {
      uint32_t rn = static_cast<uint32_t>(wreg(instr->Rn()));
      uint32_t rm = static_cast<uint32_t>(wreg(instr->Rm()));
      if (rm == 0) {
        // Division by zero can be trapped, but not on A-class processors.
        result = 0;
      } else {
        result = rn / rm;
      }
      break;
    }
    case UDIV_x: {
      uint64_t rn = static_cast<uint64_t>(xreg(instr->Rn()));
      uint64_t rm = static_cast<uint64_t>(xreg(instr->Rm()));
      if (rm == 0) {
        // Division by zero can be trapped, but not on A-class processors.
        result = 0;
      } else {
        result = rn / rm;
      }
      break;
    }
    case LSLV_w:
    case LSLV_x: shift_op = LSL; break;
    case LSRV_w:
    case LSRV_x: shift_op = LSR; break;
    case ASRV_w:
    case ASRV_x: shift_op = ASR; break;
    case RORV_w:
    case RORV_x: shift_op = ROR; break;
    default: UNIMPLEMENTED();
  }

  unsigned reg_size = instr->SixtyFourBits() ? kXRegSize : kWRegSize;
  if (shift_op != NO_SHIFT) {
    // Shift distance encoded in the least-significant five/six bits of the
    // register.
    int mask = (instr->SixtyFourBits() == 1) ? 0x3f : 0x1f;
    unsigned shift = wreg(instr->Rm()) & mask;
    result = ShiftOperand(reg_size, reg(reg_size, instr->Rn()), shift_op,
                          shift);
  }
  set_reg(reg_size, instr->Rd(), result);
}
+
+
// Return the high 64 bits of the signed 128-bit product u * v.
// The algorithm used is described in section 8.2 of
// Hacker's Delight, by Henry S. Warren, Jr.
// It assumes that a right shift on a signed integer is an arithmetic shift.
static int64_t MultiplyHighSigned(int64_t u, int64_t v) {
  // Split each operand into 32-bit halves: the low halves are unsigned, the
  // high halves keep the operand's sign.
  uint64_t u_lo = u & 0xffffffffL;
  int64_t u_hi = u >> 32;
  uint64_t v_lo = v & 0xffffffffL;
  int64_t v_hi = v >> 32;

  // Accumulate the four partial products, carrying between 32-bit columns.
  uint64_t lo_lo = u_lo * v_lo;
  int64_t cross_a = u_hi * v_lo + (lo_lo >> 32);
  int64_t cross_a_lo = cross_a & 0xffffffffL;
  int64_t cross_a_hi = cross_a >> 32;
  int64_t cross_b = u_lo * v_hi + cross_a_lo;

  return u_hi * v_hi + cross_a_hi + (cross_b >> 32);
}
+
+
// Three-source data processing: multiply-add/subtract (MADD/MSUB), the
// widening 32x32+64 forms (SMADDL/SMSUBL/UMADDL/UMSUBL) and SMULH.
// The W-sized forms compute on the X view; the final reg_size-sized set_reg
// selects the W destination view.
void Simulator::VisitDataProcessing3Source(Instruction* instr) {
  unsigned reg_size = instr->SixtyFourBits() ? kXRegSize : kWRegSize;

  int64_t result = 0;
  // Extract and sign- or zero-extend 32-bit arguments for widening operations.
  uint64_t rn_u32 = reg<uint32_t>(instr->Rn());
  uint64_t rm_u32 = reg<uint32_t>(instr->Rm());
  int64_t rn_s32 = reg<int32_t>(instr->Rn());
  int64_t rm_s32 = reg<int32_t>(instr->Rm());
  switch (instr->Mask(DataProcessing3SourceMask)) {
    case MADD_w:
    case MADD_x:
      result = xreg(instr->Ra()) + (xreg(instr->Rn()) * xreg(instr->Rm()));
      break;
    case MSUB_w:
    case MSUB_x:
      result = xreg(instr->Ra()) - (xreg(instr->Rn()) * xreg(instr->Rm()));
      break;
    case SMADDL_x: result = xreg(instr->Ra()) + (rn_s32 * rm_s32); break;
    case SMSUBL_x: result = xreg(instr->Ra()) - (rn_s32 * rm_s32); break;
    case UMADDL_x: result = xreg(instr->Ra()) + (rn_u32 * rm_u32); break;
    case UMSUBL_x: result = xreg(instr->Ra()) - (rn_u32 * rm_u32); break;
    case SMULH_x:
      // SMULH has no accumulator; Ra must encode the zero register.
      ASSERT(instr->Ra() == kZeroRegCode);
      result = MultiplyHighSigned(xreg(instr->Rn()), xreg(instr->Rm()));
      break;
    default: UNIMPLEMENTED();
  }
  set_reg(reg_size, instr->Rd(), result);
}
+
+
// BFM/SBFM/UBFM: rotate the source right by R, build a mask from the S and R
// immediates, then merge the selected bitfield into either the destination
// (BFM) or zero (SBFM/UBFM), sign-extending the top bits for SBFM.
void Simulator::VisitBitfield(Instruction* instr) {
  unsigned reg_size = instr->SixtyFourBits() ? kXRegSize : kWRegSize;
  int64_t reg_mask = instr->SixtyFourBits() ? kXRegMask : kWRegMask;
  int64_t R = instr->ImmR();
  int64_t S = instr->ImmS();
  int64_t diff = S - R;
  int64_t mask;
  if (diff >= 0) {
    // S >= R: the field is extracted in place; its width is diff + 1 bits.
    mask = diff < reg_size - 1 ? (1L << (diff + 1)) - 1
                               : reg_mask;
  } else {
    // S < R: the field wraps around, so rotate the mask into position.
    mask = ((1L << (S + 1)) - 1);
    mask = (static_cast<uint64_t>(mask) >> R) | (mask << (reg_size - R));
    diff += reg_size;
  }

  // inzero indicates if the extracted bitfield is inserted into the
  // destination register value or in zero.
  // If extend is true, extend the sign of the extracted bitfield.
  bool inzero = false;
  bool extend = false;
  switch (instr->Mask(BitfieldMask)) {
    case BFM_x:
    case BFM_w:
      break;
    case SBFM_x:
    case SBFM_w:
      inzero = true;
      extend = true;
      break;
    case UBFM_x:
    case UBFM_w:
      inzero = true;
      break;
    default:
      UNIMPLEMENTED();
  }

  int64_t dst = inzero ? 0 : reg(reg_size, instr->Rd());
  int64_t src = reg(reg_size, instr->Rn());
  // Rotate source bitfield into place.
  // NOTE(review): when R == 0 this shifts src left by reg_size, which is
  // undefined behaviour in C++ -- confirm R == 0 cannot reach this point, or
  // guard the rotate.
  int64_t result = (static_cast<uint64_t>(src) >> R) | (src << (reg_size - R));
  // Determine the sign extension.
  int64_t topbits = ((1L << (reg_size - diff - 1)) - 1) << (diff + 1);
  int64_t signbits = extend && ((src >> S) & 1) ? topbits : 0;

  // Merge sign extension, dest/zero and bitfield.
  result = signbits | (result & mask) | (dst & ~mask);

  set_reg(reg_size, instr->Rd(), result);
}
+
+
+void Simulator::VisitExtract(Instruction* instr) {
+ unsigned lsb = instr->ImmS();
+ unsigned reg_size = (instr->SixtyFourBits() == 1) ? kXRegSize
+ : kWRegSize;
+ set_reg(reg_size,
+ instr->Rd(),
+ (static_cast<uint64_t>(reg(reg_size, instr->Rm())) >> lsb) |
+ (reg(reg_size, instr->Rn()) << (reg_size - lsb)));
+}
+
+
// FMOV (immediate): move a decoded 8-bit FP immediate into an S or D
// register.
void Simulator::VisitFPImmediate(Instruction* instr) {
  AssertSupportedFPCR();

  unsigned dest = instr->Rd();
  switch (instr->Mask(FPImmediateMask)) {
    case FMOV_s_imm: set_sreg(dest, instr->ImmFP32()); break;
    case FMOV_d_imm: set_dreg(dest, instr->ImmFP64()); break;
    default: UNREACHABLE();
  }
}
+
+
// FP <-> integer register operations: FCVT{A,M,N,Z}{S,U} (FP to integer with
// the rounding mode implied by the opcode), FMOV between FP and integer
// registers (raw bit moves), and SCVTF/UCVTF (integer to FP, rounded with
// the current FPCR mode).
void Simulator::VisitFPIntegerConvert(Instruction* instr) {
  AssertSupportedFPCR();

  unsigned dst = instr->Rd();
  unsigned src = instr->Rn();

  FPRounding round = RMode();

  switch (instr->Mask(FPIntegerConvertMask)) {
    case FCVTAS_ws: set_wreg(dst, FPToInt32(sreg(src), FPTieAway)); break;
    case FCVTAS_xs: set_xreg(dst, FPToInt64(sreg(src), FPTieAway)); break;
    case FCVTAS_wd: set_wreg(dst, FPToInt32(dreg(src), FPTieAway)); break;
    case FCVTAS_xd: set_xreg(dst, FPToInt64(dreg(src), FPTieAway)); break;
    case FCVTAU_ws: set_wreg(dst, FPToUInt32(sreg(src), FPTieAway)); break;
    case FCVTAU_xs: set_xreg(dst, FPToUInt64(sreg(src), FPTieAway)); break;
    case FCVTAU_wd: set_wreg(dst, FPToUInt32(dreg(src), FPTieAway)); break;
    case FCVTAU_xd: set_xreg(dst, FPToUInt64(dreg(src), FPTieAway)); break;
    case FCVTMS_ws:
      set_wreg(dst, FPToInt32(sreg(src), FPNegativeInfinity));
      break;
    case FCVTMS_xs:
      set_xreg(dst, FPToInt64(sreg(src), FPNegativeInfinity));
      break;
    case FCVTMS_wd:
      set_wreg(dst, FPToInt32(dreg(src), FPNegativeInfinity));
      break;
    case FCVTMS_xd:
      set_xreg(dst, FPToInt64(dreg(src), FPNegativeInfinity));
      break;
    case FCVTMU_ws:
      set_wreg(dst, FPToUInt32(sreg(src), FPNegativeInfinity));
      break;
    case FCVTMU_xs:
      set_xreg(dst, FPToUInt64(sreg(src), FPNegativeInfinity));
      break;
    case FCVTMU_wd:
      set_wreg(dst, FPToUInt32(dreg(src), FPNegativeInfinity));
      break;
    case FCVTMU_xd:
      set_xreg(dst, FPToUInt64(dreg(src), FPNegativeInfinity));
      break;
    case FCVTNS_ws: set_wreg(dst, FPToInt32(sreg(src), FPTieEven)); break;
    case FCVTNS_xs: set_xreg(dst, FPToInt64(sreg(src), FPTieEven)); break;
    case FCVTNS_wd: set_wreg(dst, FPToInt32(dreg(src), FPTieEven)); break;
    case FCVTNS_xd: set_xreg(dst, FPToInt64(dreg(src), FPTieEven)); break;
    case FCVTNU_ws: set_wreg(dst, FPToUInt32(sreg(src), FPTieEven)); break;
    case FCVTNU_xs: set_xreg(dst, FPToUInt64(sreg(src), FPTieEven)); break;
    case FCVTNU_wd: set_wreg(dst, FPToUInt32(dreg(src), FPTieEven)); break;
    case FCVTNU_xd: set_xreg(dst, FPToUInt64(dreg(src), FPTieEven)); break;
    case FCVTZS_ws: set_wreg(dst, FPToInt32(sreg(src), FPZero)); break;
    case FCVTZS_xs: set_xreg(dst, FPToInt64(sreg(src), FPZero)); break;
    case FCVTZS_wd: set_wreg(dst, FPToInt32(dreg(src), FPZero)); break;
    case FCVTZS_xd: set_xreg(dst, FPToInt64(dreg(src), FPZero)); break;
    case FCVTZU_ws: set_wreg(dst, FPToUInt32(sreg(src), FPZero)); break;
    case FCVTZU_xs: set_xreg(dst, FPToUInt64(sreg(src), FPZero)); break;
    case FCVTZU_wd: set_wreg(dst, FPToUInt32(dreg(src), FPZero)); break;
    case FCVTZU_xd: set_xreg(dst, FPToUInt64(dreg(src), FPZero)); break;
    case FMOV_ws: set_wreg(dst, sreg_bits(src)); break;
    case FMOV_xd: set_xreg(dst, dreg_bits(src)); break;
    case FMOV_sw: set_sreg_bits(dst, wreg(src)); break;
    case FMOV_dx: set_dreg_bits(dst, xreg(src)); break;

    // A 32-bit input can be handled in the same way as a 64-bit input, since
    // the sign- or zero-extension will not affect the conversion.
    case SCVTF_dx: set_dreg(dst, FixedToDouble(xreg(src), 0, round)); break;
    case SCVTF_dw: set_dreg(dst, FixedToDouble(wreg(src), 0, round)); break;
    case UCVTF_dx: set_dreg(dst, UFixedToDouble(xreg(src), 0, round)); break;
    case UCVTF_dw: {
      set_dreg(dst, UFixedToDouble(reg<uint32_t>(src), 0, round));
      break;
    }
    case SCVTF_sx: set_sreg(dst, FixedToFloat(xreg(src), 0, round)); break;
    case SCVTF_sw: set_sreg(dst, FixedToFloat(wreg(src), 0, round)); break;
    case UCVTF_sx: set_sreg(dst, UFixedToFloat(xreg(src), 0, round)); break;
    case UCVTF_sw: {
      set_sreg(dst, UFixedToFloat(reg<uint32_t>(src), 0, round));
      break;
    }

    default: UNREACHABLE();
  }
}
+
+
// SCVTF/UCVTF (fixed-point): convert a fixed-point integer to FP. The number
// of fractional bits is encoded as 64 - FPScale; rounding uses the current
// FPCR mode.
void Simulator::VisitFPFixedPointConvert(Instruction* instr) {
  AssertSupportedFPCR();

  unsigned dst = instr->Rd();
  unsigned src = instr->Rn();
  int fbits = 64 - instr->FPScale();

  FPRounding round = RMode();

  switch (instr->Mask(FPFixedPointConvertMask)) {
    // A 32-bit input can be handled in the same way as a 64-bit input, since
    // the sign- or zero-extension will not affect the conversion.
    case SCVTF_dx_fixed:
      set_dreg(dst, FixedToDouble(xreg(src), fbits, round));
      break;
    case SCVTF_dw_fixed:
      set_dreg(dst, FixedToDouble(wreg(src), fbits, round));
      break;
    case UCVTF_dx_fixed:
      set_dreg(dst, UFixedToDouble(xreg(src), fbits, round));
      break;
    case UCVTF_dw_fixed: {
      set_dreg(dst,
               UFixedToDouble(reg<uint32_t>(src), fbits, round));
      break;
    }
    case SCVTF_sx_fixed:
      set_sreg(dst, FixedToFloat(xreg(src), fbits, round));
      break;
    case SCVTF_sw_fixed:
      set_sreg(dst, FixedToFloat(wreg(src), fbits, round));
      break;
    case UCVTF_sx_fixed:
      set_sreg(dst, UFixedToFloat(xreg(src), fbits, round));
      break;
    case UCVTF_sw_fixed: {
      set_sreg(dst,
               UFixedToFloat(reg<uint32_t>(src), fbits, round));
      break;
    }
    default: UNREACHABLE();
  }
}
+
+
+int32_t Simulator::FPToInt32(double value, FPRounding rmode) {
+ value = FPRoundInt(value, rmode);
+ if (value >= kWMaxInt) {
+ return kWMaxInt;
+ } else if (value < kWMinInt) {
+ return kWMinInt;
+ }
+ return std::isnan(value) ? 0 : static_cast<int32_t>(value);
+}
+
+
+int64_t Simulator::FPToInt64(double value, FPRounding rmode) {
+ value = FPRoundInt(value, rmode);
+ if (value >= kXMaxInt) {
+ return kXMaxInt;
+ } else if (value < kXMinInt) {
+ return kXMinInt;
+ }
+ return std::isnan(value) ? 0 : static_cast<int64_t>(value);
+}
+
+
+uint32_t Simulator::FPToUInt32(double value, FPRounding rmode) {
+  // Round to an integral value, then convert with saturation to
+  // [0, kWMaxUInt]; NaN converts to zero.
+  value = FPRoundInt(value, rmode);
+  if (std::isnan(value)) {
+    return 0;
+  }
+  if (value >= kWMaxUInt) {
+    return kWMaxUInt;
+  }
+  return (value < 0.0) ? 0 : static_cast<uint32_t>(value);
+}
+
+
+uint64_t Simulator::FPToUInt64(double value, FPRounding rmode) {
+  // Round to an integral value, then convert with saturation to
+  // [0, kXMaxUInt]; NaN converts to zero.
+  value = FPRoundInt(value, rmode);
+  if (std::isnan(value)) {
+    return 0;
+  }
+  if (value >= kXMaxUInt) {
+    return kXMaxUInt;
+  }
+  return (value < 0.0) ? 0 : static_cast<uint64_t>(value);
+}
+
+
+// Simulate FCMP (register and compare-with-zero forms).
+void Simulator::VisitFPCompare(Instruction* instr) {
+  AssertSupportedFPCR();
+
+  // The FP type field selects S registers for FP32, D registers otherwise.
+  unsigned size = (instr->FPType() == FP32) ? kSRegSize : kDRegSize;
+  double lhs = fpreg(size, instr->Rn());
+
+  switch (instr->Mask(FPCompareMask)) {
+    case FCMP_s:
+    case FCMP_d:
+      FPCompare(lhs, fpreg(size, instr->Rm()));
+      break;
+    case FCMP_s_zero:
+    case FCMP_d_zero:
+      FPCompare(lhs, 0.0);
+      break;
+    default: UNIMPLEMENTED();
+  }
+}
+
+
+// Simulate FCCMP (single and double precision): compare and set NZCV when
+// the condition passes, otherwise load NZCV from the immediate field.
+void Simulator::VisitFPConditionalCompare(Instruction* instr) {
+  AssertSupportedFPCR();
+
+  switch (instr->Mask(FPConditionalCompareMask)) {
+    case FCCMP_s:
+    case FCCMP_d: {
+      if (ConditionPassed(static_cast<Condition>(instr->Condition()))) {
+        // If the condition passes, set the status flags to the result of
+        // comparing the operands.
+        unsigned reg_size = instr->FPType() == FP32 ? kSRegSize : kDRegSize;
+        FPCompare(fpreg(reg_size, instr->Rn()), fpreg(reg_size, instr->Rm()));
+      } else {
+        // If the condition fails, set the status flags to the nzcv immediate.
+        nzcv().SetFlags(instr->Nzcv());
+      }
+      break;
+    }
+    default: UNIMPLEMENTED();
+  }
+}
+
+
+// Simulate FCSEL: copy Rn into Rd if the condition passes, otherwise Rm.
+void Simulator::VisitFPConditionalSelect(Instruction* instr) {
+  AssertSupportedFPCR();
+
+  // 'selected' holds the code of the chosen source register, not its value.
+  Instr selected;
+  if (ConditionPassed(static_cast<Condition>(instr->Condition()))) {
+    selected = instr->Rn();
+  } else {
+    selected = instr->Rm();
+  }
+
+  switch (instr->Mask(FPConditionalSelectMask)) {
+    case FCSEL_s: set_sreg(instr->Rd(), sreg(selected)); break;
+    case FCSEL_d: set_dreg(instr->Rd(), dreg(selected)); break;
+    default: UNIMPLEMENTED();
+  }
+}
+
+
+// Simulate the single-source FP data-processing group: register moves,
+// absolute value, negation, square root, round-to-integral (FRINT*) and
+// precision conversion (FCVT).
+void Simulator::VisitFPDataProcessing1Source(Instruction* instr) {
+  AssertSupportedFPCR();
+
+  unsigned fd = instr->Rd();
+  unsigned fn = instr->Rn();
+
+  switch (instr->Mask(FPDataProcessing1SourceMask)) {
+    case FMOV_s: set_sreg(fd, sreg(fn)); break;
+    case FMOV_d: set_dreg(fd, dreg(fn)); break;
+    case FABS_s: set_sreg(fd, std::fabs(sreg(fn))); break;
+    case FABS_d: set_dreg(fd, std::fabs(dreg(fn))); break;
+    case FNEG_s: set_sreg(fd, -sreg(fn)); break;
+    case FNEG_d: set_dreg(fd, -dreg(fn)); break;
+    case FSQRT_s: set_sreg(fd, std::sqrt(sreg(fn))); break;
+    case FSQRT_d: set_dreg(fd, std::sqrt(dreg(fn))); break;
+    // The FRINT* variants differ only in the rounding mode applied.
+    case FRINTA_s: set_sreg(fd, FPRoundInt(sreg(fn), FPTieAway)); break;
+    case FRINTA_d: set_dreg(fd, FPRoundInt(dreg(fn), FPTieAway)); break;
+    case FRINTN_s: set_sreg(fd, FPRoundInt(sreg(fn), FPTieEven)); break;
+    case FRINTN_d: set_dreg(fd, FPRoundInt(dreg(fn), FPTieEven)); break;
+    case FRINTZ_s: set_sreg(fd, FPRoundInt(sreg(fn), FPZero)); break;
+    case FRINTZ_d: set_dreg(fd, FPRoundInt(dreg(fn), FPZero)); break;
+    case FCVT_ds: set_dreg(fd, FPToDouble(sreg(fn))); break;
+    case FCVT_sd: set_sreg(fd, FPToFloat(dreg(fn), FPTieEven)); break;
+    default: UNIMPLEMENTED();
+  }
+}
+
+
+// Assemble the specified IEEE-754 components into the target type and apply
+// appropriate rounding.
+// sign: 0 = positive, 1 = negative
+// exponent: Unbiased IEEE-754 exponent.
+// mantissa: The mantissa of the input. The top bit (which is not encoded for
+// normal IEEE-754 values) must not be omitted. This bit has the
+// value 'pow(2, exponent)'.
+//
+// The input value is assumed to be a normalized value. That is, the input may
+// not be infinity or NaN. If the source value is subnormal, it must be
+// normalized before calling this function such that the highest set bit in the
+// mantissa has the value 'pow(2, exponent)'.
+//
+// Callers should use FPRoundToFloat or FPRoundToDouble directly, rather than
+// calling a templated FPRound.
+template <class T, int ebits, int mbits>
+static T FPRound(int64_t sign, int64_t exponent, uint64_t mantissa,
+                 FPRounding round_mode) {
+  ASSERT((sign == 0) || (sign == 1));
+
+  // Only the FPTieEven rounding mode is implemented.
+  ASSERT(round_mode == FPTieEven);
+  USE(round_mode);
+
+  // Rounding can promote subnormals to normals, and normals to infinities. For
+  // example, a double with exponent 127 (FLT_MAX_EXP) would appear to be
+  // encodable as a float, but rounding based on the low-order mantissa bits
+  // could make it overflow. With ties-to-even rounding, this value would become
+  // an infinity.
+
+  // ---- Rounding Method ----
+  //
+  // The exponent is irrelevant in the rounding operation, so we treat the
+  // lowest-order bit that will fit into the result ('onebit') as having
+  // the value '1'. Similarly, the highest-order bit that won't fit into
+  // the result ('halfbit') has the value '0.5'. The 'point' sits between
+  // 'onebit' and 'halfbit':
+  //
+  //            These bits fit into the result.
+  //               |---------------------|
+  //  mantissa = 0bxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
+  //                                     ||
+  //                                    / |
+  //                                   /  halfbit
+  //                               onebit
+  //
+  // For subnormal outputs, the range of representable bits is smaller and
+  // the position of onebit and halfbit depends on the exponent of the
+  // input, but the method is otherwise similar.
+  //
+  //   onebit(frac)
+  //     |
+  //     | halfbit(frac)          halfbit(adjusted)
+  //     | /                      /
+  //     | |                      |
+  //  0b00.0 (exact)      -> 0b00.0 (exact)                    -> 0b00
+  //  0b00.0...           -> 0b00.0...                         -> 0b00
+  //  0b00.1 (exact)      -> 0b00.0111..111                    -> 0b00
+  //  0b00.1...           -> 0b00.1...                         -> 0b01
+  //  0b01.0 (exact)      -> 0b01.0 (exact)                    -> 0b01
+  //  0b01.0...           -> 0b01.0...                         -> 0b01
+  //  0b01.1 (exact)      -> 0b01.1 (exact)                    -> 0b10
+  //  0b01.1...           -> 0b01.1...                         -> 0b10
+  //  0b10.0 (exact)      -> 0b10.0 (exact)                    -> 0b10
+  //  0b10.0...           -> 0b10.0...                         -> 0b10
+  //  0b10.1 (exact)      -> 0b10.0111..111                    -> 0b10
+  //  0b10.1...           -> 0b10.1...                         -> 0b11
+  //  0b11.0 (exact)      -> 0b11.0 (exact)                    -> 0b11
+  //  ...                   /                                     |
+  //                       /                                      |
+  //                                                              /
+  //  adjusted = frac - (halfbit(mantissa) & ~onebit(frac));     /
+  //
+  //  mantissa = (mantissa >> shift) + halfbit(adjusted);
+
+  static const int mantissa_offset = 0;
+  static const int exponent_offset = mantissa_offset + mbits;
+  static const int sign_offset = exponent_offset + ebits;
+  STATIC_ASSERT(sign_offset == (sizeof(T) * kByteSize - 1));
+
+  // Bail out early for zero inputs.
+  if (mantissa == 0) {
+    return sign << sign_offset;
+  }
+
+  // If all bits in the exponent are set, the value is infinite or NaN.
+  // This is true for all binary IEEE-754 formats.
+  static const int infinite_exponent = (1 << ebits) - 1;
+  static const int max_normal_exponent = infinite_exponent - 1;
+
+  // Apply the exponent bias to encode it for the result. Doing this early makes
+  // it easy to detect values that will be infinite or subnormal.
+  exponent += max_normal_exponent >> 1;
+
+  if (exponent > max_normal_exponent) {
+    // Overflow: The input is too large for the result type to represent. The
+    // FPTieEven rounding mode handles overflows using infinities.
+    exponent = infinite_exponent;
+    mantissa = 0;
+    return (sign << sign_offset) |
+           (exponent << exponent_offset) |
+           (mantissa << mantissa_offset);
+  }
+
+  // Calculate the shift required to move the top mantissa bit to the proper
+  // place in the destination type.
+  const int highest_significant_bit = 63 - CountLeadingZeros(mantissa, 64);
+  int shift = highest_significant_bit - mbits;
+
+  if (exponent <= 0) {
+    // The output will be subnormal (before rounding).
+
+    // For subnormal outputs, the shift must be adjusted by the exponent. The +1
+    // is necessary because the exponent of a subnormal value (encoded as 0) is
+    // the same as the exponent of the smallest normal value (encoded as 1).
+    shift += -exponent + 1;
+
+    // Handle inputs that would produce a zero output.
+    //
+    // Shifts higher than highest_significant_bit+1 will always produce a zero
+    // result. A shift of exactly highest_significant_bit+1 might produce a
+    // non-zero result after rounding.
+    if (shift > (highest_significant_bit + 1)) {
+      // The result will always be +/-0.0.
+      return sign << sign_offset;
+    }
+
+    // Properly encode the exponent for a subnormal output.
+    exponent = 0;
+  } else {
+    // Clear the topmost mantissa bit, since this is not encoded in IEEE-754
+    // normal values.
+    // Use a constant that is 64 bits wide on every platform: 'unsigned long'
+    // is only 32 bits on LLP64 targets (e.g. Win64), so '1UL << bit' would be
+    // undefined behaviour for bit positions >= 32.
+    mantissa &= ~(1ULL << highest_significant_bit);
+  }
+
+  if (shift > 0) {
+    // We have to shift the mantissa to the right. Some precision is lost, so we
+    // need to apply rounding.
+    uint64_t onebit_mantissa = (mantissa >> (shift)) & 1;
+    uint64_t halfbit_mantissa = (mantissa >> (shift-1)) & 1;
+    uint64_t adjusted = mantissa - (halfbit_mantissa & ~onebit_mantissa);
+    T halfbit_adjusted = (adjusted >> (shift-1)) & 1;
+
+    T result = (sign << sign_offset) |
+               (exponent << exponent_offset) |
+               ((mantissa >> shift) << mantissa_offset);
+
+    // A very large mantissa can overflow during rounding. If this happens, the
+    // exponent should be incremented and the mantissa set to 1.0 (encoded as
+    // 0). Applying halfbit_adjusted after assembling the float has the nice
+    // side-effect that this case is handled for free.
+    //
+    // This also handles cases where a very large finite value overflows to
+    // infinity, or where a very large subnormal value overflows to become
+    // normal.
+    return result + halfbit_adjusted;
+  } else {
+    // We have to shift the mantissa to the left (or not at all). The input
+    // mantissa is exactly representable in the output mantissa, so apply no
+    // rounding correction.
+    return (sign << sign_offset) |
+           (exponent << exponent_offset) |
+           ((mantissa << -shift) << mantissa_offset);
+  }
+}
+
+
+// See FPRound for a description of this function.
+static inline double FPRoundToDouble(int64_t sign, int64_t exponent,
+                                     uint64_t mantissa, FPRounding round_mode) {
+  // Assemble a double-format bit pattern, then reinterpret it as a double.
+  return rawbits_to_double(
+      FPRound<int64_t, kDoubleExponentBits, kDoubleMantissaBits>(
+          sign, exponent, mantissa, round_mode));
+}
+
+
+// See FPRound for a description of this function.
+static inline float FPRoundToFloat(int64_t sign, int64_t exponent,
+                                   uint64_t mantissa, FPRounding round_mode) {
+  // Assemble a float-format bit pattern, then reinterpret it as a float.
+  return rawbits_to_float(
+      FPRound<int32_t, kFloatExponentBits, kFloatMantissaBits>(
+          sign, exponent, mantissa, round_mode));
+}
+
+
+double Simulator::FixedToDouble(int64_t src, int fbits, FPRounding round) {
+  // Route negative inputs through the unsigned variant, negating on the way
+  // in and out. This works for all negative values, including INT64_MIN.
+  if (src < 0) {
+    return -UFixedToDouble(-src, fbits, round);
+  }
+  return UFixedToDouble(src, fbits, round);
+}
+
+
+double Simulator::UFixedToDouble(uint64_t src, int fbits, FPRounding round) {
+  // Zero is handled up front: it has no highest set bit, and the result is
+  // effectively subnormal (encoded exponent 0, no implicit 1 bit).
+  if (src == 0) return 0.0;
+
+  // The highest set bit of src carries the value 2^(msb - fbits) once the
+  // implied fractional bits are accounted for.
+  const int msb = 63 - CountLeadingZeros(src, 64);
+  return FPRoundToDouble(0, msb - fbits, src, round);
+}
+
+
+float Simulator::FixedToFloat(int64_t src, int fbits, FPRounding round) {
+  // Route negative inputs through the unsigned variant, negating on the way
+  // in and out. This works for all negative values, including INT64_MIN.
+  if (src < 0) {
+    return -UFixedToFloat(-src, fbits, round);
+  }
+  return UFixedToFloat(src, fbits, round);
+}
+
+
+float Simulator::UFixedToFloat(uint64_t src, int fbits, FPRounding round) {
+  // Zero is handled up front: it has no highest set bit, and the result is
+  // effectively subnormal (encoded exponent 0, no implicit 1 bit).
+  if (src == 0) return 0.0f;
+
+  // The highest set bit of src carries the value 2^(msb - fbits) once the
+  // implied fractional bits are accounted for.
+  const int msb = 63 - CountLeadingZeros(src, 64);
+  return FPRoundToFloat(0, msb - fbits, src, round);
+}
+
+
+// Round 'value' to an integral value (still represented as a double) using
+// the requested rounding mode. Zero, infinities and NaN are passed through
+// unchanged.
+double Simulator::FPRoundInt(double value, FPRounding round_mode) {
+  if ((value == 0.0) || (value == kFP64PositiveInfinity) ||
+      (value == kFP64NegativeInfinity) || std::isnan(value)) {
+    return value;
+  }
+
+  // Start from floor(value); 'error' is the discarded fraction in [0, 1).
+  double int_result = floor(value);
+  double error = value - int_result;
+  switch (round_mode) {
+    case FPTieAway: {
+      // If the error is greater than 0.5, or is equal to 0.5 and the integer
+      // result is positive, round up.
+      if ((error > 0.5) || ((error == 0.5) && (int_result >= 0.0))) {
+        int_result++;
+      }
+      break;
+    }
+    case FPTieEven: {
+      // If the error is greater than 0.5, or is equal to 0.5 and the integer
+      // result is odd, round up.
+      if ((error > 0.5) ||
+          ((error == 0.5) && (fmod(int_result, 2) != 0))) {
+        int_result++;
+      }
+      break;
+    }
+    case FPZero: {
+      // If value > 0 then we take floor(value)
+      // otherwise, ceil(value)
+      if (value < 0) {
+        int_result = ceil(value);
+      }
+      break;
+    }
+    case FPNegativeInfinity: {
+      // We always use floor(value).
+      break;
+    }
+    default: UNIMPLEMENTED();
+  }
+  return int_result;
+}
+
+
+// Widen a float to a double as the processor would, preserving the sign and
+// payload of NaN inputs (quieting them) and passing all other values through
+// a standard cast.
+double Simulator::FPToDouble(float value) {
+  switch (std::fpclassify(value)) {
+    case FP_NAN: {
+      // Convert NaNs as the processor would, assuming that FPCR.DN (default
+      // NaN) is not set:
+      //  - The sign is propagated.
+      //  - The payload (mantissa) is transferred entirely, except that the top
+      //    bit is forced to '1', making the result a quiet NaN. The unused
+      //    (low-order) payload bits are set to 0.
+      uint32_t raw = float_to_rawbits(value);
+
+      uint64_t sign = raw >> 31;
+      uint64_t exponent = (1 << 11) - 1;
+      uint64_t payload = unsigned_bitextract_64(21, 0, raw);
+      payload <<= (52 - 23);  // The unused low-order bits should be 0.
+      // Use a 64-bit-wide constant: 'long' is only 32 bits on LLP64 targets
+      // (e.g. Win64), so '1L << 51' would be undefined behaviour there.
+      payload |= (1ULL << 51);  // Force a quiet NaN.
+
+      return rawbits_to_double((sign << 63) | (exponent << 52) | payload);
+    }
+
+    case FP_ZERO:
+    case FP_NORMAL:
+    case FP_SUBNORMAL:
+    case FP_INFINITE: {
+      // All other inputs are preserved in a standard cast, because every value
+      // representable using an IEEE-754 float is also representable using an
+      // IEEE-754 double.
+      return static_cast<double>(value);
+    }
+  }
+
+  UNREACHABLE();
+  return static_cast<double>(value);
+}
+
+
+// Narrow a double to a float as the processor would (FPTieEven rounding
+// only), preserving NaN sign/payload (quieting), passing zeros and
+// infinities through a cast, and rounding normal/subnormal values.
+float Simulator::FPToFloat(double value, FPRounding round_mode) {
+  // Only the FPTieEven rounding mode is implemented.
+  ASSERT(round_mode == FPTieEven);
+  USE(round_mode);
+
+  switch (std::fpclassify(value)) {
+    case FP_NAN: {
+      // Convert NaNs as the processor would, assuming that FPCR.DN (default
+      // NaN) is not set:
+      //  - The sign is propagated.
+      //  - The payload (mantissa) is transferred as much as possible, except
+      //    that the top bit is forced to '1', making the result a quiet NaN.
+      uint64_t raw = double_to_rawbits(value);
+
+      uint32_t sign = raw >> 63;
+      uint32_t exponent = (1 << 8) - 1;
+      uint32_t payload = unsigned_bitextract_64(50, 52 - 23, raw);
+      payload |= (1 << 22);   // Force a quiet NaN.
+
+      return rawbits_to_float((sign << 31) | (exponent << 23) | payload);
+    }
+
+    case FP_ZERO:
+    case FP_INFINITE: {
+      // In a C++ cast, any value representable in the target type will be
+      // unchanged. This is always the case for +/-0.0 and infinities.
+      return static_cast<float>(value);
+    }
+
+    case FP_NORMAL:
+    case FP_SUBNORMAL: {
+      // Convert double-to-float as the processor would, assuming that FPCR.FZ
+      // (flush-to-zero) is not set.
+      uint64_t raw = double_to_rawbits(value);
+      // Extract the IEEE-754 double components.
+      uint32_t sign = raw >> 63;
+      // Extract the exponent and remove the IEEE-754 encoding bias.
+      int32_t exponent = unsigned_bitextract_64(62, 52, raw) - 1023;
+      // Extract the mantissa and add the implicit '1' bit.
+      uint64_t mantissa = unsigned_bitextract_64(51, 0, raw);
+      if (std::fpclassify(value) == FP_NORMAL) {
+        // Use a 64-bit-wide constant: 'unsigned long' is only 32 bits on
+        // LLP64 targets (e.g. Win64), so '1UL << 52' would be undefined
+        // behaviour there.
+        mantissa |= (1ULL << 52);
+      }
+      return FPRoundToFloat(sign, exponent, mantissa, round_mode);
+    }
+  }
+
+  UNREACHABLE();
+  return value;
+}
+
+
+// Simulate the two-source FP data-processing group: arithmetic, division
+// and the four min/max variants, for both S and D registers.
+void Simulator::VisitFPDataProcessing2Source(Instruction* instr) {
+  AssertSupportedFPCR();
+
+  unsigned fd = instr->Rd();
+  unsigned fn = instr->Rn();
+  unsigned fm = instr->Rm();
+
+  switch (instr->Mask(FPDataProcessing2SourceMask)) {
+    case FADD_s: set_sreg(fd, sreg(fn) + sreg(fm)); break;
+    case FADD_d: set_dreg(fd, dreg(fn) + dreg(fm)); break;
+    case FSUB_s: set_sreg(fd, sreg(fn) - sreg(fm)); break;
+    case FSUB_d: set_dreg(fd, dreg(fn) - dreg(fm)); break;
+    case FMUL_s: set_sreg(fd, sreg(fn) * sreg(fm)); break;
+    case FMUL_d: set_dreg(fd, dreg(fn) * dreg(fm)); break;
+    case FDIV_s: set_sreg(fd, sreg(fn) / sreg(fm)); break;
+    case FDIV_d: set_dreg(fd, dreg(fn) / dreg(fm)); break;
+    case FMAX_s: set_sreg(fd, FPMax(sreg(fn), sreg(fm))); break;
+    case FMAX_d: set_dreg(fd, FPMax(dreg(fn), dreg(fm))); break;
+    case FMIN_s: set_sreg(fd, FPMin(sreg(fn), sreg(fm))); break;
+    case FMIN_d: set_dreg(fd, FPMin(dreg(fn), dreg(fm))); break;
+    case FMAXNM_s: set_sreg(fd, FPMaxNM(sreg(fn), sreg(fm))); break;
+    case FMAXNM_d: set_dreg(fd, FPMaxNM(dreg(fn), dreg(fm))); break;
+    case FMINNM_s: set_sreg(fd, FPMinNM(sreg(fn), sreg(fm))); break;
+    case FMINNM_d: set_dreg(fd, FPMinNM(dreg(fn), dreg(fm))); break;
+    default: UNIMPLEMENTED();
+  }
+}
+
+
+// Simulate the three-source FP data-processing group: fused
+// multiply-accumulate (FMADD/FMSUB) and their negated forms.
+void Simulator::VisitFPDataProcessing3Source(Instruction* instr) {
+  AssertSupportedFPCR();
+
+  unsigned fd = instr->Rd();
+  unsigned fn = instr->Rn();
+  unsigned fm = instr->Rm();
+  unsigned fa = instr->Ra();
+
+  // The C99 (and C++11) fma function performs a fused multiply-accumulate.
+  switch (instr->Mask(FPDataProcessing3SourceMask)) {
+    // fd = fa +/- (fn * fm)
+    case FMADD_s: set_sreg(fd, fmaf(sreg(fn), sreg(fm), sreg(fa))); break;
+    case FMSUB_s: set_sreg(fd, fmaf(-sreg(fn), sreg(fm), sreg(fa))); break;
+    case FMADD_d: set_dreg(fd, fma(dreg(fn), dreg(fm), dreg(fa))); break;
+    case FMSUB_d: set_dreg(fd, fma(-dreg(fn), dreg(fm), dreg(fa))); break;
+    // Variants of the above where the result is negated.
+    case FNMADD_s: set_sreg(fd, -fmaf(sreg(fn), sreg(fm), sreg(fa))); break;
+    case FNMSUB_s: set_sreg(fd, -fmaf(-sreg(fn), sreg(fm), sreg(fa))); break;
+    case FNMADD_d: set_dreg(fd, -fma(dreg(fn), dreg(fm), dreg(fa))); break;
+    case FNMSUB_d: set_dreg(fd, -fma(-dreg(fn), dreg(fm), dreg(fa))); break;
+    default: UNIMPLEMENTED();
+  }
+}
+
+
+// Return the larger of a and b. NaN operands take priority over numbers,
+// with signalling NaNs checked before quiet ones; the order of these tests
+// determines which NaN is propagated when both operands are NaN.
+template <typename T>
+T Simulator::FPMax(T a, T b) {
+  if (IsSignallingNaN(a)) {
+    return a;
+  } else if (IsSignallingNaN(b)) {
+    return b;
+  } else if (std::isnan(a)) {
+    ASSERT(IsQuietNaN(a));
+    return a;
+  } else if (std::isnan(b)) {
+    ASSERT(IsQuietNaN(b));
+    return b;
+  }
+
+  // copysign is used to distinguish +0.0 from -0.0, which compare equal.
+  if ((a == 0.0) && (b == 0.0) &&
+      (copysign(1.0, a) != copysign(1.0, b))) {
+    // a and b are zero, and the sign differs: return +0.0.
+    return 0.0;
+  } else {
+    return (a > b) ? a : b;
+  }
+}
+
+
+template <typename T>
+T Simulator::FPMaxNM(T a, T b) {
+  // FMAXNM semantics: a quiet NaN on exactly one side is treated as
+  // "missing" and replaced with -infinity so the numeric operand wins.
+  const bool a_quiet = IsQuietNaN(a);
+  const bool b_quiet = IsQuietNaN(b);
+  if (a_quiet && !b_quiet) {
+    a = kFP64NegativeInfinity;
+  } else if (b_quiet && !a_quiet) {
+    b = kFP64NegativeInfinity;
+  }
+  return FPMax(a, b);
+}
+
+// Return the smaller of a and b. NaN operands take priority over numbers,
+// with signalling NaNs checked before quiet ones; the order of these tests
+// determines which NaN is propagated when both operands are NaN.
+template <typename T>
+T Simulator::FPMin(T a, T b) {
+  if (IsSignallingNaN(a)) {
+    return a;
+  } else if (IsSignallingNaN(b)) {
+    return b;
+  } else if (std::isnan(a)) {
+    ASSERT(IsQuietNaN(a));
+    return a;
+  } else if (std::isnan(b)) {
+    ASSERT(IsQuietNaN(b));
+    return b;
+  }
+
+  // copysign is used to distinguish +0.0 from -0.0, which compare equal.
+  if ((a == 0.0) && (b == 0.0) &&
+      (copysign(1.0, a) != copysign(1.0, b))) {
+    // a and b are zero, and the sign differs: return -0.0.
+    return -0.0;
+  } else {
+    return (a < b) ? a : b;
+  }
+}
+
+
+template <typename T>
+T Simulator::FPMinNM(T a, T b) {
+  // FMINNM semantics: a quiet NaN on exactly one side is treated as
+  // "missing" and replaced with +infinity so the numeric operand wins.
+  const bool a_quiet = IsQuietNaN(a);
+  const bool b_quiet = IsQuietNaN(b);
+  if (a_quiet && !b_quiet) {
+    a = kFP64PositiveInfinity;
+  } else if (b_quiet && !a_quiet) {
+    b = kFP64PositiveInfinity;
+  }
+  return FPMin(a, b);
+}
+
+
+// Simulate the System instruction group: MRS/MSR for NZCV and FPCR, the
+// HINT group (NOP only) and memory barriers.
+void Simulator::VisitSystem(Instruction* instr) {
+  // Some system instructions hijack their Op and Cp fields to represent a
+  // range of immediates instead of indicating a different instruction. This
+  // makes the decoding tricky.
+  if (instr->Mask(SystemSysRegFMask) == SystemSysRegFixed) {
+    switch (instr->Mask(SystemSysRegMask)) {
+      case MRS: {
+        switch (instr->ImmSystemRegister()) {
+          case NZCV: set_xreg(instr->Rt(), nzcv().RawValue()); break;
+          case FPCR: set_xreg(instr->Rt(), fpcr().RawValue()); break;
+          default: UNIMPLEMENTED();
+        }
+        break;
+      }
+      case MSR: {
+        switch (instr->ImmSystemRegister()) {
+          case NZCV: nzcv().SetRawValue(xreg(instr->Rt())); break;
+          case FPCR: fpcr().SetRawValue(xreg(instr->Rt())); break;
+          default: UNIMPLEMENTED();
+        }
+        break;
+      }
+    }
+  } else if (instr->Mask(SystemHintFMask) == SystemHintFixed) {
+    ASSERT(instr->Mask(SystemHintMask) == HINT);
+    switch (instr->ImmHint()) {
+      case NOP: break;
+      default: UNIMPLEMENTED();
+    }
+  } else if (instr->Mask(MemBarrierFMask) == MemBarrierFixed) {
+    // Memory barrier instructions map to a full host memory barrier.
+    __sync_synchronize();
+  } else {
+    UNIMPLEMENTED();
+  }
+}
+
+
+// Parse 'desc' (a debugger token) as a register name, a "0x"-prefixed hex
+// literal, or a decimal literal, and store the result in *value. Returns
+// true on success.
+bool Simulator::GetValue(const char* desc, int64_t* value) {
+  int regnum = CodeFromName(desc);
+  if (regnum >= 0) {
+    unsigned code = regnum;
+    if (code == kZeroRegCode) {
+      // Catch the zero register and return 0.
+      *value = 0;
+      return true;
+    } else if (code == kSPRegInternalCode) {
+      // Translate the stack pointer code to 31, for Reg31IsStackPointer.
+      code = 31;
+    }
+    // 'w'-prefixed names read the 32-bit view; everything else reads 64 bits.
+    if (desc[0] == 'w') {
+      *value = wreg(code, Reg31IsStackPointer);
+    } else {
+      *value = xreg(code, Reg31IsStackPointer);
+    }
+    return true;
+  } else if (strncmp(desc, "0x", 2) == 0) {
+    return SScanF(desc + 2, "%" SCNx64,
+                  reinterpret_cast<uint64_t*>(value)) == 1;
+  } else {
+    return SScanF(desc, "%" SCNu64,
+                  reinterpret_cast<uint64_t*>(value)) == 1;
+  }
+}
+
+
+// Print the register named by 'desc' (with ANSI colouring when
+// FLAG_log_colour is set). Returns false if 'desc' is not a register name.
+bool Simulator::PrintValue(const char* desc) {
+  // Define some colour codes to use for the register dump.
+  // TODO(jbramley): Find a more elegant way of defining these.
+  char const * const clr_normal     = FLAG_log_colour ? "\033[m" : "";
+  char const * const clr_reg_name   = FLAG_log_colour ? "\033[1;34m" : "";
+  char const * const clr_reg_value  = FLAG_log_colour ? "\033[1;36m" : "";
+  char const * const clr_fpreg_name  = FLAG_log_colour ? "\033[1;33m" : "";
+  char const * const clr_fpreg_value = FLAG_log_colour ? "\033[1;35m" : "";
+
+  // The stack pointer aliases are handled specially because CodeFromName
+  // returns the internal SP code for them rather than a plain register index.
+  if (strcmp(desc, "csp") == 0) {
+    ASSERT(CodeFromName(desc) == static_cast<int>(kSPRegInternalCode));
+    PrintF("%s csp:%s 0x%016" PRIx64 "%s\n",
+        clr_reg_name, clr_reg_value, xreg(31, Reg31IsStackPointer), clr_normal);
+    return true;
+  } else if (strcmp(desc, "wcsp") == 0) {
+    ASSERT(CodeFromName(desc) == static_cast<int>(kSPRegInternalCode));
+    PrintF("%s wcsp:%s 0x%08" PRIx32 "%s\n",
+        clr_reg_name, clr_reg_value, wreg(31, Reg31IsStackPointer), clr_normal);
+    return true;
+  }
+
+  int i = CodeFromName(desc);
+  if (i == -1) {
+    return false;
+  }
+  ASSERT(i >= 0);
+
+  // The first character of the name selects which view of the register to
+  // print: v (raw FP bits plus both FP views), d, s, w, or x (the default).
+  if (desc[0] == 'v') {
+    PrintF("%s %s:%s 0x%016" PRIx64 "%s (%s%s:%s %g%s %s:%s %g%s)\n",
+        clr_fpreg_name, VRegNameForCode(i),
+        clr_fpreg_value, double_to_rawbits(dreg(i)),
+        clr_normal,
+        clr_fpreg_name, DRegNameForCode(i),
+        clr_fpreg_value, dreg(i),
+        clr_fpreg_name, SRegNameForCode(i),
+        clr_fpreg_value, sreg(i),
+        clr_normal);
+    return true;
+  } else if (desc[0] == 'd') {
+    PrintF("%s %s:%s %g%s\n",
+        clr_fpreg_name, DRegNameForCode(i),
+        clr_fpreg_value, dreg(i),
+        clr_normal);
+    return true;
+  } else if (desc[0] == 's') {
+    PrintF("%s %s:%s %g%s\n",
+        clr_fpreg_name, SRegNameForCode(i),
+        clr_fpreg_value, sreg(i),
+        clr_normal);
+    return true;
+  } else if (desc[0] == 'w') {
+    PrintF("%s %s:%s 0x%08" PRIx32 "%s\n",
+        clr_reg_name, WRegNameForCode(i), clr_reg_value, wreg(i), clr_normal);
+    return true;
+  } else {
+    // X register names have a wide variety of starting characters, but anything
+    // else will be an X register.
+    PrintF("%s %s:%s 0x%016" PRIx64 "%s\n",
+        clr_reg_name, XRegNameForCode(i), clr_reg_value, xreg(i), clr_normal);
+    return true;
+  }
+}
+
+
+// Interactive debugger shell: disassembles the next instruction, reads
+// commands from stdin at a "sim> " prompt, and executes them until the user
+// resumes execution (continue/next) or input ends.
+void Simulator::Debug() {
+#define COMMAND_SIZE 63
+#define ARG_SIZE 255
+
+#define STR(a) #a
+#define XSTR(a) STR(a)
+
+  char cmd[COMMAND_SIZE + 1];
+  char arg1[ARG_SIZE + 1];
+  char arg2[ARG_SIZE + 1];
+  char* argv[3] = { cmd, arg1, arg2 };
+
+  // Make sure to have a proper terminating character if reaching the limit.
+  cmd[COMMAND_SIZE] = 0;
+  arg1[ARG_SIZE] = 0;
+  arg2[ARG_SIZE] = 0;
+
+  bool done = false;
+  // NOTE(review): 'cleared_log_disasm_bit' is never set to true anywhere in
+  // this function, so the restore at the bottom of the loop is currently
+  // unreachable -- confirm intent against the base revision of this patch.
+  bool cleared_log_disasm_bit = false;
+
+  while (!done) {
+    // Disassemble the next instruction to execute before doing anything else.
+    PrintInstructionsAt(pc_, 1);
+    // Read the command line.
+    char* line = ReadLine("sim> ");
+    if (line == NULL) {
+      break;
+    } else {
+      // Repeat last command by default.
+      char* last_input = last_debugger_input();
+      if (strcmp(line, "\n") == 0 && (last_input != NULL)) {
+        DeleteArray(line);
+        line = last_input;
+      } else {
+        // Update the latest command ran
+        set_last_debugger_input(line);
+      }
+
+      // Use sscanf to parse the individual parts of the command line. At the
+      // moment no command expects more than two parameters.
+      int argc = SScanF(line,
+                        "%" XSTR(COMMAND_SIZE) "s "
+                        "%" XSTR(ARG_SIZE) "s "
+                        "%" XSTR(ARG_SIZE) "s",
+                        cmd, arg1, arg2);
+
+      // stepi / si ------------------------------------------------------------
+      if ((strcmp(cmd, "si") == 0) || (strcmp(cmd, "stepi") == 0)) {
+        // We are about to execute instructions, after which by default we
+        // should increment the pc_. If it was set when reaching this debug
+        // instruction, it has not been cleared because this instruction has not
+        // completed yet. So clear it manually.
+        pc_modified_ = false;
+
+        if (argc == 1) {
+          ExecuteInstruction();
+        } else {
+          int64_t number_of_instructions_to_execute = 1;
+          GetValue(arg1, &number_of_instructions_to_execute);
+
+          // Trace each instruction of a multi-step so the user can follow it.
+          set_log_parameters(log_parameters() | LOG_DISASM);
+          while (number_of_instructions_to_execute-- > 0) {
+            ExecuteInstruction();
+          }
+          set_log_parameters(log_parameters() & ~LOG_DISASM);
+          PrintF("\n");
+        }
+
+        // If it was necessary, the pc has already been updated or incremented
+        // when executing the instruction. So we do not want it to be updated
+        // again. It will be cleared when exiting.
+        pc_modified_ = true;
+
+      // next / n --------------------------------------------------------------
+      } else if ((strcmp(cmd, "next") == 0) || (strcmp(cmd, "n") == 0)) {
+        // Tell the simulator to break after the next executed BL.
+        break_on_next_ = true;
+        // Continue.
+        done = true;
+
+      // continue / cont / c ---------------------------------------------------
+      } else if ((strcmp(cmd, "continue") == 0) ||
+                 (strcmp(cmd, "cont") == 0) ||
+                 (strcmp(cmd, "c") == 0)) {
+        // Leave the debugger shell.
+        done = true;
+
+      // disassemble / disasm / di ---------------------------------------------
+      } else if (strcmp(cmd, "disassemble") == 0 ||
+                 strcmp(cmd, "disasm") == 0 ||
+                 strcmp(cmd, "di") == 0) {
+        int64_t n_of_instrs_to_disasm = 10;                // default value.
+        int64_t address = reinterpret_cast<int64_t>(pc_);  // default value.
+        if (argc >= 2) {  // disasm <n of instrs>
+          GetValue(arg1, &n_of_instrs_to_disasm);
+        }
+        if (argc >= 3) {  // disasm <n of instrs> <address>
+          GetValue(arg2, &address);
+        }
+
+        // Disassemble.
+        PrintInstructionsAt(reinterpret_cast<Instruction*>(address),
+                            n_of_instrs_to_disasm);
+        PrintF("\n");
+
+      // print / p -------------------------------------------------------------
+      } else if ((strcmp(cmd, "print") == 0) || (strcmp(cmd, "p") == 0)) {
+        if (argc == 2) {
+          if (strcmp(arg1, "all") == 0) {
+            // TODO(all): better support for printing in the debugger.
+            PrintRegisters(true);
+            PrintFPRegisters(true);
+          } else {
+            if (!PrintValue(arg1)) {
+              PrintF("%s unrecognized\n", arg1);
+            }
+          }
+        } else {
+          PrintF(
+            "print <register>\n"
+            "    Print the content of a register. (alias 'p')\n"
+            "    'print all' will print all registers.\n"
+            "    Use 'printobject' to get more details about the value.\n");
+        }
+
+      // printobject / po ------------------------------------------------------
+      } else if ((strcmp(cmd, "printobject") == 0) ||
+                 (strcmp(cmd, "po") == 0)) {
+        if (argc == 2) {
+          int64_t value;
+          if (GetValue(arg1, &value)) {
+            Object* obj = reinterpret_cast<Object*>(value);
+            PrintF("%s: \n", arg1);
+#ifdef DEBUG
+            obj->PrintLn();
+#else
+            obj->ShortPrint();
+            PrintF("\n");
+#endif
+          } else {
+            PrintF("%s unrecognized\n", arg1);
+          }
+        } else {
+          PrintF("printobject <value>\n"
+                 "printobject <register>\n"
+                 "    Print details about the value. (alias 'po')\n");
+        }
+
+      // stack / mem ----------------------------------------------------------
+      } else if (strcmp(cmd, "stack") == 0 || strcmp(cmd, "mem") == 0) {
+        int64_t* cur = NULL;
+        int64_t* end = NULL;
+        int next_arg = 1;
+
+        if (strcmp(cmd, "stack") == 0) {
+          cur = reinterpret_cast<int64_t*>(jssp());
+
+        } else {  // "mem"
+          int64_t value;
+          if (!GetValue(arg1, &value)) {
+            PrintF("%s unrecognized\n", arg1);
+            continue;
+          }
+          cur = reinterpret_cast<int64_t*>(value);
+          next_arg++;
+        }
+
+        int64_t words;
+        if (argc == next_arg) {
+          words = 10;
+        } else if (argc == next_arg + 1) {
+          if (!GetValue(argv[next_arg], &words)) {
+            PrintF("%s unrecognized\n", argv[next_arg]);
+            PrintF("Printing 10 double words by default");
+            words = 10;
+          }
+        }
+        end = cur + words;
+
+        while (cur < end) {
+          PrintF("  0x%016" PRIx64 ":  0x%016" PRIx64 " %10" PRId64,
+                 reinterpret_cast<uint64_t>(cur), *cur, *cur);
+          HeapObject* obj = reinterpret_cast<HeapObject*>(*cur);
+          int64_t value = *cur;
+          Heap* current_heap = v8::internal::Isolate::Current()->heap();
+          // Annotate values that look like Smis or heap objects.
+          if (((value & 1) == 0) || current_heap->Contains(obj)) {
+            PrintF(" (");
+            if ((value & kSmiTagMask) == 0) {
+              STATIC_ASSERT(kSmiValueSize == 32);
+              int32_t untagged = (value >> kSmiShift) & 0xffffffff;
+              PrintF("smi %" PRId32, untagged);
+            } else {
+              obj->ShortPrint();
+            }
+            PrintF(")");
+          }
+          PrintF("\n");
+          cur++;
+        }
+
+      // trace / t -------------------------------------------------------------
+      } else if (strcmp(cmd, "trace") == 0 || strcmp(cmd, "t") == 0) {
+        if ((log_parameters() & (LOG_DISASM | LOG_REGS)) !=
+            (LOG_DISASM | LOG_REGS)) {
+          PrintF("Enabling disassembly and registers tracing\n");
+          set_log_parameters(log_parameters() | LOG_DISASM | LOG_REGS);
+        } else {
+          PrintF("Disabling disassembly and registers tracing\n");
+          set_log_parameters(log_parameters() & ~(LOG_DISASM | LOG_REGS));
+        }
+
+      // break / b -------------------------------------------------------------
+      } else if (strcmp(cmd, "break") == 0 || strcmp(cmd, "b") == 0) {
+        if (argc == 2) {
+          int64_t value;
+          if (GetValue(arg1, &value)) {
+            SetBreakpoint(reinterpret_cast<Instruction*>(value));
+          } else {
+            PrintF("%s unrecognized\n", arg1);
+          }
+        } else {
+          ListBreakpoints();
+          PrintF("Use `break <address>` to set or disable a breakpoint\n");
+        }
+
+      // gdb -------------------------------------------------------------------
+      } else if (strcmp(cmd, "gdb") == 0) {
+        PrintF("Relinquishing control to gdb.\n");
+        OS::DebugBreak();
+        PrintF("Regaining control from gdb.\n");
+
+      // sysregs ---------------------------------------------------------------
+      } else if (strcmp(cmd, "sysregs") == 0) {
+        PrintSystemRegisters();
+
+      // help / h --------------------------------------------------------------
+      } else if (strcmp(cmd, "help") == 0 || strcmp(cmd, "h") == 0) {
+        PrintF(
+          "stepi / si\n"
+          "    stepi <n>\n"
+          "    Step <n> instructions.\n"
+          "next / n\n"
+          "    Continue execution until a BL instruction is reached.\n"
+          "    At this point a breakpoint is set just after this BL.\n"
+          "    Then execution is resumed. It will probably later hit the\n"
+          "    breakpoint just set.\n"
+          "continue / cont / c\n"
+          "    Continue execution from here.\n"
+          "disassemble / disasm / di\n"
+          "    disassemble <n> <address>\n"
+          "    Disassemble <n> instructions from current <address>.\n"
+          // The disasm command above defaults <n> to 10, so say so here
+          // (the text previously claimed 20).
+          "    By default <n> is 10 and <address> is the current pc.\n"
+          "print / p\n"
+          "    print <register>\n"
+          "    Print the content of a register.\n"
+          "    'print all' will print all registers.\n"
+          "    Use 'printobject' to get more details about the value.\n"
+          "printobject / po\n"
+          "    printobject <value>\n"
+          "    printobject <register>\n"
+          "    Print details about the value.\n"
+          "stack\n"
+          "    stack [<words>]\n"
+          "    Dump stack content, default dump 10 words\n"
+          "mem\n"
+          "    mem <address> [<words>]\n"
+          "    Dump memory content, default dump 10 words\n"
+          "trace / t\n"
+          "    Toggle disassembly and register tracing\n"
+          "break / b\n"
+          "    break : list all breakpoints\n"
+          "    break <address> : set / enable / disable a breakpoint.\n"
+          "gdb\n"
+          "    Enter gdb.\n"
+          "sysregs\n"
+          "    Print all system registers (including NZCV).\n");
+      } else {
+        PrintF("Unknown command: %s\n", cmd);
+        PrintF("Use 'help' for more information.\n");
+      }
+    }
+    if (cleared_log_disasm_bit == true) {
+      set_log_parameters(log_parameters_ | LOG_DISASM);
+    }
+  }
+}
+
+
+// Calls into the V8 runtime are based on this very simple interface.
+// Note: To be able to return two values from some calls the code in runtime.cc
+// uses the ObjectPair structure.
+// The simulator assumes all runtime calls return two 64-bits values. If they
+// don't, register x1 is clobbered. This is fine because x1 is caller-saved.
+struct ObjectPair {
+ int64_t res0;
+ int64_t res1;
+};
+
+
+typedef ObjectPair (*SimulatorRuntimeCall)(int64_t arg0,
+ int64_t arg1,
+ int64_t arg2,
+ int64_t arg3,
+ int64_t arg4,
+ int64_t arg5,
+ int64_t arg6,
+ int64_t arg7);
+
+typedef int64_t (*SimulatorRuntimeCompareCall)(double arg1, double arg2);
+typedef double (*SimulatorRuntimeFPFPCall)(double arg1, double arg2);
+typedef double (*SimulatorRuntimeFPCall)(double arg1);
+typedef double (*SimulatorRuntimeFPIntCall)(double arg1, int32_t arg2);
+
+// This signature supports direct call in to API function native callback
+// (refer to InvocationCallback in v8.h).
+typedef void (*SimulatorRuntimeDirectApiCall)(int64_t arg0);
+typedef void (*SimulatorRuntimeProfilingApiCall)(int64_t arg0, void* arg1);
+
+// This signature supports direct call to accessor getter callback.
+typedef void (*SimulatorRuntimeDirectGetterCall)(int64_t arg0, int64_t arg1);
+typedef void (*SimulatorRuntimeProfilingGetterCall)(int64_t arg0, int64_t arg1,
+ void* arg2);
+
+void Simulator::VisitException(Instruction* instr) {
+ // Define some colour codes to use for log messages.
+ // TODO(jbramley): Find a more elegant way of defining these.
+ char const* const clr_normal = (FLAG_log_colour) ? ("\033[m")
+ : ("");
+ char const* const clr_debug_number = (FLAG_log_colour) ? ("\033[1;33m")
+ : ("");
+ char const* const clr_debug_message = (FLAG_log_colour) ? ("\033[0;33m")
+ : ("");
+ char const* const clr_printf = (FLAG_log_colour) ? ("\033[0;32m")
+ : ("");
+
+ switch (instr->Mask(ExceptionMask)) {
+ case HLT: {
+ if (instr->ImmException() == kImmExceptionIsDebug) {
+ // Read the arguments encoded inline in the instruction stream.
+ uint32_t code;
+ uint32_t parameters;
+ char const * message;
+
+ ASSERT(sizeof(*pc_) == 1);
+ memcpy(&code, pc_ + kDebugCodeOffset, sizeof(code));
+          memcpy(&parameters, pc_ + kDebugParamsOffset, sizeof(parameters));
+ message = reinterpret_cast<char const *>(pc_ + kDebugMessageOffset);
+
+ // Always print something when we hit a debug point that breaks.
+ // We are going to break, so printing something is not an issue in
+ // terms of speed.
+ if (FLAG_trace_sim_messages || FLAG_trace_sim || (parameters & BREAK)) {
+ if (message != NULL) {
+ PrintF("%sDebugger hit %d: %s%s%s\n",
+ clr_debug_number,
+ code,
+ clr_debug_message,
+ message,
+ clr_normal);
+ } else {
+ PrintF("%sDebugger hit %d.%s\n",
+ clr_debug_number,
+ code,
+ clr_normal);
+ }
+ }
+
+ // Other options.
+ switch (parameters & kDebuggerTracingDirectivesMask) {
+ case TRACE_ENABLE:
+ set_log_parameters(log_parameters() | parameters);
+ if (parameters & LOG_SYS_REGS) { PrintSystemRegisters(); }
+ if (parameters & LOG_REGS) { PrintRegisters(); }
+ if (parameters & LOG_FP_REGS) { PrintFPRegisters(); }
+ break;
+ case TRACE_DISABLE:
+ set_log_parameters(log_parameters() & ~parameters);
+ break;
+ case TRACE_OVERRIDE:
+ set_log_parameters(parameters);
+ break;
+ default:
+ // We don't support a one-shot LOG_DISASM.
+ ASSERT((parameters & LOG_DISASM) == 0);
+ // Don't print information that is already being traced.
+ parameters &= ~log_parameters();
+ // Print the requested information.
+ if (parameters & LOG_SYS_REGS) PrintSystemRegisters(true);
+ if (parameters & LOG_REGS) PrintRegisters(true);
+ if (parameters & LOG_FP_REGS) PrintFPRegisters(true);
+ }
+
+ // The stop parameters are inlined in the code. Skip them:
+ // - Skip to the end of the message string.
+ pc_ += kDebugMessageOffset + strlen(message) + 1;
+ // - Advance to the next aligned location.
+ pc_ = AlignUp(pc_, kInstructionSize);
+ // - Verify that the unreachable marker is present.
+ ASSERT(pc_->Mask(ExceptionMask) == HLT);
+ ASSERT(pc_->ImmException() == kImmExceptionIsUnreachable);
+ // - Skip past the unreachable marker.
+ set_pc(pc_->NextInstruction());
+
+ // Check if the debugger should break.
+ if (parameters & BREAK) Debug();
+
+ } else if (instr->ImmException() == kImmExceptionIsRedirectedCall) {
+ // TODO(all): Extract the call redirection code into a separate
+ // function.
+
+ Redirection* redirection = Redirection::FromHltInstruction(instr);
+
+ // The called C code might itself call simulated code, so any
+ // caller-saved registers (including lr) could still be clobbered by a
+ // redirected call.
+ Instruction* return_address = lr();
+
+ // TODO(jbramley): Make external_function() a template so that we don't
+ // have to explicitly cast the result for each redirection type.
+ int64_t external =
+ reinterpret_cast<int64_t>(redirection->external_function());
+
+ TraceSim("Call to host function at %p\n",
+ reinterpret_cast<void*>(redirection->external_function()));
+
+ // SP must be 16 bytes aligned at the call interface.
+ bool stack_alignment_exception = ((sp() & 0xf) != 0);
+ if (stack_alignment_exception) {
+ TraceSim(" with unaligned stack 0x%016" PRIx64 ".\n", sp());
+ ALIGNMENT_EXCEPTION();
+ }
+
+ switch (redirection->type()) {
+ default:
+ TraceSim("Type: Unknown.\n");
+ UNREACHABLE();
+ break;
+
+ case ExternalReference::BUILTIN_CALL: {
+ // MaybeObject* f(v8::internal::Arguments).
+ TraceSim("Type: BUILTIN_CALL\n");
+ SimulatorRuntimeCall target =
+ reinterpret_cast<SimulatorRuntimeCall>(external);
+
+ // We don't know how many arguments are being passed, but we can
+ // pass 8 without touching the stack. They will be ignored by the
+ // host function if they aren't used.
+ TraceSim("Arguments: "
+ "0x%016" PRIx64 ", 0x%016" PRIx64 ", "
+ "0x%016" PRIx64 ", 0x%016" PRIx64 ", "
+ "0x%016" PRIx64 ", 0x%016" PRIx64 ", "
+ "0x%016" PRIx64 ", 0x%016" PRIx64,
+ xreg(0), xreg(1), xreg(2), xreg(3),
+ xreg(4), xreg(5), xreg(6), xreg(7));
+ ObjectPair result = target(xreg(0), xreg(1), xreg(2), xreg(3),
+ xreg(4), xreg(5), xreg(6), xreg(7));
+ TraceSim("Returned: {0x%" PRIx64 ", 0x%" PRIx64"}\n",
+ result.res0, result.res1);
+#ifdef DEBUG
+ CorruptAllCallerSavedCPURegisters();
+#endif
+ set_xreg(0, result.res0);
+ set_xreg(1, result.res1);
+ break;
+ }
+
+ case ExternalReference::DIRECT_API_CALL: {
+ // void f(v8::FunctionCallbackInfo&)
+ TraceSim("Type: DIRECT_API_CALL\n");
+ SimulatorRuntimeDirectApiCall target =
+ reinterpret_cast<SimulatorRuntimeDirectApiCall>(external);
+ TraceSim("Arguments: 0x%016" PRIx64 "\n", xreg(0));
+ target(xreg(0));
+ TraceSim("No return value.");
+#ifdef DEBUG
+ CorruptAllCallerSavedCPURegisters();
+#endif
+ break;
+ }
+
+ case ExternalReference::BUILTIN_COMPARE_CALL: {
+ // int f(double, double)
+ TraceSim("Type: BUILTIN_COMPARE_CALL\n");
+ SimulatorRuntimeCompareCall target =
+ reinterpret_cast<SimulatorRuntimeCompareCall>(external);
+ TraceSim("Arguments: %f, %f\n", dreg(0), dreg(1));
+ int64_t result = target(dreg(0), dreg(1));
+ TraceSim("Returned: %" PRId64 "\n", result);
+#ifdef DEBUG
+ CorruptAllCallerSavedCPURegisters();
+#endif
+ set_xreg(0, result);
+ break;
+ }
+
+ case ExternalReference::BUILTIN_FP_CALL: {
+ // double f(double)
+ TraceSim("Type: BUILTIN_FP_CALL\n");
+ SimulatorRuntimeFPCall target =
+ reinterpret_cast<SimulatorRuntimeFPCall>(external);
+ TraceSim("Argument: %f\n", dreg(0));
+ double result = target(dreg(0));
+ TraceSim("Returned: %f\n", result);
+#ifdef DEBUG
+ CorruptAllCallerSavedCPURegisters();
+#endif
+ set_dreg(0, result);
+ break;
+ }
+
+ case ExternalReference::BUILTIN_FP_FP_CALL: {
+ // double f(double, double)
+ TraceSim("Type: BUILTIN_FP_FP_CALL\n");
+ SimulatorRuntimeFPFPCall target =
+ reinterpret_cast<SimulatorRuntimeFPFPCall>(external);
+ TraceSim("Arguments: %f, %f\n", dreg(0), dreg(1));
+ double result = target(dreg(0), dreg(1));
+ TraceSim("Returned: %f\n", result);
+#ifdef DEBUG
+ CorruptAllCallerSavedCPURegisters();
+#endif
+ set_dreg(0, result);
+ break;
+ }
+
+ case ExternalReference::BUILTIN_FP_INT_CALL: {
+ // double f(double, int)
+ TraceSim("Type: BUILTIN_FP_INT_CALL\n");
+ SimulatorRuntimeFPIntCall target =
+ reinterpret_cast<SimulatorRuntimeFPIntCall>(external);
+ TraceSim("Arguments: %f, %d\n", dreg(0), wreg(0));
+ double result = target(dreg(0), wreg(0));
+ TraceSim("Returned: %f\n", result);
+#ifdef DEBUG
+ CorruptAllCallerSavedCPURegisters();
+#endif
+ set_dreg(0, result);
+ break;
+ }
+
+ case ExternalReference::DIRECT_GETTER_CALL: {
+ // void f(Local<String> property, PropertyCallbackInfo& info)
+ TraceSim("Type: DIRECT_GETTER_CALL\n");
+ SimulatorRuntimeDirectGetterCall target =
+ reinterpret_cast<SimulatorRuntimeDirectGetterCall>(external);
+ TraceSim("Arguments: 0x%016" PRIx64 ", 0x%016" PRIx64 "\n",
+ xreg(0), xreg(1));
+ target(xreg(0), xreg(1));
+ TraceSim("No return value.");
+#ifdef DEBUG
+ CorruptAllCallerSavedCPURegisters();
+#endif
+ break;
+ }
+
+ case ExternalReference::PROFILING_API_CALL: {
+ // void f(v8::FunctionCallbackInfo&, v8::FunctionCallback)
+ TraceSim("Type: PROFILING_API_CALL\n");
+ SimulatorRuntimeProfilingApiCall target =
+ reinterpret_cast<SimulatorRuntimeProfilingApiCall>(external);
+ void* arg1 = Redirection::ReverseRedirection(xreg(1));
+ TraceSim("Arguments: 0x%016" PRIx64 ", %p\n", xreg(0), arg1);
+ target(xreg(0), arg1);
+ TraceSim("No return value.");
+#ifdef DEBUG
+ CorruptAllCallerSavedCPURegisters();
+#endif
+ break;
+ }
+
+ case ExternalReference::PROFILING_GETTER_CALL: {
+ // void f(Local<String> property, PropertyCallbackInfo& info,
+ // AccessorGetterCallback callback)
+ TraceSim("Type: PROFILING_GETTER_CALL\n");
+ SimulatorRuntimeProfilingGetterCall target =
+ reinterpret_cast<SimulatorRuntimeProfilingGetterCall>(
+ external);
+ void* arg2 = Redirection::ReverseRedirection(xreg(2));
+ TraceSim("Arguments: 0x%016" PRIx64 ", 0x%016" PRIx64 ", %p\n",
+ xreg(0), xreg(1), arg2);
+ target(xreg(0), xreg(1), arg2);
+ TraceSim("No return value.");
+#ifdef DEBUG
+ CorruptAllCallerSavedCPURegisters();
+#endif
+ break;
+ }
+ }
+
+ set_lr(return_address);
+ set_pc(return_address);
+ } else if (instr->ImmException() == kImmExceptionIsPrintf) {
+ // Read the argument encoded inline in the instruction stream.
+ uint32_t type;
+ ASSERT(sizeof(*pc_) == 1);
+ memcpy(&type, pc_ + kPrintfTypeOffset, sizeof(type));
+
+ const char* format = reg<const char*>(0);
+
+ // Pass all of the relevant PCS registers onto printf. It doesn't
+ // matter if we pass too many as the extra ones won't be read.
+ int result;
+ fputs(clr_printf, stream_);
+ if (type == CPURegister::kRegister) {
+ result = fprintf(stream_, format,
+ xreg(1), xreg(2), xreg(3), xreg(4),
+ xreg(5), xreg(6), xreg(7));
+ } else if (type == CPURegister::kFPRegister) {
+ result = fprintf(stream_, format,
+ dreg(0), dreg(1), dreg(2), dreg(3),
+ dreg(4), dreg(5), dreg(6), dreg(7));
+ } else {
+ ASSERT(type == CPURegister::kNoRegister);
+ result = fprintf(stream_, "%s", format);
+ }
+ fputs(clr_normal, stream_);
+ set_xreg(0, result);
+
+ // TODO(jbramley): Consider clobbering all caller-saved registers here.
+
+ // The printf parameters are inlined in the code, so skip them.
+ set_pc(pc_->InstructionAtOffset(kPrintfLength));
+
+ // Set LR as if we'd just called a native printf function.
+ set_lr(pc());
+
+ } else if (instr->ImmException() == kImmExceptionIsUnreachable) {
+ fprintf(stream_, "Hit UNREACHABLE marker at PC=%p.\n",
+ reinterpret_cast<void*>(pc_));
+ abort();
+
+ } else {
+ OS::DebugBreak();
+ }
+ break;
+ }
+
+ default:
+ UNIMPLEMENTED();
+ }
+}
+
+#endif // USE_SIMULATOR
+
+} } // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_A64
--- /dev/null
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_A64_SIMULATOR_A64_H_
+#define V8_A64_SIMULATOR_A64_H_
+
+#include <stdarg.h>
+#include <vector>
+
+#include "v8.h"
+
+#include "globals.h"
+#include "utils.h"
+#include "allocation.h"
+#include "assembler.h"
+#include "a64/assembler-a64.h"
+#include "a64/decoder-a64.h"
+#include "a64/disasm-a64.h"
+#include "a64/instrument-a64.h"
+
+#define REGISTER_CODE_LIST(R) \
+R(0) R(1) R(2) R(3) R(4) R(5) R(6) R(7) \
+R(8) R(9) R(10) R(11) R(12) R(13) R(14) R(15) \
+R(16) R(17) R(18) R(19) R(20) R(21) R(22) R(23) \
+R(24) R(25) R(26) R(27) R(28) R(29) R(30) R(31)
+
+namespace v8 {
+namespace internal {
+
+#if !defined(USE_SIMULATOR)
+
+// Running without a simulator on a native A64 platform.
+// When running without a simulator we call the entry directly.
+#define CALL_GENERATED_CODE(entry, p0, p1, p2, p3, p4) \
+ (entry(p0, p1, p2, p3, p4))
+
+typedef int (*a64_regexp_matcher)(String* input,
+ int64_t start_offset,
+ const byte* input_start,
+ const byte* input_end,
+ int* output,
+ int64_t output_size,
+ Address stack_base,
+ int64_t direct_call,
+ void* return_address,
+ Isolate* isolate);
+
+// Call the generated regexp code directly. The code at the entry address
+// should act as a function matching the type a64_regexp_matcher.
+// The ninth argument is a dummy that reserves the space used for
+// the return address added by the ExitFrame in native calls.
+#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7, p8) \
+ (FUNCTION_CAST<a64_regexp_matcher>(entry)( \
+ p0, p1, p2, p3, p4, p5, p6, p7, NULL, p8))
+
+#define TRY_CATCH_FROM_ADDRESS(try_catch_address) \
+ reinterpret_cast<TryCatch*>(try_catch_address)
+
+// Running without a simulator there is nothing to do.
+class SimulatorStack : public v8::internal::AllStatic {
+ public:
+ static uintptr_t JsLimitFromCLimit(v8::internal::Isolate* isolate,
+ uintptr_t c_limit) {
+ USE(isolate);
+ return c_limit;
+ }
+
+ static uintptr_t RegisterCTryCatch(uintptr_t try_catch_address) {
+ return try_catch_address;
+ }
+
+ static void UnregisterCTryCatch() { }
+};
+
+#else // !defined(USE_SIMULATOR)
+
+enum ReverseByteMode {
+ Reverse16 = 0,
+ Reverse32 = 1,
+ Reverse64 = 2
+};
+
+
+// The proper way to initialize a simulated system register (such as NZCV) is as
+// follows:
+// SimSystemRegister nzcv = SimSystemRegister::DefaultValueFor(NZCV);
+class SimSystemRegister {
+ public:
+ // The default constructor represents a register which has no writable bits.
+ // It is not possible to set its value to anything other than 0.
+ SimSystemRegister() : value_(0), write_ignore_mask_(0xffffffff) { }
+
+ uint32_t RawValue() const {
+ return value_;
+ }
+
+ void SetRawValue(uint32_t new_value) {
+ value_ = (value_ & write_ignore_mask_) | (new_value & ~write_ignore_mask_);
+ }
+
+ uint32_t Bits(int msb, int lsb) const {
+ return unsigned_bitextract_32(msb, lsb, value_);
+ }
+
+ int32_t SignedBits(int msb, int lsb) const {
+ return signed_bitextract_32(msb, lsb, value_);
+ }
+
+ void SetBits(int msb, int lsb, uint32_t bits);
+
+ // Default system register values.
+ static SimSystemRegister DefaultValueFor(SystemRegister id);
+
+#define DEFINE_GETTER(Name, HighBit, LowBit, Func) \
+ uint32_t Name() const { return Func(HighBit, LowBit); } \
+ void Set##Name(uint32_t bits) { SetBits(HighBit, LowBit, bits); }
+#define DEFINE_WRITE_IGNORE_MASK(Name, Mask) \
+ static const uint32_t Name##WriteIgnoreMask = ~static_cast<uint32_t>(Mask);
+
+ SYSTEM_REGISTER_FIELDS_LIST(DEFINE_GETTER, DEFINE_WRITE_IGNORE_MASK)
+
+#undef DEFINE_ZERO_BITS
+#undef DEFINE_GETTER
+
+ protected:
+ // Most system registers only implement a few of the bits in the word. Other
+ // bits are "read-as-zero, write-ignored". The write_ignore_mask argument
+ // describes the bits which are not modifiable.
+ SimSystemRegister(uint32_t value, uint32_t write_ignore_mask)
+ : value_(value), write_ignore_mask_(write_ignore_mask) { }
+
+ uint32_t value_;
+ uint32_t write_ignore_mask_;
+};
+
+
+// Represent a register (r0-r31, v0-v31).
+template<int kSizeInBytes>
+class SimRegisterBase {
+ public:
+ template<typename T>
+ void Set(T new_value, unsigned size = sizeof(T)) {
+ ASSERT(size <= kSizeInBytes);
+ ASSERT(size <= sizeof(new_value));
+ // All AArch64 registers are zero-extending; Writing a W register clears the
+ // top bits of the corresponding X register.
+ memset(value_, 0, kSizeInBytes);
+ memcpy(value_, &new_value, size);
+ }
+
+ // Copy 'size' bytes of the register to the result, and zero-extend to fill
+ // the result.
+ template<typename T>
+ T Get(unsigned size = sizeof(T)) const {
+ ASSERT(size <= kSizeInBytes);
+ T result;
+ memset(&result, 0, sizeof(result));
+ memcpy(&result, value_, size);
+ return result;
+ }
+
+ protected:
+ uint8_t value_[kSizeInBytes];
+};
+typedef SimRegisterBase<kXRegSizeInBytes> SimRegister; // r0-r31
+typedef SimRegisterBase<kDRegSizeInBytes> SimFPRegister; // v0-v31
+
+
+class Simulator : public DecoderVisitor {
+ public:
+ explicit Simulator(Decoder* decoder,
+ Isolate* isolate = NULL,
+ FILE* stream = stderr);
+ ~Simulator();
+
+ // System functions.
+
+ static void Initialize(Isolate* isolate);
+
+ static Simulator* current(v8::internal::Isolate* isolate);
+
+ // Call an arbitrary function taking an arbitrary number of arguments. The
+ // varargs list must be a set of arguments with type CallArgument, and
+ // terminated by CallArgument::End().
+ void CallVoid(byte* entry, ...);
+ void CallVoid(byte* entry, va_list args);
+
+ // Like CallVoid, but expect a return value.
+ int64_t CallInt64(byte* entry, ...);
+ double CallDouble(byte* entry, ...);
+
+ // V8 calls into generated JS code with 5 parameters and into
+ // generated RegExp code with 10 parameters. These are convenience functions,
+ // which set up the simulator state and grab the result on return.
+ int64_t CallJS(byte* entry,
+ byte* function_entry,
+ JSFunction* func,
+ Object* revc,
+ int64_t argc,
+ Object*** argv);
+ int64_t CallRegExp(byte* entry,
+ String* input,
+ int64_t start_offset,
+ const byte* input_start,
+ const byte* input_end,
+ int* output,
+ int64_t output_size,
+ Address stack_base,
+ int64_t direct_call,
+ void* return_address,
+ Isolate* isolate);
+
+ // A wrapper class that stores an argument for one of the above Call
+ // functions.
+ //
+ // Only arguments up to 64 bits in size are supported.
+ class CallArgument {
+ public:
+ template<typename T>
+ explicit CallArgument(T argument) {
+ ASSERT(sizeof(argument) <= sizeof(bits_));
+ memcpy(&bits_, &argument, sizeof(argument));
+ type_ = X_ARG;
+ }
+
+ explicit CallArgument(double argument) {
+ ASSERT(sizeof(argument) == sizeof(bits_));
+ memcpy(&bits_, &argument, sizeof(argument));
+ type_ = D_ARG;
+ }
+
+ explicit CallArgument(float argument) {
+ TODO_UNIMPLEMENTED("CallArgument(float) is untested.");
+ // Make the D register a NaN to try to trap errors if the callee expects a
+ // double. If it expects a float, the callee should ignore the top word.
+ ASSERT(sizeof(kFP64SignallingNaN) == sizeof(bits_));
+ memcpy(&bits_, &kFP64SignallingNaN, sizeof(kFP64SignallingNaN));
+ // Write the float payload to the S register.
+ ASSERT(sizeof(argument) <= sizeof(bits_));
+ memcpy(&bits_, &argument, sizeof(argument));
+ type_ = D_ARG;
+ }
+
+ // This indicates the end of the arguments list, so that CallArgument
+ // objects can be passed into varargs functions.
+ static CallArgument End() { return CallArgument(); }
+
+ int64_t bits() const { return bits_; }
+ bool IsEnd() const { return type_ == NO_ARG; }
+ bool IsX() const { return type_ == X_ARG; }
+ bool IsD() const { return type_ == D_ARG; }
+
+ private:
+ enum CallArgumentType { X_ARG, D_ARG, NO_ARG };
+
+ // All arguments are aligned to at least 64 bits and we don't support
+ // passing bigger arguments, so the payload size can be fixed at 64 bits.
+ int64_t bits_;
+ CallArgumentType type_;
+
+ CallArgument() { type_ = NO_ARG; }
+ };
+
+
+ // Start the debugging command line.
+ void Debug();
+
+ bool GetValue(const char* desc, int64_t* value);
+
+ bool PrintValue(const char* desc);
+
+ // Push an address onto the JS stack.
+ uintptr_t PushAddress(uintptr_t address);
+
+ // Pop an address from the JS stack.
+ uintptr_t PopAddress();
+
+ // Accessor to the internal simulator stack area.
+ uintptr_t StackLimit() const;
+
+ void ResetState();
+
+ // Runtime call support.
+ static void* RedirectExternalReference(void* external_function,
+ ExternalReference::Type type);
+
+ // Run the simulator.
+ static const Instruction* kEndOfSimAddress;
+ void DecodeInstruction();
+ void Run();
+ void RunFrom(Instruction* start);
+
+ // Simulation helpers.
+ template <typename T>
+ void set_pc(T new_pc) {
+ ASSERT(sizeof(T) == sizeof(pc_));
+ memcpy(&pc_, &new_pc, sizeof(T));
+ pc_modified_ = true;
+ }
+ Instruction* pc() { return pc_; }
+
+ void increment_pc() {
+ if (!pc_modified_) {
+ pc_ = pc_->NextInstruction();
+ }
+
+ pc_modified_ = false;
+ }
+
+ void ExecuteInstruction() {
+ ASSERT(IsAligned(reinterpret_cast<uintptr_t>(pc_), kInstructionSize));
+ CheckBreakNext();
+ decoder_->Decode(pc_);
+ LogProcessorState();
+ increment_pc();
+ CheckBreakpoints();
+ }
+
+ // Declare all Visitor functions.
+ #define DECLARE(A) void Visit##A(Instruction* instr);
+ VISITOR_LIST(DECLARE)
+ #undef DECLARE
+
+ // Register accessors.
+
+ // Return 'size' bits of the value of an integer register, as the specified
+ // type. The value is zero-extended to fill the result.
+ //
+ // The only supported values of 'size' are kXRegSize and kWRegSize.
+ template<typename T>
+ T reg(unsigned size, unsigned code,
+ Reg31Mode r31mode = Reg31IsZeroRegister) const {
+ unsigned size_in_bytes = size / 8;
+ ASSERT(size_in_bytes <= sizeof(T));
+ ASSERT((size == kXRegSize) || (size == kWRegSize));
+ ASSERT(code < kNumberOfRegisters);
+
+ if ((code == 31) && (r31mode == Reg31IsZeroRegister)) {
+ T result;
+ memset(&result, 0, sizeof(result));
+ return result;
+ }
+ return registers_[code].Get<T>(size_in_bytes);
+ }
+
+ // Like reg(), but infer the access size from the template type.
+ template<typename T>
+ T reg(unsigned code, Reg31Mode r31mode = Reg31IsZeroRegister) const {
+ return reg<T>(sizeof(T) * 8, code, r31mode);
+ }
+
+ // Common specialized accessors for the reg() template.
+ int32_t wreg(unsigned code,
+ Reg31Mode r31mode = Reg31IsZeroRegister) const {
+ return reg<int32_t>(code, r31mode);
+ }
+
+ int64_t xreg(unsigned code,
+ Reg31Mode r31mode = Reg31IsZeroRegister) const {
+ return reg<int64_t>(code, r31mode);
+ }
+
+ int64_t reg(unsigned size, unsigned code,
+ Reg31Mode r31mode = Reg31IsZeroRegister) const {
+ return reg<int64_t>(size, code, r31mode);
+ }
+
+ // Write 'size' bits of 'value' into an integer register. The value is
+ // zero-extended. This behaviour matches AArch64 register writes.
+ //
+ // The only supported values of 'size' are kXRegSize and kWRegSize.
+ template<typename T>
+ void set_reg(unsigned size, unsigned code, T value,
+ Reg31Mode r31mode = Reg31IsZeroRegister) {
+ unsigned size_in_bytes = size / 8;
+ ASSERT(size_in_bytes <= sizeof(T));
+ ASSERT((size == kXRegSize) || (size == kWRegSize));
+ ASSERT(code < kNumberOfRegisters);
+
+ if ((code == 31) && (r31mode == Reg31IsZeroRegister)) {
+ return;
+ }
+ return registers_[code].Set(value, size_in_bytes);
+ }
+
+ // Like set_reg(), but infer the access size from the template type.
+ template<typename T>
+ void set_reg(unsigned code, T value,
+ Reg31Mode r31mode = Reg31IsZeroRegister) {
+ set_reg(sizeof(value) * 8, code, value, r31mode);
+ }
+
+ // Common specialized accessors for the set_reg() template.
+ void set_wreg(unsigned code, int32_t value,
+ Reg31Mode r31mode = Reg31IsZeroRegister) {
+ set_reg(kWRegSize, code, value, r31mode);
+ }
+
+ void set_xreg(unsigned code, int64_t value,
+ Reg31Mode r31mode = Reg31IsZeroRegister) {
+ set_reg(kXRegSize, code, value, r31mode);
+ }
+
+ // Commonly-used special cases.
+ template<typename T>
+ void set_lr(T value) {
+ ASSERT(sizeof(T) == kPointerSize);
+ set_reg(kLinkRegCode, value);
+ }
+
+ template<typename T>
+ void set_sp(T value) {
+ ASSERT(sizeof(T) == kPointerSize);
+ set_reg(31, value, Reg31IsStackPointer);
+ }
+
+ int64_t sp() { return xreg(31, Reg31IsStackPointer); }
+ int64_t jssp() { return xreg(kJSSPCode, Reg31IsStackPointer); }
+ int64_t fp() {
+ return xreg(kFramePointerRegCode, Reg31IsStackPointer);
+ }
+ Instruction* lr() { return reg<Instruction*>(kLinkRegCode); }
+
+ Address get_sp() { return reinterpret_cast<Address>(sp()); }
+
+ // Return 'size' bits of the value of a floating-point register, as the
+ // specified type. The value is zero-extended to fill the result.
+ //
+ // The only supported values of 'size' are kDRegSize and kSRegSize.
+ template<typename T>
+ T fpreg(unsigned size, unsigned code) const {
+ unsigned size_in_bytes = size / 8;
+ ASSERT(size_in_bytes <= sizeof(T));
+ ASSERT((size == kDRegSize) || (size == kSRegSize));
+ ASSERT(code < kNumberOfFPRegisters);
+ return fpregisters_[code].Get<T>(size_in_bytes);
+ }
+
+ // Like fpreg(), but infer the access size from the template type.
+ template<typename T>
+ T fpreg(unsigned code) const {
+ return fpreg<T>(sizeof(T) * 8, code);
+ }
+
+ // Common specialized accessors for the fpreg() template.
+ float sreg(unsigned code) const {
+ return fpreg<float>(code);
+ }
+
+ uint32_t sreg_bits(unsigned code) const {
+ return fpreg<uint32_t>(code);
+ }
+
+ double dreg(unsigned code) const {
+ return fpreg<double>(code);
+ }
+
+ uint64_t dreg_bits(unsigned code) const {
+ return fpreg<uint64_t>(code);
+ }
+
+ double fpreg(unsigned size, unsigned code) const {
+ switch (size) {
+ case kSRegSize: return sreg(code);
+ case kDRegSize: return dreg(code);
+ default:
+ UNREACHABLE();
+ return 0.0;
+ }
+ }
+
+ // Write 'value' into a floating-point register. The value is zero-extended.
+ // This behaviour matches AArch64 register writes.
+ template<typename T>
+ void set_fpreg(unsigned code, T value) {
+ ASSERT((sizeof(value) == kDRegSizeInBytes) ||
+ (sizeof(value) == kSRegSizeInBytes));
+ ASSERT(code < kNumberOfFPRegisters);
+ fpregisters_[code].Set(value, sizeof(value));
+ }
+
+ // Common specialized accessors for the set_fpreg() template.
+ void set_sreg(unsigned code, float value) {
+ set_fpreg(code, value);
+ }
+
+ void set_sreg_bits(unsigned code, uint32_t value) {
+ set_fpreg(code, value);
+ }
+
+ void set_dreg(unsigned code, double value) {
+ set_fpreg(code, value);
+ }
+
+ void set_dreg_bits(unsigned code, uint64_t value) {
+ set_fpreg(code, value);
+ }
+
+ bool N() { return nzcv_.N() != 0; }
+ bool Z() { return nzcv_.Z() != 0; }
+ bool C() { return nzcv_.C() != 0; }
+ bool V() { return nzcv_.V() != 0; }
+ SimSystemRegister& nzcv() { return nzcv_; }
+
+ // TODO(jbramley): Find a way to make the fpcr_ members return the proper
+ // types, so this accessor is not necessary.
+ FPRounding RMode() { return static_cast<FPRounding>(fpcr_.RMode()); }
+ SimSystemRegister& fpcr() { return fpcr_; }
+
+ // Debug helpers
+
+ // Simulator breakpoints.
+ struct Breakpoint {
+ Instruction* location;
+ bool enabled;
+ };
+ std::vector<Breakpoint> breakpoints_;
+ void SetBreakpoint(Instruction* breakpoint);
+ void ListBreakpoints();
+ void CheckBreakpoints();
+
+ // Helpers for the 'next' command.
+ // When this is set, the Simulator will insert a breakpoint after the next BL
+ // instruction it meets.
+ bool break_on_next_;
+ // Check if the Simulator should insert a break after the current instruction
+ // for the 'next' command.
+ void CheckBreakNext();
+
+ // Disassemble instruction at the given address.
+ void PrintInstructionsAt(Instruction* pc, uint64_t count);
+
+ void PrintSystemRegisters(bool print_all = false);
+ void PrintRegisters(bool print_all_regs = false);
+ void PrintFPRegisters(bool print_all_regs = false);
+ void PrintProcessorState();
+ void PrintWrite(uint8_t* address, uint64_t value, unsigned num_bytes);
+ void LogSystemRegisters() {
+ if (log_parameters_ & LOG_SYS_REGS) PrintSystemRegisters();
+ }
+ void LogRegisters() {
+ if (log_parameters_ & LOG_REGS) PrintRegisters();
+ }
+ void LogFPRegisters() {
+ if (log_parameters_ & LOG_FP_REGS) PrintFPRegisters();
+ }
+ void LogProcessorState() {
+ LogSystemRegisters();
+ LogRegisters();
+ LogFPRegisters();
+ }
+ void LogWrite(uint8_t* address, uint64_t value, unsigned num_bytes) {
+ if (log_parameters_ & LOG_WRITE) PrintWrite(address, value, num_bytes);
+ }
+
+ int log_parameters() { return log_parameters_; }
+ void set_log_parameters(int new_parameters) {
+ if (new_parameters & LOG_DISASM) {
+ decoder_->InsertVisitorBefore(print_disasm_, this);
+ } else {
+ decoder_->RemoveVisitor(print_disasm_);
+ }
+ log_parameters_ = new_parameters;
+ }
+
+ static inline const char* WRegNameForCode(unsigned code,
+ Reg31Mode mode = Reg31IsZeroRegister);
+ static inline const char* XRegNameForCode(unsigned code,
+ Reg31Mode mode = Reg31IsZeroRegister);
+ static inline const char* SRegNameForCode(unsigned code);
+ static inline const char* DRegNameForCode(unsigned code);
+ static inline const char* VRegNameForCode(unsigned code);
+ static inline int CodeFromName(const char* name);
+
+ protected:
+ // Simulation helpers ------------------------------------
+ bool ConditionPassed(Condition cond) {
+ switch (cond) {
+ case eq:
+ return Z();
+ case ne:
+ return !Z();
+ case hs:
+ return C();
+ case lo:
+ return !C();
+ case mi:
+ return N();
+ case pl:
+ return !N();
+ case vs:
+ return V();
+ case vc:
+ return !V();
+ case hi:
+ return C() && !Z();
+ case ls:
+ return !(C() && !Z());
+ case ge:
+ return N() == V();
+ case lt:
+ return N() != V();
+ case gt:
+ return !Z() && (N() == V());
+ case le:
+ return !(!Z() && (N() == V()));
+ case nv: // Fall through.
+ case al:
+ return true;
+ default:
+ UNREACHABLE();
+ return false;
+ }
+ }
+
+ bool ConditionFailed(Condition cond) {
+ return !ConditionPassed(cond);
+ }
+
+ void AddSubHelper(Instruction* instr, int64_t op2);
+ int64_t AddWithCarry(unsigned reg_size,
+ bool set_flags,
+ int64_t src1,
+ int64_t src2,
+ int64_t carry_in = 0);
+ void LogicalHelper(Instruction* instr, int64_t op2);
+ void ConditionalCompareHelper(Instruction* instr, int64_t op2);
+ void LoadStoreHelper(Instruction* instr,
+ int64_t offset,
+ AddrMode addrmode);
+ void LoadStorePairHelper(Instruction* instr, AddrMode addrmode);
+ uint8_t* LoadStoreAddress(unsigned addr_reg,
+ int64_t offset,
+ AddrMode addrmode);
+ void LoadStoreWriteBack(unsigned addr_reg,
+ int64_t offset,
+ AddrMode addrmode);
+ void CheckMemoryAccess(uint8_t* address, uint8_t* stack);
+
+ uint64_t MemoryRead(uint8_t* address, unsigned num_bytes);
+ uint8_t MemoryRead8(uint8_t* address);
+ uint16_t MemoryRead16(uint8_t* address);
+ uint32_t MemoryRead32(uint8_t* address);
+ float MemoryReadFP32(uint8_t* address);
+ uint64_t MemoryRead64(uint8_t* address);
+ double MemoryReadFP64(uint8_t* address);
+
+ void MemoryWrite(uint8_t* address, uint64_t value, unsigned num_bytes);
+ void MemoryWrite32(uint8_t* address, uint32_t value);
+ void MemoryWriteFP32(uint8_t* address, float value);
+ void MemoryWrite64(uint8_t* address, uint64_t value);
+ void MemoryWriteFP64(uint8_t* address, double value);
+
+ int64_t ShiftOperand(unsigned reg_size,
+ int64_t value,
+ Shift shift_type,
+ unsigned amount);
+ int64_t Rotate(unsigned reg_width,
+ int64_t value,
+ Shift shift_type,
+ unsigned amount);
+ int64_t ExtendValue(unsigned reg_width,
+ int64_t value,
+ Extend extend_type,
+ unsigned left_shift = 0);
+
+ uint64_t ReverseBits(uint64_t value, unsigned num_bits);
+ uint64_t ReverseBytes(uint64_t value, ReverseByteMode mode);
+
+ void FPCompare(double val0, double val1);
+ double FPRoundInt(double value, FPRounding round_mode);
+ double FPToDouble(float value);
+ float FPToFloat(double value, FPRounding round_mode);
+ double FixedToDouble(int64_t src, int fbits, FPRounding round_mode);
+ double UFixedToDouble(uint64_t src, int fbits, FPRounding round_mode);
+ float FixedToFloat(int64_t src, int fbits, FPRounding round_mode);
+ float UFixedToFloat(uint64_t src, int fbits, FPRounding round_mode);
+ int32_t FPToInt32(double value, FPRounding rmode);
+ int64_t FPToInt64(double value, FPRounding rmode);
+ uint32_t FPToUInt32(double value, FPRounding rmode);
+ uint64_t FPToUInt64(double value, FPRounding rmode);
+
+ template <typename T>
+ T FPMax(T a, T b);
+
+ template <typename T>
+ T FPMin(T a, T b);
+
+ template <typename T>
+ T FPMaxNM(T a, T b);
+
+ template <typename T>
+ T FPMinNM(T a, T b);
+
+ void CheckStackAlignment();
+
+ inline void CheckPCSComplianceAndRun();
+
+#ifdef DEBUG
+ // Corruption values should have their least significant byte cleared to
+ // allow the code of the register being corrupted to be inserted.
+ static const uint64_t kCallerSavedRegisterCorruptionValue =
+ 0xca11edc0de000000UL;
+ // This value is a NaN in both 32-bit and 64-bit FP.
+ static const uint64_t kCallerSavedFPRegisterCorruptionValue =
+ 0x7ff000007f801000UL;
+ // This value is a mix of 32/64-bits NaN and "verbose" immediate.
+ static const uint64_t kDefaultCPURegisterCorruptionValue =
+ 0x7ffbad007f8bad00UL;
+
+ void CorruptRegisters(CPURegList* list,
+ uint64_t value = kDefaultCPURegisterCorruptionValue);
+ void CorruptAllCallerSavedCPURegisters();
+#endif
+
+ // Processor state ---------------------------------------
+
+ // Output stream.
+ FILE* stream_;
+ PrintDisassembler* print_disasm_;
+
+ // Instrumentation.
+ Instrument* instrument_;
+
+ // General purpose registers. Register 31 is the stack pointer.
+ SimRegister registers_[kNumberOfRegisters];
+
+ // Floating point registers
+ SimFPRegister fpregisters_[kNumberOfFPRegisters];
+
+ // Processor state
+ // bits[31, 27]: Condition flags N, Z, C, and V.
+ // (Negative, Zero, Carry, Overflow)
+ SimSystemRegister nzcv_;
+
+ // Floating-Point Control Register
+ SimSystemRegister fpcr_;
+
+ // Only a subset of FPCR features are supported by the simulator. This helper
+ // checks that the FPCR settings are supported.
+ //
+ // This is checked when floating-point instructions are executed, not when
+ // FPCR is set. This allows generated code to modify FPCR for external
+ // functions, or to save and restore it when entering and leaving generated
+ // code.
+  // Asserts that FPCR holds only settings this simulator implements:
+  // default-NaN off, flush-to-zero off, and round-to-nearest (ties-to-even).
+  void AssertSupportedFPCR() {
+    ASSERT(fpcr().DN() == 0);             // No default-NaN support.
+    ASSERT(fpcr().FZ() == 0);             // No flush-to-zero support.
+    ASSERT(fpcr().RMode() == FPTieEven);  // Ties-to-even rounding only.
+
+    // The simulator does not support half-precision operations so fpcr().AHP()
+    // is irrelevant, and is not checked here.
+  }
+
+  // N flag: the sign bit of 'result' when interpreted at 'reg_size' bits.
+  static int CalcNFlag(uint64_t result, unsigned reg_size) {
+    return (result >> (reg_size - 1)) & 1;
+  }
+
+  // Z flag: set when the result is zero.
+  static int CalcZFlag(uint64_t result) {
+    return result == 0;
+  }
+
+ static const uint32_t kConditionFlagsMask = 0xf0000000;
+
+ // Stack
+ byte* stack_;
+ static const intptr_t stack_protection_size_ = KB;
+ intptr_t stack_size_;
+ byte* stack_limit_;
+ // TODO(aleram): protect the stack.
+
+ Decoder* decoder_;
+ Decoder* disassembler_decoder_;
+
+ // Indicates if the pc has been modified by the instruction and should not be
+ // automatically incremented.
+ bool pc_modified_;
+ Instruction* pc_;
+
+ static const char* xreg_names[];
+ static const char* wreg_names[];
+ static const char* sreg_names[];
+ static const char* dreg_names[];
+ static const char* vreg_names[];
+
+ // Debugger input.
+  // Takes ownership of 'input' and frees the previously stored line.
+  void set_last_debugger_input(char* input) {
+    DeleteArray(last_debugger_input_);
+    last_debugger_input_ = input;
+  }
+  // Returns the most recent debugger input line (owned by the simulator).
+  char* last_debugger_input() { return last_debugger_input_; }
+ char* last_debugger_input_;
+
+ private:
+ int log_parameters_;
+ Isolate* isolate_;
+};
+
+
+// When running with the simulator transition into simulated execution at this
+// point.
+#define CALL_GENERATED_CODE(entry, p0, p1, p2, p3, p4) \
+ reinterpret_cast<Object*>(Simulator::current(Isolate::Current())->CallJS( \
+ FUNCTION_ADDR(entry), \
+ p0, p1, p2, p3, p4))
+
+// Transition into simulated execution for generated RegExp matcher code.
+// NOTE(review): NULL is forwarded as the ninth argument — presumably an
+// argument slot unused under the simulator; confirm against the
+// Simulator::CallRegExp signature.
+#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7, p8) \
+  Simulator::current(Isolate::Current())->CallRegExp(                         \
+      entry,                                                                  \
+      p0, p1, p2, p3, p4, p5, p6, p7, NULL, p8)
+
+// Map a try-catch address (which may be NULL) to the TryCatch* stored there.
+#define TRY_CATCH_FROM_ADDRESS(try_catch_address)                             \
+  try_catch_address == NULL ?                                                 \
+      NULL : *(reinterpret_cast<TryCatch**>(try_catch_address))
+
+
+// The simulator has its own stack. Thus it has a different stack limit from
+// the C-based native code.
+// See also 'class SimulatorStack' in arm/simulator-arm.h.
+class SimulatorStack : public v8::internal::AllStatic {
+ public:
+  // Under the simulator the JS stack limit comes from the simulator's own
+  // stack, not from the C stack limit supplied by the caller.
+  static uintptr_t JsLimitFromCLimit(v8::internal::Isolate* isolate,
+                                     uintptr_t c_limit) {
+    return Simulator::current(isolate)->StackLimit();
+  }
+
+  // Pushes a C try-catch address onto the simulator stack and returns the
+  // simulator-stack address where it is stored.
+  static uintptr_t RegisterCTryCatch(uintptr_t try_catch_address) {
+    Simulator* sim = Simulator::current(Isolate::Current());
+    return sim->PushAddress(try_catch_address);
+  }
+
+  // Pops the address registered by the matching RegisterCTryCatch() call.
+  static void UnregisterCTryCatch() {
+    Simulator::current(Isolate::Current())->PopAddress();
+  }
+};
+
+#endif // !defined(USE_SIMULATOR)
+
+} } // namespace v8::internal
+
+#endif // V8_A64_SIMULATOR_A64_H_
--- /dev/null
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#if V8_TARGET_ARCH_A64
+
+#include "ic-inl.h"
+#include "codegen.h"
+#include "stub-cache.h"
+
+namespace v8 {
+namespace internal {
+
+
+#define __ ACCESS_MASM(masm)
+
+
+// Verifies that 'name' is not present in 'receiver''s property dictionary.
+// Branches to 'miss_label' if the receiver has a named interceptor, requires
+// access checks, is not a spec object, its properties are not a dictionary,
+// or the dictionary may contain 'name'. Clobbers scratch0 and scratch1.
+void StubCompiler::GenerateDictionaryNegativeLookup(MacroAssembler* masm,
+                                                    Label* miss_label,
+                                                    Register receiver,
+                                                    Handle<Name> name,
+                                                    Register scratch0,
+                                                    Register scratch1) {
+  ASSERT(!AreAliased(receiver, scratch0, scratch1));
+  ASSERT(name->IsUniqueName());
+  Counters* counters = masm->isolate()->counters();
+  __ IncrementCounter(counters->negative_lookups(), 1, scratch0, scratch1);
+  // The miss counter is decremented again on the success path below.
+  __ IncrementCounter(counters->negative_lookups_miss(), 1, scratch0, scratch1);
+
+  Label done;
+
+  const int kInterceptorOrAccessCheckNeededMask =
+      (1 << Map::kHasNamedInterceptor) | (1 << Map::kIsAccessCheckNeeded);
+
+  // Bail out if the receiver has a named interceptor or requires access checks.
+  Register map = scratch1;
+  __ Ldr(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
+  __ Ldrb(scratch0, FieldMemOperand(map, Map::kBitFieldOffset));
+  __ Tst(scratch0, kInterceptorOrAccessCheckNeededMask);
+  __ B(ne, miss_label);
+
+  // Check that receiver is a JSObject.
+  __ Ldrb(scratch0, FieldMemOperand(map, Map::kInstanceTypeOffset));
+  __ Cmp(scratch0, FIRST_SPEC_OBJECT_TYPE);
+  __ B(lt, miss_label);
+
+  // Load properties array.
+  Register properties = scratch0;
+  __ Ldr(properties, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
+  // Check that the properties array is a dictionary.
+  __ Ldr(map, FieldMemOperand(properties, HeapObject::kMapOffset));
+  __ JumpIfNotRoot(map, Heap::kHashTableMapRootIndex, miss_label);
+
+  NameDictionaryLookupStub::GenerateNegativeLookup(masm,
+                                                   miss_label,
+                                                   &done,
+                                                   receiver,
+                                                   properties,
+                                                   name,
+                                                   scratch1);
+  __ Bind(&done);
+  __ DecrementCounter(counters->negative_lookups_miss(), 1, scratch0, scratch1);
+}
+
+
+// Probe primary or secondary table.
+// If the entry is found in the cache, the generated code jumps to the first
+// instruction of the stub in the cache.
+// If there is a miss the code falls through.
+//
+// 'receiver', 'name' and 'offset' registers are preserved on miss.
+static void ProbeTable(Isolate* isolate,
+                       MacroAssembler* masm,
+                       Code::Flags flags,
+                       StubCache::Table table,
+                       Register receiver,
+                       Register name,
+                       Register offset,
+                       Register scratch,
+                       Register scratch2,
+                       Register scratch3) {
+  // Some code below relies on the fact that the Entry struct contains
+  // 3 pointers (name, code, map).
+  STATIC_ASSERT(sizeof(StubCache::Entry) == (3 * kPointerSize));
+
+  ExternalReference key_offset(isolate->stub_cache()->key_reference(table));
+  ExternalReference value_offset(isolate->stub_cache()->value_reference(table));
+  ExternalReference map_offset(isolate->stub_cache()->map_reference(table));
+
+  uintptr_t key_off_addr = reinterpret_cast<uintptr_t>(key_offset.address());
+  uintptr_t value_off_addr =
+      reinterpret_cast<uintptr_t>(value_offset.address());
+  uintptr_t map_off_addr = reinterpret_cast<uintptr_t>(map_offset.address());
+
+  Label miss;
+
+  ASSERT(!AreAliased(name, offset, scratch, scratch2, scratch3));
+
+  // Multiply by 3 because there are 3 fields per entry.
+  __ Add(scratch3, offset, Operand(offset, LSL, 1));
+
+  // Calculate the base address of the entry.
+  __ Mov(scratch, Operand(key_offset));
+  __ Add(scratch, scratch, Operand(scratch3, LSL, kPointerSizeLog2));
+
+  // Check that the key in the entry matches the name.
+  __ Ldr(scratch2, MemOperand(scratch));
+  __ Cmp(name, scratch2);
+  __ B(ne, &miss);
+
+  // Check the map matches.
+  // The map and code fields are addressed relative to the key address
+  // (scratch) using the constant distance between the parallel tables.
+  __ Ldr(scratch2, MemOperand(scratch, map_off_addr - key_off_addr));
+  __ Ldr(scratch3, FieldMemOperand(receiver, HeapObject::kMapOffset));
+  __ Cmp(scratch2, scratch3);
+  __ B(ne, &miss);
+
+  // Get the code entry from the cache.
+  __ Ldr(scratch, MemOperand(scratch, value_off_addr - key_off_addr));
+
+  // Check that the flags match what we're looking for.
+  __ Ldr(scratch2.W(), FieldMemOperand(scratch, Code::kFlagsOffset));
+  __ Bic(scratch2.W(), scratch2.W(), Code::kFlagsNotUsedInLookup);
+  __ Cmp(scratch2.W(), flags);
+  __ B(ne, &miss);
+
+#ifdef DEBUG
+  // These flags force a miss on one table so the other table's path can be
+  // exercised by tests.
+  if (FLAG_test_secondary_stub_cache && table == StubCache::kPrimary) {
+    __ B(&miss);
+  } else if (FLAG_test_primary_stub_cache && table == StubCache::kSecondary) {
+    __ B(&miss);
+  }
+#endif
+
+  // Jump to the first instruction in the code stub.
+  __ Add(scratch, scratch, Code::kHeaderSize - kHeapObjectTag);
+  __ Br(scratch);
+
+  // Miss: fall through.
+  __ Bind(&miss);
+}
+
+
+// Probes the primary and then the secondary stub cache table for a code stub
+// matching (receiver map, name, flags). On a hit, control jumps into the
+// cached stub; on a miss, execution falls through. The scratch/extra
+// registers are clobbered.
+void StubCache::GenerateProbe(MacroAssembler* masm,
+                              Code::Flags flags,
+                              Register receiver,
+                              Register name,
+                              Register scratch,
+                              Register extra,
+                              Register extra2,
+                              Register extra3) {
+  Isolate* isolate = masm->isolate();
+  Label miss;
+
+  // Make sure the flags does not name a specific type.
+  ASSERT(Code::ExtractTypeFromFlags(flags) == 0);
+
+  // Make sure that there are no register conflicts.
+  ASSERT(!AreAliased(receiver, name, scratch, extra, extra2, extra3));
+
+  // Make sure extra and extra2 registers are valid.
+  ASSERT(!extra.is(no_reg));
+  ASSERT(!extra2.is(no_reg));
+  ASSERT(!extra3.is(no_reg));
+
+  Counters* counters = masm->isolate()->counters();
+  __ IncrementCounter(counters->megamorphic_stub_cache_probes(), 1,
+                      extra2, extra3);
+
+  // Check that the receiver isn't a smi.
+  __ JumpIfSmi(receiver, &miss);
+
+  // Compute the hash for primary table.
+  __ Ldr(scratch, FieldMemOperand(name, Name::kHashFieldOffset));
+  __ Ldr(extra, FieldMemOperand(receiver, HeapObject::kMapOffset));
+  __ Add(scratch, scratch, extra);
+  __ Eor(scratch, scratch, flags);
+  // We shift out the last two bits because they are not part of the hash.
+  __ Ubfx(scratch, scratch, kHeapObjectTagSize,
+          CountTrailingZeros(kPrimaryTableSize, 64));
+
+  // Probe the primary table.
+  ProbeTable(isolate, masm, flags, kPrimary, receiver, name,
+             scratch, extra, extra2, extra3);
+
+  // Primary miss: Compute hash for secondary table.
+  __ Sub(scratch, scratch, Operand(name, LSR, kHeapObjectTagSize));
+  __ Add(scratch, scratch, flags >> kHeapObjectTagSize);
+  __ And(scratch, scratch, kSecondaryTableSize - 1);
+
+  // Probe the secondary table.
+  ProbeTable(isolate, masm, flags, kSecondary, receiver, name,
+             scratch, extra, extra2, extra3);
+
+  // Cache miss: Fall-through and let caller handle the miss by
+  // entering the runtime system.
+  __ Bind(&miss);
+  __ IncrementCounter(counters->megamorphic_stub_cache_misses(), 1,
+                      extra2, extra3);
+}
+
+
+// Loads into 'prototype' the prototype of global function 'index' from the
+// current native context. 'prototype' is used as its own scratch register
+// throughout.
+void StubCompiler::GenerateLoadGlobalFunctionPrototype(MacroAssembler* masm,
+                                                       int index,
+                                                       Register prototype) {
+  // Load the global or builtins object from the current context.
+  __ Ldr(prototype, GlobalObjectMemOperand());
+  // Load the native context from the global or builtins object.
+  __ Ldr(prototype,
+         FieldMemOperand(prototype, GlobalObject::kNativeContextOffset));
+  // Load the function from the native context.
+  __ Ldr(prototype, ContextMemOperand(prototype, index));
+  // Load the initial map. The global functions all have initial maps.
+  __ Ldr(prototype,
+         FieldMemOperand(prototype, JSFunction::kPrototypeOrInitialMapOffset));
+  // Load the prototype from the initial map.
+  __ Ldr(prototype, FieldMemOperand(prototype, Map::kPrototypeOffset));
+}
+
+
+// Like GenerateLoadGlobalFunctionPrototype, but first verifies at runtime
+// that the native context still holds the same function that was seen at
+// compile time; branches to 'miss' if the context has changed.
+void StubCompiler::GenerateDirectLoadGlobalFunctionPrototype(
+    MacroAssembler* masm,
+    int index,
+    Register prototype,
+    Label* miss) {
+  Isolate* isolate = masm->isolate();
+  // Get the global function with the given index.
+  Handle<JSFunction> function(
+      JSFunction::cast(isolate->native_context()->get(index)));
+
+  // Check we're still in the same context.
+  Register scratch = prototype;
+  __ Ldr(scratch, GlobalObjectMemOperand());
+  __ Ldr(scratch, FieldMemOperand(scratch, GlobalObject::kNativeContextOffset));
+  __ Ldr(scratch, ContextMemOperand(scratch, index));
+  __ Cmp(scratch, Operand(function));
+  __ B(ne, miss);
+
+  // Load its initial map. The global functions all have initial maps.
+  __ Mov(prototype, Operand(Handle<Map>(function->initial_map())));
+  // Load the prototype from the initial map.
+  __ Ldr(prototype, FieldMemOperand(prototype, Map::kPrototypeOffset));
+}
+
+
+// Loads property 'index' of 'src' into 'dst': directly from the object when
+// 'inobject', otherwise via the out-of-line properties array. Double
+// representations are not supported here (asserted below).
+void StubCompiler::GenerateFastPropertyLoad(MacroAssembler* masm,
+                                            Register dst,
+                                            Register src,
+                                            bool inobject,
+                                            int index,
+                                            Representation representation) {
+  ASSERT(!FLAG_track_double_fields || !representation.IsDouble());
+  USE(representation);
+  if (inobject) {
+    int offset = index * kPointerSize;
+    __ Ldr(dst, FieldMemOperand(src, offset));
+  } else {
+    // Calculate the offset into the properties array.
+    int offset = index * kPointerSize + FixedArray::kHeaderSize;
+    __ Ldr(dst, FieldMemOperand(src, JSObject::kPropertiesOffset));
+    __ Ldr(dst, FieldMemOperand(dst, offset));
+  }
+}
+
+
+// Returns (in x0) the length of the JS array in 'receiver', or branches to
+// 'miss_label' if the receiver is a smi or not a JS array. Clobbers 'scratch'.
+void StubCompiler::GenerateLoadArrayLength(MacroAssembler* masm,
+                                           Register receiver,
+                                           Register scratch,
+                                           Label* miss_label) {
+  ASSERT(!AreAliased(receiver, scratch));
+
+  // Check that the receiver isn't a smi.
+  __ JumpIfSmi(receiver, miss_label);
+
+  // Check that the object is a JS array.
+  __ JumpIfNotObjectType(receiver, scratch, scratch, JS_ARRAY_TYPE,
+                         miss_label);
+
+  // Load length directly from the JS array.
+  __ Ldr(x0, FieldMemOperand(receiver, JSArray::kLengthOffset));
+  __ Ret();
+}
+
+
+// Generate code to check if an object is a string. If the object is a
+// heap object, its map's instance type is left in the scratch1 register.
+static void GenerateStringCheck(MacroAssembler* masm,
+                                Register receiver,
+                                Register scratch1,
+                                Label* smi,
+                                Label* non_string_object) {
+  // Check that the receiver isn't a smi.
+  __ JumpIfSmi(receiver, smi);
+
+  // Get the object's instance type field.
+  __ Ldr(scratch1, FieldMemOperand(receiver, HeapObject::kMapOffset));
+  __ Ldrb(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
+  // Check if the "not string" bit is set; if so this is not a string object.
+  __ Tbnz(scratch1, MaskToBit(kNotStringTag), non_string_object);
+}
+
+
+// Generate code to load the length from a string object and return the length.
+// If the receiver object is not a string or a wrapped string object the
+// execution continues at the miss label. The register containing the
+// receiver is not clobbered if the receiver is not a string.
+void StubCompiler::GenerateLoadStringLength(MacroAssembler* masm,
+                                            Register receiver,
+                                            Register scratch1,
+                                            Register scratch2,
+                                            Label* miss) {
+  // Input registers can't alias because we don't want to clobber the
+  // receiver register if the object is not a string.
+  ASSERT(!AreAliased(receiver, scratch1, scratch2));
+
+  Label check_wrapper;
+
+  // Check if the object is a string leaving the instance type in the
+  // scratch1 register.
+  GenerateStringCheck(masm, receiver, scratch1, miss, &check_wrapper);
+
+  // Load length directly from the string and return it in x0.
+  __ Ldr(x0, FieldMemOperand(receiver, String::kLengthOffset));
+  __ Ret();
+
+  // Check if the object is a JSValue wrapper.
+  __ Bind(&check_wrapper);
+  __ Cmp(scratch1, Operand(JS_VALUE_TYPE));
+  __ B(ne, miss);
+
+  // Unwrap the value and check if the wrapped value is a string.
+  __ Ldr(scratch1, FieldMemOperand(receiver, JSValue::kValueOffset));
+  GenerateStringCheck(masm, scratch1, scratch2, miss, miss);
+  __ Ldr(x0, FieldMemOperand(scratch1, String::kLengthOffset));
+  __ Ret();
+}
+
+
+// Returns (in x0) the prototype of the function in 'receiver', branching to
+// 'miss_label' on failure. Clobbers scratch1 and scratch2.
+void StubCompiler::GenerateLoadFunctionPrototype(MacroAssembler* masm,
+                                                 Register receiver,
+                                                 Register scratch1,
+                                                 Register scratch2,
+                                                 Label* miss_label) {
+  __ TryGetFunctionPrototype(receiver, scratch1, scratch2, miss_label);
+  // TryGetFunctionPrototype can't put the result directly in x0 because the
+  // 3 inputs registers can't alias and we call this function from
+  // LoadIC::GenerateFunctionPrototype, where receiver is x0. So we explicitly
+  // move the result in x0.
+  __ Mov(x0, scratch1);
+  __ Ret();
+}
+
+
+// Generate code to check that a global property cell is empty. Create
+// the property cell at compilation time if no cell exists for the
+// property.
+// Clobbers 'scratch'; branches to 'miss' if the cell's value is not the hole
+// (i.e. the global property has been created since compilation).
+void StubCompiler::GenerateCheckPropertyCell(MacroAssembler* masm,
+                                             Handle<JSGlobalObject> global,
+                                             Handle<Name> name,
+                                             Register scratch,
+                                             Label* miss) {
+  Handle<Cell> cell = JSGlobalObject::EnsurePropertyCell(global, name);
+  ASSERT(cell->value()->IsTheHole());
+  __ Mov(scratch, Operand(cell));
+  __ Ldr(scratch, FieldMemOperand(scratch, Cell::kValueOffset));
+  __ JumpIfNotRoot(scratch, Heap::kTheHoleValueRootIndex, miss);
+}
+
+
+// Verifies that 'name' is not an own property of 'holder': via the property
+// cell check for global objects, or via a dictionary negative lookup for
+// slow-mode (non-proxy) objects. Fast-property non-global holders need no
+// runtime check here.
+void StoreStubCompiler::GenerateNegativeHolderLookup(
+    MacroAssembler* masm,
+    Handle<JSObject> holder,
+    Register holder_reg,
+    Handle<Name> name,
+    Label* miss) {
+  if (holder->IsJSGlobalObject()) {
+    GenerateCheckPropertyCell(
+        masm, Handle<JSGlobalObject>::cast(holder), name, scratch1(), miss);
+  } else if (!holder->HasFastProperties() && !holder->IsJSGlobalProxy()) {
+    GenerateDictionaryNegativeLookup(
+        masm, miss, holder_reg, name, scratch1(), scratch2());
+  }
+}
+
+
+// Generate StoreTransition code, value is passed in x0 register.
+// When leaving generated code after success, the receiver_reg and storage_reg
+// may be clobbered. Upon branch to miss_label, the receiver and name registers
+// have their original values.
+void StoreStubCompiler::GenerateStoreTransition(MacroAssembler* masm,
+                                                Handle<JSObject> object,
+                                                LookupResult* lookup,
+                                                Handle<Map> transition,
+                                                Handle<Name> name,
+                                                Register receiver_reg,
+                                                Register storage_reg,
+                                                Register value_reg,
+                                                Register scratch1,
+                                                Register scratch2,
+                                                Register scratch3,
+                                                Label* miss_label,
+                                                Label* slow) {
+  Label exit;
+
+  ASSERT(!AreAliased(receiver_reg, storage_reg, value_reg,
+                     scratch1, scratch2, scratch3));
+
+  // We don't need scratch3.
+  scratch3 = NoReg;
+
+  int descriptor = transition->LastAdded();
+  DescriptorArray* descriptors = transition->instance_descriptors();
+  PropertyDetails details = descriptors->GetDetails(descriptor);
+  Representation representation = details.representation();
+  ASSERT(!representation.IsNone());
+
+  // Validate the stored value against the representation expected by the
+  // transition target, boxing doubles into a fresh heap number (storage_reg).
+  if (details.type() == CONSTANT) {
+    Handle<Object> constant(descriptors->GetValue(descriptor), masm->isolate());
+    __ LoadObject(scratch1, constant);
+    __ Cmp(value_reg, scratch1);
+    __ B(ne, miss_label);
+  } else if (FLAG_track_fields && representation.IsSmi()) {
+    __ JumpIfNotSmi(value_reg, miss_label);
+  } else if (FLAG_track_heap_object_fields && representation.IsHeapObject()) {
+    __ JumpIfSmi(value_reg, miss_label);
+  } else if (FLAG_track_double_fields && representation.IsDouble()) {
+    Label do_store, heap_number;
+    __ AllocateHeapNumber(storage_reg, slow, scratch1, scratch2);
+
+    // TODO(jbramley): Is fp_scratch the most appropriate FP scratch register?
+    // It's only used in Fcmp, but it's not really safe to use it like this.
+    __ JumpIfNotSmi(value_reg, &heap_number);
+    __ SmiUntagToDouble(fp_scratch, value_reg);
+    __ B(&do_store);
+
+    __ Bind(&heap_number);
+    __ CheckMap(value_reg, scratch1, Heap::kHeapNumberMapRootIndex,
+                miss_label, DONT_DO_SMI_CHECK);
+    __ Ldr(fp_scratch, FieldMemOperand(value_reg, HeapNumber::kValueOffset));
+
+    __ Bind(&do_store);
+    __ Str(fp_scratch, FieldMemOperand(storage_reg, HeapNumber::kValueOffset));
+  }
+
+  // Stub never generated for non-global objects that require access checks.
+  ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
+
+  // Perform map transition for the receiver if necessary.
+  if ((details.type() == FIELD) &&
+      (object->map()->unused_property_fields() == 0)) {
+    // The properties must be extended before we can store the value.
+    // We jump to a runtime call that extends the properties array.
+    __ Mov(scratch1, Operand(transition));
+    __ Push(receiver_reg, scratch1, value_reg);
+    __ TailCallExternalReference(
+        ExternalReference(IC_Utility(IC::kSharedStoreIC_ExtendStorage),
+                          masm->isolate()),
+        3,
+        1);
+    return;
+  }
+
+  // Update the map of the object.
+  __ Mov(scratch1, Operand(transition));
+  __ Str(scratch1, FieldMemOperand(receiver_reg, HeapObject::kMapOffset));
+
+  // Update the write barrier for the map field.
+  __ RecordWriteField(receiver_reg,
+                      HeapObject::kMapOffset,
+                      scratch1,
+                      scratch2,
+                      kLRHasNotBeenSaved,
+                      kDontSaveFPRegs,
+                      OMIT_REMEMBERED_SET,
+                      OMIT_SMI_CHECK);
+
+  // A constant property needs no field store: the value was already verified
+  // to match, so only the map transition above was required.
+  if (details.type() == CONSTANT) {
+    ASSERT(value_reg.is(x0));
+    __ Ret();
+    return;
+  }
+
+  int index = transition->instance_descriptors()->GetFieldIndex(
+      transition->LastAdded());
+
+  // Adjust for the number of properties stored in the object. Even in the
+  // face of a transition we can use the old map here because the size of the
+  // object and the number of in-object properties is not going to change.
+  index -= object->map()->inobject_properties();
+
+  // TODO(verwaest): Share this code as a code stub.
+  SmiCheck smi_check = representation.IsTagged()
+      ? INLINE_SMI_CHECK : OMIT_SMI_CHECK;
+  // A negative index denotes an in-object property; non-negative indexes
+  // address the out-of-line properties array.
+  if (index < 0) {
+    // Set the property straight into the object.
+    int offset = object->map()->instance_size() + (index * kPointerSize);
+    // TODO(jbramley): This construct appears in several places in this
+    // function. Try to clean it up, perhaps using a result_reg.
+    if (FLAG_track_double_fields && representation.IsDouble()) {
+      __ Str(storage_reg, FieldMemOperand(receiver_reg, offset));
+    } else {
+      __ Str(value_reg, FieldMemOperand(receiver_reg, offset));
+    }
+
+    if (!FLAG_track_fields || !representation.IsSmi()) {
+      // Update the write barrier for the array address.
+      if (!FLAG_track_double_fields || !representation.IsDouble()) {
+        __ Mov(storage_reg, value_reg);
+      }
+      __ RecordWriteField(receiver_reg,
+                          offset,
+                          storage_reg,
+                          scratch1,
+                          kLRHasNotBeenSaved,
+                          kDontSaveFPRegs,
+                          EMIT_REMEMBERED_SET,
+                          smi_check);
+    }
+  } else {
+    // Write to the properties array.
+    int offset = index * kPointerSize + FixedArray::kHeaderSize;
+    // Get the properties array
+    __ Ldr(scratch1,
+           FieldMemOperand(receiver_reg, JSObject::kPropertiesOffset));
+    if (FLAG_track_double_fields && representation.IsDouble()) {
+      __ Str(storage_reg, FieldMemOperand(scratch1, offset));
+    } else {
+      __ Str(value_reg, FieldMemOperand(scratch1, offset));
+    }
+
+    if (!FLAG_track_fields || !representation.IsSmi()) {
+      // Update the write barrier for the array address.
+      if (!FLAG_track_double_fields || !representation.IsDouble()) {
+        __ Mov(storage_reg, value_reg);
+      }
+      __ RecordWriteField(scratch1,
+                          offset,
+                          storage_reg,
+                          receiver_reg,
+                          kLRHasNotBeenSaved,
+                          kDontSaveFPRegs,
+                          EMIT_REMEMBERED_SET,
+                          smi_check);
+    }
+  }
+
+  __ Bind(&exit);
+  // Return the value (register x0).
+  ASSERT(value_reg.is(x0));
+  __ Ret();
+}
+
+
+// Generate StoreField code, value is passed in x0 register.
+// When leaving generated code after success, the receiver_reg and name_reg may
+// be clobbered. Upon branch to miss_label, the receiver and name registers have
+// their original values.
+void StoreStubCompiler::GenerateStoreField(MacroAssembler* masm,
+                                           Handle<JSObject> object,
+                                           LookupResult* lookup,
+                                           Register receiver_reg,
+                                           Register name_reg,
+                                           Register value_reg,
+                                           Register scratch1,
+                                           Register scratch2,
+                                           Label* miss_label) {
+  // x0 : value
+  Label exit;
+
+  // Stub never generated for non-global objects that require access
+  // checks.
+  ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
+
+  int index = lookup->GetFieldIndex().field_index();
+
+  // Adjust for the number of properties stored in the object. Even in the
+  // face of a transition we can use the old map here because the size of the
+  // object and the number of in-object properties is not going to change.
+  index -= object->map()->inobject_properties();
+
+  // Validate the stored value against the field's representation. Doubles
+  // are written into the field's existing heap-number storage.
+  Representation representation = lookup->representation();
+  ASSERT(!representation.IsNone());
+  if (FLAG_track_fields && representation.IsSmi()) {
+    __ JumpIfNotSmi(value_reg, miss_label);
+  } else if (FLAG_track_heap_object_fields && representation.IsHeapObject()) {
+    __ JumpIfSmi(value_reg, miss_label);
+  } else if (FLAG_track_double_fields && representation.IsDouble()) {
+    // Load the double storage. A negative index denotes an in-object
+    // property; non-negative indexes address the properties array.
+    if (index < 0) {
+      int offset = (index * kPointerSize) + object->map()->instance_size();
+      __ Ldr(scratch1, FieldMemOperand(receiver_reg, offset));
+    } else {
+      int offset = (index * kPointerSize) + FixedArray::kHeaderSize;
+      __ Ldr(scratch1,
+             FieldMemOperand(receiver_reg, JSObject::kPropertiesOffset));
+      __ Ldr(scratch1, FieldMemOperand(scratch1, offset));
+    }
+
+    // Store the value into the storage.
+    Label do_store, heap_number;
+    // TODO(jbramley): Is fp_scratch the most appropriate FP scratch register?
+    // It's only used in Fcmp, but it's not really safe to use it like this.
+    __ JumpIfNotSmi(value_reg, &heap_number);
+    __ SmiUntagToDouble(fp_scratch, value_reg);
+    __ B(&do_store);
+
+    __ Bind(&heap_number);
+    __ CheckMap(value_reg, scratch2, Heap::kHeapNumberMapRootIndex,
+                miss_label, DONT_DO_SMI_CHECK);
+    __ Ldr(fp_scratch, FieldMemOperand(value_reg, HeapNumber::kValueOffset));
+
+    __ Bind(&do_store);
+    __ Str(fp_scratch, FieldMemOperand(scratch1, HeapNumber::kValueOffset));
+
+    // Return the value (register x0).
+    ASSERT(value_reg.is(x0));
+    __ Ret();
+    return;
+  }
+
+  // TODO(verwaest): Share this code as a code stub.
+  SmiCheck smi_check = representation.IsTagged()
+      ? INLINE_SMI_CHECK : OMIT_SMI_CHECK;
+  if (index < 0) {
+    // Set the property straight into the object.
+    int offset = object->map()->instance_size() + (index * kPointerSize);
+    __ Str(value_reg, FieldMemOperand(receiver_reg, offset));
+
+    if (!FLAG_track_fields || !representation.IsSmi()) {
+      // Skip updating write barrier if storing a smi.
+      __ JumpIfSmi(value_reg, &exit);
+
+      // Update the write barrier for the array address.
+      // Pass the now unused name_reg as a scratch register.
+      __ Mov(name_reg, value_reg);
+      __ RecordWriteField(receiver_reg,
+                          offset,
+                          name_reg,
+                          scratch1,
+                          kLRHasNotBeenSaved,
+                          kDontSaveFPRegs,
+                          EMIT_REMEMBERED_SET,
+                          smi_check);
+    }
+  } else {
+    // Write to the properties array.
+    int offset = index * kPointerSize + FixedArray::kHeaderSize;
+    // Get the properties array
+    __ Ldr(scratch1,
+           FieldMemOperand(receiver_reg, JSObject::kPropertiesOffset));
+    __ Str(value_reg, FieldMemOperand(scratch1, offset));
+
+    if (!FLAG_track_fields || !representation.IsSmi()) {
+      // Skip updating write barrier if storing a smi.
+      __ JumpIfSmi(value_reg, &exit);
+
+      // Update the write barrier for the array address.
+      // Ok to clobber receiver_reg and name_reg, since we return.
+      __ Mov(name_reg, value_reg);
+      __ RecordWriteField(scratch1,
+                          offset,
+                          name_reg,
+                          receiver_reg,
+                          kLRHasNotBeenSaved,
+                          kDontSaveFPRegs,
+                          EMIT_REMEMBERED_SET,
+                          smi_check);
+    }
+  }
+
+  __ Bind(&exit);
+  // Return the value (register x0).
+  ASSERT(value_reg.is(x0));
+  __ Ret();
+}
+
+
+// Binds 'label' (if it was used) and reloads 'name' into the name register,
+// restoring the IC's expected register state before a miss handler runs.
+void StoreStubCompiler::GenerateRestoreName(MacroAssembler* masm,
+                                            Label* label,
+                                            Handle<Name> name) {
+  if (!label->is_unused()) {
+    __ Bind(label);
+    __ Mov(this->name(), Operand(name));
+  }
+}
+
+
+// Pushes the interceptor call arguments: the push order must produce the
+// stack layout asserted by the kInterceptorArgs* indices below. Note that
+// 'name' is reused as a scratch register after it has been pushed.
+static void PushInterceptorArguments(MacroAssembler* masm,
+                                     Register receiver,
+                                     Register holder,
+                                     Register name,
+                                     Handle<JSObject> holder_obj) {
+  STATIC_ASSERT(StubCache::kInterceptorArgsNameIndex == 0);
+  STATIC_ASSERT(StubCache::kInterceptorArgsInfoIndex == 1);
+  STATIC_ASSERT(StubCache::kInterceptorArgsThisIndex == 2);
+  STATIC_ASSERT(StubCache::kInterceptorArgsHolderIndex == 3);
+  STATIC_ASSERT(StubCache::kInterceptorArgsLength == 4);
+
+  __ Push(name);
+  Handle<InterceptorInfo> interceptor(holder_obj->GetNamedInterceptor());
+  ASSERT(!masm->isolate()->heap()->InNewSpace(*interceptor));
+  Register scratch = name;
+  __ Mov(scratch, Operand(interceptor));
+  __ Push(scratch, receiver, holder);
+}
+
+
+// Pushes the interceptor arguments and calls the IC utility identified by
+// 'id' to perform the interceptor property load in the runtime.
+static void CompileCallLoadPropertyWithInterceptor(
+    MacroAssembler* masm,
+    Register receiver,
+    Register holder,
+    Register name,
+    Handle<JSObject> holder_obj,
+    IC::UtilityId id) {
+  PushInterceptorArguments(masm, receiver, holder, name, holder_obj);
+
+  __ CallExternalReference(
+      ExternalReference(IC_Utility(id), masm->isolate()),
+      StubCache::kInterceptorArgsLength);
+}
+
+
+// Generate a tail call to a fast (simple) API function. Pushes the receiver
+// and |argc| argument values, sets up the CallApiFunctionStub ABI registers
+// (callee, call data, holder, function address) and tail-calls the stub.
+static void GenerateFastApiCall(MacroAssembler* masm,
+                                const CallOptimization& optimization,
+                                Handle<Map> receiver_map,
+                                Register receiver,
+                                Register scratch,
+                                int argc,
+                                Register* values) {
+  ASSERT(!AreAliased(receiver, scratch));
+  __ Push(receiver);
+  // Write the arguments to stack frame.
+  for (int i = 0; i < argc; i++) {
+    // TODO(jbramley): Push these in as few Push() calls as possible.
+    Register arg = values[argc-1-i];
+    ASSERT(!AreAliased(receiver, scratch, arg));
+    __ Push(arg);
+  }
+
+  ASSERT(optimization.is_simple_api_call());
+
+  // Abi for CallApiFunctionStub.
+  Register callee = x0;
+  Register call_data = x4;
+  Register holder = x2;
+  Register api_function_address = x1;
+
+  // Put holder in place.
+  CallOptimization::HolderLookup holder_lookup;
+  Handle<JSObject> api_holder =
+      optimization.LookupHolderOfExpectedType(receiver_map, &holder_lookup);
+  switch (holder_lookup) {
+    case CallOptimization::kHolderIsReceiver:
+      __ Mov(holder, receiver);
+      break;
+    case CallOptimization::kHolderFound:
+      __ LoadObject(holder, api_holder);
+      break;
+    case CallOptimization::kHolderNotFound:
+      // Callers only reach here after a successful holder lookup.
+      UNREACHABLE();
+      break;
+  }
+
+  Isolate* isolate = masm->isolate();
+  Handle<JSFunction> function = optimization.constant_function();
+  Handle<CallHandlerInfo> api_call_info = optimization.api_call_info();
+  Handle<Object> call_data_obj(api_call_info->data(), isolate);
+
+  // Put callee in place.
+  __ LoadObject(callee, function);
+
+  bool call_data_undefined = false;
+  // Put call_data in place. If the call data lives in new space it cannot be
+  // embedded in the code, so load it from the CallHandlerInfo instead.
+  if (isolate->heap()->InNewSpace(*call_data_obj)) {
+    __ LoadObject(call_data, api_call_info);
+    __ Ldr(call_data, FieldMemOperand(call_data, CallHandlerInfo::kDataOffset));
+  } else if (call_data_obj->IsUndefined()) {
+    call_data_undefined = true;
+    __ LoadRoot(call_data, Heap::kUndefinedValueRootIndex);
+  } else {
+    __ LoadObject(call_data, call_data_obj);
+  }
+
+  // Put api_function_address in place.
+  Address function_address = v8::ToCData<Address>(api_call_info->callback());
+  ApiFunction fun(function_address);
+  ExternalReference ref = ExternalReference(&fun,
+                                            ExternalReference::DIRECT_API_CALL,
+                                            masm->isolate());
+  __ Mov(api_function_address, Operand(ref));
+
+  // Jump to stub.
+  CallApiFunctionStub stub(true, call_data_undefined, argc);
+  __ TailCallStub(&stub);
+}
+
+
+// Tail-call into another code object; the callee takes over this frame.
+void StubCompiler::GenerateTailCall(MacroAssembler* masm, Handle<Code> code) {
+  __ Jump(code, RelocInfo::CODE_TARGET);
+}
+
+
+#undef __
+#define __ ACCESS_MASM(masm())
+
+
+// Walk the prototype chain from |object_reg| up to |holder|, verifying each
+// map (or doing a negative dictionary lookup for normal objects) and jumping
+// to |miss| on any mismatch. Returns the register holding the holder object.
+// scratch1/scratch2 are clobbered; object_reg and holder_reg may alias.
+Register StubCompiler::CheckPrototypes(Handle<HeapType> type,
+                                       Register object_reg,
+                                       Handle<JSObject> holder,
+                                       Register holder_reg,
+                                       Register scratch1,
+                                       Register scratch2,
+                                       Handle<Name> name,
+                                       Label* miss,
+                                       PrototypeCheckType check) {
+  Handle<Map> receiver_map(IC::TypeToMap(*type, isolate()));
+  // Make sure that the type feedback oracle harvests the receiver map.
+  // TODO(svenpanne) Remove this hack when all ICs are reworked.
+  __ Mov(scratch1, Operand(receiver_map));
+
+  // object_reg and holder_reg registers can alias.
+  ASSERT(!AreAliased(object_reg, scratch1, scratch2));
+  ASSERT(!AreAliased(holder_reg, scratch1, scratch2));
+
+  // Keep track of the current object in register reg.
+  Register reg = object_reg;
+  int depth = 0;
+
+  Handle<JSObject> current = Handle<JSObject>::null();
+  if (type->IsConstant()) {
+    current = Handle<JSObject>::cast(type->AsConstant());
+  }
+  Handle<JSObject> prototype = Handle<JSObject>::null();
+  Handle<Map> current_map = receiver_map;
+  Handle<Map> holder_map(holder->map());
+  // Traverse the prototype chain and check the maps in the prototype chain for
+  // fast and global objects or do negative lookup for normal objects.
+  while (!current_map.is_identical_to(holder_map)) {
+    ++depth;
+
+    // Only global objects and objects that do not require access
+    // checks are allowed in stubs.
+    ASSERT(current_map->IsJSGlobalProxyMap() ||
+           !current_map->is_access_check_needed());
+
+    prototype = handle(JSObject::cast(current_map->prototype()));
+    if (current_map->is_dictionary_map() &&
+        !current_map->IsJSGlobalObjectMap() &&
+        !current_map->IsJSGlobalProxyMap()) {
+      // Normal (dictionary-mode) object: prove the name is absent rather
+      // than checking the map, since dictionary maps are not stable.
+      if (!name->IsUniqueName()) {
+        ASSERT(name->IsString());
+        name = factory()->InternalizeString(Handle<String>::cast(name));
+      }
+      ASSERT(current.is_null() ||
+             (current->property_dictionary()->FindEntry(*name) ==
+              NameDictionary::kNotFound));
+
+      GenerateDictionaryNegativeLookup(masm(), miss, reg, name,
+                                       scratch1, scratch2);
+
+      __ Ldr(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset));
+      reg = holder_reg;  // From now on the object will be in holder_reg.
+      __ Ldr(reg, FieldMemOperand(scratch1, Map::kPrototypeOffset));
+    } else {
+      Register map_reg = scratch1;
+      // TODO(jbramley): Skip this load when we don't need the map.
+      __ Ldr(map_reg, FieldMemOperand(reg, HeapObject::kMapOffset));
+
+      if (depth != 1 || check == CHECK_ALL_MAPS) {
+        __ CheckMap(map_reg, current_map, miss, DONT_DO_SMI_CHECK);
+      }
+
+      // Check access rights to the global object. This has to happen after
+      // the map check so that we know that the object is actually a global
+      // object.
+      if (current_map->IsJSGlobalProxyMap()) {
+        __ CheckAccessGlobalProxy(reg, scratch2, miss);
+      } else if (current_map->IsJSGlobalObjectMap()) {
+        GenerateCheckPropertyCell(
+            masm(), Handle<JSGlobalObject>::cast(current), name,
+            scratch2, miss);
+      }
+
+      reg = holder_reg;  // From now on the object will be in holder_reg.
+
+      if (heap()->InNewSpace(*prototype)) {
+        // The prototype is in new space; we cannot store a reference to it
+        // in the code. Load it from the map.
+        __ Ldr(reg, FieldMemOperand(map_reg, Map::kPrototypeOffset));
+      } else {
+        // The prototype is in old space; load it directly.
+        __ Mov(reg, Operand(prototype));
+      }
+    }
+
+    // Go to the next object in the prototype chain.
+    current = prototype;
+    current_map = handle(current->map());
+  }
+
+  // Log the check depth.
+  LOG(isolate(), IntEvent("check-maps-depth", depth + 1));
+
+  // Check the holder map.
+  if (depth != 0 || check == CHECK_ALL_MAPS) {
+    // Check the holder map.
+    __ CheckMap(reg, scratch1, current_map, miss, DONT_DO_SMI_CHECK);
+  }
+
+  // Perform security check for access to the global object.
+  ASSERT(current_map->IsJSGlobalProxyMap() ||
+         !current_map->is_access_check_needed());
+  if (current_map->IsJSGlobalProxyMap()) {
+    __ CheckAccessGlobalProxy(reg, scratch1, miss);
+  }
+
+  // Return the register containing the holder.
+  return reg;
+}
+
+
+// If any fast path branched to |miss|, emit the miss handler tail call;
+// otherwise emit nothing.
+void LoadStubCompiler::HandlerFrontendFooter(Handle<Name> name, Label* miss) {
+  if (!miss->is_unused()) {
+    Label success;
+    __ B(&success);
+
+    __ Bind(miss);
+    TailCallBuiltin(masm(), MissBuiltin(kind()));
+
+    __ Bind(&success);
+  }
+}
+
+
+// Store variant of the frontend footer: restore the (possibly clobbered)
+// name register before tail-calling the miss builtin.
+void StoreStubCompiler::HandlerFrontendFooter(Handle<Name> name, Label* miss) {
+  if (!miss->is_unused()) {
+    Label success;
+    __ B(&success);
+
+    GenerateRestoreName(masm(), miss, name);
+    TailCallBuiltin(masm(), MissBuiltin(kind()));
+
+    __ Bind(&success);
+  }
+}
+
+
+// Frontend for callback loads: run the usual handler frontend, and for
+// slow-mode holders additionally verify via a dictionary probe that the
+// stored value really is |callback|. Returns the holder register.
+Register LoadStubCompiler::CallbackHandlerFrontend(Handle<HeapType> type,
+                                                   Register object_reg,
+                                                   Handle<JSObject> holder,
+                                                   Handle<Name> name,
+                                                   Handle<Object> callback) {
+  Label miss;
+
+  Register reg = HandlerFrontendHeader(type, object_reg, holder, name, &miss);
+
+  // TODO(jbramley): HandlerFrontendHeader returns its result in scratch1(), so
+  // we can't use it below, but that isn't very obvious. Is there a better way
+  // of handling this?
+
+  if (!holder->HasFastProperties() && !holder->IsJSGlobalObject()) {
+    ASSERT(!AreAliased(reg, scratch2(), scratch3(), scratch4()));
+
+    // Load the properties dictionary.
+    Register dictionary = scratch4();
+    __ Ldr(dictionary, FieldMemOperand(reg, JSObject::kPropertiesOffset));
+
+    // Probe the dictionary.
+    Label probe_done;
+    NameDictionaryLookupStub::GeneratePositiveLookup(masm(),
+                                                     &miss,
+                                                     &probe_done,
+                                                     dictionary,
+                                                     this->name(),
+                                                     scratch2(),
+                                                     scratch3());
+    __ Bind(&probe_done);
+
+    // If probing finds an entry in the dictionary, scratch3 contains the
+    // pointer into the dictionary. Check that the value is the callback.
+    Register pointer = scratch3();
+    const int kElementsStartOffset = NameDictionary::kHeaderSize +
+        NameDictionary::kElementsStartIndex * kPointerSize;
+    const int kValueOffset = kElementsStartOffset + kPointerSize;
+    __ Ldr(scratch2(), FieldMemOperand(pointer, kValueOffset));
+    __ Cmp(scratch2(), Operand(callback));
+    __ B(ne, &miss);
+  }
+
+  HandlerFrontendFooter(name, &miss);
+  return reg;
+}
+
+
+// Tail-call a (Keyed)LoadFieldStub to load an in-object or properties-array
+// field. |reg| holds the holder and is moved into the receiver register the
+// stub expects.
+void LoadStubCompiler::GenerateLoadField(Register reg,
+                                         Handle<JSObject> holder,
+                                         PropertyIndex field,
+                                         Representation representation) {
+  __ Mov(receiver(), reg);
+  if (kind() == Code::LOAD_IC) {
+    LoadFieldStub stub(field.is_inobject(holder),
+                       field.translate(holder),
+                       representation);
+    GenerateTailCall(masm(), stub.GetCode(isolate()));
+  } else {
+    KeyedLoadFieldStub stub(field.is_inobject(holder),
+                            field.translate(holder),
+                            representation);
+    GenerateTailCall(masm(), stub.GetCode(isolate()));
+  }
+}
+
+
+// Load a constant property: materialize |value| in x0 and return.
+void LoadStubCompiler::GenerateLoadConstant(Handle<Object> value) {
+  // Return the constant value.
+  __ LoadObject(x0, value);
+  __ Ret();
+}
+
+
+// Load via a simple API getter: delegate to GenerateFastApiCall with no
+// extra arguments.
+void LoadStubCompiler::GenerateLoadCallback(
+    const CallOptimization& call_optimization,
+    Handle<Map> receiver_map) {
+  GenerateFastApiCall(
+      masm(), call_optimization, receiver_map, receiver(), scratch3(), 0, NULL);
+}
+
+
+// Load via an ExecutableAccessorInfo getter: build the PropertyCallbackArguments
+// block on the stack, load the getter's address, and tail-call CallApiGetterStub.
+void LoadStubCompiler::GenerateLoadCallback(
+    Register reg,
+    Handle<ExecutableAccessorInfo> callback) {
+  ASSERT(!AreAliased(scratch2(), scratch3(), scratch4(), reg));
+
+  // Build ExecutableAccessorInfo::args_ list on the stack and push property
+  // name below the exit frame to make GC aware of them and store pointers to
+  // them.
+  STATIC_ASSERT(PropertyCallbackArguments::kHolderIndex == 0);
+  STATIC_ASSERT(PropertyCallbackArguments::kIsolateIndex == 1);
+  STATIC_ASSERT(PropertyCallbackArguments::kReturnValueDefaultValueIndex == 2);
+  STATIC_ASSERT(PropertyCallbackArguments::kReturnValueOffset == 3);
+  STATIC_ASSERT(PropertyCallbackArguments::kDataIndex == 4);
+  STATIC_ASSERT(PropertyCallbackArguments::kThisIndex == 5);
+  STATIC_ASSERT(PropertyCallbackArguments::kArgsLength == 6);
+
+  __ Push(receiver());
+
+  // Callback data in new space must be loaded from the AccessorInfo object,
+  // since it cannot be embedded in generated code.
+  if (heap()->InNewSpace(callback->data())) {
+    __ Mov(scratch3(), Operand(callback));
+    __ Ldr(scratch3(), FieldMemOperand(scratch3(),
+                                       ExecutableAccessorInfo::kDataOffset));
+  } else {
+    __ Mov(scratch3(), Operand(Handle<Object>(callback->data(), isolate())));
+  }
+  // TODO(jbramley): Find another scratch register and combine the pushes
+  // together. Can we use scratch1() here?
+  __ LoadRoot(scratch4(), Heap::kUndefinedValueRootIndex);
+  __ Push(scratch3(), scratch4());
+  __ Mov(scratch3(), Operand(ExternalReference::isolate_address(isolate())));
+  __ Push(scratch4(), scratch3(), reg, name());
+
+  Register args_addr = scratch2();
+  __ Add(args_addr, __ StackPointer(), kPointerSize);
+
+  // Stack at this point:
+  //              sp[40] callback data
+  //              sp[32] undefined
+  //              sp[24] undefined
+  //              sp[16] isolate
+  // args_addr -> sp[8]  reg
+  //              sp[0]  name
+
+  // Abi for CallApiGetter.
+  Register getter_address_reg = x2;
+
+  // Set up the call.
+  Address getter_address = v8::ToCData<Address>(callback->getter());
+  ApiFunction fun(getter_address);
+  ExternalReference::Type type = ExternalReference::DIRECT_GETTER_CALL;
+  ExternalReference ref = ExternalReference(&fun, type, isolate());
+  __ Mov(getter_address_reg, Operand(ref));
+
+  CallApiGetterStub stub;
+  __ TailCallStub(&stub);
+}
+
+
+// Load a property guarded by a named interceptor. If the follow-up lookup is
+// a FIELD or compatible CALLBACKS property, the interceptor probe is inlined
+// and the post-interceptor load is compiled in; otherwise the whole load is
+// delegated to the runtime. Fixes the misspelled local
+// "must_perfrom_prototype_check" -> "must_perform_prototype_check".
+void LoadStubCompiler::GenerateLoadInterceptor(
+    Register holder_reg,
+    Handle<Object> object,
+    Handle<JSObject> interceptor_holder,
+    LookupResult* lookup,
+    Handle<Name> name) {
+  ASSERT(!AreAliased(receiver(), this->name(),
+                     scratch1(), scratch2(), scratch3()));
+  ASSERT(interceptor_holder->HasNamedInterceptor());
+  ASSERT(!interceptor_holder->GetNamedInterceptor()->getter()->IsUndefined());
+
+  // So far the most popular follow ups for interceptor loads are FIELD
+  // and CALLBACKS, so inline only them, other cases may be added later.
+  bool compile_followup_inline = false;
+  if (lookup->IsFound() && lookup->IsCacheable()) {
+    if (lookup->IsField()) {
+      compile_followup_inline = true;
+    } else if (lookup->type() == CALLBACKS &&
+               lookup->GetCallbackObject()->IsExecutableAccessorInfo()) {
+      ExecutableAccessorInfo* callback =
+          ExecutableAccessorInfo::cast(lookup->GetCallbackObject());
+      compile_followup_inline = callback->getter() != NULL &&
+          callback->IsCompatibleReceiver(*object);
+    }
+  }
+
+  if (compile_followup_inline) {
+    // Compile the interceptor call, followed by inline code to load the
+    // property from further up the prototype chain if the call fails.
+    // Check that the maps haven't changed.
+    ASSERT(holder_reg.is(receiver()) || holder_reg.is(scratch1()));
+
+    // Preserve the receiver register explicitly whenever it is different from
+    // the holder and it is needed should the interceptor return without any
+    // result. The CALLBACKS case needs the receiver to be passed into C++ code,
+    // the FIELD case might cause a miss during the prototype check.
+    bool must_perform_prototype_check = *interceptor_holder != lookup->holder();
+    bool must_preserve_receiver_reg = !receiver().Is(holder_reg) &&
+        (lookup->type() == CALLBACKS || must_perform_prototype_check);
+
+    // Save necessary data before invoking an interceptor.
+    // Requires a frame to make GC aware of pushed pointers.
+    {
+      FrameScope frame_scope(masm(), StackFrame::INTERNAL);
+      if (must_preserve_receiver_reg) {
+        __ Push(receiver(), holder_reg, this->name());
+      } else {
+        __ Push(holder_reg, this->name());
+      }
+      // Invoke an interceptor. Note: map checks from receiver to
+      // interceptor's holder has been compiled before (see a caller
+      // of this method.)
+      CompileCallLoadPropertyWithInterceptor(
+          masm(), receiver(), holder_reg, this->name(), interceptor_holder,
+          IC::kLoadPropertyWithInterceptorOnly);
+
+      // Check if interceptor provided a value for property. If it's
+      // the case, return immediately.
+      Label interceptor_failed;
+      __ JumpIfRoot(x0,
+                    Heap::kNoInterceptorResultSentinelRootIndex,
+                    &interceptor_failed);
+      frame_scope.GenerateLeaveFrame();
+      __ Ret();
+
+      __ Bind(&interceptor_failed);
+      if (must_preserve_receiver_reg) {
+        __ Pop(this->name(), holder_reg, receiver());
+      } else {
+        __ Pop(this->name(), holder_reg);
+      }
+      // Leave the internal frame.
+    }
+    GenerateLoadPostInterceptor(holder_reg, interceptor_holder, name, lookup);
+  } else {  // !compile_followup_inline
+    // Call the runtime system to load the interceptor.
+    // Check that the maps haven't changed.
+    PushInterceptorArguments(
+        masm(), receiver(), holder_reg, this->name(), interceptor_holder);
+
+    ExternalReference ref =
+        ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorForLoad),
+                          isolate());
+    __ TailCallExternalReference(ref, StubCache::kInterceptorArgsLength, 1);
+  }
+}
+
+
+// Jump to |miss| unless |object| is the true or false root value.
+void StubCompiler::GenerateBooleanCheck(Register object, Label* miss) {
+  Label success;
+  // Check that the object is a boolean.
+  // TODO(all): Optimize this like LCodeGen::DoDeferredTaggedToI.
+  __ JumpIfRoot(object, Heap::kTrueValueRootIndex, &success);
+  __ JumpIfNotRoot(object, Heap::kFalseValueRootIndex, miss);
+  __ Bind(&success);
+}
+
+
+// Compile a store handler that pushes (receiver, holder, callback, name,
+// value) and tail-calls the kStoreCallbackProperty runtime function.
+Handle<Code> StoreStubCompiler::CompileStoreCallback(
+    Handle<JSObject> object,
+    Handle<JSObject> holder,
+    Handle<Name> name,
+    Handle<ExecutableAccessorInfo> callback) {
+  ASM_LOCATION("StoreStubCompiler::CompileStoreCallback");
+  Register holder_reg = HandlerFrontend(
+      IC::CurrentTypeOf(object, isolate()), receiver(), holder, name);
+
+  // Stub never generated for non-global objects that require access checks.
+  ASSERT(holder->IsJSGlobalProxy() || !holder->IsAccessCheckNeeded());
+
+  // TODO(jbramley): Make Push take more than four arguments and combine these
+  // two calls.
+  __ Push(receiver(), holder_reg);
+  __ Mov(scratch1(), Operand(callback));
+  __ Mov(scratch2(), Operand(name));
+  __ Push(scratch1(), scratch2(), value());
+
+  // Do tail-call to the runtime system.
+  ExternalReference store_callback_property =
+      ExternalReference(IC_Utility(IC::kStoreCallbackProperty), isolate());
+  __ TailCallExternalReference(store_callback_property, 5, 1);
+
+  // Return the generated code.
+  return GetCode(kind(), Code::FAST, name);
+}
+
+
+#undef __
+#define __ ACCESS_MASM(masm)
+
+
+// Store via a JavaScript setter: save the value, invoke |setter| (or record
+// the deopt continuation point if it is null), then return the original
+// value as the store result. Removes an unused "Label miss" local.
+void StoreStubCompiler::GenerateStoreViaSetter(
+    MacroAssembler* masm,
+    Handle<HeapType> type,
+    Handle<JSFunction> setter) {
+  // ----------- S t a t e -------------
+  //  -- x0    : value
+  //  -- x1    : receiver
+  //  -- x2    : name
+  //  -- lr    : return address
+  // -----------------------------------
+  Register value = x0;
+  Register receiver = x1;
+
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+
+    // Save value register, so we can restore it later.
+    __ Push(value);
+
+    if (!setter.is_null()) {
+      // Call the JavaScript setter with receiver and value on the stack.
+      if (IC::TypeToMap(*type, masm->isolate())->IsJSGlobalObjectMap()) {
+        // Swap in the global receiver.
+        __ Ldr(receiver,
+               FieldMemOperand(
+                   receiver, JSGlobalObject::kGlobalReceiverOffset));
+      }
+      __ Push(receiver, value);
+      ParameterCount actual(1);
+      ParameterCount expected(setter);
+      __ InvokeFunction(setter, expected, actual,
+                        CALL_FUNCTION, NullCallWrapper());
+    } else {
+      // If we generate a global code snippet for deoptimization only, remember
+      // the place to continue after deoptimization.
+      masm->isolate()->heap()->SetSetterStubDeoptPCOffset(masm->pc_offset());
+    }
+
+    // We have to return the passed value, not the return value of the setter.
+    __ Pop(value);
+
+    // Restore context register.
+    __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+  }
+  __ Ret();
+}
+
+
+#undef __
+#define __ ACCESS_MASM(masm())
+
+
+// Compile a store handler for objects with a store interceptor: verify the
+// receiver map (and global proxy access, if needed), then tail-call the
+// kStoreInterceptorProperty runtime function.
+Handle<Code> StoreStubCompiler::CompileStoreInterceptor(
+    Handle<JSObject> object,
+    Handle<Name> name) {
+  Label miss;
+
+  ASM_LOCATION("StoreStubCompiler::CompileStoreInterceptor");
+
+  // Check that the map of the object hasn't changed.
+  __ CheckMap(receiver(), scratch1(), Handle<Map>(object->map()), &miss,
+              DO_SMI_CHECK);
+
+  // Perform global security token check if needed.
+  if (object->IsJSGlobalProxy()) {
+    __ CheckAccessGlobalProxy(receiver(), scratch1(), &miss);
+  }
+
+  // Stub is never generated for non-global objects that require access checks.
+  ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
+
+  __ Push(receiver(), this->name(), value());
+
+  // Do tail-call to the runtime system.
+  ExternalReference store_ic_property =
+      ExternalReference(IC_Utility(IC::kStoreInterceptorProperty), isolate());
+  __ TailCallExternalReference(store_ic_property, 3, 1);
+
+  // Handle store cache miss.
+  __ Bind(&miss);
+  TailCallBuiltin(masm(), MissBuiltin(kind()));
+
+  // Return the generated code.
+  return GetCode(kind(), Code::FAST, name);
+}
+
+
+// Compile a handler that returns undefined for a property proven absent
+// along the whole prototype chain (see NonexistentHandlerFrontend).
+Handle<Code> LoadStubCompiler::CompileLoadNonexistent(Handle<HeapType> type,
+                                                      Handle<JSObject> last,
+                                                      Handle<Name> name) {
+  NonexistentHandlerFrontend(type, last, name);
+
+  // Return undefined if maps of the full prototype chain are still the
+  // same and no global property with this name contains a value.
+  __ LoadRoot(x0, Heap::kUndefinedValueRootIndex);
+  __ Ret();
+
+  // Return the generated code.
+  return GetCode(kind(), Code::FAST, name);
+}
+
+
+// TODO(all): The so-called scratch registers are significant in some cases. For
+// example, KeyedStoreStubCompiler::registers()[3] (x3) is actually used for
+// KeyedStoreStubCompiler::transition_map(). We should verify which registers
+// actually scratch registers, and which are important. For now, we use the same
+// assignments as ARM to remain on the safe side.
+
+// Register assignment for LOAD_IC handlers; order must match the IC ABI.
+Register* LoadStubCompiler::registers() {
+  // receiver, name, scratch1, scratch2, scratch3, scratch4.
+  static Register registers[] = { x0, x2, x3, x1, x4, x5 };
+  return registers;
+}
+
+
+// Register assignment for KEYED_LOAD_IC handlers; order must match the IC ABI.
+Register* KeyedLoadStubCompiler::registers() {
+  // receiver, name/key, scratch1, scratch2, scratch3, scratch4.
+  static Register registers[] = { x1, x0, x2, x3, x4, x5 };
+  return registers;
+}
+
+
+// Register assignment for STORE_IC handlers; order must match the IC ABI.
+Register* StoreStubCompiler::registers() {
+  // receiver, name, value, scratch1, scratch2, scratch3.
+  static Register registers[] = { x1, x2, x0, x3, x4, x5 };
+  return registers;
+}
+
+
+// Register assignment for KEYED_STORE_IC handlers; note registers[3] (x3)
+// doubles as transition_map() — see the comment above LoadStubCompiler.
+Register* KeyedStoreStubCompiler::registers() {
+  // receiver, name, value, scratch1, scratch2, scratch3.
+  static Register registers[] = { x2, x1, x0, x3, x4, x5 };
+  return registers;
+}
+
+
+#undef __
+#define __ ACCESS_MASM(masm)
+
+// Load via a JavaScript getter: invoke |getter| with the receiver on the
+// stack, or record the deopt continuation point if it is null.
+void LoadStubCompiler::GenerateLoadViaGetter(MacroAssembler* masm,
+                                             Handle<HeapType> type,
+                                             Register receiver,
+                                             Handle<JSFunction> getter) {
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+
+    if (!getter.is_null()) {
+      // Call the JavaScript getter with the receiver on the stack.
+      if (IC::TypeToMap(*type, masm->isolate())->IsJSGlobalObjectMap()) {
+        // Swap in the global receiver.
+        __ Ldr(receiver,
+               FieldMemOperand(
+                   receiver, JSGlobalObject::kGlobalReceiverOffset));
+      }
+      __ Push(receiver);
+      ParameterCount actual(0);
+      ParameterCount expected(getter);
+      __ InvokeFunction(getter, expected, actual,
+                        CALL_FUNCTION, NullCallWrapper());
+    } else {
+      // If we generate a global code snippet for deoptimization only, remember
+      // the place to continue after deoptimization.
+      masm->isolate()->heap()->SetGetterStubDeoptPCOffset(masm->pc_offset());
+    }
+
+    // Restore context register.
+    __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+  }
+  __ Ret();
+}
+
+
+#undef __
+#define __ ACCESS_MASM(masm())
+
+
+// Compile a load handler for a global property stored in |cell|. If the
+// property is deletable, a hole value in the cell routes to the miss path.
+Handle<Code> LoadStubCompiler::CompileLoadGlobal(
+    Handle<HeapType> type,
+    Handle<GlobalObject> global,
+    Handle<PropertyCell> cell,
+    Handle<Name> name,
+    bool is_dont_delete) {
+  Label miss;
+  HandlerFrontendHeader(type, receiver(), global, name, &miss);
+
+  // Get the value from the cell.
+  __ Mov(x3, Operand(cell));
+  __ Ldr(x4, FieldMemOperand(x3, Cell::kValueOffset));
+
+  // Check for deleted property if property can actually be deleted.
+  if (!is_dont_delete) {
+    __ JumpIfRoot(x4, Heap::kTheHoleValueRootIndex, &miss);
+  }
+
+  Counters* counters = isolate()->counters();
+  __ IncrementCounter(counters->named_load_global_stub(), 1, x1, x3);
+  __ Mov(x0, x4);
+  __ Ret();
+
+  HandlerFrontendFooter(name, &miss);
+
+  // Return the generated code.
+  return GetCode(kind(), Code::NORMAL, name);
+}
+
+
+// Compile a polymorphic IC: dispatch on the receiver map over |types|,
+// jumping to the matching handler. Deprecated maps are skipped; smis fall
+// through to the Number handler if one is present, otherwise to miss.
+Handle<Code> BaseLoadStoreStubCompiler::CompilePolymorphicIC(
+    TypeHandleList* types,
+    CodeHandleList* handlers,
+    Handle<Name> name,
+    Code::StubType type,
+    IcCheckType check) {
+  Label miss;
+
+  if (check == PROPERTY &&
+      (kind() == Code::KEYED_LOAD_IC || kind() == Code::KEYED_STORE_IC)) {
+    __ CompareAndBranch(this->name(), Operand(name), ne, &miss);
+  }
+
+  Label number_case;
+  Label* smi_target = IncludesNumberType(types) ? &number_case : &miss;
+  __ JumpIfSmi(receiver(), smi_target);
+
+  Register map_reg = scratch1();
+  __ Ldr(map_reg, FieldMemOperand(receiver(), HeapObject::kMapOffset));
+  int receiver_count = types->length();
+  int number_of_handled_maps = 0;
+  for (int current = 0; current < receiver_count; ++current) {
+    Handle<HeapType> type = types->at(current);
+    Handle<Map> map = IC::TypeToMap(*type, isolate());
+    if (!map->is_deprecated()) {
+      number_of_handled_maps++;
+      Label try_next;
+      __ Cmp(map_reg, Operand(map));
+      __ B(ne, &try_next);
+      if (type->Is(HeapType::Number())) {
+        ASSERT(!number_case.is_unused());
+        __ Bind(&number_case);
+      }
+      __ Jump(handlers->at(current), RelocInfo::CODE_TARGET);
+      __ Bind(&try_next);
+    }
+  }
+  ASSERT(number_of_handled_maps != 0);
+
+  __ Bind(&miss);
+  TailCallBuiltin(masm(), MissBuiltin(kind()));
+
+  // Return the generated code.
+  InlineCacheState state =
+      (number_of_handled_maps > 1) ? POLYMORPHIC : MONOMORPHIC;
+  return GetICCode(kind(), type, name, state);
+}
+
+
+// Compile a polymorphic keyed store: dispatch on the receiver map, loading
+// the transition map register first when the handler performs an elements
+// transition.
+Handle<Code> KeyedStoreStubCompiler::CompileStorePolymorphic(
+    MapHandleList* receiver_maps,
+    CodeHandleList* handler_stubs,
+    MapHandleList* transitioned_maps) {
+  Label miss;
+
+  ASM_LOCATION("KeyedStoreStubCompiler::CompileStorePolymorphic");
+
+  __ JumpIfSmi(receiver(), &miss);
+
+  int receiver_count = receiver_maps->length();
+  __ Ldr(scratch1(), FieldMemOperand(receiver(), HeapObject::kMapOffset));
+  for (int i = 0; i < receiver_count; i++) {
+    __ Cmp(scratch1(), Operand(receiver_maps->at(i)));
+
+    Label skip;
+    __ B(&skip, ne);
+    if (!transitioned_maps->at(i).is_null()) {
+      // This argument is used by the handler stub. For example, see
+      // ElementsTransitionGenerator::GenerateMapChangeElementsTransition.
+      __ Mov(transition_map(), Operand(transitioned_maps->at(i)));
+    }
+    __ Jump(handler_stubs->at(i), RelocInfo::CODE_TARGET);
+    __ Bind(&skip);
+  }
+
+  __ Bind(&miss);
+  TailCallBuiltin(masm(), MissBuiltin(kind()));
+
+  return GetICCode(
+      kind(), Code::NORMAL, factory()->empty_string(), POLYMORPHIC);
+}
+
+
+// Compile a store handler that forwards the value to a simple API setter
+// via GenerateFastApiCall.
+Handle<Code> StoreStubCompiler::CompileStoreCallback(
+    Handle<JSObject> object,
+    Handle<JSObject> holder,
+    Handle<Name> name,
+    const CallOptimization& call_optimization) {
+  HandlerFrontend(IC::CurrentTypeOf(object, isolate()),
+                  receiver(), holder, name);
+
+  Register values[] = { value() };
+  GenerateFastApiCall(masm(), call_optimization, handle(object->map()),
+                      receiver(), scratch3(), 1, values);
+
+  // Return the generated code.
+  return GetCode(kind(), Code::FAST, name);
+}
+
+
+#undef __
+#define __ ACCESS_MASM(masm)
+
+// Load an element from a dictionary-mode (slow) elements backing store.
+// Note result and key both use x0: the dictionary lookup overwrites the key
+// with the loaded value on success.
+void KeyedLoadStubCompiler::GenerateLoadDictionaryElement(
+    MacroAssembler* masm) {
+  // ---------- S t a t e --------------
+  //  -- lr     : return address
+  //  -- x0     : key
+  //  -- x1     : receiver
+  // -----------------------------------
+  Label slow, miss;
+
+  Register result = x0;
+  Register key = x0;
+  Register receiver = x1;
+
+  __ JumpIfNotSmi(key, &miss);
+  __ Ldr(x4, FieldMemOperand(receiver, JSObject::kElementsOffset));
+  __ LoadFromNumberDictionary(&slow, x4, key, result, x2, x3, x5, x6);
+  __ Ret();
+
+  __ Bind(&slow);
+  __ IncrementCounter(
+      masm->isolate()->counters()->keyed_load_external_array_slow(), 1, x2, x3);
+  TailCallBuiltin(masm, Builtins::kKeyedLoadIC_Slow);
+
+  // Miss case, call the runtime.
+  __ Bind(&miss);
+  TailCallBuiltin(masm, Builtins::kKeyedLoadIC_Miss);
+}
+
+
+} } // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_A64
--- /dev/null
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#if V8_TARGET_ARCH_A64
+
+#include "a64/utils-a64.h"
+
+
+namespace v8 {
+namespace internal {
+
+#define __ assm->
+
+
+// Count the number of leading zero bits in the low |width| bits of |value|.
+// |width| must be 32 or 64; returns |width| when value's tested bits are 0.
+int CountLeadingZeros(uint64_t value, int width) {
+  // TODO(jbramley): Optimize this for A64 hosts.
+  ASSERT((width == 32) || (width == 64));
+  int count = 0;
+  // Use an explicit 64-bit one: "1UL" is only 32 bits wide on LLP64 hosts
+  // (e.g. 64-bit Windows), so "1UL << 63" would be undefined behaviour there.
+  uint64_t bit_test = static_cast<uint64_t>(1) << (width - 1);
+  while ((count < width) && ((bit_test & value) == 0)) {
+    count++;
+    bit_test >>= 1;
+  }
+  return count;
+}
+
+
+// Count the number of leading bits that match the sign bit, excluding the
+// sign bit itself. |width| must be 32 or 64.
+int CountLeadingSignBits(int64_t value, int width) {
+  // TODO(jbramley): Optimize this for A64 hosts.
+  ASSERT((width == 32) || (width == 64));
+  // Complement negative values so that, either way, we count the leading
+  // zeros of a non-negative number; subtracting one excludes the sign bit.
+  uint64_t non_negative = (value >= 0) ? value : ~value;
+  return CountLeadingZeros(non_negative, width) - 1;
+}
+
+
+// Count the number of trailing zero bits in the low |width| bits of |value|.
+// |width| must be 32 or 64; returns |width| when the tested bits are all 0.
+int CountTrailingZeros(uint64_t value, int width) {
+  // TODO(jbramley): Optimize this for A64 hosts.
+  ASSERT((width == 32) || (width == 64));
+  for (int i = 0; i < width; i++) {
+    if (((value >> i) & 1) != 0) {
+      return i;
+    }
+  }
+  return width;
+}
+
+
+// Count the set bits in the low |width| bits of |value|. |width| must be
+// 32 or 64.
+int CountSetBits(uint64_t value, int width) {
+  // TODO(jbramley): Would it be useful to allow other widths? The
+  // implementation already supports them.
+  ASSERT((width == 32) || (width == 64));
+
+  // Mask out unused bits to ensure that they are not counted.
+  // Use ULL: "UL" is only 32 bits wide on LLP64 hosts, which would truncate
+  // this mask and make the width == 64 case count only the low 32 bits.
+  value &= (0xffffffffffffffffULL >> (64-width));
+
+  // Add up the set bits.
+  // The algorithm works by adding pairs of bit fields together iteratively,
+  // where the size of each bit field doubles each time.
+  // An example for an 8-bit value:
+  // Bits:  h  g  f  e  d  c  b  a
+  //         \ |   \ |   \ |   \ |
+  // value = h+g   f+e   d+c   b+a
+  //            \    |      \    |
+  // value =   h+g+f+e     d+c+b+a
+  //                  \          |
+  // value =   h+g+f+e+d+c+b+a
+  value = ((value >> 1) & 0x5555555555555555) + (value & 0x5555555555555555);
+  value = ((value >> 2) & 0x3333333333333333) + (value & 0x3333333333333333);
+  value = ((value >> 4) & 0x0f0f0f0f0f0f0f0f) + (value & 0x0f0f0f0f0f0f0f0f);
+  value = ((value >> 8) & 0x00ff00ff00ff00ff) + (value & 0x00ff00ff00ff00ff);
+  value = ((value >> 16) & 0x0000ffff0000ffff) + (value & 0x0000ffff0000ffff);
+  value = ((value >> 32) & 0x00000000ffffffff) + (value & 0x00000000ffffffff);
+
+  return value;
+}
+
+
+// Return the bit position of a single-bit mask (i.e. log2 of the mask).
+// |mask| must have exactly one bit set.
+int MaskToBit(uint64_t mask) {
+  ASSERT(CountSetBits(mask, 64) == 1);
+  return CountTrailingZeros(mask, 64);
+}
+
+
+} } // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_A64
--- /dev/null
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_A64_UTILS_A64_H_
+#define V8_A64_UTILS_A64_H_
+
+#include <cmath>
+#include "v8.h"
+#include "a64/constants-a64.h"
+
+#define REGISTER_CODE_LIST(R) \
+R(0) R(1) R(2) R(3) R(4) R(5) R(6) R(7) \
+R(8) R(9) R(10) R(11) R(12) R(13) R(14) R(15) \
+R(16) R(17) R(18) R(19) R(20) R(21) R(22) R(23) \
+R(24) R(25) R(26) R(27) R(28) R(29) R(30) R(31)
+
+namespace v8 {
+namespace internal {
+
+// Floating point representation.
+// Reinterpret the bit pattern of a float as a 32-bit unsigned integer.
+static inline uint32_t float_to_rawbits(float value) {
+  uint32_t raw = 0;
+  memcpy(&raw, &value, 4);
+  return raw;
+}
+
+
+// Reinterpret the bit pattern of a double as a 64-bit unsigned integer.
+static inline uint64_t double_to_rawbits(double value) {
+  uint64_t raw = 0;
+  memcpy(&raw, &value, 8);
+  return raw;
+}
+
+
+// Reinterpret a 32-bit unsigned integer as the float with that bit pattern.
+static inline float rawbits_to_float(uint32_t bits) {
+  float result = 0.0;
+  memcpy(&result, &bits, 4);
+  return result;
+}
+
+
+// Reinterpret a 64-bit unsigned integer as the double with that bit pattern.
+static inline double rawbits_to_double(uint64_t bits) {
+  double result = 0.0;
+  memcpy(&result, &bits, 8);
+  return result;
+}
+
+
+// Bits counting.
+int CountLeadingZeros(uint64_t value, int width);
+int CountLeadingSignBits(int64_t value, int width);
+int CountTrailingZeros(uint64_t value, int width);
+int CountSetBits(uint64_t value, int width);
+int MaskToBit(uint64_t mask);
+
+
+// NaN tests.
+// A double is a signalling NaN when it is a NaN and its quiet bit (bit 51
+// of the mantissa) is clear.
+inline bool IsSignallingNaN(double num) {
+  const uint64_t kFP64QuietNaNMask = 0x0008000000000000UL;
+  uint64_t raw = double_to_rawbits(num);
+  return std::isnan(num) && ((raw & kFP64QuietNaNMask) == 0);
+}
+
+
+// A float is a signalling NaN when it is a NaN and its quiet bit (bit 22
+// of the mantissa) is clear.
+inline bool IsSignallingNaN(float num) {
+  const uint64_t kFP32QuietNaNMask = 0x00400000UL;
+  uint32_t raw = float_to_rawbits(num);
+  return std::isnan(num) && ((raw & kFP32QuietNaNMask) == 0);
+}
+
+
+// True when |num| is a NaN but not a signalling NaN (i.e. a quiet NaN).
+template <typename T>
+inline bool IsQuietNaN(T num) {
+  return std::isnan(num) && !IsSignallingNaN(num);
+}
+
+} } // namespace v8::internal
+
+#endif // V8_A64_UTILS_A64_H_
void set_pc(int32_t value);
int32_t get_pc() const;
+ Address get_sp() {
+ return reinterpret_cast<Address>(static_cast<intptr_t>(get_register(sp)));
+ }
+
// Accessor to the internal simulator stack area.
uintptr_t StackLimit() const;
#include "ia32/assembler-ia32-inl.h"
#elif V8_TARGET_ARCH_X64
#include "x64/assembler-x64-inl.h"
+#elif V8_TARGET_ARCH_A64
+#include "a64/assembler-a64-inl.h"
#elif V8_TARGET_ARCH_ARM
#include "arm/assembler-arm-inl.h"
#elif V8_TARGET_ARCH_MIPS
#include "ia32/regexp-macro-assembler-ia32.h"
#elif V8_TARGET_ARCH_X64
#include "x64/regexp-macro-assembler-x64.h"
+#elif V8_TARGET_ARCH_A64
+#include "a64/regexp-macro-assembler-a64.h"
#elif V8_TARGET_ARCH_ARM
#include "arm/regexp-macro-assembler-arm.h"
#elif V8_TARGET_ARCH_MIPS
if (FLAG_mask_constants_with_cookie && isolate != NULL) {
jit_cookie_ = isolate->random_number_generator()->NextInt();
}
-
if (buffer == NULL) {
// Do our own buffer management.
if (buffer_size <= kMinimalBufferSize) {
function = FUNCTION_ADDR(RegExpMacroAssemblerX64::CheckStackGuardState);
#elif V8_TARGET_ARCH_IA32
function = FUNCTION_ADDR(RegExpMacroAssemblerIA32::CheckStackGuardState);
+#elif V8_TARGET_ARCH_A64
+ function = FUNCTION_ADDR(RegExpMacroAssemblerA64::CheckStackGuardState);
#elif V8_TARGET_ARCH_ARM
function = FUNCTION_ADDR(RegExpMacroAssemblerARM::CheckStackGuardState);
#elif V8_TARGET_ARCH_MIPS
// -----------------------------------------------------------------------------
// Utility functions
-inline bool is_intn(int x, int n) {
- return -(1 << (n-1)) <= x && x < (1 << (n-1));
-}
-
-inline bool is_int8(int x) { return is_intn(x, 8); }
-inline bool is_int16(int x) { return is_intn(x, 16); }
-inline bool is_int18(int x) { return is_intn(x, 18); }
-inline bool is_int24(int x) { return is_intn(x, 24); }
-
-inline bool is_uintn(int x, int n) {
- return (x & -(1 << n)) == 0;
-}
-
-inline bool is_uint2(int x) { return is_uintn(x, 2); }
-inline bool is_uint3(int x) { return is_uintn(x, 3); }
-inline bool is_uint4(int x) { return is_uintn(x, 4); }
-inline bool is_uint5(int x) { return is_uintn(x, 5); }
-inline bool is_uint6(int x) { return is_uintn(x, 6); }
-inline bool is_uint8(int x) { return is_uintn(x, 8); }
-inline bool is_uint10(int x) { return is_uintn(x, 10); }
-inline bool is_uint12(int x) { return is_uintn(x, 12); }
-inline bool is_uint16(int x) { return is_uintn(x, 16); }
-inline bool is_uint24(int x) { return is_uintn(x, 24); }
-inline bool is_uint26(int x) { return is_uintn(x, 26); }
-inline bool is_uint28(int x) { return is_uintn(x, 28); }
-
inline int NumberOfBitsSet(uint32_t x) {
unsigned int num_bits_set;
for (num_bits_set = 0; x; x >>= 1) {
#include "atomicops_internals_x86_macosx.h"
#elif defined(__GNUC__) && (V8_HOST_ARCH_IA32 || V8_HOST_ARCH_X64)
#include "atomicops_internals_x86_gcc.h"
+#elif defined(__GNUC__) && V8_HOST_ARCH_A64
+#include "atomicops_internals_a64_gcc.h"
#elif defined(__GNUC__) && V8_HOST_ARCH_ARM
#include "atomicops_internals_arm_gcc.h"
#elif defined(__GNUC__) && V8_HOST_ARCH_MIPS
--- /dev/null
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// This file is an internal atomic implementation, use atomicops.h instead.
+
+#ifndef V8_ATOMICOPS_INTERNALS_A64_GCC_H_
+#define V8_ATOMICOPS_INTERNALS_A64_GCC_H_
+
+namespace v8 {
+namespace internal {
+
+inline void MemoryBarrier() { /* Not used; a full barrier is 'dmb ish'. */ }
+
+inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
+                                         Atomic32 old_value,
+                                         Atomic32 new_value) {
+  Atomic32 prev;
+  int32_t temp;
+
+  __asm__ __volatile__ (  // NOLINT
+    "0:                                    \n\t"
+    "ldxr %w[prev], [%[ptr]]               \n\t"  // Load the previous value.
+    "cmp %w[prev], %w[old_value]           \n\t"
+    "bne 1f                                \n\t"
+    "stxr %w[temp], %w[new_value], [%[ptr]]\n\t"  // Try to store the new value.
+    "cbnz %w[temp], 0b                     \n\t"  // Retry if it did not work.
+    "1:                                    \n\t"
+    "clrex                                 \n\t"  // In case we didn't swap.
+    : [prev]"=&r" (prev),
+      [temp]"=&r" (temp)
+    : [ptr]"r" (ptr),
+      [old_value]"r" (old_value),
+      [new_value]"r" (new_value)
+    : "memory", "cc"
+  );  // NOLINT
+
+  return prev;
+}
+
+inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
+                                         Atomic32 new_value) {
+  Atomic32 result;
+  int32_t temp;
+
+  __asm__ __volatile__ (  // NOLINT
+    "0:                                    \n\t"
+    "ldxr %w[result], [%[ptr]]             \n\t"  // Load the previous value.
+    "stxr %w[temp], %w[new_value], [%[ptr]]\n\t"  // Try to store the new value.
+    "cbnz %w[temp], 0b                     \n\t"  // Retry if it did not work.
+    : [result]"=&r" (result),
+      [temp]"=&r" (temp)
+    : [ptr]"r" (ptr),
+      [new_value]"r" (new_value)
+    : "memory"
+  );  // NOLINT
+
+  return result;
+}
+
+inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
+                                          Atomic32 increment) {
+  Atomic32 result;
+  int32_t temp;
+
+  __asm__ __volatile__ (  // NOLINT
+    "0:                                       \n\t"
+    "ldxr %w[result], [%[ptr]]                \n\t"  // Load the previous value.
+    "add %w[result], %w[result], %w[increment]\n\t"
+    "stxr %w[temp], %w[result], [%[ptr]]      \n\t"  // Try to store the result.
+    "cbnz %w[temp], 0b                        \n\t"  // Retry on failure.
+    : [result]"=&r" (result),
+      [temp]"=&r" (temp)
+    : [ptr]"r" (ptr),
+      [increment]"r" (increment)
+    : "memory"
+  );  // NOLINT
+
+  return result;
+}
+
+inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
+                                        Atomic32 increment) {
+  Atomic32 result;
+  int32_t temp;
+
+  __asm__ __volatile__ (  // NOLINT
+    "dmb ish                                  \n\t"  // Data memory barrier.
+    "0:                                       \n\t"
+    "ldxr %w[result], [%[ptr]]                \n\t"  // Load the previous value.
+    "add %w[result], %w[result], %w[increment]\n\t"
+    "stxr %w[temp], %w[result], [%[ptr]]      \n\t"  // Try to store the result.
+    "cbnz %w[temp], 0b                        \n\t"  // Retry on failure.
+    "dmb ish                                  \n\t"  // Data memory barrier.
+    : [result]"=&r" (result),
+      [temp]"=&r" (temp)
+    : [ptr]"r" (ptr),
+      [increment]"r" (increment)
+    : "memory"
+  );  // NOLINT
+
+  return result;
+}
+
+inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
+                                       Atomic32 old_value,
+                                       Atomic32 new_value) {
+  Atomic32 prev;
+  int32_t temp;
+
+  __asm__ __volatile__ (  // NOLINT
+    "0:                                    \n\t"
+    "ldxr %w[prev], [%[ptr]]               \n\t"  // Load the previous value.
+    "cmp %w[prev], %w[old_value]           \n\t"
+    "bne 1f                                \n\t"
+    "stxr %w[temp], %w[new_value], [%[ptr]]\n\t"  // Try to store the new value.
+    "cbnz %w[temp], 0b                     \n\t"  // Retry if it did not work.
+    "dmb ish                               \n\t"  // Data memory barrier.
+    "1:                                    \n\t"
+    // If the compare failed the 'dmb' is unnecessary, but we still need a
+    // 'clrex'.
+    "clrex                                 \n\t"
+    : [prev]"=&r" (prev),
+      [temp]"=&r" (temp)
+    : [ptr]"r" (ptr),
+      [old_value]"r" (old_value),
+      [new_value]"r" (new_value)
+    : "memory", "cc"
+  );  // NOLINT
+
+  return prev;
+}
+
+inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
+                                       Atomic32 old_value,
+                                       Atomic32 new_value) {
+  Atomic32 prev;
+  int32_t temp;
+
+  __asm__ __volatile__ (  // NOLINT
+    "dmb ish                               \n\t"  // Data memory barrier.
+    "0:                                    \n\t"
+    "ldxr %w[prev], [%[ptr]]               \n\t"  // Load the previous value.
+    "cmp %w[prev], %w[old_value]           \n\t"
+    "bne 1f                                \n\t"
+    "stxr %w[temp], %w[new_value], [%[ptr]]\n\t"  // Try to store the new value.
+    "cbnz %w[temp], 0b                     \n\t"  // Retry if it did not work.
+    "1:                                    \n\t"
+    // If the compare failed we still need a 'clrex'.
+    "clrex                                 \n\t"
+    : [prev]"=&r" (prev),
+      [temp]"=&r" (temp)
+    : [ptr]"r" (ptr),
+      [old_value]"r" (old_value),
+      [new_value]"r" (new_value)
+    : "memory", "cc"
+  );  // NOLINT
+
+  return prev;
+}
+
+inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
+  *ptr = value;
+}
+
+inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
+  *ptr = value;
+  __asm__ __volatile__ (  // NOLINT
+    "dmb ish                               \n\t"  // Data memory barrier.
+    ::: "memory"  // Prevent gcc from reordering before the store above.
+  );  // NOLINT
+}
+
+inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
+  __asm__ __volatile__ (  // NOLINT
+    "dmb ish                               \n\t"  // Data memory barrier.
+    ::: "memory"  // Prevent gcc from reordering after the store below.
+  );  // NOLINT
+  *ptr = value;
+}
+
+inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
+  return *ptr;
+}
+
+inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
+  Atomic32 value = *ptr;
+  __asm__ __volatile__ (  // NOLINT
+    "dmb ish                               \n\t"  // Data memory barrier.
+    ::: "memory"  // Prevent gcc from reordering before the load above.
+  );  // NOLINT
+  return value;
+}
+
+inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
+  __asm__ __volatile__ (  // NOLINT
+    "dmb ish                               \n\t"  // Data memory barrier.
+    ::: "memory"  // Prevent gcc from reordering after the load below.
+  );  // NOLINT
+  return *ptr;
+}
+
+// 64-bit versions of the operations.
+// See the 32-bit versions for comments.
+
+inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
+                                         Atomic64 old_value,
+                                         Atomic64 new_value) {
+  Atomic64 prev;
+  int32_t temp;
+
+  __asm__ __volatile__ (  // NOLINT
+    "0:                                    \n\t"
+    "ldxr %[prev], [%[ptr]]                \n\t"
+    "cmp %[prev], %[old_value]             \n\t"
+    "bne 1f                                \n\t"
+    "stxr %w[temp], %[new_value], [%[ptr]] \n\t"
+    "cbnz %w[temp], 0b                     \n\t"
+    "1:                                    \n\t"
+    "clrex                                 \n\t"
+    : [prev]"=&r" (prev),
+      [temp]"=&r" (temp)
+    : [ptr]"r" (ptr),
+      [old_value]"r" (old_value),
+      [new_value]"r" (new_value)
+    : "memory", "cc"
+  );  // NOLINT
+
+  return prev;
+}
+
+inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
+                                         Atomic64 new_value) {
+  Atomic64 result;
+  int32_t temp;
+
+  __asm__ __volatile__ (  // NOLINT
+    "0:                                    \n\t"
+    "ldxr %[result], [%[ptr]]              \n\t"
+    "stxr %w[temp], %[new_value], [%[ptr]] \n\t"
+    "cbnz %w[temp], 0b                     \n\t"
+    : [result]"=&r" (result),
+      [temp]"=&r" (temp)
+    : [ptr]"r" (ptr),
+      [new_value]"r" (new_value)
+    : "memory"
+  );  // NOLINT
+
+  return result;
+}
+
+inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
+                                          Atomic64 increment) {
+  Atomic64 result;
+  int32_t temp;
+
+  __asm__ __volatile__ (  // NOLINT
+    "0:                                     \n\t"
+    "ldxr %[result], [%[ptr]]               \n\t"
+    "add %[result], %[result], %[increment] \n\t"
+    "stxr %w[temp], %[result], [%[ptr]]     \n\t"
+    "cbnz %w[temp], 0b                      \n\t"
+    : [result]"=&r" (result),
+      [temp]"=&r" (temp)
+    : [ptr]"r" (ptr),
+      [increment]"r" (increment)
+    : "memory"
+  );  // NOLINT
+
+  return result;
+}
+
+inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
+                                        Atomic64 increment) {
+  Atomic64 result;
+  int32_t temp;
+
+  __asm__ __volatile__ (  // NOLINT
+    "dmb ish                                \n\t"
+    "0:                                     \n\t"
+    "ldxr %[result], [%[ptr]]               \n\t"
+    "add %[result], %[result], %[increment] \n\t"
+    "stxr %w[temp], %[result], [%[ptr]]     \n\t"
+    "cbnz %w[temp], 0b                      \n\t"
+    "dmb ish                                \n\t"
+    : [result]"=&r" (result),
+      [temp]"=&r" (temp)
+    : [ptr]"r" (ptr),
+      [increment]"r" (increment)
+    : "memory"
+  );  // NOLINT
+
+  return result;
+}
+
+inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
+                                       Atomic64 old_value,
+                                       Atomic64 new_value) {
+  Atomic64 prev;
+  int32_t temp;
+
+  __asm__ __volatile__ (  // NOLINT
+    "0:                                    \n\t"
+    "ldxr %[prev], [%[ptr]]                \n\t"
+    "cmp %[prev], %[old_value]             \n\t"
+    "bne 1f                                \n\t"
+    "stxr %w[temp], %[new_value], [%[ptr]] \n\t"
+    "cbnz %w[temp], 0b                     \n\t"
+    "dmb ish                               \n\t"
+    "1:                                    \n\t"
+    "clrex                                 \n\t"
+    : [prev]"=&r" (prev),
+      [temp]"=&r" (temp)
+    : [ptr]"r" (ptr),
+      [old_value]"r" (old_value),
+      [new_value]"r" (new_value)
+    : "memory", "cc"
+  );  // NOLINT
+
+  return prev;
+}
+
+inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
+                                       Atomic64 old_value,
+                                       Atomic64 new_value) {
+  Atomic64 prev;
+  int32_t temp;
+
+  __asm__ __volatile__ (  // NOLINT
+    "dmb ish                               \n\t"
+    "0:                                    \n\t"
+    "ldxr %[prev], [%[ptr]]                \n\t"
+    "cmp %[prev], %[old_value]             \n\t"
+    "bne 1f                                \n\t"
+    "stxr %w[temp], %[new_value], [%[ptr]] \n\t"
+    "cbnz %w[temp], 0b                     \n\t"
+    "1:                                    \n\t"
+    "clrex                                 \n\t"
+    : [prev]"=&r" (prev),
+      [temp]"=&r" (temp)
+    : [ptr]"r" (ptr),
+      [old_value]"r" (old_value),
+      [new_value]"r" (new_value)
+    : "memory", "cc"
+  );  // NOLINT
+
+  return prev;
+}
+
+inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
+  *ptr = value;
+}
+
+inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
+  *ptr = value;
+  __asm__ __volatile__ (  // NOLINT
+    "dmb ish                               \n\t"
+    ::: "memory"
+  );  // NOLINT
+}
+
+inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
+  __asm__ __volatile__ (  // NOLINT
+    "dmb ish                               \n\t"
+    ::: "memory"
+  );  // NOLINT
+  *ptr = value;
+}
+
+inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
+  return *ptr;
+}
+
+inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
+  Atomic64 value = *ptr;
+  __asm__ __volatile__ (  // NOLINT
+    "dmb ish                               \n\t"
+    ::: "memory"
+  );  // NOLINT
+  return value;
+}
+
+inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
+  __asm__ __volatile__ (  // NOLINT
+    "dmb ish                               \n\t"
+    ::: "memory"
+  );  // NOLINT
+  return *ptr;
+}
+
+} } // namespace v8::internal
+
+#endif // V8_ATOMICOPS_INTERNALS_A64_GCC_H_
MakeFunctionInstancePrototypeWritable();
- if (!ConfigureGlobalObjects(global_template)) return;
- isolate->counters()->contexts_created_from_scratch()->Increment();
+ if (!FLAG_disable_native_files) {
+ if (!ConfigureGlobalObjects(global_template)) return;
+ isolate->counters()->contexts_created_from_scratch()->Increment();
+ }
}
// Initialize experimental globals and install experimental natives.
// For now we generate builtin adaptor code into a stack-allocated
// buffer, before copying it into individual code objects. Be careful
// with alignment, some platforms don't like unaligned code.
- union { int force_alignment; byte buffer[8*KB]; } u;
+ // TODO(jbramley): I had to increase the size of this buffer from 8KB because
+ // we can generate a lot of debug code on A64.
+ union { int force_alignment; byte buffer[16*KB]; } u;
// Traverse the list of builtins and generate an adaptor in a
// separate code object for each one.
extern "C" void V8_Fatal(const char* file, int line, const char* format, ...);
+// Define custom A64 preprocessor helpers to facilitate development.
+#ifndef V8_TARGET_ARCH_A64
+
// The FATAL, UNREACHABLE and UNIMPLEMENTED macros are useful during
// development, but they should not be relied on in the final product.
#ifdef DEBUG
#define UNREACHABLE() ((void) 0)
#endif
+#else
+
+ #ifdef DEBUG
+ #define FATAL(msg) \
+ V8_Fatal(__FILE__, __LINE__, "%s", (msg))
+ #define UNREACHABLE() \
+ V8_Fatal(__FILE__, __LINE__, "unreachable code")
+ #else
+ #define FATAL(msg) \
+ V8_Fatal("", 0, "%s", (msg))
+ #define UNREACHABLE() ((void) 0)
+ #endif
+
+ #define ABORT() printf("in %s, line %i, %s", __FILE__, __LINE__, __func__); \
+ abort()
+
+ #define ALIGNMENT_EXCEPTION() printf("ALIGNMENT EXCEPTION\t"); ABORT()
+
+ // Helpers for unimplemented sections.
+ #define UNIMPLEMENTED() \
+ do { \
+ printf("UNIMPLEMENTED: %s, line %d, %s\n", \
+ __FILE__, __LINE__, __func__); \
+ V8_Fatal(__FILE__, __LINE__, "unimplemented code"); \
+ } while (0)
+ #define UNIMPLEMENTED_M(message) \
+ do { \
+ printf("UNIMPLEMENTED: %s, line %d, %s : %s\n", \
+ __FILE__, __LINE__, __func__, message); \
+ V8_Fatal(__FILE__, __LINE__, "unimplemented code"); \
+ } while (0)
+ // Like UNIMPLEMENTED, but does not abort.
+ #define TODO_UNIMPLEMENTED(message) \
+ do { \
+ static const unsigned int kLimit = 1; \
+ static unsigned int printed = 0; \
+ if (printed < UINT_MAX) { \
+ printed++; \
+ } \
+ if (printed <= kLimit) { \
+ printf("UNIMPLEMENTED: %s, line %d, %s: %s\n", \
+ __FILE__, __LINE__, __func__, message); \
+ } \
+ } while (0)
+
+ // Simulator specific helpers.
+ #ifdef USE_SIMULATOR
+ // Helpers for unimplemented sections.
+ // TODO(all): If possible automatically prepend an indicator like
+ // UNIMPLEMENTED or LOCATION.
+ #define ASM_UNIMPLEMENTED(message) \
+ __ Debug(message, __LINE__, NO_PARAM)
+ #define ASM_UNIMPLEMENTED_BREAK(message) \
+ __ Debug(message, __LINE__, \
+ FLAG_ignore_asm_unimplemented_break ? NO_PARAM : BREAK)
+ #define ASM_LOCATION(message) \
+ __ Debug("LOCATION: " message, __LINE__, NO_PARAM)
+ #else
+ #define ASM_UNIMPLEMENTED(message)
+ #define ASM_UNIMPLEMENTED_BREAK(message)
+ #define ASM_LOCATION(message)
+ #endif
+
+#endif
+
// The CHECK macro checks that the given condition is true; if not, it
// prints a message to stderr and aborts.
// List of code stubs only used on ARM platforms.
#if V8_TARGET_ARCH_ARM
+#define CODE_STUB_LIST_ARM(V) \
+ V(GetProperty) \
+ V(SetProperty) \
+ V(InvokeBuiltin) \
+ V(DirectCEntry)
+#elif V8_TARGET_ARCH_A64
#define CODE_STUB_LIST_ARM(V) \
V(GetProperty) \
V(SetProperty) \
#include "ia32/code-stubs-ia32.h"
#elif V8_TARGET_ARCH_X64
#include "x64/code-stubs-x64.h"
+#elif V8_TARGET_ARCH_A64
+#include "a64/code-stubs-a64.h"
#elif V8_TARGET_ARCH_ARM
#include "arm/code-stubs-arm.h"
#elif V8_TARGET_ARCH_MIPS
int offset,
bool is_truncating,
bool skip_fastpath = false) : bit_field_(0) {
- bit_field_ = SourceRegisterBits::encode(source.code_) |
- DestinationRegisterBits::encode(destination.code_) |
+ bit_field_ = SourceRegisterBits::encode(source.code()) |
+ DestinationRegisterBits::encode(destination.code()) |
OffsetBits::encode(offset) |
IsTruncatingBits::encode(is_truncating) |
SkipFastPathBits::encode(skip_fastpath) |
SSEBits::encode(CpuFeatures::IsSafeForSnapshot(SSE2) ?
- CpuFeatures::IsSafeForSnapshot(SSE3) ? 2 : 1 : 0);
+ CpuFeatures::IsSafeForSnapshot(SSE3) ? 2 : 1 : 0);
}
Register source() {
- Register result = { SourceRegisterBits::decode(bit_field_) };
- return result;
+ return Register::from_code(SourceRegisterBits::decode(bit_field_));
}
Register destination() {
- Register result = { DestinationRegisterBits::decode(bit_field_) };
- return result;
+ return Register::from_code(DestinationRegisterBits::decode(bit_field_));
}
bool is_truncating() {
#include "ia32/codegen-ia32.h"
#elif V8_TARGET_ARCH_X64
#include "x64/codegen-x64.h"
+#elif V8_TARGET_ARCH_A64
+#include "a64/codegen-a64.h"
#elif V8_TARGET_ARCH_ARM
#include "arm/codegen-arm.h"
#elif V8_TARGET_ARCH_MIPS
constant_pool_(kZapUint32) {
// Zap all the registers.
for (int r = 0; r < Register::kNumRegisters; r++) {
+ // TODO(jbramley): It isn't safe to use kZapUint32 here. If the register
+ // isn't used before the next safepoint, the GC will try to scan it as a
+ // tagged value. kZapUint32 looks like a valid tagged pointer, but it isn't.
SetRegister(r, kZapUint32);
}
void EnableInterrupts();
void DisableInterrupts();
-#if V8_TARGET_ARCH_X64
+#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_A64
static const uintptr_t kInterruptLimit = V8_UINT64_C(0xfffffffffffffffe);
static const uintptr_t kIllegalLimit = V8_UINT64_C(0xfffffffffffffff8);
#else
DEFINE_bool(allow_natives_syntax, false, "allow natives syntax")
DEFINE_bool(trace_parse, false, "trace parsing and preparsing")
-// simulator-arm.cc and simulator-mips.cc
+// simulator-arm.cc, simulator-a64.cc and simulator-mips.cc
DEFINE_bool(trace_sim, false, "Trace simulator execution")
DEFINE_bool(check_icache, false,
"Check icache flushes in ARM and MIPS simulator")
DEFINE_int(stop_sim_at, 0, "Simulator stop after x number of instructions")
+#ifdef V8_TARGET_ARCH_A64
+DEFINE_int(sim_stack_alignment, 16,
+ "Stack alignment in bytes in simulator. This must be a power of two "
+ "and it must be at least 16. 16 is default.")
+#else
DEFINE_int(sim_stack_alignment, 8,
"Stack alingment in bytes in simulator (4 or 8, 8 is default)")
+#endif
+DEFINE_int(sim_stack_size, 2 * MB / KB,
+ "Stack size of the A64 simulator in kBytes (default is 2 MB)")
+DEFINE_bool(log_regs_modified, true,
+ "When logging register values, only print modified registers.")
+DEFINE_bool(log_colour, true,
+ "When logging, try to use coloured output.")
+DEFINE_bool(ignore_asm_unimplemented_break, false,
+ "Don't break for ASM_UNIMPLEMENTED_BREAK macros.")
+DEFINE_bool(trace_sim_messages, false,
+ "Trace simulator debug messages. Implied by --trace-sim.")
// isolate.cc
DEFINE_bool(stack_trace_on_illegal, false,
"Time events including external callbacks.")
DEFINE_implication(log_timer_events, log_internal_timer_events)
DEFINE_implication(log_internal_timer_events, prof)
+DEFINE_bool(log_instruction_stats, false, "Log AArch64 instruction statistics.")
+DEFINE_string(log_instruction_file, "a64_inst.csv",
+ "AArch64 instruction statistics log file.")
+DEFINE_int(log_instruction_period, 1 << 22,
+ "AArch64 instruction statistics logging period.")
DEFINE_bool(redirect_code_traces, false,
"output deopt information and disassembly into file "
#include "ia32/frames-ia32.h"
#elif V8_TARGET_ARCH_X64
#include "x64/frames-x64.h"
+#elif V8_TARGET_ARCH_A64
+#include "a64/frames-a64.h"
#elif V8_TARGET_ARCH_ARM
#include "arm/frames-arm.h"
#elif V8_TARGET_ARCH_MIPS
namespace v8 {
namespace internal {
+#if V8_TARGET_ARCH_A64
+typedef uint64_t RegList;
+#else
typedef uint32_t RegList;
+#endif
// Get the number of registers in a given register list.
int NumRegs(RegList list);
static const int kCodeSizeMultiplier = 162;
#elif V8_TARGET_ARCH_ARM
static const int kCodeSizeMultiplier = 142;
+#elif V8_TARGET_ARCH_A64
+// TODO(all): Copied ARM value. Check this is sensible for A64.
+ static const int kCodeSizeMultiplier = 142;
#elif V8_TARGET_ARCH_MIPS
static const int kCodeSizeMultiplier = 142;
#else
#define V8_HOST_ARCH_IA32 1
#define V8_HOST_ARCH_32_BIT 1
#define V8_HOST_CAN_READ_UNALIGNED 1
+#elif defined(__AARCH64EL__)
+#define V8_HOST_ARCH_A64 1
+#define V8_HOST_ARCH_64_BIT 1
+#define V8_HOST_CAN_READ_UNALIGNED 1
#elif defined(__ARMEL__)
#define V8_HOST_ARCH_ARM 1
#define V8_HOST_ARCH_32_BIT 1
#define V8_HOST_ARCH_MIPS 1
#define V8_HOST_ARCH_32_BIT 1
#else
-#error Host architecture was not detected as supported by v8
+#error "Host architecture was not detected as supported by v8"
#endif
#if defined(__ARM_ARCH_7A__) || \
// in the same way as the host architecture, that is, target the native
// environment as presented by the compiler.
#if !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_IA32 && \
- !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_MIPS
+ !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_A64 && !V8_TARGET_ARCH_MIPS
#if defined(_M_X64) || defined(__x86_64__)
#define V8_TARGET_ARCH_X64 1
#elif defined(_M_IX86) || defined(__i386__)
#define V8_TARGET_ARCH_IA32 1
+#elif defined(__AARCH64EL__)
+#define V8_TARGET_ARCH_A64 1
#elif defined(__ARMEL__)
#define V8_TARGET_ARCH_ARM 1
#elif defined(__MIPSEL__)
#if (V8_TARGET_ARCH_ARM && !(V8_HOST_ARCH_IA32 || V8_HOST_ARCH_ARM))
#error Target architecture arm is only supported on arm and ia32 host
#endif
+#if (V8_TARGET_ARCH_A64 && !(V8_HOST_ARCH_X64 || V8_HOST_ARCH_A64))
+#error Target architecture a64 is only supported on a64 and x64 host
+#endif
#if (V8_TARGET_ARCH_MIPS && !(V8_HOST_ARCH_IA32 || V8_HOST_ARCH_MIPS))
#error Target architecture mips is only supported on mips and ia32 host
#endif
// Setting USE_SIMULATOR explicitly from the build script will force
// the use of a simulated environment.
#if !defined(USE_SIMULATOR)
+#if (V8_TARGET_ARCH_A64 && !V8_HOST_ARCH_A64)
+#define USE_SIMULATOR 1
+#endif
#if (V8_TARGET_ARCH_ARM && !V8_HOST_ARCH_ARM)
#define USE_SIMULATOR 1
#endif
#define V8_TARGET_LITTLE_ENDIAN 1
#elif V8_TARGET_ARCH_ARM
#define V8_TARGET_LITTLE_ENDIAN 1
+#elif V8_TARGET_ARCH_A64
+#define V8_TARGET_LITTLE_ENDIAN 1
#elif V8_TARGET_ARCH_MIPS
#define V8_TARGET_LITTLE_ENDIAN 1
#else
// The eliminates the need for doing dictionary lookup in the
// stub cache for these stubs.
HandleScope scope(isolate());
+
+ // Create stubs that should be there, so we don't unexpectedly have to
+ // create them if we need them during the creation of another stub.
+ // Stub creation mixes raw pointers and handles in an unsafe manner so
+ // we cannot create stubs while we are creating stubs.
+ CodeStub::GenerateStubsAheadOfTime(isolate());
+
+ // MacroAssembler::Abort calls (usually enabled with --debug-code) depend on
+ // CEntryStub, so we need to call GenerateStubsAheadOfTime before JSEntryStub
+ // is created.
+
// gcc-4.4 has problem generating correct code of following snippet:
// { JSEntryStub stub;
// js_entry_code_ = *stub.GetCode();
// To workaround the problem, make separate functions without inlining.
Heap::CreateJSEntryStub();
Heap::CreateJSConstructEntryStub();
-
- // Create stubs that should be there, so we don't unexpectedly have to
- // create them if we need them during the creation of another stub.
- // Stub creation mixes raw pointers and handles in an unsafe manner so
- // we cannot create stubs while we are creating stubs.
- CodeStub::GenerateStubsAheadOfTime(isolate());
}
#include "ia32/lithium-ia32.h"
#elif V8_TARGET_ARCH_X64
#include "x64/lithium-x64.h"
+#elif V8_TARGET_ARCH_A64
+#include "a64/lithium-a64.h"
#elif V8_TARGET_ARCH_ARM
#include "arm/lithium-arm.h"
#elif V8_TARGET_ARCH_MIPS
#include "ia32/lithium-codegen-ia32.h"
#elif V8_TARGET_ARCH_X64
#include "x64/lithium-codegen-x64.h"
+#elif V8_TARGET_ARCH_A64
+#include "a64/lithium-codegen-a64.h"
#elif V8_TARGET_ARCH_ARM
#include "arm/lithium-codegen-arm.h"
#elif V8_TARGET_ARCH_MIPS
thread_manager_->isolate_ = this;
#if V8_TARGET_ARCH_ARM && !defined(__arm__) || \
+ V8_TARGET_ARCH_A64 && !defined(__aarch64__) || \
V8_TARGET_ARCH_MIPS && !defined(__mips__)
simulator_initialized_ = false;
simulator_i_cache_ = NULL;
// Initialize other runtime facilities
#if defined(USE_SIMULATOR)
-#if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_MIPS
+#if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_A64 || V8_TARGET_ARCH_MIPS
Simulator::Initialize(this);
#endif
#endif
#endif
#if !defined(__arm__) && V8_TARGET_ARCH_ARM || \
+ !defined(__aarch64__) && V8_TARGET_ARCH_A64 || \
!defined(__mips__) && V8_TARGET_ARCH_MIPS
class Redirection;
class Simulator;
stack_limit_(0),
thread_state_(NULL),
#if !defined(__arm__) && V8_TARGET_ARCH_ARM || \
+ !defined(__aarch64__) && V8_TARGET_ARCH_A64 || \
!defined(__mips__) && V8_TARGET_ARCH_MIPS
simulator_(NULL),
#endif
FIELD_ACCESSOR(ThreadState*, thread_state)
#if !defined(__arm__) && V8_TARGET_ARCH_ARM || \
+ !defined(__aarch64__) && V8_TARGET_ARCH_A64 || \
!defined(__mips__) && V8_TARGET_ARCH_MIPS
FIELD_ACCESSOR(Simulator*, simulator)
#endif
ThreadState* thread_state_;
#if !defined(__arm__) && V8_TARGET_ARCH_ARM || \
+ !defined(__aarch64__) && V8_TARGET_ARCH_A64 || \
!defined(__mips__) && V8_TARGET_ARCH_MIPS
Simulator* simulator_;
#endif
#endif
#if V8_TARGET_ARCH_ARM && !defined(__arm__) || \
+ V8_TARGET_ARCH_A64 && !defined(__aarch64__) || \
V8_TARGET_ARCH_MIPS && !defined(__mips__)
FIELD_ACCESSOR(bool, simulator_initialized)
FIELD_ACCESSOR(HashMap*, simulator_i_cache)
double time_millis_at_init_;
#if V8_TARGET_ARCH_ARM && !defined(__arm__) || \
+ V8_TARGET_ARCH_A64 && !defined(__aarch64__) || \
V8_TARGET_ARCH_MIPS && !defined(__mips__)
bool simulator_initialized_;
HashMap* simulator_i_cache_;
#include "ia32/regexp-macro-assembler-ia32.h"
#elif V8_TARGET_ARCH_X64
#include "x64/regexp-macro-assembler-x64.h"
+#elif V8_TARGET_ARCH_A64
+#include "a64/regexp-macro-assembler-a64.h"
#elif V8_TARGET_ARCH_ARM
#include "arm/regexp-macro-assembler-arm.h"
#elif V8_TARGET_ARCH_MIPS
#elif V8_TARGET_ARCH_ARM
RegExpMacroAssemblerARM macro_assembler(mode, (data->capture_count + 1) * 2,
zone);
+#elif V8_TARGET_ARCH_A64
+ RegExpMacroAssemblerA64 macro_assembler(mode, (data->capture_count + 1) * 2,
+ zone);
#elif V8_TARGET_ARCH_MIPS
RegExpMacroAssemblerMIPS macro_assembler(mode, (data->capture_count + 1) * 2,
zone);
+#else
+#error "Unsupported architecture"
#endif
#else // V8_INTERPRETED_REGEXP
#include "ia32/lithium-ia32.h"
#elif V8_TARGET_ARCH_X64
#include "x64/lithium-x64.h"
+#elif V8_TARGET_ARCH_A64
+#include "a64/lithium-a64.h"
#elif V8_TARGET_ARCH_ARM
#include "arm/lithium-arm.h"
#elif V8_TARGET_ARCH_MIPS
#include "ia32/lithium-ia32.h"
#elif V8_TARGET_ARCH_X64
#include "x64/lithium-x64.h"
+#elif V8_TARGET_ARCH_A64
+#include "a64/lithium-a64.h"
#elif V8_TARGET_ARCH_ARM
#include "arm/lithium-arm.h"
#elif V8_TARGET_ARCH_MIPS
#elif V8_TARGET_ARCH_ARM
#include "arm/lithium-arm.h"
#include "arm/lithium-codegen-arm.h"
+#elif V8_TARGET_ARCH_A64
+#include "a64/lithium-a64.h"
+#include "a64/lithium-codegen-a64.h"
#elif V8_TARGET_ARCH_MIPS
#include "mips/lithium-mips.h"
#include "mips/lithium-codegen-mips.h"
#elif V8_TARGET_ARCH_MIPS
#include "mips/lithium-mips.h"
#include "mips/lithium-codegen-mips.h"
+#elif V8_TARGET_ARCH_A64
+#include "a64/lithium-a64.h"
+#include "a64/lithium-codegen-a64.h"
#else
#error "Unknown architecture."
#endif
#include "x64/assembler-x64-inl.h"
#include "code.h" // must be after assembler_*.h
#include "x64/macro-assembler-x64.h"
+#elif V8_TARGET_ARCH_A64
+#include "a64/constants-a64.h"
+#include "assembler.h"
+#include "a64/assembler-a64.h"
+#include "a64/assembler-a64-inl.h"
+#include "code.h" // must be after assembler_*.h
+#include "a64/macro-assembler-a64.h"
+#include "a64/macro-assembler-a64-inl.h"
#elif V8_TARGET_ARCH_ARM
#include "arm/constants-arm.h"
#include "assembler.h"
void set_pc(int32_t value);
int32_t get_pc() const;
+ Address get_sp() {
+ return reinterpret_cast<Address>(static_cast<intptr_t>(get_register(sp)));
+ }
+
// Accessor to the internal simulator stack area.
uintptr_t StackLimit() const;
#include "property-details.h"
#include "smart-pointers.h"
#include "unicode-inl.h"
-#if V8_TARGET_ARCH_ARM
+#if V8_TARGET_ARCH_A64
+#include "a64/constants-a64.h"
+#elif V8_TARGET_ARCH_ARM
#include "arm/constants-arm.h"
#elif V8_TARGET_ARCH_MIPS
#include "mips/constants-mips.h"
V(kCodeObjectNotProperlyPatched, "Code object not properly patched") \
V(kCompoundAssignmentToLookupSlot, "Compound assignment to lookup slot") \
V(kContextAllocatedArguments, "Context-allocated arguments") \
+ V(kCopyBuffersOverlap, "Copy buffers overlap") \
+ V(kCouldNotGenerateZero, "Could not generate +0.0") \
+ V(kCouldNotGenerateNegativeZero, "Could not generate -0.0") \
V(kDebuggerIsActive, "Debugger is active") \
V(kDebuggerStatement, "DebuggerStatement") \
V(kDeclarationInCatchContext, "Declaration in catch context") \
"DontDelete cells can't contain the hole") \
V(kDoPushArgumentNotImplementedForDoubleType, \
"DoPushArgument not implemented for double type") \
+ V(kEliminatedBoundsCheckFailed, "Eliminated bounds check failed") \
V(kEmitLoadRegisterUnsupportedDoubleImmediate, \
"EmitLoadRegister: Unsupported double immediate") \
V(kEval, "eval") \
V(kExpected0AsASmiSentinel, "Expected 0 as a Smi sentinel") \
- V(kExpectedAlignmentMarker, "expected alignment marker") \
- V(kExpectedAllocationSite, "expected allocation site") \
+ V(kExpectedAlignmentMarker, "Expected alignment marker") \
+ V(kExpectedAllocationSite, "Expected allocation site") \
+ V(kExpectedFunctionObject, "Expected function object in register") \
+ V(kExpectedHeapNumber, "Expected HeapNumber") \
+ V(kExpectedNativeContext, "Expected native context") \
+ V(kExpectedNonIdenticalObjects, "Expected non-identical objects") \
+ V(kExpectedNonNullContext, "Expected non-null context") \
+ V(kExpectedPositiveZero, "Expected +0.0") \
+ V(kExpectedAllocationSiteInCell, \
+ "Expected AllocationSite in property cell") \
+ V(kExpectedFixedArrayInFeedbackVector, \
+ "Expected fixed array in feedback vector") \
V(kExpectedFixedArrayInRegisterA2, \
"Expected fixed array in register a2") \
V(kExpectedFixedArrayInRegisterEbx, \
"Expected fixed array in register r2") \
V(kExpectedFixedArrayInRegisterRbx, \
"Expected fixed array in register rbx") \
+ V(kExpectedSmiOrHeapNumber, "Expected smi or HeapNumber") \
V(kExpectingAlignmentForCopyBytes, \
"Expecting alignment for CopyBytes") \
V(kExportDeclaration, "Export declaration") \
V(kInliningBailedOut, "Inlining bailed out") \
V(kInputGPRIsExpectedToHaveUpper32Cleared, \
"Input GPR is expected to have upper32 cleared") \
+ V(kInputStringTooLong, "Input string too long") \
V(kInstanceofStubUnexpectedCallSiteCacheCheck, \
"InstanceofStub unexpected call site cache (check)") \
V(kInstanceofStubUnexpectedCallSiteCacheCmp1, \
V(kInvalidCaptureReferenced, "Invalid capture referenced") \
V(kInvalidElementsKindForInternalArrayOrInternalPackedArray, \
"Invalid ElementsKind for InternalArray or InternalPackedArray") \
+ V(kInvalidFullCodegenState, "invalid full-codegen state") \
V(kInvalidHandleScopeLevel, "Invalid HandleScope level") \
V(kInvalidLeftHandSideInAssignment, "Invalid left-hand side in assignment") \
V(kInvalidLhsInCompoundAssignment, "Invalid lhs in compound assignment") \
V(kJSObjectWithFastElementsMapHasSlowElements, \
"JSObject with fast elements map has slow elements") \
V(kLetBindingReInitialization, "Let binding re-initialization") \
+ V(kLhsHasBeenClobbered, "lhs has been clobbered") \
V(kLiveBytesCountOverflowChunkSize, "Live Bytes Count overflow chunk size") \
+ V(kLiveEditFrameDroppingIsNotSupportedOnA64, \
+ "LiveEdit frame dropping is not supported on a64") \
V(kLiveEditFrameDroppingIsNotSupportedOnArm, \
"LiveEdit frame dropping is not supported on arm") \
V(kLiveEditFrameDroppingIsNotSupportedOnMips, \
"Object literal with complex property") \
V(kOddballInStringTableIsNotUndefinedOrTheHole, \
"Oddball in string table is not undefined or the hole") \
+ V(kOffsetOutOfRange, "Offset out of range") \
V(kOperandIsASmiAndNotAName, "Operand is a smi and not a name") \
V(kOperandIsASmiAndNotAString, "Operand is a smi and not a string") \
V(kOperandIsASmi, "Operand is a smi") \
"Out of virtual registers while trying to allocate temp register") \
V(kParseScopeError, "Parse/scope error") \
V(kPossibleDirectCallToEval, "Possible direct call to eval") \
+ V(kPreconditionsWereNotMet, "Preconditions were not met") \
V(kPropertyAllocationCountFailed, "Property allocation count failed") \
V(kReceivedInvalidReturnAddress, "Received invalid return address") \
V(kReferenceToAVariableWhichRequiresDynamicLookup, \
V(kReferenceToUninitializedVariable, "Reference to uninitialized variable") \
V(kRegisterDidNotMatchExpectedRoot, "Register did not match expected root") \
V(kRegisterWasClobbered, "Register was clobbered") \
+ V(kRememberedSetPointerInNewSpace, "Remembered set pointer is in new space") \
+ V(kReturnAddressNotFoundInFrame, "Return address not found in frame") \
+ V(kRhsHasBeenClobbered, "Rhs has been clobbered") \
V(kScopedBlock, "ScopedBlock") \
V(kSmiAdditionOverflow, "Smi addition overflow") \
V(kSmiSubtractionOverflow, "Smi subtraction overflow") \
+ V(kStackAccessBelowStackPointer, "Stack access below stack pointer") \
V(kStackFrameTypesMustMatch, "Stack frame types must match") \
V(kSwitchStatementMixedOrNonLiteralSwitchLabels, \
"SwitchStatement: mixed or non-literal switch labels") \
V(kSwitchStatementTooManyClauses, "SwitchStatement: too many clauses") \
+ V(kTheCurrentStackPointerIsBelowCsp, \
+ "The current stack pointer is below csp") \
V(kTheInstructionShouldBeALui, "The instruction should be a lui") \
V(kTheInstructionShouldBeAnOri, "The instruction should be an ori") \
V(kTheInstructionToPatchShouldBeALoadFromPc, \
"The instruction to patch should be a load from pc") \
+ V(kTheInstructionToPatchShouldBeAnLdrLiteral, \
+ "The instruction to patch should be a ldr literal") \
V(kTheInstructionToPatchShouldBeALui, \
"The instruction to patch should be a lui") \
V(kTheInstructionToPatchShouldBeAnOri, \
"The instruction to patch should be an ori") \
+ V(kTheSourceAndDestinationAreTheSame, \
+ "The source and destination are the same") \
+ V(kTheStackWasCorruptedByMacroAssemblerCall, \
+ "The stack was corrupted by MacroAssembler::Call()") \
V(kTooManyParametersLocals, "Too many parameters/locals") \
V(kTooManyParameters, "Too many parameters") \
V(kTooManySpillSlotsNeededForOSR, "Too many spill slots needed for OSR") \
+ V(kToOperand32UnsupportedImmediate, "ToOperand32 unsupported immediate.") \
V(kToOperandIsDoubleRegisterUnimplemented, \
"ToOperand IsDoubleRegister unimplemented") \
V(kToOperandUnsupportedDoubleImmediate, \
V(kTryFinallyStatement, "TryFinallyStatement") \
V(kUnableToEncodeValueAsSmi, "Unable to encode value as smi") \
V(kUnalignedAllocationInNewSpace, "Unaligned allocation in new space") \
+ V(kUnalignedCellInWriteBarrier, "Unaligned cell in write barrier") \
V(kUndefinedValueNotLoaded, "Undefined value not loaded") \
V(kUndoAllocationOfNonAllocatedMemory, \
"Undo allocation of non allocated memory") \
V(kUnexpectedAllocationTop, "Unexpected allocation top") \
+ V(kUnexpectedColorFound, "Unexpected color bit pattern found") \
V(kUnexpectedElementsKindInArrayConstructor, \
"Unexpected ElementsKind in array constructor") \
V(kUnexpectedFallthroughFromCharCodeAtSlowCase, \
"Unexpected initial map for InternalArray function") \
V(kUnexpectedLevelAfterReturnFromApiCall, \
"Unexpected level after return from api call") \
+ V(kUnexpectedNegativeValue, "Unexpected negative value") \
V(kUnexpectedNumberOfPreAllocatedPropertyFields, \
"Unexpected number of pre-allocated property fields") \
+ V(kUnexpectedSmi, "Unexpected smi value") \
V(kUnexpectedStringFunction, "Unexpected String function") \
V(kUnexpectedStringType, "Unexpected string type") \
V(kUnexpectedStringWrapperInstanceSize, \
"Unexpected string wrapper instance size") \
V(kUnexpectedTypeForRegExpDataFixedArrayExpected, \
"Unexpected type for RegExp data, FixedArray expected") \
+ V(kUnexpectedValue, "Unexpected value") \
V(kUnexpectedUnusedPropertiesOfStringWrapper, \
"Unexpected unused properties of string wrapper") \
+ V(kUnimplemented, "unimplemented") \
V(kUninitializedKSmiConstantRegister, "Uninitialized kSmiConstantRegister") \
V(kUnknown, "Unknown") \
V(kUnsupportedConstCompoundAssignment, \
// kMaxCachedArrayIndexLength.
STATIC_CHECK(IS_POWER_OF_TWO(kMaxCachedArrayIndexLength + 1));
- static const int kContainsCachedArrayIndexMask =
+ static const unsigned int kContainsCachedArrayIndexMask =
(~kMaxCachedArrayIndexLength << kArrayIndexHashLengthShift) |
kIsNotArrayIndexMask;
// GLibc on ARM defines mcontext_t has a typedef for 'struct sigcontext'.
// Old versions of the C library <signal.h> didn't define the type.
#if defined(__ANDROID__) && !defined(__BIONIC_HAVE_UCONTEXT_T) && \
- defined(__arm__) && !defined(__BIONIC_HAVE_STRUCT_SIGCONTEXT)
+ (defined(__arm__) || defined(__aarch64__)) && \
+ !defined(__BIONIC_HAVE_STRUCT_SIGCONTEXT)
#include <asm/sigcontext.h>
#endif
void OS::DebugBreak() {
#if V8_HOST_ARCH_ARM
asm("bkpt 0");
+#elif V8_HOST_ARCH_A64
+ asm("brk 0");
#elif V8_HOST_ARCH_MIPS
asm("break");
#elif V8_HOST_ARCH_IA32
RegExpMacroAssembler(assembler->zone()),
assembler_(assembler) {
unsigned int type = assembler->Implementation();
- ASSERT(type < 5);
- const char* impl_names[] = {"IA32", "ARM", "MIPS", "X64", "Bytecode"};
+ ASSERT(type < 6);
+ const char* impl_names[] = {"IA32", "ARM", "A64", "MIPS", "X64", "Bytecode"};
PrintF("RegExpMacroAssembler%s();\n", impl_names[type]);
}
enum IrregexpImplementation {
kIA32Implementation,
kARMImplementation,
+ kA64Implementation,
kMIPSImplementation,
kX64Implementation,
kBytecodeImplementation
// GLibc on ARM defines mcontext_t has a typedef for 'struct sigcontext'.
// Old versions of the C library <signal.h> didn't define the type.
#if V8_OS_ANDROID && !defined(__BIONIC_HAVE_UCONTEXT_T) && \
- defined(__arm__) && !defined(__BIONIC_HAVE_STRUCT_SIGCONTEXT)
+ (defined(__arm__) || defined(__aarch64__)) && \
+ !defined(__BIONIC_HAVE_STRUCT_SIGCONTEXT)
#include <asm/sigcontext.h>
#endif
// Other fields are not used by V8, don't define them here.
} ucontext_t;
+#elif defined(__aarch64__)
+
+typedef struct sigcontext mcontext_t;
+
+typedef struct ucontext {
+ uint64_t uc_flags;
+ struct ucontext *uc_link;
+ stack_t uc_stack;
+ mcontext_t uc_mcontext;
+ // Other fields are not used by V8, don't define them here.
+} ucontext_t;
+
#elif defined(__mips__)
// MIPS version of sigcontext, for Android bionic.
typedef struct {
}
inline void FillRegisters(RegisterState* state) {
+#if V8_TARGET_ARCH_ARM
state->pc = reinterpret_cast<Address>(simulator_->get_pc());
state->sp = reinterpret_cast<Address>(simulator_->get_register(
Simulator::sp));
-#if V8_TARGET_ARCH_ARM
state->fp = reinterpret_cast<Address>(simulator_->get_register(
Simulator::r11));
+#elif V8_TARGET_ARCH_A64
+ if (simulator_->sp() == 0 || simulator_->fp() == 0) {
+    // It is possible that the simulator is interrupted while it is updating
+    // the sp or fp register. The A64 simulator does this in two steps:
+ // first setting it to zero and then setting it to the new value.
+ // Bailout if sp/fp doesn't contain the new value.
+ return;
+ }
+ state->pc = reinterpret_cast<Address>(simulator_->pc());
+ state->sp = reinterpret_cast<Address>(simulator_->sp());
+ state->fp = reinterpret_cast<Address>(simulator_->fp());
#elif V8_TARGET_ARCH_MIPS
+ state->pc = reinterpret_cast<Address>(simulator_->get_pc());
+ state->sp = reinterpret_cast<Address>(simulator_->get_register(
+ Simulator::sp));
state->fp = reinterpret_cast<Address>(simulator_->get_register(
Simulator::fp));
#endif
SimulatorHelper helper;
if (!helper.Init(sampler, isolate)) return;
helper.FillRegisters(&state);
+  // It is possible that the simulator is interrupted while it is updating
+  // the sp or fp register. The A64 simulator does this in two steps:
+ // first setting it to zero and then setting it to the new value.
+ // Bailout if sp/fp doesn't contain the new value.
+ if (state.sp == 0 || state.fp == 0) return;
#else
// Extracting the sample from the context is extremely machine dependent.
ucontext_t* ucontext = reinterpret_cast<ucontext_t*>(context);
state.fp = reinterpret_cast<Address>(mcontext.arm_fp);
#endif // defined(__GLIBC__) && !defined(__UCLIBC__) &&
// (__GLIBC__ < 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ <= 3))
+#elif V8_HOST_ARCH_A64
+ state.pc = reinterpret_cast<Address>(mcontext.pc);
+ state.sp = reinterpret_cast<Address>(mcontext.sp);
+ // FP is an alias for x29.
+ state.fp = reinterpret_cast<Address>(mcontext.regs[29]);
#elif V8_HOST_ARCH_MIPS
state.pc = reinterpret_cast<Address>(mcontext.pc);
state.sp = reinterpret_cast<Address>(mcontext.gregs[29]);
#include "ia32/simulator-ia32.h"
#elif V8_TARGET_ARCH_X64
#include "x64/simulator-x64.h"
+#elif V8_TARGET_ARCH_A64
+#include "a64/simulator-a64.h"
#elif V8_TARGET_ARCH_ARM
#include "arm/simulator-arm.h"
#elif V8_TARGET_ARCH_MIPS
}
+// Increment a pointer until it has the specified alignment.
+// This works like RoundUp, but it works correctly on pointer types where
+// sizeof(*pointer) might not be 1.
+template<class T>
+T AlignUp(T pointer, size_t alignment) {
+ ASSERT(sizeof(pointer) == sizeof(uintptr_t));
+ uintptr_t pointer_raw = reinterpret_cast<uintptr_t>(pointer);
+ return reinterpret_cast<T>(RoundUp(pointer_raw, alignment));
+}
+
+
template <typename T>
int Compare(const T& a, const T& b) {
if (a == b)
T bits_;
};
+// Bit field extraction.
+inline uint32_t unsigned_bitextract_32(int msb, int lsb, uint32_t x) {
+ return (x >> lsb) & ((1 << (1 + msb - lsb)) - 1);
+}
+
+inline uint64_t unsigned_bitextract_64(int msb, int lsb, uint64_t x) {
+ return (x >> lsb) & ((static_cast<uint64_t>(1) << (1 + msb - lsb)) - 1);
+}
+
+inline int32_t signed_bitextract_32(int msb, int lsb, int32_t x) {
+ return (x << (31 - msb)) >> (lsb + 31 - msb);
+}
+
+inline int signed_bitextract_64(int msb, int lsb, int x) {
+ // TODO(jbramley): This is broken for big bitfields.
+ return (x << (63 - msb)) >> (lsb + 63 - msb);
+}
+
+// Check number width.
+inline bool is_intn(int64_t x, unsigned n) {
+ ASSERT((0 < n) && (n < 64));
+ int64_t limit = static_cast<int64_t>(1) << (n - 1);
+ return (-limit <= x) && (x < limit);
+}
+
+inline bool is_uintn(int64_t x, unsigned n) {
+ ASSERT((0 < n) && (n < (sizeof(x) * kBitsPerByte)));
+ return !(x >> n);
+}
+
+template <class T>
+inline T truncate_to_intn(T x, unsigned n) {
+ ASSERT((0 < n) && (n < (sizeof(x) * kBitsPerByte)));
+ return (x & ((static_cast<T>(1) << n) - 1));
+}
+
+#define INT_1_TO_63_LIST(V) \
+V(1) V(2) V(3) V(4) V(5) V(6) V(7) V(8) \
+V(9) V(10) V(11) V(12) V(13) V(14) V(15) V(16) \
+V(17) V(18) V(19) V(20) V(21) V(22) V(23) V(24) \
+V(25) V(26) V(27) V(28) V(29) V(30) V(31) V(32) \
+V(33) V(34) V(35) V(36) V(37) V(38) V(39) V(40) \
+V(41) V(42) V(43) V(44) V(45) V(46) V(47) V(48) \
+V(49) V(50) V(51) V(52) V(53) V(54) V(55) V(56) \
+V(57) V(58) V(59) V(60) V(61) V(62) V(63)
+
+#define DECLARE_IS_INT_N(N) \
+inline bool is_int##N(int64_t x) { return is_intn(x, N); }
+#define DECLARE_IS_UINT_N(N) \
+template <class T> \
+inline bool is_uint##N(T x) { return is_uintn(x, N); }
+#define DECLARE_TRUNCATE_TO_INT_N(N) \
+template <class T> \
+inline T truncate_to_int##N(T x) { return truncate_to_intn(x, N); }
+INT_1_TO_63_LIST(DECLARE_IS_INT_N)
+INT_1_TO_63_LIST(DECLARE_IS_UINT_N)
+INT_1_TO_63_LIST(DECLARE_TRUNCATE_TO_INT_N)
+#undef DECLARE_IS_INT_N
+#undef DECLARE_IS_UINT_N
+#undef DECLARE_TRUNCATE_TO_INT_N
class TypeFeedbackId {
public:
callback_(callback),
previous_scope_(isolate->external_callback_scope()) {
#ifdef USE_SIMULATOR
- int32_t sp = Simulator::current(isolate)->get_register(Simulator::sp);
- scope_address_ = reinterpret_cast<Address>(static_cast<intptr_t>(sp));
+ scope_address_ = Simulator::current(isolate)->get_sp();
#endif
isolate_->set_external_callback_scope(this);
}
// Utility functions
-// Test whether a 64-bit value is in a specific range.
-inline bool is_uint32(int64_t x) {
- static const uint64_t kMaxUInt32 = V8_UINT64_C(0xffffffff);
- return static_cast<uint64_t>(x) <= kMaxUInt32;
-}
-
-inline bool is_int32(int64_t x) {
- static const int64_t kMinInt32 = -V8_INT64_C(0x80000000);
- return is_uint32(x - kMinInt32);
-}
-
-inline bool uint_is_int32(uint64_t x) {
- static const uint64_t kMaxInt32 = V8_UINT64_C(0x7fffffff);
- return x <= kMaxInt32;
-}
-
-inline bool is_uint32(uint64_t x) {
- static const uint64_t kMaxUInt32 = V8_UINT64_C(0xffffffff);
- return x <= kMaxUInt32;
-}
-
// CPU Registers.
//
// 1) We would prefer to use an enum, but enum values are assignment-
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# Too slow in Debug mode.
[
-['mode == debug', {
- # Too slow in Debug mode.
- 'octane/mandreel': [SKIP],
-}], # 'mode == debug'
+[ALWAYS, {
+ 'octane/mandreel': [PASS, ['mode == debug', SKIP]],
+}], # ALWAYS
]
'test-macro-assembler-arm.cc'
],
}],
+ ['v8_target_arch=="a64"', {
+ 'sources': [
+ 'test-utils-a64.cc',
+ 'test-assembler-a64.cc',
+ 'test-disasm-a64.cc',
+ 'test-fuzz-a64.cc',
+ 'test-javascript-a64.cc',
+ 'test-js-a64-variables.cc'
+ ],
+ }],
['v8_target_arch=="mipsel"', {
'sources': [
'test-assembler-mips.cc',
'test-api/Threading4': [PASS, ['mode == debug', SLOW]],
}], # ALWAYS
+##############################################################################
+['arch == a64', {
+
+ 'test-api/Bug618': [PASS],
+}], # 'arch == a64'
+
+['arch == a64 and simulator_run == True', {
+
+ # Pass but take too long with the simulator.
+ 'test-api/ExternalArrays': [PASS, TIMEOUT],
+ 'test-api/Threading1': [SKIP],
+}], # 'arch == a64 and simulator_run == True'
+
+['arch == a64 and mode == debug and simulator_run == True', {
+
+ # Pass but take too long with the simulator in debug mode.
+ 'test-api/ExternalDoubleArray': [SKIP],
+ 'test-api/ExternalFloat32Array': [SKIP],
+ 'test-api/ExternalFloat64Array': [SKIP],
+ 'test-api/ExternalFloatArray': [SKIP],
+ 'test-api/Float32Array': [SKIP],
+ 'test-api/Float64Array': [SKIP],
+ 'test-debug/DebugBreakLoop': [SKIP],
+}], # 'arch == a64 and mode == debug and simulator_run == True'
+
##############################################################################
['asan == True', {
# Skip tests not suitable for ASAN.
--- /dev/null
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <cmath>
+#include <limits>
+
+#include "v8.h"
+
+#include "macro-assembler.h"
+#include "a64/simulator-a64.h"
+#include "a64/disasm-a64.h"
+#include "a64/utils-a64.h"
+#include "cctest.h"
+#include "test-utils-a64.h"
+
+using namespace v8::internal;
+
+// Test infrastructure.
+//
+// Tests are functions which accept no parameters and have no return values.
+// The testing code should not perform an explicit return once completed. For
+// example to test the mov immediate instruction a very simple test would be:
+//
+// TEST(mov_x0_one) {
+// SETUP();
+//
+// START();
+// __ mov(x0, Operand(1));
+// END();
+//
+// RUN();
+//
+// ASSERT_EQUAL_64(1, x0);
+//
+// TEARDOWN();
+// }
+//
+// Within a START ... END block all registers but sp can be modified. sp has to
+// be explicitly saved/restored. The END() macro replaces the function return
+// so it may appear multiple times in a test if the test has multiple exit
+// points.
+//
+// Once the test has been run all integer and floating point registers as well
+// as flags are accessible through a RegisterDump instance, see
+// utils-a64.cc for more info on RegisterDump.
+//
+// We provide some helper asserts to handle common cases:
+//
+// ASSERT_EQUAL_32(int32_t, int_32t)
+// ASSERT_EQUAL_FP32(float, float)
+// ASSERT_EQUAL_32(int32_t, W register)
+// ASSERT_EQUAL_FP32(float, S register)
+// ASSERT_EQUAL_64(int64_t, int_64t)
+// ASSERT_EQUAL_FP64(double, double)
+// ASSERT_EQUAL_64(int64_t, X register)
+// ASSERT_EQUAL_64(X register, X register)
+// ASSERT_EQUAL_FP64(double, D register)
+//
+// e.g. ASSERT_EQUAL_64(0.5, d30);
+//
+// If more advanced computation is required before the assert then access the
+// RegisterDump named core directly:
+//
+// ASSERT_EQUAL_64(0x1234, core.xreg(0) & 0xffff);
+
+
+#if 0 // TODO(all): enable.
+static v8::Persistent<v8::Context> env;
+
+static void InitializeVM() {
+ if (env.IsEmpty()) {
+ env = v8::Context::New();
+ }
+}
+#endif
+
+#define __ masm.
+
+#define BUF_SIZE 8192
+#define SETUP() SETUP_SIZE(BUF_SIZE)
+
+#define INIT_V8() \
+ CcTest::InitializeVM(); \
+
+#ifdef USE_SIMULATOR
+
+// Run tests with the simulator.
+#define SETUP_SIZE(buf_size) \
+ Isolate* isolate = Isolate::Current(); \
+ HandleScope scope(isolate); \
+ ASSERT(isolate != NULL); \
+ byte* buf = new byte[buf_size]; \
+ MacroAssembler masm(isolate, buf, buf_size); \
+ Decoder decoder; \
+ Simulator simulator(&decoder); \
+ PrintDisassembler* pdis = NULL; \
+ RegisterDump core;
+
+/* if (CcTest::trace_sim()) {                                                 \
+ pdis = new PrintDisassembler(stdout); \
+ decoder.PrependVisitor(pdis); \
+ } \
+ */
+
+// Reset the assembler and simulator, so that instructions can be generated,
+// but don't actually emit any code. This can be used by tests that need to
+// emit instructions at the start of the buffer. Note that START_AFTER_RESET
+// must be called before any callee-saved register is modified, and before an
+// END is encountered.
+//
+// Most tests should call START, rather than call RESET directly.
+#define RESET() \
+ __ Reset(); \
+ simulator.ResetState();
+
+#define START_AFTER_RESET() \
+ __ SetStackPointer(csp); \
+ __ PushCalleeSavedRegisters(); \
+ __ Debug("Start test.", __LINE__, TRACE_ENABLE | LOG_ALL);
+
+#define START() \
+ RESET(); \
+ START_AFTER_RESET();
+
+#define RUN() \
+ simulator.RunFrom(reinterpret_cast<Instruction*>(buf))
+
+#define END() \
+ __ Debug("End test.", __LINE__, TRACE_DISABLE | LOG_ALL); \
+ core.Dump(&masm); \
+ __ PopCalleeSavedRegisters(); \
+ __ Ret(); \
+ __ GetCode(NULL);
+
+#define TEARDOWN() \
+ delete pdis; \
+ delete[] buf;
+
+#else // ifdef USE_SIMULATOR.
+// Run the test on real hardware or models.
+#define SETUP_SIZE(buf_size) \
+ Isolate* isolate = Isolate::Current(); \
+ HandleScope scope(isolate); \
+ ASSERT(isolate != NULL); \
+ byte* buf = new byte[buf_size]; \
+ MacroAssembler masm(isolate, buf, buf_size); \
+ RegisterDump core; \
+ CPU::SetUp();
+
+#define RESET() \
+ __ Reset();
+
+#define START_AFTER_RESET() \
+ __ SetStackPointer(csp); \
+ __ PushCalleeSavedRegisters();
+
+#define START() \
+ RESET(); \
+ START_AFTER_RESET();
+
+#define RUN() \
+ CPU::FlushICache(buf, masm.SizeOfGeneratedCode()); \
+ { \
+ void (*test_function)(void); \
+ memcpy(&test_function, &buf, sizeof(buf)); \
+ test_function(); \
+ }
+
+#define END() \
+ core.Dump(&masm); \
+ __ PopCalleeSavedRegisters(); \
+ __ Ret(); \
+ __ GetCode(NULL);
+
+#define TEARDOWN() \
+ delete[] buf;
+
+#endif // ifdef USE_SIMULATOR.
+
+#define ASSERT_EQUAL_NZCV(expected) \
+ CHECK(EqualNzcv(expected, core.flags_nzcv()))
+
+#define ASSERT_EQUAL_REGISTERS(expected) \
+ CHECK(EqualRegisters(&expected, &core))
+
+#define ASSERT_EQUAL_32(expected, result) \
+ CHECK(Equal32(static_cast<uint32_t>(expected), &core, result))
+
+#define ASSERT_EQUAL_FP32(expected, result) \
+ CHECK(EqualFP32(expected, &core, result))
+
+#define ASSERT_EQUAL_64(expected, result) \
+ CHECK(Equal64(expected, &core, result))
+
+#define ASSERT_EQUAL_FP64(expected, result) \
+ CHECK(EqualFP64(expected, &core, result))
+
+#ifdef DEBUG
+#define ASSERT_LITERAL_POOL_SIZE(expected) \
+ CHECK((expected) == (__ LiteralPoolSize()))
+#else
+#define ASSERT_LITERAL_POOL_SIZE(expected) \
+ ((void) 0)
+#endif
+
+
+TEST(stack_ops) {
+ INIT_V8();
+ SETUP();
+
+ START();
+ // save csp.
+ __ Mov(x29, csp);
+
+ // Set the csp to a known value.
+ __ Mov(x16, 0x1000);
+ __ Mov(csp, x16);
+ __ Mov(x0, csp);
+
+ // Add immediate to the csp, and move the result to a normal register.
+ __ Add(csp, csp, Operand(0x50));
+ __ Mov(x1, csp);
+
+ // Add extended to the csp, and move the result to a normal register.
+ __ Mov(x17, 0xfff);
+ __ Add(csp, csp, Operand(x17, SXTB));
+ __ Mov(x2, csp);
+
+  // Create a csp using a logical instruction, and move to normal register.
+ __ Orr(csp, xzr, Operand(0x1fff));
+ __ Mov(x3, csp);
+
+ // Write wcsp using a logical instruction.
+ __ Orr(wcsp, wzr, Operand(0xfffffff8L));
+ __ Mov(x4, csp);
+
+ // Write csp, and read back wcsp.
+ __ Orr(csp, xzr, Operand(0xfffffff8L));
+ __ Mov(w5, wcsp);
+
+ // restore csp.
+ __ Mov(csp, x29);
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(0x1000, x0);
+ ASSERT_EQUAL_64(0x1050, x1);
+ ASSERT_EQUAL_64(0x104f, x2);
+ ASSERT_EQUAL_64(0x1fff, x3);
+ ASSERT_EQUAL_64(0xfffffff8, x4);
+ ASSERT_EQUAL_64(0xfffffff8, x5);
+
+ TEARDOWN();
+}
+
+
+TEST(mvn) {
+ INIT_V8();
+ SETUP();
+
+ START();
+ __ Mvn(w0, 0xfff);
+ __ Mvn(x1, 0xfff);
+ __ Mvn(w2, Operand(w0, LSL, 1));
+ __ Mvn(x3, Operand(x1, LSL, 2));
+ __ Mvn(w4, Operand(w0, LSR, 3));
+ __ Mvn(x5, Operand(x1, LSR, 4));
+ __ Mvn(w6, Operand(w0, ASR, 11));
+ __ Mvn(x7, Operand(x1, ASR, 12));
+ __ Mvn(w8, Operand(w0, ROR, 13));
+ __ Mvn(x9, Operand(x1, ROR, 14));
+ __ Mvn(w10, Operand(w2, UXTB));
+ __ Mvn(x11, Operand(x2, SXTB, 1));
+ __ Mvn(w12, Operand(w2, UXTH, 2));
+ __ Mvn(x13, Operand(x2, SXTH, 3));
+ __ Mvn(x14, Operand(w2, UXTW, 4));
+ __ Mvn(x15, Operand(w2, SXTW, 4));
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(0xfffff000, x0);
+ ASSERT_EQUAL_64(0xfffffffffffff000UL, x1);
+ ASSERT_EQUAL_64(0x00001fff, x2);
+ ASSERT_EQUAL_64(0x0000000000003fffUL, x3);
+ ASSERT_EQUAL_64(0xe00001ff, x4);
+ ASSERT_EQUAL_64(0xf0000000000000ffUL, x5);
+ ASSERT_EQUAL_64(0x00000001, x6);
+ ASSERT_EQUAL_64(0x0, x7);
+ ASSERT_EQUAL_64(0x7ff80000, x8);
+ ASSERT_EQUAL_64(0x3ffc000000000000UL, x9);
+ ASSERT_EQUAL_64(0xffffff00, x10);
+ ASSERT_EQUAL_64(0x0000000000000001UL, x11);
+ ASSERT_EQUAL_64(0xffff8003, x12);
+ ASSERT_EQUAL_64(0xffffffffffff0007UL, x13);
+ ASSERT_EQUAL_64(0xfffffffffffe000fUL, x14);
+ ASSERT_EQUAL_64(0xfffffffffffe000fUL, x15);
+
+ TEARDOWN();
+}
+
+
+TEST(mov) {
+ INIT_V8();
+ SETUP();
+
+ START();
+ __ Mov(x0, 0xffffffffffffffffL);
+ __ Mov(x1, 0xffffffffffffffffL);
+ __ Mov(x2, 0xffffffffffffffffL);
+ __ Mov(x3, 0xffffffffffffffffL);
+
+ __ Mov(x0, 0x0123456789abcdefL);
+
+ __ movz(x1, 0xabcdL << 16);
+ __ movk(x2, 0xabcdL << 32);
+ __ movn(x3, 0xabcdL << 48);
+
+ __ Mov(x4, 0x0123456789abcdefL);
+ __ Mov(x5, x4);
+
+ __ Mov(w6, -1);
+
+ // Test that moves back to the same register have the desired effect. This
+ // is a no-op for X registers, and a truncation for W registers.
+ __ Mov(x7, 0x0123456789abcdefL);
+ __ Mov(x7, x7);
+ __ Mov(x8, 0x0123456789abcdefL);
+ __ Mov(w8, w8);
+ __ Mov(x9, 0x0123456789abcdefL);
+ __ Mov(x9, Operand(x9));
+ __ Mov(x10, 0x0123456789abcdefL);
+ __ Mov(w10, Operand(w10));
+
+ __ Mov(w11, 0xfff);
+ __ Mov(x12, 0xfff);
+ __ Mov(w13, Operand(w11, LSL, 1));
+ __ Mov(x14, Operand(x12, LSL, 2));
+ __ Mov(w15, Operand(w11, LSR, 3));
+ __ Mov(x18, Operand(x12, LSR, 4));
+ __ Mov(w19, Operand(w11, ASR, 11));
+ __ Mov(x20, Operand(x12, ASR, 12));
+ __ Mov(w21, Operand(w11, ROR, 13));
+ __ Mov(x22, Operand(x12, ROR, 14));
+ __ Mov(w23, Operand(w13, UXTB));
+ __ Mov(x24, Operand(x13, SXTB, 1));
+ __ Mov(w25, Operand(w13, UXTH, 2));
+ __ Mov(x26, Operand(x13, SXTH, 3));
+ __ Mov(x27, Operand(w13, UXTW, 4));
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(0x0123456789abcdefL, x0);
+ ASSERT_EQUAL_64(0x00000000abcd0000L, x1);
+ ASSERT_EQUAL_64(0xffffabcdffffffffL, x2);
+ ASSERT_EQUAL_64(0x5432ffffffffffffL, x3);
+ ASSERT_EQUAL_64(x4, x5);
+ ASSERT_EQUAL_32(-1, w6);
+ ASSERT_EQUAL_64(0x0123456789abcdefL, x7);
+ ASSERT_EQUAL_32(0x89abcdefL, w8);
+ ASSERT_EQUAL_64(0x0123456789abcdefL, x9);
+ ASSERT_EQUAL_32(0x89abcdefL, w10);
+ ASSERT_EQUAL_64(0x00000fff, x11);
+ ASSERT_EQUAL_64(0x0000000000000fffUL, x12);
+ ASSERT_EQUAL_64(0x00001ffe, x13);
+ ASSERT_EQUAL_64(0x0000000000003ffcUL, x14);
+ ASSERT_EQUAL_64(0x000001ff, x15);
+ ASSERT_EQUAL_64(0x00000000000000ffUL, x18);
+ ASSERT_EQUAL_64(0x00000001, x19);
+ ASSERT_EQUAL_64(0x0, x20);
+ ASSERT_EQUAL_64(0x7ff80000, x21);
+ ASSERT_EQUAL_64(0x3ffc000000000000UL, x22);
+ ASSERT_EQUAL_64(0x000000fe, x23);
+ ASSERT_EQUAL_64(0xfffffffffffffffcUL, x24);
+ ASSERT_EQUAL_64(0x00007ff8, x25);
+ ASSERT_EQUAL_64(0x000000000000fff0UL, x26);
+ ASSERT_EQUAL_64(0x000000000001ffe0UL, x27);
+
+ TEARDOWN();
+}
+
+
+TEST(mov_imm_w) {
+ INIT_V8();
+ SETUP();
+
+ START();
+ __ Mov(w0, 0xffffffffL);
+ __ Mov(w1, 0xffff1234L);
+ __ Mov(w2, 0x1234ffffL);
+ __ Mov(w3, 0x00000000L);
+ __ Mov(w4, 0x00001234L);
+ __ Mov(w5, 0x12340000L);
+ __ Mov(w6, 0x12345678L);
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(0xffffffffL, x0);
+ ASSERT_EQUAL_64(0xffff1234L, x1);
+ ASSERT_EQUAL_64(0x1234ffffL, x2);
+ ASSERT_EQUAL_64(0x00000000L, x3);
+ ASSERT_EQUAL_64(0x00001234L, x4);
+ ASSERT_EQUAL_64(0x12340000L, x5);
+ ASSERT_EQUAL_64(0x12345678L, x6);
+
+ TEARDOWN();
+}
+
+
+TEST(mov_imm_x) {
+ INIT_V8();
+ SETUP();
+
+ START();
+ __ Mov(x0, 0xffffffffffffffffL);
+ __ Mov(x1, 0xffffffffffff1234L);
+ __ Mov(x2, 0xffffffff12345678L);
+ __ Mov(x3, 0xffff1234ffff5678L);
+ __ Mov(x4, 0x1234ffffffff5678L);
+ __ Mov(x5, 0x1234ffff5678ffffL);
+ __ Mov(x6, 0x12345678ffffffffL);
+ __ Mov(x7, 0x1234ffffffffffffL);
+ __ Mov(x8, 0x123456789abcffffL);
+ __ Mov(x9, 0x12345678ffff9abcL);
+ __ Mov(x10, 0x1234ffff56789abcL);
+ __ Mov(x11, 0xffff123456789abcL);
+ __ Mov(x12, 0x0000000000000000L);
+ __ Mov(x13, 0x0000000000001234L);
+ __ Mov(x14, 0x0000000012345678L);
+ __ Mov(x15, 0x0000123400005678L);
+ __ Mov(x18, 0x1234000000005678L);
+ __ Mov(x19, 0x1234000056780000L);
+ __ Mov(x20, 0x1234567800000000L);
+ __ Mov(x21, 0x1234000000000000L);
+ __ Mov(x22, 0x123456789abc0000L);
+ __ Mov(x23, 0x1234567800009abcL);
+ __ Mov(x24, 0x1234000056789abcL);
+ __ Mov(x25, 0x0000123456789abcL);
+ __ Mov(x26, 0x123456789abcdef0L);
+ __ Mov(x27, 0xffff000000000001L);
+ __ Mov(x28, 0x8000ffff00000000L);
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(0xffffffffffff1234L, x1);
+ ASSERT_EQUAL_64(0xffffffff12345678L, x2);
+ ASSERT_EQUAL_64(0xffff1234ffff5678L, x3);
+ ASSERT_EQUAL_64(0x1234ffffffff5678L, x4);
+ ASSERT_EQUAL_64(0x1234ffff5678ffffL, x5);
+ ASSERT_EQUAL_64(0x12345678ffffffffL, x6);
+ ASSERT_EQUAL_64(0x1234ffffffffffffL, x7);
+ ASSERT_EQUAL_64(0x123456789abcffffL, x8);
+ ASSERT_EQUAL_64(0x12345678ffff9abcL, x9);
+ ASSERT_EQUAL_64(0x1234ffff56789abcL, x10);
+ ASSERT_EQUAL_64(0xffff123456789abcL, x11);
+ ASSERT_EQUAL_64(0x0000000000000000L, x12);
+ ASSERT_EQUAL_64(0x0000000000001234L, x13);
+ ASSERT_EQUAL_64(0x0000000012345678L, x14);
+ ASSERT_EQUAL_64(0x0000123400005678L, x15);
+ ASSERT_EQUAL_64(0x1234000000005678L, x18);
+ ASSERT_EQUAL_64(0x1234000056780000L, x19);
+ ASSERT_EQUAL_64(0x1234567800000000L, x20);
+ ASSERT_EQUAL_64(0x1234000000000000L, x21);
+ ASSERT_EQUAL_64(0x123456789abc0000L, x22);
+ ASSERT_EQUAL_64(0x1234567800009abcL, x23);
+ ASSERT_EQUAL_64(0x1234000056789abcL, x24);
+ ASSERT_EQUAL_64(0x0000123456789abcL, x25);
+ ASSERT_EQUAL_64(0x123456789abcdef0L, x26);
+ ASSERT_EQUAL_64(0xffff000000000001L, x27);
+ ASSERT_EQUAL_64(0x8000ffff00000000L, x28);
+
+ TEARDOWN();
+}
+
+
+TEST(orr) {
+ INIT_V8();
+ SETUP();
+
+ START();
+ __ Mov(x0, 0xf0f0);
+ __ Mov(x1, 0xf00000ff);
+
+ __ Orr(x2, x0, Operand(x1));
+ __ Orr(w3, w0, Operand(w1, LSL, 28));
+ __ Orr(x4, x0, Operand(x1, LSL, 32));
+ __ Orr(x5, x0, Operand(x1, LSR, 4));
+ __ Orr(w6, w0, Operand(w1, ASR, 4));
+ __ Orr(x7, x0, Operand(x1, ASR, 4));
+ __ Orr(w8, w0, Operand(w1, ROR, 12));
+ __ Orr(x9, x0, Operand(x1, ROR, 12));
+ __ Orr(w10, w0, Operand(0xf));
+ __ Orr(x11, x0, Operand(0xf0000000f0000000L));
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(0xf000f0ff, x2);
+ ASSERT_EQUAL_64(0xf000f0f0, x3);
+ ASSERT_EQUAL_64(0xf00000ff0000f0f0L, x4);
+ ASSERT_EQUAL_64(0x0f00f0ff, x5);
+ ASSERT_EQUAL_64(0xff00f0ff, x6);
+ ASSERT_EQUAL_64(0x0f00f0ff, x7);
+ ASSERT_EQUAL_64(0x0ffff0f0, x8);
+ ASSERT_EQUAL_64(0x0ff00000000ff0f0L, x9);
+ ASSERT_EQUAL_64(0xf0ff, x10);
+ ASSERT_EQUAL_64(0xf0000000f000f0f0L, x11);
+
+ TEARDOWN();
+}
+
+
+TEST(orr_extend) {
+ INIT_V8();
+ SETUP();
+
+ START();
+ __ Mov(x0, 1);
+ __ Mov(x1, 0x8000000080008080UL);
+ __ Orr(w6, w0, Operand(w1, UXTB));
+ __ Orr(x7, x0, Operand(x1, UXTH, 1));
+ __ Orr(w8, w0, Operand(w1, UXTW, 2));
+ __ Orr(x9, x0, Operand(x1, UXTX, 3));
+ __ Orr(w10, w0, Operand(w1, SXTB));
+ __ Orr(x11, x0, Operand(x1, SXTH, 1));
+ __ Orr(x12, x0, Operand(x1, SXTW, 2));
+ __ Orr(x13, x0, Operand(x1, SXTX, 3));
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(0x00000081, x6);
+ ASSERT_EQUAL_64(0x00010101, x7);
+ ASSERT_EQUAL_64(0x00020201, x8);
+ ASSERT_EQUAL_64(0x0000000400040401UL, x9);
+ ASSERT_EQUAL_64(0x00000000ffffff81UL, x10);
+ ASSERT_EQUAL_64(0xffffffffffff0101UL, x11);
+ ASSERT_EQUAL_64(0xfffffffe00020201UL, x12);
+ ASSERT_EQUAL_64(0x0000000400040401UL, x13);
+
+ TEARDOWN();
+}
+
+
+TEST(bitwise_wide_imm) {
+ INIT_V8();
+ SETUP();
+
+ START();
+ __ Mov(x0, 0);
+ __ Mov(x1, 0xf0f0f0f0f0f0f0f0UL);
+
+ __ Orr(x10, x0, Operand(0x1234567890abcdefUL));
+ __ Orr(w11, w1, Operand(0x90abcdef));
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(0, x0);
+ ASSERT_EQUAL_64(0xf0f0f0f0f0f0f0f0UL, x1);
+ ASSERT_EQUAL_64(0x1234567890abcdefUL, x10);
+ ASSERT_EQUAL_64(0xf0fbfdffUL, x11);
+
+ TEARDOWN();
+}
+
+
+TEST(orn) {
+ INIT_V8();
+ SETUP();
+
+ START();
+ __ Mov(x0, 0xf0f0);
+ __ Mov(x1, 0xf00000ff);
+
+ __ Orn(x2, x0, Operand(x1));
+ __ Orn(w3, w0, Operand(w1, LSL, 4));
+ __ Orn(x4, x0, Operand(x1, LSL, 4));
+ __ Orn(x5, x0, Operand(x1, LSR, 1));
+ __ Orn(w6, w0, Operand(w1, ASR, 1));
+ __ Orn(x7, x0, Operand(x1, ASR, 1));
+ __ Orn(w8, w0, Operand(w1, ROR, 16));
+ __ Orn(x9, x0, Operand(x1, ROR, 16));
+ __ Orn(w10, w0, Operand(0xffff));
+ __ Orn(x11, x0, Operand(0xffff0000ffffL));
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(0xffffffff0ffffff0L, x2);
+ ASSERT_EQUAL_64(0xfffff0ff, x3);
+ ASSERT_EQUAL_64(0xfffffff0fffff0ffL, x4);
+ ASSERT_EQUAL_64(0xffffffff87fffff0L, x5);
+ ASSERT_EQUAL_64(0x07fffff0, x6);
+ ASSERT_EQUAL_64(0xffffffff87fffff0L, x7);
+ ASSERT_EQUAL_64(0xff00ffff, x8);
+ ASSERT_EQUAL_64(0xff00ffffffffffffL, x9);
+ ASSERT_EQUAL_64(0xfffff0f0, x10);
+ ASSERT_EQUAL_64(0xffff0000fffff0f0L, x11);
+
+ TEARDOWN();
+}
+
+
+TEST(orn_extend) {
+ INIT_V8();
+ SETUP();
+
+ START();
+ __ Mov(x0, 1);
+ __ Mov(x1, 0x8000000080008081UL);
+ __ Orn(w6, w0, Operand(w1, UXTB));
+ __ Orn(x7, x0, Operand(x1, UXTH, 1));
+ __ Orn(w8, w0, Operand(w1, UXTW, 2));
+ __ Orn(x9, x0, Operand(x1, UXTX, 3));
+ __ Orn(w10, w0, Operand(w1, SXTB));
+ __ Orn(x11, x0, Operand(x1, SXTH, 1));
+ __ Orn(x12, x0, Operand(x1, SXTW, 2));
+ __ Orn(x13, x0, Operand(x1, SXTX, 3));
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(0xffffff7f, x6);
+ ASSERT_EQUAL_64(0xfffffffffffefefdUL, x7);
+ ASSERT_EQUAL_64(0xfffdfdfb, x8);
+ ASSERT_EQUAL_64(0xfffffffbfffbfbf7UL, x9);
+ ASSERT_EQUAL_64(0x0000007f, x10);
+ ASSERT_EQUAL_64(0x0000fefd, x11);
+ ASSERT_EQUAL_64(0x00000001fffdfdfbUL, x12);
+ ASSERT_EQUAL_64(0xfffffffbfffbfbf7UL, x13);
+
+ TEARDOWN();
+}
+
+
+TEST(and_) {
+ INIT_V8();
+ SETUP();
+
+ START();
+ __ Mov(x0, 0xfff0);
+ __ Mov(x1, 0xf00000ff);
+
+ __ And(x2, x0, Operand(x1));
+ __ And(w3, w0, Operand(w1, LSL, 4));
+ __ And(x4, x0, Operand(x1, LSL, 4));
+ __ And(x5, x0, Operand(x1, LSR, 1));
+ __ And(w6, w0, Operand(w1, ASR, 20));
+ __ And(x7, x0, Operand(x1, ASR, 20));
+ __ And(w8, w0, Operand(w1, ROR, 28));
+ __ And(x9, x0, Operand(x1, ROR, 28));
+ __ And(w10, w0, Operand(0xff00));
+ __ And(x11, x0, Operand(0xff));
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(0x000000f0, x2);
+ ASSERT_EQUAL_64(0x00000ff0, x3);
+ ASSERT_EQUAL_64(0x00000ff0, x4);
+ ASSERT_EQUAL_64(0x00000070, x5);
+ ASSERT_EQUAL_64(0x0000ff00, x6);
+ ASSERT_EQUAL_64(0x00000f00, x7);
+ ASSERT_EQUAL_64(0x00000ff0, x8);
+ ASSERT_EQUAL_64(0x00000000, x9);
+ ASSERT_EQUAL_64(0x0000ff00, x10);
+ ASSERT_EQUAL_64(0x000000f0, x11);
+
+ TEARDOWN();
+}
+
+
+TEST(and_extend) {
+ INIT_V8();
+ SETUP();
+
+ START();
+ __ Mov(x0, 0xffffffffffffffffUL);
+ __ Mov(x1, 0x8000000080008081UL);
+ __ And(w6, w0, Operand(w1, UXTB));
+ __ And(x7, x0, Operand(x1, UXTH, 1));
+ __ And(w8, w0, Operand(w1, UXTW, 2));
+ __ And(x9, x0, Operand(x1, UXTX, 3));
+ __ And(w10, w0, Operand(w1, SXTB));
+ __ And(x11, x0, Operand(x1, SXTH, 1));
+ __ And(x12, x0, Operand(x1, SXTW, 2));
+ __ And(x13, x0, Operand(x1, SXTX, 3));
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(0x00000081, x6);
+ ASSERT_EQUAL_64(0x00010102, x7);
+ ASSERT_EQUAL_64(0x00020204, x8);
+ ASSERT_EQUAL_64(0x0000000400040408UL, x9);
+ ASSERT_EQUAL_64(0xffffff81, x10);
+ ASSERT_EQUAL_64(0xffffffffffff0102UL, x11);
+ ASSERT_EQUAL_64(0xfffffffe00020204UL, x12);
+ ASSERT_EQUAL_64(0x0000000400040408UL, x13);
+
+ TEARDOWN();
+}
+
+
+TEST(ands) {
+ INIT_V8();
+ SETUP();
+
+ START();
+ __ Mov(x1, 0xf00000ff);
+ __ Ands(w0, w1, Operand(w1));
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_NZCV(NFlag);
+ ASSERT_EQUAL_64(0xf00000ff, x0);
+
+ START();
+ __ Mov(x0, 0xfff0);
+ __ Mov(x1, 0xf00000ff);
+ __ Ands(w0, w0, Operand(w1, LSR, 4));
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_NZCV(ZFlag);
+ ASSERT_EQUAL_64(0x00000000, x0);
+
+ START();
+ __ Mov(x0, 0x8000000000000000L);
+ __ Mov(x1, 0x00000001);
+ __ Ands(x0, x0, Operand(x1, ROR, 1));
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_NZCV(NFlag);
+ ASSERT_EQUAL_64(0x8000000000000000L, x0);
+
+ START();
+ __ Mov(x0, 0xfff0);
+ __ Ands(w0, w0, Operand(0xf));
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_NZCV(ZFlag);
+ ASSERT_EQUAL_64(0x00000000, x0);
+
+ START();
+ __ Mov(x0, 0xff000000);
+ __ Ands(w0, w0, Operand(0x80000000));
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_NZCV(NFlag);
+ ASSERT_EQUAL_64(0x80000000, x0);
+
+ TEARDOWN();
+}
+
+
+TEST(bic) {
+ INIT_V8();
+ SETUP();
+
+ START();
+ __ Mov(x0, 0xfff0);
+ __ Mov(x1, 0xf00000ff);
+
+ __ Bic(x2, x0, Operand(x1));
+ __ Bic(w3, w0, Operand(w1, LSL, 4));
+ __ Bic(x4, x0, Operand(x1, LSL, 4));
+ __ Bic(x5, x0, Operand(x1, LSR, 1));
+ __ Bic(w6, w0, Operand(w1, ASR, 20));
+ __ Bic(x7, x0, Operand(x1, ASR, 20));
+ __ Bic(w8, w0, Operand(w1, ROR, 28));
+ __ Bic(x9, x0, Operand(x1, ROR, 24));
+ __ Bic(x10, x0, Operand(0x1f));
+ __ Bic(x11, x0, Operand(0x100));
+
+ // Test bic into csp when the constant cannot be encoded in the immediate
+ // field.
+ // Use x20 to preserve csp. We check for the result via x21 because the
+ // test infrastructure requires that csp be restored to its original value.
+ __ Mov(x20, csp);
+ __ Mov(x0, 0xffffff);
+ __ Bic(csp, x0, Operand(0xabcdef));
+ __ Mov(x21, csp);
+ __ Mov(csp, x20);
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(0x0000ff00, x2);
+ ASSERT_EQUAL_64(0x0000f000, x3);
+ ASSERT_EQUAL_64(0x0000f000, x4);
+ ASSERT_EQUAL_64(0x0000ff80, x5);
+ ASSERT_EQUAL_64(0x000000f0, x6);
+ ASSERT_EQUAL_64(0x0000f0f0, x7);
+ ASSERT_EQUAL_64(0x0000f000, x8);
+ ASSERT_EQUAL_64(0x0000ff00, x9);
+ ASSERT_EQUAL_64(0x0000ffe0, x10);
+ ASSERT_EQUAL_64(0x0000fef0, x11);
+
+ ASSERT_EQUAL_64(0x543210, x21);
+
+ TEARDOWN();
+}
+
+
+TEST(bic_extend) {
+ INIT_V8();
+ SETUP();
+
+ START();
+ __ Mov(x0, 0xffffffffffffffffUL);
+ __ Mov(x1, 0x8000000080008081UL);
+ __ Bic(w6, w0, Operand(w1, UXTB));
+ __ Bic(x7, x0, Operand(x1, UXTH, 1));
+ __ Bic(w8, w0, Operand(w1, UXTW, 2));
+ __ Bic(x9, x0, Operand(x1, UXTX, 3));
+ __ Bic(w10, w0, Operand(w1, SXTB));
+ __ Bic(x11, x0, Operand(x1, SXTH, 1));
+ __ Bic(x12, x0, Operand(x1, SXTW, 2));
+ __ Bic(x13, x0, Operand(x1, SXTX, 3));
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(0xffffff7e, x6);
+ ASSERT_EQUAL_64(0xfffffffffffefefdUL, x7);
+ ASSERT_EQUAL_64(0xfffdfdfb, x8);
+ ASSERT_EQUAL_64(0xfffffffbfffbfbf7UL, x9);
+ ASSERT_EQUAL_64(0x0000007e, x10);
+ ASSERT_EQUAL_64(0x0000fefd, x11);
+ ASSERT_EQUAL_64(0x00000001fffdfdfbUL, x12);
+ ASSERT_EQUAL_64(0xfffffffbfffbfbf7UL, x13);
+
+ TEARDOWN();
+}
+
+
+TEST(bics) {
+ INIT_V8();
+ SETUP();
+
+ START();
+ __ Mov(x1, 0xffff);
+ __ Bics(w0, w1, Operand(w1));
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_NZCV(ZFlag);
+ ASSERT_EQUAL_64(0x00000000, x0);
+
+ START();
+ __ Mov(x0, 0xffffffff);
+ __ Bics(w0, w0, Operand(w0, LSR, 1));
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_NZCV(NFlag);
+ ASSERT_EQUAL_64(0x80000000, x0);
+
+ START();
+ __ Mov(x0, 0x8000000000000000L);
+ __ Mov(x1, 0x00000001);
+ __ Bics(x0, x0, Operand(x1, ROR, 1));
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_NZCV(ZFlag);
+ ASSERT_EQUAL_64(0x00000000, x0);
+
+ START();
+ __ Mov(x0, 0xffffffffffffffffL);
+ __ Bics(x0, x0, Operand(0x7fffffffffffffffL));
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_NZCV(NFlag);
+ ASSERT_EQUAL_64(0x8000000000000000L, x0);
+
+ START();
+ __ Mov(w0, 0xffff0000);
+ __ Bics(w0, w0, Operand(0xfffffff0));
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_NZCV(ZFlag);
+ ASSERT_EQUAL_64(0x00000000, x0);
+
+ TEARDOWN();
+}
+
+
+TEST(eor) {
+ INIT_V8();
+ SETUP();
+
+ START();
+ __ Mov(x0, 0xfff0);
+ __ Mov(x1, 0xf00000ff);
+
+ __ Eor(x2, x0, Operand(x1));
+ __ Eor(w3, w0, Operand(w1, LSL, 4));
+ __ Eor(x4, x0, Operand(x1, LSL, 4));
+ __ Eor(x5, x0, Operand(x1, LSR, 1));
+ __ Eor(w6, w0, Operand(w1, ASR, 20));
+ __ Eor(x7, x0, Operand(x1, ASR, 20));
+ __ Eor(w8, w0, Operand(w1, ROR, 28));
+ __ Eor(x9, x0, Operand(x1, ROR, 28));
+ __ Eor(w10, w0, Operand(0xff00ff00));
+ __ Eor(x11, x0, Operand(0xff00ff00ff00ff00L));
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(0xf000ff0f, x2);
+ ASSERT_EQUAL_64(0x0000f000, x3);
+ ASSERT_EQUAL_64(0x0000000f0000f000L, x4);
+ ASSERT_EQUAL_64(0x7800ff8f, x5);
+ ASSERT_EQUAL_64(0xffff00f0, x6);
+ ASSERT_EQUAL_64(0x0000f0f0, x7);
+ ASSERT_EQUAL_64(0x0000f00f, x8);
+ ASSERT_EQUAL_64(0x00000ff00000ffffL, x9);
+ ASSERT_EQUAL_64(0xff0000f0, x10);
+ ASSERT_EQUAL_64(0xff00ff00ff0000f0L, x11);
+
+ TEARDOWN();
+}
+
+
+TEST(eor_extend) {
+ INIT_V8();
+ SETUP();
+
+ START();
+ __ Mov(x0, 0x1111111111111111UL);
+ __ Mov(x1, 0x8000000080008081UL);
+ __ Eor(w6, w0, Operand(w1, UXTB));
+ __ Eor(x7, x0, Operand(x1, UXTH, 1));
+ __ Eor(w8, w0, Operand(w1, UXTW, 2));
+ __ Eor(x9, x0, Operand(x1, UXTX, 3));
+ __ Eor(w10, w0, Operand(w1, SXTB));
+ __ Eor(x11, x0, Operand(x1, SXTH, 1));
+ __ Eor(x12, x0, Operand(x1, SXTW, 2));
+ __ Eor(x13, x0, Operand(x1, SXTX, 3));
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(0x11111190, x6);
+ ASSERT_EQUAL_64(0x1111111111101013UL, x7);
+ ASSERT_EQUAL_64(0x11131315, x8);
+ ASSERT_EQUAL_64(0x1111111511151519UL, x9);
+ ASSERT_EQUAL_64(0xeeeeee90, x10);
+ ASSERT_EQUAL_64(0xeeeeeeeeeeee1013UL, x11);
+ ASSERT_EQUAL_64(0xeeeeeeef11131315UL, x12);
+ ASSERT_EQUAL_64(0x1111111511151519UL, x13);
+
+ TEARDOWN();
+}
+
+
+TEST(eon) {
+ INIT_V8();
+ SETUP();
+
+ START();
+ __ Mov(x0, 0xfff0);
+ __ Mov(x1, 0xf00000ff);
+
+ __ Eon(x2, x0, Operand(x1));
+ __ Eon(w3, w0, Operand(w1, LSL, 4));
+ __ Eon(x4, x0, Operand(x1, LSL, 4));
+ __ Eon(x5, x0, Operand(x1, LSR, 1));
+ __ Eon(w6, w0, Operand(w1, ASR, 20));
+ __ Eon(x7, x0, Operand(x1, ASR, 20));
+ __ Eon(w8, w0, Operand(w1, ROR, 28));
+ __ Eon(x9, x0, Operand(x1, ROR, 28));
+ __ Eon(w10, w0, Operand(0x03c003c0));
+ __ Eon(x11, x0, Operand(0x0000100000001000L));
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(0xffffffff0fff00f0L, x2);
+ ASSERT_EQUAL_64(0xffff0fff, x3);
+ ASSERT_EQUAL_64(0xfffffff0ffff0fffL, x4);
+ ASSERT_EQUAL_64(0xffffffff87ff0070L, x5);
+ ASSERT_EQUAL_64(0x0000ff0f, x6);
+ ASSERT_EQUAL_64(0xffffffffffff0f0fL, x7);
+ ASSERT_EQUAL_64(0xffff0ff0, x8);
+ ASSERT_EQUAL_64(0xfffff00fffff0000L, x9);
+ ASSERT_EQUAL_64(0xfc3f03cf, x10);
+ ASSERT_EQUAL_64(0xffffefffffff100fL, x11);
+
+ TEARDOWN();
+}
+
+
+TEST(eon_extend) {
+ INIT_V8();
+ SETUP();
+
+ START();
+ __ Mov(x0, 0x1111111111111111UL);
+ __ Mov(x1, 0x8000000080008081UL);
+ __ Eon(w6, w0, Operand(w1, UXTB));
+ __ Eon(x7, x0, Operand(x1, UXTH, 1));
+ __ Eon(w8, w0, Operand(w1, UXTW, 2));
+ __ Eon(x9, x0, Operand(x1, UXTX, 3));
+ __ Eon(w10, w0, Operand(w1, SXTB));
+ __ Eon(x11, x0, Operand(x1, SXTH, 1));
+ __ Eon(x12, x0, Operand(x1, SXTW, 2));
+ __ Eon(x13, x0, Operand(x1, SXTX, 3));
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(0xeeeeee6f, x6);
+ ASSERT_EQUAL_64(0xeeeeeeeeeeefefecUL, x7);
+ ASSERT_EQUAL_64(0xeeececea, x8);
+ ASSERT_EQUAL_64(0xeeeeeeeaeeeaeae6UL, x9);
+ ASSERT_EQUAL_64(0x1111116f, x10);
+ ASSERT_EQUAL_64(0x111111111111efecUL, x11);
+ ASSERT_EQUAL_64(0x11111110eeececeaUL, x12);
+ ASSERT_EQUAL_64(0xeeeeeeeaeeeaeae6UL, x13);
+
+ TEARDOWN();
+}
+
+
+TEST(mul) {
+ INIT_V8();
+ SETUP();
+
+ START();
+ __ Mov(x16, 0);
+ __ Mov(x17, 1);
+ __ Mov(x18, 0xffffffff);
+ __ Mov(x19, 0xffffffffffffffffUL);
+
+ __ Mul(w0, w16, w16);
+ __ Mul(w1, w16, w17);
+ __ Mul(w2, w17, w18);
+ __ Mul(w3, w18, w19);
+ __ Mul(x4, x16, x16);
+ __ Mul(x5, x17, x18);
+ __ Mul(x6, x18, x19);
+ __ Mul(x7, x19, x19);
+ __ Smull(x8, w17, w18);
+ __ Smull(x9, w18, w18);
+ __ Smull(x10, w19, w19);
+ __ Mneg(w11, w16, w16);
+ __ Mneg(w12, w16, w17);
+ __ Mneg(w13, w17, w18);
+ __ Mneg(w14, w18, w19);
+ __ Mneg(x20, x16, x16);
+ __ Mneg(x21, x17, x18);
+ __ Mneg(x22, x18, x19);
+ __ Mneg(x23, x19, x19);
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(0, x0);
+ ASSERT_EQUAL_64(0, x1);
+ ASSERT_EQUAL_64(0xffffffff, x2);
+ ASSERT_EQUAL_64(1, x3);
+ ASSERT_EQUAL_64(0, x4);
+ ASSERT_EQUAL_64(0xffffffff, x5);
+ ASSERT_EQUAL_64(0xffffffff00000001UL, x6);
+ ASSERT_EQUAL_64(1, x7);
+ ASSERT_EQUAL_64(0xffffffffffffffffUL, x8);
+ ASSERT_EQUAL_64(1, x9);
+ ASSERT_EQUAL_64(1, x10);
+ ASSERT_EQUAL_64(0, x11);
+ ASSERT_EQUAL_64(0, x12);
+ ASSERT_EQUAL_64(1, x13);
+ ASSERT_EQUAL_64(0xffffffff, x14);
+ ASSERT_EQUAL_64(0, x20);
+ ASSERT_EQUAL_64(0xffffffff00000001UL, x21);
+ ASSERT_EQUAL_64(0xffffffff, x22);
+ ASSERT_EQUAL_64(0xffffffffffffffffUL, x23);
+
+ TEARDOWN();
+}
+
+
+static void SmullHelper(int64_t expected, int64_t a, int64_t b) {
+ SETUP();
+ START();
+ __ Mov(w0, a);
+ __ Mov(w1, b);
+ __ Smull(x2, w0, w1);
+ END();
+ RUN();
+ ASSERT_EQUAL_64(expected, x2);
+ TEARDOWN();
+}
+
+
+TEST(smull) {
+ INIT_V8();
+ SmullHelper(0, 0, 0);
+ SmullHelper(1, 1, 1);
+ SmullHelper(-1, -1, 1);
+ SmullHelper(1, -1, -1);
+ SmullHelper(0xffffffff80000000, 0x80000000, 1);
+ SmullHelper(0x0000000080000000, 0x00010000, 0x00008000);
+}
+
+
+TEST(madd) {
+ INIT_V8();
+ SETUP();
+
+ START();
+ __ Mov(x16, 0);
+ __ Mov(x17, 1);
+ __ Mov(x18, 0xffffffff);
+ __ Mov(x19, 0xffffffffffffffffUL);
+
+ __ Madd(w0, w16, w16, w16);
+ __ Madd(w1, w16, w16, w17);
+ __ Madd(w2, w16, w16, w18);
+ __ Madd(w3, w16, w16, w19);
+ __ Madd(w4, w16, w17, w17);
+ __ Madd(w5, w17, w17, w18);
+ __ Madd(w6, w17, w17, w19);
+ __ Madd(w7, w17, w18, w16);
+ __ Madd(w8, w17, w18, w18);
+ __ Madd(w9, w18, w18, w17);
+ __ Madd(w10, w18, w19, w18);
+ __ Madd(w11, w19, w19, w19);
+
+ __ Madd(x12, x16, x16, x16);
+ __ Madd(x13, x16, x16, x17);
+ __ Madd(x14, x16, x16, x18);
+ __ Madd(x15, x16, x16, x19);
+ __ Madd(x20, x16, x17, x17);
+ __ Madd(x21, x17, x17, x18);
+ __ Madd(x22, x17, x17, x19);
+ __ Madd(x23, x17, x18, x16);
+ __ Madd(x24, x17, x18, x18);
+ __ Madd(x25, x18, x18, x17);
+ __ Madd(x26, x18, x19, x18);
+ __ Madd(x27, x19, x19, x19);
+
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(0, x0);
+ ASSERT_EQUAL_64(1, x1);
+ ASSERT_EQUAL_64(0xffffffff, x2);
+ ASSERT_EQUAL_64(0xffffffff, x3);
+ ASSERT_EQUAL_64(1, x4);
+ ASSERT_EQUAL_64(0, x5);
+ ASSERT_EQUAL_64(0, x6);
+ ASSERT_EQUAL_64(0xffffffff, x7);
+ ASSERT_EQUAL_64(0xfffffffe, x8);
+ ASSERT_EQUAL_64(2, x9);
+ ASSERT_EQUAL_64(0, x10);
+ ASSERT_EQUAL_64(0, x11);
+
+ ASSERT_EQUAL_64(0, x12);
+ ASSERT_EQUAL_64(1, x13);
+ ASSERT_EQUAL_64(0xffffffff, x14);
+  ASSERT_EQUAL_64(0xffffffffffffffffUL, x15);
+ ASSERT_EQUAL_64(1, x20);
+ ASSERT_EQUAL_64(0x100000000UL, x21);
+ ASSERT_EQUAL_64(0, x22);
+ ASSERT_EQUAL_64(0xffffffff, x23);
+  ASSERT_EQUAL_64(0x1fffffffeUL, x24);
+ ASSERT_EQUAL_64(0xfffffffe00000002UL, x25);
+ ASSERT_EQUAL_64(0, x26);
+ ASSERT_EQUAL_64(0, x27);
+
+ TEARDOWN();
+}
+
+
+TEST(msub) {
+ INIT_V8();
+ SETUP();
+
+ START();
+ __ Mov(x16, 0);
+ __ Mov(x17, 1);
+ __ Mov(x18, 0xffffffff);
+ __ Mov(x19, 0xffffffffffffffffUL);
+
+ __ Msub(w0, w16, w16, w16);
+ __ Msub(w1, w16, w16, w17);
+ __ Msub(w2, w16, w16, w18);
+ __ Msub(w3, w16, w16, w19);
+ __ Msub(w4, w16, w17, w17);
+ __ Msub(w5, w17, w17, w18);
+ __ Msub(w6, w17, w17, w19);
+ __ Msub(w7, w17, w18, w16);
+ __ Msub(w8, w17, w18, w18);
+ __ Msub(w9, w18, w18, w17);
+ __ Msub(w10, w18, w19, w18);
+ __ Msub(w11, w19, w19, w19);
+
+ __ Msub(x12, x16, x16, x16);
+ __ Msub(x13, x16, x16, x17);
+ __ Msub(x14, x16, x16, x18);
+ __ Msub(x15, x16, x16, x19);
+ __ Msub(x20, x16, x17, x17);
+ __ Msub(x21, x17, x17, x18);
+ __ Msub(x22, x17, x17, x19);
+ __ Msub(x23, x17, x18, x16);
+ __ Msub(x24, x17, x18, x18);
+ __ Msub(x25, x18, x18, x17);
+ __ Msub(x26, x18, x19, x18);
+ __ Msub(x27, x19, x19, x19);
+
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(0, x0);
+ ASSERT_EQUAL_64(1, x1);
+ ASSERT_EQUAL_64(0xffffffff, x2);
+ ASSERT_EQUAL_64(0xffffffff, x3);
+ ASSERT_EQUAL_64(1, x4);
+ ASSERT_EQUAL_64(0xfffffffe, x5);
+ ASSERT_EQUAL_64(0xfffffffe, x6);
+ ASSERT_EQUAL_64(1, x7);
+ ASSERT_EQUAL_64(0, x8);
+ ASSERT_EQUAL_64(0, x9);
+ ASSERT_EQUAL_64(0xfffffffe, x10);
+ ASSERT_EQUAL_64(0xfffffffe, x11);
+
+ ASSERT_EQUAL_64(0, x12);
+ ASSERT_EQUAL_64(1, x13);
+ ASSERT_EQUAL_64(0xffffffff, x14);
+ ASSERT_EQUAL_64(0xffffffffffffffffUL, x15);
+ ASSERT_EQUAL_64(1, x20);
+ ASSERT_EQUAL_64(0xfffffffeUL, x21);
+ ASSERT_EQUAL_64(0xfffffffffffffffeUL, x22);
+ ASSERT_EQUAL_64(0xffffffff00000001UL, x23);
+ ASSERT_EQUAL_64(0, x24);
+ ASSERT_EQUAL_64(0x200000000UL, x25);
+ ASSERT_EQUAL_64(0x1fffffffeUL, x26);
+ ASSERT_EQUAL_64(0xfffffffffffffffeUL, x27);
+
+ TEARDOWN();
+}
+
+
+TEST(smulh) {
+ INIT_V8();
+ SETUP();
+
+ START();
+ __ Mov(x20, 0);
+ __ Mov(x21, 1);
+ __ Mov(x22, 0x0000000100000000L);
+ __ Mov(x23, 0x12345678);
+ __ Mov(x24, 0x0123456789abcdefL);
+ __ Mov(x25, 0x0000000200000000L);
+ __ Mov(x26, 0x8000000000000000UL);
+ __ Mov(x27, 0xffffffffffffffffUL);
+ __ Mov(x28, 0x5555555555555555UL);
+ __ Mov(x29, 0xaaaaaaaaaaaaaaaaUL);
+
+ __ Smulh(x0, x20, x24);
+ __ Smulh(x1, x21, x24);
+ __ Smulh(x2, x22, x23);
+ __ Smulh(x3, x22, x24);
+ __ Smulh(x4, x24, x25);
+ __ Smulh(x5, x23, x27);
+ __ Smulh(x6, x26, x26);
+ __ Smulh(x7, x26, x27);
+ __ Smulh(x8, x27, x27);
+ __ Smulh(x9, x28, x28);
+ __ Smulh(x10, x28, x29);
+ __ Smulh(x11, x29, x29);
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(0, x0);
+ ASSERT_EQUAL_64(0, x1);
+ ASSERT_EQUAL_64(0, x2);
+ ASSERT_EQUAL_64(0x01234567, x3);
+ ASSERT_EQUAL_64(0x02468acf, x4);
+ ASSERT_EQUAL_64(0xffffffffffffffffUL, x5);
+ ASSERT_EQUAL_64(0x4000000000000000UL, x6);
+ ASSERT_EQUAL_64(0, x7);
+ ASSERT_EQUAL_64(0, x8);
+ ASSERT_EQUAL_64(0x1c71c71c71c71c71UL, x9);
+ ASSERT_EQUAL_64(0xe38e38e38e38e38eUL, x10);
+ ASSERT_EQUAL_64(0x1c71c71c71c71c72UL, x11);
+
+ TEARDOWN();
+}
+
+
+TEST(smaddl_umaddl) {
+ INIT_V8();
+ SETUP();
+
+ START();
+ __ Mov(x17, 1);
+ __ Mov(x18, 0xffffffff);
+ __ Mov(x19, 0xffffffffffffffffUL);
+ __ Mov(x20, 4);
+ __ Mov(x21, 0x200000000UL);
+
+ __ Smaddl(x9, w17, w18, x20);
+ __ Smaddl(x10, w18, w18, x20);
+ __ Smaddl(x11, w19, w19, x20);
+ __ Smaddl(x12, w19, w19, x21);
+ __ Umaddl(x13, w17, w18, x20);
+ __ Umaddl(x14, w18, w18, x20);
+ __ Umaddl(x15, w19, w19, x20);
+ __ Umaddl(x22, w19, w19, x21);
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(3, x9);
+ ASSERT_EQUAL_64(5, x10);
+ ASSERT_EQUAL_64(5, x11);
+ ASSERT_EQUAL_64(0x200000001UL, x12);
+ ASSERT_EQUAL_64(0x100000003UL, x13);
+ ASSERT_EQUAL_64(0xfffffffe00000005UL, x14);
+ ASSERT_EQUAL_64(0xfffffffe00000005UL, x15);
+ ASSERT_EQUAL_64(0x1, x22);
+
+ TEARDOWN();
+}
+
+
+TEST(smsubl_umsubl) {
+ INIT_V8();
+ SETUP();
+
+ START();
+ __ Mov(x17, 1);
+ __ Mov(x18, 0xffffffff);
+ __ Mov(x19, 0xffffffffffffffffUL);
+ __ Mov(x20, 4);
+ __ Mov(x21, 0x200000000UL);
+
+ __ Smsubl(x9, w17, w18, x20);
+ __ Smsubl(x10, w18, w18, x20);
+ __ Smsubl(x11, w19, w19, x20);
+ __ Smsubl(x12, w19, w19, x21);
+ __ Umsubl(x13, w17, w18, x20);
+ __ Umsubl(x14, w18, w18, x20);
+ __ Umsubl(x15, w19, w19, x20);
+ __ Umsubl(x22, w19, w19, x21);
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(5, x9);
+ ASSERT_EQUAL_64(3, x10);
+ ASSERT_EQUAL_64(3, x11);
+ ASSERT_EQUAL_64(0x1ffffffffUL, x12);
+ ASSERT_EQUAL_64(0xffffffff00000005UL, x13);
+ ASSERT_EQUAL_64(0x200000003UL, x14);
+ ASSERT_EQUAL_64(0x200000003UL, x15);
+ ASSERT_EQUAL_64(0x3ffffffffUL, x22);
+
+ TEARDOWN();
+}
+
+
+TEST(div) {
+ INIT_V8();
+ SETUP();
+
+ START();
+ __ Mov(x16, 1);
+ __ Mov(x17, 0xffffffff);
+ __ Mov(x18, 0xffffffffffffffffUL);
+ __ Mov(x19, 0x80000000);
+ __ Mov(x20, 0x8000000000000000UL);
+ __ Mov(x21, 2);
+
+ __ Udiv(w0, w16, w16);
+ __ Udiv(w1, w17, w16);
+ __ Sdiv(w2, w16, w16);
+ __ Sdiv(w3, w16, w17);
+ __ Sdiv(w4, w17, w18);
+
+ __ Udiv(x5, x16, x16);
+ __ Udiv(x6, x17, x18);
+ __ Sdiv(x7, x16, x16);
+ __ Sdiv(x8, x16, x17);
+ __ Sdiv(x9, x17, x18);
+
+ __ Udiv(w10, w19, w21);
+ __ Sdiv(w11, w19, w21);
+ __ Udiv(x12, x19, x21);
+ __ Sdiv(x13, x19, x21);
+ __ Udiv(x14, x20, x21);
+ __ Sdiv(x15, x20, x21);
+
+ __ Udiv(w22, w19, w17);
+ __ Sdiv(w23, w19, w17);
+ __ Udiv(x24, x20, x18);
+ __ Sdiv(x25, x20, x18);
+
+ __ Udiv(x26, x16, x21);
+ __ Sdiv(x27, x16, x21);
+ __ Udiv(x28, x18, x21);
+ __ Sdiv(x29, x18, x21);
+
+ __ Mov(x17, 0);
+ __ Udiv(w18, w16, w17);
+ __ Sdiv(w19, w16, w17);
+ __ Udiv(x20, x16, x17);
+ __ Sdiv(x21, x16, x17);
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(1, x0);
+ ASSERT_EQUAL_64(0xffffffff, x1);
+ ASSERT_EQUAL_64(1, x2);
+ ASSERT_EQUAL_64(0xffffffff, x3);
+ ASSERT_EQUAL_64(1, x4);
+ ASSERT_EQUAL_64(1, x5);
+ ASSERT_EQUAL_64(0, x6);
+ ASSERT_EQUAL_64(1, x7);
+ ASSERT_EQUAL_64(0, x8);
+ ASSERT_EQUAL_64(0xffffffff00000001UL, x9);
+ ASSERT_EQUAL_64(0x40000000, x10);
+  ASSERT_EQUAL_64(0xc0000000, x11);
+ ASSERT_EQUAL_64(0x40000000, x12);
+ ASSERT_EQUAL_64(0x40000000, x13);
+ ASSERT_EQUAL_64(0x4000000000000000UL, x14);
+  ASSERT_EQUAL_64(0xc000000000000000UL, x15);
+ ASSERT_EQUAL_64(0, x22);
+ ASSERT_EQUAL_64(0x80000000, x23);
+ ASSERT_EQUAL_64(0, x24);
+ ASSERT_EQUAL_64(0x8000000000000000UL, x25);
+ ASSERT_EQUAL_64(0, x26);
+ ASSERT_EQUAL_64(0, x27);
+ ASSERT_EQUAL_64(0x7fffffffffffffffUL, x28);
+ ASSERT_EQUAL_64(0, x29);
+ ASSERT_EQUAL_64(0, x18);
+ ASSERT_EQUAL_64(0, x19);
+ ASSERT_EQUAL_64(0, x20);
+ ASSERT_EQUAL_64(0, x21);
+
+ TEARDOWN();
+}
+
+
+TEST(rbit_rev) {
+ INIT_V8();
+ SETUP();
+
+ START();
+ __ Mov(x24, 0xfedcba9876543210UL);
+ __ Rbit(w0, w24);
+ __ Rbit(x1, x24);
+ __ Rev16(w2, w24);
+ __ Rev16(x3, x24);
+ __ Rev(w4, w24);
+ __ Rev32(x5, x24);
+ __ Rev(x6, x24);
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(0x084c2a6e, x0);
+ ASSERT_EQUAL_64(0x084c2a6e195d3b7fUL, x1);
+ ASSERT_EQUAL_64(0x54761032, x2);
+ ASSERT_EQUAL_64(0xdcfe98ba54761032UL, x3);
+ ASSERT_EQUAL_64(0x10325476, x4);
+ ASSERT_EQUAL_64(0x98badcfe10325476UL, x5);
+ ASSERT_EQUAL_64(0x1032547698badcfeUL, x6);
+
+ TEARDOWN();
+}
+
+
+TEST(clz_cls) {
+ INIT_V8();
+ SETUP();
+
+ START();
+ __ Mov(x24, 0x0008000000800000UL);
+ __ Mov(x25, 0xff800000fff80000UL);
+ __ Mov(x26, 0);
+ __ Clz(w0, w24);
+ __ Clz(x1, x24);
+ __ Clz(w2, w25);
+ __ Clz(x3, x25);
+ __ Clz(w4, w26);
+ __ Clz(x5, x26);
+ __ Cls(w6, w24);
+ __ Cls(x7, x24);
+ __ Cls(w8, w25);
+ __ Cls(x9, x25);
+ __ Cls(w10, w26);
+ __ Cls(x11, x26);
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(8, x0);
+ ASSERT_EQUAL_64(12, x1);
+ ASSERT_EQUAL_64(0, x2);
+ ASSERT_EQUAL_64(0, x3);
+ ASSERT_EQUAL_64(32, x4);
+ ASSERT_EQUAL_64(64, x5);
+ ASSERT_EQUAL_64(7, x6);
+ ASSERT_EQUAL_64(11, x7);
+ ASSERT_EQUAL_64(12, x8);
+ ASSERT_EQUAL_64(8, x9);
+ ASSERT_EQUAL_64(31, x10);
+ ASSERT_EQUAL_64(63, x11);
+
+ TEARDOWN();
+}
+
+
+TEST(label) {
+ INIT_V8();
+ SETUP();
+
+ Label label_1, label_2, label_3, label_4;
+
+ START();
+ __ Mov(x0, 0x1);
+ __ Mov(x1, 0x0);
+ __ Mov(x22, lr); // Save lr.
+
+ __ B(&label_1);
+ __ B(&label_1);
+ __ B(&label_1); // Multiple branches to the same label.
+ __ Mov(x0, 0x0);
+ __ Bind(&label_2);
+ __ B(&label_3); // Forward branch.
+ __ Mov(x0, 0x0);
+ __ Bind(&label_1);
+ __ B(&label_2); // Backward branch.
+ __ Mov(x0, 0x0);
+ __ Bind(&label_3);
+ __ Bl(&label_4);
+ END();
+
+ __ Bind(&label_4);
+ __ Mov(x1, 0x1);
+ __ Mov(lr, x22);
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(0x1, x0);
+ ASSERT_EQUAL_64(0x1, x1);
+
+ TEARDOWN();
+}
+
+
+TEST(branch_at_start) {
+ INIT_V8();
+ SETUP();
+
+ Label good, exit;
+
+ // Test that branches can exist at the start of the buffer. (This is a
+ // boundary condition in the label-handling code.) To achieve this, we have
+ // to work around the code generated by START.
+ RESET();
+ __ B(&good);
+
+ START_AFTER_RESET();
+ __ Mov(x0, 0x0);
+ END();
+
+ __ Bind(&exit);
+ START_AFTER_RESET();
+ __ Mov(x0, 0x1);
+ END();
+
+ __ Bind(&good);
+ __ B(&exit);
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(0x1, x0);
+ TEARDOWN();
+}
+
+
+TEST(adr) {
+ INIT_V8();
+ SETUP();
+
+ Label label_1, label_2, label_3, label_4;
+
+ START();
+ __ Mov(x0, 0x0); // Set to non-zero to indicate failure.
+ __ Adr(x1, &label_3); // Set to zero to indicate success.
+
+ __ Adr(x2, &label_1); // Multiple forward references to the same label.
+ __ Adr(x3, &label_1);
+ __ Adr(x4, &label_1);
+
+ __ Bind(&label_2);
+ __ Eor(x5, x2, Operand(x3)); // Ensure that x2,x3 and x4 are identical.
+ __ Eor(x6, x2, Operand(x4));
+ __ Orr(x0, x0, Operand(x5));
+ __ Orr(x0, x0, Operand(x6));
+ __ Br(x2); // label_1, label_3
+
+ __ Bind(&label_3);
+ __ Adr(x2, &label_3); // Self-reference (offset 0).
+ __ Eor(x1, x1, Operand(x2));
+ __ Adr(x2, &label_4); // Simple forward reference.
+ __ Br(x2); // label_4
+
+ __ Bind(&label_1);
+ __ Adr(x2, &label_3); // Multiple reverse references to the same label.
+ __ Adr(x3, &label_3);
+ __ Adr(x4, &label_3);
+ __ Adr(x5, &label_2); // Simple reverse reference.
+ __ Br(x5); // label_2
+
+ __ Bind(&label_4);
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(0x0, x0);
+ ASSERT_EQUAL_64(0x0, x1);
+
+ TEARDOWN();
+}
+
+
+TEST(branch_cond) {
+ INIT_V8();
+ SETUP();
+
+ Label wrong;
+
+ START();
+ __ Mov(x0, 0x1);
+ __ Mov(x1, 0x1);
+ __ Mov(x2, 0x8000000000000000L);
+
+ // For each 'cmp' instruction below, condition codes other than the ones
+ // following it would branch.
+
+ __ Cmp(x1, 0);
+ __ B(&wrong, eq);
+ __ B(&wrong, lo);
+ __ B(&wrong, mi);
+ __ B(&wrong, vs);
+ __ B(&wrong, ls);
+ __ B(&wrong, lt);
+ __ B(&wrong, le);
+ Label ok_1;
+ __ B(&ok_1, ne);
+ __ Mov(x0, 0x0);
+ __ Bind(&ok_1);
+
+ __ Cmp(x1, 1);
+ __ B(&wrong, ne);
+ __ B(&wrong, lo);
+ __ B(&wrong, mi);
+ __ B(&wrong, vs);
+ __ B(&wrong, hi);
+ __ B(&wrong, lt);
+ __ B(&wrong, gt);
+ Label ok_2;
+ __ B(&ok_2, pl);
+ __ Mov(x0, 0x0);
+ __ Bind(&ok_2);
+
+ __ Cmp(x1, 2);
+ __ B(&wrong, eq);
+ __ B(&wrong, hs);
+ __ B(&wrong, pl);
+ __ B(&wrong, vs);
+ __ B(&wrong, hi);
+ __ B(&wrong, ge);
+ __ B(&wrong, gt);
+ Label ok_3;
+ __ B(&ok_3, vc);
+ __ Mov(x0, 0x0);
+ __ Bind(&ok_3);
+
+ __ Cmp(x2, 1);
+ __ B(&wrong, eq);
+ __ B(&wrong, lo);
+ __ B(&wrong, mi);
+ __ B(&wrong, vc);
+ __ B(&wrong, ls);
+ __ B(&wrong, ge);
+ __ B(&wrong, gt);
+ Label ok_4;
+ __ B(&ok_4, le);
+ __ Mov(x0, 0x0);
+ __ Bind(&ok_4);
+
+ Label ok_5;
+ __ b(&ok_5, al);
+ __ Mov(x0, 0x0);
+ __ Bind(&ok_5);
+
+ Label ok_6;
+ __ b(&ok_6, nv);
+ __ Mov(x0, 0x0);
+ __ Bind(&ok_6);
+
+ END();
+
+ __ Bind(&wrong);
+ __ Mov(x0, 0x0);
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(0x1, x0);
+
+ TEARDOWN();
+}
+
+
+TEST(branch_to_reg) {
+ INIT_V8();
+ SETUP();
+
+ // Test br.
+ Label fn1, after_fn1;
+
+ START();
+ __ Mov(x29, lr);
+
+ __ Mov(x1, 0);
+ __ B(&after_fn1);
+
+ __ Bind(&fn1);
+ __ Mov(x0, lr);
+ __ Mov(x1, 42);
+ __ Br(x0);
+
+ __ Bind(&after_fn1);
+ __ Bl(&fn1);
+
+ // Test blr.
+ Label fn2, after_fn2;
+
+ __ Mov(x2, 0);
+ __ B(&after_fn2);
+
+ __ Bind(&fn2);
+ __ Mov(x0, lr);
+ __ Mov(x2, 84);
+ __ Blr(x0);
+
+ __ Bind(&after_fn2);
+ __ Bl(&fn2);
+ __ Mov(x3, lr);
+
+ __ Mov(lr, x29);
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(core.xreg(3) + kInstructionSize, x0);
+ ASSERT_EQUAL_64(42, x1);
+ ASSERT_EQUAL_64(84, x2);
+
+ TEARDOWN();
+}
+
+
+TEST(compare_branch) {
+ INIT_V8();
+ SETUP();
+
+ START();
+ __ Mov(x0, 0);
+ __ Mov(x1, 0);
+ __ Mov(x2, 0);
+ __ Mov(x3, 0);
+ __ Mov(x4, 0);
+ __ Mov(x5, 0);
+ __ Mov(x16, 0);
+ __ Mov(x17, 42);
+
+ Label zt, zt_end;
+ __ Cbz(w16, &zt);
+ __ B(&zt_end);
+ __ Bind(&zt);
+ __ Mov(x0, 1);
+ __ Bind(&zt_end);
+
+ Label zf, zf_end;
+ __ Cbz(x17, &zf);
+ __ B(&zf_end);
+ __ Bind(&zf);
+ __ Mov(x1, 1);
+ __ Bind(&zf_end);
+
+ Label nzt, nzt_end;
+ __ Cbnz(w17, &nzt);
+ __ B(&nzt_end);
+ __ Bind(&nzt);
+ __ Mov(x2, 1);
+ __ Bind(&nzt_end);
+
+ Label nzf, nzf_end;
+ __ Cbnz(x16, &nzf);
+ __ B(&nzf_end);
+ __ Bind(&nzf);
+ __ Mov(x3, 1);
+ __ Bind(&nzf_end);
+
+ __ Mov(x18, 0xffffffff00000000UL);
+
+ Label a, a_end;
+ __ Cbz(w18, &a);
+ __ B(&a_end);
+ __ Bind(&a);
+ __ Mov(x4, 1);
+ __ Bind(&a_end);
+
+ Label b, b_end;
+ __ Cbnz(w18, &b);
+ __ B(&b_end);
+ __ Bind(&b);
+ __ Mov(x5, 1);
+ __ Bind(&b_end);
+
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(1, x0);
+ ASSERT_EQUAL_64(0, x1);
+ ASSERT_EQUAL_64(1, x2);
+ ASSERT_EQUAL_64(0, x3);
+ ASSERT_EQUAL_64(1, x4);
+ ASSERT_EQUAL_64(0, x5);
+
+ TEARDOWN();
+}
+
+
+TEST(test_branch) {
+ INIT_V8();
+ SETUP();
+
+ START();
+ __ Mov(x0, 0);
+ __ Mov(x1, 0);
+ __ Mov(x2, 0);
+ __ Mov(x3, 0);
+ __ Mov(x16, 0xaaaaaaaaaaaaaaaaUL);
+
+ Label bz, bz_end;
+ __ Tbz(w16, 0, &bz);
+ __ B(&bz_end);
+ __ Bind(&bz);
+ __ Mov(x0, 1);
+ __ Bind(&bz_end);
+
+ Label bo, bo_end;
+ __ Tbz(x16, 63, &bo);
+ __ B(&bo_end);
+ __ Bind(&bo);
+ __ Mov(x1, 1);
+ __ Bind(&bo_end);
+
+ Label nbz, nbz_end;
+ __ Tbnz(x16, 61, &nbz);
+ __ B(&nbz_end);
+ __ Bind(&nbz);
+ __ Mov(x2, 1);
+ __ Bind(&nbz_end);
+
+ Label nbo, nbo_end;
+ __ Tbnz(w16, 2, &nbo);
+ __ B(&nbo_end);
+ __ Bind(&nbo);
+ __ Mov(x3, 1);
+ __ Bind(&nbo_end);
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(1, x0);
+ ASSERT_EQUAL_64(0, x1);
+ ASSERT_EQUAL_64(1, x2);
+ ASSERT_EQUAL_64(0, x3);
+
+ TEARDOWN();
+}
+
+
+TEST(ldr_str_offset) {
+  // Loads/stores with immediate-offset addressing (no writeback) at word,
+  // doubleword, byte and halfword widths; also checks the base registers
+  // x17/x18 are left unmodified.
+  INIT_V8();
+  SETUP();
+
+  uint64_t src[2] = {0xfedcba9876543210UL, 0x0123456789abcdefUL};
+  uint64_t dst[5] = {0, 0, 0, 0, 0};
+  uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
+  uintptr_t dst_base = reinterpret_cast<uintptr_t>(dst);
+
+  START();
+  __ Mov(x17, src_base);
+  __ Mov(x18, dst_base);
+  __ Ldr(w0, MemOperand(x17));
+  __ Str(w0, MemOperand(x18));
+  __ Ldr(w1, MemOperand(x17, 4));
+  __ Str(w1, MemOperand(x18, 12));
+  __ Ldr(x2, MemOperand(x17, 8));
+  __ Str(x2, MemOperand(x18, 16));
+  __ Ldrb(w3, MemOperand(x17, 1));
+  __ Strb(w3, MemOperand(x18, 25));
+  __ Ldrh(w4, MemOperand(x17, 2));
+  __ Strh(w4, MemOperand(x18, 33));
+  END();
+
+  RUN();
+
+  // Expected values assume little-endian memory layout of src/dst.
+  ASSERT_EQUAL_64(0x76543210, x0);
+  ASSERT_EQUAL_64(0x76543210, dst[0]);
+  ASSERT_EQUAL_64(0xfedcba98, x1);
+  ASSERT_EQUAL_64(0xfedcba9800000000UL, dst[1]);
+  ASSERT_EQUAL_64(0x0123456789abcdefUL, x2);
+  ASSERT_EQUAL_64(0x0123456789abcdefUL, dst[2]);
+  ASSERT_EQUAL_64(0x32, x3);
+  ASSERT_EQUAL_64(0x3200, dst[3]);
+  ASSERT_EQUAL_64(0x7654, x4);
+  ASSERT_EQUAL_64(0x765400, dst[4]);
+  ASSERT_EQUAL_64(src_base, x17);
+  ASSERT_EQUAL_64(dst_base, x18);
+
+  TEARDOWN();
+}
+
+
+TEST(ldr_str_wide) {
+ INIT_V8();
+ SETUP();
+
+ uint32_t src[8192];
+ uint32_t dst[8192];
+ uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
+ uintptr_t dst_base = reinterpret_cast<uintptr_t>(dst);
+ memset(src, 0xaa, 8192 * sizeof(src[0]));
+ memset(dst, 0xaa, 8192 * sizeof(dst[0]));
+ src[0] = 0;
+ src[6144] = 6144;
+ src[8191] = 8191;
+
+ START();
+ __ Mov(x22, src_base);
+ __ Mov(x23, dst_base);
+ __ Mov(x24, src_base);
+ __ Mov(x25, dst_base);
+ __ Mov(x26, src_base);
+ __ Mov(x27, dst_base);
+
+ __ Ldr(w0, MemOperand(x22, 8191 * sizeof(src[0])));
+ __ Str(w0, MemOperand(x23, 8191 * sizeof(dst[0])));
+ __ Ldr(w1, MemOperand(x24, 4096 * sizeof(src[0]), PostIndex));
+ __ Str(w1, MemOperand(x25, 4096 * sizeof(dst[0]), PostIndex));
+ __ Ldr(w2, MemOperand(x26, 6144 * sizeof(src[0]), PreIndex));
+ __ Str(w2, MemOperand(x27, 6144 * sizeof(dst[0]), PreIndex));
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_32(8191, w0);
+ ASSERT_EQUAL_32(8191, dst[8191]);
+ ASSERT_EQUAL_64(src_base, x22);
+ ASSERT_EQUAL_64(dst_base, x23);
+ ASSERT_EQUAL_32(0, w1);
+ ASSERT_EQUAL_32(0, dst[0]);
+ ASSERT_EQUAL_64(src_base + 4096 * sizeof(src[0]), x24);
+ ASSERT_EQUAL_64(dst_base + 4096 * sizeof(dst[0]), x25);
+ ASSERT_EQUAL_32(6144, w2);
+ ASSERT_EQUAL_32(6144, dst[6144]);
+ ASSERT_EQUAL_64(src_base + 6144 * sizeof(src[0]), x26);
+ ASSERT_EQUAL_64(dst_base + 6144 * sizeof(dst[0]), x27);
+
+ TEARDOWN();
+}
+
+
+TEST(ldr_str_preindex) {
+  // Loads/stores with pre-index addressing: the base register is updated
+  // by the (possibly negative, possibly unaligned) offset BEFORE the
+  // access. Writeback values are verified for every base register used.
+  INIT_V8();
+  SETUP();
+
+  uint64_t src[2] = {0xfedcba9876543210UL, 0x0123456789abcdefUL};
+  uint64_t dst[6] = {0, 0, 0, 0, 0, 0};
+  uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
+  uintptr_t dst_base = reinterpret_cast<uintptr_t>(dst);
+
+  START();
+  __ Mov(x17, src_base);
+  __ Mov(x18, dst_base);
+  __ Mov(x19, src_base);
+  __ Mov(x20, dst_base);
+  __ Mov(x21, src_base + 16);
+  __ Mov(x22, dst_base + 40);
+  __ Mov(x23, src_base);
+  __ Mov(x24, dst_base);
+  __ Mov(x25, src_base);
+  __ Mov(x26, dst_base);
+  __ Ldr(w0, MemOperand(x17, 4, PreIndex));
+  __ Str(w0, MemOperand(x18, 12, PreIndex));
+  __ Ldr(x1, MemOperand(x19, 8, PreIndex));
+  __ Str(x1, MemOperand(x20, 16, PreIndex));
+  __ Ldr(w2, MemOperand(x21, -4, PreIndex));
+  __ Str(w2, MemOperand(x22, -4, PreIndex));
+  __ Ldrb(w3, MemOperand(x23, 1, PreIndex));
+  __ Strb(w3, MemOperand(x24, 25, PreIndex));
+  __ Ldrh(w4, MemOperand(x25, 3, PreIndex));
+  __ Strh(w4, MemOperand(x26, 41, PreIndex));
+  END();
+
+  RUN();
+
+  ASSERT_EQUAL_64(0xfedcba98, x0);
+  ASSERT_EQUAL_64(0xfedcba9800000000UL, dst[1]);
+  ASSERT_EQUAL_64(0x0123456789abcdefUL, x1);
+  ASSERT_EQUAL_64(0x0123456789abcdefUL, dst[2]);
+  ASSERT_EQUAL_64(0x01234567, x2);
+  ASSERT_EQUAL_64(0x0123456700000000UL, dst[4]);
+  ASSERT_EQUAL_64(0x32, x3);
+  ASSERT_EQUAL_64(0x3200, dst[3]);
+  ASSERT_EQUAL_64(0x9876, x4);
+  ASSERT_EQUAL_64(0x987600, dst[5]);
+  // Writeback: base registers hold base + offset after each access.
+  ASSERT_EQUAL_64(src_base + 4, x17);
+  ASSERT_EQUAL_64(dst_base + 12, x18);
+  ASSERT_EQUAL_64(src_base + 8, x19);
+  ASSERT_EQUAL_64(dst_base + 16, x20);
+  ASSERT_EQUAL_64(src_base + 12, x21);
+  ASSERT_EQUAL_64(dst_base + 36, x22);
+  ASSERT_EQUAL_64(src_base + 1, x23);
+  ASSERT_EQUAL_64(dst_base + 25, x24);
+  ASSERT_EQUAL_64(src_base + 3, x25);
+  ASSERT_EQUAL_64(dst_base + 41, x26);
+
+  TEARDOWN();
+}
+
+
+TEST(ldr_str_postindex) {
+  // Loads/stores with post-index addressing: the access uses the original
+  // base address and the base register is updated AFTERWARDS by the
+  // (possibly negative) offset. Writeback values are verified.
+  INIT_V8();
+  SETUP();
+
+  uint64_t src[2] = {0xfedcba9876543210UL, 0x0123456789abcdefUL};
+  uint64_t dst[6] = {0, 0, 0, 0, 0, 0};
+  uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
+  uintptr_t dst_base = reinterpret_cast<uintptr_t>(dst);
+
+  START();
+  __ Mov(x17, src_base + 4);
+  __ Mov(x18, dst_base + 12);
+  __ Mov(x19, src_base + 8);
+  __ Mov(x20, dst_base + 16);
+  __ Mov(x21, src_base + 8);
+  __ Mov(x22, dst_base + 32);
+  __ Mov(x23, src_base + 1);
+  __ Mov(x24, dst_base + 25);
+  __ Mov(x25, src_base + 3);
+  __ Mov(x26, dst_base + 41);
+  __ Ldr(w0, MemOperand(x17, 4, PostIndex));
+  __ Str(w0, MemOperand(x18, 12, PostIndex));
+  __ Ldr(x1, MemOperand(x19, 8, PostIndex));
+  __ Str(x1, MemOperand(x20, 16, PostIndex));
+  __ Ldr(x2, MemOperand(x21, -8, PostIndex));
+  __ Str(x2, MemOperand(x22, -32, PostIndex));
+  __ Ldrb(w3, MemOperand(x23, 1, PostIndex));
+  __ Strb(w3, MemOperand(x24, 5, PostIndex));
+  __ Ldrh(w4, MemOperand(x25, -3, PostIndex));
+  __ Strh(w4, MemOperand(x26, -41, PostIndex));
+  END();
+
+  RUN();
+
+  ASSERT_EQUAL_64(0xfedcba98, x0);
+  ASSERT_EQUAL_64(0xfedcba9800000000UL, dst[1]);
+  ASSERT_EQUAL_64(0x0123456789abcdefUL, x1);
+  ASSERT_EQUAL_64(0x0123456789abcdefUL, dst[2]);
+  ASSERT_EQUAL_64(0x0123456789abcdefUL, x2);
+  ASSERT_EQUAL_64(0x0123456789abcdefUL, dst[4]);
+  ASSERT_EQUAL_64(0x32, x3);
+  ASSERT_EQUAL_64(0x3200, dst[3]);
+  ASSERT_EQUAL_64(0x9876, x4);
+  ASSERT_EQUAL_64(0x987600, dst[5]);
+  // Writeback: base registers hold original base + offset.
+  ASSERT_EQUAL_64(src_base + 8, x17);
+  ASSERT_EQUAL_64(dst_base + 24, x18);
+  ASSERT_EQUAL_64(src_base + 16, x19);
+  ASSERT_EQUAL_64(dst_base + 32, x20);
+  ASSERT_EQUAL_64(src_base, x21);
+  ASSERT_EQUAL_64(dst_base, x22);
+  ASSERT_EQUAL_64(src_base + 2, x23);
+  ASSERT_EQUAL_64(dst_base + 30, x24);
+  ASSERT_EQUAL_64(src_base, x25);
+  ASSERT_EQUAL_64(dst_base, x26);
+
+  TEARDOWN();
+}
+
+
+TEST(load_signed) {
+  // Sign-extending loads (Ldrsb/Ldrsh/Ldrsw) into both w and x registers,
+  // reading values with the sign bit set (0x80008080) and clear
+  // (0x7fff7f7f) to check extension in both directions.
+  INIT_V8();
+  SETUP();
+
+  uint32_t src[2] = {0x80008080, 0x7fff7f7f};
+  uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
+
+  START();
+  __ Mov(x24, src_base);
+  __ Ldrsb(w0, MemOperand(x24));
+  __ Ldrsb(w1, MemOperand(x24, 4));
+  __ Ldrsh(w2, MemOperand(x24));
+  __ Ldrsh(w3, MemOperand(x24, 4));
+  __ Ldrsb(x4, MemOperand(x24));
+  __ Ldrsb(x5, MemOperand(x24, 4));
+  __ Ldrsh(x6, MemOperand(x24));
+  __ Ldrsh(x7, MemOperand(x24, 4));
+  __ Ldrsw(x8, MemOperand(x24));
+  __ Ldrsw(x9, MemOperand(x24, 4));
+  END();
+
+  RUN();
+
+  ASSERT_EQUAL_64(0xffffff80, x0);
+  ASSERT_EQUAL_64(0x0000007f, x1);
+  ASSERT_EQUAL_64(0xffff8080, x2);
+  ASSERT_EQUAL_64(0x00007f7f, x3);
+  ASSERT_EQUAL_64(0xffffffffffffff80UL, x4);
+  ASSERT_EQUAL_64(0x000000000000007fUL, x5);
+  ASSERT_EQUAL_64(0xffffffffffff8080UL, x6);
+  ASSERT_EQUAL_64(0x0000000000007f7fUL, x7);
+  ASSERT_EQUAL_64(0xffffffff80008080UL, x8);
+  ASSERT_EQUAL_64(0x000000007fff7f7fUL, x9);
+
+  TEARDOWN();
+}
+
+
+TEST(load_store_regoffset) {
+  // Loads/stores with register-offset addressing, including negative
+  // 64-bit offsets and sign-extended (SXTW) 32-bit offsets, with and
+  // without a shift amount on the index register.
+  INIT_V8();
+  SETUP();
+
+  uint32_t src[3] = {1, 2, 3};
+  uint32_t dst[4] = {0, 0, 0, 0};
+  uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
+  uintptr_t dst_base = reinterpret_cast<uintptr_t>(dst);
+
+  START();
+  __ Mov(x16, src_base);
+  __ Mov(x17, dst_base);
+  __ Mov(x18, src_base + 3 * sizeof(src[0]));
+  __ Mov(x19, dst_base + 3 * sizeof(dst[0]));
+  __ Mov(x20, dst_base + 4 * sizeof(dst[0]));
+  __ Mov(x24, 0);
+  __ Mov(x25, 4);
+  __ Mov(x26, -4);
+  __ Mov(x27, 0xfffffffc);  // 32-bit -4.
+  __ Mov(x28, 0xfffffffe);  // 32-bit -2.
+  __ Mov(x29, 0xffffffff);  // 32-bit -1.
+
+  __ Ldr(w0, MemOperand(x16, x24));
+  __ Ldr(x1, MemOperand(x16, x25));
+  __ Ldr(w2, MemOperand(x18, x26));
+  __ Ldr(w3, MemOperand(x18, x27, SXTW));
+  __ Ldr(w4, MemOperand(x18, x28, SXTW, 2));
+  __ Str(w0, MemOperand(x17, x24));
+  __ Str(x1, MemOperand(x17, x25));
+  __ Str(w2, MemOperand(x20, x29, SXTW, 2));
+  END();
+
+  RUN();
+
+  ASSERT_EQUAL_64(1, x0);
+  ASSERT_EQUAL_64(0x0000000300000002UL, x1);
+  ASSERT_EQUAL_64(3, x2);
+  ASSERT_EQUAL_64(3, x3);
+  ASSERT_EQUAL_64(2, x4);
+  ASSERT_EQUAL_32(1, dst[0]);
+  ASSERT_EQUAL_32(2, dst[1]);
+  ASSERT_EQUAL_32(3, dst[2]);
+  ASSERT_EQUAL_32(3, dst[3]);
+
+  TEARDOWN();
+}
+
+
+TEST(load_store_float) {
+  // Single-precision FP loads/stores using plain-offset, post-index and
+  // pre-index addressing modes; checks both transferred values and base
+  // register writeback.
+  INIT_V8();
+  SETUP();
+
+  float src[3] = {1.0, 2.0, 3.0};
+  float dst[3] = {0.0, 0.0, 0.0};
+  uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
+  uintptr_t dst_base = reinterpret_cast<uintptr_t>(dst);
+
+  START();
+  __ Mov(x17, src_base);
+  __ Mov(x18, dst_base);
+  __ Mov(x19, src_base);
+  __ Mov(x20, dst_base);
+  __ Mov(x21, src_base);
+  __ Mov(x22, dst_base);
+  __ Ldr(s0, MemOperand(x17, sizeof(src[0])));
+  __ Str(s0, MemOperand(x18, sizeof(dst[0]), PostIndex));
+  __ Ldr(s1, MemOperand(x19, sizeof(src[0]), PostIndex));
+  __ Str(s1, MemOperand(x20, 2 * sizeof(dst[0]), PreIndex));
+  __ Ldr(s2, MemOperand(x21, 2 * sizeof(src[0]), PreIndex));
+  __ Str(s2, MemOperand(x22, sizeof(dst[0])));
+  END();
+
+  RUN();
+
+  ASSERT_EQUAL_FP32(2.0, s0);
+  ASSERT_EQUAL_FP32(2.0, dst[0]);
+  ASSERT_EQUAL_FP32(1.0, s1);
+  ASSERT_EQUAL_FP32(1.0, dst[2]);
+  ASSERT_EQUAL_FP32(3.0, s2);
+  ASSERT_EQUAL_FP32(3.0, dst[1]);
+  ASSERT_EQUAL_64(src_base, x17);
+  ASSERT_EQUAL_64(dst_base + sizeof(dst[0]), x18);
+  ASSERT_EQUAL_64(src_base + sizeof(src[0]), x19);
+  ASSERT_EQUAL_64(dst_base + 2 * sizeof(dst[0]), x20);
+  ASSERT_EQUAL_64(src_base + 2 * sizeof(src[0]), x21);
+  ASSERT_EQUAL_64(dst_base, x22);
+
+  TEARDOWN();
+}
+
+
+TEST(load_store_double) {
+  // Double-precision counterpart of load_store_float: same addressing
+  // modes (offset, post-index, pre-index) with d registers.
+  INIT_V8();
+  SETUP();
+
+  double src[3] = {1.0, 2.0, 3.0};
+  double dst[3] = {0.0, 0.0, 0.0};
+  uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
+  uintptr_t dst_base = reinterpret_cast<uintptr_t>(dst);
+
+  START();
+  __ Mov(x17, src_base);
+  __ Mov(x18, dst_base);
+  __ Mov(x19, src_base);
+  __ Mov(x20, dst_base);
+  __ Mov(x21, src_base);
+  __ Mov(x22, dst_base);
+  __ Ldr(d0, MemOperand(x17, sizeof(src[0])));
+  __ Str(d0, MemOperand(x18, sizeof(dst[0]), PostIndex));
+  __ Ldr(d1, MemOperand(x19, sizeof(src[0]), PostIndex));
+  __ Str(d1, MemOperand(x20, 2 * sizeof(dst[0]), PreIndex));
+  __ Ldr(d2, MemOperand(x21, 2 * sizeof(src[0]), PreIndex));
+  __ Str(d2, MemOperand(x22, sizeof(dst[0])));
+  END();
+
+  RUN();
+
+  ASSERT_EQUAL_FP64(2.0, d0);
+  ASSERT_EQUAL_FP64(2.0, dst[0]);
+  ASSERT_EQUAL_FP64(1.0, d1);
+  ASSERT_EQUAL_FP64(1.0, dst[2]);
+  ASSERT_EQUAL_FP64(3.0, d2);
+  ASSERT_EQUAL_FP64(3.0, dst[1]);
+  ASSERT_EQUAL_64(src_base, x17);
+  ASSERT_EQUAL_64(dst_base + sizeof(dst[0]), x18);
+  ASSERT_EQUAL_64(src_base + sizeof(src[0]), x19);
+  ASSERT_EQUAL_64(dst_base + 2 * sizeof(dst[0]), x20);
+  ASSERT_EQUAL_64(src_base + 2 * sizeof(src[0]), x21);
+  ASSERT_EQUAL_64(dst_base, x22);
+
+  TEARDOWN();
+}
+
+
+TEST(ldp_stp_float) {
+  // Ldp/Stp of single-precision register pairs with post-index load and
+  // pre-index store; the store swaps the pair order (s0, s31).
+  INIT_V8();
+  SETUP();
+
+  float src[2] = {1.0, 2.0};
+  float dst[3] = {0.0, 0.0, 0.0};
+  uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
+  uintptr_t dst_base = reinterpret_cast<uintptr_t>(dst);
+
+  START();
+  __ Mov(x16, src_base);
+  __ Mov(x17, dst_base);
+  __ Ldp(s31, s0, MemOperand(x16, 2 * sizeof(src[0]), PostIndex));
+  __ Stp(s0, s31, MemOperand(x17, sizeof(dst[1]), PreIndex));
+  END();
+
+  RUN();
+
+  ASSERT_EQUAL_FP32(1.0, s31);
+  ASSERT_EQUAL_FP32(2.0, s0);
+  ASSERT_EQUAL_FP32(0.0, dst[0]);
+  ASSERT_EQUAL_FP32(2.0, dst[1]);
+  ASSERT_EQUAL_FP32(1.0, dst[2]);
+  ASSERT_EQUAL_64(src_base + 2 * sizeof(src[0]), x16);
+  ASSERT_EQUAL_64(dst_base + sizeof(dst[1]), x17);
+
+  TEARDOWN();
+}
+
+
+TEST(ldp_stp_double) {
+  // Double-precision counterpart of ldp_stp_float: Ldp/Stp of d-register
+  // pairs with post-index load and pre-index store.
+  INIT_V8();
+  SETUP();
+
+  double src[2] = {1.0, 2.0};
+  double dst[3] = {0.0, 0.0, 0.0};
+  uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
+  uintptr_t dst_base = reinterpret_cast<uintptr_t>(dst);
+
+  START();
+  __ Mov(x16, src_base);
+  __ Mov(x17, dst_base);
+  __ Ldp(d31, d0, MemOperand(x16, 2 * sizeof(src[0]), PostIndex));
+  __ Stp(d0, d31, MemOperand(x17, sizeof(dst[1]), PreIndex));
+  END();
+
+  RUN();
+
+  ASSERT_EQUAL_FP64(1.0, d31);
+  ASSERT_EQUAL_FP64(2.0, d0);
+  ASSERT_EQUAL_FP64(0.0, dst[0]);
+  ASSERT_EQUAL_FP64(2.0, dst[1]);
+  ASSERT_EQUAL_FP64(1.0, dst[2]);
+  ASSERT_EQUAL_64(src_base + 2 * sizeof(src[0]), x16);
+  ASSERT_EQUAL_64(dst_base + sizeof(dst[1]), x17);
+
+  TEARDOWN();
+}
+
+
+TEST(ldp_stp_offset) {
+  // Ldp/Stp of integer register pairs with signed immediate offsets
+  // (positive and negative), at both w and x widths, without writeback;
+  // base registers are checked to be unmodified.
+  INIT_V8();
+  SETUP();
+
+  uint64_t src[3] = {0x0011223344556677UL, 0x8899aabbccddeeffUL,
+                     0xffeeddccbbaa9988UL};
+  uint64_t dst[7] = {0, 0, 0, 0, 0, 0, 0};
+  uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
+  uintptr_t dst_base = reinterpret_cast<uintptr_t>(dst);
+
+  START();
+  __ Mov(x16, src_base);
+  __ Mov(x17, dst_base);
+  __ Mov(x18, src_base + 24);
+  __ Mov(x19, dst_base + 56);
+  __ Ldp(w0, w1, MemOperand(x16));
+  __ Ldp(w2, w3, MemOperand(x16, 4));
+  __ Ldp(x4, x5, MemOperand(x16, 8));
+  __ Ldp(w6, w7, MemOperand(x18, -12));
+  __ Ldp(x8, x9, MemOperand(x18, -16));
+  __ Stp(w0, w1, MemOperand(x17));
+  __ Stp(w2, w3, MemOperand(x17, 8));
+  __ Stp(x4, x5, MemOperand(x17, 16));
+  __ Stp(w6, w7, MemOperand(x19, -24));
+  __ Stp(x8, x9, MemOperand(x19, -16));
+  END();
+
+  RUN();
+
+  ASSERT_EQUAL_64(0x44556677, x0);
+  ASSERT_EQUAL_64(0x00112233, x1);
+  ASSERT_EQUAL_64(0x0011223344556677UL, dst[0]);
+  ASSERT_EQUAL_64(0x00112233, x2);
+  ASSERT_EQUAL_64(0xccddeeff, x3);
+  ASSERT_EQUAL_64(0xccddeeff00112233UL, dst[1]);
+  ASSERT_EQUAL_64(0x8899aabbccddeeffUL, x4);
+  ASSERT_EQUAL_64(0x8899aabbccddeeffUL, dst[2]);
+  ASSERT_EQUAL_64(0xffeeddccbbaa9988UL, x5);
+  ASSERT_EQUAL_64(0xffeeddccbbaa9988UL, dst[3]);
+  ASSERT_EQUAL_64(0x8899aabb, x6);
+  ASSERT_EQUAL_64(0xbbaa9988, x7);
+  ASSERT_EQUAL_64(0xbbaa99888899aabbUL, dst[4]);
+  ASSERT_EQUAL_64(0x8899aabbccddeeffUL, x8);
+  ASSERT_EQUAL_64(0x8899aabbccddeeffUL, dst[5]);
+  ASSERT_EQUAL_64(0xffeeddccbbaa9988UL, x9);
+  ASSERT_EQUAL_64(0xffeeddccbbaa9988UL, dst[6]);
+  ASSERT_EQUAL_64(src_base, x16);
+  ASSERT_EQUAL_64(dst_base, x17);
+  ASSERT_EQUAL_64(src_base + 24, x18);
+  ASSERT_EQUAL_64(dst_base + 56, x19);
+
+  TEARDOWN();
+}
+
+
+TEST(ldnp_stnp_offset) {
+  // Ldnp/Stnp (non-temporal pair) with the same offsets and expected
+  // results as ldp_stp_offset; the non-temporal hint must not change the
+  // architectural result.
+  INIT_V8();
+  SETUP();
+
+  uint64_t src[3] = {0x0011223344556677UL, 0x8899aabbccddeeffUL,
+                     0xffeeddccbbaa9988UL};
+  uint64_t dst[7] = {0, 0, 0, 0, 0, 0, 0};
+  uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
+  uintptr_t dst_base = reinterpret_cast<uintptr_t>(dst);
+
+  START();
+  __ Mov(x16, src_base);
+  __ Mov(x17, dst_base);
+  __ Mov(x18, src_base + 24);
+  __ Mov(x19, dst_base + 56);
+  __ Ldnp(w0, w1, MemOperand(x16));
+  __ Ldnp(w2, w3, MemOperand(x16, 4));
+  __ Ldnp(x4, x5, MemOperand(x16, 8));
+  __ Ldnp(w6, w7, MemOperand(x18, -12));
+  __ Ldnp(x8, x9, MemOperand(x18, -16));
+  __ Stnp(w0, w1, MemOperand(x17));
+  __ Stnp(w2, w3, MemOperand(x17, 8));
+  __ Stnp(x4, x5, MemOperand(x17, 16));
+  __ Stnp(w6, w7, MemOperand(x19, -24));
+  __ Stnp(x8, x9, MemOperand(x19, -16));
+  END();
+
+  RUN();
+
+  ASSERT_EQUAL_64(0x44556677, x0);
+  ASSERT_EQUAL_64(0x00112233, x1);
+  ASSERT_EQUAL_64(0x0011223344556677UL, dst[0]);
+  ASSERT_EQUAL_64(0x00112233, x2);
+  ASSERT_EQUAL_64(0xccddeeff, x3);
+  ASSERT_EQUAL_64(0xccddeeff00112233UL, dst[1]);
+  ASSERT_EQUAL_64(0x8899aabbccddeeffUL, x4);
+  ASSERT_EQUAL_64(0x8899aabbccddeeffUL, dst[2]);
+  ASSERT_EQUAL_64(0xffeeddccbbaa9988UL, x5);
+  ASSERT_EQUAL_64(0xffeeddccbbaa9988UL, dst[3]);
+  ASSERT_EQUAL_64(0x8899aabb, x6);
+  ASSERT_EQUAL_64(0xbbaa9988, x7);
+  ASSERT_EQUAL_64(0xbbaa99888899aabbUL, dst[4]);
+  ASSERT_EQUAL_64(0x8899aabbccddeeffUL, x8);
+  ASSERT_EQUAL_64(0x8899aabbccddeeffUL, dst[5]);
+  ASSERT_EQUAL_64(0xffeeddccbbaa9988UL, x9);
+  ASSERT_EQUAL_64(0xffeeddccbbaa9988UL, dst[6]);
+  ASSERT_EQUAL_64(src_base, x16);
+  ASSERT_EQUAL_64(dst_base, x17);
+  ASSERT_EQUAL_64(src_base + 24, x18);
+  ASSERT_EQUAL_64(dst_base + 56, x19);
+
+  TEARDOWN();
+}
+
+
+TEST(ldp_stp_preindex) {
+  // Ldp/Stp with pre-index writeback, including back-and-forth offsets on
+  // the same base; x19/x20/x21/x22 snapshot the bases mid-sequence to
+  // verify each intermediate writeback value.
+  INIT_V8();
+  SETUP();
+
+  uint64_t src[3] = {0x0011223344556677UL, 0x8899aabbccddeeffUL,
+                     0xffeeddccbbaa9988UL};
+  uint64_t dst[5] = {0, 0, 0, 0, 0};
+  uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
+  uintptr_t dst_base = reinterpret_cast<uintptr_t>(dst);
+
+  START();
+  __ Mov(x16, src_base);
+  __ Mov(x17, dst_base);
+  __ Mov(x18, dst_base + 16);
+  __ Ldp(w0, w1, MemOperand(x16, 4, PreIndex));
+  __ Mov(x19, x16);
+  __ Ldp(w2, w3, MemOperand(x16, -4, PreIndex));
+  __ Stp(w2, w3, MemOperand(x17, 4, PreIndex));
+  __ Mov(x20, x17);
+  __ Stp(w0, w1, MemOperand(x17, -4, PreIndex));
+  __ Ldp(x4, x5, MemOperand(x16, 8, PreIndex));
+  __ Mov(x21, x16);
+  __ Ldp(x6, x7, MemOperand(x16, -8, PreIndex));
+  __ Stp(x7, x6, MemOperand(x18, 8, PreIndex));
+  __ Mov(x22, x18);
+  __ Stp(x5, x4, MemOperand(x18, -8, PreIndex));
+  END();
+
+  RUN();
+
+  ASSERT_EQUAL_64(0x00112233, x0);
+  ASSERT_EQUAL_64(0xccddeeff, x1);
+  ASSERT_EQUAL_64(0x44556677, x2);
+  ASSERT_EQUAL_64(0x00112233, x3);
+  ASSERT_EQUAL_64(0xccddeeff00112233UL, dst[0]);
+  ASSERT_EQUAL_64(0x0000000000112233UL, dst[1]);
+  ASSERT_EQUAL_64(0x8899aabbccddeeffUL, x4);
+  ASSERT_EQUAL_64(0xffeeddccbbaa9988UL, x5);
+  ASSERT_EQUAL_64(0x0011223344556677UL, x6);
+  ASSERT_EQUAL_64(0x8899aabbccddeeffUL, x7);
+  ASSERT_EQUAL_64(0xffeeddccbbaa9988UL, dst[2]);
+  ASSERT_EQUAL_64(0x8899aabbccddeeffUL, dst[3]);
+  ASSERT_EQUAL_64(0x0011223344556677UL, dst[4]);
+  ASSERT_EQUAL_64(src_base, x16);
+  ASSERT_EQUAL_64(dst_base, x17);
+  ASSERT_EQUAL_64(dst_base + 16, x18);
+  ASSERT_EQUAL_64(src_base + 4, x19);
+  ASSERT_EQUAL_64(dst_base + 4, x20);
+  ASSERT_EQUAL_64(src_base + 8, x21);
+  ASSERT_EQUAL_64(dst_base + 24, x22);
+
+  TEARDOWN();
+}
+
+
+TEST(ldp_stp_postindex) {
+  // Ldp/Stp with post-index writeback, mirroring ldp_stp_preindex;
+  // x19/x20/x21/x22 snapshot the bases mid-sequence to verify each
+  // intermediate writeback value.
+  INIT_V8();
+  SETUP();
+
+  uint64_t src[4] = {0x0011223344556677UL, 0x8899aabbccddeeffUL,
+                     0xffeeddccbbaa9988UL, 0x7766554433221100UL};
+  uint64_t dst[5] = {0, 0, 0, 0, 0};
+  uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
+  uintptr_t dst_base = reinterpret_cast<uintptr_t>(dst);
+
+  START();
+  __ Mov(x16, src_base);
+  __ Mov(x17, dst_base);
+  __ Mov(x18, dst_base + 16);
+  __ Ldp(w0, w1, MemOperand(x16, 4, PostIndex));
+  __ Mov(x19, x16);
+  __ Ldp(w2, w3, MemOperand(x16, -4, PostIndex));
+  __ Stp(w2, w3, MemOperand(x17, 4, PostIndex));
+  __ Mov(x20, x17);
+  __ Stp(w0, w1, MemOperand(x17, -4, PostIndex));
+  __ Ldp(x4, x5, MemOperand(x16, 8, PostIndex));
+  __ Mov(x21, x16);
+  __ Ldp(x6, x7, MemOperand(x16, -8, PostIndex));
+  __ Stp(x7, x6, MemOperand(x18, 8, PostIndex));
+  __ Mov(x22, x18);
+  __ Stp(x5, x4, MemOperand(x18, -8, PostIndex));
+  END();
+
+  RUN();
+
+  ASSERT_EQUAL_64(0x44556677, x0);
+  ASSERT_EQUAL_64(0x00112233, x1);
+  ASSERT_EQUAL_64(0x00112233, x2);
+  ASSERT_EQUAL_64(0xccddeeff, x3);
+  ASSERT_EQUAL_64(0x4455667700112233UL, dst[0]);
+  ASSERT_EQUAL_64(0x0000000000112233UL, dst[1]);
+  ASSERT_EQUAL_64(0x0011223344556677UL, x4);
+  ASSERT_EQUAL_64(0x8899aabbccddeeffUL, x5);
+  ASSERT_EQUAL_64(0x8899aabbccddeeffUL, x6);
+  ASSERT_EQUAL_64(0xffeeddccbbaa9988UL, x7);
+  ASSERT_EQUAL_64(0xffeeddccbbaa9988UL, dst[2]);
+  ASSERT_EQUAL_64(0x8899aabbccddeeffUL, dst[3]);
+  ASSERT_EQUAL_64(0x0011223344556677UL, dst[4]);
+  ASSERT_EQUAL_64(src_base, x16);
+  ASSERT_EQUAL_64(dst_base, x17);
+  ASSERT_EQUAL_64(dst_base + 16, x18);
+  ASSERT_EQUAL_64(src_base + 4, x19);
+  ASSERT_EQUAL_64(dst_base + 4, x20);
+  ASSERT_EQUAL_64(src_base + 8, x21);
+  ASSERT_EQUAL_64(dst_base + 24, x22);
+
+  TEARDOWN();
+}
+
+
+TEST(ldp_sign_extend) {
+  // Ldpsw: load a pair of 32-bit words and sign-extend each into an x
+  // register; one word has the sign bit set and one clear.
+  INIT_V8();
+  SETUP();
+
+  uint32_t src[2] = {0x80000000, 0x7fffffff};
+  uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
+
+  START();
+  __ Mov(x24, src_base);
+  __ Ldpsw(x0, x1, MemOperand(x24));
+  END();
+
+  RUN();
+
+  ASSERT_EQUAL_64(0xffffffff80000000UL, x0);
+  ASSERT_EQUAL_64(0x000000007fffffffUL, x1);
+
+  TEARDOWN();
+}
+
+
+TEST(ldur_stur) {
+  // Unaligned/unscaled offsets (which select the LDUR/STUR encodings):
+  // word, doubleword and byte accesses at offsets that are not multiples
+  // of the access size, with positive and negative displacements.
+  INIT_V8();
+  SETUP();
+
+  int64_t src[2] = {0x0123456789abcdefUL, 0x0123456789abcdefUL};
+  int64_t dst[5] = {0, 0, 0, 0, 0};
+  uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
+  uintptr_t dst_base = reinterpret_cast<uintptr_t>(dst);
+
+  START();
+  __ Mov(x17, src_base);
+  __ Mov(x18, dst_base);
+  __ Mov(x19, src_base + 16);
+  __ Mov(x20, dst_base + 32);
+  __ Mov(x21, dst_base + 40);
+  __ Ldr(w0, MemOperand(x17, 1));
+  __ Str(w0, MemOperand(x18, 2));
+  __ Ldr(x1, MemOperand(x17, 3));
+  __ Str(x1, MemOperand(x18, 9));
+  __ Ldr(w2, MemOperand(x19, -9));
+  __ Str(w2, MemOperand(x20, -5));
+  __ Ldrb(w3, MemOperand(x19, -1));
+  __ Strb(w3, MemOperand(x21, -1));
+  END();
+
+  RUN();
+
+  // Expected values assume little-endian byte order.
+  ASSERT_EQUAL_64(0x6789abcd, x0);
+  ASSERT_EQUAL_64(0x6789abcd0000L, dst[0]);
+  ASSERT_EQUAL_64(0xabcdef0123456789L, x1);
+  ASSERT_EQUAL_64(0xcdef012345678900L, dst[1]);
+  ASSERT_EQUAL_64(0x000000ab, dst[2]);
+  ASSERT_EQUAL_64(0xabcdef01, x2);
+  ASSERT_EQUAL_64(0x00abcdef01000000L, dst[3]);
+  ASSERT_EQUAL_64(0x00000001, x3);
+  ASSERT_EQUAL_64(0x0100000000000000L, dst[4]);
+  ASSERT_EQUAL_64(src_base, x17);
+  ASSERT_EQUAL_64(dst_base, x18);
+  ASSERT_EQUAL_64(src_base + 16, x19);
+  ASSERT_EQUAL_64(dst_base + 32, x20);
+
+  TEARDOWN();
+}
+
+
+#if 0 // TODO(all) enable.
+// TODO(rodolph): Adapt w16 Literal tests for RelocInfo.
+// NOTE: everything until the matching #endif is compiled out; these
+// literal-pool tests are disabled pending RelocInfo support.
+TEST(ldr_literal) {
+  // Ldr of immediates/FP values from the literal pool into x, w, d and s
+  // registers.
+  INIT_V8();
+  SETUP();
+
+  START();
+  __ Ldr(x2, 0x1234567890abcdefUL);
+  __ Ldr(w3, 0xfedcba09);
+  __ Ldr(d13, 1.234);
+  __ Ldr(s25, 2.5);
+  END();
+
+  RUN();
+
+  ASSERT_EQUAL_64(0x1234567890abcdefUL, x2);
+  ASSERT_EQUAL_64(0xfedcba09, x3);
+  ASSERT_EQUAL_FP64(1.234, d13);
+  ASSERT_EQUAL_FP32(2.5, s25);
+
+  TEARDOWN();
+}
+
+
+// Disabled (inside the #if 0 above). Emits four literal loads, pads the
+// code with NOPs out to `range_` bytes, then checks whether the literal
+// pool is (or is not) dumped when CheckLiteralPool runs with `option`.
+// `expect_dump` states the expected pool behaviour at that point.
+static void LdrLiteralRangeHelper(ptrdiff_t range_,
+                                  LiteralPoolEmitOption option,
+                                  bool expect_dump) {
+  ASSERT(range_ > 0);
+  SETUP_SIZE(range_ + 1024);
+
+  Label label_1, label_2;
+
+  size_t range = static_cast<size_t>(range_);
+  size_t code_size = 0;
+  size_t pool_guard_size;
+
+  if (option == NoJumpRequired) {
+    // Space for an explicit branch.
+    pool_guard_size = sizeof(Instr);
+  } else {
+    pool_guard_size = 0;
+  }
+
+  START();
+  // Force a pool dump so the pool starts off empty.
+  __ EmitLiteralPool(JumpRequired);
+  ASSERT_LITERAL_POOL_SIZE(0);
+
+  __ Ldr(x0, 0x1234567890abcdefUL);
+  __ Ldr(w1, 0xfedcba09);
+  __ Ldr(d0, 1.234);
+  __ Ldr(s1, 2.5);
+  ASSERT_LITERAL_POOL_SIZE(4);
+
+  code_size += 4 * sizeof(Instr);
+
+  // Check that the requested range (allowing space for a branch over the pool)
+  // can be handled by this test.
+  ASSERT((code_size + pool_guard_size) <= range);
+
+  // Emit NOPs up to 'range', leaving space for the pool guard.
+  while ((code_size + pool_guard_size) < range) {
+    __ Nop();
+    code_size += sizeof(Instr);
+  }
+
+  // Emit the guard sequence before the literal pool.
+  if (option == NoJumpRequired) {
+    __ B(&label_1);
+    code_size += sizeof(Instr);
+  }
+
+  ASSERT(code_size == range);
+  ASSERT_LITERAL_POOL_SIZE(4);
+
+  // Possibly generate a literal pool.
+  __ CheckLiteralPool(option);
+  __ Bind(&label_1);
+  if (expect_dump) {
+    ASSERT_LITERAL_POOL_SIZE(0);
+  } else {
+    ASSERT_LITERAL_POOL_SIZE(4);
+  }
+
+  // Force a pool flush to check that a second pool functions correctly.
+  __ EmitLiteralPool(JumpRequired);
+  ASSERT_LITERAL_POOL_SIZE(0);
+
+  // These loads should be after the pool (and will require a new one).
+  __ Ldr(x4, 0x34567890abcdef12UL);
+  __ Ldr(w5, 0xdcba09fe);
+  __ Ldr(d4, 123.4);
+  __ Ldr(s5, 250.0);
+  ASSERT_LITERAL_POOL_SIZE(4);
+  END();
+
+  RUN();
+
+  // Check that the literals loaded correctly.
+  ASSERT_EQUAL_64(0x1234567890abcdefUL, x0);
+  ASSERT_EQUAL_64(0xfedcba09, x1);
+  ASSERT_EQUAL_FP64(1.234, d0);
+  ASSERT_EQUAL_FP32(2.5, s1);
+  ASSERT_EQUAL_64(0x34567890abcdef12UL, x4);
+  ASSERT_EQUAL_64(0xdcba09fe, x5);
+  ASSERT_EQUAL_FP64(123.4, d4);
+  ASSERT_EQUAL_FP32(250.0, s5);
+
+  TEARDOWN();
+}
+
+
+// Disabled (inside the #if 0 above): drive LdrLiteralRangeHelper at and
+// around the recommended pool range and the check interval, with/without
+// an expected pool dump.
+TEST(ldr_literal_range_1) {
+  INIT_V8();
+  LdrLiteralRangeHelper(kRecommendedLiteralPoolRange,
+                        NoJumpRequired,
+                        true);
+}
+
+
+TEST(ldr_literal_range_2) {
+  INIT_V8();
+  LdrLiteralRangeHelper(kRecommendedLiteralPoolRange-sizeof(Instr),
+                        NoJumpRequired,
+                        false);
+}
+
+
+TEST(ldr_literal_range_3) {
+  INIT_V8();
+  LdrLiteralRangeHelper(2 * kRecommendedLiteralPoolRange,
+                        JumpRequired,
+                        true);
+}
+
+
+TEST(ldr_literal_range_4) {
+  INIT_V8();
+  LdrLiteralRangeHelper(2 * kRecommendedLiteralPoolRange-sizeof(Instr),
+                        JumpRequired,
+                        false);
+}
+
+
+TEST(ldr_literal_range_5) {
+  INIT_V8();
+  LdrLiteralRangeHelper(kLiteralPoolCheckInterval,
+                        JumpRequired,
+                        false);
+}
+
+
+TEST(ldr_literal_range_6) {
+  INIT_V8();
+  LdrLiteralRangeHelper(kLiteralPoolCheckInterval-sizeof(Instr),
+                        JumpRequired,
+                        false);
+}
+#endif
+
+TEST(add_sub_imm) {
+  // Add/Sub with encodable immediates (12-bit, optionally shifted by 12)
+  // at both x and w widths, including wrap-around at the type boundaries
+  // (0xff..ff + 1 and 0x80..00 - 1).
+  INIT_V8();
+  SETUP();
+
+  START();
+  __ Mov(x0, 0x0);
+  __ Mov(x1, 0x1111);
+  __ Mov(x2, 0xffffffffffffffffL);
+  __ Mov(x3, 0x8000000000000000L);
+
+  __ Add(x10, x0, Operand(0x123));
+  __ Add(x11, x1, Operand(0x122000));
+  __ Add(x12, x0, Operand(0xabc << 12));
+  __ Add(x13, x2, Operand(1));
+
+  __ Add(w14, w0, Operand(0x123));
+  __ Add(w15, w1, Operand(0x122000));
+  __ Add(w16, w0, Operand(0xabc << 12));
+  __ Add(w17, w2, Operand(1));
+
+  __ Sub(x20, x0, Operand(0x1));
+  __ Sub(x21, x1, Operand(0x111));
+  __ Sub(x22, x1, Operand(0x1 << 12));
+  __ Sub(x23, x3, Operand(1));
+
+  __ Sub(w24, w0, Operand(0x1));
+  __ Sub(w25, w1, Operand(0x111));
+  __ Sub(w26, w1, Operand(0x1 << 12));
+  __ Sub(w27, w3, Operand(1));
+  END();
+
+  RUN();
+
+  ASSERT_EQUAL_64(0x123, x10);
+  ASSERT_EQUAL_64(0x123111, x11);
+  ASSERT_EQUAL_64(0xabc000, x12);
+  ASSERT_EQUAL_64(0x0, x13);
+
+  ASSERT_EQUAL_32(0x123, w14);
+  ASSERT_EQUAL_32(0x123111, w15);
+  ASSERT_EQUAL_32(0xabc000, w16);
+  ASSERT_EQUAL_32(0x0, w17);
+
+  ASSERT_EQUAL_64(0xffffffffffffffffL, x20);
+  ASSERT_EQUAL_64(0x1000, x21);
+  ASSERT_EQUAL_64(0x111, x22);
+  ASSERT_EQUAL_64(0x7fffffffffffffffL, x23);
+
+  ASSERT_EQUAL_32(0xffffffff, w24);
+  ASSERT_EQUAL_32(0x1000, w25);
+  ASSERT_EQUAL_32(0x111, w26);
+  ASSERT_EQUAL_32(0xffffffff, w27);
+
+  TEARDOWN();
+}
+
+
+TEST(add_sub_wide_imm) {
+  // Add/Sub with immediates too wide for the 12-bit encoding, forcing the
+  // macro assembler to materialize them in a scratch register first.
+  INIT_V8();
+  SETUP();
+
+  START();
+  __ Mov(x0, 0x0);
+  __ Mov(x1, 0x1);
+
+  __ Add(x10, x0, Operand(0x1234567890abcdefUL));
+  __ Add(x11, x1, Operand(0xffffffff));
+
+  __ Add(w12, w0, Operand(0x12345678));
+  __ Add(w13, w1, Operand(0xffffffff));
+
+  __ Sub(x20, x0, Operand(0x1234567890abcdefUL));
+
+  __ Sub(w21, w0, Operand(0x12345678));
+  END();
+
+  RUN();
+
+  ASSERT_EQUAL_64(0x1234567890abcdefUL, x10);
+  ASSERT_EQUAL_64(0x100000000UL, x11);
+
+  ASSERT_EQUAL_32(0x12345678, w12);
+  // 1 + 0xffffffff wraps to 0 at 32-bit width.
+  ASSERT_EQUAL_64(0x0, x13);
+
+  ASSERT_EQUAL_64(-0x1234567890abcdefUL, x20);
+
+  ASSERT_EQUAL_32(-0x12345678, w21);
+
+  TEARDOWN();
+}
+
+
+TEST(add_sub_shifted) {
+  // Add/Sub with shifted-register operands (LSL, LSR, ASR, ROR) at both
+  // x and w widths.
+  INIT_V8();
+  SETUP();
+
+  START();
+  __ Mov(x0, 0);
+  __ Mov(x1, 0x0123456789abcdefL);
+  __ Mov(x2, 0xfedcba9876543210L);
+  __ Mov(x3, 0xffffffffffffffffL);
+
+  __ Add(x10, x1, Operand(x2));
+  __ Add(x11, x0, Operand(x1, LSL, 8));
+  __ Add(x12, x0, Operand(x1, LSR, 8));
+  __ Add(x13, x0, Operand(x1, ASR, 8));
+  __ Add(x14, x0, Operand(x2, ASR, 8));
+  __ Add(w15, w0, Operand(w1, ASR, 8));
+  __ Add(w18, w3, Operand(w1, ROR, 8));
+  __ Add(x19, x3, Operand(x1, ROR, 8));
+
+  __ Sub(x20, x3, Operand(x2));
+  __ Sub(x21, x3, Operand(x1, LSL, 8));
+  __ Sub(x22, x3, Operand(x1, LSR, 8));
+  __ Sub(x23, x3, Operand(x1, ASR, 8));
+  __ Sub(x24, x3, Operand(x2, ASR, 8));
+  __ Sub(w25, w3, Operand(w1, ASR, 8));
+  __ Sub(w26, w3, Operand(w1, ROR, 8));
+  __ Sub(x27, x3, Operand(x1, ROR, 8));
+  END();
+
+  RUN();
+
+  ASSERT_EQUAL_64(0xffffffffffffffffL, x10);
+  ASSERT_EQUAL_64(0x23456789abcdef00L, x11);
+  ASSERT_EQUAL_64(0x000123456789abcdL, x12);
+  ASSERT_EQUAL_64(0x000123456789abcdL, x13);
+  ASSERT_EQUAL_64(0xfffedcba98765432L, x14);
+  ASSERT_EQUAL_64(0xff89abcd, x15);
+  ASSERT_EQUAL_64(0xef89abcc, x18);
+  ASSERT_EQUAL_64(0xef0123456789abccL, x19);
+
+  ASSERT_EQUAL_64(0x0123456789abcdefL, x20);
+  ASSERT_EQUAL_64(0xdcba9876543210ffL, x21);
+  ASSERT_EQUAL_64(0xfffedcba98765432L, x22);
+  ASSERT_EQUAL_64(0xfffedcba98765432L, x23);
+  ASSERT_EQUAL_64(0x000123456789abcdL, x24);
+  ASSERT_EQUAL_64(0x00765432, x25);
+  ASSERT_EQUAL_64(0x10765432, x26);
+  ASSERT_EQUAL_64(0x10fedcba98765432L, x27);
+
+  TEARDOWN();
+}
+
+
+TEST(add_sub_extended) {
+  // Add/Sub with extended-register operands (UXTB/UXTH/UXTW and the
+  // signed SXTB/SXTH/SXTW variants), each with a left-shift amount of
+  // 0 to 4, at both x and w widths.
+  INIT_V8();
+  SETUP();
+
+  START();
+  __ Mov(x0, 0);
+  __ Mov(x1, 0x0123456789abcdefL);
+  __ Mov(x2, 0xfedcba9876543210L);
+  __ Mov(w3, 0x80);
+
+  __ Add(x10, x0, Operand(x1, UXTB, 0));
+  __ Add(x11, x0, Operand(x1, UXTB, 1));
+  __ Add(x12, x0, Operand(x1, UXTH, 2));
+  __ Add(x13, x0, Operand(x1, UXTW, 4));
+
+  __ Add(x14, x0, Operand(x1, SXTB, 0));
+  __ Add(x15, x0, Operand(x1, SXTB, 1));
+  __ Add(x16, x0, Operand(x1, SXTH, 2));
+  __ Add(x17, x0, Operand(x1, SXTW, 3));
+  __ Add(x18, x0, Operand(x2, SXTB, 0));
+  __ Add(x19, x0, Operand(x2, SXTB, 1));
+  __ Add(x20, x0, Operand(x2, SXTH, 2));
+  __ Add(x21, x0, Operand(x2, SXTW, 3));
+
+  __ Add(x22, x1, Operand(x2, SXTB, 1));
+  __ Sub(x23, x1, Operand(x2, SXTB, 1));
+
+  __ Add(w24, w1, Operand(w2, UXTB, 2));
+  __ Add(w25, w0, Operand(w1, SXTB, 0));
+  __ Add(w26, w0, Operand(w1, SXTB, 1));
+  __ Add(w27, w2, Operand(w1, SXTW, 3));
+
+  __ Add(w28, w0, Operand(w1, SXTW, 3));
+  __ Add(x29, x0, Operand(w1, SXTW, 3));
+
+  __ Sub(x30, x0, Operand(w3, SXTB, 1));
+  END();
+
+  RUN();
+
+  ASSERT_EQUAL_64(0xefL, x10);
+  ASSERT_EQUAL_64(0x1deL, x11);
+  ASSERT_EQUAL_64(0x337bcL, x12);
+  ASSERT_EQUAL_64(0x89abcdef0L, x13);
+
+  ASSERT_EQUAL_64(0xffffffffffffffefL, x14);
+  ASSERT_EQUAL_64(0xffffffffffffffdeL, x15);
+  ASSERT_EQUAL_64(0xffffffffffff37bcL, x16);
+  ASSERT_EQUAL_64(0xfffffffc4d5e6f78L, x17);
+  ASSERT_EQUAL_64(0x10L, x18);
+  ASSERT_EQUAL_64(0x20L, x19);
+  ASSERT_EQUAL_64(0xc840L, x20);
+  ASSERT_EQUAL_64(0x3b2a19080L, x21);
+
+  ASSERT_EQUAL_64(0x0123456789abce0fL, x22);
+  ASSERT_EQUAL_64(0x0123456789abcdcfL, x23);
+
+  ASSERT_EQUAL_32(0x89abce2f, w24);
+  ASSERT_EQUAL_32(0xffffffef, w25);
+  ASSERT_EQUAL_32(0xffffffde, w26);
+  ASSERT_EQUAL_32(0xc3b2a188, w27);
+
+  ASSERT_EQUAL_32(0x4d5e6f78, w28);
+  ASSERT_EQUAL_64(0xfffffffc4d5e6f78L, x29);
+
+  // 0 - (0x80 sign-extended to -0x80, shifted left 1) = 256.
+  ASSERT_EQUAL_64(256, x30);
+
+  TEARDOWN();
+}
+
+
+TEST(add_sub_negative) {
+  // Add with a negative immediate / Sub with a negative immediate: the
+  // macro assembler should flip the operation (Add x, -n == Sub x, n)
+  // rather than materializing the negative constant.
+  INIT_V8();
+  SETUP();
+
+  START();
+  __ Mov(x0, 0);
+  __ Mov(x1, 4687);
+  __ Mov(x2, 0x1122334455667788);
+  __ Mov(w3, 0x11223344);
+  __ Mov(w4, 400000);
+
+  __ Add(x10, x0, -42);
+  __ Add(x11, x1, -687);
+  __ Add(x12, x2, -0x88);
+
+  __ Sub(x13, x0, -600);
+  __ Sub(x14, x1, -313);
+  __ Sub(x15, x2, -0x555);
+
+  __ Add(w19, w3, -0x344);
+  __ Add(w20, w4, -2000);
+
+  __ Sub(w21, w3, -0xbc);
+  __ Sub(w22, w4, -2000);
+  END();
+
+  RUN();
+
+  ASSERT_EQUAL_64(-42, x10);
+  ASSERT_EQUAL_64(4000, x11);
+  ASSERT_EQUAL_64(0x1122334455667700, x12);
+
+  ASSERT_EQUAL_64(600, x13);
+  ASSERT_EQUAL_64(5000, x14);
+  ASSERT_EQUAL_64(0x1122334455667cdd, x15);
+
+  ASSERT_EQUAL_32(0x11223000, w19);
+  ASSERT_EQUAL_32(398000, w20);
+
+  ASSERT_EQUAL_32(0x11223400, w21);
+  ASSERT_EQUAL_32(402000, w22);
+
+  TEARDOWN();
+}
+
+
+TEST(add_sub_zero) {
+  // Adding/subtracting zero: the x-register forms should be elided
+  // entirely (no code generated), while the w-register forms must still
+  // emit an instruction (they zero-extend the destination's top 32 bits).
+  INIT_V8();
+  SETUP();
+
+  START();
+  __ Mov(x0, 0);
+  __ Mov(x1, 0);
+  __ Mov(x2, 0);
+
+  Label blob1;
+  __ Bind(&blob1);
+  __ Add(x0, x0, 0);
+  __ Sub(x1, x1, 0);
+  __ Sub(x2, x2, xzr);
+  CHECK_EQ(0, __ SizeOfCodeGeneratedSince(&blob1));
+
+  Label blob2;
+  __ Bind(&blob2);
+  __ Add(w3, w3, 0);
+  CHECK_NE(0, __ SizeOfCodeGeneratedSince(&blob2));
+
+  Label blob3;
+  __ Bind(&blob3);
+  __ Sub(w3, w3, wzr);
+  CHECK_NE(0, __ SizeOfCodeGeneratedSince(&blob3));
+
+  END();
+
+  RUN();
+
+  ASSERT_EQUAL_64(0, x0);
+  ASSERT_EQUAL_64(0, x1);
+  ASSERT_EQUAL_64(0, x2);
+
+  TEARDOWN();
+}
+
+
+TEST(claim_drop_zero) {
+  // Claim/Drop (and the SMI variants) with a zero size or zero shift must
+  // generate no code at all; checked via SizeOfCodeGeneratedSince.
+  INIT_V8();
+  SETUP();
+
+  START();
+
+  Label start;
+  __ Bind(&start);
+  __ Claim(0);
+  __ Drop(0);
+  __ Claim(xzr, 8);
+  __ Drop(xzr, 8);
+  __ Claim(xzr, 0);
+  __ Drop(xzr, 0);
+  __ Claim(x7, 0);
+  __ Drop(x7, 0);
+  __ ClaimBySMI(xzr, 8);
+  __ DropBySMI(xzr, 8);
+  __ ClaimBySMI(xzr, 0);
+  __ DropBySMI(xzr, 0);
+  CHECK_EQ(0, __ SizeOfCodeGeneratedSince(&start));
+
+  END();
+
+  RUN();
+
+  TEARDOWN();
+}
+
+
+TEST(neg) {
+  // Neg with immediate, shifted-register and extended-register operands,
+  // at both x and w widths.
+  INIT_V8();
+  SETUP();
+
+  START();
+  __ Mov(x0, 0xf123456789abcdefL);
+
+  // Immediate.
+  __ Neg(x1, 0x123);
+  __ Neg(w2, 0x123);
+
+  // Shifted.
+  __ Neg(x3, Operand(x0, LSL, 1));
+  __ Neg(w4, Operand(w0, LSL, 2));
+  __ Neg(x5, Operand(x0, LSR, 3));
+  __ Neg(w6, Operand(w0, LSR, 4));
+  __ Neg(x7, Operand(x0, ASR, 5));
+  __ Neg(w8, Operand(w0, ASR, 6));
+
+  // Extended.
+  __ Neg(w9, Operand(w0, UXTB));
+  __ Neg(x10, Operand(x0, SXTB, 1));
+  __ Neg(w11, Operand(w0, UXTH, 2));
+  __ Neg(x12, Operand(x0, SXTH, 3));
+  __ Neg(w13, Operand(w0, UXTW, 4));
+  __ Neg(x14, Operand(x0, SXTW, 4));
+  END();
+
+  RUN();
+
+  ASSERT_EQUAL_64(0xfffffffffffffeddUL, x1);
+  ASSERT_EQUAL_64(0xfffffedd, x2);
+  ASSERT_EQUAL_64(0x1db97530eca86422UL, x3);
+  ASSERT_EQUAL_64(0xd950c844, x4);
+  ASSERT_EQUAL_64(0xe1db97530eca8643UL, x5);
+  ASSERT_EQUAL_64(0xf7654322, x6);
+  ASSERT_EQUAL_64(0x0076e5d4c3b2a191UL, x7);
+  ASSERT_EQUAL_64(0x01d950c9, x8);
+  ASSERT_EQUAL_64(0xffffff11, x9);
+  ASSERT_EQUAL_64(0x0000000000000022UL, x10);
+  ASSERT_EQUAL_64(0xfffcc844, x11);
+  ASSERT_EQUAL_64(0x0000000000019088UL, x12);
+  ASSERT_EQUAL_64(0x65432110, x13);
+  ASSERT_EQUAL_64(0x0000000765432110UL, x14);
+
+  TEARDOWN();
+}
+
+
+TEST(adc_sbc_shift) {
+ // Checks Adc/Sbc with plain and shifted-register operands with the C flag
+ // both clear and set, then checks that Adcs/Sbcs/Ngcs set NZCV correctly.
+ // Fix: three START() invocations below were missing their trailing
+ // semicolon (every other use in this file is "START();"); without it the
+ // macro expansion fuses with the following statement and fails to compile.
+ INIT_V8();
+ SETUP();
+
+ START();
+ __ Mov(x0, 0);
+ __ Mov(x1, 1);
+ __ Mov(x2, 0x0123456789abcdefL);
+ __ Mov(x3, 0xfedcba9876543210L);
+ __ Mov(x4, 0xffffffffffffffffL);
+
+ // Clear the C flag.
+ __ Adds(x0, x0, Operand(0));
+
+ __ Adc(x5, x2, Operand(x3));
+ __ Adc(x6, x0, Operand(x1, LSL, 60));
+ __ Sbc(x7, x4, Operand(x3, LSR, 4));
+ __ Adc(x8, x2, Operand(x3, ASR, 4));
+ __ Adc(x9, x2, Operand(x3, ROR, 8));
+
+ __ Adc(w10, w2, Operand(w3));
+ __ Adc(w11, w0, Operand(w1, LSL, 30));
+ __ Sbc(w12, w4, Operand(w3, LSR, 4));
+ __ Adc(w13, w2, Operand(w3, ASR, 4));
+ __ Adc(w14, w2, Operand(w3, ROR, 8));
+
+ // Set the C flag.
+ __ Cmp(w0, Operand(w0));
+
+ __ Adc(x18, x2, Operand(x3));
+ __ Adc(x19, x0, Operand(x1, LSL, 60));
+ __ Sbc(x20, x4, Operand(x3, LSR, 4));
+ __ Adc(x21, x2, Operand(x3, ASR, 4));
+ __ Adc(x22, x2, Operand(x3, ROR, 8));
+
+ __ Adc(w23, w2, Operand(w3));
+ __ Adc(w24, w0, Operand(w1, LSL, 30));
+ __ Sbc(w25, w4, Operand(w3, LSR, 4));
+ __ Adc(w26, w2, Operand(w3, ASR, 4));
+ __ Adc(w27, w2, Operand(w3, ROR, 8));
+ END();
+
+ RUN();
+
+ // C clear: plain add/sub-with-carry results.
+ ASSERT_EQUAL_64(0xffffffffffffffffL, x5);
+ ASSERT_EQUAL_64(1L << 60, x6);
+ ASSERT_EQUAL_64(0xf0123456789abcddL, x7);
+ ASSERT_EQUAL_64(0x0111111111111110L, x8);
+ ASSERT_EQUAL_64(0x1222222222222221L, x9);
+
+ ASSERT_EQUAL_32(0xffffffff, w10);
+ ASSERT_EQUAL_32(1 << 30, w11);
+ ASSERT_EQUAL_32(0xf89abcdd, w12);
+ ASSERT_EQUAL_32(0x91111110, w13);
+ ASSERT_EQUAL_32(0x9a222221, w14);
+
+ // C set: same results plus the carry-in.
+ ASSERT_EQUAL_64(0xffffffffffffffffL + 1, x18);
+ ASSERT_EQUAL_64((1L << 60) + 1, x19);
+ ASSERT_EQUAL_64(0xf0123456789abcddL + 1, x20);
+ ASSERT_EQUAL_64(0x0111111111111110L + 1, x21);
+ ASSERT_EQUAL_64(0x1222222222222221L + 1, x22);
+
+ ASSERT_EQUAL_32(0xffffffff + 1, w23);
+ ASSERT_EQUAL_32((1 << 30) + 1, w24);
+ ASSERT_EQUAL_32(0xf89abcdd + 1, w25);
+ ASSERT_EQUAL_32(0x91111110 + 1, w26);
+ ASSERT_EQUAL_32(0x9a222221 + 1, w27);
+
+ // Check that adc correctly sets the condition flags.
+ START();
+ __ Mov(x0, 1);
+ __ Mov(x1, 0xffffffffffffffffL);
+ // Clear the C flag.
+ __ Adds(x0, x0, Operand(0));
+ __ Adcs(x10, x0, Operand(x1));
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_NZCV(ZCFlag);
+ ASSERT_EQUAL_64(0, x10);
+
+ START();
+ __ Mov(x0, 1);
+ __ Mov(x1, 0x8000000000000000L);
+ // Clear the C flag.
+ __ Adds(x0, x0, Operand(0));
+ __ Adcs(x10, x0, Operand(x1, ASR, 63));
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_NZCV(ZCFlag);
+ ASSERT_EQUAL_64(0, x10);
+
+ START();
+ __ Mov(x0, 0x10);
+ __ Mov(x1, 0x07ffffffffffffffL);
+ // Clear the C flag.
+ __ Adds(x0, x0, Operand(0));
+ __ Adcs(x10, x0, Operand(x1, LSL, 4));
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_NZCV(NVFlag);
+ ASSERT_EQUAL_64(0x8000000000000000L, x10);
+
+ // Check that sbc correctly sets the condition flags.
+ START();
+ __ Mov(x0, 0);
+ __ Mov(x1, 0xffffffffffffffffL);
+ // Clear the C flag.
+ __ Adds(x0, x0, Operand(0));
+ __ Sbcs(x10, x0, Operand(x1));
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_NZCV(ZFlag);
+ ASSERT_EQUAL_64(0, x10);
+
+ START();
+ __ Mov(x0, 1);
+ __ Mov(x1, 0xffffffffffffffffL);
+ // Clear the C flag.
+ __ Adds(x0, x0, Operand(0));
+ __ Sbcs(x10, x0, Operand(x1, LSR, 1));
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_NZCV(NFlag);
+ ASSERT_EQUAL_64(0x8000000000000001L, x10);
+
+ START();
+ __ Mov(x0, 0);
+ // Clear the C flag.
+ __ Adds(x0, x0, Operand(0));
+ __ Sbcs(x10, x0, Operand(0xffffffffffffffffL));
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_NZCV(ZFlag);
+ ASSERT_EQUAL_64(0, x10);
+
+ START();
+ __ Mov(w0, 0x7fffffff);
+ // Clear the C flag.
+ __ Adds(x0, x0, Operand(0));
+ __ Ngcs(w10, w0);
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_NZCV(NFlag);
+ ASSERT_EQUAL_64(0x80000000, x10);
+
+ START();
+ // Clear the C flag.
+ __ Adds(x0, x0, Operand(0));
+ __ Ngcs(x10, 0x7fffffffffffffffL);
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_NZCV(NFlag);
+ ASSERT_EQUAL_64(0x8000000000000000L, x10);
+
+ START();
+ __ Mov(x0, 0);
+ // Set the C flag.
+ __ Cmp(x0, Operand(x0));
+ __ Sbcs(x10, x0, Operand(1));
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_NZCV(NFlag);
+ ASSERT_EQUAL_64(0xffffffffffffffffL, x10);
+
+ START();
+ __ Mov(x0, 0);
+ // Set the C flag.
+ __ Cmp(x0, Operand(x0));
+ __ Ngcs(x10, 0x7fffffffffffffffL);
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_NZCV(NFlag);
+ ASSERT_EQUAL_64(0x8000000000000001L, x10);
+
+ TEARDOWN();
+}
+
+
+TEST(adc_sbc_extend) {
+ // Checks Adc/Sbc with extended-register operands (UXTB/SXTH/UXTW/UXTX plus
+ // shift) with C clear and set, and the NZCV results of Adcs with extends.
+ INIT_V8();
+ SETUP();
+
+ START();
+ // Clear the C flag.
+ __ Adds(x0, x0, Operand(0));
+
+ __ Mov(x0, 0);
+ __ Mov(x1, 1);
+ __ Mov(x2, 0x0123456789abcdefL);
+
+ __ Adc(x10, x1, Operand(w2, UXTB, 1));
+ __ Adc(x11, x1, Operand(x2, SXTH, 2));
+ __ Sbc(x12, x1, Operand(w2, UXTW, 4));
+ __ Adc(x13, x1, Operand(x2, UXTX, 4));
+
+ __ Adc(w14, w1, Operand(w2, UXTB, 1));
+ __ Adc(w15, w1, Operand(w2, SXTH, 2));
+ __ Adc(w9, w1, Operand(w2, UXTW, 4));
+
+ // Set the C flag.
+ __ Cmp(w0, Operand(w0));
+
+ __ Adc(x20, x1, Operand(w2, UXTB, 1));
+ __ Adc(x21, x1, Operand(x2, SXTH, 2));
+ __ Sbc(x22, x1, Operand(w2, UXTW, 4));
+ __ Adc(x23, x1, Operand(x2, UXTX, 4));
+
+ __ Adc(w24, w1, Operand(w2, UXTB, 1));
+ __ Adc(w25, w1, Operand(w2, SXTH, 2));
+ __ Adc(w26, w1, Operand(w2, UXTW, 4));
+ END();
+
+ RUN();
+
+ // C clear.
+ ASSERT_EQUAL_64(0x1df, x10);
+ ASSERT_EQUAL_64(0xffffffffffff37bdL, x11);
+ ASSERT_EQUAL_64(0xfffffff765432110L, x12);
+ ASSERT_EQUAL_64(0x123456789abcdef1L, x13);
+
+ ASSERT_EQUAL_32(0x1df, w14);
+ ASSERT_EQUAL_32(0xffff37bd, w15);
+ ASSERT_EQUAL_32(0x9abcdef1, w9);
+
+ // C set: same results plus the carry-in.
+ ASSERT_EQUAL_64(0x1df + 1, x20);
+ ASSERT_EQUAL_64(0xffffffffffff37bdL + 1, x21);
+ ASSERT_EQUAL_64(0xfffffff765432110L + 1, x22);
+ ASSERT_EQUAL_64(0x123456789abcdef1L + 1, x23);
+
+ ASSERT_EQUAL_32(0x1df + 1, w24);
+ ASSERT_EQUAL_32(0xffff37bd + 1, w25);
+ ASSERT_EQUAL_32(0x9abcdef1 + 1, w26);
+
+ // Check that adc correctly sets the condition flags.
+ START();
+ __ Mov(x0, 0xff);
+ __ Mov(x1, 0xffffffffffffffffL);
+ // Clear the C flag.
+ __ Adds(x0, x0, Operand(0));
+ __ Adcs(x10, x0, Operand(x1, SXTX, 1));
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_NZCV(CFlag);
+
+ START();
+ __ Mov(x0, 0x7fffffffffffffffL);
+ __ Mov(x1, 1);
+ // Clear the C flag.
+ __ Adds(x0, x0, Operand(0));
+ __ Adcs(x10, x0, Operand(x1, UXTB, 2));
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_NZCV(NVFlag);
+
+ START();
+ __ Mov(x0, 0x7fffffffffffffffL);
+ // Clear the C flag.
+ __ Adds(x0, x0, Operand(0));
+ __ Adcs(x10, x0, Operand(1));
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_NZCV(NVFlag);
+
+ TEARDOWN();
+}
+
+
+TEST(adc_sbc_wide_imm) {
+ // Checks Adc/Sbc/Ngc with immediates too wide to encode directly (the
+ // macro assembler must materialize them), with C clear and set.
+ INIT_V8();
+ SETUP();
+
+ START();
+ __ Mov(x0, 0);
+
+ // Clear the C flag.
+ __ Adds(x0, x0, Operand(0));
+
+ __ Adc(x7, x0, Operand(0x1234567890abcdefUL));
+ __ Adc(w8, w0, Operand(0xffffffff));
+ __ Sbc(x9, x0, Operand(0x1234567890abcdefUL));
+ __ Sbc(w10, w0, Operand(0xffffffff));
+ __ Ngc(x11, Operand(0xffffffff00000000UL));
+ __ Ngc(w12, Operand(0xffff0000));
+
+ // Set the C flag.
+ __ Cmp(w0, Operand(w0));
+
+ __ Adc(x18, x0, Operand(0x1234567890abcdefUL));
+ __ Adc(w19, w0, Operand(0xffffffff));
+ __ Sbc(x20, x0, Operand(0x1234567890abcdefUL));
+ __ Sbc(w21, w0, Operand(0xffffffff));
+ __ Ngc(x22, Operand(0xffffffff00000000UL));
+ __ Ngc(w23, Operand(0xffff0000));
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(0x1234567890abcdefUL, x7);
+ ASSERT_EQUAL_64(0xffffffff, x8);
+ ASSERT_EQUAL_64(0xedcba9876f543210UL, x9);
+ ASSERT_EQUAL_64(0, x10);
+ ASSERT_EQUAL_64(0xffffffff, x11);
+ ASSERT_EQUAL_64(0xffff, x12);
+
+ ASSERT_EQUAL_64(0x1234567890abcdefUL + 1, x18);
+ ASSERT_EQUAL_64(0, x19);
+ ASSERT_EQUAL_64(0xedcba9876f543211UL, x20);
+ ASSERT_EQUAL_64(1, x21);
+ ASSERT_EQUAL_64(0x100000000UL, x22);
+ ASSERT_EQUAL_64(0x10000, x23);
+
+ TEARDOWN();
+}
+
+
+TEST(flags) {
+ // Checks Neg/Ngc results and the NZCV flags produced by Cmp/Cmn/Ngcs for a
+ // range of zero, positive and negative operands in both w and x forms.
+ INIT_V8();
+ SETUP();
+
+ START();
+ __ Mov(x0, 0);
+ __ Mov(x1, 0x1111111111111111L);
+ __ Neg(x10, Operand(x0));
+ __ Neg(x11, Operand(x1));
+ __ Neg(w12, Operand(w1));
+ // Clear the C flag.
+ __ Adds(x0, x0, Operand(0));
+ __ Ngc(x13, Operand(x0));
+ // Set the C flag.
+ __ Cmp(x0, Operand(x0));
+ __ Ngc(w14, Operand(w0));
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(0, x10);
+ ASSERT_EQUAL_64(-0x1111111111111111L, x11);
+ ASSERT_EQUAL_32(-0x11111111, w12);
+ ASSERT_EQUAL_64(-1L, x13);
+ ASSERT_EQUAL_32(0, w14);
+
+ // Each snippet below runs one comparison and checks the resulting NZCV.
+ START();
+ __ Mov(x0, 0);
+ __ Cmp(x0, Operand(x0));
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_NZCV(ZCFlag);
+
+ START();
+ __ Mov(w0, 0);
+ __ Cmp(w0, Operand(w0));
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_NZCV(ZCFlag);
+
+ START();
+ __ Mov(x0, 0);
+ __ Mov(x1, 0x1111111111111111L);
+ __ Cmp(x0, Operand(x1));
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_NZCV(NFlag);
+
+ START();
+ __ Mov(w0, 0);
+ __ Mov(w1, 0x11111111);
+ __ Cmp(w0, Operand(w1));
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_NZCV(NFlag);
+
+ START();
+ __ Mov(x1, 0x1111111111111111L);
+ __ Cmp(x1, Operand(0));
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_NZCV(CFlag);
+
+ START();
+ __ Mov(w1, 0x11111111);
+ __ Cmp(w1, Operand(0));
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_NZCV(CFlag);
+
+ START();
+ __ Mov(x0, 1);
+ __ Mov(x1, 0x7fffffffffffffffL);
+ __ Cmn(x1, Operand(x0));
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_NZCV(NVFlag);
+
+ START();
+ __ Mov(w0, 1);
+ __ Mov(w1, 0x7fffffff);
+ __ Cmn(w1, Operand(w0));
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_NZCV(NVFlag);
+
+ START();
+ __ Mov(x0, 1);
+ __ Mov(x1, 0xffffffffffffffffL);
+ __ Cmn(x1, Operand(x0));
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_NZCV(ZCFlag);
+
+ START();
+ __ Mov(w0, 1);
+ __ Mov(w1, 0xffffffff);
+ __ Cmn(w1, Operand(w0));
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_NZCV(ZCFlag);
+
+ START();
+ __ Mov(w0, 0);
+ __ Mov(w1, 1);
+ // Clear the C flag.
+ __ Adds(w0, w0, Operand(0));
+ __ Ngcs(w0, Operand(w1));
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_NZCV(NFlag);
+
+ START();
+ __ Mov(w0, 0);
+ __ Mov(w1, 0);
+ // Set the C flag.
+ __ Cmp(w0, Operand(w0));
+ __ Ngcs(w0, Operand(w1));
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_NZCV(ZCFlag);
+
+ TEARDOWN();
+}
+
+
+TEST(cmp_shift) {
+ // Checks Cmp with every shift type (LSL/LSR/ASR/ROR); the operands are
+ // chosen so each comparison is equal, so all must produce ZCFlag.
+ INIT_V8();
+ SETUP();
+
+ START();
+ __ Mov(x18, 0xf0000000);
+ __ Mov(x19, 0xf000000010000000UL);
+ __ Mov(x20, 0xf0000000f0000000UL);
+ __ Mov(x21, 0x7800000078000000UL);
+ __ Mov(x22, 0x3c0000003c000000UL);
+ __ Mov(x23, 0x8000000780000000UL);
+ __ Mov(x24, 0x0000000f00000000UL);
+ __ Mov(x25, 0x00000003c0000000UL);
+ __ Mov(x26, 0x8000000780000000UL);
+ __ Mov(x27, 0xc0000003);
+
+ // Each Mrs captures NZCV so all results can be checked after one run.
+ __ Cmp(w20, Operand(w21, LSL, 1));
+ __ Mrs(x0, NZCV);
+
+ __ Cmp(x20, Operand(x22, LSL, 2));
+ __ Mrs(x1, NZCV);
+
+ __ Cmp(w19, Operand(w23, LSR, 3));
+ __ Mrs(x2, NZCV);
+
+ __ Cmp(x18, Operand(x24, LSR, 4));
+ __ Mrs(x3, NZCV);
+
+ __ Cmp(w20, Operand(w25, ASR, 2));
+ __ Mrs(x4, NZCV);
+
+ __ Cmp(x20, Operand(x26, ASR, 3));
+ __ Mrs(x5, NZCV);
+
+ __ Cmp(w27, Operand(w22, ROR, 28));
+ __ Mrs(x6, NZCV);
+
+ __ Cmp(x20, Operand(x21, ROR, 31));
+ __ Mrs(x7, NZCV);
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_32(ZCFlag, w0);
+ ASSERT_EQUAL_32(ZCFlag, w1);
+ ASSERT_EQUAL_32(ZCFlag, w2);
+ ASSERT_EQUAL_32(ZCFlag, w3);
+ ASSERT_EQUAL_32(ZCFlag, w4);
+ ASSERT_EQUAL_32(ZCFlag, w5);
+ ASSERT_EQUAL_32(ZCFlag, w6);
+ ASSERT_EQUAL_32(ZCFlag, w7);
+
+ TEARDOWN();
+}
+
+
+TEST(cmp_extend) {
+ // Checks Cmp with extended-register operands (SXTB/UXTB/UXTH/SXTH/UXTW/
+ // SXTW), capturing NZCV after each comparison.
+ INIT_V8();
+ SETUP();
+
+ START();
+ __ Mov(w20, 0x2);
+ __ Mov(w21, 0x1);
+ __ Mov(x22, 0xffffffffffffffffUL);
+ __ Mov(x23, 0xff);
+ __ Mov(x24, 0xfffffffffffffffeUL);
+ __ Mov(x25, 0xffff);
+ __ Mov(x26, 0xffffffff);
+
+ __ Cmp(w20, Operand(w21, LSL, 1));
+ __ Mrs(x0, NZCV);
+
+ __ Cmp(x22, Operand(x23, SXTB, 0));
+ __ Mrs(x1, NZCV);
+
+ __ Cmp(x24, Operand(x23, SXTB, 1));
+ __ Mrs(x2, NZCV);
+
+ __ Cmp(x24, Operand(x23, UXTB, 1));
+ __ Mrs(x3, NZCV);
+
+ __ Cmp(w22, Operand(w25, UXTH));
+ __ Mrs(x4, NZCV);
+
+ __ Cmp(x22, Operand(x25, SXTH));
+ __ Mrs(x5, NZCV);
+
+ __ Cmp(x22, Operand(x26, UXTW));
+ __ Mrs(x6, NZCV);
+
+ __ Cmp(x24, Operand(x26, SXTW, 1));
+ __ Mrs(x7, NZCV);
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_32(ZCFlag, w0);
+ ASSERT_EQUAL_32(ZCFlag, w1);
+ ASSERT_EQUAL_32(ZCFlag, w2);
+ ASSERT_EQUAL_32(NCFlag, w3);
+ ASSERT_EQUAL_32(NCFlag, w4);
+ ASSERT_EQUAL_32(ZCFlag, w5);
+ ASSERT_EQUAL_32(NCFlag, w6);
+ ASSERT_EQUAL_32(ZCFlag, w7);
+
+ TEARDOWN();
+}
+
+
+TEST(ccmp) {
+ // Checks Ccmp/Ccmn: when the condition holds the comparison runs; when it
+ // fails the supplied NZCV literal is installed instead. The raw ccmp
+ // (lowercase) form is used for the al/nv conditions, which the macro
+ // wrappers do not accept.
+ INIT_V8();
+ SETUP();
+
+ START();
+ __ Mov(w16, 0);
+ __ Mov(w17, 1);
+ __ Cmp(w16, w16);
+ __ Ccmp(w16, w17, NCFlag, eq);
+ __ Mrs(x0, NZCV);
+
+ __ Cmp(w16, w16);
+ __ Ccmp(w16, w17, NCFlag, ne);
+ __ Mrs(x1, NZCV);
+
+ __ Cmp(x16, x16);
+ __ Ccmn(x16, 2, NZCVFlag, eq);
+ __ Mrs(x2, NZCV);
+
+ __ Cmp(x16, x16);
+ __ Ccmn(x16, 2, NZCVFlag, ne);
+ __ Mrs(x3, NZCV);
+
+ __ ccmp(x16, x16, NZCVFlag, al);
+ __ Mrs(x4, NZCV);
+
+ __ ccmp(x16, x16, NZCVFlag, nv);
+ __ Mrs(x5, NZCV);
+
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_32(NFlag, w0);
+ ASSERT_EQUAL_32(NCFlag, w1);
+ ASSERT_EQUAL_32(NoFlag, w2);
+ ASSERT_EQUAL_32(NZCVFlag, w3);
+ ASSERT_EQUAL_32(ZCFlag, w4);
+ ASSERT_EQUAL_32(ZCFlag, w5);
+
+ TEARDOWN();
+}
+
+
+TEST(ccmp_wide_imm) {
+ // Checks Ccmp with immediates too wide for the 5-bit encoding, so the
+ // macro assembler must materialize them in a scratch register.
+ INIT_V8();
+ SETUP();
+
+ START();
+ __ Mov(w20, 0);
+
+ __ Cmp(w20, Operand(w20));
+ __ Ccmp(w20, Operand(0x12345678), NZCVFlag, eq);
+ __ Mrs(x0, NZCV);
+
+ __ Cmp(w20, Operand(w20));
+ __ Ccmp(x20, Operand(0xffffffffffffffffUL), NZCVFlag, eq);
+ __ Mrs(x1, NZCV);
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_32(NFlag, w0);
+ ASSERT_EQUAL_32(NoFlag, w1);
+
+ TEARDOWN();
+}
+
+
+TEST(ccmp_shift_extend) {
+ // Checks Ccmp with shifted and extended register operands, including the
+ // failed-condition case where the NZCV literal is installed (w4).
+ INIT_V8();
+ SETUP();
+
+ START();
+ __ Mov(w20, 0x2);
+ __ Mov(w21, 0x1);
+ __ Mov(x22, 0xffffffffffffffffUL);
+ __ Mov(x23, 0xff);
+ __ Mov(x24, 0xfffffffffffffffeUL);
+
+ __ Cmp(w20, Operand(w20));
+ __ Ccmp(w20, Operand(w21, LSL, 1), NZCVFlag, eq);
+ __ Mrs(x0, NZCV);
+
+ __ Cmp(w20, Operand(w20));
+ __ Ccmp(x22, Operand(x23, SXTB, 0), NZCVFlag, eq);
+ __ Mrs(x1, NZCV);
+
+ __ Cmp(w20, Operand(w20));
+ __ Ccmp(x24, Operand(x23, SXTB, 1), NZCVFlag, eq);
+ __ Mrs(x2, NZCV);
+
+ __ Cmp(w20, Operand(w20));
+ __ Ccmp(x24, Operand(x23, UXTB, 1), NZCVFlag, eq);
+ __ Mrs(x3, NZCV);
+
+ __ Cmp(w20, Operand(w20));
+ __ Ccmp(x24, Operand(x23, UXTB, 1), NZCVFlag, ne);
+ __ Mrs(x4, NZCV);
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_32(ZCFlag, w0);
+ ASSERT_EQUAL_32(ZCFlag, w1);
+ ASSERT_EQUAL_32(ZCFlag, w2);
+ ASSERT_EQUAL_32(NCFlag, w3);
+ ASSERT_EQUAL_32(NZCVFlag, w4);
+
+ TEARDOWN();
+}
+
+
+TEST(csel) {
+ // Checks Csel/Csinc/Csinv/Csneg and the aliases Cset/Csetm/Cinc/Cinv/Cneg,
+ // plus the V8-specific CzeroX/CmovX helpers. The raw csel (lowercase) form
+ // is used for al/nv, which the macro wrappers do not accept.
+ INIT_V8();
+ SETUP();
+
+ START();
+ __ Mov(x16, 0);
+ __ Mov(x24, 0x0000000f0000000fUL);
+ __ Mov(x25, 0x0000001f0000001fUL);
+ __ Mov(x26, 0);
+ __ Mov(x27, 0);
+
+ __ Cmp(w16, 0);
+ __ Csel(w0, w24, w25, eq);
+ __ Csel(w1, w24, w25, ne);
+ __ Csinc(w2, w24, w25, mi);
+ __ Csinc(w3, w24, w25, pl);
+
+ __ csel(w13, w24, w25, al);
+ __ csel(x14, x24, x25, nv);
+
+ __ Cmp(x16, 1);
+ __ Csinv(x4, x24, x25, gt);
+ __ Csinv(x5, x24, x25, le);
+ __ Csneg(x6, x24, x25, hs);
+ __ Csneg(x7, x24, x25, lo);
+
+ __ Cset(w8, ne);
+ __ Csetm(w9, ne);
+ __ Cinc(x10, x25, ne);
+ __ Cinv(x11, x24, ne);
+ __ Cneg(x12, x24, ne);
+
+ __ csel(w15, w24, w25, al);
+ __ csel(x18, x24, x25, nv);
+
+ __ CzeroX(x24, ne);
+ __ CzeroX(x25, eq);
+
+ __ CmovX(x26, x25, ne);
+ __ CmovX(x27, x25, eq);
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(0x0000000f, x0);
+ ASSERT_EQUAL_64(0x0000001f, x1);
+ ASSERT_EQUAL_64(0x00000020, x2);
+ ASSERT_EQUAL_64(0x0000000f, x3);
+ ASSERT_EQUAL_64(0xffffffe0ffffffe0UL, x4);
+ ASSERT_EQUAL_64(0x0000000f0000000fUL, x5);
+ ASSERT_EQUAL_64(0xffffffe0ffffffe1UL, x6);
+ ASSERT_EQUAL_64(0x0000000f0000000fUL, x7);
+ ASSERT_EQUAL_64(0x00000001, x8);
+ ASSERT_EQUAL_64(0xffffffff, x9);
+ ASSERT_EQUAL_64(0x0000001f00000020UL, x10);
+ ASSERT_EQUAL_64(0xfffffff0fffffff0UL, x11);
+ ASSERT_EQUAL_64(0xfffffff0fffffff1UL, x12);
+ ASSERT_EQUAL_64(0x0000000f, x13);
+ ASSERT_EQUAL_64(0x0000000f0000000fUL, x14);
+ ASSERT_EQUAL_64(0x0000000f, x15);
+ ASSERT_EQUAL_64(0x0000000f0000000fUL, x18);
+ ASSERT_EQUAL_64(0, x24);
+ ASSERT_EQUAL_64(0x0000001f0000001fUL, x25);
+ ASSERT_EQUAL_64(0x0000001f0000001fUL, x26);
+ ASSERT_EQUAL_64(0, x27);
+
+ TEARDOWN();
+}
+
+
+TEST(csel_imm) {
+ // Checks the Csel macro with small immediate "false" operands (-2..2 map
+ // onto csinv/csinc/czero encodings) and with shifted-register operands.
+ INIT_V8();
+ SETUP();
+
+ START();
+ __ Mov(x18, 0);
+ __ Mov(x19, 0x80000000);
+ __ Mov(x20, 0x8000000000000000UL);
+
+ __ Cmp(x18, Operand(0));
+ __ Csel(w0, w19, -2, ne);
+ __ Csel(w1, w19, -1, ne);
+ __ Csel(w2, w19, 0, ne);
+ __ Csel(w3, w19, 1, ne);
+ __ Csel(w4, w19, 2, ne);
+ __ Csel(w5, w19, Operand(w19, ASR, 31), ne);
+ __ Csel(w6, w19, Operand(w19, ROR, 1), ne);
+ __ Csel(w7, w19, 3, eq);
+
+ __ Csel(x8, x20, -2, ne);
+ __ Csel(x9, x20, -1, ne);
+ __ Csel(x10, x20, 0, ne);
+ __ Csel(x11, x20, 1, ne);
+ __ Csel(x12, x20, 2, ne);
+ __ Csel(x13, x20, Operand(x20, ASR, 63), ne);
+ __ Csel(x14, x20, Operand(x20, ROR, 1), ne);
+ __ Csel(x15, x20, 3, eq);
+
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_32(-2, w0);
+ ASSERT_EQUAL_32(-1, w1);
+ ASSERT_EQUAL_32(0, w2);
+ ASSERT_EQUAL_32(1, w3);
+ ASSERT_EQUAL_32(2, w4);
+ ASSERT_EQUAL_32(-1, w5);
+ ASSERT_EQUAL_32(0x40000000, w6);
+ ASSERT_EQUAL_32(0x80000000, w7);
+
+ ASSERT_EQUAL_64(-2, x8);
+ ASSERT_EQUAL_64(-1, x9);
+ ASSERT_EQUAL_64(0, x10);
+ ASSERT_EQUAL_64(1, x11);
+ ASSERT_EQUAL_64(2, x12);
+ ASSERT_EQUAL_64(-1, x13);
+ ASSERT_EQUAL_64(0x4000000000000000UL, x14);
+ ASSERT_EQUAL_64(0x8000000000000000UL, x15);
+
+ TEARDOWN();
+}
+
+
+TEST(lslv) {
+ // Checks variable logical-shift-left: lslv by xzr is a no-op, and Lsl by
+ // register masks the shift amount (mod 64 for x, mod 32 for w).
+ INIT_V8();
+ SETUP();
+
+ uint64_t value = 0x0123456789abcdefUL;
+ int shift[] = {1, 3, 5, 9, 17, 33};
+
+ START();
+ __ Mov(x0, value);
+ __ Mov(w1, shift[0]);
+ __ Mov(w2, shift[1]);
+ __ Mov(w3, shift[2]);
+ __ Mov(w4, shift[3]);
+ __ Mov(w5, shift[4]);
+ __ Mov(w6, shift[5]);
+
+ __ lslv(x0, x0, xzr);
+
+ __ Lsl(x16, x0, x1);
+ __ Lsl(x17, x0, x2);
+ __ Lsl(x18, x0, x3);
+ __ Lsl(x19, x0, x4);
+ __ Lsl(x20, x0, x5);
+ __ Lsl(x21, x0, x6);
+
+ __ Lsl(w22, w0, w1);
+ __ Lsl(w23, w0, w2);
+ __ Lsl(w24, w0, w3);
+ __ Lsl(w25, w0, w4);
+ __ Lsl(w26, w0, w5);
+ __ Lsl(w27, w0, w6);
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(value, x0);
+ ASSERT_EQUAL_64(value << (shift[0] & 63), x16);
+ ASSERT_EQUAL_64(value << (shift[1] & 63), x17);
+ ASSERT_EQUAL_64(value << (shift[2] & 63), x18);
+ ASSERT_EQUAL_64(value << (shift[3] & 63), x19);
+ ASSERT_EQUAL_64(value << (shift[4] & 63), x20);
+ ASSERT_EQUAL_64(value << (shift[5] & 63), x21);
+ ASSERT_EQUAL_32(value << (shift[0] & 31), w22);
+ ASSERT_EQUAL_32(value << (shift[1] & 31), w23);
+ ASSERT_EQUAL_32(value << (shift[2] & 31), w24);
+ ASSERT_EQUAL_32(value << (shift[3] & 31), w25);
+ ASSERT_EQUAL_32(value << (shift[4] & 31), w26);
+ ASSERT_EQUAL_32(value << (shift[5] & 31), w27);
+
+ TEARDOWN();
+}
+
+
+TEST(lsrv) {
+ // Checks variable logical-shift-right: lsrv by xzr is a no-op, and Lsr by
+ // register masks the shift amount (mod 64 for x, mod 32 for w).
+ INIT_V8();
+ SETUP();
+
+ uint64_t value = 0x0123456789abcdefUL;
+ int shift[] = {1, 3, 5, 9, 17, 33};
+
+ START();
+ __ Mov(x0, value);
+ __ Mov(w1, shift[0]);
+ __ Mov(w2, shift[1]);
+ __ Mov(w3, shift[2]);
+ __ Mov(w4, shift[3]);
+ __ Mov(w5, shift[4]);
+ __ Mov(w6, shift[5]);
+
+ __ lsrv(x0, x0, xzr);
+
+ __ Lsr(x16, x0, x1);
+ __ Lsr(x17, x0, x2);
+ __ Lsr(x18, x0, x3);
+ __ Lsr(x19, x0, x4);
+ __ Lsr(x20, x0, x5);
+ __ Lsr(x21, x0, x6);
+
+ __ Lsr(w22, w0, w1);
+ __ Lsr(w23, w0, w2);
+ __ Lsr(w24, w0, w3);
+ __ Lsr(w25, w0, w4);
+ __ Lsr(w26, w0, w5);
+ __ Lsr(w27, w0, w6);
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(value, x0);
+ ASSERT_EQUAL_64(value >> (shift[0] & 63), x16);
+ ASSERT_EQUAL_64(value >> (shift[1] & 63), x17);
+ ASSERT_EQUAL_64(value >> (shift[2] & 63), x18);
+ ASSERT_EQUAL_64(value >> (shift[3] & 63), x19);
+ ASSERT_EQUAL_64(value >> (shift[4] & 63), x20);
+ ASSERT_EQUAL_64(value >> (shift[5] & 63), x21);
+
+ // The w-form operates on the low 32 bits only.
+ value &= 0xffffffffUL;
+ ASSERT_EQUAL_32(value >> (shift[0] & 31), w22);
+ ASSERT_EQUAL_32(value >> (shift[1] & 31), w23);
+ ASSERT_EQUAL_32(value >> (shift[2] & 31), w24);
+ ASSERT_EQUAL_32(value >> (shift[3] & 31), w25);
+ ASSERT_EQUAL_32(value >> (shift[4] & 31), w26);
+ ASSERT_EQUAL_32(value >> (shift[5] & 31), w27);
+
+ TEARDOWN();
+}
+
+
+TEST(asrv) {
+ // Checks variable arithmetic-shift-right on a negative value: asrv by xzr
+ // is a no-op, and Asr by register masks the shift amount (mod 64/32).
+ INIT_V8();
+ SETUP();
+
+ int64_t value = 0xfedcba98fedcba98UL;
+ int shift[] = {1, 3, 5, 9, 17, 33};
+
+ START();
+ __ Mov(x0, value);
+ __ Mov(w1, shift[0]);
+ __ Mov(w2, shift[1]);
+ __ Mov(w3, shift[2]);
+ __ Mov(w4, shift[3]);
+ __ Mov(w5, shift[4]);
+ __ Mov(w6, shift[5]);
+
+ __ asrv(x0, x0, xzr);
+
+ __ Asr(x16, x0, x1);
+ __ Asr(x17, x0, x2);
+ __ Asr(x18, x0, x3);
+ __ Asr(x19, x0, x4);
+ __ Asr(x20, x0, x5);
+ __ Asr(x21, x0, x6);
+
+ __ Asr(w22, w0, w1);
+ __ Asr(w23, w0, w2);
+ __ Asr(w24, w0, w3);
+ __ Asr(w25, w0, w4);
+ __ Asr(w26, w0, w5);
+ __ Asr(w27, w0, w6);
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(value, x0);
+ ASSERT_EQUAL_64(value >> (shift[0] & 63), x16);
+ ASSERT_EQUAL_64(value >> (shift[1] & 63), x17);
+ ASSERT_EQUAL_64(value >> (shift[2] & 63), x18);
+ ASSERT_EQUAL_64(value >> (shift[3] & 63), x19);
+ ASSERT_EQUAL_64(value >> (shift[4] & 63), x20);
+ ASSERT_EQUAL_64(value >> (shift[5] & 63), x21);
+
+ // The w-form sign-extends from bit 31 of the low word.
+ int32_t value32 = static_cast<int32_t>(value & 0xffffffffUL);
+ ASSERT_EQUAL_32(value32 >> (shift[0] & 31), w22);
+ ASSERT_EQUAL_32(value32 >> (shift[1] & 31), w23);
+ ASSERT_EQUAL_32(value32 >> (shift[2] & 31), w24);
+ ASSERT_EQUAL_32(value32 >> (shift[3] & 31), w25);
+ ASSERT_EQUAL_32(value32 >> (shift[4] & 31), w26);
+ ASSERT_EQUAL_32(value32 >> (shift[5] & 31), w27);
+
+ TEARDOWN();
+}
+
+
+TEST(rorv) {
+ // Checks variable rotate-right: rorv by xzr is a no-op, then Ror by
+ // register for several amounts in both x and w forms.
+ INIT_V8();
+ SETUP();
+
+ uint64_t value = 0x0123456789abcdefUL;
+ int shift[] = {4, 8, 12, 16, 24, 36};
+
+ START();
+ __ Mov(x0, value);
+ __ Mov(w1, shift[0]);
+ __ Mov(w2, shift[1]);
+ __ Mov(w3, shift[2]);
+ __ Mov(w4, shift[3]);
+ __ Mov(w5, shift[4]);
+ __ Mov(w6, shift[5]);
+
+ __ rorv(x0, x0, xzr);
+
+ __ Ror(x16, x0, x1);
+ __ Ror(x17, x0, x2);
+ __ Ror(x18, x0, x3);
+ __ Ror(x19, x0, x4);
+ __ Ror(x20, x0, x5);
+ __ Ror(x21, x0, x6);
+
+ __ Ror(w22, w0, w1);
+ __ Ror(w23, w0, w2);
+ __ Ror(w24, w0, w3);
+ __ Ror(w25, w0, w4);
+ __ Ror(w26, w0, w5);
+ __ Ror(w27, w0, w6);
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(value, x0);
+ ASSERT_EQUAL_64(0xf0123456789abcdeUL, x16);
+ ASSERT_EQUAL_64(0xef0123456789abcdUL, x17);
+ ASSERT_EQUAL_64(0xdef0123456789abcUL, x18);
+ ASSERT_EQUAL_64(0xcdef0123456789abUL, x19);
+ ASSERT_EQUAL_64(0xabcdef0123456789UL, x20);
+ ASSERT_EQUAL_64(0x789abcdef0123456UL, x21);
+ ASSERT_EQUAL_32(0xf89abcde, w22);
+ ASSERT_EQUAL_32(0xef89abcd, w23);
+ ASSERT_EQUAL_32(0xdef89abc, w24);
+ ASSERT_EQUAL_32(0xcdef89ab, w25);
+ ASSERT_EQUAL_32(0xabcdef89, w26);
+ ASSERT_EQUAL_32(0xf89abcde, w27);
+
+ TEARDOWN();
+}
+
+
+TEST(bfm) {
+ // Checks the raw bfm instruction (bitfield move into an existing value)
+ // and its Bfi/Bfxil aliases; untouched bits must keep the 0x88 pattern.
+ INIT_V8();
+ SETUP();
+
+ START();
+ __ Mov(x1, 0x0123456789abcdefL);
+
+ __ Mov(x10, 0x8888888888888888L);
+ __ Mov(x11, 0x8888888888888888L);
+ __ Mov(x12, 0x8888888888888888L);
+ __ Mov(x13, 0x8888888888888888L);
+ __ Mov(w20, 0x88888888);
+ __ Mov(w21, 0x88888888);
+
+ __ bfm(x10, x1, 16, 31);
+ __ bfm(x11, x1, 32, 15);
+
+ __ bfm(w20, w1, 16, 23);
+ __ bfm(w21, w1, 24, 15);
+
+ // Aliases.
+ __ Bfi(x12, x1, 16, 8);
+ __ Bfxil(x13, x1, 16, 8);
+ END();
+
+ RUN();
+
+
+ ASSERT_EQUAL_64(0x88888888888889abL, x10);
+ ASSERT_EQUAL_64(0x8888cdef88888888L, x11);
+
+ ASSERT_EQUAL_32(0x888888ab, w20);
+ ASSERT_EQUAL_32(0x88cdef88, w21);
+
+ ASSERT_EQUAL_64(0x8888888888ef8888L, x12);
+ ASSERT_EQUAL_64(0x88888888888888abL, x13);
+
+ TEARDOWN();
+}
+
+
+TEST(sbfm) {
+ // Checks the raw sbfm instruction (signed bitfield move) and its aliases:
+ // Asr (immediate), Sbfiz, Sbfx, Sxtb, Sxth and Sxtw.
+ INIT_V8();
+ SETUP();
+
+ START();
+ __ Mov(x1, 0x0123456789abcdefL);
+ __ Mov(x2, 0xfedcba9876543210L);
+
+ __ sbfm(x10, x1, 16, 31);
+ __ sbfm(x11, x1, 32, 15);
+ __ sbfm(x12, x1, 32, 47);
+ __ sbfm(x13, x1, 48, 35);
+
+ __ sbfm(w14, w1, 16, 23);
+ __ sbfm(w15, w1, 24, 15);
+ __ sbfm(w16, w2, 16, 23);
+ __ sbfm(w17, w2, 24, 15);
+
+ // Aliases.
+ __ Asr(x18, x1, 32);
+ __ Asr(x19, x2, 32);
+ __ Sbfiz(x20, x1, 8, 16);
+ __ Sbfiz(x21, x2, 8, 16);
+ __ Sbfx(x22, x1, 8, 16);
+ __ Sbfx(x23, x2, 8, 16);
+ __ Sxtb(x24, w1);
+ __ Sxtb(x25, x2);
+ __ Sxth(x26, w1);
+ __ Sxth(x27, x2);
+ __ Sxtw(x28, w1);
+ __ Sxtw(x29, x2);
+ END();
+
+ RUN();
+
+
+ ASSERT_EQUAL_64(0xffffffffffff89abL, x10);
+ ASSERT_EQUAL_64(0xffffcdef00000000L, x11);
+ ASSERT_EQUAL_64(0x4567L, x12);
+ ASSERT_EQUAL_64(0x789abcdef0000L, x13);
+
+ ASSERT_EQUAL_32(0xffffffab, w14);
+ ASSERT_EQUAL_32(0xffcdef00, w15);
+ ASSERT_EQUAL_32(0x54, w16);
+ ASSERT_EQUAL_32(0x00321000, w17);
+
+ ASSERT_EQUAL_64(0x01234567L, x18);
+ ASSERT_EQUAL_64(0xfffffffffedcba98L, x19);
+ ASSERT_EQUAL_64(0xffffffffffcdef00L, x20);
+ ASSERT_EQUAL_64(0x321000L, x21);
+ ASSERT_EQUAL_64(0xffffffffffffabcdL, x22);
+ ASSERT_EQUAL_64(0x5432L, x23);
+ ASSERT_EQUAL_64(0xffffffffffffffefL, x24);
+ ASSERT_EQUAL_64(0x10, x25);
+ ASSERT_EQUAL_64(0xffffffffffffcdefL, x26);
+ ASSERT_EQUAL_64(0x3210, x27);
+ ASSERT_EQUAL_64(0xffffffff89abcdefL, x28);
+ ASSERT_EQUAL_64(0x76543210, x29);
+
+ TEARDOWN();
+}
+
+
+TEST(ubfm) {
+ // Checks the raw ubfm instruction (unsigned bitfield move) and its
+ // aliases: Lsl/Lsr (immediate), Ubfiz, Ubfx, Uxtb, Uxth and Uxtw.
+ INIT_V8();
+ SETUP();
+
+ START();
+ __ Mov(x1, 0x0123456789abcdefL);
+ __ Mov(x2, 0xfedcba9876543210L);
+
+ // Pre-fill destinations: ubfm zeroes bits outside the field.
+ __ Mov(x10, 0x8888888888888888L);
+ __ Mov(x11, 0x8888888888888888L);
+
+ __ ubfm(x10, x1, 16, 31);
+ __ ubfm(x11, x1, 32, 15);
+ __ ubfm(x12, x1, 32, 47);
+ __ ubfm(x13, x1, 48, 35);
+
+ __ ubfm(w25, w1, 16, 23);
+ __ ubfm(w26, w1, 24, 15);
+ __ ubfm(w27, w2, 16, 23);
+ __ ubfm(w28, w2, 24, 15);
+
+ // Aliases
+ __ Lsl(x15, x1, 63);
+ __ Lsl(x16, x1, 0);
+ __ Lsr(x17, x1, 32);
+ __ Ubfiz(x18, x1, 8, 16);
+ __ Ubfx(x19, x1, 8, 16);
+ __ Uxtb(x20, x1);
+ __ Uxth(x21, x1);
+ __ Uxtw(x22, x1);
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(0x00000000000089abL, x10);
+ ASSERT_EQUAL_64(0x0000cdef00000000L, x11);
+ ASSERT_EQUAL_64(0x4567L, x12);
+ ASSERT_EQUAL_64(0x789abcdef0000L, x13);
+
+ ASSERT_EQUAL_32(0x000000ab, w25);
+ ASSERT_EQUAL_32(0x00cdef00, w26);
+ ASSERT_EQUAL_32(0x54, w27);
+ ASSERT_EQUAL_32(0x00321000, w28);
+
+ ASSERT_EQUAL_64(0x8000000000000000L, x15);
+ ASSERT_EQUAL_64(0x0123456789abcdefL, x16);
+ ASSERT_EQUAL_64(0x01234567L, x17);
+ ASSERT_EQUAL_64(0xcdef00L, x18);
+ ASSERT_EQUAL_64(0xabcdL, x19);
+ ASSERT_EQUAL_64(0xefL, x20);
+ ASSERT_EQUAL_64(0xcdefL, x21);
+ ASSERT_EQUAL_64(0x89abcdefL, x22);
+
+ TEARDOWN();
+}
+
+
+TEST(extr) {
+ // Checks Extr (extract a register pair) and its Ror immediate alias in
+ // both w and x forms.
+ INIT_V8();
+ SETUP();
+
+ START();
+ __ Mov(x1, 0x0123456789abcdefL);
+ __ Mov(x2, 0xfedcba9876543210L);
+
+ __ Extr(w10, w1, w2, 0);
+ __ Extr(w11, w1, w2, 1);
+ __ Extr(x12, x2, x1, 2);
+
+ __ Ror(w13, w1, 0);
+ __ Ror(w14, w2, 17);
+ __ Ror(w15, w1, 31);
+ __ Ror(x18, x2, 1);
+ __ Ror(x19, x1, 63);
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(0x76543210, x10);
+ ASSERT_EQUAL_64(0xbb2a1908, x11);
+ ASSERT_EQUAL_64(0x0048d159e26af37bUL, x12);
+ ASSERT_EQUAL_64(0x89abcdef, x13);
+ ASSERT_EQUAL_64(0x19083b2a, x14);
+ ASSERT_EQUAL_64(0x13579bdf, x15);
+ ASSERT_EQUAL_64(0x7f6e5d4c3b2a1908UL, x18);
+ ASSERT_EQUAL_64(0x02468acf13579bdeUL, x19);
+
+ TEARDOWN();
+}
+
+
+TEST(fmov_imm) {
+ // Checks Fmov with immediate operands: encodable FP immediates, zero, and
+ // infinities, in both single (s) and double (d) precision.
+ INIT_V8();
+ SETUP();
+
+ START();
+ __ Fmov(s11, 1.0);
+ __ Fmov(d22, -13.0);
+ __ Fmov(s1, 255.0);
+ __ Fmov(d2, 12.34567);
+ __ Fmov(s3, 0.0);
+ __ Fmov(d4, 0.0);
+ __ Fmov(s5, kFP32PositiveInfinity);
+ __ Fmov(d6, kFP64NegativeInfinity);
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_FP32(1.0, s11);
+ ASSERT_EQUAL_FP64(-13.0, d22);
+ ASSERT_EQUAL_FP32(255.0, s1);
+ ASSERT_EQUAL_FP64(12.34567, d2);
+ ASSERT_EQUAL_FP32(0.0, s3);
+ ASSERT_EQUAL_FP64(0.0, d4);
+ ASSERT_EQUAL_FP32(kFP32PositiveInfinity, s5);
+ ASSERT_EQUAL_FP64(kFP64NegativeInfinity, d6);
+
+ TEARDOWN();
+}
+
+
+TEST(fmov_reg) {
+ // Checks register-to-register Fmov in all directions: fp<->fp, fp<->core
+ // (w/s and x/d), and a self-move (s6, s6).
+ INIT_V8();
+ SETUP();
+
+ START();
+ __ Fmov(s20, 1.0);
+ __ Fmov(w10, s20);
+ __ Fmov(s30, w10);
+ __ Fmov(s5, s20);
+ __ Fmov(d1, -13.0);
+ __ Fmov(x1, d1);
+ __ Fmov(d2, x1);
+ __ Fmov(d4, d1);
+ __ Fmov(d6, rawbits_to_double(0x0123456789abcdefL));
+ __ Fmov(s6, s6);
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_32(float_to_rawbits(1.0), w10);
+ ASSERT_EQUAL_FP32(1.0, s30);
+ ASSERT_EQUAL_FP32(1.0, s5);
+ ASSERT_EQUAL_64(double_to_rawbits(-13.0), x1);
+ ASSERT_EQUAL_FP64(-13.0, d2);
+ ASSERT_EQUAL_FP64(-13.0, d4);
+ ASSERT_EQUAL_FP32(rawbits_to_float(0x89abcdef), s6);
+
+ TEARDOWN();
+}
+
+
+TEST(fadd) {
+ // Checks Fadd in single and double precision, including -0.0, +0.0 and
+ // infinity operands.
+ INIT_V8();
+ SETUP();
+
+ START();
+ __ Fmov(s13, -0.0);
+ __ Fmov(s14, kFP32PositiveInfinity);
+ __ Fmov(s15, kFP32NegativeInfinity);
+ __ Fmov(s16, 3.25);
+ __ Fmov(s17, 1.0);
+ __ Fmov(s18, 0);
+
+ __ Fmov(d26, -0.0);
+ __ Fmov(d27, kFP64PositiveInfinity);
+ __ Fmov(d28, kFP64NegativeInfinity);
+ __ Fmov(d29, 0);
+ __ Fmov(d30, -2.0);
+ __ Fmov(d31, 2.25);
+
+ __ Fadd(s0, s16, s17);
+ __ Fadd(s1, s17, s18);
+ __ Fadd(s2, s13, s17);
+ __ Fadd(s3, s14, s17);
+ __ Fadd(s4, s15, s17);
+
+ __ Fadd(d5, d30, d31);
+ __ Fadd(d6, d29, d31);
+ __ Fadd(d7, d26, d31);
+ __ Fadd(d8, d27, d31);
+ __ Fadd(d9, d28, d31);
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_FP32(4.25, s0);
+ ASSERT_EQUAL_FP32(1.0, s1);
+ ASSERT_EQUAL_FP32(1.0, s2);
+ ASSERT_EQUAL_FP32(kFP32PositiveInfinity, s3);
+ ASSERT_EQUAL_FP32(kFP32NegativeInfinity, s4);
+ ASSERT_EQUAL_FP64(0.25, d5);
+ ASSERT_EQUAL_FP64(2.25, d6);
+ ASSERT_EQUAL_FP64(2.25, d7);
+ ASSERT_EQUAL_FP64(kFP64PositiveInfinity, d8);
+ ASSERT_EQUAL_FP64(kFP64NegativeInfinity, d9);
+
+ TEARDOWN();
+}
+
+
+TEST(fsub) {
+ // Checks Fsub in single and double precision, including -0.0, +0.0 and
+ // infinity operands.
+ INIT_V8();
+ SETUP();
+
+ START();
+ __ Fmov(s13, -0.0);
+ __ Fmov(s14, kFP32PositiveInfinity);
+ __ Fmov(s15, kFP32NegativeInfinity);
+ __ Fmov(s16, 3.25);
+ __ Fmov(s17, 1.0);
+ __ Fmov(s18, 0);
+
+ __ Fmov(d26, -0.0);
+ __ Fmov(d27, kFP64PositiveInfinity);
+ __ Fmov(d28, kFP64NegativeInfinity);
+ __ Fmov(d29, 0);
+ __ Fmov(d30, -2.0);
+ __ Fmov(d31, 2.25);
+
+ __ Fsub(s0, s16, s17);
+ __ Fsub(s1, s17, s18);
+ __ Fsub(s2, s13, s17);
+ __ Fsub(s3, s17, s14);
+ __ Fsub(s4, s17, s15);
+
+ __ Fsub(d5, d30, d31);
+ __ Fsub(d6, d29, d31);
+ __ Fsub(d7, d26, d31);
+ __ Fsub(d8, d31, d27);
+ __ Fsub(d9, d31, d28);
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_FP32(2.25, s0);
+ ASSERT_EQUAL_FP32(1.0, s1);
+ ASSERT_EQUAL_FP32(-1.0, s2);
+ ASSERT_EQUAL_FP32(kFP32NegativeInfinity, s3);
+ ASSERT_EQUAL_FP32(kFP32PositiveInfinity, s4);
+ ASSERT_EQUAL_FP64(-4.25, d5);
+ ASSERT_EQUAL_FP64(-2.25, d6);
+ ASSERT_EQUAL_FP64(-2.25, d7);
+ ASSERT_EQUAL_FP64(kFP64NegativeInfinity, d8);
+ ASSERT_EQUAL_FP64(kFP64PositiveInfinity, d9);
+
+ TEARDOWN();
+}
+
+
+TEST(fmul) {
+ // Checks Fmul in single and double precision, including zero-times-x,
+ // (-0.0)*(-0.0) and infinity-times-negative operands.
+ INIT_V8();
+ SETUP();
+
+ START();
+ __ Fmov(s13, -0.0);
+ __ Fmov(s14, kFP32PositiveInfinity);
+ __ Fmov(s15, kFP32NegativeInfinity);
+ __ Fmov(s16, 3.25);
+ __ Fmov(s17, 2.0);
+ __ Fmov(s18, 0);
+ __ Fmov(s19, -2.0);
+
+ __ Fmov(d26, -0.0);
+ __ Fmov(d27, kFP64PositiveInfinity);
+ __ Fmov(d28, kFP64NegativeInfinity);
+ __ Fmov(d29, 0);
+ __ Fmov(d30, -2.0);
+ __ Fmov(d31, 2.25);
+
+ __ Fmul(s0, s16, s17);
+ __ Fmul(s1, s17, s18);
+ __ Fmul(s2, s13, s13);
+ __ Fmul(s3, s14, s19);
+ __ Fmul(s4, s15, s19);
+
+ __ Fmul(d5, d30, d31);
+ __ Fmul(d6, d29, d31);
+ __ Fmul(d7, d26, d26);
+ __ Fmul(d8, d27, d30);
+ __ Fmul(d9, d28, d30);
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_FP32(6.5, s0);
+ ASSERT_EQUAL_FP32(0.0, s1);
+ ASSERT_EQUAL_FP32(0.0, s2);
+ ASSERT_EQUAL_FP32(kFP32NegativeInfinity, s3);
+ ASSERT_EQUAL_FP32(kFP32PositiveInfinity, s4);
+ ASSERT_EQUAL_FP64(-4.5, d5);
+ ASSERT_EQUAL_FP64(0.0, d6);
+ ASSERT_EQUAL_FP64(0.0, d7);
+ ASSERT_EQUAL_FP64(kFP64NegativeInfinity, d8);
+ ASSERT_EQUAL_FP64(kFP64PositiveInfinity, d9);
+
+ TEARDOWN();
+}
+
+
+// Runs Fmadd/Fmsub/Fnmadd/Fnmsub on (n, m, a) and checks the results against
+// the expected fused values: fmadd = fma(n, m, a), fmsub = fma(-n, m, a),
+// with Fnmadd/Fnmsub expected to produce their negations.
+static void FmaddFmsubDoubleHelper(double n, double m, double a,
+ double fmadd, double fmsub) {
+ SETUP();
+ START();
+
+ __ Fmov(d0, n);
+ __ Fmov(d1, m);
+ __ Fmov(d2, a);
+ __ Fmadd(d28, d0, d1, d2);
+ __ Fmsub(d29, d0, d1, d2);
+ __ Fnmadd(d30, d0, d1, d2);
+ __ Fnmsub(d31, d0, d1, d2);
+
+ END();
+ RUN();
+
+ ASSERT_EQUAL_FP64(fmadd, d28);
+ ASSERT_EQUAL_FP64(fmsub, d29);
+ ASSERT_EQUAL_FP64(-fmadd, d30);
+ ASSERT_EQUAL_FP64(-fmsub, d31);
+
+ TEARDOWN();
+}
+
+
+TEST(fmadd_fmsub_double) {
+ // Exercises every (n, m, a) combination drawn from a list of interesting
+ // doubles (normals, +/-0, infinities, subnormals, NaNs) against the C
+ // library fma() as the reference.
+ INIT_V8();
+ double inputs[] = {
+ // Normal numbers, including -0.0.
+ DBL_MAX, DBL_MIN, 3.25, 2.0, 0.0,
+ -DBL_MAX, -DBL_MIN, -3.25, -2.0, -0.0,
+ // Infinities.
+ kFP64NegativeInfinity, kFP64PositiveInfinity,
+ // Subnormal numbers.
+ rawbits_to_double(0x000fffffffffffff),
+ rawbits_to_double(0x0000000000000001),
+ rawbits_to_double(0x000123456789abcd),
+ -rawbits_to_double(0x000fffffffffffff),
+ -rawbits_to_double(0x0000000000000001),
+ -rawbits_to_double(0x000123456789abcd),
+ // NaN.
+ kFP64QuietNaN,
+ -kFP64QuietNaN,
+ };
+ const int count = sizeof(inputs) / sizeof(inputs[0]);
+
+ for (int in = 0; in < count; in++) {
+ double n = inputs[in];
+ for (int im = 0; im < count; im++) {
+ double m = inputs[im];
+ for (int ia = 0; ia < count; ia++) {
+ double a = inputs[ia];
+ double fmadd = fma(n, m, a);
+ double fmsub = fma(-n, m, a);
+
+ FmaddFmsubDoubleHelper(n, m, a, fmadd, fmsub);
+ }
+ }
+ }
+}
+
+
+TEST(fmadd_fmsub_double_rounding) {
+ // Fuzzes Fmadd/Fmsub with random finite doubles, specifically collecting
+ // cases where the fused result differs from the separately-rounded
+ // a + n * m (i.e. where an intermediate rounding stage would be wrong).
+ INIT_V8();
+ // Make sure we run plenty of tests where an intermediate rounding stage would
+ // produce an incorrect result.
+ const int limit = 1000;
+ int count_fmadd = 0;
+ int count_fmsub = 0;
+
+ // Fixed seed so the test is reproducible.
+ uint16_t seed[3] = {42, 43, 44};
+ seed48(seed);
+
+ while ((count_fmadd < limit) || (count_fmsub < limit)) {
+ double n, m, a;
+ uint32_t r[2];
+ ASSERT(sizeof(r) == sizeof(n));
+
+ // Build each double from two 32-bit random words.
+ r[0] = mrand48();
+ r[1] = mrand48();
+ memcpy(&n, r, sizeof(r));
+ r[0] = mrand48();
+ r[1] = mrand48();
+ memcpy(&m, r, sizeof(r));
+ r[0] = mrand48();
+ r[1] = mrand48();
+ memcpy(&a, r, sizeof(r));
+
+ if (!std::isfinite(a) || !std::isfinite(n) || !std::isfinite(m)) {
+ continue;
+ }
+
+ // Calculate the expected results.
+ double fmadd = fma(n, m, a);
+ double fmsub = fma(-n, m, a);
+
+ bool test_fmadd = (fmadd != (a + n * m));
+ bool test_fmsub = (fmsub != (a - n * m));
+
+ // If rounding would produce a different result, increment the test count.
+ count_fmadd += test_fmadd;
+ count_fmsub += test_fmsub;
+
+ if (test_fmadd || test_fmsub) {
+ FmaddFmsubDoubleHelper(n, m, a, fmadd, fmsub);
+ }
+ }
+}
+
+
+static void FmaddFmsubFloatHelper(float n, float m, float a,
+ float fmadd, float fmsub) {
+ SETUP();
+ START();
+
+ __ Fmov(s0, n);
+ __ Fmov(s1, m);
+ __ Fmov(s2, a);
+ __ Fmadd(s30, s0, s1, s2);
+ __ Fmsub(s31, s0, s1, s2);
+
+ END();
+ RUN();
+
+ ASSERT_EQUAL_FP32(fmadd, s30);
+ ASSERT_EQUAL_FP32(fmsub, s31);
+
+ TEARDOWN();
+}
+
+
+TEST(fmadd_fmsub_float) {
+ INIT_V8();
+ float inputs[] = {
+ // Normal numbers, including -0.0f.
+ FLT_MAX, FLT_MIN, 3.25f, 2.0f, 0.0f,
+ -FLT_MAX, -FLT_MIN, -3.25f, -2.0f, -0.0f,
+ // Infinities.
+ kFP32NegativeInfinity, kFP32PositiveInfinity,
+ // Subnormal numbers.
+ rawbits_to_float(0x07ffffff),
+ rawbits_to_float(0x00000001),
+ rawbits_to_float(0x01234567),
+ -rawbits_to_float(0x07ffffff),
+ -rawbits_to_float(0x00000001),
+ -rawbits_to_float(0x01234567),
+ // NaN.
+ kFP32QuietNaN,
+ -kFP32QuietNaN,
+ };
+ const int count = sizeof(inputs) / sizeof(inputs[0]);
+
+ for (int in = 0; in < count; in++) {
+ float n = inputs[in];
+ for (int im = 0; im < count; im++) {
+ float m = inputs[im];
+ for (int ia = 0; ia < count; ia++) {
+ float a = inputs[ia];
+ float fmadd = fmaf(n, m, a);
+ float fmsub = fmaf(-n, m, a);
+
+ FmaddFmsubFloatHelper(n, m, a, fmadd, fmsub);
+ }
+ }
+ }
+}
+
+
+TEST(fmadd_fmsub_float_rounding) {
+ INIT_V8();
+ // Make sure we run plenty of tests where an intermediate rounding stage would
+ // produce an incorrect result.
+ const int limit = 1000;
+ int count_fmadd = 0;
+ int count_fmsub = 0;
+
+ uint16_t seed[3] = {42, 43, 44};
+ seed48(seed);
+
+ while ((count_fmadd < limit) || (count_fmsub < limit)) {
+ float n, m, a;
+ uint32_t r;
+ ASSERT(sizeof(r) == sizeof(n));
+
+ r = mrand48();
+ memcpy(&n, &r, sizeof(r));
+ r = mrand48();
+ memcpy(&m, &r, sizeof(r));
+ r = mrand48();
+ memcpy(&a, &r, sizeof(r));
+
+ if (!std::isfinite(a) || !std::isfinite(n) || !std::isfinite(m)) {
+ continue;
+ }
+
+ // Calculate the expected results.
+ float fmadd = fmaf(n, m, a);
+ float fmsub = fmaf(-n, m, a);
+
+ bool test_fmadd = (fmadd != (a + n * m));
+ bool test_fmsub = (fmsub != (a - n * m));
+
+ // If rounding would produce a different result, increment the test count.
+ count_fmadd += test_fmadd;
+ count_fmsub += test_fmsub;
+
+ if (test_fmadd || test_fmsub) {
+ FmaddFmsubFloatHelper(n, m, a, fmadd, fmsub);
+ }
+ }
+}
+
+
+TEST(fdiv) {
+ INIT_V8();
+ SETUP();
+
+ START();
+ __ Fmov(s13, -0.0);
+ __ Fmov(s14, kFP32PositiveInfinity);
+ __ Fmov(s15, kFP32NegativeInfinity);
+ __ Fmov(s16, 3.25);
+ __ Fmov(s17, 2.0);
+ __ Fmov(s18, 2.0);
+ __ Fmov(s19, -2.0);
+
+ __ Fmov(d26, -0.0);
+ __ Fmov(d27, kFP64PositiveInfinity);
+ __ Fmov(d28, kFP64NegativeInfinity);
+ __ Fmov(d29, 0.0);
+ __ Fmov(d30, -2.0);
+ __ Fmov(d31, 2.25);
+
+ __ Fdiv(s0, s16, s17);
+ __ Fdiv(s1, s17, s18);
+ __ Fdiv(s2, s13, s17);
+ __ Fdiv(s3, s17, s14);
+ __ Fdiv(s4, s17, s15);
+ __ Fdiv(d5, d31, d30);
+ __ Fdiv(d6, d29, d31);
+ __ Fdiv(d7, d26, d31);
+ __ Fdiv(d8, d31, d27);
+ __ Fdiv(d9, d31, d28);
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_FP32(1.625, s0);
+ ASSERT_EQUAL_FP32(1.0, s1);
+ ASSERT_EQUAL_FP32(-0.0, s2);
+ ASSERT_EQUAL_FP32(0.0, s3);
+ ASSERT_EQUAL_FP32(-0.0, s4);
+ ASSERT_EQUAL_FP64(-1.125, d5);
+ ASSERT_EQUAL_FP64(0.0, d6);
+ ASSERT_EQUAL_FP64(-0.0, d7);
+ ASSERT_EQUAL_FP64(0.0, d8);
+ ASSERT_EQUAL_FP64(-0.0, d9);
+
+ TEARDOWN();
+}
+
+
+static float MinMaxHelper(float n,
+ float m,
+ bool min,
+ float quiet_nan_substitute = 0.0) {
+ const uint64_t kFP32QuietNaNMask = 0x00400000UL;
+ uint32_t raw_n = float_to_rawbits(n);
+ uint32_t raw_m = float_to_rawbits(m);
+
+ if (isnan(n) && ((raw_n & kFP32QuietNaNMask) == 0)) {
+ // n is signalling NaN.
+ return n;
+ } else if (isnan(m) && ((raw_m & kFP32QuietNaNMask) == 0)) {
+ // m is signalling NaN.
+ return m;
+ } else if (quiet_nan_substitute == 0.0) {
+ if (isnan(n)) {
+ // n is quiet NaN.
+ return n;
+ } else if (isnan(m)) {
+ // m is quiet NaN.
+ return m;
+ }
+ } else {
+ // Substitute n or m if one is quiet, but not both.
+ if (isnan(n) && !isnan(m)) {
+ // n is quiet NaN: replace with substitute.
+ n = quiet_nan_substitute;
+ } else if (!isnan(n) && isnan(m)) {
+ // m is quiet NaN: replace with substitute.
+ m = quiet_nan_substitute;
+ }
+ }
+
+ if ((n == 0.0) && (m == 0.0) &&
+ (copysign(1.0, n) != copysign(1.0, m))) {
+ return min ? -0.0 : 0.0;
+ }
+
+ return min ? fminf(n, m) : fmaxf(n, m);
+}
+
+
+static double MinMaxHelper(double n,
+ double m,
+ bool min,
+ double quiet_nan_substitute = 0.0) {
+ const uint64_t kFP64QuietNaNMask = 0x0008000000000000UL;
+ uint64_t raw_n = double_to_rawbits(n);
+ uint64_t raw_m = double_to_rawbits(m);
+
+ if (isnan(n) && ((raw_n & kFP64QuietNaNMask) == 0)) {
+ // n is signalling NaN.
+ return n;
+ } else if (isnan(m) && ((raw_m & kFP64QuietNaNMask) == 0)) {
+ // m is signalling NaN.
+ return m;
+ } else if (quiet_nan_substitute == 0.0) {
+ if (isnan(n)) {
+ // n is quiet NaN.
+ return n;
+ } else if (isnan(m)) {
+ // m is quiet NaN.
+ return m;
+ }
+ } else {
+ // Substitute n or m if one is quiet, but not both.
+ if (isnan(n) && !isnan(m)) {
+ // n is quiet NaN: replace with substitute.
+ n = quiet_nan_substitute;
+ } else if (!isnan(n) && isnan(m)) {
+ // m is quiet NaN: replace with substitute.
+ m = quiet_nan_substitute;
+ }
+ }
+
+ if ((n == 0.0) && (m == 0.0) &&
+ (copysign(1.0, n) != copysign(1.0, m))) {
+ return min ? -0.0 : 0.0;
+ }
+
+ return min ? fmin(n, m) : fmax(n, m);
+}
+
+
+static void FminFmaxDoubleHelper(double n, double m, double min, double max,
+ double minnm, double maxnm) {
+ SETUP();
+
+ START();
+ __ Fmov(d0, n);
+ __ Fmov(d1, m);
+ __ Fmin(d28, d0, d1);
+ __ Fmax(d29, d0, d1);
+ __ Fminnm(d30, d0, d1);
+ __ Fmaxnm(d31, d0, d1);
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_FP64(min, d28);
+ ASSERT_EQUAL_FP64(max, d29);
+ ASSERT_EQUAL_FP64(minnm, d30);
+ ASSERT_EQUAL_FP64(maxnm, d31);
+
+ TEARDOWN();
+}
+
+
+TEST(fmax_fmin_d) {
+ INIT_V8();
+ // Bootstrap tests.
+ FminFmaxDoubleHelper(0, 0, 0, 0, 0, 0);
+ FminFmaxDoubleHelper(0, 1, 0, 1, 0, 1);
+ FminFmaxDoubleHelper(kFP64PositiveInfinity, kFP64NegativeInfinity,
+ kFP64NegativeInfinity, kFP64PositiveInfinity,
+ kFP64NegativeInfinity, kFP64PositiveInfinity);
+ FminFmaxDoubleHelper(kFP64SignallingNaN, 0,
+ kFP64SignallingNaN, kFP64SignallingNaN,
+ kFP64SignallingNaN, kFP64SignallingNaN);
+ FminFmaxDoubleHelper(kFP64QuietNaN, 0,
+ kFP64QuietNaN, kFP64QuietNaN,
+ 0, 0);
+ FminFmaxDoubleHelper(kFP64QuietNaN, kFP64SignallingNaN,
+ kFP64SignallingNaN, kFP64SignallingNaN,
+ kFP64SignallingNaN, kFP64SignallingNaN);
+
+ // Iterate over all combinations of inputs.
+ double inputs[] = { DBL_MAX, DBL_MIN, 1.0, 0.0,
+ -DBL_MAX, -DBL_MIN, -1.0, -0.0,
+ kFP64PositiveInfinity, kFP64NegativeInfinity,
+ kFP64QuietNaN, kFP64SignallingNaN };
+
+ const int count = sizeof(inputs) / sizeof(inputs[0]);
+
+ for (int in = 0; in < count; in++) {
+ double n = inputs[in];
+ for (int im = 0; im < count; im++) {
+ double m = inputs[im];
+ FminFmaxDoubleHelper(n, m,
+ MinMaxHelper(n, m, true),
+ MinMaxHelper(n, m, false),
+ MinMaxHelper(n, m, true, kFP64PositiveInfinity),
+ MinMaxHelper(n, m, false, kFP64NegativeInfinity));
+ }
+ }
+}
+
+
+static void FminFmaxFloatHelper(float n, float m, float min, float max,
+ float minnm, float maxnm) {
+ SETUP();
+
+ START();
+ // TODO(all): Signalling NaNs are sometimes converted by the C compiler to
+ // quiet NaNs on implicit casts from float to double. Here, we move the raw
+ // bits into a W register first, so we get the correct value. Fix Fmov so this
+ // additional step is no longer needed.
+ __ Mov(w0, float_to_rawbits(n));
+ __ Fmov(s0, w0);
+ __ Mov(w0, float_to_rawbits(m));
+ __ Fmov(s1, w0);
+ __ Fmin(s28, s0, s1);
+ __ Fmax(s29, s0, s1);
+ __ Fminnm(s30, s0, s1);
+ __ Fmaxnm(s31, s0, s1);
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_FP32(min, s28);
+ ASSERT_EQUAL_FP32(max, s29);
+ ASSERT_EQUAL_FP32(minnm, s30);
+ ASSERT_EQUAL_FP32(maxnm, s31);
+
+ TEARDOWN();
+}
+
+
+TEST(fmax_fmin_s) {
+ INIT_V8();
+ // Bootstrap tests.
+ FminFmaxFloatHelper(0, 0, 0, 0, 0, 0);
+ FminFmaxFloatHelper(0, 1, 0, 1, 0, 1);
+ FminFmaxFloatHelper(kFP32PositiveInfinity, kFP32NegativeInfinity,
+ kFP32NegativeInfinity, kFP32PositiveInfinity,
+ kFP32NegativeInfinity, kFP32PositiveInfinity);
+ FminFmaxFloatHelper(kFP32SignallingNaN, 0,
+ kFP32SignallingNaN, kFP32SignallingNaN,
+ kFP32SignallingNaN, kFP32SignallingNaN);
+ FminFmaxFloatHelper(kFP32QuietNaN, 0,
+ kFP32QuietNaN, kFP32QuietNaN,
+ 0, 0);
+ FminFmaxFloatHelper(kFP32QuietNaN, kFP32SignallingNaN,
+ kFP32SignallingNaN, kFP32SignallingNaN,
+ kFP32SignallingNaN, kFP32SignallingNaN);
+
+ // Iterate over all combinations of inputs.
+ float inputs[] = { FLT_MAX, FLT_MIN, 1.0, 0.0,
+ -FLT_MAX, -FLT_MIN, -1.0, -0.0,
+ kFP32PositiveInfinity, kFP32NegativeInfinity,
+ kFP32QuietNaN, kFP32SignallingNaN };
+
+ const int count = sizeof(inputs) / sizeof(inputs[0]);
+
+ for (int in = 0; in < count; in++) {
+ float n = inputs[in];
+ for (int im = 0; im < count; im++) {
+ float m = inputs[im];
+ FminFmaxFloatHelper(n, m,
+ MinMaxHelper(n, m, true),
+ MinMaxHelper(n, m, false),
+ MinMaxHelper(n, m, true, kFP32PositiveInfinity),
+ MinMaxHelper(n, m, false, kFP32NegativeInfinity));
+ }
+ }
+}
+
+
+TEST(fccmp) {
+ INIT_V8();
+ SETUP();
+
+ START();
+ __ Fmov(s16, 0.0);
+ __ Fmov(s17, 0.5);
+ __ Fmov(d18, -0.5);
+ __ Fmov(d19, -1.0);
+ __ Mov(x20, 0);
+
+ __ Cmp(x20, 0);
+ __ Fccmp(s16, s16, NoFlag, eq);
+ __ Mrs(x0, NZCV);
+
+ __ Cmp(x20, 0);
+ __ Fccmp(s16, s16, VFlag, ne);
+ __ Mrs(x1, NZCV);
+
+ __ Cmp(x20, 0);
+ __ Fccmp(s16, s17, CFlag, ge);
+ __ Mrs(x2, NZCV);
+
+ __ Cmp(x20, 0);
+ __ Fccmp(s16, s17, CVFlag, lt);
+ __ Mrs(x3, NZCV);
+
+ __ Cmp(x20, 0);
+ __ Fccmp(d18, d18, ZFlag, le);
+ __ Mrs(x4, NZCV);
+
+ __ Cmp(x20, 0);
+ __ Fccmp(d18, d18, ZVFlag, gt);
+ __ Mrs(x5, NZCV);
+
+ __ Cmp(x20, 0);
+ __ Fccmp(d18, d19, ZCVFlag, ls);
+ __ Mrs(x6, NZCV);
+
+ __ Cmp(x20, 0);
+ __ Fccmp(d18, d19, NFlag, hi);
+ __ Mrs(x7, NZCV);
+
+ __ fccmp(s16, s16, NFlag, al);
+ __ Mrs(x8, NZCV);
+
+ __ fccmp(d18, d18, NFlag, nv);
+ __ Mrs(x9, NZCV);
+
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_32(ZCFlag, w0);
+ ASSERT_EQUAL_32(VFlag, w1);
+ ASSERT_EQUAL_32(NFlag, w2);
+ ASSERT_EQUAL_32(CVFlag, w3);
+ ASSERT_EQUAL_32(ZCFlag, w4);
+ ASSERT_EQUAL_32(ZVFlag, w5);
+ ASSERT_EQUAL_32(CFlag, w6);
+ ASSERT_EQUAL_32(NFlag, w7);
+ ASSERT_EQUAL_32(ZCFlag, w8);
+ ASSERT_EQUAL_32(ZCFlag, w9);
+
+ TEARDOWN();
+}
+
+
+TEST(fcmp) {
+ INIT_V8();
+ SETUP();
+
+ START();
+
+ // Some of these tests require a floating-point scratch register assigned to
+ // the macro assembler, but most do not.
+ __ SetFPScratchRegister(NoFPReg);
+
+ __ Fmov(s8, 0.0);
+ __ Fmov(s9, 0.5);
+ __ Mov(w18, 0x7f800001); // Single precision NaN.
+ __ Fmov(s18, w18);
+
+ __ Fcmp(s8, s8);
+ __ Mrs(x0, NZCV);
+ __ Fcmp(s8, s9);
+ __ Mrs(x1, NZCV);
+ __ Fcmp(s9, s8);
+ __ Mrs(x2, NZCV);
+ __ Fcmp(s8, s18);
+ __ Mrs(x3, NZCV);
+ __ Fcmp(s18, s18);
+ __ Mrs(x4, NZCV);
+ __ Fcmp(s8, 0.0);
+ __ Mrs(x5, NZCV);
+ __ SetFPScratchRegister(d0);
+ __ Fcmp(s8, 255.0);
+ __ SetFPScratchRegister(NoFPReg);
+ __ Mrs(x6, NZCV);
+
+ __ Fmov(d19, 0.0);
+ __ Fmov(d20, 0.5);
+ __ Mov(x21, 0x7ff0000000000001UL); // Double precision NaN.
+ __ Fmov(d21, x21);
+
+ __ Fcmp(d19, d19);
+ __ Mrs(x10, NZCV);
+ __ Fcmp(d19, d20);
+ __ Mrs(x11, NZCV);
+ __ Fcmp(d20, d19);
+ __ Mrs(x12, NZCV);
+ __ Fcmp(d19, d21);
+ __ Mrs(x13, NZCV);
+ __ Fcmp(d21, d21);
+ __ Mrs(x14, NZCV);
+ __ Fcmp(d19, 0.0);
+ __ Mrs(x15, NZCV);
+ __ SetFPScratchRegister(d0);
+ __ Fcmp(d19, 12.3456);
+ __ SetFPScratchRegister(NoFPReg);
+ __ Mrs(x16, NZCV);
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_32(ZCFlag, w0);
+ ASSERT_EQUAL_32(NFlag, w1);
+ ASSERT_EQUAL_32(CFlag, w2);
+ ASSERT_EQUAL_32(CVFlag, w3);
+ ASSERT_EQUAL_32(CVFlag, w4);
+ ASSERT_EQUAL_32(ZCFlag, w5);
+ ASSERT_EQUAL_32(NFlag, w6);
+ ASSERT_EQUAL_32(ZCFlag, w10);
+ ASSERT_EQUAL_32(NFlag, w11);
+ ASSERT_EQUAL_32(CFlag, w12);
+ ASSERT_EQUAL_32(CVFlag, w13);
+ ASSERT_EQUAL_32(CVFlag, w14);
+ ASSERT_EQUAL_32(ZCFlag, w15);
+ ASSERT_EQUAL_32(NFlag, w16);
+
+ TEARDOWN();
+}
+
+
+TEST(fcsel) {
+ INIT_V8();
+ SETUP();
+
+ START();
+ __ Mov(x16, 0);
+ __ Fmov(s16, 1.0);
+ __ Fmov(s17, 2.0);
+ __ Fmov(d18, 3.0);
+ __ Fmov(d19, 4.0);
+
+ __ Cmp(x16, 0);
+ __ Fcsel(s0, s16, s17, eq);
+ __ Fcsel(s1, s16, s17, ne);
+ __ Fcsel(d2, d18, d19, eq);
+ __ Fcsel(d3, d18, d19, ne);
+ __ fcsel(s4, s16, s17, al);
+ __ fcsel(d5, d18, d19, nv);
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_FP32(1.0, s0);
+ ASSERT_EQUAL_FP32(2.0, s1);
+ ASSERT_EQUAL_FP64(3.0, d2);
+ ASSERT_EQUAL_FP64(4.0, d3);
+ ASSERT_EQUAL_FP32(1.0, s4);
+ ASSERT_EQUAL_FP64(3.0, d5);
+
+ TEARDOWN();
+}
+
+
+TEST(fneg) {
+ INIT_V8();
+ SETUP();
+
+ START();
+ __ Fmov(s16, 1.0);
+ __ Fmov(s17, 0.0);
+ __ Fmov(s18, kFP32PositiveInfinity);
+ __ Fmov(d19, 1.0);
+ __ Fmov(d20, 0.0);
+ __ Fmov(d21, kFP64PositiveInfinity);
+
+ __ Fneg(s0, s16);
+ __ Fneg(s1, s0);
+ __ Fneg(s2, s17);
+ __ Fneg(s3, s2);
+ __ Fneg(s4, s18);
+ __ Fneg(s5, s4);
+ __ Fneg(d6, d19);
+ __ Fneg(d7, d6);
+ __ Fneg(d8, d20);
+ __ Fneg(d9, d8);
+ __ Fneg(d10, d21);
+ __ Fneg(d11, d10);
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_FP32(-1.0, s0);
+ ASSERT_EQUAL_FP32(1.0, s1);
+ ASSERT_EQUAL_FP32(-0.0, s2);
+ ASSERT_EQUAL_FP32(0.0, s3);
+ ASSERT_EQUAL_FP32(kFP32NegativeInfinity, s4);
+ ASSERT_EQUAL_FP32(kFP32PositiveInfinity, s5);
+ ASSERT_EQUAL_FP64(-1.0, d6);
+ ASSERT_EQUAL_FP64(1.0, d7);
+ ASSERT_EQUAL_FP64(-0.0, d8);
+ ASSERT_EQUAL_FP64(0.0, d9);
+ ASSERT_EQUAL_FP64(kFP64NegativeInfinity, d10);
+ ASSERT_EQUAL_FP64(kFP64PositiveInfinity, d11);
+
+ TEARDOWN();
+}
+
+
+TEST(fabs) {
+ INIT_V8();
+ SETUP();
+
+ START();
+ __ Fmov(s16, -1.0);
+ __ Fmov(s17, -0.0);
+ __ Fmov(s18, kFP32NegativeInfinity);
+ __ Fmov(d19, -1.0);
+ __ Fmov(d20, -0.0);
+ __ Fmov(d21, kFP64NegativeInfinity);
+
+ __ Fabs(s0, s16);
+ __ Fabs(s1, s0);
+ __ Fabs(s2, s17);
+ __ Fabs(s3, s18);
+ __ Fabs(d4, d19);
+ __ Fabs(d5, d4);
+ __ Fabs(d6, d20);
+ __ Fabs(d7, d21);
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_FP32(1.0, s0);
+ ASSERT_EQUAL_FP32(1.0, s1);
+ ASSERT_EQUAL_FP32(0.0, s2);
+ ASSERT_EQUAL_FP32(kFP32PositiveInfinity, s3);
+ ASSERT_EQUAL_FP64(1.0, d4);
+ ASSERT_EQUAL_FP64(1.0, d5);
+ ASSERT_EQUAL_FP64(0.0, d6);
+ ASSERT_EQUAL_FP64(kFP64PositiveInfinity, d7);
+
+ TEARDOWN();
+}
+
+
+TEST(fsqrt) {
+ INIT_V8();
+ SETUP();
+
+ START();
+ __ Fmov(s16, 0.0);
+ __ Fmov(s17, 1.0);
+ __ Fmov(s18, 0.25);
+ __ Fmov(s19, 65536.0);
+ __ Fmov(s20, -0.0);
+ __ Fmov(s21, kFP32PositiveInfinity);
+ __ Fmov(d22, 0.0);
+ __ Fmov(d23, 1.0);
+ __ Fmov(d24, 0.25);
+ __ Fmov(d25, 4294967296.0);
+ __ Fmov(d26, -0.0);
+ __ Fmov(d27, kFP64PositiveInfinity);
+
+ __ Fsqrt(s0, s16);
+ __ Fsqrt(s1, s17);
+ __ Fsqrt(s2, s18);
+ __ Fsqrt(s3, s19);
+ __ Fsqrt(s4, s20);
+ __ Fsqrt(s5, s21);
+ __ Fsqrt(d6, d22);
+ __ Fsqrt(d7, d23);
+ __ Fsqrt(d8, d24);
+ __ Fsqrt(d9, d25);
+ __ Fsqrt(d10, d26);
+ __ Fsqrt(d11, d27);
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_FP32(0.0, s0);
+ ASSERT_EQUAL_FP32(1.0, s1);
+ ASSERT_EQUAL_FP32(0.5, s2);
+ ASSERT_EQUAL_FP32(256.0, s3);
+ ASSERT_EQUAL_FP32(-0.0, s4);
+ ASSERT_EQUAL_FP32(kFP32PositiveInfinity, s5);
+ ASSERT_EQUAL_FP64(0.0, d6);
+ ASSERT_EQUAL_FP64(1.0, d7);
+ ASSERT_EQUAL_FP64(0.5, d8);
+ ASSERT_EQUAL_FP64(65536.0, d9);
+ ASSERT_EQUAL_FP64(-0.0, d10);
+ ASSERT_EQUAL_FP64(kFP64PositiveInfinity, d11);
+
+ TEARDOWN();
+}
+
+
+TEST(frinta) {
+ INIT_V8();
+ SETUP();
+
+ START();
+ __ Fmov(s16, 1.0);
+ __ Fmov(s17, 1.1);
+ __ Fmov(s18, 1.5);
+ __ Fmov(s19, 1.9);
+ __ Fmov(s20, 2.5);
+ __ Fmov(s21, -1.5);
+ __ Fmov(s22, -2.5);
+ __ Fmov(s23, kFP32PositiveInfinity);
+ __ Fmov(s24, kFP32NegativeInfinity);
+ __ Fmov(s25, 0.0);
+ __ Fmov(s26, -0.0);
+
+ __ Frinta(s0, s16);
+ __ Frinta(s1, s17);
+ __ Frinta(s2, s18);
+ __ Frinta(s3, s19);
+ __ Frinta(s4, s20);
+ __ Frinta(s5, s21);
+ __ Frinta(s6, s22);
+ __ Frinta(s7, s23);
+ __ Frinta(s8, s24);
+ __ Frinta(s9, s25);
+ __ Frinta(s10, s26);
+
+ __ Fmov(d16, 1.0);
+ __ Fmov(d17, 1.1);
+ __ Fmov(d18, 1.5);
+ __ Fmov(d19, 1.9);
+ __ Fmov(d20, 2.5);
+ __ Fmov(d21, -1.5);
+ __ Fmov(d22, -2.5);
+ __ Fmov(d23, kFP64PositiveInfinity);
+ __ Fmov(d24, kFP64NegativeInfinity);
+ __ Fmov(d25, 0.0);
+ __ Fmov(d26, -0.0);
+
+ __ Frinta(d11, d16);
+ __ Frinta(d12, d17);
+ __ Frinta(d13, d18);
+ __ Frinta(d14, d19);
+ __ Frinta(d15, d20);
+ __ Frinta(d16, d21);
+ __ Frinta(d17, d22);
+ __ Frinta(d18, d23);
+ __ Frinta(d19, d24);
+ __ Frinta(d20, d25);
+ __ Frinta(d21, d26);
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_FP32(1.0, s0);
+ ASSERT_EQUAL_FP32(1.0, s1);
+ ASSERT_EQUAL_FP32(2.0, s2);
+ ASSERT_EQUAL_FP32(2.0, s3);
+ ASSERT_EQUAL_FP32(3.0, s4);
+ ASSERT_EQUAL_FP32(-2.0, s5);
+ ASSERT_EQUAL_FP32(-3.0, s6);
+ ASSERT_EQUAL_FP32(kFP32PositiveInfinity, s7);
+ ASSERT_EQUAL_FP32(kFP32NegativeInfinity, s8);
+ ASSERT_EQUAL_FP32(0.0, s9);
+ ASSERT_EQUAL_FP32(-0.0, s10);
+ ASSERT_EQUAL_FP64(1.0, d11);
+ ASSERT_EQUAL_FP64(1.0, d12);
+ ASSERT_EQUAL_FP64(2.0, d13);
+ ASSERT_EQUAL_FP64(2.0, d14);
+ ASSERT_EQUAL_FP64(3.0, d15);
+ ASSERT_EQUAL_FP64(-2.0, d16);
+ ASSERT_EQUAL_FP64(-3.0, d17);
+ ASSERT_EQUAL_FP64(kFP64PositiveInfinity, d18);
+ ASSERT_EQUAL_FP64(kFP64NegativeInfinity, d19);
+ ASSERT_EQUAL_FP64(0.0, d20);
+ ASSERT_EQUAL_FP64(-0.0, d21);
+
+ TEARDOWN();
+}
+
+
+TEST(frintn) {
+ INIT_V8();
+ SETUP();
+
+ START();
+ __ Fmov(s16, 1.0);
+ __ Fmov(s17, 1.1);
+ __ Fmov(s18, 1.5);
+ __ Fmov(s19, 1.9);
+ __ Fmov(s20, 2.5);
+ __ Fmov(s21, -1.5);
+ __ Fmov(s22, -2.5);
+ __ Fmov(s23, kFP32PositiveInfinity);
+ __ Fmov(s24, kFP32NegativeInfinity);
+ __ Fmov(s25, 0.0);
+ __ Fmov(s26, -0.0);
+
+ __ Frintn(s0, s16);
+ __ Frintn(s1, s17);
+ __ Frintn(s2, s18);
+ __ Frintn(s3, s19);
+ __ Frintn(s4, s20);
+ __ Frintn(s5, s21);
+ __ Frintn(s6, s22);
+ __ Frintn(s7, s23);
+ __ Frintn(s8, s24);
+ __ Frintn(s9, s25);
+ __ Frintn(s10, s26);
+
+ __ Fmov(d16, 1.0);
+ __ Fmov(d17, 1.1);
+ __ Fmov(d18, 1.5);
+ __ Fmov(d19, 1.9);
+ __ Fmov(d20, 2.5);
+ __ Fmov(d21, -1.5);
+ __ Fmov(d22, -2.5);
+ __ Fmov(d23, kFP64PositiveInfinity);
+ __ Fmov(d24, kFP64NegativeInfinity);
+ __ Fmov(d25, 0.0);
+ __ Fmov(d26, -0.0);
+
+ __ Frintn(d11, d16);
+ __ Frintn(d12, d17);
+ __ Frintn(d13, d18);
+ __ Frintn(d14, d19);
+ __ Frintn(d15, d20);
+ __ Frintn(d16, d21);
+ __ Frintn(d17, d22);
+ __ Frintn(d18, d23);
+ __ Frintn(d19, d24);
+ __ Frintn(d20, d25);
+ __ Frintn(d21, d26);
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_FP32(1.0, s0);
+ ASSERT_EQUAL_FP32(1.0, s1);
+ ASSERT_EQUAL_FP32(2.0, s2);
+ ASSERT_EQUAL_FP32(2.0, s3);
+ ASSERT_EQUAL_FP32(2.0, s4);
+ ASSERT_EQUAL_FP32(-2.0, s5);
+ ASSERT_EQUAL_FP32(-2.0, s6);
+ ASSERT_EQUAL_FP32(kFP32PositiveInfinity, s7);
+ ASSERT_EQUAL_FP32(kFP32NegativeInfinity, s8);
+ ASSERT_EQUAL_FP32(0.0, s9);
+ ASSERT_EQUAL_FP32(-0.0, s10);
+ ASSERT_EQUAL_FP64(1.0, d11);
+ ASSERT_EQUAL_FP64(1.0, d12);
+ ASSERT_EQUAL_FP64(2.0, d13);
+ ASSERT_EQUAL_FP64(2.0, d14);
+ ASSERT_EQUAL_FP64(2.0, d15);
+ ASSERT_EQUAL_FP64(-2.0, d16);
+ ASSERT_EQUAL_FP64(-2.0, d17);
+ ASSERT_EQUAL_FP64(kFP64PositiveInfinity, d18);
+ ASSERT_EQUAL_FP64(kFP64NegativeInfinity, d19);
+ ASSERT_EQUAL_FP64(0.0, d20);
+ ASSERT_EQUAL_FP64(-0.0, d21);
+
+ TEARDOWN();
+}
+
+
+TEST(frintz) {
+ INIT_V8();
+ SETUP();
+
+ START();
+ __ Fmov(s16, 1.0);
+ __ Fmov(s17, 1.1);
+ __ Fmov(s18, 1.5);
+ __ Fmov(s19, 1.9);
+ __ Fmov(s20, 2.5);
+ __ Fmov(s21, -1.5);
+ __ Fmov(s22, -2.5);
+ __ Fmov(s23, kFP32PositiveInfinity);
+ __ Fmov(s24, kFP32NegativeInfinity);
+ __ Fmov(s25, 0.0);
+ __ Fmov(s26, -0.0);
+
+ __ Frintz(s0, s16);
+ __ Frintz(s1, s17);
+ __ Frintz(s2, s18);
+ __ Frintz(s3, s19);
+ __ Frintz(s4, s20);
+ __ Frintz(s5, s21);
+ __ Frintz(s6, s22);
+ __ Frintz(s7, s23);
+ __ Frintz(s8, s24);
+ __ Frintz(s9, s25);
+ __ Frintz(s10, s26);
+
+ __ Fmov(d16, 1.0);
+ __ Fmov(d17, 1.1);
+ __ Fmov(d18, 1.5);
+ __ Fmov(d19, 1.9);
+ __ Fmov(d20, 2.5);
+ __ Fmov(d21, -1.5);
+ __ Fmov(d22, -2.5);
+ __ Fmov(d23, kFP64PositiveInfinity);
+ __ Fmov(d24, kFP64NegativeInfinity);
+ __ Fmov(d25, 0.0);
+ __ Fmov(d26, -0.0);
+
+ __ Frintz(d11, d16);
+ __ Frintz(d12, d17);
+ __ Frintz(d13, d18);
+ __ Frintz(d14, d19);
+ __ Frintz(d15, d20);
+ __ Frintz(d16, d21);
+ __ Frintz(d17, d22);
+ __ Frintz(d18, d23);
+ __ Frintz(d19, d24);
+ __ Frintz(d20, d25);
+ __ Frintz(d21, d26);
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_FP32(1.0, s0);
+ ASSERT_EQUAL_FP32(1.0, s1);
+ ASSERT_EQUAL_FP32(1.0, s2);
+ ASSERT_EQUAL_FP32(1.0, s3);
+ ASSERT_EQUAL_FP32(2.0, s4);
+ ASSERT_EQUAL_FP32(-1.0, s5);
+ ASSERT_EQUAL_FP32(-2.0, s6);
+ ASSERT_EQUAL_FP32(kFP32PositiveInfinity, s7);
+ ASSERT_EQUAL_FP32(kFP32NegativeInfinity, s8);
+ ASSERT_EQUAL_FP32(0.0, s9);
+ ASSERT_EQUAL_FP32(-0.0, s10);
+ ASSERT_EQUAL_FP64(1.0, d11);
+ ASSERT_EQUAL_FP64(1.0, d12);
+ ASSERT_EQUAL_FP64(1.0, d13);
+ ASSERT_EQUAL_FP64(1.0, d14);
+ ASSERT_EQUAL_FP64(2.0, d15);
+ ASSERT_EQUAL_FP64(-1.0, d16);
+ ASSERT_EQUAL_FP64(-2.0, d17);
+ ASSERT_EQUAL_FP64(kFP64PositiveInfinity, d18);
+ ASSERT_EQUAL_FP64(kFP64NegativeInfinity, d19);
+ ASSERT_EQUAL_FP64(0.0, d20);
+ ASSERT_EQUAL_FP64(-0.0, d21);
+
+ TEARDOWN();
+}
+
+
+TEST(fcvt_ds) {
+ INIT_V8();
+ SETUP();
+
+ START();
+ __ Fmov(s16, 1.0);
+ __ Fmov(s17, 1.1);
+ __ Fmov(s18, 1.5);
+ __ Fmov(s19, 1.9);
+ __ Fmov(s20, 2.5);
+ __ Fmov(s21, -1.5);
+ __ Fmov(s22, -2.5);
+ __ Fmov(s23, kFP32PositiveInfinity);
+ __ Fmov(s24, kFP32NegativeInfinity);
+ __ Fmov(s25, 0.0);
+ __ Fmov(s26, -0.0);
+ __ Fmov(s27, FLT_MAX);
+ __ Fmov(s28, FLT_MIN);
+ __ Fmov(s29, rawbits_to_float(0x7fc12345)); // Quiet NaN.
+ __ Fmov(s30, rawbits_to_float(0x7f812345)); // Signalling NaN.
+
+ __ Fcvt(d0, s16);
+ __ Fcvt(d1, s17);
+ __ Fcvt(d2, s18);
+ __ Fcvt(d3, s19);
+ __ Fcvt(d4, s20);
+ __ Fcvt(d5, s21);
+ __ Fcvt(d6, s22);
+ __ Fcvt(d7, s23);
+ __ Fcvt(d8, s24);
+ __ Fcvt(d9, s25);
+ __ Fcvt(d10, s26);
+ __ Fcvt(d11, s27);
+ __ Fcvt(d12, s28);
+ __ Fcvt(d13, s29);
+ __ Fcvt(d14, s30);
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_FP64(1.0f, d0);
+ ASSERT_EQUAL_FP64(1.1f, d1);
+ ASSERT_EQUAL_FP64(1.5f, d2);
+ ASSERT_EQUAL_FP64(1.9f, d3);
+ ASSERT_EQUAL_FP64(2.5f, d4);
+ ASSERT_EQUAL_FP64(-1.5f, d5);
+ ASSERT_EQUAL_FP64(-2.5f, d6);
+ ASSERT_EQUAL_FP64(kFP64PositiveInfinity, d7);
+ ASSERT_EQUAL_FP64(kFP64NegativeInfinity, d8);
+ ASSERT_EQUAL_FP64(0.0f, d9);
+ ASSERT_EQUAL_FP64(-0.0f, d10);
+ ASSERT_EQUAL_FP64(FLT_MAX, d11);
+ ASSERT_EQUAL_FP64(FLT_MIN, d12);
+
+ // Check that the NaN payload is preserved according to A64 conversion rules:
+ // - The sign bit is preserved.
+ // - The top bit of the mantissa is forced to 1 (making it a quiet NaN).
+ // - The remaining mantissa bits are copied until they run out.
+ // - The low-order bits that haven't already been assigned are set to 0.
+ ASSERT_EQUAL_FP64(rawbits_to_double(0x7ff82468a0000000), d13);
+ ASSERT_EQUAL_FP64(rawbits_to_double(0x7ff82468a0000000), d14);
+
+ TEARDOWN();
+}
+
+
+TEST(fcvt_sd) {
+ INIT_V8();
+ // There are a huge number of corner-cases to check, so this test iterates
+ // through a list. The list is then negated and checked again (since the sign
+ // is irrelevant in ties-to-even rounding), so the list shouldn't include any
+ // negative values.
+ //
+ // Note that this test only checks ties-to-even rounding, because that is all
+ // that the simulator supports.
+ struct {double in; float expected;} test[] = {
+ // Check some simple conversions.
+ {0.0, 0.0f},
+ {1.0, 1.0f},
+ {1.5, 1.5f},
+ {2.0, 2.0f},
+ {FLT_MAX, FLT_MAX},
+ // - The smallest normalized float.
+ {pow(2.0, -126), powf(2, -126)},
+ // - Normal floats that need (ties-to-even) rounding.
+ // For normalized numbers:
+ // bit 29 (0x0000000020000000) is the lowest-order bit which will
+ // fit in the float's mantissa.
+ {rawbits_to_double(0x3ff0000000000000), rawbits_to_float(0x3f800000)},
+ {rawbits_to_double(0x3ff0000000000001), rawbits_to_float(0x3f800000)},
+ {rawbits_to_double(0x3ff0000010000000), rawbits_to_float(0x3f800000)},
+ {rawbits_to_double(0x3ff0000010000001), rawbits_to_float(0x3f800001)},
+ {rawbits_to_double(0x3ff0000020000000), rawbits_to_float(0x3f800001)},
+ {rawbits_to_double(0x3ff0000020000001), rawbits_to_float(0x3f800001)},
+ {rawbits_to_double(0x3ff0000030000000), rawbits_to_float(0x3f800002)},
+ {rawbits_to_double(0x3ff0000030000001), rawbits_to_float(0x3f800002)},
+ {rawbits_to_double(0x3ff0000040000000), rawbits_to_float(0x3f800002)},
+ {rawbits_to_double(0x3ff0000040000001), rawbits_to_float(0x3f800002)},
+ {rawbits_to_double(0x3ff0000050000000), rawbits_to_float(0x3f800002)},
+ {rawbits_to_double(0x3ff0000050000001), rawbits_to_float(0x3f800003)},
+ {rawbits_to_double(0x3ff0000060000000), rawbits_to_float(0x3f800003)},
+ // - A mantissa that overflows into the exponent during rounding.
+ {rawbits_to_double(0x3feffffff0000000), rawbits_to_float(0x3f800000)},
+ // - The largest double that rounds to a normal float.
+ {rawbits_to_double(0x47efffffefffffff), rawbits_to_float(0x7f7fffff)},
+
+ // Doubles that are too big for a float.
+ {kFP64PositiveInfinity, kFP32PositiveInfinity},
+ {DBL_MAX, kFP32PositiveInfinity},
+ // - The smallest exponent that's too big for a float.
+ {pow(2.0, 128), kFP32PositiveInfinity},
+ // - This exponent is in range, but the value rounds to infinity.
+ {rawbits_to_double(0x47effffff0000000), kFP32PositiveInfinity},
+
+ // Doubles that are too small for a float.
+ // - The smallest (subnormal) double.
+ {DBL_MIN, 0.0},
+ // - The largest double which is too small for a subnormal float.
+ {rawbits_to_double(0x3690000000000000), rawbits_to_float(0x00000000)},
+
+ // Normal doubles that become subnormal floats.
+ // - The largest subnormal float.
+ {rawbits_to_double(0x380fffffc0000000), rawbits_to_float(0x007fffff)},
+ // - The smallest subnormal float.
+ {rawbits_to_double(0x36a0000000000000), rawbits_to_float(0x00000001)},
+ // - Subnormal floats that need (ties-to-even) rounding.
+ // For these subnormals:
+ // bit 34 (0x0000000400000000) is the lowest-order bit which will
+ // fit in the float's mantissa.
+ {rawbits_to_double(0x37c159e000000000), rawbits_to_float(0x00045678)},
+ {rawbits_to_double(0x37c159e000000001), rawbits_to_float(0x00045678)},
+ {rawbits_to_double(0x37c159e200000000), rawbits_to_float(0x00045678)},
+ {rawbits_to_double(0x37c159e200000001), rawbits_to_float(0x00045679)},
+ {rawbits_to_double(0x37c159e400000000), rawbits_to_float(0x00045679)},
+ {rawbits_to_double(0x37c159e400000001), rawbits_to_float(0x00045679)},
+ {rawbits_to_double(0x37c159e600000000), rawbits_to_float(0x0004567a)},
+ {rawbits_to_double(0x37c159e600000001), rawbits_to_float(0x0004567a)},
+ {rawbits_to_double(0x37c159e800000000), rawbits_to_float(0x0004567a)},
+ {rawbits_to_double(0x37c159e800000001), rawbits_to_float(0x0004567a)},
+ {rawbits_to_double(0x37c159ea00000000), rawbits_to_float(0x0004567a)},
+ {rawbits_to_double(0x37c159ea00000001), rawbits_to_float(0x0004567b)},
+ {rawbits_to_double(0x37c159ec00000000), rawbits_to_float(0x0004567b)},
+ // - The smallest double which rounds up to become a subnormal float.
+ {rawbits_to_double(0x3690000000000001), rawbits_to_float(0x00000001)},
+
+ // Check NaN payload preservation.
+ {rawbits_to_double(0x7ff82468a0000000), rawbits_to_float(0x7fc12345)},
+ {rawbits_to_double(0x7ff82468bfffffff), rawbits_to_float(0x7fc12345)},
+ // - Signalling NaNs become quiet NaNs.
+ {rawbits_to_double(0x7ff02468a0000000), rawbits_to_float(0x7fc12345)},
+ {rawbits_to_double(0x7ff02468bfffffff), rawbits_to_float(0x7fc12345)},
+ {rawbits_to_double(0x7ff000001fffffff), rawbits_to_float(0x7fc00000)},
+ };
+ int count = sizeof(test) / sizeof(test[0]);
+
+ for (int i = 0; i < count; i++) {
+ double in = test[i].in;
+ float expected = test[i].expected;
+
+ // We only expect positive input.
+ ASSERT(std::signbit(in) == 0);
+ ASSERT(std::signbit(expected) == 0);
+
+ SETUP();
+ START();
+
+ __ Fmov(d10, in);
+ __ Fcvt(s20, d10);
+
+ __ Fmov(d11, -in);
+ __ Fcvt(s21, d11);
+
+ END();
+ RUN();
+ ASSERT_EQUAL_FP32(expected, s20);
+ ASSERT_EQUAL_FP32(-expected, s21);
+ TEARDOWN();
+ }
+}
+
+
+TEST(fcvtas) {
+ INIT_V8();
+ SETUP();
+
+ START();
+ __ Fmov(s0, 1.0);
+ __ Fmov(s1, 1.1);
+ __ Fmov(s2, 2.5);
+ __ Fmov(s3, -2.5);
+ __ Fmov(s4, kFP32PositiveInfinity);
+ __ Fmov(s5, kFP32NegativeInfinity);
+ __ Fmov(s6, 0x7fffff80); // Largest float < INT32_MAX.
+ __ Fneg(s7, s6); // Smallest float > INT32_MIN.
+ __ Fmov(d8, 1.0);
+ __ Fmov(d9, 1.1);
+ __ Fmov(d10, 2.5);
+ __ Fmov(d11, -2.5);
+ __ Fmov(d12, kFP64PositiveInfinity);
+ __ Fmov(d13, kFP64NegativeInfinity);
+ __ Fmov(d14, kWMaxInt - 1);
+ __ Fmov(d15, kWMinInt + 1);
+ __ Fmov(s17, 1.1);
+ __ Fmov(s18, 2.5);
+ __ Fmov(s19, -2.5);
+ __ Fmov(s20, kFP32PositiveInfinity);
+ __ Fmov(s21, kFP32NegativeInfinity);
+ __ Fmov(s22, 0x7fffff8000000000UL); // Largest float < INT64_MAX.
+ __ Fneg(s23, s22); // Smallest float > INT64_MIN.
+ __ Fmov(d24, 1.1);
+ __ Fmov(d25, 2.5);
+ __ Fmov(d26, -2.5);
+ __ Fmov(d27, kFP64PositiveInfinity);
+ __ Fmov(d28, kFP64NegativeInfinity);
+ __ Fmov(d29, 0x7ffffffffffffc00UL); // Largest double < INT64_MAX.
+ __ Fneg(d30, d29); // Smallest double > INT64_MIN.
+
+ __ Fcvtas(w0, s0);
+ __ Fcvtas(w1, s1);
+ __ Fcvtas(w2, s2);
+ __ Fcvtas(w3, s3);
+ __ Fcvtas(w4, s4);
+ __ Fcvtas(w5, s5);
+ __ Fcvtas(w6, s6);
+ __ Fcvtas(w7, s7);
+ __ Fcvtas(w8, d8);
+ __ Fcvtas(w9, d9);
+ __ Fcvtas(w10, d10);
+ __ Fcvtas(w11, d11);
+ __ Fcvtas(w12, d12);
+ __ Fcvtas(w13, d13);
+ __ Fcvtas(w14, d14);
+ __ Fcvtas(w15, d15);
+ __ Fcvtas(x17, s17);
+ __ Fcvtas(x18, s18);
+ __ Fcvtas(x19, s19);
+ __ Fcvtas(x20, s20);
+ __ Fcvtas(x21, s21);
+ __ Fcvtas(x22, s22);
+ __ Fcvtas(x23, s23);
+ __ Fcvtas(x24, d24);
+ __ Fcvtas(x25, d25);
+ __ Fcvtas(x26, d26);
+ __ Fcvtas(x27, d27);
+ __ Fcvtas(x28, d28);
+ __ Fcvtas(x29, d29);
+ __ Fcvtas(x30, d30);
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(1, x0);
+ ASSERT_EQUAL_64(1, x1);
+ ASSERT_EQUAL_64(3, x2);
+ ASSERT_EQUAL_64(0xfffffffd, x3);
+ ASSERT_EQUAL_64(0x7fffffff, x4);
+ ASSERT_EQUAL_64(0x80000000, x5);
+ ASSERT_EQUAL_64(0x7fffff80, x6);
+ ASSERT_EQUAL_64(0x80000080, x7);
+ ASSERT_EQUAL_64(1, x8);
+ ASSERT_EQUAL_64(1, x9);
+ ASSERT_EQUAL_64(3, x10);
+ ASSERT_EQUAL_64(0xfffffffd, x11);
+ ASSERT_EQUAL_64(0x7fffffff, x12);
+ ASSERT_EQUAL_64(0x80000000, x13);
+ ASSERT_EQUAL_64(0x7ffffffe, x14);
+ ASSERT_EQUAL_64(0x80000001, x15);
+ ASSERT_EQUAL_64(1, x17);
+ ASSERT_EQUAL_64(3, x18);
+ ASSERT_EQUAL_64(0xfffffffffffffffdUL, x19);
+ ASSERT_EQUAL_64(0x7fffffffffffffffUL, x20);
+ ASSERT_EQUAL_64(0x8000000000000000UL, x21);
+ ASSERT_EQUAL_64(0x7fffff8000000000UL, x22);
+ ASSERT_EQUAL_64(0x8000008000000000UL, x23);
+ ASSERT_EQUAL_64(1, x24);
+ ASSERT_EQUAL_64(3, x25);
+ ASSERT_EQUAL_64(0xfffffffffffffffdUL, x26);
+ ASSERT_EQUAL_64(0x7fffffffffffffffUL, x27);
+ ASSERT_EQUAL_64(0x8000000000000000UL, x28);
+ ASSERT_EQUAL_64(0x7ffffffffffffc00UL, x29);
+ ASSERT_EQUAL_64(0x8000000000000400UL, x30);
+
+ TEARDOWN();
+}
+
+
+// Test FCVTAU (FP -> unsigned integer, round to nearest with ties away from
+// zero) for W and X destinations from S and D sources. Negative and -Inf
+// inputs must saturate to 0; too-large and +Inf inputs must saturate to
+// UINT32_MAX / UINT64_MAX.
+TEST(fcvtau) {
+  INIT_V8();
+  SETUP();
+
+  START();
+  __ Fmov(s0, 1.0);
+  __ Fmov(s1, 1.1);
+  __ Fmov(s2, 2.5);
+  __ Fmov(s3, -2.5);
+  __ Fmov(s4, kFP32PositiveInfinity);
+  __ Fmov(s5, kFP32NegativeInfinity);
+  __ Fmov(s6, 0xffffff00);  // Largest float < UINT32_MAX.
+  __ Fmov(d8, 1.0);
+  __ Fmov(d9, 1.1);
+  __ Fmov(d10, 2.5);
+  __ Fmov(d11, -2.5);
+  __ Fmov(d12, kFP64PositiveInfinity);
+  __ Fmov(d13, kFP64NegativeInfinity);
+  __ Fmov(d14, 0xfffffffe);
+  __ Fmov(s16, 1.0);
+  __ Fmov(s17, 1.1);
+  __ Fmov(s18, 2.5);
+  __ Fmov(s19, -2.5);
+  __ Fmov(s20, kFP32PositiveInfinity);
+  __ Fmov(s21, kFP32NegativeInfinity);
+  __ Fmov(s22, 0xffffff0000000000UL);  // Largest float < UINT64_MAX.
+  __ Fmov(d24, 1.1);
+  __ Fmov(d25, 2.5);
+  __ Fmov(d26, -2.5);
+  __ Fmov(d27, kFP64PositiveInfinity);
+  __ Fmov(d28, kFP64NegativeInfinity);
+  __ Fmov(d29, 0xfffffffffffff800UL);  // Largest double < UINT64_MAX.
+  __ Fmov(s30, 0x100000000UL);
+
+  __ Fcvtau(w0, s0);
+  __ Fcvtau(w1, s1);
+  __ Fcvtau(w2, s2);
+  __ Fcvtau(w3, s3);
+  __ Fcvtau(w4, s4);
+  __ Fcvtau(w5, s5);
+  __ Fcvtau(w6, s6);
+  __ Fcvtau(w8, d8);
+  __ Fcvtau(w9, d9);
+  __ Fcvtau(w10, d10);
+  __ Fcvtau(w11, d11);
+  __ Fcvtau(w12, d12);
+  __ Fcvtau(w13, d13);
+  __ Fcvtau(w14, d14);
+  // NOTE(review): d15 is never initialized in this test and x15 is never
+  // checked below — confirm this conversion is intentional (it looks like a
+  // leftover from the fcvtas/fcvtms register layout).
+  __ Fcvtau(w15, d15);
+  __ Fcvtau(x16, s16);
+  __ Fcvtau(x17, s17);
+  __ Fcvtau(x18, s18);
+  __ Fcvtau(x19, s19);
+  __ Fcvtau(x20, s20);
+  __ Fcvtau(x21, s21);
+  __ Fcvtau(x22, s22);
+  __ Fcvtau(x24, d24);
+  __ Fcvtau(x25, d25);
+  __ Fcvtau(x26, d26);
+  __ Fcvtau(x27, d27);
+  __ Fcvtau(x28, d28);
+  __ Fcvtau(x29, d29);
+  __ Fcvtau(w30, s30);  // 0x100000000 doesn't fit in W: saturates.
+  END();
+
+  RUN();
+
+  ASSERT_EQUAL_64(1, x0);
+  ASSERT_EQUAL_64(1, x1);
+  ASSERT_EQUAL_64(3, x2);   // 2.5 rounds away from zero.
+  ASSERT_EQUAL_64(0, x3);   // Negative input saturates to 0 (unsigned).
+  ASSERT_EQUAL_64(0xffffffff, x4);
+  ASSERT_EQUAL_64(0, x5);
+  ASSERT_EQUAL_64(0xffffff00, x6);
+  ASSERT_EQUAL_64(1, x8);
+  ASSERT_EQUAL_64(1, x9);
+  ASSERT_EQUAL_64(3, x10);
+  ASSERT_EQUAL_64(0, x11);
+  ASSERT_EQUAL_64(0xffffffff, x12);
+  ASSERT_EQUAL_64(0, x13);
+  ASSERT_EQUAL_64(0xfffffffe, x14);
+  ASSERT_EQUAL_64(1, x16);
+  ASSERT_EQUAL_64(1, x17);
+  ASSERT_EQUAL_64(3, x18);
+  ASSERT_EQUAL_64(0, x19);
+  ASSERT_EQUAL_64(0xffffffffffffffffUL, x20);
+  ASSERT_EQUAL_64(0, x21);
+  ASSERT_EQUAL_64(0xffffff0000000000UL, x22);
+  ASSERT_EQUAL_64(1, x24);
+  ASSERT_EQUAL_64(3, x25);
+  ASSERT_EQUAL_64(0, x26);
+  ASSERT_EQUAL_64(0xffffffffffffffffUL, x27);
+  ASSERT_EQUAL_64(0, x28);
+  ASSERT_EQUAL_64(0xfffffffffffff800UL, x29);
+  ASSERT_EQUAL_64(0xffffffff, x30);
+
+  TEARDOWN();
+}
+
+
+// Test FCVTMS (FP -> signed integer, round toward minus infinity) for W and
+// X destinations from S and D sources. Out-of-range and infinite inputs must
+// saturate to INT32/INT64_MIN and _MAX.
+TEST(fcvtms) {
+  INIT_V8();
+  SETUP();
+
+  START();
+  __ Fmov(s0, 1.0);
+  __ Fmov(s1, 1.1);
+  __ Fmov(s2, 1.5);
+  __ Fmov(s3, -1.5);
+  __ Fmov(s4, kFP32PositiveInfinity);
+  __ Fmov(s5, kFP32NegativeInfinity);
+  __ Fmov(s6, 0x7fffff80);  // Largest float < INT32_MAX.
+  __ Fneg(s7, s6);          // Smallest float > INT32_MIN.
+  __ Fmov(d8, 1.0);
+  __ Fmov(d9, 1.1);
+  __ Fmov(d10, 1.5);
+  __ Fmov(d11, -1.5);
+  __ Fmov(d12, kFP64PositiveInfinity);
+  __ Fmov(d13, kFP64NegativeInfinity);
+  __ Fmov(d14, kWMaxInt - 1);
+  __ Fmov(d15, kWMinInt + 1);
+  __ Fmov(s17, 1.1);
+  __ Fmov(s18, 1.5);
+  __ Fmov(s19, -1.5);
+  __ Fmov(s20, kFP32PositiveInfinity);
+  __ Fmov(s21, kFP32NegativeInfinity);
+  __ Fmov(s22, 0x7fffff8000000000UL);   // Largest float < INT64_MAX.
+  __ Fneg(s23, s22);                    // Smallest float > INT64_MIN.
+  __ Fmov(d24, 1.1);
+  __ Fmov(d25, 1.5);
+  __ Fmov(d26, -1.5);
+  __ Fmov(d27, kFP64PositiveInfinity);
+  __ Fmov(d28, kFP64NegativeInfinity);
+  __ Fmov(d29, 0x7ffffffffffffc00UL);   // Largest double < INT64_MAX.
+  __ Fneg(d30, d29);                    // Smallest double > INT64_MIN.
+
+  __ Fcvtms(w0, s0);
+  __ Fcvtms(w1, s1);
+  __ Fcvtms(w2, s2);
+  __ Fcvtms(w3, s3);
+  __ Fcvtms(w4, s4);
+  __ Fcvtms(w5, s5);
+  __ Fcvtms(w6, s6);
+  __ Fcvtms(w7, s7);
+  __ Fcvtms(w8, d8);
+  __ Fcvtms(w9, d9);
+  __ Fcvtms(w10, d10);
+  __ Fcvtms(w11, d11);
+  __ Fcvtms(w12, d12);
+  __ Fcvtms(w13, d13);
+  __ Fcvtms(w14, d14);
+  __ Fcvtms(w15, d15);
+  __ Fcvtms(x17, s17);
+  __ Fcvtms(x18, s18);
+  __ Fcvtms(x19, s19);
+  __ Fcvtms(x20, s20);
+  __ Fcvtms(x21, s21);
+  __ Fcvtms(x22, s22);
+  __ Fcvtms(x23, s23);
+  __ Fcvtms(x24, d24);
+  __ Fcvtms(x25, d25);
+  __ Fcvtms(x26, d26);
+  __ Fcvtms(x27, d27);
+  __ Fcvtms(x28, d28);
+  __ Fcvtms(x29, d29);
+  __ Fcvtms(x30, d30);
+  END();
+
+  RUN();
+
+  ASSERT_EQUAL_64(1, x0);
+  ASSERT_EQUAL_64(1, x1);
+  ASSERT_EQUAL_64(1, x2);           // 1.5 rounds down.
+  ASSERT_EQUAL_64(0xfffffffe, x3);  // -1.5 rounds down to -2.
+  ASSERT_EQUAL_64(0x7fffffff, x4);
+  ASSERT_EQUAL_64(0x80000000, x5);
+  ASSERT_EQUAL_64(0x7fffff80, x6);
+  ASSERT_EQUAL_64(0x80000080, x7);
+  ASSERT_EQUAL_64(1, x8);
+  ASSERT_EQUAL_64(1, x9);
+  ASSERT_EQUAL_64(1, x10);
+  ASSERT_EQUAL_64(0xfffffffe, x11);
+  ASSERT_EQUAL_64(0x7fffffff, x12);
+  ASSERT_EQUAL_64(0x80000000, x13);
+  ASSERT_EQUAL_64(0x7ffffffe, x14);
+  ASSERT_EQUAL_64(0x80000001, x15);
+  ASSERT_EQUAL_64(1, x17);
+  ASSERT_EQUAL_64(1, x18);
+  ASSERT_EQUAL_64(0xfffffffffffffffeUL, x19);
+  ASSERT_EQUAL_64(0x7fffffffffffffffUL, x20);
+  ASSERT_EQUAL_64(0x8000000000000000UL, x21);
+  ASSERT_EQUAL_64(0x7fffff8000000000UL, x22);
+  ASSERT_EQUAL_64(0x8000008000000000UL, x23);
+  ASSERT_EQUAL_64(1, x24);
+  ASSERT_EQUAL_64(1, x25);
+  ASSERT_EQUAL_64(0xfffffffffffffffeUL, x26);
+  ASSERT_EQUAL_64(0x7fffffffffffffffUL, x27);
+  ASSERT_EQUAL_64(0x8000000000000000UL, x28);
+  ASSERT_EQUAL_64(0x7ffffffffffffc00UL, x29);
+  ASSERT_EQUAL_64(0x8000000000000400UL, x30);
+
+  TEARDOWN();
+}
+
+
+// Test FCVTMU (FP -> unsigned integer, round toward minus infinity) for W
+// and X destinations from S and D sources. Inputs are shared with the
+// fcvtms test; any negative or -Inf input must saturate to 0.
+TEST(fcvtmu) {
+  INIT_V8();
+  SETUP();
+
+  START();
+  __ Fmov(s0, 1.0);
+  __ Fmov(s1, 1.1);
+  __ Fmov(s2, 1.5);
+  __ Fmov(s3, -1.5);
+  __ Fmov(s4, kFP32PositiveInfinity);
+  __ Fmov(s5, kFP32NegativeInfinity);
+  __ Fmov(s6, 0x7fffff80);  // Largest float < INT32_MAX.
+  __ Fneg(s7, s6);          // Smallest float > INT32_MIN.
+  __ Fmov(d8, 1.0);
+  __ Fmov(d9, 1.1);
+  __ Fmov(d10, 1.5);
+  __ Fmov(d11, -1.5);
+  __ Fmov(d12, kFP64PositiveInfinity);
+  __ Fmov(d13, kFP64NegativeInfinity);
+  __ Fmov(d14, kWMaxInt - 1);
+  __ Fmov(d15, kWMinInt + 1);
+  __ Fmov(s17, 1.1);
+  __ Fmov(s18, 1.5);
+  __ Fmov(s19, -1.5);
+  __ Fmov(s20, kFP32PositiveInfinity);
+  __ Fmov(s21, kFP32NegativeInfinity);
+  __ Fmov(s22, 0x7fffff8000000000UL);   // Largest float < INT64_MAX.
+  __ Fneg(s23, s22);                    // Smallest float > INT64_MIN.
+  __ Fmov(d24, 1.1);
+  __ Fmov(d25, 1.5);
+  __ Fmov(d26, -1.5);
+  __ Fmov(d27, kFP64PositiveInfinity);
+  __ Fmov(d28, kFP64NegativeInfinity);
+  __ Fmov(d29, 0x7ffffffffffffc00UL);   // Largest double < INT64_MAX.
+  __ Fneg(d30, d29);                    // Smallest double > INT64_MIN.
+
+  __ Fcvtmu(w0, s0);
+  __ Fcvtmu(w1, s1);
+  __ Fcvtmu(w2, s2);
+  __ Fcvtmu(w3, s3);
+  __ Fcvtmu(w4, s4);
+  __ Fcvtmu(w5, s5);
+  __ Fcvtmu(w6, s6);
+  __ Fcvtmu(w7, s7);
+  __ Fcvtmu(w8, d8);
+  __ Fcvtmu(w9, d9);
+  __ Fcvtmu(w10, d10);
+  __ Fcvtmu(w11, d11);
+  __ Fcvtmu(w12, d12);
+  __ Fcvtmu(w13, d13);
+  __ Fcvtmu(w14, d14);
+  __ Fcvtmu(x17, s17);
+  __ Fcvtmu(x18, s18);
+  __ Fcvtmu(x19, s19);
+  __ Fcvtmu(x20, s20);
+  __ Fcvtmu(x21, s21);
+  __ Fcvtmu(x22, s22);
+  __ Fcvtmu(x23, s23);
+  __ Fcvtmu(x24, d24);
+  __ Fcvtmu(x25, d25);
+  __ Fcvtmu(x26, d26);
+  __ Fcvtmu(x27, d27);
+  __ Fcvtmu(x28, d28);
+  __ Fcvtmu(x29, d29);
+  __ Fcvtmu(x30, d30);
+  END();
+
+  RUN();
+
+  ASSERT_EQUAL_64(1, x0);
+  ASSERT_EQUAL_64(1, x1);
+  ASSERT_EQUAL_64(1, x2);   // 1.5 rounds down.
+  ASSERT_EQUAL_64(0, x3);   // Negative input saturates to 0 (unsigned).
+  ASSERT_EQUAL_64(0xffffffff, x4);
+  ASSERT_EQUAL_64(0, x5);
+  ASSERT_EQUAL_64(0x7fffff80, x6);
+  ASSERT_EQUAL_64(0, x7);
+  ASSERT_EQUAL_64(1, x8);
+  ASSERT_EQUAL_64(1, x9);
+  ASSERT_EQUAL_64(1, x10);
+  ASSERT_EQUAL_64(0, x11);
+  ASSERT_EQUAL_64(0xffffffff, x12);
+  ASSERT_EQUAL_64(0, x13);
+  ASSERT_EQUAL_64(0x7ffffffe, x14);
+  ASSERT_EQUAL_64(1, x17);
+  ASSERT_EQUAL_64(1, x18);
+  ASSERT_EQUAL_64(0x0UL, x19);
+  ASSERT_EQUAL_64(0xffffffffffffffffUL, x20);
+  ASSERT_EQUAL_64(0x0UL, x21);
+  ASSERT_EQUAL_64(0x7fffff8000000000UL, x22);
+  ASSERT_EQUAL_64(0x0UL, x23);
+  ASSERT_EQUAL_64(1, x24);
+  ASSERT_EQUAL_64(1, x25);
+  ASSERT_EQUAL_64(0x0UL, x26);
+  ASSERT_EQUAL_64(0xffffffffffffffffUL, x27);
+  ASSERT_EQUAL_64(0x0UL, x28);
+  ASSERT_EQUAL_64(0x7ffffffffffffc00UL, x29);
+  ASSERT_EQUAL_64(0x0UL, x30);
+
+  TEARDOWN();
+}
+
+
+// Test FCVTNS (FP -> signed integer, round to nearest with ties to even)
+// for W and X destinations from S and D sources, including saturation for
+// out-of-range and infinite inputs.
+TEST(fcvtns) {
+  INIT_V8();
+  SETUP();
+
+  START();
+  __ Fmov(s0, 1.0);
+  __ Fmov(s1, 1.1);
+  __ Fmov(s2, 1.5);
+  __ Fmov(s3, -1.5);
+  __ Fmov(s4, kFP32PositiveInfinity);
+  __ Fmov(s5, kFP32NegativeInfinity);
+  __ Fmov(s6, 0x7fffff80);  // Largest float < INT32_MAX.
+  __ Fneg(s7, s6);          // Smallest float > INT32_MIN.
+  __ Fmov(d8, 1.0);
+  __ Fmov(d9, 1.1);
+  __ Fmov(d10, 1.5);
+  __ Fmov(d11, -1.5);
+  __ Fmov(d12, kFP64PositiveInfinity);
+  __ Fmov(d13, kFP64NegativeInfinity);
+  __ Fmov(d14, kWMaxInt - 1);
+  __ Fmov(d15, kWMinInt + 1);
+  __ Fmov(s17, 1.1);
+  __ Fmov(s18, 1.5);
+  __ Fmov(s19, -1.5);
+  __ Fmov(s20, kFP32PositiveInfinity);
+  __ Fmov(s21, kFP32NegativeInfinity);
+  __ Fmov(s22, 0x7fffff8000000000UL);   // Largest float < INT64_MAX.
+  __ Fneg(s23, s22);                    // Smallest float > INT64_MIN.
+  __ Fmov(d24, 1.1);
+  __ Fmov(d25, 1.5);
+  __ Fmov(d26, -1.5);
+  __ Fmov(d27, kFP64PositiveInfinity);
+  __ Fmov(d28, kFP64NegativeInfinity);
+  __ Fmov(d29, 0x7ffffffffffffc00UL);   // Largest double < INT64_MAX.
+  __ Fneg(d30, d29);                    // Smallest double > INT64_MIN.
+
+  __ Fcvtns(w0, s0);
+  __ Fcvtns(w1, s1);
+  __ Fcvtns(w2, s2);
+  __ Fcvtns(w3, s3);
+  __ Fcvtns(w4, s4);
+  __ Fcvtns(w5, s5);
+  __ Fcvtns(w6, s6);
+  __ Fcvtns(w7, s7);
+  __ Fcvtns(w8, d8);
+  __ Fcvtns(w9, d9);
+  __ Fcvtns(w10, d10);
+  __ Fcvtns(w11, d11);
+  __ Fcvtns(w12, d12);
+  __ Fcvtns(w13, d13);
+  __ Fcvtns(w14, d14);
+  __ Fcvtns(w15, d15);
+  __ Fcvtns(x17, s17);
+  __ Fcvtns(x18, s18);
+  __ Fcvtns(x19, s19);
+  __ Fcvtns(x20, s20);
+  __ Fcvtns(x21, s21);
+  __ Fcvtns(x22, s22);
+  __ Fcvtns(x23, s23);
+  __ Fcvtns(x24, d24);
+  __ Fcvtns(x25, d25);
+  __ Fcvtns(x26, d26);
+  __ Fcvtns(x27, d27);
+  // NOTE(review): the x28/d28 (-Inf -> INT64_MIN) case is disabled with no
+  // explanation; confirm whether this is a known simulator/hardware issue
+  // and either re-enable it or document why it is off.
+// __ Fcvtns(x28, d28);
+  __ Fcvtns(x29, d29);
+  __ Fcvtns(x30, d30);
+  END();
+
+  RUN();
+
+  ASSERT_EQUAL_64(1, x0);
+  ASSERT_EQUAL_64(1, x1);
+  ASSERT_EQUAL_64(2, x2);           // 1.5 ties to even -> 2.
+  ASSERT_EQUAL_64(0xfffffffe, x3);  // -1.5 ties to even -> -2.
+  ASSERT_EQUAL_64(0x7fffffff, x4);
+  ASSERT_EQUAL_64(0x80000000, x5);
+  ASSERT_EQUAL_64(0x7fffff80, x6);
+  ASSERT_EQUAL_64(0x80000080, x7);
+  ASSERT_EQUAL_64(1, x8);
+  ASSERT_EQUAL_64(1, x9);
+  ASSERT_EQUAL_64(2, x10);
+  ASSERT_EQUAL_64(0xfffffffe, x11);
+  ASSERT_EQUAL_64(0x7fffffff, x12);
+  ASSERT_EQUAL_64(0x80000000, x13);
+  ASSERT_EQUAL_64(0x7ffffffe, x14);
+  ASSERT_EQUAL_64(0x80000001, x15);
+  ASSERT_EQUAL_64(1, x17);
+  ASSERT_EQUAL_64(2, x18);
+  ASSERT_EQUAL_64(0xfffffffffffffffeUL, x19);
+  ASSERT_EQUAL_64(0x7fffffffffffffffUL, x20);
+  ASSERT_EQUAL_64(0x8000000000000000UL, x21);
+  ASSERT_EQUAL_64(0x7fffff8000000000UL, x22);
+  ASSERT_EQUAL_64(0x8000008000000000UL, x23);
+  ASSERT_EQUAL_64(1, x24);
+  ASSERT_EQUAL_64(2, x25);
+  ASSERT_EQUAL_64(0xfffffffffffffffeUL, x26);
+  ASSERT_EQUAL_64(0x7fffffffffffffffUL, x27);
+// ASSERT_EQUAL_64(0x8000000000000000UL, x28);
+  ASSERT_EQUAL_64(0x7ffffffffffffc00UL, x29);
+  ASSERT_EQUAL_64(0x8000000000000400UL, x30);
+
+  TEARDOWN();
+}
+
+
+// Test FCVTNU (FP -> unsigned integer, round to nearest with ties to even)
+// for W and X destinations from S and D sources. Negative and -Inf inputs
+// must saturate to 0; too-large and +Inf inputs to UINT32/UINT64_MAX.
+TEST(fcvtnu) {
+  INIT_V8();
+  SETUP();
+
+  START();
+  __ Fmov(s0, 1.0);
+  __ Fmov(s1, 1.1);
+  __ Fmov(s2, 1.5);
+  __ Fmov(s3, -1.5);
+  __ Fmov(s4, kFP32PositiveInfinity);
+  __ Fmov(s5, kFP32NegativeInfinity);
+  __ Fmov(s6, 0xffffff00);  // Largest float < UINT32_MAX.
+  __ Fmov(d8, 1.0);
+  __ Fmov(d9, 1.1);
+  __ Fmov(d10, 1.5);
+  __ Fmov(d11, -1.5);
+  __ Fmov(d12, kFP64PositiveInfinity);
+  __ Fmov(d13, kFP64NegativeInfinity);
+  __ Fmov(d14, 0xfffffffe);
+  __ Fmov(s16, 1.0);
+  __ Fmov(s17, 1.1);
+  __ Fmov(s18, 1.5);
+  __ Fmov(s19, -1.5);
+  __ Fmov(s20, kFP32PositiveInfinity);
+  __ Fmov(s21, kFP32NegativeInfinity);
+  __ Fmov(s22, 0xffffff0000000000UL);  // Largest float < UINT64_MAX.
+  __ Fmov(d24, 1.1);
+  __ Fmov(d25, 1.5);
+  __ Fmov(d26, -1.5);
+  __ Fmov(d27, kFP64PositiveInfinity);
+  __ Fmov(d28, kFP64NegativeInfinity);
+  __ Fmov(d29, 0xfffffffffffff800UL);  // Largest double < UINT64_MAX.
+  __ Fmov(s30, 0x100000000UL);
+
+  __ Fcvtnu(w0, s0);
+  __ Fcvtnu(w1, s1);
+  __ Fcvtnu(w2, s2);
+  __ Fcvtnu(w3, s3);
+  __ Fcvtnu(w4, s4);
+  __ Fcvtnu(w5, s5);
+  __ Fcvtnu(w6, s6);
+  __ Fcvtnu(w8, d8);
+  __ Fcvtnu(w9, d9);
+  __ Fcvtnu(w10, d10);
+  __ Fcvtnu(w11, d11);
+  __ Fcvtnu(w12, d12);
+  __ Fcvtnu(w13, d13);
+  __ Fcvtnu(w14, d14);
+  // NOTE(review): d15 is never initialized in this test and x15 is never
+  // checked below — confirm this stray conversion is intentional.
+  __ Fcvtnu(w15, d15);
+  __ Fcvtnu(x16, s16);
+  __ Fcvtnu(x17, s17);
+  __ Fcvtnu(x18, s18);
+  __ Fcvtnu(x19, s19);
+  __ Fcvtnu(x20, s20);
+  __ Fcvtnu(x21, s21);
+  __ Fcvtnu(x22, s22);
+  __ Fcvtnu(x24, d24);
+  __ Fcvtnu(x25, d25);
+  __ Fcvtnu(x26, d26);
+  __ Fcvtnu(x27, d27);
+  // NOTE(review): the x28/d28 (-Inf -> 0) case is disabled with no
+  // explanation (as in fcvtns); confirm and either re-enable or document.
+// __ Fcvtnu(x28, d28);
+  __ Fcvtnu(x29, d29);
+  __ Fcvtnu(w30, s30);  // 0x100000000 doesn't fit in W: saturates.
+  END();
+
+  RUN();
+
+  ASSERT_EQUAL_64(1, x0);
+  ASSERT_EQUAL_64(1, x1);
+  ASSERT_EQUAL_64(2, x2);   // 1.5 ties to even -> 2.
+  ASSERT_EQUAL_64(0, x3);   // Negative input saturates to 0 (unsigned).
+  ASSERT_EQUAL_64(0xffffffff, x4);
+  ASSERT_EQUAL_64(0, x5);
+  ASSERT_EQUAL_64(0xffffff00, x6);
+  ASSERT_EQUAL_64(1, x8);
+  ASSERT_EQUAL_64(1, x9);
+  ASSERT_EQUAL_64(2, x10);
+  ASSERT_EQUAL_64(0, x11);
+  ASSERT_EQUAL_64(0xffffffff, x12);
+  ASSERT_EQUAL_64(0, x13);
+  ASSERT_EQUAL_64(0xfffffffe, x14);
+  ASSERT_EQUAL_64(1, x16);
+  ASSERT_EQUAL_64(1, x17);
+  ASSERT_EQUAL_64(2, x18);
+  ASSERT_EQUAL_64(0, x19);
+  ASSERT_EQUAL_64(0xffffffffffffffffUL, x20);
+  ASSERT_EQUAL_64(0, x21);
+  ASSERT_EQUAL_64(0xffffff0000000000UL, x22);
+  ASSERT_EQUAL_64(1, x24);
+  ASSERT_EQUAL_64(2, x25);
+  ASSERT_EQUAL_64(0, x26);
+  ASSERT_EQUAL_64(0xffffffffffffffffUL, x27);
+// ASSERT_EQUAL_64(0, x28);
+  ASSERT_EQUAL_64(0xfffffffffffff800UL, x29);
+  ASSERT_EQUAL_64(0xffffffff, x30);
+
+  TEARDOWN();
+}
+
+
+// Test FCVTZS (FP -> signed integer, round toward zero) for W and X
+// destinations from S and D sources, including saturation for out-of-range
+// and infinite inputs.
+TEST(fcvtzs) {
+  INIT_V8();
+  SETUP();
+
+  START();
+  __ Fmov(s0, 1.0);
+  __ Fmov(s1, 1.1);
+  __ Fmov(s2, 1.5);
+  __ Fmov(s3, -1.5);
+  __ Fmov(s4, kFP32PositiveInfinity);
+  __ Fmov(s5, kFP32NegativeInfinity);
+  __ Fmov(s6, 0x7fffff80);  // Largest float < INT32_MAX.
+  __ Fneg(s7, s6);          // Smallest float > INT32_MIN.
+  __ Fmov(d8, 1.0);
+  __ Fmov(d9, 1.1);
+  __ Fmov(d10, 1.5);
+  __ Fmov(d11, -1.5);
+  __ Fmov(d12, kFP64PositiveInfinity);
+  __ Fmov(d13, kFP64NegativeInfinity);
+  __ Fmov(d14, kWMaxInt - 1);
+  __ Fmov(d15, kWMinInt + 1);
+  __ Fmov(s17, 1.1);
+  __ Fmov(s18, 1.5);
+  __ Fmov(s19, -1.5);
+  __ Fmov(s20, kFP32PositiveInfinity);
+  __ Fmov(s21, kFP32NegativeInfinity);
+  __ Fmov(s22, 0x7fffff8000000000UL);   // Largest float < INT64_MAX.
+  __ Fneg(s23, s22);                    // Smallest float > INT64_MIN.
+  __ Fmov(d24, 1.1);
+  __ Fmov(d25, 1.5);
+  __ Fmov(d26, -1.5);
+  __ Fmov(d27, kFP64PositiveInfinity);
+  __ Fmov(d28, kFP64NegativeInfinity);
+  __ Fmov(d29, 0x7ffffffffffffc00UL);   // Largest double < INT64_MAX.
+  __ Fneg(d30, d29);                    // Smallest double > INT64_MIN.
+
+  __ Fcvtzs(w0, s0);
+  __ Fcvtzs(w1, s1);
+  __ Fcvtzs(w2, s2);
+  __ Fcvtzs(w3, s3);
+  __ Fcvtzs(w4, s4);
+  __ Fcvtzs(w5, s5);
+  __ Fcvtzs(w6, s6);
+  __ Fcvtzs(w7, s7);
+  __ Fcvtzs(w8, d8);
+  __ Fcvtzs(w9, d9);
+  __ Fcvtzs(w10, d10);
+  __ Fcvtzs(w11, d11);
+  __ Fcvtzs(w12, d12);
+  __ Fcvtzs(w13, d13);
+  __ Fcvtzs(w14, d14);
+  __ Fcvtzs(w15, d15);
+  __ Fcvtzs(x17, s17);
+  __ Fcvtzs(x18, s18);
+  __ Fcvtzs(x19, s19);
+  __ Fcvtzs(x20, s20);
+  __ Fcvtzs(x21, s21);
+  __ Fcvtzs(x22, s22);
+  __ Fcvtzs(x23, s23);
+  __ Fcvtzs(x24, d24);
+  __ Fcvtzs(x25, d25);
+  __ Fcvtzs(x26, d26);
+  __ Fcvtzs(x27, d27);
+  __ Fcvtzs(x28, d28);
+  __ Fcvtzs(x29, d29);
+  __ Fcvtzs(x30, d30);
+  END();
+
+  RUN();
+
+  ASSERT_EQUAL_64(1, x0);
+  ASSERT_EQUAL_64(1, x1);
+  ASSERT_EQUAL_64(1, x2);           // 1.5 truncates toward zero.
+  ASSERT_EQUAL_64(0xffffffff, x3);  // -1.5 truncates to -1.
+  ASSERT_EQUAL_64(0x7fffffff, x4);
+  ASSERT_EQUAL_64(0x80000000, x5);
+  ASSERT_EQUAL_64(0x7fffff80, x6);
+  ASSERT_EQUAL_64(0x80000080, x7);
+  ASSERT_EQUAL_64(1, x8);
+  ASSERT_EQUAL_64(1, x9);
+  ASSERT_EQUAL_64(1, x10);
+  ASSERT_EQUAL_64(0xffffffff, x11);
+  ASSERT_EQUAL_64(0x7fffffff, x12);
+  ASSERT_EQUAL_64(0x80000000, x13);
+  ASSERT_EQUAL_64(0x7ffffffe, x14);
+  ASSERT_EQUAL_64(0x80000001, x15);
+  ASSERT_EQUAL_64(1, x17);
+  ASSERT_EQUAL_64(1, x18);
+  ASSERT_EQUAL_64(0xffffffffffffffffUL, x19);
+  ASSERT_EQUAL_64(0x7fffffffffffffffUL, x20);
+  ASSERT_EQUAL_64(0x8000000000000000UL, x21);
+  ASSERT_EQUAL_64(0x7fffff8000000000UL, x22);
+  ASSERT_EQUAL_64(0x8000008000000000UL, x23);
+  ASSERT_EQUAL_64(1, x24);
+  ASSERT_EQUAL_64(1, x25);
+  ASSERT_EQUAL_64(0xffffffffffffffffUL, x26);
+  ASSERT_EQUAL_64(0x7fffffffffffffffUL, x27);
+  ASSERT_EQUAL_64(0x8000000000000000UL, x28);
+  ASSERT_EQUAL_64(0x7ffffffffffffc00UL, x29);
+  ASSERT_EQUAL_64(0x8000000000000400UL, x30);
+
+  TEARDOWN();
+}
+
+
+// Test FCVTZU (FP -> unsigned integer, round toward zero) for W and X
+// destinations from S and D sources. Inputs are shared with the fcvtzs
+// test (hence the INT32/INT64 comments); negative and -Inf inputs must
+// saturate to 0.
+TEST(fcvtzu) {
+  INIT_V8();
+  SETUP();
+
+  START();
+  __ Fmov(s0, 1.0);
+  __ Fmov(s1, 1.1);
+  __ Fmov(s2, 1.5);
+  __ Fmov(s3, -1.5);
+  __ Fmov(s4, kFP32PositiveInfinity);
+  __ Fmov(s5, kFP32NegativeInfinity);
+  __ Fmov(s6, 0x7fffff80);  // Largest float < INT32_MAX.
+  __ Fneg(s7, s6);          // Smallest float > INT32_MIN.
+  __ Fmov(d8, 1.0);
+  __ Fmov(d9, 1.1);
+  __ Fmov(d10, 1.5);
+  __ Fmov(d11, -1.5);
+  __ Fmov(d12, kFP64PositiveInfinity);
+  __ Fmov(d13, kFP64NegativeInfinity);
+  __ Fmov(d14, kWMaxInt - 1);
+  __ Fmov(d15, kWMinInt + 1);
+  __ Fmov(s17, 1.1);
+  __ Fmov(s18, 1.5);
+  __ Fmov(s19, -1.5);
+  __ Fmov(s20, kFP32PositiveInfinity);
+  __ Fmov(s21, kFP32NegativeInfinity);
+  __ Fmov(s22, 0x7fffff8000000000UL);   // Largest float < INT64_MAX.
+  __ Fneg(s23, s22);                    // Smallest float > INT64_MIN.
+  __ Fmov(d24, 1.1);
+  __ Fmov(d25, 1.5);
+  __ Fmov(d26, -1.5);
+  __ Fmov(d27, kFP64PositiveInfinity);
+  __ Fmov(d28, kFP64NegativeInfinity);
+  __ Fmov(d29, 0x7ffffffffffffc00UL);   // Largest double < INT64_MAX.
+  __ Fneg(d30, d29);                    // Smallest double > INT64_MIN.
+
+  __ Fcvtzu(w0, s0);
+  __ Fcvtzu(w1, s1);
+  __ Fcvtzu(w2, s2);
+  __ Fcvtzu(w3, s3);
+  __ Fcvtzu(w4, s4);
+  __ Fcvtzu(w5, s5);
+  __ Fcvtzu(w6, s6);
+  __ Fcvtzu(w7, s7);
+  __ Fcvtzu(w8, d8);
+  __ Fcvtzu(w9, d9);
+  __ Fcvtzu(w10, d10);
+  __ Fcvtzu(w11, d11);
+  __ Fcvtzu(w12, d12);
+  __ Fcvtzu(w13, d13);
+  __ Fcvtzu(w14, d14);
+  __ Fcvtzu(x17, s17);
+  __ Fcvtzu(x18, s18);
+  __ Fcvtzu(x19, s19);
+  __ Fcvtzu(x20, s20);
+  __ Fcvtzu(x21, s21);
+  __ Fcvtzu(x22, s22);
+  __ Fcvtzu(x23, s23);
+  __ Fcvtzu(x24, d24);
+  __ Fcvtzu(x25, d25);
+  __ Fcvtzu(x26, d26);
+  __ Fcvtzu(x27, d27);
+  __ Fcvtzu(x28, d28);
+  __ Fcvtzu(x29, d29);
+  __ Fcvtzu(x30, d30);
+  END();
+
+  RUN();
+
+  ASSERT_EQUAL_64(1, x0);
+  ASSERT_EQUAL_64(1, x1);
+  ASSERT_EQUAL_64(1, x2);   // 1.5 truncates toward zero.
+  ASSERT_EQUAL_64(0, x3);   // Negative input saturates to 0 (unsigned).
+  ASSERT_EQUAL_64(0xffffffff, x4);
+  ASSERT_EQUAL_64(0, x5);
+  ASSERT_EQUAL_64(0x7fffff80, x6);
+  ASSERT_EQUAL_64(0, x7);
+  ASSERT_EQUAL_64(1, x8);
+  ASSERT_EQUAL_64(1, x9);
+  ASSERT_EQUAL_64(1, x10);
+  ASSERT_EQUAL_64(0, x11);
+  ASSERT_EQUAL_64(0xffffffff, x12);
+  ASSERT_EQUAL_64(0, x13);
+  ASSERT_EQUAL_64(0x7ffffffe, x14);
+  ASSERT_EQUAL_64(1, x17);
+  ASSERT_EQUAL_64(1, x18);
+  ASSERT_EQUAL_64(0x0UL, x19);
+  ASSERT_EQUAL_64(0xffffffffffffffffUL, x20);
+  ASSERT_EQUAL_64(0x0UL, x21);
+  ASSERT_EQUAL_64(0x7fffff8000000000UL, x22);
+  ASSERT_EQUAL_64(0x0UL, x23);
+  ASSERT_EQUAL_64(1, x24);
+  ASSERT_EQUAL_64(1, x25);
+  ASSERT_EQUAL_64(0x0UL, x26);
+  ASSERT_EQUAL_64(0xffffffffffffffffUL, x27);
+  ASSERT_EQUAL_64(0x0UL, x28);
+  ASSERT_EQUAL_64(0x7ffffffffffffc00UL, x29);
+  ASSERT_EQUAL_64(0x0UL, x30);
+
+  TEARDOWN();
+}
+
+
+// Test that scvtf and ucvtf can convert the 64-bit input into the expected
+// value. All possible values of 'fbits' are tested. The expected value is
+// modified accordingly in each case.
+//
+// The expected value is specified as the bit encoding of the expected double
+// produced by scvtf (expected_scvtf_bits) as well as ucvtf
+// (expected_ucvtf_bits).
+//
+// Where the input value is representable by int32_t or uint32_t, conversions
+// from W registers will also be tested.
+static void TestUScvtfHelper(uint64_t in,
+                             uint64_t expected_scvtf_bits,
+                             uint64_t expected_ucvtf_bits) {
+  uint64_t u64 = in;
+  uint32_t u32 = u64 & 0xffffffff;
+  int64_t s64 = static_cast<int64_t>(in);
+  // NOTE(review): the 0x7fffffff mask clears bit 31, so for any negative
+  // (or bit-31-set) input s64 != s32 and the W-register scvtf path below is
+  // silently skipped. A plain truncation would be
+  // static_cast<int32_t>(s64) — confirm the mask is intentional.
+  int32_t s32 = s64 & 0x7fffffff;
+
+  // Only test W-register conversions when the value is representable in
+  // 32 bits.
+  bool cvtf_s32 = (s64 == s32);
+  bool cvtf_u32 = (u64 == u32);
+
+  // Result slots: index == fbits (0..64 for X sources, 0..32 for W sources).
+  double results_scvtf_x[65];
+  double results_ucvtf_x[65];
+  double results_scvtf_w[33];
+  double results_ucvtf_w[33];
+
+  SETUP();
+  START();
+
+  __ Mov(x0, reinterpret_cast<int64_t>(results_scvtf_x));
+  __ Mov(x1, reinterpret_cast<int64_t>(results_ucvtf_x));
+  __ Mov(x2, reinterpret_cast<int64_t>(results_scvtf_w));
+  __ Mov(x3, reinterpret_cast<int64_t>(results_ucvtf_w));
+
+  __ Mov(x10, s64);
+
+  // Corrupt the top word, in case it is accidentally used during W-register
+  // conversions.
+  __ Mov(x11, 0x5555555555555555);
+  __ Bfi(x11, x10, 0, kWRegSize);
+
+  // Test integer conversions.
+  __ Scvtf(d0, x10);
+  __ Ucvtf(d1, x10);
+  __ Scvtf(d2, w11);
+  __ Ucvtf(d3, w11);
+  __ Str(d0, MemOperand(x0));
+  __ Str(d1, MemOperand(x1));
+  __ Str(d2, MemOperand(x2));
+  __ Str(d3, MemOperand(x3));
+
+  // Test all possible values of fbits.
+  for (int fbits = 1; fbits <= 32; fbits++) {
+    __ Scvtf(d0, x10, fbits);
+    __ Ucvtf(d1, x10, fbits);
+    __ Scvtf(d2, w11, fbits);
+    __ Ucvtf(d3, w11, fbits);
+    __ Str(d0, MemOperand(x0, fbits * kDRegSizeInBytes));
+    __ Str(d1, MemOperand(x1, fbits * kDRegSizeInBytes));
+    __ Str(d2, MemOperand(x2, fbits * kDRegSizeInBytes));
+    __ Str(d3, MemOperand(x3, fbits * kDRegSizeInBytes));
+  }
+
+  // Conversions from W registers can only handle fbits values <= 32, so just
+  // test conversions from X registers for 32 < fbits <= 64.
+  for (int fbits = 33; fbits <= 64; fbits++) {
+    __ Scvtf(d0, x10, fbits);
+    __ Ucvtf(d1, x10, fbits);
+    __ Str(d0, MemOperand(x0, fbits * kDRegSizeInBytes));
+    __ Str(d1, MemOperand(x1, fbits * kDRegSizeInBytes));
+  }
+
+  END();
+  RUN();
+
+  // Check the results. Scaling by 2^-fbits is exact in double arithmetic,
+  // so the expected values can be derived from the fbits == 0 encoding.
+  double expected_scvtf_base = rawbits_to_double(expected_scvtf_bits);
+  double expected_ucvtf_base = rawbits_to_double(expected_ucvtf_bits);
+
+  for (int fbits = 0; fbits <= 32; fbits++) {
+    double expected_scvtf = expected_scvtf_base / pow(2.0, fbits);
+    double expected_ucvtf = expected_ucvtf_base / pow(2.0, fbits);
+    ASSERT_EQUAL_FP64(expected_scvtf, results_scvtf_x[fbits]);
+    ASSERT_EQUAL_FP64(expected_ucvtf, results_ucvtf_x[fbits]);
+    if (cvtf_s32) ASSERT_EQUAL_FP64(expected_scvtf, results_scvtf_w[fbits]);
+    if (cvtf_u32) ASSERT_EQUAL_FP64(expected_ucvtf, results_ucvtf_w[fbits]);
+  }
+  for (int fbits = 33; fbits <= 64; fbits++) {
+    double expected_scvtf = expected_scvtf_base / pow(2.0, fbits);
+    double expected_ucvtf = expected_ucvtf_base / pow(2.0, fbits);
+    ASSERT_EQUAL_FP64(expected_scvtf, results_scvtf_x[fbits]);
+    ASSERT_EQUAL_FP64(expected_ucvtf, results_ucvtf_x[fbits]);
+  }
+
+  TEARDOWN();
+}
+
+
+// Drive TestUScvtfHelper with inputs covering exact conversions, mantissa
+// extremities, negative values, round-to-nearest-even cases, and values
+// whose rounded result exceeds the input's own range.
+TEST(scvtf_ucvtf_double) {
+  INIT_V8();
+  // Simple conversions of positive numbers which require no rounding; the
+  // results should not depend on the rounding mode, and ucvtf and scvtf
+  // should produce the same result.
+  TestUScvtfHelper(0x0000000000000000, 0x0000000000000000, 0x0000000000000000);
+  TestUScvtfHelper(0x0000000000000001, 0x3ff0000000000000, 0x3ff0000000000000);
+  TestUScvtfHelper(0x0000000040000000, 0x41d0000000000000, 0x41d0000000000000);
+  TestUScvtfHelper(0x0000000100000000, 0x41f0000000000000, 0x41f0000000000000);
+  TestUScvtfHelper(0x4000000000000000, 0x43d0000000000000, 0x43d0000000000000);
+  // Test mantissa extremities.
+  TestUScvtfHelper(0x4000000000000400, 0x43d0000000000001, 0x43d0000000000001);
+  // The largest int32_t that fits in a double.
+  TestUScvtfHelper(0x000000007fffffff, 0x41dfffffffc00000, 0x41dfffffffc00000);
+  // Values that would be negative if treated as an int32_t.
+  TestUScvtfHelper(0x00000000ffffffff, 0x41efffffffe00000, 0x41efffffffe00000);
+  TestUScvtfHelper(0x0000000080000000, 0x41e0000000000000, 0x41e0000000000000);
+  TestUScvtfHelper(0x0000000080000001, 0x41e0000000200000, 0x41e0000000200000);
+  // The largest int64_t that fits in a double.
+  TestUScvtfHelper(0x7ffffffffffffc00, 0x43dfffffffffffff, 0x43dfffffffffffff);
+  // Check for bit pattern reproduction.
+  TestUScvtfHelper(0x0123456789abcde0, 0x43723456789abcde, 0x43723456789abcde);
+  TestUScvtfHelper(0x0000000012345678, 0x41b2345678000000, 0x41b2345678000000);
+
+  // Simple conversions of negative int64_t values. These require no rounding,
+  // and the results should not depend on the rounding mode.
+  TestUScvtfHelper(0xffffffffc0000000, 0xc1d0000000000000, 0x43effffffff80000);
+  TestUScvtfHelper(0xffffffff00000000, 0xc1f0000000000000, 0x43efffffffe00000);
+  TestUScvtfHelper(0xc000000000000000, 0xc3d0000000000000, 0x43e8000000000000);
+
+  // Conversions which require rounding (round to nearest, ties to even).
+  TestUScvtfHelper(0x1000000000000000, 0x43b0000000000000, 0x43b0000000000000);
+  TestUScvtfHelper(0x1000000000000001, 0x43b0000000000000, 0x43b0000000000000);
+  TestUScvtfHelper(0x1000000000000080, 0x43b0000000000000, 0x43b0000000000000);
+  TestUScvtfHelper(0x1000000000000081, 0x43b0000000000001, 0x43b0000000000001);
+  TestUScvtfHelper(0x1000000000000100, 0x43b0000000000001, 0x43b0000000000001);
+  TestUScvtfHelper(0x1000000000000101, 0x43b0000000000001, 0x43b0000000000001);
+  TestUScvtfHelper(0x1000000000000180, 0x43b0000000000002, 0x43b0000000000002);
+  TestUScvtfHelper(0x1000000000000181, 0x43b0000000000002, 0x43b0000000000002);
+  TestUScvtfHelper(0x1000000000000200, 0x43b0000000000002, 0x43b0000000000002);
+  TestUScvtfHelper(0x1000000000000201, 0x43b0000000000002, 0x43b0000000000002);
+  TestUScvtfHelper(0x1000000000000280, 0x43b0000000000002, 0x43b0000000000002);
+  TestUScvtfHelper(0x1000000000000281, 0x43b0000000000003, 0x43b0000000000003);
+  TestUScvtfHelper(0x1000000000000300, 0x43b0000000000003, 0x43b0000000000003);
+  // Check rounding of negative int64_t values (and large uint64_t values).
+  TestUScvtfHelper(0x8000000000000000, 0xc3e0000000000000, 0x43e0000000000000);
+  TestUScvtfHelper(0x8000000000000001, 0xc3e0000000000000, 0x43e0000000000000);
+  TestUScvtfHelper(0x8000000000000200, 0xc3e0000000000000, 0x43e0000000000000);
+  TestUScvtfHelper(0x8000000000000201, 0xc3dfffffffffffff, 0x43e0000000000000);
+  TestUScvtfHelper(0x8000000000000400, 0xc3dfffffffffffff, 0x43e0000000000000);
+  TestUScvtfHelper(0x8000000000000401, 0xc3dfffffffffffff, 0x43e0000000000001);
+  TestUScvtfHelper(0x8000000000000600, 0xc3dffffffffffffe, 0x43e0000000000001);
+  TestUScvtfHelper(0x8000000000000601, 0xc3dffffffffffffe, 0x43e0000000000001);
+  TestUScvtfHelper(0x8000000000000800, 0xc3dffffffffffffe, 0x43e0000000000001);
+  TestUScvtfHelper(0x8000000000000801, 0xc3dffffffffffffe, 0x43e0000000000001);
+  TestUScvtfHelper(0x8000000000000a00, 0xc3dffffffffffffe, 0x43e0000000000001);
+  TestUScvtfHelper(0x8000000000000a01, 0xc3dffffffffffffd, 0x43e0000000000001);
+  TestUScvtfHelper(0x8000000000000c00, 0xc3dffffffffffffd, 0x43e0000000000002);
+  // Round up to produce a result that's too big for the input to represent.
+  TestUScvtfHelper(0x7ffffffffffffe00, 0x43e0000000000000, 0x43e0000000000000);
+  TestUScvtfHelper(0x7fffffffffffffff, 0x43e0000000000000, 0x43e0000000000000);
+  TestUScvtfHelper(0xfffffffffffffc00, 0xc090000000000000, 0x43f0000000000000);
+  TestUScvtfHelper(0xffffffffffffffff, 0xbff0000000000000, 0x43f0000000000000);
+}
+
+
+// The same as TestUScvtfHelper, but convert to floats.
+//
+// Fix: the two result-checking loops below previously contained stray
+// 'break' statements, so only the fbits == 0 result was ever verified and
+// the 33..64 loop was a complete no-op. The breaks have been removed so
+// every fbits value is checked, matching the documented contract ("All
+// possible values of 'fbits' are tested") and the double-precision helper.
+static void TestUScvtf32Helper(uint64_t in,
+                               uint32_t expected_scvtf_bits,
+                               uint32_t expected_ucvtf_bits) {
+  uint64_t u64 = in;
+  uint32_t u32 = u64 & 0xffffffff;
+  int64_t s64 = static_cast<int64_t>(in);
+  // NOTE(review): as in TestUScvtfHelper, this mask clears bit 31, so
+  // negative inputs never exercise the W-register scvtf path — confirm.
+  int32_t s32 = s64 & 0x7fffffff;
+
+  // Only test W-register conversions when the value is representable in
+  // 32 bits.
+  bool cvtf_s32 = (s64 == s32);
+  bool cvtf_u32 = (u64 == u32);
+
+  // Result slots: index == fbits (0..64 for X sources, 0..32 for W sources).
+  float results_scvtf_x[65];
+  float results_ucvtf_x[65];
+  float results_scvtf_w[33];
+  float results_ucvtf_w[33];
+
+  SETUP();
+  START();
+
+  __ Mov(x0, reinterpret_cast<int64_t>(results_scvtf_x));
+  __ Mov(x1, reinterpret_cast<int64_t>(results_ucvtf_x));
+  __ Mov(x2, reinterpret_cast<int64_t>(results_scvtf_w));
+  __ Mov(x3, reinterpret_cast<int64_t>(results_ucvtf_w));
+
+  __ Mov(x10, s64);
+
+  // Corrupt the top word, in case it is accidentally used during W-register
+  // conversions.
+  __ Mov(x11, 0x5555555555555555);
+  __ Bfi(x11, x10, 0, kWRegSize);
+
+  // Test integer conversions.
+  __ Scvtf(s0, x10);
+  __ Ucvtf(s1, x10);
+  __ Scvtf(s2, w11);
+  __ Ucvtf(s3, w11);
+  __ Str(s0, MemOperand(x0));
+  __ Str(s1, MemOperand(x1));
+  __ Str(s2, MemOperand(x2));
+  __ Str(s3, MemOperand(x3));
+
+  // Test all possible values of fbits.
+  for (int fbits = 1; fbits <= 32; fbits++) {
+    __ Scvtf(s0, x10, fbits);
+    __ Ucvtf(s1, x10, fbits);
+    __ Scvtf(s2, w11, fbits);
+    __ Ucvtf(s3, w11, fbits);
+    __ Str(s0, MemOperand(x0, fbits * kSRegSizeInBytes));
+    __ Str(s1, MemOperand(x1, fbits * kSRegSizeInBytes));
+    __ Str(s2, MemOperand(x2, fbits * kSRegSizeInBytes));
+    __ Str(s3, MemOperand(x3, fbits * kSRegSizeInBytes));
+  }
+
+  // Conversions from W registers can only handle fbits values <= 32, so just
+  // test conversions from X registers for 32 < fbits <= 64.
+  for (int fbits = 33; fbits <= 64; fbits++) {
+    __ Scvtf(s0, x10, fbits);
+    __ Ucvtf(s1, x10, fbits);
+    __ Str(s0, MemOperand(x0, fbits * kSRegSizeInBytes));
+    __ Str(s1, MemOperand(x1, fbits * kSRegSizeInBytes));
+  }
+
+  END();
+  RUN();
+
+  // Check the results. Scaling by 2^-fbits is exact, so the expected values
+  // can be derived from the fbits == 0 encoding.
+  float expected_scvtf_base = rawbits_to_float(expected_scvtf_bits);
+  float expected_ucvtf_base = rawbits_to_float(expected_ucvtf_bits);
+
+  for (int fbits = 0; fbits <= 32; fbits++) {
+    float expected_scvtf = expected_scvtf_base / powf(2, fbits);
+    float expected_ucvtf = expected_ucvtf_base / powf(2, fbits);
+    ASSERT_EQUAL_FP32(expected_scvtf, results_scvtf_x[fbits]);
+    ASSERT_EQUAL_FP32(expected_ucvtf, results_ucvtf_x[fbits]);
+    if (cvtf_s32) ASSERT_EQUAL_FP32(expected_scvtf, results_scvtf_w[fbits]);
+    if (cvtf_u32) ASSERT_EQUAL_FP32(expected_ucvtf, results_ucvtf_w[fbits]);
+  }
+  for (int fbits = 33; fbits <= 64; fbits++) {
+    float expected_scvtf = expected_scvtf_base / powf(2, fbits);
+    float expected_ucvtf = expected_ucvtf_base / powf(2, fbits);
+    ASSERT_EQUAL_FP32(expected_scvtf, results_scvtf_x[fbits]);
+    ASSERT_EQUAL_FP32(expected_ucvtf, results_ucvtf_x[fbits]);
+  }
+
+  TEARDOWN();
+}
+
+
+// Drive TestUScvtf32Helper with inputs covering exact conversions, mantissa
+// extremities, negative values, round-to-nearest-even cases, and values
+// whose rounded result exceeds the input's own range.
+TEST(scvtf_ucvtf_float) {
+  INIT_V8();
+  // Simple conversions of positive numbers which require no rounding; the
+  // results should not depend on the rounding mode, and ucvtf and scvtf
+  // should produce the same result.
+  TestUScvtf32Helper(0x0000000000000000, 0x00000000, 0x00000000);
+  TestUScvtf32Helper(0x0000000000000001, 0x3f800000, 0x3f800000);
+  TestUScvtf32Helper(0x0000000040000000, 0x4e800000, 0x4e800000);
+  TestUScvtf32Helper(0x0000000100000000, 0x4f800000, 0x4f800000);
+  TestUScvtf32Helper(0x4000000000000000, 0x5e800000, 0x5e800000);
+  // Test mantissa extremities.
+  TestUScvtf32Helper(0x0000000000800001, 0x4b000001, 0x4b000001);
+  TestUScvtf32Helper(0x4000008000000000, 0x5e800001, 0x5e800001);
+  // The largest int32_t that fits in a float.
+  TestUScvtf32Helper(0x000000007fffff80, 0x4effffff, 0x4effffff);
+  // Values that would be negative if treated as an int32_t.
+  TestUScvtf32Helper(0x00000000ffffff00, 0x4f7fffff, 0x4f7fffff);
+  TestUScvtf32Helper(0x0000000080000000, 0x4f000000, 0x4f000000);
+  TestUScvtf32Helper(0x0000000080000100, 0x4f000001, 0x4f000001);
+  // The largest int64_t that fits in a float.
+  TestUScvtf32Helper(0x7fffff8000000000, 0x5effffff, 0x5effffff);
+  // Check for bit pattern reproduction.
+  TestUScvtf32Helper(0x0000000000876543, 0x4b076543, 0x4b076543);
+
+  // Simple conversions of negative int64_t values. These require no rounding,
+  // and the results should not depend on the rounding mode.
+  TestUScvtf32Helper(0xfffffc0000000000, 0xd4800000, 0x5f7ffffc);
+  TestUScvtf32Helper(0xc000000000000000, 0xde800000, 0x5f400000);
+
+  // Conversions which require rounding (round to nearest, ties to even).
+  TestUScvtf32Helper(0x0000800000000000, 0x57000000, 0x57000000);
+  TestUScvtf32Helper(0x0000800000000001, 0x57000000, 0x57000000);
+  TestUScvtf32Helper(0x0000800000800000, 0x57000000, 0x57000000);
+  TestUScvtf32Helper(0x0000800000800001, 0x57000001, 0x57000001);
+  TestUScvtf32Helper(0x0000800001000000, 0x57000001, 0x57000001);
+  TestUScvtf32Helper(0x0000800001000001, 0x57000001, 0x57000001);
+  TestUScvtf32Helper(0x0000800001800000, 0x57000002, 0x57000002);
+  TestUScvtf32Helper(0x0000800001800001, 0x57000002, 0x57000002);
+  TestUScvtf32Helper(0x0000800002000000, 0x57000002, 0x57000002);
+  TestUScvtf32Helper(0x0000800002000001, 0x57000002, 0x57000002);
+  TestUScvtf32Helper(0x0000800002800000, 0x57000002, 0x57000002);
+  TestUScvtf32Helper(0x0000800002800001, 0x57000003, 0x57000003);
+  TestUScvtf32Helper(0x0000800003000000, 0x57000003, 0x57000003);
+  // Check rounding of negative int64_t values (and large uint64_t values).
+  TestUScvtf32Helper(0x8000000000000000, 0xdf000000, 0x5f000000);
+  TestUScvtf32Helper(0x8000000000000001, 0xdf000000, 0x5f000000);
+  TestUScvtf32Helper(0x8000004000000000, 0xdf000000, 0x5f000000);
+  TestUScvtf32Helper(0x8000004000000001, 0xdeffffff, 0x5f000000);
+  TestUScvtf32Helper(0x8000008000000000, 0xdeffffff, 0x5f000000);
+  TestUScvtf32Helper(0x8000008000000001, 0xdeffffff, 0x5f000001);
+  TestUScvtf32Helper(0x800000c000000000, 0xdefffffe, 0x5f000001);
+  TestUScvtf32Helper(0x800000c000000001, 0xdefffffe, 0x5f000001);
+  TestUScvtf32Helper(0x8000010000000000, 0xdefffffe, 0x5f000001);
+  TestUScvtf32Helper(0x8000010000000001, 0xdefffffe, 0x5f000001);
+  TestUScvtf32Helper(0x8000014000000000, 0xdefffffe, 0x5f000001);
+  TestUScvtf32Helper(0x8000014000000001, 0xdefffffd, 0x5f000001);
+  TestUScvtf32Helper(0x8000018000000000, 0xdefffffd, 0x5f000002);
+  // Round up to produce a result that's too big for the input to represent.
+  TestUScvtf32Helper(0x000000007fffffc0, 0x4f000000, 0x4f000000);
+  TestUScvtf32Helper(0x000000007fffffff, 0x4f000000, 0x4f000000);
+  TestUScvtf32Helper(0x00000000ffffff80, 0x4f800000, 0x4f800000);
+  TestUScvtf32Helper(0x00000000ffffffff, 0x4f800000, 0x4f800000);
+  TestUScvtf32Helper(0x7fffffc000000000, 0x5f000000, 0x5f000000);
+  TestUScvtf32Helper(0x7fffffffffffffff, 0x5f000000, 0x5f000000);
+  TestUScvtf32Helper(0xffffff8000000000, 0xd3000000, 0x5f800000);
+  TestUScvtf32Helper(0xffffffffffffffff, 0xbf800000, 0x5f800000);
+}
+
+
+// Check that Mrs can read the NZCV and FPCR system registers: each Cmp/Adds
+// below establishes a known flag combination, which Mrs captures into x3-x5.
+TEST(system_mrs) {
+  INIT_V8();
+  SETUP();
+
+  START();
+  __ Mov(w0, 0);
+  __ Mov(w1, 1);
+  __ Mov(w2, 0x80000000);
+
+  // Set the Z and C flags.
+  __ Cmp(w0, w0);
+  __ Mrs(x3, NZCV);
+
+  // Set the N flag.
+  __ Cmp(w0, w1);
+  __ Mrs(x4, NZCV);
+
+  // Set the Z, C and V flags.
+  __ Adds(w0, w2, w2);
+  __ Mrs(x5, NZCV);
+
+  // Read the default FPCR.
+  __ Mrs(x6, FPCR);
+  END();
+
+  RUN();
+
+  // NZCV
+  ASSERT_EQUAL_32(ZCFlag, w3);
+  ASSERT_EQUAL_32(NFlag, w4);
+  ASSERT_EQUAL_32(ZCVFlag, w5);
+
+  // FPCR
+  // The default FPCR on Linux-based platforms is 0.
+  ASSERT_EQUAL_32(0, w6);
+
+  TEARDOWN();
+}
+
+
+// Check that Msr can write the NZCV and FPCR system registers, that writes to
+// NZCV fully overwrite flags set by earlier instructions, and that undefined
+// FPCR bits read back as zero.
+TEST(system_msr) {
+  INIT_V8();
+  // All FPCR fields that must be implemented: AHP, DN, FZ, RMode
+  const uint64_t fpcr_core = 0x07c00000;
+
+  // All FPCR fields (including fields which may be read-as-zero):
+  //  Stride, Len
+  //  IDE, IXE, UFE, OFE, DZE, IOE
+  const uint64_t fpcr_all = fpcr_core | 0x00379f00;
+
+  SETUP();
+
+  START();
+  __ Mov(w0, 0);
+  __ Mov(w1, 0x7fffffff);
+
+  // x7 counts how many of the expected conditions held; see the asserts below.
+  __ Mov(x7, 0);
+
+  __ Mov(x10, NVFlag);
+  __ Cmp(w0, w0);     // Set Z and C.
+  __ Msr(NZCV, x10);  // Set N and V.
+  // The Msr should have overwritten every flag set by the Cmp.
+  __ Cinc(x7, x7, mi);  // N
+  __ Cinc(x7, x7, ne);  // !Z
+  __ Cinc(x7, x7, lo);  // !C
+  __ Cinc(x7, x7, vs);  // V
+
+  __ Mov(x10, ZCFlag);
+  __ Cmn(w1, w1);     // Set N and V.
+  __ Msr(NZCV, x10);  // Set Z and C.
+  // The Msr should have overwritten every flag set by the Cmn.
+  __ Cinc(x7, x7, pl);  // !N
+  __ Cinc(x7, x7, eq);  // Z
+  __ Cinc(x7, x7, hs);  // C
+  __ Cinc(x7, x7, vc);  // !V
+
+  // All core FPCR fields must be writable.
+  __ Mov(x8, fpcr_core);
+  __ Msr(FPCR, x8);
+  __ Mrs(x8, FPCR);
+
+  // All FPCR fields, including optional ones. This part of the test doesn't
+  // achieve much other than ensuring that supported fields can be cleared by
+  // the next test.
+  __ Mov(x9, fpcr_all);
+  __ Msr(FPCR, x9);
+  __ Mrs(x9, FPCR);
+  __ And(x9, x9, fpcr_core);
+
+  // The undefined bits must ignore writes.
+  // It's conceivable that a future version of the architecture could use these
+  // fields (making this test fail), but in the meantime this is a useful test
+  // for the simulator.
+  __ Mov(x10, ~fpcr_all);
+  __ Msr(FPCR, x10);
+  __ Mrs(x10, FPCR);
+
+  END();
+
+  RUN();
+
+  // We should have incremented x7 (from 0) exactly 8 times.
+  ASSERT_EQUAL_64(8, x7);
+
+  ASSERT_EQUAL_64(fpcr_core, x8);
+  ASSERT_EQUAL_64(fpcr_core, x9);
+  ASSERT_EQUAL_64(0, x10);
+
+  TEARDOWN();
+}
+
+
+// Check that Nop leaves every register and the NZCV flags untouched, by
+// comparing a full register dump taken immediately before the Nop.
+TEST(system_nop) {
+  INIT_V8();
+  SETUP();
+  RegisterDump before;
+
+  START();
+  before.Dump(&masm);
+  __ Nop();
+  END();
+
+  RUN();
+
+  ASSERT_EQUAL_REGISTERS(before);
+  ASSERT_EQUAL_NZCV(before.flags_nzcv());
+
+  TEARDOWN();
+}
+
+
+// Check that the raw (non-flag-setting) arithmetic/logical instructions with
+// xzr as destination are true NOPs: no register (including csp) may change.
+// These encodings have sibling forms that target the stack pointer instead.
+TEST(zero_dest) {
+  INIT_V8();
+  SETUP();
+  RegisterDump before;
+
+  START();
+  // Preserve the system stack pointer, in case we clobber it.
+  __ Mov(x30, csp);
+  // Initialize the other registers used in this test.
+  uint64_t literal_base = 0x0100001000100101UL;
+  __ Mov(x0, 0);
+  __ Mov(x1, literal_base);
+  // Fill x2..x29 with distinct, recognisable values.
+  // NOTE(review): assumes x30.code() == 30 — confirm against the register
+  // definitions.
+  for (unsigned i = 2; i < x30.code(); i++) {
+    __ Add(Register::XRegFromCode(i), Register::XRegFromCode(i-1), x1);
+  }
+  before.Dump(&masm);
+
+  // All of these instructions should be NOPs in these forms, but have
+  // alternate forms which can write into the stack pointer.
+  __ add(xzr, x0, x1);
+  __ add(xzr, x1, xzr);
+  __ add(xzr, xzr, x1);
+
+  __ and_(xzr, x0, x2);
+  __ and_(xzr, x2, xzr);
+  __ and_(xzr, xzr, x2);
+
+  __ bic(xzr, x0, x3);
+  __ bic(xzr, x3, xzr);
+  __ bic(xzr, xzr, x3);
+
+  __ eon(xzr, x0, x4);
+  __ eon(xzr, x4, xzr);
+  __ eon(xzr, xzr, x4);
+
+  __ eor(xzr, x0, x5);
+  __ eor(xzr, x5, xzr);
+  __ eor(xzr, xzr, x5);
+
+  __ orr(xzr, x0, x6);
+  __ orr(xzr, x6, xzr);
+  __ orr(xzr, xzr, x6);
+
+  __ sub(xzr, x0, x7);
+  __ sub(xzr, x7, xzr);
+  __ sub(xzr, xzr, x7);
+
+  // Swap the saved system stack pointer with the real one. If csp was written
+  // during the test, it will show up in x30. This is done because the test
+  // framework assumes that csp will be valid at the end of the test.
+  __ Mov(x29, x30);
+  __ Mov(x30, csp);
+  __ Mov(csp, x29);
+  // We used x29 as a scratch register, so reset it to make sure it doesn't
+  // trigger a test failure.
+  __ Add(x29, x28, x1);
+  END();
+
+  RUN();
+
+  ASSERT_EQUAL_REGISTERS(before);
+  ASSERT_EQUAL_NZCV(before.flags_nzcv());
+
+  TEARDOWN();
+}
+
+
+// Check that the flag-setting instructions (adds/ands/bics/subs) with xzr as
+// destination write only the flags: no register (including csp) may change.
+// These encodings have sibling forms that target the stack pointer instead.
+TEST(zero_dest_setflags) {
+  INIT_V8();
+  SETUP();
+  RegisterDump before;
+
+  START();
+  // Preserve the system stack pointer, in case we clobber it.
+  __ Mov(x30, csp);
+  // Initialize the other registers used in this test.
+  uint64_t literal_base = 0x0100001000100101UL;
+  __ Mov(x0, 0);
+  __ Mov(x1, literal_base);
+  // Fill x2..x29 with distinct, recognisable values.
+  for (int i = 2; i < 30; i++) {
+    __ Add(Register::XRegFromCode(i), Register::XRegFromCode(i-1), x1);
+  }
+  before.Dump(&masm);
+
+  // All of these instructions should only write to the flags in these forms,
+  // but have alternate forms which can write into the stack pointer.
+  __ adds(xzr, x0, Operand(x1, UXTX));
+  __ adds(xzr, x1, Operand(xzr, UXTX));
+  __ adds(xzr, x1, 1234);
+  __ adds(xzr, x0, x1);
+  __ adds(xzr, x1, xzr);
+  __ adds(xzr, xzr, x1);
+
+  __ ands(xzr, x2, ~0xf);
+  __ ands(xzr, xzr, ~0xf);
+  __ ands(xzr, x0, x2);
+  __ ands(xzr, x2, xzr);
+  __ ands(xzr, xzr, x2);
+
+  __ bics(xzr, x3, ~0xf);
+  __ bics(xzr, xzr, ~0xf);
+  __ bics(xzr, x0, x3);
+  __ bics(xzr, x3, xzr);
+  __ bics(xzr, xzr, x3);
+
+  __ subs(xzr, x0, Operand(x3, UXTX));
+  __ subs(xzr, x3, Operand(xzr, UXTX));
+  __ subs(xzr, x3, 1234);
+  __ subs(xzr, x0, x3);
+  __ subs(xzr, x3, xzr);
+  __ subs(xzr, xzr, x3);
+
+  // Swap the saved system stack pointer with the real one. If csp was written
+  // during the test, it will show up in x30. This is done because the test
+  // framework assumes that csp will be valid at the end of the test.
+  __ Mov(x29, x30);
+  __ Mov(x30, csp);
+  __ Mov(csp, x29);
+  // We used x29 as a scratch register, so reset it to make sure it doesn't
+  // trigger a test failure.
+  __ Add(x29, x28, x1);
+  END();
+
+  RUN();
+
+  // The NZCV flags are deliberately not checked: these instructions do write
+  // the flags.
+  ASSERT_EQUAL_REGISTERS(before);
+
+  TEARDOWN();
+}
+
+
+// Check the Bit() encoding of registers: one bit per register code, identical
+// for the X and W aliases of the same register, and distinct for csp vs xzr.
+TEST(register_bit) {
+  // No code generation takes place in this test, so no need to setup and
+  // teardown.
+
+  // Simple tests.
+  CHECK(x0.Bit() == (1UL << 0));
+  CHECK(x1.Bit() == (1UL << 1));
+  CHECK(x10.Bit() == (1UL << 10));
+
+  // AAPCS64 definitions.
+  CHECK(fp.Bit() == (1UL << kFramePointerRegCode));
+  CHECK(lr.Bit() == (1UL << kLinkRegCode));
+
+  // Fixed (hardware) definitions.
+  CHECK(xzr.Bit() == (1UL << kZeroRegCode));
+
+  // Internal ABI definitions.
+  CHECK(jssp.Bit() == (1UL << kJSSPCode));
+  CHECK(csp.Bit() == (1UL << kSPRegInternalCode));
+  CHECK(csp.Bit() != xzr.Bit());
+
+  // xn.Bit() == wn.Bit() at all times, for the same n.
+  CHECK(x0.Bit() == w0.Bit());
+  CHECK(x1.Bit() == w1.Bit());
+  CHECK(x10.Bit() == w10.Bit());
+  CHECK(jssp.Bit() == wjssp.Bit());
+  CHECK(xzr.Bit() == wzr.Bit());
+  CHECK(csp.Bit() == wcsp.Bit());
+}
+
+
+// Check that SetStackPointer/StackPointer round-trip through each candidate
+// stack pointer register (csp, an ordinary register, jssp).
+TEST(stack_pointer_override) {
+  // This test generates some stack maintenance code, but the test only checks
+  // the reported state.
+  INIT_V8();
+  SETUP();
+  START();
+
+  // The default stack pointer in V8 is jssp, but for compatibility with VIXL,
+  // the test framework sets it to csp before calling the test.
+  CHECK(csp.Is(__ StackPointer()));
+  __ SetStackPointer(x0);
+  CHECK(x0.Is(__ StackPointer()));
+  __ SetStackPointer(jssp);
+  CHECK(jssp.Is(__ StackPointer()));
+  __ SetStackPointer(csp);
+  CHECK(csp.Is(__ StackPointer()));
+
+  END();
+  RUN();
+  TEARDOWN();
+}
+
+
+// Check basic Peek/Poke round-trips at aligned stack offsets, for both X
+// (8-byte) and W (4-byte) registers.
+TEST(peek_poke_simple) {
+  INIT_V8();
+  SETUP();
+  START();
+
+  static const RegList x0_to_x3 = x0.Bit() | x1.Bit() | x2.Bit() | x3.Bit();
+  static const RegList x10_to_x13 = x10.Bit() | x11.Bit() |
+                                    x12.Bit() | x13.Bit();
+
+  // The literal base is chosen to have two useful properties:
+  //  * When multiplied by small values (such as a register index), this value
+  //    is clearly readable in the result.
+  //  * The value is not formed from repeating fixed-size smaller values, so it
+  //    can be used to detect endianness-related errors.
+  uint64_t literal_base = 0x0100001000100101UL;
+
+  // Initialize the registers.
+  __ Mov(x0, literal_base);
+  __ Add(x1, x0, x0);
+  __ Add(x2, x1, x0);
+  __ Add(x3, x2, x0);
+
+  __ Claim(4);
+
+  // Simple exchange.
+  // After this test:
+  //  x0-x3 should be unchanged.
+  //  w10-w13 should contain the lower words of x0-x3.
+  __ Poke(x0, 0);
+  __ Poke(x1, 8);
+  __ Poke(x2, 16);
+  __ Poke(x3, 24);
+  Clobber(&masm, x0_to_x3);
+  __ Peek(x0, 0);
+  __ Peek(x1, 8);
+  __ Peek(x2, 16);
+  __ Peek(x3, 24);
+
+  __ Poke(w0, 0);
+  __ Poke(w1, 4);
+  __ Poke(w2, 8);
+  __ Poke(w3, 12);
+  Clobber(&masm, x10_to_x13);
+  __ Peek(w10, 0);
+  __ Peek(w11, 4);
+  __ Peek(w12, 8);
+  __ Peek(w13, 12);
+
+  __ Drop(4);
+
+  END();
+  RUN();
+
+  ASSERT_EQUAL_64(literal_base * 1, x0);
+  ASSERT_EQUAL_64(literal_base * 2, x1);
+  ASSERT_EQUAL_64(literal_base * 3, x2);
+  ASSERT_EQUAL_64(literal_base * 4, x3);
+
+  ASSERT_EQUAL_64((literal_base * 1) & 0xffffffff, x10);
+  ASSERT_EQUAL_64((literal_base * 2) & 0xffffffff, x11);
+  ASSERT_EQUAL_64((literal_base * 3) & 0xffffffff, x12);
+  ASSERT_EQUAL_64((literal_base * 4) & 0xffffffff, x13);
+
+  TEARDOWN();
+}
+
+
+// Check Peek/Poke round-trips at every unaligned stack offset (1-7), for both
+// X (8-byte) and W (4-byte) registers.
+TEST(peek_poke_unaligned) {
+  INIT_V8();
+  SETUP();
+  START();
+
+  // The literal base is chosen to have two useful properties:
+  //  * When multiplied by small values (such as a register index), this value
+  //    is clearly readable in the result.
+  //  * The value is not formed from repeating fixed-size smaller values, so it
+  //    can be used to detect endianness-related errors.
+  uint64_t literal_base = 0x0100001000100101UL;
+
+  // Initialize the registers.
+  __ Mov(x0, literal_base);
+  __ Add(x1, x0, x0);
+  __ Add(x2, x1, x0);
+  __ Add(x3, x2, x0);
+  __ Add(x4, x3, x0);
+  __ Add(x5, x4, x0);
+  __ Add(x6, x5, x0);
+
+  __ Claim(4);
+
+  // Unaligned exchanges.
+  // After this test:
+  //  x0-x6 should be unchanged.
+  //  w10-w12 should contain the lower words of x0-x2.
+  __ Poke(x0, 1);
+  Clobber(&masm, x0.Bit());
+  __ Peek(x0, 1);
+  __ Poke(x1, 2);
+  Clobber(&masm, x1.Bit());
+  __ Peek(x1, 2);
+  __ Poke(x2, 3);
+  Clobber(&masm, x2.Bit());
+  __ Peek(x2, 3);
+  __ Poke(x3, 4);
+  Clobber(&masm, x3.Bit());
+  __ Peek(x3, 4);
+  __ Poke(x4, 5);
+  Clobber(&masm, x4.Bit());
+  __ Peek(x4, 5);
+  __ Poke(x5, 6);
+  Clobber(&masm, x5.Bit());
+  __ Peek(x5, 6);
+  __ Poke(x6, 7);
+  Clobber(&masm, x6.Bit());
+  __ Peek(x6, 7);
+
+  __ Poke(w0, 1);
+  Clobber(&masm, w10.Bit());
+  __ Peek(w10, 1);
+  __ Poke(w1, 2);
+  Clobber(&masm, w11.Bit());
+  __ Peek(w11, 2);
+  __ Poke(w2, 3);
+  Clobber(&masm, w12.Bit());
+  __ Peek(w12, 3);
+
+  __ Drop(4);
+
+  END();
+  RUN();
+
+  ASSERT_EQUAL_64(literal_base * 1, x0);
+  ASSERT_EQUAL_64(literal_base * 2, x1);
+  ASSERT_EQUAL_64(literal_base * 3, x2);
+  ASSERT_EQUAL_64(literal_base * 4, x3);
+  ASSERT_EQUAL_64(literal_base * 5, x4);
+  ASSERT_EQUAL_64(literal_base * 6, x5);
+  ASSERT_EQUAL_64(literal_base * 7, x6);
+
+  ASSERT_EQUAL_64((literal_base * 1) & 0xffffffff, x10);
+  ASSERT_EQUAL_64((literal_base * 2) & 0xffffffff, x11);
+  ASSERT_EQUAL_64((literal_base * 3) & 0xffffffff, x12);
+
+  TEARDOWN();
+}
+
+
+// Check the byte ordering of Peek/Poke by writing a value twice and reading
+// back from a half-value offset, so the two halves of the value swap places.
+TEST(peek_poke_endianness) {
+  INIT_V8();
+  SETUP();
+  START();
+
+  // The literal base is chosen to have two useful properties:
+  //  * When multiplied by small values (such as a register index), this value
+  //    is clearly readable in the result.
+  //  * The value is not formed from repeating fixed-size smaller values, so it
+  //    can be used to detect endianness-related errors.
+  uint64_t literal_base = 0x0100001000100101UL;
+
+  // Initialize the registers.
+  __ Mov(x0, literal_base);
+  __ Add(x1, x0, x0);
+
+  __ Claim(4);
+
+  // Endianness tests.
+  // After this section:
+  //  x4 should match x0[31:0]:x0[63:32]
+  //  w5 should match w1[15:0]:w1[31:16]
+  __ Poke(x0, 0);
+  __ Poke(x0, 8);
+  __ Peek(x4, 4);
+
+  __ Poke(w1, 0);
+  __ Poke(w1, 4);
+  __ Peek(w5, 2);
+
+  __ Drop(4);
+
+  END();
+  RUN();
+
+  uint64_t x0_expected = literal_base * 1;
+  uint64_t x1_expected = literal_base * 2;
+  // Reading at offset 4 (resp. 2) straddles the two copies, so the expected
+  // result is the original value with its halves (resp. half-words) swapped.
+  uint64_t x4_expected = (x0_expected << 32) | (x0_expected >> 32);
+  uint64_t x5_expected = ((x1_expected << 16) & 0xffff0000) |
+                         ((x1_expected >> 16) & 0x0000ffff);
+
+  ASSERT_EQUAL_64(x0_expected, x0);
+  ASSERT_EQUAL_64(x1_expected, x1);
+  ASSERT_EQUAL_64(x4_expected, x4);
+  ASSERT_EQUAL_64(x5_expected, x5);
+
+  TEARDOWN();
+}
+
+
+// Check Peek/Poke interleaved with Claim/Drop/Push/Pop and with a temporary
+// change of stack pointer (csp -> x4 -> csp).
+TEST(peek_poke_mixed) {
+  INIT_V8();
+  SETUP();
+  START();
+
+  // The literal base is chosen to have two useful properties:
+  //  * When multiplied by small values (such as a register index), this value
+  //    is clearly readable in the result.
+  //  * The value is not formed from repeating fixed-size smaller values, so it
+  //    can be used to detect endianness-related errors.
+  uint64_t literal_base = 0x0100001000100101UL;
+
+  // Initialize the registers.
+  __ Mov(x0, literal_base);
+  __ Add(x1, x0, x0);
+  __ Add(x2, x1, x0);
+  __ Add(x3, x2, x0);
+
+  __ Claim(4);
+
+  // Mix with other stack operations.
+  // After this section:
+  //  x0-x3 should be unchanged.
+  //  x6 should match x1[31:0]:x0[63:32]
+  //  w7 should match x1[15:0]:x0[63:48]
+  __ Poke(x1, 8);
+  __ Poke(x0, 0);
+  {
+    // Temporarily use x4 as the stack pointer so unaligned csp accesses are
+    // avoided while peeking/poking at odd offsets.
+    ASSERT(__ StackPointer().Is(csp));
+    __ Mov(x4, __ StackPointer());
+    __ SetStackPointer(x4);
+
+    __ Poke(wzr, 0);    // Clobber the space we're about to drop.
+    __ Drop(1, kWRegSizeInBytes);
+    __ Peek(x6, 0);
+    __ Claim(1);
+    __ Peek(w7, 10);
+    __ Poke(x3, 28);
+    __ Poke(xzr, 0);    // Clobber the space we're about to drop.
+    __ Drop(1);
+    __ Poke(x2, 12);
+    __ Push(w0);
+
+    __ Mov(csp, __ StackPointer());
+    __ SetStackPointer(csp);
+  }
+
+  __ Pop(x0, x1, x2, x3);
+
+  END();
+  RUN();
+
+  uint64_t x0_expected = literal_base * 1;
+  uint64_t x1_expected = literal_base * 2;
+  uint64_t x2_expected = literal_base * 3;
+  uint64_t x3_expected = literal_base * 4;
+  uint64_t x6_expected = (x1_expected << 32) | (x0_expected >> 32);
+  uint64_t x7_expected = ((x1_expected << 16) & 0xffff0000) |
+                         ((x0_expected >> 48) & 0x0000ffff);
+
+  ASSERT_EQUAL_64(x0_expected, x0);
+  ASSERT_EQUAL_64(x1_expected, x1);
+  ASSERT_EQUAL_64(x2_expected, x2);
+  ASSERT_EQUAL_64(x3_expected, x3);
+  ASSERT_EQUAL_64(x6_expected, x6);
+  ASSERT_EQUAL_64(x7_expected, x7);
+
+  TEARDOWN();
+}
+
+
+// This enum is used only as an argument to the push-pop test helpers, to
+// select which MacroAssembler interface performs the transfer.
+enum PushPopMethod {
+  // Push or Pop using the Push and Pop methods, with blocks of up to four
+  // registers. (Smaller blocks will be used if necessary.)
+  PushPopByFour,
+
+  // Use Push<Size>RegList and Pop<Size>RegList to transfer the registers.
+  PushPopRegList
+};
+
+
+// The maximum number of registers that can be used by the PushPopJssp* tests,
+// where a reg_count field is provided.
+// -1 is a sentinel: the helpers expand it to the count of allowed registers.
+static int const kPushPopJsspMaxRegCount = -1;
+
+// Test a simple push-pop pattern:
+//  * Claim <claim> bytes to set the stack alignment.
+//  * Push <reg_count> registers with size <reg_size>.
+//  * Clobber the register contents.
+//  * Pop <reg_count> registers to restore the original contents.
+//  * Drop <claim> bytes to restore the original stack pointer.
+//
+// Different push and pop methods can be specified independently to test for
+// proper word-endian behaviour.
+//
+// reg_count may be kPushPopJsspMaxRegCount to use every allowed register.
+// reg_size is in bits (kWRegSize or kXRegSize).
+static void PushPopJsspSimpleHelper(int reg_count,
+                                    int claim,
+                                    int reg_size,
+                                    PushPopMethod push_method,
+                                    PushPopMethod pop_method) {
+  SETUP();
+
+  START();
+
+  // Registers x8 and x9 are used by the macro assembler for debug code (for
+  // example in 'Pop'), so we can't use them here. We can't use jssp because it
+  // will be the stack pointer for this test.
+  static RegList const allowed = ~(x8.Bit() | x9.Bit() | jssp.Bit());
+  if (reg_count == kPushPopJsspMaxRegCount) {
+    reg_count = CountSetBits(allowed, kNumberOfRegisters);
+  }
+  // Work out which registers to use, based on reg_size.
+  Register r[kNumberOfRegisters];
+  Register x[kNumberOfRegisters];
+  RegList list = PopulateRegisterArray(NULL, x, r, reg_size, reg_count,
+                                       allowed);
+
+  // The literal base is chosen to have two useful properties:
+  //  * When multiplied by small values (such as a register index), this value
+  //    is clearly readable in the result.
+  //  * The value is not formed from repeating fixed-size smaller values, so it
+  //    can be used to detect endianness-related errors.
+  uint64_t literal_base = 0x0100001000100101UL;
+
+  {
+    // The test body runs with jssp as the stack pointer, as the helper name
+    // implies; csp is restored before END().
+    ASSERT(__ StackPointer().Is(csp));
+    __ Mov(jssp, __ StackPointer());
+    __ SetStackPointer(jssp);
+
+    int i;
+
+    // Initialize the registers.
+    for (i = 0; i < reg_count; i++) {
+      // Always write into the X register, to ensure that the upper word is
+      // properly ignored by Push when testing W registers.
+      if (!x[i].IsZero()) {
+        __ Mov(x[i], literal_base * i);
+      }
+    }
+
+    // Claim memory first, as requested.
+    __ Claim(claim, kByteSizeInBytes);
+
+    switch (push_method) {
+      case PushPopByFour:
+        // Push high-numbered registers first (to the highest addresses).
+        for (i = reg_count; i >= 4; i -= 4) {
+          __ Push(r[i-1], r[i-2], r[i-3], r[i-4]);
+        }
+        // Finish off the leftovers.
+        switch (i) {
+          case 3:  __ Push(r[2], r[1], r[0]); break;
+          case 2:  __ Push(r[1], r[0]);       break;
+          case 1:  __ Push(r[0]);             break;
+          default: ASSERT(i == 0);            break;
+        }
+        break;
+      case PushPopRegList:
+        __ PushSizeRegList(list, reg_size);
+        break;
+    }
+
+    // Clobber all the registers, to ensure that they get repopulated by Pop.
+    Clobber(&masm, list);
+
+    switch (pop_method) {
+      case PushPopByFour:
+        // Pop low-numbered registers first (from the lowest addresses).
+        for (i = 0; i <= (reg_count-4); i += 4) {
+          __ Pop(r[i], r[i+1], r[i+2], r[i+3]);
+        }
+        // Finish off the leftovers.
+        switch (reg_count - i) {
+          case 3:  __ Pop(r[i], r[i+1], r[i+2]); break;
+          case 2:  __ Pop(r[i], r[i+1]);         break;
+          case 1:  __ Pop(r[i]);                 break;
+          default: ASSERT(i == reg_count);       break;
+        }
+        break;
+      case PushPopRegList:
+        __ PopSizeRegList(list, reg_size);
+        break;
+    }
+
+    // Drop memory to restore jssp.
+    __ Drop(claim, kByteSizeInBytes);
+
+    __ Mov(csp, __ StackPointer());
+    __ SetStackPointer(csp);
+  }
+
+  END();
+
+  RUN();
+
+  // Check that the register contents were preserved.
+  // Always use ASSERT_EQUAL_64, even when testing W registers, so we can test
+  // that the upper word was properly cleared by Pop.
+  literal_base &= (0xffffffffffffffffUL >> (64-reg_size));
+  for (int i = 0; i < reg_count; i++) {
+    if (x[i].IsZero()) {
+      ASSERT_EQUAL_64(0, x[i]);
+    } else {
+      ASSERT_EQUAL_64(literal_base * i, x[i]);
+    }
+  }
+
+  TEARDOWN();
+}
+
+
+// Exercise PushPopJsspSimpleHelper with W registers, for every combination of
+// push/pop method, claim size (0-8 bytes) and register count (0-8, plus max).
+TEST(push_pop_jssp_simple_32) {
+  INIT_V8();
+  for (int claim = 0; claim <= 8; claim++) {
+    for (int count = 0; count <= 8; count++) {
+      PushPopJsspSimpleHelper(count, claim, kWRegSize,
+                              PushPopByFour, PushPopByFour);
+      PushPopJsspSimpleHelper(count, claim, kWRegSize,
+                              PushPopByFour, PushPopRegList);
+      PushPopJsspSimpleHelper(count, claim, kWRegSize,
+                              PushPopRegList, PushPopByFour);
+      PushPopJsspSimpleHelper(count, claim, kWRegSize,
+                              PushPopRegList, PushPopRegList);
+    }
+    // Test with the maximum number of registers.
+    PushPopJsspSimpleHelper(kPushPopJsspMaxRegCount, claim, kWRegSize,
+                            PushPopByFour, PushPopByFour);
+    PushPopJsspSimpleHelper(kPushPopJsspMaxRegCount, claim, kWRegSize,
+                            PushPopByFour, PushPopRegList);
+    PushPopJsspSimpleHelper(kPushPopJsspMaxRegCount, claim, kWRegSize,
+                            PushPopRegList, PushPopByFour);
+    PushPopJsspSimpleHelper(kPushPopJsspMaxRegCount, claim, kWRegSize,
+                            PushPopRegList, PushPopRegList);
+  }
+}
+
+
+// Exercise PushPopJsspSimpleHelper with X registers, for every combination of
+// push/pop method, claim size (0-8 bytes) and register count (0-8, plus max).
+TEST(push_pop_jssp_simple_64) {
+  INIT_V8();
+  for (int claim = 0; claim <= 8; claim++) {
+    for (int count = 0; count <= 8; count++) {
+      PushPopJsspSimpleHelper(count, claim, kXRegSize,
+                              PushPopByFour, PushPopByFour);
+      PushPopJsspSimpleHelper(count, claim, kXRegSize,
+                              PushPopByFour, PushPopRegList);
+      PushPopJsspSimpleHelper(count, claim, kXRegSize,
+                              PushPopRegList, PushPopByFour);
+      PushPopJsspSimpleHelper(count, claim, kXRegSize,
+                              PushPopRegList, PushPopRegList);
+    }
+    // Test with the maximum number of registers.
+    PushPopJsspSimpleHelper(kPushPopJsspMaxRegCount, claim, kXRegSize,
+                            PushPopByFour, PushPopByFour);
+    PushPopJsspSimpleHelper(kPushPopJsspMaxRegCount, claim, kXRegSize,
+                            PushPopByFour, PushPopRegList);
+    PushPopJsspSimpleHelper(kPushPopJsspMaxRegCount, claim, kXRegSize,
+                            PushPopRegList, PushPopByFour);
+    PushPopJsspSimpleHelper(kPushPopJsspMaxRegCount, claim, kXRegSize,
+                            PushPopRegList, PushPopRegList);
+  }
+}
+
+
+// The maximum number of registers that can be used by the PushPopFPJssp*
+// tests, where a reg_count field is provided.
+// -1 is a sentinel: the helpers expand it to the count of allowed registers.
+static int const kPushPopFPJsspMaxRegCount = -1;
+
+// Test a simple push-pop pattern:
+//  * Claim <claim> bytes to set the stack alignment.
+//  * Push <reg_count> FP registers with size <reg_size>.
+//  * Clobber the register contents.
+//  * Pop <reg_count> FP registers to restore the original contents.
+//  * Drop <claim> bytes to restore the original stack pointer.
+//
+// Different push and pop methods can be specified independently to test for
+// proper word-endian behaviour.
+//
+// reg_count may be kPushPopFPJsspMaxRegCount to use every FP register.
+// reg_size is in bits (kSRegSize or kDRegSize).
+static void PushPopFPJsspSimpleHelper(int reg_count,
+                                      int claim,
+                                      int reg_size,
+                                      PushPopMethod push_method,
+                                      PushPopMethod pop_method) {
+  SETUP();
+
+  START();
+
+  // We can use any floating-point register. None of them are reserved for
+  // debug code, for example.
+  static RegList const allowed = ~0;
+  if (reg_count == kPushPopFPJsspMaxRegCount) {
+    reg_count = CountSetBits(allowed, kNumberOfFPRegisters);
+  }
+  // Work out which registers to use, based on reg_size.
+  FPRegister v[kNumberOfRegisters];
+  FPRegister d[kNumberOfRegisters];
+  RegList list = PopulateFPRegisterArray(NULL, d, v, reg_size, reg_count,
+                                         allowed);
+
+  // The literal base is chosen to have two useful properties:
+  //  * When multiplied (using an integer) by small values (such as a register
+  //    index), this value is clearly readable in the result.
+  //  * The value is not formed from repeating fixed-size smaller values, so it
+  //    can be used to detect endianness-related errors.
+  //  * It is never a floating-point NaN, and will therefore always compare
+  //    equal to itself.
+  uint64_t literal_base = 0x0100001000100101UL;
+
+  {
+    // The test body runs with jssp as the stack pointer; csp is restored
+    // before END().
+    ASSERT(__ StackPointer().Is(csp));
+    __ Mov(jssp, __ StackPointer());
+    __ SetStackPointer(jssp);
+
+    int i;
+
+    // Initialize the registers, using X registers to load the literal.
+    __ Mov(x0, 0);
+    __ Mov(x1, literal_base);
+    for (i = 0; i < reg_count; i++) {
+      // Always write into the D register, to ensure that the upper word is
+      // properly ignored by Push when testing S registers.
+      __ Fmov(d[i], x0);
+      // Calculate the next literal.
+      __ Add(x0, x0, x1);
+    }
+
+    // Claim memory first, as requested.
+    __ Claim(claim, kByteSizeInBytes);
+
+    switch (push_method) {
+      case PushPopByFour:
+        // Push high-numbered registers first (to the highest addresses).
+        for (i = reg_count; i >= 4; i -= 4) {
+          __ Push(v[i-1], v[i-2], v[i-3], v[i-4]);
+        }
+        // Finish off the leftovers.
+        switch (i) {
+          case 3:  __ Push(v[2], v[1], v[0]); break;
+          case 2:  __ Push(v[1], v[0]);       break;
+          case 1:  __ Push(v[0]);             break;
+          default: ASSERT(i == 0);            break;
+        }
+        break;
+      case PushPopRegList:
+        __ PushSizeRegList(list, reg_size, CPURegister::kFPRegister);
+        break;
+    }
+
+    // Clobber all the registers, to ensure that they get repopulated by Pop.
+    ClobberFP(&masm, list);
+
+    switch (pop_method) {
+      case PushPopByFour:
+        // Pop low-numbered registers first (from the lowest addresses).
+        for (i = 0; i <= (reg_count-4); i += 4) {
+          __ Pop(v[i], v[i+1], v[i+2], v[i+3]);
+        }
+        // Finish off the leftovers.
+        switch (reg_count - i) {
+          case 3:  __ Pop(v[i], v[i+1], v[i+2]); break;
+          case 2:  __ Pop(v[i], v[i+1]);         break;
+          case 1:  __ Pop(v[i]);                 break;
+          default: ASSERT(i == reg_count);       break;
+        }
+        break;
+      case PushPopRegList:
+        __ PopSizeRegList(list, reg_size, CPURegister::kFPRegister);
+        break;
+    }
+
+    // Drop memory to restore jssp.
+    __ Drop(claim, kByteSizeInBytes);
+
+    __ Mov(csp, __ StackPointer());
+    __ SetStackPointer(csp);
+  }
+
+  END();
+
+  RUN();
+
+  // Check that the register contents were preserved.
+  // Always use ASSERT_EQUAL_FP64, even when testing S registers, so we can
+  // test that the upper word was properly cleared by Pop.
+  literal_base &= (0xffffffffffffffffUL >> (64-reg_size));
+  for (int i = 0; i < reg_count; i++) {
+    uint64_t literal = literal_base * i;
+    double expected;
+    // Reinterpret the integer bit pattern as a double; no numeric conversion.
+    memcpy(&expected, &literal, sizeof(expected));
+    ASSERT_EQUAL_FP64(expected, d[i]);
+  }
+
+  TEARDOWN();
+}
+
+
+// Exercise PushPopFPJsspSimpleHelper with S registers, for every combination
+// of push/pop method, claim size (0-8 bytes) and register count (0-8 + max).
+TEST(push_pop_fp_jssp_simple_32) {
+  INIT_V8();
+  for (int claim = 0; claim <= 8; claim++) {
+    for (int count = 0; count <= 8; count++) {
+      PushPopFPJsspSimpleHelper(count, claim, kSRegSize,
+                                PushPopByFour, PushPopByFour);
+      PushPopFPJsspSimpleHelper(count, claim, kSRegSize,
+                                PushPopByFour, PushPopRegList);
+      PushPopFPJsspSimpleHelper(count, claim, kSRegSize,
+                                PushPopRegList, PushPopByFour);
+      PushPopFPJsspSimpleHelper(count, claim, kSRegSize,
+                                PushPopRegList, PushPopRegList);
+    }
+    // Test with the maximum number of registers.
+    PushPopFPJsspSimpleHelper(kPushPopFPJsspMaxRegCount, claim, kSRegSize,
+                              PushPopByFour, PushPopByFour);
+    PushPopFPJsspSimpleHelper(kPushPopFPJsspMaxRegCount, claim, kSRegSize,
+                              PushPopByFour, PushPopRegList);
+    PushPopFPJsspSimpleHelper(kPushPopFPJsspMaxRegCount, claim, kSRegSize,
+                              PushPopRegList, PushPopByFour);
+    PushPopFPJsspSimpleHelper(kPushPopFPJsspMaxRegCount, claim, kSRegSize,
+                              PushPopRegList, PushPopRegList);
+  }
+}
+
+
+// Exercise PushPopFPJsspSimpleHelper with D registers, for every combination
+// of push/pop method, claim size (0-8 bytes) and register count (0-8 + max).
+TEST(push_pop_fp_jssp_simple_64) {
+  INIT_V8();
+  for (int claim = 0; claim <= 8; claim++) {
+    for (int count = 0; count <= 8; count++) {
+      PushPopFPJsspSimpleHelper(count, claim, kDRegSize,
+                                PushPopByFour, PushPopByFour);
+      PushPopFPJsspSimpleHelper(count, claim, kDRegSize,
+                                PushPopByFour, PushPopRegList);
+      PushPopFPJsspSimpleHelper(count, claim, kDRegSize,
+                                PushPopRegList, PushPopByFour);
+      PushPopFPJsspSimpleHelper(count, claim, kDRegSize,
+                                PushPopRegList, PushPopRegList);
+    }
+    // Test with the maximum number of registers.
+    PushPopFPJsspSimpleHelper(kPushPopFPJsspMaxRegCount, claim, kDRegSize,
+                              PushPopByFour, PushPopByFour);
+    PushPopFPJsspSimpleHelper(kPushPopFPJsspMaxRegCount, claim, kDRegSize,
+                              PushPopByFour, PushPopRegList);
+    PushPopFPJsspSimpleHelper(kPushPopFPJsspMaxRegCount, claim, kDRegSize,
+                              PushPopRegList, PushPopByFour);
+    PushPopFPJsspSimpleHelper(kPushPopFPJsspMaxRegCount, claim, kDRegSize,
+                              PushPopRegList, PushPopRegList);
+  }
+}
+
+
+// Push and pop data using an overlapping combination of Push/Pop and
+// RegList-based methods. claim is a byte offset applied before the pushes to
+// vary stack alignment; reg_size is in bits (kWRegSize or kXRegSize).
+static void PushPopJsspMixedMethodsHelper(int claim, int reg_size) {
+  SETUP();
+
+  // Registers x8 and x9 are used by the macro assembler for debug code (for
+  // example in 'Pop'), so we can't use them here. We can't use jssp because it
+  // will be the stack pointer for this test.
+  static RegList const allowed =
+      ~(x8.Bit() | x9.Bit() | jssp.Bit() | xzr.Bit());
+  // Work out which registers to use, based on reg_size.
+  Register r[10];
+  Register x[10];
+  PopulateRegisterArray(NULL, x, r, reg_size, 10, allowed);
+
+  // Calculate some handy register lists.
+  RegList r0_to_r3 = 0;
+  for (int i = 0; i <= 3; i++) {
+    r0_to_r3 |= x[i].Bit();
+  }
+  RegList r4_to_r5 = 0;
+  for (int i = 4; i <= 5; i++) {
+    r4_to_r5 |= x[i].Bit();
+  }
+  RegList r6_to_r9 = 0;
+  for (int i = 6; i <= 9; i++) {
+    r6_to_r9 |= x[i].Bit();
+  }
+
+  // The literal base is chosen to have two useful properties:
+  //  * When multiplied by small values (such as a register index), this value
+  //    is clearly readable in the result.
+  //  * The value is not formed from repeating fixed-size smaller values, so it
+  //    can be used to detect endianness-related errors.
+  uint64_t literal_base = 0x0100001000100101UL;
+
+  START();
+  {
+    // The test body runs with jssp as the stack pointer; csp is restored
+    // before END().
+    ASSERT(__ StackPointer().Is(csp));
+    __ Mov(jssp, __ StackPointer());
+    __ SetStackPointer(jssp);
+
+    // Claim memory first, as requested.
+    __ Claim(claim, kByteSizeInBytes);
+
+    __ Mov(x[3], literal_base * 3);
+    __ Mov(x[2], literal_base * 2);
+    __ Mov(x[1], literal_base * 1);
+    __ Mov(x[0], literal_base * 0);
+
+    // Push r0-r3 by RegList, then push r3 and r2 again, so the stack holds
+    // (top to bottom): r3, r2, r0, r1, r2, r3.
+    __ PushSizeRegList(r0_to_r3, reg_size);
+    __ Push(r[3], r[2]);
+
+    Clobber(&masm, r0_to_r3);
+    __ PopSizeRegList(r0_to_r3, reg_size);
+
+    __ Push(r[2], r[1], r[3], r[0]);
+
+    Clobber(&masm, r4_to_r5);
+    __ Pop(r[4], r[5]);
+    Clobber(&masm, r6_to_r9);
+    __ Pop(r[6], r[7], r[8], r[9]);
+
+    // Drop memory to restore jssp.
+    __ Drop(claim, kByteSizeInBytes);
+
+    __ Mov(csp, __ StackPointer());
+    __ SetStackPointer(csp);
+  }
+
+  END();
+
+  RUN();
+
+  // Always use ASSERT_EQUAL_64, even when testing W registers, so we can test
+  // that the upper word was properly cleared by Pop.
+  literal_base &= (0xffffffffffffffffUL >> (64-reg_size));
+
+  ASSERT_EQUAL_64(literal_base * 3, x[9]);
+  ASSERT_EQUAL_64(literal_base * 2, x[8]);
+  ASSERT_EQUAL_64(literal_base * 0, x[7]);
+  ASSERT_EQUAL_64(literal_base * 3, x[6]);
+  ASSERT_EQUAL_64(literal_base * 1, x[5]);
+  ASSERT_EQUAL_64(literal_base * 2, x[4]);
+
+  TEARDOWN();
+}
+
+
+// Exercise PushPopJsspMixedMethodsHelper with X registers at each claim size.
+TEST(push_pop_jssp_mixed_methods_64) {
+  INIT_V8();
+  for (int claim = 0; claim <= 8; claim++) {
+    PushPopJsspMixedMethodsHelper(claim, kXRegSize);
+  }
+}
+
+
+// Exercise PushPopJsspMixedMethodsHelper with W registers at each claim size.
+TEST(push_pop_jssp_mixed_methods_32) {
+  INIT_V8();
+  for (int claim = 0; claim <= 8; claim++) {
+    PushPopJsspMixedMethodsHelper(claim, kWRegSize);
+  }
+}
+
+
+// Push and pop data using overlapping X- and W-sized quantities.
+//
+// Pushes `reg_count` registers (alternating X and W forms, each pushed a
+// varying number of times), pops them back one at a time in the opposite
+// width-alternation, and checks the results against a software model of the
+// stack (`stack[]`, tracked in W-sized slots). `claim` bytes are claimed on
+// the stack first to vary alignment.
+static void PushPopJsspWXOverlapHelper(int reg_count, int claim) {
+  // This test emits rather a lot of code.
+  SETUP_SIZE(BUF_SIZE * 2);
+
+  // Work out which registers to use, based on reg_size.
+  // x8 and x9 are reserved (used by debug code in the macro assembler), and
+  // jssp is the stack pointer under test.
+  static RegList const allowed = ~(x8.Bit() | x9.Bit() | jssp.Bit());
+  if (reg_count == kPushPopJsspMaxRegCount) {
+    reg_count = CountSetBits(allowed, kNumberOfRegisters);
+  }
+  Register w[kNumberOfRegisters];
+  Register x[kNumberOfRegisters];
+  RegList list = PopulateRegisterArray(w, x, NULL, 0, reg_count, allowed);
+
+  // The number of W-sized slots we expect to pop. When we pop, we alternate
+  // between W and X registers, so we need reg_count*1.5 W-sized slots.
+  int const requested_w_slots = reg_count + reg_count / 2;
+
+  // Track what _should_ be on the stack, using W-sized slots.
+  static int const kMaxWSlots = kNumberOfRegisters + kNumberOfRegisters / 2;
+  uint32_t stack[kMaxWSlots];
+  for (int i = 0; i < kMaxWSlots; i++) {
+    stack[i] = 0xdeadbeef;
+  }
+
+  // The literal base is chosen to have two useful properties:
+  // * When multiplied by small values (such as a register index), this value
+  // is clearly readable in the result.
+  // * The value is not formed from repeating fixed-size smaller values, so it
+  // can be used to detect endianness-related errors.
+  static uint64_t const literal_base = 0x0100001000100101UL;
+  static uint64_t const literal_base_hi = literal_base >> 32;
+  static uint64_t const literal_base_lo = literal_base & 0xffffffff;
+  // Note: literal_base_w has the same value as literal_base_lo; it is kept
+  // separate because it is used for W-register pushes rather than the low
+  // half of an X-register push.
+  static uint64_t const literal_base_w = literal_base & 0xffffffff;
+
+  START();
+  {
+    // Switch the active stack pointer from csp to jssp for the duration of
+    // the push/pop sequence.
+    ASSERT(__ StackPointer().Is(csp));
+    __ Mov(jssp, __ StackPointer());
+    __ SetStackPointer(jssp);
+
+    // Initialize the registers.
+    for (int i = 0; i < reg_count; i++) {
+      // Always write into the X register, to ensure that the upper word is
+      // properly ignored by Push when testing W registers.
+      if (!x[i].IsZero()) {
+        __ Mov(x[i], literal_base * i);
+      }
+    }
+
+    // Claim memory first, as requested.
+    __ Claim(claim, kByteSizeInBytes);
+
+    // The push-pop pattern is as follows:
+    // Push:              Pop:
+    //  x[0](hi)   ->      w[0]
+    //  x[0](lo)   ->      x[1](hi)
+    //  w[1]       ->      x[1](lo)
+    //  w[1]       ->      w[2]
+    //  x[2](hi)   ->      x[2](hi)
+    //  x[2](lo)   ->      x[2](lo)
+    //  x[2](hi)   ->      w[3]
+    //  x[2](lo)   ->      x[4](hi)
+    //  x[2](hi)   ->      x[4](lo)
+    //  x[2](lo)   ->      w[5]
+    //  w[3]       ->      x[5](hi)
+    //  w[3]       ->      x[6](lo)
+    //  w[3]       ->      w[7]
+    //  w[3]       ->      x[8](hi)
+    //  x[4](hi)   ->      x[8](lo)
+    //  x[4](lo)   ->      w[9]
+    // ... pattern continues ...
+    //
+    // That is, registers are pushed starting with the lower numbers,
+    // alternating between x and w registers, and pushing i%4+1 copies of each,
+    // where i is the register number.
+    // Registers are popped starting with the higher numbers one-by-one,
+    // alternating between x and w registers, but only popping one at a time.
+    //
+    // This pattern provides a wide variety of alignment effects and overlaps.
+
+    // ---- Push ----
+
+    int active_w_slots = 0;
+    for (int i = 0; active_w_slots < requested_w_slots; i++) {
+      ASSERT(i < reg_count);
+      // In order to test various arguments to PushMultipleTimes, and to try to
+      // exercise different alignment and overlap effects, we push each
+      // register a different number of times.
+      int times = i % 4 + 1;
+      if (i & 1) {
+        // Push odd-numbered registers as W registers.
+        __ PushMultipleTimes(times, w[i]);
+        // Fill in the expected stack slots.
+        for (int j = 0; j < times; j++) {
+          if (w[i].Is(wzr)) {
+            // The zero register always writes zeroes.
+            stack[active_w_slots++] = 0;
+          } else {
+            stack[active_w_slots++] = literal_base_w * i;
+          }
+        }
+      } else {
+        // Push even-numbered registers as X registers.
+        __ PushMultipleTimes(times, x[i]);
+        // Fill in the expected stack slots.
+        for (int j = 0; j < times; j++) {
+          if (x[i].IsZero()) {
+            // The zero register always writes zeroes.
+            stack[active_w_slots++] = 0;
+            stack[active_w_slots++] = 0;
+          } else {
+            // An X push occupies two W-sized slots: high word first (higher
+            // address), then low word.
+            stack[active_w_slots++] = literal_base_hi * i;
+            stack[active_w_slots++] = literal_base_lo * i;
+          }
+        }
+      }
+    }
+    // Because we were pushing several registers at a time, we probably pushed
+    // more than we needed to.
+    if (active_w_slots > requested_w_slots) {
+      __ Drop(active_w_slots - requested_w_slots, kWRegSizeInBytes);
+      // Bump the number of active W-sized slots back to where it should be,
+      // and fill the empty space with a dummy value.
+      do {
+        stack[active_w_slots--] = 0xdeadbeef;
+      } while (active_w_slots > requested_w_slots);
+    }
+
+    // ---- Pop ----
+
+    Clobber(&masm, list);
+
+    // If popping an even number of registers, the first one will be X-sized.
+    // Otherwise, the first one will be W-sized.
+    bool next_is_64 = !(reg_count & 1);
+    for (int i = reg_count-1; i >= 0; i--) {
+      if (next_is_64) {
+        __ Pop(x[i]);
+        active_w_slots -= 2;
+      } else {
+        __ Pop(w[i]);
+        active_w_slots -= 1;
+      }
+      next_is_64 = !next_is_64;
+    }
+    ASSERT(active_w_slots == 0);
+
+    // Drop memory to restore jssp.
+    __ Drop(claim, kByteSizeInBytes);
+
+    // Restore csp as the active stack pointer.
+    __ Mov(csp, __ StackPointer());
+    __ SetStackPointer(csp);
+  }
+
+  END();
+
+  RUN();
+
+  // Check the popped register values against the tracked stack model.
+  int slot = 0;
+  for (int i = 0; i < reg_count; i++) {
+    // Even-numbered registers were written as W registers.
+    // Odd-numbered registers were written as X registers.
+    bool expect_64 = (i & 1);
+    uint64_t expected;
+
+    if (expect_64) {
+      uint64_t hi = stack[slot++];
+      uint64_t lo = stack[slot++];
+      expected = (hi << 32) | lo;
+    } else {
+      expected = stack[slot++];
+    }
+
+    // Always use ASSERT_EQUAL_64, even when testing W registers, so we can
+    // test that the upper word was properly cleared by Pop.
+    if (x[i].IsZero()) {
+      ASSERT_EQUAL_64(0, x[i]);
+    } else {
+      ASSERT_EQUAL_64(expected, x[i]);
+    }
+  }
+  ASSERT(slot == requested_w_slots);
+
+  TEARDOWN();
+}
+
+
+// Run the WX-overlap helper across all claim sizes (0..8) and register
+// counts (1..8, plus the maximum).
+// NOTE(review): each combination is invoked four times; presumably this is
+// intentional (e.g. to vary code-buffer state between runs) — confirm against
+// upstream before simplifying to a loop.
+TEST(push_pop_jssp_wx_overlap) {
+  INIT_V8();
+  for (int claim = 0; claim <= 8; claim++) {
+    for (int count = 1; count <= 8; count++) {
+      PushPopJsspWXOverlapHelper(count, claim);
+      PushPopJsspWXOverlapHelper(count, claim);
+      PushPopJsspWXOverlapHelper(count, claim);
+      PushPopJsspWXOverlapHelper(count, claim);
+    }
+    // Test with the maximum number of registers.
+    PushPopJsspWXOverlapHelper(kPushPopJsspMaxRegCount, claim);
+    PushPopJsspWXOverlapHelper(kPushPopJsspMaxRegCount, claim);
+    PushPopJsspWXOverlapHelper(kPushPopJsspMaxRegCount, claim);
+    PushPopJsspWXOverlapHelper(kPushPopJsspMaxRegCount, claim);
+  }
+}
+
+
+// Test Push/Pop and Push*RegList/Pop*RegList while csp (the default stack
+// pointer) is active, mixing X- and W-sized operations and register lists.
+TEST(push_pop_csp) {
+  INIT_V8();
+  SETUP();
+
+  START();
+
+  ASSERT(csp.Is(__ StackPointer()));
+
+  // Seed x0-x3 with distinctive values.
+  __ Mov(x3, 0x3333333333333333UL);
+  __ Mov(x2, 0x2222222222222222UL);
+  __ Mov(x1, 0x1111111111111111UL);
+  __ Mov(x0, 0x0000000000000000UL);
+  __ Claim(2);
+  __ PushXRegList(x0.Bit() | x1.Bit() | x2.Bit() | x3.Bit());
+  __ Push(x3, x2);
+  __ PopXRegList(x0.Bit() | x1.Bit() | x2.Bit() | x3.Bit());
+  __ Push(x2, x1, x3, x0);
+  __ Pop(x4, x5);
+  __ Pop(x6, x7, x8, x9);
+
+  __ Claim(2);
+  __ PushWRegList(w0.Bit() | w1.Bit() | w2.Bit() | w3.Bit());
+  __ Push(w3, w1, w2, w0);
+  __ PopWRegList(w10.Bit() | w11.Bit() | w12.Bit() | w13.Bit());
+  __ Pop(w14, w15, w16, w17);
+
+  __ Claim(2);
+  // Mix W-sized and X-sized pushes in the same region.
+  __ Push(w2, w2, w1, w1);
+  __ Push(x3, x3);
+  __ Pop(w18, w19, w20, w21);
+  __ Pop(x22, x23);
+
+  __ Claim(2);
+  __ PushXRegList(x1.Bit() | x22.Bit());
+  __ PopXRegList(x24.Bit() | x26.Bit());
+
+  __ Claim(2);
+  __ PushWRegList(w1.Bit() | w2.Bit() | w4.Bit() | w22.Bit());
+  __ PopWRegList(w25.Bit() | w27.Bit() | w28.Bit() | w29.Bit());
+
+  __ Claim(2);
+  // Empty and full register lists should be accepted and should balance.
+  __ PushXRegList(0);
+  __ PopXRegList(0);
+  __ PushXRegList(0xffffffff);
+  __ PopXRegList(0xffffffff);
+  // Drop the 12 slots claimed above (six Claim(2) calls).
+  __ Drop(12);
+
+  END();
+
+  RUN();
+
+  ASSERT_EQUAL_64(0x1111111111111111UL, x3);
+  ASSERT_EQUAL_64(0x0000000000000000UL, x2);
+  ASSERT_EQUAL_64(0x3333333333333333UL, x1);
+  ASSERT_EQUAL_64(0x2222222222222222UL, x0);
+  ASSERT_EQUAL_64(0x3333333333333333UL, x9);
+  ASSERT_EQUAL_64(0x2222222222222222UL, x8);
+  ASSERT_EQUAL_64(0x0000000000000000UL, x7);
+  ASSERT_EQUAL_64(0x3333333333333333UL, x6);
+  ASSERT_EQUAL_64(0x1111111111111111UL, x5);
+  ASSERT_EQUAL_64(0x2222222222222222UL, x4);
+
+  ASSERT_EQUAL_32(0x11111111U, w13);
+  ASSERT_EQUAL_32(0x33333333U, w12);
+  ASSERT_EQUAL_32(0x00000000U, w11);
+  ASSERT_EQUAL_32(0x22222222U, w10);
+  ASSERT_EQUAL_32(0x11111111U, w17);
+  ASSERT_EQUAL_32(0x00000000U, w16);
+  ASSERT_EQUAL_32(0x33333333U, w15);
+  ASSERT_EQUAL_32(0x22222222U, w14);
+
+  ASSERT_EQUAL_32(0x11111111U, w18);
+  ASSERT_EQUAL_32(0x11111111U, w19);
+  ASSERT_EQUAL_32(0x11111111U, w20);
+  ASSERT_EQUAL_32(0x11111111U, w21);
+  ASSERT_EQUAL_64(0x3333333333333333UL, x22);
+  ASSERT_EQUAL_64(0x0000000000000000UL, x23);
+
+  ASSERT_EQUAL_64(0x3333333333333333UL, x24);
+  ASSERT_EQUAL_64(0x3333333333333333UL, x26);
+
+  ASSERT_EQUAL_32(0x33333333U, w25);
+  ASSERT_EQUAL_32(0x00000000U, w27);
+  ASSERT_EQUAL_32(0x22222222U, w28);
+  ASSERT_EQUAL_32(0x33333333U, w29);
+  TEARDOWN();
+}
+
+
+// Test JumpIfBothSmi with every smi/pointer combination of two inputs.
+// Each branch outcome writes 1 (pass path) or 0 (fail path) into x4-x7;
+// only the smi/smi pair (x2, x3) should take the pass path.
+TEST(jump_both_smi) {
+  INIT_V8();
+  SETUP();
+
+  Label cond_pass_00, cond_pass_01, cond_pass_10, cond_pass_11;
+  Label cond_fail_00, cond_fail_01, cond_fail_10, cond_fail_11;
+  Label return1, return2, return3, done;
+
+  START();
+
+  __ Mov(x0, 0x5555555500000001UL);  // A pointer.
+  __ Mov(x1, 0xaaaaaaaa00000001UL);  // A pointer.
+  __ Mov(x2, 0x1234567800000000UL);  // A smi.
+  __ Mov(x3, 0x8765432100000000UL);  // A smi.
+  // 0xdead sentinels: overwritten by every test case below.
+  __ Mov(x4, 0xdead);
+  __ Mov(x5, 0xdead);
+  __ Mov(x6, 0xdead);
+  __ Mov(x7, 0xdead);
+
+  __ JumpIfBothSmi(x0, x1, &cond_pass_00, &cond_fail_00);
+  __ Bind(&return1);
+  __ JumpIfBothSmi(x0, x2, &cond_pass_01, &cond_fail_01);
+  __ Bind(&return2);
+  __ JumpIfBothSmi(x2, x1, &cond_pass_10, &cond_fail_10);
+  __ Bind(&return3);
+  __ JumpIfBothSmi(x2, x3, &cond_pass_11, &cond_fail_11);
+
+  __ Bind(&cond_fail_00);
+  __ Mov(x4, 0);
+  __ B(&return1);
+  __ Bind(&cond_pass_00);
+  __ Mov(x4, 1);
+  __ B(&return1);
+
+  __ Bind(&cond_fail_01);
+  __ Mov(x5, 0);
+  __ B(&return2);
+  __ Bind(&cond_pass_01);
+  __ Mov(x5, 1);
+  __ B(&return2);
+
+  __ Bind(&cond_fail_10);
+  __ Mov(x6, 0);
+  __ B(&return3);
+  __ Bind(&cond_pass_10);
+  __ Mov(x6, 1);
+  __ B(&return3);
+
+  __ Bind(&cond_fail_11);
+  __ Mov(x7, 0);
+  __ B(&done);
+  __ Bind(&cond_pass_11);
+  __ Mov(x7, 1);
+
+  __ Bind(&done);
+
+  END();
+
+  RUN();
+
+  // Inputs must be unchanged.
+  ASSERT_EQUAL_64(0x5555555500000001UL, x0);
+  ASSERT_EQUAL_64(0xaaaaaaaa00000001UL, x1);
+  ASSERT_EQUAL_64(0x1234567800000000UL, x2);
+  ASSERT_EQUAL_64(0x8765432100000000UL, x3);
+  // Only the smi/smi case passes.
+  ASSERT_EQUAL_64(0, x4);
+  ASSERT_EQUAL_64(0, x5);
+  ASSERT_EQUAL_64(0, x6);
+  ASSERT_EQUAL_64(1, x7);
+
+  TEARDOWN();
+}
+
+
+// Test JumpIfEitherSmi with every smi/pointer combination of two inputs.
+// Structure mirrors jump_both_smi; here every case containing at least one
+// smi (x5, x6, x7) should take the pass path.
+TEST(jump_either_smi) {
+  INIT_V8();
+  SETUP();
+
+  Label cond_pass_00, cond_pass_01, cond_pass_10, cond_pass_11;
+  Label cond_fail_00, cond_fail_01, cond_fail_10, cond_fail_11;
+  Label return1, return2, return3, done;
+
+  START();
+
+  __ Mov(x0, 0x5555555500000001UL);  // A pointer.
+  __ Mov(x1, 0xaaaaaaaa00000001UL);  // A pointer.
+  __ Mov(x2, 0x1234567800000000UL);  // A smi.
+  __ Mov(x3, 0x8765432100000000UL);  // A smi.
+  // 0xdead sentinels: overwritten by every test case below.
+  __ Mov(x4, 0xdead);
+  __ Mov(x5, 0xdead);
+  __ Mov(x6, 0xdead);
+  __ Mov(x7, 0xdead);
+
+  __ JumpIfEitherSmi(x0, x1, &cond_pass_00, &cond_fail_00);
+  __ Bind(&return1);
+  __ JumpIfEitherSmi(x0, x2, &cond_pass_01, &cond_fail_01);
+  __ Bind(&return2);
+  __ JumpIfEitherSmi(x2, x1, &cond_pass_10, &cond_fail_10);
+  __ Bind(&return3);
+  __ JumpIfEitherSmi(x2, x3, &cond_pass_11, &cond_fail_11);
+
+  __ Bind(&cond_fail_00);
+  __ Mov(x4, 0);
+  __ B(&return1);
+  __ Bind(&cond_pass_00);
+  __ Mov(x4, 1);
+  __ B(&return1);
+
+  __ Bind(&cond_fail_01);
+  __ Mov(x5, 0);
+  __ B(&return2);
+  __ Bind(&cond_pass_01);
+  __ Mov(x5, 1);
+  __ B(&return2);
+
+  __ Bind(&cond_fail_10);
+  __ Mov(x6, 0);
+  __ B(&return3);
+  __ Bind(&cond_pass_10);
+  __ Mov(x6, 1);
+  __ B(&return3);
+
+  __ Bind(&cond_fail_11);
+  __ Mov(x7, 0);
+  __ B(&done);
+  __ Bind(&cond_pass_11);
+  __ Mov(x7, 1);
+
+  __ Bind(&done);
+
+  END();
+
+  RUN();
+
+  // Inputs must be unchanged.
+  ASSERT_EQUAL_64(0x5555555500000001UL, x0);
+  ASSERT_EQUAL_64(0xaaaaaaaa00000001UL, x1);
+  ASSERT_EQUAL_64(0x1234567800000000UL, x2);
+  ASSERT_EQUAL_64(0x8765432100000000UL, x3);
+  // Only the pointer/pointer case fails.
+  ASSERT_EQUAL_64(0, x4);
+  ASSERT_EQUAL_64(1, x5);
+  ASSERT_EQUAL_64(1, x6);
+  ASSERT_EQUAL_64(1, x7);
+
+  TEARDOWN();
+}
+
+
+TEST(noreg) {
+  // This test doesn't generate any code, but it verifies some invariants
+  // related to NoReg.
+  // All three "no register" sentinels must alias each other...
+  CHECK(NoReg.Is(NoFPReg));
+  CHECK(NoFPReg.Is(NoReg));
+  CHECK(NoReg.Is(NoCPUReg));
+  CHECK(NoCPUReg.Is(NoReg));
+  CHECK(NoFPReg.Is(NoCPUReg));
+  CHECK(NoCPUReg.Is(NoFPReg));
+
+  // ...and each must report itself as "none".
+  CHECK(NoReg.IsNone());
+  CHECK(NoFPReg.IsNone());
+  CHECK(NoCPUReg.IsNone());
+}
+
+
+TEST(isvalid) {
+  // This test doesn't generate any code, but it verifies some invariants
+  // related to IsValid().
+  CHECK(!NoReg.IsValid());
+  CHECK(!NoFPReg.IsValid());
+  CHECK(!NoCPUReg.IsValid());
+
+  CHECK(x0.IsValid());
+  CHECK(w0.IsValid());
+  CHECK(x30.IsValid());
+  CHECK(w30.IsValid());
+  CHECK(xzr.IsValid());
+  CHECK(wzr.IsValid());
+
+  CHECK(csp.IsValid());
+  CHECK(wcsp.IsValid());
+
+  CHECK(d0.IsValid());
+  CHECK(s0.IsValid());
+  CHECK(d31.IsValid());
+  CHECK(s31.IsValid());
+
+  // Integer registers (including zero registers and stack pointers) are
+  // valid Registers but not FPRegisters, and vice versa.
+  CHECK(x0.IsValidRegister());
+  CHECK(w0.IsValidRegister());
+  CHECK(xzr.IsValidRegister());
+  CHECK(wzr.IsValidRegister());
+  CHECK(csp.IsValidRegister());
+  CHECK(wcsp.IsValidRegister());
+  CHECK(!x0.IsValidFPRegister());
+  CHECK(!w0.IsValidFPRegister());
+  CHECK(!xzr.IsValidFPRegister());
+  CHECK(!wzr.IsValidFPRegister());
+  CHECK(!csp.IsValidFPRegister());
+  CHECK(!wcsp.IsValidFPRegister());
+
+  CHECK(d0.IsValidFPRegister());
+  CHECK(s0.IsValidFPRegister());
+  CHECK(!d0.IsValidRegister());
+  CHECK(!s0.IsValidRegister());
+
+  // Test the same as before, but using CPURegister types. This shouldn't make
+  // any difference.
+  CHECK(static_cast<CPURegister>(x0).IsValid());
+  CHECK(static_cast<CPURegister>(w0).IsValid());
+  CHECK(static_cast<CPURegister>(x30).IsValid());
+  CHECK(static_cast<CPURegister>(w30).IsValid());
+  CHECK(static_cast<CPURegister>(xzr).IsValid());
+  CHECK(static_cast<CPURegister>(wzr).IsValid());
+
+  CHECK(static_cast<CPURegister>(csp).IsValid());
+  CHECK(static_cast<CPURegister>(wcsp).IsValid());
+
+  CHECK(static_cast<CPURegister>(d0).IsValid());
+  CHECK(static_cast<CPURegister>(s0).IsValid());
+  CHECK(static_cast<CPURegister>(d31).IsValid());
+  CHECK(static_cast<CPURegister>(s31).IsValid());
+
+  CHECK(static_cast<CPURegister>(x0).IsValidRegister());
+  CHECK(static_cast<CPURegister>(w0).IsValidRegister());
+  CHECK(static_cast<CPURegister>(xzr).IsValidRegister());
+  CHECK(static_cast<CPURegister>(wzr).IsValidRegister());
+  CHECK(static_cast<CPURegister>(csp).IsValidRegister());
+  CHECK(static_cast<CPURegister>(wcsp).IsValidRegister());
+  CHECK(!static_cast<CPURegister>(x0).IsValidFPRegister());
+  CHECK(!static_cast<CPURegister>(w0).IsValidFPRegister());
+  CHECK(!static_cast<CPURegister>(xzr).IsValidFPRegister());
+  CHECK(!static_cast<CPURegister>(wzr).IsValidFPRegister());
+  CHECK(!static_cast<CPURegister>(csp).IsValidFPRegister());
+  CHECK(!static_cast<CPURegister>(wcsp).IsValidFPRegister());
+
+  CHECK(static_cast<CPURegister>(d0).IsValidFPRegister());
+  CHECK(static_cast<CPURegister>(s0).IsValidFPRegister());
+  CHECK(!static_cast<CPURegister>(d0).IsValidRegister());
+  CHECK(!static_cast<CPURegister>(s0).IsValidRegister());
+}
+
+
+TEST(cpureglist_utils_x) {
+  // This test doesn't generate any code, but it verifies the behaviour of
+  // the CPURegList utility methods.
+
+  // Test a list of X registers.
+  CPURegList test(x0, x1, x2, x3);
+
+  // Each X register aliases its W counterpart.
+  CHECK(test.IncludesAliasOf(x0));
+  CHECK(test.IncludesAliasOf(x1));
+  CHECK(test.IncludesAliasOf(x2));
+  CHECK(test.IncludesAliasOf(x3));
+  CHECK(test.IncludesAliasOf(w0));
+  CHECK(test.IncludesAliasOf(w1));
+  CHECK(test.IncludesAliasOf(w2));
+  CHECK(test.IncludesAliasOf(w3));
+
+  // Registers outside the list (including zero register and stack pointer)
+  // must not be reported as aliases.
+  CHECK(!test.IncludesAliasOf(x4));
+  CHECK(!test.IncludesAliasOf(x30));
+  CHECK(!test.IncludesAliasOf(xzr));
+  CHECK(!test.IncludesAliasOf(csp));
+  CHECK(!test.IncludesAliasOf(w4));
+  CHECK(!test.IncludesAliasOf(w30));
+  CHECK(!test.IncludesAliasOf(wzr));
+  CHECK(!test.IncludesAliasOf(wcsp));
+
+  // FP registers never alias integer registers.
+  CHECK(!test.IncludesAliasOf(d0));
+  CHECK(!test.IncludesAliasOf(d1));
+  CHECK(!test.IncludesAliasOf(d2));
+  CHECK(!test.IncludesAliasOf(d3));
+  CHECK(!test.IncludesAliasOf(s0));
+  CHECK(!test.IncludesAliasOf(s1));
+  CHECK(!test.IncludesAliasOf(s2));
+  CHECK(!test.IncludesAliasOf(s3));
+
+  CHECK(!test.IsEmpty());
+
+  CHECK(test.type() == x0.type());
+
+  // Popping removes from the ends of the index range.
+  CHECK(test.PopHighestIndex().Is(x3));
+  CHECK(test.PopLowestIndex().Is(x0));
+
+  CHECK(test.IncludesAliasOf(x1));
+  CHECK(test.IncludesAliasOf(x2));
+  CHECK(test.IncludesAliasOf(w1));
+  CHECK(test.IncludesAliasOf(w2));
+  CHECK(!test.IncludesAliasOf(x0));
+  CHECK(!test.IncludesAliasOf(x3));
+  CHECK(!test.IncludesAliasOf(w0));
+  CHECK(!test.IncludesAliasOf(w3));
+
+  CHECK(test.PopHighestIndex().Is(x2));
+  CHECK(test.PopLowestIndex().Is(x1));
+
+  CHECK(!test.IncludesAliasOf(x1));
+  CHECK(!test.IncludesAliasOf(x2));
+  CHECK(!test.IncludesAliasOf(w1));
+  CHECK(!test.IncludesAliasOf(w2));
+
+  CHECK(test.IsEmpty());
+}
+
+
+TEST(cpureglist_utils_w) {
+  // This test doesn't generate any code, but it verifies the behaviour of
+  // the CPURegList utility methods.
+
+  // Test a list of W registers.
+  CPURegList test(w10, w11, w12, w13);
+
+  // Each W register aliases its X counterpart.
+  CHECK(test.IncludesAliasOf(x10));
+  CHECK(test.IncludesAliasOf(x11));
+  CHECK(test.IncludesAliasOf(x12));
+  CHECK(test.IncludesAliasOf(x13));
+  CHECK(test.IncludesAliasOf(w10));
+  CHECK(test.IncludesAliasOf(w11));
+  CHECK(test.IncludesAliasOf(w12));
+  CHECK(test.IncludesAliasOf(w13));
+
+  // Registers outside the list must not be reported as aliases.
+  CHECK(!test.IncludesAliasOf(x0));
+  CHECK(!test.IncludesAliasOf(x9));
+  CHECK(!test.IncludesAliasOf(x14));
+  CHECK(!test.IncludesAliasOf(x30));
+  CHECK(!test.IncludesAliasOf(xzr));
+  CHECK(!test.IncludesAliasOf(csp));
+  CHECK(!test.IncludesAliasOf(w0));
+  CHECK(!test.IncludesAliasOf(w9));
+  CHECK(!test.IncludesAliasOf(w14));
+  CHECK(!test.IncludesAliasOf(w30));
+  CHECK(!test.IncludesAliasOf(wzr));
+  CHECK(!test.IncludesAliasOf(wcsp));
+
+  // FP registers never alias integer registers.
+  CHECK(!test.IncludesAliasOf(d10));
+  CHECK(!test.IncludesAliasOf(d11));
+  CHECK(!test.IncludesAliasOf(d12));
+  CHECK(!test.IncludesAliasOf(d13));
+  CHECK(!test.IncludesAliasOf(s10));
+  CHECK(!test.IncludesAliasOf(s11));
+  CHECK(!test.IncludesAliasOf(s12));
+  CHECK(!test.IncludesAliasOf(s13));
+
+  CHECK(!test.IsEmpty());
+
+  CHECK(test.type() == w10.type());
+
+  // Popping removes from the ends of the index range.
+  CHECK(test.PopHighestIndex().Is(w13));
+  CHECK(test.PopLowestIndex().Is(w10));
+
+  CHECK(test.IncludesAliasOf(x11));
+  CHECK(test.IncludesAliasOf(x12));
+  CHECK(test.IncludesAliasOf(w11));
+  CHECK(test.IncludesAliasOf(w12));
+  CHECK(!test.IncludesAliasOf(x10));
+  CHECK(!test.IncludesAliasOf(x13));
+  CHECK(!test.IncludesAliasOf(w10));
+  CHECK(!test.IncludesAliasOf(w13));
+
+  CHECK(test.PopHighestIndex().Is(w12));
+  CHECK(test.PopLowestIndex().Is(w11));
+
+  CHECK(!test.IncludesAliasOf(x11));
+  CHECK(!test.IncludesAliasOf(x12));
+  CHECK(!test.IncludesAliasOf(w11));
+  CHECK(!test.IncludesAliasOf(w12));
+
+  CHECK(test.IsEmpty());
+}
+
+
+TEST(cpureglist_utils_d) {
+  // This test doesn't generate any code, but it verifies the behaviour of
+  // the CPURegList utility methods.
+
+  // Test a list of D registers.
+  CPURegList test(d20, d21, d22, d23);
+
+  // Each D register aliases its S counterpart.
+  CHECK(test.IncludesAliasOf(d20));
+  CHECK(test.IncludesAliasOf(d21));
+  CHECK(test.IncludesAliasOf(d22));
+  CHECK(test.IncludesAliasOf(d23));
+  CHECK(test.IncludesAliasOf(s20));
+  CHECK(test.IncludesAliasOf(s21));
+  CHECK(test.IncludesAliasOf(s22));
+  CHECK(test.IncludesAliasOf(s23));
+
+  // FP registers outside the list must not be reported as aliases.
+  CHECK(!test.IncludesAliasOf(d0));
+  CHECK(!test.IncludesAliasOf(d19));
+  CHECK(!test.IncludesAliasOf(d24));
+  CHECK(!test.IncludesAliasOf(d31));
+  CHECK(!test.IncludesAliasOf(s0));
+  CHECK(!test.IncludesAliasOf(s19));
+  CHECK(!test.IncludesAliasOf(s24));
+  CHECK(!test.IncludesAliasOf(s31));
+
+  // Integer registers never alias FP registers, even with matching indices.
+  CHECK(!test.IncludesAliasOf(x20));
+  CHECK(!test.IncludesAliasOf(x21));
+  CHECK(!test.IncludesAliasOf(x22));
+  CHECK(!test.IncludesAliasOf(x23));
+  CHECK(!test.IncludesAliasOf(w20));
+  CHECK(!test.IncludesAliasOf(w21));
+  CHECK(!test.IncludesAliasOf(w22));
+  CHECK(!test.IncludesAliasOf(w23));
+
+  CHECK(!test.IncludesAliasOf(xzr));
+  CHECK(!test.IncludesAliasOf(wzr));
+  CHECK(!test.IncludesAliasOf(csp));
+  CHECK(!test.IncludesAliasOf(wcsp));
+
+  CHECK(!test.IsEmpty());
+
+  CHECK(test.type() == d20.type());
+
+  // Popping removes from the ends of the index range.
+  CHECK(test.PopHighestIndex().Is(d23));
+  CHECK(test.PopLowestIndex().Is(d20));
+
+  CHECK(test.IncludesAliasOf(d21));
+  CHECK(test.IncludesAliasOf(d22));
+  CHECK(test.IncludesAliasOf(s21));
+  CHECK(test.IncludesAliasOf(s22));
+  CHECK(!test.IncludesAliasOf(d20));
+  CHECK(!test.IncludesAliasOf(d23));
+  CHECK(!test.IncludesAliasOf(s20));
+  CHECK(!test.IncludesAliasOf(s23));
+
+  CHECK(test.PopHighestIndex().Is(d22));
+  CHECK(test.PopLowestIndex().Is(d21));
+
+  CHECK(!test.IncludesAliasOf(d21));
+  CHECK(!test.IncludesAliasOf(d22));
+  CHECK(!test.IncludesAliasOf(s21));
+  CHECK(!test.IncludesAliasOf(s22));
+
+  CHECK(test.IsEmpty());
+}
+
+
+TEST(cpureglist_utils_s) {
+  // This test doesn't generate any code, but it verifies the behaviour of
+  // the CPURegList utility methods.
+
+  // Test a list of S registers.
+  CPURegList test(s20, s21, s22, s23);
+
+  // The type and size mechanisms are already covered, so here we just test
+  // that lists of S registers alias individual D registers.
+
+  CHECK(test.IncludesAliasOf(d20));
+  CHECK(test.IncludesAliasOf(d21));
+  CHECK(test.IncludesAliasOf(d22));
+  CHECK(test.IncludesAliasOf(d23));
+  CHECK(test.IncludesAliasOf(s20));
+  CHECK(test.IncludesAliasOf(s21));
+  CHECK(test.IncludesAliasOf(s22));
+  CHECK(test.IncludesAliasOf(s23));
+}
+
+
+TEST(cpureglist_utils_empty) {
+  // This test doesn't generate any code, but it verifies the behaviour of
+  // the CPURegList utility methods.
+
+  // Test an empty list.
+  // Empty lists can have type and size properties. Check that we can create
+  // them, and that they are empty.
+  CPURegList reg32(CPURegister::kRegister, kWRegSize, 0);
+  CPURegList reg64(CPURegister::kRegister, kXRegSize, 0);
+  CPURegList fpreg32(CPURegister::kFPRegister, kSRegSize, 0);
+  CPURegList fpreg64(CPURegister::kFPRegister, kDRegSize, 0);
+
+  CHECK(reg32.IsEmpty());
+  CHECK(reg64.IsEmpty());
+  CHECK(fpreg32.IsEmpty());
+  CHECK(fpreg64.IsEmpty());
+
+  // Popping from an empty list yields the "none" register, and the list
+  // remains empty afterwards.
+  CHECK(reg32.PopLowestIndex().IsNone());
+  CHECK(reg64.PopLowestIndex().IsNone());
+  CHECK(fpreg32.PopLowestIndex().IsNone());
+  CHECK(fpreg64.PopLowestIndex().IsNone());
+
+  CHECK(reg32.PopHighestIndex().IsNone());
+  CHECK(reg64.PopHighestIndex().IsNone());
+  CHECK(fpreg32.PopHighestIndex().IsNone());
+  CHECK(fpreg64.PopHighestIndex().IsNone());
+
+  CHECK(reg32.IsEmpty());
+  CHECK(reg64.IsEmpty());
+  CHECK(fpreg32.IsEmpty());
+  CHECK(fpreg64.IsEmpty());
+}
+
+
+// Smoke-test MacroAssembler Printf with a variety of argument types, and
+// verify (via RegisterDump) that Printf preserves all registers.
+TEST(printf) {
+  INIT_V8();
+  SETUP();
+  START();
+
+  char const * test_plain_string = "Printf with no arguments.\n";
+  char const * test_substring = "'This is a substring.'";
+  RegisterDump before;
+
+  // Initialize x29 to the value of the stack pointer. We will use x29 as a
+  // temporary stack pointer later, and initializing it in this way allows the
+  // RegisterDump check to pass.
+  __ Mov(x29, __ StackPointer());
+
+  // Test simple integer arguments.
+  __ Mov(x0, 1234);
+  __ Mov(x1, 0x1234);
+
+  // Test simple floating-point arguments.
+  __ Fmov(d0, 1.234);
+
+  // Test pointer (string) arguments.
+  __ Mov(x2, reinterpret_cast<uintptr_t>(test_substring));
+
+  // Test the maximum number of arguments, and sign extension.
+  __ Mov(w3, 0xffffffff);
+  __ Mov(w4, 0xffffffff);
+  __ Mov(x5, 0xffffffffffffffff);
+  __ Mov(x6, 0xffffffffffffffff);
+  __ Fmov(s1, 1.234);
+  __ Fmov(s2, 2.345);
+  __ Fmov(d3, 3.456);
+  __ Fmov(d4, 4.567);
+
+  // Test printing callee-saved registers.
+  __ Mov(x28, 0x123456789abcdef);
+  __ Fmov(d10, 42.0);
+
+  // Test with three arguments.
+  __ Mov(x10, 3);
+  __ Mov(x11, 40);
+  __ Mov(x12, 500);
+
+  // x8 and x9 are used by debug code in part of the macro assembler. However,
+  // Printf guarantees to preserve them (so we can use Printf in debug code),
+  // and we need to test that they are properly preserved. The above code
+  // shouldn't need to use them, but we initialize x8 and x9 last to be on the
+  // safe side. This test still assumes that none of the code from
+  // before->Dump() to the end of the test can clobber x8 or x9, so where
+  // possible we use the Assembler directly to be safe.
+  __ orr(x8, xzr, 0x8888888888888888);
+  __ orr(x9, xzr, 0x9999999999999999);
+
+  // Check that we don't clobber any registers, except those that we explicitly
+  // write results into.
+  before.Dump(&masm);
+
+  __ Printf(test_plain_string);   // NOLINT(runtime/printf)
+  __ Printf("x0: %" PRId64", x1: 0x%08" PRIx64 "\n", x0, x1);
+  __ Printf("d0: %f\n", d0);
+  __ Printf("Test %%s: %s\n", x2);
+  __ Printf("w3(uint32): %" PRIu32 "\nw4(int32): %" PRId32 "\n"
+            "x5(uint64): %" PRIu64 "\nx6(int64): %" PRId64 "\n",
+            w3, w4, x5, x6);
+  __ Printf("%%f: %f\n%%g: %g\n%%e: %e\n%%E: %E\n", s1, s2, d3, d4);
+  // NOTE(review): the first conversion is PRIx32 but the argument is the
+  // X-sized x28 (not w28) — presumably intentional here; confirm against the
+  // Printf implementation's argument-size handling.
+  __ Printf("0x%08" PRIx32 ", 0x%016" PRIx64 "\n", x28, x28);
+  __ Printf("%g\n", d10);
+
+  // Test with a different stack pointer.
+  const Register old_stack_pointer = __ StackPointer();
+  __ mov(x29, old_stack_pointer);
+  __ SetStackPointer(x29);
+  __ Printf("old_stack_pointer: 0x%016" PRIx64 "\n", old_stack_pointer);
+  __ mov(old_stack_pointer, __ StackPointer());
+  __ SetStackPointer(old_stack_pointer);
+
+  __ Printf("3=%u, 4=%u, 5=%u\n", x10, x11, x12);
+
+  END();
+  RUN();
+
+  // We cannot easily test the output of the Printf sequences, and because
+  // Printf preserves all registers by default, we can't look at the number of
+  // bytes that were printed. However, the printf_no_preserve test should check
+  // that, and here we just test that we didn't clobber any registers.
+  ASSERT_EQUAL_REGISTERS(before);
+
+  TEARDOWN();
+}
+
+
+// Test PrintfNoPreserve. Unlike Printf, it does not preserve registers and
+// returns the printed length in x0, so each call's result is saved to a
+// callee-saved register and checked against the expected string length.
+TEST(printf_no_preserve) {
+  INIT_V8();
+  SETUP();
+  START();
+
+  char const * test_plain_string = "Printf with no arguments.\n";
+  char const * test_substring = "'This is a substring.'";
+
+  __ PrintfNoPreserve(test_plain_string);   // NOLINT(runtime/printf)
+  __ Mov(x19, x0);
+
+  // Test simple integer arguments.
+  __ Mov(x0, 1234);
+  __ Mov(x1, 0x1234);
+  __ PrintfNoPreserve("x0: %" PRId64", x1: 0x%08" PRIx64 "\n", x0, x1);
+  __ Mov(x20, x0);
+
+  // Test simple floating-point arguments.
+  __ Fmov(d0, 1.234);
+  __ PrintfNoPreserve("d0: %f\n", d0);
+  __ Mov(x21, x0);
+
+  // Test pointer (string) arguments.
+  __ Mov(x2, reinterpret_cast<uintptr_t>(test_substring));
+  __ PrintfNoPreserve("Test %%s: %s\n", x2);
+  __ Mov(x22, x0);
+
+  // Test the maximum number of arguments, and sign extension.
+  __ Mov(w3, 0xffffffff);
+  __ Mov(w4, 0xffffffff);
+  __ Mov(x5, 0xffffffffffffffff);
+  __ Mov(x6, 0xffffffffffffffff);
+  __ PrintfNoPreserve("w3(uint32): %" PRIu32 "\nw4(int32): %" PRId32 "\n"
+                      "x5(uint64): %" PRIu64 "\nx6(int64): %" PRId64 "\n",
+                      w3, w4, x5, x6);
+  __ Mov(x23, x0);
+
+  __ Fmov(s1, 1.234);
+  __ Fmov(s2, 2.345);
+  __ Fmov(d3, 3.456);
+  __ Fmov(d4, 4.567);
+  __ PrintfNoPreserve("%%f: %f\n%%g: %g\n%%e: %e\n%%E: %E\n", s1, s2, d3, d4);
+  __ Mov(x24, x0);
+
+  // Test printing callee-saved registers.
+  __ Mov(x28, 0x123456789abcdef);
+  __ PrintfNoPreserve("0x%08" PRIx32 ", 0x%016" PRIx64 "\n", x28, x28);
+  __ Mov(x25, x0);
+
+  __ Fmov(d10, 42.0);
+  __ PrintfNoPreserve("%g\n", d10);
+  __ Mov(x26, x0);
+
+  // Test with a different stack pointer.
+  const Register old_stack_pointer = __ StackPointer();
+  __ Mov(x29, old_stack_pointer);
+  __ SetStackPointer(x29);
+
+  __ PrintfNoPreserve("old_stack_pointer: 0x%016" PRIx64 "\n",
+                      old_stack_pointer);
+  __ Mov(x27, x0);
+
+  __ Mov(old_stack_pointer, __ StackPointer());
+  __ SetStackPointer(old_stack_pointer);
+
+  // Test with three arguments.
+  __ Mov(x3, 3);
+  __ Mov(x4, 40);
+  __ Mov(x5, 500);
+  __ PrintfNoPreserve("3=%u, 4=%u, 5=%u\n", x3, x4, x5);
+  __ Mov(x28, x0);
+
+  END();
+  RUN();
+
+  // We cannot easily test the exact output of the Printf sequences, but we can
+  // use the return code to check that the string length was correct.
+
+  // Printf with no arguments.
+  ASSERT_EQUAL_64(strlen(test_plain_string), x19);
+  // x0: 1234, x1: 0x00001234
+  ASSERT_EQUAL_64(25, x20);
+  // d0: 1.234000
+  ASSERT_EQUAL_64(13, x21);
+  // Test %s: 'This is a substring.'
+  ASSERT_EQUAL_64(32, x22);
+  // w3(uint32): 4294967295
+  // w4(int32): -1
+  // x5(uint64): 18446744073709551615
+  // x6(int64): -1
+  ASSERT_EQUAL_64(23 + 14 + 33 + 14, x23);
+  // %f: 1.234000
+  // %g: 2.345
+  // %e: 3.456000e+00
+  // %E: 4.567000E+00
+  ASSERT_EQUAL_64(13 + 10 + 17 + 17, x24);
+  // 0x89abcdef, 0x0123456789abcdef
+  ASSERT_EQUAL_64(31, x25);
+  // 42
+  ASSERT_EQUAL_64(3, x26);
+  // old_stack_pointer: 0x00007fb037ae2370
+  // Note: This is an example value, but the field width is fixed here so the
+  // string length is still predictable.
+  ASSERT_EQUAL_64(38, x27);
+  // 3=3, 4=40, 5=500
+  ASSERT_EQUAL_64(17, x28);
+
+  TEARDOWN();
+}
+
+
+// This is a V8-specific test.
+// Exercise MacroAssembler::CopyFields with the given scratch-register list
+// (`temps`), copying 0..11 fields from a fixed source buffer to freshly
+// allocated destinations, then verifying each copy in C++.
+static void CopyFieldsHelper(CPURegList temps) {
+  static const uint64_t kLiteralBase = 0x0100001000100101UL;
+  static const uint64_t src[] = {kLiteralBase * 1,
+                                 kLiteralBase * 2,
+                                 kLiteralBase * 3,
+                                 kLiteralBase * 4,
+                                 kLiteralBase * 5,
+                                 kLiteralBase * 6,
+                                 kLiteralBase * 7,
+                                 kLiteralBase * 8,
+                                 kLiteralBase * 9,
+                                 kLiteralBase * 10,
+                                 kLiteralBase * 11};
+  // CopyFields expects tagged (heap-object-style) pointers.
+  static const uint64_t src_tagged =
+      reinterpret_cast<uint64_t>(src) + kHeapObjectTag;
+
+  static const unsigned kTestCount = sizeof(src) / sizeof(src[0]) + 1;
+  uint64_t* dst[kTestCount];
+  uint64_t dst_tagged[kTestCount];
+
+  // The first test will be to copy 0 fields. The destination (and source)
+  // should not be accessed in any way.
+  dst[0] = NULL;
+  dst_tagged[0] = kHeapObjectTag;
+
+  // Allocate memory for each other test. Each test <n> will have <n> fields.
+  // This is intended to exercise as many paths in CopyFields as possible.
+  for (unsigned i = 1; i < kTestCount; i++) {
+    dst[i] = new uint64_t[i];
+    memset(dst[i], 0, i * sizeof(kLiteralBase));
+    dst_tagged[i] = reinterpret_cast<uint64_t>(dst[i]) + kHeapObjectTag;
+  }
+
+  SETUP();
+  START();
+
+  // Zero-field copy: source pointer is deliberately bogus (0) since it must
+  // not be dereferenced.
+  __ Mov(x0, dst_tagged[0]);
+  __ Mov(x1, 0);
+  __ CopyFields(x0, x1, temps, 0);
+  for (unsigned i = 1; i < kTestCount; i++) {
+    __ Mov(x0, dst_tagged[i]);
+    __ Mov(x1, src_tagged);
+    __ CopyFields(x0, x1, temps, i);
+  }
+
+  END();
+  RUN();
+  TEARDOWN();
+
+  // Verify every copied field, then release the destination buffers.
+  for (unsigned i = 1; i < kTestCount; i++) {
+    for (unsigned j = 0; j < i; j++) {
+      CHECK(src[j] == dst[i][j]);
+    }
+    delete [] dst[i];
+  }
+}
+
+
+// This is a V8-specific test.
+// Run CopyFieldsHelper with scratch lists of one to four registers, to cover
+// the different temp-register code paths in CopyFields.
+TEST(copyfields) {
+  INIT_V8();
+  CopyFieldsHelper(CPURegList(x10));
+  CopyFieldsHelper(CPURegList(x10, x11));
+  CopyFieldsHelper(CPURegList(x10, x11, x12));
+  CopyFieldsHelper(CPURegList(x10, x11, x12, x13));
+}
+
+
+// Tag `value` as a smi, run SmiAbs on it, and check the result.
+// If `must_fail` is true, the value is expected to overflow (the most
+// negative smi) and SmiAbs must branch to the slow path instead.
+static void DoSmiAbsTest(int32_t value, bool must_fail = false) {
+  SETUP();
+
+  START();
+  Label end, slow;
+  // x2 sentinel: stays 0xc001c0de on the fast path, becomes 0xbad on slow.
+  __ Mov(x2, 0xc001c0de);
+  __ Mov(x1, value);
+  __ SmiTag(x1);
+  __ SmiAbs(x1, &slow);
+  __ SmiUntag(x1);
+  __ B(&end);
+
+  __ Bind(&slow);
+  __ Mov(x2, 0xbad);
+
+  __ Bind(&end);
+  END();
+
+  RUN();
+
+  if (must_fail) {
+    // We tested an invalid conversion. The code must have jump on slow.
+    ASSERT_EQUAL_64(0xbad, x2);
+  } else {
+    // The conversion is valid, check the result.
+    int32_t result = (value >= 0) ? value : -value;
+    ASSERT_EQUAL_64(result, x1);
+
+    // Check that we didn't jump on slow.
+    ASSERT_EQUAL_64(0xc001c0de, x2);
+  }
+
+  TEARDOWN();
+}
+
+
+// Drive DoSmiAbsTest over simple values, boundaries, and the overflow case.
+TEST(smi_abs) {
+  INIT_V8();
+  // Simple and edge cases.
+  DoSmiAbsTest(0);
+  DoSmiAbsTest(0x12345);
+  DoSmiAbsTest(0x40000000);
+  DoSmiAbsTest(0x7fffffff);
+  DoSmiAbsTest(-1);
+  DoSmiAbsTest(-12345);
+  DoSmiAbsTest(0x80000001);
+
+  // Check that the most negative SMI is detected.
+  DoSmiAbsTest(0x80000000, true);
+}
+
+
+TEST(blr_lr) {
+  // A simple test to check that the simulator correcty handle "blr lr".
+  INIT_V8();
+  SETUP();
+
+  START();
+  Label target;
+  Label end;
+
+  __ Mov(x0, 0x0);
+  // Load the address of `target` into lr, then branch-and-link through lr.
+  __ Adr(lr, &target);
+
+  __ Blr(lr);
+  // This instruction must be skipped: Blr jumps straight to `target`.
+  __ Mov(x0, 0xdeadbeef);
+  __ B(&end);
+
+  __ Bind(&target);
+  __ Mov(x0, 0xc001c0de);
+
+  __ Bind(&end);
+  END();
+
+  RUN();
+
+  ASSERT_EQUAL_64(0xc001c0de, x0);
+
+  TEARDOWN();
+}
+
+
+TEST(barriers) {
+  // Generate all supported barriers, this is just a smoke test
+  // (every domain x type combination for DMB and DSB, plus ISB); the test
+  // only checks that assembly and execution complete without error.
+  INIT_V8();
+  SETUP();
+
+  START();
+
+  // DMB
+  __ Dmb(FullSystem, BarrierAll);
+  __ Dmb(FullSystem, BarrierReads);
+  __ Dmb(FullSystem, BarrierWrites);
+  __ Dmb(FullSystem, BarrierOther);
+
+  __ Dmb(InnerShareable, BarrierAll);
+  __ Dmb(InnerShareable, BarrierReads);
+  __ Dmb(InnerShareable, BarrierWrites);
+  __ Dmb(InnerShareable, BarrierOther);
+
+  __ Dmb(NonShareable, BarrierAll);
+  __ Dmb(NonShareable, BarrierReads);
+  __ Dmb(NonShareable, BarrierWrites);
+  __ Dmb(NonShareable, BarrierOther);
+
+  __ Dmb(OuterShareable, BarrierAll);
+  __ Dmb(OuterShareable, BarrierReads);
+  __ Dmb(OuterShareable, BarrierWrites);
+  __ Dmb(OuterShareable, BarrierOther);
+
+  // DSB
+  __ Dsb(FullSystem, BarrierAll);
+  __ Dsb(FullSystem, BarrierReads);
+  __ Dsb(FullSystem, BarrierWrites);
+  __ Dsb(FullSystem, BarrierOther);
+
+  __ Dsb(InnerShareable, BarrierAll);
+  __ Dsb(InnerShareable, BarrierReads);
+  __ Dsb(InnerShareable, BarrierWrites);
+  __ Dsb(InnerShareable, BarrierOther);
+
+  __ Dsb(NonShareable, BarrierAll);
+  __ Dsb(NonShareable, BarrierReads);
+  __ Dsb(NonShareable, BarrierWrites);
+  __ Dsb(NonShareable, BarrierOther);
+
+  __ Dsb(OuterShareable, BarrierAll);
+  __ Dsb(OuterShareable, BarrierReads);
+  __ Dsb(OuterShareable, BarrierWrites);
+  __ Dsb(OuterShareable, BarrierOther);
+
+  // ISB
+  __ Isb();
+
+  END();
+
+  RUN();
+
+  TEARDOWN();
+}
+
+
+// Emit a Call with no relocation info and verify that
+// return_address_from_call_start correctly recovers the return address from
+// the call-site start address.
+TEST(call_no_relocation) {
+  Address call_start;
+  Address return_address;
+
+  INIT_V8();
+  SETUP();
+
+  START();
+
+  Label function;
+  Label test;
+
+  __ B(&test);
+
+  // The callee: sets x0 = 1 and returns.
+  __ Bind(&function);
+  __ Mov(x0, 0x1);
+  __ Ret();
+
+  __ Bind(&test);
+  __ Mov(x0, 0x0);
+  // Preserve lr across the call (paired with xzr for stack alignment).
+  __ Push(lr, xzr);
+  {
+    // Block the constant pool so the call sequence is contiguous and its
+    // start/return offsets are meaningful.
+    Assembler::BlockConstPoolScope scope(&masm);
+    call_start = buf + __ pc_offset();
+    __ Call(buf + function.pos(), RelocInfo::NONE64);
+    return_address = buf + __ pc_offset();
+  }
+  __ Pop(xzr, lr);
+  END();
+
+  RUN();
+
+  ASSERT_EQUAL_64(1, x0);
+
+  // The return_address_from_call_start function doesn't currently encounter any
+  // non-relocatable sequences, so we check it here to make sure it works.
+  // TODO(jbramley): Once Crankshaft is complete, decide if we need to support
+  // non-relocatable calls at all.
+  CHECK(return_address ==
+        Assembler::return_address_from_call_start(call_start));
+
+  TEARDOWN();
+}
+
+
+static void ECMA262ToInt32Helper(int32_t expected, double input) {
+ SETUP();
+ START();
+
+ __ Fmov(d0, input);
+
+ __ ECMA262ToInt32(x0, d0, x10, x11, MacroAssembler::INT32_IN_W);
+ __ ECMA262ToInt32(x1, d0, x10, x11, MacroAssembler::INT32_IN_X);
+ __ ECMA262ToInt32(x2, d0, x10, x11, MacroAssembler::SMI);
+
+ // The upper bits of INT32_IN_W are undefined, so make sure we don't try to
+ // test them.
+ __ Mov(w0, w0);
+
+ END();
+
+ RUN();
+
+ int64_t expected64 = expected;
+
+ ASSERT_EQUAL_32(expected, w0);
+ ASSERT_EQUAL_64(expected64, x1);
+ ASSERT_EQUAL_64(expected64 << kSmiShift | kSmiTag, x2);
+
+ TEARDOWN();
+}
+
+
+TEST(ecma_262_to_int32) {
+ INIT_V8();
+ // ==== exponent < 64 ====
+
+ ECMA262ToInt32Helper(0, 0.0);
+ ECMA262ToInt32Helper(0, -0.0);
+ ECMA262ToInt32Helper(1, 1.0);
+ ECMA262ToInt32Helper(-1, -1.0);
+
+ // The largest representable value that is less than 1.
+ ECMA262ToInt32Helper(0, 0x001fffffffffffff * pow(2.0, -53));
+ ECMA262ToInt32Helper(0, 0x001fffffffffffff * -pow(2.0, -53));
+ ECMA262ToInt32Helper(0, std::numeric_limits<double>::denorm_min());
+ ECMA262ToInt32Helper(0, -std::numeric_limits<double>::denorm_min());
+
+ // The largest conversion which doesn't require the integer modulo-2^32 step.
+ ECMA262ToInt32Helper(0x7fffffff, 0x7fffffff);
+ ECMA262ToInt32Helper(-0x80000000, -0x80000000);
+
+ // The largest simple conversion, requiring modulo-2^32, but where the fcvt
+ // does not saturate when converting to int64_t.
+ ECMA262ToInt32Helper(0xfffffc00, 0x7ffffffffffffc00);
+ ECMA262ToInt32Helper(-0xfffffc00, 0x7ffffffffffffc00 * -1.0);
+
+ // ==== 64 <= exponent < 84 ====
+
+ // The smallest conversion where the fcvt saturates.
+ ECMA262ToInt32Helper(0, 0x8000000000000000);
+ ECMA262ToInt32Helper(0, 0x8000000000000000 * -1.0);
+
+ // The smallest conversion where the fcvt saturates, and where all the
+ // mantissa bits are '1' (to check the shift logic).
+ ECMA262ToInt32Helper(0xfffff800, 0xfffffffffffff800);
+ ECMA262ToInt32Helper(-0xfffff800, 0xfffffffffffff800 * -1.0);
+
+ // The largest conversion which doesn't produce a zero result.
+ ECMA262ToInt32Helper(0x80000000, 0x001fffffffffffff * pow(2.0, 31));
+ ECMA262ToInt32Helper(-0x80000000, 0x001fffffffffffff * -pow(2.0, 31));
+
+ // Some large conversions to check the shifting function.
+ ECMA262ToInt32Helper(0x6789abcd, 0x001123456789abcd);
+ ECMA262ToInt32Helper(0x12345678, 0x001123456789abcd * pow(2.0, -20));
+ ECMA262ToInt32Helper(0x891a2b3c, 0x001123456789abcd * pow(2.0, -21));
+ ECMA262ToInt32Helper(0x11234567, 0x001123456789abcd * pow(2.0, -24));
+ ECMA262ToInt32Helper(-0x6789abcd, 0x001123456789abcd * -1.0);
+ ECMA262ToInt32Helper(-0x12345678, 0x001123456789abcd * -pow(2.0, -20));
+ ECMA262ToInt32Helper(-0x891a2b3c, 0x001123456789abcd * -pow(2.0, -21));
+ ECMA262ToInt32Helper(-0x11234567, 0x001123456789abcd * -pow(2.0, -24));
+
+ // ==== 84 <= exponent ====
+
+ // The smallest conversion which produces a zero result by shifting the
+ // mantissa out of the int32_t range.
+ ECMA262ToInt32Helper(0, pow(2.0, 32));
+ ECMA262ToInt32Helper(0, -pow(2.0, 32));
+
+ // Some very large conversions.
+ ECMA262ToInt32Helper(0, 0x001fffffffffffff * pow(2.0, 32));
+ ECMA262ToInt32Helper(0, 0x001fffffffffffff * -pow(2.0, 32));
+ ECMA262ToInt32Helper(0, DBL_MAX);
+ ECMA262ToInt32Helper(0, -DBL_MAX);
+
+ // ==== Special values. ====
+
+ ECMA262ToInt32Helper(0, std::numeric_limits<double>::infinity());
+ ECMA262ToInt32Helper(0, -std::numeric_limits<double>::infinity());
+ ECMA262ToInt32Helper(0, std::numeric_limits<double>::quiet_NaN());
+ ECMA262ToInt32Helper(0, -std::numeric_limits<double>::quiet_NaN());
+ ECMA262ToInt32Helper(0, std::numeric_limits<double>::signaling_NaN());
+ ECMA262ToInt32Helper(0, -std::numeric_limits<double>::signaling_NaN());
+}
+
+
+static void AbsHelperX(int64_t value) {
+ int64_t expected;
+
+ SETUP();
+ START();
+
+ Label fail;
+ Label done;
+
+ __ Mov(x0, 0);
+ __ Mov(x1, value);
+
+ if (value != kXMinInt) {
+ expected = labs(value);
+
+ Label next;
+ // The result is representable.
+ __ Abs(x10, x1);
+ __ Abs(x11, x1, &fail);
+ __ Abs(x12, x1, &fail, &next);
+ __ Bind(&next);
+ __ Abs(x13, x1, NULL, &done);
+ } else {
+ // labs is undefined for kXMinInt but our implementation in the
+ // MacroAssembler will return kXMinInt in such a case.
+ expected = kXMinInt;
+
+ Label next;
+ // The result is not representable.
+ __ Abs(x10, x1);
+ __ Abs(x11, x1, NULL, &fail);
+ __ Abs(x12, x1, &next, &fail);
+ __ Bind(&next);
+ __ Abs(x13, x1, &done);
+ }
+
+ __ Bind(&fail);
+ __ Mov(x0, -1);
+
+ __ Bind(&done);
+
+ END();
+ RUN();
+
+ ASSERT_EQUAL_64(0, x0);
+ ASSERT_EQUAL_64(value, x1);
+ ASSERT_EQUAL_64(expected, x10);
+ ASSERT_EQUAL_64(expected, x11);
+ ASSERT_EQUAL_64(expected, x12);
+ ASSERT_EQUAL_64(expected, x13);
+
+ TEARDOWN();
+}
+
+
+static void AbsHelperW(int32_t value) {
+ int32_t expected;
+
+ SETUP();
+ START();
+
+ Label fail;
+ Label done;
+
+ __ Mov(w0, 0);
+ // TODO(jbramley): The cast is needed to avoid a sign-extension bug in VIXL.
+ // Once it is fixed, we should remove the cast.
+ __ Mov(w1, static_cast<uint32_t>(value));
+
+ if (value != kWMinInt) {
+ expected = abs(value);
+
+ Label next;
+ // The result is representable.
+ __ Abs(w10, w1);
+ __ Abs(w11, w1, &fail);
+ __ Abs(w12, w1, &fail, &next);
+ __ Bind(&next);
+ __ Abs(w13, w1, NULL, &done);
+ } else {
+ // abs is undefined for kWMinInt but our implementation in the
+ // MacroAssembler will return kWMinInt in such a case.
+ expected = kWMinInt;
+
+ Label next;
+ // The result is not representable.
+ __ Abs(w10, w1);
+ __ Abs(w11, w1, NULL, &fail);
+ __ Abs(w12, w1, &next, &fail);
+ __ Bind(&next);
+ __ Abs(w13, w1, &done);
+ }
+
+ __ Bind(&fail);
+ __ Mov(w0, -1);
+
+ __ Bind(&done);
+
+ END();
+ RUN();
+
+ ASSERT_EQUAL_32(0, w0);
+ ASSERT_EQUAL_32(value, w1);
+ ASSERT_EQUAL_32(expected, w10);
+ ASSERT_EQUAL_32(expected, w11);
+ ASSERT_EQUAL_32(expected, w12);
+ ASSERT_EQUAL_32(expected, w13);
+
+ TEARDOWN();
+}
+
+
+TEST(abs) {
+ INIT_V8();
+ AbsHelperX(0);
+ AbsHelperX(42);
+ AbsHelperX(-42);
+ AbsHelperX(kXMinInt);
+ AbsHelperX(kXMaxInt);
+
+ AbsHelperW(0);
+ AbsHelperW(42);
+ AbsHelperW(-42);
+ AbsHelperW(kWMinInt);
+ AbsHelperW(kWMaxInt);
+}
--- /dev/null
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include <stdio.h>
+#include <cstring>
+#include "cctest.h"
+
+#include "v8.h"
+
+#include "macro-assembler.h"
+#include "a64/assembler-a64.h"
+#include "a64/macro-assembler-a64.h"
+#include "a64/disasm-a64.h"
+#include "a64/utils-a64.h"
+
+using namespace v8::internal;
+
+#define TEST_(name) TEST(DISASM_##name)
+
+#define EXP_SIZE (256)
+#define INSTR_SIZE (1024)
+#define SET_UP_CLASS(ASMCLASS) \
+ InitializeVM(); \
+ Isolate* isolate = Isolate::Current(); \
+ HandleScope scope(isolate); \
+ byte* buf = static_cast<byte*>(malloc(INSTR_SIZE)); \
+ uint32_t encoding = 0; \
+ ASMCLASS* assm = new ASMCLASS(isolate, buf, INSTR_SIZE); \
+ Decoder* decoder = new Decoder(); \
+ Disassembler* disasm = new Disassembler(); \
+ decoder->AppendVisitor(disasm)
+
+#define SET_UP() SET_UP_CLASS(Assembler)
+
+#define COMPARE(ASM, EXP) \
+ assm->Reset(); \
+ assm->ASM; \
+ assm->GetCode(NULL); \
+ decoder->Decode(reinterpret_cast<Instruction*>(buf)); \
+ encoding = *reinterpret_cast<uint32_t*>(buf); \
+ if (strcmp(disasm->GetOutput(), EXP) != 0) { \
+ printf("%u : Encoding: %08" PRIx32 "\nExpected: %s\nFound: %s\n", \
+ __LINE__, encoding, EXP, disasm->GetOutput()); \
+ abort(); \
+ }
+
+#define COMPARE_PREFIX(ASM, EXP) \
+ assm->Reset(); \
+ assm->ASM; \
+ assm->GetCode(NULL); \
+ decoder->Decode(reinterpret_cast<Instruction*>(buf)); \
+ encoding = *reinterpret_cast<uint32_t*>(buf); \
+ if (strncmp(disasm->GetOutput(), EXP, strlen(EXP)) != 0) { \
+ printf("%u : Encoding: %08" PRIx32 "\nExpected: %s\nFound: %s\n", \
+ __LINE__, encoding, EXP, disasm->GetOutput()); \
+ abort(); \
+ }
+
+#define CLEANUP() \
+ delete disasm; \
+ delete decoder; \
+ delete assm
+
+
+static bool vm_initialized = false;
+
+
+static void InitializeVM() {
+ if (!vm_initialized) {
+ CcTest::InitializeVM();
+ vm_initialized = true;
+ }
+}
+
+
+TEST_(bootstrap) {
+ SET_UP();
+
+ // Instructions generated by C compiler, disassembled by objdump, and
+ // reformatted to suit our disassembly style.
+ COMPARE(dci(0xa9ba7bfd), "stp fp, lr, [csp, #-96]!");
+ COMPARE(dci(0x910003fd), "mov fp, csp");
+ COMPARE(dci(0x9100e3a0), "add x0, fp, #0x38 (56)");
+ COMPARE(dci(0xb900001f), "str wzr, [x0]");
+ COMPARE(dci(0x528000e1), "movz w1, #0x7");
+ COMPARE(dci(0xb9001c01), "str w1, [x0, #28]");
+ COMPARE(dci(0x390043a0), "strb w0, [fp, #16]");
+ COMPARE(dci(0x790027a0), "strh w0, [fp, #18]");
+ COMPARE(dci(0xb9400400), "ldr w0, [x0, #4]");
+ COMPARE(dci(0x0b000021), "add w1, w1, w0");
+ COMPARE(dci(0x531b6800), "lsl w0, w0, #5");
+ COMPARE(dci(0x521e0400), "eor w0, w0, #0xc");
+ COMPARE(dci(0x72af0f00), "movk w0, #0x7878, lsl #16");
+ COMPARE(dci(0xd360fc00), "lsr x0, x0, #32");
+ COMPARE(dci(0x13037c01), "asr w1, w0, #3");
+ COMPARE(dci(0x4b000021), "sub w1, w1, w0");
+ COMPARE(dci(0x2a0103e0), "mov w0, w1");
+ COMPARE(dci(0x93407c00), "sxtw x0, w0");
+ COMPARE(dci(0x2a000020), "orr w0, w1, w0");
+ COMPARE(dci(0xa8c67bfd), "ldp fp, lr, [csp], #96");
+
+ CLEANUP();
+}
+
+
+TEST_(mov_mvn) {
+ SET_UP_CLASS(MacroAssembler);
+
+ COMPARE(Mov(w0, Operand(0x1234)), "movz w0, #0x1234");
+ COMPARE(Mov(x1, Operand(0x1234)), "movz x1, #0x1234");
+ COMPARE(Mov(w2, Operand(w3)), "mov w2, w3");
+ COMPARE(Mov(x4, Operand(x5)), "mov x4, x5");
+ COMPARE(Mov(w6, Operand(w7, LSL, 5)), "lsl w6, w7, #5");
+ COMPARE(Mov(x8, Operand(x9, ASR, 42)), "asr x8, x9, #42");
+ COMPARE(Mov(w10, Operand(w11, UXTB)), "uxtb w10, w11");
+ COMPARE(Mov(x12, Operand(x13, UXTB, 1)), "ubfiz x12, x13, #1, #8");
+ COMPARE(Mov(w14, Operand(w15, SXTH, 2)), "sbfiz w14, w15, #2, #16");
+ COMPARE(Mov(x16, Operand(x20, SXTW, 3)), "sbfiz x16, x20, #3, #32");
+
+ COMPARE(Mov(x0, csp), "mov x0, csp");
+ COMPARE(Mov(w0, wcsp), "mov w0, wcsp");
+ COMPARE(Mov(x0, xzr), "mov x0, xzr");
+ COMPARE(Mov(w0, wzr), "mov w0, wzr");
+ COMPARE(mov(x0, csp), "mov x0, csp");
+ COMPARE(mov(w0, wcsp), "mov w0, wcsp");
+ COMPARE(mov(x0, xzr), "mov x0, xzr");
+ COMPARE(mov(w0, wzr), "mov w0, wzr");
+
+ COMPARE(Mvn(w0, Operand(0x1)), "movn w0, #0x1");
+ COMPARE(Mvn(x1, Operand(0xfff)), "movn x1, #0xfff");
+ COMPARE(Mvn(w2, Operand(w3)), "mvn w2, w3");
+ COMPARE(Mvn(x4, Operand(x5)), "mvn x4, x5");
+ COMPARE(Mvn(w6, Operand(w7, LSL, 12)), "mvn w6, w7, lsl #12");
+ COMPARE(Mvn(x8, Operand(x9, ASR, 63)), "mvn x8, x9, asr #63");
+
+ CLEANUP();
+}
+
+
+TEST_(move_immediate) {
+ SET_UP();
+
+ COMPARE(movz(w0, 0x1234), "movz w0, #0x1234");
+ COMPARE(movz(x1, 0xabcd0000), "movz x1, #0xabcd0000");
+ COMPARE(movz(x2, 0x555500000000), "movz x2, #0x555500000000");
+ COMPARE(movz(x3, 0xaaaa000000000000), "movz x3, #0xaaaa000000000000");
+ COMPARE(movz(x4, 0xabcd, 16), "movz x4, #0xabcd0000");
+ COMPARE(movz(x5, 0x5555, 32), "movz x5, #0x555500000000");
+ COMPARE(movz(x6, 0xaaaa, 48), "movz x6, #0xaaaa000000000000");
+
+ COMPARE(movk(w7, 0x1234), "movk w7, #0x1234");
+ COMPARE(movk(x8, 0xabcd0000), "movk x8, #0xabcd, lsl #16");
+ COMPARE(movk(x9, 0x555500000000), "movk x9, #0x5555, lsl #32");
+ COMPARE(movk(x10, 0xaaaa000000000000), "movk x10, #0xaaaa, lsl #48");
+ COMPARE(movk(w11, 0xabcd, 16), "movk w11, #0xabcd, lsl #16");
+ COMPARE(movk(x12, 0x5555, 32), "movk x12, #0x5555, lsl #32");
+ COMPARE(movk(x13, 0xaaaa, 48), "movk x13, #0xaaaa, lsl #48");
+
+ COMPARE(movn(w14, 0x1234), "movn w14, #0x1234");
+ COMPARE(movn(x15, 0xabcd0000), "movn x15, #0xabcd0000");
+ COMPARE(movn(x16, 0x555500000000), "movn x16, #0x555500000000");
+ COMPARE(movn(x17, 0xaaaa000000000000), "movn x17, #0xaaaa000000000000");
+ COMPARE(movn(w18, 0xabcd, 16), "movn w18, #0xabcd0000");
+ COMPARE(movn(x19, 0x5555, 32), "movn x19, #0x555500000000");
+ COMPARE(movn(x20, 0xaaaa, 48), "movn x20, #0xaaaa000000000000");
+
+ COMPARE(movk(w21, 0), "movk w21, #0x0");
+ COMPARE(movk(x22, 0, 0), "movk x22, #0x0");
+ COMPARE(movk(w23, 0, 16), "movk w23, #0x0, lsl #16");
+ COMPARE(movk(x24, 0, 32), "movk x24, #0x0, lsl #32");
+ COMPARE(movk(x25, 0, 48), "movk x25, #0x0, lsl #48");
+
+ CLEANUP();
+}
+
+
+TEST_(move_immediate_2) {
+ SET_UP_CLASS(MacroAssembler);
+
+ // Move instructions expected for certain immediates. This is really a macro
+ // assembler test, to ensure it generates immediates efficiently.
+ COMPARE(Mov(w0, 0), "movz w0, #0x0");
+ COMPARE(Mov(w0, 0x0000ffff), "movz w0, #0xffff");
+ COMPARE(Mov(w0, 0x00010000), "movz w0, #0x10000");
+ COMPARE(Mov(w0, 0xffff0000), "movz w0, #0xffff0000");
+ COMPARE(Mov(w0, 0x0001ffff), "movn w0, #0xfffe0000");
+ COMPARE(Mov(w0, 0xffff8000), "movn w0, #0x7fff");
+ COMPARE(Mov(w0, 0xfffffffe), "movn w0, #0x1");
+ COMPARE(Mov(w0, 0xffffffff), "movn w0, #0x0");
+ COMPARE(Mov(w0, 0x00ffff00), "mov w0, #0xffff00");
+ COMPARE(Mov(w0, 0xfffe7fff), "mov w0, #0xfffe7fff");
+ COMPARE(Mov(w0, 0xfffeffff), "movn w0, #0x10000");
+ COMPARE(Mov(w0, 0xffff7fff), "movn w0, #0x8000");
+
+ COMPARE(Mov(x0, 0), "movz x0, #0x0");
+ COMPARE(Mov(x0, 0x0000ffff), "movz x0, #0xffff");
+ COMPARE(Mov(x0, 0x00010000), "movz x0, #0x10000");
+ COMPARE(Mov(x0, 0xffff0000), "movz x0, #0xffff0000");
+ COMPARE(Mov(x0, 0x0001ffff), "mov x0, #0x1ffff");
+ COMPARE(Mov(x0, 0xffff8000), "mov x0, #0xffff8000");
+ COMPARE(Mov(x0, 0xfffffffe), "mov x0, #0xfffffffe");
+ COMPARE(Mov(x0, 0xffffffff), "mov x0, #0xffffffff");
+ COMPARE(Mov(x0, 0x00ffff00), "mov x0, #0xffff00");
+ COMPARE(Mov(x0, 0xffff000000000000), "movz x0, #0xffff000000000000");
+ COMPARE(Mov(x0, 0x0000ffff00000000), "movz x0, #0xffff00000000");
+ COMPARE(Mov(x0, 0x00000000ffff0000), "movz x0, #0xffff0000");
+ COMPARE(Mov(x0, 0xffffffffffff0000), "movn x0, #0xffff");
+ COMPARE(Mov(x0, 0xffffffff0000ffff), "movn x0, #0xffff0000");
+ COMPARE(Mov(x0, 0xffff0000ffffffff), "movn x0, #0xffff00000000");
+ COMPARE(Mov(x0, 0x0000ffffffffffff), "movn x0, #0xffff000000000000");
+ COMPARE(Mov(x0, 0xfffe7fffffffffff), "mov x0, #0xfffe7fffffffffff");
+ COMPARE(Mov(x0, 0xfffeffffffffffff), "movn x0, #0x1000000000000");
+ COMPARE(Mov(x0, 0xffff7fffffffffff), "movn x0, #0x800000000000");
+ COMPARE(Mov(x0, 0xfffffffe7fffffff), "mov x0, #0xfffffffe7fffffff");
+ COMPARE(Mov(x0, 0xfffffffeffffffff), "movn x0, #0x100000000");
+ COMPARE(Mov(x0, 0xffffffff7fffffff), "movn x0, #0x80000000");
+ COMPARE(Mov(x0, 0xfffffffffffe7fff), "mov x0, #0xfffffffffffe7fff");
+ COMPARE(Mov(x0, 0xfffffffffffeffff), "movn x0, #0x10000");
+ COMPARE(Mov(x0, 0xffffffffffff7fff), "movn x0, #0x8000");
+ COMPARE(Mov(x0, 0xffffffffffffffff), "movn x0, #0x0");
+
+ COMPARE(Movk(w0, 0x1234, 0), "movk w0, #0x1234");
+ COMPARE(Movk(x1, 0x2345, 0), "movk x1, #0x2345");
+ COMPARE(Movk(w2, 0x3456, 16), "movk w2, #0x3456, lsl #16");
+ COMPARE(Movk(x3, 0x4567, 16), "movk x3, #0x4567, lsl #16");
+ COMPARE(Movk(x4, 0x5678, 32), "movk x4, #0x5678, lsl #32");
+ COMPARE(Movk(x5, 0x6789, 48), "movk x5, #0x6789, lsl #48");
+
+ CLEANUP();
+}
+
+
+TEST_(add_immediate) {
+ SET_UP();
+
+ COMPARE(add(w0, w1, Operand(0xff)), "add w0, w1, #0xff (255)");
+ COMPARE(add(x2, x3, Operand(0x3ff)), "add x2, x3, #0x3ff (1023)");
+ COMPARE(add(w4, w5, Operand(0xfff)), "add w4, w5, #0xfff (4095)");
+ COMPARE(add(x6, x7, Operand(0x1000)), "add x6, x7, #0x1000 (4096)");
+ COMPARE(add(w8, w9, Operand(0xff000)), "add w8, w9, #0xff000 (1044480)");
+ COMPARE(add(x10, x11, Operand(0x3ff000)),
+ "add x10, x11, #0x3ff000 (4190208)");
+ COMPARE(add(w12, w13, Operand(0xfff000)),
+ "add w12, w13, #0xfff000 (16773120)");
+ COMPARE(adds(w14, w15, Operand(0xff)), "adds w14, w15, #0xff (255)");
+ COMPARE(adds(x16, x17, Operand(0xaa000)),
+ "adds x16, x17, #0xaa000 (696320)");
+ COMPARE(cmn(w18, Operand(0xff)), "cmn w18, #0xff (255)");
+ COMPARE(cmn(x19, Operand(0xff000)), "cmn x19, #0xff000 (1044480)");
+ COMPARE(add(w0, wcsp, Operand(0)), "mov w0, wcsp");
+ COMPARE(add(csp, x0, Operand(0)), "mov csp, x0");
+
+ COMPARE(add(w1, wcsp, Operand(8)), "add w1, wcsp, #0x8 (8)");
+ COMPARE(add(x2, csp, Operand(16)), "add x2, csp, #0x10 (16)");
+ COMPARE(add(wcsp, wcsp, Operand(42)), "add wcsp, wcsp, #0x2a (42)");
+ COMPARE(cmn(csp, Operand(24)), "cmn csp, #0x18 (24)");
+ COMPARE(adds(wzr, wcsp, Operand(9)), "cmn wcsp, #0x9 (9)");
+
+ CLEANUP();
+}
+
+
+TEST_(sub_immediate) {
+ SET_UP();
+
+ COMPARE(sub(w0, w1, Operand(0xff)), "sub w0, w1, #0xff (255)");
+ COMPARE(sub(x2, x3, Operand(0x3ff)), "sub x2, x3, #0x3ff (1023)");
+ COMPARE(sub(w4, w5, Operand(0xfff)), "sub w4, w5, #0xfff (4095)");
+ COMPARE(sub(x6, x7, Operand(0x1000)), "sub x6, x7, #0x1000 (4096)");
+ COMPARE(sub(w8, w9, Operand(0xff000)), "sub w8, w9, #0xff000 (1044480)");
+ COMPARE(sub(x10, x11, Operand(0x3ff000)),
+ "sub x10, x11, #0x3ff000 (4190208)");
+ COMPARE(sub(w12, w13, Operand(0xfff000)),
+ "sub w12, w13, #0xfff000 (16773120)");
+ COMPARE(subs(w14, w15, Operand(0xff)), "subs w14, w15, #0xff (255)");
+ COMPARE(subs(x16, x17, Operand(0xaa000)),
+ "subs x16, x17, #0xaa000 (696320)");
+ COMPARE(cmp(w18, Operand(0xff)), "cmp w18, #0xff (255)");
+ COMPARE(cmp(x19, Operand(0xff000)), "cmp x19, #0xff000 (1044480)");
+
+ COMPARE(sub(w1, wcsp, Operand(8)), "sub w1, wcsp, #0x8 (8)");
+ COMPARE(sub(x2, csp, Operand(16)), "sub x2, csp, #0x10 (16)");
+ COMPARE(sub(wcsp, wcsp, Operand(42)), "sub wcsp, wcsp, #0x2a (42)");
+ COMPARE(cmp(csp, Operand(24)), "cmp csp, #0x18 (24)");
+ COMPARE(subs(wzr, wcsp, Operand(9)), "cmp wcsp, #0x9 (9)");
+
+ CLEANUP();
+}
+
+
+TEST_(add_shifted) {
+ SET_UP();
+
+ COMPARE(add(w0, w1, Operand(w2)), "add w0, w1, w2");
+ COMPARE(add(x3, x4, Operand(x5)), "add x3, x4, x5");
+ COMPARE(add(w6, w7, Operand(w8, LSL, 1)), "add w6, w7, w8, lsl #1");
+ COMPARE(add(x9, x10, Operand(x11, LSL, 2)), "add x9, x10, x11, lsl #2");
+ COMPARE(add(w12, w13, Operand(w14, LSR, 3)), "add w12, w13, w14, lsr #3");
+ COMPARE(add(x15, x16, Operand(x17, LSR, 4)), "add x15, x16, x17, lsr #4");
+ COMPARE(add(w18, w19, Operand(w20, ASR, 5)), "add w18, w19, w20, asr #5");
+ COMPARE(add(x21, x22, Operand(x23, ASR, 6)), "add x21, x22, x23, asr #6");
+ COMPARE(cmn(w24, Operand(w25)), "cmn w24, w25");
+ COMPARE(cmn(x26, Operand(cp, LSL, 63)), "cmn x26, cp, lsl #63");
+
+ COMPARE(add(x0, csp, Operand(x1)), "add x0, csp, x1");
+ COMPARE(add(w2, wcsp, Operand(w3)), "add w2, wcsp, w3");
+ COMPARE(add(x4, csp, Operand(x5, LSL, 1)), "add x4, csp, x5, lsl #1");
+ COMPARE(add(x4, xzr, Operand(x5, LSL, 1)), "add x4, xzr, x5, lsl #1");
+ COMPARE(add(w6, wcsp, Operand(w7, LSL, 3)), "add w6, wcsp, w7, lsl #3");
+ COMPARE(adds(xzr, csp, Operand(x8, LSL, 4)), "cmn csp, x8, lsl #4");
+ COMPARE(adds(xzr, xzr, Operand(x8, LSL, 5)), "cmn xzr, x8, lsl #5");
+
+ CLEANUP();
+}
+
+
+TEST_(sub_shifted) {
+ SET_UP();
+
+ COMPARE(sub(w0, w1, Operand(w2)), "sub w0, w1, w2");
+ COMPARE(sub(x3, x4, Operand(x5)), "sub x3, x4, x5");
+ COMPARE(sub(w6, w7, Operand(w8, LSL, 1)), "sub w6, w7, w8, lsl #1");
+ COMPARE(sub(x9, x10, Operand(x11, LSL, 2)), "sub x9, x10, x11, lsl #2");
+ COMPARE(sub(w12, w13, Operand(w14, LSR, 3)), "sub w12, w13, w14, lsr #3");
+ COMPARE(sub(x15, x16, Operand(x17, LSR, 4)), "sub x15, x16, x17, lsr #4");
+ COMPARE(sub(w18, w19, Operand(w20, ASR, 5)), "sub w18, w19, w20, asr #5");
+ COMPARE(sub(x21, x22, Operand(x23, ASR, 6)), "sub x21, x22, x23, asr #6");
+ COMPARE(cmp(w24, Operand(w25)), "cmp w24, w25");
+ COMPARE(cmp(x26, Operand(cp, LSL, 63)), "cmp x26, cp, lsl #63");
+ COMPARE(neg(w28, Operand(w29)), "neg w28, w29");
+ COMPARE(neg(lr, Operand(x0, LSR, 62)), "neg lr, x0, lsr #62");
+ COMPARE(negs(w1, Operand(w2)), "negs w1, w2");
+ COMPARE(negs(x3, Operand(x4, ASR, 61)), "negs x3, x4, asr #61");
+
+ COMPARE(sub(x0, csp, Operand(x1)), "sub x0, csp, x1");
+ COMPARE(sub(w2, wcsp, Operand(w3)), "sub w2, wcsp, w3");
+ COMPARE(sub(x4, csp, Operand(x5, LSL, 1)), "sub x4, csp, x5, lsl #1");
+ COMPARE(sub(x4, xzr, Operand(x5, LSL, 1)), "neg x4, x5, lsl #1");
+ COMPARE(sub(w6, wcsp, Operand(w7, LSL, 3)), "sub w6, wcsp, w7, lsl #3");
+ COMPARE(subs(xzr, csp, Operand(x8, LSL, 4)), "cmp csp, x8, lsl #4");
+ COMPARE(subs(xzr, xzr, Operand(x8, LSL, 5)), "cmp xzr, x8, lsl #5");
+
+ CLEANUP();
+}
+
+
+TEST_(add_extended) {
+ SET_UP();
+
+ COMPARE(add(w0, w1, Operand(w2, UXTB)), "add w0, w1, w2, uxtb");
+ COMPARE(adds(x3, x4, Operand(w5, UXTB, 1)), "adds x3, x4, w5, uxtb #1");
+ COMPARE(add(w6, w7, Operand(w8, UXTH, 2)), "add w6, w7, w8, uxth #2");
+ COMPARE(adds(x9, x10, Operand(x11, UXTW, 3)), "adds x9, x10, w11, uxtw #3");
+ COMPARE(add(x12, x13, Operand(x14, UXTX, 4)), "add x12, x13, x14, uxtx #4");
+ COMPARE(adds(w15, w16, Operand(w17, SXTB, 4)), "adds w15, w16, w17, sxtb #4");
+ COMPARE(add(x18, x19, Operand(x20, SXTB, 3)), "add x18, x19, w20, sxtb #3");
+ COMPARE(adds(w21, w22, Operand(w23, SXTH, 2)), "adds w21, w22, w23, sxth #2");
+ COMPARE(add(x24, x25, Operand(x26, SXTW, 1)), "add x24, x25, w26, sxtw #1");
+ COMPARE(adds(cp, jssp, Operand(fp, SXTX)), "adds cp, jssp, fp, sxtx");
+ COMPARE(cmn(w0, Operand(w1, UXTB, 2)), "cmn w0, w1, uxtb #2");
+ COMPARE(cmn(x2, Operand(x3, SXTH, 4)), "cmn x2, w3, sxth #4");
+
+ COMPARE(add(w0, wcsp, Operand(w1, UXTB)), "add w0, wcsp, w1, uxtb");
+ COMPARE(add(x2, csp, Operand(x3, UXTH, 1)), "add x2, csp, w3, uxth #1");
+ COMPARE(add(wcsp, wcsp, Operand(w4, UXTW, 2)), "add wcsp, wcsp, w4, lsl #2");
+ COMPARE(cmn(csp, Operand(xzr, UXTX, 3)), "cmn csp, xzr, lsl #3");
+ COMPARE(cmn(csp, Operand(xzr, LSL, 4)), "cmn csp, xzr, lsl #4");
+
+ CLEANUP();
+}
+
+
+TEST_(sub_extended) {
+ SET_UP();
+
+ COMPARE(sub(w0, w1, Operand(w2, UXTB)), "sub w0, w1, w2, uxtb");
+ COMPARE(subs(x3, x4, Operand(w5, UXTB, 1)), "subs x3, x4, w5, uxtb #1");
+ COMPARE(sub(w6, w7, Operand(w8, UXTH, 2)), "sub w6, w7, w8, uxth #2");
+ COMPARE(subs(x9, x10, Operand(x11, UXTW, 3)), "subs x9, x10, w11, uxtw #3");
+ COMPARE(sub(x12, x13, Operand(x14, UXTX, 4)), "sub x12, x13, x14, uxtx #4");
+ COMPARE(subs(w15, w16, Operand(w17, SXTB, 4)), "subs w15, w16, w17, sxtb #4");
+ COMPARE(sub(x18, x19, Operand(x20, SXTB, 3)), "sub x18, x19, w20, sxtb #3");
+ COMPARE(subs(w21, w22, Operand(w23, SXTH, 2)), "subs w21, w22, w23, sxth #2");
+ COMPARE(sub(x24, x25, Operand(x26, SXTW, 1)), "sub x24, x25, w26, sxtw #1");
+ COMPARE(subs(cp, jssp, Operand(fp, SXTX)), "subs cp, jssp, fp, sxtx");
+ COMPARE(cmp(w0, Operand(w1, SXTB, 1)), "cmp w0, w1, sxtb #1");
+ COMPARE(cmp(x2, Operand(x3, UXTH, 3)), "cmp x2, w3, uxth #3");
+
+ COMPARE(sub(w0, wcsp, Operand(w1, UXTB)), "sub w0, wcsp, w1, uxtb");
+ COMPARE(sub(x2, csp, Operand(x3, UXTH, 1)), "sub x2, csp, w3, uxth #1");
+ COMPARE(sub(wcsp, wcsp, Operand(w4, UXTW, 2)), "sub wcsp, wcsp, w4, lsl #2");
+ COMPARE(cmp(csp, Operand(xzr, UXTX, 3)), "cmp csp, xzr, lsl #3");
+ COMPARE(cmp(csp, Operand(xzr, LSL, 4)), "cmp csp, xzr, lsl #4");
+
+ CLEANUP();
+}
+
+
+TEST_(adc_subc_ngc) {
+ SET_UP();
+
+ COMPARE(adc(w0, w1, Operand(w2)), "adc w0, w1, w2");
+ COMPARE(adc(x3, x4, Operand(x5)), "adc x3, x4, x5");
+ COMPARE(adcs(w6, w7, Operand(w8)), "adcs w6, w7, w8");
+ COMPARE(adcs(x9, x10, Operand(x11)), "adcs x9, x10, x11");
+ COMPARE(sbc(w12, w13, Operand(w14)), "sbc w12, w13, w14");
+ COMPARE(sbc(x15, x16, Operand(x17)), "sbc x15, x16, x17");
+ COMPARE(sbcs(w18, w19, Operand(w20)), "sbcs w18, w19, w20");
+ COMPARE(sbcs(x21, x22, Operand(x23)), "sbcs x21, x22, x23");
+ COMPARE(ngc(w24, Operand(w25)), "ngc w24, w25");
+ COMPARE(ngc(x26, Operand(cp)), "ngc x26, cp");
+ COMPARE(ngcs(w28, Operand(w29)), "ngcs w28, w29");
+ COMPARE(ngcs(lr, Operand(x0)), "ngcs lr, x0");
+
+ CLEANUP();
+}
+
+
+TEST_(mul_and_div) {
+ SET_UP();
+
+ COMPARE(mul(w0, w1, w2), "mul w0, w1, w2");
+ COMPARE(mul(x3, x4, x5), "mul x3, x4, x5");
+ COMPARE(mul(w30, w0, w1), "mul w30, w0, w1");
+ COMPARE(mul(lr, x0, x1), "mul lr, x0, x1");
+ COMPARE(mneg(w0, w1, w2), "mneg w0, w1, w2");
+ COMPARE(mneg(x3, x4, x5), "mneg x3, x4, x5");
+ COMPARE(mneg(w30, w0, w1), "mneg w30, w0, w1");
+ COMPARE(mneg(lr, x0, x1), "mneg lr, x0, x1");
+ COMPARE(smull(x0, w0, w1), "smull x0, w0, w1");
+ COMPARE(smull(lr, w30, w0), "smull lr, w30, w0");
+ COMPARE(smulh(x0, x1, x2), "smulh x0, x1, x2");
+
+ COMPARE(madd(w0, w1, w2, w3), "madd w0, w1, w2, w3");
+ COMPARE(madd(x4, x5, x6, x7), "madd x4, x5, x6, x7");
+ COMPARE(madd(w8, w9, w10, wzr), "mul w8, w9, w10");
+ COMPARE(madd(x11, x12, x13, xzr), "mul x11, x12, x13");
+ COMPARE(msub(w14, w15, w16, w17), "msub w14, w15, w16, w17");
+ COMPARE(msub(x18, x19, x20, x21), "msub x18, x19, x20, x21");
+ COMPARE(msub(w22, w23, w24, wzr), "mneg w22, w23, w24");
+ COMPARE(msub(x25, x26, x0, xzr), "mneg x25, x26, x0");
+
+ COMPARE(sdiv(w0, w1, w2), "sdiv w0, w1, w2");
+ COMPARE(sdiv(x3, x4, x5), "sdiv x3, x4, x5");
+ COMPARE(udiv(w6, w7, w8), "udiv w6, w7, w8");
+ COMPARE(udiv(x9, x10, x11), "udiv x9, x10, x11");
+
+ CLEANUP();
+}
+
+
+TEST_(maddl_msubl) {
+ SET_UP();
+
+ COMPARE(smaddl(x0, w1, w2, x3), "smaddl x0, w1, w2, x3");
+ COMPARE(smaddl(x25, w21, w22, x16), "smaddl x25, w21, w22, x16");
+ COMPARE(umaddl(x0, w1, w2, x3), "umaddl x0, w1, w2, x3");
+ COMPARE(umaddl(x25, w21, w22, x16), "umaddl x25, w21, w22, x16");
+
+ COMPARE(smsubl(x0, w1, w2, x3), "smsubl x0, w1, w2, x3");
+ COMPARE(smsubl(x25, w21, w22, x16), "smsubl x25, w21, w22, x16");
+ COMPARE(umsubl(x0, w1, w2, x3), "umsubl x0, w1, w2, x3");
+ COMPARE(umsubl(x25, w21, w22, x16), "umsubl x25, w21, w22, x16");
+
+ CLEANUP();
+}
+
+
+TEST_(dp_1_source) {
+ SET_UP();
+
+ COMPARE(rbit(w0, w1), "rbit w0, w1");
+ COMPARE(rbit(x2, x3), "rbit x2, x3");
+ COMPARE(rev16(w4, w5), "rev16 w4, w5");
+ COMPARE(rev16(x6, x7), "rev16 x6, x7");
+ COMPARE(rev32(x8, x9), "rev32 x8, x9");
+ COMPARE(rev(w10, w11), "rev w10, w11");
+ COMPARE(rev(x12, x13), "rev x12, x13");
+ COMPARE(clz(w14, w15), "clz w14, w15");
+ COMPARE(clz(x16, x17), "clz x16, x17");
+ COMPARE(cls(w18, w19), "cls w18, w19");
+ COMPARE(cls(x20, x21), "cls x20, x21");
+
+ CLEANUP();
+}
+
+
+TEST_(bitfield) {
+ SET_UP();
+
+ COMPARE(sxtb(w0, w1), "sxtb w0, w1");
+ COMPARE(sxtb(x2, x3), "sxtb x2, w3");
+ COMPARE(sxth(w4, w5), "sxth w4, w5");
+ COMPARE(sxth(x6, x7), "sxth x6, w7");
+ COMPARE(sxtw(x8, x9), "sxtw x8, w9");
+ COMPARE(sxtb(x0, w1), "sxtb x0, w1");
+ COMPARE(sxth(x2, w3), "sxth x2, w3");
+ COMPARE(sxtw(x4, w5), "sxtw x4, w5");
+
+ COMPARE(uxtb(w10, w11), "uxtb w10, w11");
+ COMPARE(uxtb(x12, x13), "uxtb x12, w13");
+ COMPARE(uxth(w14, w15), "uxth w14, w15");
+ COMPARE(uxth(x16, x17), "uxth x16, w17");
+ COMPARE(uxtw(x18, x19), "ubfx x18, x19, #0, #32");
+
+ COMPARE(asr(w20, w21, 10), "asr w20, w21, #10");
+ COMPARE(asr(x22, x23, 20), "asr x22, x23, #20");
+ COMPARE(lsr(w24, w25, 10), "lsr w24, w25, #10");
+ COMPARE(lsr(x26, cp, 20), "lsr x26, cp, #20");
+ COMPARE(lsl(w28, w29, 10), "lsl w28, w29, #10");
+ COMPARE(lsl(lr, x0, 20), "lsl lr, x0, #20");
+
+ COMPARE(sbfiz(w1, w2, 1, 20), "sbfiz w1, w2, #1, #20");
+ COMPARE(sbfiz(x3, x4, 2, 19), "sbfiz x3, x4, #2, #19");
+ COMPARE(sbfx(w5, w6, 3, 18), "sbfx w5, w6, #3, #18");
+ COMPARE(sbfx(x7, x8, 4, 17), "sbfx x7, x8, #4, #17");
+ COMPARE(bfi(w9, w10, 5, 16), "bfi w9, w10, #5, #16");
+ COMPARE(bfi(x11, x12, 6, 15), "bfi x11, x12, #6, #15");
+ COMPARE(bfxil(w13, w14, 7, 14), "bfxil w13, w14, #7, #14");
+ COMPARE(bfxil(x15, x16, 8, 13), "bfxil x15, x16, #8, #13");
+ COMPARE(ubfiz(w17, w18, 9, 12), "ubfiz w17, w18, #9, #12");
+ COMPARE(ubfiz(x19, x20, 10, 11), "ubfiz x19, x20, #10, #11");
+ COMPARE(ubfx(w21, w22, 11, 10), "ubfx w21, w22, #11, #10");
+ COMPARE(ubfx(x23, x24, 12, 9), "ubfx x23, x24, #12, #9");
+
+ CLEANUP();
+}
+
+
+TEST_(extract) {
+ SET_UP();
+
+ COMPARE(extr(w0, w1, w2, 0), "extr w0, w1, w2, #0");
+ COMPARE(extr(x3, x4, x5, 1), "extr x3, x4, x5, #1");
+ COMPARE(extr(w6, w7, w8, 31), "extr w6, w7, w8, #31");
+ COMPARE(extr(x9, x10, x11, 63), "extr x9, x10, x11, #63");
+ COMPARE(extr(w12, w13, w13, 10), "ror w12, w13, #10");
+ COMPARE(extr(x14, x15, x15, 42), "ror x14, x15, #42");
+
+ CLEANUP();
+}
+
+
+// Disassembly of logical operations with immediate operands. Walks every
+// rotation of each repeating bit-pattern width the immediate encoder supports
+// (64/32/16/8/4/2-bit patterns), then spot-checks aliases (tst), inverted
+// forms (bic/orn/eon/bics), stack-pointer operands, and mov aliases.
+TEST_(logical_immediate) {
+  SET_UP();
+  // NOTE(review): RESULT_SIZE is not #undef'd at the end of this test, so the
+  // macro leaks into the rest of the file -- confirm no later test redefines it.
+  #define RESULT_SIZE (256)
+
+  // Buffer for the expected disassembly string built by snprintf below.
+  char result[RESULT_SIZE];
+
+  // Test immediate encoding - 64-bit destination.
+  // 64-bit patterns.
+  uint64_t value = 0x7fffffff;
+  for (int i = 0; i < 64; i++) {
+    snprintf(result, RESULT_SIZE, "and x0, x0, #0x%" PRIx64, value);
+    COMPARE(and_(x0, x0, Operand(value)), result);
+    value = ((value & 1) << 63) | (value >> 1);  // Rotate right 1 bit.
+  }
+
+  // 32-bit patterns.
+  value = 0x00003fff00003fffL;
+  for (int i = 0; i < 32; i++) {
+    snprintf(result, RESULT_SIZE, "and x0, x0, #0x%" PRIx64, value);
+    COMPARE(and_(x0, x0, Operand(value)), result);
+    value = ((value & 1) << 63) | (value >> 1);  // Rotate right 1 bit.
+  }
+
+  // 16-bit patterns.
+  value = 0x001f001f001f001fL;
+  for (int i = 0; i < 16; i++) {
+    snprintf(result, RESULT_SIZE, "and x0, x0, #0x%" PRIx64, value);
+    COMPARE(and_(x0, x0, Operand(value)), result);
+    value = ((value & 1) << 63) | (value >> 1);  // Rotate right 1 bit.
+  }
+
+  // 8-bit patterns.
+  value = 0x0e0e0e0e0e0e0e0eL;
+  for (int i = 0; i < 8; i++) {
+    snprintf(result, RESULT_SIZE, "and x0, x0, #0x%" PRIx64, value);
+    COMPARE(and_(x0, x0, Operand(value)), result);
+    value = ((value & 1) << 63) | (value >> 1);  // Rotate right 1 bit.
+  }
+
+  // 4-bit patterns.
+  value = 0x6666666666666666L;
+  for (int i = 0; i < 4; i++) {
+    snprintf(result, RESULT_SIZE, "and x0, x0, #0x%" PRIx64, value);
+    COMPARE(and_(x0, x0, Operand(value)), result);
+    value = ((value & 1) << 63) | (value >> 1);  // Rotate right 1 bit.
+  }
+
+  // 2-bit patterns.
+  COMPARE(and_(x0, x0, Operand(0x5555555555555555L)),
+          "and x0, x0, #0x5555555555555555");
+  COMPARE(and_(x0, x0, Operand(0xaaaaaaaaaaaaaaaaL)),
+          "and x0, x0, #0xaaaaaaaaaaaaaaaa");
+
+  // Test immediate encoding - 32-bit destination.
+  COMPARE(and_(w0, w0, Operand(0xff8007ff)),
+          "and w0, w0, #0xff8007ff");  // 32-bit pattern.
+  COMPARE(and_(w0, w0, Operand(0xf87ff87f)),
+          "and w0, w0, #0xf87ff87f");  // 16-bit pattern.
+  COMPARE(and_(w0, w0, Operand(0x87878787)),
+          "and w0, w0, #0x87878787");  // 8-bit pattern.
+  COMPARE(and_(w0, w0, Operand(0x66666666)),
+          "and w0, w0, #0x66666666");  // 4-bit pattern.
+  COMPARE(and_(w0, w0, Operand(0x55555555)),
+          "and w0, w0, #0x55555555");  // 2-bit pattern.
+
+  // Test other instructions.
+  COMPARE(tst(w1, Operand(0x11111111)),
+          "tst w1, #0x11111111");
+  COMPARE(tst(x2, Operand(0x8888888888888888L)),
+          "tst x2, #0x8888888888888888");
+  COMPARE(orr(w7, w8, Operand(0xaaaaaaaa)),
+          "orr w7, w8, #0xaaaaaaaa");
+  COMPARE(orr(x9, x10, Operand(0x5555555555555555L)),
+          "orr x9, x10, #0x5555555555555555");
+  COMPARE(eor(w15, w16, Operand(0x00000001)),
+          "eor w15, w16, #0x1");
+  COMPARE(eor(x17, x18, Operand(0x0000000000000003L)),
+          "eor x17, x18, #0x3");
+  COMPARE(ands(w23, w24, Operand(0x0000000f)), "ands w23, w24, #0xf");
+  COMPARE(ands(x25, x26, Operand(0x800000000000000fL)),
+          "ands x25, x26, #0x800000000000000f");
+
+  // Test inverse.
+  // bic/orn/eon have no immediate encoding; the assembler inverts the
+  // immediate and emits the base instruction, which is what is disassembled.
+  COMPARE(bic(w3, w4, Operand(0x20202020)),
+          "and w3, w4, #0xdfdfdfdf");
+  COMPARE(bic(x5, x6, Operand(0x4040404040404040L)),
+          "and x5, x6, #0xbfbfbfbfbfbfbfbf");
+  COMPARE(orn(w11, w12, Operand(0x40004000)),
+          "orr w11, w12, #0xbfffbfff");
+  COMPARE(orn(x13, x14, Operand(0x8181818181818181L)),
+          "orr x13, x14, #0x7e7e7e7e7e7e7e7e");
+  COMPARE(eon(w19, w20, Operand(0x80000001)),
+          "eor w19, w20, #0x7ffffffe");
+  COMPARE(eon(x21, x22, Operand(0xc000000000000003L)),
+          "eor x21, x22, #0x3ffffffffffffffc");
+  COMPARE(bics(w27, w28, Operand(0xfffffff7)), "ands w27, w28, #0x8");
+  COMPARE(bics(fp, x0, Operand(0xfffffffeffffffffL)),
+          "ands fp, x0, #0x100000000");
+
+  // Test stack pointer.
+  COMPARE(and_(wcsp, wzr, Operand(7)), "and wcsp, wzr, #0x7");
+  COMPARE(ands(xzr, xzr, Operand(7)), "tst xzr, #0x7");
+  COMPARE(orr(csp, xzr, Operand(15)), "orr csp, xzr, #0xf");
+  COMPARE(eor(wcsp, w0, Operand(31)), "eor wcsp, w0, #0x1f");
+
+  // Test move aliases.
+  COMPARE(orr(w0, wzr, Operand(0x00000780)), "orr w0, wzr, #0x780");
+  COMPARE(orr(w1, wzr, Operand(0x00007800)), "orr w1, wzr, #0x7800");
+  COMPARE(orr(w2, wzr, Operand(0x00078000)), "mov w2, #0x78000");
+  COMPARE(orr(w3, wzr, Operand(0x00780000)), "orr w3, wzr, #0x780000");
+  COMPARE(orr(w4, wzr, Operand(0x07800000)), "orr w4, wzr, #0x7800000");
+  COMPARE(orr(x5, xzr, Operand(0xffffffffffffc001UL)),
+          "orr x5, xzr, #0xffffffffffffc001");
+  COMPARE(orr(x6, xzr, Operand(0xfffffffffffc001fUL)),
+          "mov x6, #0xfffffffffffc001f");
+  COMPARE(orr(x7, xzr, Operand(0xffffffffffc001ffUL)),
+          "mov x7, #0xffffffffffc001ff");
+  COMPARE(orr(x8, xzr, Operand(0xfffffffffc001fffUL)),
+          "mov x8, #0xfffffffffc001fff");
+  COMPARE(orr(x9, xzr, Operand(0xffffffffc001ffffUL)),
+          "orr x9, xzr, #0xffffffffc001ffff");
+
+  CLEANUP();
+}
+
+
+// Disassembly of logical operations with shifted-register operands: each of
+// and/bic/orr/orn/eor/eon/ands/bics with no shift and with LSL/LSR/ASR/ROR,
+// plus the tst, mvn and mov aliases at the end.
+TEST_(logical_shifted) {
+  SET_UP();
+
+  COMPARE(and_(w0, w1, Operand(w2)), "and w0, w1, w2");
+  COMPARE(and_(x3, x4, Operand(x5, LSL, 1)), "and x3, x4, x5, lsl #1");
+  COMPARE(and_(w6, w7, Operand(w8, LSR, 2)), "and w6, w7, w8, lsr #2");
+  COMPARE(and_(x9, x10, Operand(x11, ASR, 3)), "and x9, x10, x11, asr #3");
+  COMPARE(and_(w12, w13, Operand(w14, ROR, 4)), "and w12, w13, w14, ror #4");
+
+  COMPARE(bic(w15, w16, Operand(w17)), "bic w15, w16, w17");
+  COMPARE(bic(x18, x19, Operand(x20, LSL, 5)), "bic x18, x19, x20, lsl #5");
+  COMPARE(bic(w21, w22, Operand(w23, LSR, 6)), "bic w21, w22, w23, lsr #6");
+  COMPARE(bic(x24, x25, Operand(x26, ASR, 7)), "bic x24, x25, x26, asr #7");
+  COMPARE(bic(w27, w28, Operand(w29, ROR, 8)), "bic w27, w28, w29, ror #8");
+
+  COMPARE(orr(w0, w1, Operand(w2)), "orr w0, w1, w2");
+  COMPARE(orr(x3, x4, Operand(x5, LSL, 9)), "orr x3, x4, x5, lsl #9");
+  COMPARE(orr(w6, w7, Operand(w8, LSR, 10)), "orr w6, w7, w8, lsr #10");
+  COMPARE(orr(x9, x10, Operand(x11, ASR, 11)), "orr x9, x10, x11, asr #11");
+  COMPARE(orr(w12, w13, Operand(w14, ROR, 12)), "orr w12, w13, w14, ror #12");
+
+  COMPARE(orn(w15, w16, Operand(w17)), "orn w15, w16, w17");
+  COMPARE(orn(x18, x19, Operand(x20, LSL, 13)), "orn x18, x19, x20, lsl #13");
+  COMPARE(orn(w21, w22, Operand(w23, LSR, 14)), "orn w21, w22, w23, lsr #14");
+  COMPARE(orn(x24, x25, Operand(x26, ASR, 15)), "orn x24, x25, x26, asr #15");
+  COMPARE(orn(w27, w28, Operand(w29, ROR, 16)), "orn w27, w28, w29, ror #16");
+
+  COMPARE(eor(w0, w1, Operand(w2)), "eor w0, w1, w2");
+  COMPARE(eor(x3, x4, Operand(x5, LSL, 17)), "eor x3, x4, x5, lsl #17");
+  COMPARE(eor(w6, w7, Operand(w8, LSR, 18)), "eor w6, w7, w8, lsr #18");
+  COMPARE(eor(x9, x10, Operand(x11, ASR, 19)), "eor x9, x10, x11, asr #19");
+  COMPARE(eor(w12, w13, Operand(w14, ROR, 20)), "eor w12, w13, w14, ror #20");
+
+  COMPARE(eon(w15, w16, Operand(w17)), "eon w15, w16, w17");
+  COMPARE(eon(x18, x19, Operand(x20, LSL, 21)), "eon x18, x19, x20, lsl #21");
+  COMPARE(eon(w21, w22, Operand(w23, LSR, 22)), "eon w21, w22, w23, lsr #22");
+  COMPARE(eon(x24, x25, Operand(x26, ASR, 23)), "eon x24, x25, x26, asr #23");
+  COMPARE(eon(w27, w28, Operand(w29, ROR, 24)), "eon w27, w28, w29, ror #24");
+
+  COMPARE(ands(w0, w1, Operand(w2)), "ands w0, w1, w2");
+  COMPARE(ands(x3, x4, Operand(x5, LSL, 1)), "ands x3, x4, x5, lsl #1");
+  COMPARE(ands(w6, w7, Operand(w8, LSR, 2)), "ands w6, w7, w8, lsr #2");
+  COMPARE(ands(x9, x10, Operand(x11, ASR, 3)), "ands x9, x10, x11, asr #3");
+  COMPARE(ands(w12, w13, Operand(w14, ROR, 4)), "ands w12, w13, w14, ror #4");
+
+  COMPARE(bics(w15, w16, Operand(w17)), "bics w15, w16, w17");
+  COMPARE(bics(x18, x19, Operand(x20, LSL, 5)), "bics x18, x19, x20, lsl #5");
+  COMPARE(bics(w21, w22, Operand(w23, LSR, 6)), "bics w21, w22, w23, lsr #6");
+  COMPARE(bics(x24, x25, Operand(x26, ASR, 7)), "bics x24, x25, x26, asr #7");
+  COMPARE(bics(w27, w28, Operand(w29, ROR, 8)), "bics w27, w28, w29, ror #8");
+
+  // Aliases: ands with the zero register as destination prints as tst,
+  // orn/orr from the zero register print as mvn/mov (shifted orr does not).
+  COMPARE(tst(w0, Operand(w1)), "tst w0, w1");
+  COMPARE(tst(w2, Operand(w3, ROR, 10)), "tst w2, w3, ror #10");
+  COMPARE(tst(x0, Operand(x1)), "tst x0, x1");
+  COMPARE(tst(x2, Operand(x3, ROR, 42)), "tst x2, x3, ror #42");
+
+  COMPARE(orn(w0, wzr, Operand(w1)), "mvn w0, w1");
+  COMPARE(orn(w2, wzr, Operand(w3, ASR, 5)), "mvn w2, w3, asr #5");
+  COMPARE(orn(x0, xzr, Operand(x1)), "mvn x0, x1");
+  COMPARE(orn(x2, xzr, Operand(x3, ASR, 42)), "mvn x2, x3, asr #42");
+
+  COMPARE(orr(w0, wzr, Operand(w1)), "mov w0, w1");
+  COMPARE(orr(x0, xzr, Operand(x1)), "mov x0, x1");
+  COMPARE(orr(w16, wzr, Operand(w17, LSL, 1)), "orr w16, wzr, w17, lsl #1");
+  COMPARE(orr(x16, xzr, Operand(x17, ASR, 2)), "orr x16, xzr, x17, asr #2");
+
+  CLEANUP();
+}
+
+
+// Disassembly of two-source data-processing variable shifts: the lslv/lsrv/
+// asrv/rorv encodings print under their lsl/lsr/asr/ror alias mnemonics.
+TEST_(dp_2_source) {
+  SET_UP();
+
+  COMPARE(lslv(w0, w1, w2), "lsl w0, w1, w2");
+  COMPARE(lslv(x3, x4, x5), "lsl x3, x4, x5");
+  COMPARE(lsrv(w6, w7, w8), "lsr w6, w7, w8");
+  COMPARE(lsrv(x9, x10, x11), "lsr x9, x10, x11");
+  COMPARE(asrv(w12, w13, w14), "asr w12, w13, w14");
+  COMPARE(asrv(x15, x16, x17), "asr x15, x16, x17");
+  COMPARE(rorv(w18, w19, w20), "ror w18, w19, w20");
+  COMPARE(rorv(x21, x22, x23), "ror x21, x22, x23");
+
+  CLEANUP();
+}
+
+
+// Disassembly of adr with zero, small, and extreme positive/negative
+// pc-relative offsets (the encodable range boundaries +0xfffff/-0x100000),
+// and with the zero register as destination.
+TEST_(adr) {
+  SET_UP();
+
+  COMPARE(adr(x0, 0), "adr x0, #+0x0");
+  COMPARE(adr(x1, 1), "adr x1, #+0x1");
+  COMPARE(adr(x2, -1), "adr x2, #-0x1");
+  COMPARE(adr(x3, 4), "adr x3, #+0x4");
+  COMPARE(adr(x4, -4), "adr x4, #-0x4");
+  COMPARE(adr(x5, 0x000fffff), "adr x5, #+0xfffff");
+  COMPARE(adr(x6, -0x00100000), "adr x6, #-0x100000");
+  COMPARE(adr(xzr, 0), "adr xzr, #+0x0");
+
+  CLEANUP();
+}
+
+
+// Disassembly of branch instructions: b/bl (conditional and unconditional) at
+// their encodable range limits, compare-and-branch (cbz/cbnz), test-bit-and-
+// branch (tbz/tbnz), and the register branches br/blr/ret.
+TEST_(branch) {
+  SET_UP();
+
+  // Branch offsets are encoded in instruction units; convert byte offsets.
+  // NOTE(review): INST_OFF is not #undef'd here -- it leaks past this test.
+  #define INST_OFF(x) ((x) >> kInstructionSizeLog2)
+  COMPARE(b(INST_OFF(0x4)), "b #+0x4");
+  COMPARE(b(INST_OFF(-0x4)), "b #-0x4");
+  COMPARE(b(INST_OFF(0x7fffffc)), "b #+0x7fffffc");
+  COMPARE(b(INST_OFF(-0x8000000)), "b #-0x8000000");
+  COMPARE(b(INST_OFF(0xffffc), eq), "b.eq #+0xffffc");
+  COMPARE(b(INST_OFF(-0x100000), mi), "b.mi #-0x100000");
+  COMPARE(bl(INST_OFF(0x4)), "bl #+0x4");
+  COMPARE(bl(INST_OFF(-0x4)), "bl #-0x4");
+  COMPARE(bl(INST_OFF(0xffffc)), "bl #+0xffffc");
+  COMPARE(bl(INST_OFF(-0x100000)), "bl #-0x100000");
+  COMPARE(cbz(w0, INST_OFF(0xffffc)), "cbz w0, #+0xffffc");
+  COMPARE(cbz(x1, INST_OFF(-0x100000)), "cbz x1, #-0x100000");
+  COMPARE(cbnz(w2, INST_OFF(0xffffc)), "cbnz w2, #+0xffffc");
+  COMPARE(cbnz(x3, INST_OFF(-0x100000)), "cbnz x3, #-0x100000");
+  COMPARE(tbz(w4, 0, INST_OFF(0x7ffc)), "tbz w4, #0, #+0x7ffc");
+  COMPARE(tbz(x5, 63, INST_OFF(-0x8000)), "tbz x5, #63, #-0x8000");
+  COMPARE(tbz(w6, 31, INST_OFF(0)), "tbz w6, #31, #+0x0");
+  // An X register with a bit position below 32 is printed as its W alias.
+  COMPARE(tbz(x7, 31, INST_OFF(0x4)), "tbz w7, #31, #+0x4");
+  COMPARE(tbz(x8, 32, INST_OFF(0x8)), "tbz x8, #32, #+0x8");
+  COMPARE(tbnz(w8, 0, INST_OFF(0x7ffc)), "tbnz w8, #0, #+0x7ffc");
+  COMPARE(tbnz(x9, 63, INST_OFF(-0x8000)), "tbnz x9, #63, #-0x8000");
+  COMPARE(tbnz(w10, 31, INST_OFF(0)), "tbnz w10, #31, #+0x0");
+  COMPARE(tbnz(x11, 31, INST_OFF(0x4)), "tbnz w11, #31, #+0x4");
+  COMPARE(tbnz(x12, 32, INST_OFF(0x8)), "tbnz x12, #32, #+0x8");
+  COMPARE(br(x0), "br x0");
+  COMPARE(blr(x1), "blr x1");
+  COMPARE(ret(x2), "ret x2");
+  // ret with the default link register prints with no operand.
+  // Fix: terminating semicolon was missing, unlike every other COMPARE use.
+  COMPARE(ret(lr), "ret");
+
+  CLEANUP();
+}
+
+
+// Disassembly of word/doubleword ldr/str with scaled immediate offsets
+// (including the maximum 16380/32760), pre-index, post-index, jssp-based
+// addressing, and ldrsw.
+TEST_(load_store) {
+  SET_UP();
+
+  COMPARE(ldr(w0, MemOperand(x1)), "ldr w0, [x1]");
+  COMPARE(ldr(w2, MemOperand(x3, 4)), "ldr w2, [x3, #4]");
+  COMPARE(ldr(w4, MemOperand(x5, 16380)), "ldr w4, [x5, #16380]");
+  COMPARE(ldr(x6, MemOperand(x7)), "ldr x6, [x7]");
+  COMPARE(ldr(x8, MemOperand(x9, 8)), "ldr x8, [x9, #8]");
+  COMPARE(ldr(x10, MemOperand(x11, 32760)), "ldr x10, [x11, #32760]");
+  COMPARE(str(w12, MemOperand(x13)), "str w12, [x13]");
+  COMPARE(str(w14, MemOperand(x15, 4)), "str w14, [x15, #4]");
+  COMPARE(str(w16, MemOperand(x17, 16380)), "str w16, [x17, #16380]");
+  COMPARE(str(x18, MemOperand(x19)), "str x18, [x19]");
+  COMPARE(str(x20, MemOperand(x21, 8)), "str x20, [x21, #8]");
+  COMPARE(str(x22, MemOperand(x23, 32760)), "str x22, [x23, #32760]");
+
+  COMPARE(ldr(w0, MemOperand(x1, 4, PreIndex)), "ldr w0, [x1, #4]!");
+  COMPARE(ldr(w2, MemOperand(x3, 255, PreIndex)), "ldr w2, [x3, #255]!");
+  COMPARE(ldr(w4, MemOperand(x5, -256, PreIndex)), "ldr w4, [x5, #-256]!");
+  COMPARE(ldr(x6, MemOperand(x7, 8, PreIndex)), "ldr x6, [x7, #8]!");
+  COMPARE(ldr(x8, MemOperand(x9, 255, PreIndex)), "ldr x8, [x9, #255]!");
+  COMPARE(ldr(x10, MemOperand(x11, -256, PreIndex)), "ldr x10, [x11, #-256]!");
+  COMPARE(str(w12, MemOperand(x13, 4, PreIndex)), "str w12, [x13, #4]!");
+  COMPARE(str(w14, MemOperand(x15, 255, PreIndex)), "str w14, [x15, #255]!");
+  COMPARE(str(w16, MemOperand(x17, -256, PreIndex)), "str w16, [x17, #-256]!");
+  COMPARE(str(x18, MemOperand(x19, 8, PreIndex)), "str x18, [x19, #8]!");
+  COMPARE(str(x20, MemOperand(x21, 255, PreIndex)), "str x20, [x21, #255]!");
+  COMPARE(str(x22, MemOperand(x23, -256, PreIndex)), "str x22, [x23, #-256]!");
+
+  COMPARE(ldr(w0, MemOperand(x1, 4, PostIndex)), "ldr w0, [x1], #4");
+  COMPARE(ldr(w2, MemOperand(x3, 255, PostIndex)), "ldr w2, [x3], #255");
+  COMPARE(ldr(w4, MemOperand(x5, -256, PostIndex)), "ldr w4, [x5], #-256");
+  COMPARE(ldr(x6, MemOperand(x7, 8, PostIndex)), "ldr x6, [x7], #8");
+  COMPARE(ldr(x8, MemOperand(x9, 255, PostIndex)), "ldr x8, [x9], #255");
+  COMPARE(ldr(x10, MemOperand(x11, -256, PostIndex)), "ldr x10, [x11], #-256");
+  COMPARE(str(w12, MemOperand(x13, 4, PostIndex)), "str w12, [x13], #4");
+  COMPARE(str(w14, MemOperand(x15, 255, PostIndex)), "str w14, [x15], #255");
+  COMPARE(str(w16, MemOperand(x17, -256, PostIndex)), "str w16, [x17], #-256");
+  COMPARE(str(x18, MemOperand(x19, 8, PostIndex)), "str x18, [x19], #8");
+  COMPARE(str(x20, MemOperand(x21, 255, PostIndex)), "str x20, [x21], #255");
+  COMPARE(str(x22, MemOperand(x23, -256, PostIndex)), "str x22, [x23], #-256");
+
+  // TODO(all): Fix this for jssp.
+  COMPARE(ldr(w24, MemOperand(jssp)), "ldr w24, [jssp]");
+  COMPARE(ldr(x25, MemOperand(jssp, 8)), "ldr x25, [jssp, #8]");
+  COMPARE(str(w26, MemOperand(jssp, 4, PreIndex)), "str w26, [jssp, #4]!");
+  COMPARE(str(cp, MemOperand(jssp, -8, PostIndex)), "str cp, [jssp], #-8");
+
+  COMPARE(ldrsw(x0, MemOperand(x1)), "ldrsw x0, [x1]");
+  COMPARE(ldrsw(x2, MemOperand(x3, 8)), "ldrsw x2, [x3, #8]");
+  COMPARE(ldrsw(x4, MemOperand(x5, 42, PreIndex)), "ldrsw x4, [x5, #42]!");
+  COMPARE(ldrsw(x6, MemOperand(x7, -11, PostIndex)), "ldrsw x6, [x7], #-11");
+
+  CLEANUP();
+}
+
+
+// Disassembly of register-offset addressing for ldr/str/ldrb/strb/ldrh/strh:
+// each extend mode (UXTW/SXTW/SXTX, plain X register, LSL) with and without
+// the access-size shift amount.
+TEST_(load_store_regoffset) {
+  SET_UP();
+
+  COMPARE(ldr(w0, MemOperand(x1, w2, UXTW)), "ldr w0, [x1, w2, uxtw]");
+  COMPARE(ldr(w3, MemOperand(x4, w5, UXTW, 2)), "ldr w3, [x4, w5, uxtw #2]");
+  COMPARE(ldr(w6, MemOperand(x7, x8)), "ldr w6, [x7, x8]");
+  COMPARE(ldr(w9, MemOperand(x10, x11, LSL, 2)), "ldr w9, [x10, x11, lsl #2]");
+  COMPARE(ldr(w12, MemOperand(x13, w14, SXTW)), "ldr w12, [x13, w14, sxtw]");
+  COMPARE(ldr(w15, MemOperand(x16, w17, SXTW, 2)),
+          "ldr w15, [x16, w17, sxtw #2]");
+  COMPARE(ldr(w18, MemOperand(x19, x20, SXTX)), "ldr w18, [x19, x20, sxtx]");
+  COMPARE(ldr(w21, MemOperand(x22, x23, SXTX, 2)),
+          "ldr w21, [x22, x23, sxtx #2]");
+  COMPARE(ldr(x0, MemOperand(x1, w2, UXTW)), "ldr x0, [x1, w2, uxtw]");
+  COMPARE(ldr(x3, MemOperand(x4, w5, UXTW, 3)), "ldr x3, [x4, w5, uxtw #3]");
+  COMPARE(ldr(x6, MemOperand(x7, x8)), "ldr x6, [x7, x8]");
+  COMPARE(ldr(x9, MemOperand(x10, x11, LSL, 3)), "ldr x9, [x10, x11, lsl #3]");
+  COMPARE(ldr(x12, MemOperand(x13, w14, SXTW)), "ldr x12, [x13, w14, sxtw]");
+  COMPARE(ldr(x15, MemOperand(x16, w17, SXTW, 3)),
+          "ldr x15, [x16, w17, sxtw #3]");
+  COMPARE(ldr(x18, MemOperand(x19, x20, SXTX)), "ldr x18, [x19, x20, sxtx]");
+  COMPARE(ldr(x21, MemOperand(x22, x23, SXTX, 3)),
+          "ldr x21, [x22, x23, sxtx #3]");
+
+  COMPARE(str(w0, MemOperand(x1, w2, UXTW)), "str w0, [x1, w2, uxtw]");
+  COMPARE(str(w3, MemOperand(x4, w5, UXTW, 2)), "str w3, [x4, w5, uxtw #2]");
+  COMPARE(str(w6, MemOperand(x7, x8)), "str w6, [x7, x8]");
+  COMPARE(str(w9, MemOperand(x10, x11, LSL, 2)), "str w9, [x10, x11, lsl #2]");
+  COMPARE(str(w12, MemOperand(x13, w14, SXTW)), "str w12, [x13, w14, sxtw]");
+  COMPARE(str(w15, MemOperand(x16, w17, SXTW, 2)),
+          "str w15, [x16, w17, sxtw #2]");
+  COMPARE(str(w18, MemOperand(x19, x20, SXTX)), "str w18, [x19, x20, sxtx]");
+  COMPARE(str(w21, MemOperand(x22, x23, SXTX, 2)),
+          "str w21, [x22, x23, sxtx #2]");
+  COMPARE(str(x0, MemOperand(x1, w2, UXTW)), "str x0, [x1, w2, uxtw]");
+  COMPARE(str(x3, MemOperand(x4, w5, UXTW, 3)), "str x3, [x4, w5, uxtw #3]");
+  COMPARE(str(x6, MemOperand(x7, x8)), "str x6, [x7, x8]");
+  COMPARE(str(x9, MemOperand(x10, x11, LSL, 3)), "str x9, [x10, x11, lsl #3]");
+  COMPARE(str(x12, MemOperand(x13, w14, SXTW)), "str x12, [x13, w14, sxtw]");
+  COMPARE(str(x15, MemOperand(x16, w17, SXTW, 3)),
+          "str x15, [x16, w17, sxtw #3]");
+  COMPARE(str(x18, MemOperand(x19, x20, SXTX)), "str x18, [x19, x20, sxtx]");
+  COMPARE(str(x21, MemOperand(x22, x23, SXTX, 3)),
+          "str x21, [x22, x23, sxtx #3]");
+
+  // Byte accesses have no shift amount, so only the unshifted forms apply.
+  COMPARE(ldrb(w0, MemOperand(x1, w2, UXTW)), "ldrb w0, [x1, w2, uxtw]");
+  COMPARE(ldrb(w6, MemOperand(x7, x8)), "ldrb w6, [x7, x8]");
+  COMPARE(ldrb(w12, MemOperand(x13, w14, SXTW)), "ldrb w12, [x13, w14, sxtw]");
+  COMPARE(ldrb(w18, MemOperand(x19, x20, SXTX)), "ldrb w18, [x19, x20, sxtx]");
+  COMPARE(strb(w0, MemOperand(x1, w2, UXTW)), "strb w0, [x1, w2, uxtw]");
+  COMPARE(strb(w6, MemOperand(x7, x8)), "strb w6, [x7, x8]");
+  COMPARE(strb(w12, MemOperand(x13, w14, SXTW)), "strb w12, [x13, w14, sxtw]");
+  COMPARE(strb(w18, MemOperand(x19, x20, SXTX)), "strb w18, [x19, x20, sxtx]");
+
+  COMPARE(ldrh(w0, MemOperand(x1, w2, UXTW)), "ldrh w0, [x1, w2, uxtw]");
+  COMPARE(ldrh(w3, MemOperand(x4, w5, UXTW, 1)), "ldrh w3, [x4, w5, uxtw #1]");
+  COMPARE(ldrh(w6, MemOperand(x7, x8)), "ldrh w6, [x7, x8]");
+  COMPARE(ldrh(w9, MemOperand(x10, x11, LSL, 1)),
+          "ldrh w9, [x10, x11, lsl #1]");
+  COMPARE(ldrh(w12, MemOperand(x13, w14, SXTW)), "ldrh w12, [x13, w14, sxtw]");
+  COMPARE(ldrh(w15, MemOperand(x16, w17, SXTW, 1)),
+          "ldrh w15, [x16, w17, sxtw #1]");
+  COMPARE(ldrh(w18, MemOperand(x19, x20, SXTX)), "ldrh w18, [x19, x20, sxtx]");
+  COMPARE(ldrh(w21, MemOperand(x22, x23, SXTX, 1)),
+          "ldrh w21, [x22, x23, sxtx #1]");
+  COMPARE(strh(w0, MemOperand(x1, w2, UXTW)), "strh w0, [x1, w2, uxtw]");
+  COMPARE(strh(w3, MemOperand(x4, w5, UXTW, 1)), "strh w3, [x4, w5, uxtw #1]");
+  COMPARE(strh(w6, MemOperand(x7, x8)), "strh w6, [x7, x8]");
+  COMPARE(strh(w9, MemOperand(x10, x11, LSL, 1)),
+          "strh w9, [x10, x11, lsl #1]");
+  COMPARE(strh(w12, MemOperand(x13, w14, SXTW)), "strh w12, [x13, w14, sxtw]");
+  COMPARE(strh(w15, MemOperand(x16, w17, SXTW, 1)),
+          "strh w15, [x16, w17, sxtw #1]");
+  COMPARE(strh(w18, MemOperand(x19, x20, SXTX)), "strh w18, [x19, x20, sxtx]");
+  COMPARE(strh(w21, MemOperand(x22, x23, SXTX, 1)),
+          "strh w21, [x22, x23, sxtx #1]");
+
+  // TODO(all): Fix this for jssp.
+  COMPARE(ldr(x0, MemOperand(jssp, wzr, SXTW)), "ldr x0, [jssp, wzr, sxtw]");
+  COMPARE(str(x1, MemOperand(jssp, xzr)), "str x1, [jssp, xzr]");
+
+  CLEANUP();
+}
+
+
+// Disassembly of byte loads/stores (ldrb/strb/ldrsb) across offset, pre- and
+// post-indexed addressing modes.
+TEST_(load_store_byte) {
+  SET_UP();
+
+  COMPARE(ldrb(w0, MemOperand(x1)), "ldrb w0, [x1]");
+  // An X-register operand to ldrb/strb is printed as its W alias (w2 here).
+  COMPARE(ldrb(x2, MemOperand(x3)), "ldrb w2, [x3]");
+  COMPARE(ldrb(w4, MemOperand(x5, 4095)), "ldrb w4, [x5, #4095]");
+  COMPARE(ldrb(w6, MemOperand(x7, 255, PreIndex)), "ldrb w6, [x7, #255]!");
+  COMPARE(ldrb(w8, MemOperand(x9, -256, PreIndex)), "ldrb w8, [x9, #-256]!");
+  COMPARE(ldrb(w10, MemOperand(x11, 255, PostIndex)), "ldrb w10, [x11], #255");
+  COMPARE(ldrb(w12, MemOperand(x13, -256, PostIndex)),
+          "ldrb w12, [x13], #-256");
+  COMPARE(strb(w14, MemOperand(x15)), "strb w14, [x15]");
+  COMPARE(strb(x16, MemOperand(x17)), "strb w16, [x17]");
+  COMPARE(strb(w18, MemOperand(x19, 4095)), "strb w18, [x19, #4095]");
+  COMPARE(strb(w20, MemOperand(x21, 255, PreIndex)), "strb w20, [x21, #255]!");
+  COMPARE(strb(w22, MemOperand(x23, -256, PreIndex)),
+          "strb w22, [x23, #-256]!");
+  COMPARE(strb(w24, MemOperand(x25, 255, PostIndex)), "strb w24, [x25], #255");
+  COMPARE(strb(w26, MemOperand(cp, -256, PostIndex)),
+          "strb w26, [cp], #-256");
+  // TODO(all): Fix this for jssp.
+  COMPARE(ldrb(w28, MemOperand(jssp, 3, PostIndex)), "ldrb w28, [jssp], #3");
+  COMPARE(strb(fp, MemOperand(jssp, -42, PreIndex)), "strb w29, [jssp, #-42]!");
+  // ldrsb keeps the register size: sign-extension target may be W or X.
+  COMPARE(ldrsb(w0, MemOperand(x1)), "ldrsb w0, [x1]");
+  COMPARE(ldrsb(x2, MemOperand(x3, 8)), "ldrsb x2, [x3, #8]");
+  COMPARE(ldrsb(w4, MemOperand(x5, 42, PreIndex)), "ldrsb w4, [x5, #42]!");
+  COMPARE(ldrsb(x6, MemOperand(x7, -11, PostIndex)), "ldrsb x6, [x7], #-11");
+
+  CLEANUP();
+}
+
+
+// Disassembly of halfword loads/stores (ldrh/strh/ldrsh) across offset, pre-
+// and post-indexed addressing, plus offsets that cannot be scaled by 2 and so
+// fall back to the unscaled ldurh/sturh encodings.
+TEST_(load_store_half) {
+  SET_UP();
+
+  COMPARE(ldrh(w0, MemOperand(x1)), "ldrh w0, [x1]");
+  // An X-register operand to ldrh/strh is printed as its W alias (w2 here).
+  COMPARE(ldrh(x2, MemOperand(x3)), "ldrh w2, [x3]");
+  COMPARE(ldrh(w4, MemOperand(x5, 8190)), "ldrh w4, [x5, #8190]");
+  COMPARE(ldrh(w6, MemOperand(x7, 255, PreIndex)), "ldrh w6, [x7, #255]!");
+  COMPARE(ldrh(w8, MemOperand(x9, -256, PreIndex)), "ldrh w8, [x9, #-256]!");
+  COMPARE(ldrh(w10, MemOperand(x11, 255, PostIndex)), "ldrh w10, [x11], #255");
+  COMPARE(ldrh(w12, MemOperand(x13, -256, PostIndex)),
+          "ldrh w12, [x13], #-256");
+  COMPARE(strh(w14, MemOperand(x15)), "strh w14, [x15]");
+  COMPARE(strh(x16, MemOperand(x17)), "strh w16, [x17]");
+  COMPARE(strh(w18, MemOperand(x19, 8190)), "strh w18, [x19, #8190]");
+  COMPARE(strh(w20, MemOperand(x21, 255, PreIndex)), "strh w20, [x21, #255]!");
+  COMPARE(strh(w22, MemOperand(x23, -256, PreIndex)),
+          "strh w22, [x23, #-256]!");
+  COMPARE(strh(w24, MemOperand(x25, 255, PostIndex)), "strh w24, [x25], #255");
+  COMPARE(strh(w26, MemOperand(cp, -256, PostIndex)),
+          "strh w26, [cp], #-256");
+  // TODO(all): Fix this for jssp.
+  COMPARE(ldrh(w28, MemOperand(jssp, 3, PostIndex)), "ldrh w28, [jssp], #3");
+  COMPARE(strh(fp, MemOperand(jssp, -42, PreIndex)), "strh w29, [jssp, #-42]!");
+  // Offsets not a multiple of 2 (or negative) use the unscaled encodings.
+  COMPARE(ldrh(w30, MemOperand(x0, 255)), "ldurh w30, [x0, #255]");
+  COMPARE(ldrh(x1, MemOperand(x2, -256)), "ldurh w1, [x2, #-256]");
+  COMPARE(strh(w3, MemOperand(x4, 255)), "sturh w3, [x4, #255]");
+  COMPARE(strh(x5, MemOperand(x6, -256)), "sturh w5, [x6, #-256]");
+  COMPARE(ldrsh(w0, MemOperand(x1)), "ldrsh w0, [x1]");
+  COMPARE(ldrsh(w2, MemOperand(x3, 8)), "ldrsh w2, [x3, #8]");
+  COMPARE(ldrsh(w4, MemOperand(x5, 42, PreIndex)), "ldrsh w4, [x5, #42]!");
+  COMPARE(ldrsh(x6, MemOperand(x7, -11, PostIndex)), "ldrsh x6, [x7], #-11");
+
+  CLEANUP();
+}
+
+
+// Disassembly of single- (s) and double-precision (d) FP register loads and
+// stores, mirroring the integer load_store test: scaled offsets up to the
+// encodable maximum, pre-index, post-index, and jssp-based addressing.
+TEST_(load_store_fp) {
+  SET_UP();
+
+  COMPARE(ldr(s0, MemOperand(x1)), "ldr s0, [x1]");
+  COMPARE(ldr(s2, MemOperand(x3, 4)), "ldr s2, [x3, #4]");
+  COMPARE(ldr(s4, MemOperand(x5, 16380)), "ldr s4, [x5, #16380]");
+  COMPARE(ldr(d6, MemOperand(x7)), "ldr d6, [x7]");
+  COMPARE(ldr(d8, MemOperand(x9, 8)), "ldr d8, [x9, #8]");
+  COMPARE(ldr(d10, MemOperand(x11, 32760)), "ldr d10, [x11, #32760]");
+  COMPARE(str(s12, MemOperand(x13)), "str s12, [x13]");
+  COMPARE(str(s14, MemOperand(x15, 4)), "str s14, [x15, #4]");
+  COMPARE(str(s16, MemOperand(x17, 16380)), "str s16, [x17, #16380]");
+  COMPARE(str(d18, MemOperand(x19)), "str d18, [x19]");
+  COMPARE(str(d20, MemOperand(x21, 8)), "str d20, [x21, #8]");
+  COMPARE(str(d22, MemOperand(x23, 32760)), "str d22, [x23, #32760]");
+
+  COMPARE(ldr(s0, MemOperand(x1, 4, PreIndex)), "ldr s0, [x1, #4]!");
+  COMPARE(ldr(s2, MemOperand(x3, 255, PreIndex)), "ldr s2, [x3, #255]!");
+  COMPARE(ldr(s4, MemOperand(x5, -256, PreIndex)), "ldr s4, [x5, #-256]!");
+  COMPARE(ldr(d6, MemOperand(x7, 8, PreIndex)), "ldr d6, [x7, #8]!");
+  COMPARE(ldr(d8, MemOperand(x9, 255, PreIndex)), "ldr d8, [x9, #255]!");
+  COMPARE(ldr(d10, MemOperand(x11, -256, PreIndex)), "ldr d10, [x11, #-256]!");
+  COMPARE(str(s12, MemOperand(x13, 4, PreIndex)), "str s12, [x13, #4]!");
+  COMPARE(str(s14, MemOperand(x15, 255, PreIndex)), "str s14, [x15, #255]!");
+  COMPARE(str(s16, MemOperand(x17, -256, PreIndex)), "str s16, [x17, #-256]!");
+  COMPARE(str(d18, MemOperand(x19, 8, PreIndex)), "str d18, [x19, #8]!");
+  COMPARE(str(d20, MemOperand(x21, 255, PreIndex)), "str d20, [x21, #255]!");
+  COMPARE(str(d22, MemOperand(x23, -256, PreIndex)), "str d22, [x23, #-256]!");
+
+  COMPARE(ldr(s0, MemOperand(x1, 4, PostIndex)), "ldr s0, [x1], #4");
+  COMPARE(ldr(s2, MemOperand(x3, 255, PostIndex)), "ldr s2, [x3], #255");
+  COMPARE(ldr(s4, MemOperand(x5, -256, PostIndex)), "ldr s4, [x5], #-256");
+  COMPARE(ldr(d6, MemOperand(x7, 8, PostIndex)), "ldr d6, [x7], #8");
+  COMPARE(ldr(d8, MemOperand(x9, 255, PostIndex)), "ldr d8, [x9], #255");
+  COMPARE(ldr(d10, MemOperand(x11, -256, PostIndex)), "ldr d10, [x11], #-256");
+  COMPARE(str(s12, MemOperand(x13, 4, PostIndex)), "str s12, [x13], #4");
+  COMPARE(str(s14, MemOperand(x15, 255, PostIndex)), "str s14, [x15], #255");
+  COMPARE(str(s16, MemOperand(x17, -256, PostIndex)), "str s16, [x17], #-256");
+  COMPARE(str(d18, MemOperand(x19, 8, PostIndex)), "str d18, [x19], #8");
+  COMPARE(str(d20, MemOperand(x21, 255, PostIndex)), "str d20, [x21], #255");
+  COMPARE(str(d22, MemOperand(x23, -256, PostIndex)), "str d22, [x23], #-256");
+
+  // TODO(all): Fix this for jssp.
+  COMPARE(ldr(s24, MemOperand(jssp)), "ldr s24, [jssp]");
+  COMPARE(ldr(d25, MemOperand(jssp, 8)), "ldr d25, [jssp, #8]");
+  COMPARE(str(s26, MemOperand(jssp, 4, PreIndex)), "str s26, [jssp, #4]!");
+  COMPARE(str(d27, MemOperand(jssp, -8, PostIndex)), "str d27, [jssp], #-8");
+
+  CLEANUP();
+}
+
+
+// Disassembly of unscaled loads/stores (ldur/stur and variants): offsets that
+// are unaligned or negative cannot use the scaled encoding, so ldr/str emit
+// the unscaled form and the disassembler prints the ldur/stur mnemonics.
+// Covers the full encodable unscaled range [-256, 255].
+TEST_(load_store_unscaled) {
+  SET_UP();
+
+  COMPARE(ldr(w0, MemOperand(x1, 1)), "ldur w0, [x1, #1]");
+  COMPARE(ldr(w2, MemOperand(x3, -1)), "ldur w2, [x3, #-1]");
+  COMPARE(ldr(w4, MemOperand(x5, 255)), "ldur w4, [x5, #255]");
+  COMPARE(ldr(w6, MemOperand(x7, -256)), "ldur w6, [x7, #-256]");
+  COMPARE(ldr(x8, MemOperand(x9, 1)), "ldur x8, [x9, #1]");
+  COMPARE(ldr(x10, MemOperand(x11, -1)), "ldur x10, [x11, #-1]");
+  COMPARE(ldr(x12, MemOperand(x13, 255)), "ldur x12, [x13, #255]");
+  COMPARE(ldr(x14, MemOperand(x15, -256)), "ldur x14, [x15, #-256]");
+  COMPARE(str(w16, MemOperand(x17, 1)), "stur w16, [x17, #1]");
+  COMPARE(str(w18, MemOperand(x19, -1)), "stur w18, [x19, #-1]");
+  COMPARE(str(w20, MemOperand(x21, 255)), "stur w20, [x21, #255]");
+  COMPARE(str(w22, MemOperand(x23, -256)), "stur w22, [x23, #-256]");
+  COMPARE(str(x24, MemOperand(x25, 1)), "stur x24, [x25, #1]");
+  COMPARE(str(x26, MemOperand(cp, -1)), "stur x26, [cp, #-1]");
+  COMPARE(str(jssp, MemOperand(fp, 255)), "stur jssp, [fp, #255]");
+  COMPARE(str(lr, MemOperand(x0, -256)), "stur lr, [x0, #-256]");
+  COMPARE(ldr(w0, MemOperand(csp, 1)), "ldur w0, [csp, #1]");
+  COMPARE(str(x1, MemOperand(csp, -1)), "stur x1, [csp, #-1]");
+  COMPARE(ldrb(w2, MemOperand(x3, -2)), "ldurb w2, [x3, #-2]");
+  COMPARE(ldrsb(w4, MemOperand(x5, -3)), "ldursb w4, [x5, #-3]");
+  COMPARE(ldrsb(x6, MemOperand(x7, -4)), "ldursb x6, [x7, #-4]");
+  COMPARE(ldrh(w8, MemOperand(x9, -5)), "ldurh w8, [x9, #-5]");
+  COMPARE(ldrsh(w10, MemOperand(x11, -6)), "ldursh w10, [x11, #-6]");
+  COMPARE(ldrsh(x12, MemOperand(x13, -7)), "ldursh x12, [x13, #-7]");
+  COMPARE(ldrsw(x14, MemOperand(x15, -8)), "ldursw x14, [x15, #-8]");
+
+  CLEANUP();
+}
+
+
+// Disassembly of pair loads/stores (ldp/stp) for W/X/S/D registers across
+// offset, pre-indexed and post-indexed addressing at the encodable offset
+// limits (+/-252 for 4-byte, +/-504/512 for 8-byte accesses), plus ldpsw.
+TEST_(load_store_pair) {
+  SET_UP();
+
+  COMPARE(ldp(w0, w1, MemOperand(x2)), "ldp w0, w1, [x2]");
+  COMPARE(ldp(x3, x4, MemOperand(x5)), "ldp x3, x4, [x5]");
+  COMPARE(ldp(w6, w7, MemOperand(x8, 4)), "ldp w6, w7, [x8, #4]");
+  COMPARE(ldp(x9, x10, MemOperand(x11, 8)), "ldp x9, x10, [x11, #8]");
+  COMPARE(ldp(w12, w13, MemOperand(x14, 252)), "ldp w12, w13, [x14, #252]");
+  COMPARE(ldp(x15, x16, MemOperand(x17, 504)), "ldp x15, x16, [x17, #504]");
+  COMPARE(ldp(w18, w19, MemOperand(x20, -256)), "ldp w18, w19, [x20, #-256]");
+  COMPARE(ldp(x21, x22, MemOperand(x23, -512)), "ldp x21, x22, [x23, #-512]");
+  COMPARE(ldp(w24, w25, MemOperand(x26, 252, PreIndex)),
+          "ldp w24, w25, [x26, #252]!");
+  COMPARE(ldp(cp, jssp, MemOperand(fp, 504, PreIndex)),
+          "ldp cp, jssp, [fp, #504]!");
+  COMPARE(ldp(w30, w0, MemOperand(x1, -256, PreIndex)),
+          "ldp w30, w0, [x1, #-256]!");
+  COMPARE(ldp(x2, x3, MemOperand(x4, -512, PreIndex)),
+          "ldp x2, x3, [x4, #-512]!");
+  COMPARE(ldp(w5, w6, MemOperand(x7, 252, PostIndex)),
+          "ldp w5, w6, [x7], #252");
+  COMPARE(ldp(x8, x9, MemOperand(x10, 504, PostIndex)),
+          "ldp x8, x9, [x10], #504");
+  COMPARE(ldp(w11, w12, MemOperand(x13, -256, PostIndex)),
+          "ldp w11, w12, [x13], #-256");
+  COMPARE(ldp(x14, x15, MemOperand(x16, -512, PostIndex)),
+          "ldp x14, x15, [x16], #-512");
+
+  COMPARE(ldp(s17, s18, MemOperand(x19)), "ldp s17, s18, [x19]");
+  COMPARE(ldp(s20, s21, MemOperand(x22, 252)), "ldp s20, s21, [x22, #252]");
+  COMPARE(ldp(s23, s24, MemOperand(x25, -256)), "ldp s23, s24, [x25, #-256]");
+  COMPARE(ldp(s26, s27, MemOperand(jssp, 252, PreIndex)),
+          "ldp s26, s27, [jssp, #252]!");
+  COMPARE(ldp(s29, s30, MemOperand(fp, -256, PreIndex)),
+          "ldp s29, s30, [fp, #-256]!");
+  COMPARE(ldp(s31, s0, MemOperand(x1, 252, PostIndex)),
+          "ldp s31, s0, [x1], #252");
+  COMPARE(ldp(s2, s3, MemOperand(x4, -256, PostIndex)),
+          "ldp s2, s3, [x4], #-256");
+  COMPARE(ldp(d17, d18, MemOperand(x19)), "ldp d17, d18, [x19]");
+  COMPARE(ldp(d20, d21, MemOperand(x22, 504)), "ldp d20, d21, [x22, #504]");
+  COMPARE(ldp(d23, d24, MemOperand(x25, -512)), "ldp d23, d24, [x25, #-512]");
+  COMPARE(ldp(d26, d27, MemOperand(jssp, 504, PreIndex)),
+          "ldp d26, d27, [jssp, #504]!");
+  COMPARE(ldp(d29, d30, MemOperand(fp, -512, PreIndex)),
+          "ldp d29, d30, [fp, #-512]!");
+  COMPARE(ldp(d31, d0, MemOperand(x1, 504, PostIndex)),
+          "ldp d31, d0, [x1], #504");
+  COMPARE(ldp(d2, d3, MemOperand(x4, -512, PostIndex)),
+          "ldp d2, d3, [x4], #-512");
+
+  COMPARE(stp(w0, w1, MemOperand(x2)), "stp w0, w1, [x2]");
+  COMPARE(stp(x3, x4, MemOperand(x5)), "stp x3, x4, [x5]");
+  COMPARE(stp(w6, w7, MemOperand(x8, 4)), "stp w6, w7, [x8, #4]");
+  COMPARE(stp(x9, x10, MemOperand(x11, 8)), "stp x9, x10, [x11, #8]");
+  COMPARE(stp(w12, w13, MemOperand(x14, 252)), "stp w12, w13, [x14, #252]");
+  COMPARE(stp(x15, x16, MemOperand(x17, 504)), "stp x15, x16, [x17, #504]");
+  COMPARE(stp(w18, w19, MemOperand(x20, -256)), "stp w18, w19, [x20, #-256]");
+  COMPARE(stp(x21, x22, MemOperand(x23, -512)), "stp x21, x22, [x23, #-512]");
+  COMPARE(stp(w24, w25, MemOperand(x26, 252, PreIndex)),
+          "stp w24, w25, [x26, #252]!");
+  COMPARE(stp(cp, jssp, MemOperand(fp, 504, PreIndex)),
+          "stp cp, jssp, [fp, #504]!");
+  COMPARE(stp(w30, w0, MemOperand(x1, -256, PreIndex)),
+          "stp w30, w0, [x1, #-256]!");
+  COMPARE(stp(x2, x3, MemOperand(x4, -512, PreIndex)),
+          "stp x2, x3, [x4, #-512]!");
+  COMPARE(stp(w5, w6, MemOperand(x7, 252, PostIndex)),
+          "stp w5, w6, [x7], #252");
+  COMPARE(stp(x8, x9, MemOperand(x10, 504, PostIndex)),
+          "stp x8, x9, [x10], #504");
+  COMPARE(stp(w11, w12, MemOperand(x13, -256, PostIndex)),
+          "stp w11, w12, [x13], #-256");
+  COMPARE(stp(x14, x15, MemOperand(x16, -512, PostIndex)),
+          "stp x14, x15, [x16], #-512");
+
+  COMPARE(stp(s17, s18, MemOperand(x19)), "stp s17, s18, [x19]");
+  COMPARE(stp(s20, s21, MemOperand(x22, 252)), "stp s20, s21, [x22, #252]");
+  COMPARE(stp(s23, s24, MemOperand(x25, -256)), "stp s23, s24, [x25, #-256]");
+  COMPARE(stp(s26, s27, MemOperand(jssp, 252, PreIndex)),
+          "stp s26, s27, [jssp, #252]!");
+  COMPARE(stp(s29, s30, MemOperand(fp, -256, PreIndex)),
+          "stp s29, s30, [fp, #-256]!");
+  COMPARE(stp(s31, s0, MemOperand(x1, 252, PostIndex)),
+          "stp s31, s0, [x1], #252");
+  COMPARE(stp(s2, s3, MemOperand(x4, -256, PostIndex)),
+          "stp s2, s3, [x4], #-256");
+  COMPARE(stp(d17, d18, MemOperand(x19)), "stp d17, d18, [x19]");
+  COMPARE(stp(d20, d21, MemOperand(x22, 504)), "stp d20, d21, [x22, #504]");
+  COMPARE(stp(d23, d24, MemOperand(x25, -512)), "stp d23, d24, [x25, #-512]");
+  COMPARE(stp(d26, d27, MemOperand(jssp, 504, PreIndex)),
+          "stp d26, d27, [jssp, #504]!");
+  COMPARE(stp(d29, d30, MemOperand(fp, -512, PreIndex)),
+          "stp d29, d30, [fp, #-512]!");
+  COMPARE(stp(d31, d0, MemOperand(x1, 504, PostIndex)),
+          "stp d31, d0, [x1], #504");
+  COMPARE(stp(d2, d3, MemOperand(x4, -512, PostIndex)),
+          "stp d2, d3, [x4], #-512");
+
+  // TODO(all): Update / Restore this test.
+  COMPARE(ldp(w16, w17, MemOperand(jssp, 4, PostIndex)),
+          "ldp w16, w17, [jssp], #4");
+  COMPARE(stp(x18, x19, MemOperand(jssp, -8, PreIndex)),
+          "stp x18, x19, [jssp, #-8]!");
+  COMPARE(ldp(s30, s31, MemOperand(jssp, 12, PostIndex)),
+          "ldp s30, s31, [jssp], #12");
+  COMPARE(stp(d30, d31, MemOperand(jssp, -16)),
+          "stp d30, d31, [jssp, #-16]");
+
+  COMPARE(ldpsw(x0, x1, MemOperand(x2)), "ldpsw x0, x1, [x2]");
+  COMPARE(ldpsw(x3, x4, MemOperand(x5, 16)), "ldpsw x3, x4, [x5, #16]");
+  COMPARE(ldpsw(x6, x7, MemOperand(x8, -32, PreIndex)),
+          "ldpsw x6, x7, [x8, #-32]!");
+  COMPARE(ldpsw(x9, x10, MemOperand(x11, 128, PostIndex)),
+          "ldpsw x9, x10, [x11], #128");
+
+  CLEANUP();
+}
+
+
+// Disassembly of non-temporal pair loads/stores (ldnp/stnp) for W/X/S/D
+// registers at zero and extreme positive/negative offsets. Only plain offset
+// addressing applies: these encodings have no pre/post-index forms.
+TEST_(load_store_pair_nontemp) {
+  SET_UP();
+
+  COMPARE(ldnp(w0, w1, MemOperand(x2)), "ldnp w0, w1, [x2]");
+  COMPARE(stnp(w3, w4, MemOperand(x5, 252)), "stnp w3, w4, [x5, #252]");
+  COMPARE(ldnp(w6, w7, MemOperand(x8, -256)), "ldnp w6, w7, [x8, #-256]");
+  COMPARE(stnp(x9, x10, MemOperand(x11)), "stnp x9, x10, [x11]");
+  COMPARE(ldnp(x12, x13, MemOperand(x14, 504)), "ldnp x12, x13, [x14, #504]");
+  COMPARE(stnp(x15, x16, MemOperand(x17, -512)), "stnp x15, x16, [x17, #-512]");
+  COMPARE(ldnp(s18, s19, MemOperand(x20)), "ldnp s18, s19, [x20]");
+  COMPARE(stnp(s21, s22, MemOperand(x23, 252)), "stnp s21, s22, [x23, #252]");
+  COMPARE(ldnp(s24, s25, MemOperand(x26, -256)), "ldnp s24, s25, [x26, #-256]");
+  COMPARE(stnp(d27, d28, MemOperand(fp)), "stnp d27, d28, [fp]");
+  COMPARE(ldnp(d30, d31, MemOperand(x0, 504)), "ldnp d30, d31, [x0, #504]");
+  COMPARE(stnp(d1, d2, MemOperand(x3, -512)), "stnp d1, d2, [x3, #-512]");
+
+  CLEANUP();
+}
+
+// Disabled test for literal-pool loads, which disassemble as pc-relative
+// loads ("pc+8"); compiled out until the TODO below is resolved.
+#if 0  // TODO(all): enable.
+TEST_(load_literal) {
+  SET_UP();
+
+  COMPARE_PREFIX(ldr(x10, 0x1234567890abcdefUL), "ldr x10, pc+8");
+  COMPARE_PREFIX(ldr(w20, 0xfedcba09), "ldr w20, pc+8");
+  COMPARE_PREFIX(ldr(d11, 1.234), "ldr d11, pc+8");
+  COMPARE_PREFIX(ldr(s22, 2.5), "ldr s22, pc+8");
+
+  CLEANUP();
+}
+#endif
+
+// Disassembly of conditional select instructions (csel/csinc/csinv/csneg) and
+// their aliases (cset/csetm/cinc/cinv/cneg), across all condition codes
+// including al/nv (which suppress the alias forms).
+TEST_(cond_select) {
+  SET_UP();
+
+  COMPARE(csel(w0, w1, w2, eq), "csel w0, w1, w2, eq");
+  COMPARE(csel(x3, x4, x5, ne), "csel x3, x4, x5, ne");
+  COMPARE(csinc(w6, w7, w8, hs), "csinc w6, w7, w8, hs");
+  COMPARE(csinc(x9, x10, x11, lo), "csinc x9, x10, x11, lo");
+  COMPARE(csinv(w12, w13, w14, mi), "csinv w12, w13, w14, mi");
+  COMPARE(csinv(x15, x16, x17, pl), "csinv x15, x16, x17, pl");
+  COMPARE(csneg(w18, w19, w20, vs), "csneg w18, w19, w20, vs");
+  COMPARE(csneg(x21, x22, x23, vc), "csneg x21, x22, x23, vc");
+  COMPARE(cset(w24, hi), "cset w24, hi");
+  COMPARE(cset(x25, ls), "cset x25, ls");
+  COMPARE(csetm(w26, ge), "csetm w26, ge");
+  COMPARE(csetm(cp, lt), "csetm cp, lt");
+  COMPARE(cinc(w28, w29, gt), "cinc w28, w29, gt");
+  COMPARE(cinc(lr, x0, le), "cinc lr, x0, le");
+  COMPARE(cinv(w1, w2, eq), "cinv w1, w2, eq");
+  COMPARE(cinv(x3, x4, ne), "cinv x3, x4, ne");
+  COMPARE(cneg(w5, w6, hs), "cneg w5, w6, hs");
+  COMPARE(cneg(x7, x8, lo), "cneg x7, x8, lo");
+
+  // al and nv conditions always print the base mnemonic, never an alias.
+  COMPARE(csel(x0, x1, x2, al), "csel x0, x1, x2, al");
+  COMPARE(csel(x1, x2, x3, nv), "csel x1, x2, x3, nv");
+  COMPARE(csinc(x2, x3, x4, al), "csinc x2, x3, x4, al");
+  COMPARE(csinc(x3, x4, x5, nv), "csinc x3, x4, x5, nv");
+  COMPARE(csinv(x4, x5, x6, al), "csinv x4, x5, x6, al");
+  COMPARE(csinv(x5, x6, x7, nv), "csinv x5, x6, x7, nv");
+  COMPARE(csneg(x6, x7, x8, al), "csneg x6, x7, x8, al");
+  COMPARE(csneg(x7, x8, x9, nv), "csneg x7, x8, x9, nv");
+
+  CLEANUP();
+}
+
+
+TEST(cond_select_macro) {
+  SET_UP_CLASS(MacroAssembler);
+
+  // Csel with immediate operands -1, 0, +1 lowers to csinv, csel and csinc
+  // respectively, against the appropriate zero register.
+  COMPARE(Csel(w0, w1, -1, eq), "csinv w0, w1, wzr, eq");
+  COMPARE(Csel(w2, w3, 0, ne), "csel w2, w3, wzr, ne");
+  COMPARE(Csel(w4, w5, 1, hs), "csinc w4, w5, wzr, hs");
+  COMPARE(Csel(x6, x7, -1, lo), "csinv x6, x7, xzr, lo");
+  COMPARE(Csel(x8, x9, 0, mi), "csel x8, x9, xzr, mi");
+  COMPARE(Csel(x10, x11, 1, pl), "csinc x10, x11, xzr, pl");
+
+  CLEANUP();
+}
+
+
<br>
+TEST_(cond_cmp) {
+  SET_UP();
+
+  // Conditional compare with register and 5-bit immediate operands. In the
+  // expected output, flags that are set print uppercase within the #nzcv
+  // field (e.g. NZCFlag -> "#NZCv").
+  COMPARE(ccmn(w0, w1, NZCVFlag, eq), "ccmn w0, w1, #NZCV, eq");
+  COMPARE(ccmn(x2, x3, NZCFlag, ne), "ccmn x2, x3, #NZCv, ne");
+  COMPARE(ccmp(w4, w5, NZVFlag, hs), "ccmp w4, w5, #NZcV, hs");
+  COMPARE(ccmp(x6, x7, NZFlag, lo), "ccmp x6, x7, #NZcv, lo");
+  COMPARE(ccmn(w8, 31, NFlag, mi), "ccmn w8, #31, #Nzcv, mi");
+  COMPARE(ccmn(x9, 30, NCFlag, pl), "ccmn x9, #30, #NzCv, pl");
+  COMPARE(ccmp(w10, 29, NVFlag, vs), "ccmp w10, #29, #NzcV, vs");
+  COMPARE(ccmp(x11, 28, NFlag, vc), "ccmp x11, #28, #Nzcv, vc");
+  COMPARE(ccmn(w12, w13, NoFlag, al), "ccmn w12, w13, #nzcv, al");
+  COMPARE(ccmp(x14, 27, ZVFlag, nv), "ccmp x14, #27, #nZcV, nv");
+
+  CLEANUP();
+}
+
+
+TEST_(cond_cmp_macro) {
+  SET_UP_CLASS(MacroAssembler);
+
+  // Ccmp/Ccmn with negative immediates assemble as the opposite instruction
+  // with the negated (positive) immediate.
+  COMPARE(Ccmp(w0, -1, VFlag, hi), "ccmn w0, #1, #nzcV, hi");
+  COMPARE(Ccmp(x1, -31, CFlag, ge), "ccmn x1, #31, #nzCv, ge");
+  COMPARE(Ccmn(w2, -1, CVFlag, gt), "ccmp w2, #1, #nzCV, gt");
+  COMPARE(Ccmn(x3, -31, ZCVFlag, ls), "ccmp x3, #31, #nZCV, ls");
+
+  CLEANUP();
+}
+
+
+TEST_(fmov_imm) {
+  SET_UP();
+
+  // Immediate-form fmov prints the 8-bit immediate encoding followed by the
+  // decoded value, e.g. "#0x70 (1.0000)".
+  COMPARE(fmov(s0, 1.0), "fmov s0, #0x70 (1.0000)");
+  COMPARE(fmov(s31, -13.0), "fmov s31, #0xaa (-13.0000)");
+  COMPARE(fmov(d1, 1.0), "fmov d1, #0x70 (1.0000)");
+  COMPARE(fmov(d29, -13.0), "fmov d29, #0xaa (-13.0000)");
+
+  CLEANUP();
+}
+
+
+TEST_(fmov_reg) {
+  SET_UP();
+
+  // Register fmov: FP-to-integer, integer-to-FP, and FP-to-FP moves.
+  COMPARE(fmov(w3, s13), "fmov w3, s13");
+  COMPARE(fmov(x6, d26), "fmov x6, d26");
+  COMPARE(fmov(s11, w30), "fmov s11, w30");
+  COMPARE(fmov(d31, x2), "fmov d31, x2");
+  COMPARE(fmov(s12, s13), "fmov s12, s13");
+  COMPARE(fmov(d22, d23), "fmov d22, d23");
+
+  CLEANUP();
+}
+
+
+TEST_(fp_dp1) {
+  SET_UP();
+
+  // Single-source FP data processing: fabs/fneg/fsqrt, the frint{a,n,z}
+  // rounding variants, and single-to-double fcvt, for s and d registers.
+  COMPARE(fabs(s0, s1), "fabs s0, s1");
+  COMPARE(fabs(s31, s30), "fabs s31, s30");
+  COMPARE(fabs(d2, d3), "fabs d2, d3");
+  COMPARE(fabs(d31, d30), "fabs d31, d30");
+  COMPARE(fneg(s4, s5), "fneg s4, s5");
+  COMPARE(fneg(s31, s30), "fneg s31, s30");
+  COMPARE(fneg(d6, d7), "fneg d6, d7");
+  COMPARE(fneg(d31, d30), "fneg d31, d30");
+  COMPARE(fsqrt(s8, s9), "fsqrt s8, s9");
+  COMPARE(fsqrt(s31, s30), "fsqrt s31, s30");
+  COMPARE(fsqrt(d10, d11), "fsqrt d10, d11");
+  COMPARE(fsqrt(d31, d30), "fsqrt d31, d30");
+  COMPARE(frinta(s10, s11), "frinta s10, s11");
+  COMPARE(frinta(s31, s30), "frinta s31, s30");
+  COMPARE(frinta(d12, d13), "frinta d12, d13");
+  COMPARE(frinta(d31, d30), "frinta d31, d30");
+  COMPARE(frintn(s10, s11), "frintn s10, s11");
+  COMPARE(frintn(s31, s30), "frintn s31, s30");
+  COMPARE(frintn(d12, d13), "frintn d12, d13");
+  COMPARE(frintn(d31, d30), "frintn d31, d30");
+  COMPARE(frintz(s10, s11), "frintz s10, s11");
+  COMPARE(frintz(s31, s30), "frintz s31, s30");
+  COMPARE(frintz(d12, d13), "frintz d12, d13");
+  COMPARE(frintz(d31, d30), "frintz d31, d30");
+  COMPARE(fcvt(d14, s15), "fcvt d14, s15");
+  COMPARE(fcvt(d31, s31), "fcvt d31, s31");
+
+  CLEANUP();
+}
+
+
+TEST_(fp_dp2) {
+  SET_UP();
+
+  // Two-source FP arithmetic: add/sub/mul/div, min/max and the NaN-handling
+  // minnm/maxnm variants, for s and d registers.
+  COMPARE(fadd(s0, s1, s2), "fadd s0, s1, s2");
+  COMPARE(fadd(d3, d4, d5), "fadd d3, d4, d5");
+  COMPARE(fsub(s31, s30, s29), "fsub s31, s30, s29");
+  COMPARE(fsub(d31, d30, d29), "fsub d31, d30, d29");
+  COMPARE(fmul(s7, s8, s9), "fmul s7, s8, s9");
+  COMPARE(fmul(d10, d11, d12), "fmul d10, d11, d12");
+  COMPARE(fdiv(s13, s14, s15), "fdiv s13, s14, s15");
+  COMPARE(fdiv(d16, d17, d18), "fdiv d16, d17, d18");
+  COMPARE(fmax(s19, s20, s21), "fmax s19, s20, s21");
+  COMPARE(fmax(d22, d23, d24), "fmax d22, d23, d24");
+  COMPARE(fmin(s25, s26, s27), "fmin s25, s26, s27");
+  COMPARE(fmin(d28, d29, d30), "fmin d28, d29, d30");
+  COMPARE(fmaxnm(s31, s0, s1), "fmaxnm s31, s0, s1");
+  COMPARE(fmaxnm(d2, d3, d4), "fmaxnm d2, d3, d4");
+  COMPARE(fminnm(s5, s6, s7), "fminnm s5, s6, s7");
+  COMPARE(fminnm(d8, d9, d10), "fminnm d8, d9, d10");
+
+  CLEANUP();
+}
+
+
+TEST(fp_dp3) {
+  SET_UP();
+
+  // Fused multiply-add family: fmadd/fmsub and the negated fnmadd/fnmsub.
+  COMPARE(fmadd(s7, s8, s9, s10), "fmadd s7, s8, s9, s10");
+  COMPARE(fmadd(d10, d11, d12, d10), "fmadd d10, d11, d12, d10");
+  COMPARE(fmsub(s7, s8, s9, s10), "fmsub s7, s8, s9, s10");
+  COMPARE(fmsub(d10, d11, d12, d10), "fmsub d10, d11, d12, d10");
+
+  COMPARE(fnmadd(s7, s8, s9, s10), "fnmadd s7, s8, s9, s10");
+  COMPARE(fnmadd(d10, d11, d12, d10), "fnmadd d10, d11, d12, d10");
+  COMPARE(fnmsub(s7, s8, s9, s10), "fnmsub s7, s8, s9, s10");
+  COMPARE(fnmsub(d10, d11, d12, d10), "fnmsub d10, d11, d12, d10");
+
+  CLEANUP();
+}
+
+
+TEST_(fp_compare) {
+  SET_UP();
+
+  // fcmp against another register, and the immediate-zero form which
+  // disassembles as "#0.0".
+  COMPARE(fcmp(s0, s1), "fcmp s0, s1");
+  COMPARE(fcmp(s31, s30), "fcmp s31, s30");
+  COMPARE(fcmp(d0, d1), "fcmp d0, d1");
+  COMPARE(fcmp(d31, d30), "fcmp d31, d30");
+  COMPARE(fcmp(s12, 0), "fcmp s12, #0.0");
+  COMPARE(fcmp(d12, 0), "fcmp d12, #0.0");
+
+  CLEANUP();
+}
+
+
+TEST_(fp_cond_compare) {
+  SET_UP();
+
+  // FP conditional compare; as with ccmp/ccmn, set flags print uppercase in
+  // the #nzcv field. Includes the al and nv condition codes.
+  COMPARE(fccmp(s0, s1, NoFlag, eq), "fccmp s0, s1, #nzcv, eq");
+  COMPARE(fccmp(s2, s3, ZVFlag, ne), "fccmp s2, s3, #nZcV, ne");
+  COMPARE(fccmp(s30, s16, NCFlag, pl), "fccmp s30, s16, #NzCv, pl");
+  COMPARE(fccmp(s31, s31, NZCVFlag, le), "fccmp s31, s31, #NZCV, le");
+  COMPARE(fccmp(d4, d5, VFlag, gt), "fccmp d4, d5, #nzcV, gt");
+  COMPARE(fccmp(d6, d7, NFlag, vs), "fccmp d6, d7, #Nzcv, vs");
+  COMPARE(fccmp(d30, d0, NZFlag, vc), "fccmp d30, d0, #NZcv, vc");
+  COMPARE(fccmp(d31, d31, ZFlag, hs), "fccmp d31, d31, #nZcv, hs");
+  COMPARE(fccmp(s14, s15, CVFlag, al), "fccmp s14, s15, #nzCV, al");
+  COMPARE(fccmp(d16, d17, CFlag, nv), "fccmp d16, d17, #nzCv, nv");
+
+  CLEANUP();
+}
+
+
+TEST_(fp_select) {
+  SET_UP();
+
+  // FP conditional select for s and d registers, including the al and nv
+  // condition codes.
+  // Fix: the first COMPARE was missing its terminating semicolon; every
+  // other COMPARE statement in this file is semicolon-terminated.
+  COMPARE(fcsel(s0, s1, s2, eq), "fcsel s0, s1, s2, eq");
+  COMPARE(fcsel(s31, s31, s30, ne), "fcsel s31, s31, s30, ne");
+  COMPARE(fcsel(d0, d1, d2, mi), "fcsel d0, d1, d2, mi");
+  COMPARE(fcsel(d31, d30, d31, pl), "fcsel d31, d30, d31, pl");
+  COMPARE(fcsel(s14, s15, s16, al), "fcsel s14, s15, s16, al");
+  COMPARE(fcsel(d17, d18, d19, nv), "fcsel d17, d18, d19, nv");
+
+  CLEANUP();
+}
+
+
+TEST_(fcvt_scvtf_ucvtf) {
+  SET_UP();
+
+  // FP-to-integer conversions with explicit rounding (fcvta*/fcvtn*/fcvtm*/
+  // fcvtz*) and integer-to-FP scvtf/ucvtf, including the fixed-point forms;
+  // a zero fbits count prints without a "#fbits" suffix.
+  COMPARE(fcvtas(w0, s1), "fcvtas w0, s1");
+  COMPARE(fcvtas(x2, s3), "fcvtas x2, s3");
+  COMPARE(fcvtas(w4, d5), "fcvtas w4, d5");
+  COMPARE(fcvtas(x6, d7), "fcvtas x6, d7");
+  COMPARE(fcvtau(w8, s9), "fcvtau w8, s9");
+  COMPARE(fcvtau(x10, s11), "fcvtau x10, s11");
+  COMPARE(fcvtau(w12, d13), "fcvtau w12, d13");
+  COMPARE(fcvtau(x14, d15), "fcvtau x14, d15");
+  COMPARE(fcvtns(w0, s1), "fcvtns w0, s1");
+  COMPARE(fcvtns(x2, s3), "fcvtns x2, s3");
+  COMPARE(fcvtns(w4, d5), "fcvtns w4, d5");
+  COMPARE(fcvtns(x6, d7), "fcvtns x6, d7");
+  COMPARE(fcvtnu(w8, s9), "fcvtnu w8, s9");
+  COMPARE(fcvtnu(x10, s11), "fcvtnu x10, s11");
+  COMPARE(fcvtnu(w12, d13), "fcvtnu w12, d13");
+  COMPARE(fcvtnu(x14, d15), "fcvtnu x14, d15");
+  COMPARE(fcvtzu(x16, d17), "fcvtzu x16, d17");
+  COMPARE(fcvtzu(w18, d19), "fcvtzu w18, d19");
+  COMPARE(fcvtzs(x20, d21), "fcvtzs x20, d21");
+  COMPARE(fcvtzs(w22, d23), "fcvtzs w22, d23");
+  COMPARE(fcvtzu(x16, s17), "fcvtzu x16, s17");
+  COMPARE(fcvtzu(w18, s19), "fcvtzu w18, s19");
+  COMPARE(fcvtzs(x20, s21), "fcvtzs x20, s21");
+  COMPARE(fcvtzs(w22, s23), "fcvtzs w22, s23");
+  COMPARE(scvtf(d24, w25), "scvtf d24, w25");
+  COMPARE(scvtf(s24, w25), "scvtf s24, w25");
+  COMPARE(scvtf(d26, x0), "scvtf d26, x0");
+  COMPARE(scvtf(s26, x0), "scvtf s26, x0");
+  COMPARE(ucvtf(d28, w29), "ucvtf d28, w29");
+  COMPARE(ucvtf(s28, w29), "ucvtf s28, w29");
+  COMPARE(ucvtf(d0, x1), "ucvtf d0, x1");
+  COMPARE(ucvtf(s0, x1), "ucvtf s0, x1");
+  COMPARE(ucvtf(d0, x1, 0), "ucvtf d0, x1");
+  COMPARE(ucvtf(s0, x1, 0), "ucvtf s0, x1");
+  COMPARE(scvtf(d1, x2, 1), "scvtf d1, x2, #1");
+  COMPARE(scvtf(s1, x2, 1), "scvtf s1, x2, #1");
+  COMPARE(scvtf(d3, x4, 15), "scvtf d3, x4, #15");
+  COMPARE(scvtf(s3, x4, 15), "scvtf s3, x4, #15");
+  COMPARE(scvtf(d5, x6, 32), "scvtf d5, x6, #32");
+  COMPARE(scvtf(s5, x6, 32), "scvtf s5, x6, #32");
+  COMPARE(ucvtf(d7, x8, 2), "ucvtf d7, x8, #2");
+  COMPARE(ucvtf(s7, x8, 2), "ucvtf s7, x8, #2");
+  COMPARE(ucvtf(d9, x10, 16), "ucvtf d9, x10, #16");
+  COMPARE(ucvtf(s9, x10, 16), "ucvtf s9, x10, #16");
+  COMPARE(ucvtf(d11, x12, 33), "ucvtf d11, x12, #33");
+  COMPARE(ucvtf(s11, x12, 33), "ucvtf s11, x12, #33");
+  COMPARE(fcvtms(w0, s1), "fcvtms w0, s1");
+  COMPARE(fcvtms(x2, s3), "fcvtms x2, s3");
+  COMPARE(fcvtms(w4, d5), "fcvtms w4, d5");
+  COMPARE(fcvtms(x6, d7), "fcvtms x6, d7");
+  COMPARE(fcvtmu(w8, s9), "fcvtmu w8, s9");
+  COMPARE(fcvtmu(x10, s11), "fcvtmu x10, s11");
+  COMPARE(fcvtmu(w12, d13), "fcvtmu w12, d13");
+  COMPARE(fcvtmu(x14, d15), "fcvtmu x14, d15");
+
+  CLEANUP();
+}
+
+
+TEST_(system_mrs) {
+  SET_UP();
+
+  // mrs reads of the NZCV and FPCR system registers; x30 prints as lr.
+  COMPARE(mrs(x0, NZCV), "mrs x0, nzcv");
+  COMPARE(mrs(lr, NZCV), "mrs lr, nzcv");
+  COMPARE(mrs(x15, FPCR), "mrs x15, fpcr");
+
+  CLEANUP();
+}
+
+
+TEST_(system_msr) {
+  SET_UP();
+
+  // msr writes of the NZCV and FPCR system registers; x30 prints as lr.
+  COMPARE(msr(NZCV, x0), "msr nzcv, x0");
+  COMPARE(msr(NZCV, x30), "msr nzcv, lr");
+  COMPARE(msr(FPCR, x15), "msr fpcr, x15");
+
+  CLEANUP();
+}
+
+
+TEST_(system_nop) {
+  SET_UP();
+
+  // nop disassembles as itself.
+  COMPARE(nop(), "nop");
+
+  CLEANUP();
+}
+
+
+TEST_(debug) {
+  SET_UP();
+
+  // debug() always assembles to a hlt with payload kImmExceptionIsDebug
+  // (0xdeb0), independent of the debug code argument.
+  ASSERT(kImmExceptionIsDebug == 0xdeb0);
+
+  // All debug codes should produce the same instruction, and the debug code
+  // can be any uint32_t.
+  COMPARE(debug("message", 0, NO_PARAM), "hlt #0xdeb0");
+  COMPARE(debug("message", 1, NO_PARAM), "hlt #0xdeb0");
+  COMPARE(debug("message", 0xffff, NO_PARAM), "hlt #0xdeb0");
+  COMPARE(debug("message", 0x10000, NO_PARAM), "hlt #0xdeb0");
+  COMPARE(debug("message", 0x7fffffff, NO_PARAM), "hlt #0xdeb0");
+  COMPARE(debug("message", 0x80000000u, NO_PARAM), "hlt #0xdeb0");
+  COMPARE(debug("message", 0xffffffffu, NO_PARAM), "hlt #0xdeb0");
+
+  CLEANUP();
+}
+
+
+TEST_(hlt) {
+  SET_UP();
+
+  // hlt immediates print in hex, covering the 16-bit range bounds.
+  COMPARE(hlt(0), "hlt #0x0");
+  COMPARE(hlt(1), "hlt #0x1");
+  COMPARE(hlt(65535), "hlt #0xffff");
+
+  CLEANUP();
+}
+
+
+TEST_(brk) {
+  SET_UP();
+
+  // brk immediates print in hex, covering the 16-bit range bounds.
+  COMPARE(brk(0), "brk #0x0");
+  COMPARE(brk(1), "brk #0x1");
+  COMPARE(brk(65535), "brk #0xffff");
+
+  CLEANUP();
+}
+
+
+TEST_(add_sub_negative) {
+  SET_UP_CLASS(MacroAssembler);
+
+  // Negative immediates assemble as the inverse operation with the negated
+  // (positive) immediate: Add -> sub, Sub -> add, Cmp -> cmn, Cmn -> cmp.
+  COMPARE(Add(x10, x0, -42), "sub x10, x0, #0x2a (42)");
+  COMPARE(Add(x11, x1, -687), "sub x11, x1, #0x2af (687)");
+  COMPARE(Add(x12, x2, -0x88), "sub x12, x2, #0x88 (136)");
+
+  COMPARE(Sub(x13, x0, -600), "add x13, x0, #0x258 (600)");
+  COMPARE(Sub(x14, x1, -313), "add x14, x1, #0x139 (313)");
+  COMPARE(Sub(x15, x2, -0x555), "add x15, x2, #0x555 (1365)");
+
+  COMPARE(Add(w19, w3, -0x344), "sub w19, w3, #0x344 (836)");
+  COMPARE(Add(w20, w4, -2000), "sub w20, w4, #0x7d0 (2000)");
+
+  COMPARE(Sub(w21, w3, -0xbc), "add w21, w3, #0xbc (188)");
+  COMPARE(Sub(w22, w4, -2000), "add w22, w4, #0x7d0 (2000)");
+
+  COMPARE(Cmp(w0, -1), "cmn w0, #0x1 (1)");
+  COMPARE(Cmp(x1, -1), "cmn x1, #0x1 (1)");
+  COMPARE(Cmp(w2, -4095), "cmn w2, #0xfff (4095)");
+  COMPARE(Cmp(x3, -4095), "cmn x3, #0xfff (4095)");
+
+  COMPARE(Cmn(w0, -1), "cmp w0, #0x1 (1)");
+  COMPARE(Cmn(x1, -1), "cmp x1, #0x1 (1)");
+  COMPARE(Cmn(w2, -4095), "cmp w2, #0xfff (4095)");
+  COMPARE(Cmn(x3, -4095), "cmp x3, #0xfff (4095)");
+
+  CLEANUP();
+}
+
+
+TEST_(logical_immediate_move) {
+  SET_UP_CLASS(MacroAssembler);
+
+  // Logical ops with all-zeros or all-ones masks fold to a move form
+  // (mov/movz/movn/mvn). 0xffffffff only covers the low word of an x
+  // register, so those 64-bit cases keep the logical instruction (with the
+  // mask inverted for Bic/Orn/Eon).
+  COMPARE(And(w0, w1, 0), "movz w0, #0x0");
+  COMPARE(And(x0, x1, 0), "movz x0, #0x0");
+  COMPARE(Orr(w2, w3, 0), "mov w2, w3");
+  COMPARE(Orr(x2, x3, 0), "mov x2, x3");
+  COMPARE(Eor(w4, w5, 0), "mov w4, w5");
+  COMPARE(Eor(x4, x5, 0), "mov x4, x5");
+  COMPARE(Bic(w6, w7, 0), "mov w6, w7");
+  COMPARE(Bic(x6, x7, 0), "mov x6, x7");
+  COMPARE(Orn(w8, w9, 0), "movn w8, #0x0");
+  COMPARE(Orn(x8, x9, 0), "movn x8, #0x0");
+  COMPARE(Eon(w10, w11, 0), "mvn w10, w11");
+  COMPARE(Eon(x10, x11, 0), "mvn x10, x11");
+
+  COMPARE(And(w12, w13, 0xffffffff), "mov w12, w13");
+  COMPARE(And(x12, x13, 0xffffffff), "and x12, x13, #0xffffffff");
+  COMPARE(And(x12, x13, 0xffffffffffffffff), "mov x12, x13");
+  COMPARE(Orr(w14, w15, 0xffffffff), "movn w14, #0x0");
+  COMPARE(Orr(x14, x15, 0xffffffff), "orr x14, x15, #0xffffffff");
+  COMPARE(Orr(x14, x15, 0xffffffffffffffff), "movn x14, #0x0");
+  COMPARE(Eor(w16, w17, 0xffffffff), "mvn w16, w17");
+  COMPARE(Eor(x16, x17, 0xffffffff), "eor x16, x17, #0xffffffff");
+  COMPARE(Eor(x16, x17, 0xffffffffffffffff), "mvn x16, x17");
+  COMPARE(Bic(w18, w19, 0xffffffff), "movz w18, #0x0");
+  COMPARE(Bic(x18, x19, 0xffffffff), "and x18, x19, #0xffffffff00000000");
+  COMPARE(Bic(x18, x19, 0xffffffffffffffff), "movz x18, #0x0");
+  COMPARE(Orn(w20, w21, 0xffffffff), "mov w20, w21");
+  COMPARE(Orn(x20, x21, 0xffffffff), "orr x20, x21, #0xffffffff00000000");
+  COMPARE(Orn(x20, x21, 0xffffffffffffffff), "mov x20, x21");
+  COMPARE(Eon(w22, w23, 0xffffffff), "mov w22, w23");
+  COMPARE(Eon(x22, x23, 0xffffffff), "eor x22, x23, #0xffffffff00000000");
+  COMPARE(Eon(x22, x23, 0xffffffffffffffff), "mov x22, x23");
+
+  CLEANUP();
+}
+
+
+TEST_(barriers) {
+  SET_UP_CLASS(MacroAssembler);
+
+  // Memory barriers across all domain/type combinations. Unallocated
+  // "other" barrier types disassemble as "sy" with the raw option bits
+  // appended, e.g. "dmb sy (0b1100)".
+  // DMB
+  COMPARE(Dmb(FullSystem, BarrierAll), "dmb sy");
+  COMPARE(Dmb(FullSystem, BarrierReads), "dmb ld");
+  COMPARE(Dmb(FullSystem, BarrierWrites), "dmb st");
+
+  COMPARE(Dmb(InnerShareable, BarrierAll), "dmb ish");
+  COMPARE(Dmb(InnerShareable, BarrierReads), "dmb ishld");
+  COMPARE(Dmb(InnerShareable, BarrierWrites), "dmb ishst");
+
+  COMPARE(Dmb(NonShareable, BarrierAll), "dmb nsh");
+  COMPARE(Dmb(NonShareable, BarrierReads), "dmb nshld");
+  COMPARE(Dmb(NonShareable, BarrierWrites), "dmb nshst");
+
+  COMPARE(Dmb(OuterShareable, BarrierAll), "dmb osh");
+  COMPARE(Dmb(OuterShareable, BarrierReads), "dmb oshld");
+  COMPARE(Dmb(OuterShareable, BarrierWrites), "dmb oshst");
+
+  COMPARE(Dmb(FullSystem, BarrierOther), "dmb sy (0b1100)");
+  COMPARE(Dmb(InnerShareable, BarrierOther), "dmb sy (0b1000)");
+  COMPARE(Dmb(NonShareable, BarrierOther), "dmb sy (0b0100)");
+  COMPARE(Dmb(OuterShareable, BarrierOther), "dmb sy (0b0000)");
+
+  // DSB
+  COMPARE(Dsb(FullSystem, BarrierAll), "dsb sy");
+  COMPARE(Dsb(FullSystem, BarrierReads), "dsb ld");
+  COMPARE(Dsb(FullSystem, BarrierWrites), "dsb st");
+
+  COMPARE(Dsb(InnerShareable, BarrierAll), "dsb ish");
+  COMPARE(Dsb(InnerShareable, BarrierReads), "dsb ishld");
+  COMPARE(Dsb(InnerShareable, BarrierWrites), "dsb ishst");
+
+  COMPARE(Dsb(NonShareable, BarrierAll), "dsb nsh");
+  COMPARE(Dsb(NonShareable, BarrierReads), "dsb nshld");
+  COMPARE(Dsb(NonShareable, BarrierWrites), "dsb nshst");
+
+  COMPARE(Dsb(OuterShareable, BarrierAll), "dsb osh");
+  COMPARE(Dsb(OuterShareable, BarrierReads), "dsb oshld");
+  COMPARE(Dsb(OuterShareable, BarrierWrites), "dsb oshst");
+
+  COMPARE(Dsb(FullSystem, BarrierOther), "dsb sy (0b1100)");
+  COMPARE(Dsb(InnerShareable, BarrierOther), "dsb sy (0b1000)");
+  COMPARE(Dsb(NonShareable, BarrierOther), "dsb sy (0b0100)");
+  COMPARE(Dsb(OuterShareable, BarrierOther), "dsb sy (0b0000)");
+
+  // ISB
+  COMPARE(Isb(), "isb");
+
+  CLEANUP();
+}
--- /dev/null
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of ARM Limited nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include <stdlib.h>
+#include "cctest.h"
+
+#include "a64/decoder-a64.h"
+#include "a64/disasm-a64.h"
+
+using namespace v8::internal;
+
+TEST(FUZZ_decoder) {
+  // Feed noise into the decoder to check that it doesn't crash.
+  // 43 million = ~1% of the instruction space.
+  static const int instruction_count = 43 * 1024 * 1024;
+
+  // Fixed seed so failures are reproducible.
+  uint16_t seed[3] = {1, 2, 3};
+  seed48(seed);
+
+  Decoder decoder;
+  // NOTE(review): declared as kInstructionSize Instruction elements, but only
+  // buffer[0] is ever written and decoded below -- confirm this sizing is
+  // intentional (it looks like a bytes-vs-elements over-allocation).
+  Instruction buffer[kInstructionSize];
+
+  for (int i = 0; i < instruction_count; i++) {
+    uint32_t instr = mrand48();
+    buffer->SetInstructionBits(instr);
+    decoder.Decode(buffer);
+  }
+}
+
+
+TEST(FUZZ_disasm) {
+  // Feed noise into the disassembler to check that it doesn't crash.
+  // 9 million = ~0.2% of the instruction space.
+  static const int instruction_count = 9 * 1024 * 1024;
+
+  // Fixed seed (different from FUZZ_decoder) so failures are reproducible.
+  uint16_t seed[3] = {42, 43, 44};
+  seed48(seed);
+
+  Decoder decoder;
+  Disassembler disasm;
+  // NOTE(review): declared as kInstructionSize Instruction elements, but only
+  // buffer[0] is ever written and decoded below -- confirm this sizing is
+  // intentional.
+  Instruction buffer[kInstructionSize];
+
+  // The disassembler is driven as a decoder visitor.
+  decoder.AppendVisitor(&disasm);
+  for (int i = 0; i < instruction_count; i++) {
+    uint32_t instr = mrand48();
+    buffer->SetInstructionBits(instr);
+    decoder.Decode(buffer);
+  }
+}
StringHelper::GenerateHashGetHash(masm, r0);
__ pop(kRootRegister);
__ mov(pc, Operand(lr));
+#elif V8_TARGET_ARCH_A64
+ // The A64 assembler usually uses jssp (x28) as a stack pointer, but only csp
+ // is initialized by the calling (C++) code.
+ Register old_stack_pointer = __ StackPointer();
+ __ SetStackPointer(csp);
+ __ Push(root, xzr);
+ __ InitializeRootRegister();
+ __ Mov(x0, 0);
+ __ Mov(x10, Operand(string.at(0)));
+ StringHelper::GenerateHashInit(masm, x0, x10);
+ for (int i = 1; i < string.length(); i++) {
+ __ Mov(x10, Operand(string.at(i)));
+ StringHelper::GenerateHashAddCharacter(masm, x0, x10);
+ }
+ StringHelper::GenerateHashGetHash(masm, x0, x10);
+ __ Pop(xzr, root);
+ __ Ret();
+ __ SetStackPointer(old_stack_pointer);
#elif V8_TARGET_ARCH_MIPS
__ push(kRootRegister);
__ InitializeRootRegister();
__ pop(kRootRegister);
__ jr(ra);
__ nop();
+#else
+#error Unsupported architecture.
#endif
}
__ GetNumberHash(r0, ip);
__ pop(kRootRegister);
__ mov(pc, Operand(lr));
+#elif V8_TARGET_ARCH_A64
+ // The A64 assembler usually uses jssp (x28) as a stack pointer, but only csp
+ // is initialized by the calling (C++) code.
+ Register old_stack_pointer = __ StackPointer();
+ __ SetStackPointer(csp);
+ __ Push(root, xzr);
+ __ InitializeRootRegister();
+ __ Mov(x0, key);
+ __ GetNumberHash(x0, x10);
+ __ Pop(xzr, root);
+ __ Ret();
+ __ SetStackPointer(old_stack_pointer);
#elif V8_TARGET_ARCH_MIPS
__ push(kRootRegister);
__ InitializeRootRegister();
__ pop(kRootRegister);
__ jr(ra);
__ nop();
+#else
+#error Unsupported architecture.
#endif
}
Handle<String> v8_string = factory->NewStringFromOneByte(string);
v8_string->set_hash_field(String::kEmptyHashField);
#ifdef USE_SIMULATOR
- uint32_t codegen_hash =
- reinterpret_cast<uint32_t>(CALL_GENERATED_CODE(hash, 0, 0, 0, 0, 0));
+ uint32_t codegen_hash = static_cast<uint32_t>(
+ reinterpret_cast<uintptr_t>(CALL_GENERATED_CODE(hash, 0, 0, 0, 0, 0)));
#else
uint32_t codegen_hash = hash();
#endif
HASH_FUNCTION hash = FUNCTION_CAST<HASH_FUNCTION>(code->entry());
#ifdef USE_SIMULATOR
- uint32_t codegen_hash =
- reinterpret_cast<uint32_t>(CALL_GENERATED_CODE(hash, 0, 0, 0, 0, 0));
+ uint32_t codegen_hash = static_cast<uint32_t>(
+ reinterpret_cast<uintptr_t>(CALL_GENERATED_CODE(hash, 0, 0, 0, 0, 0)));
#else
uint32_t codegen_hash = hash();
#endif
CHECK(value->IsNumber());
CHECK_EQ(Smi::kMaxValue, Smi::cast(value)->value());
-#ifndef V8_TARGET_ARCH_X64
+#if !defined(V8_TARGET_ARCH_X64) && !defined(V8_TARGET_ARCH_A64)
// TODO(lrn): We need a NumberFromIntptr function in order to test this.
value = heap->NumberFromInt32(Smi::kMinValue - 1)->ToObjectChecked();
CHECK(value->IsHeapNumber());
--- /dev/null
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include <limits.h>
+
+#include "v8.h"
+
+#include "api.h"
+#include "isolate.h"
+#include "compilation-cache.h"
+#include "execution.h"
+#include "snapshot.h"
+#include "platform.h"
+#include "utils.h"
+#include "cctest.h"
+#include "parser.h"
+#include "unicode-inl.h"
+
+using ::v8::Context;
+using ::v8::Extension;
+using ::v8::Function;
+using ::v8::FunctionTemplate;
+using ::v8::Handle;
+using ::v8::HandleScope;
+using ::v8::Local;
+using ::v8::Message;
+using ::v8::MessageCallback;
+using ::v8::Object;
+using ::v8::ObjectTemplate;
+using ::v8::Persistent;
+using ::v8::Script;
+using ::v8::StackTrace;
+using ::v8::String;
+using ::v8::TryCatch;
+using ::v8::Undefined;
+using ::v8::V8;
+using ::v8::Value;
+
+// Checks that |result| is a v8 Boolean equal to |expected|.
+static void ExpectBoolean(bool expected, Local<Value> result) {
+  CHECK(result->IsBoolean());
+  CHECK_EQ(expected, result->BooleanValue());
+}
+
+
+// Checks that |result| is a v8 Int32 equal to |expected|.
+static void ExpectInt32(int32_t expected, Local<Value> result) {
+  CHECK(result->IsInt32());
+  CHECK_EQ(expected, result->Int32Value());
+}
+
+
+// Checks that |result| is a v8 Number equal to |expected|.
+static void ExpectNumber(double expected, Local<Value> result) {
+  CHECK(result->IsNumber());
+  CHECK_EQ(expected, result->NumberValue());
+}
+
+
+// Checks that |result| is the undefined value.
+static void ExpectUndefined(Local<Value> result) {
+  CHECK(result->IsUndefined());
+}
+
+
+// Tests are sorted by order of implementation.
+
+TEST(simple_value) {
+  LocalContext env;
+  v8::HandleScope scope(env->GetIsolate());
+  // A bare literal expression statement evaluates to its own value.
+  Local<Value> result = CompileRun("0x271828;");
+  ExpectInt32(0x271828, result);
+}
+
+
+TEST(global_variable) {
+  LocalContext env;
+  v8::HandleScope scope(env->GetIsolate());
+  // Declare a global and read it back in the same script.
+  Local<Value> result = CompileRun("var my_global_var = 0x123; my_global_var;");
+  ExpectInt32(0x123, result);
+}
+
+
+TEST(simple_function_call) {
+  LocalContext env;
+  v8::HandleScope scope(env->GetIsolate());
+  // A zero-argument call returning a constant.
+  Local<Value> result = CompileRun(
+      "function foo() { return 0x314; }"
+      "foo();");
+  ExpectInt32(0x314, result);
+}
+
+
+TEST(binary_op) {
+  LocalContext env;
+  v8::HandleScope scope(env->GetIsolate());
+  // Mixed SMI arithmetic (add, subtract, multiply) inside a function body.
+  Local<Value> result = CompileRun(
+      "function foo() {"
+      "  var a = 0x1200;"
+      "  var b = 0x0035;"
+      "  return 2 * (a + b - 1);"
+      "}"
+      "foo();");
+  ExpectInt32(0x2468, result);
+}
+
+// Runs "if (lhs <op> rhs)" with the comparison written directly in the if
+// condition (test context), and checks that the script yields |expect|
+// (1 for the then-branch, 0 for the else-branch).
+static void if_comparison_testcontext_helper(
+    char const * op,
+    char const * lhs,
+    char const * rhs,
+    int expect) {
+  char buffer[256];
+  snprintf(buffer, sizeof(buffer),
+           "var lhs = %s;"
+           "var rhs = %s;"
+           "if ( lhs %s rhs ) { 1; }"
+           "else { 0; }",
+           lhs, rhs, op);
+  Local<Value> result = CompileRun(buffer);
+  ExpectInt32(expect, result);
+}
+
+// Same as if_comparison_testcontext_helper, but the comparison result is
+// first stored in a variable (effect context) before being tested by the if.
+static void if_comparison_effectcontext_helper(
+    char const * op,
+    char const * lhs,
+    char const * rhs,
+    int expect) {
+  char buffer[256];
+  snprintf(buffer, sizeof(buffer),
+           "var lhs = %s;"
+           "var rhs = %s;"
+           "var test = lhs %s rhs;"
+           "if ( test ) { 1; }"
+           "else { 0; }",
+           lhs, rhs, op);
+  Local<Value> result = CompileRun(buffer);
+  ExpectInt32(expect, result);
+}
+
+// Exercises |op| with SMI operand pairs that compare <, ==, and >, in both
+// test and effect contexts, checking the expected branch in each case.
+static void if_comparison_helper(
+    char const * op,
+    int expect_when_lt,
+    int expect_when_eq,
+    int expect_when_gt) {
+  // TODO(all): Non-SMI tests.
+
+  if_comparison_testcontext_helper(op, "1", "3", expect_when_lt);
+  if_comparison_testcontext_helper(op, "5", "5", expect_when_eq);
+  if_comparison_testcontext_helper(op, "9", "7", expect_when_gt);
+
+  if_comparison_effectcontext_helper(op, "1", "3", expect_when_lt);
+  if_comparison_effectcontext_helper(op, "5", "5", expect_when_eq);
+  if_comparison_effectcontext_helper(op, "9", "7", expect_when_gt);
+}
+
+
+TEST(if_comparison) {
+  LocalContext env;
+  v8::HandleScope scope(env->GetIsolate());
+
+  // Arguments: expected result when lhs<rhs, lhs==rhs, and lhs>rhs.
+  if_comparison_helper("<", 1, 0, 0);
+  if_comparison_helper("<=", 1, 1, 0);
+  if_comparison_helper("==", 0, 1, 0);
+  if_comparison_helper("===", 0, 1, 0);
+  if_comparison_helper(">=", 0, 1, 1);
+  if_comparison_helper(">", 0, 0, 1);
+  if_comparison_helper("!=", 1, 0, 1);
+  if_comparison_helper("!==", 1, 0, 1);
+}
+
+
+TEST(unary_plus) {
+  LocalContext env;
+  v8::HandleScope scope(env->GetIsolate());
+  Local<Value> result;
+  // Unary + coerces its operand to a number; operand side effects must run.
+  // SMI
+  result = CompileRun("var a = 1234; +a");
+  ExpectInt32(1234, result);
+  // Number
+  result = CompileRun("var a = 1234.5; +a");
+  ExpectNumber(1234.5, result);
+  // String (SMI)
+  result = CompileRun("var a = '1234'; +a");
+  ExpectInt32(1234, result);
+  // String (Number)
+  result = CompileRun("var a = '1234.5'; +a");
+  ExpectNumber(1234.5, result);
+  // Check side effects.
+  result = CompileRun("var a = 1234; +(a = 4321); a");
+  ExpectInt32(4321, result);
+}
+
+
+TEST(unary_minus) {
+  LocalContext env;
+  v8::HandleScope scope(env->GetIsolate());
+  Local<Value> result;
+  // Unary - negates after number coercion; SMI, double, string operands,
+  // and operand side effects.
+  result = CompileRun("var a = 1234; -a");
+  ExpectInt32(-1234, result);
+  result = CompileRun("var a = 1234.5; -a");
+  ExpectNumber(-1234.5, result);
+  result = CompileRun("var a = 1234; -(a = 4321); a");
+  ExpectInt32(4321, result);
+  result = CompileRun("var a = '1234'; -a");
+  ExpectInt32(-1234, result);
+  result = CompileRun("var a = '1234.5'; -a");
+  ExpectNumber(-1234.5, result);
+}
+
+
+TEST(unary_void) {
+  LocalContext env;
+  v8::HandleScope scope(env->GetIsolate());
+  Local<Value> result;
+  // void evaluates its operand (including side effects) and yields undefined.
+  result = CompileRun("var a = 1234; void (a);");
+  ExpectUndefined(result);
+  result = CompileRun("var a = 0; void (a = 42); a");
+  ExpectInt32(42, result);
+  result = CompileRun("var a = 0; void (a = 42);");
+  ExpectUndefined(result);
+}
+
+
+TEST(unary_not) {
+  LocalContext env;
+  v8::HandleScope scope(env->GetIsolate());
+  Local<Value> result;
+  // ! coerces to boolean and negates; !! round-trips truthiness; also used
+  // directly as an if condition.
+  result = CompileRun("var a = 1234; !a");
+  ExpectBoolean(false, result);
+  result = CompileRun("var a = 0; !a");
+  ExpectBoolean(true, result);
+  result = CompileRun("var a = 0; !(a = 1234); a");
+  ExpectInt32(1234, result);
+  result = CompileRun("var a = '1234'; !a");
+  ExpectBoolean(false, result);
+  result = CompileRun("var a = ''; !a");
+  ExpectBoolean(true, result);
+  result = CompileRun("var a = 1234; !!a");
+  ExpectBoolean(true, result);
+  result = CompileRun("var a = 0; !!a");
+  ExpectBoolean(false, result);
+  result = CompileRun("var a = 0; if ( !a ) { 1; } else { 0; }");
+  ExpectInt32(1, result);
+  result = CompileRun("var a = 1; if ( !a ) { 1; } else { 0; }");
+  ExpectInt32(0, result);
+}
--- /dev/null
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Adapted from test/mjsunit/compiler/variables.js
+
+#include <limits.h>
+
+#include "v8.h"
+
+#include "api.h"
+#include "isolate.h"
+#include "compilation-cache.h"
+#include "execution.h"
+#include "snapshot.h"
+#include "platform.h"
+#include "utils.h"
+#include "cctest.h"
+#include "parser.h"
+#include "unicode-inl.h"
+
+using ::v8::Context;
+using ::v8::Extension;
+using ::v8::Function;
+using ::v8::FunctionTemplate;
+using ::v8::Handle;
+using ::v8::HandleScope;
+using ::v8::Local;
+using ::v8::Message;
+using ::v8::MessageCallback;
+using ::v8::Object;
+using ::v8::ObjectTemplate;
+using ::v8::Persistent;
+using ::v8::Script;
+using ::v8::StackTrace;
+using ::v8::String;
+using ::v8::TryCatch;
+using ::v8::Undefined;
+using ::v8::V8;
+using ::v8::Value;
+
+// Checks that |result| is a v8 Int32 equal to |expected|.
+static void ExpectInt32(int32_t expected, Local<Value> result) {
+  CHECK(result->IsInt32());
+  CHECK_EQ(expected, result->Int32Value());
+}
+
+
+// Global variables: a function reads a script-level var.
+TEST(global_variables) {
+  LocalContext env;
+  v8::HandleScope scope(env->GetIsolate());
+  Local<Value> result = CompileRun(
+"var x = 0;"
+"function f0() { return x; }"
+"f0();");
+  ExpectInt32(0, result);
+}
+
+
+// Parameters: an identity function returns its argument.
+TEST(parameters) {
+  LocalContext env;
+  v8::HandleScope scope(env->GetIsolate());
+  Local<Value> result = CompileRun(
+"function f1(x) { return x; }"
+"f1(1);");
+  ExpectInt32(1, result);
+}
+
+
+// Stack-allocated locals: a local with no closure capture.
+TEST(stack_allocated_locals) {
+  LocalContext env;
+  v8::HandleScope scope(env->GetIsolate());
+  Local<Value> result = CompileRun(
+"function f2() { var x = 2; return x; }"
+"f2();");
+  ExpectInt32(2, result);
+}
+
+
+// Context-allocated locals. Local function forces x into f3's context.
+TEST(context_allocated_locals) {
+  LocalContext env;
+  v8::HandleScope scope(env->GetIsolate());
+  Local<Value> result = CompileRun(
+"function f3(x) {"
+// g() exists only to capture x; f3 still returns x directly.
+"  function g() { return x; }"
+"  return x;"
+"}"
+"f3(3);");
+  ExpectInt32(3, result);
+}
+
+
+// Local function reads x from an outer context.
+TEST(read_from_outer_context) {
+  LocalContext env;
+  v8::HandleScope scope(env->GetIsolate());
+  Local<Value> result = CompileRun(
+"function f4(x) {"
// Unlike f3, the closure g() is actually called here.
+"  function g() { return x; }"
+"  return g();"
+"}"
+"f4(4);");
+  ExpectInt32(4, result);
+}
+
+
+// Lookup slots: 'with' forces x to be resolved dynamically at runtime.
+// (The previous comment was a copy-paste of the read_from_outer_context one.)
+TEST(lookup_slots) {
+  LocalContext env;
+  v8::HandleScope scope(env->GetIsolate());
+  Local<Value> result = CompileRun(
+"function f5(x) {"
+"  with ({}) return x;"
+"}"
+"f5(5);");
+  ExpectInt32(5, result);
+}
do { \
ASM("str %%sp, %0" : "=g" (sp_addr)); \
} while (0)
+#elif defined(__AARCH64EL__)
+#define GET_STACK_POINTER() \
+ static int sp_addr = 0; \
+ do { \
+ ASM("mov x16, sp; str x16, %0" : "=g" (sp_addr)); \
+ } while (0)
#elif defined(__MIPSEL__)
#define GET_STACK_POINTER() \
static int sp_addr = 0; \
#include "arm/macro-assembler-arm.h"
#include "arm/regexp-macro-assembler-arm.h"
#endif
+#if V8_TARGET_ARCH_A64
+#include "a64/assembler-a64.h"
+#include "a64/macro-assembler-a64.h"
+#include "a64/regexp-macro-assembler-a64.h"
+#endif
#if V8_TARGET_ARCH_MIPS
#include "mips/assembler-mips.h"
#include "mips/macro-assembler-mips.h"
typedef RegExpMacroAssemblerX64 ArchRegExpMacroAssembler;
#elif V8_TARGET_ARCH_ARM
typedef RegExpMacroAssemblerARM ArchRegExpMacroAssembler;
+#elif V8_TARGET_ARCH_A64
+typedef RegExpMacroAssemblerA64 ArchRegExpMacroAssembler;
#elif V8_TARGET_ARCH_MIPS
typedef RegExpMacroAssemblerMIPS ArchRegExpMacroAssembler;
#endif
--- /dev/null
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "macro-assembler.h"
+#include "a64/utils-a64.h"
+#include "cctest.h"
+#include "test-utils-a64.h"
+
+using namespace v8::internal;
+
+
+#define __ masm->
+
+
+bool Equal32(uint32_t expected, const RegisterDump*, uint32_t result) {
+ if (result != expected) {
+ printf("Expected 0x%08" PRIx32 "\t Found 0x%08" PRIx32 "\n",
+ expected, result);
+ }
+
+ return expected == result;
+}
+
+
+bool Equal64(uint64_t expected, const RegisterDump*, uint64_t result) {
+ if (result != expected) {
+ printf("Expected 0x%016" PRIx64 "\t Found 0x%016" PRIx64 "\n",
+ expected, result);
+ }
+
+ return expected == result;
+}
+
+
+bool EqualFP32(float expected, const RegisterDump*, float result) {
+ if (float_to_rawbits(expected) == float_to_rawbits(result)) {
+ return true;
+ } else {
+ if (isnan(expected) || (expected == 0.0)) {
+ printf("Expected 0x%08" PRIx32 "\t Found 0x%08" PRIx32 "\n",
+ float_to_rawbits(expected), float_to_rawbits(result));
+ } else {
+ printf("Expected %.9f (0x%08" PRIx32 ")\t "
+ "Found %.9f (0x%08" PRIx32 ")\n",
+ expected, float_to_rawbits(expected),
+ result, float_to_rawbits(result));
+ }
+ return false;
+ }
+}
+
+
+bool EqualFP64(double expected, const RegisterDump*, double result) {
+ if (double_to_rawbits(expected) == double_to_rawbits(result)) {
+ return true;
+ }
+
+ if (isnan(expected) || (expected == 0.0)) {
+ printf("Expected 0x%016" PRIx64 "\t Found 0x%016" PRIx64 "\n",
+ double_to_rawbits(expected), double_to_rawbits(result));
+ } else {
+ printf("Expected %.17f (0x%016" PRIx64 ")\t "
+ "Found %.17f (0x%016" PRIx64 ")\n",
+ expected, double_to_rawbits(expected),
+ result, double_to_rawbits(result));
+ }
+ return false;
+}
+
+
+bool Equal32(uint32_t expected, const RegisterDump* core, const Register& reg) {
+ ASSERT(reg.Is32Bits());
+ // Retrieve the corresponding X register so we can check that the upper part
+ // was properly cleared.
+ int64_t result_x = core->xreg(reg.code());
+ if ((result_x & 0xffffffff00000000L) != 0) {
+ printf("Expected 0x%08" PRIx32 "\t Found 0x%016" PRIx64 "\n",
+ expected, result_x);
+ return false;
+ }
+ uint32_t result_w = core->wreg(reg.code());
+ return Equal32(expected, core, result_w);
+}
+
+
+bool Equal64(uint64_t expected,
+ const RegisterDump* core,
+ const Register& reg) {
+ ASSERT(reg.Is64Bits());
+ uint64_t result = core->xreg(reg.code());
+ return Equal64(expected, core, result);
+}
+
+
+bool EqualFP32(float expected,
+ const RegisterDump* core,
+ const FPRegister& fpreg) {
+ ASSERT(fpreg.Is32Bits());
+ // Retrieve the corresponding D register so we can check that the upper part
+ // was properly cleared.
+ uint64_t result_64 = core->dreg_bits(fpreg.code());
+ if ((result_64 & 0xffffffff00000000L) != 0) {
+ printf("Expected 0x%08" PRIx32 " (%f)\t Found 0x%016" PRIx64 "\n",
+ float_to_rawbits(expected), expected, result_64);
+ return false;
+ }
+
+ return EqualFP32(expected, core, core->sreg(fpreg.code()));
+}
+
+
+bool EqualFP64(double expected,
+ const RegisterDump* core,
+ const FPRegister& fpreg) {
+ ASSERT(fpreg.Is64Bits());
+ return EqualFP64(expected, core, core->dreg(fpreg.code()));
+}
+
+
+bool Equal64(const Register& reg0,
+ const RegisterDump* core,
+ const Register& reg1) {
+ ASSERT(reg0.Is64Bits() && reg1.Is64Bits());
+ int64_t expected = core->xreg(reg0.code());
+ int64_t result = core->xreg(reg1.code());
+ return Equal64(expected, core, result);
+}
+
+
+static char FlagN(uint32_t flags) {
+ return (flags & NFlag) ? 'N' : 'n';
+}
+
+
+static char FlagZ(uint32_t flags) {
+ return (flags & ZFlag) ? 'Z' : 'z';
+}
+
+
+static char FlagC(uint32_t flags) {
+ return (flags & CFlag) ? 'C' : 'c';
+}
+
+
+static char FlagV(uint32_t flags) {
+ return (flags & VFlag) ? 'V' : 'v';
+}
+
+
+bool EqualNzcv(uint32_t expected, uint32_t result) {
+ ASSERT((expected & ~NZCVFlag) == 0);
+ ASSERT((result & ~NZCVFlag) == 0);
+ if (result != expected) {
+ printf("Expected: %c%c%c%c\t Found: %c%c%c%c\n",
+ FlagN(expected), FlagZ(expected), FlagC(expected), FlagV(expected),
+ FlagN(result), FlagZ(result), FlagC(result), FlagV(result));
+ return false;
+ }
+
+ return true;
+}
+
+
+bool EqualRegisters(const RegisterDump* a, const RegisterDump* b) {
+ for (unsigned i = 0; i < kNumberOfRegisters; i++) {
+ if (a->xreg(i) != b->xreg(i)) {
+ printf("x%d\t Expected 0x%016" PRIx64 "\t Found 0x%016" PRIx64 "\n",
+ i, a->xreg(i), b->xreg(i));
+ return false;
+ }
+ }
+
+ for (unsigned i = 0; i < kNumberOfFPRegisters; i++) {
+ uint64_t a_bits = a->dreg_bits(i);
+ uint64_t b_bits = b->dreg_bits(i);
+ if (a_bits != b_bits) {
+ printf("d%d\t Expected 0x%016" PRIx64 "\t Found 0x%016" PRIx64 "\n",
+ i, a_bits, b_bits);
+ return false;
+ }
+ }
+
+ return true;
+}
+
+
+RegList PopulateRegisterArray(Register* w, Register* x, Register* r,
+ int reg_size, int reg_count, RegList allowed) {
+ RegList list = 0;
+ int i = 0;
+ for (unsigned n = 0; (n < kNumberOfRegisters) && (i < reg_count); n++) {
+ if (((1UL << n) & allowed) != 0) {
+ // Only assign allowed registers.
+ if (r) {
+ r[i] = Register::Create(n, reg_size);
+ }
+ if (x) {
+ x[i] = Register::Create(n, kXRegSize);
+ }
+ if (w) {
+ w[i] = Register::Create(n, kWRegSize);
+ }
+ list |= (1UL << n);
+ i++;
+ }
+ }
+ // Check that we got enough registers.
+ ASSERT(CountSetBits(list, kNumberOfRegisters) == reg_count);
+
+ return list;
+}
+
+
+RegList PopulateFPRegisterArray(FPRegister* s, FPRegister* d, FPRegister* v,
+ int reg_size, int reg_count, RegList allowed) {
+ RegList list = 0;
+ int i = 0;
+ for (unsigned n = 0; (n < kNumberOfFPRegisters) && (i < reg_count); n++) {
+ if (((1UL << n) & allowed) != 0) {
+ // Only assign allowed registers.
+ if (v) {
+ v[i] = FPRegister::Create(n, reg_size);
+ }
+ if (d) {
+ d[i] = FPRegister::Create(n, kDRegSize);
+ }
+ if (s) {
+ s[i] = FPRegister::Create(n, kSRegSize);
+ }
+ list |= (1UL << n);
+ i++;
+ }
+ }
+ // Check that we got enough registers.
+ ASSERT(CountSetBits(list, kNumberOfFPRegisters) == reg_count);
+
+ return list;
+}
+
+
+void Clobber(MacroAssembler* masm, RegList reg_list, uint64_t const value) {
+ Register first = NoReg;
+ for (unsigned i = 0; i < kNumberOfRegisters; i++) {
+ if (reg_list & (1UL << i)) {
+ Register xn = Register::Create(i, kXRegSize);
+ // We should never write into csp here.
+ ASSERT(!xn.Is(csp));
+ if (!xn.IsZero()) {
+ if (!first.IsValid()) {
+ // This is the first register we've hit, so construct the literal.
+ __ Mov(xn, value);
+ first = xn;
+ } else {
+ // We've already loaded the literal, so re-use the value already
+ // loaded into the first register we hit.
+ __ Mov(xn, first);
+ }
+ }
+ }
+ }
+}
+
+
+void ClobberFP(MacroAssembler* masm, RegList reg_list, double const value) {
+ FPRegister first = NoFPReg;
+ for (unsigned i = 0; i < kNumberOfFPRegisters; i++) {
+ if (reg_list & (1UL << i)) {
+ FPRegister dn = FPRegister::Create(i, kDRegSize);
+ if (!first.IsValid()) {
+ // This is the first register we've hit, so construct the literal.
+ __ Fmov(dn, value);
+ first = dn;
+ } else {
+ // We've already loaded the literal, so re-use the value already loaded
+ // into the first register we hit.
+ __ Fmov(dn, first);
+ }
+ }
+ }
+}
+
+
+void Clobber(MacroAssembler* masm, CPURegList reg_list) {
+ if (reg_list.type() == CPURegister::kRegister) {
+ // This will always clobber X registers.
+ Clobber(masm, reg_list.list());
+ } else if (reg_list.type() == CPURegister::kFPRegister) {
+ // This will always clobber D registers.
+ ClobberFP(masm, reg_list.list());
+ } else {
+ UNREACHABLE();
+ }
+}
+
+
+void RegisterDump::Dump(MacroAssembler* masm) {
+ ASSERT(__ StackPointer().Is(csp));
+
+ // Ensure that we don't unintentionally clobber any registers.
+ Register old_tmp0 = __ Tmp0();
+ Register old_tmp1 = __ Tmp1();
+ FPRegister old_fptmp0 = __ FPTmp0();
+ __ SetScratchRegisters(NoReg, NoReg);
+ __ SetFPScratchRegister(NoFPReg);
+
+ // Preserve some temporary registers.
+ Register dump_base = x0;
+ Register dump = x1;
+ Register tmp = x2;
+ Register dump_base_w = dump_base.W();
+ Register dump_w = dump.W();
+ Register tmp_w = tmp.W();
+
+ // Offsets into the dump_ structure.
+ const int x_offset = offsetof(dump_t, x_);
+ const int w_offset = offsetof(dump_t, w_);
+ const int d_offset = offsetof(dump_t, d_);
+ const int s_offset = offsetof(dump_t, s_);
+ const int sp_offset = offsetof(dump_t, sp_);
+ const int wsp_offset = offsetof(dump_t, wsp_);
+ const int flags_offset = offsetof(dump_t, flags_);
+
+ __ Push(xzr, dump_base, dump, tmp);
+
+ // Load the address where we will dump the state.
+ __ Mov(dump_base, reinterpret_cast<uint64_t>(&dump_));
+
+ // Dump the stack pointer (csp and wcsp).
+ // The stack pointer cannot be stored directly; it needs to be moved into
+ // another register first. Also, we pushed four X registers, so we need to
+ // compensate here.
+ __ Add(tmp, csp, 4 * kXRegSizeInBytes);
+ __ Str(tmp, MemOperand(dump_base, sp_offset));
+ __ Add(tmp_w, wcsp, 4 * kXRegSizeInBytes);
+ __ Str(tmp_w, MemOperand(dump_base, wsp_offset));
+
+ // Dump X registers.
+ __ Add(dump, dump_base, x_offset);
+ for (unsigned i = 0; i < kNumberOfRegisters; i += 2) {
+ __ Stp(Register::XRegFromCode(i), Register::XRegFromCode(i + 1),
+ MemOperand(dump, i * kXRegSizeInBytes));
+ }
+
+ // Dump W registers.
+ __ Add(dump, dump_base, w_offset);
+ for (unsigned i = 0; i < kNumberOfRegisters; i += 2) {
+ __ Stp(Register::WRegFromCode(i), Register::WRegFromCode(i + 1),
+ MemOperand(dump, i * kWRegSizeInBytes));
+ }
+
+ // Dump D registers.
+ __ Add(dump, dump_base, d_offset);
+ for (unsigned i = 0; i < kNumberOfFPRegisters; i += 2) {
+ __ Stp(FPRegister::DRegFromCode(i), FPRegister::DRegFromCode(i + 1),
+ MemOperand(dump, i * kDRegSizeInBytes));
+ }
+
+ // Dump S registers.
+ __ Add(dump, dump_base, s_offset);
+ for (unsigned i = 0; i < kNumberOfFPRegisters; i += 2) {
+ __ Stp(FPRegister::SRegFromCode(i), FPRegister::SRegFromCode(i + 1),
+ MemOperand(dump, i * kSRegSizeInBytes));
+ }
+
+ // Dump the flags.
+ __ Mrs(tmp, NZCV);
+ __ Str(tmp, MemOperand(dump_base, flags_offset));
+
+ // To dump the values that were in tmp and dump, we need a new scratch
+ // register. We can use any of the already dumped registers since we can
+ // easily restore them.
+ Register dump2_base = x10;
+ Register dump2 = x11;
+ ASSERT(!AreAliased(dump_base, dump, tmp, dump2_base, dump2));
+
+ // Don't lose the dump_ address.
+ __ Mov(dump2_base, dump_base);
+
+ __ Pop(tmp, dump, dump_base, xzr);
+
+ __ Add(dump2, dump2_base, w_offset);
+ __ Str(dump_base_w, MemOperand(dump2, dump_base.code() * kWRegSizeInBytes));
+ __ Str(dump_w, MemOperand(dump2, dump.code() * kWRegSizeInBytes));
+ __ Str(tmp_w, MemOperand(dump2, tmp.code() * kWRegSizeInBytes));
+
+ __ Add(dump2, dump2_base, x_offset);
+ __ Str(dump_base, MemOperand(dump2, dump_base.code() * kXRegSizeInBytes));
+ __ Str(dump, MemOperand(dump2, dump.code() * kXRegSizeInBytes));
+ __ Str(tmp, MemOperand(dump2, tmp.code() * kXRegSizeInBytes));
+
+ // Finally, restore dump2_base and dump2.
+ __ Ldr(dump2_base, MemOperand(dump2, dump2_base.code() * kXRegSizeInBytes));
+ __ Ldr(dump2, MemOperand(dump2, dump2.code() * kXRegSizeInBytes));
+
+ // Restore the MacroAssembler's scratch registers.
+ __ SetScratchRegisters(old_tmp0, old_tmp1);
+ __ SetFPScratchRegister(old_fptmp0);
+
+ completed_ = true;
+}
--- /dev/null
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_A64_TEST_UTILS_A64_H_
+#define V8_A64_TEST_UTILS_A64_H_
+
+#include "v8.h"
+
+#include "macro-assembler.h"
+#include "a64/macro-assembler-a64.h"
+#include "a64/utils-a64.h"
+#include "cctest.h"
+
+
+using namespace v8::internal;
+
+
+// RegisterDump: Object allowing integer, floating point and flags registers
+// to be saved to itself for future reference.
+class RegisterDump {
+ public:
+ RegisterDump() : completed_(false) {}
+
+ // The Dump method generates code to store a snapshot of the register values.
+ // It needs to be able to use the stack temporarily, and requires that the
+ // current stack pointer is csp, and is properly aligned.
+ //
+ // The dumping code is generated through the given MacroAssembler. No registers
+ // are corrupted in the process, but the stack is used briefly. The flags will
+ // be corrupted during this call.
+ void Dump(MacroAssembler* assm);
+
+ // Register accessors.
+ inline int32_t wreg(unsigned code) const {
+ if (code == kSPRegInternalCode) {
+ return wspreg();
+ }
+ ASSERT(RegAliasesMatch(code));
+ return dump_.w_[code];
+ }
+
+ inline int64_t xreg(unsigned code) const {
+ if (code == kSPRegInternalCode) {
+ return spreg();
+ }
+ ASSERT(RegAliasesMatch(code));
+ return dump_.x_[code];
+ }
+
+ // FPRegister accessors.
+ inline uint32_t sreg_bits(unsigned code) const {
+ ASSERT(FPRegAliasesMatch(code));
+ return dump_.s_[code];
+ }
+
+ inline float sreg(unsigned code) const {
+ return rawbits_to_float(sreg_bits(code));
+ }
+
+ inline uint64_t dreg_bits(unsigned code) const {
+ ASSERT(FPRegAliasesMatch(code));
+ return dump_.d_[code];
+ }
+
+ inline double dreg(unsigned code) const {
+ return rawbits_to_double(dreg_bits(code));
+ }
+
+ // Stack pointer accessors.
+ inline int64_t spreg() const {
+ ASSERT(SPRegAliasesMatch());
+ return dump_.sp_;
+ }
+
+ inline int64_t wspreg() const {
+ ASSERT(SPRegAliasesMatch());
+ return dump_.wsp_;
+ }
+
+ // Flags accessors.
+ inline uint64_t flags_nzcv() const {
+ ASSERT(IsComplete());
+ ASSERT((dump_.flags_ & ~Flags_mask) == 0);
+ return dump_.flags_ & Flags_mask;
+ }
+
+ inline bool IsComplete() const {
+ return completed_;
+ }
+
+ private:
+ // Indicate whether the dump operation has been completed.
+ bool completed_;
+
+ // Check that the lower 32 bits of x<code> exactly match the 32 bits of
+ // w<code>. A failure of this test most likely represents a failure in the
+ // ::Dump method, or a failure in the simulator.
+ bool RegAliasesMatch(unsigned code) const {
+ ASSERT(IsComplete());
+ ASSERT(code < kNumberOfRegisters);
+ return ((dump_.x_[code] & kWRegMask) == dump_.w_[code]);
+ }
+
+ // As RegAliasesMatch, but for the stack pointer.
+ bool SPRegAliasesMatch() const {
+ ASSERT(IsComplete());
+ return ((dump_.sp_ & kWRegMask) == dump_.wsp_);
+ }
+
+ // As RegAliasesMatch, but for floating-point registers.
+ bool FPRegAliasesMatch(unsigned code) const {
+ ASSERT(IsComplete());
+ ASSERT(code < kNumberOfFPRegisters);
+ return (dump_.d_[code] & kSRegMask) == dump_.s_[code];
+ }
+
+ // Store all the dumped elements in a simple struct so the implementation can
+ // use offsetof to quickly find the correct field.
+ struct dump_t {
+ // Core registers.
+ uint64_t x_[kNumberOfRegisters];
+ uint32_t w_[kNumberOfRegisters];
+
+ // Floating-point registers, as raw bits.
+ uint64_t d_[kNumberOfFPRegisters];
+ uint32_t s_[kNumberOfFPRegisters];
+
+ // The stack pointer.
+ uint64_t sp_;
+ uint64_t wsp_;
+
+ // NZCV flags, stored in bits 28 to 31.
+ // bit[31] : Negative
+ // bit[30] : Zero
+ // bit[29] : Carry
+ // bit[28] : oVerflow
+ uint64_t flags_;
+ } dump_;
+
+ STATIC_ASSERT(sizeof(dump_.d_[0]) == kDRegSizeInBytes);
+ STATIC_ASSERT(sizeof(dump_.s_[0]) == kSRegSizeInBytes);
+ STATIC_ASSERT(sizeof(dump_.d_[0]) == kXRegSizeInBytes);
+ STATIC_ASSERT(sizeof(dump_.s_[0]) == kWRegSizeInBytes);
+ STATIC_ASSERT(sizeof(dump_.x_[0]) == kXRegSizeInBytes);
+ STATIC_ASSERT(sizeof(dump_.w_[0]) == kWRegSizeInBytes);
+};
+
+// Some of these methods don't use the RegisterDump argument, but they have to
+// accept it so that they can overload those that take register arguments.
+bool Equal32(uint32_t expected, const RegisterDump*, uint32_t result);
+bool Equal64(uint64_t expected, const RegisterDump*, uint64_t result);
+
+bool EqualFP32(float expected, const RegisterDump*, float result);
+bool EqualFP64(double expected, const RegisterDump*, double result);
+
+bool Equal32(uint32_t expected, const RegisterDump* core, const Register& reg);
+bool Equal64(uint64_t expected, const RegisterDump* core, const Register& reg);
+
+bool EqualFP32(float expected, const RegisterDump* core,
+ const FPRegister& fpreg);
+bool EqualFP64(double expected, const RegisterDump* core,
+ const FPRegister& fpreg);
+
+bool Equal64(const Register& reg0, const RegisterDump* core,
+ const Register& reg1);
+
+bool EqualNzcv(uint32_t expected, uint32_t result);
+
+bool EqualRegisters(const RegisterDump* a, const RegisterDump* b);
+
+// Populate the w, x and r arrays with registers from the 'allowed' mask. The
+// r array will be populated with <reg_size>-sized registers.
+//
+// This allows for tests which use large, parameterized blocks of registers
+// (such as the push and pop tests), but where certain registers must be
+// avoided as they are used for other purposes.
+//
+// Any of w, x, or r can be NULL if they are not required.
+//
+// The return value is a RegList indicating which registers were allocated.
+RegList PopulateRegisterArray(Register* w, Register* x, Register* r,
+ int reg_size, int reg_count, RegList allowed);
+
+// As PopulateRegisterArray, but for floating-point registers.
+RegList PopulateFPRegisterArray(FPRegister* s, FPRegister* d, FPRegister* v,
+ int reg_size, int reg_count, RegList allowed);
+
+// Overwrite the contents of the specified registers. This enables tests to
+// check that register contents are written in cases where it's likely that the
+// correct outcome could already be stored in the register.
+//
+// This always overwrites X-sized registers. If tests are operating on W
+// registers, a subsequent write into an aliased W register should clear the
+// top word anyway, so clobbering the full X registers should make tests more
+// rigorous.
+void Clobber(MacroAssembler* masm, RegList reg_list,
+ uint64_t const value = 0xfedcba9876543210UL);
+
+// As Clobber, but for FP registers.
+void ClobberFP(MacroAssembler* masm, RegList reg_list,
+ double const value = kFP64SignallingNaN);
+
+// As Clobber, but for a CPURegList with either FP or integer registers. When
+// using this method, the clobber value is always the default for the basic
+// Clobber or ClobberFP functions.
+void Clobber(MacroAssembler* masm, CPURegList reg_list);
+
+#endif // V8_A64_TEST_UTILS_A64_H_
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+# The following tests use getDefaultTimeZone().
[
[ALWAYS, {
- # The following tests use getDefaultTimeZone().
'date-format/resolved-options': [FAIL],
'date-format/timezone': [FAIL],
'general/v8Intl-exists': [FAIL],
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+# All tests in the bug directory are expected to fail.
[
[ALWAYS, {
- # All tests in the bug directory are expected to fail.
'bugs/*': [FAIL],
}], # ALWAYS
+
+['arch == a64', {
+ # Message tests won't pass until all UNIMPLEMENTED messages are removed.
+ 'regress/regress-75': [SKIP],
+ 'regress/regress-73': [SKIP],
+ 'regress/regress-1527': [SKIP],
+ 'try-finally-throw-in-try': [SKIP],
+ 'try-finally-throw-in-finally': [SKIP],
+ 'try-finally-return-in-finally': [SKIP],
+ 'try-catch-finally-throw-in-finally': [SKIP],
+ 'try-catch-finally-throw-in-catch-and-finally': [SKIP],
+ 'try-catch-finally-throw-in-catch': [SKIP],
+ 'try-catch-finally-return-in-finally': [SKIP],
+ 'try-catch-finally-no-message': [SKIP],
+ 'simple-throw': [SKIP],
+ 'replacement-marker-as-argument': [SKIP],
+ 'overwritten-builtins': [SKIP],
+ 'try-finally-throw-in-try-and-finally': [SKIP],
+}], # 'arch == a64'
]
return f.read()
def _IgnoreLine(self, string):
- """Ignore empty lines, valgrind output and Android output."""
+ """Ignore empty lines, valgrind output, Android output."""
if not string: return True
return (string.startswith("==") or string.startswith("**") or
string.startswith("ANDROID") or
--- /dev/null
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// This test requires OSR or --stress-runs=3 to optimize the top level script.
+
+for (var i = 0; i < 3; i++) {
+ // HToFastProperties is used for top-level object literals that have
+ // function property.
+ var obj = {
+ index: function() { return i; },
+ x: 0
+ }
+ var n = 10000;
+ // Loop to hit OSR.
+ for (var j = 0; j < n; j++) {
+ obj.x += i;
+ }
+ assertEquals(obj.index() * n, obj.x);
+}
--- /dev/null
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax
+
+deopt_trigger = 0;
+side_effect = 0;
+
+function test(a, b, c, d, e, v) {
+ // This test expects some specific input values.
+ assertEquals(10.0, a);
+ assertEquals(20.0, b);
+ assertEquals(30.0, c);
+ assertEquals(40.0, d);
+ assertEquals(50.0, e);
+ assertEquals(1.5, v);
+
+ // Perform a few double calculations.
+ a = a * 0.1;
+ b = b * 0.2;
+ c = c * 0.3;
+ d = d * 0.4;
+ e = e * 0.5;
+
+ // Write to a field of a global object. As for any side effect, a HSimulate
+ // will be introduced after the instructions to support this. If we deopt
+ // later in this function, the execution will resume in full-codegen after
+ // this point.
+ side_effect++;
+ // The following field of the global object will be deleted to force a deopt.
+ // If we use type feedback to deopt, then tests ran with --stress-opt will
+ // not deopt after a few iteration.
+ // If we use %DeoptimizeFunction, all values will be on the frame due to the
+ // call and we will not exercise the translation mechanism handling fp
+ // registers.
+ deopt_trigger = v;
+
+ // Do a few more calculations using the previous values after our deopt point
+ // so the floating point registers which hold those values are recorded in the
+ // environment and will be used during deoptimization.
+ a = a * v;
+ b = b * v;
+ c = c * v;
+ d = d * v;
+ e = e * v;
+
+ // Check that we got the expected results.
+ assertEquals(1.5, a);
+ assertEquals(6, b);
+ assertEquals(13.5, c);
+ assertEquals(24, d);
+ assertEquals(37.5, e);
+}
+
+
+test(10.0, 20.0, 30.0, 40.0, 50.0, 1.5);
+test(10.0, 20.0, 30.0, 40.0, 50.0, 1.5);
+%OptimizeFunctionOnNextCall(test);
+test(10.0, 20.0, 30.0, 40.0, 50.0, 1.5);
+assertTrue(2 != %GetOptimizationStatus(test));
+
+// By deleting the field we are forcing the code to deopt when the field is
+// read on next execution.
+delete deopt_trigger;
+test(10.0, 20.0, 30.0, 40.0, 50.0, 1.5);
+assertTrue(1 != %GetOptimizationStatus(test));
var example_numbers = [
NaN,
0,
+
+ // Due to a bug in fmod(), modulos involving denormals
+ // return the wrong result for glibc <= 2.16.
+ // Details: http://sourceware.org/bugzilla/show_bug.cgi?id=14048
+
Number.MIN_VALUE,
3 * Number.MIN_VALUE,
max_denormal,
+
min_normal,
repeating_decimal,
finite_decimal,
--- /dev/null
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Exercises ArgumentsAccessStub::GenerateNewNonStrictSlow.
+
+function f(a, a) {
+ assertEquals(2, a);
+ assertEquals(1, arguments[0]);
+ assertEquals(2, arguments[1]);
+}
+
+f(1, 2);
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --harmony-proxies
+// We change the stack size for the A64 simulator because at one point this test
+// enters an infinite recursion which goes through the runtime and we overflow
+// the system stack before the simulator stack.
+
+// Flags: --harmony-proxies --sim-stack-size=500
// Helper.
'regress/regress-2249': [SKIP],
}], # 'gc_stress == True'
+##############################################################################
+['arch == a64', {
+
+  # Requires bigger stack size in the Genesis and if stack size is increased,
+  # the test requires too much time to run.  However, the problem the test
+  # covers should be platform-independent.
+ 'regress/regress-1132': [SKIP],
+
+ # Pass but take too long to run. Skip.
+ # Some similar tests (with fewer iterations) may be included in a64-js tests.
+ 'compiler/regress-arguments': [SKIP],
+ 'compiler/regress-gvn': [SKIP],
+ 'compiler/regress-max-locals-for-osr': [SKIP],
+ 'compiler/regress-4': [SKIP],
+ 'compiler/regress-or': [SKIP],
+ 'compiler/regress-rep-change': [SKIP],
+ 'regress/regress-1117': [SKIP],
+ 'regress/regress-1145': [SKIP],
+ 'regress/regress-1849': [SKIP],
+ 'regress/regress-3247124': [SKIP],
+ 'regress/regress-634': [SKIP],
+ 'regress/regress-91008': [SKIP],
+ 'regress/regress-91010': [SKIP],
+ 'regress/regress-91013': [SKIP],
+ 'regress/regress-99167': [SKIP],
+
+ # Long running tests.
+ 'regress/regress-2185': [PASS, ['mode == debug', PASS, TIMEOUT]],
+ 'regress/regress-2185-2': [PASS, TIMEOUT],
+
+  # Stack manipulations in LiveEdit are not implemented for this arch.
+ 'debug-liveedit-check-stack': [SKIP],
+ 'debug-liveedit-stack-padding': [SKIP],
+ 'debug-liveedit-restart-frame': [SKIP],
+ 'debug-liveedit-double-call': [SKIP],
+
+ # BUG(v8:3147). It works on other architectures by accident.
+ 'regress/regress-conditional-position': [FAIL],
+
+ # BUG(v8:3149). Incorrect register usage exposed in debug mode.
+ 'sin-cos': [PASS, ['mode == debug', FAIL]],
+}], # 'arch == a64'
+
+['arch == a64 and mode == debug and simulator_run == True', {
+
+ # Pass but take too long with the simulator in debug mode.
+ 'array-sort': [PASS, TIMEOUT],
+ 'packed-elements': [SKIP],
+ 'regexp-global': [SKIP],
+ 'compiler/alloc-numbers': [SKIP],
+ 'harmony/symbols': [SKIP],
+}], # 'arch == a64 and mode == debug and simulator_run == True'
+
##############################################################################
['asan == True', {
# Skip tests not suitable for ASAN.
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --expose-debug-as debug --nostack-trace-on-abort --stack-size=100
+// Flags: --expose-debug-as debug --nostack-trace-on-abort --stack-size=150
function f() {
var i = 0;
--- /dev/null
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax --noalways-opt
+
+function check(func, input, expected) {
+ func(-1);
+ func(-1);
+ %OptimizeFunctionOnNextCall(func);
+ assertEquals(expected, func(input));
+ assertOptimized(func);
+}
+
+function mul_by_neg_1(a) { return a * -1; }
+function mul_by_0(a) { return a * 0; }
+function mul_by_1(a) { return a * 1; }
+function mul_by_2(a) { return a * 2; }
+
+check(mul_by_neg_1, 2, -2);
+check(mul_by_0, 2, 0);
+check(mul_by_1, 2, 2);
+check(mul_by_2, 2, 4);
+
+function limit_range(a) {
+ // Limit the range of 'a' to enable no-overflow optimizations.
+ return Math.max(Math.min(a | 0, 10), -10);
+}
+
+function mul_by_neg_127(a) { return limit_range(a) * -127; }
+function mul_by_neg_128(a) { return limit_range(a) * -128; }
+function mul_by_neg_129(a) { return limit_range(a) * -129; }
+function mul_by_1023(a) { return limit_range(a) * 1023; }
+function mul_by_1024(a) { return limit_range(a) * 1024; }
+function mul_by_1025(a) { return limit_range(a) * 1025; }
+
+check(mul_by_neg_127, 2, -254);
+check(mul_by_neg_128, 2, -256);
+check(mul_by_neg_129, 2, -258);
+check(mul_by_1023, 2, 2046);
+check(mul_by_1024, 2, 2048);
+check(mul_by_1025, 2, 2050);
+
+// Deopt on minus zero.
+assertEquals(-0, mul_by_neg_128(0));
+assertUnoptimized(mul_by_neg_128);
+assertEquals(-0, mul_by_2(-0));
+assertUnoptimized(mul_by_2);
+
+// Deopt on overflow.
+
+// 2^30 is a smi boundary on arm and ia32.
+var two_30 = 1 << 30;
+// 2^31 is a smi boundary on a64 and x64.
+var two_31 = 2 * two_30;
+
+// TODO(rmcilroy): replace after r16361 with: if (%IsValidSmi(two_31)) {
+if (true) {
+ assertEquals(two_31, mul_by_neg_1(-two_31));
+ assertUnoptimized(mul_by_neg_1);
+} else {
+ assertEquals(two_30, mul_by_neg_1(-two_30));
+ assertUnoptimized(mul_by_neg_1);
+}
}], # ALWAYS
-['arch == arm', {
+['arch == arm or arch == a64', {
# BUG(3251229): Times out when running new crankshaft test script.
'ecma_3/RegExp/regress-311414': [SKIP],
# BUG(1040): Allow this test to timeout.
'js1_5/GC/regress-203278-2': [PASS, TIMEOUT, NO_VARIANTS],
-}], # 'arch == arm'
+}], # 'arch == arm or arch == a64'
+
+
+['arch == a64', {
+ # BUG(v8:3148): Invalid branch instruction emitted.
+ 'ecma/Date/15.9.5.26-1': [SKIP],
+ 'js1_5/extensions/regress-396326': [SKIP],
+ 'js1_5/Regress/regress-80981': [SKIP],
+ 'ecma/Date/15.9.5.28-1': [PASS, ['mode == debug', SKIP]],
+
+ # BUG(v8:3152): Runs out of stack in debug mode.
+ 'js1_5/extensions/regress-355497': [FAIL_OK, ['mode == debug', SKIP]],
+}], # 'arch == a64'
['arch == mipsel', {
# BUG(1040): Allow this test to timeout.
'js1_5/GC/regress-203278-2': [PASS, TIMEOUT, NO_VARIANTS],
}], # 'arch == mipsel'
+
+['arch == a64 and simulator_run == True', {
+
+ 'js1_5/GC/regress-203278-2': [SKIP],
+
+ # These tests time out in debug mode but pass in product mode
+ 'js1_5/Regress/regress-360969-03': [SKIP],
+ 'js1_5/Regress/regress-360969-04': [SKIP],
+ 'js1_5/Regress/regress-360969-05': [SKIP],
+ 'js1_5/Regress/regress-360969-06': [SKIP],
+ 'js1_5/extensions/regress-365527': [SKIP],
+ 'ecma/Date/15.9.5.10-2': [SKIP],
+ 'js1_5/Regress/regress-416628': [SKIP],
+ 'js1_5/extensions/regress-371636': [SKIP],
+ 'ecma_3/RegExp/regress-330684': [SKIP],
+ 'ecma_3/RegExp/regress-307456': [SKIP],
+ 'js1_5/Regress/regress-303213': [SKIP],
+ 'js1_5/extensions/regress-330569': [SKIP],
+ 'js1_5/extensions/regress-351448': [SKIP],
+ 'js1_5/extensions/regress-336410-1': [SKIP],
+}], # 'arch == a64 and simulator_run == True'
]
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+# We don't parse RegExps at scanning time, so we can't fail on octal
+# escapes (we need to parse to distinguish octal escapes from valid
+# back-references).
[
[ALWAYS, {
# TODO(mstarzinger): This script parses but throws a TypeError when run.
'S15.1.3.2_A2.5_T1': [PASS, ['mode == debug', SKIP]],
}], # ALWAYS
-['arch == arm or arch == mipsel', {
+['arch == arm or arch == mipsel or arch == a64', {
# TODO(mstarzinger): Causes stack overflow on simulators due to eager
# compilation of parenthesized function literals. Needs investigation.
'S15.1.3.2_A2.5_T1': [SKIP],
'S15.1.3.3_A2.3_T1': [SKIP],
'S15.1.3.4_A2.3_T1': [SKIP],
-}], # 'arch == arm or arch == mipsel'
+}], # 'arch == arm or arch == mipsel or arch == a64'
]
['simulator', {
'function-apply-aliased': [SKIP],
}], # 'simulator'
+['arch == a64 and simulator_run == True', {
+ 'dfg-int-overflow-in-loop': [SKIP],
+}], # 'arch == a64 and simulator_run == True'
]
--- /dev/null
+#!/bin/sh
+#
+# Copyright 2013 the V8 project authors. All rights reserved.
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following
+# disclaimer in the documentation and/or other materials provided
+# with the distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+if [ "$#" -lt 1 ]; then
+ echo "Usage: tools/cross_build_gcc.sh <GCC prefix> [make arguments ...]"
+ exit 1
+fi
+
+export CXX=$1g++
+export AR=$1ar
+export RANLIB=$1ranlib
+export CC=$1gcc
+export LD=$1g++
+
+OK=1
+if [ ! -x "$CXX" ]; then
+ echo "Error: $CXX does not exist or is not executable."
+ OK=0
+fi
+if [ ! -x "$AR" ]; then
+ echo "Error: $AR does not exist or is not executable."
+ OK=0
+fi
+if [ ! -x "$RANLIB" ]; then
+ echo "Error: $RANLIB does not exist or is not executable."
+ OK=0
+fi
+if [ ! -x "$CC" ]; then
+ echo "Error: $CC does not exist or is not executable."
+ OK=0
+fi
+if [ ! -x "$LD" ]; then
+ echo "Error: $LD does not exist or is not executable."
+ OK=0
+fi
+if [ $OK -ne 1 ]; then
+ exit 1
+fi
+
+shift
+make snapshot=off $@
--- /dev/null
+#!/bin/bash
+#
+# Copyright 2013 the V8 project authors. All rights reserved.
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following
+# disclaimer in the documentation and/or other materials provided
+# with the distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+# This script reads in CSV formatted instruction data, and draws a stacked
+# graph in png format.
+
+defaultfile=a64_inst.csv
+defaultout=a64_inst.png
+gnuplot=/usr/bin/gnuplot
+
+
+# File containing CSV instruction data from simulator.
+file=${1:-$defaultfile}
+
+# Output graph png file.
+out=${2:-$defaultout}
+
+# Check input file exists.
+if [ ! -e $file ]; then
+ echo "Input file not found: $file."
+ echo "Usage: draw_instruction_graph.sh <input csv> <output png>"
+ exit 1
+fi
+
+# Search for an error message, and if found, exit.
+error=`grep -m1 '# Error:' $file`
+if [ -n "$error" ]; then
+ echo "Error message in input file:"
+ echo " $error"
+ exit 2
+fi
+
+# Sample period - period over which the numbers for each category of
+# instructions are counted.
+sp=`grep -m1 '# sample_period=' $file | cut -d= -f2`
+
+# Get number of counters in the CSV file.
+nc=`grep -m1 '# counters=' $file | cut -d= -f2`
+
+# Find the annotation arrows. They appear as comments in the CSV file, in the
+# format:
+# # xx @ yyyyy
+# Where xx is a two character annotation identifier, and yyyyy is the
+# position in the executed instruction stream that generated the annotation.
+# Turn these locations into labelled arrows.
+arrows=`sed '/^[^#]/ d' $file | \
+ perl -pe "s/^# .. @ (\d+)/set arrow from \1, graph 0.9 to \1, $sp/"`;
+labels=`sed '/^[^#]/d' $file | \
+ sed -r 's/^# (..) @ (.+)/set label at \2, graph 0.9 "\1" \
+ center offset 0,0.5 font "FreeSans, 8"/'`;
+
+# Check for gnuplot, and warn if not available.
+if [ ! -e $gnuplot ]; then
+ echo "Can't find gnuplot at $gnuplot."
+ echo "Gnuplot version 4.6.3 or later required."
+ exit 3
+fi
+
+# Initialise gnuplot, and give it the data to draw.
+echo | $gnuplot <<EOF
+$arrows
+$labels
+MAXCOL=$nc
+set term png size 1920, 800 #ffffff
+set output '$out'
+set datafile separator ','
+set xtics font 'FreeSans, 10'
+set xlabel 'Instructions' font 'FreeSans, 10'
+set ytics font 'FreeSans, 10'
+set yrange [0:*]
+set key outside font 'FreeSans, 8'
+
+set style line 2 lc rgb '#800000'
+set style line 3 lc rgb '#d00000'
+set style line 4 lc rgb '#ff6000'
+set style line 5 lc rgb '#ffc000'
+set style line 6 lc rgb '#ffff00'
+
+set style line 7 lc rgb '#ff00ff'
+set style line 8 lc rgb '#ffc0ff'
+
+set style line 9 lc rgb '#004040'
+set style line 10 lc rgb '#008080'
+set style line 11 lc rgb '#40c0c0'
+set style line 12 lc rgb '#c0f0f0'
+
+set style line 13 lc rgb '#004000'
+set style line 14 lc rgb '#008000'
+set style line 15 lc rgb '#40c040'
+set style line 16 lc rgb '#c0f0c0'
+
+set style line 17 lc rgb '#2020f0'
+set style line 18 lc rgb '#6060f0'
+set style line 19 lc rgb '#a0a0f0'
+
+set style line 20 lc rgb '#000000'
+set style line 21 lc rgb '#ffffff'
+
+plot for [i=2:MAXCOL] '$file' using 1:(sum [col=i:MAXCOL] column(col)) \
+title columnheader(i) with filledcurve y1=0 ls i
+EOF
+
+
+
'../../src/arm/stub-cache-arm.cc',
],
}],
+ ['v8_target_arch=="a64"', {
+ 'sources': [ ### gcmole(arch:a64) ###
+ '../../src/a64/assembler-a64.cc',
+ '../../src/a64/assembler-a64.h',
+ '../../src/a64/assembler-a64-inl.h',
+ '../../src/a64/builtins-a64.cc',
+ '../../src/a64/codegen-a64.cc',
+ '../../src/a64/codegen-a64.h',
+ '../../src/a64/code-stubs-a64.cc',
+ '../../src/a64/code-stubs-a64.h',
+ '../../src/a64/constants-a64.h',
+ '../../src/a64/cpu-a64.cc',
+ '../../src/a64/cpu-a64.h',
+ '../../src/a64/debug-a64.cc',
+ '../../src/a64/debugger-a64.cc',
+ '../../src/a64/debugger-a64.h',
+ '../../src/a64/decoder-a64.cc',
+ '../../src/a64/decoder-a64.h',
+ '../../src/a64/deoptimizer-a64.cc',
+ '../../src/a64/disasm-a64.cc',
+ '../../src/a64/disasm-a64.h',
+ '../../src/a64/frames-a64.cc',
+ '../../src/a64/frames-a64.h',
+ '../../src/a64/full-codegen-a64.cc',
+ '../../src/a64/ic-a64.cc',
+ '../../src/a64/instructions-a64.cc',
+ '../../src/a64/instructions-a64.h',
+ '../../src/a64/instrument-a64.cc',
+ '../../src/a64/instrument-a64.h',
+ '../../src/a64/lithium-a64.cc',
+ '../../src/a64/lithium-a64.h',
+ '../../src/a64/lithium-codegen-a64.cc',
+ '../../src/a64/lithium-codegen-a64.h',
+ '../../src/a64/lithium-gap-resolver-a64.cc',
+ '../../src/a64/lithium-gap-resolver-a64.h',
+ '../../src/a64/macro-assembler-a64.cc',
+ '../../src/a64/macro-assembler-a64.h',
+ '../../src/a64/macro-assembler-a64-inl.h',
+ '../../src/a64/regexp-macro-assembler-a64.cc',
+ '../../src/a64/regexp-macro-assembler-a64.h',
+ '../../src/a64/simulator-a64.cc',
+ '../../src/a64/simulator-a64.h',
+ '../../src/a64/stub-cache-a64.cc',
+ '../../src/a64/utils-a64.cc',
+ '../../src/a64/utils-a64.h',
+ ],
+ }],
['v8_target_arch=="ia32" or v8_target_arch=="mac" or OS=="mac"', {
'sources': [ ### gcmole(arch:ia32) ###
'../../src/ia32/assembler-ia32-inl.h',
"mipsel",
"nacl_ia32",
"nacl_x64",
- "x64"]
+ "x64",
+ "a64"]
# Double the timeout for these:
SLOW_ARCHS = ["android_arm",
"android_ia32",
"arm",
"mipsel",
"nacl_ia32",
- "nacl_x64"]
+ "nacl_x64",
+ "a64"]
def BuildOptions():
result.add_option("--shell", help="DEPRECATED! use --shell-dir", default="")
result.add_option("--shell-dir", help="Directory containing executables",
default="")
+ result.add_option("--dont-skip-slow-simulator-tests",
+ help="Don't skip more slow tests when using a simulator.",
+ default=False, action="store_true",
+ dest="dont_skip_simulator_slow_tests")
result.add_option("--stress-only",
help="Only run tests with --always-opt --stress-opt",
default=False, action="store_true")
options.extra_flags,
options.no_i18n)
+ # TODO(all): Combine "simulator" and "simulator_run".
+ simulator_run = not options.dont_skip_simulator_slow_tests and \
+ arch in ['a64', 'arm', 'mips'] and ARCH_GUESS and arch != ARCH_GUESS
# Find available test suites and read test cases from them.
variables = {
"arch": arch,
"isolates": options.isolates,
"mode": mode,
"no_i18n": options.no_i18n,
+ "simulator_run": simulator_run,
"simulator": utils.UseSimulator(arch),
"system": utils.GuessOS(),
}
# Support arches, modes to be written as keywords instead of strings.
VARIABLES = {ALWAYS: True}
-for var in ["debug", "release", "android_arm", "android_ia32", "arm", "ia32",
- "mipsel", "x64", "nacl_ia32", "nacl_x64", "macos", "windows",
- "linux"]:
+for var in ["debug", "release", "android_arm", "android_ia32", "arm", "a64",
+ "ia32", "mipsel", "x64", "nacl_ia32", "nacl_x64", "macos",
+ "windows", "linux"]:
VARIABLES[var] = var
def UseSimulator(arch):
machine = platform.machine()
return (machine and
- (arch == "mipsel" or arch == "arm") and
+ (arch == "mipsel" or arch == "arm" or arch == "a64") and
not arch.startswith(machine))