NVIDIA Corporation
BlackBerry Limited
Opera Software ASA
+Intel Corporation
Akinori MUSHA <knu@FreeBSD.org>
Alexander Botero-Lowry <alexbl@FreeBSD.org>
Bert Belder <bertbelder@gmail.com>
Burcu Dogan <burcujdogan@gmail.com>
Craig Schlenter <craig.schlenter@gmail.com>
+Chunyang Dai <chunyang.dai@intel.com>
Daniel Andersson <kodandersson@gmail.com>
Daniel James <dnljms@gmail.com>
Derek J Conrod <dconrod@codeaurora.org>
Tobias Burnus <burnus@net-b.de>
Vincent Belliard <vincent.belliard@arm.com>
Vlad Burlik <vladbph@gmail.com>
+Weiliang Lin <weiliang.lin@intel.com>
Xi Qian <xi.qian@intel.com>
Yuqiang Xian <yuqiang.xian@intel.com>
Zaheer Ahmad <zahmad@codeaurora.org>
# Architectures and modes to be compiled. Consider these to be internal
# variables, don't override them (use the targets instead).
-ARCHES = ia32 x64 arm arm64 mips mipsel
+ARCHES = ia32 x64 arm arm64 mips mipsel x87
DEFAULT_ARCHES = ia32 x64 arm
MODES = release debug optdebug
DEFAULT_MODES = release debug
-ANDROID_ARCHES = android_ia32 android_arm android_arm64 android_mipsel
+ANDROID_ARCHES = android_ia32 android_arm android_arm64 android_mipsel android_x87
NACL_ARCHES = nacl_ia32 nacl_x64
# List of files that trigger Makefile regeneration:
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Those definitions should be consistent with the main Makefile
-ANDROID_ARCHES = android_ia32 android_arm android_arm64 android_mipsel
+ANDROID_ARCHES = android_ia32 android_arm android_arm64 android_mipsel android_x87
MODES = release debug
# Generates all combinations of ANDROID ARCHES and MODES,
TOOLCHAIN_PREFIX = i686-linux-android
TOOLCHAIN_VER = 4.6
else
- $(error Target architecture "${ARCH}" is not supported)
+ ifeq ($(ARCH), android_x87)
+ DEFINES = target_arch=x87 v8_target_arch=x87 android_target_arch=x86 android_target_platform=14
+ TOOLCHAIN_ARCH = x86
+ TOOLCHAIN_PREFIX = i686-linux-android
+ TOOLCHAIN_VER = 4.6
+ else
+ $(error Target architecture "${ARCH}" is not supported)
+ endif
endif
endif
endif
'-L<(android_stlport_libs)/mips',
],
}],
- ['target_arch=="ia32"', {
+ ['target_arch=="ia32" or target_arch=="x87"', {
'ldflags': [
'-L<(android_stlport_libs)/x86',
],
}],
],
}],
- ['target_arch=="ia32"', {
+ ['target_arch=="ia32" or target_arch=="x87"', {
# The x86 toolchain currently has problems with stack-protector.
'cflags!': [
'-fstack-protector',
}, {
'os_posix%': 1,
}],
- ['(v8_target_arch=="ia32" or v8_target_arch=="x64") and \
+ ['(v8_target_arch=="ia32" or v8_target_arch=="x64" or v8_target_arch=="x87") and \
(OS=="linux" or OS=="mac")', {
'v8_enable_gdbjit%': 1,
}, {
'V8_TARGET_ARCH_IA32',
],
}], # v8_target_arch=="ia32"
+ ['v8_target_arch=="x87"', {
+ 'defines': [
+ 'V8_TARGET_ARCH_X87',
+ ],
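+      # Use the x87 FPU for floating-point math instead of SSE.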
+ 'cflags': ['-mfpmath=387'],
+ 'ldflags': ['-mfpmath=387'],
+ }], # v8_target_arch=="x87"
['v8_target_arch=="mips"', {
'defines': [
'V8_TARGET_ARCH_MIPS',
}],
['(OS=="linux" or OS=="freebsd" or OS=="openbsd" or OS=="solaris" \
or OS=="netbsd" or OS=="mac" or OS=="android" or OS=="qnx") and \
- (v8_target_arch=="arm" or v8_target_arch=="ia32" or \
+    (v8_target_arch=="arm" or v8_target_arch=="ia32" or v8_target_arch=="x87" or \
v8_target_arch=="mips" or v8_target_arch=="mipsel")', {
# Check whether the host compiler and target compiler support the
# '-m32' option and set it if so.
#include "arm/assembler-arm-inl.h"
#elif V8_TARGET_ARCH_MIPS
#include "mips/assembler-mips-inl.h"
+#elif V8_TARGET_ARCH_X87
+#include "x87/assembler-x87-inl.h"
#else
#error "Unknown architecture."
#endif
#include "arm/regexp-macro-assembler-arm.h"
#elif V8_TARGET_ARCH_MIPS
#include "mips/regexp-macro-assembler-mips.h"
+#elif V8_TARGET_ARCH_X87
+#include "x87/regexp-macro-assembler-x87.h"
#else // Unknown architecture.
#error "Unknown architecture."
#endif // Target architecture.
function = FUNCTION_ADDR(RegExpMacroAssemblerARM::CheckStackGuardState);
#elif V8_TARGET_ARCH_MIPS
function = FUNCTION_ADDR(RegExpMacroAssemblerMIPS::CheckStackGuardState);
+#elif V8_TARGET_ARCH_X87
+ function = FUNCTION_ADDR(RegExpMacroAssemblerX87::CheckStackGuardState);
#else
UNREACHABLE();
#endif
#include "arm/code-stubs-arm.h"
#elif V8_TARGET_ARCH_MIPS
#include "mips/code-stubs-mips.h"
+#elif V8_TARGET_ARCH_X87
+#include "x87/code-stubs-x87.h"
#else
#error Unsupported target architecture.
#endif
#include "arm/codegen-arm.h"
#elif V8_TARGET_ARCH_MIPS
#include "mips/codegen-mips.h"
+#elif V8_TARGET_ARCH_X87
+#include "x87/codegen-x87.h"
#else
#error Unsupported target architecture.
#endif
#include "arm/frames-arm.h"
#elif V8_TARGET_ARCH_MIPS
#include "mips/frames-mips.h"
+#elif V8_TARGET_ARCH_X87
+#include "x87/frames-x87.h"
#else
#error Unsupported target architecture.
#endif
Address StackFrame::UnpaddedFP() const {
-#if V8_TARGET_ARCH_IA32
+#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X87
if (!is_optimized()) return fp();
int32_t alignment_state = Memory::int32_at(
fp() + JavaScriptFrameConstants::kDynamicAlignmentStateOffset);
static const int kMaxBackEdgeWeight = 127;
// Platform-specific code size multiplier.
-#if V8_TARGET_ARCH_IA32
+#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X87
static const int kCodeSizeMultiplier = 105;
static const int kBootCodeSizeMultiplier = 100;
#elif V8_TARGET_ARCH_X64
struct MachOSectionHeader {
char sectname[16];
char segname[16];
-#if V8_TARGET_ARCH_IA32
+#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X87
uint32_t addr;
uint32_t size;
#else
uint32_t cmd;
uint32_t cmdsize;
char segname[16];
-#if V8_TARGET_ARCH_IA32
+#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X87
uint32_t vmaddr;
uint32_t vmsize;
uint32_t fileoff;
Writer::Slot<MachOHeader> WriteHeader(Writer* w) {
ASSERT(w->position() == 0);
Writer::Slot<MachOHeader> header = w->CreateSlotHere<MachOHeader>();
-#if V8_TARGET_ARCH_IA32
+#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X87
header->magic = 0xFEEDFACEu;
header->cputype = 7; // i386
header->cpusubtype = 3; // CPU_SUBTYPE_I386_ALL
uintptr_t code_size) {
Writer::Slot<MachOSegmentCommand> cmd =
w->CreateSlotHere<MachOSegmentCommand>();
-#if V8_TARGET_ARCH_IA32
+#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X87
cmd->cmd = LC_SEGMENT_32;
#else
cmd->cmd = LC_SEGMENT_64;
void WriteHeader(Writer* w) {
ASSERT(w->position() == 0);
Writer::Slot<ELFHeader> header = w->CreateSlotHere<ELFHeader>();
-#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_ARM
+#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_X87
const uint8_t ident[16] =
{ 0x7f, 'E', 'L', 'F', 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0};
#elif V8_TARGET_ARCH_X64
#endif
OS::MemCopy(header->ident, ident, 16);
header->type = 1;
-#if V8_TARGET_ARCH_IA32
+#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X87
header->machine = 3;
#elif V8_TARGET_ARCH_X64
// Processor identification value for x64 is 62 as defined in
Binding binding() const {
return static_cast<Binding>(info >> 4);
}
-#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_ARM
+#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_X87
struct SerializedLayout {
SerializedLayout(uint32_t name,
uintptr_t value,
w->Write<intptr_t>(desc_->CodeStart() + desc_->CodeSize());
Writer::Slot<uint32_t> fb_block_size = w->CreateSlotHere<uint32_t>();
uintptr_t fb_block_start = w->position();
-#if V8_TARGET_ARCH_IA32
+#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X87
w->Write<uint8_t>(DW_OP_reg5); // The frame pointer's here on ia32
#elif V8_TARGET_ARCH_X64
w->Write<uint8_t>(DW_OP_reg6); // and here on x64.
// Target architecture detection. This may be set externally. If not, detect
// in the same way as the host architecture, that is, target the native
// environment as presented by the compiler.
-#if !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_IA32 && \
+#if !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_X87 && \
!V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS
#if defined(_M_X64) || defined(__x86_64__)
#define V8_TARGET_ARCH_X64 1
#else
#define V8_TARGET_LITTLE_ENDIAN 1
#endif
+#elif V8_TARGET_ARCH_X87
+#define V8_TARGET_LITTLE_ENDIAN 1
#else
#error Unknown target architecture endianness
#endif
#include "arm/lithium-arm.h"
#elif V8_TARGET_ARCH_MIPS
#include "mips/lithium-mips.h"
+#elif V8_TARGET_ARCH_X87
+#include "x87/lithium-x87.h"
#else
#error Unsupported target architecture.
#endif
#include "arm/lithium-codegen-arm.h"
#elif V8_TARGET_ARCH_MIPS
#include "mips/lithium-codegen-mips.h"
+#elif V8_TARGET_ARCH_X87
+#include "x87/lithium-codegen-x87.h"
#else
#error Unsupported target architecture.
#endif
#include "arm/regexp-macro-assembler-arm.h"
#elif V8_TARGET_ARCH_MIPS
#include "mips/regexp-macro-assembler-mips.h"
+#elif V8_TARGET_ARCH_X87
+#include "x87/regexp-macro-assembler-x87.h"
#else
#error Unsupported target architecture.
#endif
#elif V8_TARGET_ARCH_MIPS
RegExpMacroAssemblerMIPS macro_assembler(mode, (data->capture_count + 1) * 2,
zone);
+#elif V8_TARGET_ARCH_X87
+ RegExpMacroAssemblerX87 macro_assembler(mode, (data->capture_count + 1) * 2,
+ zone);
#else
#error "Unsupported architecture"
#endif
#include "arm/lithium-arm.h"
#elif V8_TARGET_ARCH_MIPS
#include "mips/lithium-mips.h"
+#elif V8_TARGET_ARCH_X87
+#include "x87/lithium-x87.h"
#else
#error "Unknown architecture."
#endif
#include "arm/lithium-arm.h"
#elif V8_TARGET_ARCH_MIPS
#include "mips/lithium-mips.h"
+#elif V8_TARGET_ARCH_X87
+#include "x87/lithium-x87.h"
#else
#error "Unknown architecture."
#endif
#elif V8_TARGET_ARCH_MIPS
#include "mips/lithium-mips.h"
#include "mips/lithium-codegen-mips.h"
+#elif V8_TARGET_ARCH_X87
+#include "x87/lithium-x87.h"
+#include "x87/lithium-codegen-x87.h"
#else
#error Unsupported target architecture.
#endif
#elif V8_TARGET_ARCH_ARM64
#include "arm64/lithium-arm64.h"
#include "arm64/lithium-codegen-arm64.h"
+#elif V8_TARGET_ARCH_X87
+#include "x87/lithium-x87.h"
+#include "x87/lithium-codegen-x87.h"
#else
#error "Unknown architecture."
#endif
static const uint32_t kElfMachX64 = 62;
static const uint32_t kElfMachARM = 40;
static const uint32_t kElfMachMIPS = 10;
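+  // EM_386 (3): the x87 port is plain 32-bit x86 at the ELF level.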
+ static const uint32_t kElfMachX87 = 3;
struct jitheader {
uint32_t magic;
return kElfMachARM;
#elif V8_TARGET_ARCH_MIPS
return kElfMachMIPS;
+#elif V8_TARGET_ARCH_X87
+ return kElfMachX87;
#else
UNIMPLEMENTED();
return 0;
const char arch[] = "arm";
#elif V8_TARGET_ARCH_MIPS
const char arch[] = "mips";
+#elif V8_TARGET_ARCH_X87
+ const char arch[] = "x87";
#else
const char arch[] = "unknown";
#endif
#include "mips/assembler-mips-inl.h"
#include "code.h" // must be after assembler_*.h
#include "mips/macro-assembler-mips.h"
+#elif V8_TARGET_ARCH_X87
+#include "assembler.h"
+#include "x87/assembler-x87.h"
+#include "x87/assembler-x87-inl.h"
+#include "code.h" // must be after assembler_*.h
+#include "x87/macro-assembler-x87.h"
#else
#error Unsupported target architecture.
#endif
}
-#if V8_TARGET_ARCH_IA32
+#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X87
static void MemMoveWrapper(void* dest, const void* src, size_t size) {
memmove(dest, src, size);
}
void OS::PostSetUp() {
-#if V8_TARGET_ARCH_IA32
+#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X87
OS::MemMoveFunction generated_memmove = CreateMemMoveFunction();
if (generated_memmove != NULL) {
memmove_function = generated_memmove;
}
-#if V8_TARGET_ARCH_IA32
+#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X87
static void MemMoveWrapper(void* dest, const void* src, size_t size) {
memmove(dest, src, size);
}
(*memmove_function)(dest, src, size);
}
-#endif // V8_TARGET_ARCH_IA32
+#endif // V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X87
class TimezoneCache {
void OS::PostSetUp() {
-#if V8_TARGET_ARCH_IA32
+#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X87
OS::MemMoveFunction generated_memmove = CreateMemMoveFunction();
if (generated_memmove != NULL) {
memmove_function = generated_memmove;
#if (_MSC_VER < 1800)
inline int lrint(double flt) {
int intgr;
-#if V8_TARGET_ARCH_IA32
+#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X87
__asm {
fld flt
fistp intgr
#ifndef V8_NO_FAST_TLS
-#if defined(_MSC_VER) && V8_HOST_ARCH_IA32
+#if defined(_MSC_VER) && (V8_HOST_ARCH_IA32)
#define V8_FAST_TLS_SUPPORTED 1
// the platform doesn't care. Guaranteed to be a power of two.
static int ActivationFrameAlignment();
-#if defined(V8_TARGET_ARCH_IA32)
+#if defined(V8_TARGET_ARCH_IA32) || defined(V8_TARGET_ARCH_X87)
// Limit below which the extra overhead of the MemCopy function is likely
// to outweigh the benefits of faster copying.
static const int kMinComplexMemCopy = 64;
unsigned int type = assembler->Implementation();
-  ASSERT(type < 6);
+  ASSERT(type < 7);
const char* impl_names[] = {"IA32", "ARM", "ARM64",
- "MIPS", "X64", "Bytecode"};
+ "MIPS", "X64", "X87", "Bytecode"};
PrintF("RegExpMacroAssembler%s();\n", impl_names[type]);
}
kARM64Implementation,
kMIPSImplementation,
kX64Implementation,
+ kX87Implementation,
kBytecodeImplementation
};
#include "arm/simulator-arm.h"
#elif V8_TARGET_ARCH_MIPS
#include "mips/simulator-mips.h"
+#elif V8_TARGET_ARCH_X87
+#include "x87/simulator-x87.h"
#else
#error Unsupported target architecture.
#endif
static bool DoubleStrtod(Vector<const char> trimmed,
int exponent,
double* result) {
-#if (V8_TARGET_ARCH_IA32 || defined(USE_SIMULATOR)) && !defined(_MSC_VER)
+#if (V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X87 || defined(USE_SIMULATOR)) && \
+ !defined(_MSC_VER)
// On x86 the floating-point stack can be 64 or 80 bits wide. If it is
// 80 bits wide (as is the case on Linux) then double-rounding occurs and the
// result is not accurate.
--- /dev/null
+// Copyright (c) 1994-2006 Sun Microsystems Inc.
+// All Rights Reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// - Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// - Redistribution in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// - Neither the name of Sun Microsystems or the names of contributors may
+// be used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
+// IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// The original source code covered by the above license has been
+// modified significantly by Google Inc.
+// Copyright 2012 the V8 project authors. All rights reserved.
+
+// A light-weight X87 Assembler.
+
+#ifndef V8_X87_ASSEMBLER_X87_INL_H_
+#define V8_X87_ASSEMBLER_X87_INL_H_
+
+#include "x87/assembler-x87.h"
+
+#include "cpu.h"
+#include "debug.h"
+
+namespace v8 {
+namespace internal {
+
+bool CpuFeatures::SupportsCrankshaft() { return false; }
+
+
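+// 0xE8 is the opcode of a call with a 32-bit relative displacement; the
+// code-age sequence recognized below starts with such a call.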
+static const byte kCallOpcode = 0xE8;
+static const int kNoCodeAgeSequenceLength = 5;
+
+
+// The modes possibly affected by apply must be in kApplyMask.
+void RelocInfo::apply(intptr_t delta, ICacheFlushMode icache_flush_mode) {
+ bool flush_icache = icache_flush_mode != SKIP_ICACHE_FLUSH;
+ if (IsRuntimeEntry(rmode_) || IsCodeTarget(rmode_)) {
+ int32_t* p = reinterpret_cast<int32_t*>(pc_);
+ *p -= delta; // Relocate entry.
+ if (flush_icache) CPU::FlushICache(p, sizeof(uint32_t));
+ } else if (rmode_ == CODE_AGE_SEQUENCE) {
+ if (*pc_ == kCallOpcode) {
+ int32_t* p = reinterpret_cast<int32_t*>(pc_ + 1);
+ *p -= delta; // Relocate entry.
+ if (flush_icache) CPU::FlushICache(p, sizeof(uint32_t));
+ }
+ } else if (rmode_ == JS_RETURN && IsPatchedReturnSequence()) {
+ // Special handling of js_return when a break point is set (call
+ // instruction has been inserted).
+ int32_t* p = reinterpret_cast<int32_t*>(pc_ + 1);
+ *p -= delta; // Relocate entry.
+ if (flush_icache) CPU::FlushICache(p, sizeof(uint32_t));
+ } else if (rmode_ == DEBUG_BREAK_SLOT && IsPatchedDebugBreakSlotSequence()) {
+ // Special handling of a debug break slot when a break point is set (call
+ // instruction has been inserted).
+ int32_t* p = reinterpret_cast<int32_t*>(pc_ + 1);
+ *p -= delta; // Relocate entry.
+ if (flush_icache) CPU::FlushICache(p, sizeof(uint32_t));
+ } else if (IsInternalReference(rmode_)) {
+ // absolute code pointer inside code object moves with the code object.
+ int32_t* p = reinterpret_cast<int32_t*>(pc_);
+ *p += delta; // Relocate entry.
+ if (flush_icache) CPU::FlushICache(p, sizeof(uint32_t));
+ }
+}
+
+
+Address RelocInfo::target_address() {
+ ASSERT(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_));
+ return Assembler::target_address_at(pc_, host_);
+}
+
+
+Address RelocInfo::target_address_address() {
+ ASSERT(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_)
+ || rmode_ == EMBEDDED_OBJECT
+ || rmode_ == EXTERNAL_REFERENCE);
+ return reinterpret_cast<Address>(pc_);
+}
+
+
+Address RelocInfo::constant_pool_entry_address() {
+ UNREACHABLE();
+ return NULL;
+}
+
+
+int RelocInfo::target_address_size() {
+ return Assembler::kSpecialTargetSize;
+}
+
+
+void RelocInfo::set_target_address(Address target,
+ WriteBarrierMode write_barrier_mode,
+ ICacheFlushMode icache_flush_mode) {
+ Assembler::set_target_address_at(pc_, host_, target, icache_flush_mode);
+ ASSERT(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_));
+ if (write_barrier_mode == UPDATE_WRITE_BARRIER && host() != NULL &&
+ IsCodeTarget(rmode_)) {
+ Object* target_code = Code::GetCodeFromTargetAddress(target);
+ host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
+ host(), this, HeapObject::cast(target_code));
+ }
+}
+
+
+Object* RelocInfo::target_object() {
+ ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
+ return Memory::Object_at(pc_);
+}
+
+
+Handle<Object> RelocInfo::target_object_handle(Assembler* origin) {
+ ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
+ return Memory::Object_Handle_at(pc_);
+}
+
+
+void RelocInfo::set_target_object(Object* target,
+ WriteBarrierMode write_barrier_mode,
+ ICacheFlushMode icache_flush_mode) {
+ ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
+ ASSERT(!target->IsConsString());
+ Memory::Object_at(pc_) = target;
+ if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
+ CPU::FlushICache(pc_, sizeof(Address));
+ }
+ if (write_barrier_mode == UPDATE_WRITE_BARRIER &&
+ host() != NULL &&
+ target->IsHeapObject()) {
+ host()->GetHeap()->incremental_marking()->RecordWrite(
+ host(), &Memory::Object_at(pc_), HeapObject::cast(target));
+ }
+}
+
+
+Address RelocInfo::target_reference() {
+ ASSERT(rmode_ == RelocInfo::EXTERNAL_REFERENCE);
+ return Memory::Address_at(pc_);
+}
+
+
+Address RelocInfo::target_runtime_entry(Assembler* origin) {
+ ASSERT(IsRuntimeEntry(rmode_));
+ return reinterpret_cast<Address>(*reinterpret_cast<int32_t*>(pc_));
+}
+
+
+void RelocInfo::set_target_runtime_entry(Address target,
+ WriteBarrierMode write_barrier_mode,
+ ICacheFlushMode icache_flush_mode) {
+ ASSERT(IsRuntimeEntry(rmode_));
+ if (target_address() != target) {
+ set_target_address(target, write_barrier_mode, icache_flush_mode);
+ }
+}
+
+
+Handle<Cell> RelocInfo::target_cell_handle() {
+ ASSERT(rmode_ == RelocInfo::CELL);
+ Address address = Memory::Address_at(pc_);
+ return Handle<Cell>(reinterpret_cast<Cell**>(address));
+}
+
+
+Cell* RelocInfo::target_cell() {
+ ASSERT(rmode_ == RelocInfo::CELL);
+ return Cell::FromValueAddress(Memory::Address_at(pc_));
+}
+
+
+void RelocInfo::set_target_cell(Cell* cell,
+ WriteBarrierMode write_barrier_mode,
+ ICacheFlushMode icache_flush_mode) {
+ ASSERT(rmode_ == RelocInfo::CELL);
+ Address address = cell->address() + Cell::kValueOffset;
+ Memory::Address_at(pc_) = address;
+ if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
+ CPU::FlushICache(pc_, sizeof(Address));
+ }
+ if (write_barrier_mode == UPDATE_WRITE_BARRIER && host() != NULL) {
+ // TODO(1550) We are passing NULL as a slot because cell can never be on
+ // evacuation candidate.
+ host()->GetHeap()->incremental_marking()->RecordWrite(
+ host(), NULL, cell);
+ }
+}
+
+
+Handle<Object> RelocInfo::code_age_stub_handle(Assembler* origin) {
+ ASSERT(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
+ ASSERT(*pc_ == kCallOpcode);
+ return Memory::Object_Handle_at(pc_ + 1);
+}
+
+
+Code* RelocInfo::code_age_stub() {
+ ASSERT(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
+ ASSERT(*pc_ == kCallOpcode);
+ return Code::GetCodeFromTargetAddress(
+ Assembler::target_address_at(pc_ + 1, host_));
+}
+
+
+void RelocInfo::set_code_age_stub(Code* stub,
+ ICacheFlushMode icache_flush_mode) {
+ ASSERT(*pc_ == kCallOpcode);
+ ASSERT(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
+ Assembler::set_target_address_at(pc_ + 1, host_, stub->instruction_start(),
+ icache_flush_mode);
+}
+
+
+Address RelocInfo::call_address() {
+ ASSERT((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
+ (IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
+ return Assembler::target_address_at(pc_ + 1, host_);
+}
+
+
+void RelocInfo::set_call_address(Address target) {
+ ASSERT((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
+ (IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
+ Assembler::set_target_address_at(pc_ + 1, host_, target);
+ if (host() != NULL) {
+ Object* target_code = Code::GetCodeFromTargetAddress(target);
+ host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
+ host(), this, HeapObject::cast(target_code));
+ }
+}
+
+
+Object* RelocInfo::call_object() {
+ return *call_object_address();
+}
+
+
+void RelocInfo::set_call_object(Object* target) {
+ *call_object_address() = target;
+}
+
+
+Object** RelocInfo::call_object_address() {
+ ASSERT((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
+ (IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
+ return reinterpret_cast<Object**>(pc_ + 1);
+}
+
+
+void RelocInfo::WipeOut() {
+ if (IsEmbeddedObject(rmode_) || IsExternalReference(rmode_)) {
+ Memory::Address_at(pc_) = NULL;
+ } else if (IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_)) {
+ // Effectively write zero into the relocation.
+ Assembler::set_target_address_at(pc_, host_, pc_ + sizeof(int32_t));
+ } else {
+ UNREACHABLE();
+ }
+}
+
+
+bool RelocInfo::IsPatchedReturnSequence() {
+ return *pc_ == kCallOpcode;
+}
+
+
+bool RelocInfo::IsPatchedDebugBreakSlotSequence() {
+ return !Assembler::IsNop(pc());
+}
+
+
+void RelocInfo::Visit(Isolate* isolate, ObjectVisitor* visitor) {
+ RelocInfo::Mode mode = rmode();
+ if (mode == RelocInfo::EMBEDDED_OBJECT) {
+ visitor->VisitEmbeddedPointer(this);
+ CPU::FlushICache(pc_, sizeof(Address));
+ } else if (RelocInfo::IsCodeTarget(mode)) {
+ visitor->VisitCodeTarget(this);
+ } else if (mode == RelocInfo::CELL) {
+ visitor->VisitCell(this);
+ } else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
+ visitor->VisitExternalReference(this);
+ CPU::FlushICache(pc_, sizeof(Address));
+ } else if (RelocInfo::IsCodeAgeSequence(mode)) {
+ visitor->VisitCodeAgeSequence(this);
+ } else if (((RelocInfo::IsJSReturn(mode) &&
+ IsPatchedReturnSequence()) ||
+ (RelocInfo::IsDebugBreakSlot(mode) &&
+ IsPatchedDebugBreakSlotSequence())) &&
+ isolate->debug()->has_break_points()) {
+ visitor->VisitDebugTarget(this);
+ } else if (IsRuntimeEntry(mode)) {
+ visitor->VisitRuntimeEntry(this);
+ }
+}
+
+
+template<typename StaticVisitor>
+void RelocInfo::Visit(Heap* heap) {
+ RelocInfo::Mode mode = rmode();
+ if (mode == RelocInfo::EMBEDDED_OBJECT) {
+ StaticVisitor::VisitEmbeddedPointer(heap, this);
+ CPU::FlushICache(pc_, sizeof(Address));
+ } else if (RelocInfo::IsCodeTarget(mode)) {
+ StaticVisitor::VisitCodeTarget(heap, this);
+ } else if (mode == RelocInfo::CELL) {
+ StaticVisitor::VisitCell(heap, this);
+ } else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
+ StaticVisitor::VisitExternalReference(this);
+ CPU::FlushICache(pc_, sizeof(Address));
+ } else if (RelocInfo::IsCodeAgeSequence(mode)) {
+ StaticVisitor::VisitCodeAgeSequence(heap, this);
+ } else if (heap->isolate()->debug()->has_break_points() &&
+ ((RelocInfo::IsJSReturn(mode) &&
+ IsPatchedReturnSequence()) ||
+ (RelocInfo::IsDebugBreakSlot(mode) &&
+ IsPatchedDebugBreakSlotSequence()))) {
+ StaticVisitor::VisitDebugTarget(heap, this);
+ } else if (IsRuntimeEntry(mode)) {
+ StaticVisitor::VisitRuntimeEntry(this);
+ }
+}
+
+
+Immediate::Immediate(int x) {
+ x_ = x;
+ rmode_ = RelocInfo::NONE32;
+}
+
+
+Immediate::Immediate(const ExternalReference& ext) {
+ x_ = reinterpret_cast<int32_t>(ext.address());
+ rmode_ = RelocInfo::EXTERNAL_REFERENCE;
+}
+
+
+Immediate::Immediate(Label* internal_offset) {
+ x_ = reinterpret_cast<int32_t>(internal_offset);
+ rmode_ = RelocInfo::INTERNAL_REFERENCE;
+}
+
+
+Immediate::Immediate(Handle<Object> handle) {
+ AllowDeferredHandleDereference using_raw_address;
+ // Verify all Objects referred by code are NOT in new space.
+ Object* obj = *handle;
+ if (obj->IsHeapObject()) {
+ ASSERT(!HeapObject::cast(obj)->GetHeap()->InNewSpace(obj));
+ x_ = reinterpret_cast<intptr_t>(handle.location());
+ rmode_ = RelocInfo::EMBEDDED_OBJECT;
+ } else {
+ // no relocation needed
+ x_ = reinterpret_cast<intptr_t>(obj);
+ rmode_ = RelocInfo::NONE32;
+ }
+}
+
+
+Immediate::Immediate(Smi* value) {
+ x_ = reinterpret_cast<intptr_t>(value);
+ rmode_ = RelocInfo::NONE32;
+}
+
+
+Immediate::Immediate(Address addr) {
+ x_ = reinterpret_cast<int32_t>(addr);
+ rmode_ = RelocInfo::NONE32;
+}
+
+
+void Assembler::emit(uint32_t x) {
+ *reinterpret_cast<uint32_t*>(pc_) = x;
+ pc_ += sizeof(uint32_t);
+}
+
+
+void Assembler::emit(Handle<Object> handle) {
+ AllowDeferredHandleDereference heap_object_check;
+ // Verify all Objects referred by code are NOT in new space.
+ Object* obj = *handle;
+ ASSERT(!isolate()->heap()->InNewSpace(obj));
+ if (obj->IsHeapObject()) {
+ emit(reinterpret_cast<intptr_t>(handle.location()),
+ RelocInfo::EMBEDDED_OBJECT);
+ } else {
+ // no relocation needed
+ emit(reinterpret_cast<intptr_t>(obj));
+ }
+}
+
+
+void Assembler::emit(uint32_t x, RelocInfo::Mode rmode, TypeFeedbackId id) {
+ if (rmode == RelocInfo::CODE_TARGET && !id.IsNone()) {
+ RecordRelocInfo(RelocInfo::CODE_TARGET_WITH_ID, id.ToInt());
+ } else if (!RelocInfo::IsNone(rmode)
+ && rmode != RelocInfo::CODE_AGE_SEQUENCE) {
+ RecordRelocInfo(rmode);
+ }
+ emit(x);
+}
+
+
+void Assembler::emit(Handle<Code> code,
+ RelocInfo::Mode rmode,
+ TypeFeedbackId id) {
+ AllowDeferredHandleDereference embedding_raw_address;
+ emit(reinterpret_cast<intptr_t>(code.location()), rmode, id);
+}
+
+
+void Assembler::emit(const Immediate& x) {
+ if (x.rmode_ == RelocInfo::INTERNAL_REFERENCE) {
+ Label* label = reinterpret_cast<Label*>(x.x_);
+ emit_code_relative_offset(label);
+ return;
+ }
+ if (!RelocInfo::IsNone(x.rmode_)) RecordRelocInfo(x.rmode_);
+ emit(x.x_);
+}
+
+
+void Assembler::emit_code_relative_offset(Label* label) {
+ if (label->is_bound()) {
+ int32_t pos;
+ pos = label->pos() + Code::kHeaderSize - kHeapObjectTag;
+ emit(pos);
+ } else {
+ emit_disp(label, Displacement::CODE_RELATIVE);
+ }
+}
+
+
+void Assembler::emit_w(const Immediate& x) {
+ ASSERT(RelocInfo::IsNone(x.rmode_));
+ uint16_t value = static_cast<uint16_t>(x.x_);
+ reinterpret_cast<uint16_t*>(pc_)[0] = value;
+ pc_ += sizeof(uint16_t);
+}
+
+
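+// Code targets are stored as a 32-bit displacement relative to the end of
+// the 4-byte field, so the absolute address is pc + 4 + *pc.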
+Address Assembler::target_address_at(Address pc,
+ ConstantPoolArray* constant_pool) {
+ return pc + sizeof(int32_t) + *reinterpret_cast<int32_t*>(pc);
+}
+
+
+void Assembler::set_target_address_at(Address pc,
+ ConstantPoolArray* constant_pool,
+ Address target,
+ ICacheFlushMode icache_flush_mode) {
+ int32_t* p = reinterpret_cast<int32_t*>(pc);
+ *p = target - (pc + sizeof(int32_t));
+ if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
+ CPU::FlushICache(p, sizeof(int32_t));
+ }
+}
+
+
+Address Assembler::target_address_from_return_address(Address pc) {
+ return pc - kCallTargetAddressOffset;
+}
+
+
+Displacement Assembler::disp_at(Label* L) {
+ return Displacement(long_at(L->pos()));
+}
+
+
+void Assembler::disp_at_put(Label* L, Displacement disp) {
+ long_at_put(L->pos(), disp.data());
+}
+
+
+void Assembler::emit_disp(Label* L, Displacement::Type type) {
+ Displacement disp(L, type);
+ L->link_to(pc_offset());
+ emit(static_cast<int>(disp.data()));
+}
+
+
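+// Emit an 8-bit displacement that, until the label is bound, records the
+// offset to the previous near use; bind_to() rewrites it with the final
+// displacement.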
+void Assembler::emit_near_disp(Label* L) {
+ byte disp = 0x00;
+ if (L->is_near_linked()) {
+ int offset = L->near_link_pos() - pc_offset();
+ ASSERT(is_int8(offset));
+ disp = static_cast<byte>(offset & 0xFF);
+ }
+ L->link_to(pc_offset(), Label::kNear);
+ *pc_++ = disp;
+}
+
+
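+// Encode the ModR/M byte: mod in bits 7..6, r/m in bits 2..0. The reg/opcode
+// field (bits 5..3) is filled in later, when the operand is emitted.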
+void Operand::set_modrm(int mod, Register rm) {
+ ASSERT((mod & -4) == 0);
+ buf_[0] = mod << 6 | rm.code();
+ len_ = 1;
+}
+
+
+void Operand::set_sib(ScaleFactor scale, Register index, Register base) {
+ ASSERT(len_ == 1);
+ ASSERT((scale & -4) == 0);
+ // Use SIB with no index register only for base esp.
+ ASSERT(!index.is(esp) || base.is(esp));
+ buf_[1] = scale << 6 | index.code() << 3 | base.code();
+ len_ = 2;
+}
+
+
+void Operand::set_disp8(int8_t disp) {
+ ASSERT(len_ == 1 || len_ == 2);
+ *reinterpret_cast<int8_t*>(&buf_[len_++]) = disp;
+}
+
+
+void Operand::set_dispr(int32_t disp, RelocInfo::Mode rmode) {
+ ASSERT(len_ == 1 || len_ == 2);
+ int32_t* p = reinterpret_cast<int32_t*>(&buf_[len_]);
+ *p = disp;
+ len_ += sizeof(int32_t);
+ rmode_ = rmode;
+}
+
+Operand::Operand(Register reg) {
+ // reg
+ set_modrm(3, reg);
+}
+
+
+Operand::Operand(int32_t disp, RelocInfo::Mode rmode) {
+ // [disp/r]
+ set_modrm(0, ebp);
+ set_dispr(disp, rmode);
+}
+
+} } // namespace v8::internal
+
+#endif // V8_X87_ASSEMBLER_X87_INL_H_
--- /dev/null
+// Copyright (c) 1994-2006 Sun Microsystems Inc.
+// All Rights Reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// - Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// - Redistribution in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the
+// distribution.
+//
+// - Neither the name of Sun Microsystems or the names of contributors may
+// be used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+// HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
+// OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// The original source code covered by the above license has been modified
+// significantly by Google Inc.
+// Copyright 2012 the V8 project authors. All rights reserved.
+
+#include "v8.h"
+
+#if V8_TARGET_ARCH_X87
+
+#include "disassembler.h"
+#include "macro-assembler.h"
+#include "serialize.h"
+
+namespace v8 {
+namespace internal {
+
+// -----------------------------------------------------------------------------
+// Implementation of CpuFeatures
+
+void CpuFeatures::ProbeImpl(bool cross_compile) {
+ CPU cpu;
+ // SAHF must be available in compat/legacy mode.
+ CHECK(cpu.has_sahf());
+ supported_ |= 1u << SAHF;
+ supported_ |= OS::CpuFeaturesImpliedByPlatform();
+
+ // Only use statically determined features for cross compile (snapshot).
+ if (cross_compile) return;
+}
+
+
+void CpuFeatures::PrintTarget() { }
+void CpuFeatures::PrintFeatures() { }
+
+
+// -----------------------------------------------------------------------------
+// Implementation of Displacement
+
+void Displacement::init(Label* L, Type type) {
+ ASSERT(!L->is_bound());
+ int next = 0;
+ if (L->is_linked()) {
+ next = L->pos();
+ ASSERT(next > 0); // Displacements must be at positions > 0
+ }
+ // Ensure that we _never_ overflow the next field.
+ ASSERT(NextField::is_valid(Assembler::kMaximalBufferSize));
+ data_ = NextField::encode(next) | TypeField::encode(type);
+}
+
+
+// -----------------------------------------------------------------------------
+// Implementation of RelocInfo
+
+
+const int RelocInfo::kApplyMask =
+ RelocInfo::kCodeTargetMask | 1 << RelocInfo::RUNTIME_ENTRY |
+ 1 << RelocInfo::JS_RETURN | 1 << RelocInfo::INTERNAL_REFERENCE |
+ 1 << RelocInfo::DEBUG_BREAK_SLOT | 1 << RelocInfo::CODE_AGE_SEQUENCE;
+
+
+bool RelocInfo::IsCodedSpecially() {
+ // The deserializer needs to know whether a pointer is specially coded. Being
+ // specially coded on IA32 means that it is a relative address, as used by
+ // branch instructions. These are also the ones that need changing when a
+ // code object moves.
+ return (1 << rmode_) & kApplyMask;
+}
+
+
+bool RelocInfo::IsInConstantPool() {
+ return false;
+}
+
+
+void RelocInfo::PatchCode(byte* instructions, int instruction_count) {
+ // Patch the code at the current address with the supplied instructions.
+ for (int i = 0; i < instruction_count; i++) {
+ *(pc_ + i) = *(instructions + i);
+ }
+
+ // Indicate that code has changed.
+ CPU::FlushICache(pc_, instruction_count);
+}
+
+
+// Patch the code at the current PC with a call to the target address.
+// Additional guard int3 instructions can be added if required.
+void RelocInfo::PatchCodeWithCall(Address target, int guard_bytes) {
+ // Call instruction takes up 5 bytes and int3 takes up one byte.
+ static const int kCallCodeSize = 5;
+ int code_size = kCallCodeSize + guard_bytes;
+
+ // Create a code patcher.
+ CodePatcher patcher(pc_, code_size);
+
+ // Add a label for checking the size of the code used for returning.
+#ifdef DEBUG
+ Label check_codesize;
+ patcher.masm()->bind(&check_codesize);
+#endif
+
+ // Patch the code.
+ patcher.masm()->call(target, RelocInfo::NONE32);
+
+ // Check that the size of the code generated is as expected.
+ ASSERT_EQ(kCallCodeSize,
+ patcher.masm()->SizeOfCodeGeneratedSince(&check_codesize));
+
+ // Add the requested number of int3 instructions after the call.
+ ASSERT_GE(guard_bytes, 0);
+ for (int i = 0; i < guard_bytes; i++) {
+ patcher.masm()->int3();
+ }
+}
+
+
+// -----------------------------------------------------------------------------
+// Implementation of Operand
+
+Operand::Operand(Register base, int32_t disp, RelocInfo::Mode rmode) {
+ // [base + disp/r]
+ if (disp == 0 && RelocInfo::IsNone(rmode) && !base.is(ebp)) {
+ // [base]
+ set_modrm(0, base);
+ if (base.is(esp)) set_sib(times_1, esp, base);
+ } else if (is_int8(disp) && RelocInfo::IsNone(rmode)) {
+ // [base + disp8]
+ set_modrm(1, base);
+ if (base.is(esp)) set_sib(times_1, esp, base);
+ set_disp8(disp);
+ } else {
+ // [base + disp/r]
+ set_modrm(2, base);
+ if (base.is(esp)) set_sib(times_1, esp, base);
+ set_dispr(disp, rmode);
+ }
+}
+
+
+Operand::Operand(Register base,
+ Register index,
+ ScaleFactor scale,
+ int32_t disp,
+ RelocInfo::Mode rmode) {
+ ASSERT(!index.is(esp)); // illegal addressing mode
+ // [base + index*scale + disp/r]
+ if (disp == 0 && RelocInfo::IsNone(rmode) && !base.is(ebp)) {
+ // [base + index*scale]
+ set_modrm(0, esp);
+ set_sib(scale, index, base);
+ } else if (is_int8(disp) && RelocInfo::IsNone(rmode)) {
+ // [base + index*scale + disp8]
+ set_modrm(1, esp);
+ set_sib(scale, index, base);
+ set_disp8(disp);
+ } else {
+ // [base + index*scale + disp/r]
+ set_modrm(2, esp);
+ set_sib(scale, index, base);
+ set_dispr(disp, rmode);
+ }
+}
+
+
+Operand::Operand(Register index,
+ ScaleFactor scale,
+ int32_t disp,
+ RelocInfo::Mode rmode) {
+ ASSERT(!index.is(esp)); // illegal addressing mode
+ // [index*scale + disp/r]
+ set_modrm(0, esp);
+ set_sib(scale, index, ebp);
+ set_dispr(disp, rmode);
+}
+
+
+bool Operand::is_reg(Register reg) const {
+ return ((buf_[0] & 0xF8) == 0xC0) // addressing mode is register only.
+ && ((buf_[0] & 0x07) == reg.code()); // register codes match.
+}
+
+
+bool Operand::is_reg_only() const {
+ return (buf_[0] & 0xF8) == 0xC0; // Addressing mode is register only.
+}
+
+
+Register Operand::reg() const {
+ ASSERT(is_reg_only());
+ return Register::from_code(buf_[0] & 0x07);
+}
+
+
+// -----------------------------------------------------------------------------
+// Implementation of Assembler.
+
+// Emit a single byte. Must always be inlined.
+#define EMIT(x) \
+ *pc_++ = (x)
+
+
+#ifdef GENERATED_CODE_COVERAGE
+static void InitCoverageLog();
+#endif
+
+Assembler::Assembler(Isolate* isolate, void* buffer, int buffer_size)
+ : AssemblerBase(isolate, buffer, buffer_size),
+ positions_recorder_(this) {
+ // Clear the buffer in debug mode unless it was provided by the
+ // caller in which case we can't be sure it's okay to overwrite
+ // existing code in it; see CodePatcher::CodePatcher(...).
+#ifdef DEBUG
+ if (own_buffer_) {
+ memset(buffer_, 0xCC, buffer_size_); // int3
+ }
+#endif
+
+ reloc_info_writer.Reposition(buffer_ + buffer_size_, pc_);
+
+#ifdef GENERATED_CODE_COVERAGE
+ InitCoverageLog();
+#endif
+}
+
+
+void Assembler::GetCode(CodeDesc* desc) {
+ // Finalize code (at this point overflow() may be true, but the gap ensures
+ // that we are still not overlapping instructions and relocation info).
+ ASSERT(pc_ <= reloc_info_writer.pos()); // No overlap.
+ // Set up code descriptor.
+ desc->buffer = buffer_;
+ desc->buffer_size = buffer_size_;
+ desc->instr_size = pc_offset();
+ desc->reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();
+ desc->origin = this;
+}
+
+
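+// Pad with one-byte nops until pc_offset() is a multiple of m (a power of 2).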
+void Assembler::Align(int m) {
+ ASSERT(IsPowerOf2(m));
+ int mask = m - 1;
+ int addr = pc_offset();
+ Nop((m - (addr & mask)) & mask);
+}
+
+
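+// Returns true if the code at addr is a nop: 0x90, optionally preceded by
+// 0x66 prefixes, or the 0x0F 0x1F multi-byte nop encoding.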
+bool Assembler::IsNop(Address addr) {
+ Address a = addr;
+ while (*a == 0x66) a++;
+ if (*a == 0x90) return true;
+ if (a[0] == 0xf && a[1] == 0x1f) return true;
+ return false;
+}
+
+
+void Assembler::Nop(int bytes) {
+ EnsureSpace ensure_space(this);
+
+ // Older CPUs that do not support SSE2 may not support multibyte NOP
+ // instructions.
+ for (; bytes > 0; bytes--) {
+ EMIT(0x90);
+ }
+ return;
+}
+
+
+void Assembler::CodeTargetAlign() {
+ Align(16); // Preferred alignment of jump targets on ia32.
+}
+
+
+void Assembler::cpuid() {
+ EnsureSpace ensure_space(this);
+ EMIT(0x0F);
+ EMIT(0xA2);
+}
+
+
+void Assembler::pushad() {
+ EnsureSpace ensure_space(this);
+ EMIT(0x60);
+}
+
+
+void Assembler::popad() {
+ EnsureSpace ensure_space(this);
+ EMIT(0x61);
+}
+
+
+void Assembler::pushfd() {
+ EnsureSpace ensure_space(this);
+ EMIT(0x9C);
+}
+
+
+void Assembler::popfd() {
+ EnsureSpace ensure_space(this);
+ EMIT(0x9D);
+}
+
+
+void Assembler::push(const Immediate& x) {
+ EnsureSpace ensure_space(this);
+ if (x.is_int8()) {
+ EMIT(0x6a);
+ EMIT(x.x_);
+ } else {
+ EMIT(0x68);
+ emit(x);
+ }
+}
+
+
+void Assembler::push_imm32(int32_t imm32) {
+ EnsureSpace ensure_space(this);
+ EMIT(0x68);
+ emit(imm32);
+}
+
+
+void Assembler::push(Register src) {
+ EnsureSpace ensure_space(this);
+ EMIT(0x50 | src.code());
+}
+
+
+void Assembler::push(const Operand& src) {
+ EnsureSpace ensure_space(this);
+ EMIT(0xFF);
+ emit_operand(esi, src);
+}
+
+
+void Assembler::pop(Register dst) {
+ ASSERT(reloc_info_writer.last_pc() != NULL);
+ EnsureSpace ensure_space(this);
+ EMIT(0x58 | dst.code());
+}
+
+
+void Assembler::pop(const Operand& dst) {
+ EnsureSpace ensure_space(this);
+ EMIT(0x8F);
+ emit_operand(eax, dst);
+}
+
+
+void Assembler::enter(const Immediate& size) {
+ EnsureSpace ensure_space(this);
+ EMIT(0xC8);
+ emit_w(size);
+ EMIT(0);
+}
+
+
+void Assembler::leave() {
+ EnsureSpace ensure_space(this);
+ EMIT(0xC9);
+}
+
+
+void Assembler::mov_b(Register dst, const Operand& src) {
+ CHECK(dst.is_byte_register());
+ EnsureSpace ensure_space(this);
+ EMIT(0x8A);
+ emit_operand(dst, src);
+}
+
+
+void Assembler::mov_b(const Operand& dst, int8_t imm8) {
+ EnsureSpace ensure_space(this);
+ EMIT(0xC6);
+ emit_operand(eax, dst);
+ EMIT(imm8);
+}
+
+
+void Assembler::mov_b(const Operand& dst, Register src) {
+ CHECK(src.is_byte_register());
+ EnsureSpace ensure_space(this);
+ EMIT(0x88);
+ emit_operand(src, dst);
+}
+
+
+void Assembler::mov_w(Register dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ EMIT(0x66);
+ EMIT(0x8B);
+ emit_operand(dst, src);
+}
+
+
+void Assembler::mov_w(const Operand& dst, Register src) {
+ EnsureSpace ensure_space(this);
+ EMIT(0x66);
+ EMIT(0x89);
+ emit_operand(src, dst);
+}
+
+
+void Assembler::mov_w(const Operand& dst, int16_t imm16) {
+ EnsureSpace ensure_space(this);
+ EMIT(0x66);
+ EMIT(0xC7);
+ emit_operand(eax, dst);
+ EMIT(static_cast<int8_t>(imm16 & 0xff));
+ EMIT(static_cast<int8_t>(imm16 >> 8));
+}
+
+
+void Assembler::mov(Register dst, int32_t imm32) {
+ EnsureSpace ensure_space(this);
+ EMIT(0xB8 | dst.code());
+ emit(imm32);
+}
+
+
+void Assembler::mov(Register dst, const Immediate& x) {
+ EnsureSpace ensure_space(this);
+ EMIT(0xB8 | dst.code());
+ emit(x);
+}
+
+
+void Assembler::mov(Register dst, Handle<Object> handle) {
+ EnsureSpace ensure_space(this);
+ EMIT(0xB8 | dst.code());
+ emit(handle);
+}
+
+
+void Assembler::mov(Register dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ EMIT(0x8B);
+ emit_operand(dst, src);
+}
+
+
+void Assembler::mov(Register dst, Register src) {
+ EnsureSpace ensure_space(this);
+ EMIT(0x89);
+ EMIT(0xC0 | src.code() << 3 | dst.code());
+}
+
+
+void Assembler::mov(const Operand& dst, const Immediate& x) {
+ EnsureSpace ensure_space(this);
+ EMIT(0xC7);
+ emit_operand(eax, dst);
+ emit(x);
+}
+
+
+void Assembler::mov(const Operand& dst, Handle<Object> handle) {
+ EnsureSpace ensure_space(this);
+ EMIT(0xC7);
+ emit_operand(eax, dst);
+ emit(handle);
+}
+
+
+void Assembler::mov(const Operand& dst, Register src) {
+ EnsureSpace ensure_space(this);
+ EMIT(0x89);
+ emit_operand(src, dst);
+}
+
+
+void Assembler::movsx_b(Register dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ EMIT(0x0F);
+ EMIT(0xBE);
+ emit_operand(dst, src);
+}
+
+
+void Assembler::movsx_w(Register dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ EMIT(0x0F);
+ EMIT(0xBF);
+ emit_operand(dst, src);
+}
+
+
+void Assembler::movzx_b(Register dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ EMIT(0x0F);
+ EMIT(0xB6);
+ emit_operand(dst, src);
+}
+
+
+void Assembler::movzx_w(Register dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ EMIT(0x0F);
+ EMIT(0xB7);
+ emit_operand(dst, src);
+}
+
+
+void Assembler::cld() {
+ EnsureSpace ensure_space(this);
+ EMIT(0xFC);
+}
+
+
+void Assembler::rep_movs() {
+ EnsureSpace ensure_space(this);
+ EMIT(0xF3);
+ EMIT(0xA5);
+}
+
+
+void Assembler::rep_stos() {
+ EnsureSpace ensure_space(this);
+ EMIT(0xF3);
+ EMIT(0xAB);
+}
+
+
+void Assembler::stos() {
+ EnsureSpace ensure_space(this);
+ EMIT(0xAB);
+}
+
+
+void Assembler::xchg(Register dst, Register src) {
+ EnsureSpace ensure_space(this);
+ if (src.is(eax) || dst.is(eax)) { // Single-byte encoding.
+ EMIT(0x90 | (src.is(eax) ? dst.code() : src.code()));
+ } else {
+ EMIT(0x87);
+ EMIT(0xC0 | src.code() << 3 | dst.code());
+ }
+}
+
+
+void Assembler::adc(Register dst, int32_t imm32) {
+ EnsureSpace ensure_space(this);
+ emit_arith(2, Operand(dst), Immediate(imm32));
+}
+
+
+void Assembler::adc(Register dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ EMIT(0x13);
+ emit_operand(dst, src);
+}
+
+
+void Assembler::add(Register dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ EMIT(0x03);
+ emit_operand(dst, src);
+}
+
+
+void Assembler::add(const Operand& dst, Register src) {
+ EnsureSpace ensure_space(this);
+ EMIT(0x01);
+ emit_operand(src, dst);
+}
+
+
+void Assembler::add(const Operand& dst, const Immediate& x) {
+ ASSERT(reloc_info_writer.last_pc() != NULL);
+ EnsureSpace ensure_space(this);
+ emit_arith(0, dst, x);
+}
+
+
+void Assembler::and_(Register dst, int32_t imm32) {
+ and_(dst, Immediate(imm32));
+}
+
+
+void Assembler::and_(Register dst, const Immediate& x) {
+ EnsureSpace ensure_space(this);
+ emit_arith(4, Operand(dst), x);
+}
+
+
+void Assembler::and_(Register dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ EMIT(0x23);
+ emit_operand(dst, src);
+}
+
+
+void Assembler::and_(const Operand& dst, const Immediate& x) {
+ EnsureSpace ensure_space(this);
+ emit_arith(4, dst, x);
+}
+
+
+void Assembler::and_(const Operand& dst, Register src) {
+ EnsureSpace ensure_space(this);
+ EMIT(0x21);
+ emit_operand(src, dst);
+}
+
+
+void Assembler::cmpb(const Operand& op, int8_t imm8) {
+ EnsureSpace ensure_space(this);
+ if (op.is_reg(eax)) {
+ EMIT(0x3C);
+ } else {
+ EMIT(0x80);
+ emit_operand(edi, op); // edi == 7
+ }
+ EMIT(imm8);
+}
+
+
+void Assembler::cmpb(const Operand& op, Register reg) {
+ CHECK(reg.is_byte_register());
+ EnsureSpace ensure_space(this);
+ EMIT(0x38);
+ emit_operand(reg, op);
+}
+
+
+void Assembler::cmpb(Register reg, const Operand& op) {
+ CHECK(reg.is_byte_register());
+ EnsureSpace ensure_space(this);
+ EMIT(0x3A);
+ emit_operand(reg, op);
+}
+
+
+void Assembler::cmpw(const Operand& op, Immediate imm16) {
+ ASSERT(imm16.is_int16());
+ EnsureSpace ensure_space(this);
+ EMIT(0x66);
+ EMIT(0x81);
+ emit_operand(edi, op);
+ emit_w(imm16);
+}
+
+
+void Assembler::cmp(Register reg, int32_t imm32) {
+ EnsureSpace ensure_space(this);
+ emit_arith(7, Operand(reg), Immediate(imm32));
+}
+
+
+void Assembler::cmp(Register reg, Handle<Object> handle) {
+ EnsureSpace ensure_space(this);
+ emit_arith(7, Operand(reg), Immediate(handle));
+}
+
+
+void Assembler::cmp(Register reg, const Operand& op) {
+ EnsureSpace ensure_space(this);
+ EMIT(0x3B);
+ emit_operand(reg, op);
+}
+
+
+void Assembler::cmp(const Operand& op, const Immediate& imm) {
+ EnsureSpace ensure_space(this);
+ emit_arith(7, op, imm);
+}
+
+
+void Assembler::cmp(const Operand& op, Handle<Object> handle) {
+ EnsureSpace ensure_space(this);
+ emit_arith(7, op, Immediate(handle));
+}
+
+
+void Assembler::cmpb_al(const Operand& op) {
+ EnsureSpace ensure_space(this);
+ EMIT(0x38); // CMP r/m8, r8
+ emit_operand(eax, op); // eax has same code as register al.
+}
+
+
+void Assembler::cmpw_ax(const Operand& op) {
+ EnsureSpace ensure_space(this);
+ EMIT(0x66);
+ EMIT(0x39); // CMP r/m16, r16
+ emit_operand(eax, op); // eax has same code as register ax.
+}
+
+
+void Assembler::dec_b(Register dst) {
+ CHECK(dst.is_byte_register());
+ EnsureSpace ensure_space(this);
+ EMIT(0xFE);
+ EMIT(0xC8 | dst.code());
+}
+
+
+void Assembler::dec_b(const Operand& dst) {
+ EnsureSpace ensure_space(this);
+ EMIT(0xFE);
+ emit_operand(ecx, dst);
+}
+
+
+void Assembler::dec(Register dst) {
+ EnsureSpace ensure_space(this);
+ EMIT(0x48 | dst.code());
+}
+
+
+void Assembler::dec(const Operand& dst) {
+ EnsureSpace ensure_space(this);
+ EMIT(0xFF);
+ emit_operand(ecx, dst);
+}
+
+
+void Assembler::cdq() {
+ EnsureSpace ensure_space(this);
+ EMIT(0x99);
+}
+
+
+void Assembler::idiv(Register src) {
+ EnsureSpace ensure_space(this);
+ EMIT(0xF7);
+ EMIT(0xF8 | src.code());
+}
+
+
+void Assembler::imul(Register reg) {
+ EnsureSpace ensure_space(this);
+ EMIT(0xF7);
+ EMIT(0xE8 | reg.code());
+}
+
+
+void Assembler::imul(Register dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ EMIT(0x0F);
+ EMIT(0xAF);
+ emit_operand(dst, src);
+}
+
+
+void Assembler::imul(Register dst, Register src, int32_t imm32) {
+ EnsureSpace ensure_space(this);
+ if (is_int8(imm32)) {
+ EMIT(0x6B);
+ EMIT(0xC0 | dst.code() << 3 | src.code());
+ EMIT(imm32);
+ } else {
+ EMIT(0x69);
+ EMIT(0xC0 | dst.code() << 3 | src.code());
+ emit(imm32);
+ }
+}
+
+
+void Assembler::inc(Register dst) {
+ EnsureSpace ensure_space(this);
+ EMIT(0x40 | dst.code());
+}
+
+
+void Assembler::inc(const Operand& dst) {
+ EnsureSpace ensure_space(this);
+ EMIT(0xFF);
+ emit_operand(eax, dst);
+}
+
+
+void Assembler::lea(Register dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ EMIT(0x8D);
+ emit_operand(dst, src);
+}
+
+
+void Assembler::mul(Register src) {
+ EnsureSpace ensure_space(this);
+ EMIT(0xF7);
+ EMIT(0xE0 | src.code());
+}
+
+
+void Assembler::neg(Register dst) {
+ EnsureSpace ensure_space(this);
+ EMIT(0xF7);
+ EMIT(0xD8 | dst.code());
+}
+
+
+void Assembler::not_(Register dst) {
+ EnsureSpace ensure_space(this);
+ EMIT(0xF7);
+ EMIT(0xD0 | dst.code());
+}
+
+
+void Assembler::or_(Register dst, int32_t imm32) {
+ EnsureSpace ensure_space(this);
+ emit_arith(1, Operand(dst), Immediate(imm32));
+}
+
+
+void Assembler::or_(Register dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ EMIT(0x0B);
+ emit_operand(dst, src);
+}
+
+
+void Assembler::or_(const Operand& dst, const Immediate& x) {
+ EnsureSpace ensure_space(this);
+ emit_arith(1, dst, x);
+}
+
+
+void Assembler::or_(const Operand& dst, Register src) {
+ EnsureSpace ensure_space(this);
+ EMIT(0x09);
+ emit_operand(src, dst);
+}
+
+
+void Assembler::rcl(Register dst, uint8_t imm8) {
+ EnsureSpace ensure_space(this);
+ ASSERT(is_uint5(imm8)); // illegal shift count
+ if (imm8 == 1) {
+ EMIT(0xD1);
+ EMIT(0xD0 | dst.code());
+ } else {
+ EMIT(0xC1);
+ EMIT(0xD0 | dst.code());
+ EMIT(imm8);
+ }
+}
+
+
+void Assembler::rcr(Register dst, uint8_t imm8) {
+ EnsureSpace ensure_space(this);
+ ASSERT(is_uint5(imm8)); // illegal shift count
+ if (imm8 == 1) {
+ EMIT(0xD1);
+ EMIT(0xD8 | dst.code());
+ } else {
+ EMIT(0xC1);
+ EMIT(0xD8 | dst.code());
+ EMIT(imm8);
+ }
+}
+
+
+void Assembler::ror(Register dst, uint8_t imm8) {
+ EnsureSpace ensure_space(this);
+ ASSERT(is_uint5(imm8)); // illegal shift count
+ if (imm8 == 1) {
+ EMIT(0xD1);
+ EMIT(0xC8 | dst.code());
+ } else {
+ EMIT(0xC1);
+ EMIT(0xC8 | dst.code());
+ EMIT(imm8);
+ }
+}
+
+
+void Assembler::ror_cl(Register dst) {
+ EnsureSpace ensure_space(this);
+ EMIT(0xD3);
+ EMIT(0xC8 | dst.code());
+}
+
+
+void Assembler::sar(Register dst, uint8_t imm8) {
+ EnsureSpace ensure_space(this);
+ ASSERT(is_uint5(imm8)); // illegal shift count
+ if (imm8 == 1) {
+ EMIT(0xD1);
+ EMIT(0xF8 | dst.code());
+ } else {
+ EMIT(0xC1);
+ EMIT(0xF8 | dst.code());
+ EMIT(imm8);
+ }
+}
+
+
+void Assembler::sar_cl(Register dst) {
+ EnsureSpace ensure_space(this);
+ EMIT(0xD3);
+ EMIT(0xF8 | dst.code());
+}
+
+
+void Assembler::sbb(Register dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ EMIT(0x1B);
+ emit_operand(dst, src);
+}
+
+
+void Assembler::shld(Register dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ EMIT(0x0F);
+ EMIT(0xA5);
+ emit_operand(dst, src);
+}
+
+
+void Assembler::shl(Register dst, uint8_t imm8) {
+ EnsureSpace ensure_space(this);
+ ASSERT(is_uint5(imm8)); // illegal shift count
+ if (imm8 == 1) {
+ EMIT(0xD1);
+ EMIT(0xE0 | dst.code());
+ } else {
+ EMIT(0xC1);
+ EMIT(0xE0 | dst.code());
+ EMIT(imm8);
+ }
+}
+
+
+void Assembler::shl_cl(Register dst) {
+ EnsureSpace ensure_space(this);
+ EMIT(0xD3);
+ EMIT(0xE0 | dst.code());
+}
+
+
+void Assembler::shrd(Register dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ EMIT(0x0F);
+ EMIT(0xAD);
+ emit_operand(dst, src);
+}
+
+
+void Assembler::shr(Register dst, uint8_t imm8) {
+ EnsureSpace ensure_space(this);
+ ASSERT(is_uint5(imm8)); // illegal shift count
+ if (imm8 == 1) {
+ EMIT(0xD1);
+ EMIT(0xE8 | dst.code());
+ } else {
+ EMIT(0xC1);
+ EMIT(0xE8 | dst.code());
+ EMIT(imm8);
+ }
+}
+
+
+void Assembler::shr_cl(Register dst) {
+ EnsureSpace ensure_space(this);
+ EMIT(0xD3);
+ EMIT(0xE8 | dst.code());
+}
+
+
+void Assembler::sub(const Operand& dst, const Immediate& x) {
+ EnsureSpace ensure_space(this);
+ emit_arith(5, dst, x);
+}
+
+
+void Assembler::sub(Register dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ EMIT(0x2B);
+ emit_operand(dst, src);
+}
+
+
+void Assembler::sub(const Operand& dst, Register src) {
+ EnsureSpace ensure_space(this);
+ EMIT(0x29);
+ emit_operand(src, dst);
+}
+
+
+void Assembler::test(Register reg, const Immediate& imm) {
+ if (RelocInfo::IsNone(imm.rmode_) && is_uint8(imm.x_)) {
+ test_b(reg, imm.x_);
+ return;
+ }
+
+ EnsureSpace ensure_space(this);
+ // This is not using emit_arith because test doesn't support
+ // sign-extension of 8-bit operands.
+ if (reg.is(eax)) {
+ EMIT(0xA9);
+ } else {
+ EMIT(0xF7);
+ EMIT(0xC0 | reg.code());
+ }
+ emit(imm);
+}
+
+
+void Assembler::test(Register reg, const Operand& op) {
+ EnsureSpace ensure_space(this);
+ EMIT(0x85);
+ emit_operand(reg, op);
+}
+
+
+void Assembler::test_b(Register reg, const Operand& op) {
+ CHECK(reg.is_byte_register());
+ EnsureSpace ensure_space(this);
+ EMIT(0x84);
+ emit_operand(reg, op);
+}
+
+
+void Assembler::test(const Operand& op, const Immediate& imm) {
+ if (op.is_reg_only()) {
+ test(op.reg(), imm);
+ return;
+ }
+ if (RelocInfo::IsNone(imm.rmode_) && is_uint8(imm.x_)) {
+ return test_b(op, imm.x_);
+ }
+ EnsureSpace ensure_space(this);
+ EMIT(0xF7);
+ emit_operand(eax, op);
+ emit(imm);
+}
+
+
+void Assembler::test_b(Register reg, uint8_t imm8) {
+ EnsureSpace ensure_space(this);
+ // Only use test against byte for registers that have a byte
+ // variant: eax, ebx, ecx, and edx.
+ if (reg.is(eax)) {
+ EMIT(0xA8);
+ EMIT(imm8);
+ } else if (reg.is_byte_register()) {
+ emit_arith_b(0xF6, 0xC0, reg, imm8);
+ } else {
+ EMIT(0xF7);
+ EMIT(0xC0 | reg.code());
+ emit(imm8);
+ }
+}
+
+
+void Assembler::test_b(const Operand& op, uint8_t imm8) {
+ if (op.is_reg_only()) {
+ test_b(op.reg(), imm8);
+ return;
+ }
+ EnsureSpace ensure_space(this);
+ EMIT(0xF6);
+ emit_operand(eax, op);
+ EMIT(imm8);
+}
+
+
+void Assembler::xor_(Register dst, int32_t imm32) {
+ EnsureSpace ensure_space(this);
+ emit_arith(6, Operand(dst), Immediate(imm32));
+}
+
+
+void Assembler::xor_(Register dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ EMIT(0x33);
+ emit_operand(dst, src);
+}
+
+
+void Assembler::xor_(const Operand& dst, Register src) {
+ EnsureSpace ensure_space(this);
+ EMIT(0x31);
+ emit_operand(src, dst);
+}
+
+
+void Assembler::xor_(const Operand& dst, const Immediate& x) {
+ EnsureSpace ensure_space(this);
+ emit_arith(6, dst, x);
+}
+
+
+void Assembler::bt(const Operand& dst, Register src) {
+ EnsureSpace ensure_space(this);
+ EMIT(0x0F);
+ EMIT(0xA3);
+ emit_operand(src, dst);
+}
+
+
+void Assembler::bts(const Operand& dst, Register src) {
+ EnsureSpace ensure_space(this);
+ EMIT(0x0F);
+ EMIT(0xAB);
+ emit_operand(src, dst);
+}
+
+
+void Assembler::bsr(Register dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ EMIT(0x0F);
+ EMIT(0xBD);
+ emit_operand(dst, src);
+}
+
+
+void Assembler::hlt() {
+ EnsureSpace ensure_space(this);
+ EMIT(0xF4);
+}
+
+
+void Assembler::int3() {
+ EnsureSpace ensure_space(this);
+ EMIT(0xCC);
+}
+
+
+void Assembler::nop() {
+ EnsureSpace ensure_space(this);
+ EMIT(0x90);
+}
+
+
+void Assembler::ret(int imm16) {
+ EnsureSpace ensure_space(this);
+ ASSERT(is_uint16(imm16));
+ if (imm16 == 0) {
+ EMIT(0xC3);
+ } else {
+ EMIT(0xC2);
+ EMIT(imm16 & 0xFF);
+ EMIT((imm16 >> 8) & 0xFF);
+ }
+}
+
+
+// Labels refer to positions in the (to be) generated code.
+// There are bound, linked, and unused labels.
+//
+// Bound labels refer to known positions in the already
+// generated code. pos() is the position the label refers to.
+//
+// Linked labels refer to unknown positions in the code
+// to be generated; pos() is the position of the 32bit
+// Displacement of the last instruction using the label.
+
+
+void Assembler::print(Label* L) {
+ if (L->is_unused()) {
+ PrintF("unused label\n");
+ } else if (L->is_bound()) {
+ PrintF("bound label to %d\n", L->pos());
+ } else if (L->is_linked()) {
+ Label l = *L;
+ PrintF("unbound label");
+ while (l.is_linked()) {
+ Displacement disp = disp_at(&l);
+ PrintF("@ %d ", l.pos());
+ disp.print();
+ PrintF("\n");
+ disp.next(&l);
+ }
+ } else {
+ PrintF("label in inconsistent state (pos = %d)\n", L->pos_);
+ }
+}
+
+
+void Assembler::bind_to(Label* L, int pos) {
+ EnsureSpace ensure_space(this);
+ ASSERT(0 <= pos && pos <= pc_offset()); // must have a valid binding position
+ while (L->is_linked()) {
+ Displacement disp = disp_at(L);
+ int fixup_pos = L->pos();
+ if (disp.type() == Displacement::CODE_RELATIVE) {
+ // Relative to Code* heap object pointer.
+ long_at_put(fixup_pos, pos + Code::kHeaderSize - kHeapObjectTag);
+ } else {
+ if (disp.type() == Displacement::UNCONDITIONAL_JUMP) {
+ ASSERT(byte_at(fixup_pos - 1) == 0xE9); // jmp expected
+ }
+ // Relative address, relative to point after address.
+ int imm32 = pos - (fixup_pos + sizeof(int32_t));
+ long_at_put(fixup_pos, imm32);
+ }
+ disp.next(L);
+ }
+ while (L->is_near_linked()) {
+ int fixup_pos = L->near_link_pos();
+ int offset_to_next =
+ static_cast<int>(*reinterpret_cast<int8_t*>(addr_at(fixup_pos)));
+ ASSERT(offset_to_next <= 0);
+ // Relative address, relative to point after address.
+ int disp = pos - fixup_pos - sizeof(int8_t);
+ CHECK(0 <= disp && disp <= 127);
+ set_byte_at(fixup_pos, disp);
+ if (offset_to_next < 0) {
+ L->link_to(fixup_pos + offset_to_next, Label::kNear);
+ } else {
+ L->UnuseNear();
+ }
+ }
+ L->bind_to(pos);
+}
+
+
+void Assembler::bind(Label* L) {
+ EnsureSpace ensure_space(this);
+ ASSERT(!L->is_bound()); // label can only be bound once
+ bind_to(L, pc_offset());
+}
+
+
+void Assembler::call(Label* L) {
+ positions_recorder()->WriteRecordedPositions();
+ EnsureSpace ensure_space(this);
+ if (L->is_bound()) {
+ const int long_size = 5;
+ int offs = L->pos() - pc_offset();
+ ASSERT(offs <= 0);
+ // 1110 1000 #32-bit disp.
+ EMIT(0xE8);
+ emit(offs - long_size);
+ } else {
+ // 1110 1000 #32-bit disp.
+ EMIT(0xE8);
+ emit_disp(L, Displacement::OTHER);
+ }
+}
+
+
+void Assembler::call(byte* entry, RelocInfo::Mode rmode) {
+ positions_recorder()->WriteRecordedPositions();
+ EnsureSpace ensure_space(this);
+ ASSERT(!RelocInfo::IsCodeTarget(rmode));
+ EMIT(0xE8);
+ if (RelocInfo::IsRuntimeEntry(rmode)) {
+ emit(reinterpret_cast<uint32_t>(entry), rmode);
+ } else {
+ emit(entry - (pc_ + sizeof(int32_t)), rmode);
+ }
+}
+
+
+int Assembler::CallSize(const Operand& adr) {
+ // Call size is 1 (opcode) + adr.len_ (operand).
+ return 1 + adr.len_;
+}
+
+
+void Assembler::call(const Operand& adr) {
+ positions_recorder()->WriteRecordedPositions();
+ EnsureSpace ensure_space(this);
+ EMIT(0xFF);
+ emit_operand(edx, adr);
+}
+
+
+int Assembler::CallSize(Handle<Code> code, RelocInfo::Mode rmode) {
+ return 1 /* EMIT */ + sizeof(uint32_t) /* emit */;
+}
+
+
+void Assembler::call(Handle<Code> code,
+ RelocInfo::Mode rmode,
+ TypeFeedbackId ast_id) {
+ positions_recorder()->WriteRecordedPositions();
+ EnsureSpace ensure_space(this);
+ ASSERT(RelocInfo::IsCodeTarget(rmode)
+ || rmode == RelocInfo::CODE_AGE_SEQUENCE);
+ EMIT(0xE8);
+ emit(code, rmode, ast_id);
+}
+
+
+void Assembler::jmp(Label* L, Label::Distance distance) {
+ EnsureSpace ensure_space(this);
+ if (L->is_bound()) {
+ const int short_size = 2;
+ const int long_size = 5;
+ int offs = L->pos() - pc_offset();
+ ASSERT(offs <= 0);
+ if (is_int8(offs - short_size)) {
+ // 1110 1011 #8-bit disp.
+ EMIT(0xEB);
+ EMIT((offs - short_size) & 0xFF);
+ } else {
+ // 1110 1001 #32-bit disp.
+ EMIT(0xE9);
+ emit(offs - long_size);
+ }
+ } else if (distance == Label::kNear) {
+ EMIT(0xEB);
+ emit_near_disp(L);
+ } else {
+ // 1110 1001 #32-bit disp.
+ EMIT(0xE9);
+ emit_disp(L, Displacement::UNCONDITIONAL_JUMP);
+ }
+}
+
+
+void Assembler::jmp(byte* entry, RelocInfo::Mode rmode) {
+ EnsureSpace ensure_space(this);
+ ASSERT(!RelocInfo::IsCodeTarget(rmode));
+ EMIT(0xE9);
+ if (RelocInfo::IsRuntimeEntry(rmode)) {
+ emit(reinterpret_cast<uint32_t>(entry), rmode);
+ } else {
+ emit(entry - (pc_ + sizeof(int32_t)), rmode);
+ }
+}
+
+
+void Assembler::jmp(const Operand& adr) {
+ EnsureSpace ensure_space(this);
+ EMIT(0xFF);
+ emit_operand(esp, adr);
+}
+
+
+void Assembler::jmp(Handle<Code> code, RelocInfo::Mode rmode) {
+ EnsureSpace ensure_space(this);
+ ASSERT(RelocInfo::IsCodeTarget(rmode));
+ EMIT(0xE9);
+ emit(code, rmode);
+}
+
+
+void Assembler::j(Condition cc, Label* L, Label::Distance distance) {
+ EnsureSpace ensure_space(this);
+ ASSERT(0 <= cc && static_cast<int>(cc) < 16);
+ if (L->is_bound()) {
+ const int short_size = 2;
+ const int long_size = 6;
+ int offs = L->pos() - pc_offset();
+ ASSERT(offs <= 0);
+ if (is_int8(offs - short_size)) {
+ // 0111 tttn #8-bit disp
+ EMIT(0x70 | cc);
+ EMIT((offs - short_size) & 0xFF);
+ } else {
+ // 0000 1111 1000 tttn #32-bit disp
+ EMIT(0x0F);
+ EMIT(0x80 | cc);
+ emit(offs - long_size);
+ }
+ } else if (distance == Label::kNear) {
+ EMIT(0x70 | cc);
+ emit_near_disp(L);
+ } else {
+ // 0000 1111 1000 tttn #32-bit disp
+    // Note: we could eliminate conditional jumps to this jump if the
+    // condition is the same; however, that seems to be a rather unlikely case.
+ EMIT(0x0F);
+ EMIT(0x80 | cc);
+ emit_disp(L, Displacement::OTHER);
+ }
+}
+
+
+void Assembler::j(Condition cc, byte* entry, RelocInfo::Mode rmode) {
+ EnsureSpace ensure_space(this);
+ ASSERT((0 <= cc) && (static_cast<int>(cc) < 16));
+ // 0000 1111 1000 tttn #32-bit disp.
+ EMIT(0x0F);
+ EMIT(0x80 | cc);
+ if (RelocInfo::IsRuntimeEntry(rmode)) {
+ emit(reinterpret_cast<uint32_t>(entry), rmode);
+ } else {
+ emit(entry - (pc_ + sizeof(int32_t)), rmode);
+ }
+}
+
+
+void Assembler::j(Condition cc, Handle<Code> code) {
+ EnsureSpace ensure_space(this);
+ // 0000 1111 1000 tttn #32-bit disp
+ EMIT(0x0F);
+ EMIT(0x80 | cc);
+ emit(code, RelocInfo::CODE_TARGET);
+}
+
+
+// FPU instructions.
+
+void Assembler::fld(int i) {
+ EnsureSpace ensure_space(this);
+ emit_farith(0xD9, 0xC0, i);
+}
+
+
+void Assembler::fstp(int i) {
+ EnsureSpace ensure_space(this);
+ emit_farith(0xDD, 0xD8, i);
+}
+
+
+void Assembler::fld1() {
+ EnsureSpace ensure_space(this);
+ EMIT(0xD9);
+ EMIT(0xE8);
+}
+
+
+void Assembler::fldpi() {
+ EnsureSpace ensure_space(this);
+ EMIT(0xD9);
+ EMIT(0xEB);
+}
+
+
+void Assembler::fldz() {
+ EnsureSpace ensure_space(this);
+ EMIT(0xD9);
+ EMIT(0xEE);
+}
+
+
+void Assembler::fldln2() {
+ EnsureSpace ensure_space(this);
+ EMIT(0xD9);
+ EMIT(0xED);
+}
+
+
+void Assembler::fld_s(const Operand& adr) {
+ EnsureSpace ensure_space(this);
+ EMIT(0xD9);
+ emit_operand(eax, adr);
+}
+
+
+void Assembler::fld_d(const Operand& adr) {
+ EnsureSpace ensure_space(this);
+ EMIT(0xDD);
+ emit_operand(eax, adr);
+}
+
+
+void Assembler::fstp_s(const Operand& adr) {
+ EnsureSpace ensure_space(this);
+ EMIT(0xD9);
+ emit_operand(ebx, adr);
+}
+
+
+void Assembler::fst_s(const Operand& adr) {
+ EnsureSpace ensure_space(this);
+ EMIT(0xD9);
+ emit_operand(edx, adr);
+}
+
+
+void Assembler::fstp_d(const Operand& adr) {
+ EnsureSpace ensure_space(this);
+ EMIT(0xDD);
+ emit_operand(ebx, adr);
+}
+
+
+void Assembler::fst_d(const Operand& adr) {
+ EnsureSpace ensure_space(this);
+ EMIT(0xDD);
+ emit_operand(edx, adr);
+}
+
+
+void Assembler::fild_s(const Operand& adr) {
+ EnsureSpace ensure_space(this);
+ EMIT(0xDB);
+ emit_operand(eax, adr);
+}
+
+
+void Assembler::fild_d(const Operand& adr) {
+ EnsureSpace ensure_space(this);
+ EMIT(0xDF);
+ emit_operand(ebp, adr);
+}
+
+
+void Assembler::fistp_s(const Operand& adr) {
+ EnsureSpace ensure_space(this);
+ EMIT(0xDB);
+ emit_operand(ebx, adr);
+}
+
+
+void Assembler::fisttp_s(const Operand& adr) {
+ ASSERT(IsEnabled(SSE3));
+ EnsureSpace ensure_space(this);
+ EMIT(0xDB);
+ emit_operand(ecx, adr);
+}
+
+
+void Assembler::fisttp_d(const Operand& adr) {
+ ASSERT(IsEnabled(SSE3));
+ EnsureSpace ensure_space(this);
+ EMIT(0xDD);
+ emit_operand(ecx, adr);
+}
+
+
+void Assembler::fist_s(const Operand& adr) {
+ EnsureSpace ensure_space(this);
+ EMIT(0xDB);
+ emit_operand(edx, adr);
+}
+
+
+void Assembler::fistp_d(const Operand& adr) {
+ EnsureSpace ensure_space(this);
+ EMIT(0xDF);
+ emit_operand(edi, adr);
+}
+
+
+void Assembler::fabs() {
+ EnsureSpace ensure_space(this);
+ EMIT(0xD9);
+ EMIT(0xE1);
+}
+
+
+void Assembler::fchs() {
+ EnsureSpace ensure_space(this);
+ EMIT(0xD9);
+ EMIT(0xE0);
+}
+
+
+void Assembler::fcos() {
+ EnsureSpace ensure_space(this);
+ EMIT(0xD9);
+ EMIT(0xFF);
+}
+
+
+void Assembler::fsin() {
+ EnsureSpace ensure_space(this);
+ EMIT(0xD9);
+ EMIT(0xFE);
+}
+
+
+void Assembler::fptan() {
+ EnsureSpace ensure_space(this);
+ EMIT(0xD9);
+ EMIT(0xF2);
+}
+
+
+void Assembler::fyl2x() {
+ EnsureSpace ensure_space(this);
+ EMIT(0xD9);
+ EMIT(0xF1);
+}
+
+
+void Assembler::f2xm1() {
+ EnsureSpace ensure_space(this);
+ EMIT(0xD9);
+ EMIT(0xF0);
+}
+
+
+void Assembler::fscale() {
+ EnsureSpace ensure_space(this);
+ EMIT(0xD9);
+ EMIT(0xFD);
+}
+
+
+void Assembler::fninit() {
+ EnsureSpace ensure_space(this);
+ EMIT(0xDB);
+ EMIT(0xE3);
+}
+
+
+void Assembler::fadd(int i) {
+ EnsureSpace ensure_space(this);
+ emit_farith(0xDC, 0xC0, i);
+}
+
+
+void Assembler::fadd_i(int i) {
+ EnsureSpace ensure_space(this);
+ emit_farith(0xD8, 0xC0, i);
+}
+
+
+void Assembler::fsub(int i) {
+ EnsureSpace ensure_space(this);
+ emit_farith(0xDC, 0xE8, i);
+}
+
+
+void Assembler::fsub_i(int i) {
+ EnsureSpace ensure_space(this);
+ emit_farith(0xD8, 0xE0, i);
+}
+
+
+void Assembler::fisub_s(const Operand& adr) {
+ EnsureSpace ensure_space(this);
+ EMIT(0xDA);
+ emit_operand(esp, adr);
+}
+
+
+void Assembler::fmul_i(int i) {
+ EnsureSpace ensure_space(this);
+ emit_farith(0xD8, 0xC8, i);
+}
+
+
+void Assembler::fmul(int i) {
+ EnsureSpace ensure_space(this);
+ emit_farith(0xDC, 0xC8, i);
+}
+
+
+void Assembler::fdiv(int i) {
+ EnsureSpace ensure_space(this);
+ emit_farith(0xDC, 0xF8, i);
+}
+
+
+void Assembler::fdiv_i(int i) {
+ EnsureSpace ensure_space(this);
+ emit_farith(0xD8, 0xF0, i);
+}
+
+
+void Assembler::faddp(int i) {
+ EnsureSpace ensure_space(this);
+ emit_farith(0xDE, 0xC0, i);
+}
+
+
+void Assembler::fsubp(int i) {
+ EnsureSpace ensure_space(this);
+ emit_farith(0xDE, 0xE8, i);
+}
+
+
+void Assembler::fsubrp(int i) {
+ EnsureSpace ensure_space(this);
+ emit_farith(0xDE, 0xE0, i);
+}
+
+
+void Assembler::fmulp(int i) {
+ EnsureSpace ensure_space(this);
+ emit_farith(0xDE, 0xC8, i);
+}
+
+
+void Assembler::fdivp(int i) {
+ EnsureSpace ensure_space(this);
+ emit_farith(0xDE, 0xF8, i);
+}
+
+
+void Assembler::fprem() {
+ EnsureSpace ensure_space(this);
+ EMIT(0xD9);
+ EMIT(0xF8);
+}
+
+
+void Assembler::fprem1() {
+ EnsureSpace ensure_space(this);
+ EMIT(0xD9);
+ EMIT(0xF5);
+}
+
+
+void Assembler::fxch(int i) {
+ EnsureSpace ensure_space(this);
+ emit_farith(0xD9, 0xC8, i);
+}
+
+
+void Assembler::fincstp() {
+ EnsureSpace ensure_space(this);
+ EMIT(0xD9);
+ EMIT(0xF7);
+}
+
+
+void Assembler::ffree(int i) {
+ EnsureSpace ensure_space(this);
+ emit_farith(0xDD, 0xC0, i);
+}
+
+
+void Assembler::ftst() {
+ EnsureSpace ensure_space(this);
+ EMIT(0xD9);
+ EMIT(0xE4);
+}
+
+
+void Assembler::fucomp(int i) {
+ EnsureSpace ensure_space(this);
+ emit_farith(0xDD, 0xE8, i);
+}
+
+
+void Assembler::fucompp() {
+ EnsureSpace ensure_space(this);
+ EMIT(0xDA);
+ EMIT(0xE9);
+}
+
+
+void Assembler::fucomi(int i) {
+ EnsureSpace ensure_space(this);
+ EMIT(0xDB);
+ EMIT(0xE8 + i);
+}
+
+
+void Assembler::fucomip() {
+ EnsureSpace ensure_space(this);
+ EMIT(0xDF);
+ EMIT(0xE9);
+}
+
+
+void Assembler::fcompp() {
+ EnsureSpace ensure_space(this);
+ EMIT(0xDE);
+ EMIT(0xD9);
+}
+
+
+void Assembler::fnstsw_ax() {
+ EnsureSpace ensure_space(this);
+ EMIT(0xDF);
+ EMIT(0xE0);
+}
+
+
+void Assembler::fwait() {
+ EnsureSpace ensure_space(this);
+ EMIT(0x9B);
+}
+
+
+void Assembler::frndint() {
+ EnsureSpace ensure_space(this);
+ EMIT(0xD9);
+ EMIT(0xFC);
+}
+
+
+void Assembler::fnclex() {
+ EnsureSpace ensure_space(this);
+ EMIT(0xDB);
+ EMIT(0xE2);
+}
+
+
+void Assembler::sahf() {
+ EnsureSpace ensure_space(this);
+ EMIT(0x9E);
+}
+
+
+void Assembler::setcc(Condition cc, Register reg) {
+ ASSERT(reg.is_byte_register());
+ EnsureSpace ensure_space(this);
+ EMIT(0x0F);
+ EMIT(0x90 | cc);
+ EMIT(0xC0 | reg.code());
+}
+
+
+void Assembler::Print() {
+ Disassembler::Decode(isolate(), stdout, buffer_, pc_);
+}
+
+
+void Assembler::RecordJSReturn() {
+ positions_recorder()->WriteRecordedPositions();
+ EnsureSpace ensure_space(this);
+ RecordRelocInfo(RelocInfo::JS_RETURN);
+}
+
+
+void Assembler::RecordDebugBreakSlot() {
+ positions_recorder()->WriteRecordedPositions();
+ EnsureSpace ensure_space(this);
+ RecordRelocInfo(RelocInfo::DEBUG_BREAK_SLOT);
+}
+
+
+void Assembler::RecordComment(const char* msg, bool force) {
+ if (FLAG_code_comments || force) {
+ EnsureSpace ensure_space(this);
+ RecordRelocInfo(RelocInfo::COMMENT, reinterpret_cast<intptr_t>(msg));
+ }
+}
+
+
+void Assembler::GrowBuffer() {
+ ASSERT(buffer_overflow());
+ if (!own_buffer_) FATAL("external code buffer is too small");
+
+ // Compute new buffer size.
+ CodeDesc desc; // the new buffer
+ if (buffer_size_ < 4*KB) {
+ desc.buffer_size = 4*KB;
+ } else {
+ desc.buffer_size = 2*buffer_size_;
+ }
+  // Some internal data structures overflow for very large buffers,
+  // so kMaximalBufferSize must be kept small enough to avoid that.
+ if ((desc.buffer_size > kMaximalBufferSize) ||
+ (desc.buffer_size > isolate()->heap()->MaxOldGenerationSize())) {
+ V8::FatalProcessOutOfMemory("Assembler::GrowBuffer");
+ }
+
+ // Set up new buffer.
+ desc.buffer = NewArray<byte>(desc.buffer_size);
+ desc.instr_size = pc_offset();
+ desc.reloc_size = (buffer_ + buffer_size_) - (reloc_info_writer.pos());
+
+  // Clear the buffer in debug mode. Fill it with 'int3' instructions so
+  // that executing uninitialized code traps immediately.
+#ifdef DEBUG
+ memset(desc.buffer, 0xCC, desc.buffer_size);
+#endif
+
+ // Copy the data.
+ int pc_delta = desc.buffer - buffer_;
+ int rc_delta = (desc.buffer + desc.buffer_size) - (buffer_ + buffer_size_);
+ OS::MemMove(desc.buffer, buffer_, desc.instr_size);
+ OS::MemMove(rc_delta + reloc_info_writer.pos(),
+ reloc_info_writer.pos(), desc.reloc_size);
+
+ // Switch buffers.
+ if (isolate()->assembler_spare_buffer() == NULL &&
+ buffer_size_ == kMinimalBufferSize) {
+ isolate()->set_assembler_spare_buffer(buffer_);
+ } else {
+ DeleteArray(buffer_);
+ }
+ buffer_ = desc.buffer;
+ buffer_size_ = desc.buffer_size;
+ pc_ += pc_delta;
+ reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta,
+ reloc_info_writer.last_pc() + pc_delta);
+
+  // Relocate internal references.
+ for (RelocIterator it(desc); !it.done(); it.next()) {
+ RelocInfo::Mode rmode = it.rinfo()->rmode();
+ if (rmode == RelocInfo::INTERNAL_REFERENCE) {
+ int32_t* p = reinterpret_cast<int32_t*>(it.rinfo()->pc());
+ if (*p != 0) { // 0 means uninitialized.
+ *p += pc_delta;
+ }
+ }
+ }
+
+ ASSERT(!buffer_overflow());
+}
+
+
+void Assembler::emit_arith_b(int op1, int op2, Register dst, int imm8) {
+ ASSERT(is_uint8(op1) && is_uint8(op2)); // wrong opcode
+ ASSERT(is_uint8(imm8));
+ ASSERT((op1 & 0x01) == 0); // should be 8bit operation
+ EMIT(op1);
+ EMIT(op2 | dst.code());
+ EMIT(imm8);
+}
+
+
+void Assembler::emit_arith(int sel, Operand dst, const Immediate& x) {
+ ASSERT((0 <= sel) && (sel <= 7));
+ Register ireg = { sel };
+ if (x.is_int8()) {
+ EMIT(0x83); // using a sign-extended 8-bit immediate.
+ emit_operand(ireg, dst);
+ EMIT(x.x_ & 0xFF);
+ } else if (dst.is_reg(eax)) {
+ EMIT((sel << 3) | 0x05); // short form if the destination is eax.
+ emit(x);
+ } else {
+ EMIT(0x81); // using a literal 32-bit immediate.
+ emit_operand(ireg, dst);
+ emit(x);
+ }
+}
+
+
+void Assembler::emit_operand(Register reg, const Operand& adr) {
+ const unsigned length = adr.len_;
+ ASSERT(length > 0);
+
+ // Emit updated ModRM byte containing the given register.
+ pc_[0] = (adr.buf_[0] & ~0x38) | (reg.code() << 3);
+
+ // Emit the rest of the encoded operand.
+ for (unsigned i = 1; i < length; i++) pc_[i] = adr.buf_[i];
+ pc_ += length;
+
+ // Emit relocation information if necessary.
+ if (length >= sizeof(int32_t) && !RelocInfo::IsNone(adr.rmode_)) {
+ pc_ -= sizeof(int32_t); // pc_ must be *at* disp32
+ RecordRelocInfo(adr.rmode_);
+ pc_ += sizeof(int32_t);
+ }
+}
+
+
+void Assembler::emit_farith(int b1, int b2, int i) {
+ ASSERT(is_uint8(b1) && is_uint8(b2)); // wrong opcode
+ ASSERT(0 <= i && i < 8); // illegal stack offset
+ EMIT(b1);
+ EMIT(b2 + i);
+}
+
+
+void Assembler::db(uint8_t data) {
+ EnsureSpace ensure_space(this);
+ EMIT(data);
+}
+
+
+void Assembler::dd(uint32_t data) {
+ EnsureSpace ensure_space(this);
+ emit(data);
+}
+
+
+void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
+ ASSERT(!RelocInfo::IsNone(rmode));
+ // Don't record external references unless the heap will be serialized.
+ if (rmode == RelocInfo::EXTERNAL_REFERENCE &&
+ !serializer_enabled() && !emit_debug_code()) {
+ return;
+ }
+ RelocInfo rinfo(pc_, rmode, data, NULL);
+ reloc_info_writer.Write(&rinfo);
+}
+
+
+Handle<ConstantPoolArray> Assembler::NewConstantPool(Isolate* isolate) {
+ // No out-of-line constant pool support.
+ ASSERT(!FLAG_enable_ool_constant_pool);
+ return isolate->factory()->empty_constant_pool_array();
+}
+
+
+void Assembler::PopulateConstantPool(ConstantPoolArray* constant_pool) {
+ // No out-of-line constant pool support.
+ ASSERT(!FLAG_enable_ool_constant_pool);
+ return;
+}
+
+
+#ifdef GENERATED_CODE_COVERAGE
+static FILE* coverage_log = NULL;
+
+
+static void InitCoverageLog() {
+ char* file_name = getenv("V8_GENERATED_CODE_COVERAGE_LOG");
+ if (file_name != NULL) {
+ coverage_log = fopen(file_name, "aw+");
+ }
+}
+
+
+void LogGeneratedCodeCoverage(const char* file_line) {
+ const char* return_address = (&file_line)[-1];
+ char* push_insn = const_cast<char*>(return_address - 12);
+ push_insn[0] = 0xeb; // Relative branch insn.
+ push_insn[1] = 13; // Skip over coverage insns.
+ if (coverage_log != NULL) {
+ fprintf(coverage_log, "%s\n", file_line);
+ fflush(coverage_log);
+ }
+}
+
+#endif
+
+} } // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_X87
--- /dev/null
+// Copyright (c) 1994-2006 Sun Microsystems Inc.
+// All Rights Reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// - Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// - Redistribution in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// - Neither the name of Sun Microsystems or the names of contributors may
+// be used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
+// IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// The original source code covered by the above license above has been
+// modified significantly by Google Inc.
+// Copyright 2011 the V8 project authors. All rights reserved.
+
+// A light-weight IA32 Assembler.
+
+#ifndef V8_X87_ASSEMBLER_X87_H_
+#define V8_X87_ASSEMBLER_X87_H_
+
+#include "isolate.h"
+#include "serialize.h"
+
+namespace v8 {
+namespace internal {
+
+// CPU Registers.
+//
+// 1) We would prefer to use an enum, but enum values are assignment-
+// compatible with int, which has caused code-generation bugs.
+//
+// 2) We would prefer to use a class instead of a struct but we don't like
+// the register initialization to depend on the particular initialization
+// order (which appears to be different on OS X, Linux, and Windows for the
+// installed versions of C++ we tried). Using a struct permits C-style
+// "initialization". Also, the Register objects cannot be const as this
+// forces initialization stubs in MSVC, making us dependent on initialization
+// order.
+//
+// 3) By not using an enum, we are possibly preventing the compiler from
+// doing certain constant folds, which may significantly reduce the
+// code generated for some assembly instructions (because they boil down
+// to a few constants). If this is a problem, we could change the code
+// such that we use an enum in optimized mode, and the struct in debug
+// mode. This way we get the compile-time error checking in debug mode
+// and best performance in optimized code.
+//
+struct Register {
+ static const int kMaxNumAllocatableRegisters = 6;
+ static int NumAllocatableRegisters() {
+ return kMaxNumAllocatableRegisters;
+ }
+ static const int kNumRegisters = 8;
+
+ static inline const char* AllocationIndexToString(int index);
+
+ static inline int ToAllocationIndex(Register reg);
+
+ static inline Register FromAllocationIndex(int index);
+
+ static Register from_code(int code) {
+ ASSERT(code >= 0);
+ ASSERT(code < kNumRegisters);
+ Register r = { code };
+ return r;
+ }
+ bool is_valid() const { return 0 <= code_ && code_ < kNumRegisters; }
+ bool is(Register reg) const { return code_ == reg.code_; }
+ // eax, ebx, ecx and edx are byte registers, the rest are not.
+ bool is_byte_register() const { return code_ <= 3; }
+ int code() const {
+ ASSERT(is_valid());
+ return code_;
+ }
+ int bit() const {
+ ASSERT(is_valid());
+ return 1 << code_;
+ }
+
+ // Unfortunately we can't make this private in a struct.
+ int code_;
+};
+
+const int kRegister_eax_Code = 0;
+const int kRegister_ecx_Code = 1;
+const int kRegister_edx_Code = 2;
+const int kRegister_ebx_Code = 3;
+const int kRegister_esp_Code = 4;
+const int kRegister_ebp_Code = 5;
+const int kRegister_esi_Code = 6;
+const int kRegister_edi_Code = 7;
+const int kRegister_no_reg_Code = -1;
+
+const Register eax = { kRegister_eax_Code };
+const Register ecx = { kRegister_ecx_Code };
+const Register edx = { kRegister_edx_Code };
+const Register ebx = { kRegister_ebx_Code };
+const Register esp = { kRegister_esp_Code };
+const Register ebp = { kRegister_ebp_Code };
+const Register esi = { kRegister_esi_Code };
+const Register edi = { kRegister_edi_Code };
+const Register no_reg = { kRegister_no_reg_Code };
+
+
+inline const char* Register::AllocationIndexToString(int index) {
+ ASSERT(index >= 0 && index < kMaxNumAllocatableRegisters);
+ // This is the mapping of allocation indices to registers.
+ const char* const kNames[] = { "eax", "ecx", "edx", "ebx", "esi", "edi" };
+ return kNames[index];
+}
+
+
+inline int Register::ToAllocationIndex(Register reg) {
+ ASSERT(reg.is_valid() && !reg.is(esp) && !reg.is(ebp));
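+  // esp and ebp are not allocatable, so esi (code 6) and edi (code 7) map
+  // to allocation indices 4 and 5, while eax..ebx keep their codes 0..3.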
+ return (reg.code() >= 6) ? reg.code() - 2 : reg.code();
+}
+
+
+inline Register Register::FromAllocationIndex(int index) {
+ ASSERT(index >= 0 && index < kMaxNumAllocatableRegisters);
+ return (index >= 4) ? from_code(index + 2) : from_code(index);
+}
+
+
+struct X87Register {
+ static const int kMaxNumAllocatableRegisters = 8;
+ static const int kMaxNumRegisters = 8;
+ static int NumAllocatableRegisters() {
+ return kMaxNumAllocatableRegisters;
+ }
+
+ static int ToAllocationIndex(X87Register reg) {
+ return reg.code_;
+ }
+
+ static const char* AllocationIndexToString(int index) {
+ ASSERT(index >= 0 && index < kMaxNumAllocatableRegisters);
+ const char* const names[] = {
+ "stX_0", "stX_1", "stX_2", "stX_3", "stX_4",
+ "stX_5", "stX_6", "stX_7"
+ };
+ return names[index];
+ }
+
+ static X87Register FromAllocationIndex(int index) {
+ ASSERT(index >= 0 && index < kMaxNumAllocatableRegisters);
+ X87Register result;
+ result.code_ = index;
+ return result;
+ }
+
+ bool is_valid() const {
+ return 0 <= code_ && code_ < kMaxNumRegisters;
+ }
+
+ int code() const {
+ ASSERT(is_valid());
+ return code_;
+ }
+
+ bool is(X87Register reg) const {
+ return code_ == reg.code_;
+ }
+
+ int code_;
+};
+
+
+typedef X87Register DoubleRegister;
+
+
+const X87Register stX_0 = { 0 };
+const X87Register stX_1 = { 1 };
+const X87Register stX_2 = { 2 };
+const X87Register stX_3 = { 3 };
+const X87Register stX_4 = { 4 };
+const X87Register stX_5 = { 5 };
+const X87Register stX_6 = { 6 };
+const X87Register stX_7 = { 7 };
+
+
+enum Condition {
+ // any value < 0 is considered no_condition
+ no_condition = -1,
+
+ overflow = 0,
+ no_overflow = 1,
+ below = 2,
+ above_equal = 3,
+ equal = 4,
+ not_equal = 5,
+ below_equal = 6,
+ above = 7,
+ negative = 8,
+ positive = 9,
+ parity_even = 10,
+ parity_odd = 11,
+ less = 12,
+ greater_equal = 13,
+ less_equal = 14,
+ greater = 15,
+
+ // aliases
+ carry = below,
+ not_carry = above_equal,
+ zero = equal,
+ not_zero = not_equal,
+ sign = negative,
+ not_sign = positive
+};
+
+
+// Returns the equivalent of !cc.
+// Negation of the default no_condition (-1) results in a non-default
+// no_condition value (-2). As long as tests for no_condition check
+// for condition < 0, this will work as expected.
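+//
+// For example, NegateCondition(equal) yields not_equal (4 ^ 1 == 5) and
+// NegateCondition(below) yields above_equal (2 ^ 1 == 3).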
+inline Condition NegateCondition(Condition cc) {
+ return static_cast<Condition>(cc ^ 1);
+}
+
+
+// Corresponds to transposing the operands of a comparison.
+inline Condition ReverseCondition(Condition cc) {
+ switch (cc) {
+ case below:
+ return above;
+ case above:
+ return below;
+ case above_equal:
+ return below_equal;
+ case below_equal:
+ return above_equal;
+ case less:
+ return greater;
+ case greater:
+ return less;
+ case greater_equal:
+ return less_equal;
+ case less_equal:
+ return greater_equal;
+ default:
+ return cc;
+ }
+}
+
+
+// -----------------------------------------------------------------------------
+// Machine instruction Immediates
+
+class Immediate BASE_EMBEDDED {
+ public:
+ inline explicit Immediate(int x);
+ inline explicit Immediate(const ExternalReference& ext);
+ inline explicit Immediate(Handle<Object> handle);
+ inline explicit Immediate(Smi* value);
+ inline explicit Immediate(Address addr);
+
+ static Immediate CodeRelativeOffset(Label* label) {
+ return Immediate(label);
+ }
+
+ bool is_zero() const { return x_ == 0 && RelocInfo::IsNone(rmode_); }
+ bool is_int8() const {
+ return -128 <= x_ && x_ < 128 && RelocInfo::IsNone(rmode_);
+ }
+ bool is_int16() const {
+ return -32768 <= x_ && x_ < 32768 && RelocInfo::IsNone(rmode_);
+ }
+
+ private:
+ inline explicit Immediate(Label* value);
+
+ int x_;
+ RelocInfo::Mode rmode_;
+
+ friend class Assembler;
+ friend class MacroAssembler;
+};
+
+
+// -----------------------------------------------------------------------------
+// Machine instruction Operands
+
+enum ScaleFactor {
+ times_1 = 0,
+ times_2 = 1,
+ times_4 = 2,
+ times_8 = 3,
+ times_int_size = times_4,
+ times_half_pointer_size = times_2,
+ times_pointer_size = times_4,
+ times_twice_pointer_size = times_8
+};
+
+
+class Operand BASE_EMBEDDED {
+ public:
+ // [disp/r]
+ INLINE(explicit Operand(int32_t disp, RelocInfo::Mode rmode));
+ // disp only must always be relocated
+
+ // [base + disp/r]
+ explicit Operand(Register base, int32_t disp,
+ RelocInfo::Mode rmode = RelocInfo::NONE32);
+
+ // [base + index*scale + disp/r]
+ explicit Operand(Register base,
+ Register index,
+ ScaleFactor scale,
+ int32_t disp,
+ RelocInfo::Mode rmode = RelocInfo::NONE32);
+
+ // [index*scale + disp/r]
+ explicit Operand(Register index,
+ ScaleFactor scale,
+ int32_t disp,
+ RelocInfo::Mode rmode = RelocInfo::NONE32);
+
+ static Operand StaticVariable(const ExternalReference& ext) {
+ return Operand(reinterpret_cast<int32_t>(ext.address()),
+ RelocInfo::EXTERNAL_REFERENCE);
+ }
+
+ static Operand StaticArray(Register index,
+ ScaleFactor scale,
+ const ExternalReference& arr) {
+ return Operand(index, scale, reinterpret_cast<int32_t>(arr.address()),
+ RelocInfo::EXTERNAL_REFERENCE);
+ }
+
+ static Operand ForCell(Handle<Cell> cell) {
+ AllowDeferredHandleDereference embedding_raw_address;
+ return Operand(reinterpret_cast<int32_t>(cell.location()),
+ RelocInfo::CELL);
+ }
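+
+  // For example, Operand(ebx, ecx, times_4, 0) describes the memory operand
+  // [ebx + ecx*4], and Operand(ebp, 8) describes [ebp + 8].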
+
+ // Returns true if this Operand is a wrapper for the specified register.
+ bool is_reg(Register reg) const;
+
+ // Returns true if this Operand is a wrapper for one register.
+ bool is_reg_only() const;
+
+ // Asserts that this Operand is a wrapper for one register and returns the
+ // register.
+ Register reg() const;
+
+ private:
+ // reg
+ INLINE(explicit Operand(Register reg));
+
+ // Set the ModRM byte without an encoded 'reg' register. The
+ // register is encoded later as part of the emit_operand operation.
+ inline void set_modrm(int mod, Register rm);
+
+ inline void set_sib(ScaleFactor scale, Register index, Register base);
+ inline void set_disp8(int8_t disp);
+ inline void set_dispr(int32_t disp, RelocInfo::Mode rmode);
+
+ byte buf_[6];
+ // The number of bytes in buf_.
+ unsigned int len_;
+ // Only valid if len_ > 4.
+ RelocInfo::Mode rmode_;
+
+ friend class Assembler;
+ friend class MacroAssembler;
+ friend class LCodeGen;
+};
+
+
+// -----------------------------------------------------------------------------
+// A Displacement describes the 32bit immediate field of an instruction which
+// may be used together with a Label in order to refer to a yet unknown code
+// position. Displacements stored in the instruction stream are used to describe
+// the instruction and to chain a list of instructions using the same Label.
+// A Displacement contains 2 different fields:
+//
+// next field: position of next displacement in the chain (0 = end of list)
+// type field: instruction type
+//
+// A next value of null (0) indicates the end of a chain (note that there can
+// be no displacement at position zero, because there is always at least one
+// instruction byte before the displacement).
+//
+// Displacement _data field layout
+//
+// |31.....2|1......0|
+// |  next  |  type  |
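+//
+// For example, data() == (12 << 2) | UNCONDITIONAL_JUMP marks an
+// unconditional jump whose next link in the chain is at position 12;
+// a next value of 0 terminates the chain.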
+
+class Displacement BASE_EMBEDDED {
+ public:
+ enum Type {
+ UNCONDITIONAL_JUMP,
+ CODE_RELATIVE,
+ OTHER
+ };
+
+ int data() const { return data_; }
+ Type type() const { return TypeField::decode(data_); }
+ void next(Label* L) const {
+ int n = NextField::decode(data_);
+ n > 0 ? L->link_to(n) : L->Unuse();
+ }
+ void link_to(Label* L) { init(L, type()); }
+
+ explicit Displacement(int data) { data_ = data; }
+
+ Displacement(Label* L, Type type) { init(L, type); }
+
+ void print() {
+ PrintF("%s (%x) ", (type() == UNCONDITIONAL_JUMP ? "jmp" : "[other]"),
+ NextField::decode(data_));
+ }
+
+ private:
+ int data_;
+
+ class TypeField: public BitField<Type, 0, 2> {};
+ class NextField: public BitField<int, 2, 32-2> {};
+
+ void init(Label* L, Type type);
+};
+
+
+class Assembler : public AssemblerBase {
+ private:
+ // We check before assembling an instruction that there is sufficient
+ // space to write an instruction and its relocation information.
+ // The relocation writer's position must be kGap bytes above the end of
+ // the generated instructions. This leaves enough space for the
+ // longest possible ia32 instruction, 15 bytes, and the longest possible
+ // relocation information encoding, RelocInfoWriter::kMaxLength == 16.
+ // (There is a 15 byte limit on ia32 instruction length that rules out some
+ // otherwise valid instructions.)
+ // This allows for a single, fast space check per instruction.
+ static const int kGap = 32;
+
+ public:
+ // Create an assembler. Instructions and relocation information are emitted
+ // into a buffer, with the instructions starting from the beginning and the
+ // relocation information starting from the end of the buffer. See CodeDesc
+ // for a detailed comment on the layout (globals.h).
+ //
+ // If the provided buffer is NULL, the assembler allocates and grows its own
+ // buffer, and buffer_size determines the initial buffer size. The buffer is
+ // owned by the assembler and deallocated upon destruction of the assembler.
+ //
+ // If the provided buffer is not NULL, the assembler uses the provided buffer
+ // for code generation and assumes its size to be buffer_size. If the buffer
+ // is too small, a fatal error occurs. No deallocation of the buffer is done
+ // upon destruction of the assembler.
+ // TODO(vitalyr): the assembler does not need an isolate.
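+  //
+  // Illustrative usage with an assembler-owned buffer (sketch only):
+  //   Assembler assm(isolate, NULL, 256);  // assembler allocates the buffer
+  //   assm.nop();                          // ... emit instructions ...
+  //   CodeDesc desc;
+  //   assm.GetCode(&desc);                 // retrieve the generated code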
+ Assembler(Isolate* isolate, void* buffer, int buffer_size);
+ virtual ~Assembler() { }
+
+ // GetCode emits any pending (non-emitted) code and fills the descriptor
+ // desc. GetCode() is idempotent; it returns the same result if no other
+ // Assembler functions are invoked in between GetCode() calls.
+ void GetCode(CodeDesc* desc);
+
+ // Read/Modify the code target in the branch/call instruction at pc.
+ inline static Address target_address_at(Address pc,
+ ConstantPoolArray* constant_pool);
+ inline static void set_target_address_at(Address pc,
+ ConstantPoolArray* constant_pool,
+ Address target,
+ ICacheFlushMode icache_flush_mode =
+ FLUSH_ICACHE_IF_NEEDED);
+ static inline Address target_address_at(Address pc, Code* code) {
+ ConstantPoolArray* constant_pool = code ? code->constant_pool() : NULL;
+ return target_address_at(pc, constant_pool);
+ }
+ static inline void set_target_address_at(Address pc,
+ Code* code,
+ Address target,
+ ICacheFlushMode icache_flush_mode =
+ FLUSH_ICACHE_IF_NEEDED) {
+ ConstantPoolArray* constant_pool = code ? code->constant_pool() : NULL;
+ set_target_address_at(pc, constant_pool, target);
+ }
+
+ // Return the code target address at a call site from the return address
+ // of that call in the instruction stream.
+ inline static Address target_address_from_return_address(Address pc);
+
+ // This sets the branch destination (which is in the instruction on x86).
+ // This is for calls and branches within generated code.
+ inline static void deserialization_set_special_target_at(
+ Address instruction_payload, Code* code, Address target) {
+ set_target_address_at(instruction_payload, code, target);
+ }
+
+ static const int kSpecialTargetSize = kPointerSize;
+
+ // Distance between the address of the code target in the call instruction
+ // and the return address
+ static const int kCallTargetAddressOffset = kPointerSize;
+ // Distance between start of patched return sequence and the emitted address
+ // to jump to.
+ static const int kPatchReturnSequenceAddressOffset = 1; // JMP imm32.
+
+ // Distance between start of patched debug break slot and the emitted address
+ // to jump to.
+ static const int kPatchDebugBreakSlotAddressOffset = 1; // JMP imm32.
+
+ static const int kCallInstructionLength = 5;
+ static const int kPatchDebugBreakSlotReturnOffset = kPointerSize;
+ static const int kJSReturnSequenceLength = 6;
+
+ // The debug break slot must be able to contain a call instruction.
+ static const int kDebugBreakSlotLength = kCallInstructionLength;
+
+ // One byte opcode for test al, 0xXX.
+ static const byte kTestAlByte = 0xA8;
+ // One byte opcode for nop.
+ static const byte kNopByte = 0x90;
+
+ // One byte opcode for a short unconditional jump.
+ static const byte kJmpShortOpcode = 0xEB;
+ // One byte prefix for a short conditional jump.
+ static const byte kJccShortPrefix = 0x70;
+ static const byte kJncShortOpcode = kJccShortPrefix | not_carry;
+ static const byte kJcShortOpcode = kJccShortPrefix | carry;
+ static const byte kJnzShortOpcode = kJccShortPrefix | not_zero;
+ static const byte kJzShortOpcode = kJccShortPrefix | zero;
+
+
+ // ---------------------------------------------------------------------------
+ // Code generation
+ //
+ // - function names correspond one-to-one to ia32 instruction mnemonics
+ // - unless specified otherwise, instructions operate on 32bit operands
+ // - instructions on 8bit (byte) operands/registers have a trailing '_b'
+ // - instructions on 16bit (word) operands/registers have a trailing '_w'
+ // - naming conflicts with C++ keywords are resolved via a trailing '_'
+
+ // NOTE ON INTERFACE: Currently, the interface is not very consistent
+ // in the sense that some operations (e.g. mov()) can be called in more
+  // than one way to generate the same instruction: The Register argument
+ // can in some cases be replaced with an Operand(Register) argument.
+  // This should be cleaned up and made more orthogonal. The question
+ // is: should we always use Operands instead of Registers where an
+ // Operand is possible, or should we have a Register (overloaded) form
+ // instead? We must be careful to make sure that the selected instruction
+ // is obvious from the parameters to avoid hard-to-find code generation
+ // bugs.
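+  //
+  // For example, add(eax, ebx) and add(eax, Operand(ebx)) assemble the same
+  // instruction; the Register overload simply wraps its argument in an
+  // Operand (see the inline forwarding definitions below).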
+
+ // Insert the smallest number of nop instructions
+ // possible to align the pc offset to a multiple
+ // of m. m must be a power of 2.
+ void Align(int m);
+ void Nop(int bytes = 1);
+  // Aligns code to a position that is optimal for a jump target on this platform.
+ void CodeTargetAlign();
+
+ // Stack
+ void pushad();
+ void popad();
+
+ void pushfd();
+ void popfd();
+
+ void push(const Immediate& x);
+ void push_imm32(int32_t imm32);
+ void push(Register src);
+ void push(const Operand& src);
+
+ void pop(Register dst);
+ void pop(const Operand& dst);
+
+ void enter(const Immediate& size);
+ void leave();
+
+ // Moves
+ void mov_b(Register dst, Register src) { mov_b(dst, Operand(src)); }
+ void mov_b(Register dst, const Operand& src);
+ void mov_b(Register dst, int8_t imm8) { mov_b(Operand(dst), imm8); }
+ void mov_b(const Operand& dst, int8_t imm8);
+ void mov_b(const Operand& dst, Register src);
+
+ void mov_w(Register dst, const Operand& src);
+ void mov_w(const Operand& dst, Register src);
+ void mov_w(const Operand& dst, int16_t imm16);
+
+ void mov(Register dst, int32_t imm32);
+ void mov(Register dst, const Immediate& x);
+ void mov(Register dst, Handle<Object> handle);
+ void mov(Register dst, const Operand& src);
+ void mov(Register dst, Register src);
+ void mov(const Operand& dst, const Immediate& x);
+ void mov(const Operand& dst, Handle<Object> handle);
+ void mov(const Operand& dst, Register src);
+
+ void movsx_b(Register dst, Register src) { movsx_b(dst, Operand(src)); }
+ void movsx_b(Register dst, const Operand& src);
+
+ void movsx_w(Register dst, Register src) { movsx_w(dst, Operand(src)); }
+ void movsx_w(Register dst, const Operand& src);
+
+ void movzx_b(Register dst, Register src) { movzx_b(dst, Operand(src)); }
+ void movzx_b(Register dst, const Operand& src);
+
+ void movzx_w(Register dst, Register src) { movzx_w(dst, Operand(src)); }
+ void movzx_w(Register dst, const Operand& src);
+
+ // Flag management.
+ void cld();
+
+ // Repetitive string instructions.
+ void rep_movs();
+ void rep_stos();
+ void stos();
+
+ // Exchange two registers
+ void xchg(Register dst, Register src);
+
+ // Arithmetics
+ void adc(Register dst, int32_t imm32);
+ void adc(Register dst, const Operand& src);
+
+ void add(Register dst, Register src) { add(dst, Operand(src)); }
+ void add(Register dst, const Operand& src);
+ void add(const Operand& dst, Register src);
+ void add(Register dst, const Immediate& imm) { add(Operand(dst), imm); }
+ void add(const Operand& dst, const Immediate& x);
+
+ void and_(Register dst, int32_t imm32);
+ void and_(Register dst, const Immediate& x);
+ void and_(Register dst, Register src) { and_(dst, Operand(src)); }
+ void and_(Register dst, const Operand& src);
+ void and_(const Operand& dst, Register src);
+ void and_(const Operand& dst, const Immediate& x);
+
+ void cmpb(Register reg, int8_t imm8) { cmpb(Operand(reg), imm8); }
+ void cmpb(const Operand& op, int8_t imm8);
+ void cmpb(Register reg, const Operand& op);
+ void cmpb(const Operand& op, Register reg);
+ void cmpb_al(const Operand& op);
+ void cmpw_ax(const Operand& op);
+ void cmpw(const Operand& op, Immediate imm16);
+ void cmp(Register reg, int32_t imm32);
+ void cmp(Register reg, Handle<Object> handle);
+ void cmp(Register reg0, Register reg1) { cmp(reg0, Operand(reg1)); }
+ void cmp(Register reg, const Operand& op);
+ void cmp(Register reg, const Immediate& imm) { cmp(Operand(reg), imm); }
+ void cmp(const Operand& op, const Immediate& imm);
+ void cmp(const Operand& op, Handle<Object> handle);
+
+ void dec_b(Register dst);
+ void dec_b(const Operand& dst);
+
+ void dec(Register dst);
+ void dec(const Operand& dst);
+
+ void cdq();
+
+ void idiv(Register src);
+
+ // Signed multiply instructions.
+ void imul(Register src); // edx:eax = eax * src.
+ void imul(Register dst, Register src) { imul(dst, Operand(src)); }
+ void imul(Register dst, const Operand& src); // dst = dst * src.
+ void imul(Register dst, Register src, int32_t imm32); // dst = src * imm32.
+
+ void inc(Register dst);
+ void inc(const Operand& dst);
+
+ void lea(Register dst, const Operand& src);
+
+ // Unsigned multiply instruction.
+ void mul(Register src); // edx:eax = eax * reg.
+
+ void neg(Register dst);
+
+ void not_(Register dst);
+
+ void or_(Register dst, int32_t imm32);
+ void or_(Register dst, Register src) { or_(dst, Operand(src)); }
+ void or_(Register dst, const Operand& src);
+ void or_(const Operand& dst, Register src);
+ void or_(Register dst, const Immediate& imm) { or_(Operand(dst), imm); }
+ void or_(const Operand& dst, const Immediate& x);
+
+ void rcl(Register dst, uint8_t imm8);
+ void rcr(Register dst, uint8_t imm8);
+ void ror(Register dst, uint8_t imm8);
+ void ror_cl(Register dst);
+
+ void sar(Register dst, uint8_t imm8);
+ void sar_cl(Register dst);
+
+ void sbb(Register dst, const Operand& src);
+
+ void shld(Register dst, Register src) { shld(dst, Operand(src)); }
+ void shld(Register dst, const Operand& src);
+
+ void shl(Register dst, uint8_t imm8);
+ void shl_cl(Register dst);
+
+ void shrd(Register dst, Register src) { shrd(dst, Operand(src)); }
+ void shrd(Register dst, const Operand& src);
+
+ void shr(Register dst, uint8_t imm8);
+ void shr_cl(Register dst);
+
+ void sub(Register dst, const Immediate& imm) { sub(Operand(dst), imm); }
+ void sub(const Operand& dst, const Immediate& x);
+ void sub(Register dst, Register src) { sub(dst, Operand(src)); }
+ void sub(Register dst, const Operand& src);
+ void sub(const Operand& dst, Register src);
+
+ void test(Register reg, const Immediate& imm);
+ void test(Register reg0, Register reg1) { test(reg0, Operand(reg1)); }
+ void test(Register reg, const Operand& op);
+ void test_b(Register reg, const Operand& op);
+ void test(const Operand& op, const Immediate& imm);
+ void test_b(Register reg, uint8_t imm8);
+ void test_b(const Operand& op, uint8_t imm8);
+
+ void xor_(Register dst, int32_t imm32);
+ void xor_(Register dst, Register src) { xor_(dst, Operand(src)); }
+ void xor_(Register dst, const Operand& src);
+ void xor_(const Operand& dst, Register src);
+ void xor_(Register dst, const Immediate& imm) { xor_(Operand(dst), imm); }
+ void xor_(const Operand& dst, const Immediate& x);
+
+ // Bit operations.
+ void bt(const Operand& dst, Register src);
+ void bts(Register dst, Register src) { bts(Operand(dst), src); }
+ void bts(const Operand& dst, Register src);
+ void bsr(Register dst, Register src) { bsr(dst, Operand(src)); }
+ void bsr(Register dst, const Operand& src);
+
+ // Miscellaneous
+ void hlt();
+ void int3();
+ void nop();
+ void ret(int imm16);
+
+ // Label operations & relative jumps (PPUM Appendix D)
+ //
+ // Takes a branch opcode (cc) and a label (L) and generates
+ // either a backward branch or a forward branch and links it
+ // to the label fixup chain. Usage:
+ //
+ // Label L; // unbound label
+ // j(cc, &L); // forward branch to unbound label
+ // bind(&L); // bind label to the current pc
+ // j(cc, &L); // backward branch to bound label
+ // bind(&L); // illegal: a label may be bound only once
+ //
+ // Note: The same Label can be used for forward and backward branches
+ // but it may be bound only once.
+
+ void bind(Label* L); // binds an unbound label L to the current code position
+
+ // Calls
+ void call(Label* L);
+ void call(byte* entry, RelocInfo::Mode rmode);
+ int CallSize(const Operand& adr);
+ void call(Register reg) { call(Operand(reg)); }
+ void call(const Operand& adr);
+ int CallSize(Handle<Code> code, RelocInfo::Mode mode);
+ void call(Handle<Code> code,
+ RelocInfo::Mode rmode,
+ TypeFeedbackId id = TypeFeedbackId::None());
+
+ // Jumps
+ // unconditional jump to L
+ void jmp(Label* L, Label::Distance distance = Label::kFar);
+ void jmp(byte* entry, RelocInfo::Mode rmode);
+ void jmp(Register reg) { jmp(Operand(reg)); }
+ void jmp(const Operand& adr);
+ void jmp(Handle<Code> code, RelocInfo::Mode rmode);
+
+ // Conditional jumps
+ void j(Condition cc,
+ Label* L,
+ Label::Distance distance = Label::kFar);
+ void j(Condition cc, byte* entry, RelocInfo::Mode rmode);
+ void j(Condition cc, Handle<Code> code);
+
+ // Floating-point operations
+ void fld(int i);
+ void fstp(int i);
+
+ void fld1();
+ void fldz();
+ void fldpi();
+ void fldln2();
+
+ void fld_s(const Operand& adr);
+ void fld_d(const Operand& adr);
+
+ void fstp_s(const Operand& adr);
+ void fst_s(const Operand& adr);
+ void fstp_d(const Operand& adr);
+ void fst_d(const Operand& adr);
+
+ void fild_s(const Operand& adr);
+ void fild_d(const Operand& adr);
+
+ void fist_s(const Operand& adr);
+
+ void fistp_s(const Operand& adr);
+ void fistp_d(const Operand& adr);
+
+ // The fisttp instructions require SSE3.
+ void fisttp_s(const Operand& adr);
+ void fisttp_d(const Operand& adr);
+
+ void fabs();
+ void fchs();
+ void fcos();
+ void fsin();
+ void fptan();
+ void fyl2x();
+ void f2xm1();
+ void fscale();
+ void fninit();
+
+ void fadd(int i);
+ void fadd_i(int i);
+ void fsub(int i);
+ void fsub_i(int i);
+ void fmul(int i);
+ void fmul_i(int i);
+ void fdiv(int i);
+ void fdiv_i(int i);
+
+ void fisub_s(const Operand& adr);
+
+ void faddp(int i = 1);
+ void fsubp(int i = 1);
+ void fsubrp(int i = 1);
+ void fmulp(int i = 1);
+ void fdivp(int i = 1);
+ void fprem();
+ void fprem1();
+
+ void fxch(int i = 1);
+ void fincstp();
+ void ffree(int i = 0);
+
+ void ftst();
+ void fucomp(int i);
+ void fucompp();
+ void fucomi(int i);
+ void fucomip();
+ void fcompp();
+ void fnstsw_ax();
+ void fwait();
+ void fnclex();
+
+ void frndint();
+
+ void sahf();
+ void setcc(Condition cc, Register reg);
+
+ void cpuid();
+
+ // TODO(lrn): Need SFENCE for movnt?
+
+ // Debugging
+ void Print();
+
+ // Check the code size generated from label to here.
+ int SizeOfCodeGeneratedSince(Label* label) {
+ return pc_offset() - label->pos();
+ }
+
+ // Mark address of the ExitJSFrame code.
+ void RecordJSReturn();
+
+ // Mark address of a debug break slot.
+ void RecordDebugBreakSlot();
+
+ // Record a comment relocation entry that can be used by a disassembler.
+ // Use --code-comments to enable, or provide "force = true" flag to always
+ // write a comment.
+ void RecordComment(const char* msg, bool force = false);
+
+  // Writes a single byte or 32-bit word of data in the code stream. Used
+  // for inline tables, e.g., jump-tables.
+ void db(uint8_t data);
+ void dd(uint32_t data);
+
+ // Check if there is less than kGap bytes available in the buffer.
+ // If this is the case, we need to grow the buffer before emitting
+ // an instruction or relocation information.
+ inline bool buffer_overflow() const {
+ return pc_ >= reloc_info_writer.pos() - kGap;
+ }
+
+ // Get the number of bytes available in the buffer.
+ inline int available_space() const { return reloc_info_writer.pos() - pc_; }
+
+ static bool IsNop(Address addr);
+
+ PositionsRecorder* positions_recorder() { return &positions_recorder_; }
+
+ int relocation_writer_size() {
+ return (buffer_ + buffer_size_) - reloc_info_writer.pos();
+ }
+
+ // Avoid overflows for displacements etc.
+ static const int kMaximalBufferSize = 512*MB;
+
+ byte byte_at(int pos) { return buffer_[pos]; }
+ void set_byte_at(int pos, byte value) { buffer_[pos] = value; }
+
+ // Allocate a constant pool of the correct size for the generated code.
+ Handle<ConstantPoolArray> NewConstantPool(Isolate* isolate);
+
+ // Generate the constant pool for the generated code.
+ void PopulateConstantPool(ConstantPoolArray* constant_pool);
+
+ protected:
+ byte* addr_at(int pos) { return buffer_ + pos; }
+
+
+ private:
+ uint32_t long_at(int pos) {
+ return *reinterpret_cast<uint32_t*>(addr_at(pos));
+ }
+ void long_at_put(int pos, uint32_t x) {
+ *reinterpret_cast<uint32_t*>(addr_at(pos)) = x;
+ }
+
+ // code emission
+ void GrowBuffer();
+ inline void emit(uint32_t x);
+ inline void emit(Handle<Object> handle);
+ inline void emit(uint32_t x,
+ RelocInfo::Mode rmode,
+ TypeFeedbackId id = TypeFeedbackId::None());
+ inline void emit(Handle<Code> code,
+ RelocInfo::Mode rmode,
+ TypeFeedbackId id = TypeFeedbackId::None());
+ inline void emit(const Immediate& x);
+ inline void emit_w(const Immediate& x);
+
+ // Emit the code-object-relative offset of the label's position
+ inline void emit_code_relative_offset(Label* label);
+
+ // instruction generation
+ void emit_arith_b(int op1, int op2, Register dst, int imm8);
+
+ // Emit a basic arithmetic instruction (i.e. first byte of the family is 0x81)
+ // with a given destination expression and an immediate operand. It attempts
+ // to use the shortest encoding possible.
+ // sel specifies the /n in the modrm byte (see the Intel PRM).
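+  // For example, xor_() calls emit_arith(6, ...), where /6 selects the XOR
+  // member of the 0x81/0x83 opcode group.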
+ void emit_arith(int sel, Operand dst, const Immediate& x);
+
+ void emit_operand(Register reg, const Operand& adr);
+
+ void emit_farith(int b1, int b2, int i);
+
+ // labels
+ void print(Label* L);
+ void bind_to(Label* L, int pos);
+
+ // displacements
+ inline Displacement disp_at(Label* L);
+ inline void disp_at_put(Label* L, Displacement disp);
+ inline void emit_disp(Label* L, Displacement::Type type);
+ inline void emit_near_disp(Label* L);
+
+ // record reloc info for current pc_
+ void RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data = 0);
+
+ friend class CodePatcher;
+ friend class EnsureSpace;
+
+ // code generation
+ RelocInfoWriter reloc_info_writer;
+
+ PositionsRecorder positions_recorder_;
+ friend class PositionsRecorder;
+};
+
+
+// Helper class that ensures that there is enough space for generating
+// instructions and relocation information. The constructor makes
+// sure that there is enough space and (in debug mode) the destructor
+// checks that we did not generate too much.
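+//
+// Every instruction emitter starts with "EnsureSpace ensure_space(this);"
+// (e.g. Assembler::nop() in assembler-x87.cc), which grows the buffer once
+// available_space() drops to kGap bytes or fewer.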
+class EnsureSpace BASE_EMBEDDED {
+ public:
+ explicit EnsureSpace(Assembler* assembler) : assembler_(assembler) {
+ if (assembler_->buffer_overflow()) assembler_->GrowBuffer();
+#ifdef DEBUG
+ space_before_ = assembler_->available_space();
+#endif
+ }
+
+#ifdef DEBUG
+ ~EnsureSpace() {
+ int bytes_generated = space_before_ - assembler_->available_space();
+ ASSERT(bytes_generated < assembler_->kGap);
+ }
+#endif
+
+ private:
+ Assembler* assembler_;
+#ifdef DEBUG
+ int space_before_;
+#endif
+};
+
+} } // namespace v8::internal
+
+#endif // V8_X87_ASSEMBLER_X87_H_
--- /dev/null
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "v8.h"
+
+#if V8_TARGET_ARCH_X87
+
+#include "codegen.h"
+#include "deoptimizer.h"
+#include "full-codegen.h"
+#include "stub-cache.h"
+
+namespace v8 {
+namespace internal {
+
+
+#define __ ACCESS_MASM(masm)
+
+
+void Builtins::Generate_Adaptor(MacroAssembler* masm,
+ CFunctionId id,
+ BuiltinExtraArguments extra_args) {
+ // ----------- S t a t e -------------
+ // -- eax : number of arguments excluding receiver
+ // -- edi : called function (only guaranteed when
+ // extra_args requires it)
+ // -- esi : context
+ // -- esp[0] : return address
+ // -- esp[4] : last argument
+ // -- ...
+ // -- esp[4 * argc] : first argument (argc == eax)
+ // -- esp[4 * (argc +1)] : receiver
+ // -----------------------------------
+
+ // Insert extra arguments.
+ int num_extra_args = 0;
+ if (extra_args == NEEDS_CALLED_FUNCTION) {
+ num_extra_args = 1;
+ Register scratch = ebx;
+ __ pop(scratch); // Save return address.
+ __ push(edi);
+ __ push(scratch); // Restore return address.
+ } else {
+ ASSERT(extra_args == NO_EXTRA_ARGUMENTS);
+ }
+
+ // JumpToExternalReference expects eax to contain the number of arguments
+ // including the receiver and the extra arguments.
+ __ add(eax, Immediate(num_extra_args + 1));
+ __ JumpToExternalReference(ExternalReference(id, masm->isolate()));
+}
+
+
+static void CallRuntimePassFunction(
+ MacroAssembler* masm, Runtime::FunctionId function_id) {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ // Push a copy of the function.
+ __ push(edi);
+ // Function is also the parameter to the runtime call.
+ __ push(edi);
+
+ __ CallRuntime(function_id, 1);
+  // Restore the function.
+ __ pop(edi);
+}
+
+
+static void GenerateTailCallToSharedCode(MacroAssembler* masm) {
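+  // Tail call the code attached to the SharedFunctionInfo of the function
+  // in edi.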
+ __ mov(eax, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
+ __ mov(eax, FieldOperand(eax, SharedFunctionInfo::kCodeOffset));
+ __ lea(eax, FieldOperand(eax, Code::kHeaderSize));
+ __ jmp(eax);
+}
+
+
+static void GenerateTailCallToReturnedCode(MacroAssembler* masm) {
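+  // eax holds the Code object returned by the runtime call; tail call it.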
+ __ lea(eax, FieldOperand(eax, Code::kHeaderSize));
+ __ jmp(eax);
+}
+
+
+void Builtins::Generate_InOptimizationQueue(MacroAssembler* masm) {
+ // Checking whether the queued function is ready for install is optional,
+ // since we come across interrupts and stack checks elsewhere. However,
+ // not checking may delay installing ready functions, and always checking
+ // would be quite expensive. A good compromise is to first check against
+ // stack limit as a cue for an interrupt signal.
+ Label ok;
+ ExternalReference stack_limit =
+ ExternalReference::address_of_stack_limit(masm->isolate());
+ __ cmp(esp, Operand::StaticVariable(stack_limit));
+ __ j(above_equal, &ok, Label::kNear);
+
+ CallRuntimePassFunction(masm, Runtime::kHiddenTryInstallOptimizedCode);
+ GenerateTailCallToReturnedCode(masm);
+
+ __ bind(&ok);
+ GenerateTailCallToSharedCode(masm);
+}
+
+
+static void Generate_JSConstructStubHelper(MacroAssembler* masm,
+ bool is_api_function,
+ bool create_memento) {
+ // ----------- S t a t e -------------
+ // -- eax: number of arguments
+ // -- edi: constructor function
+ // -- ebx: allocation site or undefined
+ // -----------------------------------
+
+ // Should never create mementos for api functions.
+ ASSERT(!is_api_function || !create_memento);
+
+ // Enter a construct frame.
+ {
+ FrameScope scope(masm, StackFrame::CONSTRUCT);
+
+ if (create_memento) {
+ __ AssertUndefinedOrAllocationSite(ebx);
+ __ push(ebx);
+ }
+
+ // Store a smi-tagged arguments count on the stack.
+ __ SmiTag(eax);
+ __ push(eax);
+
+ // Push the function to invoke on the stack.
+ __ push(edi);
+
+ // Try to allocate the object without transitioning into C code. If any of
+ // the preconditions is not met, the code bails out to the runtime call.
+ Label rt_call, allocated;
+ if (FLAG_inline_new) {
+ Label undo_allocation;
+ ExternalReference debug_step_in_fp =
+ ExternalReference::debug_step_in_fp_address(masm->isolate());
+ __ cmp(Operand::StaticVariable(debug_step_in_fp), Immediate(0));
+ __ j(not_equal, &rt_call);
+
+ // Verified that the constructor is a JSFunction.
+ // Load the initial map and verify that it is in fact a map.
+ // edi: constructor
+ __ mov(eax, FieldOperand(edi, JSFunction::kPrototypeOrInitialMapOffset));
+      // A NULL value looks like a Smi, so JumpIfSmi bails out on both.
+ __ JumpIfSmi(eax, &rt_call);
+ // edi: constructor
+ // eax: initial map (if proven valid below)
+ __ CmpObjectType(eax, MAP_TYPE, ebx);
+ __ j(not_equal, &rt_call);
+
+      // Check that the constructor is not constructing a JSFunction (see
+      // comments in Runtime_NewObject in runtime.cc), in which case the
+      // initial map's instance type would be JS_FUNCTION_TYPE.
+ // edi: constructor
+ // eax: initial map
+ __ CmpInstanceType(eax, JS_FUNCTION_TYPE);
+ __ j(equal, &rt_call);
+
+ if (!is_api_function) {
+ Label allocate;
+ // The code below relies on these assumptions.
+ STATIC_ASSERT(JSFunction::kNoSlackTracking == 0);
+ STATIC_ASSERT(Map::ConstructionCount::kShift +
+ Map::ConstructionCount::kSize == 32);
+ // Check if slack tracking is enabled.
+ __ mov(esi, FieldOperand(eax, Map::kBitField3Offset));
+ __ shr(esi, Map::ConstructionCount::kShift);
+ __ j(zero, &allocate); // JSFunction::kNoSlackTracking
+ // Decrease generous allocation count.
+ __ sub(FieldOperand(eax, Map::kBitField3Offset),
+ Immediate(1 << Map::ConstructionCount::kShift));
+
+ __ cmp(esi, JSFunction::kFinishSlackTracking);
+ __ j(not_equal, &allocate);
+
+ __ push(eax);
+ __ push(edi);
+
+ __ push(edi); // constructor
+ __ CallRuntime(Runtime::kHiddenFinalizeInstanceSize, 1);
+
+ __ pop(edi);
+ __ pop(eax);
+ __ xor_(esi, esi); // JSFunction::kNoSlackTracking
+
+ __ bind(&allocate);
+ }
+
+ // Now allocate the JSObject on the heap.
+ // edi: constructor
+ // eax: initial map
+ __ movzx_b(edi, FieldOperand(eax, Map::kInstanceSizeOffset));
+ __ shl(edi, kPointerSizeLog2);
+ if (create_memento) {
+ __ add(edi, Immediate(AllocationMemento::kSize));
+ }
+
+ __ Allocate(edi, ebx, edi, no_reg, &rt_call, NO_ALLOCATION_FLAGS);
+
+ Factory* factory = masm->isolate()->factory();
+
+ // Allocated the JSObject, now initialize the fields.
+ // eax: initial map
+ // ebx: JSObject
+ // edi: start of next object (including memento if create_memento)
+ __ mov(Operand(ebx, JSObject::kMapOffset), eax);
+ __ mov(ecx, factory->empty_fixed_array());
+ __ mov(Operand(ebx, JSObject::kPropertiesOffset), ecx);
+ __ mov(Operand(ebx, JSObject::kElementsOffset), ecx);
+ // Set extra fields in the newly allocated object.
+ // eax: initial map
+ // ebx: JSObject
+ // edi: start of next object (including memento if create_memento)
+ // esi: slack tracking counter (non-API function case)
+ __ mov(edx, factory->undefined_value());
+ __ lea(ecx, Operand(ebx, JSObject::kHeaderSize));
+ if (!is_api_function) {
+ Label no_inobject_slack_tracking;
+
+ // Check if slack tracking is enabled.
+ __ cmp(esi, JSFunction::kNoSlackTracking);
+ __ j(equal, &no_inobject_slack_tracking);
+
+ // Allocate object with a slack.
+ __ movzx_b(esi,
+ FieldOperand(eax, Map::kPreAllocatedPropertyFieldsOffset));
+ __ lea(esi,
+ Operand(ebx, esi, times_pointer_size, JSObject::kHeaderSize));
+ // esi: offset of first field after pre-allocated fields
+ if (FLAG_debug_code) {
+ __ cmp(esi, edi);
+ __ Assert(less_equal,
+ kUnexpectedNumberOfPreAllocatedPropertyFields);
+ }
+ __ InitializeFieldsWithFiller(ecx, esi, edx);
+ __ mov(edx, factory->one_pointer_filler_map());
+ // Fill the remaining fields with one pointer filler map.
+
+ __ bind(&no_inobject_slack_tracking);
+ }
+
+ if (create_memento) {
+ __ lea(esi, Operand(edi, -AllocationMemento::kSize));
+ __ InitializeFieldsWithFiller(ecx, esi, edx);
+
+ // Fill in memento fields if necessary.
+ // esi: points to the allocated but uninitialized memento.
+ __ mov(Operand(esi, AllocationMemento::kMapOffset),
+ factory->allocation_memento_map());
+ // Get the cell or undefined.
+ __ mov(edx, Operand(esp, kPointerSize*2));
+ __ mov(Operand(esi, AllocationMemento::kAllocationSiteOffset),
+ edx);
+ } else {
+ __ InitializeFieldsWithFiller(ecx, edi, edx);
+ }
+
+ // Add the object tag to make the JSObject real, so that we can continue
+ // and jump into the continuation code at any time from now on. Any
+ // failures need to undo the allocation, so that the heap is in a
+ // consistent state and verifiable.
+ // eax: initial map
+ // ebx: JSObject
+ // edi: start of next object
+ __ or_(ebx, Immediate(kHeapObjectTag));
+
+ // Check if a non-empty properties array is needed.
+ // Allocate and initialize a FixedArray if it is.
+ // eax: initial map
+ // ebx: JSObject
+ // edi: start of next object
+ // Calculate the total number of properties described by the map.
+ __ movzx_b(edx, FieldOperand(eax, Map::kUnusedPropertyFieldsOffset));
+ __ movzx_b(ecx,
+ FieldOperand(eax, Map::kPreAllocatedPropertyFieldsOffset));
+ __ add(edx, ecx);
+ // Calculate unused properties past the end of the in-object properties.
+ __ movzx_b(ecx, FieldOperand(eax, Map::kInObjectPropertiesOffset));
+ __ sub(edx, ecx);
+ // Done if no extra properties are to be allocated.
+ __ j(zero, &allocated);
+ __ Assert(positive, kPropertyAllocationCountFailed);
+
+ // Scale the number of elements by pointer size and add the header for
+ // FixedArrays to the start of the next object calculation from above.
+ // ebx: JSObject
+ // edi: start of next object (will be start of FixedArray)
+ // edx: number of elements in properties array
+ __ Allocate(FixedArray::kHeaderSize,
+ times_pointer_size,
+ edx,
+ REGISTER_VALUE_IS_INT32,
+ edi,
+ ecx,
+ no_reg,
+ &undo_allocation,
+ RESULT_CONTAINS_TOP);
+
+ // Initialize the FixedArray.
+ // ebx: JSObject
+ // edi: FixedArray
+ // edx: number of elements
+ // ecx: start of next object
+ __ mov(eax, factory->fixed_array_map());
+ __ mov(Operand(edi, FixedArray::kMapOffset), eax); // setup the map
+ __ SmiTag(edx);
+ __ mov(Operand(edi, FixedArray::kLengthOffset), edx); // and length
+
+ // Initialize the fields to undefined.
+ // ebx: JSObject
+ // edi: FixedArray
+ // ecx: start of next object
+ { Label loop, entry;
+ __ mov(edx, factory->undefined_value());
+ __ lea(eax, Operand(edi, FixedArray::kHeaderSize));
+ __ jmp(&entry);
+ __ bind(&loop);
+ __ mov(Operand(eax, 0), edx);
+ __ add(eax, Immediate(kPointerSize));
+ __ bind(&entry);
+ __ cmp(eax, ecx);
+ __ j(below, &loop);
+ }
+
+ // Store the initialized FixedArray into the properties field of
+ // the JSObject
+ // ebx: JSObject
+ // edi: FixedArray
+ __ or_(edi, Immediate(kHeapObjectTag)); // add the heap tag
+ __ mov(FieldOperand(ebx, JSObject::kPropertiesOffset), edi);
+
+
+ // Continue with JSObject being successfully allocated
+ // ebx: JSObject
+ __ jmp(&allocated);
+
+ // Undo the setting of the new top so that the heap is verifiable. For
+ // example, the map's unused properties potentially do not match the
+ // allocated objects unused properties.
+ // ebx: JSObject (previous new top)
+ __ bind(&undo_allocation);
+ __ UndoAllocationInNewSpace(ebx);
+ }
+
+ // Allocate the new receiver object using the runtime call.
+ __ bind(&rt_call);
+ int offset = 0;
+ if (create_memento) {
+ // Get the cell or allocation site.
+ __ mov(edi, Operand(esp, kPointerSize * 2));
+ __ push(edi);
+ offset = kPointerSize;
+ }
+
+ // Must restore esi (context) and edi (constructor) before calling runtime.
+ __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+ __ mov(edi, Operand(esp, offset));
+ // edi: function (constructor)
+ __ push(edi);
+ if (create_memento) {
+ __ CallRuntime(Runtime::kHiddenNewObjectWithAllocationSite, 2);
+ } else {
+ __ CallRuntime(Runtime::kHiddenNewObject, 1);
+ }
+ __ mov(ebx, eax); // store result in ebx
+
+ // If we ended up using the runtime, and we want a memento, then the
+ // runtime call made it for us, and we shouldn't increment the create
+ // count.
+ Label count_incremented;
+ if (create_memento) {
+ __ jmp(&count_incremented);
+ }
+
+ // New object allocated.
+ // ebx: newly allocated object
+ __ bind(&allocated);
+
+ if (create_memento) {
+ __ mov(ecx, Operand(esp, kPointerSize * 2));
+ __ cmp(ecx, masm->isolate()->factory()->undefined_value());
+ __ j(equal, &count_incremented);
+ // ecx is an AllocationSite. We are creating a memento from it, so we
+ // need to increment the memento create count.
+ __ add(FieldOperand(ecx, AllocationSite::kPretenureCreateCountOffset),
+ Immediate(Smi::FromInt(1)));
+ __ bind(&count_incremented);
+ }
+
+ // Retrieve the function from the stack.
+ __ pop(edi);
+
+ // Retrieve smi-tagged arguments count from the stack.
+ __ mov(eax, Operand(esp, 0));
+ __ SmiUntag(eax);
+
+ // Push the allocated receiver to the stack. We need two copies
+ // because we may have to return the original one and the calling
+ // conventions dictate that the called function pops the receiver.
+ __ push(ebx);
+ __ push(ebx);
+
+ // Set up pointer to last argument.
+ __ lea(ebx, Operand(ebp, StandardFrameConstants::kCallerSPOffset));
+
+ // Copy arguments and receiver to the expression stack.
+ Label loop, entry;
+ __ mov(ecx, eax);
+ __ jmp(&entry);
+ __ bind(&loop);
+ __ push(Operand(ebx, ecx, times_4, 0));
+ __ bind(&entry);
+ __ dec(ecx);
+ __ j(greater_equal, &loop);
+
+ // Call the function.
+ if (is_api_function) {
+ __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
+ Handle<Code> code =
+ masm->isolate()->builtins()->HandleApiCallConstruct();
+ __ call(code, RelocInfo::CODE_TARGET);
+ } else {
+ ParameterCount actual(eax);
+ __ InvokeFunction(edi, actual, CALL_FUNCTION,
+ NullCallWrapper());
+ }
+
+ // Store offset of return address for deoptimizer.
+ if (!is_api_function) {
+ masm->isolate()->heap()->SetConstructStubDeoptPCOffset(masm->pc_offset());
+ }
+
+ // Restore context from the frame.
+ __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+
+ // If the result is an object (in the ECMA sense), we should get rid
+ // of the receiver and use the result; see ECMA-262 section 13.2.2-7
+ // on page 74.
+ Label use_receiver, exit;
+
+ // If the result is a smi, it is *not* an object in the ECMA sense.
+ __ JumpIfSmi(eax, &use_receiver);
+
+ // If the type of the result (stored in its map) is less than
+ // FIRST_SPEC_OBJECT_TYPE, it is not an object in the ECMA sense.
+ __ CmpObjectType(eax, FIRST_SPEC_OBJECT_TYPE, ecx);
+ __ j(above_equal, &exit);
+
+ // Throw away the result of the constructor invocation and use the
+ // on-stack receiver as the result.
+ __ bind(&use_receiver);
+ __ mov(eax, Operand(esp, 0));
+
+ // Restore the arguments count and leave the construct frame.
+ __ bind(&exit);
+ __ mov(ebx, Operand(esp, kPointerSize)); // Get arguments count.
+
+ // Leave construct frame.
+ }
+
+ // Remove caller arguments from the stack and return.
+ STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
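+ // ebx holds the smi-tagged argument count (value << 1 with kSmiTagSize == 1),
+ // so scaling by times_2 below yields count * kPointerSize bytes.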
+ __ pop(ecx);
+ __ lea(esp, Operand(esp, ebx, times_2, 1 * kPointerSize)); // 1 ~ receiver
+ __ push(ecx);
+ __ IncrementCounter(masm->isolate()->counters()->constructed_objects(), 1);
+ __ ret(0);
+}
+
+
+void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
+ Generate_JSConstructStubHelper(masm, false, FLAG_pretenuring_call_new);
+}
+
+
+void Builtins::Generate_JSConstructStubApi(MacroAssembler* masm) {
+ Generate_JSConstructStubHelper(masm, true, false);
+}
+
+
+static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
+ bool is_construct) {
+ ProfileEntryHookStub::MaybeCallEntryHook(masm);
+
+ // Clear the context before we push it when entering the internal frame.
+ __ Move(esi, Immediate(0));
+
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+
+ // Load the previous frame pointer (ebx) to access C arguments
+ __ mov(ebx, Operand(ebp, 0));
+
+ // Get the function from the frame and setup the context.
+ __ mov(ecx, Operand(ebx, EntryFrameConstants::kFunctionArgOffset));
+ __ mov(esi, FieldOperand(ecx, JSFunction::kContextOffset));
+
+ // Push the function and the receiver onto the stack.
+ __ push(ecx);
+ __ push(Operand(ebx, EntryFrameConstants::kReceiverArgOffset));
+
+ // Load the number of arguments and setup pointer to the arguments.
+ __ mov(eax, Operand(ebx, EntryFrameConstants::kArgcOffset));
+ __ mov(ebx, Operand(ebx, EntryFrameConstants::kArgvOffset));
+
+ // Copy arguments to the stack in a loop.
+ Label loop, entry;
+ __ Move(ecx, Immediate(0));
+ __ jmp(&entry);
+ __ bind(&loop);
+ __ mov(edx, Operand(ebx, ecx, times_4, 0)); // push parameter from argv
+ __ push(Operand(edx, 0)); // dereference handle
+ __ inc(ecx);
+ __ bind(&entry);
+ __ cmp(ecx, eax);
+ __ j(not_equal, &loop);
+
+ // Get the function from the stack and call it.
+ // kPointerSize for the receiver.
+ __ mov(edi, Operand(esp, eax, times_4, kPointerSize));
+
+ // Invoke the code.
+ if (is_construct) {
+ // No type feedback cell is available
+ __ mov(ebx, masm->isolate()->factory()->undefined_value());
+ CallConstructStub stub(masm->isolate(), NO_CALL_CONSTRUCTOR_FLAGS);
+ __ CallStub(&stub);
+ } else {
+ ParameterCount actual(eax);
+ __ InvokeFunction(edi, actual, CALL_FUNCTION,
+ NullCallWrapper());
+ }
+
+ // Exit the internal frame. Notice that this also removes the empty
+ // context and the function left on the stack by the code
+ // invocation.
+ }
+ __ ret(kPointerSize); // Remove receiver.
+}
+
+
+void Builtins::Generate_JSEntryTrampoline(MacroAssembler* masm) {
+ Generate_JSEntryTrampolineHelper(masm, false);
+}
+
+
+void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
+ Generate_JSEntryTrampolineHelper(masm, true);
+}
+
+
+void Builtins::Generate_CompileUnoptimized(MacroAssembler* masm) {
+ CallRuntimePassFunction(masm, Runtime::kHiddenCompileUnoptimized);
+ GenerateTailCallToReturnedCode(masm);
+}
+
+
+static void CallCompileOptimized(MacroAssembler* masm, bool concurrent) {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ // Push a copy of the function.
+ __ push(edi);
+ // Function is also the parameter to the runtime call.
+ __ push(edi);
+ // Whether to compile in a background thread.
+ __ Push(masm->isolate()->factory()->ToBoolean(concurrent));
+
+ __ CallRuntime(Runtime::kHiddenCompileOptimized, 2);
+ // Restore receiver.
+ __ pop(edi);
+}
+
+
+void Builtins::Generate_CompileOptimized(MacroAssembler* masm) {
+ CallCompileOptimized(masm, false);
+ GenerateTailCallToReturnedCode(masm);
+}
+
+
+void Builtins::Generate_CompileOptimizedConcurrent(MacroAssembler* masm) {
+ CallCompileOptimized(masm, true);
+ GenerateTailCallToReturnedCode(masm);
+}
+
+
+static void GenerateMakeCodeYoungAgainCommon(MacroAssembler* masm) {
+ // For now, we are relying on the fact that make_code_young doesn't do any
+ // garbage collection which allows us to save/restore the registers without
+ // worrying about which of them contain pointers. We also don't build an
+ // internal frame to make the code faster, since we shouldn't have to do stack
+ // crawls in MakeCodeYoung. This seems a bit fragile.
+
+ // Re-execute the code that was patched back to the young age when
+ // the stub returns.
+ __ sub(Operand(esp, 0), Immediate(5));
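+ // The return address is rewound by the length of the call instruction that
+ // entered this stub, so the freshly patched (young) sequence is re-executed
+ // when the stub returns.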
+ __ pushad();
+ __ mov(eax, Operand(esp, 8 * kPointerSize));
+ {
+ FrameScope scope(masm, StackFrame::MANUAL);
+ __ PrepareCallCFunction(2, ebx);
+ __ mov(Operand(esp, 1 * kPointerSize),
+ Immediate(ExternalReference::isolate_address(masm->isolate())));
+ __ mov(Operand(esp, 0), eax);
+ __ CallCFunction(
+ ExternalReference::get_make_code_young_function(masm->isolate()), 2);
+ }
+ __ popad();
+ __ ret(0);
+}
+
+#define DEFINE_CODE_AGE_BUILTIN_GENERATOR(C) \
+void Builtins::Generate_Make##C##CodeYoungAgainEvenMarking( \
+ MacroAssembler* masm) { \
+ GenerateMakeCodeYoungAgainCommon(masm); \
+} \
+void Builtins::Generate_Make##C##CodeYoungAgainOddMarking( \
+ MacroAssembler* masm) { \
+ GenerateMakeCodeYoungAgainCommon(masm); \
+}
+CODE_AGE_LIST(DEFINE_CODE_AGE_BUILTIN_GENERATOR)
+#undef DEFINE_CODE_AGE_BUILTIN_GENERATOR
+
+
+void Builtins::Generate_MarkCodeAsExecutedOnce(MacroAssembler* masm) {
+ // For now, as in GenerateMakeCodeYoungAgainCommon, we are relying on the fact
+ // that make_code_young doesn't do any garbage collection which allows us to
+ // save/restore the registers without worrying about which of them contain
+ // pointers.
+ __ pushad();
+ __ mov(eax, Operand(esp, 8 * kPointerSize));
+ __ sub(eax, Immediate(Assembler::kCallInstructionLength));
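+ // eax now holds the start address of the code age sequence (the call that
+ // entered this stub), which is passed to the C function below.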
+ { // NOLINT
+ FrameScope scope(masm, StackFrame::MANUAL);
+ __ PrepareCallCFunction(2, ebx);
+ __ mov(Operand(esp, 1 * kPointerSize),
+ Immediate(ExternalReference::isolate_address(masm->isolate())));
+ __ mov(Operand(esp, 0), eax);
+ __ CallCFunction(
+ ExternalReference::get_mark_code_as_executed_function(masm->isolate()),
+ 2);
+ }
+ __ popad();
+
+ // Perform prologue operations usually performed by the young code stub.
+ __ pop(eax); // Pop return address into scratch register.
+ __ push(ebp); // Caller's frame pointer.
+ __ mov(ebp, esp);
+ __ push(esi); // Callee's context.
+ __ push(edi); // Callee's JS Function.
+ __ push(eax); // Push return address after frame prologue.
+
+ // Jump to point after the code-age stub.
+ __ ret(0);
+}
+
+
+void Builtins::Generate_MarkCodeAsExecutedTwice(MacroAssembler* masm) {
+ GenerateMakeCodeYoungAgainCommon(masm);
+}
+
+
+static void Generate_NotifyStubFailureHelper(MacroAssembler* masm) {
+ // Enter an internal frame.
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+
+ // Preserve registers across notification, this is important for compiled
+ // stubs that tail call the runtime on deopts passing their parameters in
+ // registers.
+ __ pushad();
+ __ CallRuntime(Runtime::kHiddenNotifyStubFailure, 0);
+ __ popad();
+ // Tear down internal frame.
+ }
+
+ __ pop(MemOperand(esp, 0)); // Ignore state offset
+ __ ret(0); // Return to IC Miss stub, continuation still on stack.
+}
+
+
+void Builtins::Generate_NotifyStubFailure(MacroAssembler* masm) {
+ Generate_NotifyStubFailureHelper(masm);
+}
+
+
+void Builtins::Generate_NotifyStubFailureSaveDoubles(MacroAssembler* masm) {
+ // SaveDoubles is meaningless for X87; it is only used by deoptimizer.cc.
+ Generate_NotifyStubFailureHelper(masm);
+}
+
+
+static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
+ Deoptimizer::BailoutType type) {
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+
+ // Pass deoptimization type to the runtime system.
+ __ push(Immediate(Smi::FromInt(static_cast<int>(type))));
+ __ CallRuntime(Runtime::kHiddenNotifyDeoptimized, 1);
+
+ // Tear down internal frame.
+ }
+
+ // Get the full codegen state from the stack and untag it.
+ __ mov(ecx, Operand(esp, 1 * kPointerSize));
+ __ SmiUntag(ecx);
+
+ // Switch on the state.
+ Label not_no_registers, not_tos_eax;
+ __ cmp(ecx, FullCodeGenerator::NO_REGISTERS);
+ __ j(not_equal, &not_no_registers, Label::kNear);
+ __ ret(1 * kPointerSize); // Remove state.
+
+ __ bind(&not_no_registers);
+ __ mov(eax, Operand(esp, 2 * kPointerSize));
+ __ cmp(ecx, FullCodeGenerator::TOS_REG);
+ __ j(not_equal, &not_tos_eax, Label::kNear);
+ __ ret(2 * kPointerSize); // Remove state, eax.
+
+ __ bind(&not_tos_eax);
+ __ Abort(kNoCasesLeft);
+}
+
+
+void Builtins::Generate_NotifyDeoptimized(MacroAssembler* masm) {
+ Generate_NotifyDeoptimizedHelper(masm, Deoptimizer::EAGER);
+}
+
+
+void Builtins::Generate_NotifySoftDeoptimized(MacroAssembler* masm) {
+ Generate_NotifyDeoptimizedHelper(masm, Deoptimizer::SOFT);
+}
+
+
+void Builtins::Generate_NotifyLazyDeoptimized(MacroAssembler* masm) {
+ Generate_NotifyDeoptimizedHelper(masm, Deoptimizer::LAZY);
+}
+
+
+void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
+ Factory* factory = masm->isolate()->factory();
+
+ // 1. Make sure we have at least one argument.
+ { Label done;
+ __ test(eax, eax);
+ __ j(not_zero, &done);
+ __ pop(ebx);
+ __ push(Immediate(factory->undefined_value()));
+ __ push(ebx);
+ __ inc(eax);
+ __ bind(&done);
+ }
+
+ // 2. Get the function to call (passed as receiver) from the stack, check
+ // if it is a function.
+ Label slow, non_function;
+ // 1 ~ return address.
+ __ mov(edi, Operand(esp, eax, times_4, 1 * kPointerSize));
+ __ JumpIfSmi(edi, &non_function);
+ __ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
+ __ j(not_equal, &slow);
+
+
+ // 3a. Patch the first argument if necessary when calling a function.
+ Label shift_arguments;
+ __ Move(edx, Immediate(0)); // indicate regular JS_FUNCTION
+ { Label convert_to_object, use_global_receiver, patch_receiver;
+ // Change context eagerly in case we need the global receiver.
+ __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
+
+ // Do not transform the receiver for strict mode functions.
+ __ mov(ebx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
+ __ test_b(FieldOperand(ebx, SharedFunctionInfo::kStrictModeByteOffset),
+ 1 << SharedFunctionInfo::kStrictModeBitWithinByte);
+ __ j(not_equal, &shift_arguments);
+
+ // Do not transform the receiver for natives (shared already in ebx).
+ __ test_b(FieldOperand(ebx, SharedFunctionInfo::kNativeByteOffset),
+ 1 << SharedFunctionInfo::kNativeBitWithinByte);
+ __ j(not_equal, &shift_arguments);
+
+ // Compute the receiver in sloppy mode.
+ __ mov(ebx, Operand(esp, eax, times_4, 0)); // First argument.
+
+ // Call ToObject on the receiver if it is not an object, or use the
+ // global object if it is null or undefined.
+ __ JumpIfSmi(ebx, &convert_to_object);
+ __ cmp(ebx, factory->null_value());
+ __ j(equal, &use_global_receiver);
+ __ cmp(ebx, factory->undefined_value());
+ __ j(equal, &use_global_receiver);
+ STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
+ __ CmpObjectType(ebx, FIRST_SPEC_OBJECT_TYPE, ecx);
+ __ j(above_equal, &shift_arguments);
+
+ __ bind(&convert_to_object);
+
+ { // In order to preserve argument count.
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ SmiTag(eax);
+ __ push(eax);
+
+ __ push(ebx);
+ __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
+ __ mov(ebx, eax);
+ __ Move(edx, Immediate(0)); // restore
+
+ __ pop(eax);
+ __ SmiUntag(eax);
+ }
+
+ // Restore the function to edi.
+ __ mov(edi, Operand(esp, eax, times_4, 1 * kPointerSize));
+ __ jmp(&patch_receiver);
+
+ __ bind(&use_global_receiver);
+ __ mov(ebx,
+ Operand(esi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
+ __ mov(ebx, FieldOperand(ebx, GlobalObject::kGlobalReceiverOffset));
+
+ __ bind(&patch_receiver);
+ __ mov(Operand(esp, eax, times_4, 0), ebx);
+
+ __ jmp(&shift_arguments);
+ }
+
+ // 3b. Check for function proxy.
+ __ bind(&slow);
+ __ Move(edx, Immediate(1)); // indicate function proxy
+ __ CmpInstanceType(ecx, JS_FUNCTION_PROXY_TYPE);
+ __ j(equal, &shift_arguments);
+ __ bind(&non_function);
+ __ Move(edx, Immediate(2)); // indicate non-function
+
+ // 3c. Patch the first argument when calling a non-function. The
+ // CALL_NON_FUNCTION builtin expects the non-function callee as
+ // receiver, so overwrite the first argument which will ultimately
+ // become the receiver.
+ __ mov(Operand(esp, eax, times_4, 0), edi);
+
+ // 4. Shift arguments and return address one slot down on the stack
+ // (overwriting the original receiver). Adjust argument count to make
+ // the original first argument the new receiver.
+ __ bind(&shift_arguments);
+ { Label loop;
+ __ mov(ecx, eax);
+ __ bind(&loop);
+ __ mov(ebx, Operand(esp, ecx, times_4, 0));
+ __ mov(Operand(esp, ecx, times_4, kPointerSize), ebx);
+ __ dec(ecx);
+ __ j(not_sign, &loop); // While non-negative (to copy return address).
+ __ pop(ebx); // Discard copy of return address.
+ __ dec(eax); // One fewer argument (first argument is new receiver).
+ }
+
+ // 5a. Call non-function via tail call to CALL_NON_FUNCTION builtin,
+ // or a function proxy via CALL_FUNCTION_PROXY.
+ { Label function, non_proxy;
+ __ test(edx, edx);
+ __ j(zero, &function);
+ __ Move(ebx, Immediate(0));
+ __ cmp(edx, Immediate(1));
+ __ j(not_equal, &non_proxy);
+
+ __ pop(edx); // return address
+ __ push(edi); // re-add proxy object as additional argument
+ __ push(edx);
+ __ inc(eax);
+ __ GetBuiltinEntry(edx, Builtins::CALL_FUNCTION_PROXY);
+ __ jmp(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
+ RelocInfo::CODE_TARGET);
+
+ __ bind(&non_proxy);
+ __ GetBuiltinEntry(edx, Builtins::CALL_NON_FUNCTION);
+ __ jmp(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
+ RelocInfo::CODE_TARGET);
+ __ bind(&function);
+ }
+
+ // 5b. Get the code to call from the function and check that the number of
+ // expected arguments matches what we're providing. If so, jump
+ // (tail-call) to the code in register edx without checking arguments.
+ __ mov(edx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
+ __ mov(ebx,
+ FieldOperand(edx, SharedFunctionInfo::kFormalParameterCountOffset));
+ __ mov(edx, FieldOperand(edi, JSFunction::kCodeEntryOffset));
+ __ SmiUntag(ebx);
+ __ cmp(eax, ebx);
+ __ j(not_equal,
+ masm->isolate()->builtins()->ArgumentsAdaptorTrampoline());
+
+ ParameterCount expected(0);
+ __ InvokeCode(edx, expected, expected, JUMP_FUNCTION, NullCallWrapper());
+}
+
+
+void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
+ static const int kArgumentsOffset = 2 * kPointerSize;
+ static const int kReceiverOffset = 3 * kPointerSize;
+ static const int kFunctionOffset = 4 * kPointerSize;
+ {
+ FrameScope frame_scope(masm, StackFrame::INTERNAL);
+
+ __ push(Operand(ebp, kFunctionOffset)); // push this
+ __ push(Operand(ebp, kArgumentsOffset)); // push arguments
+ __ InvokeBuiltin(Builtins::APPLY_PREPARE, CALL_FUNCTION);
+
+ // Check the stack for overflow. We are not trying to catch
+ // interruptions (e.g. debug break and preemption) here, so the "real stack
+ // limit" is checked.
+ Label okay;
+ ExternalReference real_stack_limit =
+ ExternalReference::address_of_real_stack_limit(masm->isolate());
+ __ mov(edi, Operand::StaticVariable(real_stack_limit));
+ // Make ecx the space we have left. The stack might already be overflowed
+ // here which will cause ecx to become negative.
+ __ mov(ecx, esp);
+ __ sub(ecx, edi);
+ // Make edx the space we need for the array when it is unrolled onto the
+ // stack.
+ __ mov(edx, eax);
+ __ shl(edx, kPointerSizeLog2 - kSmiTagSize);
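+ // eax holds the smi-tagged number of arguments, so shifting by
+ // (kPointerSizeLog2 - kSmiTagSize) turns the smi directly into a byte count.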
+ // Check if the arguments will overflow the stack.
+ __ cmp(ecx, edx);
+ __ j(greater, &okay); // Signed comparison.
+
+ // Out of stack space.
+ __ push(Operand(ebp, 4 * kPointerSize)); // push this
+ __ push(eax);
+ __ InvokeBuiltin(Builtins::STACK_OVERFLOW, CALL_FUNCTION);
+ __ bind(&okay);
+ // End of stack check.
+
+ // Push current index and limit.
+ const int kLimitOffset =
+ StandardFrameConstants::kExpressionsOffset - 1 * kPointerSize;
+ const int kIndexOffset = kLimitOffset - 1 * kPointerSize;
+ __ push(eax); // limit
+ __ push(Immediate(0)); // index
+
+ // Get the receiver.
+ __ mov(ebx, Operand(ebp, kReceiverOffset));
+
+ // Check that the function is a JS function (otherwise it must be a proxy).
+ Label push_receiver, use_global_receiver;
+ __ mov(edi, Operand(ebp, kFunctionOffset));
+ __ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
+ __ j(not_equal, &push_receiver);
+
+ // Change context eagerly to get the right global object if necessary.
+ __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
+
+ // Compute the receiver.
+ // Do not transform the receiver for strict mode functions.
+ Label call_to_object;
+ __ mov(ecx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
+ __ test_b(FieldOperand(ecx, SharedFunctionInfo::kStrictModeByteOffset),
+ 1 << SharedFunctionInfo::kStrictModeBitWithinByte);
+ __ j(not_equal, &push_receiver);
+
+ Factory* factory = masm->isolate()->factory();
+
+ // Do not transform the receiver for natives (shared already in ecx).
+ __ test_b(FieldOperand(ecx, SharedFunctionInfo::kNativeByteOffset),
+ 1 << SharedFunctionInfo::kNativeBitWithinByte);
+ __ j(not_equal, &push_receiver);
+
+ // Compute the receiver in sloppy mode.
+ // Call ToObject on the receiver if it is not an object, or use the
+ // global object if it is null or undefined.
+ __ JumpIfSmi(ebx, &call_to_object);
+ __ cmp(ebx, factory->null_value());
+ __ j(equal, &use_global_receiver);
+ __ cmp(ebx, factory->undefined_value());
+ __ j(equal, &use_global_receiver);
+ STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
+ __ CmpObjectType(ebx, FIRST_SPEC_OBJECT_TYPE, ecx);
+ __ j(above_equal, &push_receiver);
+
+ __ bind(&call_to_object);
+ __ push(ebx);
+ __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
+ __ mov(ebx, eax);
+ __ jmp(&push_receiver);
+
+ __ bind(&use_global_receiver);
+ __ mov(ebx,
+ Operand(esi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
+ __ mov(ebx, FieldOperand(ebx, GlobalObject::kGlobalReceiverOffset));
+
+ // Push the receiver.
+ __ bind(&push_receiver);
+ __ push(ebx);
+
+ // Copy all arguments from the array to the stack.
+ Label entry, loop;
+ __ mov(ecx, Operand(ebp, kIndexOffset));
+ __ jmp(&entry);
+ __ bind(&loop);
+ __ mov(edx, Operand(ebp, kArgumentsOffset)); // load arguments
+
+ // Use inline caching to speed up access to arguments.
+ Handle<Code> ic = masm->isolate()->builtins()->KeyedLoadIC_Initialize();
+ __ call(ic, RelocInfo::CODE_TARGET);
+ // It is important that we do not have a test instruction after the
+ // call. A test instruction after the call is used to indicate that
+ // we have generated an inline version of the keyed load. In this
+ // case, we know that we are not generating a test instruction next.
+
+ // Push the nth argument.
+ __ push(eax);
+
+ // Update the index on the stack and in register eax.
+ __ mov(ecx, Operand(ebp, kIndexOffset));
+ __ add(ecx, Immediate(1 << kSmiTagSize));
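+ // Adding 1 << kSmiTagSize advances the smi-tagged index by one.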
+ __ mov(Operand(ebp, kIndexOffset), ecx);
+
+ __ bind(&entry);
+ __ cmp(ecx, Operand(ebp, kLimitOffset));
+ __ j(not_equal, &loop);
+
+ // Call the function.
+ Label call_proxy;
+ __ mov(eax, ecx);
+ ParameterCount actual(eax);
+ __ SmiUntag(eax);
+ __ mov(edi, Operand(ebp, kFunctionOffset));
+ __ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
+ __ j(not_equal, &call_proxy);
+ __ InvokeFunction(edi, actual, CALL_FUNCTION, NullCallWrapper());
+
+ frame_scope.GenerateLeaveFrame();
+ __ ret(3 * kPointerSize); // remove this, receiver, and arguments
+
+ // Call the function proxy.
+ __ bind(&call_proxy);
+ __ push(edi); // add function proxy as last argument
+ __ inc(eax);
+ __ Move(ebx, Immediate(0));
+ __ GetBuiltinEntry(edx, Builtins::CALL_FUNCTION_PROXY);
+ __ call(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
+ RelocInfo::CODE_TARGET);
+
+ // Leave internal frame.
+ }
+ __ ret(3 * kPointerSize); // remove this, receiver, and arguments
+}
+
+
+void Builtins::Generate_InternalArrayCode(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- eax : argc
+ // -- esp[0] : return address
+ // -- esp[4] : last argument
+ // -----------------------------------
+ Label generic_array_code;
+
+ // Get the InternalArray function.
+ __ LoadGlobalFunction(Context::INTERNAL_ARRAY_FUNCTION_INDEX, edi);
+
+ if (FLAG_debug_code) {
+ // Initial map for the builtin InternalArray function should be a map.
+ __ mov(ebx, FieldOperand(edi, JSFunction::kPrototypeOrInitialMapOffset));
+ // Will both indicate a NULL and a Smi.
+ __ test(ebx, Immediate(kSmiTagMask));
+ __ Assert(not_zero, kUnexpectedInitialMapForInternalArrayFunction);
+ __ CmpObjectType(ebx, MAP_TYPE, ecx);
+ __ Assert(equal, kUnexpectedInitialMapForInternalArrayFunction);
+ }
+
+ // Run the native code for the InternalArray function called as a normal
+ // function.
+ // tail call a stub
+ InternalArrayConstructorStub stub(masm->isolate());
+ __ TailCallStub(&stub);
+}
+
+
+void Builtins::Generate_ArrayCode(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- eax : argc
+ // -- esp[0] : return address
+ // -- esp[4] : last argument
+ // -----------------------------------
+ Label generic_array_code;
+
+ // Get the Array function.
+ __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, edi);
+
+ if (FLAG_debug_code) {
+ // Initial map for the builtin Array function should be a map.
+ __ mov(ebx, FieldOperand(edi, JSFunction::kPrototypeOrInitialMapOffset));
+ // Will both indicate a NULL and a Smi.
+ __ test(ebx, Immediate(kSmiTagMask));
+ __ Assert(not_zero, kUnexpectedInitialMapForArrayFunction);
+ __ CmpObjectType(ebx, MAP_TYPE, ecx);
+ __ Assert(equal, kUnexpectedInitialMapForArrayFunction);
+ }
+
+ // Run the native code for the Array function called as a normal function.
+ // tail call a stub
+ __ mov(ebx, masm->isolate()->factory()->undefined_value());
+ ArrayConstructorStub stub(masm->isolate());
+ __ TailCallStub(&stub);
+}
+
+
+void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- eax : number of arguments
+ // -- edi : constructor function
+ // -- esp[0] : return address
+ // -- esp[(argc - n) * 4] : arg[n] (zero-based)
+ // -- esp[(argc + 1) * 4] : receiver
+ // -----------------------------------
+ Counters* counters = masm->isolate()->counters();
+ __ IncrementCounter(counters->string_ctor_calls(), 1);
+
+ if (FLAG_debug_code) {
+ __ LoadGlobalFunction(Context::STRING_FUNCTION_INDEX, ecx);
+ __ cmp(edi, ecx);
+ __ Assert(equal, kUnexpectedStringFunction);
+ }
+
+ // Load the first argument into eax and get rid of the rest
+ // (including the receiver).
+ Label no_arguments;
+ __ test(eax, eax);
+ __ j(zero, &no_arguments);
+ __ mov(ebx, Operand(esp, eax, times_pointer_size, 0));
+ __ pop(ecx);
+ __ lea(esp, Operand(esp, eax, times_pointer_size, kPointerSize));
+ __ push(ecx);
+ __ mov(eax, ebx);
+
+ // Lookup the argument in the number to string cache.
+ Label not_cached, argument_is_string;
+ __ LookupNumberStringCache(eax, // Input.
+ ebx, // Result.
+ ecx, // Scratch 1.
+ edx, // Scratch 2.
+ &not_cached);
+ __ IncrementCounter(counters->string_ctor_cached_number(), 1);
+ __ bind(&argument_is_string);
+ // ----------- S t a t e -------------
+ // -- ebx : argument converted to string
+ // -- edi : constructor function
+ // -- esp[0] : return address
+ // -----------------------------------
+
+ // Allocate a JSValue and put the tagged pointer into eax.
+ Label gc_required;
+ __ Allocate(JSValue::kSize,
+ eax, // Result.
+ ecx, // New allocation top (we ignore it).
+ no_reg,
+ &gc_required,
+ TAG_OBJECT);
+
+ // Set the map.
+ __ LoadGlobalFunctionInitialMap(edi, ecx);
+ if (FLAG_debug_code) {
+ __ cmpb(FieldOperand(ecx, Map::kInstanceSizeOffset),
+ JSValue::kSize >> kPointerSizeLog2);
+ __ Assert(equal, kUnexpectedStringWrapperInstanceSize);
+ __ cmpb(FieldOperand(ecx, Map::kUnusedPropertyFieldsOffset), 0);
+ __ Assert(equal, kUnexpectedUnusedPropertiesOfStringWrapper);
+ }
+ __ mov(FieldOperand(eax, HeapObject::kMapOffset), ecx);
+
+ // Set properties and elements.
+ Factory* factory = masm->isolate()->factory();
+ __ Move(ecx, Immediate(factory->empty_fixed_array()));
+ __ mov(FieldOperand(eax, JSObject::kPropertiesOffset), ecx);
+ __ mov(FieldOperand(eax, JSObject::kElementsOffset), ecx);
+
+ // Set the value.
+ __ mov(FieldOperand(eax, JSValue::kValueOffset), ebx);
+
+ // Ensure the object is fully initialized.
+ STATIC_ASSERT(JSValue::kSize == 4 * kPointerSize);
+
+ // We're done. Return.
+ __ ret(0);
+
+ // The argument was not found in the number to string cache. Check
+ // if it's a string already before calling the conversion builtin.
+ Label convert_argument;
+ __ bind(&not_cached);
+ STATIC_ASSERT(kSmiTag == 0);
+ __ JumpIfSmi(eax, &convert_argument);
+ Condition is_string = masm->IsObjectStringType(eax, ebx, ecx);
+ __ j(NegateCondition(is_string), &convert_argument);
+ __ mov(ebx, eax);
+ __ IncrementCounter(counters->string_ctor_string_value(), 1);
+ __ jmp(&argument_is_string);
+
+ // Invoke the conversion builtin and put the result into ebx.
+ __ bind(&convert_argument);
+ __ IncrementCounter(counters->string_ctor_conversions(), 1);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ push(edi); // Preserve the function.
+ __ push(eax);
+ __ InvokeBuiltin(Builtins::TO_STRING, CALL_FUNCTION);
+ __ pop(edi);
+ }
+ __ mov(ebx, eax);
+ __ jmp(&argument_is_string);
+
+ // Load the empty string into ebx, remove the receiver from the
+ // stack, and jump back to the case where the argument is a string.
+ __ bind(&no_arguments);
+ __ Move(ebx, Immediate(factory->empty_string()));
+ __ pop(ecx);
+ __ lea(esp, Operand(esp, kPointerSize));
+ __ push(ecx);
+ __ jmp(&argument_is_string);
+
+ // At this point the argument is already a string. Call runtime to
+ // create a string wrapper.
+ __ bind(&gc_required);
+ __ IncrementCounter(counters->string_ctor_gc_required(), 1);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ push(ebx);
+ __ CallRuntime(Runtime::kNewStringWrapper, 1);
+ }
+ __ ret(0);
+}
+
+
+static void ArgumentsAdaptorStackCheck(MacroAssembler* masm,
+ Label* stack_overflow) {
+ // ----------- S t a t e -------------
+ // -- eax : actual number of arguments
+ // -- ebx : expected number of arguments
+ // -- edi : function (passed through to callee)
+ // -----------------------------------
+ // Check the stack for overflow. We are not trying to catch
+ // interruptions (e.g. debug break and preemption) here, so the "real stack
+ // limit" is checked.
+ ExternalReference real_stack_limit =
+ ExternalReference::address_of_real_stack_limit(masm->isolate());
+ __ mov(edx, Operand::StaticVariable(real_stack_limit));
+ // Make ecx the space we have left. The stack might already be overflowed
+ // here which will cause ecx to become negative.
+ __ mov(ecx, esp);
+ __ sub(ecx, edx);
+ // Make edx the space we need for the array when it is unrolled onto the
+ // stack.
+ __ mov(edx, ebx);
+ __ shl(edx, kPointerSizeLog2);
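+ // Unlike the apply stub, ebx holds an untagged argument count here, so the
+ // plain kPointerSizeLog2 shift converts it to bytes.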
+ // Check if the arguments will overflow the stack.
+ __ cmp(ecx, edx);
+ __ j(less_equal, stack_overflow); // Signed comparison.
+}
+
+
+static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
+ __ push(ebp);
+ __ mov(ebp, esp);
+
+ // Store the arguments adaptor context sentinel.
+ __ push(Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+
+ // Push the function on the stack.
+ __ push(edi);
+
+ // Preserve the number of arguments on the stack. Must preserve eax,
+ // ebx and ecx because these registers are used when copying the
+ // arguments and the receiver.
+ STATIC_ASSERT(kSmiTagSize == 1);
+ __ lea(edi, Operand(eax, eax, times_1, kSmiTag));
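+ // lea computes eax * 2 + kSmiTag, i.e. the smi-tagged argument count,
+ // without clobbering eax or the flags.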
+ __ push(edi);
+}
+
+
+static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
+ // Retrieve the number of arguments from the stack.
+ __ mov(ebx, Operand(ebp, ArgumentsAdaptorFrameConstants::kLengthOffset));
+
+ // Leave the frame.
+ __ leave();
+
+ // Remove caller arguments from the stack.
+ STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
+ __ pop(ecx);
+ __ lea(esp, Operand(esp, ebx, times_2, 1 * kPointerSize)); // 1 ~ receiver
+ __ push(ecx);
+}
+
+
+void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- eax : actual number of arguments
+ // -- ebx : expected number of arguments
+ // -- edi : function (passed through to callee)
+ // -----------------------------------
+
+ Label invoke, dont_adapt_arguments;
+ __ IncrementCounter(masm->isolate()->counters()->arguments_adaptors(), 1);
+
+ Label stack_overflow;
+ ArgumentsAdaptorStackCheck(masm, &stack_overflow);
+
+ Label enough, too_few;
+ __ mov(edx, FieldOperand(edi, JSFunction::kCodeEntryOffset));
+ __ cmp(eax, ebx);
+ __ j(less, &too_few);
+ __ cmp(ebx, SharedFunctionInfo::kDontAdaptArgumentsSentinel);
+ __ j(equal, &dont_adapt_arguments);
+
+ { // Enough parameters: Actual >= expected.
+ __ bind(&enough);
+ EnterArgumentsAdaptorFrame(masm);
+
+ // Copy receiver and all expected arguments.
+ const int offset = StandardFrameConstants::kCallerSPOffset;
+ __ lea(eax, Operand(ebp, eax, times_4, offset));
+ __ mov(edi, -1); // account for receiver
+
+ Label copy;
+ __ bind(&copy);
+ __ inc(edi);
+ __ push(Operand(eax, 0));
+ __ sub(eax, Immediate(kPointerSize));
+ __ cmp(edi, ebx);
+ __ j(less, &copy);
+ __ jmp(&invoke);
+ }
+
+ { // Too few parameters: Actual < expected.
+ __ bind(&too_few);
+ EnterArgumentsAdaptorFrame(masm);
+
+ // Copy receiver and all actual arguments.
+ const int offset = StandardFrameConstants::kCallerSPOffset;
+ __ lea(edi, Operand(ebp, eax, times_4, offset));
+ // ebx = expected - actual.
+ __ sub(ebx, eax);
+ // eax = -actual - 1
+ __ neg(eax);
+ __ sub(eax, Immediate(1));
+
+ Label copy;
+ __ bind(&copy);
+ __ inc(eax);
+ __ push(Operand(edi, 0));
+ __ sub(edi, Immediate(kPointerSize));
+ __ test(eax, eax);
+ __ j(not_zero, &copy);
+
+ // Fill remaining expected arguments with undefined values.
+ Label fill;
+ __ bind(&fill);
+ __ inc(eax);
+ __ push(Immediate(masm->isolate()->factory()->undefined_value()));
+ __ cmp(eax, ebx);
+ __ j(less, &fill);
+ }
+
+ // Call the entry point.
+ __ bind(&invoke);
+ // Restore function pointer.
+ __ mov(edi, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
+ __ call(edx);
+
+ // Store offset of return address for deoptimizer.
+ masm->isolate()->heap()->SetArgumentsAdaptorDeoptPCOffset(masm->pc_offset());
+
+ // Leave frame and return.
+ LeaveArgumentsAdaptorFrame(masm);
+ __ ret(0);
+
+ // -------------------------------------------
+ // Don't adapt arguments.
+ // -------------------------------------------
+ __ bind(&dont_adapt_arguments);
+ __ jmp(edx);
+
+ __ bind(&stack_overflow);
+ {
+ FrameScope frame(masm, StackFrame::MANUAL);
+ EnterArgumentsAdaptorFrame(masm);
+ __ InvokeBuiltin(Builtins::STACK_OVERFLOW, CALL_FUNCTION);
+ __ int3();
+ }
+}
+
+
+void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
+ // Lookup the function in the JavaScript frame.
+ __ mov(eax, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ // Pass function as argument.
+ __ push(eax);
+ __ CallRuntime(Runtime::kCompileForOnStackReplacement, 1);
+ }
+
+ Label skip;
+ // If the code object is null, just return to the unoptimized code.
+ __ cmp(eax, Immediate(0));
+ __ j(not_equal, &skip, Label::kNear);
+ __ ret(0);
+
+ __ bind(&skip);
+
+ // Load deoptimization data from the code object.
+ __ mov(ebx, Operand(eax, Code::kDeoptimizationDataOffset - kHeapObjectTag));
+
+ // Load the OSR entrypoint offset from the deoptimization data.
+ __ mov(ebx, Operand(ebx, FixedArray::OffsetOfElementAt(
+ DeoptimizationInputData::kOsrPcOffsetIndex) - kHeapObjectTag));
+ __ SmiUntag(ebx);
+
+ // Compute the target address = code_obj + header_size + osr_offset
+ __ lea(eax, Operand(eax, ebx, times_1, Code::kHeaderSize - kHeapObjectTag));
+
+ // Overwrite the return address on the stack.
+ __ mov(Operand(esp, 0), eax);
+
+ // And "return" to the OSR entry point of the function.
+ __ ret(0);
+}
+
+
+void Builtins::Generate_OsrAfterStackCheck(MacroAssembler* masm) {
+ // We check the stack limit as indicator that recompilation might be done.
+ Label ok;
+ ExternalReference stack_limit =
+ ExternalReference::address_of_stack_limit(masm->isolate());
+ __ cmp(esp, Operand::StaticVariable(stack_limit));
+ __ j(above_equal, &ok, Label::kNear);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ CallRuntime(Runtime::kHiddenStackGuard, 0);
+ }
+ __ jmp(masm->isolate()->builtins()->OnStackReplacement(),
+ RelocInfo::CODE_TARGET);
+
+ __ bind(&ok);
+ __ ret(0);
+}
+
+#undef __
+}
+} // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_X87
--- /dev/null
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "v8.h"
+
+#if V8_TARGET_ARCH_X87
+
+#include "bootstrapper.h"
+#include "code-stubs.h"
+#include "isolate.h"
+#include "jsregexp.h"
+#include "regexp-macro-assembler.h"
+#include "runtime.h"
+#include "stub-cache.h"
+#include "codegen.h"
+#include "runtime.h"
+
+namespace v8 {
+namespace internal {
+
+
+void FastNewClosureStub::InitializeInterfaceDescriptor(
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { ebx };
+ descriptor->register_param_count_ = 1;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ =
+ Runtime::FunctionForId(Runtime::kHiddenNewClosureFromStubFailure)->entry;
+}
+
+
+void FastNewContextStub::InitializeInterfaceDescriptor(
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { edi };
+ descriptor->register_param_count_ = 1;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ = NULL;
+}
+
+
+void ToNumberStub::InitializeInterfaceDescriptor(
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { eax };
+ descriptor->register_param_count_ = 1;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ = NULL;
+}
+
+
+void NumberToStringStub::InitializeInterfaceDescriptor(
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { eax };
+ descriptor->register_param_count_ = 1;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ =
+ Runtime::FunctionForId(Runtime::kHiddenNumberToString)->entry;
+}
+
+
+void FastCloneShallowArrayStub::InitializeInterfaceDescriptor(
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { eax, ebx, ecx };
+ descriptor->register_param_count_ = 3;
+ descriptor->register_params_ = registers;
+ static Representation representations[] = {
+ Representation::Tagged(),
+ Representation::Smi(),
+ Representation::Tagged() };
+ descriptor->register_param_representations_ = representations;
+ descriptor->deoptimization_handler_ =
+ Runtime::FunctionForId(
+ Runtime::kHiddenCreateArrayLiteralStubBailout)->entry;
+}
+
+
+void FastCloneShallowObjectStub::InitializeInterfaceDescriptor(
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { eax, ebx, ecx, edx };
+ descriptor->register_param_count_ = 4;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ =
+ Runtime::FunctionForId(Runtime::kHiddenCreateObjectLiteral)->entry;
+}
+
+
+void CreateAllocationSiteStub::InitializeInterfaceDescriptor(
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { ebx, edx };
+ descriptor->register_param_count_ = 2;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ = NULL;
+}
+
+
+void KeyedLoadFastElementStub::InitializeInterfaceDescriptor(
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { edx, ecx };
+ descriptor->register_param_count_ = 2;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ =
+ FUNCTION_ADDR(KeyedLoadIC_MissFromStubFailure);
+}
+
+
+void KeyedLoadDictionaryElementStub::InitializeInterfaceDescriptor(
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { edx, ecx };
+ descriptor->register_param_count_ = 2;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ =
+ FUNCTION_ADDR(KeyedLoadIC_MissFromStubFailure);
+}
+
+
+void RegExpConstructResultStub::InitializeInterfaceDescriptor(
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { ecx, ebx, eax };
+ descriptor->register_param_count_ = 3;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ =
+ Runtime::FunctionForId(Runtime::kHiddenRegExpConstructResult)->entry;
+}
+
+
+void LoadFieldStub::InitializeInterfaceDescriptor(
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { edx };
+ descriptor->register_param_count_ = 1;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ = NULL;
+}
+
+
+void KeyedLoadFieldStub::InitializeInterfaceDescriptor(
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { edx };
+ descriptor->register_param_count_ = 1;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ = NULL;
+}
+
+
+void StringLengthStub::InitializeInterfaceDescriptor(
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { edx, ecx };
+ descriptor->register_param_count_ = 2;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ = NULL;
+}
+
+
+void KeyedStringLengthStub::InitializeInterfaceDescriptor(
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { edx, ecx };
+ descriptor->register_param_count_ = 2;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ = NULL;
+}
+
+
+void KeyedStoreFastElementStub::InitializeInterfaceDescriptor(
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { edx, ecx, eax };
+ descriptor->register_param_count_ = 3;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ =
+ FUNCTION_ADDR(KeyedStoreIC_MissFromStubFailure);
+}
+
+
+void TransitionElementsKindStub::InitializeInterfaceDescriptor(
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { eax, ebx };
+ descriptor->register_param_count_ = 2;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ =
+ Runtime::FunctionForId(Runtime::kTransitionElementsKind)->entry;
+}
+
+
+static void InitializeArrayConstructorDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor,
+ int constant_stack_parameter_count) {
+ // register state
+ // eax -- number of arguments
+ // edi -- function
+ // ebx -- allocation site with elements kind
+ static Register registers_variable_args[] = { edi, ebx, eax };
+ static Register registers_no_args[] = { edi, ebx };
+
+ if (constant_stack_parameter_count == 0) {
+ descriptor->register_param_count_ = 2;
+ descriptor->register_params_ = registers_no_args;
+ } else {
+ // The stack parameter count covers the constructor pointer and a single argument.
+ descriptor->handler_arguments_mode_ = PASS_ARGUMENTS;
+ descriptor->stack_parameter_count_ = eax;
+ descriptor->register_param_count_ = 3;
+ descriptor->register_params_ = registers_variable_args;
+ static Representation representations[] = {
+ Representation::Tagged(),
+ Representation::Tagged(),
+ Representation::Integer32() };
+ descriptor->register_param_representations_ = representations;
+ }
+
+ descriptor->hint_stack_parameter_count_ = constant_stack_parameter_count;
+ descriptor->function_mode_ = JS_FUNCTION_STUB_MODE;
+ descriptor->deoptimization_handler_ =
+ Runtime::FunctionForId(Runtime::kHiddenArrayConstructor)->entry;
+}
+
+
+static void InitializeInternalArrayConstructorDescriptor(
+ CodeStubInterfaceDescriptor* descriptor,
+ int constant_stack_parameter_count) {
+ // register state
+ // eax -- number of arguments
+ // edi -- constructor function
+ static Register registers_variable_args[] = { edi, eax };
+ static Register registers_no_args[] = { edi };
+
+ if (constant_stack_parameter_count == 0) {
+ descriptor->register_param_count_ = 1;
+ descriptor->register_params_ = registers_no_args;
+ } else {
+ // The stack parameter count covers the constructor pointer and a single argument.
+ descriptor->handler_arguments_mode_ = PASS_ARGUMENTS;
+ descriptor->stack_parameter_count_ = eax;
+ descriptor->register_param_count_ = 2;
+ descriptor->register_params_ = registers_variable_args;
+ static Representation representations[] = {
+ Representation::Tagged(),
+ Representation::Integer32() };
+ descriptor->register_param_representations_ = representations;
+ }
+
+ descriptor->hint_stack_parameter_count_ = constant_stack_parameter_count;
+ descriptor->function_mode_ = JS_FUNCTION_STUB_MODE;
+ descriptor->deoptimization_handler_ =
+ Runtime::FunctionForId(Runtime::kHiddenInternalArrayConstructor)->entry;
+}
+
+
+void ArrayNoArgumentConstructorStub::InitializeInterfaceDescriptor(
+ CodeStubInterfaceDescriptor* descriptor) {
+ InitializeArrayConstructorDescriptor(isolate(), descriptor, 0);
+}
+
+
+void ArraySingleArgumentConstructorStub::InitializeInterfaceDescriptor(
+ CodeStubInterfaceDescriptor* descriptor) {
+ InitializeArrayConstructorDescriptor(isolate(), descriptor, 1);
+}
+
+
+void ArrayNArgumentsConstructorStub::InitializeInterfaceDescriptor(
+ CodeStubInterfaceDescriptor* descriptor) {
+ InitializeArrayConstructorDescriptor(isolate(), descriptor, -1);
+}
+
+
+void InternalArrayNoArgumentConstructorStub::InitializeInterfaceDescriptor(
+ CodeStubInterfaceDescriptor* descriptor) {
+ InitializeInternalArrayConstructorDescriptor(descriptor, 0);
+}
+
+
+void InternalArraySingleArgumentConstructorStub::InitializeInterfaceDescriptor(
+ CodeStubInterfaceDescriptor* descriptor) {
+ InitializeInternalArrayConstructorDescriptor(descriptor, 1);
+}
+
+
+void InternalArrayNArgumentsConstructorStub::InitializeInterfaceDescriptor(
+ CodeStubInterfaceDescriptor* descriptor) {
+ InitializeInternalArrayConstructorDescriptor(descriptor, -1);
+}
+
+
+void CompareNilICStub::InitializeInterfaceDescriptor(
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { eax };
+ descriptor->register_param_count_ = 1;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ =
+ FUNCTION_ADDR(CompareNilIC_Miss);
+ descriptor->SetMissHandler(
+ ExternalReference(IC_Utility(IC::kCompareNilIC_Miss), isolate()));
+}
+
+void ToBooleanStub::InitializeInterfaceDescriptor(
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { eax };
+ descriptor->register_param_count_ = 1;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ =
+ FUNCTION_ADDR(ToBooleanIC_Miss);
+ descriptor->SetMissHandler(
+ ExternalReference(IC_Utility(IC::kToBooleanIC_Miss), isolate()));
+}
+
+
+void StoreGlobalStub::InitializeInterfaceDescriptor(
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { edx, ecx, eax };
+ descriptor->register_param_count_ = 3;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ =
+ FUNCTION_ADDR(StoreIC_MissFromStubFailure);
+}
+
+
+void ElementsTransitionAndStoreStub::InitializeInterfaceDescriptor(
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { eax, ebx, ecx, edx };
+ descriptor->register_param_count_ = 4;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ =
+ FUNCTION_ADDR(ElementsTransitionAndStoreIC_Miss);
+}
+
+
+void ArrayShiftStub::InitializeInterfaceDescriptor(
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { eax };
+ descriptor->register_param_count_ = 1;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ =
+ Builtins::c_function_address(Builtins::c_ArrayShift);
+}
+
+
+void BinaryOpICStub::InitializeInterfaceDescriptor(
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { edx, eax };
+ descriptor->register_param_count_ = 2;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ = FUNCTION_ADDR(BinaryOpIC_Miss);
+ descriptor->SetMissHandler(
+ ExternalReference(IC_Utility(IC::kBinaryOpIC_Miss), isolate()));
+}
+
+
+void BinaryOpWithAllocationSiteStub::InitializeInterfaceDescriptor(
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { ecx, edx, eax };
+ descriptor->register_param_count_ = 3;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ =
+ FUNCTION_ADDR(BinaryOpIC_MissWithAllocationSite);
+}
+
+
+void StringAddStub::InitializeInterfaceDescriptor(
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { edx, eax };
+ descriptor->register_param_count_ = 2;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ =
+ Runtime::FunctionForId(Runtime::kHiddenStringAdd)->entry;
+}
+
+
+void CallDescriptors::InitializeForIsolate(Isolate* isolate) {
+ {
+ CallInterfaceDescriptor* descriptor =
+ isolate->call_descriptor(Isolate::ArgumentAdaptorCall);
+ static Register registers[] = { edi, // JSFunction
+ esi, // context
+ eax, // actual number of arguments
+ ebx, // expected number of arguments
+ };
+ static Representation representations[] = {
+ Representation::Tagged(), // JSFunction
+ Representation::Tagged(), // context
+ Representation::Integer32(), // actual number of arguments
+ Representation::Integer32(), // expected number of arguments
+ };
+ descriptor->register_param_count_ = 4;
+ descriptor->register_params_ = registers;
+ descriptor->param_representations_ = representations;
+ }
+ {
+ CallInterfaceDescriptor* descriptor =
+ isolate->call_descriptor(Isolate::KeyedCall);
+ static Register registers[] = { esi, // context
+ ecx, // key
+ };
+ static Representation representations[] = {
+ Representation::Tagged(), // context
+ Representation::Tagged(), // key
+ };
+ descriptor->register_param_count_ = 2;
+ descriptor->register_params_ = registers;
+ descriptor->param_representations_ = representations;
+ }
+ {
+ CallInterfaceDescriptor* descriptor =
+ isolate->call_descriptor(Isolate::NamedCall);
+ static Register registers[] = { esi, // context
+ ecx, // name
+ };
+ static Representation representations[] = {
+ Representation::Tagged(), // context
+ Representation::Tagged(), // name
+ };
+ descriptor->register_param_count_ = 2;
+ descriptor->register_params_ = registers;
+ descriptor->param_representations_ = representations;
+ }
+ {
+ CallInterfaceDescriptor* descriptor =
+ isolate->call_descriptor(Isolate::CallHandler);
+ static Register registers[] = { esi, // context
+ edx, // receiver
+ };
+ static Representation representations[] = {
+ Representation::Tagged(), // context
+ Representation::Tagged(), // receiver
+ };
+ descriptor->register_param_count_ = 2;
+ descriptor->register_params_ = registers;
+ descriptor->param_representations_ = representations;
+ }
+ {
+ CallInterfaceDescriptor* descriptor =
+ isolate->call_descriptor(Isolate::ApiFunctionCall);
+ static Register registers[] = { eax, // callee
+ ebx, // call_data
+ ecx, // holder
+ edx, // api_function_address
+ esi, // context
+ };
+ static Representation representations[] = {
+ Representation::Tagged(), // callee
+ Representation::Tagged(), // call_data
+ Representation::Tagged(), // holder
+ Representation::External(), // api_function_address
+ Representation::Tagged(), // context
+ };
+ descriptor->register_param_count_ = 5;
+ descriptor->register_params_ = registers;
+ descriptor->param_representations_ = representations;
+ }
+}
+
+
+#define __ ACCESS_MASM(masm)
+
+
+void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm) {
+ // Update the static counter each time a new code stub is generated.
+ isolate()->counters()->code_stubs()->Increment();
+
+ CodeStubInterfaceDescriptor* descriptor = GetInterfaceDescriptor();
+ int param_count = descriptor->register_param_count_;
+ {
+ // Call the runtime system in a fresh internal frame.
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ ASSERT(descriptor->register_param_count_ == 0 ||
+ eax.is(descriptor->register_params_[param_count - 1]));
+ // Push arguments
+ for (int i = 0; i < param_count; ++i) {
+ __ push(descriptor->register_params_[i]);
+ }
+ ExternalReference miss = descriptor->miss_handler();
+ __ CallExternalReference(miss, descriptor->register_param_count_);
+ }
+
+ __ ret(0);
+}
+
+
+void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
+ // We don't allow a GC during a store buffer overflow so there is no need to
+ // store the registers in any particular way, but we do have to store and
+ // restore them.
+ __ pushad();
+ const int argument_count = 1;
+
+ AllowExternalCallThatCantCauseGC scope(masm);
+ __ PrepareCallCFunction(argument_count, ecx);
+ __ mov(Operand(esp, 0 * kPointerSize),
+ Immediate(ExternalReference::isolate_address(isolate())));
+ __ CallCFunction(
+ ExternalReference::store_buffer_overflow_function(isolate()),
+ argument_count);
+ __ popad();
+ __ ret(0);
+}
+
+
+class FloatingPointHelper : public AllStatic {
+ public:
+ enum ArgLocation {
+ ARGS_ON_STACK,
+ ARGS_IN_REGISTERS
+ };
+
+ // Code pattern for loading a floating point value. The input value must
+ // be either a smi or a heap number object (fp value). Requirements:
+ // the operand is in the register |number|. Returns the operand as a
+ // floating point number on the FPU stack.
+ static void LoadFloatOperand(MacroAssembler* masm, Register number);
+
+ // Test if operands are smi or number objects (fp). Requirements:
+ // operand_1 in eax, operand_2 in edx; falls through on float
+ // operands, jumps to the non_float label otherwise.
+ static void CheckFloatOperands(MacroAssembler* masm,
+ Label* non_float,
+ Register scratch);
+};
+
+
+void DoubleToIStub::Generate(MacroAssembler* masm) {
+ Register input_reg = this->source();
+ Register final_result_reg = this->destination();
+ ASSERT(is_truncating());
+
+ Label check_negative, process_64_bits, done, done_no_stash;
+
+ int double_offset = offset();
+
+ // Account for return address and saved regs if input is esp.
+ if (input_reg.is(esp)) double_offset += 3 * kPointerSize;
+
+ MemOperand mantissa_operand(MemOperand(input_reg, double_offset));
+ MemOperand exponent_operand(MemOperand(input_reg,
+ double_offset + kDoubleSize / 2));
+
+ Register scratch1;
+ {
+ Register scratch_candidates[3] = { ebx, edx, edi };
+ for (int i = 0; i < 3; i++) {
+ scratch1 = scratch_candidates[i];
+ if (!final_result_reg.is(scratch1) && !input_reg.is(scratch1)) break;
+ }
+ }
+ // Since we must use ecx for shifts below, use some other register (eax)
+ // to calculate the result if ecx is the requested return register.
+ Register result_reg = final_result_reg.is(ecx) ? eax : final_result_reg;
+ // Save ecx if it isn't the return register and therefore volatile, or if it
+ // is the return register, then save the temp register we use in its stead for
+ // the result.
+ Register save_reg = final_result_reg.is(ecx) ? eax : ecx;
+ __ push(scratch1);
+ __ push(save_reg);
+
+ bool stash_exponent_copy = !input_reg.is(esp);
+ __ mov(scratch1, mantissa_operand);
+ __ mov(ecx, exponent_operand);
+ if (stash_exponent_copy) __ push(ecx);
+
+ __ and_(ecx, HeapNumber::kExponentMask);
+ __ shr(ecx, HeapNumber::kExponentShift);
+ __ lea(result_reg, MemOperand(ecx, -HeapNumber::kExponentBias));
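+ // result_reg now holds the exponent of the input with the bias removed.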
+ __ cmp(result_reg, Immediate(HeapNumber::kMantissaBits));
+ __ j(below, &process_64_bits);
+
+ // Result is entirely in lower 32-bits of mantissa
+ int delta = HeapNumber::kExponentBias + Double::kPhysicalSignificandSize;
+ __ sub(ecx, Immediate(delta));
+ __ xor_(result_reg, result_reg);
+ __ cmp(ecx, Immediate(31));
+ __ j(above, &done);
+ __ shl_cl(scratch1);
+ __ jmp(&check_negative);
+
+ __ bind(&process_64_bits);
+ // Result must be extracted from shifted 32-bit mantissa
+ __ sub(ecx, Immediate(delta));
+ __ neg(ecx);
+ if (stash_exponent_copy) {
+ __ mov(result_reg, MemOperand(esp, 0));
+ } else {
+ __ mov(result_reg, exponent_operand);
+ }
+ __ and_(result_reg,
+ Immediate(static_cast<uint32_t>(Double::kSignificandMask >> 32)));
+ __ add(result_reg,
+ Immediate(static_cast<uint32_t>(Double::kHiddenBit >> 32)));
+ __ shrd(result_reg, scratch1);
+ __ shr_cl(result_reg);
+ __ test(ecx, Immediate(32));
+ {
+ Label skip_mov;
+ __ j(equal, &skip_mov, Label::kNear);
+ __ mov(scratch1, result_reg);
+ __ bind(&skip_mov);
+ }
+
+ // If the double was negative, negate the integer result.
+ __ bind(&check_negative);
+ __ mov(result_reg, scratch1);
+ __ neg(result_reg);
+ if (stash_exponent_copy) {
+ __ cmp(MemOperand(esp, 0), Immediate(0));
+ } else {
+ __ cmp(exponent_operand, Immediate(0));
+ }
+ {
+ Label skip_mov;
+ __ j(less_equal, &skip_mov, Label::kNear);
+ __ mov(result_reg, scratch1);
+ __ bind(&skip_mov);
+ }
+
+ // Restore registers
+ __ bind(&done);
+ if (stash_exponent_copy) {
+ __ add(esp, Immediate(kDoubleSize / 2));
+ }
+ __ bind(&done_no_stash);
+ if (!final_result_reg.is(result_reg)) {
+ ASSERT(final_result_reg.is(ecx));
+ __ mov(final_result_reg, result_reg);
+ }
+ __ pop(save_reg);
+ __ pop(scratch1);
+ __ ret(0);
+}
+
+
+void FloatingPointHelper::LoadFloatOperand(MacroAssembler* masm,
+ Register number) {
+ Label load_smi, done;
+
+ __ JumpIfSmi(number, &load_smi, Label::kNear);
+ __ fld_d(FieldOperand(number, HeapNumber::kValueOffset));
+ __ jmp(&done, Label::kNear);
+
+ __ bind(&load_smi);
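+ // Smi case: untag and spill to the stack so that fild_s, which only reads
+ // from memory, can load the 32-bit integer onto the FPU stack.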
+ __ SmiUntag(number);
+ __ push(number);
+ __ fild_s(Operand(esp, 0));
+ __ pop(number);
+
+ __ bind(&done);
+}
+
+
+void FloatingPointHelper::CheckFloatOperands(MacroAssembler* masm,
+ Label* non_float,
+ Register scratch) {
+ Label test_other, done;
+ // Check that each operand is either a smi or a heap number (fp value);
+ // otherwise jump to the non_float label. scratch is clobbered.
+ __ JumpIfSmi(edx, &test_other, Label::kNear);
+ __ mov(scratch, FieldOperand(edx, HeapObject::kMapOffset));
+ Factory* factory = masm->isolate()->factory();
+ __ cmp(scratch, factory->heap_number_map());
+ __ j(not_equal, non_float); // argument in edx is not a number -> NaN
+
+ __ bind(&test_other);
+ __ JumpIfSmi(eax, &done, Label::kNear);
+ __ mov(scratch, FieldOperand(eax, HeapObject::kMapOffset));
+ __ cmp(scratch, factory->heap_number_map());
+ __ j(not_equal, non_float); // argument in eax is not a number -> NaN
+
+ // Fall-through: Both operands are numbers.
+ __ bind(&done);
+}
+
+
+void MathPowStub::Generate(MacroAssembler* masm) {
+ // No SSE2 support on x87, so this stub is never generated.
+ UNREACHABLE();
+}
+
+
+void FunctionPrototypeStub::Generate(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- ecx : name
+ // -- edx : receiver
+ // -- esp[0] : return address
+ // -----------------------------------
+ Label miss;
+
+ if (kind() == Code::KEYED_LOAD_IC) {
+ __ cmp(ecx, Immediate(isolate()->factory()->prototype_string()));
+ __ j(not_equal, &miss);
+ }
+
+ StubCompiler::GenerateLoadFunctionPrototype(masm, edx, eax, ebx, &miss);
+ __ bind(&miss);
+ StubCompiler::TailCallBuiltin(
+ masm, BaseLoadStoreStubCompiler::MissBuiltin(kind()));
+}
+
+
+void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
+ // The key is in edx and the parameter count is in eax.
+
+ // The displacement is used for skipping the frame pointer on the
+ // stack. It is the offset of the last parameter (if any) relative
+ // to the frame pointer.
+ static const int kDisplacement = 1 * kPointerSize;
+
+ // Check that the key is a smi.
+ Label slow;
+ __ JumpIfNotSmi(edx, &slow, Label::kNear);
+
+ // Check if the calling frame is an arguments adaptor frame.
+ Label adaptor;
+ __ mov(ebx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
+ __ mov(ecx, Operand(ebx, StandardFrameConstants::kContextOffset));
+ __ cmp(ecx, Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+ __ j(equal, &adaptor, Label::kNear);
+
+ // Check index against formal parameters count limit passed in
+ // through register eax. Use unsigned comparison to get negative
+ // check for free.
+ __ cmp(edx, eax);
+ __ j(above_equal, &slow, Label::kNear);
+
+ // Read the argument from the stack and return it.
+ STATIC_ASSERT(kSmiTagSize == 1);
+ STATIC_ASSERT(kSmiTag == 0); // Shifting code depends on these.
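+ // Both eax (parameter count) and edx (key) are smis, so scaling them by
+ // times_2 already produces byte offsets of index * kPointerSize.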
+ __ lea(ebx, Operand(ebp, eax, times_2, 0));
+ __ neg(edx);
+ __ mov(eax, Operand(ebx, edx, times_2, kDisplacement));
+ __ ret(0);
+
+ // Arguments adaptor case: Check index against actual arguments
+ // limit found in the arguments adaptor frame. Use unsigned
+ // comparison to get negative check for free.
+ __ bind(&adaptor);
+ __ mov(ecx, Operand(ebx, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ cmp(edx, ecx);
+ __ j(above_equal, &slow, Label::kNear);
+
+ // Read the argument from the stack and return it.
+ STATIC_ASSERT(kSmiTagSize == 1);
+ STATIC_ASSERT(kSmiTag == 0); // Shifting code depends on these.
+ __ lea(ebx, Operand(ebx, ecx, times_2, 0));
+ __ neg(edx);
+ __ mov(eax, Operand(ebx, edx, times_2, kDisplacement));
+ __ ret(0);
+
+ // Slow-case: Handle non-smi or out-of-bounds access to arguments
+ // by calling the runtime system.
+ __ bind(&slow);
+ __ pop(ebx); // Return address.
+ __ push(edx);
+ __ push(ebx);
+ __ TailCallRuntime(Runtime::kGetArgumentsProperty, 1, 1);
+}
+
+
+void ArgumentsAccessStub::GenerateNewSloppySlow(MacroAssembler* masm) {
+ // esp[0] : return address
+ // esp[4] : number of parameters
+ // esp[8] : receiver displacement
+ // esp[12] : function
+
+ // Check if the calling frame is an arguments adaptor frame.
+ Label runtime;
+ __ mov(edx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
+ __ mov(ecx, Operand(edx, StandardFrameConstants::kContextOffset));
+ __ cmp(ecx, Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+ __ j(not_equal, &runtime, Label::kNear);
+
+ // Patch the arguments.length and the parameters pointer.
+ __ mov(ecx, Operand(edx, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ mov(Operand(esp, 1 * kPointerSize), ecx);
+ __ lea(edx, Operand(edx, ecx, times_2,
+ StandardFrameConstants::kCallerSPOffset));
+ __ mov(Operand(esp, 2 * kPointerSize), edx);
+
+ __ bind(&runtime);
+ __ TailCallRuntime(Runtime::kHiddenNewArgumentsFast, 3, 1);
+}
+
+
+void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
+ // esp[0] : return address
+ // esp[4] : number of parameters (tagged)
+ // esp[8] : receiver displacement
+ // esp[12] : function
+
+ // ebx = parameter count (tagged)
+ __ mov(ebx, Operand(esp, 1 * kPointerSize));
+
+ // Check if the calling frame is an arguments adaptor frame.
+ // TODO(rossberg): Factor out some of the bits that are shared with the other
+ // Generate* functions.
+ Label runtime;
+ Label adaptor_frame, try_allocate;
+ __ mov(edx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
+ __ mov(ecx, Operand(edx, StandardFrameConstants::kContextOffset));
+ __ cmp(ecx, Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+ __ j(equal, &adaptor_frame, Label::kNear);
+
+ // No adaptor, parameter count = argument count.
+ __ mov(ecx, ebx);
+ __ jmp(&try_allocate, Label::kNear);
+
+ // We have an adaptor frame. Patch the parameters pointer.
+ __ bind(&adaptor_frame);
+ __ mov(ecx, Operand(edx, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ lea(edx, Operand(edx, ecx, times_2,
+ StandardFrameConstants::kCallerSPOffset));
+ __ mov(Operand(esp, 2 * kPointerSize), edx);
+
+ // ebx = parameter count (tagged)
+ // ecx = argument count (tagged)
+ // esp[4] = parameter count (tagged)
+ // esp[8] = address of receiver argument
+ // Compute the mapped parameter count = min(ebx, ecx) in ebx.
+ __ cmp(ebx, ecx);
+ __ j(less_equal, &try_allocate, Label::kNear);
+ __ mov(ebx, ecx);
+
+ __ bind(&try_allocate);
+
+ // Save mapped parameter count.
+ __ push(ebx);
+
+ // Compute the sizes of backing store, parameter map, and arguments object.
+ // 1. Parameter map, has 2 extra words containing context and backing store.
+ const int kParameterMapHeaderSize =
+ FixedArray::kHeaderSize + 2 * kPointerSize;
+ Label no_parameter_map;
+ __ test(ebx, ebx);
+ __ j(zero, &no_parameter_map, Label::kNear);
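+ // ebx holds the mapped parameter count as a smi; scaling a smi by times_2
+ // gives count * kPointerSize bytes, on top of which the header size is added.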
+ __ lea(ebx, Operand(ebx, times_2, kParameterMapHeaderSize));
+ __ bind(&no_parameter_map);
+
+ // 2. Backing store.
+ __ lea(ebx, Operand(ebx, ecx, times_2, FixedArray::kHeaderSize));
+
+ // 3. Arguments object.
+ __ add(ebx, Immediate(Heap::kSloppyArgumentsObjectSize));
+
+ // Do the allocation of all three objects in one go.
+ __ Allocate(ebx, eax, edx, edi, &runtime, TAG_OBJECT);
+
+ // eax = address of new object(s) (tagged)
+ // ecx = argument count (tagged)
+ // esp[0] = mapped parameter count (tagged)
+ // esp[8] = parameter count (tagged)
+ // esp[12] = address of receiver argument
+ // Get the arguments boilerplate from the current native context into edi.
+ Label has_mapped_parameters, copy;
+ __ mov(edi, Operand(esi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
+ __ mov(edi, FieldOperand(edi, GlobalObject::kNativeContextOffset));
+ __ mov(ebx, Operand(esp, 0 * kPointerSize));
+ __ test(ebx, ebx);
+ __ j(not_zero, &has_mapped_parameters, Label::kNear);
+ __ mov(edi, Operand(edi,
+ Context::SlotOffset(Context::SLOPPY_ARGUMENTS_BOILERPLATE_INDEX)));
+ __ jmp(&copy, Label::kNear);
+
+ __ bind(&has_mapped_parameters);
+ __ mov(edi, Operand(edi,
+ Context::SlotOffset(Context::ALIASED_ARGUMENTS_BOILERPLATE_INDEX)));
+ __ bind(&copy);
+
+ // eax = address of new object (tagged)
+ // ebx = mapped parameter count (tagged)
+ // ecx = argument count (tagged)
+ // edi = address of boilerplate object (tagged)
+ // esp[0] = mapped parameter count (tagged)
+ // esp[8] = parameter count (tagged)
+ // esp[12] = address of receiver argument
+ // Copy the JS object part.
+ for (int i = 0; i < JSObject::kHeaderSize; i += kPointerSize) {
+ __ mov(edx, FieldOperand(edi, i));
+ __ mov(FieldOperand(eax, i), edx);
+ }
+
+ // Set up the callee in-object property.
+ STATIC_ASSERT(Heap::kArgumentsCalleeIndex == 1);
+ __ mov(edx, Operand(esp, 4 * kPointerSize));
+ __ mov(FieldOperand(eax, JSObject::kHeaderSize +
+ Heap::kArgumentsCalleeIndex * kPointerSize),
+ edx);
+
+ // Use the length (smi tagged) and set that as an in-object property too.
+ STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
+ __ mov(FieldOperand(eax, JSObject::kHeaderSize +
+ Heap::kArgumentsLengthIndex * kPointerSize),
+ ecx);
+
+ // Set up the elements pointer in the allocated arguments object.
+ // If we allocated a parameter map, edi will point there, otherwise to the
+ // backing store.
+ __ lea(edi, Operand(eax, Heap::kSloppyArgumentsObjectSize));
+ __ mov(FieldOperand(eax, JSObject::kElementsOffset), edi);
+
+ // eax = address of new object (tagged)
+ // ebx = mapped parameter count (tagged)
+ // ecx = argument count (tagged)
+ // edi = address of parameter map or backing store (tagged)
+ // esp[0] = mapped parameter count (tagged)
+ // esp[8] = parameter count (tagged)
+ // esp[12] = address of receiver argument
+ // Free a register.
+ __ push(eax);
+
+ // Initialize parameter map. If there are no mapped arguments, we're done.
+ Label skip_parameter_map;
+ __ test(ebx, ebx);
+ __ j(zero, &skip_parameter_map);
+
+ __ mov(FieldOperand(edi, FixedArray::kMapOffset),
+ Immediate(isolate()->factory()->sloppy_arguments_elements_map()));
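+ // The parameter map is a FixedArray of length mapped_count + 2; the two
+ // extra slots hold the context and a pointer to the backing store.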
+ __ lea(eax, Operand(ebx, reinterpret_cast<intptr_t>(Smi::FromInt(2))));
+ __ mov(FieldOperand(edi, FixedArray::kLengthOffset), eax);
+ __ mov(FieldOperand(edi, FixedArray::kHeaderSize + 0 * kPointerSize), esi);
+ __ lea(eax, Operand(edi, ebx, times_2, kParameterMapHeaderSize));
+ __ mov(FieldOperand(edi, FixedArray::kHeaderSize + 1 * kPointerSize), eax);
+
+ // Copy the parameter slots and the holes in the arguments.
+ // We need to fill in mapped_parameter_count slots. They index the context,
+ // where parameters are stored in reverse order, at
+ // MIN_CONTEXT_SLOTS .. MIN_CONTEXT_SLOTS+parameter_count-1
+ // The mapped parameters thus need to get indices
+ // MIN_CONTEXT_SLOTS+parameter_count-1 ..
+ // MIN_CONTEXT_SLOTS+parameter_count-mapped_parameter_count
+ // We loop from right to left.
+ Label parameters_loop, parameters_test;
+ __ push(ecx);
+ __ mov(eax, Operand(esp, 2 * kPointerSize));
+ __ mov(ebx, Immediate(Smi::FromInt(Context::MIN_CONTEXT_SLOTS)));
+ __ add(ebx, Operand(esp, 4 * kPointerSize));
+ __ sub(ebx, eax);
+ __ mov(ecx, isolate()->factory()->the_hole_value());
+ __ mov(edx, edi);
+ __ lea(edi, Operand(edi, eax, times_2, kParameterMapHeaderSize));
+ // eax = loop variable (tagged)
+ // ebx = mapping index (tagged)
+ // ecx = the hole value
+ // edx = address of parameter map (tagged)
+ // edi = address of backing store (tagged)
+ // esp[0] = argument count (tagged)
+ // esp[4] = address of new object (tagged)
+ // esp[8] = mapped parameter count (tagged)
+ // esp[16] = parameter count (tagged)
+ // esp[20] = address of receiver argument
+ __ jmp(&parameters_test, Label::kNear);
+
+ __ bind(&parameters_loop);
+ __ sub(eax, Immediate(Smi::FromInt(1)));
+ __ mov(FieldOperand(edx, eax, times_2, kParameterMapHeaderSize), ebx);
+ __ mov(FieldOperand(edi, eax, times_2, FixedArray::kHeaderSize), ecx);
+ __ add(ebx, Immediate(Smi::FromInt(1)));
+ __ bind(&parameters_test);
+ __ test(eax, eax);
+ __ j(not_zero, &parameters_loop, Label::kNear);
+ __ pop(ecx);
+
+ __ bind(&skip_parameter_map);
+
+ // ecx = argument count (tagged)
+ // edi = address of backing store (tagged)
+ // esp[0] = address of new object (tagged)
+ // esp[4] = mapped parameter count (tagged)
+ // esp[12] = parameter count (tagged)
+ // esp[16] = address of receiver argument
+ // Copy arguments header and remaining slots (if there are any).
+ __ mov(FieldOperand(edi, FixedArray::kMapOffset),
+ Immediate(isolate()->factory()->fixed_array_map()));
+ __ mov(FieldOperand(edi, FixedArray::kLengthOffset), ecx);
+
+ Label arguments_loop, arguments_test;
+ __ mov(ebx, Operand(esp, 1 * kPointerSize));
+ __ mov(edx, Operand(esp, 4 * kPointerSize));
+ __ sub(edx, ebx); // Is there a smarter way to do negative scaling?
+ __ sub(edx, ebx);
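+ // Subtracting the tagged count twice equals subtracting
+ // count * kPointerSize, since a smi is the value shifted left by one bit.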
+ __ jmp(&arguments_test, Label::kNear);
+
+ __ bind(&arguments_loop);
+ __ sub(edx, Immediate(kPointerSize));
+ __ mov(eax, Operand(edx, 0));
+ __ mov(FieldOperand(edi, ebx, times_2, FixedArray::kHeaderSize), eax);
+ __ add(ebx, Immediate(Smi::FromInt(1)));
+
+ __ bind(&arguments_test);
+ __ cmp(ebx, ecx);
+ __ j(less, &arguments_loop, Label::kNear);
+
+ // Restore.
+ __ pop(eax); // Address of arguments object.
+ __ pop(ebx); // Parameter count.
+
+ // Return and remove the on-stack parameters.
+ __ ret(3 * kPointerSize);
+
+ // Do the runtime call to allocate the arguments object.
+ __ bind(&runtime);
+ __ pop(eax); // Remove saved parameter count.
+ __ mov(Operand(esp, 1 * kPointerSize), ecx); // Patch argument count.
+ __ TailCallRuntime(Runtime::kHiddenNewArgumentsFast, 3, 1);
+}
+
+
+void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
+ // esp[0] : return address
+ // esp[4] : number of parameters
+ // esp[8] : receiver displacement
+ // esp[12] : function
+
+ // Check if the calling frame is an arguments adaptor frame.
+ Label adaptor_frame, try_allocate, runtime;
+ __ mov(edx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
+ __ mov(ecx, Operand(edx, StandardFrameConstants::kContextOffset));
+ __ cmp(ecx, Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+ __ j(equal, &adaptor_frame, Label::kNear);
+
+ // Get the length from the frame.
+ __ mov(ecx, Operand(esp, 1 * kPointerSize));
+ __ jmp(&try_allocate, Label::kNear);
+
+ // Patch the arguments.length and the parameters pointer.
+ __ bind(&adaptor_frame);
+ __ mov(ecx, Operand(edx, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ mov(Operand(esp, 1 * kPointerSize), ecx);
+ __ lea(edx, Operand(edx, ecx, times_2,
+ StandardFrameConstants::kCallerSPOffset));
+ __ mov(Operand(esp, 2 * kPointerSize), edx);
+
+ // Try the new space allocation. Start out with computing the size of
+ // the arguments object and the elements array.
+ Label add_arguments_object;
+ __ bind(&try_allocate);
+ __ test(ecx, ecx);
+ __ j(zero, &add_arguments_object, Label::kNear);
+ __ lea(ecx, Operand(ecx, times_2, FixedArray::kHeaderSize));
+ __ bind(&add_arguments_object);
+ __ add(ecx, Immediate(Heap::kStrictArgumentsObjectSize));
+
+ // Do the allocation of both objects in one go.
+ __ Allocate(ecx, eax, edx, ebx, &runtime, TAG_OBJECT);
+
+ // Get the arguments boilerplate from the current native context.
+ __ mov(edi, Operand(esi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
+ __ mov(edi, FieldOperand(edi, GlobalObject::kNativeContextOffset));
+ const int offset =
+ Context::SlotOffset(Context::STRICT_ARGUMENTS_BOILERPLATE_INDEX);
+ __ mov(edi, Operand(edi, offset));
+
+ // Copy the JS object part.
+ for (int i = 0; i < JSObject::kHeaderSize; i += kPointerSize) {
+ __ mov(ebx, FieldOperand(edi, i));
+ __ mov(FieldOperand(eax, i), ebx);
+ }
+
+ // Get the length (smi tagged) and set that as an in-object property too.
+ STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
+ __ mov(ecx, Operand(esp, 1 * kPointerSize));
+ __ mov(FieldOperand(eax, JSObject::kHeaderSize +
+ Heap::kArgumentsLengthIndex * kPointerSize),
+ ecx);
+
+ // If there are no actual arguments, we're done.
+ Label done;
+ __ test(ecx, ecx);
+ __ j(zero, &done, Label::kNear);
+
+ // Get the parameters pointer from the stack.
+ __ mov(edx, Operand(esp, 2 * kPointerSize));
+
+ // Set up the elements pointer in the allocated arguments object and
+ // initialize the header in the elements fixed array.
+ __ lea(edi, Operand(eax, Heap::kStrictArgumentsObjectSize));
+ __ mov(FieldOperand(eax, JSObject::kElementsOffset), edi);
+ __ mov(FieldOperand(edi, FixedArray::kMapOffset),
+ Immediate(isolate()->factory()->fixed_array_map()));
+
+ __ mov(FieldOperand(edi, FixedArray::kLengthOffset), ecx);
+ // Untag the length for the loop below.
+ __ SmiUntag(ecx);
+
+ // Copy the fixed array slots.
+ Label loop;
+ __ bind(&loop);
+ __ mov(ebx, Operand(edx, -1 * kPointerSize)); // Skip receiver.
+ __ mov(FieldOperand(edi, FixedArray::kHeaderSize), ebx);
+ __ add(edi, Immediate(kPointerSize));
+ __ sub(edx, Immediate(kPointerSize));
+ __ dec(ecx);
+ __ j(not_zero, &loop);
+
+ // Return and remove the on-stack parameters.
+ __ bind(&done);
+ __ ret(3 * kPointerSize);
+
+ // Do the runtime call to allocate the arguments object.
+ __ bind(&runtime);
+ __ TailCallRuntime(Runtime::kHiddenNewStrictArgumentsFast, 3, 1);
+}
+
+
+void RegExpExecStub::Generate(MacroAssembler* masm) {
+ // Jump straight to the runtime if native RegExp support was not selected
+ // at compile time, or if entering generated regexp code has been disabled,
+ // either by a runtime switch or at compilation.
+#ifdef V8_INTERPRETED_REGEXP
+ __ TailCallRuntime(Runtime::kHiddenRegExpExec, 4, 1);
+#else // V8_INTERPRETED_REGEXP
+
+ // Stack frame on entry.
+ // esp[0]: return address
+ // esp[4]: last_match_info (expected JSArray)
+ // esp[8]: previous index
+ // esp[12]: subject string
+ // esp[16]: JSRegExp object
+
+ static const int kLastMatchInfoOffset = 1 * kPointerSize;
+ static const int kPreviousIndexOffset = 2 * kPointerSize;
+ static const int kSubjectOffset = 3 * kPointerSize;
+ static const int kJSRegExpOffset = 4 * kPointerSize;
+
+ Label runtime;
+ Factory* factory = isolate()->factory();
+
+ // Ensure that a RegExp stack is allocated.
+ ExternalReference address_of_regexp_stack_memory_address =
+ ExternalReference::address_of_regexp_stack_memory_address(isolate());
+ ExternalReference address_of_regexp_stack_memory_size =
+ ExternalReference::address_of_regexp_stack_memory_size(isolate());
+ __ mov(ebx, Operand::StaticVariable(address_of_regexp_stack_memory_size));
+ __ test(ebx, ebx);
+ __ j(zero, &runtime);
+
+ // Check that the first argument is a JSRegExp object.
+ __ mov(eax, Operand(esp, kJSRegExpOffset));
+ STATIC_ASSERT(kSmiTag == 0);
+ __ JumpIfSmi(eax, &runtime);
+ __ CmpObjectType(eax, JS_REGEXP_TYPE, ecx);
+ __ j(not_equal, &runtime);
+
+ // Check that the RegExp has been compiled (data contains a fixed array).
+ __ mov(ecx, FieldOperand(eax, JSRegExp::kDataOffset));
+ if (FLAG_debug_code) {
+ __ test(ecx, Immediate(kSmiTagMask));
+ __ Check(not_zero, kUnexpectedTypeForRegExpDataFixedArrayExpected);
+ __ CmpObjectType(ecx, FIXED_ARRAY_TYPE, ebx);
+ __ Check(equal, kUnexpectedTypeForRegExpDataFixedArrayExpected);
+ }
+
+ // ecx: RegExp data (FixedArray)
+ // Check the type of the RegExp. Only continue if type is JSRegExp::IRREGEXP.
+ __ mov(ebx, FieldOperand(ecx, JSRegExp::kDataTagOffset));
+ __ cmp(ebx, Immediate(Smi::FromInt(JSRegExp::IRREGEXP)));
+ __ j(not_equal, &runtime);
+
+ // ecx: RegExp data (FixedArray)
+ // Check that the number of captures fit in the static offsets vector buffer.
+ __ mov(edx, FieldOperand(ecx, JSRegExp::kIrregexpCaptureCountOffset));
+ // Check (number_of_captures + 1) * 2 <= offsets vector size
+ // Or number_of_captures * 2 <= offsets vector size - 2
+ // Multiplying by 2 comes for free since edx is smi-tagged.
+ STATIC_ASSERT(kSmiTag == 0);
+ STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
+ STATIC_ASSERT(Isolate::kJSRegexpStaticOffsetsVectorSize >= 2);
+ __ cmp(edx, Isolate::kJSRegexpStaticOffsetsVectorSize - 2);
+ __ j(above, &runtime);
+
+ // Reset offset for possibly sliced string.
+ __ Move(edi, Immediate(0));
+ __ mov(eax, Operand(esp, kSubjectOffset));
+ __ JumpIfSmi(eax, &runtime);
+ __ mov(edx, eax); // Make a copy of the original subject string.
+ __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
+ __ movzx_b(ebx, FieldOperand(ebx, Map::kInstanceTypeOffset));
+
+ // eax: subject string
+ // edx: subject string
+ // ebx: subject string instance type
+ // ecx: RegExp data (FixedArray)
+ // Handle subject string according to its encoding and representation:
+ // (1) Sequential two byte? If yes, go to (9).
+ // (2) Sequential one byte? If yes, go to (6).
+ // (3) Anything but sequential or cons? If yes, go to (7).
+ // (4) Cons string. If the string is flat, replace subject with first string.
+ // Otherwise bailout.
+ // (5a) Is subject sequential two byte? If yes, go to (9).
+ // (5b) Is subject external? If yes, go to (8).
+ // (6) One byte sequential. Load regexp code for one byte.
+ // (E) Carry on.
+ /// [...]
+
+ // Deferred code at the end of the stub:
+ // (7) Not a long external string? If yes, go to (10).
+ // (8) External string. Make it, offset-wise, look like a sequential string.
+ // (8a) Is the external string one byte? If yes, go to (6).
+ // (9) Two byte sequential. Load regexp code for two byte. Go to (E).
+ // (10) Short external string or not a string? If yes, bail out to runtime.
+ // (11) Sliced string. Replace subject with parent. Go to (5a).
+
+ Label seq_one_byte_string /* 6 */, seq_two_byte_string /* 9 */,
+ external_string /* 8 */, check_underlying /* 5a */,
+ not_seq_nor_cons /* 7 */, check_code /* E */,
+ not_long_external /* 10 */;
+
+ // (1) Sequential two byte? If yes, go to (9).
+ __ and_(ebx, kIsNotStringMask |
+ kStringRepresentationMask |
+ kStringEncodingMask |
+ kShortExternalStringMask);
+ STATIC_ASSERT((kStringTag | kSeqStringTag | kTwoByteStringTag) == 0);
+ __ j(zero, &seq_two_byte_string); // Go to (9).
+
+ // (2) Sequential one byte? If yes, go to (6).
+ // Any other sequential string must be one byte.
+ __ and_(ebx, Immediate(kIsNotStringMask |
+ kStringRepresentationMask |
+ kShortExternalStringMask));
+ __ j(zero, &seq_one_byte_string, Label::kNear); // Go to (6).
+
+ // (3) Anything but sequential or cons? If yes, go to (7).
+ // We check whether the subject string is a cons, since sequential strings
+ // have already been covered.
+ STATIC_ASSERT(kConsStringTag < kExternalStringTag);
+ STATIC_ASSERT(kSlicedStringTag > kExternalStringTag);
+ STATIC_ASSERT(kIsNotStringMask > kExternalStringTag);
+ STATIC_ASSERT(kShortExternalStringTag > kExternalStringTag);
+ __ cmp(ebx, Immediate(kExternalStringTag));
+ __ j(greater_equal, &not_seq_nor_cons); // Go to (7).
+
+ // (4) Cons string. Check that it's flat.
+ // Replace subject with first string and reload instance type.
+ __ cmp(FieldOperand(eax, ConsString::kSecondOffset), factory->empty_string());
+ __ j(not_equal, &runtime);
+ __ mov(eax, FieldOperand(eax, ConsString::kFirstOffset));
+ __ bind(&check_underlying);
+ __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
+ __ mov(ebx, FieldOperand(ebx, Map::kInstanceTypeOffset));
+
+ // (5a) Is subject sequential two byte? If yes, go to (9).
+ __ test_b(ebx, kStringRepresentationMask | kStringEncodingMask);
+ STATIC_ASSERT((kSeqStringTag | kTwoByteStringTag) == 0);
+ __ j(zero, &seq_two_byte_string); // Go to (9).
+ // (5b) Is subject external? If yes, go to (8).
+ __ test_b(ebx, kStringRepresentationMask);
+ // The underlying external string is never a short external string.
+ STATIC_CHECK(ExternalString::kMaxShortLength < ConsString::kMinLength);
+ STATIC_CHECK(ExternalString::kMaxShortLength < SlicedString::kMinLength);
+ __ j(not_zero, &external_string); // Go to (8).
+
+ // eax: sequential subject string (or look-alike, external string)
+ // edx: original subject string
+ // ecx: RegExp data (FixedArray)
+ // (6) One byte sequential. Load regexp code for one byte.
+ __ bind(&seq_one_byte_string);
+ // Load previous index and check range before edx is overwritten. We have
+ // to use edx instead of eax here because it might have been only made to
+ // look like a sequential string when it actually is an external string.
+ __ mov(ebx, Operand(esp, kPreviousIndexOffset));
+ __ JumpIfNotSmi(ebx, &runtime);
+ __ cmp(ebx, FieldOperand(edx, String::kLengthOffset));
+ __ j(above_equal, &runtime);
+ __ mov(edx, FieldOperand(ecx, JSRegExp::kDataAsciiCodeOffset));
+ __ Move(ecx, Immediate(1)); // Type is one byte.
+
+ // (E) Carry on. String handling is done.
+ __ bind(&check_code);
+ // edx: irregexp code
+ // Check that the irregexp code has been generated for the actual string
+ // encoding. If it has, the field contains a code object; otherwise it
+ // contains a smi (code flushing support).
+ __ JumpIfSmi(edx, &runtime);
+
+ // eax: subject string
+ // ebx: previous index (smi)
+ // edx: code
+ // ecx: encoding of subject string (1 if ASCII, 0 if two_byte);
+ // All checks done. Now push arguments for native regexp code.
+ Counters* counters = isolate()->counters();
+ __ IncrementCounter(counters->regexp_entry_native(), 1);
+
+ // Isolates: note we add an additional parameter here (isolate pointer).
+ static const int kRegExpExecuteArguments = 9;
+ __ EnterApiExitFrame(kRegExpExecuteArguments);
+
+ // Argument 9: Pass current isolate address.
+ __ mov(Operand(esp, 8 * kPointerSize),
+ Immediate(ExternalReference::isolate_address(isolate())));
+
+ // Argument 8: Indicate that this is a direct call from JavaScript.
+ __ mov(Operand(esp, 7 * kPointerSize), Immediate(1));
+
+ // Argument 7: Start (high end) of backtracking stack memory area.
+ __ mov(esi, Operand::StaticVariable(address_of_regexp_stack_memory_address));
+ __ add(esi, Operand::StaticVariable(address_of_regexp_stack_memory_size));
+ __ mov(Operand(esp, 6 * kPointerSize), esi);
+
+ // Argument 6: Set the number of capture registers to zero to force global
+ // regexps to behave as non-global. This does not affect non-global regexps.
+ __ mov(Operand(esp, 5 * kPointerSize), Immediate(0));
+
+ // Argument 5: static offsets vector buffer.
+ __ mov(Operand(esp, 4 * kPointerSize),
+ Immediate(ExternalReference::address_of_static_offsets_vector(
+ isolate())));
+
+ // Argument 2: Previous index.
+ __ SmiUntag(ebx);
+ __ mov(Operand(esp, 1 * kPointerSize), ebx);
+
+ // Argument 1: Original subject string.
+ // The original subject is in the previous stack frame. Therefore we have to
+ // use ebp, which points exactly to one pointer size below the previous esp.
+ // (Because creating a new stack frame pushes the previous ebp onto the stack
+ // and thereby moves up esp by one kPointerSize.)
+ __ mov(esi, Operand(ebp, kSubjectOffset + kPointerSize));
+ __ mov(Operand(esp, 0 * kPointerSize), esi);
+
+ // esi: original subject string
+ // eax: underlying subject string
+ // ebx: previous index
+ // ecx: encoding of subject string (1 if ASCII, 0 if two_byte);
+ // edx: code
+ // Argument 4: End of string data
+ // Argument 3: Start of string data
+ // Prepare start and end index of the input.
+ // Load the length from the original sliced string if that is the case.
+ __ mov(esi, FieldOperand(esi, String::kLengthOffset));
+ __ add(esi, edi); // Calculate input end wrt offset.
+ __ SmiUntag(edi);
+ __ add(ebx, edi); // Calculate input start wrt offset.
+
+ // ebx: start index of the input string
+ // esi: end index of the input string
+ Label setup_two_byte, setup_rest;
+ __ test(ecx, ecx);
+ __ j(zero, &setup_two_byte, Label::kNear);
+ __ SmiUntag(esi);
+ __ lea(ecx, FieldOperand(eax, esi, times_1, SeqOneByteString::kHeaderSize));
+ __ mov(Operand(esp, 3 * kPointerSize), ecx); // Argument 4.
+ __ lea(ecx, FieldOperand(eax, ebx, times_1, SeqOneByteString::kHeaderSize));
+ __ mov(Operand(esp, 2 * kPointerSize), ecx); // Argument 3.
+ __ jmp(&setup_rest, Label::kNear);
+
+ __ bind(&setup_two_byte);
+ STATIC_ASSERT(kSmiTag == 0);
+ STATIC_ASSERT(kSmiTagSize == 1); // esi is smi (powered by 2).
+ __ lea(ecx, FieldOperand(eax, esi, times_1, SeqTwoByteString::kHeaderSize));
+ __ mov(Operand(esp, 3 * kPointerSize), ecx); // Argument 4.
+ __ lea(ecx, FieldOperand(eax, ebx, times_2, SeqTwoByteString::kHeaderSize));
+ __ mov(Operand(esp, 2 * kPointerSize), ecx); // Argument 3.
+
+ __ bind(&setup_rest);
+
+ // Locate the code entry and call it.
+ __ add(edx, Immediate(Code::kHeaderSize - kHeapObjectTag));
+ __ call(edx);
+
+ // Drop arguments and come back to JS mode.
+ __ LeaveApiExitFrame(true);
+
+ // Check the result.
+ Label success;
+ __ cmp(eax, 1);
+ // We expect exactly one result since we force the called regexp to behave
+ // as non-global.
+ __ j(equal, &success);
+ Label failure;
+ __ cmp(eax, NativeRegExpMacroAssembler::FAILURE);
+ __ j(equal, &failure);
+ __ cmp(eax, NativeRegExpMacroAssembler::EXCEPTION);
+ // If not exception it can only be retry. Handle that in the runtime system.
+ __ j(not_equal, &runtime);
+ // Result must now be exception. If there is no pending exception already, a
+ // stack overflow (on the backtrack stack) was detected in RegExp code, but
+ // the exception has not been created yet. Handle that in the runtime system.
+ // TODO(592): Rerunning the RegExp to get the stack overflow exception.
+ ExternalReference pending_exception(Isolate::kPendingExceptionAddress,
+ isolate());
+ __ mov(edx, Immediate(isolate()->factory()->the_hole_value()));
+ __ mov(eax, Operand::StaticVariable(pending_exception));
+ __ cmp(edx, eax);
+ __ j(equal, &runtime);
+ // For exception, throw the exception again.
+
+ // Clear the pending exception variable.
+ __ mov(Operand::StaticVariable(pending_exception), edx);
+
+ // Special handling of termination exceptions which are uncatchable
+ // by javascript code.
+ __ cmp(eax, factory->termination_exception());
+ Label throw_termination_exception;
+ __ j(equal, &throw_termination_exception, Label::kNear);
+
+ // Handle normal exception by following handler chain.
+ __ Throw(eax);
+
+ __ bind(&throw_termination_exception);
+ __ ThrowUncatchable(eax);
+
+ __ bind(&failure);
+ // For failure to match, return null.
+ __ mov(eax, factory->null_value());
+ __ ret(4 * kPointerSize);
+
+ // Load RegExp data.
+ __ bind(&success);
+ __ mov(eax, Operand(esp, kJSRegExpOffset));
+ __ mov(ecx, FieldOperand(eax, JSRegExp::kDataOffset));
+ __ mov(edx, FieldOperand(ecx, JSRegExp::kIrregexpCaptureCountOffset));
+ // Calculate number of capture registers (number_of_captures + 1) * 2.
+ STATIC_ASSERT(kSmiTag == 0);
+ STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
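+ // edx is a smi, i.e. it already holds number_of_captures * 2, so adding 2
+ // gives the untagged value (number_of_captures + 1) * 2.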
+ __ add(edx, Immediate(2)); // edx was a smi.
+
+ // edx: Number of capture registers
+ // Load last_match_info which is still known to be a fast case JSArray.
+ // Check that the fourth object is a JSArray object.
+ __ mov(eax, Operand(esp, kLastMatchInfoOffset));
+ __ JumpIfSmi(eax, &runtime);
+ __ CmpObjectType(eax, JS_ARRAY_TYPE, ebx);
+ __ j(not_equal, &runtime);
+ // Check that the JSArray is in fast case.
+ __ mov(ebx, FieldOperand(eax, JSArray::kElementsOffset));
+ __ mov(eax, FieldOperand(ebx, HeapObject::kMapOffset));
+ __ cmp(eax, factory->fixed_array_map());
+ __ j(not_equal, &runtime);
+ // Check that the last match info has space for the capture registers and the
+ // additional information.
+ __ mov(eax, FieldOperand(ebx, FixedArray::kLengthOffset));
+ __ SmiUntag(eax);
+ __ sub(eax, Immediate(RegExpImpl::kLastMatchOverhead));
+ __ cmp(edx, eax);
+ __ j(greater, &runtime);
+
+ // ebx: last_match_info backing store (FixedArray)
+ // edx: number of capture registers
+ // Store the capture count.
+ __ SmiTag(edx); // Number of capture registers to smi.
+ __ mov(FieldOperand(ebx, RegExpImpl::kLastCaptureCountOffset), edx);
+ __ SmiUntag(edx); // Number of capture registers back from smi.
+ // Store last subject and last input.
+ __ mov(eax, Operand(esp, kSubjectOffset));
+ __ mov(ecx, eax);
+ __ mov(FieldOperand(ebx, RegExpImpl::kLastSubjectOffset), eax);
+ __ RecordWriteField(ebx,
+ RegExpImpl::kLastSubjectOffset,
+ eax,
+ edi);
+ __ mov(eax, ecx);
+ __ mov(FieldOperand(ebx, RegExpImpl::kLastInputOffset), eax);
+ __ RecordWriteField(ebx,
+ RegExpImpl::kLastInputOffset,
+ eax,
+ edi);
+
+ // Get the static offsets vector filled by the native regexp code.
+ ExternalReference address_of_static_offsets_vector =
+ ExternalReference::address_of_static_offsets_vector(isolate());
+ __ mov(ecx, Immediate(address_of_static_offsets_vector));
+
+ // ebx: last_match_info backing store (FixedArray)
+ // ecx: offsets vector
+ // edx: number of capture registers
+ Label next_capture, done;
+ // Capture register counter starts from number of capture registers and
+ // counts down until wrapping after zero.
+ __ bind(&next_capture);
+ __ sub(edx, Immediate(1));
+ __ j(negative, &done, Label::kNear);
+ // Read the value from the static offsets vector buffer.
+ __ mov(edi, Operand(ecx, edx, times_int_size, 0));
+ __ SmiTag(edi);
+ // Store the smi value in the last match info.
+ __ mov(FieldOperand(ebx,
+ edx,
+ times_pointer_size,
+ RegExpImpl::kFirstCaptureOffset),
+ edi);
+ __ jmp(&next_capture);
+ __ bind(&done);
+
+ // Return last match info.
+ __ mov(eax, Operand(esp, kLastMatchInfoOffset));
+ __ ret(4 * kPointerSize);
+
+ // Do the runtime call to execute the regexp.
+ __ bind(&runtime);
+ __ TailCallRuntime(Runtime::kHiddenRegExpExec, 4, 1);
+
+ // Deferred code for string handling.
+ // (7) Not a long external string? If yes, go to (10).
+ __ bind(&not_seq_nor_cons);
+ // Compare flags are still set from (3).
+ __ j(greater, &not_long_external, Label::kNear); // Go to (10).
+
+ // (8) External string. Short external strings have been ruled out.
+ __ bind(&external_string);
+ // Reload instance type.
+ __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
+ __ movzx_b(ebx, FieldOperand(ebx, Map::kInstanceTypeOffset));
+ if (FLAG_debug_code) {
+ // Assert that we do not have a cons or slice (indirect strings) here.
+ // Sequential strings have already been ruled out.
+ __ test_b(ebx, kIsIndirectStringMask);
+ __ Assert(zero, kExternalStringExpectedButNotFound);
+ }
+ __ mov(eax, FieldOperand(eax, ExternalString::kResourceDataOffset));
+ // Move the pointer so that offset-wise, it looks like a sequential string.
+ STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
+ __ sub(eax, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
+ STATIC_ASSERT(kTwoByteStringTag == 0);
+ // (8a) Is the external string one byte? If yes, go to (6).
+ __ test_b(ebx, kStringEncodingMask);
+ __ j(not_zero, &seq_one_byte_string); // Goto (6).
+
+ // eax: sequential subject string (or look-alike, external string)
+ // edx: original subject string
+ // ecx: RegExp data (FixedArray)
+ // (9) Two byte sequential. Load regexp code for two byte. Go to (E).
+ __ bind(&seq_two_byte_string);
+ // Load previous index and check range before edx is overwritten. We have
+ // to use edx instead of eax here because it might have been only made to
+ // look like a sequential string when it actually is an external string.
+ __ mov(ebx, Operand(esp, kPreviousIndexOffset));
+ __ JumpIfNotSmi(ebx, &runtime);
+ __ cmp(ebx, FieldOperand(edx, String::kLengthOffset));
+ __ j(above_equal, &runtime);
+ __ mov(edx, FieldOperand(ecx, JSRegExp::kDataUC16CodeOffset));
+ __ Move(ecx, Immediate(0)); // Type is two byte.
+ __ jmp(&check_code); // Go to (E).
+
+ // (10) Not a string or a short external string? If yes, bail out to runtime.
+ __ bind(&not_long_external);
+ // Catch non-string subject or short external string.
+ STATIC_ASSERT(kNotStringTag != 0 && kShortExternalStringTag != 0);
+ __ test(ebx, Immediate(kIsNotStringMask | kShortExternalStringTag));
+ __ j(not_zero, &runtime);
+
+ // (11) Sliced string. Replace subject with parent. Go to (5a).
+ // Load offset into edi and replace subject string with parent.
+ __ mov(edi, FieldOperand(eax, SlicedString::kOffsetOffset));
+ __ mov(eax, FieldOperand(eax, SlicedString::kParentOffset));
+ __ jmp(&check_underlying); // Go to (5a).
+#endif // V8_INTERPRETED_REGEXP
+}
+
+
+static int NegativeComparisonResult(Condition cc) {
+ ASSERT(cc != equal);
+ ASSERT((cc == less) || (cc == less_equal)
+ || (cc == greater) || (cc == greater_equal));
+ return (cc == greater || cc == greater_equal) ? LESS : GREATER;
+}
+
+
+static void CheckInputType(MacroAssembler* masm,
+ Register input,
+ CompareIC::State expected,
+ Label* fail) {
+ Label ok;
+ if (expected == CompareIC::SMI) {
+ __ JumpIfNotSmi(input, fail);
+ } else if (expected == CompareIC::NUMBER) {
+ __ JumpIfSmi(input, &ok);
+ __ cmp(FieldOperand(input, HeapObject::kMapOffset),
+ Immediate(masm->isolate()->factory()->heap_number_map()));
+ __ j(not_equal, fail);
+ }
+ // We could be strict about internalized/non-internalized here, but as long as
+ // hydrogen doesn't care, the stub doesn't have to care either.
+ __ bind(&ok);
+}
+
+
+static void BranchIfNotInternalizedString(MacroAssembler* masm,
+ Label* label,
+ Register object,
+ Register scratch) {
+ __ JumpIfSmi(object, label);
+ __ mov(scratch, FieldOperand(object, HeapObject::kMapOffset));
+ __ movzx_b(scratch, FieldOperand(scratch, Map::kInstanceTypeOffset));
+ STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
+ __ test(scratch, Immediate(kIsNotStringMask | kIsNotInternalizedMask));
+ __ j(not_zero, label);
+}
+
+
+void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
+ Label check_unequal_objects;
+ Condition cc = GetCondition();
+
+ Label miss;
+ CheckInputType(masm, edx, left_, &miss);
+ CheckInputType(masm, eax, right_, &miss);
+
+ // Compare two smis.
+ Label non_smi, smi_done;
+ __ mov(ecx, edx);
+ __ or_(ecx, eax);
+ __ JumpIfNotSmi(ecx, &non_smi, Label::kNear);
+ __ sub(edx, eax); // Return on the result of the subtraction.
+ __ j(no_overflow, &smi_done, Label::kNear);
+ __ not_(edx); // Correct sign in case of overflow. edx is never 0 here.
+ __ bind(&smi_done);
+ __ mov(eax, edx);
+ __ ret(0);
+ __ bind(&non_smi);
+
+ // NOTICE! This code is only reached after a smi-fast-case check, so
+ // it is certain that at least one operand isn't a smi.
+
+ // Identical objects can be compared fast, but there are some tricky cases
+ // for NaN and undefined.
+ Label generic_heap_number_comparison;
+ {
+ Label not_identical;
+ __ cmp(eax, edx);
+ __ j(not_equal, &not_identical);
+
+ if (cc != equal) {
+ // Check for undefined. undefined OP undefined is false even though
+ // undefined == undefined.
+ Label check_for_nan;
+ __ cmp(edx, isolate()->factory()->undefined_value());
+ __ j(not_equal, &check_for_nan, Label::kNear);
+ __ Move(eax, Immediate(Smi::FromInt(NegativeComparisonResult(cc))));
+ __ ret(0);
+ __ bind(&check_for_nan);
+ }
+
+ // Test for NaN. Compare heap numbers in a general way,
+ // to handle NaNs correctly.
+ __ cmp(FieldOperand(edx, HeapObject::kMapOffset),
+ Immediate(isolate()->factory()->heap_number_map()));
+ __ j(equal, &generic_heap_number_comparison, Label::kNear);
+ if (cc != equal) {
+ // Call runtime on identical JSObjects. Otherwise return equal.
+ __ CmpObjectType(eax, FIRST_SPEC_OBJECT_TYPE, ecx);
+ __ j(above_equal, &not_identical);
+ }
+ __ Move(eax, Immediate(Smi::FromInt(EQUAL)));
+ __ ret(0);
+
+ __ bind(&not_identical);
+ }
+
+ // Strict equality can quickly decide whether objects are equal.
+ // Non-strict object equality is slower, so it is handled later in the stub.
+ if (cc == equal && strict()) {
+ Label slow; // Fallthrough label.
+ Label not_smis;
+ // If we're doing a strict equality comparison, we don't have to do
+ // type conversion, so we generate code to do fast comparison for objects
+ // and oddballs. Non-smi numbers and strings still go through the usual
+ // slow-case code.
+ // If either is a Smi (we know that not both are), then they can only
+ // be equal if the other is a HeapNumber. If so, use the slow case.
+ STATIC_ASSERT(kSmiTag == 0);
+ ASSERT_EQ(0, Smi::FromInt(0));
+ __ mov(ecx, Immediate(kSmiTagMask));
+ __ and_(ecx, eax);
+ __ test(ecx, edx);
+ __ j(not_zero, &not_smis, Label::kNear);
+ // One operand is a smi.
+
+ // Check whether the non-smi is a heap number.
+ STATIC_ASSERT(kSmiTagMask == 1);
+ // ecx still holds eax & kSmiTagMask, which is either zero or one.
+ __ sub(ecx, Immediate(0x01));
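+ // ecx is now 0xffffffff if eax was a smi, and 0 otherwise.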
+ __ mov(ebx, edx);
+ __ xor_(ebx, eax);
+ __ and_(ebx, ecx); // ebx holds either 0 or eax ^ edx.
+ __ xor_(ebx, eax);
+ // if eax was smi, ebx is now edx, else eax.
+
+ // Check if the non-smi operand is a heap number.
+ __ cmp(FieldOperand(ebx, HeapObject::kMapOffset),
+ Immediate(isolate()->factory()->heap_number_map()));
+ // If heap number, handle it in the slow case.
+ __ j(equal, &slow, Label::kNear);
+ // Return non-equal (ebx is not zero)
+ __ mov(eax, ebx);
+ __ ret(0);
+
+ __ bind(&not_smis);
+ // If either operand is a JSObject or an oddball value, then they are not
+ // equal since their pointers are different
+ // There is no test for undetectability in strict equality.
+
+ // Get the type of the first operand.
+ // If the first object is a JS object, we have done pointer comparison.
+ Label first_non_object;
+ STATIC_ASSERT(LAST_TYPE == LAST_SPEC_OBJECT_TYPE);
+ __ CmpObjectType(eax, FIRST_SPEC_OBJECT_TYPE, ecx);
+ __ j(below, &first_non_object, Label::kNear);
+
+ // Return non-zero (eax is not zero)
+ Label return_not_equal;
+ STATIC_ASSERT(kHeapObjectTag != 0);
+ __ bind(&return_not_equal);
+ __ ret(0);
+
+ __ bind(&first_non_object);
+ // Check for oddballs: true, false, null, undefined.
+ __ CmpInstanceType(ecx, ODDBALL_TYPE);
+ __ j(equal, &return_not_equal);
+
+ __ CmpObjectType(edx, FIRST_SPEC_OBJECT_TYPE, ecx);
+ __ j(above_equal, &return_not_equal);
+
+ // Check for oddballs: true, false, null, undefined.
+ __ CmpInstanceType(ecx, ODDBALL_TYPE);
+ __ j(equal, &return_not_equal);
+
+ // Fall through to the general case.
+ __ bind(&slow);
+ }
+
+ // Generate the number comparison code.
+ Label non_number_comparison;
+ Label unordered;
+ __ bind(&generic_heap_number_comparison);
+ FloatingPointHelper::CheckFloatOperands(
+ masm, &non_number_comparison, ebx);
+ FloatingPointHelper::LoadFloatOperand(masm, eax);
+ FloatingPointHelper::LoadFloatOperand(masm, edx);
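+ // Both operands are now on the FPU stack; FCmp compares them and transfers
+ // the result to EFLAGS, where the parity flag signals an unordered (NaN)
+ // comparison.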
+ __ FCmp();
+
+ // Don't base result on EFLAGS when a NaN is involved.
+ __ j(parity_even, &unordered, Label::kNear);
+
+ Label below_label, above_label;
+ // Return a result of -1, 0, or 1, based on EFLAGS.
+ __ j(below, &below_label, Label::kNear);
+ __ j(above, &above_label, Label::kNear);
+
+ __ Move(eax, Immediate(0));
+ __ ret(0);
+
+ __ bind(&below_label);
+ __ mov(eax, Immediate(Smi::FromInt(-1)));
+ __ ret(0);
+
+ __ bind(&above_label);
+ __ mov(eax, Immediate(Smi::FromInt(1)));
+ __ ret(0);
+
+ // If one of the numbers was NaN, then the result is always false.
+ // The cc is never not-equal.
+ __ bind(&unordered);
+ ASSERT(cc != not_equal);
+ if (cc == less || cc == less_equal) {
+ __ mov(eax, Immediate(Smi::FromInt(1)));
+ } else {
+ __ mov(eax, Immediate(Smi::FromInt(-1)));
+ }
+ __ ret(0);
+
+ // The number comparison code did not provide a valid result.
+ __ bind(&non_number_comparison);
+
+ // Fast negative check for internalized-to-internalized equality.
+ Label check_for_strings;
+ if (cc == equal) {
+ BranchIfNotInternalizedString(masm, &check_for_strings, eax, ecx);
+ BranchIfNotInternalizedString(masm, &check_for_strings, edx, ecx);
+
+ // We've already checked for object identity, so if both operands
+ // are internalized they aren't equal. Register eax already holds a
+ // non-zero value, which indicates not equal, so just return.
+ __ ret(0);
+ }
+
+ __ bind(&check_for_strings);
+
+ __ JumpIfNotBothSequentialAsciiStrings(edx, eax, ecx, ebx,
+ &check_unequal_objects);
+
+ // Inline comparison of ASCII strings.
+ if (cc == equal) {
+ StringCompareStub::GenerateFlatAsciiStringEquals(masm,
+ edx,
+ eax,
+ ecx,
+ ebx);
+ } else {
+ StringCompareStub::GenerateCompareFlatAsciiStrings(masm,
+ edx,
+ eax,
+ ecx,
+ ebx,
+ edi);
+ }
+#ifdef DEBUG
+ __ Abort(kUnexpectedFallThroughFromStringComparison);
+#endif
+
+ __ bind(&check_unequal_objects);
+ if (cc == equal && !strict()) {
+ // Non-strict equality. Objects are unequal if
+ // they are both JSObjects and not undetectable,
+ // and their pointers are different.
+ Label not_both_objects;
+ Label return_unequal;
+ // At most one is a smi, so we can test for smi by adding the two.
+ // A smi plus a heap object has the low bit set, a heap object plus
+ // a heap object has the low bit clear.
+ STATIC_ASSERT(kSmiTag == 0);
+ STATIC_ASSERT(kSmiTagMask == 1);
+ __ lea(ecx, Operand(eax, edx, times_1, 0));
+ __ test(ecx, Immediate(kSmiTagMask));
+ __ j(not_zero, &not_both_objects, Label::kNear);
+ __ CmpObjectType(eax, FIRST_SPEC_OBJECT_TYPE, ecx);
+ __ j(below, &not_both_objects, Label::kNear);
+ __ CmpObjectType(edx, FIRST_SPEC_OBJECT_TYPE, ebx);
+ __ j(below, &not_both_objects, Label::kNear);
+ // We do not bail out after this point. Both are JSObjects, and
+ // they are equal if and only if both are undetectable.
+ // The and of the undetectable flags is 1 if and only if they are equal.
+ __ test_b(FieldOperand(ecx, Map::kBitFieldOffset),
+ 1 << Map::kIsUndetectable);
+ __ j(zero, &return_unequal, Label::kNear);
+ __ test_b(FieldOperand(ebx, Map::kBitFieldOffset),
+ 1 << Map::kIsUndetectable);
+ __ j(zero, &return_unequal, Label::kNear);
+ // The objects are both undetectable, so they both compare as the value
+ // undefined, and are equal.
+ __ Move(eax, Immediate(EQUAL));
+ __ bind(&return_unequal);
+ // Return non-equal by returning the non-zero object pointer in eax,
+ // or return equal if we fell through to here.
+ __ ret(0);
+ __ bind(&not_both_objects);
+ }
+
+ // Push arguments below the return address.
+ __ pop(ecx);
+ __ push(edx);
+ __ push(eax);
+
+ // Figure out which native to call and setup the arguments.
+ Builtins::JavaScript builtin;
+ if (cc == equal) {
+ builtin = strict() ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
+ } else {
+ builtin = Builtins::COMPARE;
+ __ push(Immediate(Smi::FromInt(NegativeComparisonResult(cc))));
+ }
+
+ // Restore return address on the stack.
+ __ push(ecx);
+
+ // Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
+ // tagged as a small integer.
+ __ InvokeBuiltin(builtin, JUMP_FUNCTION);
+
+ __ bind(&miss);
+ GenerateMiss(masm);
+}
+
+
+static void GenerateRecordCallTarget(MacroAssembler* masm) {
+ // Cache the called function in a feedback vector slot. Cache states
+ // are uninitialized, monomorphic (indicated by a JSFunction), and
+ // megamorphic.
+ // eax : number of arguments to the construct function
+ // ebx : Feedback vector
+ // edx : slot in feedback vector (Smi)
+ // edi : the function to call
+ Isolate* isolate = masm->isolate();
+ Label initialize, done, miss, megamorphic, not_array_function;
+
+ // Load the cache state into ecx.
+ __ mov(ecx, FieldOperand(ebx, edx, times_half_pointer_size,
+ FixedArray::kHeaderSize));
+
+ // A monomorphic cache hit or an already megamorphic state: invoke the
+ // function without changing the state.
+ __ cmp(ecx, edi);
+ __ j(equal, &done, Label::kFar);
+ __ cmp(ecx, Immediate(TypeFeedbackInfo::MegamorphicSentinel(isolate)));
+ __ j(equal, &done, Label::kFar);
+
+ if (!FLAG_pretenuring_call_new) {
+ // If we came here, we need to see if we are the array function.
+ // If we didn't have a matching function, and we didn't find the megamorph
+ // sentinel, then we have in the slot either some other function or an
+ // AllocationSite. Do a map check on the object in ecx.
+ Handle<Map> allocation_site_map = isolate->factory()->allocation_site_map();
+ __ cmp(FieldOperand(ecx, 0), Immediate(allocation_site_map));
+ __ j(not_equal, &miss);
+
+ // Make sure the function is the Array() function
+ __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, ecx);
+ __ cmp(edi, ecx);
+ __ j(not_equal, &megamorphic);
+ __ jmp(&done, Label::kFar);
+ }
+
+ __ bind(&miss);
+
+ // A monomorphic miss (i.e., here the cache is not uninitialized) goes
+ // megamorphic.
+ __ cmp(ecx, Immediate(TypeFeedbackInfo::UninitializedSentinel(isolate)));
+ __ j(equal, &initialize);
+ // MegamorphicSentinel is an immortal immovable object (undefined) so no
+ // write-barrier is needed.
+ __ bind(&megamorphic);
+ __ mov(FieldOperand(ebx, edx, times_half_pointer_size,
+ FixedArray::kHeaderSize),
+ Immediate(TypeFeedbackInfo::MegamorphicSentinel(isolate)));
+ __ jmp(&done, Label::kFar);
+
+ // An uninitialized cache is patched with the function or sentinel to
+ // indicate the ElementsKind if function is the Array constructor.
+ __ bind(&initialize);
+ if (!FLAG_pretenuring_call_new) {
+ // Make sure the function is the Array() function
+ __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, ecx);
+ __ cmp(edi, ecx);
+ __ j(not_equal, &not_array_function);
+
+ // The target function is the Array constructor.
+ // Create an AllocationSite if we don't already have it, and store it in
+ // the slot.
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+
+ // Arguments register must be smi-tagged to call out.
+ __ SmiTag(eax);
+ __ push(eax);
+ __ push(edi);
+ __ push(edx);
+ __ push(ebx);
+
+ CreateAllocationSiteStub create_stub(isolate);
+ __ CallStub(&create_stub);
+
+ __ pop(ebx);
+ __ pop(edx);
+ __ pop(edi);
+ __ pop(eax);
+ __ SmiUntag(eax);
+ }
+ __ jmp(&done);
+
+ __ bind(&not_array_function);
+ }
+
+ __ mov(FieldOperand(ebx, edx, times_half_pointer_size,
+ FixedArray::kHeaderSize),
+ edi);
+ // We won't need edx or ebx anymore, just save edi
+ __ push(edi);
+ __ push(ebx);
+ __ push(edx);
+ __ RecordWriteArray(ebx, edi, edx, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
+ __ pop(edx);
+ __ pop(ebx);
+ __ pop(edi);
+
+ __ bind(&done);
+}
+
+
+static void EmitContinueIfStrictOrNative(MacroAssembler* masm, Label* cont) {
+ // Do not transform the receiver for strict mode functions.
+ __ mov(ecx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
+ __ test_b(FieldOperand(ecx, SharedFunctionInfo::kStrictModeByteOffset),
+ 1 << SharedFunctionInfo::kStrictModeBitWithinByte);
+ __ j(not_equal, cont);
+
+ // Do not transform the receiver for natives (shared already in ecx).
+ __ test_b(FieldOperand(ecx, SharedFunctionInfo::kNativeByteOffset),
+ 1 << SharedFunctionInfo::kNativeBitWithinByte);
+ __ j(not_equal, cont);
+}
+
+
+static void EmitSlowCase(Isolate* isolate,
+ MacroAssembler* masm,
+ int argc,
+ Label* non_function) {
+ // Check for function proxy.
+ __ CmpInstanceType(ecx, JS_FUNCTION_PROXY_TYPE);
+ __ j(not_equal, non_function);
+ __ pop(ecx);
+ __ push(edi); // put proxy as additional argument under return address
+ __ push(ecx);
+ __ Move(eax, Immediate(argc + 1));
+ __ Move(ebx, Immediate(0));
+ __ GetBuiltinEntry(edx, Builtins::CALL_FUNCTION_PROXY);
+ {
+ Handle<Code> adaptor = isolate->builtins()->ArgumentsAdaptorTrampoline();
+ __ jmp(adaptor, RelocInfo::CODE_TARGET);
+ }
+
+ // CALL_NON_FUNCTION expects the non-function callee as receiver (instead
+ // of the original receiver from the call site).
+ __ bind(non_function);
+ __ mov(Operand(esp, (argc + 1) * kPointerSize), edi);
+ __ Move(eax, Immediate(argc));
+ __ Move(ebx, Immediate(0));
+ __ GetBuiltinEntry(edx, Builtins::CALL_NON_FUNCTION);
+ Handle<Code> adaptor = isolate->builtins()->ArgumentsAdaptorTrampoline();
+ __ jmp(adaptor, RelocInfo::CODE_TARGET);
+}
+
+
+static void EmitWrapCase(MacroAssembler* masm, int argc, Label* cont) {
+ // Wrap the receiver and patch it back onto the stack.
+ { FrameScope frame_scope(masm, StackFrame::INTERNAL);
+ __ push(edi);
+ __ push(eax);
+ __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
+ __ pop(edi);
+ }
+ __ mov(Operand(esp, (argc + 1) * kPointerSize), eax);
+ __ jmp(cont);
+}
+
+
+static void CallFunctionNoFeedback(MacroAssembler* masm,
+ int argc, bool needs_checks,
+ bool call_as_method) {
+ // edi : the function to call
+ Label slow, non_function, wrap, cont;
+
+ if (needs_checks) {
+ // Check that the function really is a JavaScript function.
+ __ JumpIfSmi(edi, &non_function);
+
+ // Goto slow case if we do not have a function.
+ __ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
+ __ j(not_equal, &slow);
+ }
+
+ // Fast-case: Just invoke the function.
+ ParameterCount actual(argc);
+
+ if (call_as_method) {
+ if (needs_checks) {
+ EmitContinueIfStrictOrNative(masm, &cont);
+ }
+
+ // Load the receiver from the stack.
+ __ mov(eax, Operand(esp, (argc + 1) * kPointerSize));
+
+ if (needs_checks) {
+ __ JumpIfSmi(eax, &wrap);
+
+ __ CmpObjectType(eax, FIRST_SPEC_OBJECT_TYPE, ecx);
+ __ j(below, &wrap);
+ } else {
+ __ jmp(&wrap);
+ }
+
+ __ bind(&cont);
+ }
+
+ __ InvokeFunction(edi, actual, JUMP_FUNCTION, NullCallWrapper());
+
+ if (needs_checks) {
+ // Slow-case: Non-function called.
+ __ bind(&slow);
+ // (non_function is bound in EmitSlowCase)
+ EmitSlowCase(masm->isolate(), masm, argc, &non_function);
+ }
+
+ if (call_as_method) {
+ __ bind(&wrap);
+ EmitWrapCase(masm, argc, &cont);
+ }
+}
+
+
+void CallFunctionStub::Generate(MacroAssembler* masm) {
+ CallFunctionNoFeedback(masm, argc_, NeedsChecks(), CallAsMethod());
+}
+
+
+void CallConstructStub::Generate(MacroAssembler* masm) {
+ // eax : number of arguments
+ // ebx : feedback vector
+ // edx : (only if ebx is not the megamorphic symbol) slot in feedback
+ // vector (Smi)
+ // edi : constructor function
+ Label slow, non_function_call;
+
+ // Check that function is not a smi.
+ __ JumpIfSmi(edi, &non_function_call);
+ // Check that function is a JSFunction.
+ __ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
+ __ j(not_equal, &slow);
+
+ if (RecordCallTarget()) {
+ GenerateRecordCallTarget(masm);
+
+ if (FLAG_pretenuring_call_new) {
+ // Put the AllocationSite from the feedback vector into ebx.
+ // By adding kPointerSize we encode that we know the AllocationSite
+ // entry is at the feedback vector slot given by edx + 1.
+ __ mov(ebx, FieldOperand(ebx, edx, times_half_pointer_size,
+ FixedArray::kHeaderSize + kPointerSize));
+ } else {
+ Label feedback_register_initialized;
+ // Put the AllocationSite from the feedback vector into ebx, or undefined.
+ __ mov(ebx, FieldOperand(ebx, edx, times_half_pointer_size,
+ FixedArray::kHeaderSize));
+ Handle<Map> allocation_site_map =
+ isolate()->factory()->allocation_site_map();
+ __ cmp(FieldOperand(ebx, 0), Immediate(allocation_site_map));
+ __ j(equal, &feedback_register_initialized);
+ __ mov(ebx, isolate()->factory()->undefined_value());
+ __ bind(&feedback_register_initialized);
+ }
+
+ __ AssertUndefinedOrAllocationSite(ebx);
+ }
+
+ // Jump to the function-specific construct stub.
+ Register jmp_reg = ecx;
+ __ mov(jmp_reg, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
+ __ mov(jmp_reg, FieldOperand(jmp_reg,
+ SharedFunctionInfo::kConstructStubOffset));
+ __ lea(jmp_reg, FieldOperand(jmp_reg, Code::kHeaderSize));
+ __ jmp(jmp_reg);
+
+ // edi: called object
+ // eax: number of arguments
+ // ecx: object map
+ Label do_call;
+ __ bind(&slow);
+ __ CmpInstanceType(ecx, JS_FUNCTION_PROXY_TYPE);
+ __ j(not_equal, &non_function_call);
+ __ GetBuiltinEntry(edx, Builtins::CALL_FUNCTION_PROXY_AS_CONSTRUCTOR);
+ __ jmp(&do_call);
+
+ __ bind(&non_function_call);
+ __ GetBuiltinEntry(edx, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
+ __ bind(&do_call);
+ // Set expected number of arguments to zero (not changing eax).
+ __ Move(ebx, Immediate(0));
+ Handle<Code> arguments_adaptor =
+ isolate()->builtins()->ArgumentsAdaptorTrampoline();
+ __ jmp(arguments_adaptor, RelocInfo::CODE_TARGET);
+}
+
+
+static void EmitLoadTypeFeedbackVector(MacroAssembler* masm, Register vector) {
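+ // The type feedback vector hangs off the SharedFunctionInfo of the
+ // JSFunction in the current JavaScript frame: ebp -> function -> shared
+ // function info -> feedback vector.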
+ __ mov(vector, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
+ __ mov(vector, FieldOperand(vector, JSFunction::kSharedFunctionInfoOffset));
+ __ mov(vector, FieldOperand(vector,
+ SharedFunctionInfo::kFeedbackVectorOffset));
+}
+
+
+void CallICStub::Generate_MonomorphicArray(MacroAssembler* masm, Label* miss) {
+ // edi - function
+ // ebx - feedback vector
+ // edx - slot id
+ __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, ecx);
+ __ cmp(edi, ecx);
+ __ j(not_equal, miss);
+
+ __ mov(eax, arg_count());
+ __ mov(ebx, FieldOperand(ebx, edx, times_half_pointer_size,
+ FixedArray::kHeaderSize));
+ // Verify that ebx contains an AllocationSite or undefined.
+ __ AssertUndefinedOrAllocationSite(ebx);
+ ArrayConstructorStub stub(masm->isolate(), arg_count());
+ __ TailCallStub(&stub);
+}
+
+
+void CallICStub::Generate_CustomFeedbackCall(MacroAssembler* masm) {
+ // edi - function
+ // ebx - feedback vector
+ // edx - slot id
+ Label miss;
+
+ if (state_.stub_type() == CallIC::MONOMORPHIC_ARRAY) {
+ Generate_MonomorphicArray(masm, &miss);
+ } else {
+ // So far there is only one customer for our custom feedback scheme.
+ UNREACHABLE();
+ }
+
+ __ bind(&miss);
+ GenerateMiss(masm);
+
+ // The slow case, we need this no matter what to complete a call after a miss.
+ CallFunctionNoFeedback(masm,
+ arg_count(),
+ true,
+ CallAsMethod());
+
+ // Unreachable.
+ __ int3();
+}
+
+
+void CallICStub::Generate(MacroAssembler* masm) {
+ // edi - function
+ // edx - slot id
+ Isolate* isolate = masm->isolate();
+ Label extra_checks_or_miss, slow_start;
+ Label slow, non_function, wrap, cont;
+ Label have_js_function;
+ int argc = state_.arg_count();
+ ParameterCount actual(argc);
+
+ EmitLoadTypeFeedbackVector(masm, ebx);
+
+ if (state_.stub_type() != CallIC::DEFAULT) {
+ Generate_CustomFeedbackCall(masm);
+ return;
+ }
+
+ // The checks. First, does edi match the recorded monomorphic target?
+ __ cmp(edi, FieldOperand(ebx, edx, times_half_pointer_size,
+ FixedArray::kHeaderSize));
+ __ j(not_equal, &extra_checks_or_miss);
+
+ __ bind(&have_js_function);
+ if (state_.CallAsMethod()) {
+ EmitContinueIfStrictOrNative(masm, &cont);
+
+ // Load the receiver from the stack.
+ __ mov(eax, Operand(esp, (argc + 1) * kPointerSize));
+
+ __ JumpIfSmi(eax, &wrap);
+
+ __ CmpObjectType(eax, FIRST_SPEC_OBJECT_TYPE, ecx);
+ __ j(below, &wrap);
+
+ __ bind(&cont);
+ }
+
+ __ InvokeFunction(edi, actual, JUMP_FUNCTION, NullCallWrapper());
+
+ __ bind(&slow);
+ EmitSlowCase(isolate, masm, argc, &non_function);
+
+ if (state_.CallAsMethod()) {
+ __ bind(&wrap);
+ EmitWrapCase(masm, argc, &cont);
+ }
+
+ __ bind(&extra_checks_or_miss);
+ Label miss;
+
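+ // Inspect the feedback slot: the megamorphic sentinel sends us straight to
+ // the slow path, while the uninitialized sentinel means this IC still has
+ // to learn its target via the miss handler.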
+ __ mov(ecx, FieldOperand(ebx, edx, times_half_pointer_size,
+ FixedArray::kHeaderSize));
+ __ cmp(ecx, Immediate(TypeFeedbackInfo::MegamorphicSentinel(isolate)));
+ __ j(equal, &slow_start);
+ __ cmp(ecx, Immediate(TypeFeedbackInfo::UninitializedSentinel(isolate)));
+ __ j(equal, &miss);
+
+ if (!FLAG_trace_ic) {
+ // We are going megamorphic, and we don't want to visit the runtime.
+ __ mov(FieldOperand(ebx, edx, times_half_pointer_size,
+ FixedArray::kHeaderSize),
+ Immediate(TypeFeedbackInfo::MegamorphicSentinel(isolate)));
+ __ jmp(&slow_start);
+ }
+
+ // We are here because tracing is on or we are going monomorphic.
+ __ bind(&miss);
+ GenerateMiss(masm);
+
+ // the slow case
+ __ bind(&slow_start);
+
+ // Check that the function really is a JavaScript function.
+ __ JumpIfSmi(edi, &non_function);
+
+ // Goto slow case if we do not have a function.
+ __ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
+ __ j(not_equal, &slow);
+ __ jmp(&have_js_function);
+
+ // Unreachable
+ __ int3();
+}
+
+
+void CallICStub::GenerateMiss(MacroAssembler* masm) {
+ // Get the receiver of the function from the stack; 1 ~ return address.
+ __ mov(ecx, Operand(esp, (state_.arg_count() + 1) * kPointerSize));
+
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+
+ // Push the receiver and the function and feedback info.
+ __ push(ecx);
+ __ push(edi);
+ __ push(ebx);
+ __ push(edx);
+
+ // Call the entry.
+ ExternalReference miss = ExternalReference(IC_Utility(IC::kCallIC_Miss),
+ masm->isolate());
+ __ CallExternalReference(miss, 4);
+
+ // Move result to edi and exit the internal frame.
+ __ mov(edi, eax);
+ }
+}
+
+
+bool CEntryStub::NeedsImmovableCode() {
+ return false;
+}
+
+
+void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
+ CEntryStub::GenerateAheadOfTime(isolate);
+ StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(isolate);
+ StubFailureTrampolineStub::GenerateAheadOfTime(isolate);
+ // It is important that the store buffer overflow stubs are generated first.
+ ArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate);
+ CreateAllocationSiteStub::GenerateAheadOfTime(isolate);
+ BinaryOpICStub::GenerateAheadOfTime(isolate);
+ BinaryOpICWithAllocationSiteStub::GenerateAheadOfTime(isolate);
+}
+
+
+void CodeStub::GenerateFPStubs(Isolate* isolate) {
+ // Do nothing.
+}
+
+
+void CEntryStub::GenerateAheadOfTime(Isolate* isolate) {
+ CEntryStub stub(isolate, 1);
+ stub.GetCode();
+}
+
+
+void CEntryStub::Generate(MacroAssembler* masm) {
+ // eax: number of arguments including receiver
+ // ebx: pointer to C function (C callee-saved)
+ // ebp: frame pointer (restored after C call)
+ // esp: stack pointer (restored after C call)
+ // esi: current context (C callee-saved)
+ // edi: JS function of the caller (C callee-saved)
+
+ ProfileEntryHookStub::MaybeCallEntryHook(masm);
+
+ // Enter the exit frame that transitions from JavaScript to C++.
+ __ EnterExitFrame();
+
+ // ebx: pointer to C function (C callee-saved)
+ // ebp: frame pointer (restored after C call)
+ // esp: stack pointer (restored after C call)
+ // edi: number of arguments including receiver (C callee-saved)
+ // esi: pointer to the first argument (C callee-saved)
+
+ // Result returned in eax, or eax+edx if result_size_ is 2.
+
+ // Check stack alignment.
+ if (FLAG_debug_code) {
+ __ CheckStackAlignment();
+ }
+
+ // Call C function.
+ __ mov(Operand(esp, 0 * kPointerSize), edi); // argc.
+ __ mov(Operand(esp, 1 * kPointerSize), esi); // argv.
+ __ mov(Operand(esp, 2 * kPointerSize),
+ Immediate(ExternalReference::isolate_address(isolate())));
+ __ call(ebx);
+ // Result is in eax or edx:eax - do not destroy these registers!
+
+ // Runtime functions should not return 'the hole'. Allowing it to escape may
+ // lead to crashes in the IC code later.
+ if (FLAG_debug_code) {
+ Label okay;
+ __ cmp(eax, isolate()->factory()->the_hole_value());
+ __ j(not_equal, &okay, Label::kNear);
+ __ int3();
+ __ bind(&okay);
+ }
+
+ // Check result for exception sentinel.
+ Label exception_returned;
+ __ cmp(eax, isolate()->factory()->exception());
+ __ j(equal, &exception_returned);
+
+ ExternalReference pending_exception_address(
+ Isolate::kPendingExceptionAddress, isolate());
+
+ // Check that there is no pending exception, otherwise we
+ // should have returned the exception sentinel.
+ if (FLAG_debug_code) {
+ __ push(edx);
+ __ mov(edx, Immediate(isolate()->factory()->the_hole_value()));
+ Label okay;
+ __ cmp(edx, Operand::StaticVariable(pending_exception_address));
+ // Cannot use check here as it attempts to generate call into runtime.
+ __ j(equal, &okay, Label::kNear);
+ __ int3();
+ __ bind(&okay);
+ __ pop(edx);
+ }
+
+ // Exit the JavaScript to C++ exit frame.
+ __ LeaveExitFrame();
+ __ ret(0);
+
+ // Handling of exception.
+ __ bind(&exception_returned);
+
+ // Retrieve the pending exception.
+ __ mov(eax, Operand::StaticVariable(pending_exception_address));
+
+ // Clear the pending exception.
+ __ mov(edx, Immediate(isolate()->factory()->the_hole_value()));
+ __ mov(Operand::StaticVariable(pending_exception_address), edx);
+
+ // Special handling of termination exceptions which are uncatchable
+ // by JavaScript code.
+ Label throw_termination_exception;
+ __ cmp(eax, isolate()->factory()->termination_exception());
+ __ j(equal, &throw_termination_exception);
+
+ // Handle normal exception.
+ __ Throw(eax);
+
+ __ bind(&throw_termination_exception);
+ __ ThrowUncatchable(eax);
+}
+
+
+void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
+ Label invoke, handler_entry, exit;
+ Label not_outermost_js, not_outermost_js_2;
+
+ ProfileEntryHookStub::MaybeCallEntryHook(masm);
+
+ // Set up frame.
+ __ push(ebp);
+ __ mov(ebp, esp);
+
+ // Push marker in two places.
+ int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY;
+ __ push(Immediate(Smi::FromInt(marker))); // context slot
+ __ push(Immediate(Smi::FromInt(marker))); // function slot
+ // Save callee-saved registers (C calling conventions).
+ __ push(edi);
+ __ push(esi);
+ __ push(ebx);
+
+ // Save copies of the top frame descriptor on the stack.
+ ExternalReference c_entry_fp(Isolate::kCEntryFPAddress, isolate());
+ __ push(Operand::StaticVariable(c_entry_fp));
+
+ // If this is the outermost JS call, set js_entry_sp value.
+ ExternalReference js_entry_sp(Isolate::kJSEntrySPAddress, isolate());
+ __ cmp(Operand::StaticVariable(js_entry_sp), Immediate(0));
+ __ j(not_equal, &not_outermost_js, Label::kNear);
+ __ mov(Operand::StaticVariable(js_entry_sp), ebp);
+ __ push(Immediate(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME)));
+ __ jmp(&invoke, Label::kNear);
+ __ bind(&not_outermost_js);
+ __ push(Immediate(Smi::FromInt(StackFrame::INNER_JSENTRY_FRAME)));
+
+ // Jump to a faked try block that does the invoke, with a faked catch
+ // block that sets the pending exception.
+ __ jmp(&invoke);
+ __ bind(&handler_entry);
+ handler_offset_ = handler_entry.pos();
+ // Caught exception: Store result (exception) in the pending exception
+ // field in the JSEnv and return a failure sentinel.
+ ExternalReference pending_exception(Isolate::kPendingExceptionAddress,
+ isolate());
+ __ mov(Operand::StaticVariable(pending_exception), eax);
+ __ mov(eax, Immediate(isolate()->factory()->exception()));
+ __ jmp(&exit);
+
+ // Invoke: Link this frame into the handler chain. There's only one
+ // handler block in this code object, so its index is 0.
+ __ bind(&invoke);
+ __ PushTryHandler(StackHandler::JS_ENTRY, 0);
+
+ // Clear any pending exceptions.
+ __ mov(edx, Immediate(isolate()->factory()->the_hole_value()));
+ __ mov(Operand::StaticVariable(pending_exception), edx);
+
+ // Fake a receiver (NULL).
+ __ push(Immediate(0)); // receiver
+
+ // Invoke the function by calling through JS entry trampoline builtin and
+ // pop the faked function when we return. Notice that we cannot store a
+ // reference to the trampoline code directly in this stub, because the
+ // builtin stubs may not have been generated yet.
+ if (is_construct) {
+ ExternalReference construct_entry(Builtins::kJSConstructEntryTrampoline,
+ isolate());
+ __ mov(edx, Immediate(construct_entry));
+ } else {
+ ExternalReference entry(Builtins::kJSEntryTrampoline, isolate());
+ __ mov(edx, Immediate(entry));
+ }
+ __ mov(edx, Operand(edx, 0)); // deref address
+ __ lea(edx, FieldOperand(edx, Code::kHeaderSize));
+ __ call(edx);
+
+ // Unlink this frame from the handler chain.
+ __ PopTryHandler();
+
+ __ bind(&exit);
+ // Check if the current stack frame is marked as the outermost JS frame.
+ __ pop(ebx);
+ __ cmp(ebx, Immediate(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME)));
+ __ j(not_equal, &not_outermost_js_2);
+ __ mov(Operand::StaticVariable(js_entry_sp), Immediate(0));
+ __ bind(&not_outermost_js_2);
+
+ // Restore the top frame descriptor from the stack.
+ __ pop(Operand::StaticVariable(ExternalReference(
+ Isolate::kCEntryFPAddress, isolate())));
+
+ // Restore callee-saved registers (C calling conventions).
+ __ pop(ebx);
+ __ pop(esi);
+ __ pop(edi);
+ __ add(esp, Immediate(2 * kPointerSize)); // remove markers
+
+ // Restore frame pointer and return.
+ __ pop(ebp);
+ __ ret(0);
+}
+
+
+// Generate stub code for instanceof.
+// This code can patch a call site's inlined cache for the instanceof check,
+// which looks like this.
+//
+// 81 ff XX XX XX XX cmp edi, <the hole, patched to a map>
+// 75 0a jne <some near label>
+// b8 XX XX XX XX mov eax, <the hole, patched to either true or false>
+//
+// If call site patching is requested the stack will have the delta from the
+// return address to the cmp instruction just below the return address. This
+// also means that call site patching can only take place with arguments in
+// registers. TOS looks like this when call site patching is requested
+//
+// esp[0] : return address
+// esp[4] : delta from return address to cmp instruction
+//
+void InstanceofStub::Generate(MacroAssembler* masm) {
+ // Call site inlining and patching implies arguments in registers.
+ ASSERT(HasArgsInRegisters() || !HasCallSiteInlineCheck());
+
+ // Fixed register usage throughout the stub.
+ Register object = eax; // Object (lhs).
+ Register map = ebx; // Map of the object.
+ Register function = edx; // Function (rhs).
+ Register prototype = edi; // Prototype of the function.
+ Register scratch = ecx;
+
+ // Constants describing the call site code to patch.
+ static const int kDeltaToCmpImmediate = 2;
+ static const int kDeltaToMov = 8;
+ static const int kDeltaToMovImmediate = 9;
+ static const int8_t kCmpEdiOperandByte1 = BitCast<int8_t, uint8_t>(0x3b);
+ static const int8_t kCmpEdiOperandByte2 = BitCast<int8_t, uint8_t>(0x3d);
+ static const int8_t kMovEaxImmediateByte = BitCast<int8_t, uint8_t>(0xb8);
+
+ ASSERT_EQ(object.code(), InstanceofStub::left().code());
+ ASSERT_EQ(function.code(), InstanceofStub::right().code());
+
+ // Get the object and function - they are always both needed.
+ Label slow, not_js_object;
+ if (!HasArgsInRegisters()) {
+ __ mov(object, Operand(esp, 2 * kPointerSize));
+ __ mov(function, Operand(esp, 1 * kPointerSize));
+ }
+
+ // Check that the left hand is a JS object.
+ __ JumpIfSmi(object, &not_js_object);
+ __ IsObjectJSObjectType(object, map, scratch, &not_js_object);
+
+ // If there is a call site cache don't look in the global cache, but do the
+ // real lookup and update the call site cache.
+ if (!HasCallSiteInlineCheck()) {
+ // Look up the function and the map in the instanceof cache.
+ Label miss;
+ __ CompareRoot(function, scratch, Heap::kInstanceofCacheFunctionRootIndex);
+ __ j(not_equal, &miss, Label::kNear);
+ __ CompareRoot(map, scratch, Heap::kInstanceofCacheMapRootIndex);
+ __ j(not_equal, &miss, Label::kNear);
+ __ LoadRoot(eax, Heap::kInstanceofCacheAnswerRootIndex);
+ __ ret((HasArgsInRegisters() ? 0 : 2) * kPointerSize);
+ __ bind(&miss);
+ }
+
+ // Get the prototype of the function.
+ __ TryGetFunctionPrototype(function, prototype, scratch, &slow, true);
+
+ // Check that the function prototype is a JS object.
+ __ JumpIfSmi(prototype, &slow);
+ __ IsObjectJSObjectType(prototype, scratch, scratch, &slow);
+
+ // Update the global instanceof or call site inlined cache with the current
+ // map and function. The cached answer will be set when it is known below.
+ if (!HasCallSiteInlineCheck()) {
+ __ StoreRoot(map, scratch, Heap::kInstanceofCacheMapRootIndex);
+ __ StoreRoot(function, scratch, Heap::kInstanceofCacheFunctionRootIndex);
+ } else {
+ // The constants for the code patching are based on no push instructions
+ // at the call site.
+ ASSERT(HasArgsInRegisters());
+ // Get return address and delta to inlined map check.
+ __ mov(scratch, Operand(esp, 0 * kPointerSize));
+ __ sub(scratch, Operand(esp, 1 * kPointerSize));
+ if (FLAG_debug_code) {
+ __ cmpb(Operand(scratch, 0), kCmpEdiOperandByte1);
+ __ Assert(equal, kInstanceofStubUnexpectedCallSiteCacheCmp1);
+ __ cmpb(Operand(scratch, 1), kCmpEdiOperandByte2);
+ __ Assert(equal, kInstanceofStubUnexpectedCallSiteCacheCmp2);
+ }
+ __ mov(scratch, Operand(scratch, kDeltaToCmpImmediate));
+ __ mov(Operand(scratch, 0), map);
+ }
+
+ // Loop through the prototype chain of the object looking for the function
+ // prototype.
+ __ mov(scratch, FieldOperand(map, Map::kPrototypeOffset));
+ Label loop, is_instance, is_not_instance;
+ __ bind(&loop);
+ __ cmp(scratch, prototype);
+ __ j(equal, &is_instance, Label::kNear);
+ Factory* factory = isolate()->factory();
+ __ cmp(scratch, Immediate(factory->null_value()));
+ __ j(equal, &is_not_instance, Label::kNear);
+ __ mov(scratch, FieldOperand(scratch, HeapObject::kMapOffset));
+ __ mov(scratch, FieldOperand(scratch, Map::kPrototypeOffset));
+ __ jmp(&loop);
+
+ __ bind(&is_instance);
+ if (!HasCallSiteInlineCheck()) {
+ __ mov(eax, Immediate(0));
+ __ StoreRoot(eax, scratch, Heap::kInstanceofCacheAnswerRootIndex);
+ } else {
+ // Get return address and delta to inlined map check.
+ __ mov(eax, factory->true_value());
+ __ mov(scratch, Operand(esp, 0 * kPointerSize));
+ __ sub(scratch, Operand(esp, 1 * kPointerSize));
+ if (FLAG_debug_code) {
+ __ cmpb(Operand(scratch, kDeltaToMov), kMovEaxImmediateByte);
+ __ Assert(equal, kInstanceofStubUnexpectedCallSiteCacheMov);
+ }
+ __ mov(Operand(scratch, kDeltaToMovImmediate), eax);
+ if (!ReturnTrueFalseObject()) {
+ __ Move(eax, Immediate(0));
+ }
+ }
+ __ ret((HasArgsInRegisters() ? 0 : 2) * kPointerSize);
+
+ __ bind(&is_not_instance);
+ if (!HasCallSiteInlineCheck()) {
+ __ mov(eax, Immediate(Smi::FromInt(1)));
+ __ StoreRoot(eax, scratch, Heap::kInstanceofCacheAnswerRootIndex);
+ } else {
+ // Get return address and delta to inlined map check.
+ __ mov(eax, factory->false_value());
+ __ mov(scratch, Operand(esp, 0 * kPointerSize));
+ __ sub(scratch, Operand(esp, 1 * kPointerSize));
+ if (FLAG_debug_code) {
+ __ cmpb(Operand(scratch, kDeltaToMov), kMovEaxImmediateByte);
+ __ Assert(equal, kInstanceofStubUnexpectedCallSiteCacheMov);
+ }
+ __ mov(Operand(scratch, kDeltaToMovImmediate), eax);
+ if (!ReturnTrueFalseObject()) {
+ __ Move(eax, Immediate(Smi::FromInt(1)));
+ }
+ }
+ __ ret((HasArgsInRegisters() ? 0 : 2) * kPointerSize);
+
+ Label object_not_null, object_not_null_or_smi;
+ __ bind(&not_js_object);
+ // Before null, smi and string value checks, check that the rhs is a function
+ // as for a non-function rhs an exception needs to be thrown.
+ __ JumpIfSmi(function, &slow, Label::kNear);
+ __ CmpObjectType(function, JS_FUNCTION_TYPE, scratch);
+ __ j(not_equal, &slow, Label::kNear);
+
+ // Null is not an instance of anything.
+ __ cmp(object, factory->null_value());
+ __ j(not_equal, &object_not_null, Label::kNear);
+ __ Move(eax, Immediate(Smi::FromInt(1)));
+ __ ret((HasArgsInRegisters() ? 0 : 2) * kPointerSize);
+
+ __ bind(&object_not_null);
+ // Smi values are not instances of anything.
+ __ JumpIfNotSmi(object, &object_not_null_or_smi, Label::kNear);
+ __ Move(eax, Immediate(Smi::FromInt(1)));
+ __ ret((HasArgsInRegisters() ? 0 : 2) * kPointerSize);
+
+ __ bind(&object_not_null_or_smi);
+ // String values are not instances of anything.
+ Condition is_string = masm->IsObjectStringType(object, scratch, scratch);
+ __ j(NegateCondition(is_string), &slow, Label::kNear);
+ __ Move(eax, Immediate(Smi::FromInt(1)));
+ __ ret((HasArgsInRegisters() ? 0 : 2) * kPointerSize);
+
+ // Slow-case: Go through the JavaScript implementation.
+ __ bind(&slow);
+ if (!ReturnTrueFalseObject()) {
+ // Tail call the builtin which returns 0 or 1.
+ if (HasArgsInRegisters()) {
+ // Push arguments below return address.
+ __ pop(scratch);
+ __ push(object);
+ __ push(function);
+ __ push(scratch);
+ }
+ __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION);
+ } else {
+ // Call the builtin and convert 0/1 to true/false.
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ push(object);
+ __ push(function);
+ __ InvokeBuiltin(Builtins::INSTANCE_OF, CALL_FUNCTION);
+ }
+ Label true_value, done;
+ __ test(eax, eax);
+ __ j(zero, &true_value, Label::kNear);
+ __ mov(eax, factory->false_value());
+ __ jmp(&done, Label::kNear);
+ __ bind(&true_value);
+ __ mov(eax, factory->true_value());
+ __ bind(&done);
+ __ ret((HasArgsInRegisters() ? 0 : 2) * kPointerSize);
+ }
+}
+
+
+Register InstanceofStub::left() { return eax; }
+
+
+Register InstanceofStub::right() { return edx; }
+
+
+// -------------------------------------------------------------------------
+// StringCharCodeAtGenerator
+
+void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
+ // If the receiver is a smi trigger the non-string case.
+ STATIC_ASSERT(kSmiTag == 0);
+ __ JumpIfSmi(object_, receiver_not_string_);
+
+ // Fetch the instance type of the receiver into result register.
+ __ mov(result_, FieldOperand(object_, HeapObject::kMapOffset));
+ __ movzx_b(result_, FieldOperand(result_, Map::kInstanceTypeOffset));
+ // If the receiver is not a string trigger the non-string case.
+ __ test(result_, Immediate(kIsNotStringMask));
+ __ j(not_zero, receiver_not_string_);
+
+ // If the index is non-smi trigger the non-smi case.
+ STATIC_ASSERT(kSmiTag == 0);
+ __ JumpIfNotSmi(index_, &index_not_smi_);
+ __ bind(&got_smi_index_);
+
+ // Check for index out of range.
+ __ cmp(index_, FieldOperand(object_, String::kLengthOffset));
+ __ j(above_equal, index_out_of_range_);
+
+ __ SmiUntag(index_);
+
+ Factory* factory = masm->isolate()->factory();
+ StringCharLoadGenerator::Generate(
+ masm, factory, object_, index_, result_, &call_runtime_);
+
+ __ SmiTag(result_);
+ __ bind(&exit_);
+}
+
+
+void StringCharCodeAtGenerator::GenerateSlow(
+ MacroAssembler* masm,
+ const RuntimeCallHelper& call_helper) {
+ __ Abort(kUnexpectedFallthroughToCharCodeAtSlowCase);
+
+ // Index is not a smi.
+ __ bind(&index_not_smi_);
+ // If index is a heap number, try converting it to an integer.
+ __ CheckMap(index_,
+ masm->isolate()->factory()->heap_number_map(),
+ index_not_number_,
+ DONT_DO_SMI_CHECK);
+ call_helper.BeforeCall(masm);
+ __ push(object_);
+ __ push(index_); // Consumed by runtime conversion function.
+ if (index_flags_ == STRING_INDEX_IS_NUMBER) {
+ __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero, 1);
+ } else {
+ ASSERT(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX);
+ // NumberToSmi discards numbers that are not exact integers.
+ __ CallRuntime(Runtime::kHiddenNumberToSmi, 1);
+ }
+ if (!index_.is(eax)) {
+ // Save the conversion result before the pop instructions below
+ // have a chance to overwrite it.
+ __ mov(index_, eax);
+ }
+ __ pop(object_);
+ // Reload the instance type.
+ __ mov(result_, FieldOperand(object_, HeapObject::kMapOffset));
+ __ movzx_b(result_, FieldOperand(result_, Map::kInstanceTypeOffset));
+ call_helper.AfterCall(masm);
+ // If index is still not a smi, it must be out of range.
+ STATIC_ASSERT(kSmiTag == 0);
+ __ JumpIfNotSmi(index_, index_out_of_range_);
+ // Otherwise, return to the fast path.
+ __ jmp(&got_smi_index_);
+
+ // Call runtime. We get here when the receiver is a string and the
+ // index is a number, but the code of getting the actual character
+ // is too complex (e.g., when the string needs to be flattened).
+ __ bind(&call_runtime_);
+ call_helper.BeforeCall(masm);
+ __ push(object_);
+ __ SmiTag(index_);
+ __ push(index_);
+ __ CallRuntime(Runtime::kHiddenStringCharCodeAt, 2);
+ if (!result_.is(eax)) {
+ __ mov(result_, eax);
+ }
+ call_helper.AfterCall(masm);
+ __ jmp(&exit_);
+
+ __ Abort(kUnexpectedFallthroughFromCharCodeAtSlowCase);
+}
+
+
+// -------------------------------------------------------------------------
+// StringCharFromCodeGenerator
+
+void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
+ // Fast case of Heap::LookupSingleCharacterStringFromCode.
+ STATIC_ASSERT(kSmiTag == 0);
+ STATIC_ASSERT(kSmiShiftSize == 0);
+ ASSERT(IsPowerOf2(String::kMaxOneByteCharCode + 1));
+ __ test(code_,
+ Immediate(kSmiTagMask |
+ ((~String::kMaxOneByteCharCode) << kSmiTagSize)));
+ __ j(not_zero, &slow_case_);
+
+ Factory* factory = masm->isolate()->factory();
+ __ Move(result_, Immediate(factory->single_character_string_cache()));
+ STATIC_ASSERT(kSmiTag == 0);
+ STATIC_ASSERT(kSmiTagSize == 1);
+ STATIC_ASSERT(kSmiShiftSize == 0);
+ // At this point code register contains smi tagged ASCII char code.
+ __ mov(result_, FieldOperand(result_,
+ code_, times_half_pointer_size,
+ FixedArray::kHeaderSize));
+ __ cmp(result_, factory->undefined_value());
+ __ j(equal, &slow_case_);
+ __ bind(&exit_);
+}
+
+
+void StringCharFromCodeGenerator::GenerateSlow(
+ MacroAssembler* masm,
+ const RuntimeCallHelper& call_helper) {
+ __ Abort(kUnexpectedFallthroughToCharFromCodeSlowCase);
+
+ __ bind(&slow_case_);
+ call_helper.BeforeCall(masm);
+ __ push(code_);
+ __ CallRuntime(Runtime::kCharFromCode, 1);
+ if (!result_.is(eax)) {
+ __ mov(result_, eax);
+ }
+ call_helper.AfterCall(masm);
+ __ jmp(&exit_);
+
+ __ Abort(kUnexpectedFallthroughFromCharFromCodeSlowCase);
+}
+
+
+void StringHelper::GenerateCopyCharactersREP(MacroAssembler* masm,
+ Register dest,
+ Register src,
+ Register count,
+ Register scratch,
+ bool ascii) {
+ // Copy characters using rep movs of doublewords.
+ // The destination is aligned on a 4 byte boundary because we are
+ // copying to the beginning of a newly allocated string.
+ ASSERT(dest.is(edi)); // rep movs destination
+ ASSERT(src.is(esi)); // rep movs source
+ ASSERT(count.is(ecx)); // rep movs count
+ ASSERT(!scratch.is(dest));
+ ASSERT(!scratch.is(src));
+ ASSERT(!scratch.is(count));
+
+ // Nothing to do for zero characters.
+ Label done;
+ __ test(count, count);
+ __ j(zero, &done);
+
+ // Make count the number of bytes to copy.
+ if (!ascii) {
+ __ shl(count, 1);
+ }
+
+ // Don't enter the rep movs if there are less than 4 bytes to copy.
+ Label last_bytes;
+ __ test(count, Immediate(~3));
+ __ j(zero, &last_bytes, Label::kNear);
+
+ // Copy from esi to edi using the rep movs instruction.
+ __ mov(scratch, count);
+ __ sar(count, 2); // Number of doublewords to copy.
+ __ cld();
+ __ rep_movs();
+
+ // Find number of bytes left.
+ __ mov(count, scratch);
+ __ and_(count, 3);
+
+ // Check if there are more bytes to copy.
+ __ bind(&last_bytes);
+ __ test(count, count);
+ __ j(zero, &done);
+
+ // Copy remaining characters.
+ Label loop;
+ __ bind(&loop);
+ __ mov_b(scratch, Operand(src, 0));
+ __ mov_b(Operand(dest, 0), scratch);
+ __ add(src, Immediate(1));
+ __ add(dest, Immediate(1));
+ __ sub(count, Immediate(1));
+ __ j(not_zero, &loop);
+
+ __ bind(&done);
+}
+
+
+void StringHelper::GenerateHashInit(MacroAssembler* masm,
+ Register hash,
+ Register character,
+ Register scratch) {
+ // hash = (seed + character) + ((seed + character) << 10);
+ if (masm->serializer_enabled()) {
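+ // With the serializer enabled the hash seed cannot be embedded as an
+ // immediate (the snapshot must not bake in a seed value), so load it from
+ // the root list at runtime.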
+ __ LoadRoot(scratch, Heap::kHashSeedRootIndex);
+ __ SmiUntag(scratch);
+ __ add(scratch, character);
+ __ mov(hash, scratch);
+ __ shl(scratch, 10);
+ __ add(hash, scratch);
+ } else {
+ int32_t seed = masm->isolate()->heap()->HashSeed();
+ __ lea(scratch, Operand(character, seed));
+ __ shl(scratch, 10);
+ __ lea(hash, Operand(scratch, character, times_1, seed));
+ }
+ // hash ^= hash >> 6;
+ __ mov(scratch, hash);
+ __ shr(scratch, 6);
+ __ xor_(hash, scratch);
+}
+
+
+void StringHelper::GenerateHashAddCharacter(MacroAssembler* masm,
+ Register hash,
+ Register character,
+ Register scratch) {
+ // hash += character;
+ __ add(hash, character);
+ // hash += hash << 10;
+ __ mov(scratch, hash);
+ __ shl(scratch, 10);
+ __ add(hash, scratch);
+ // hash ^= hash >> 6;
+ __ mov(scratch, hash);
+ __ shr(scratch, 6);
+ __ xor_(hash, scratch);
+}
+
+
+void StringHelper::GenerateHashGetHash(MacroAssembler* masm,
+ Register hash,
+ Register scratch) {
+ // hash += hash << 3;
+ __ mov(scratch, hash);
+ __ shl(scratch, 3);
+ __ add(hash, scratch);
+ // hash ^= hash >> 11;
+ __ mov(scratch, hash);
+ __ shr(scratch, 11);
+ __ xor_(hash, scratch);
+ // hash += hash << 15;
+ __ mov(scratch, hash);
+ __ shl(scratch, 15);
+ __ add(hash, scratch);
+
+ __ and_(hash, String::kHashBitMask);
+
+ // if (hash == 0) hash = 27;
+ Label hash_not_zero;
+ __ j(not_zero, &hash_not_zero, Label::kNear);
+ __ mov(hash, Immediate(StringHasher::kZeroHash));
+ __ bind(&hash_not_zero);
+}
+
+
+void SubStringStub::Generate(MacroAssembler* masm) {
+ Label runtime;
+
+ // Stack frame on entry.
+ // esp[0]: return address
+ // esp[4]: to
+ // esp[8]: from
+ // esp[12]: string
+
+ // Make sure first argument is a string.
+ __ mov(eax, Operand(esp, 3 * kPointerSize));
+ STATIC_ASSERT(kSmiTag == 0);
+ __ JumpIfSmi(eax, &runtime);
+ Condition is_string = masm->IsObjectStringType(eax, ebx, ebx);
+ __ j(NegateCondition(is_string), &runtime);
+
+ // eax: string
+ // ebx: instance type
+
+ // Calculate length of sub string using the smi values.
+ __ mov(ecx, Operand(esp, 1 * kPointerSize)); // To index.
+ __ JumpIfNotSmi(ecx, &runtime);
+ __ mov(edx, Operand(esp, 2 * kPointerSize)); // From index.
+ __ JumpIfNotSmi(edx, &runtime);
+ __ sub(ecx, edx);
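+ // ecx = to - from, i.e. the substring length, still smi-tagged.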
+ __ cmp(ecx, FieldOperand(eax, String::kLengthOffset));
+ Label not_original_string;
+ // Shorter than original string's length: an actual substring.
+ __ j(below, &not_original_string, Label::kNear);
+ // Longer than original string's length or negative: unsafe arguments.
+ __ j(above, &runtime);
+ // Return original string.
+ Counters* counters = isolate()->counters();
+ __ IncrementCounter(counters->sub_string_native(), 1);
+ __ ret(3 * kPointerSize);
+ __ bind(&not_original_string);
+
+ Label single_char;
+ __ cmp(ecx, Immediate(Smi::FromInt(1)));
+ __ j(equal, &single_char);
+
+ // eax: string
+ // ebx: instance type
+ // ecx: sub string length (smi)
+ // edx: from index (smi)
+ // Deal with different string types: update the index if necessary
+ // and put the underlying string into edi.
+ Label underlying_unpacked, sliced_string, seq_or_external_string;
+ // If the string is not indirect, it can only be sequential or external.
+ STATIC_ASSERT(kIsIndirectStringMask == (kSlicedStringTag & kConsStringTag));
+ STATIC_ASSERT(kIsIndirectStringMask != 0);
+ __ test(ebx, Immediate(kIsIndirectStringMask));
+ __ j(zero, &seq_or_external_string, Label::kNear);
+
+ Factory* factory = isolate()->factory();
+ __ test(ebx, Immediate(kSlicedNotConsMask));
+ __ j(not_zero, &sliced_string, Label::kNear);
+ // Cons string. Check whether it is flat, then fetch first part.
+ // Flat cons strings have an empty second part.
+ __ cmp(FieldOperand(eax, ConsString::kSecondOffset),
+ factory->empty_string());
+ __ j(not_equal, &runtime);
+ __ mov(edi, FieldOperand(eax, ConsString::kFirstOffset));
+ // Update instance type.
+ __ mov(ebx, FieldOperand(edi, HeapObject::kMapOffset));
+ __ movzx_b(ebx, FieldOperand(ebx, Map::kInstanceTypeOffset));
+ __ jmp(&underlying_unpacked, Label::kNear);
+
+ __ bind(&sliced_string);
+ // Sliced string. Fetch parent and adjust start index by offset.
+ __ add(edx, FieldOperand(eax, SlicedString::kOffsetOffset));
+ __ mov(edi, FieldOperand(eax, SlicedString::kParentOffset));
+ // Update instance type.
+ __ mov(ebx, FieldOperand(edi, HeapObject::kMapOffset));
+ __ movzx_b(ebx, FieldOperand(ebx, Map::kInstanceTypeOffset));
+ __ jmp(&underlying_unpacked, Label::kNear);
+
+ __ bind(&seq_or_external_string);
+ // Sequential or external string. Just move string to the expected register.
+ __ mov(edi, eax);
+
+ __ bind(&underlying_unpacked);
+
+ if (FLAG_string_slices) {
+ Label copy_routine;
+ // edi: underlying subject string
+ // ebx: instance type of underlying subject string
+ // edx: adjusted start index (smi)
+ // ecx: length (smi)
+ __ cmp(ecx, Immediate(Smi::FromInt(SlicedString::kMinLength)));
+ // Short slice. Copy instead of slicing.
+ __ j(less, &copy_routine);
+ // Allocate new sliced string. At this point we do not reload the instance
+ // type including the string encoding because we simply rely on the info
+ // provided by the original string. It does not matter if the original
+ // string's encoding is wrong because we always have to recheck encoding of
+ // the newly created string's parent anyways due to externalized strings.
+ Label two_byte_slice, set_slice_header;
+ STATIC_ASSERT((kStringEncodingMask & kOneByteStringTag) != 0);
+ STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
+ __ test(ebx, Immediate(kStringEncodingMask));
+ __ j(zero, &two_byte_slice, Label::kNear);
+ __ AllocateAsciiSlicedString(eax, ebx, no_reg, &runtime);
+ __ jmp(&set_slice_header, Label::kNear);
+ __ bind(&two_byte_slice);
+ __ AllocateTwoByteSlicedString(eax, ebx, no_reg, &runtime);
+ __ bind(&set_slice_header);
+ __ mov(FieldOperand(eax, SlicedString::kLengthOffset), ecx);
+ __ mov(FieldOperand(eax, SlicedString::kHashFieldOffset),
+ Immediate(String::kEmptyHashField));
+ __ mov(FieldOperand(eax, SlicedString::kParentOffset), edi);
+ __ mov(FieldOperand(eax, SlicedString::kOffsetOffset), edx);
+ __ IncrementCounter(counters->sub_string_native(), 1);
+ __ ret(3 * kPointerSize);
+
+ __ bind(&copy_routine);
+ }
+
+ // edi: underlying subject string
+ // ebx: instance type of underlying subject string
+ // edx: adjusted start index (smi)
+ // ecx: length (smi)
+ // The subject string can only be external or sequential string of either
+ // encoding at this point.
+ Label two_byte_sequential, runtime_drop_two, sequential_string;
+ STATIC_ASSERT(kExternalStringTag != 0);
+ STATIC_ASSERT(kSeqStringTag == 0);
+ __ test_b(ebx, kExternalStringTag);
+ __ j(zero, &sequential_string);
+
+ // Handle external string.
+ // Rule out short external strings.
+ STATIC_CHECK(kShortExternalStringTag != 0);
+ __ test_b(ebx, kShortExternalStringMask);
+ __ j(not_zero, &runtime);
+ __ mov(edi, FieldOperand(edi, ExternalString::kResourceDataOffset));
+ // Move the pointer so that offset-wise, it looks like a sequential string.
+ STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
+ __ sub(edi, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
+
+ __ bind(&sequential_string);
+ // Stash away (adjusted) index and (underlying) string.
+ __ push(edx);
+ __ push(edi);
+ __ SmiUntag(ecx);
+ STATIC_ASSERT((kOneByteStringTag & kStringEncodingMask) != 0);
+ __ test_b(ebx, kStringEncodingMask);
+ __ j(zero, &two_byte_sequential);
+
+ // Sequential ASCII string. Allocate the result.
+ __ AllocateAsciiString(eax, ecx, ebx, edx, edi, &runtime_drop_two);
+
+ // eax: result string
+ // ecx: result string length
+ __ mov(edx, esi); // esi used by following code.
+ // Locate first character of result.
+ __ mov(edi, eax);
+ __ add(edi, Immediate(SeqOneByteString::kHeaderSize - kHeapObjectTag));
+ // Load string argument and locate character of sub string start.
+ __ pop(esi);
+ __ pop(ebx);
+ __ SmiUntag(ebx);
+ __ lea(esi, FieldOperand(esi, ebx, times_1, SeqOneByteString::kHeaderSize));
+
+ // eax: result string
+ // ecx: result length
+ // edx: original value of esi
+ // edi: first character of result
+ // esi: character of sub string start
+ StringHelper::GenerateCopyCharactersREP(masm, edi, esi, ecx, ebx, true);
+ __ mov(esi, edx); // Restore esi.
+ __ IncrementCounter(counters->sub_string_native(), 1);
+ __ ret(3 * kPointerSize);
+
+ __ bind(&two_byte_sequential);
+ // Sequential two-byte string. Allocate the result.
+ __ AllocateTwoByteString(eax, ecx, ebx, edx, edi, &runtime_drop_two);
+
+ // eax: result string
+ // ecx: result string length
+ __ mov(edx, esi); // esi used by following code.
+ // Locate first character of result.
+ __ mov(edi, eax);
+ __ add(edi,
+ Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
+ // Load string argument and locate character of sub string start.
+ __ pop(esi);
+ __ pop(ebx);
+ // As the from index is a smi it is already multiplied by 2, which matches
+ // the size of a two-byte character.
+ STATIC_ASSERT(kSmiTag == 0);
+ STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
+ __ lea(esi, FieldOperand(esi, ebx, times_1, SeqTwoByteString::kHeaderSize));
+
+ // eax: result string
+ // ecx: result length
+ // edx: original value of esi
+ // edi: first character of result
+ // esi: character of sub string start
+ StringHelper::GenerateCopyCharactersREP(masm, edi, esi, ecx, ebx, false);
+ __ mov(esi, edx); // Restore esi.
+ __ IncrementCounter(counters->sub_string_native(), 1);
+ __ ret(3 * kPointerSize);
+
+ // Drop pushed values on the stack before tail call.
+ __ bind(&runtime_drop_two);
+ __ Drop(2);
+
+ // Just jump to runtime to create the sub string.
+ __ bind(&runtime);
+ __ TailCallRuntime(Runtime::kHiddenSubString, 3, 1);
+
+ __ bind(&single_char);
+ // eax: string
+ // ebx: instance type
+ // ecx: sub string length (smi)
+ // edx: from index (smi)
+ StringCharAtGenerator generator(
+ eax, edx, ecx, eax, &runtime, &runtime, &runtime, STRING_INDEX_IS_NUMBER);
+ generator.GenerateFast(masm);
+ __ ret(3 * kPointerSize);
+ generator.SkipSlow(masm, &runtime);
+}
+
+
+void StringCompareStub::GenerateFlatAsciiStringEquals(MacroAssembler* masm,
+ Register left,
+ Register right,
+ Register scratch1,
+ Register scratch2) {
+ Register length = scratch1;
+
+ // Compare lengths.
+ Label strings_not_equal, check_zero_length;
+ __ mov(length, FieldOperand(left, String::kLengthOffset));
+ __ cmp(length, FieldOperand(right, String::kLengthOffset));
+ __ j(equal, &check_zero_length, Label::kNear);
+ __ bind(&strings_not_equal);
+ __ Move(eax, Immediate(Smi::FromInt(NOT_EQUAL)));
+ __ ret(0);
+
+ // Check if the length is zero.
+ Label compare_chars;
+ __ bind(&check_zero_length);
+ STATIC_ASSERT(kSmiTag == 0);
+ __ test(length, length);
+ __ j(not_zero, &compare_chars, Label::kNear);
+ __ Move(eax, Immediate(Smi::FromInt(EQUAL)));
+ __ ret(0);
+
+ // Compare characters.
+ __ bind(&compare_chars);
+ GenerateAsciiCharsCompareLoop(masm, left, right, length, scratch2,
+ &strings_not_equal, Label::kNear);
+
+ // Characters are equal.
+ __ Move(eax, Immediate(Smi::FromInt(EQUAL)));
+ __ ret(0);
+}
+
+
+void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
+ Register left,
+ Register right,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3) {
+ Counters* counters = masm->isolate()->counters();
+ __ IncrementCounter(counters->string_compare_native(), 1);
+
+ // Find minimum length.
+ Label left_shorter;
+ __ mov(scratch1, FieldOperand(left, String::kLengthOffset));
+ __ mov(scratch3, scratch1);
+ __ sub(scratch3, FieldOperand(right, String::kLengthOffset));
+
+ Register length_delta = scratch3;
+
+ __ j(less_equal, &left_shorter, Label::kNear);
+ // Right string is shorter. Change scratch1 to be length of right string.
+ __ sub(scratch1, length_delta);
+ __ bind(&left_shorter);
+
+ Register min_length = scratch1;
+
+ // If either length is zero, just compare lengths.
+ Label compare_lengths;
+ __ test(min_length, min_length);
+ __ j(zero, &compare_lengths, Label::kNear);
+
+ // Compare characters.
+ Label result_not_equal;
+ GenerateAsciiCharsCompareLoop(masm, left, right, min_length, scratch2,
+ &result_not_equal, Label::kNear);
+
+ // Compare lengths - strings up to min-length are equal.
+ __ bind(&compare_lengths);
+ __ test(length_delta, length_delta);
+ Label length_not_equal;
+ __ j(not_zero, &length_not_equal, Label::kNear);
+
+ // Result is EQUAL.
+ STATIC_ASSERT(EQUAL == 0);
+ STATIC_ASSERT(kSmiTag == 0);
+ __ Move(eax, Immediate(Smi::FromInt(EQUAL)));
+ __ ret(0);
+
+ Label result_greater;
+ Label result_less;
+ __ bind(&length_not_equal);
+ __ j(greater, &result_greater, Label::kNear);
+ __ jmp(&result_less, Label::kNear);
+ __ bind(&result_not_equal);
+ __ j(above, &result_greater, Label::kNear);
+ __ bind(&result_less);
+
+ // Result is LESS.
+ __ Move(eax, Immediate(Smi::FromInt(LESS)));
+ __ ret(0);
+
+ // Result is GREATER.
+ __ bind(&result_greater);
+ __ Move(eax, Immediate(Smi::FromInt(GREATER)));
+ __ ret(0);
+}
+
+
+void StringCompareStub::GenerateAsciiCharsCompareLoop(
+ MacroAssembler* masm,
+ Register left,
+ Register right,
+ Register length,
+ Register scratch,
+ Label* chars_not_equal,
+ Label::Distance chars_not_equal_near) {
+ // Change index to run from -length to -1 by adding length to string
+ // start. This means that loop ends when index reaches zero, which
+ // doesn't need an additional compare.
+ __ SmiUntag(length);
+ __ lea(left,
+ FieldOperand(left, length, times_1, SeqOneByteString::kHeaderSize));
+ __ lea(right,
+ FieldOperand(right, length, times_1, SeqOneByteString::kHeaderSize));
+ __ neg(length);
+ Register index = length; // index = -length;
+
+ // Compare loop.
+ Label loop;
+ __ bind(&loop);
+ __ mov_b(scratch, Operand(left, index, times_1, 0));
+ __ cmpb(scratch, Operand(right, index, times_1, 0));
+ __ j(not_equal, chars_not_equal, chars_not_equal_near);
+ __ inc(index);
+ __ j(not_zero, &loop);
+}
+
+
+void StringCompareStub::Generate(MacroAssembler* masm) {
+ Label runtime;
+
+ // Stack frame on entry.
+ // esp[0]: return address
+ // esp[4]: right string
+ // esp[8]: left string
+
+ __ mov(edx, Operand(esp, 2 * kPointerSize)); // left
+ __ mov(eax, Operand(esp, 1 * kPointerSize)); // right
+
+ Label not_same;
+ __ cmp(edx, eax);
+ __ j(not_equal, &not_same, Label::kNear);
+ STATIC_ASSERT(EQUAL == 0);
+ STATIC_ASSERT(kSmiTag == 0);
+ __ Move(eax, Immediate(Smi::FromInt(EQUAL)));
+ __ IncrementCounter(isolate()->counters()->string_compare_native(), 1);
+ __ ret(2 * kPointerSize);
+
+ __ bind(&not_same);
+
+ // Check that both objects are sequential ASCII strings.
+ __ JumpIfNotBothSequentialAsciiStrings(edx, eax, ecx, ebx, &runtime);
+
+ // Compare flat ASCII strings.
+ // Drop arguments from the stack.
+ __ pop(ecx);
+ __ add(esp, Immediate(2 * kPointerSize));
+ __ push(ecx);
+ GenerateCompareFlatAsciiStrings(masm, edx, eax, ecx, ebx, edi);
+
+ // Call the runtime; it returns -1 (less), 0 (equal), or 1 (greater)
+ // tagged as a small integer.
+ __ bind(&runtime);
+ __ TailCallRuntime(Runtime::kHiddenStringCompare, 2, 1);
+}
+
+
+void BinaryOpICWithAllocationSiteStub::Generate(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- edx : left
+ // -- eax : right
+ // -- esp[0] : return address
+ // -----------------------------------
+
+ // Load ecx with the allocation site. We stick an undefined dummy value here
+ // and replace it with the real allocation site later when we instantiate this
+ // stub in BinaryOpICWithAllocationSiteStub::GetCodeCopyFromTemplate().
+ __ mov(ecx, handle(isolate()->heap()->undefined_value()));
+
+ // Make sure that we actually patched the allocation site.
+ if (FLAG_debug_code) {
+ __ test(ecx, Immediate(kSmiTagMask));
+ __ Assert(not_equal, kExpectedAllocationSite);
+ __ cmp(FieldOperand(ecx, HeapObject::kMapOffset),
+ isolate()->factory()->allocation_site_map());
+ __ Assert(equal, kExpectedAllocationSite);
+ }
+
+ // Tail call into the stub that handles binary operations with allocation
+ // sites.
+ BinaryOpWithAllocationSiteStub stub(isolate(), state_);
+ __ TailCallStub(&stub);
+}
+
+
+void ICCompareStub::GenerateSmis(MacroAssembler* masm) {
+ ASSERT(state_ == CompareIC::SMI);
+ Label miss;
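+ // Or-ing the operands sets the heap object tag bit if either one is not a
+ // smi, so a single smi check covers both inputs.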
+ __ mov(ecx, edx);
+ __ or_(ecx, eax);
+ __ JumpIfNotSmi(ecx, &miss, Label::kNear);
+
+ if (GetCondition() == equal) {
+ // For equality we do not care about the sign of the result.
+ __ sub(eax, edx);
+ } else {
+ Label done;
+ __ sub(edx, eax);
+ __ j(no_overflow, &done, Label::kNear);
+ // Correct sign of result in case of overflow.
+ __ not_(edx);
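+ // (~edx == -edx - 1, which flips the sign back; the caller only needs the
+ // correct sign, not the exact difference.)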
+ __ bind(&done);
+ __ mov(eax, edx);
+ }
+ __ ret(0);
+
+ __ bind(&miss);
+ GenerateMiss(masm);
+}
+
+
+void ICCompareStub::GenerateNumbers(MacroAssembler* masm) {
+ ASSERT(state_ == CompareIC::NUMBER);
+
+ Label generic_stub;
+ Label unordered, maybe_undefined1, maybe_undefined2;
+ Label miss;
+
+ if (left_ == CompareIC::SMI) {
+ __ JumpIfNotSmi(edx, &miss);
+ }
+ if (right_ == CompareIC::SMI) {
+ __ JumpIfNotSmi(eax, &miss);
+ }
+
+ // The double comparison itself is not inlined here; after checking that
+ // both operands are heap numbers we fall through to the generic compare
+ // stub.
+ __ mov(ecx, edx);
+ __ and_(ecx, eax);
+ __ JumpIfSmi(ecx, &generic_stub, Label::kNear);
+
+ __ cmp(FieldOperand(eax, HeapObject::kMapOffset),
+ isolate()->factory()->heap_number_map());
+ __ j(not_equal, &maybe_undefined1, Label::kNear);
+ __ cmp(FieldOperand(edx, HeapObject::kMapOffset),
+ isolate()->factory()->heap_number_map());
+ __ j(not_equal, &maybe_undefined2, Label::kNear);
+
+ __ bind(&unordered);
+ __ bind(&generic_stub);
+ ICCompareStub stub(isolate(), op_, CompareIC::GENERIC, CompareIC::GENERIC,
+ CompareIC::GENERIC);
+ __ jmp(stub.GetCode(), RelocInfo::CODE_TARGET);
+
+ __ bind(&maybe_undefined1);
+ if (Token::IsOrderedRelationalCompareOp(op_)) {
+ __ cmp(eax, Immediate(isolate()->factory()->undefined_value()));
+ __ j(not_equal, &miss);
+ __ JumpIfSmi(edx, &unordered);
+ __ CmpObjectType(edx, HEAP_NUMBER_TYPE, ecx);
+ __ j(not_equal, &maybe_undefined2, Label::kNear);
+ __ jmp(&unordered);
+ }
+
+ __ bind(&maybe_undefined2);
+ if (Token::IsOrderedRelationalCompareOp(op_)) {
+ __ cmp(edx, Immediate(isolate()->factory()->undefined_value()));
+ __ j(equal, &unordered);
+ }
+
+ __ bind(&miss);
+ GenerateMiss(masm);
+}
+
+
+void ICCompareStub::GenerateInternalizedStrings(MacroAssembler* masm) {
+ ASSERT(state_ == CompareIC::INTERNALIZED_STRING);
+ ASSERT(GetCondition() == equal);
+
+ // Registers containing left and right operands respectively.
+ Register left = edx;
+ Register right = eax;
+ Register tmp1 = ecx;
+ Register tmp2 = ebx;
+
+ // Check that both operands are heap objects.
+ Label miss;
+ __ mov(tmp1, left);
+ STATIC_ASSERT(kSmiTag == 0);
+ __ and_(tmp1, right);
+ __ JumpIfSmi(tmp1, &miss, Label::kNear);
+
+ // Check that both operands are internalized strings.
+ __ mov(tmp1, FieldOperand(left, HeapObject::kMapOffset));
+ __ mov(tmp2, FieldOperand(right, HeapObject::kMapOffset));
+ __ movzx_b(tmp1, FieldOperand(tmp1, Map::kInstanceTypeOffset));
+ __ movzx_b(tmp2, FieldOperand(tmp2, Map::kInstanceTypeOffset));
+ STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
+ __ or_(tmp1, tmp2);
+ __ test(tmp1, Immediate(kIsNotStringMask | kIsNotInternalizedMask));
+ __ j(not_zero, &miss, Label::kNear);
+
+ // Internalized strings are compared by identity.
+ Label done;
+ __ cmp(left, right);
+ // Make sure eax is non-zero. At this point input operands are
+ // guaranteed to be non-zero.
+ ASSERT(right.is(eax));
+ __ j(not_equal, &done, Label::kNear);
+ STATIC_ASSERT(EQUAL == 0);
+ STATIC_ASSERT(kSmiTag == 0);
+ __ Move(eax, Immediate(Smi::FromInt(EQUAL)));
+ __ bind(&done);
+ __ ret(0);
+
+ __ bind(&miss);
+ GenerateMiss(masm);
+}
+
+
+void ICCompareStub::GenerateUniqueNames(MacroAssembler* masm) {
+ ASSERT(state_ == CompareIC::UNIQUE_NAME);
+ ASSERT(GetCondition() == equal);
+
+ // Registers containing left and right operands respectively.
+ Register left = edx;
+ Register right = eax;
+ Register tmp1 = ecx;
+ Register tmp2 = ebx;
+
+ // Check that both operands are heap objects.
+ Label miss;
+ __ mov(tmp1, left);
+ STATIC_ASSERT(kSmiTag == 0);
+ __ and_(tmp1, right);
+ __ JumpIfSmi(tmp1, &miss, Label::kNear);
+
+ // Check that both operands are unique names. This leaves the instance
+ // types loaded in tmp1 and tmp2.
+ __ mov(tmp1, FieldOperand(left, HeapObject::kMapOffset));
+ __ mov(tmp2, FieldOperand(right, HeapObject::kMapOffset));
+ __ movzx_b(tmp1, FieldOperand(tmp1, Map::kInstanceTypeOffset));
+ __ movzx_b(tmp2, FieldOperand(tmp2, Map::kInstanceTypeOffset));
+
+ __ JumpIfNotUniqueName(tmp1, &miss, Label::kNear);
+ __ JumpIfNotUniqueName(tmp2, &miss, Label::kNear);
+
+ // Unique names are compared by identity.
+ Label done;
+ __ cmp(left, right);
+ // Make sure eax is non-zero. At this point input operands are
+ // guaranteed to be non-zero.
+ ASSERT(right.is(eax));
+ __ j(not_equal, &done, Label::kNear);
+ STATIC_ASSERT(EQUAL == 0);
+ STATIC_ASSERT(kSmiTag == 0);
+ __ Move(eax, Immediate(Smi::FromInt(EQUAL)));
+ __ bind(&done);
+ __ ret(0);
+
+ __ bind(&miss);
+ GenerateMiss(masm);
+}
+
+
+void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
+ ASSERT(state_ == CompareIC::STRING);
+ Label miss;
+
+ bool equality = Token::IsEqualityOp(op_);
+
+ // Registers containing left and right operands respectively.
+ Register left = edx;
+ Register right = eax;
+ Register tmp1 = ecx;
+ Register tmp2 = ebx;
+ Register tmp3 = edi;
+
+ // Check that both operands are heap objects.
+ __ mov(tmp1, left);
+ STATIC_ASSERT(kSmiTag == 0);
+ __ and_(tmp1, right);
+ __ JumpIfSmi(tmp1, &miss);
+
+ // Check that both operands are strings. This leaves the instance
+ // types loaded in tmp1 and tmp2.
+ __ mov(tmp1, FieldOperand(left, HeapObject::kMapOffset));
+ __ mov(tmp2, FieldOperand(right, HeapObject::kMapOffset));
+ __ movzx_b(tmp1, FieldOperand(tmp1, Map::kInstanceTypeOffset));
+ __ movzx_b(tmp2, FieldOperand(tmp2, Map::kInstanceTypeOffset));
+ __ mov(tmp3, tmp1);
+ STATIC_ASSERT(kNotStringTag != 0);
+ __ or_(tmp3, tmp2);
+ __ test(tmp3, Immediate(kIsNotStringMask));
+ __ j(not_zero, &miss);
+
+ // Fast check for identical strings.
+ Label not_same;
+ __ cmp(left, right);
+ __ j(not_equal, &not_same, Label::kNear);
+ STATIC_ASSERT(EQUAL == 0);
+ STATIC_ASSERT(kSmiTag == 0);
+ __ Move(eax, Immediate(Smi::FromInt(EQUAL)));
+ __ ret(0);
+
+ // Handle not identical strings.
+ __ bind(&not_same);
+
+ // Check that both strings are internalized. If they are, we're done
+ // because we already know they are not identical. But in the case of
+ // non-equality compare, we still need to determine the order. We
+ // also know they are both strings.
+ if (equality) {
+ Label do_compare;
+ STATIC_ASSERT(kInternalizedTag == 0);
+ __ or_(tmp1, tmp2);
+ __ test(tmp1, Immediate(kIsNotInternalizedMask));
+ __ j(not_zero, &do_compare, Label::kNear);
+ // Make sure eax is non-zero. At this point input operands are
+ // guaranteed to be non-zero.
+ ASSERT(right.is(eax));
+ __ ret(0);
+ __ bind(&do_compare);
+ }
+
+ // Check that both strings are sequential ASCII.
+ Label runtime;
+ __ JumpIfNotBothSequentialAsciiStrings(left, right, tmp1, tmp2, &runtime);
+
+ // Compare flat ASCII strings. Returns when done.
+ if (equality) {
+ StringCompareStub::GenerateFlatAsciiStringEquals(
+ masm, left, right, tmp1, tmp2);
+ } else {
+ StringCompareStub::GenerateCompareFlatAsciiStrings(
+ masm, left, right, tmp1, tmp2, tmp3);
+ }
+
+ // Handle more complex cases in runtime.
+ __ bind(&runtime);
+ __ pop(tmp1); // Return address.
+ __ push(left);
+ __ push(right);
+ __ push(tmp1);
+ if (equality) {
+ __ TailCallRuntime(Runtime::kStringEquals, 2, 1);
+ } else {
+ __ TailCallRuntime(Runtime::kHiddenStringCompare, 2, 1);
+ }
+
+ __ bind(&miss);
+ GenerateMiss(masm);
+}
+
+
+void ICCompareStub::GenerateObjects(MacroAssembler* masm) {
+ ASSERT(state_ == CompareIC::OBJECT);
+ Label miss;
+ __ mov(ecx, edx);
+ __ and_(ecx, eax);
+ __ JumpIfSmi(ecx, &miss, Label::kNear);
+
+ __ CmpObjectType(eax, JS_OBJECT_TYPE, ecx);
+ __ j(not_equal, &miss, Label::kNear);
+ __ CmpObjectType(edx, JS_OBJECT_TYPE, ecx);
+ __ j(not_equal, &miss, Label::kNear);
+
+ ASSERT(GetCondition() == equal);
+ __ sub(eax, edx);
+ __ ret(0);
+
+ __ bind(&miss);
+ GenerateMiss(masm);
+}
+
+
+void ICCompareStub::GenerateKnownObjects(MacroAssembler* masm) {
+ Label miss;
+ __ mov(ecx, edx);
+ __ and_(ecx, eax);
+ __ JumpIfSmi(ecx, &miss, Label::kNear);
+
+ __ mov(ecx, FieldOperand(eax, HeapObject::kMapOffset));
+ __ mov(ebx, FieldOperand(edx, HeapObject::kMapOffset));
+ __ cmp(ecx, known_map_);
+ __ j(not_equal, &miss, Label::kNear);
+ __ cmp(ebx, known_map_);
+ __ j(not_equal, &miss, Label::kNear);
+
+ __ sub(eax, edx);
+ __ ret(0);
+
+ __ bind(&miss);
+ GenerateMiss(masm);
+}
+
+
+void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
+ {
+ // Call the runtime system in a fresh internal frame.
+ ExternalReference miss = ExternalReference(IC_Utility(IC::kCompareIC_Miss),
+ isolate());
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ push(edx); // Preserve edx and eax.
+ __ push(eax);
+ __ push(edx); // And also use them as the arguments.
+ __ push(eax);
+ __ push(Immediate(Smi::FromInt(op_)));
+ __ CallExternalReference(miss, 3);
+ // Compute the entry point of the rewritten stub.
+ __ lea(edi, FieldOperand(eax, Code::kHeaderSize));
+ __ pop(eax);
+ __ pop(edx);
+ }
+
+ // Do a tail call to the rewritten stub.
+ __ jmp(edi);
+}
+
+
+// Helper function used to check that the dictionary doesn't contain
+// the property. This function may return false negatives, so miss_label
+// must always call a backup property check that is complete.
+// This function is safe to call if the receiver has fast properties.
+// Name must be a unique name and receiver must be a heap object.
+void NameDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
+ Label* miss,
+ Label* done,
+ Register properties,
+ Handle<Name> name,
+ Register r0) {
+ ASSERT(name->IsUniqueName());
+
+ // If names of slots in range from 1 to kProbes - 1 for the hash value are
+ // not equal to the name and kProbes-th slot is not used (its name is the
+ // undefined value), it guarantees the hash table doesn't contain the
+ // property. It's true even if some slots represent deleted properties
+ // (their names are the hole value).
+ for (int i = 0; i < kInlinedProbes; i++) {
+ // Compute the masked index: (hash + i + i * i) & mask.
+ Register index = r0;
+ // Capacity is smi 2^n.
+ __ mov(index, FieldOperand(properties, kCapacityOffset));
+ __ dec(index);
+ __ and_(index,
+ Immediate(Smi::FromInt(name->Hash() +
+ NameDictionary::GetProbeOffset(i))));
+
+ // Scale the index by multiplying by the entry size.
+ ASSERT(NameDictionary::kEntrySize == 3);
+ __ lea(index, Operand(index, index, times_2, 0)); // index *= 3.
+ Register entity_name = r0;
+ // Having undefined at this place means the name is not contained.
+ ASSERT_EQ(kSmiTagSize, 1);
+ __ mov(entity_name, Operand(properties, index, times_half_pointer_size,
+ kElementsStartOffset - kHeapObjectTag));
+ __ cmp(entity_name, masm->isolate()->factory()->undefined_value());
+ __ j(equal, done);
+
+ // Stop if found the property.
+ __ cmp(entity_name, Handle<Name>(name));
+ __ j(equal, miss);
+
+ Label good;
+ // Check for the hole and skip.
+ __ cmp(entity_name, masm->isolate()->factory()->the_hole_value());
+ __ j(equal, &good, Label::kNear);
+
+ // Check if the entry name is not a unique name.
+ __ mov(entity_name, FieldOperand(entity_name, HeapObject::kMapOffset));
+ __ JumpIfNotUniqueName(FieldOperand(entity_name, Map::kInstanceTypeOffset),
+ miss);
+ __ bind(&good);
+ }
+
+ NameDictionaryLookupStub stub(masm->isolate(), properties, r0, r0,
+ NEGATIVE_LOOKUP);
+ __ push(Immediate(Handle<Object>(name)));
+ __ push(Immediate(name->Hash()));
+ __ CallStub(&stub);
+ __ test(r0, r0);
+ __ j(not_zero, miss);
+ __ jmp(done);
+}
+
+
+// Probe the name dictionary in the |elements| register. Jump to the
+// |done| label if a property with the given name is found leaving the
+// index into the dictionary in |r0|. Jump to the |miss| label
+// otherwise.
+void NameDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm,
+ Label* miss,
+ Label* done,
+ Register elements,
+ Register name,
+ Register r0,
+ Register r1) {
+ ASSERT(!elements.is(r0));
+ ASSERT(!elements.is(r1));
+ ASSERT(!name.is(r0));
+ ASSERT(!name.is(r1));
+
+ __ AssertName(name);
+
+ __ mov(r1, FieldOperand(elements, kCapacityOffset));
+ __ shr(r1, kSmiTagSize); // convert smi to int
+ __ dec(r1);
+
+ // Generate an unrolled loop that performs a few probes before
+ // giving up. Measurements done on Gmail indicate that 2 probes
+ // cover ~93% of loads from dictionaries.
+ for (int i = 0; i < kInlinedProbes; i++) {
+ // Compute the masked index: (hash + i + i * i) & mask.
+ __ mov(r0, FieldOperand(name, Name::kHashFieldOffset));
+ __ shr(r0, Name::kHashShift);
+ if (i > 0) {
+ __ add(r0, Immediate(NameDictionary::GetProbeOffset(i)));
+ }
+ __ and_(r0, r1);
+
+ // Scale the index by multiplying by the entry size.
+ ASSERT(NameDictionary::kEntrySize == 3);
+ __ lea(r0, Operand(r0, r0, times_2, 0)); // r0 = r0 * 3
+
+ // Check if the key is identical to the name.
+ __ cmp(name, Operand(elements,
+ r0,
+ times_4,
+ kElementsStartOffset - kHeapObjectTag));
+ __ j(equal, done);
+ }
+
+ NameDictionaryLookupStub stub(masm->isolate(), elements, r1, r0,
+ POSITIVE_LOOKUP);
+ __ push(name);
+ __ mov(r0, FieldOperand(name, Name::kHashFieldOffset));
+ __ shr(r0, Name::kHashShift);
+ __ push(r0);
+ __ CallStub(&stub);
+
+ __ test(r1, r1);
+ __ j(zero, miss);
+ __ jmp(done);
+}
+
+
+void NameDictionaryLookupStub::Generate(MacroAssembler* masm) {
+ // This stub overrides SometimesSetsUpAFrame() to return false. That means
+ // we cannot call anything that could cause a GC from this stub.
+ // Stack frame on entry:
+ // esp[0 * kPointerSize]: return address.
+ // esp[1 * kPointerSize]: key's hash.
+ // esp[2 * kPointerSize]: key.
+ // Registers:
+ // dictionary_: NameDictionary to probe.
+ // result_: used as scratch.
+ // index_: will hold an index of entry if lookup is successful.
+ // might alias with result_.
+ // Returns:
+ // result_ is zero if lookup failed, non zero otherwise.
+
+ Label in_dictionary, maybe_in_dictionary, not_in_dictionary;
+
+ Register scratch = result_;
+
+ __ mov(scratch, FieldOperand(dictionary_, kCapacityOffset));
+ __ dec(scratch);
+ __ SmiUntag(scratch);
+ __ push(scratch);
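+ // Stack after the push: esp[0] = capacity - 1 (the probe mask),
+ // esp[4] = return address, esp[8] = key's hash, esp[12] = key.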
+
+ // If names of slots in range from 1 to kProbes - 1 for the hash value are
+ // not equal to the name and kProbes-th slot is not used (its name is the
+ // undefined value), it guarantees the hash table doesn't contain the
+ // property. It's true even if some slots represent deleted properties
+ // (their names are the hole value).
+ for (int i = kInlinedProbes; i < kTotalProbes; i++) {
+ // Compute the masked index: (hash + i + i * i) & mask.
+ __ mov(scratch, Operand(esp, 2 * kPointerSize));
+ if (i > 0) {
+ __ add(scratch, Immediate(NameDictionary::GetProbeOffset(i)));
+ }
+ __ and_(scratch, Operand(esp, 0));
+
+ // Scale the index by multiplying by the entry size.
+ ASSERT(NameDictionary::kEntrySize == 3);
+ __ lea(index_, Operand(scratch, scratch, times_2, 0)); // index *= 3.
+
+ // Having undefined at this place means the name is not contained.
+ ASSERT_EQ(kSmiTagSize, 1);
+ __ mov(scratch, Operand(dictionary_,
+ index_,
+ times_pointer_size,
+ kElementsStartOffset - kHeapObjectTag));
+ __ cmp(scratch, isolate()->factory()->undefined_value());
+ __ j(equal, &not_in_dictionary);
+
+ // Stop if found the property.
+ __ cmp(scratch, Operand(esp, 3 * kPointerSize));
+ __ j(equal, &in_dictionary);
+
+ if (i != kTotalProbes - 1 && mode_ == NEGATIVE_LOOKUP) {
+ // If we hit a key that is not a unique name during negative
+ // lookup we have to bailout as this key might be equal to the
+ // key we are looking for.
+
+ // Check if the entry name is not a unique name.
+ __ mov(scratch, FieldOperand(scratch, HeapObject::kMapOffset));
+ __ JumpIfNotUniqueName(FieldOperand(scratch, Map::kInstanceTypeOffset),
+ &maybe_in_dictionary);
+ }
+ }
+
+ __ bind(&maybe_in_dictionary);
+ // If we are doing negative lookup then probing failure should be
+ // treated as a lookup success. For positive lookup probing failure
+ // should be treated as lookup failure.
+ if (mode_ == POSITIVE_LOOKUP) {
+ __ mov(result_, Immediate(0));
+ __ Drop(1);
+ __ ret(2 * kPointerSize);
+ }
+
+ __ bind(&in_dictionary);
+ __ mov(result_, Immediate(1));
+ __ Drop(1);
+ __ ret(2 * kPointerSize);
+
+ __ bind(&not_in_dictionary);
+ __ mov(result_, Immediate(0));
+ __ Drop(1);
+ __ ret(2 * kPointerSize);
+}
+
+
+void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(
+ Isolate* isolate) {
+ StoreBufferOverflowStub stub(isolate);
+ stub.GetCode();
+}
+
+
+// Takes the input in 3 registers: address_ value_ and object_. A pointer to
+// the value has just been written into the object, now this stub makes sure
+// we keep the GC informed. The word in the object where the value has been
+// written is in the address register.
+void RecordWriteStub::Generate(MacroAssembler* masm) {
+ Label skip_to_incremental_noncompacting;
+ Label skip_to_incremental_compacting;
+
+ // The first two instructions are generated with labels so as to get the
+ // offset fixed up correctly by the bind(Label*) call. We patch it back and
+ // forth between a compare instruction (a nop in this position) and the
+ // real branch when we start and stop incremental heap marking.
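+ // The short jmp occupies bytes 0-1 and the far jmp bytes 2-6. Patching byte
+ // 0 to 0x3c (cmpb al, imm8) turns the short jmp's offset byte into the
+ // compare's immediate, and patching byte 2 to 0x3d (cmpl eax, imm32)
+ // likewise swallows the far jmp's 32-bit offset; see GetMode() and Patch()
+ // in the header.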
+ __ jmp(&skip_to_incremental_noncompacting, Label::kNear);
+ __ jmp(&skip_to_incremental_compacting, Label::kFar);
+
+ if (remembered_set_action_ == EMIT_REMEMBERED_SET) {
+ __ RememberedSetHelper(object_,
+ address_,
+ value_,
+ MacroAssembler::kReturnAtEnd);
+ } else {
+ __ ret(0);
+ }
+
+ __ bind(&skip_to_incremental_noncompacting);
+ GenerateIncremental(masm, INCREMENTAL);
+
+ __ bind(&skip_to_incremental_compacting);
+ GenerateIncremental(masm, INCREMENTAL_COMPACTION);
+
+ // Initial mode of the stub is expected to be STORE_BUFFER_ONLY.
+ // Will be checked in IncrementalMarking::ActivateGeneratedStub.
+ masm->set_byte_at(0, kTwoByteNopInstruction);
+ masm->set_byte_at(2, kFiveByteNopInstruction);
+}
+
+
+void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
+ regs_.Save(masm);
+
+ if (remembered_set_action_ == EMIT_REMEMBERED_SET) {
+ Label dont_need_remembered_set;
+
+ __ mov(regs_.scratch0(), Operand(regs_.address(), 0));
+ __ JumpIfNotInNewSpace(regs_.scratch0(), // Value.
+ regs_.scratch0(),
+ &dont_need_remembered_set);
+
+ __ CheckPageFlag(regs_.object(),
+ regs_.scratch0(),
+ 1 << MemoryChunk::SCAN_ON_SCAVENGE,
+ not_zero,
+ &dont_need_remembered_set);
+
+ // First notify the incremental marker if necessary, then update the
+ // remembered set.
+ CheckNeedsToInformIncrementalMarker(
+ masm,
+ kUpdateRememberedSetOnNoNeedToInformIncrementalMarker,
+ mode);
+ InformIncrementalMarker(masm);
+ regs_.Restore(masm);
+ __ RememberedSetHelper(object_,
+ address_,
+ value_,
+ MacroAssembler::kReturnAtEnd);
+
+ __ bind(&dont_need_remembered_set);
+ }
+
+ CheckNeedsToInformIncrementalMarker(
+ masm,
+ kReturnOnNoNeedToInformIncrementalMarker,
+ mode);
+ InformIncrementalMarker(masm);
+ regs_.Restore(masm);
+ __ ret(0);
+}
+
+
+void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm) {
+ regs_.SaveCallerSaveRegisters(masm);
+ int argument_count = 3;
+ __ PrepareCallCFunction(argument_count, regs_.scratch0());
+ __ mov(Operand(esp, 0 * kPointerSize), regs_.object());
+ __ mov(Operand(esp, 1 * kPointerSize), regs_.address()); // Slot.
+ __ mov(Operand(esp, 2 * kPointerSize),
+ Immediate(ExternalReference::isolate_address(isolate())));
+
+ AllowExternalCallThatCantCauseGC scope(masm);
+ __ CallCFunction(
+ ExternalReference::incremental_marking_record_write_function(isolate()),
+ argument_count);
+
+ regs_.RestoreCallerSaveRegisters(masm);
+}
+
+
+void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
+ MacroAssembler* masm,
+ OnNoNeedToInformIncrementalMarker on_no_need,
+ Mode mode) {
+ Label object_is_black, need_incremental, need_incremental_pop_object;
+
+ __ mov(regs_.scratch0(), Immediate(~Page::kPageAlignmentMask));
+ __ and_(regs_.scratch0(), regs_.object());
+ __ mov(regs_.scratch1(),
+ Operand(regs_.scratch0(),
+ MemoryChunk::kWriteBarrierCounterOffset));
+ __ sub(regs_.scratch1(), Immediate(1));
+ __ mov(Operand(regs_.scratch0(),
+ MemoryChunk::kWriteBarrierCounterOffset),
+ regs_.scratch1());
+ __ j(negative, &need_incremental);
+
+ // Let's look at the color of the object: If it is not black we don't have
+ // to inform the incremental marker.
+ __ JumpIfBlack(regs_.object(),
+ regs_.scratch0(),
+ regs_.scratch1(),
+ &object_is_black,
+ Label::kNear);
+
+ regs_.Restore(masm);
+ if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
+ __ RememberedSetHelper(object_,
+ address_,
+ value_,
+ MacroAssembler::kReturnAtEnd);
+ } else {
+ __ ret(0);
+ }
+
+ __ bind(&object_is_black);
+
+ // Get the value from the slot.
+ __ mov(regs_.scratch0(), Operand(regs_.address(), 0));
+
+ if (mode == INCREMENTAL_COMPACTION) {
+ Label ensure_not_white;
+
+ __ CheckPageFlag(regs_.scratch0(), // Contains value.
+ regs_.scratch1(), // Scratch.
+ MemoryChunk::kEvacuationCandidateMask,
+ zero,
+ &ensure_not_white,
+ Label::kNear);
+
+ __ CheckPageFlag(regs_.object(),
+ regs_.scratch1(), // Scratch.
+ MemoryChunk::kSkipEvacuationSlotsRecordingMask,
+ not_zero,
+ &ensure_not_white,
+ Label::kNear);
+
+ __ jmp(&need_incremental);
+
+ __ bind(&ensure_not_white);
+ }
+
+ // We need an extra register for this, so we push the object register
+ // temporarily.
+ __ push(regs_.object());
+ __ EnsureNotWhite(regs_.scratch0(), // The value.
+ regs_.scratch1(), // Scratch.
+ regs_.object(), // Scratch.
+ &need_incremental_pop_object,
+ Label::kNear);
+ __ pop(regs_.object());
+
+ regs_.Restore(masm);
+ if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
+ __ RememberedSetHelper(object_,
+ address_,
+ value_,
+ MacroAssembler::kReturnAtEnd);
+ } else {
+ __ ret(0);
+ }
+
+ __ bind(&need_incremental_pop_object);
+ __ pop(regs_.object());
+
+ __ bind(&need_incremental);
+
+ // Fall through when we need to inform the incremental marker.
+}
+
+
+void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- eax : element value to store
+ // -- ecx : element index as smi
+ // -- esp[0] : return address
+ // -- esp[4] : array literal index in function
+ // -- esp[8] : array literal
+ // clobbers ebx, edx, edi
+ // -----------------------------------
+
+ Label element_done;
+ Label double_elements;
+ Label smi_element;
+ Label slow_elements;
+ Label slow_elements_from_double;
+ Label fast_elements;
+
+ // Get array literal index, array literal and its map.
+ __ mov(edx, Operand(esp, 1 * kPointerSize));
+ __ mov(ebx, Operand(esp, 2 * kPointerSize));
+ __ mov(edi, FieldOperand(ebx, JSObject::kMapOffset));
+
+ __ CheckFastElements(edi, &double_elements);
+
+ // Check for FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS elements
+ __ JumpIfSmi(eax, &smi_element);
+ __ CheckFastSmiElements(edi, &fast_elements, Label::kNear);
+
+ // Storing into the array literal requires an elements transition. Call into
+ // the runtime.
+
+ __ bind(&slow_elements);
+ __ pop(edi); // Pop the return address; it is pushed back below so the
+ // tail call returns to the right place.
+ __ push(ebx);
+ __ push(ecx);
+ __ push(eax);
+ __ mov(ebx, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
+ __ push(FieldOperand(ebx, JSFunction::kLiteralsOffset));
+ __ push(edx);
+ __ push(edi); // Push the return address back so that the tail call
+ // returns to the right place.
+ __ TailCallRuntime(Runtime::kStoreArrayLiteralElement, 5, 1);
+
+ __ bind(&slow_elements_from_double);
+ __ pop(edx);
+ __ jmp(&slow_elements);
+
+ // Array literal has ElementsKind of FAST_*_ELEMENTS and value is an object.
+ __ bind(&fast_elements);
+ __ mov(ebx, FieldOperand(ebx, JSObject::kElementsOffset));
+ __ lea(ecx, FieldOperand(ebx, ecx, times_half_pointer_size,
+ FixedArrayBase::kHeaderSize));
+ __ mov(Operand(ecx, 0), eax);
+ // Update the write barrier for the array store.
+ __ RecordWrite(ebx, ecx, eax,
+ EMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
+ __ ret(0);
+
+ // Array literal has ElementsKind of FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS,
+ // and value is Smi.
+ __ bind(&smi_element);
+ __ mov(ebx, FieldOperand(ebx, JSObject::kElementsOffset));
+ __ mov(FieldOperand(ebx, ecx, times_half_pointer_size,
+ FixedArrayBase::kHeaderSize), eax);
+ __ ret(0);
+
+ // Array literal has ElementsKind of FAST_*_DOUBLE_ELEMENTS.
+ __ bind(&double_elements);
+
+ __ push(edx);
+ __ mov(edx, FieldOperand(ebx, JSObject::kElementsOffset));
+ __ StoreNumberToDoubleElements(eax,
+ edx,
+ ecx,
+ edi,
+ &slow_elements_from_double,
+ false);
+ __ pop(edx);
+ __ ret(0);
+}
+
+
+void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
+ CEntryStub ces(isolate(), 1);
+ __ call(ces.GetCode(), RelocInfo::CODE_TARGET);
+ int parameter_count_offset =
+ StubFailureTrampolineFrame::kCallerStackParameterCountFrameOffset;
+ __ mov(ebx, MemOperand(ebp, parameter_count_offset));
+ masm->LeaveFrame(StackFrame::STUB_FAILURE_TRAMPOLINE);
+ __ pop(ecx);
+ int additional_offset = function_mode_ == JS_FUNCTION_STUB_MODE
+ ? kPointerSize
+ : 0;
+ __ lea(esp, MemOperand(esp, ebx, times_pointer_size, additional_offset));
+ __ jmp(ecx); // Return to IC Miss stub, continuation still on stack.
+}
+
+
+void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
+ if (masm->isolate()->function_entry_hook() != NULL) {
+ ProfileEntryHookStub stub(masm->isolate());
+ masm->CallStub(&stub);
+ }
+}
+
+
+void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
+ // Save volatile registers.
+ const int kNumSavedRegisters = 3;
+ __ push(eax);
+ __ push(ecx);
+ __ push(edx);
+
+ // Calculate and push the original stack pointer.
+ __ lea(eax, Operand(esp, (kNumSavedRegisters + 1) * kPointerSize));
+ __ push(eax);
+
+ // Retrieve our return address and use it to calculate the calling
+ // function's address.
+ __ mov(eax, Operand(esp, (kNumSavedRegisters + 1) * kPointerSize));
+ __ sub(eax, Immediate(Assembler::kCallInstructionLength));
+ __ push(eax);
+
+ // Call the entry hook.
+ ASSERT(isolate()->function_entry_hook() != NULL);
+ __ call(FUNCTION_ADDR(isolate()->function_entry_hook()),
+ RelocInfo::RUNTIME_ENTRY);
+ __ add(esp, Immediate(2 * kPointerSize));
+
+ // Restore the saved registers.
+ __ pop(edx);
+ __ pop(ecx);
+ __ pop(eax);
+
+ __ ret(0);
+}
+
+
+template<class T>
+static void CreateArrayDispatch(MacroAssembler* masm,
+ AllocationSiteOverrideMode mode) {
+ if (mode == DISABLE_ALLOCATION_SITES) {
+ T stub(masm->isolate(),
+ GetInitialFastElementsKind(),
+ mode);
+ __ TailCallStub(&stub);
+ } else if (mode == DONT_OVERRIDE) {
+ int last_index = GetSequenceIndexFromFastElementsKind(
+ TERMINAL_FAST_ELEMENTS_KIND);
+ for (int i = 0; i <= last_index; ++i) {
+ Label next;
+ ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
+ __ cmp(edx, kind);
+ __ j(not_equal, &next);
+ T stub(masm->isolate(), kind);
+ __ TailCallStub(&stub);
+ __ bind(&next);
+ }
+
+ // If we reached this point there is a problem.
+ __ Abort(kUnexpectedElementsKindInArrayConstructor);
+ } else {
+ UNREACHABLE();
+ }
+}
+
+
+static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
+ AllocationSiteOverrideMode mode) {
+ // ebx - allocation site (if mode != DISABLE_ALLOCATION_SITES)
+ // edx - kind (if mode != DISABLE_ALLOCATION_SITES)
+ // eax - number of arguments
+ // edi - constructor?
+ // esp[0] - return address
+ // esp[4] - last argument
+ Label normal_sequence;
+ if (mode == DONT_OVERRIDE) {
+ ASSERT(FAST_SMI_ELEMENTS == 0);
+ ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
+ ASSERT(FAST_ELEMENTS == 2);
+ ASSERT(FAST_HOLEY_ELEMENTS == 3);
+ ASSERT(FAST_DOUBLE_ELEMENTS == 4);
+ ASSERT(FAST_HOLEY_DOUBLE_ELEMENTS == 5);
+
+ // is the low bit set? If so, we are holey and that is good.
+ __ test_b(edx, 1);
+ __ j(not_zero, &normal_sequence);
+ }
+
+ // look at the first argument
+ __ mov(ecx, Operand(esp, kPointerSize));
+ __ test(ecx, ecx);
+ __ j(zero, &normal_sequence);
+
+ if (mode == DISABLE_ALLOCATION_SITES) {
+ ElementsKind initial = GetInitialFastElementsKind();
+ ElementsKind holey_initial = GetHoleyElementsKind(initial);
+
+ ArraySingleArgumentConstructorStub stub_holey(masm->isolate(),
+ holey_initial,
+ DISABLE_ALLOCATION_SITES);
+ __ TailCallStub(&stub_holey);
+
+ __ bind(&normal_sequence);
+ ArraySingleArgumentConstructorStub stub(masm->isolate(),
+ initial,
+ DISABLE_ALLOCATION_SITES);
+ __ TailCallStub(&stub);
+ } else if (mode == DONT_OVERRIDE) {
+ // We are going to create a holey array, but our kind is non-holey.
+ // Fix kind and retry.
+ __ inc(edx);
+
+ if (FLAG_debug_code) {
+ Handle<Map> allocation_site_map =
+ masm->isolate()->factory()->allocation_site_map();
+ __ cmp(FieldOperand(ebx, 0), Immediate(allocation_site_map));
+ __ Assert(equal, kExpectedAllocationSite);
+ }
+
+ // Save the resulting elements kind in type info. We can't just store the
+ // kind in the AllocationSite::transition_info field because elements kind
+ // is restricted to a portion of the field; the upper bits need to be left
+ // alone.
+ STATIC_ASSERT(AllocationSite::ElementsKindBits::kShift == 0);
+ __ add(FieldOperand(ebx, AllocationSite::kTransitionInfoOffset),
+ Immediate(Smi::FromInt(kFastElementsKindPackedToHoley)));
+
+ __ bind(&normal_sequence);
+ int last_index = GetSequenceIndexFromFastElementsKind(
+ TERMINAL_FAST_ELEMENTS_KIND);
+ for (int i = 0; i <= last_index; ++i) {
+ Label next;
+ ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
+ __ cmp(edx, kind);
+ __ j(not_equal, &next);
+ ArraySingleArgumentConstructorStub stub(masm->isolate(), kind);
+ __ TailCallStub(&stub);
+ __ bind(&next);
+ }
+
+ // If we reached this point there is a problem.
+ __ Abort(kUnexpectedElementsKindInArrayConstructor);
+ } else {
+ UNREACHABLE();
+ }
+}
+
+
+template<class T>
+static void ArrayConstructorStubAheadOfTimeHelper(Isolate* isolate) {
+ int to_index = GetSequenceIndexFromFastElementsKind(
+ TERMINAL_FAST_ELEMENTS_KIND);
+ for (int i = 0; i <= to_index; ++i) {
+ ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
+ T stub(isolate, kind);
+ stub.GetCode();
+ if (AllocationSite::GetMode(kind) != DONT_TRACK_ALLOCATION_SITE) {
+ T stub1(isolate, kind, DISABLE_ALLOCATION_SITES);
+ stub1.GetCode();
+ }
+ }
+}
+
+
+void ArrayConstructorStubBase::GenerateStubsAheadOfTime(Isolate* isolate) {
+ ArrayConstructorStubAheadOfTimeHelper<ArrayNoArgumentConstructorStub>(
+ isolate);
+ ArrayConstructorStubAheadOfTimeHelper<ArraySingleArgumentConstructorStub>(
+ isolate);
+ ArrayConstructorStubAheadOfTimeHelper<ArrayNArgumentsConstructorStub>(
+ isolate);
+}
+
+
+void InternalArrayConstructorStubBase::GenerateStubsAheadOfTime(
+ Isolate* isolate) {
+ ElementsKind kinds[2] = { FAST_ELEMENTS, FAST_HOLEY_ELEMENTS };
+ for (int i = 0; i < 2; i++) {
+ // For internal arrays we only need a few things
+ InternalArrayNoArgumentConstructorStub stubh1(isolate, kinds[i]);
+ stubh1.GetCode();
+ InternalArraySingleArgumentConstructorStub stubh2(isolate, kinds[i]);
+ stubh2.GetCode();
+ InternalArrayNArgumentsConstructorStub stubh3(isolate, kinds[i]);
+ stubh3.GetCode();
+ }
+}
+
+
+void ArrayConstructorStub::GenerateDispatchToArrayStub(
+ MacroAssembler* masm,
+ AllocationSiteOverrideMode mode) {
+ if (argument_count_ == ANY) {
+ Label not_zero_case, not_one_case;
+ __ test(eax, eax);
+ __ j(not_zero, &not_zero_case);
+ CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
+
+ __ bind(&not_zero_case);
+ __ cmp(eax, 1);
+ __ j(greater, &not_one_case);
+ CreateArrayDispatchOneArgument(masm, mode);
+
+ __ bind(&not_one_case);
+ CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm, mode);
+ } else if (argument_count_ == NONE) {
+ CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
+ } else if (argument_count_ == ONE) {
+ CreateArrayDispatchOneArgument(masm, mode);
+ } else if (argument_count_ == MORE_THAN_ONE) {
+ CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm, mode);
+ } else {
+ UNREACHABLE();
+ }
+}
+
+
+void ArrayConstructorStub::Generate(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- eax : argc (only if argument_count_ == ANY)
+ // -- ebx : AllocationSite or undefined
+ // -- edi : constructor
+ // -- esp[0] : return address
+ // -- esp[4] : last argument
+ // -----------------------------------
+ if (FLAG_debug_code) {
+ // The array construct code is only set for the global and natives
+ // builtin Array functions which always have maps.
+
+ // Initial map for the builtin Array function should be a map.
+ __ mov(ecx, FieldOperand(edi, JSFunction::kPrototypeOrInitialMapOffset));
+ // Will both indicate a NULL and a Smi.
+ __ test(ecx, Immediate(kSmiTagMask));
+ __ Assert(not_zero, kUnexpectedInitialMapForArrayFunction);
+ __ CmpObjectType(ecx, MAP_TYPE, ecx);
+ __ Assert(equal, kUnexpectedInitialMapForArrayFunction);
+
+ // We should either have undefined in ebx or a valid AllocationSite
+ __ AssertUndefinedOrAllocationSite(ebx);
+ }
+
+ Label no_info;
+ // If ebx holds undefined rather than an AllocationSite, call an array
+ // constructor that doesn't use AllocationSites.
+ __ cmp(ebx, isolate()->factory()->undefined_value());
+ __ j(equal, &no_info);
+
+ // Only look at the lower 16 bits of the transition info.
+ __ mov(edx, FieldOperand(ebx, AllocationSite::kTransitionInfoOffset));
+ __ SmiUntag(edx);
+ STATIC_ASSERT(AllocationSite::ElementsKindBits::kShift == 0);
+ __ and_(edx, Immediate(AllocationSite::ElementsKindBits::kMask));
+ GenerateDispatchToArrayStub(masm, DONT_OVERRIDE);
+
+ __ bind(&no_info);
+ GenerateDispatchToArrayStub(masm, DISABLE_ALLOCATION_SITES);
+}
+
+
+void InternalArrayConstructorStub::GenerateCase(
+ MacroAssembler* masm, ElementsKind kind) {
+ Label not_zero_case, not_one_case;
+ Label normal_sequence;
+
+ __ test(eax, eax);
+ __ j(not_zero, &not_zero_case);
+ InternalArrayNoArgumentConstructorStub stub0(isolate(), kind);
+ __ TailCallStub(&stub0);
+
+ __ bind(&not_zero_case);
+ __ cmp(eax, 1);
+ __ j(greater, &not_one_case);
+
+ if (IsFastPackedElementsKind(kind)) {
+ // We might need to create a holey array
+ // look at the first argument
+ __ mov(ecx, Operand(esp, kPointerSize));
+ __ test(ecx, ecx);
+ __ j(zero, &normal_sequence);
+
+ InternalArraySingleArgumentConstructorStub
+ stub1_holey(isolate(), GetHoleyElementsKind(kind));
+ __ TailCallStub(&stub1_holey);
+ }
+
+ __ bind(&normal_sequence);
+ InternalArraySingleArgumentConstructorStub stub1(isolate(), kind);
+ __ TailCallStub(&stub1);
+
+ __ bind(&not_one_case);
+ InternalArrayNArgumentsConstructorStub stubN(isolate(), kind);
+ __ TailCallStub(&stubN);
+}
+
+
+void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- eax : argc
+ // -- edi : constructor
+ // -- esp[0] : return address
+ // -- esp[4] : last argument
+ // -----------------------------------
+
+ if (FLAG_debug_code) {
+ // The array construct code is only set for the global and natives
+ // builtin Array functions which always have maps.
+
+ // Initial map for the builtin Array function should be a map.
+ __ mov(ecx, FieldOperand(edi, JSFunction::kPrototypeOrInitialMapOffset));
+ // Will both indicate a NULL and a Smi.
+ __ test(ecx, Immediate(kSmiTagMask));
+ __ Assert(not_zero, kUnexpectedInitialMapForArrayFunction);
+ __ CmpObjectType(ecx, MAP_TYPE, ecx);
+ __ Assert(equal, kUnexpectedInitialMapForArrayFunction);
+ }
+
+ // Figure out the right elements kind
+ __ mov(ecx, FieldOperand(edi, JSFunction::kPrototypeOrInitialMapOffset));
+
+ // Load the map's "bit field 2" into ecx. We only need the first byte,
+ // but the following masking takes care of that anyway.
+ __ mov(ecx, FieldOperand(ecx, Map::kBitField2Offset));
+ // Retrieve elements_kind from bit field 2.
+ __ and_(ecx, Map::kElementsKindMask);
+ __ shr(ecx, Map::kElementsKindShift);
+
+ if (FLAG_debug_code) {
+ Label done;
+ __ cmp(ecx, Immediate(FAST_ELEMENTS));
+ __ j(equal, &done);
+ __ cmp(ecx, Immediate(FAST_HOLEY_ELEMENTS));
+ __ Assert(equal,
+ kInvalidElementsKindForInternalArrayOrInternalPackedArray);
+ __ bind(&done);
+ }
+
+ Label fast_elements_case;
+ __ cmp(ecx, Immediate(FAST_ELEMENTS));
+ __ j(equal, &fast_elements_case);
+ GenerateCase(masm, FAST_HOLEY_ELEMENTS);
+
+ __ bind(&fast_elements_case);
+ GenerateCase(masm, FAST_ELEMENTS);
+}
+
+
+void CallApiFunctionStub::Generate(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- eax : callee
+ // -- ebx : call_data
+ // -- ecx : holder
+ // -- edx : api_function_address
+ // -- esi : context
+ // --
+ // -- esp[0] : return address
+ // -- esp[4] : last argument
+ // -- ...
+ // -- esp[argc * 4] : first argument
+ // -- esp[(argc + 1) * 4] : receiver
+ // -----------------------------------
+
+ Register callee = eax;
+ Register call_data = ebx;
+ Register holder = ecx;
+ Register api_function_address = edx;
+ Register return_address = edi;
+ Register context = esi;
+
+ int argc = ArgumentBits::decode(bit_field_);
+ bool is_store = IsStoreBits::decode(bit_field_);
+ bool call_data_undefined = CallDataUndefinedBits::decode(bit_field_);
+
+ typedef FunctionCallbackArguments FCA;
+
+ STATIC_ASSERT(FCA::kContextSaveIndex == 6);
+ STATIC_ASSERT(FCA::kCalleeIndex == 5);
+ STATIC_ASSERT(FCA::kDataIndex == 4);
+ STATIC_ASSERT(FCA::kReturnValueOffset == 3);
+ STATIC_ASSERT(FCA::kReturnValueDefaultValueIndex == 2);
+ STATIC_ASSERT(FCA::kIsolateIndex == 1);
+ STATIC_ASSERT(FCA::kHolderIndex == 0);
+ STATIC_ASSERT(FCA::kArgsLength == 7);
+
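+ // The pushes below build FunctionCallbackInfo::implicit_args_ on the stack
+ // in reverse index order, so the holder (index 0) ends up at the lowest
+ // address.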
+ __ pop(return_address);
+
+ // context save
+ __ push(context);
+ // load context from callee
+ __ mov(context, FieldOperand(callee, JSFunction::kContextOffset));
+
+ // callee
+ __ push(callee);
+
+ // call data
+ __ push(call_data);
+
+ Register scratch = call_data;
+ if (!call_data_undefined) {
+ // return value
+ __ push(Immediate(isolate()->factory()->undefined_value()));
+ // return value default
+ __ push(Immediate(isolate()->factory()->undefined_value()));
+ } else {
+ // return value
+ __ push(scratch);
+ // return value default
+ __ push(scratch);
+ }
+ // isolate
+ __ push(Immediate(reinterpret_cast<int>(isolate())));
+ // holder
+ __ push(holder);
+
+ __ mov(scratch, esp);
+
+ // return address
+ __ push(return_address);
+
+ // The API function gets a reference to the v8::Arguments. If the CPU
+ // profiler is enabled, a wrapper function is called instead and the
+ // address of the callback must be passed as an additional parameter,
+ // so always allocate space for it.
+ const int kApiArgc = 1 + 1;
+
+ // Allocate the v8::Arguments structure in the arguments' space since
+ // it's not controlled by GC.
+ const int kApiStackSpace = 4;
+
+ __ PrepareCallApiFunction(kApiArgc + kApiStackSpace);
+
+ // FunctionCallbackInfo::implicit_args_.
+ __ mov(ApiParameterOperand(2), scratch);
+ __ add(scratch, Immediate((argc + FCA::kArgsLength - 1) * kPointerSize));
+ // FunctionCallbackInfo::values_.
+ __ mov(ApiParameterOperand(3), scratch);
+ // FunctionCallbackInfo::length_.
+ __ Move(ApiParameterOperand(4), Immediate(argc));
+ // FunctionCallbackInfo::is_construct_call_.
+ __ Move(ApiParameterOperand(5), Immediate(0));
+
+ // v8::InvocationCallback's argument.
+ __ lea(scratch, ApiParameterOperand(2));
+ __ mov(ApiParameterOperand(0), scratch);
+
+ ExternalReference thunk_ref =
+ ExternalReference::invoke_function_callback(isolate());
+
+ Operand context_restore_operand(ebp,
+ (2 + FCA::kContextSaveIndex) * kPointerSize);
+ // Stores return the first js argument
+ int return_value_offset = 0;
+ if (is_store) {
+ return_value_offset = 2 + FCA::kArgsLength;
+ } else {
+ return_value_offset = 2 + FCA::kReturnValueOffset;
+ }
+ Operand return_value_operand(ebp, return_value_offset * kPointerSize);
+ __ CallApiFunctionAndReturn(api_function_address,
+ thunk_ref,
+ ApiParameterOperand(1),
+ argc + FCA::kArgsLength + 1,
+ return_value_operand,
+ &context_restore_operand);
+}
+
+
+void CallApiGetterStub::Generate(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- esp[0] : return address
+ // -- esp[4] : name
+ // -- esp[8 - kArgsLength*4] : PropertyCallbackArguments object
+ // -- ...
+ // -- edx : api_function_address
+ // -----------------------------------
+
+ // Space for the v8::Arguments::values_ array, the handle for the name,
+ // and a pointer to the values (treated as a smi by the GC).
+ const int kStackSpace = PropertyCallbackArguments::kArgsLength + 2;
+ // Allocate space for an optional callback address parameter in case
+ // the CPU profiler is active.
+ const int kApiArgc = 2 + 1;
+
+ Register api_function_address = edx;
+ Register scratch = ebx;
+
+ // load address of name
+ __ lea(scratch, Operand(esp, 1 * kPointerSize));
+
+ __ PrepareCallApiFunction(kApiArgc);
+ __ mov(ApiParameterOperand(0), scratch); // name.
+ __ add(scratch, Immediate(kPointerSize));
+ __ mov(ApiParameterOperand(1), scratch); // arguments pointer.
+
+ ExternalReference thunk_ref =
+ ExternalReference::invoke_accessor_getter_callback(isolate());
+
+ __ CallApiFunctionAndReturn(api_function_address,
+ thunk_ref,
+ ApiParameterOperand(2),
+ kStackSpace,
+ Operand(ebp, 7 * kPointerSize),
+ NULL);
+}
+
+
+#undef __
+
+} } // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_X87
--- /dev/null
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_X87_CODE_STUBS_X87_H_
+#define V8_X87_CODE_STUBS_X87_H_
+
+#include "macro-assembler.h"
+#include "ic-inl.h"
+
+namespace v8 {
+namespace internal {
+
+
+void ArrayNativeCode(MacroAssembler* masm,
+ bool construct_call,
+ Label* call_generic_code);
+
+
+class StoreBufferOverflowStub: public PlatformCodeStub {
+ public:
+ explicit StoreBufferOverflowStub(Isolate* isolate)
+ : PlatformCodeStub(isolate) { }
+
+ void Generate(MacroAssembler* masm);
+
+ static void GenerateFixedRegStubsAheadOfTime(Isolate* isolate);
+ virtual bool SometimesSetsUpAFrame() { return false; }
+
+ private:
+ Major MajorKey() { return StoreBufferOverflow; }
+ int MinorKey() { return 0; }
+};
+
+
+class StringHelper : public AllStatic {
+ public:
+ // Generate code for copying characters using the rep movs instruction.
+ // Copies ecx characters from esi to edi. Copying of overlapping regions is
+ // not supported.
+ static void GenerateCopyCharactersREP(MacroAssembler* masm,
+ Register dest, // Must be edi.
+ Register src, // Must be esi.
+ Register count, // Must be ecx.
+ Register scratch, // Neither of above.
+ bool ascii);
+
+ // Generate string hash.
+ static void GenerateHashInit(MacroAssembler* masm,
+ Register hash,
+ Register character,
+ Register scratch);
+ static void GenerateHashAddCharacter(MacroAssembler* masm,
+ Register hash,
+ Register character,
+ Register scratch);
+ static void GenerateHashGetHash(MacroAssembler* masm,
+ Register hash,
+ Register scratch);
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(StringHelper);
+};
+
+
+class SubStringStub: public PlatformCodeStub {
+ public:
+ explicit SubStringStub(Isolate* isolate) : PlatformCodeStub(isolate) {}
+
+ private:
+ Major MajorKey() { return SubString; }
+ int MinorKey() { return 0; }
+
+ void Generate(MacroAssembler* masm);
+};
+
+
+class StringCompareStub: public PlatformCodeStub {
+ public:
+ explicit StringCompareStub(Isolate* isolate) : PlatformCodeStub(isolate) { }
+
+ // Compares two flat ASCII strings and returns result in eax.
+ static void GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
+ Register left,
+ Register right,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3);
+
+ // Compares two flat ASCII strings for equality and returns result
+ // in eax.
+ static void GenerateFlatAsciiStringEquals(MacroAssembler* masm,
+ Register left,
+ Register right,
+ Register scratch1,
+ Register scratch2);
+
+ private:
+ virtual Major MajorKey() { return StringCompare; }
+ virtual int MinorKey() { return 0; }
+ virtual void Generate(MacroAssembler* masm);
+
+ static void GenerateAsciiCharsCompareLoop(
+ MacroAssembler* masm,
+ Register left,
+ Register right,
+ Register length,
+ Register scratch,
+ Label* chars_not_equal,
+ Label::Distance chars_not_equal_near = Label::kFar);
+};
+
+
+class NameDictionaryLookupStub: public PlatformCodeStub {
+ public:
+ enum LookupMode { POSITIVE_LOOKUP, NEGATIVE_LOOKUP };
+
+ NameDictionaryLookupStub(Isolate* isolate,
+ Register dictionary,
+ Register result,
+ Register index,
+ LookupMode mode)
+ : PlatformCodeStub(isolate),
+ dictionary_(dictionary), result_(result), index_(index), mode_(mode) { }
+
+ void Generate(MacroAssembler* masm);
+
+ static void GenerateNegativeLookup(MacroAssembler* masm,
+ Label* miss,
+ Label* done,
+ Register properties,
+ Handle<Name> name,
+ Register r0);
+
+ static void GeneratePositiveLookup(MacroAssembler* masm,
+ Label* miss,
+ Label* done,
+ Register elements,
+ Register name,
+ Register r0,
+ Register r1);
+
+ virtual bool SometimesSetsUpAFrame() { return false; }
+
+ private:
+ static const int kInlinedProbes = 4;
+ static const int kTotalProbes = 20;
+
+ static const int kCapacityOffset =
+ NameDictionary::kHeaderSize +
+ NameDictionary::kCapacityIndex * kPointerSize;
+
+ static const int kElementsStartOffset =
+ NameDictionary::kHeaderSize +
+ NameDictionary::kElementsStartIndex * kPointerSize;
+
+ Major MajorKey() { return NameDictionaryLookup; }
+
+ int MinorKey() {
+ return DictionaryBits::encode(dictionary_.code()) |
+ ResultBits::encode(result_.code()) |
+ IndexBits::encode(index_.code()) |
+ LookupModeBits::encode(mode_);
+ }
+
+ class DictionaryBits: public BitField<int, 0, 3> {};
+ class ResultBits: public BitField<int, 3, 3> {};
+ class IndexBits: public BitField<int, 6, 3> {};
+ class LookupModeBits: public BitField<LookupMode, 9, 1> {};
+
+ Register dictionary_;
+ Register result_;
+ Register index_;
+ LookupMode mode_;
+};
+
+
+class RecordWriteStub: public PlatformCodeStub {
+ public:
+ RecordWriteStub(Isolate* isolate,
+ Register object,
+ Register value,
+ Register address,
+ RememberedSetAction remembered_set_action)
+ : PlatformCodeStub(isolate),
+ object_(object),
+ value_(value),
+ address_(address),
+ remembered_set_action_(remembered_set_action),
+ regs_(object, // An input reg.
+ address, // An input reg.
+ value) { // One scratch reg.
+ }
+
+ enum Mode {
+ STORE_BUFFER_ONLY,
+ INCREMENTAL,
+ INCREMENTAL_COMPACTION
+ };
+
+ virtual bool SometimesSetsUpAFrame() { return false; }
+
+ static const byte kTwoByteNopInstruction = 0x3c; // Cmpb al, #imm8.
+ static const byte kTwoByteJumpInstruction = 0xeb; // Jmp #imm8.
+
+ static const byte kFiveByteNopInstruction = 0x3d; // Cmpl eax, #imm32.
+ static const byte kFiveByteJumpInstruction = 0xe9; // Jmp #imm32.
+
+ static Mode GetMode(Code* stub) {
+ byte first_instruction = stub->instruction_start()[0];
+ byte second_instruction = stub->instruction_start()[2];
+
+ if (first_instruction == kTwoByteJumpInstruction) {
+ return INCREMENTAL;
+ }
+
+ ASSERT(first_instruction == kTwoByteNopInstruction);
+
+ if (second_instruction == kFiveByteJumpInstruction) {
+ return INCREMENTAL_COMPACTION;
+ }
+
+ ASSERT(second_instruction == kFiveByteNopInstruction);
+
+ return STORE_BUFFER_ONLY;
+ }
+
+ static void Patch(Code* stub, Mode mode) {
+ switch (mode) {
+ case STORE_BUFFER_ONLY:
+ ASSERT(GetMode(stub) == INCREMENTAL ||
+ GetMode(stub) == INCREMENTAL_COMPACTION);
+ stub->instruction_start()[0] = kTwoByteNopInstruction;
+ stub->instruction_start()[2] = kFiveByteNopInstruction;
+ break;
+ case INCREMENTAL:
+ ASSERT(GetMode(stub) == STORE_BUFFER_ONLY);
+ stub->instruction_start()[0] = kTwoByteJumpInstruction;
+ break;
+ case INCREMENTAL_COMPACTION:
+ ASSERT(GetMode(stub) == STORE_BUFFER_ONLY);
+ stub->instruction_start()[0] = kTwoByteNopInstruction;
+ stub->instruction_start()[2] = kFiveByteJumpInstruction;
+ break;
+ }
+ ASSERT(GetMode(stub) == mode);
+ CPU::FlushICache(stub->instruction_start(), 7);
+ }
+
+ private:
+ // This is a helper class for freeing up 3 scratch registers, where the third
+ // is always ecx (needed for shift operations). The input is two registers
+ // that must be preserved and one scratch register provided by the caller.
+ class RegisterAllocation {
+ public:
+ RegisterAllocation(Register object,
+ Register address,
+ Register scratch0)
+ : object_orig_(object),
+ address_orig_(address),
+ scratch0_orig_(scratch0),
+ object_(object),
+ address_(address),
+ scratch0_(scratch0) {
+ ASSERT(!AreAliased(scratch0, object, address, no_reg));
+ scratch1_ = GetRegThatIsNotEcxOr(object_, address_, scratch0_);
+ if (scratch0.is(ecx)) {
+ scratch0_ = GetRegThatIsNotEcxOr(object_, address_, scratch1_);
+ }
+ if (object.is(ecx)) {
+ object_ = GetRegThatIsNotEcxOr(address_, scratch0_, scratch1_);
+ }
+ if (address.is(ecx)) {
+ address_ = GetRegThatIsNotEcxOr(object_, scratch0_, scratch1_);
+ }
+ ASSERT(!AreAliased(scratch0_, object_, address_, ecx));
+ }
+
+ void Save(MacroAssembler* masm) {
+ ASSERT(!address_orig_.is(object_));
+ ASSERT(object_.is(object_orig_) || address_.is(address_orig_));
+ ASSERT(!AreAliased(object_, address_, scratch1_, scratch0_));
+ ASSERT(!AreAliased(object_orig_, address_, scratch1_, scratch0_));
+ ASSERT(!AreAliased(object_, address_orig_, scratch1_, scratch0_));
+ // We don't have to save scratch0_orig_ because it was given to us as
+ // a scratch register. But if we had to switch to a different reg then
+ // we should save the new scratch0_.
+ if (!scratch0_.is(scratch0_orig_)) masm->push(scratch0_);
+ if (!ecx.is(scratch0_orig_) &&
+ !ecx.is(object_orig_) &&
+ !ecx.is(address_orig_)) {
+ masm->push(ecx);
+ }
+ masm->push(scratch1_);
+ if (!address_.is(address_orig_)) {
+ masm->push(address_);
+ masm->mov(address_, address_orig_);
+ }
+ if (!object_.is(object_orig_)) {
+ masm->push(object_);
+ masm->mov(object_, object_orig_);
+ }
+ }
+
+ void Restore(MacroAssembler* masm) {
+ // These will have been preserved the entire time, so we just need to move
+ // them back. Only in one case is the orig_ reg different from the plain
+ // one, since only one of them can alias with ecx.
+ if (!object_.is(object_orig_)) {
+ masm->mov(object_orig_, object_);
+ masm->pop(object_);
+ }
+ if (!address_.is(address_orig_)) {
+ masm->mov(address_orig_, address_);
+ masm->pop(address_);
+ }
+ masm->pop(scratch1_);
+ if (!ecx.is(scratch0_orig_) &&
+ !ecx.is(object_orig_) &&
+ !ecx.is(address_orig_)) {
+ masm->pop(ecx);
+ }
+ if (!scratch0_.is(scratch0_orig_)) masm->pop(scratch0_);
+ }
+
+ // If we have to call into C then we need to save and restore all caller-
+ // saved registers that were not already preserved. The caller saved
+ // registers are eax, ecx and edx. The three scratch registers (incl. ecx)
+ // will be restored by other means so we don't bother pushing them here.
+ void SaveCallerSaveRegisters(MacroAssembler* masm) {
+ if (!scratch0_.is(eax) && !scratch1_.is(eax)) masm->push(eax);
+ if (!scratch0_.is(edx) && !scratch1_.is(edx)) masm->push(edx);
+ }
+
+ inline void RestoreCallerSaveRegisters(MacroAssembler* masm) {
+ if (!scratch0_.is(edx) && !scratch1_.is(edx)) masm->pop(edx);
+ if (!scratch0_.is(eax) && !scratch1_.is(eax)) masm->pop(eax);
+ }
+
+ inline Register object() { return object_; }
+ inline Register address() { return address_; }
+ inline Register scratch0() { return scratch0_; }
+ inline Register scratch1() { return scratch1_; }
+
+ private:
+ Register object_orig_;
+ Register address_orig_;
+ Register scratch0_orig_;
+ Register object_;
+ Register address_;
+ Register scratch0_;
+ Register scratch1_;
+ // Third scratch register is always ecx.
+
+ Register GetRegThatIsNotEcxOr(Register r1,
+ Register r2,
+ Register r3) {
+ for (int i = 0; i < Register::NumAllocatableRegisters(); i++) {
+ Register candidate = Register::FromAllocationIndex(i);
+ if (candidate.is(ecx)) continue;
+ if (candidate.is(r1)) continue;
+ if (candidate.is(r2)) continue;
+ if (candidate.is(r3)) continue;
+ return candidate;
+ }
+ UNREACHABLE();
+ return no_reg;
+ }
+ friend class RecordWriteStub;
+ };
+
+ enum OnNoNeedToInformIncrementalMarker {
+ kReturnOnNoNeedToInformIncrementalMarker,
+ kUpdateRememberedSetOnNoNeedToInformIncrementalMarker
+ };
+ void Generate(MacroAssembler* masm);
+ void GenerateIncremental(MacroAssembler* masm, Mode mode);
+ void CheckNeedsToInformIncrementalMarker(
+ MacroAssembler* masm,
+ OnNoNeedToInformIncrementalMarker on_no_need,
+ Mode mode);
+ void InformIncrementalMarker(MacroAssembler* masm);
+
+ Major MajorKey() { return RecordWrite; }
+
+ int MinorKey() {
+ return ObjectBits::encode(object_.code()) |
+ ValueBits::encode(value_.code()) |
+ AddressBits::encode(address_.code()) |
+ RememberedSetActionBits::encode(remembered_set_action_);
+ }
+
+ void Activate(Code* code) {
+ code->GetHeap()->incremental_marking()->ActivateGeneratedStub(code);
+ }
+
+ class ObjectBits: public BitField<int, 0, 3> {};
+ class ValueBits: public BitField<int, 3, 3> {};
+ class AddressBits: public BitField<int, 6, 3> {};
+ class RememberedSetActionBits: public BitField<RememberedSetAction, 9, 1> {};
+
+ Register object_;
+ Register value_;
+ Register address_;
+ RememberedSetAction remembered_set_action_;
+ RegisterAllocation regs_;
+};
+
+
+} } // namespace v8::internal
+
+#endif // V8_X87_CODE_STUBS_X87_H_
--- /dev/null
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "v8.h"
+
+#if V8_TARGET_ARCH_X87
+
+#include "codegen.h"
+#include "heap.h"
+#include "macro-assembler.h"
+
+namespace v8 {
+namespace internal {
+
+
+// -------------------------------------------------------------------------
+// Platform-specific RuntimeCallHelper functions.
+
+void StubRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
+ masm->EnterFrame(StackFrame::INTERNAL);
+ ASSERT(!masm->has_frame());
+ masm->set_has_frame(true);
+}
+
+
+void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
+ masm->LeaveFrame(StackFrame::INTERNAL);
+ ASSERT(masm->has_frame());
+ masm->set_has_frame(false);
+}
+
+
+#define __ masm.
+
+
+UnaryMathFunction CreateExpFunction() {
+ // No SSE2 support
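+ // The x87 port does not generate an SSE2-based fast path; fall back to the
+ // C library implementation.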
+ return &std::exp;
+}
+
+
+UnaryMathFunction CreateSqrtFunction() {
+ // No SSE2 support
+ return &std::sqrt;
+}
+
+
+// Helper functions for CreateMemMoveFunction.
+#undef __
+#define __ ACCESS_MASM(masm)
+
+enum Direction { FORWARD, BACKWARD };
+enum Alignment { MOVE_ALIGNED, MOVE_UNALIGNED };
+
+
+void MemMoveEmitPopAndReturn(MacroAssembler* masm) {
+ __ pop(esi);
+ __ pop(edi);
+ __ ret(0);
+}
+
+
+#undef __
+#define __ masm.
+
+
+class LabelConverter {
+ public:
+ explicit LabelConverter(byte* buffer) : buffer_(buffer) {}
+ int32_t address(Label* l) const {
+ return reinterpret_cast<int32_t>(buffer_) + l->pos();
+ }
+ private:
+ byte* buffer_;
+};
+
+
+OS::MemMoveFunction CreateMemMoveFunction() {
+ size_t actual_size;
+ // Allocate buffer in executable space.
+ byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB, &actual_size, true));
+ if (buffer == NULL) return NULL;
+ MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
+ LabelConverter conv(buffer);
+
+ // Generated code is put into a fixed, unmovable buffer, and not into
+ // the V8 heap. We can't, and don't, refer to any relocatable addresses
+ // (e.g. the JavaScript nan-object).
+
+ // 32-bit C declaration function calls pass arguments on stack.
+
+ // Stack layout:
+ // esp[12]: Third argument, size.
+ // esp[8]: Second argument, source pointer.
+ // esp[4]: First argument, destination pointer.
+ // esp[0]: return address
+
+ const int kDestinationOffset = 1 * kPointerSize;
+ const int kSourceOffset = 2 * kPointerSize;
+ const int kSizeOffset = 3 * kPointerSize;
+
+ int stack_offset = 0; // Update if we change the stack height.
+
+ Label backward, backward_much_overlap;
+ Label forward_much_overlap, small_size, medium_size, pop_and_return;
+ __ push(edi);
+ __ push(esi);
+ stack_offset += 2 * kPointerSize;
+ Register dst = edi;
+ Register src = esi;
+ Register count = ecx;
+ __ mov(dst, Operand(esp, stack_offset + kDestinationOffset));
+ __ mov(src, Operand(esp, stack_offset + kSourceOffset));
+ __ mov(count, Operand(esp, stack_offset + kSizeOffset));
+
+ __ cmp(dst, src);
+ __ j(equal, &pop_and_return);
+
+ // No SSE2.
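+ // Fallback copier for the x87 port: choose direction based on whether the
+ // destination is above the source (to handle overlap), then copy 4 bytes at
+ // a time with a 1-byte tail loop.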
+ Label forward;
+ __ cmp(count, 0);
+ __ j(equal, &pop_and_return);
+ __ cmp(dst, src);
+ __ j(above, &backward);
+ __ jmp(&forward);
+ {
+ // Simple forward copier.
+ Label forward_loop_1byte, forward_loop_4byte;
+ __ bind(&forward_loop_4byte);
+ __ mov(eax, Operand(src, 0));
+ __ sub(count, Immediate(4));
+ __ add(src, Immediate(4));
+ __ mov(Operand(dst, 0), eax);
+ __ add(dst, Immediate(4));
+ __ bind(&forward); // Entry point.
+ __ cmp(count, 3);
+ __ j(above, &forward_loop_4byte);
+ __ bind(&forward_loop_1byte);
+ __ cmp(count, 0);
+ __ j(below_equal, &pop_and_return);
+ __ mov_b(eax, Operand(src, 0));
+ __ dec(count);
+ __ inc(src);
+ __ mov_b(Operand(dst, 0), eax);
+ __ inc(dst);
+ __ jmp(&forward_loop_1byte);
+ }
+ {
+ // Simple backward copier.
+ Label backward_loop_1byte, backward_loop_4byte, entry_shortcut;
+ __ bind(&backward);
+ __ add(src, count);
+ __ add(dst, count);
+ __ cmp(count, 3);
+ __ j(below_equal, &entry_shortcut);
+
+ __ bind(&backward_loop_4byte);
+ __ sub(src, Immediate(4));
+ __ sub(count, Immediate(4));
+ __ mov(eax, Operand(src, 0));
+ __ sub(dst, Immediate(4));
+ __ mov(Operand(dst, 0), eax);
+ __ cmp(count, 3);
+ __ j(above, &backward_loop_4byte);
+ __ bind(&backward_loop_1byte);
+ __ cmp(count, 0);
+ __ j(below_equal, &pop_and_return);
+ __ bind(&entry_shortcut);
+ __ dec(src);
+ __ dec(count);
+ __ mov_b(eax, Operand(src, 0));
+ __ dec(dst);
+ __ mov_b(Operand(dst, 0), eax);
+ __ jmp(&backward_loop_1byte);
+ }
+
+ __ bind(&pop_and_return);
+ MemMoveEmitPopAndReturn(&masm);
+
+ CodeDesc desc;
+ masm.GetCode(&desc);
+ ASSERT(!RelocInfo::RequiresRelocation(desc));
+ CPU::FlushICache(buffer, actual_size);
+ OS::ProtectCode(buffer, actual_size);
+ // TODO(jkummerow): It would be nice to register this code creation event
+ // with the PROFILE / GDBJIT system.
+ return FUNCTION_CAST<OS::MemMoveFunction>(buffer);
+}
+
+
+#undef __
+
+// -------------------------------------------------------------------------
+// Code generators
+
+#define __ ACCESS_MASM(masm)
+
+
+void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
+ MacroAssembler* masm, AllocationSiteMode mode,
+ Label* allocation_memento_found) {
+ // ----------- S t a t e -------------
+ // -- eax : value
+ // -- ebx : target map
+ // -- ecx : key
+ // -- edx : receiver
+ // -- esp[0] : return address
+ // -----------------------------------
+ if (mode == TRACK_ALLOCATION_SITE) {
+ ASSERT(allocation_memento_found != NULL);
+ __ JumpIfJSArrayHasAllocationMemento(edx, edi, allocation_memento_found);
+ }
+
+ // Set transitioned map.
+ __ mov(FieldOperand(edx, HeapObject::kMapOffset), ebx);
+ __ RecordWriteField(edx,
+ HeapObject::kMapOffset,
+ ebx,
+ edi,
+ EMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
+}
+
+
+void ElementsTransitionGenerator::GenerateSmiToDouble(
+ MacroAssembler* masm, AllocationSiteMode mode, Label* fail) {
+ // ----------- S t a t e -------------
+ // -- eax : value
+ // -- ebx : target map
+ // -- ecx : key
+ // -- edx : receiver
+ // -- esp[0] : return address
+ // -----------------------------------
+ Label loop, entry, convert_hole, gc_required, only_change_map;
+
+ if (mode == TRACK_ALLOCATION_SITE) {
+ __ JumpIfJSArrayHasAllocationMemento(edx, edi, fail);
+ }
+
+ // Check for empty arrays, which only require a map transition and no changes
+ // to the backing store.
+ __ mov(edi, FieldOperand(edx, JSObject::kElementsOffset));
+ __ cmp(edi, Immediate(masm->isolate()->factory()->empty_fixed_array()));
+ __ j(equal, &only_change_map);
+
+ __ push(eax);
+ __ push(ebx);
+
+ __ mov(edi, FieldOperand(edi, FixedArray::kLengthOffset));
+
+ // Allocate new FixedDoubleArray.
+ // edx: receiver
+ // edi: length of source FixedArray (smi-tagged)
+ AllocationFlags flags =
+ static_cast<AllocationFlags>(TAG_OBJECT | DOUBLE_ALIGNMENT);
+ __ Allocate(FixedDoubleArray::kHeaderSize, times_8, edi,
+ REGISTER_VALUE_IS_SMI, eax, ebx, no_reg, &gc_required, flags);
+
+ // eax: destination FixedDoubleArray
+ // edi: number of elements
+ // edx: receiver
+ __ mov(FieldOperand(eax, HeapObject::kMapOffset),
+ Immediate(masm->isolate()->factory()->fixed_double_array_map()));
+ __ mov(FieldOperand(eax, FixedDoubleArray::kLengthOffset), edi);
+ __ mov(esi, FieldOperand(edx, JSObject::kElementsOffset));
+ // Replace receiver's backing store with newly created FixedDoubleArray.
+ __ mov(FieldOperand(edx, JSObject::kElementsOffset), eax);
+ __ mov(ebx, eax);
+ __ RecordWriteField(edx,
+ JSObject::kElementsOffset,
+ ebx,
+ edi,
+ EMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
+
+ __ mov(edi, FieldOperand(esi, FixedArray::kLengthOffset));
+
+ // Prepare for conversion loop.
+ ExternalReference canonical_the_hole_nan_reference =
+ ExternalReference::address_of_the_hole_nan();
+ __ jmp(&entry);
+
+ // Call into runtime if GC is required.
+ __ bind(&gc_required);
+ // Restore registers before jumping into runtime.
+ __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+ __ pop(ebx);
+ __ pop(eax);
+ __ jmp(fail);
+
+ // Convert and copy elements
+ // esi: source FixedArray
+ __ bind(&loop);
+ __ mov(ebx, FieldOperand(esi, edi, times_2, FixedArray::kHeaderSize));
+ // ebx: current element from source
+ // edi: index of current element
+ __ JumpIfNotSmi(ebx, &convert_hole);
+
+ // Normal smi, convert it to double and store.
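+ // With no SSE2 available, the conversion goes through the x87 FPU: push the
+ // untagged value, fild_s loads it onto the FPU stack, and fstp_d pops it to
+ // the destination slot as a double.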
+ __ SmiUntag(ebx);
+ __ push(ebx);
+ __ fild_s(Operand(esp, 0));
+ __ pop(ebx);
+ __ fstp_d(FieldOperand(eax, edi, times_4, FixedDoubleArray::kHeaderSize));
+ __ jmp(&entry);
+
+ // Found hole, store hole_nan_as_double instead.
+ __ bind(&convert_hole);
+
+ if (FLAG_debug_code) {
+ __ cmp(ebx, masm->isolate()->factory()->the_hole_value());
+ __ Assert(equal, kObjectFoundInSmiOnlyArray);
+ }
+
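+ // Copy the canonical hole NaN through the FPU stack; the x87 port has no
+ // SSE2 register to move it with.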
+ __ fld_d(Operand::StaticVariable(canonical_the_hole_nan_reference));
+ __ fstp_d(FieldOperand(eax, edi, times_4, FixedDoubleArray::kHeaderSize));
+
+ __ bind(&entry);
+ __ sub(edi, Immediate(Smi::FromInt(1)));
+ __ j(not_sign, &loop);
+
+ __ pop(ebx);
+ __ pop(eax);
+
+ // Restore esi.
+ __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+
+ __ bind(&only_change_map);
+ // eax: value
+ // ebx: target map
+ // Set transitioned map.
+ __ mov(FieldOperand(edx, HeapObject::kMapOffset), ebx);
+ __ RecordWriteField(edx,
+ HeapObject::kMapOffset,
+ ebx,
+ edi,
+ OMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
+}
+
+
+void ElementsTransitionGenerator::GenerateDoubleToObject(
+ MacroAssembler* masm, AllocationSiteMode mode, Label* fail) {
+ // ----------- S t a t e -------------
+ // -- eax : value
+ // -- ebx : target map
+ // -- ecx : key
+ // -- edx : receiver
+ // -- esp[0] : return address
+ // -----------------------------------
+ Label loop, entry, convert_hole, gc_required, only_change_map, success;
+
+ if (mode == TRACK_ALLOCATION_SITE) {
+ __ JumpIfJSArrayHasAllocationMemento(edx, edi, fail);
+ }
+
+ // Check for empty arrays, which only require a map transition and no changes
+ // to the backing store.
+ __ mov(edi, FieldOperand(edx, JSObject::kElementsOffset));
+ __ cmp(edi, Immediate(masm->isolate()->factory()->empty_fixed_array()));
+ __ j(equal, &only_change_map);
+
+ __ push(eax);
+ __ push(edx);
+ __ push(ebx);
+
+ __ mov(ebx, FieldOperand(edi, FixedDoubleArray::kLengthOffset));
+
+ // Allocate new FixedArray.
+ // ebx: length of source FixedDoubleArray (smi-tagged)
+ __ lea(edi, Operand(ebx, times_2, FixedArray::kHeaderSize));
+ __ Allocate(edi, eax, esi, no_reg, &gc_required, TAG_OBJECT);
+
+ // eax: destination FixedArray
+ // ebx: number of elements
+ __ mov(FieldOperand(eax, HeapObject::kMapOffset),
+ Immediate(masm->isolate()->factory()->fixed_array_map()));
+ __ mov(FieldOperand(eax, FixedArray::kLengthOffset), ebx);
+ __ mov(edi, FieldOperand(edx, JSObject::kElementsOffset));
+
+ __ jmp(&entry);
+
+ // ebx: target map
+ // edx: receiver
+ // Set transitioned map.
+ __ bind(&only_change_map);
+ __ mov(FieldOperand(edx, HeapObject::kMapOffset), ebx);
+ __ RecordWriteField(edx,
+ HeapObject::kMapOffset,
+ ebx,
+ edi,
+ OMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
+ __ jmp(&success);
+
+ // Call into runtime if GC is required.
+ __ bind(&gc_required);
+ __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+ __ pop(ebx);
+ __ pop(edx);
+ __ pop(eax);
+ __ jmp(fail);
+
+ // Box doubles into heap numbers.
+ // edi: source FixedDoubleArray
+ // eax: destination FixedArray
+ __ bind(&loop);
+ // ebx: index of current element (smi-tagged)
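+ // The hole is encoded as a canonical NaN, so comparing only the upper 32
+ // bits of the double against kHoleNanUpper32 is enough to detect it.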
+ uint32_t offset = FixedDoubleArray::kHeaderSize + sizeof(kHoleNanLower32);
+ __ cmp(FieldOperand(edi, ebx, times_4, offset), Immediate(kHoleNanUpper32));
+ __ j(equal, &convert_hole);
+
+ // Non-hole double, copy value into a heap number.
+ __ AllocateHeapNumber(edx, esi, no_reg, &gc_required);
+ // edx: new heap number
+ __ mov(esi, FieldOperand(edi, ebx, times_4, FixedDoubleArray::kHeaderSize));
+ __ mov(FieldOperand(edx, HeapNumber::kValueOffset), esi);
+ __ mov(esi, FieldOperand(edi, ebx, times_4, offset));
+ __ mov(FieldOperand(edx, HeapNumber::kValueOffset + kPointerSize), esi);
+ __ mov(FieldOperand(eax, ebx, times_2, FixedArray::kHeaderSize), edx);
+ __ mov(esi, ebx);
+ __ RecordWriteArray(eax,
+ edx,
+ esi,
+ EMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
+ __ jmp(&entry, Label::kNear);
+
+ // Replace the-hole NaN with the-hole pointer.
+ __ bind(&convert_hole);
+ __ mov(FieldOperand(eax, ebx, times_2, FixedArray::kHeaderSize),
+ masm->isolate()->factory()->the_hole_value());
+
+ __ bind(&entry);
+ __ sub(ebx, Immediate(Smi::FromInt(1)));
+ __ j(not_sign, &loop);
+
+ __ pop(ebx);
+ __ pop(edx);
+ // ebx: target map
+ // edx: receiver
+ // Set transitioned map.
+ __ mov(FieldOperand(edx, HeapObject::kMapOffset), ebx);
+ __ RecordWriteField(edx,
+ HeapObject::kMapOffset,
+ ebx,
+ edi,
+ OMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
+ // Replace receiver's backing store with newly created and filled FixedArray.
+ __ mov(FieldOperand(edx, JSObject::kElementsOffset), eax);
+ __ RecordWriteField(edx,
+ JSObject::kElementsOffset,
+ eax,
+ edi,
+ EMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
+
+ // Restore registers.
+ __ pop(eax);
+ __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+
+ __ bind(&success);
+}
+
+
+void StringCharLoadGenerator::Generate(MacroAssembler* masm,
+ Factory* factory,
+ Register string,
+ Register index,
+ Register result,
+ Label* call_runtime) {
+ // Fetch the instance type of the receiver into result register.
+ __ mov(result, FieldOperand(string, HeapObject::kMapOffset));
+ __ movzx_b(result, FieldOperand(result, Map::kInstanceTypeOffset));
+
+ // We need special handling for indirect strings.
+ Label check_sequential;
+ __ test(result, Immediate(kIsIndirectStringMask));
+ __ j(zero, &check_sequential, Label::kNear);
+
+ // Dispatch on the indirect string shape: slice or cons.
+ Label cons_string;
+ __ test(result, Immediate(kSlicedNotConsMask));
+ __ j(zero, &cons_string, Label::kNear);
+
+ // Handle slices.
+ Label indirect_string_loaded;
+ __ mov(result, FieldOperand(string, SlicedString::kOffsetOffset));
+ __ SmiUntag(result);
+ __ add(index, result);
+ __ mov(string, FieldOperand(string, SlicedString::kParentOffset));
+ __ jmp(&indirect_string_loaded, Label::kNear);
+
+ // Handle cons strings.
+ // Check whether the right hand side is the empty string (i.e. if
+ // this is really a flat string in a cons string). If that is not
+ // the case we would rather go to the runtime system now to flatten
+ // the string.
+ __ bind(&cons_string);
+ __ cmp(FieldOperand(string, ConsString::kSecondOffset),
+ Immediate(factory->empty_string()));
+ __ j(not_equal, call_runtime);
+ __ mov(string, FieldOperand(string, ConsString::kFirstOffset));
+
+ __ bind(&indirect_string_loaded);
+ __ mov(result, FieldOperand(string, HeapObject::kMapOffset));
+ __ movzx_b(result, FieldOperand(result, Map::kInstanceTypeOffset));
+
+ // Distinguish sequential and external strings. Only these two string
+ // representations can reach here (slices and flat cons strings have been
+ // reduced to the underlying sequential or external string).
+ Label seq_string;
+ __ bind(&check_sequential);
+ STATIC_ASSERT(kSeqStringTag == 0);
+ __ test(result, Immediate(kStringRepresentationMask));
+ __ j(zero, &seq_string, Label::kNear);
+
+ // Handle external strings.
+ Label ascii_external, done;
+ if (FLAG_debug_code) {
+ // Assert that we do not have a cons or slice (indirect strings) here.
+ // Sequential strings have already been ruled out.
+ __ test(result, Immediate(kIsIndirectStringMask));
+ __ Assert(zero, kExternalStringExpectedButNotFound);
+ }
+ // Rule out short external strings.
+ STATIC_CHECK(kShortExternalStringTag != 0);
+ __ test_b(result, kShortExternalStringMask);
+ __ j(not_zero, call_runtime);
+ // Check encoding.
+ STATIC_ASSERT(kTwoByteStringTag == 0);
+ __ test_b(result, kStringEncodingMask);
+ __ mov(result, FieldOperand(string, ExternalString::kResourceDataOffset));
+ __ j(not_equal, &ascii_external, Label::kNear);
+ // Two-byte string.
+ __ movzx_w(result, Operand(result, index, times_2, 0));
+ __ jmp(&done, Label::kNear);
+ __ bind(&ascii_external);
+ // Ascii string.
+ __ movzx_b(result, Operand(result, index, times_1, 0));
+ __ jmp(&done, Label::kNear);
+
+ // Dispatch on the encoding: ASCII or two-byte.
+ Label ascii;
+ __ bind(&seq_string);
+ STATIC_ASSERT((kStringEncodingMask & kOneByteStringTag) != 0);
+ STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
+ __ test(result, Immediate(kStringEncodingMask));
+ __ j(not_zero, &ascii, Label::kNear);
+
+ // Two-byte string.
+ // Load the two-byte character code into the result register.
+ __ movzx_w(result, FieldOperand(string,
+ index,
+ times_2,
+ SeqTwoByteString::kHeaderSize));
+ __ jmp(&done, Label::kNear);
+
+ // Ascii string.
+ // Load the byte into the result register.
+ __ bind(&ascii);
+ __ movzx_b(result, FieldOperand(string,
+ index,
+ times_1,
+ SeqOneByteString::kHeaderSize));
+ __ bind(&done);
+}
+
+
+#undef __
+
+
+CodeAgingHelper::CodeAgingHelper() {
+ ASSERT(young_sequence_.length() == kNoCodeAgeSequenceLength);
+ CodePatcher patcher(young_sequence_.start(), young_sequence_.length());
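+  // The young code age sequence is the standard frame-building prologue
+  // (push ebp; mov ebp, esp; push esi; push edi) emitted at function entry.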
+ patcher.masm()->push(ebp);
+ patcher.masm()->mov(ebp, esp);
+ patcher.masm()->push(esi);
+ patcher.masm()->push(edi);
+}
+
+
+#ifdef DEBUG
+bool CodeAgingHelper::IsOld(byte* candidate) const {
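+  // Aged code has had its prologue replaced by a call to a code age stub,
+  // so the first byte of an old sequence is the call opcode.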
+ return *candidate == kCallOpcode;
+}
+#endif
+
+
+bool Code::IsYoungSequence(Isolate* isolate, byte* sequence) {
+ bool result = isolate->code_aging_helper()->IsYoung(sequence);
+ ASSERT(result || isolate->code_aging_helper()->IsOld(sequence));
+ return result;
+}
+
+
+void Code::GetCodeAgeAndParity(Isolate* isolate, byte* sequence, Age* age,
+ MarkingParity* parity) {
+ if (IsYoungSequence(isolate, sequence)) {
+ *age = kNoAgeCodeAge;
+ *parity = NO_MARKING_PARITY;
+ } else {
+ sequence++; // Skip the kCallOpcode byte
+ Address target_address = sequence + *reinterpret_cast<int*>(sequence) +
+ Assembler::kCallTargetAddressOffset;
+ Code* stub = GetCodeFromTargetAddress(target_address);
+ GetCodeAgeAndParity(stub, age, parity);
+ }
+}
+
+
+void Code::PatchPlatformCodeAge(Isolate* isolate,
+ byte* sequence,
+ Code::Age age,
+ MarkingParity parity) {
+ uint32_t young_length = isolate->code_aging_helper()->young_sequence_length();
+ if (age == kNoAgeCodeAge) {
+ isolate->code_aging_helper()->CopyYoungSequenceTo(sequence);
+ CPU::FlushICache(sequence, young_length);
+ } else {
+ Code* stub = GetCodeAgeStub(isolate, age, parity);
+ CodePatcher patcher(sequence, young_length);
+ patcher.masm()->call(stub->instruction_start(), RelocInfo::NONE32);
+ }
+}
+
+
+} } // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_X87
--- /dev/null
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_X87_CODEGEN_X87_H_
+#define V8_X87_CODEGEN_X87_H_
+
+#include "ast.h"
+#include "ic-inl.h"
+
+namespace v8 {
+namespace internal {
+
+
+class StringCharLoadGenerator : public AllStatic {
+ public:
+ // Generates the code for handling different string types and loading the
+ // indexed character into |result|. We expect |index| as untagged input and
+ // |result| as untagged output.
+ static void Generate(MacroAssembler* masm,
+ Factory* factory,
+ Register string,
+ Register index,
+ Register result,
+ Label* call_runtime);
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(StringCharLoadGenerator);
+};
+
+} } // namespace v8::internal
+
+#endif // V8_X87_CODEGEN_X87_H_
--- /dev/null
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// CPU specific code for x87 independent of OS goes here.
+
+#ifdef __GNUC__
+#include "third_party/valgrind/valgrind.h"
+#endif
+
+#include "v8.h"
+
+#if V8_TARGET_ARCH_X87
+
+#include "cpu.h"
+#include "macro-assembler.h"
+
+namespace v8 {
+namespace internal {
+
+void CPU::FlushICache(void* start, size_t size) {
+  // No need to flush the instruction cache on Intel. On Intel, instruction
+  // cache flushing is only necessary when multiple cores are running the
+  // same code simultaneously. V8 (and JavaScript) is single threaded, and
+  // when code is patched on an Intel CPU the core performing the patching
+  // will have its own instruction cache updated automatically.
+
+ // If flushing of the instruction cache becomes necessary Windows has the
+ // API function FlushInstructionCache.
+
+  // By default, valgrind only checks the stack for writes that might need to
+  // invalidate already cached translated code. This leads to random
+  // instability because code patches and moves can go unnoticed. One
+  // solution is to run valgrind with --smc-check=all, but this comes at a big
+  // performance cost. Instead, we notify valgrind to invalidate its cache.
+#ifdef VALGRIND_DISCARD_TRANSLATIONS
+ unsigned res = VALGRIND_DISCARD_TRANSLATIONS(start, size);
+ USE(res);
+#endif
+}
+
+} } // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_X87
--- /dev/null
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "v8.h"
+
+#if V8_TARGET_ARCH_X87
+
+#include "codegen.h"
+#include "debug.h"
+
+
+namespace v8 {
+namespace internal {
+
+bool BreakLocationIterator::IsDebugBreakAtReturn() {
+ return Debug::IsDebugBreakAtReturn(rinfo());
+}
+
+
+// Patch the JS frame exit code with a debug break call. See
+// CodeGenerator::VisitReturnStatement and VirtualFrame::Exit in codegen-x87.cc
+// for the precise return instructions sequence.
+void BreakLocationIterator::SetDebugBreakAtReturn() {
+ ASSERT(Assembler::kJSReturnSequenceLength >=
+ Assembler::kCallInstructionLength);
+ rinfo()->PatchCodeWithCall(
+ debug_info_->GetIsolate()->builtins()->Return_DebugBreak()->entry(),
+ Assembler::kJSReturnSequenceLength - Assembler::kCallInstructionLength);
+}
+
+
+// Restore the JS frame exit code.
+void BreakLocationIterator::ClearDebugBreakAtReturn() {
+ rinfo()->PatchCode(original_rinfo()->pc(),
+ Assembler::kJSReturnSequenceLength);
+}
+
+
+// A debug break in the frame exit code is identified by the JS frame exit code
+// having been patched with a call instruction.
+bool Debug::IsDebugBreakAtReturn(RelocInfo* rinfo) {
+ ASSERT(RelocInfo::IsJSReturn(rinfo->rmode()));
+ return rinfo->IsPatchedReturnSequence();
+}
+
+
+bool BreakLocationIterator::IsDebugBreakAtSlot() {
+ ASSERT(IsDebugBreakSlot());
+ // Check whether the debug break slot instructions have been patched.
+ return rinfo()->IsPatchedDebugBreakSlotSequence();
+}
+
+
+void BreakLocationIterator::SetDebugBreakAtSlot() {
+ ASSERT(IsDebugBreakSlot());
+ Isolate* isolate = debug_info_->GetIsolate();
+ rinfo()->PatchCodeWithCall(
+ isolate->builtins()->Slot_DebugBreak()->entry(),
+ Assembler::kDebugBreakSlotLength - Assembler::kCallInstructionLength);
+}
+
+
+void BreakLocationIterator::ClearDebugBreakAtSlot() {
+ ASSERT(IsDebugBreakSlot());
+ rinfo()->PatchCode(original_rinfo()->pc(), Assembler::kDebugBreakSlotLength);
+}
+
+
+// All debug break stubs support padding for LiveEdit.
+const bool Debug::FramePaddingLayout::kIsSupported = true;
+
+
+#define __ ACCESS_MASM(masm)
+
+static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
+ RegList object_regs,
+ RegList non_object_regs,
+ bool convert_call_to_jmp) {
+ // Enter an internal frame.
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+
+ // Load padding words on stack.
+ for (int i = 0; i < Debug::FramePaddingLayout::kInitialSize; i++) {
+ __ push(Immediate(Smi::FromInt(
+ Debug::FramePaddingLayout::kPaddingValue)));
+ }
+ __ push(Immediate(Smi::FromInt(Debug::FramePaddingLayout::kInitialSize)));
+
+    // Store the registers containing live values on the expression stack to
+    // make sure that these are correctly updated during GC. Non-object values
+    // are stored as smis so that the GC leaves them untouched.
+ ASSERT((object_regs & ~kJSCallerSaved) == 0);
+ ASSERT((non_object_regs & ~kJSCallerSaved) == 0);
+ ASSERT((object_regs & non_object_regs) == 0);
+ for (int i = 0; i < kNumJSCallerSaved; i++) {
+ int r = JSCallerSavedCode(i);
+ Register reg = { r };
+ if ((object_regs & (1 << r)) != 0) {
+ __ push(reg);
+ }
+ if ((non_object_regs & (1 << r)) != 0) {
+ if (FLAG_debug_code) {
+ __ test(reg, Immediate(0xc0000000));
+ __ Assert(zero, kUnableToEncodeValueAsSmi);
+ }
+ __ SmiTag(reg);
+ __ push(reg);
+ }
+ }
+
+#ifdef DEBUG
+ __ RecordComment("// Calling from debug break to runtime - come in - over");
+#endif
+ __ Move(eax, Immediate(0)); // No arguments.
+ __ mov(ebx, Immediate(ExternalReference::debug_break(masm->isolate())));
+
+ CEntryStub ceb(masm->isolate(), 1);
+ __ CallStub(&ceb);
+
+    // Automatically find a register that can be used after the register
+    // restore. We need one register for the padding-skip instructions.
+ Register unused_reg = { -1 };
+
+ // Restore the register values containing object pointers from the
+ // expression stack.
+ for (int i = kNumJSCallerSaved; --i >= 0;) {
+ int r = JSCallerSavedCode(i);
+ Register reg = { r };
+ if (FLAG_debug_code) {
+ __ Move(reg, Immediate(kDebugZapValue));
+ }
+ bool taken = reg.code() == esi.code();
+ if ((object_regs & (1 << r)) != 0) {
+ __ pop(reg);
+ taken = true;
+ }
+ if ((non_object_regs & (1 << r)) != 0) {
+ __ pop(reg);
+ __ SmiUntag(reg);
+ taken = true;
+ }
+ if (!taken) {
+ unused_reg = reg;
+ }
+ }
+
+ ASSERT(unused_reg.code() != -1);
+
+ // Read current padding counter and skip corresponding number of words.
+ __ pop(unused_reg);
+    // We divide the stored value by 2 (untagging) and multiply it by the
+    // word size.
+ STATIC_ASSERT(kSmiTagSize == 1 && kSmiShiftSize == 0);
+ __ lea(esp, Operand(esp, unused_reg, times_half_pointer_size, 0));
+
+ // Get rid of the internal frame.
+ }
+
+ // If this call did not replace a call but patched other code then there will
+ // be an unwanted return address left on the stack. Here we get rid of that.
+ if (convert_call_to_jmp) {
+ __ add(esp, Immediate(kPointerSize));
+ }
+
+ // Now that the break point has been handled, resume normal execution by
+ // jumping to the target address intended by the caller and that was
+ // overwritten by the address of DebugBreakXXX.
+ ExternalReference after_break_target =
+ ExternalReference::debug_after_break_target_address(masm->isolate());
+ __ jmp(Operand::StaticVariable(after_break_target));
+}
+
+
+void Debug::GenerateCallICStubDebugBreak(MacroAssembler* masm) {
+ // Register state for CallICStub
+ // ----------- S t a t e -------------
+ // -- edx : type feedback slot (smi)
+ // -- edi : function
+ // -----------------------------------
+ Generate_DebugBreakCallHelper(masm, edx.bit() | edi.bit(),
+ 0, false);
+}
+
+
+void Debug::GenerateLoadICDebugBreak(MacroAssembler* masm) {
+ // Register state for IC load call (from ic-x87.cc).
+ // ----------- S t a t e -------------
+ // -- ecx : name
+ // -- edx : receiver
+ // -----------------------------------
+ Generate_DebugBreakCallHelper(masm, ecx.bit() | edx.bit(), 0, false);
+}
+
+
+void Debug::GenerateStoreICDebugBreak(MacroAssembler* masm) {
+ // Register state for IC store call (from ic-x87.cc).
+ // ----------- S t a t e -------------
+ // -- eax : value
+ // -- ecx : name
+ // -- edx : receiver
+ // -----------------------------------
+ Generate_DebugBreakCallHelper(
+ masm, eax.bit() | ecx.bit() | edx.bit(), 0, false);
+}
+
+
+void Debug::GenerateKeyedLoadICDebugBreak(MacroAssembler* masm) {
+ // Register state for keyed IC load call (from ic-x87.cc).
+ // ----------- S t a t e -------------
+ // -- ecx : key
+ // -- edx : receiver
+ // -----------------------------------
+ Generate_DebugBreakCallHelper(masm, ecx.bit() | edx.bit(), 0, false);
+}
+
+
+void Debug::GenerateKeyedStoreICDebugBreak(MacroAssembler* masm) {
+  // Register state for keyed IC store call (from ic-x87.cc).
+ // ----------- S t a t e -------------
+ // -- eax : value
+ // -- ecx : key
+ // -- edx : receiver
+ // -----------------------------------
+ Generate_DebugBreakCallHelper(
+ masm, eax.bit() | ecx.bit() | edx.bit(), 0, false);
+}
+
+
+void Debug::GenerateCompareNilICDebugBreak(MacroAssembler* masm) {
+ // Register state for CompareNil IC
+ // ----------- S t a t e -------------
+ // -- eax : value
+ // -----------------------------------
+ Generate_DebugBreakCallHelper(masm, eax.bit(), 0, false);
+}
+
+
+void Debug::GenerateReturnDebugBreak(MacroAssembler* masm) {
+ // Register state just before return from JS function (from codegen-x87.cc).
+ // ----------- S t a t e -------------
+ // -- eax: return value
+ // -----------------------------------
+ Generate_DebugBreakCallHelper(masm, eax.bit(), 0, true);
+}
+
+
+void Debug::GenerateCallFunctionStubDebugBreak(MacroAssembler* masm) {
+ // Register state for CallFunctionStub (from code-stubs-x87.cc).
+ // ----------- S t a t e -------------
+ // -- edi: function
+ // -----------------------------------
+ Generate_DebugBreakCallHelper(masm, edi.bit(), 0, false);
+}
+
+
+void Debug::GenerateCallConstructStubDebugBreak(MacroAssembler* masm) {
+ // Register state for CallConstructStub (from code-stubs-x87.cc).
+  // eax is the actual number of arguments, not encoded as a smi; see the
+  // comment above the IC call.
+ // ----------- S t a t e -------------
+ // -- eax: number of arguments (not smi)
+ // -- edi: constructor function
+ // -----------------------------------
+ // The number of arguments in eax is not smi encoded.
+ Generate_DebugBreakCallHelper(masm, edi.bit(), eax.bit(), false);
+}
+
+
+void Debug::GenerateCallConstructStubRecordDebugBreak(MacroAssembler* masm) {
+ // Register state for CallConstructStub (from code-stubs-x87.cc).
+  // eax is the actual number of arguments, not encoded as a smi; see the
+  // comment above the IC call.
+ // ----------- S t a t e -------------
+ // -- eax: number of arguments (not smi)
+ // -- ebx: feedback array
+ // -- edx: feedback slot (smi)
+ // -- edi: constructor function
+ // -----------------------------------
+ // The number of arguments in eax is not smi encoded.
+ Generate_DebugBreakCallHelper(masm, ebx.bit() | edx.bit() | edi.bit(),
+ eax.bit(), false);
+}
+
+
+void Debug::GenerateSlot(MacroAssembler* masm) {
+ // Generate enough nop's to make space for a call instruction.
+ Label check_codesize;
+ __ bind(&check_codesize);
+ __ RecordDebugBreakSlot();
+ __ Nop(Assembler::kDebugBreakSlotLength);
+ ASSERT_EQ(Assembler::kDebugBreakSlotLength,
+ masm->SizeOfCodeGeneratedSince(&check_codesize));
+}
+
+
+void Debug::GenerateSlotDebugBreak(MacroAssembler* masm) {
+  // In the places where a debug break slot is inserted, no registers can
+  // contain object pointers.
+ Generate_DebugBreakCallHelper(masm, 0, 0, true);
+}
+
+
+void Debug::GeneratePlainReturnLiveEdit(MacroAssembler* masm) {
+ masm->ret(0);
+}
+
+
+void Debug::GenerateFrameDropperLiveEdit(MacroAssembler* masm) {
+ ExternalReference restarter_frame_function_slot =
+ ExternalReference::debug_restarter_frame_function_pointer_address(
+ masm->isolate());
+ __ mov(Operand::StaticVariable(restarter_frame_function_slot), Immediate(0));
+
+ // We do not know our frame height, but set esp based on ebp.
+ __ lea(esp, Operand(ebp, -1 * kPointerSize));
+
+ __ pop(edi); // Function.
+ __ pop(ebp);
+
+ // Load context from the function.
+ __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
+
+ // Get function code.
+ __ mov(edx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
+ __ mov(edx, FieldOperand(edx, SharedFunctionInfo::kCodeOffset));
+ __ lea(edx, FieldOperand(edx, Code::kHeaderSize));
+
+ // Re-run JSFunction, edi is function, esi is context.
+ __ jmp(edx);
+}
+
+const bool Debug::kFrameDropperSupported = true;
+
+#undef __
+
+} } // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_X87
--- /dev/null
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "v8.h"
+
+#if V8_TARGET_ARCH_X87
+
+#include "codegen.h"
+#include "deoptimizer.h"
+#include "full-codegen.h"
+#include "safepoint-table.h"
+
+namespace v8 {
+namespace internal {
+
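+// Each entry in the deoptimization table is a 5-byte push of the entry id
+// followed by a 5-byte near jump to the shared entry code; see
+// TableEntryGenerator::GeneratePrologue below.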
+const int Deoptimizer::table_entry_size_ = 10;
+
+
+int Deoptimizer::patch_size() {
+ return Assembler::kCallInstructionLength;
+}
+
+
+void Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(Handle<Code> code) {
+ Isolate* isolate = code->GetIsolate();
+ HandleScope scope(isolate);
+
+ // Compute the size of relocation information needed for the code
+ // patching in Deoptimizer::DeoptimizeFunction.
+ int min_reloc_size = 0;
+ int prev_pc_offset = 0;
+ DeoptimizationInputData* deopt_data =
+ DeoptimizationInputData::cast(code->deoptimization_data());
+ for (int i = 0; i < deopt_data->DeoptCount(); i++) {
+ int pc_offset = deopt_data->Pc(i)->value();
+ if (pc_offset == -1) continue;
+ ASSERT_GE(pc_offset, prev_pc_offset);
+ int pc_delta = pc_offset - prev_pc_offset;
+    // We use RUNTIME_ENTRY reloc info, which takes 2 bytes when the pc delta
+    // fits the small encoding and up to 6 bytes otherwise.
+ if (pc_delta <= RelocInfo::kMaxSmallPCDelta) {
+ min_reloc_size += 2;
+ } else {
+ min_reloc_size += 6;
+ }
+ prev_pc_offset = pc_offset;
+ }
+
+  // If the relocation information is not big enough, we create a new
+  // relocation info object that is padded with comments to make it
+  // big enough for lazy deoptimization.
+ int reloc_length = code->relocation_info()->length();
+ if (min_reloc_size > reloc_length) {
+ int comment_reloc_size = RelocInfo::kMinRelocCommentSize;
+ // Padding needed.
+ int min_padding = min_reloc_size - reloc_length;
+ // Number of comments needed to take up at least that much space.
+ int additional_comments =
+ (min_padding + comment_reloc_size - 1) / comment_reloc_size;
+ // Actual padding size.
+ int padding = additional_comments * comment_reloc_size;
+ // Allocate new relocation info and copy old relocation to the end
+ // of the new relocation info array because relocation info is
+ // written and read backwards.
+ Factory* factory = isolate->factory();
+ Handle<ByteArray> new_reloc =
+ factory->NewByteArray(reloc_length + padding, TENURED);
+ OS::MemCopy(new_reloc->GetDataStartAddress() + padding,
+ code->relocation_info()->GetDataStartAddress(),
+ reloc_length);
+ // Create a relocation writer to write the comments in the padding
+ // space. Use position 0 for everything to ensure short encoding.
+ RelocInfoWriter reloc_info_writer(
+ new_reloc->GetDataStartAddress() + padding, 0);
+ intptr_t comment_string
+ = reinterpret_cast<intptr_t>(RelocInfo::kFillerCommentString);
+ RelocInfo rinfo(0, RelocInfo::COMMENT, comment_string, NULL);
+ for (int i = 0; i < additional_comments; ++i) {
+#ifdef DEBUG
+ byte* pos_before = reloc_info_writer.pos();
+#endif
+ reloc_info_writer.Write(&rinfo);
+ ASSERT(RelocInfo::kMinRelocCommentSize ==
+ pos_before - reloc_info_writer.pos());
+ }
+ // Replace relocation information on the code object.
+ code->set_relocation_info(*new_reloc);
+ }
+}
+
+
+void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) {
+ Address code_start_address = code->instruction_start();
+
+ if (FLAG_zap_code_space) {
+ // Fail hard and early if we enter this code object again.
+ byte* pointer = code->FindCodeAgeSequence();
+ if (pointer != NULL) {
+ pointer += kNoCodeAgeSequenceLength;
+ } else {
+ pointer = code->instruction_start();
+ }
+ CodePatcher patcher(pointer, 1);
+ patcher.masm()->int3();
+
+ DeoptimizationInputData* data =
+ DeoptimizationInputData::cast(code->deoptimization_data());
+ int osr_offset = data->OsrPcOffset()->value();
+ if (osr_offset > 0) {
+ CodePatcher osr_patcher(code->instruction_start() + osr_offset, 1);
+ osr_patcher.masm()->int3();
+ }
+ }
+
+ // We will overwrite the code's relocation info in-place. Relocation info
+ // is written backward. The relocation info is the payload of a byte
+ // array. Later on we will slide this to the start of the byte array and
+ // create a filler object in the remaining space.
+ ByteArray* reloc_info = code->relocation_info();
+ Address reloc_end_address = reloc_info->address() + reloc_info->Size();
+ RelocInfoWriter reloc_info_writer(reloc_end_address, code_start_address);
+
+ // Since the call is a relative encoding, write new
+ // reloc info. We do not need any of the existing reloc info because the
+ // existing code will not be used again (we zap it in debug builds).
+ //
+ // Emit call to lazy deoptimization at all lazy deopt points.
+ DeoptimizationInputData* deopt_data =
+ DeoptimizationInputData::cast(code->deoptimization_data());
+ SharedFunctionInfo* shared =
+ SharedFunctionInfo::cast(deopt_data->SharedFunctionInfo());
+ shared->EvictFromOptimizedCodeMap(code, "deoptimized code");
+#ifdef DEBUG
+ Address prev_call_address = NULL;
+#endif
+ // For each LLazyBailout instruction insert a call to the corresponding
+ // deoptimization entry.
+ for (int i = 0; i < deopt_data->DeoptCount(); i++) {
+ if (deopt_data->Pc(i)->value() == -1) continue;
+ // Patch lazy deoptimization entry.
+ Address call_address = code_start_address + deopt_data->Pc(i)->value();
+ CodePatcher patcher(call_address, patch_size());
+ Address deopt_entry = GetDeoptimizationEntry(isolate, i, LAZY);
+ patcher.masm()->call(deopt_entry, RelocInfo::NONE32);
+ // We use RUNTIME_ENTRY for deoptimization bailouts.
+ RelocInfo rinfo(call_address + 1, // 1 after the call opcode.
+ RelocInfo::RUNTIME_ENTRY,
+ reinterpret_cast<intptr_t>(deopt_entry),
+ NULL);
+ reloc_info_writer.Write(&rinfo);
+ ASSERT_GE(reloc_info_writer.pos(),
+ reloc_info->address() + ByteArray::kHeaderSize);
+ ASSERT(prev_call_address == NULL ||
+ call_address >= prev_call_address + patch_size());
+ ASSERT(call_address + patch_size() <= code->instruction_end());
+#ifdef DEBUG
+ prev_call_address = call_address;
+#endif
+ }
+
+ // Move the relocation info to the beginning of the byte array.
+ int new_reloc_size = reloc_end_address - reloc_info_writer.pos();
+ OS::MemMove(
+ code->relocation_start(), reloc_info_writer.pos(), new_reloc_size);
+
+ // The relocation info is in place, update the size.
+ reloc_info->set_length(new_reloc_size);
+
+ // Handle the junk part after the new relocation info. We will create
+ // a non-live object in the extra space at the end of the former reloc info.
+ Address junk_address = reloc_info->address() + reloc_info->Size();
+ ASSERT(junk_address <= reloc_end_address);
+ isolate->heap()->CreateFillerObjectAt(junk_address,
+ reloc_end_address - junk_address);
+}
+
+
+void Deoptimizer::FillInputFrame(Address tos, JavaScriptFrame* frame) {
+ // Set the register values. The values are not important as there are no
+ // callee saved registers in JavaScript frames, so all registers are
+ // spilled. Registers ebp and esp are set to the correct values though.
+
+ for (int i = 0; i < Register::kNumRegisters; i++) {
+ input_->SetRegister(i, i * 4);
+ }
+ input_->SetRegister(esp.code(), reinterpret_cast<intptr_t>(frame->sp()));
+ input_->SetRegister(ebp.code(), reinterpret_cast<intptr_t>(frame->fp()));
+ for (int i = 0; i < DoubleRegister::NumAllocatableRegisters(); i++) {
+ input_->SetDoubleRegister(i, 0.0);
+ }
+
+ // Fill the frame content from the actual data on the frame.
+ for (unsigned i = 0; i < input_->GetFrameSize(); i += kPointerSize) {
+ input_->SetFrameSlot(i, Memory::uint32_at(tos + i));
+ }
+}
+
+
+void Deoptimizer::SetPlatformCompiledStubRegisters(
+ FrameDescription* output_frame, CodeStubInterfaceDescriptor* descriptor) {
+ intptr_t handler =
+ reinterpret_cast<intptr_t>(descriptor->deoptimization_handler_);
+ int params = descriptor->GetHandlerParameterCount();
+ output_frame->SetRegister(eax.code(), params);
+ output_frame->SetRegister(ebx.code(), handler);
+}
+
+
+void Deoptimizer::CopyDoubleRegisters(FrameDescription* output_frame) {
+ // Do nothing for X87.
+ return;
+}
+
+
+bool Deoptimizer::HasAlignmentPadding(JSFunction* function) {
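+  // The dynamic alignment state is stored in the first local slot of the
+  // frame; it records whether a padding word was pushed to align the frame.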
+ int parameter_count = function->shared()->formal_parameter_count() + 1;
+ unsigned input_frame_size = input_->GetFrameSize();
+ unsigned alignment_state_offset =
+ input_frame_size - parameter_count * kPointerSize -
+ StandardFrameConstants::kFixedFrameSize -
+ kPointerSize;
+ ASSERT(JavaScriptFrameConstants::kDynamicAlignmentStateOffset ==
+ JavaScriptFrameConstants::kLocal0Offset);
+ int32_t alignment_state = input_->GetFrameSlot(alignment_state_offset);
+ return (alignment_state == kAlignmentPaddingPushed);
+}
+
+
+#define __ masm()->
+
+void Deoptimizer::EntryGenerator::Generate() {
+ GeneratePrologue();
+
+ // Save all general purpose registers before messing with them.
+ const int kNumberOfRegisters = Register::kNumRegisters;
+ __ pushad();
+
+ const int kSavedRegistersAreaSize = kNumberOfRegisters * kPointerSize;
+
+ // Get the bailout id from the stack.
+ __ mov(ebx, Operand(esp, kSavedRegistersAreaSize));
+
+ // Get the address of the location in the code object
+ // and compute the fp-to-sp delta in register edx.
+ __ mov(ecx, Operand(esp, kSavedRegistersAreaSize + 1 * kPointerSize));
+ __ lea(edx, Operand(esp, kSavedRegistersAreaSize + 2 * kPointerSize));
+
+ __ sub(edx, ebp);
+ __ neg(edx);
+
+ // Allocate a new deoptimizer object.
+ __ PrepareCallCFunction(6, eax);
+ __ mov(eax, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
+ __ mov(Operand(esp, 0 * kPointerSize), eax); // Function.
+ __ mov(Operand(esp, 1 * kPointerSize), Immediate(type())); // Bailout type.
+ __ mov(Operand(esp, 2 * kPointerSize), ebx); // Bailout id.
+ __ mov(Operand(esp, 3 * kPointerSize), ecx); // Code address or 0.
+ __ mov(Operand(esp, 4 * kPointerSize), edx); // Fp-to-sp delta.
+ __ mov(Operand(esp, 5 * kPointerSize),
+ Immediate(ExternalReference::isolate_address(isolate())));
+ {
+ AllowExternalCallThatCantCauseGC scope(masm());
+ __ CallCFunction(ExternalReference::new_deoptimizer_function(isolate()), 6);
+ }
+
+ // Preserve deoptimizer object in register eax and get the input
+ // frame descriptor pointer.
+ __ mov(ebx, Operand(eax, Deoptimizer::input_offset()));
+
+ // Fill in the input registers.
+ for (int i = kNumberOfRegisters - 1; i >= 0; i--) {
+ int offset = (i * kPointerSize) + FrameDescription::registers_offset();
+ __ pop(Operand(ebx, offset));
+ }
+
+  // Clear all FPU exceptions.
+ // TODO(ulan): Find out why the TOP register is not zero here in some cases,
+ // and check that the generated code never deoptimizes with unbalanced stack.
+ __ fnclex();
+
+  // Remove the bailout id and the return address from the stack.
+ __ add(esp, Immediate(2 * kPointerSize));
+
+ // Compute a pointer to the unwinding limit in register ecx; that is
+ // the first stack slot not part of the input frame.
+ __ mov(ecx, Operand(ebx, FrameDescription::frame_size_offset()));
+ __ add(ecx, esp);
+
+ // Unwind the stack down to - but not including - the unwinding
+ // limit and copy the contents of the activation frame to the input
+ // frame description.
+ __ lea(edx, Operand(ebx, FrameDescription::frame_content_offset()));
+ Label pop_loop_header;
+ __ jmp(&pop_loop_header);
+ Label pop_loop;
+ __ bind(&pop_loop);
+ __ pop(Operand(edx, 0));
+ __ add(edx, Immediate(sizeof(uint32_t)));
+ __ bind(&pop_loop_header);
+ __ cmp(ecx, esp);
+ __ j(not_equal, &pop_loop);
+
+ // Compute the output frame in the deoptimizer.
+ __ push(eax);
+ __ PrepareCallCFunction(1, ebx);
+ __ mov(Operand(esp, 0 * kPointerSize), eax);
+ {
+ AllowExternalCallThatCantCauseGC scope(masm());
+ __ CallCFunction(
+ ExternalReference::compute_output_frames_function(isolate()), 1);
+ }
+ __ pop(eax);
+
+ // If frame was dynamically aligned, pop padding.
+ Label no_padding;
+ __ cmp(Operand(eax, Deoptimizer::has_alignment_padding_offset()),
+ Immediate(0));
+ __ j(equal, &no_padding);
+ __ pop(ecx);
+ if (FLAG_debug_code) {
+ __ cmp(ecx, Immediate(kAlignmentZapValue));
+ __ Assert(equal, kAlignmentMarkerExpected);
+ }
+ __ bind(&no_padding);
+
+ // Replace the current frame with the output frames.
+ Label outer_push_loop, inner_push_loop,
+ outer_loop_header, inner_loop_header;
+ // Outer loop state: eax = current FrameDescription**, edx = one past the
+ // last FrameDescription**.
+ __ mov(edx, Operand(eax, Deoptimizer::output_count_offset()));
+ __ mov(eax, Operand(eax, Deoptimizer::output_offset()));
+ __ lea(edx, Operand(eax, edx, times_4, 0));
+ __ jmp(&outer_loop_header);
+ __ bind(&outer_push_loop);
+ // Inner loop state: ebx = current FrameDescription*, ecx = loop index.
+ __ mov(ebx, Operand(eax, 0));
+ __ mov(ecx, Operand(ebx, FrameDescription::frame_size_offset()));
+ __ jmp(&inner_loop_header);
+ __ bind(&inner_push_loop);
+ __ sub(ecx, Immediate(sizeof(uint32_t)));
+ __ push(Operand(ebx, ecx, times_1, FrameDescription::frame_content_offset()));
+ __ bind(&inner_loop_header);
+ __ test(ecx, ecx);
+ __ j(not_zero, &inner_push_loop);
+ __ add(eax, Immediate(kPointerSize));
+ __ bind(&outer_loop_header);
+ __ cmp(eax, edx);
+ __ j(below, &outer_push_loop);
+
+ // Push state, pc, and continuation from the last output frame.
+ __ push(Operand(ebx, FrameDescription::state_offset()));
+ __ push(Operand(ebx, FrameDescription::pc_offset()));
+ __ push(Operand(ebx, FrameDescription::continuation_offset()));
+
+
+ // Push the registers from the last output frame.
+ for (int i = 0; i < kNumberOfRegisters; i++) {
+ int offset = (i * kPointerSize) + FrameDescription::registers_offset();
+ __ push(Operand(ebx, offset));
+ }
+
+ // Restore the registers from the stack.
+ __ popad();
+
+ // Return to the continuation point.
+ __ ret(0);
+}
+
+
+void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
+ // Create a sequence of deoptimization entries.
+ Label done;
+ for (int i = 0; i < count(); i++) {
+ int start = masm()->pc_offset();
+ USE(start);
+ __ push_imm32(i);
+ __ jmp(&done);
+ ASSERT(masm()->pc_offset() - start == table_entry_size_);
+ }
+ __ bind(&done);
+}
+
+
+void FrameDescription::SetCallerPc(unsigned offset, intptr_t value) {
+ SetFrameSlot(offset, value);
+}
+
+
+void FrameDescription::SetCallerFp(unsigned offset, intptr_t value) {
+ SetFrameSlot(offset, value);
+}
+
+
+void FrameDescription::SetCallerConstantPool(unsigned offset, intptr_t value) {
+ // No out-of-line constant pool support.
+ UNREACHABLE();
+}
+
+
+#undef __
+
+
+} } // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_X87
--- /dev/null
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <assert.h>
+#include <stdio.h>
+#include <stdarg.h>
+
+#include "v8.h"
+
+#if V8_TARGET_ARCH_X87
+
+#include "disasm.h"
+
+namespace disasm {
+
+enum OperandOrder {
+ UNSET_OP_ORDER = 0,
+ REG_OPER_OP_ORDER,
+ OPER_REG_OP_ORDER
+};
+
+
+//------------------------------------------------------------------
+// Tables
+//------------------------------------------------------------------
+struct ByteMnemonic {
+ int b; // -1 terminates, otherwise must be in range (0..255)
+ const char* mnem;
+ OperandOrder op_order_;
+};
+
+
+static const ByteMnemonic two_operands_instr[] = {
+ {0x01, "add", OPER_REG_OP_ORDER},
+ {0x03, "add", REG_OPER_OP_ORDER},
+ {0x09, "or", OPER_REG_OP_ORDER},
+ {0x0B, "or", REG_OPER_OP_ORDER},
+ {0x1B, "sbb", REG_OPER_OP_ORDER},
+ {0x21, "and", OPER_REG_OP_ORDER},
+ {0x23, "and", REG_OPER_OP_ORDER},
+ {0x29, "sub", OPER_REG_OP_ORDER},
+ {0x2A, "subb", REG_OPER_OP_ORDER},
+ {0x2B, "sub", REG_OPER_OP_ORDER},
+ {0x31, "xor", OPER_REG_OP_ORDER},
+ {0x33, "xor", REG_OPER_OP_ORDER},
+ {0x38, "cmpb", OPER_REG_OP_ORDER},
+ {0x3A, "cmpb", REG_OPER_OP_ORDER},
+ {0x3B, "cmp", REG_OPER_OP_ORDER},
+ {0x84, "test_b", REG_OPER_OP_ORDER},
+ {0x85, "test", REG_OPER_OP_ORDER},
+ {0x87, "xchg", REG_OPER_OP_ORDER},
+ {0x8A, "mov_b", REG_OPER_OP_ORDER},
+ {0x8B, "mov", REG_OPER_OP_ORDER},
+ {0x8D, "lea", REG_OPER_OP_ORDER},
+ {-1, "", UNSET_OP_ORDER}
+};
+
+
+static const ByteMnemonic zero_operands_instr[] = {
+ {0xC3, "ret", UNSET_OP_ORDER},
+ {0xC9, "leave", UNSET_OP_ORDER},
+ {0x90, "nop", UNSET_OP_ORDER},
+ {0xF4, "hlt", UNSET_OP_ORDER},
+ {0xCC, "int3", UNSET_OP_ORDER},
+ {0x60, "pushad", UNSET_OP_ORDER},
+ {0x61, "popad", UNSET_OP_ORDER},
+ {0x9C, "pushfd", UNSET_OP_ORDER},
+ {0x9D, "popfd", UNSET_OP_ORDER},
+ {0x9E, "sahf", UNSET_OP_ORDER},
+ {0x99, "cdq", UNSET_OP_ORDER},
+ {0x9B, "fwait", UNSET_OP_ORDER},
+ {0xFC, "cld", UNSET_OP_ORDER},
+ {0xAB, "stos", UNSET_OP_ORDER},
+ {-1, "", UNSET_OP_ORDER}
+};
+
+
+static const ByteMnemonic call_jump_instr[] = {
+ {0xE8, "call", UNSET_OP_ORDER},
+ {0xE9, "jmp", UNSET_OP_ORDER},
+ {-1, "", UNSET_OP_ORDER}
+};
+
+
+static const ByteMnemonic short_immediate_instr[] = {
+ {0x05, "add", UNSET_OP_ORDER},
+ {0x0D, "or", UNSET_OP_ORDER},
+ {0x15, "adc", UNSET_OP_ORDER},
+ {0x25, "and", UNSET_OP_ORDER},
+ {0x2D, "sub", UNSET_OP_ORDER},
+ {0x35, "xor", UNSET_OP_ORDER},
+ {0x3D, "cmp", UNSET_OP_ORDER},
+ {-1, "", UNSET_OP_ORDER}
+};
+
+
+// Generally we don't want to generate these because they are subject to
+// partial register stalls. They are included for completeness, and because
+// the cmp variant is used by the RecordWrite stub. Since cmp does not update
+// the register, it is not subject to partial register stalls.
+static ByteMnemonic byte_immediate_instr[] = {
+ {0x0c, "or", UNSET_OP_ORDER},
+ {0x24, "and", UNSET_OP_ORDER},
+ {0x34, "xor", UNSET_OP_ORDER},
+ {0x3c, "cmp", UNSET_OP_ORDER},
+ {-1, "", UNSET_OP_ORDER}
+};
+
+
+static const char* const jump_conditional_mnem[] = {
+ /*0*/ "jo", "jno", "jc", "jnc",
+ /*4*/ "jz", "jnz", "jna", "ja",
+ /*8*/ "js", "jns", "jpe", "jpo",
+ /*12*/ "jl", "jnl", "jng", "jg"
+};
+
+
+static const char* const set_conditional_mnem[] = {
+ /*0*/ "seto", "setno", "setc", "setnc",
+ /*4*/ "setz", "setnz", "setna", "seta",
+ /*8*/ "sets", "setns", "setpe", "setpo",
+ /*12*/ "setl", "setnl", "setng", "setg"
+};
+
+
+static const char* const conditional_move_mnem[] = {
+ /*0*/ "cmovo", "cmovno", "cmovc", "cmovnc",
+ /*4*/ "cmovz", "cmovnz", "cmovna", "cmova",
+ /*8*/ "cmovs", "cmovns", "cmovpe", "cmovpo",
+ /*12*/ "cmovl", "cmovnl", "cmovng", "cmovg"
+};
+
+
+enum InstructionType {
+ NO_INSTR,
+ ZERO_OPERANDS_INSTR,
+ TWO_OPERANDS_INSTR,
+ JUMP_CONDITIONAL_SHORT_INSTR,
+ REGISTER_INSTR,
+ MOVE_REG_INSTR,
+ CALL_JUMP_INSTR,
+ SHORT_IMMEDIATE_INSTR,
+ BYTE_IMMEDIATE_INSTR
+};
+
+
+struct InstructionDesc {
+ const char* mnem;
+ InstructionType type;
+ OperandOrder op_order_;
+};
+
+
+class InstructionTable {
+ public:
+ InstructionTable();
+ const InstructionDesc& Get(byte x) const { return instructions_[x]; }
+ static InstructionTable* get_instance() {
+ static InstructionTable table;
+ return &table;
+ }
+
+ private:
+ InstructionDesc instructions_[256];
+ void Clear();
+ void Init();
+ void CopyTable(const ByteMnemonic bm[], InstructionType type);
+ void SetTableRange(InstructionType type,
+ byte start,
+ byte end,
+ const char* mnem);
+ void AddJumpConditionalShort();
+};
+
+
+InstructionTable::InstructionTable() {
+ Clear();
+ Init();
+}
+
+
+void InstructionTable::Clear() {
+ for (int i = 0; i < 256; i++) {
+ instructions_[i].mnem = "";
+ instructions_[i].type = NO_INSTR;
+ instructions_[i].op_order_ = UNSET_OP_ORDER;
+ }
+}
+
+
+void InstructionTable::Init() {
+ CopyTable(two_operands_instr, TWO_OPERANDS_INSTR);
+ CopyTable(zero_operands_instr, ZERO_OPERANDS_INSTR);
+ CopyTable(call_jump_instr, CALL_JUMP_INSTR);
+ CopyTable(short_immediate_instr, SHORT_IMMEDIATE_INSTR);
+ CopyTable(byte_immediate_instr, BYTE_IMMEDIATE_INSTR);
+ AddJumpConditionalShort();
+ SetTableRange(REGISTER_INSTR, 0x40, 0x47, "inc");
+ SetTableRange(REGISTER_INSTR, 0x48, 0x4F, "dec");
+ SetTableRange(REGISTER_INSTR, 0x50, 0x57, "push");
+ SetTableRange(REGISTER_INSTR, 0x58, 0x5F, "pop");
+ SetTableRange(REGISTER_INSTR, 0x91, 0x97, "xchg eax,"); // 0x90 is nop.
+ SetTableRange(MOVE_REG_INSTR, 0xB8, 0xBF, "mov");
+}
+
+
+void InstructionTable::CopyTable(const ByteMnemonic bm[],
+ InstructionType type) {
+ for (int i = 0; bm[i].b >= 0; i++) {
+ InstructionDesc* id = &instructions_[bm[i].b];
+ id->mnem = bm[i].mnem;
+ id->op_order_ = bm[i].op_order_;
+ ASSERT_EQ(NO_INSTR, id->type); // Information not already entered.
+ id->type = type;
+ }
+}
+
+
+void InstructionTable::SetTableRange(InstructionType type,
+ byte start,
+ byte end,
+ const char* mnem) {
+ for (byte b = start; b <= end; b++) {
+ InstructionDesc* id = &instructions_[b];
+ ASSERT_EQ(NO_INSTR, id->type); // Information not already entered.
+ id->mnem = mnem;
+ id->type = type;
+ }
+}
+
+
+void InstructionTable::AddJumpConditionalShort() {
+ for (byte b = 0x70; b <= 0x7F; b++) {
+ InstructionDesc* id = &instructions_[b];
+ ASSERT_EQ(NO_INSTR, id->type); // Information not already entered.
+ id->mnem = jump_conditional_mnem[b & 0x0F];
+ id->type = JUMP_CONDITIONAL_SHORT_INSTR;
+ }
+}
+
+
+// The X87 disassembler implementation.
+class DisassemblerX87 {
+ public:
+ DisassemblerX87(const NameConverter& converter,
+ bool abort_on_unimplemented = true)
+ : converter_(converter),
+ instruction_table_(InstructionTable::get_instance()),
+ tmp_buffer_pos_(0),
+ abort_on_unimplemented_(abort_on_unimplemented) {
+ tmp_buffer_[0] = '\0';
+ }
+
+ virtual ~DisassemblerX87() {}
+
+ // Writes one disassembled instruction into 'buffer' (0-terminated).
+ // Returns the length of the disassembled machine instruction in bytes.
+ int InstructionDecode(v8::internal::Vector<char> buffer, byte* instruction);
+
+ private:
+ const NameConverter& converter_;
+ InstructionTable* instruction_table_;
+ v8::internal::EmbeddedVector<char, 128> tmp_buffer_;
+ unsigned int tmp_buffer_pos_;
+ bool abort_on_unimplemented_;
+
+ enum {
+ eax = 0,
+ ecx = 1,
+ edx = 2,
+ ebx = 3,
+ esp = 4,
+ ebp = 5,
+ esi = 6,
+ edi = 7
+ };
+
+
+ enum ShiftOpcodeExtension {
+ kROL = 0,
+ kROR = 1,
+ kRCL = 2,
+ kRCR = 3,
+ kSHL = 4,
+ KSHR = 5,
+ kSAR = 7
+ };
+
+
+ const char* NameOfCPURegister(int reg) const {
+ return converter_.NameOfCPURegister(reg);
+ }
+
+
+ const char* NameOfByteCPURegister(int reg) const {
+ return converter_.NameOfByteCPURegister(reg);
+ }
+
+
+ const char* NameOfXMMRegister(int reg) const {
+ return converter_.NameOfXMMRegister(reg);
+ }
+
+
+ const char* NameOfAddress(byte* addr) const {
+ return converter_.NameOfAddress(addr);
+ }
+
+
+ // Disassembler helper functions.
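+  // A ModR/M byte encodes mod in bits 7-6, reg/opcode in bits 5-3 and r/m in
+  // bits 2-0; a SIB byte encodes scale, index and base in the same positions.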
+ static void get_modrm(byte data, int* mod, int* regop, int* rm) {
+ *mod = (data >> 6) & 3;
+ *regop = (data & 0x38) >> 3;
+ *rm = data & 7;
+ }
+
+
+ static void get_sib(byte data, int* scale, int* index, int* base) {
+ *scale = (data >> 6) & 3;
+ *index = (data >> 3) & 7;
+ *base = data & 7;
+ }
+
+ typedef const char* (DisassemblerX87::*RegisterNameMapping)(int reg) const;
+
+ int PrintRightOperandHelper(byte* modrmp, RegisterNameMapping register_name);
+ int PrintRightOperand(byte* modrmp);
+ int PrintRightByteOperand(byte* modrmp);
+ int PrintRightXMMOperand(byte* modrmp);
+ int PrintOperands(const char* mnem, OperandOrder op_order, byte* data);
+ int PrintImmediateOp(byte* data);
+ int F7Instruction(byte* data);
+ int D1D3C1Instruction(byte* data);
+ int JumpShort(byte* data);
+ int JumpConditional(byte* data, const char* comment);
+ int JumpConditionalShort(byte* data, const char* comment);
+ int SetCC(byte* data);
+ int CMov(byte* data);
+ int FPUInstruction(byte* data);
+ int MemoryFPUInstruction(int escape_opcode, int regop, byte* modrm_start);
+ int RegisterFPUInstruction(int escape_opcode, byte modrm_byte);
+ void AppendToBuffer(const char* format, ...);
+
+
+ void UnimplementedInstruction() {
+ if (abort_on_unimplemented_) {
+ UNIMPLEMENTED();
+ } else {
+ AppendToBuffer("'Unimplemented Instruction'");
+ }
+ }
+};
+
+
+void DisassemblerX87::AppendToBuffer(const char* format, ...) {
+ v8::internal::Vector<char> buf = tmp_buffer_ + tmp_buffer_pos_;
+ va_list args;
+ va_start(args, format);
+ int result = v8::internal::OS::VSNPrintF(buf, format, args);
+ va_end(args);
+ tmp_buffer_pos_ += result;
+}
+
+int DisassemblerX87::PrintRightOperandHelper(
+ byte* modrmp,
+ RegisterNameMapping direct_register_name) {
+ int mod, regop, rm;
+  get_modrm(*modrmp, &mod, &regop, &rm);
+ RegisterNameMapping register_name = (mod == 3) ? direct_register_name :
+ &DisassemblerX87::NameOfCPURegister;
+ switch (mod) {
+ case 0:
+ if (rm == ebp) {
+ int32_t disp = *reinterpret_cast<int32_t*>(modrmp+1);
+ AppendToBuffer("[0x%x]", disp);
+ return 5;
+ } else if (rm == esp) {
+ byte sib = *(modrmp + 1);
+ int scale, index, base;
+ get_sib(sib, &scale, &index, &base);
+ if (index == esp && base == esp && scale == 0 /*times_1*/) {
+ AppendToBuffer("[%s]", (this->*register_name)(rm));
+ return 2;
+ } else if (base == ebp) {
+ int32_t disp = *reinterpret_cast<int32_t*>(modrmp + 2);
+ AppendToBuffer("[%s*%d%s0x%x]",
+ (this->*register_name)(index),
+ 1 << scale,
+ disp < 0 ? "-" : "+",
+ disp < 0 ? -disp : disp);
+ return 6;
+ } else if (index != esp && base != ebp) {
+ // [base+index*scale]
+ AppendToBuffer("[%s+%s*%d]",
+ (this->*register_name)(base),
+ (this->*register_name)(index),
+ 1 << scale);
+ return 2;
+ } else {
+ UnimplementedInstruction();
+ return 1;
+ }
+ } else {
+ AppendToBuffer("[%s]", (this->*register_name)(rm));
+ return 1;
+ }
+ break;
+ case 1: // fall through
+ case 2:
+ if (rm == esp) {
+ byte sib = *(modrmp + 1);
+ int scale, index, base;
+ get_sib(sib, &scale, &index, &base);
+ int disp = mod == 2 ? *reinterpret_cast<int32_t*>(modrmp + 2)
+ : *reinterpret_cast<int8_t*>(modrmp + 2);
+ if (index == base && index == rm /*esp*/ && scale == 0 /*times_1*/) {
+ AppendToBuffer("[%s%s0x%x]",
+ (this->*register_name)(rm),
+ disp < 0 ? "-" : "+",
+ disp < 0 ? -disp : disp);
+ } else {
+ AppendToBuffer("[%s+%s*%d%s0x%x]",
+ (this->*register_name)(base),
+ (this->*register_name)(index),
+ 1 << scale,
+ disp < 0 ? "-" : "+",
+ disp < 0 ? -disp : disp);
+ }
+ return mod == 2 ? 6 : 3;
+ } else {
+ // No sib.
+ int disp = mod == 2 ? *reinterpret_cast<int32_t*>(modrmp + 1)
+ : *reinterpret_cast<int8_t*>(modrmp + 1);
+ AppendToBuffer("[%s%s0x%x]",
+ (this->*register_name)(rm),
+ disp < 0 ? "-" : "+",
+ disp < 0 ? -disp : disp);
+ return mod == 2 ? 5 : 2;
+ }
+ break;
+ case 3:
+ AppendToBuffer("%s", (this->*register_name)(rm));
+ return 1;
+ default:
+ UnimplementedInstruction();
+ return 1;
+ }
+ UNREACHABLE();
+}
+
+
+int DisassemblerX87::PrintRightOperand(byte* modrmp) {
+ return PrintRightOperandHelper(modrmp, &DisassemblerX87::NameOfCPURegister);
+}
+
+
+int DisassemblerX87::PrintRightByteOperand(byte* modrmp) {
+ return PrintRightOperandHelper(modrmp,
+ &DisassemblerX87::NameOfByteCPURegister);
+}
+
+
+int DisassemblerX87::PrintRightXMMOperand(byte* modrmp) {
+ return PrintRightOperandHelper(modrmp,
+ &DisassemblerX87::NameOfXMMRegister);
+}
+
+
+// Returns number of bytes used including the current *data.
+// Writes instruction's mnemonic, left and right operands to 'tmp_buffer_'.
+int DisassemblerX87::PrintOperands(const char* mnem,
+ OperandOrder op_order,
+ byte* data) {
+ byte modrm = *data;
+ int mod, regop, rm;
+  get_modrm(modrm, &mod, &regop, &rm);
+ int advance = 0;
+ switch (op_order) {
+ case REG_OPER_OP_ORDER: {
+ AppendToBuffer("%s %s,", mnem, NameOfCPURegister(regop));
+ advance = PrintRightOperand(data);
+ break;
+ }
+ case OPER_REG_OP_ORDER: {
+ AppendToBuffer("%s ", mnem);
+ advance = PrintRightOperand(data);
+ AppendToBuffer(",%s", NameOfCPURegister(regop));
+ break;
+ }
+ default:
+ UNREACHABLE();
+ break;
+ }
+ return advance;
+}
+
+
+// Returns number of bytes used by machine instruction, including *data byte.
+// Writes immediate instructions to 'tmp_buffer_'.
+int DisassemblerX87::PrintImmediateOp(byte* data) {
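+  // Opcode 0x83 (sign-extension bit set) takes a sign-extended 8-bit
+  // immediate, while 0x81 takes a full 32-bit immediate.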
+ bool sign_extension_bit = (*data & 0x02) != 0;
+ byte modrm = *(data+1);
+ int mod, regop, rm;
+  get_modrm(modrm, &mod, &regop, &rm);
+ const char* mnem = "Imm???";
+ switch (regop) {
+ case 0: mnem = "add"; break;
+ case 1: mnem = "or"; break;
+ case 2: mnem = "adc"; break;
+ case 4: mnem = "and"; break;
+ case 5: mnem = "sub"; break;
+ case 6: mnem = "xor"; break;
+ case 7: mnem = "cmp"; break;
+ default: UnimplementedInstruction();
+ }
+ AppendToBuffer("%s ", mnem);
+ int count = PrintRightOperand(data+1);
+ if (sign_extension_bit) {
+ AppendToBuffer(",0x%x", *(data + 1 + count));
+ return 1 + count + 1 /*int8*/;
+ } else {
+ AppendToBuffer(",0x%x", *reinterpret_cast<int32_t*>(data + 1 + count));
+ return 1 + count + 4 /*int32_t*/;
+ }
+}
+
+
+// Returns number of bytes used, including *data.
+int DisassemblerX87::F7Instruction(byte* data) {
+ ASSERT_EQ(0xF7, *data);
+ byte modrm = *(data+1);
+ int mod, regop, rm;
+  get_modrm(modrm, &mod, &regop, &rm);
+ if (mod == 3 && regop != 0) {
+ const char* mnem = NULL;
+ switch (regop) {
+ case 2: mnem = "not"; break;
+ case 3: mnem = "neg"; break;
+ case 4: mnem = "mul"; break;
+ case 5: mnem = "imul"; break;
+ case 7: mnem = "idiv"; break;
+ default: UnimplementedInstruction();
+ }
+ AppendToBuffer("%s %s", mnem, NameOfCPURegister(rm));
+ return 2;
+ } else if (mod == 3 && regop == eax) {
+ int32_t imm = *reinterpret_cast<int32_t*>(data+2);
+ AppendToBuffer("test %s,0x%x", NameOfCPURegister(rm), imm);
+ return 6;
+ } else if (regop == eax) {
+ AppendToBuffer("test ");
+ int count = PrintRightOperand(data+1);
+ int32_t imm = *reinterpret_cast<int32_t*>(data+1+count);
+ AppendToBuffer(",0x%x", imm);
+ return 1+count+4 /*int32_t*/;
+ } else {
+ UnimplementedInstruction();
+ return 2;
+ }
+}
+
+
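+// 0xD1 encodes a shift/rotate by 1, 0xC1 by an 8-bit immediate and 0xD3 by
+// the cl register.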
+int DisassemblerX87::D1D3C1Instruction(byte* data) {
+ byte op = *data;
+ ASSERT(op == 0xD1 || op == 0xD3 || op == 0xC1);
+ byte modrm = *(data+1);
+ int mod, regop, rm;
+  get_modrm(modrm, &mod, &regop, &rm);
+ int imm8 = -1;
+ int num_bytes = 2;
+ if (mod == 3) {
+ const char* mnem = NULL;
+ switch (regop) {
+ case kROL: mnem = "rol"; break;
+ case kROR: mnem = "ror"; break;
+ case kRCL: mnem = "rcl"; break;
+ case kRCR: mnem = "rcr"; break;
+ case kSHL: mnem = "shl"; break;
+ case KSHR: mnem = "shr"; break;
+ case kSAR: mnem = "sar"; break;
+ default: UnimplementedInstruction();
+ }
+ if (op == 0xD1) {
+ imm8 = 1;
+ } else if (op == 0xC1) {
+ imm8 = *(data+2);
+ num_bytes = 3;
+ } else if (op == 0xD3) {
+ // Shift/rotate by cl.
+ }
+ ASSERT_NE(NULL, mnem);
+ AppendToBuffer("%s %s,", mnem, NameOfCPURegister(rm));
+ if (imm8 >= 0) {
+ AppendToBuffer("%d", imm8);
+ } else {
+ AppendToBuffer("cl");
+ }
+ } else {
+ UnimplementedInstruction();
+ }
+ return num_bytes;
+}
+
+
+// Returns number of bytes used, including *data.
+int DisassemblerX87::JumpShort(byte* data) {
+ ASSERT_EQ(0xEB, *data);
+ byte b = *(data+1);
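+  // The 8-bit displacement is relative to the end of the two-byte
+  // instruction.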
+ byte* dest = data + static_cast<int8_t>(b) + 2;
+ AppendToBuffer("jmp %s", NameOfAddress(dest));
+ return 2;
+}
+
+
+// Returns number of bytes used, including *data.
+int DisassemblerX87::JumpConditional(byte* data, const char* comment) {
+ ASSERT_EQ(0x0F, *data);
+ byte cond = *(data+1) & 0x0F;
+ byte* dest = data + *reinterpret_cast<int32_t*>(data+2) + 6;
+ const char* mnem = jump_conditional_mnem[cond];
+ AppendToBuffer("%s %s", mnem, NameOfAddress(dest));
+ if (comment != NULL) {
+ AppendToBuffer(", %s", comment);
+ }
+ return 6; // includes 0x0F
+}
+
+
+// Returns number of bytes used, including *data.
+int DisassemblerX87::JumpConditionalShort(byte* data, const char* comment) {
+ byte cond = *data & 0x0F;
+ byte b = *(data+1);
+ byte* dest = data + static_cast<int8_t>(b) + 2;
+ const char* mnem = jump_conditional_mnem[cond];
+ AppendToBuffer("%s %s", mnem, NameOfAddress(dest));
+ if (comment != NULL) {
+ AppendToBuffer(", %s", comment);
+ }
+ return 2;
+}
+
+
+// Returns number of bytes used, including *data.
+int DisassemblerX87::SetCC(byte* data) {
+ ASSERT_EQ(0x0F, *data);
+ byte cond = *(data+1) & 0x0F;
+ const char* mnem = set_conditional_mnem[cond];
+ AppendToBuffer("%s ", mnem);
+ PrintRightByteOperand(data+2);
+ return 3; // Includes 0x0F.
+}
+
+
+// Returns number of bytes used, including *data.
+int DisassemblerX87::CMov(byte* data) {
+ ASSERT_EQ(0x0F, *data);
+ byte cond = *(data + 1) & 0x0F;
+ const char* mnem = conditional_move_mnem[cond];
+ int op_size = PrintOperands(mnem, REG_OPER_OP_ORDER, data + 2);
+ return 2 + op_size; // includes 0x0F
+}
+
+
+// Returns number of bytes used, including *data.
+int DisassemblerX87::FPUInstruction(byte* data) {
+ byte escape_opcode = *data;
+ ASSERT_EQ(0xD8, escape_opcode & 0xF8);
+ byte modrm_byte = *(data+1);
+
+ if (modrm_byte >= 0xC0) {
+ return RegisterFPUInstruction(escape_opcode, modrm_byte);
+ } else {
+ return MemoryFPUInstruction(escape_opcode, modrm_byte, data+1);
+ }
+}
+
+int DisassemblerX87::MemoryFPUInstruction(int escape_opcode,
+ int modrm_byte,
+ byte* modrm_start) {
+ const char* mnem = "?";
+ int regop = (modrm_byte >> 3) & 0x7; // reg/op field of modrm byte.
+ switch (escape_opcode) {
+ case 0xD9: switch (regop) {
+ case 0: mnem = "fld_s"; break;
+ case 2: mnem = "fst_s"; break;
+ case 3: mnem = "fstp_s"; break;
+ case 7: mnem = "fstcw"; break;
+ default: UnimplementedInstruction();
+ }
+ break;
+
+ case 0xDB: switch (regop) {
+ case 0: mnem = "fild_s"; break;
+ case 1: mnem = "fisttp_s"; break;
+ case 2: mnem = "fist_s"; break;
+ case 3: mnem = "fistp_s"; break;
+ default: UnimplementedInstruction();
+ }
+ break;
+
+ case 0xDD: switch (regop) {
+ case 0: mnem = "fld_d"; break;
+ case 1: mnem = "fisttp_d"; break;
+ case 2: mnem = "fst_d"; break;
+ case 3: mnem = "fstp_d"; break;
+ default: UnimplementedInstruction();
+ }
+ break;
+
+ case 0xDF: switch (regop) {
+ case 5: mnem = "fild_d"; break;
+ case 7: mnem = "fistp_d"; break;
+ default: UnimplementedInstruction();
+ }
+ break;
+
+ default: UnimplementedInstruction();
+ }
+ AppendToBuffer("%s ", mnem);
+ int count = PrintRightOperand(modrm_start);
+ return count + 1;
+}
+
+int DisassemblerX87::RegisterFPUInstruction(int escape_opcode,
+ byte modrm_byte) {
+ bool has_register = false; // Is the FPU register encoded in modrm_byte?
+ const char* mnem = "?";
+
+ switch (escape_opcode) {
+ case 0xD8:
+ has_register = true;
+ switch (modrm_byte & 0xF8) {
+ case 0xC0: mnem = "fadd_i"; break;
+ case 0xE0: mnem = "fsub_i"; break;
+ case 0xC8: mnem = "fmul_i"; break;
+ case 0xF0: mnem = "fdiv_i"; break;
+ default: UnimplementedInstruction();
+ }
+ break;
+
+ case 0xD9:
+ switch (modrm_byte & 0xF8) {
+ case 0xC0:
+ mnem = "fld";
+ has_register = true;
+ break;
+ case 0xC8:
+ mnem = "fxch";
+ has_register = true;
+ break;
+ default:
+ switch (modrm_byte) {
+ case 0xE0: mnem = "fchs"; break;
+ case 0xE1: mnem = "fabs"; break;
+ case 0xE4: mnem = "ftst"; break;
+ case 0xE8: mnem = "fld1"; break;
+ case 0xEB: mnem = "fldpi"; break;
+ case 0xED: mnem = "fldln2"; break;
+ case 0xEE: mnem = "fldz"; break;
+ case 0xF0: mnem = "f2xm1"; break;
+ case 0xF1: mnem = "fyl2x"; break;
+ case 0xF4: mnem = "fxtract"; break;
+ case 0xF5: mnem = "fprem1"; break;
+ case 0xF7: mnem = "fincstp"; break;
+ case 0xF8: mnem = "fprem"; break;
+ case 0xFC: mnem = "frndint"; break;
+ case 0xFD: mnem = "fscale"; break;
+ case 0xFE: mnem = "fsin"; break;
+ case 0xFF: mnem = "fcos"; break;
+ default: UnimplementedInstruction();
+ }
+ }
+ break;
+
+ case 0xDA:
+ if (modrm_byte == 0xE9) {
+ mnem = "fucompp";
+ } else {
+ UnimplementedInstruction();
+ }
+ break;
+
+ case 0xDB:
+ if ((modrm_byte & 0xF8) == 0xE8) {
+ mnem = "fucomi";
+ has_register = true;
+ } else if (modrm_byte == 0xE2) {
+ mnem = "fclex";
+ } else if (modrm_byte == 0xE3) {
+ mnem = "fninit";
+ } else {
+ UnimplementedInstruction();
+ }
+ break;
+
+ case 0xDC:
+ has_register = true;
+ switch (modrm_byte & 0xF8) {
+ case 0xC0: mnem = "fadd"; break;
+ case 0xE8: mnem = "fsub"; break;
+ case 0xC8: mnem = "fmul"; break;
+ case 0xF8: mnem = "fdiv"; break;
+ default: UnimplementedInstruction();
+ }
+ break;
+
+ case 0xDD:
+ has_register = true;
+ switch (modrm_byte & 0xF8) {
+ case 0xC0: mnem = "ffree"; break;
+ case 0xD0: mnem = "fst"; break;
+ case 0xD8: mnem = "fstp"; break;
+ default: UnimplementedInstruction();
+ }
+ break;
+
+ case 0xDE:
+ if (modrm_byte == 0xD9) {
+ mnem = "fcompp";
+ } else {
+ has_register = true;
+ switch (modrm_byte & 0xF8) {
+ case 0xC0: mnem = "faddp"; break;
+ case 0xE8: mnem = "fsubp"; break;
+ case 0xC8: mnem = "fmulp"; break;
+ case 0xF8: mnem = "fdivp"; break;
+ default: UnimplementedInstruction();
+ }
+ }
+ break;
+
+ case 0xDF:
+ if (modrm_byte == 0xE0) {
+ mnem = "fnstsw_ax";
+ } else if ((modrm_byte & 0xF8) == 0xE8) {
+ mnem = "fucomip";
+ has_register = true;
+ }
+ break;
+
+ default: UnimplementedInstruction();
+ }
+
+ if (has_register) {
+ AppendToBuffer("%s st%d", mnem, modrm_byte & 0x7);
+ } else {
+ AppendToBuffer("%s", mnem);
+ }
+ return 2;
+}
+
+
+// Mnemonics for two-byte opcodes that start with the 0x0F prefix; the
+// argument is the second opcode byte.
+// Returns NULL if the instruction is not handled here.
+static const char* F0Mnem(byte f0byte) {
+ switch (f0byte) {
+ case 0x18: return "prefetch";
+ case 0xA2: return "cpuid";
+ case 0xBE: return "movsx_b";
+ case 0xBF: return "movsx_w";
+ case 0xB6: return "movzx_b";
+ case 0xB7: return "movzx_w";
+ case 0xAF: return "imul";
+ case 0xA5: return "shld";
+ case 0xAD: return "shrd";
+ case 0xAC: return "shrd"; // 3-operand version.
+ case 0xAB: return "bts";
+ case 0xBD: return "bsr";
+ default: return NULL;
+ }
+}
+
+
+// Disassembles the instruction starting at 'instr' and writes it into
+// 'out_buffer'.
+int DisassemblerX87::InstructionDecode(v8::internal::Vector<char> out_buffer,
+ byte* instr) {
+  tmp_buffer_pos_ = 0;  // Start writing at position 0.
+ byte* data = instr;
+ // Check for hints.
+ const char* branch_hint = NULL;
+  // We use these two prefixes only for branch prediction hints.
+ if (*data == 0x3E /*ds*/) {
+ branch_hint = "predicted taken";
+ data++;
+ } else if (*data == 0x2E /*cs*/) {
+ branch_hint = "predicted not taken";
+ data++;
+ }
+ bool processed = true; // Will be set to false if the current instruction
+ // is not in 'instructions' table.
+ const InstructionDesc& idesc = instruction_table_->Get(*data);
+ switch (idesc.type) {
+ case ZERO_OPERANDS_INSTR:
+ AppendToBuffer(idesc.mnem);
+ data++;
+ break;
+
+ case TWO_OPERANDS_INSTR:
+ data++;
+ data += PrintOperands(idesc.mnem, idesc.op_order_, data);
+ break;
+
+ case JUMP_CONDITIONAL_SHORT_INSTR:
+ data += JumpConditionalShort(data, branch_hint);
+ break;
+
+ case REGISTER_INSTR:
+ AppendToBuffer("%s %s", idesc.mnem, NameOfCPURegister(*data & 0x07));
+ data++;
+ break;
+
+ case MOVE_REG_INSTR: {
+ byte* addr = reinterpret_cast<byte*>(*reinterpret_cast<int32_t*>(data+1));
+ AppendToBuffer("mov %s,%s",
+ NameOfCPURegister(*data & 0x07),
+ NameOfAddress(addr));
+ data += 5;
+ break;
+ }
+
+ case CALL_JUMP_INSTR: {
+ byte* addr = data + *reinterpret_cast<int32_t*>(data+1) + 5;
+ AppendToBuffer("%s %s", idesc.mnem, NameOfAddress(addr));
+ data += 5;
+ break;
+ }
+
+ case SHORT_IMMEDIATE_INSTR: {
+ byte* addr = reinterpret_cast<byte*>(*reinterpret_cast<int32_t*>(data+1));
+ AppendToBuffer("%s eax,%s", idesc.mnem, NameOfAddress(addr));
+ data += 5;
+ break;
+ }
+
+ case BYTE_IMMEDIATE_INSTR: {
+ AppendToBuffer("%s al,0x%x", idesc.mnem, data[1]);
+ data += 2;
+ break;
+ }
+
+ case NO_INSTR:
+ processed = false;
+ break;
+
+ default:
+ UNIMPLEMENTED(); // This type is not implemented.
+ }
+ //----------------------------
+ if (!processed) {
+ switch (*data) {
+ case 0xC2:
+ AppendToBuffer("ret 0x%x", *reinterpret_cast<uint16_t*>(data+1));
+ data += 3;
+ break;
+
+ case 0x69: // fall through
+ case 0x6B:
+ { int mod, regop, rm;
+          get_modrm(*(data+1), &mod, &regop, &rm);
+ int32_t imm =
+ *data == 0x6B ? *(data+2) : *reinterpret_cast<int32_t*>(data+2);
+ AppendToBuffer("imul %s,%s,0x%x",
+ NameOfCPURegister(regop),
+ NameOfCPURegister(rm),
+ imm);
+ data += 2 + (*data == 0x6B ? 1 : 4);
+ }
+ break;
+
+ case 0xF6:
+ { data++;
+ int mod, regop, rm;
+          get_modrm(*data, &mod, &regop, &rm);
+ if (regop == eax) {
+ AppendToBuffer("test_b ");
+ data += PrintRightByteOperand(data);
+ int32_t imm = *data;
+ AppendToBuffer(",0x%x", imm);
+ data++;
+ } else {
+ UnimplementedInstruction();
+ }
+ }
+ break;
+
+ case 0x81: // fall through
+ case 0x83: // 0x81 with sign extension bit set
+ data += PrintImmediateOp(data);
+ break;
+
+ case 0x0F:
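+        // Two-byte opcodes: 0x0F followed by a second opcode byte.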
+ { byte f0byte = data[1];
+ const char* f0mnem = F0Mnem(f0byte);
+ if (f0byte == 0x18) {
+ data += 2;
+ int mod, regop, rm;
+            get_modrm(*data, &mod, &regop, &rm);
+ const char* suffix[] = {"nta", "1", "2", "3"};
+ AppendToBuffer("%s%s ", f0mnem, suffix[regop & 0x03]);
+ data += PrintRightOperand(data);
+ } else if (f0byte == 0x1F && data[2] == 0) {
+ AppendToBuffer("nop"); // 3 byte nop.
+ data += 3;
+ } else if (f0byte == 0x1F && data[2] == 0x40 && data[3] == 0) {
+ AppendToBuffer("nop"); // 4 byte nop.
+ data += 4;
+ } else if (f0byte == 0x1F && data[2] == 0x44 && data[3] == 0 &&
+ data[4] == 0) {
+ AppendToBuffer("nop"); // 5 byte nop.
+ data += 5;
+ } else if (f0byte == 0x1F && data[2] == 0x80 && data[3] == 0 &&
+ data[4] == 0 && data[5] == 0 && data[6] == 0) {
+ AppendToBuffer("nop"); // 7 byte nop.
+ data += 7;
+ } else if (f0byte == 0x1F && data[2] == 0x84 && data[3] == 0 &&
+ data[4] == 0 && data[5] == 0 && data[6] == 0 &&
+ data[7] == 0) {
+ AppendToBuffer("nop"); // 8 byte nop.
+ data += 8;
+ } else if (f0byte == 0xA2 || f0byte == 0x31) {
+ AppendToBuffer("%s", f0mnem);
+ data += 2;
+ } else if (f0byte == 0x28) {
+ data += 2;
+ int mod, regop, rm;
+            get_modrm(*data, &mod, &regop, &rm);
+ AppendToBuffer("movaps %s,%s",
+ NameOfXMMRegister(regop),
+ NameOfXMMRegister(rm));
+ data++;
+ } else if (f0byte >= 0x53 && f0byte <= 0x5F) {
+ const char* const pseudo_op[] = {
+ "rcpps",
+ "andps",
+ "andnps",
+ "orps",
+ "xorps",
+ "addps",
+ "mulps",
+ "cvtps2pd",
+ "cvtdq2ps",
+ "subps",
+ "minps",
+ "divps",
+ "maxps",
+ };
+
+ data += 2;
+ int mod, regop, rm;
+            get_modrm(*data, &mod, &regop, &rm);
+ AppendToBuffer("%s %s,",
+ pseudo_op[f0byte - 0x53],
+ NameOfXMMRegister(regop));
+ data += PrintRightXMMOperand(data);
+ } else if (f0byte == 0x50) {
+ data += 2;
+ int mod, regop, rm;
+            get_modrm(*data, &mod, &regop, &rm);
+ AppendToBuffer("movmskps %s,%s",
+ NameOfCPURegister(regop),
+ NameOfXMMRegister(rm));
+ data++;
+          } else if (f0byte == 0xC6) {
+ // shufps xmm, xmm/m128, imm8
+ data += 2;
+ int mod, regop, rm;
+            get_modrm(*data, &mod, &regop, &rm);
+ int8_t imm8 = static_cast<int8_t>(data[1]);
+ AppendToBuffer("shufps %s,%s,%d",
+ NameOfXMMRegister(rm),
+ NameOfXMMRegister(regop),
+ static_cast<int>(imm8));
+ data += 2;
+ } else if ((f0byte & 0xF0) == 0x80) {
+ data += JumpConditional(data, branch_hint);
+ } else if (f0byte == 0xBE || f0byte == 0xBF || f0byte == 0xB6 ||
+ f0byte == 0xB7 || f0byte == 0xAF) {
+ data += 2;
+ data += PrintOperands(f0mnem, REG_OPER_OP_ORDER, data);
+ } else if ((f0byte & 0xF0) == 0x90) {
+ data += SetCC(data);
+ } else if ((f0byte & 0xF0) == 0x40) {
+ data += CMov(data);
+ } else if (f0byte == 0xAB || f0byte == 0xA5 || f0byte == 0xAD) {
+ // shrd, shld, bts
+ data += 2;
+ AppendToBuffer("%s ", f0mnem);
+ int mod, regop, rm;
+            get_modrm(*data, &mod, &regop, &rm);
+ data += PrintRightOperand(data);
+ if (f0byte == 0xAB) {
+ AppendToBuffer(",%s", NameOfCPURegister(regop));
+ } else {
+ AppendToBuffer(",%s,cl", NameOfCPURegister(regop));
+ }
+ } else if (f0byte == 0xBD) {
+ data += 2;
+ int mod, regop, rm;
+            get_modrm(*data, &mod, &regop, &rm);
+ AppendToBuffer("%s %s,", f0mnem, NameOfCPURegister(regop));
+ data += PrintRightOperand(data);
+ } else {
+ UnimplementedInstruction();
+ }
+ }
+ break;
+
+ case 0x8F:
+ { data++;
+ int mod, regop, rm;
+          get_modrm(*data, &mod, &regop, &rm);
+ if (regop == eax) {
+ AppendToBuffer("pop ");
+ data += PrintRightOperand(data);
+ }
+ }
+ break;
+
+ case 0xFF:
+ { data++;
+ int mod, regop, rm;
+          get_modrm(*data, &mod, &regop, &rm);
+ const char* mnem = NULL;
+ switch (regop) {
+ case esi: mnem = "push"; break;
+ case eax: mnem = "inc"; break;
+ case ecx: mnem = "dec"; break;
+ case edx: mnem = "call"; break;
+ case esp: mnem = "jmp"; break;
+ default: mnem = "???";
+ }
+ AppendToBuffer("%s ", mnem);
+ data += PrintRightOperand(data);
+ }
+ break;
+
+ case 0xC7: // imm32, fall through
+ case 0xC6: // imm8
+ { bool is_byte = *data == 0xC6;
+ data++;
+ if (is_byte) {
+ AppendToBuffer("%s ", "mov_b");
+ data += PrintRightByteOperand(data);
+ int32_t imm = *data;
+ AppendToBuffer(",0x%x", imm);
+ data++;
+ } else {
+ AppendToBuffer("%s ", "mov");
+ data += PrintRightOperand(data);
+ int32_t imm = *reinterpret_cast<int32_t*>(data);
+ AppendToBuffer(",0x%x", imm);
+ data += 4;
+ }
+ }
+ break;
+
+ case 0x80:
+ { data++;
+ int mod, regop, rm;
+          get_modrm(*data, &mod, &regop, &rm);
+ const char* mnem = NULL;
+ switch (regop) {
+ case 5: mnem = "subb"; break;
+ case 7: mnem = "cmpb"; break;
+ default: UnimplementedInstruction();
+ }
+ AppendToBuffer("%s ", mnem);
+ data += PrintRightByteOperand(data);
+ int32_t imm = *data;
+ AppendToBuffer(",0x%x", imm);
+ data++;
+ }
+ break;
+
+ case 0x88: // 8bit, fall through
+ case 0x89: // 32bit
+ { bool is_byte = *data == 0x88;
+ int mod, regop, rm;
+ data++;
+          get_modrm(*data, &mod, &regop, &rm);
+ if (is_byte) {
+ AppendToBuffer("%s ", "mov_b");
+ data += PrintRightByteOperand(data);
+ AppendToBuffer(",%s", NameOfByteCPURegister(regop));
+ } else {
+ AppendToBuffer("%s ", "mov");
+ data += PrintRightOperand(data);
+ AppendToBuffer(",%s", NameOfCPURegister(regop));
+ }
+ }
+ break;
+
+ case 0x66: // prefix
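+        // 0x66 is the operand-size override prefix; it is also part of the
+        // 66-prefixed SSE2 instruction encodings handled below.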
+ while (*data == 0x66) data++;
+        if (*data == 0x0F && data[1] == 0x1F) {
+ AppendToBuffer("nop"); // 0x66 prefix
+ } else if (*data == 0x90) {
+ AppendToBuffer("nop"); // 0x66 prefix
+ } else if (*data == 0x8B) {
+ data++;
+ data += PrintOperands("mov_w", REG_OPER_OP_ORDER, data);
+ } else if (*data == 0x89) {
+ data++;
+ int mod, regop, rm;
+          get_modrm(*data, &mod, &regop, &rm);
+ AppendToBuffer("mov_w ");
+ data += PrintRightOperand(data);
+ AppendToBuffer(",%s", NameOfCPURegister(regop));
+ } else if (*data == 0xC7) {
+ data++;
+ AppendToBuffer("%s ", "mov_w");
+ data += PrintRightOperand(data);
+ int imm = *reinterpret_cast<int16_t*>(data);
+ AppendToBuffer(",0x%x", imm);
+ data += 2;
+ } else if (*data == 0x0F) {
+ data++;
+ if (*data == 0x38) {
+ data++;
+ if (*data == 0x17) {
+ data++;
+ int mod, regop, rm;
+              get_modrm(*data, &mod, &regop, &rm);
+ AppendToBuffer("ptest %s,%s",
+ NameOfXMMRegister(regop),
+ NameOfXMMRegister(rm));
+ data++;
+ } else if (*data == 0x2A) {
+ // movntdqa
+ data++;
+ int mod, regop, rm;
+              get_modrm(*data, &mod, &regop, &rm);
+ AppendToBuffer("movntdqa %s,", NameOfXMMRegister(regop));
+ data += PrintRightOperand(data);
+ } else {
+ UnimplementedInstruction();
+ }
+ } else if (*data == 0x3A) {
+ data++;
+ if (*data == 0x0B) {
+ data++;
+ int mod, regop, rm;
+              get_modrm(*data, &mod, &regop, &rm);
+ int8_t imm8 = static_cast<int8_t>(data[1]);
+ AppendToBuffer("roundsd %s,%s,%d",
+ NameOfXMMRegister(regop),
+ NameOfXMMRegister(rm),
+ static_cast<int>(imm8));
+ data += 2;
+ } else if (*data == 0x16) {
+ data++;
+ int mod, regop, rm;
+              get_modrm(*data, &mod, &regop, &rm);
+ int8_t imm8 = static_cast<int8_t>(data[1]);
+ AppendToBuffer("pextrd %s,%s,%d",
+ NameOfCPURegister(regop),
+ NameOfXMMRegister(rm),
+ static_cast<int>(imm8));
+ data += 2;
+ } else if (*data == 0x17) {
+ data++;
+ int mod, regop, rm;
+              get_modrm(*data, &mod, &regop, &rm);
+ int8_t imm8 = static_cast<int8_t>(data[1]);
+ AppendToBuffer("extractps %s,%s,%d",
+ NameOfCPURegister(rm),
+ NameOfXMMRegister(regop),
+ static_cast<int>(imm8));
+ data += 2;
+ } else if (*data == 0x22) {
+ data++;
+ int mod, regop, rm;
+              get_modrm(*data, &mod, &regop, &rm);
+ int8_t imm8 = static_cast<int8_t>(data[1]);
+ AppendToBuffer("pinsrd %s,%s,%d",
+ NameOfXMMRegister(regop),
+ NameOfCPURegister(rm),
+ static_cast<int>(imm8));
+ data += 2;
+ } else {
+ UnimplementedInstruction();
+ }
+ } else if (*data == 0x2E || *data == 0x2F) {
+ const char* mnem = (*data == 0x2E) ? "ucomisd" : "comisd";
+ data++;
+ int mod, regop, rm;
+            get_modrm(*data, &mod, &regop, &rm);
+ if (mod == 0x3) {
+ AppendToBuffer("%s %s,%s", mnem,
+ NameOfXMMRegister(regop),
+ NameOfXMMRegister(rm));
+ data++;
+ } else {
+ AppendToBuffer("%s %s,", mnem, NameOfXMMRegister(regop));
+ data += PrintRightOperand(data);
+ }
+ } else if (*data == 0x50) {
+ data++;
+ int mod, regop, rm;
+            get_modrm(*data, &mod, &regop, &rm);
+ AppendToBuffer("movmskpd %s,%s",
+ NameOfCPURegister(regop),
+ NameOfXMMRegister(rm));
+ data++;
+ } else if (*data == 0x54) {
+ data++;
+ int mod, regop, rm;
+            get_modrm(*data, &mod, &regop, &rm);
+ AppendToBuffer("andpd %s,%s",
+ NameOfXMMRegister(regop),
+ NameOfXMMRegister(rm));
+ data++;
+ } else if (*data == 0x56) {
+ data++;
+ int mod, regop, rm;
+            get_modrm(*data, &mod, &regop, &rm);
+ AppendToBuffer("orpd %s,%s",
+ NameOfXMMRegister(regop),
+ NameOfXMMRegister(rm));
+ data++;
+ } else if (*data == 0x57) {
+ data++;
+ int mod, regop, rm;
+            get_modrm(*data, &mod, &regop, &rm);
+ AppendToBuffer("xorpd %s,%s",
+ NameOfXMMRegister(regop),
+ NameOfXMMRegister(rm));
+ data++;
+ } else if (*data == 0x6E) {
+ data++;
+ int mod, regop, rm;
+            get_modrm(*data, &mod, &regop, &rm);
+ AppendToBuffer("movd %s,", NameOfXMMRegister(regop));
+ data += PrintRightOperand(data);
+ } else if (*data == 0x6F) {
+ data++;
+ int mod, regop, rm;
+            get_modrm(*data, &mod, &regop, &rm);
+ AppendToBuffer("movdqa %s,", NameOfXMMRegister(regop));
+ data += PrintRightXMMOperand(data);
+ } else if (*data == 0x70) {
+ data++;
+ int mod, regop, rm;
+            get_modrm(*data, &mod, &regop, &rm);
+ int8_t imm8 = static_cast<int8_t>(data[1]);
+ AppendToBuffer("pshufd %s,%s,%d",
+ NameOfXMMRegister(regop),
+ NameOfXMMRegister(rm),
+ static_cast<int>(imm8));
+ data += 2;
+ } else if (*data == 0x76) {
+ data++;
+ int mod, regop, rm;
+            get_modrm(*data, &mod, &regop, &rm);
+ AppendToBuffer("pcmpeqd %s,%s",
+ NameOfXMMRegister(regop),
+ NameOfXMMRegister(rm));
+ data++;
+ } else if (*data == 0x90) {
+ data++;
+ AppendToBuffer("nop"); // 2 byte nop.
+ } else if (*data == 0xF3) {
+ data++;
+ int mod, regop, rm;
+            get_modrm(*data, &mod, &regop, &rm);
+ AppendToBuffer("psllq %s,%s",
+ NameOfXMMRegister(regop),
+ NameOfXMMRegister(rm));
+ data++;
+ } else if (*data == 0x73) {
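+            // 66 0F 73 is the shift-by-immediate group: /6 (esi) encodes
+            // psllq, /2 (edx) encodes psrlq.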
+ data++;
+ int mod, regop, rm;
+            get_modrm(*data, &mod, &regop, &rm);
+ int8_t imm8 = static_cast<int8_t>(data[1]);
+ ASSERT(regop == esi || regop == edx);
+ AppendToBuffer("%s %s,%d",
+ (regop == esi) ? "psllq" : "psrlq",
+ NameOfXMMRegister(rm),
+ static_cast<int>(imm8));
+ data += 2;
+ } else if (*data == 0xD3) {
+ data++;
+ int mod, regop, rm;
+            get_modrm(*data, &mod, &regop, &rm);
+ AppendToBuffer("psrlq %s,%s",
+ NameOfXMMRegister(regop),
+ NameOfXMMRegister(rm));
+ data++;
+ } else if (*data == 0x7F) {
+ AppendToBuffer("movdqa ");
+ data++;
+ int mod, regop, rm;
+            get_modrm(*data, &mod, &regop, &rm);
+ data += PrintRightXMMOperand(data);
+ AppendToBuffer(",%s", NameOfXMMRegister(regop));
+ } else if (*data == 0x7E) {
+ data++;
+ int mod, regop, rm;
+            get_modrm(*data, &mod, &regop, &rm);
+ AppendToBuffer("movd ");
+ data += PrintRightOperand(data);
+ AppendToBuffer(",%s", NameOfXMMRegister(regop));
+ } else if (*data == 0xDB) {
+ data++;
+ int mod, regop, rm;
+            get_modrm(*data, &mod, &regop, &rm);
+ AppendToBuffer("pand %s,%s",
+ NameOfXMMRegister(regop),
+ NameOfXMMRegister(rm));
+ data++;
+ } else if (*data == 0xE7) {
+ data++;
+ int mod, regop, rm;
+            get_modrm(*data, &mod, &regop, &rm);
+ if (mod == 3) {
+ AppendToBuffer("movntdq ");
+ data += PrintRightOperand(data);
+ AppendToBuffer(",%s", NameOfXMMRegister(regop));
+ } else {
+ UnimplementedInstruction();
+ }
+ } else if (*data == 0xEF) {
+ data++;
+ int mod, regop, rm;
+            get_modrm(*data, &mod, &regop, &rm);
+ AppendToBuffer("pxor %s,%s",
+ NameOfXMMRegister(regop),
+ NameOfXMMRegister(rm));
+ data++;
+ } else if (*data == 0xEB) {
+ data++;
+ int mod, regop, rm;
+            get_modrm(*data, &mod, &regop, &rm);
+ AppendToBuffer("por %s,%s",
+ NameOfXMMRegister(regop),
+ NameOfXMMRegister(rm));
+ data++;
+ } else {
+ UnimplementedInstruction();
+ }
+ } else {
+ UnimplementedInstruction();
+ }
+ break;
+
+ case 0xFE:
+ { data++;
+ int mod, regop, rm;
+          get_modrm(*data, &mod, &regop, &rm);
+ if (regop == ecx) {
+ AppendToBuffer("dec_b ");
+ data += PrintRightOperand(data);
+ } else {
+ UnimplementedInstruction();
+ }
+ }
+ break;
+
+ case 0x68:
+ AppendToBuffer("push 0x%x", *reinterpret_cast<int32_t*>(data+1));
+ data += 5;
+ break;
+
+ case 0x6A:
+ AppendToBuffer("push 0x%x", *reinterpret_cast<int8_t*>(data + 1));
+ data += 2;
+ break;
+
+ case 0xA8:
+ AppendToBuffer("test al,0x%x", *reinterpret_cast<uint8_t*>(data+1));
+ data += 2;
+ break;
+
+ case 0xA9:
+ AppendToBuffer("test eax,0x%x", *reinterpret_cast<int32_t*>(data+1));
+ data += 5;
+ break;
+
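+      // Shift/rotate group: 0xD1 shifts by 1, 0xD3 shifts by cl, 0xC1 by imm8.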
+ case 0xD1: // fall through
+ case 0xD3: // fall through
+ case 0xC1:
+ data += D1D3C1Instruction(data);
+ break;
+
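+      // 0xD8..0xDF are the x87 FPU escape opcodes.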
+ case 0xD8: // fall through
+ case 0xD9: // fall through
+ case 0xDA: // fall through
+ case 0xDB: // fall through
+ case 0xDC: // fall through
+ case 0xDD: // fall through
+ case 0xDE: // fall through
+ case 0xDF:
+ data += FPUInstruction(data);
+ break;
+
+ case 0xEB:
+ data += JumpShort(data);
+ break;
+
+ case 0xF2:
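+        // 0xF2-prefixed opcodes: scalar double-precision SSE2 instructions.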
+ if (*(data+1) == 0x0F) {
+ byte b2 = *(data+2);
+ if (b2 == 0x11) {
+ AppendToBuffer("movsd ");
+ data += 3;
+ int mod, regop, rm;
+            get_modrm(*data, &mod, &regop, &rm);
+ data += PrintRightXMMOperand(data);
+ AppendToBuffer(",%s", NameOfXMMRegister(regop));
+ } else if (b2 == 0x10) {
+ data += 3;
+ int mod, regop, rm;
+            get_modrm(*data, &mod, &regop, &rm);
+ AppendToBuffer("movsd %s,", NameOfXMMRegister(regop));
+ data += PrintRightXMMOperand(data);
+ } else if (b2 == 0x5A) {
+ data += 3;
+ int mod, regop, rm;
+            get_modrm(*data, &mod, &regop, &rm);
+ AppendToBuffer("cvtsd2ss %s,", NameOfXMMRegister(regop));
+ data += PrintRightXMMOperand(data);
+ } else {
+ const char* mnem = "?";
+ switch (b2) {
+ case 0x2A: mnem = "cvtsi2sd"; break;
+ case 0x2C: mnem = "cvttsd2si"; break;
+ case 0x2D: mnem = "cvtsd2si"; break;
+ case 0x51: mnem = "sqrtsd"; break;
+ case 0x58: mnem = "addsd"; break;
+ case 0x59: mnem = "mulsd"; break;
+ case 0x5C: mnem = "subsd"; break;
+ case 0x5E: mnem = "divsd"; break;
+ }
+ data += 3;
+ int mod, regop, rm;
+            get_modrm(*data, &mod, &regop, &rm);
+ if (b2 == 0x2A) {
+ AppendToBuffer("%s %s,", mnem, NameOfXMMRegister(regop));
+ data += PrintRightOperand(data);
+ } else if (b2 == 0x2C || b2 == 0x2D) {
+ AppendToBuffer("%s %s,", mnem, NameOfCPURegister(regop));
+ data += PrintRightXMMOperand(data);
+ } else if (b2 == 0xC2) {
+ // Intel manual 2A, Table 3-18.
+ const char* const pseudo_op[] = {
+ "cmpeqsd",
+ "cmpltsd",
+ "cmplesd",
+ "cmpunordsd",
+ "cmpneqsd",
+ "cmpnltsd",
+ "cmpnlesd",
+ "cmpordsd"
+ };
+ AppendToBuffer("%s %s,%s",
+ pseudo_op[data[1]],
+ NameOfXMMRegister(regop),
+ NameOfXMMRegister(rm));
+ data += 2;
+ } else {
+ AppendToBuffer("%s %s,", mnem, NameOfXMMRegister(regop));
+ data += PrintRightXMMOperand(data);
+ }
+ }
+ } else {
+ UnimplementedInstruction();
+ }
+ break;
+
+ case 0xF3:
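+        // 0xF3-prefixed opcodes: scalar single-precision SSE instructions,
+        // movdqu, and the rep string operations.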
+ if (*(data+1) == 0x0F) {
+ byte b2 = *(data+2);
+ if (b2 == 0x11) {
+ AppendToBuffer("movss ");
+ data += 3;
+ int mod, regop, rm;
+            get_modrm(*data, &mod, &regop, &rm);
+ data += PrintRightXMMOperand(data);
+ AppendToBuffer(",%s", NameOfXMMRegister(regop));
+ } else if (b2 == 0x10) {
+ data += 3;
+ int mod, regop, rm;
+            get_modrm(*data, &mod, &regop, &rm);
+ AppendToBuffer("movss %s,", NameOfXMMRegister(regop));
+ data += PrintRightXMMOperand(data);
+ } else if (b2 == 0x2C) {
+ data += 3;
+ int mod, regop, rm;
+            get_modrm(*data, &mod, &regop, &rm);
+ AppendToBuffer("cvttss2si %s,", NameOfCPURegister(regop));
+ data += PrintRightXMMOperand(data);
+ } else if (b2 == 0x5A) {
+ data += 3;
+ int mod, regop, rm;
+            get_modrm(*data, &mod, &regop, &rm);
+ AppendToBuffer("cvtss2sd %s,", NameOfXMMRegister(regop));
+ data += PrintRightXMMOperand(data);
+ } else if (b2 == 0x6F) {
+ data += 3;
+ int mod, regop, rm;
+            get_modrm(*data, &mod, &regop, &rm);
+ AppendToBuffer("movdqu %s,", NameOfXMMRegister(regop));
+ data += PrintRightXMMOperand(data);
+ } else if (b2 == 0x7F) {
+ AppendToBuffer("movdqu ");
+ data += 3;
+ int mod, regop, rm;
+            get_modrm(*data, &mod, &regop, &rm);
+ data += PrintRightXMMOperand(data);
+ AppendToBuffer(",%s", NameOfXMMRegister(regop));
+ } else {
+ UnimplementedInstruction();
+ }
+ } else if (*(data+1) == 0xA5) {
+ data += 2;
+ AppendToBuffer("rep_movs");
+ } else if (*(data+1) == 0xAB) {
+ data += 2;
+ AppendToBuffer("rep_stos");
+ } else {
+ UnimplementedInstruction();
+ }
+ break;
+
+ case 0xF7:
+ data += F7Instruction(data);
+ break;
+
+ default:
+ UnimplementedInstruction();
+ }
+ }
+
+ if (tmp_buffer_pos_ < sizeof tmp_buffer_) {
+ tmp_buffer_[tmp_buffer_pos_] = '\0';
+ }
+
+ int instr_len = data - instr;
+ if (instr_len == 0) {
+ printf("%02x", *data);
+ }
+ ASSERT(instr_len > 0); // Ensure progress.
+
+ int outp = 0;
+ // Instruction bytes.
+ for (byte* bp = instr; bp < data; bp++) {
+ outp += v8::internal::OS::SNPrintF(out_buffer + outp,
+ "%02x",
+ *bp);
+ }
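+  // Pad the byte dump so the disassembled text starts in a fixed column.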
+ for (int i = 6 - instr_len; i >= 0; i--) {
+ outp += v8::internal::OS::SNPrintF(out_buffer + outp,
+ " ");
+ }
+
+ outp += v8::internal::OS::SNPrintF(out_buffer + outp,
+ " %s",
+ tmp_buffer_.start());
+ return instr_len;
+} // NOLINT (function is too long)
+
+
+//------------------------------------------------------------------------------
+
+
+static const char* cpu_regs[8] = {
+ "eax", "ecx", "edx", "ebx", "esp", "ebp", "esi", "edi"
+};
+
+
+static const char* byte_cpu_regs[8] = {
+ "al", "cl", "dl", "bl", "ah", "ch", "dh", "bh"
+};
+
+
+static const char* xmm_regs[8] = {
+ "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6", "xmm7"
+};
+
+
+const char* NameConverter::NameOfAddress(byte* addr) const {
+ v8::internal::OS::SNPrintF(tmp_buffer_, "%p", addr);
+ return tmp_buffer_.start();
+}
+
+
+const char* NameConverter::NameOfConstant(byte* addr) const {
+ return NameOfAddress(addr);
+}
+
+
+const char* NameConverter::NameOfCPURegister(int reg) const {
+ if (0 <= reg && reg < 8) return cpu_regs[reg];
+ return "noreg";
+}
+
+
+const char* NameConverter::NameOfByteCPURegister(int reg) const {
+ if (0 <= reg && reg < 8) return byte_cpu_regs[reg];
+ return "noreg";
+}
+
+
+const char* NameConverter::NameOfXMMRegister(int reg) const {
+ if (0 <= reg && reg < 8) return xmm_regs[reg];
+ return "noxmmreg";
+}
+
+
+const char* NameConverter::NameInCode(byte* addr) const {
+ // X87 does not embed debug strings at the moment.
+ UNREACHABLE();
+ return "";
+}
+
+
+//------------------------------------------------------------------------------
+
+Disassembler::Disassembler(const NameConverter& converter)
+ : converter_(converter) {}
+
+
+Disassembler::~Disassembler() {}
+
+
+int Disassembler::InstructionDecode(v8::internal::Vector<char> buffer,
+ byte* instruction) {
+ DisassemblerX87 d(converter_, false /*do not crash if unimplemented*/);
+ return d.InstructionDecode(buffer, instruction);
+}
+
+
+// The X87 assembler does not currently use constant pools.
+int Disassembler::ConstantPoolSizeAt(byte* instruction) { return -1; }
+
+
+/*static*/ void Disassembler::Disassemble(FILE* f, byte* begin, byte* end) {
+ NameConverter converter;
+ Disassembler d(converter);
+ for (byte* pc = begin; pc < end;) {
+ v8::internal::EmbeddedVector<char, 128> buffer;
+ buffer[0] = '\0';
+ byte* prev_pc = pc;
+ pc += d.InstructionDecode(buffer, pc);
+ fprintf(f, "%p", prev_pc);
+ fprintf(f, " ");
+
+ for (byte* bp = prev_pc; bp < pc; bp++) {
+ fprintf(f, "%02x", *bp);
+ }
+ for (int i = 6 - (pc - prev_pc); i >= 0; i--) {
+ fprintf(f, " ");
+ }
+ fprintf(f, " %s\n", buffer.start());
+ }
+}
+
+
+} // namespace disasm
+
+#endif // V8_TARGET_ARCH_X87
--- /dev/null
+// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "v8.h"
+
+#if V8_TARGET_ARCH_X87
+
+#include "assembler.h"
+#include "assembler-x87.h"
+#include "assembler-x87-inl.h"
+#include "frames.h"
+
+namespace v8 {
+namespace internal {
+
+
+Register JavaScriptFrame::fp_register() { return ebp; }
+Register JavaScriptFrame::context_register() { return esi; }
+Register JavaScriptFrame::constant_pool_pointer_register() {
+ UNREACHABLE();
+ return no_reg;
+}
+
+
+Register StubFailureTrampolineFrame::fp_register() { return ebp; }
+Register StubFailureTrampolineFrame::context_register() { return esi; }
+Register StubFailureTrampolineFrame::constant_pool_pointer_register() {
+ UNREACHABLE();
+ return no_reg;
+}
+
+
+Object*& ExitFrame::constant_pool_slot() const {
+ UNREACHABLE();
+ return Memory::Object_at(NULL);
+}
+
+
+} } // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_X87
--- /dev/null
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_X87_FRAMES_X87_H_
+#define V8_X87_FRAMES_X87_H_
+
+namespace v8 {
+namespace internal {
+
+
+// Register lists
+// Note that the bit values must match those used in actual instruction encoding
+const int kNumRegs = 8;
+
+
+// Caller-saved registers
+const RegList kJSCallerSaved =
+ 1 << 0 | // eax
+ 1 << 1 | // ecx
+ 1 << 2 | // edx
+ 1 << 3 | // ebx - used as a caller-saved register in JavaScript code
+ 1 << 7; // edi - callee function
+
+const int kNumJSCallerSaved = 5;
+
+
+// Number of registers for which space is reserved in safepoints.
+const int kNumSafepointRegisters = 8;
+
+const int kNoAlignmentPadding = 0;
+const int kAlignmentPaddingPushed = 2;
+const int kAlignmentZapValue = 0x12345678; // Not heap object tagged.
+
+// ----------------------------------------------------
+
+
+class EntryFrameConstants : public AllStatic {
+ public:
+ static const int kCallerFPOffset = -6 * kPointerSize;
+
+ static const int kFunctionArgOffset = +3 * kPointerSize;
+ static const int kReceiverArgOffset = +4 * kPointerSize;
+ static const int kArgcOffset = +5 * kPointerSize;
+ static const int kArgvOffset = +6 * kPointerSize;
+};
+
+
+class ExitFrameConstants : public AllStatic {
+ public:
+ static const int kFrameSize = 2 * kPointerSize;
+
+ static const int kCodeOffset = -2 * kPointerSize;
+ static const int kSPOffset = -1 * kPointerSize;
+
+ static const int kCallerFPOffset = 0 * kPointerSize;
+ static const int kCallerPCOffset = +1 * kPointerSize;
+
+ // FP-relative displacement of the caller's SP. It points just
+ // below the saved PC.
+ static const int kCallerSPDisplacement = +2 * kPointerSize;
+
+ static const int kConstantPoolOffset = 0; // Not used
+};
+
+
+class JavaScriptFrameConstants : public AllStatic {
+ public:
+ // FP-relative.
+ static const int kLocal0Offset = StandardFrameConstants::kExpressionsOffset;
+ static const int kLastParameterOffset = +2 * kPointerSize;
+ static const int kFunctionOffset = StandardFrameConstants::kMarkerOffset;
+
+ // Caller SP-relative.
+ static const int kParam0Offset = -2 * kPointerSize;
+ static const int kReceiverOffset = -1 * kPointerSize;
+
+ static const int kDynamicAlignmentStateOffset = kLocal0Offset;
+};
+
+
+class ArgumentsAdaptorFrameConstants : public AllStatic {
+ public:
+ // FP-relative.
+ static const int kLengthOffset = StandardFrameConstants::kExpressionsOffset;
+
+ static const int kFrameSize =
+ StandardFrameConstants::kFixedFrameSize + kPointerSize;
+};
+
+
+class ConstructFrameConstants : public AllStatic {
+ public:
+ // FP-relative.
+ static const int kImplicitReceiverOffset = -5 * kPointerSize;
+ static const int kConstructorOffset = kMinInt;
+ static const int kLengthOffset = -4 * kPointerSize;
+ static const int kCodeOffset = StandardFrameConstants::kExpressionsOffset;
+
+ static const int kFrameSize =
+ StandardFrameConstants::kFixedFrameSize + 3 * kPointerSize;
+};
+
+
+class InternalFrameConstants : public AllStatic {
+ public:
+ // FP-relative.
+ static const int kCodeOffset = StandardFrameConstants::kExpressionsOffset;
+};
+
+
+inline Object* JavaScriptFrame::function_slot_object() const {
+ const int offset = JavaScriptFrameConstants::kFunctionOffset;
+ return Memory::Object_at(fp() + offset);
+}
+
+
+inline void StackHandler::SetFp(Address slot, Address fp) {
+ Memory::Address_at(slot) = fp;
+}
+
+
+} } // namespace v8::internal
+
+#endif // V8_X87_FRAMES_X87_H_
--- /dev/null
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "v8.h"
+
+#if V8_TARGET_ARCH_X87
+
+#include "code-stubs.h"
+#include "codegen.h"
+#include "compiler.h"
+#include "debug.h"
+#include "full-codegen.h"
+#include "isolate-inl.h"
+#include "parser.h"
+#include "scopes.h"
+#include "stub-cache.h"
+
+namespace v8 {
+namespace internal {
+
+#define __ ACCESS_MASM(masm_)
+
+
+class JumpPatchSite BASE_EMBEDDED {
+ public:
+ explicit JumpPatchSite(MacroAssembler* masm) : masm_(masm) {
+#ifdef DEBUG
+ info_emitted_ = false;
+#endif
+ }
+
+ ~JumpPatchSite() {
+ ASSERT(patch_site_.is_bound() == info_emitted_);
+ }
+
+ void EmitJumpIfNotSmi(Register reg,
+ Label* target,
+ Label::Distance distance = Label::kFar) {
+ __ test(reg, Immediate(kSmiTagMask));
+ EmitJump(not_carry, target, distance); // Always taken before patched.
+ }
+
+ void EmitJumpIfSmi(Register reg,
+ Label* target,
+ Label::Distance distance = Label::kFar) {
+ __ test(reg, Immediate(kSmiTagMask));
+ EmitJump(carry, target, distance); // Never taken before patched.
+ }
+
+ void EmitPatchInfo() {
+ if (patch_site_.is_bound()) {
+ int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(&patch_site_);
+ ASSERT(is_uint8(delta_to_patch_site));
+ __ test(eax, Immediate(delta_to_patch_site));
+#ifdef DEBUG
+ info_emitted_ = true;
+#endif
+ } else {
+ __ nop(); // Signals no inlined code.
+ }
+ }
+
+ private:
+ // jc will be patched with jz, jnc will become jnz.
+ void EmitJump(Condition cc, Label* target, Label::Distance distance) {
+ ASSERT(!patch_site_.is_bound() && !info_emitted_);
+ ASSERT(cc == carry || cc == not_carry);
+ __ bind(&patch_site_);
+ __ j(cc, target, distance);
+ }
+
+ MacroAssembler* masm_;
+ Label patch_site_;
+#ifdef DEBUG
+ bool info_emitted_;
+#endif
+};
+
+
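+// Emits a stack overflow check: compares esp (or, when 'pointers' is nonzero,
+// esp minus that many slots computed into 'scratch') against the stack limit
+// and calls the StackCheck builtin if the limit has been crossed.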
+static void EmitStackCheck(MacroAssembler* masm_,
+ int pointers = 0,
+ Register scratch = esp) {
+ Label ok;
+ Isolate* isolate = masm_->isolate();
+ ASSERT(scratch.is(esp) == (pointers == 0));
+ ExternalReference stack_limit;
+ if (pointers != 0) {
+ __ mov(scratch, esp);
+ __ sub(scratch, Immediate(pointers * kPointerSize));
+ stack_limit = ExternalReference::address_of_real_stack_limit(isolate);
+ } else {
+ stack_limit = ExternalReference::address_of_stack_limit(isolate);
+ }
+ __ cmp(scratch, Operand::StaticVariable(stack_limit));
+ __ j(above_equal, &ok, Label::kNear);
+ __ call(isolate->builtins()->StackCheck(), RelocInfo::CODE_TARGET);
+ __ bind(&ok);
+}
+
+
+// Generate code for a JS function. On entry to the function the receiver
+// and arguments have been pushed on the stack left to right, with the
+// return address on top of them. The actual argument count matches the
+// formal parameter count expected by the function.
+//
+// The live registers are:
+// o edi: the JS function object being called (i.e. ourselves)
+// o esi: our context
+// o ebp: our caller's frame pointer
+// o esp: stack pointer (pointing to return address)
+//
+// The function builds a JS frame. Please see JavaScriptFrameConstants in
+// frames-x87.h for its layout.
+void FullCodeGenerator::Generate() {
+ CompilationInfo* info = info_;
+ handler_table_ =
+ isolate()->factory()->NewFixedArray(function()->handler_count(), TENURED);
+
+ profiling_counter_ = isolate()->factory()->NewCell(
+ Handle<Smi>(Smi::FromInt(FLAG_interrupt_budget), isolate()));
+ SetFunctionPosition(function());
+ Comment cmnt(masm_, "[ function compiled by full code generator");
+
+ ProfileEntryHookStub::MaybeCallEntryHook(masm_);
+
+#ifdef DEBUG
+ if (strlen(FLAG_stop_at) > 0 &&
+ info->function()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) {
+ __ int3();
+ }
+#endif
+
+ // Sloppy mode functions and builtins need to replace the receiver with the
+ // global proxy when called as functions (without an explicit receiver
+ // object).
+ if (info->strict_mode() == SLOPPY && !info->is_native()) {
+ Label ok;
+ // +1 for return address.
+ int receiver_offset = (info->scope()->num_parameters() + 1) * kPointerSize;
+ __ mov(ecx, Operand(esp, receiver_offset));
+
+ __ cmp(ecx, isolate()->factory()->undefined_value());
+ __ j(not_equal, &ok, Label::kNear);
+
+ __ mov(ecx, GlobalObjectOperand());
+ __ mov(ecx, FieldOperand(ecx, GlobalObject::kGlobalReceiverOffset));
+
+ __ mov(Operand(esp, receiver_offset), ecx);
+
+ __ bind(&ok);
+ }
+
+ // Open a frame scope to indicate that there is a frame on the stack. The
+ // MANUAL indicates that the scope shouldn't actually generate code to set up
+ // the frame (that is done below).
+ FrameScope frame_scope(masm_, StackFrame::MANUAL);
+
+ info->set_prologue_offset(masm_->pc_offset());
+ __ Prologue(info->IsCodePreAgingActive());
+ info->AddNoFrameRange(0, masm_->pc_offset());
+
+ { Comment cmnt(masm_, "[ Allocate locals");
+ int locals_count = info->scope()->num_stack_slots();
+ // Generators allocate locals, if any, in context slots.
+ ASSERT(!info->function()->is_generator() || locals_count == 0);
+ if (locals_count == 1) {
+ __ push(Immediate(isolate()->factory()->undefined_value()));
+ } else if (locals_count > 1) {
+ if (locals_count >= 128) {
+ EmitStackCheck(masm_, locals_count, ecx);
+ }
+ __ mov(eax, Immediate(isolate()->factory()->undefined_value()));
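+      // For frames with many locals, push the undefined value in batches of
+      // kMaxPushes inside a loop to bound the emitted code size.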
+ const int kMaxPushes = 32;
+ if (locals_count >= kMaxPushes) {
+ int loop_iterations = locals_count / kMaxPushes;
+ __ mov(ecx, loop_iterations);
+ Label loop_header;
+ __ bind(&loop_header);
+ // Do pushes.
+ for (int i = 0; i < kMaxPushes; i++) {
+ __ push(eax);
+ }
+ __ dec(ecx);
+ __ j(not_zero, &loop_header, Label::kNear);
+ }
+ int remaining = locals_count % kMaxPushes;
+ // Emit the remaining pushes.
+ for (int i = 0; i < remaining; i++) {
+ __ push(eax);
+ }
+ }
+ }
+
+ bool function_in_register = true;
+
+ // Possibly allocate a local context.
+ int heap_slots = info->scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
+ if (heap_slots > 0) {
+ Comment cmnt(masm_, "[ Allocate context");
+ // Argument to NewContext is the function, which is still in edi.
+ if (FLAG_harmony_scoping && info->scope()->is_global_scope()) {
+ __ push(edi);
+ __ Push(info->scope()->GetScopeInfo());
+ __ CallRuntime(Runtime::kHiddenNewGlobalContext, 2);
+ } else if (heap_slots <= FastNewContextStub::kMaximumSlots) {
+ FastNewContextStub stub(isolate(), heap_slots);
+ __ CallStub(&stub);
+ } else {
+ __ push(edi);
+ __ CallRuntime(Runtime::kHiddenNewFunctionContext, 1);
+ }
+ function_in_register = false;
+ // Context is returned in eax. It replaces the context passed to us.
+ // It's saved in the stack and kept live in esi.
+ __ mov(esi, eax);
+ __ mov(Operand(ebp, StandardFrameConstants::kContextOffset), eax);
+
+ // Copy parameters into context if necessary.
+ int num_parameters = info->scope()->num_parameters();
+ for (int i = 0; i < num_parameters; i++) {
+ Variable* var = scope()->parameter(i);
+ if (var->IsContextSlot()) {
+ int parameter_offset = StandardFrameConstants::kCallerSPOffset +
+ (num_parameters - 1 - i) * kPointerSize;
+ // Load parameter from stack.
+ __ mov(eax, Operand(ebp, parameter_offset));
+ // Store it in the context.
+ int context_offset = Context::SlotOffset(var->index());
+ __ mov(Operand(esi, context_offset), eax);
+ // Update the write barrier. This clobbers eax and ebx.
+ __ RecordWriteContextSlot(esi,
+ context_offset,
+ eax,
+ ebx);
+ }
+ }
+ }
+
+ Variable* arguments = scope()->arguments();
+ if (arguments != NULL) {
+ // Function uses arguments object.
+ Comment cmnt(masm_, "[ Allocate arguments object");
+ if (function_in_register) {
+ __ push(edi);
+ } else {
+ __ push(Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
+ }
+ // Receiver is just before the parameters on the caller's stack.
+ int num_parameters = info->scope()->num_parameters();
+ int offset = num_parameters * kPointerSize;
+ __ lea(edx,
+ Operand(ebp, StandardFrameConstants::kCallerSPOffset + offset));
+ __ push(edx);
+ __ push(Immediate(Smi::FromInt(num_parameters)));
+ // Arguments to ArgumentsAccessStub:
+ // function, receiver address, parameter count.
+ // The stub will rewrite receiver and parameter count if the previous
+ // stack frame was an arguments adapter frame.
+ ArgumentsAccessStub::Type type;
+ if (strict_mode() == STRICT) {
+ type = ArgumentsAccessStub::NEW_STRICT;
+ } else if (function()->has_duplicate_parameters()) {
+ type = ArgumentsAccessStub::NEW_SLOPPY_SLOW;
+ } else {
+ type = ArgumentsAccessStub::NEW_SLOPPY_FAST;
+ }
+ ArgumentsAccessStub stub(isolate(), type);
+ __ CallStub(&stub);
+
+ SetVar(arguments, eax, ebx, edx);
+ }
+
+ if (FLAG_trace) {
+ __ CallRuntime(Runtime::kTraceEnter, 0);
+ }
+
+ // Visit the declarations and body unless there is an illegal
+ // redeclaration.
+ if (scope()->HasIllegalRedeclaration()) {
+ Comment cmnt(masm_, "[ Declarations");
+ scope()->VisitIllegalRedeclaration(this);
+
+ } else {
+ PrepareForBailoutForId(BailoutId::FunctionEntry(), NO_REGISTERS);
+ { Comment cmnt(masm_, "[ Declarations");
+ // For named function expressions, declare the function name as a
+ // constant.
+ if (scope()->is_function_scope() && scope()->function() != NULL) {
+ VariableDeclaration* function = scope()->function();
+ ASSERT(function->proxy()->var()->mode() == CONST ||
+ function->proxy()->var()->mode() == CONST_LEGACY);
+ ASSERT(function->proxy()->var()->location() != Variable::UNALLOCATED);
+ VisitVariableDeclaration(function);
+ }
+ VisitDeclarations(scope()->declarations());
+ }
+
+ { Comment cmnt(masm_, "[ Stack check");
+ PrepareForBailoutForId(BailoutId::Declarations(), NO_REGISTERS);
+ EmitStackCheck(masm_);
+ }
+
+ { Comment cmnt(masm_, "[ Body");
+ ASSERT(loop_depth() == 0);
+ VisitStatements(function()->body());
+ ASSERT(loop_depth() == 0);
+ }
+ }
+
+ // Always emit a 'return undefined' in case control fell off the end of
+ // the body.
+ { Comment cmnt(masm_, "[ return <undefined>;");
+ __ mov(eax, isolate()->factory()->undefined_value());
+ EmitReturnSequence();
+ }
+}
+
+
+void FullCodeGenerator::ClearAccumulator() {
+ __ Move(eax, Immediate(Smi::FromInt(0)));
+}
+
+
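+// Subtracts 'delta' from the interrupt budget stored in the profiling
+// counter cell.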
+void FullCodeGenerator::EmitProfilingCounterDecrement(int delta) {
+ __ mov(ebx, Immediate(profiling_counter_));
+ __ sub(FieldOperand(ebx, Cell::kValueOffset),
+ Immediate(Smi::FromInt(delta)));
+}
+
+
+void FullCodeGenerator::EmitProfilingCounterReset() {
+ int reset_value = FLAG_interrupt_budget;
+ __ mov(ebx, Immediate(profiling_counter_));
+ __ mov(FieldOperand(ebx, Cell::kValueOffset),
+ Immediate(Smi::FromInt(reset_value)));
+}
+
+
+void FullCodeGenerator::EmitBackEdgeBookkeeping(IterationStatement* stmt,
+ Label* back_edge_target) {
+ Comment cmnt(masm_, "[ Back edge bookkeeping");
+ Label ok;
+
+ ASSERT(back_edge_target->is_bound());
+ int distance = masm_->SizeOfCodeGeneratedSince(back_edge_target);
+ int weight = Min(kMaxBackEdgeWeight,
+ Max(1, distance / kCodeSizeMultiplier));
+ EmitProfilingCounterDecrement(weight);
+ __ j(positive, &ok, Label::kNear);
+ __ call(isolate()->builtins()->InterruptCheck(), RelocInfo::CODE_TARGET);
+
+ // Record a mapping of this PC offset to the OSR id. This is used to find
+ // the AST id from the unoptimized code in order to use it as a key into
+ // the deoptimization input data found in the optimized code.
+ RecordBackEdge(stmt->OsrEntryId());
+
+ EmitProfilingCounterReset();
+
+ __ bind(&ok);
+ PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS);
+ // Record a mapping of the OSR id to this PC. This is used if the OSR
+ // entry becomes the target of a bailout. We don't expect it to be, but
+ // we want it to work if it is.
+ PrepareForBailoutForId(stmt->OsrEntryId(), NO_REGISTERS);
+}
+
+
+void FullCodeGenerator::EmitReturnSequence() {
+ Comment cmnt(masm_, "[ Return sequence");
+ if (return_label_.is_bound()) {
+ __ jmp(&return_label_);
+ } else {
+ // Common return label
+ __ bind(&return_label_);
+ if (FLAG_trace) {
+ __ push(eax);
+ __ CallRuntime(Runtime::kTraceExit, 1);
+ }
+ // Pretend that the exit is a backwards jump to the entry.
+ int weight = 1;
+ if (info_->ShouldSelfOptimize()) {
+ weight = FLAG_interrupt_budget / FLAG_self_opt_count;
+ } else {
+ int distance = masm_->pc_offset();
+ weight = Min(kMaxBackEdgeWeight,
+ Max(1, distance / kCodeSizeMultiplier));
+ }
+ EmitProfilingCounterDecrement(weight);
+ Label ok;
+ __ j(positive, &ok, Label::kNear);
+ __ push(eax);
+ __ call(isolate()->builtins()->InterruptCheck(),
+ RelocInfo::CODE_TARGET);
+ __ pop(eax);
+ EmitProfilingCounterReset();
+ __ bind(&ok);
+#ifdef DEBUG
+ // Add a label for checking the size of the code used for returning.
+ Label check_exit_codesize;
+ masm_->bind(&check_exit_codesize);
+#endif
+ SetSourcePosition(function()->end_position() - 1);
+ __ RecordJSReturn();
+ // Do not use the leave instruction here because it is too short to
+ // patch with the code required by the debugger.
+ __ mov(esp, ebp);
+ int no_frame_start = masm_->pc_offset();
+ __ pop(ebp);
+
+ int arguments_bytes = (info_->scope()->num_parameters() + 1) * kPointerSize;
+ __ Ret(arguments_bytes, ecx);
+ // Check that the size of the code used for returning is large enough
+ // for the debugger's requirements.
+ ASSERT(Assembler::kJSReturnSequenceLength <=
+ masm_->SizeOfCodeGeneratedSince(&check_exit_codesize));
+ info_->AddNoFrameRange(no_frame_start, masm_->pc_offset());
+ }
+}
+
+
+void FullCodeGenerator::EffectContext::Plug(Variable* var) const {
+ ASSERT(var->IsStackAllocated() || var->IsContextSlot());
+}
+
+
+void FullCodeGenerator::AccumulatorValueContext::Plug(Variable* var) const {
+ ASSERT(var->IsStackAllocated() || var->IsContextSlot());
+ codegen()->GetVar(result_register(), var);
+}
+
+
+void FullCodeGenerator::StackValueContext::Plug(Variable* var) const {
+ ASSERT(var->IsStackAllocated() || var->IsContextSlot());
+ MemOperand operand = codegen()->VarOperand(var, result_register());
+ // Memory operands can be pushed directly.
+ __ push(operand);
+}
+
+
+void FullCodeGenerator::TestContext::Plug(Variable* var) const {
+ // For simplicity we always test the accumulator register.
+ codegen()->GetVar(result_register(), var);
+ codegen()->PrepareForBailoutBeforeSplit(condition(), false, NULL, NULL);
+ codegen()->DoTest(this);
+}
+
+
+void FullCodeGenerator::EffectContext::Plug(Heap::RootListIndex index) const {
+ UNREACHABLE(); // Not used on X87.
+}
+
+
+void FullCodeGenerator::AccumulatorValueContext::Plug(
+ Heap::RootListIndex index) const {
+ UNREACHABLE(); // Not used on X87.
+}
+
+
+void FullCodeGenerator::StackValueContext::Plug(
+ Heap::RootListIndex index) const {
+ UNREACHABLE(); // Not used on X87.
+}
+
+
+void FullCodeGenerator::TestContext::Plug(Heap::RootListIndex index) const {
+ UNREACHABLE(); // Not used on X87.
+}
+
+
+void FullCodeGenerator::EffectContext::Plug(Handle<Object> lit) const {
+}
+
+
+void FullCodeGenerator::AccumulatorValueContext::Plug(
+ Handle<Object> lit) const {
+ if (lit->IsSmi()) {
+ __ SafeMove(result_register(), Immediate(lit));
+ } else {
+ __ Move(result_register(), Immediate(lit));
+ }
+}
+
+
+void FullCodeGenerator::StackValueContext::Plug(Handle<Object> lit) const {
+ if (lit->IsSmi()) {
+ __ SafePush(Immediate(lit));
+ } else {
+ __ push(Immediate(lit));
+ }
+}
+
+
+void FullCodeGenerator::TestContext::Plug(Handle<Object> lit) const {
+ codegen()->PrepareForBailoutBeforeSplit(condition(),
+ true,
+ true_label_,
+ false_label_);
+ ASSERT(!lit->IsUndetectableObject()); // There are no undetectable literals.
+ if (lit->IsUndefined() || lit->IsNull() || lit->IsFalse()) {
+ if (false_label_ != fall_through_) __ jmp(false_label_);
+ } else if (lit->IsTrue() || lit->IsJSObject()) {
+ if (true_label_ != fall_through_) __ jmp(true_label_);
+ } else if (lit->IsString()) {
+ if (String::cast(*lit)->length() == 0) {
+ if (false_label_ != fall_through_) __ jmp(false_label_);
+ } else {
+ if (true_label_ != fall_through_) __ jmp(true_label_);
+ }
+ } else if (lit->IsSmi()) {
+ if (Smi::cast(*lit)->value() == 0) {
+ if (false_label_ != fall_through_) __ jmp(false_label_);
+ } else {
+ if (true_label_ != fall_through_) __ jmp(true_label_);
+ }
+ } else {
+ // For simplicity we always test the accumulator register.
+ __ mov(result_register(), lit);
+ codegen()->DoTest(this);
+ }
+}
+
+
+void FullCodeGenerator::EffectContext::DropAndPlug(int count,
+ Register reg) const {
+ ASSERT(count > 0);
+ __ Drop(count);
+}
+
+
+void FullCodeGenerator::AccumulatorValueContext::DropAndPlug(
+ int count,
+ Register reg) const {
+ ASSERT(count > 0);
+ __ Drop(count);
+ __ Move(result_register(), reg);
+}
+
+
+void FullCodeGenerator::StackValueContext::DropAndPlug(int count,
+ Register reg) const {
+ ASSERT(count > 0);
+ if (count > 1) __ Drop(count - 1);
+ __ mov(Operand(esp, 0), reg);
+}
+
+
+void FullCodeGenerator::TestContext::DropAndPlug(int count,
+ Register reg) const {
+ ASSERT(count > 0);
+ // For simplicity we always test the accumulator register.
+ __ Drop(count);
+ __ Move(result_register(), reg);
+ codegen()->PrepareForBailoutBeforeSplit(condition(), false, NULL, NULL);
+ codegen()->DoTest(this);
+}
+
+
+void FullCodeGenerator::EffectContext::Plug(Label* materialize_true,
+ Label* materialize_false) const {
+ ASSERT(materialize_true == materialize_false);
+ __ bind(materialize_true);
+}
+
+
+void FullCodeGenerator::AccumulatorValueContext::Plug(
+ Label* materialize_true,
+ Label* materialize_false) const {
+ Label done;
+ __ bind(materialize_true);
+ __ mov(result_register(), isolate()->factory()->true_value());
+ __ jmp(&done, Label::kNear);
+ __ bind(materialize_false);
+ __ mov(result_register(), isolate()->factory()->false_value());
+ __ bind(&done);
+}
+
+
+void FullCodeGenerator::StackValueContext::Plug(
+ Label* materialize_true,
+ Label* materialize_false) const {
+ Label done;
+ __ bind(materialize_true);
+ __ push(Immediate(isolate()->factory()->true_value()));
+ __ jmp(&done, Label::kNear);
+ __ bind(materialize_false);
+ __ push(Immediate(isolate()->factory()->false_value()));
+ __ bind(&done);
+}
+
+
+void FullCodeGenerator::TestContext::Plug(Label* materialize_true,
+ Label* materialize_false) const {
+ ASSERT(materialize_true == true_label_);
+ ASSERT(materialize_false == false_label_);
+}
+
+
+void FullCodeGenerator::EffectContext::Plug(bool flag) const {
+}
+
+
+void FullCodeGenerator::AccumulatorValueContext::Plug(bool flag) const {
+ Handle<Object> value = flag
+ ? isolate()->factory()->true_value()
+ : isolate()->factory()->false_value();
+ __ mov(result_register(), value);
+}
+
+
+void FullCodeGenerator::StackValueContext::Plug(bool flag) const {
+ Handle<Object> value = flag
+ ? isolate()->factory()->true_value()
+ : isolate()->factory()->false_value();
+ __ push(Immediate(value));
+}
+
+
+void FullCodeGenerator::TestContext::Plug(bool flag) const {
+ codegen()->PrepareForBailoutBeforeSplit(condition(),
+ true,
+ true_label_,
+ false_label_);
+ if (flag) {
+ if (true_label_ != fall_through_) __ jmp(true_label_);
+ } else {
+ if (false_label_ != fall_through_) __ jmp(false_label_);
+ }
+}
+
+
+void FullCodeGenerator::DoTest(Expression* condition,
+ Label* if_true,
+ Label* if_false,
+ Label* fall_through) {
+ Handle<Code> ic = ToBooleanStub::GetUninitialized(isolate());
+ CallIC(ic, condition->test_id());
+ __ test(result_register(), result_register());
+ // The stub returns nonzero for true.
+ Split(not_zero, if_true, if_false, fall_through);
+}
+
+
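+// Emits a branch on 'cc' to 'if_true' and a jump to 'if_false', omitting
+// whichever branch would simply fall through.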
+void FullCodeGenerator::Split(Condition cc,
+ Label* if_true,
+ Label* if_false,
+ Label* fall_through) {
+ if (if_false == fall_through) {
+ __ j(cc, if_true);
+ } else if (if_true == fall_through) {
+ __ j(NegateCondition(cc), if_false);
+ } else {
+ __ j(cc, if_true);
+ __ jmp(if_false);
+ }
+}
+
+
+MemOperand FullCodeGenerator::StackOperand(Variable* var) {
+ ASSERT(var->IsStackAllocated());
+ // Offset is negative because higher indexes are at lower addresses.
+ int offset = -var->index() * kPointerSize;
+ // Adjust by a (parameter or local) base offset.
+ if (var->IsParameter()) {
+ offset += (info_->scope()->num_parameters() + 1) * kPointerSize;
+ } else {
+ offset += JavaScriptFrameConstants::kLocal0Offset;
+ }
+ return Operand(ebp, offset);
+}
+
+
+MemOperand FullCodeGenerator::VarOperand(Variable* var, Register scratch) {
+ ASSERT(var->IsContextSlot() || var->IsStackAllocated());
+ if (var->IsContextSlot()) {
+ int context_chain_length = scope()->ContextChainLength(var->scope());
+ __ LoadContext(scratch, context_chain_length);
+ return ContextOperand(scratch, var->index());
+ } else {
+ return StackOperand(var);
+ }
+}
+
+
+void FullCodeGenerator::GetVar(Register dest, Variable* var) {
+ ASSERT(var->IsContextSlot() || var->IsStackAllocated());
+ MemOperand location = VarOperand(var, dest);
+ __ mov(dest, location);
+}
+
+
+void FullCodeGenerator::SetVar(Variable* var,
+ Register src,
+ Register scratch0,
+ Register scratch1) {
+ ASSERT(var->IsContextSlot() || var->IsStackAllocated());
+ ASSERT(!scratch0.is(src));
+ ASSERT(!scratch0.is(scratch1));
+ ASSERT(!scratch1.is(src));
+ MemOperand location = VarOperand(var, scratch0);
+ __ mov(location, src);
+
+ // Emit the write barrier code if the location is in the heap.
+ if (var->IsContextSlot()) {
+ int offset = Context::SlotOffset(var->index());
+ ASSERT(!scratch0.is(esi) && !src.is(esi) && !scratch1.is(esi));
+ __ RecordWriteContextSlot(scratch0, offset, src, scratch1);
+ }
+}
+
+
+void FullCodeGenerator::PrepareForBailoutBeforeSplit(Expression* expr,
+ bool should_normalize,
+ Label* if_true,
+ Label* if_false) {
+ // Only prepare for bailouts before splits if we're in a test
+ // context. Otherwise, we let the Visit function deal with the
+ // preparation to avoid preparing with the same AST id twice.
+ if (!context()->IsTest() || !info_->IsOptimizable()) return;
+
+ Label skip;
+ if (should_normalize) __ jmp(&skip, Label::kNear);
+ PrepareForBailout(expr, TOS_REG);
+ if (should_normalize) {
+ __ cmp(eax, isolate()->factory()->true_value());
+ Split(equal, if_true, if_false, NULL);
+ __ bind(&skip);
+ }
+}
+
+
+void FullCodeGenerator::EmitDebugCheckDeclarationContext(Variable* variable) {
+ // The variable in the declaration always resides in the current context.
+ ASSERT_EQ(0, scope()->ContextChainLength(variable->scope()));
+ if (generate_debug_code_) {
+ // Check that we're not inside a with or catch context.
+ __ mov(ebx, FieldOperand(esi, HeapObject::kMapOffset));
+ __ cmp(ebx, isolate()->factory()->with_context_map());
+ __ Check(not_equal, kDeclarationInWithContext);
+ __ cmp(ebx, isolate()->factory()->catch_context_map());
+ __ Check(not_equal, kDeclarationInCatchContext);
+ }
+}
+
+
+void FullCodeGenerator::VisitVariableDeclaration(
+ VariableDeclaration* declaration) {
+ // If it was not possible to allocate the variable at compile time, we
+ // need to "declare" it at runtime to make sure it actually exists in the
+ // local context.
+ VariableProxy* proxy = declaration->proxy();
+ VariableMode mode = declaration->mode();
+ Variable* variable = proxy->var();
+ bool hole_init = mode == LET || mode == CONST || mode == CONST_LEGACY;
+ switch (variable->location()) {
+ case Variable::UNALLOCATED:
+ globals_->Add(variable->name(), zone());
+ globals_->Add(variable->binding_needs_init()
+ ? isolate()->factory()->the_hole_value()
+ : isolate()->factory()->undefined_value(), zone());
+ break;
+
+ case Variable::PARAMETER:
+ case Variable::LOCAL:
+ if (hole_init) {
+ Comment cmnt(masm_, "[ VariableDeclaration");
+ __ mov(StackOperand(variable),
+ Immediate(isolate()->factory()->the_hole_value()));
+ }
+ break;
+
+ case Variable::CONTEXT:
+ if (hole_init) {
+ Comment cmnt(masm_, "[ VariableDeclaration");
+ EmitDebugCheckDeclarationContext(variable);
+ __ mov(ContextOperand(esi, variable->index()),
+ Immediate(isolate()->factory()->the_hole_value()));
+ // No write barrier since the hole value is in old space.
+ PrepareForBailoutForId(proxy->id(), NO_REGISTERS);
+ }
+ break;
+
+ case Variable::LOOKUP: {
+ Comment cmnt(masm_, "[ VariableDeclaration");
+ __ push(esi);
+ __ push(Immediate(variable->name()));
+ // VariableDeclaration nodes are always introduced in one of four modes.
+ ASSERT(IsDeclaredVariableMode(mode));
+ PropertyAttributes attr =
+ IsImmutableVariableMode(mode) ? READ_ONLY : NONE;
+ __ push(Immediate(Smi::FromInt(attr)));
+ // Push initial value, if any.
+ // Note: For variables we must not push an initial value (such as
+ // 'undefined') because we may have a (legal) redeclaration and we
+ // must not destroy the current value.
+ if (hole_init) {
+ __ push(Immediate(isolate()->factory()->the_hole_value()));
+ } else {
+ __ push(Immediate(Smi::FromInt(0))); // Indicates no initial value.
+ }
+ __ CallRuntime(Runtime::kHiddenDeclareContextSlot, 4);
+ break;
+ }
+ }
+}
+
+
+void FullCodeGenerator::VisitFunctionDeclaration(
+ FunctionDeclaration* declaration) {
+ VariableProxy* proxy = declaration->proxy();
+ Variable* variable = proxy->var();
+ switch (variable->location()) {
+ case Variable::UNALLOCATED: {
+ globals_->Add(variable->name(), zone());
+ Handle<SharedFunctionInfo> function =
+ Compiler::BuildFunctionInfo(declaration->fun(), script());
+ // Check for stack-overflow exception.
+ if (function.is_null()) return SetStackOverflow();
+ globals_->Add(function, zone());
+ break;
+ }
+
+ case Variable::PARAMETER:
+ case Variable::LOCAL: {
+ Comment cmnt(masm_, "[ FunctionDeclaration");
+ VisitForAccumulatorValue(declaration->fun());
+ __ mov(StackOperand(variable), result_register());
+ break;
+ }
+
+ case Variable::CONTEXT: {
+ Comment cmnt(masm_, "[ FunctionDeclaration");
+ EmitDebugCheckDeclarationContext(variable);
+ VisitForAccumulatorValue(declaration->fun());
+ __ mov(ContextOperand(esi, variable->index()), result_register());
+ // We know that we have written a function, which is not a smi.
+ __ RecordWriteContextSlot(esi,
+ Context::SlotOffset(variable->index()),
+ result_register(),
+ ecx,
+ EMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
+ PrepareForBailoutForId(proxy->id(), NO_REGISTERS);
+ break;
+ }
+
+ case Variable::LOOKUP: {
+ Comment cmnt(masm_, "[ FunctionDeclaration");
+ __ push(esi);
+ __ push(Immediate(variable->name()));
+ __ push(Immediate(Smi::FromInt(NONE)));
+ VisitForStackValue(declaration->fun());
+ __ CallRuntime(Runtime::kHiddenDeclareContextSlot, 4);
+ break;
+ }
+ }
+}
+
+
+void FullCodeGenerator::VisitModuleDeclaration(ModuleDeclaration* declaration) {
+ Variable* variable = declaration->proxy()->var();
+ ASSERT(variable->location() == Variable::CONTEXT);
+ ASSERT(variable->interface()->IsFrozen());
+
+ Comment cmnt(masm_, "[ ModuleDeclaration");
+ EmitDebugCheckDeclarationContext(variable);
+
+ // Load instance object.
+ __ LoadContext(eax, scope_->ContextChainLength(scope_->GlobalScope()));
+ __ mov(eax, ContextOperand(eax, variable->interface()->Index()));
+ __ mov(eax, ContextOperand(eax, Context::EXTENSION_INDEX));
+
+ // Assign it.
+ __ mov(ContextOperand(esi, variable->index()), eax);
+ // We know that we have written a module, which is not a smi.
+ __ RecordWriteContextSlot(esi,
+ Context::SlotOffset(variable->index()),
+ eax,
+ ecx,
+ EMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
+ PrepareForBailoutForId(declaration->proxy()->id(), NO_REGISTERS);
+
+ // Traverse into body.
+ Visit(declaration->module());
+}
+
+
+void FullCodeGenerator::VisitImportDeclaration(ImportDeclaration* declaration) {
+ VariableProxy* proxy = declaration->proxy();
+ Variable* variable = proxy->var();
+ switch (variable->location()) {
+ case Variable::UNALLOCATED:
+ // TODO(rossberg)
+ break;
+
+ case Variable::CONTEXT: {
+ Comment cmnt(masm_, "[ ImportDeclaration");
+ EmitDebugCheckDeclarationContext(variable);
+ // TODO(rossberg)
+ break;
+ }
+
+ case Variable::PARAMETER:
+ case Variable::LOCAL:
+ case Variable::LOOKUP:
+ UNREACHABLE();
+ }
+}
+
+
+void FullCodeGenerator::VisitExportDeclaration(ExportDeclaration* declaration) {
+ // TODO(rossberg)
+}
+
+
+void FullCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
+ // Call the runtime to declare the globals.
+ __ push(esi); // The context is the first argument.
+ __ Push(pairs);
+ __ Push(Smi::FromInt(DeclareGlobalsFlags()));
+ __ CallRuntime(Runtime::kHiddenDeclareGlobals, 3);
+ // Return value is ignored.
+}
+
+
+void FullCodeGenerator::DeclareModules(Handle<FixedArray> descriptions) {
+ // Call the runtime to declare the modules.
+ __ Push(descriptions);
+ __ CallRuntime(Runtime::kHiddenDeclareModules, 1);
+ // Return value is ignored.
+}
+
+
+void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
+ Comment cmnt(masm_, "[ SwitchStatement");
+ Breakable nested_statement(this, stmt);
+ SetStatementPosition(stmt);
+
+ // Keep the switch value on the stack until a case matches.
+ VisitForStackValue(stmt->tag());
+ PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS);
+
+ ZoneList<CaseClause*>* clauses = stmt->cases();
+ CaseClause* default_clause = NULL; // Can occur anywhere in the list.
+
+ Label next_test; // Recycled for each test.
+ // Compile all the tests with branches to their bodies.
+ for (int i = 0; i < clauses->length(); i++) {
+ CaseClause* clause = clauses->at(i);
+ clause->body_target()->Unuse();
+
+ // The default is not a test, but remember it as final fall through.
+ if (clause->is_default()) {
+ default_clause = clause;
+ continue;
+ }
+
+ Comment cmnt(masm_, "[ Case comparison");
+ __ bind(&next_test);
+ next_test.Unuse();
+
+ // Compile the label expression.
+ VisitForAccumulatorValue(clause->label());
+
+ // Perform the comparison as if via '==='.
+ __ mov(edx, Operand(esp, 0)); // Switch value.
+ bool inline_smi_code = ShouldInlineSmiCase(Token::EQ_STRICT);
+ JumpPatchSite patch_site(masm_);
+ if (inline_smi_code) {
+ Label slow_case;
+ __ mov(ecx, edx);
+ __ or_(ecx, eax);
+ patch_site.EmitJumpIfNotSmi(ecx, &slow_case, Label::kNear);
+
+ __ cmp(edx, eax);
+ __ j(not_equal, &next_test);
+ __ Drop(1); // Switch value is no longer needed.
+ __ jmp(clause->body_target());
+ __ bind(&slow_case);
+ }
+
+ // Record position before stub call for type feedback.
+ SetSourcePosition(clause->position());
+ Handle<Code> ic = CompareIC::GetUninitialized(isolate(), Token::EQ_STRICT);
+ CallIC(ic, clause->CompareId());
+ patch_site.EmitPatchInfo();
+
+ Label skip;
+ __ jmp(&skip, Label::kNear);
+ PrepareForBailout(clause, TOS_REG);
+ __ cmp(eax, isolate()->factory()->true_value());
+ __ j(not_equal, &next_test);
+ __ Drop(1);
+ __ jmp(clause->body_target());
+ __ bind(&skip);
+
+ __ test(eax, eax);
+ __ j(not_equal, &next_test);
+ __ Drop(1); // Switch value is no longer needed.
+ __ jmp(clause->body_target());
+ }
+
+ // Discard the test value and jump to the default if present, otherwise to
+ // the end of the statement.
+ __ bind(&next_test);
+ __ Drop(1); // Switch value is no longer needed.
+ if (default_clause == NULL) {
+ __ jmp(nested_statement.break_label());
+ } else {
+ __ jmp(default_clause->body_target());
+ }
+
+ // Compile all the case bodies.
+ for (int i = 0; i < clauses->length(); i++) {
+ Comment cmnt(masm_, "[ Case body");
+ CaseClause* clause = clauses->at(i);
+ __ bind(clause->body_target());
+ PrepareForBailoutForId(clause->EntryId(), NO_REGISTERS);
+ VisitStatements(clause->statements());
+ }
+
+ __ bind(nested_statement.break_label());
+ PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
+}
+
+
+void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
+ Comment cmnt(masm_, "[ ForInStatement");
+ int slot = stmt->ForInFeedbackSlot();
+
+ SetStatementPosition(stmt);
+
+ Label loop, exit;
+ ForIn loop_statement(this, stmt);
+ increment_loop_depth();
+
+ // Get the object to enumerate over. If the object is null or undefined, skip
+ // over the loop. See ECMA-262 version 5, section 12.6.4.
+ VisitForAccumulatorValue(stmt->enumerable());
+ __ cmp(eax, isolate()->factory()->undefined_value());
+ __ j(equal, &exit);
+ __ cmp(eax, isolate()->factory()->null_value());
+ __ j(equal, &exit);
+
+ PrepareForBailoutForId(stmt->PrepareId(), TOS_REG);
+
+ // Convert the object to a JS object.
+ Label convert, done_convert;
+ __ JumpIfSmi(eax, &convert, Label::kNear);
+ __ CmpObjectType(eax, FIRST_SPEC_OBJECT_TYPE, ecx);
+ __ j(above_equal, &done_convert, Label::kNear);
+ __ bind(&convert);
+ __ push(eax);
+ __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
+ __ bind(&done_convert);
+ __ push(eax);
+
+ // Check for proxies.
+ Label call_runtime, use_cache, fixed_array;
+ STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
+ __ CmpObjectType(eax, LAST_JS_PROXY_TYPE, ecx);
+ __ j(below_equal, &call_runtime);
+
+ // Check cache validity in generated code. This is a fast case for
+ // the JSObject::IsSimpleEnum cache validity checks. If we cannot
+ // guarantee cache validity, call the runtime system to check cache
+ // validity or get the property names in a fixed array.
+ __ CheckEnumCache(&call_runtime);
+
+ __ mov(eax, FieldOperand(eax, HeapObject::kMapOffset));
+ __ jmp(&use_cache, Label::kNear);
+
+ // Get the set of properties to enumerate.
+ __ bind(&call_runtime);
+ __ push(eax);
+ __ CallRuntime(Runtime::kGetPropertyNamesFast, 1);
+ __ cmp(FieldOperand(eax, HeapObject::kMapOffset),
+ isolate()->factory()->meta_map());
+ __ j(not_equal, &fixed_array);
+
+
+ // We got a map in register eax. Get the enumeration cache from it.
+ Label no_descriptors;
+ __ bind(&use_cache);
+
+ __ EnumLength(edx, eax);
+ __ cmp(edx, Immediate(Smi::FromInt(0)));
+ __ j(equal, &no_descriptors);
+
+ __ LoadInstanceDescriptors(eax, ecx);
+ __ mov(ecx, FieldOperand(ecx, DescriptorArray::kEnumCacheOffset));
+ __ mov(ecx, FieldOperand(ecx, DescriptorArray::kEnumCacheBridgeCacheOffset));
+
+ // Set up the four remaining stack slots.
+ __ push(eax); // Map.
+ __ push(ecx); // Enumeration cache.
+ __ push(edx); // Number of valid entries for the map in the enum cache.
+ __ push(Immediate(Smi::FromInt(0))); // Initial index.
+ __ jmp(&loop);
+
+ __ bind(&no_descriptors);
+ __ add(esp, Immediate(kPointerSize));
+ __ jmp(&exit);
+
+ // We got a fixed array in register eax. Iterate through that.
+ Label non_proxy;
+ __ bind(&fixed_array);
+
+ // No need for a write barrier, we are storing a Smi in the feedback vector.
+ __ LoadHeapObject(ebx, FeedbackVector());
+ __ mov(FieldOperand(ebx, FixedArray::OffsetOfElementAt(slot)),
+ Immediate(TypeFeedbackInfo::MegamorphicSentinel(isolate())));
+
+ __ mov(ebx, Immediate(Smi::FromInt(1))); // Smi indicates slow check
+ __ mov(ecx, Operand(esp, 0 * kPointerSize)); // Get enumerated object
+ STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
+ __ CmpObjectType(ecx, LAST_JS_PROXY_TYPE, ecx);
+ __ j(above, &non_proxy);
+ __ Move(ebx, Immediate(Smi::FromInt(0))); // Zero indicates proxy
+ __ bind(&non_proxy);
+ __ push(ebx); // Smi
+ __ push(eax); // Array
+ __ mov(eax, FieldOperand(eax, FixedArray::kLengthOffset));
+ __ push(eax); // Fixed array length (as smi).
+ __ push(Immediate(Smi::FromInt(0))); // Initial index.
+
+ // Generate code for doing the condition check.
+ PrepareForBailoutForId(stmt->BodyId(), NO_REGISTERS);
+ __ bind(&loop);
+ __ mov(eax, Operand(esp, 0 * kPointerSize)); // Get the current index.
+ __ cmp(eax, Operand(esp, 1 * kPointerSize)); // Compare to the array length.
+ __ j(above_equal, loop_statement.break_label());
+
+ // Get the current entry of the array into register ebx.
+ __ mov(ebx, Operand(esp, 2 * kPointerSize));
+ __ mov(ebx, FieldOperand(ebx, eax, times_2, FixedArray::kHeaderSize));
+
+ // Get the expected map from the stack or a smi in the
+ // permanent slow case into register edx.
+ __ mov(edx, Operand(esp, 3 * kPointerSize));
+
+ // Check if the expected map still matches that of the enumerable.
+ // If not, we may have to filter the key.
+ Label update_each;
+ __ mov(ecx, Operand(esp, 4 * kPointerSize));
+ __ cmp(edx, FieldOperand(ecx, HeapObject::kMapOffset));
+ __ j(equal, &update_each, Label::kNear);
+
+ // For proxies, no filtering is done.
+ // TODO(rossberg): What if only a prototype is a proxy? Not specified yet.
+ ASSERT(Smi::FromInt(0) == 0);
+ __ test(edx, edx);
+ __ j(zero, &update_each);
+
+ // Convert the entry to a string or null if it isn't a property
+ // anymore. If the property has been removed while iterating, we
+ // just skip it.
+ __ push(ecx); // Enumerable.
+ __ push(ebx); // Current entry.
+ __ InvokeBuiltin(Builtins::FILTER_KEY, CALL_FUNCTION);
+ __ test(eax, eax);
+ __ j(equal, loop_statement.continue_label());
+ __ mov(ebx, eax);
+
+ // Update the 'each' property or variable from the possibly filtered
+ // entry in register ebx.
+ __ bind(&update_each);
+ __ mov(result_register(), ebx);
+ // Perform the assignment as if via '='.
+ { EffectContext context(this);
+ EmitAssignment(stmt->each());
+ }
+
+ // Generate code for the body of the loop.
+ Visit(stmt->body());
+
+ // Generate code for going to the next element by incrementing the
+ // index (smi) stored on top of the stack.
+ __ bind(loop_statement.continue_label());
+ __ add(Operand(esp, 0 * kPointerSize), Immediate(Smi::FromInt(1)));
+
+ EmitBackEdgeBookkeeping(stmt, &loop);
+ __ jmp(&loop);
+
+ // Remove the pointers stored on the stack.
+ __ bind(loop_statement.break_label());
+ __ add(esp, Immediate(5 * kPointerSize));
+
+ // Exit and decrement the loop depth.
+ PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
+ __ bind(&exit);
+ decrement_loop_depth();
+}
+
+
+void FullCodeGenerator::VisitForOfStatement(ForOfStatement* stmt) {
+ Comment cmnt(masm_, "[ ForOfStatement");
+ SetStatementPosition(stmt);
+
+ Iteration loop_statement(this, stmt);
+ increment_loop_depth();
+
+ // var iterator = iterable[@@iterator]()
+ VisitForAccumulatorValue(stmt->assign_iterator());
+
+ // As with for-in, skip the loop if the iterator is null or undefined.
+ __ CompareRoot(eax, Heap::kUndefinedValueRootIndex);
+ __ j(equal, loop_statement.break_label());
+ __ CompareRoot(eax, Heap::kNullValueRootIndex);
+ __ j(equal, loop_statement.break_label());
+
+ // Convert the iterator to a JS object.
+ Label convert, done_convert;
+ __ JumpIfSmi(eax, &convert);
+ __ CmpObjectType(eax, FIRST_SPEC_OBJECT_TYPE, ecx);
+ __ j(above_equal, &done_convert);
+ __ bind(&convert);
+ __ push(eax);
+ __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
+ __ bind(&done_convert);
+
+ // Loop entry.
+ __ bind(loop_statement.continue_label());
+
+ // result = iterator.next()
+ VisitForEffect(stmt->next_result());
+
+ // if (result.done) break;
+ Label result_not_done;
+ VisitForControl(stmt->result_done(),
+ loop_statement.break_label(),
+ &result_not_done,
+ &result_not_done);
+ __ bind(&result_not_done);
+
+ // each = result.value
+ VisitForEffect(stmt->assign_each());
+
+ // Generate code for the body of the loop.
+ Visit(stmt->body());
+
+ // Check stack before looping.
+ PrepareForBailoutForId(stmt->BackEdgeId(), NO_REGISTERS);
+ EmitBackEdgeBookkeeping(stmt, loop_statement.continue_label());
+ __ jmp(loop_statement.continue_label());
+
+ // Exit and decrement the loop depth.
+ PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
+ __ bind(loop_statement.break_label());
+ decrement_loop_depth();
+}
+
+
+void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info,
+ bool pretenure) {
+ // Use the fast case closure allocation code that allocates in new
+ // space for nested functions that don't need literals cloning. If
+ // we're running with the --always-opt or the --prepare-always-opt
+ // flag, we need to use the runtime function so that the new function
+ // we are creating here gets a chance to have its code optimized and
+ // doesn't just get a copy of the existing unoptimized code.
+ if (!FLAG_always_opt &&
+ !FLAG_prepare_always_opt &&
+ !pretenure &&
+ scope()->is_function_scope() &&
+ info->num_literals() == 0) {
+ FastNewClosureStub stub(isolate(),
+ info->strict_mode(),
+ info->is_generator());
+ __ mov(ebx, Immediate(info));
+ __ CallStub(&stub);
+ } else {
+ __ push(esi);
+ __ push(Immediate(info));
+ __ push(Immediate(pretenure
+ ? isolate()->factory()->true_value()
+ : isolate()->factory()->false_value()));
+ __ CallRuntime(Runtime::kHiddenNewClosure, 3);
+ }
+ context()->Plug(eax);
+}
+
+
+void FullCodeGenerator::VisitVariableProxy(VariableProxy* expr) {
+ Comment cmnt(masm_, "[ VariableProxy");
+ EmitVariableLoad(expr);
+}
+
+
+void FullCodeGenerator::EmitLoadGlobalCheckExtensions(Variable* var,
+ TypeofState typeof_state,
+ Label* slow) {
+ Register context = esi;
+ Register temp = edx;
+
+ Scope* s = scope();
+ while (s != NULL) {
+ if (s->num_heap_slots() > 0) {
+ if (s->calls_sloppy_eval()) {
+ // Check that extension is NULL.
+ __ cmp(ContextOperand(context, Context::EXTENSION_INDEX),
+ Immediate(0));
+ __ j(not_equal, slow);
+ }
+ // Load next context in chain.
+ __ mov(temp, ContextOperand(context, Context::PREVIOUS_INDEX));
+ // Walk the rest of the chain without clobbering esi.
+ context = temp;
+ }
+ // If no outer scope calls eval, we do not need to check more
+ // context extensions. If we have reached an eval scope, we check
+ // all extensions from this point.
+ if (!s->outer_scope_calls_sloppy_eval() || s->is_eval_scope()) break;
+ s = s->outer_scope();
+ }
+
+ if (s != NULL && s->is_eval_scope()) {
+ // Loop up the context chain. There is no frame effect so it is
+ // safe to use raw labels here.
+ Label next, fast;
+ if (!context.is(temp)) {
+ __ mov(temp, context);
+ }
+ __ bind(&next);
+ // Terminate at native context.
+ __ cmp(FieldOperand(temp, HeapObject::kMapOffset),
+ Immediate(isolate()->factory()->native_context_map()));
+ __ j(equal, &fast, Label::kNear);
+ // Check that extension is NULL.
+ __ cmp(ContextOperand(temp, Context::EXTENSION_INDEX), Immediate(0));
+ __ j(not_equal, slow);
+ // Load next context in chain.
+ __ mov(temp, ContextOperand(temp, Context::PREVIOUS_INDEX));
+ __ jmp(&next);
+ __ bind(&fast);
+ }
+
+ // All extension objects were empty and it is safe to use a global
+ // load IC call.
+ __ mov(edx, GlobalObjectOperand());
+ __ mov(ecx, var->name());
+ ContextualMode mode = (typeof_state == INSIDE_TYPEOF)
+ ? NOT_CONTEXTUAL
+ : CONTEXTUAL;
+
+ CallLoadIC(mode);
+}
+
+
+MemOperand FullCodeGenerator::ContextSlotOperandCheckExtensions(Variable* var,
+ Label* slow) {
+ ASSERT(var->IsContextSlot());
+ Register context = esi;
+ Register temp = ebx;
+
+ for (Scope* s = scope(); s != var->scope(); s = s->outer_scope()) {
+ if (s->num_heap_slots() > 0) {
+ if (s->calls_sloppy_eval()) {
+ // Check that extension is NULL.
+ __ cmp(ContextOperand(context, Context::EXTENSION_INDEX),
+ Immediate(0));
+ __ j(not_equal, slow);
+ }
+ __ mov(temp, ContextOperand(context, Context::PREVIOUS_INDEX));
+ // Walk the rest of the chain without clobbering esi.
+ context = temp;
+ }
+ }
+ // Check that last extension is NULL.
+ __ cmp(ContextOperand(context, Context::EXTENSION_INDEX), Immediate(0));
+ __ j(not_equal, slow);
+
+ // This function is used only for loads, not stores, so it's safe to
+ // return an esi-based operand (the write barrier cannot be allowed to
+ // destroy the esi register).
+ return ContextOperand(context, var->index());
+}
+
+
+void FullCodeGenerator::EmitDynamicLookupFastCase(Variable* var,
+ TypeofState typeof_state,
+ Label* slow,
+ Label* done) {
+ // Generate fast-case code for variables that might be shadowed by
+ // eval-introduced variables. Eval is used a lot without
+ // introducing variables. In those cases, we do not want to
+ // perform a runtime call for all variables in the scope
+ // containing the eval.
+ if (var->mode() == DYNAMIC_GLOBAL) {
+ EmitLoadGlobalCheckExtensions(var, typeof_state, slow);
+ __ jmp(done);
+ } else if (var->mode() == DYNAMIC_LOCAL) {
+ Variable* local = var->local_if_not_shadowed();
+ __ mov(eax, ContextSlotOperandCheckExtensions(local, slow));
+ if (local->mode() == LET || local->mode() == CONST ||
+ local->mode() == CONST_LEGACY) {
+ __ cmp(eax, isolate()->factory()->the_hole_value());
+ __ j(not_equal, done);
+ if (local->mode() == CONST_LEGACY) {
+ __ mov(eax, isolate()->factory()->undefined_value());
+ } else { // LET || CONST
+ __ push(Immediate(var->name()));
+ __ CallRuntime(Runtime::kHiddenThrowReferenceError, 1);
+ }
+ }
+ __ jmp(done);
+ }
+}
+
+
+void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
+ // Record position before possible IC call.
+ SetSourcePosition(proxy->position());
+ Variable* var = proxy->var();
+
+ // Three cases: global variables, lookup variables, and all other types of
+ // variables.
+ switch (var->location()) {
+ case Variable::UNALLOCATED: {
+ Comment cmnt(masm_, "[ Global variable");
+ // Use inline caching. Variable name is passed in ecx and the global
+ // object in eax.
+ __ mov(edx, GlobalObjectOperand());
+ __ mov(ecx, var->name());
+ CallLoadIC(CONTEXTUAL);
+ context()->Plug(eax);
+ break;
+ }
+
+ case Variable::PARAMETER:
+ case Variable::LOCAL:
+ case Variable::CONTEXT: {
+ Comment cmnt(masm_, var->IsContextSlot() ? "[ Context variable"
+ : "[ Stack variable");
+ if (var->binding_needs_init()) {
+ // var->scope() may be NULL when the proxy is located in eval code and
+ // refers to a potential outside binding. Currently those bindings are
+        // always looked up dynamically, i.e. in that case
+        // var->location() == LOOKUP always holds.
+ ASSERT(var->scope() != NULL);
+
+ // Check if the binding really needs an initialization check. The check
+ // can be skipped in the following situation: we have a LET or CONST
+ // binding in harmony mode, both the Variable and the VariableProxy have
+ // the same declaration scope (i.e. they are both in global code, in the
+ // same function or in the same eval code) and the VariableProxy is in
+ // the source physically located after the initializer of the variable.
+ //
+ // We cannot skip any initialization checks for CONST in non-harmony
+ // mode because const variables may be declared but never initialized:
+ // if (false) { const x; }; var y = x;
+ //
+ // The condition on the declaration scopes is a conservative check for
+ // nested functions that access a binding and are called before the
+ // binding is initialized:
+ // function() { f(); let x = 1; function f() { x = 2; } }
+ //
+ bool skip_init_check;
+ if (var->scope()->DeclarationScope() != scope()->DeclarationScope()) {
+ skip_init_check = false;
+ } else {
+ // Check that we always have valid source position.
+ ASSERT(var->initializer_position() != RelocInfo::kNoPosition);
+ ASSERT(proxy->position() != RelocInfo::kNoPosition);
+ skip_init_check = var->mode() != CONST_LEGACY &&
+ var->initializer_position() < proxy->position();
+ }
+
+ if (!skip_init_check) {
+ // Let and const need a read barrier.
+ Label done;
+ GetVar(eax, var);
+ __ cmp(eax, isolate()->factory()->the_hole_value());
+ __ j(not_equal, &done, Label::kNear);
+ if (var->mode() == LET || var->mode() == CONST) {
+ // Throw a reference error when using an uninitialized let/const
+ // binding in harmony mode.
+ __ push(Immediate(var->name()));
+ __ CallRuntime(Runtime::kHiddenThrowReferenceError, 1);
+ } else {
+            // Uninitialized const bindings outside of harmony mode are unholed.
+ ASSERT(var->mode() == CONST_LEGACY);
+ __ mov(eax, isolate()->factory()->undefined_value());
+ }
+ __ bind(&done);
+ context()->Plug(eax);
+ break;
+ }
+ }
+ context()->Plug(var);
+ break;
+ }
+
+ case Variable::LOOKUP: {
+ Comment cmnt(masm_, "[ Lookup variable");
+ Label done, slow;
+ // Generate code for loading from variables potentially shadowed
+ // by eval-introduced variables.
+ EmitDynamicLookupFastCase(var, NOT_INSIDE_TYPEOF, &slow, &done);
+ __ bind(&slow);
+ __ push(esi); // Context.
+ __ push(Immediate(var->name()));
+ __ CallRuntime(Runtime::kHiddenLoadContextSlot, 2);
+ __ bind(&done);
+ context()->Plug(eax);
+ break;
+ }
+ }
+}
+
+
+void FullCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
+ Comment cmnt(masm_, "[ RegExpLiteral");
+ Label materialized;
+ // Registers will be used as follows:
+ // edi = JS function.
+ // ecx = literals array.
+ // ebx = regexp literal.
+ // eax = regexp literal clone.
+ __ mov(edi, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
+ __ mov(ecx, FieldOperand(edi, JSFunction::kLiteralsOffset));
+ int literal_offset =
+ FixedArray::kHeaderSize + expr->literal_index() * kPointerSize;
+ __ mov(ebx, FieldOperand(ecx, literal_offset));
+ __ cmp(ebx, isolate()->factory()->undefined_value());
+ __ j(not_equal, &materialized, Label::kNear);
+
+ // Create regexp literal using runtime function
+ // Result will be in eax.
+ __ push(ecx);
+ __ push(Immediate(Smi::FromInt(expr->literal_index())));
+ __ push(Immediate(expr->pattern()));
+ __ push(Immediate(expr->flags()));
+ __ CallRuntime(Runtime::kHiddenMaterializeRegExpLiteral, 4);
+ __ mov(ebx, eax);
+
+ __ bind(&materialized);
+ int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
+ Label allocated, runtime_allocate;
+ __ Allocate(size, eax, ecx, edx, &runtime_allocate, TAG_OBJECT);
+ __ jmp(&allocated);
+
+ __ bind(&runtime_allocate);
+ __ push(ebx);
+ __ push(Immediate(Smi::FromInt(size)));
+ __ CallRuntime(Runtime::kHiddenAllocateInNewSpace, 1);
+ __ pop(ebx);
+
+ __ bind(&allocated);
+ // Copy the content into the newly allocated memory.
+ // (Unroll copy loop once for better throughput).
+ for (int i = 0; i < size - kPointerSize; i += 2 * kPointerSize) {
+ __ mov(edx, FieldOperand(ebx, i));
+ __ mov(ecx, FieldOperand(ebx, i + kPointerSize));
+ __ mov(FieldOperand(eax, i), edx);
+ __ mov(FieldOperand(eax, i + kPointerSize), ecx);
+ }
+ if ((size % (2 * kPointerSize)) != 0) {
+ __ mov(edx, FieldOperand(ebx, size - kPointerSize));
+ __ mov(FieldOperand(eax, size - kPointerSize), edx);
+ }
+ context()->Plug(eax);
+}
+
+
+void FullCodeGenerator::EmitAccessor(Expression* expression) {
+ if (expression == NULL) {
+ __ push(Immediate(isolate()->factory()->null_value()));
+ } else {
+ VisitForStackValue(expression);
+ }
+}
+
+
+void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
+ Comment cmnt(masm_, "[ ObjectLiteral");
+
+ expr->BuildConstantProperties(isolate());
+ Handle<FixedArray> constant_properties = expr->constant_properties();
+ int flags = expr->fast_elements()
+ ? ObjectLiteral::kFastElements
+ : ObjectLiteral::kNoFlags;
+ flags |= expr->has_function()
+ ? ObjectLiteral::kHasFunction
+ : ObjectLiteral::kNoFlags;
+ int properties_count = constant_properties->length() / 2;
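+  // Literals that are deep, may store doubles, are too large, or are compiled
+  // with the serializer enabled go through the runtime; otherwise a shallow
+  // clone stub suffices.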
+ if (expr->may_store_doubles() || expr->depth() > 1 ||
+ masm()->serializer_enabled() ||
+ flags != ObjectLiteral::kFastElements ||
+ properties_count > FastCloneShallowObjectStub::kMaximumClonedProperties) {
+ __ mov(edi, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
+ __ push(FieldOperand(edi, JSFunction::kLiteralsOffset));
+ __ push(Immediate(Smi::FromInt(expr->literal_index())));
+ __ push(Immediate(constant_properties));
+ __ push(Immediate(Smi::FromInt(flags)));
+ __ CallRuntime(Runtime::kHiddenCreateObjectLiteral, 4);
+ } else {
+ __ mov(edi, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
+ __ mov(eax, FieldOperand(edi, JSFunction::kLiteralsOffset));
+ __ mov(ebx, Immediate(Smi::FromInt(expr->literal_index())));
+ __ mov(ecx, Immediate(constant_properties));
+ __ mov(edx, Immediate(Smi::FromInt(flags)));
+ FastCloneShallowObjectStub stub(isolate(), properties_count);
+ __ CallStub(&stub);
+ }
+
+ // If result_saved is true the result is on top of the stack. If
+ // result_saved is false the result is in eax.
+ bool result_saved = false;
+
+ // Mark all computed expressions that are bound to a key that
+ // is shadowed by a later occurrence of the same key. For the
+ // marked expressions, no store code is emitted.
+ expr->CalculateEmitStore(zone());
+
+ AccessorTable accessor_table(zone());
+ for (int i = 0; i < expr->properties()->length(); i++) {
+ ObjectLiteral::Property* property = expr->properties()->at(i);
+ if (property->IsCompileTimeValue()) continue;
+
+ Literal* key = property->key();
+ Expression* value = property->value();
+ if (!result_saved) {
+ __ push(eax); // Save result on the stack
+ result_saved = true;
+ }
+ switch (property->kind()) {
+ case ObjectLiteral::Property::CONSTANT:
+ UNREACHABLE();
+ case ObjectLiteral::Property::MATERIALIZED_LITERAL:
+ ASSERT(!CompileTimeValue::IsCompileTimeValue(value));
+ // Fall through.
+ case ObjectLiteral::Property::COMPUTED:
+ if (key->value()->IsInternalizedString()) {
+ if (property->emit_store()) {
+ VisitForAccumulatorValue(value);
+ __ mov(ecx, Immediate(key->value()));
+ __ mov(edx, Operand(esp, 0));
+ CallStoreIC(key->LiteralFeedbackId());
+ PrepareForBailoutForId(key->id(), NO_REGISTERS);
+ } else {
+ VisitForEffect(value);
+ }
+ break;
+ }
+ __ push(Operand(esp, 0)); // Duplicate receiver.
+ VisitForStackValue(key);
+ VisitForStackValue(value);
+ if (property->emit_store()) {
+ __ push(Immediate(Smi::FromInt(NONE))); // PropertyAttributes
+ __ CallRuntime(Runtime::kSetProperty, 4);
+ } else {
+ __ Drop(3);
+ }
+ break;
+ case ObjectLiteral::Property::PROTOTYPE:
+ __ push(Operand(esp, 0)); // Duplicate receiver.
+ VisitForStackValue(value);
+ if (property->emit_store()) {
+ __ CallRuntime(Runtime::kSetPrototype, 2);
+ } else {
+ __ Drop(2);
+ }
+ break;
+ case ObjectLiteral::Property::GETTER:
+ accessor_table.lookup(key)->second->getter = value;
+ break;
+ case ObjectLiteral::Property::SETTER:
+ accessor_table.lookup(key)->second->setter = value;
+ break;
+ }
+ }
+
+ // Emit code to define accessors, using only a single call to the runtime for
+ // each pair of corresponding getters and setters.
+ for (AccessorTable::Iterator it = accessor_table.begin();
+ it != accessor_table.end();
+ ++it) {
+ __ push(Operand(esp, 0)); // Duplicate receiver.
+ VisitForStackValue(it->first);
+ EmitAccessor(it->second->getter);
+ EmitAccessor(it->second->setter);
+ __ push(Immediate(Smi::FromInt(NONE)));
+ __ CallRuntime(Runtime::kDefineOrRedefineAccessorProperty, 5);
+ }
+
+ if (expr->has_function()) {
+ ASSERT(result_saved);
+ __ push(Operand(esp, 0));
+ __ CallRuntime(Runtime::kToFastProperties, 1);
+ }
+
+ if (result_saved) {
+ context()->PlugTOS();
+ } else {
+ context()->Plug(eax);
+ }
+}
+
+
+void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
+ Comment cmnt(masm_, "[ ArrayLiteral");
+
+ expr->BuildConstantElements(isolate());
+ int flags = expr->depth() == 1
+ ? ArrayLiteral::kShallowElements
+ : ArrayLiteral::kNoFlags;
+
+ ZoneList<Expression*>* subexprs = expr->values();
+ int length = subexprs->length();
+ Handle<FixedArray> constant_elements = expr->constant_elements();
+ ASSERT_EQ(2, constant_elements->length());
+ ElementsKind constant_elements_kind =
+ static_cast<ElementsKind>(Smi::cast(constant_elements->get(0))->value());
+ bool has_constant_fast_elements =
+ IsFastObjectElementsKind(constant_elements_kind);
+ Handle<FixedArrayBase> constant_elements_values(
+ FixedArrayBase::cast(constant_elements->get(1)));
+
+ AllocationSiteMode allocation_site_mode = TRACK_ALLOCATION_SITE;
+ if (has_constant_fast_elements && !FLAG_allocation_site_pretenuring) {
+ // If the only customer of allocation sites is transitioning, then
+ // we can turn it off if we don't have anywhere else to transition to.
+ allocation_site_mode = DONT_TRACK_ALLOCATION_SITE;
+ }
+
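+  // Nested literals (depth > 1) go through the runtime; shallow literals are
+  // cloned by the stub.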
+ if (expr->depth() > 1) {
+ __ mov(ebx, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
+ __ push(FieldOperand(ebx, JSFunction::kLiteralsOffset));
+ __ push(Immediate(Smi::FromInt(expr->literal_index())));
+ __ push(Immediate(constant_elements));
+ __ push(Immediate(Smi::FromInt(flags)));
+ __ CallRuntime(Runtime::kHiddenCreateArrayLiteral, 4);
+ } else {
+ __ mov(ebx, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
+ __ mov(eax, FieldOperand(ebx, JSFunction::kLiteralsOffset));
+ __ mov(ebx, Immediate(Smi::FromInt(expr->literal_index())));
+ __ mov(ecx, Immediate(constant_elements));
+ FastCloneShallowArrayStub stub(isolate(), allocation_site_mode);
+ __ CallStub(&stub);
+ }
+
+ bool result_saved = false; // Is the result saved to the stack?
+
+ // Emit code to evaluate all the non-constant subexpressions and to store
+ // them into the newly cloned array.
+ for (int i = 0; i < length; i++) {
+ Expression* subexpr = subexprs->at(i);
+ // If the subexpression is a literal or a simple materialized literal it
+ // is already set in the cloned array.
+ if (CompileTimeValue::IsCompileTimeValue(subexpr)) continue;
+
+ if (!result_saved) {
+ __ push(eax); // array literal.
+ __ push(Immediate(Smi::FromInt(expr->literal_index())));
+ result_saved = true;
+ }
+ VisitForAccumulatorValue(subexpr);
+
+ if (IsFastObjectElementsKind(constant_elements_kind)) {
+ // Fast-case array literal with ElementsKind of FAST_*_ELEMENTS, they
+ // cannot transition and don't need to call the runtime stub.
+ int offset = FixedArray::kHeaderSize + (i * kPointerSize);
+ __ mov(ebx, Operand(esp, kPointerSize)); // Copy of array literal.
+ __ mov(ebx, FieldOperand(ebx, JSObject::kElementsOffset));
+ // Store the subexpression value in the array's elements.
+ __ mov(FieldOperand(ebx, offset), result_register());
+ // Update the write barrier for the array store.
+ __ RecordWriteField(ebx, offset, result_register(), ecx,
+ EMIT_REMEMBERED_SET,
+ INLINE_SMI_CHECK);
+ } else {
+ // Store the subexpression value in the array's elements.
+ __ mov(ecx, Immediate(Smi::FromInt(i)));
+ StoreArrayLiteralElementStub stub(isolate());
+ __ CallStub(&stub);
+ }
+
+ PrepareForBailoutForId(expr->GetIdForElement(i), NO_REGISTERS);
+ }
+
+ if (result_saved) {
+ __ add(esp, Immediate(kPointerSize)); // literal index
+ context()->PlugTOS();
+ } else {
+ context()->Plug(eax);
+ }
+}
+
+
+void FullCodeGenerator::VisitAssignment(Assignment* expr) {
+ ASSERT(expr->target()->IsValidReferenceExpression());
+
+ Comment cmnt(masm_, "[ Assignment");
+
+ // Left-hand side can only be a property, a global or a (parameter or local)
+ // slot.
+ enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY };
+ LhsKind assign_type = VARIABLE;
+ Property* property = expr->target()->AsProperty();
+ if (property != NULL) {
+ assign_type = (property->key()->IsPropertyName())
+ ? NAMED_PROPERTY
+ : KEYED_PROPERTY;
+ }
+
+ // Evaluate LHS expression.
+ switch (assign_type) {
+ case VARIABLE:
+ // Nothing to do here.
+ break;
+ case NAMED_PROPERTY:
+ if (expr->is_compound()) {
+ // We need the receiver both on the stack and in edx.
+ VisitForStackValue(property->obj());
+ __ mov(edx, Operand(esp, 0));
+ } else {
+ VisitForStackValue(property->obj());
+ }
+ break;
+ case KEYED_PROPERTY: {
+ if (expr->is_compound()) {
+ VisitForStackValue(property->obj());
+ VisitForStackValue(property->key());
+ __ mov(edx, Operand(esp, kPointerSize)); // Object.
+ __ mov(ecx, Operand(esp, 0)); // Key.
+ } else {
+ VisitForStackValue(property->obj());
+ VisitForStackValue(property->key());
+ }
+ break;
+ }
+ }
+
+ // For compound assignments we need another deoptimization point after the
+ // variable/property load.
+ if (expr->is_compound()) {
+ AccumulatorValueContext result_context(this);
+ { AccumulatorValueContext left_operand_context(this);
+ switch (assign_type) {
+ case VARIABLE:
+ EmitVariableLoad(expr->target()->AsVariableProxy());
+ PrepareForBailout(expr->target(), TOS_REG);
+ break;
+ case NAMED_PROPERTY:
+ EmitNamedPropertyLoad(property);
+ PrepareForBailoutForId(property->LoadId(), TOS_REG);
+ break;
+ case KEYED_PROPERTY:
+ EmitKeyedPropertyLoad(property);
+ PrepareForBailoutForId(property->LoadId(), TOS_REG);
+ break;
+ }
+ }
+
+ Token::Value op = expr->binary_op();
+ __ push(eax); // Left operand goes on the stack.
+ VisitForAccumulatorValue(expr->value());
+
+ OverwriteMode mode = expr->value()->ResultOverwriteAllowed()
+ ? OVERWRITE_RIGHT
+ : NO_OVERWRITE;
+ SetSourcePosition(expr->position() + 1);
+ if (ShouldInlineSmiCase(op)) {
+ EmitInlineSmiBinaryOp(expr->binary_operation(),
+ op,
+ mode,
+ expr->target(),
+ expr->value());
+ } else {
+ EmitBinaryOp(expr->binary_operation(), op, mode);
+ }
+
+ // Deoptimization point in case the binary operation may have side effects.
+ PrepareForBailout(expr->binary_operation(), TOS_REG);
+ } else {
+ VisitForAccumulatorValue(expr->value());
+ }
+
+ // Record source position before possible IC call.
+ SetSourcePosition(expr->position());
+
+ // Store the value.
+ switch (assign_type) {
+ case VARIABLE:
+ EmitVariableAssignment(expr->target()->AsVariableProxy()->var(),
+ expr->op());
+ PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+ context()->Plug(eax);
+ break;
+ case NAMED_PROPERTY:
+ EmitNamedPropertyAssignment(expr);
+ break;
+ case KEYED_PROPERTY:
+ EmitKeyedPropertyAssignment(expr);
+ break;
+ }
+}
+
+
+void FullCodeGenerator::VisitYield(Yield* expr) {
+ Comment cmnt(masm_, "[ Yield");
+ // Evaluate yielded value first; the initial iterator definition depends on
+ // this. It stays on the stack while we update the iterator.
+ VisitForStackValue(expr->expression());
+
+ switch (expr->yield_kind()) {
+ case Yield::SUSPEND:
+ // Pop value from top-of-stack slot; box result into result register.
+ EmitCreateIteratorResult(false);
+ __ push(result_register());
+ // Fall through.
+ case Yield::INITIAL: {
+ Label suspend, continuation, post_runtime, resume;
+
+ __ jmp(&suspend);
+
+ __ bind(&continuation);
+ __ jmp(&resume);
+
+ __ bind(&suspend);
+ VisitForAccumulatorValue(expr->generator_object());
+ ASSERT(continuation.pos() > 0 && Smi::IsValid(continuation.pos()));
+ __ mov(FieldOperand(eax, JSGeneratorObject::kContinuationOffset),
+ Immediate(Smi::FromInt(continuation.pos())));
+ __ mov(FieldOperand(eax, JSGeneratorObject::kContextOffset), esi);
+ __ mov(ecx, esi);
+ __ RecordWriteField(eax, JSGeneratorObject::kContextOffset, ecx, edx);
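+      // If the operand stack is empty, the state stored above is sufficient;
+      // otherwise call the runtime to also save the operand stack.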
+ __ lea(ebx, Operand(ebp, StandardFrameConstants::kExpressionsOffset));
+ __ cmp(esp, ebx);
+ __ j(equal, &post_runtime);
+ __ push(eax); // generator object
+ __ CallRuntime(Runtime::kHiddenSuspendJSGeneratorObject, 1);
+ __ mov(context_register(),
+ Operand(ebp, StandardFrameConstants::kContextOffset));
+ __ bind(&post_runtime);
+ __ pop(result_register());
+ EmitReturnSequence();
+
+ __ bind(&resume);
+ context()->Plug(result_register());
+ break;
+ }
+
+ case Yield::FINAL: {
+ VisitForAccumulatorValue(expr->generator_object());
+ __ mov(FieldOperand(result_register(),
+ JSGeneratorObject::kContinuationOffset),
+ Immediate(Smi::FromInt(JSGeneratorObject::kGeneratorClosed)));
+ // Pop value from top-of-stack slot, box result into result register.
+ EmitCreateIteratorResult(true);
+ EmitUnwindBeforeReturn();
+ EmitReturnSequence();
+ break;
+ }
+
+ case Yield::DELEGATING: {
+ VisitForStackValue(expr->generator_object());
+
+ // Initial stack layout is as follows:
+ // [sp + 1 * kPointerSize] iter
+ // [sp + 0 * kPointerSize] g
+
+ Label l_catch, l_try, l_suspend, l_continuation, l_resume;
+ Label l_next, l_call, l_loop;
+ // Initial send value is undefined.
+ __ mov(eax, isolate()->factory()->undefined_value());
+ __ jmp(&l_next);
+
+ // catch (e) { receiver = iter; f = 'throw'; arg = e; goto l_call; }
+ __ bind(&l_catch);
+ handler_table()->set(expr->index(), Smi::FromInt(l_catch.pos()));
+ __ mov(ecx, isolate()->factory()->throw_string()); // "throw"
+ __ push(ecx); // "throw"
+ __ push(Operand(esp, 2 * kPointerSize)); // iter
+ __ push(eax); // exception
+ __ jmp(&l_call);
+
+ // try { received = %yield result }
+ // Shuffle the received result above a try handler and yield it without
+ // re-boxing.
+ __ bind(&l_try);
+ __ pop(eax); // result
+ __ PushTryHandler(StackHandler::CATCH, expr->index());
+ const int handler_size = StackHandlerConstants::kSize;
+ __ push(eax); // result
+ __ jmp(&l_suspend);
+ __ bind(&l_continuation);
+ __ jmp(&l_resume);
+ __ bind(&l_suspend);
+ const int generator_object_depth = kPointerSize + handler_size;
+ __ mov(eax, Operand(esp, generator_object_depth));
+ __ push(eax); // g
+ ASSERT(l_continuation.pos() > 0 && Smi::IsValid(l_continuation.pos()));
+ __ mov(FieldOperand(eax, JSGeneratorObject::kContinuationOffset),
+ Immediate(Smi::FromInt(l_continuation.pos())));
+ __ mov(FieldOperand(eax, JSGeneratorObject::kContextOffset), esi);
+ __ mov(ecx, esi);
+ __ RecordWriteField(eax, JSGeneratorObject::kContextOffset, ecx, edx);
+ __ CallRuntime(Runtime::kHiddenSuspendJSGeneratorObject, 1);
+ __ mov(context_register(),
+ Operand(ebp, StandardFrameConstants::kContextOffset));
+ __ pop(eax); // result
+ EmitReturnSequence();
+ __ bind(&l_resume); // received in eax
+ __ PopTryHandler();
+
+ // receiver = iter; f = iter.next; arg = received;
+ __ bind(&l_next);
+ __ mov(ecx, isolate()->factory()->next_string()); // "next"
+ __ push(ecx);
+ __ push(Operand(esp, 2 * kPointerSize)); // iter
+ __ push(eax); // received
+
+ // result = receiver[f](arg);
+ __ bind(&l_call);
+ __ mov(edx, Operand(esp, kPointerSize));
+ Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
+ CallIC(ic, TypeFeedbackId::None());
+ __ mov(edi, eax);
+ __ mov(Operand(esp, 2 * kPointerSize), edi);
+ CallFunctionStub stub(isolate(), 1, CALL_AS_METHOD);
+ __ CallStub(&stub);
+
+ __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+ __ Drop(1); // The function is still on the stack; drop it.
+
+ // if (!result.done) goto l_try;
+ __ bind(&l_loop);
+ __ push(eax); // save result
+ __ mov(edx, eax); // result
+ __ mov(ecx, isolate()->factory()->done_string()); // "done"
+ CallLoadIC(NOT_CONTEXTUAL); // result.done in eax
+ Handle<Code> bool_ic = ToBooleanStub::GetUninitialized(isolate());
+ CallIC(bool_ic);
+ __ test(eax, eax);
+ __ j(zero, &l_try);
+
+ // result.value
+ __ pop(edx); // result
+ __ mov(ecx, isolate()->factory()->value_string()); // "value"
+ CallLoadIC(NOT_CONTEXTUAL); // result.value in eax
+ context()->DropAndPlug(2, eax); // drop iter and g
+ break;
+ }
+ }
+}
+
+
+void FullCodeGenerator::EmitGeneratorResume(Expression *generator,
+ Expression *value,
+ JSGeneratorObject::ResumeMode resume_mode) {
+ // The value stays in eax, and is ultimately read by the resumed generator, as
+ // if CallRuntime(Runtime::kHiddenSuspendJSGeneratorObject) returned it. Or it
+ // is read to throw the value when the resumed generator is already closed.
+ // ebx will hold the generator object until the activation has been resumed.
+ VisitForStackValue(generator);
+ VisitForAccumulatorValue(value);
+ __ pop(ebx);
+
+ // Check generator state.
+ Label wrong_state, closed_state, done;
+ STATIC_ASSERT(JSGeneratorObject::kGeneratorExecuting < 0);
+ STATIC_ASSERT(JSGeneratorObject::kGeneratorClosed == 0);
+ __ cmp(FieldOperand(ebx, JSGeneratorObject::kContinuationOffset),
+ Immediate(Smi::FromInt(0)));
+ __ j(equal, &closed_state);
+ __ j(less, &wrong_state);
+
+ // Load suspended function and context.
+ __ mov(esi, FieldOperand(ebx, JSGeneratorObject::kContextOffset));
+ __ mov(edi, FieldOperand(ebx, JSGeneratorObject::kFunctionOffset));
+
+ // Push receiver.
+ __ push(FieldOperand(ebx, JSGeneratorObject::kReceiverOffset));
+
+ // Push holes for arguments to generator function.
+ __ mov(edx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
+ __ mov(edx,
+ FieldOperand(edx, SharedFunctionInfo::kFormalParameterCountOffset));
+ __ mov(ecx, isolate()->factory()->the_hole_value());
+ Label push_argument_holes, push_frame;
+ __ bind(&push_argument_holes);
+ __ sub(edx, Immediate(Smi::FromInt(1)));
+ __ j(carry, &push_frame);
+ __ push(ecx);
+ __ jmp(&push_argument_holes);
+
+ // Enter a new JavaScript frame, and initialize its slots as they were when
+ // the generator was suspended.
+ Label resume_frame;
+ __ bind(&push_frame);
+ __ call(&resume_frame);
+ __ jmp(&done);
+ __ bind(&resume_frame);
+ __ push(ebp); // Caller's frame pointer.
+ __ mov(ebp, esp);
+ __ push(esi); // Callee's context.
+ __ push(edi); // Callee's JS Function.
+
+ // Load the operand stack size.
+ __ mov(edx, FieldOperand(ebx, JSGeneratorObject::kOperandStackOffset));
+ __ mov(edx, FieldOperand(edx, FixedArray::kLengthOffset));
+ __ SmiUntag(edx);
+
+ // If we are sending a value and there is no operand stack, we can jump back
+ // in directly.
+ if (resume_mode == JSGeneratorObject::NEXT) {
+ Label slow_resume;
+ __ cmp(edx, Immediate(0));
+ __ j(not_zero, &slow_resume);
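+    // Resume address: code entry plus the recorded continuation offset.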
+ __ mov(edx, FieldOperand(edi, JSFunction::kCodeEntryOffset));
+ __ mov(ecx, FieldOperand(ebx, JSGeneratorObject::kContinuationOffset));
+ __ SmiUntag(ecx);
+ __ add(edx, ecx);
+ __ mov(FieldOperand(ebx, JSGeneratorObject::kContinuationOffset),
+ Immediate(Smi::FromInt(JSGeneratorObject::kGeneratorExecuting)));
+ __ jmp(edx);
+ __ bind(&slow_resume);
+ }
+
+ // Otherwise, we push holes for the operand stack and call the runtime to fix
+ // up the stack and the handlers.
+ Label push_operand_holes, call_resume;
+ __ bind(&push_operand_holes);
+ __ sub(edx, Immediate(1));
+ __ j(carry, &call_resume);
+ __ push(ecx);
+ __ jmp(&push_operand_holes);
+ __ bind(&call_resume);
+ __ push(ebx);
+ __ push(result_register());
+ __ Push(Smi::FromInt(resume_mode));
+ __ CallRuntime(Runtime::kHiddenResumeJSGeneratorObject, 3);
+ // Not reached: the runtime call returns elsewhere.
+ __ Abort(kGeneratorFailedToResume);
+
+ // Reach here when generator is closed.
+ __ bind(&closed_state);
+ if (resume_mode == JSGeneratorObject::NEXT) {
+ // Return completed iterator result when generator is closed.
+ __ push(Immediate(isolate()->factory()->undefined_value()));
+ // Pop value from top-of-stack slot; box result into result register.
+ EmitCreateIteratorResult(true);
+ } else {
+ // Throw the provided value.
+ __ push(eax);
+ __ CallRuntime(Runtime::kHiddenThrow, 1);
+ }
+ __ jmp(&done);
+
+ // Throw error if we attempt to operate on a running generator.
+ __ bind(&wrong_state);
+ __ push(ebx);
+ __ CallRuntime(Runtime::kHiddenThrowGeneratorStateError, 1);
+
+ __ bind(&done);
+ context()->Plug(result_register());
+}
+
+
+void FullCodeGenerator::EmitCreateIteratorResult(bool done) {
+ Label gc_required;
+ Label allocated;
+
+ Handle<Map> map(isolate()->native_context()->iterator_result_map());
+
+ __ Allocate(map->instance_size(), eax, ecx, edx, &gc_required, TAG_OBJECT);
+ __ jmp(&allocated);
+
+ __ bind(&gc_required);
+ __ Push(Smi::FromInt(map->instance_size()));
+ __ CallRuntime(Runtime::kHiddenAllocateInNewSpace, 1);
+ __ mov(context_register(),
+ Operand(ebp, StandardFrameConstants::kContextOffset));
+
+ __ bind(&allocated);
+ __ mov(ebx, map);
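+  // The caller pushed the value to wrap; it becomes the result's value field.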
+ __ pop(ecx);
+ __ mov(edx, isolate()->factory()->ToBoolean(done));
+ ASSERT_EQ(map->instance_size(), 5 * kPointerSize);
+ __ mov(FieldOperand(eax, HeapObject::kMapOffset), ebx);
+ __ mov(FieldOperand(eax, JSObject::kPropertiesOffset),
+ isolate()->factory()->empty_fixed_array());
+ __ mov(FieldOperand(eax, JSObject::kElementsOffset),
+ isolate()->factory()->empty_fixed_array());
+ __ mov(FieldOperand(eax, JSGeneratorObject::kResultValuePropertyOffset), ecx);
+ __ mov(FieldOperand(eax, JSGeneratorObject::kResultDonePropertyOffset), edx);
+
+ // Only the value field needs a write barrier, as the other values are in the
+ // root set.
+ __ RecordWriteField(eax, JSGeneratorObject::kResultValuePropertyOffset,
+ ecx, edx);
+}
+
+
+void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
+ SetSourcePosition(prop->position());
+ Literal* key = prop->key()->AsLiteral();
+ ASSERT(!key->value()->IsSmi());
+ __ mov(ecx, Immediate(key->value()));
+ CallLoadIC(NOT_CONTEXTUAL, prop->PropertyFeedbackId());
+}
+
+
+void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
+ SetSourcePosition(prop->position());
+ Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
+ CallIC(ic, prop->PropertyFeedbackId());
+}
+
+
+void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
+ Token::Value op,
+ OverwriteMode mode,
+ Expression* left,
+ Expression* right) {
+ // Do combined smi check of the operands. Left operand is on the
+ // stack. Right operand is in eax.
+ Label smi_case, done, stub_call;
+ __ pop(edx);
+ __ mov(ecx, eax);
+ __ or_(eax, edx);
+ JumpPatchSite patch_site(masm_);
+ patch_site.EmitJumpIfSmi(eax, &smi_case, Label::kNear);
+
+ __ bind(&stub_call);
+ __ mov(eax, ecx);
+ BinaryOpICStub stub(isolate(), op, mode);
+ CallIC(stub.GetCode(), expr->BinaryOperationFeedbackId());
+ patch_site.EmitPatchInfo();
+ __ jmp(&done, Label::kNear);
+
+ // Smi case.
+ __ bind(&smi_case);
+ __ mov(eax, edx); // Copy left operand in case of a stub call.
+
+ switch (op) {
+ case Token::SAR:
+ __ SmiUntag(ecx);
+ __ sar_cl(eax); // No checks of result necessary
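+      // The shift may have moved bits into the smi tag position; clear them so
+      // eax remains a valid smi.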
+ __ and_(eax, Immediate(~kSmiTagMask));
+ break;
+ case Token::SHL: {
+ Label result_ok;
+ __ SmiUntag(eax);
+ __ SmiUntag(ecx);
+ __ shl_cl(eax);
+ // Check that the *signed* result fits in a smi.
+ __ cmp(eax, 0xc0000000);
+ __ j(positive, &result_ok);
+ __ SmiTag(ecx);
+ __ jmp(&stub_call);
+ __ bind(&result_ok);
+ __ SmiTag(eax);
+ break;
+ }
+ case Token::SHR: {
+ Label result_ok;
+ __ SmiUntag(eax);
+ __ SmiUntag(ecx);
+ __ shr_cl(eax);
+ __ test(eax, Immediate(0xc0000000));
+ __ j(zero, &result_ok);
+ __ SmiTag(ecx);
+ __ jmp(&stub_call);
+ __ bind(&result_ok);
+ __ SmiTag(eax);
+ break;
+ }
+ case Token::ADD:
+ __ add(eax, ecx);
+ __ j(overflow, &stub_call);
+ break;
+ case Token::SUB:
+ __ sub(eax, ecx);
+ __ j(overflow, &stub_call);
+ break;
+ case Token::MUL: {
+ __ SmiUntag(eax);
+ __ imul(eax, ecx);
+ __ j(overflow, &stub_call);
+ __ test(eax, eax);
+ __ j(not_zero, &done, Label::kNear);
+ __ mov(ebx, edx);
+ __ or_(ebx, ecx);
+ __ j(negative, &stub_call);
+ break;
+ }
+ case Token::BIT_OR:
+ __ or_(eax, ecx);
+ break;
+ case Token::BIT_AND:
+ __ and_(eax, ecx);
+ break;
+ case Token::BIT_XOR:
+ __ xor_(eax, ecx);
+ break;
+ default:
+ UNREACHABLE();
+ }
+
+ __ bind(&done);
+ context()->Plug(eax);
+}
+
+
+void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr,
+ Token::Value op,
+ OverwriteMode mode) {
+ __ pop(edx);
+ BinaryOpICStub stub(isolate(), op, mode);
+ JumpPatchSite patch_site(masm_); // unbound, signals no inlined smi code.
+ CallIC(stub.GetCode(), expr->BinaryOperationFeedbackId());
+ patch_site.EmitPatchInfo();
+ context()->Plug(eax);
+}
+
+
+void FullCodeGenerator::EmitAssignment(Expression* expr) {
+ ASSERT(expr->IsValidReferenceExpression());
+
+ // Left-hand side can only be a property, a global or a (parameter or local)
+ // slot.
+ enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY };
+ LhsKind assign_type = VARIABLE;
+ Property* prop = expr->AsProperty();
+ if (prop != NULL) {
+ assign_type = (prop->key()->IsPropertyName())
+ ? NAMED_PROPERTY
+ : KEYED_PROPERTY;
+ }
+
+ switch (assign_type) {
+ case VARIABLE: {
+ Variable* var = expr->AsVariableProxy()->var();
+ EffectContext context(this);
+ EmitVariableAssignment(var, Token::ASSIGN);
+ break;
+ }
+ case NAMED_PROPERTY: {
+ __ push(eax); // Preserve value.
+ VisitForAccumulatorValue(prop->obj());
+ __ mov(edx, eax);
+ __ pop(eax); // Restore value.
+ __ mov(ecx, prop->key()->AsLiteral()->value());
+ CallStoreIC();
+ break;
+ }
+ case KEYED_PROPERTY: {
+ __ push(eax); // Preserve value.
+ VisitForStackValue(prop->obj());
+ VisitForAccumulatorValue(prop->key());
+ __ mov(ecx, eax);
+ __ pop(edx); // Receiver.
+ __ pop(eax); // Restore value.
+ Handle<Code> ic = strict_mode() == SLOPPY
+ ? isolate()->builtins()->KeyedStoreIC_Initialize()
+ : isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
+ CallIC(ic);
+ break;
+ }
+ }
+ context()->Plug(eax);
+}
+
+
+void FullCodeGenerator::EmitStoreToStackLocalOrContextSlot(
+ Variable* var, MemOperand location) {
+ __ mov(location, eax);
+ if (var->IsContextSlot()) {
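+    // ecx still holds the context object; VarOperand loaded it in the caller.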
+ __ mov(edx, eax);
+ int offset = Context::SlotOffset(var->index());
+ __ RecordWriteContextSlot(ecx, offset, edx, ebx);
+ }
+}
+
+
+void FullCodeGenerator::EmitCallStoreContextSlot(
+ Handle<String> name, StrictMode strict_mode) {
+ __ push(eax); // Value.
+ __ push(esi); // Context.
+ __ push(Immediate(name));
+ __ push(Immediate(Smi::FromInt(strict_mode)));
+ __ CallRuntime(Runtime::kHiddenStoreContextSlot, 4);
+}
+
+
+void FullCodeGenerator::EmitVariableAssignment(Variable* var,
+ Token::Value op) {
+ if (var->IsUnallocated()) {
+ // Global var, const, or let.
+ __ mov(ecx, var->name());
+ __ mov(edx, GlobalObjectOperand());
+ CallStoreIC();
+
+ } else if (op == Token::INIT_CONST_LEGACY) {
+ // Const initializers need a write barrier.
+ ASSERT(!var->IsParameter()); // No const parameters.
+ if (var->IsLookupSlot()) {
+ __ push(eax);
+ __ push(esi);
+ __ push(Immediate(var->name()));
+ __ CallRuntime(Runtime::kHiddenInitializeConstContextSlot, 3);
+ } else {
+ ASSERT(var->IsStackLocal() || var->IsContextSlot());
+ Label skip;
+ MemOperand location = VarOperand(var, ecx);
+ __ mov(edx, location);
+ __ cmp(edx, isolate()->factory()->the_hole_value());
+ __ j(not_equal, &skip, Label::kNear);
+ EmitStoreToStackLocalOrContextSlot(var, location);
+ __ bind(&skip);
+ }
+
+ } else if (var->mode() == LET && op != Token::INIT_LET) {
+ // Non-initializing assignment to let variable needs a write barrier.
+ if (var->IsLookupSlot()) {
+ EmitCallStoreContextSlot(var->name(), strict_mode());
+ } else {
+ ASSERT(var->IsStackAllocated() || var->IsContextSlot());
+ Label assign;
+ MemOperand location = VarOperand(var, ecx);
+ __ mov(edx, location);
+ __ cmp(edx, isolate()->factory()->the_hole_value());
+ __ j(not_equal, &assign, Label::kNear);
+ __ push(Immediate(var->name()));
+ __ CallRuntime(Runtime::kHiddenThrowReferenceError, 1);
+ __ bind(&assign);
+ EmitStoreToStackLocalOrContextSlot(var, location);
+ }
+
+ } else if (!var->is_const_mode() || op == Token::INIT_CONST) {
+ // Assignment to var or initializing assignment to let/const
+ // in harmony mode.
+ if (var->IsLookupSlot()) {
+ EmitCallStoreContextSlot(var->name(), strict_mode());
+ } else {
+ ASSERT(var->IsStackAllocated() || var->IsContextSlot());
+ MemOperand location = VarOperand(var, ecx);
+ if (generate_debug_code_ && op == Token::INIT_LET) {
+ // Check for an uninitialized let binding.
+ __ mov(edx, location);
+ __ cmp(edx, isolate()->factory()->the_hole_value());
+ __ Check(equal, kLetBindingReInitialization);
+ }
+ EmitStoreToStackLocalOrContextSlot(var, location);
+ }
+ }
+ // Non-initializing assignments to consts are ignored.
+}
+
+
+void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
+ // Assignment to a property, using a named store IC.
+ // eax : value
+ // esp[0] : receiver
+
+ Property* prop = expr->target()->AsProperty();
+ ASSERT(prop != NULL);
+ ASSERT(prop->key()->AsLiteral() != NULL);
+
+ // Record source code position before IC call.
+ SetSourcePosition(expr->position());
+ __ mov(ecx, prop->key()->AsLiteral()->value());
+ __ pop(edx);
+ CallStoreIC(expr->AssignmentFeedbackId());
+ PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+ context()->Plug(eax);
+}
+
+
+void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
+ // Assignment to a property, using a keyed store IC.
+ // eax : value
+ // esp[0] : key
+ // esp[kPointerSize] : receiver
+
+ __ pop(ecx); // Key.
+ __ pop(edx);
+ // Record source code position before IC call.
+ SetSourcePosition(expr->position());
+ Handle<Code> ic = strict_mode() == SLOPPY
+ ? isolate()->builtins()->KeyedStoreIC_Initialize()
+ : isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
+ CallIC(ic, expr->AssignmentFeedbackId());
+
+ PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+ context()->Plug(eax);
+}
+
+
+void FullCodeGenerator::VisitProperty(Property* expr) {
+ Comment cmnt(masm_, "[ Property");
+ Expression* key = expr->key();
+
+ if (key->IsPropertyName()) {
+ VisitForAccumulatorValue(expr->obj());
+ __ mov(edx, result_register());
+ EmitNamedPropertyLoad(expr);
+ PrepareForBailoutForId(expr->LoadId(), TOS_REG);
+ context()->Plug(eax);
+ } else {
+ VisitForStackValue(expr->obj());
+ VisitForAccumulatorValue(expr->key());
+ __ pop(edx); // Object.
+ __ mov(ecx, result_register()); // Key.
+ EmitKeyedPropertyLoad(expr);
+ context()->Plug(eax);
+ }
+}
+
+
+void FullCodeGenerator::CallIC(Handle<Code> code,
+ TypeFeedbackId ast_id) {
+ ic_total_count_++;
+ __ call(code, RelocInfo::CODE_TARGET, ast_id);
+}
+
+
+// Code common for calls using the IC.
+void FullCodeGenerator::EmitCallWithLoadIC(Call* expr) {
+ Expression* callee = expr->expression();
+
+ CallIC::CallType call_type = callee->IsVariableProxy()
+ ? CallIC::FUNCTION
+ : CallIC::METHOD;
+ // Get the target function.
+ if (call_type == CallIC::FUNCTION) {
+ { StackValueContext context(this);
+ EmitVariableLoad(callee->AsVariableProxy());
+ PrepareForBailout(callee, NO_REGISTERS);
+ }
+ // Push undefined as receiver. This is patched in the method prologue if it
+ // is a sloppy mode method.
+ __ push(Immediate(isolate()->factory()->undefined_value()));
+ } else {
+ // Load the function from the receiver.
+ ASSERT(callee->IsProperty());
+ __ mov(edx, Operand(esp, 0));
+ EmitNamedPropertyLoad(callee->AsProperty());
+ PrepareForBailoutForId(callee->AsProperty()->LoadId(), TOS_REG);
+ // Push the target function under the receiver.
+ __ push(Operand(esp, 0));
+ __ mov(Operand(esp, kPointerSize), eax);
+ }
+
+ EmitCall(expr, call_type);
+}
+
+
+// Code common for calls using the IC.
+void FullCodeGenerator::EmitKeyedCallWithLoadIC(Call* expr,
+ Expression* key) {
+ // Load the key.
+ VisitForAccumulatorValue(key);
+
+ Expression* callee = expr->expression();
+
+ // Load the function from the receiver.
+ ASSERT(callee->IsProperty());
+ __ mov(edx, Operand(esp, 0));
+ // Move the key into the right register for the keyed load IC.
+ __ mov(ecx, eax);
+ EmitKeyedPropertyLoad(callee->AsProperty());
+ PrepareForBailoutForId(callee->AsProperty()->LoadId(), TOS_REG);
+
+ // Push the target function under the receiver.
+ __ push(Operand(esp, 0));
+ __ mov(Operand(esp, kPointerSize), eax);
+
+ EmitCall(expr, CallIC::METHOD);
+}
+
+
+void FullCodeGenerator::EmitCall(Call* expr, CallIC::CallType call_type) {
+ // Load the arguments.
+ ZoneList<Expression*>* args = expr->arguments();
+ int arg_count = args->length();
+ { PreservePositionScope scope(masm()->positions_recorder());
+ for (int i = 0; i < arg_count; i++) {
+ VisitForStackValue(args->at(i));
+ }
+ }
+
+ // Record source position of the IC call.
+ SetSourcePosition(expr->position());
+ Handle<Code> ic = CallIC::initialize_stub(
+ isolate(), arg_count, call_type);
+ __ Move(edx, Immediate(Smi::FromInt(expr->CallFeedbackSlot())));
+ __ mov(edi, Operand(esp, (arg_count + 1) * kPointerSize));
+ // Don't assign a type feedback id to the IC, since type feedback is provided
+ // by the vector above.
+ CallIC(ic);
+
+ RecordJSReturnSite(expr);
+
+ // Restore context register.
+ __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+
+ context()->DropAndPlug(1, eax);
+}
+
+
+void FullCodeGenerator::EmitResolvePossiblyDirectEval(int arg_count) {
+ // Push copy of the first argument or undefined if it doesn't exist.
+ if (arg_count > 0) {
+ __ push(Operand(esp, arg_count * kPointerSize));
+ } else {
+ __ push(Immediate(isolate()->factory()->undefined_value()));
+ }
+
+ // Push the receiver of the enclosing function.
+ __ push(Operand(ebp, (2 + info_->scope()->num_parameters()) * kPointerSize));
+ // Push the language mode.
+ __ push(Immediate(Smi::FromInt(strict_mode())));
+
+  // Push the start position of the scope the call resides in.
+ __ push(Immediate(Smi::FromInt(scope()->start_position())));
+
+ // Do the runtime call.
+ __ CallRuntime(Runtime::kHiddenResolvePossiblyDirectEval, 5);
+}
+
+
+void FullCodeGenerator::VisitCall(Call* expr) {
+#ifdef DEBUG
+ // We want to verify that RecordJSReturnSite gets called on all paths
+ // through this function. Avoid early returns.
+ expr->return_is_recorded_ = false;
+#endif
+
+ Comment cmnt(masm_, "[ Call");
+ Expression* callee = expr->expression();
+ Call::CallType call_type = expr->GetCallType(isolate());
+
+ if (call_type == Call::POSSIBLY_EVAL_CALL) {
+ // In a call to eval, we first call RuntimeHidden_ResolvePossiblyDirectEval
+ // to resolve the function we need to call and the receiver of the call.
+ // Then we call the resolved function using the given arguments.
+ ZoneList<Expression*>* args = expr->arguments();
+ int arg_count = args->length();
+ { PreservePositionScope pos_scope(masm()->positions_recorder());
+ VisitForStackValue(callee);
+ // Reserved receiver slot.
+ __ push(Immediate(isolate()->factory()->undefined_value()));
+ // Push the arguments.
+ for (int i = 0; i < arg_count; i++) {
+ VisitForStackValue(args->at(i));
+ }
+
+ // Push a copy of the function (found below the arguments) and
+ // resolve eval.
+ __ push(Operand(esp, (arg_count + 1) * kPointerSize));
+ EmitResolvePossiblyDirectEval(arg_count);
+
+ // The runtime call returns a pair of values in eax (function) and
+ // edx (receiver). Touch up the stack with the right values.
+ __ mov(Operand(esp, (arg_count + 0) * kPointerSize), edx);
+ __ mov(Operand(esp, (arg_count + 1) * kPointerSize), eax);
+ }
+ // Record source position for debugger.
+ SetSourcePosition(expr->position());
+ CallFunctionStub stub(isolate(), arg_count, NO_CALL_FUNCTION_FLAGS);
+ __ mov(edi, Operand(esp, (arg_count + 1) * kPointerSize));
+ __ CallStub(&stub);
+ RecordJSReturnSite(expr);
+ // Restore context register.
+ __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+ context()->DropAndPlug(1, eax);
+
+ } else if (call_type == Call::GLOBAL_CALL) {
+ EmitCallWithLoadIC(expr);
+
+ } else if (call_type == Call::LOOKUP_SLOT_CALL) {
+ // Call to a lookup slot (dynamically introduced variable).
+ VariableProxy* proxy = callee->AsVariableProxy();
+ Label slow, done;
+ { PreservePositionScope scope(masm()->positions_recorder());
+ // Generate code for loading from variables potentially shadowed by
+ // eval-introduced variables.
+ EmitDynamicLookupFastCase(proxy->var(), NOT_INSIDE_TYPEOF, &slow, &done);
+ }
+ __ bind(&slow);
+ // Call the runtime to find the function to call (returned in eax) and
+ // the object holding it (returned in edx).
+ __ push(context_register());
+ __ push(Immediate(proxy->name()));
+ __ CallRuntime(Runtime::kHiddenLoadContextSlot, 2);
+ __ push(eax); // Function.
+ __ push(edx); // Receiver.
+
+ // If fast case code has been generated, emit code to push the function
+ // and receiver and have the slow path jump around this code.
+ if (done.is_linked()) {
+ Label call;
+ __ jmp(&call, Label::kNear);
+ __ bind(&done);
+ // Push function.
+ __ push(eax);
+      // The receiver is implicitly the global receiver. Indicate this by
+      // passing undefined to the call function stub.
+ __ push(Immediate(isolate()->factory()->undefined_value()));
+ __ bind(&call);
+ }
+
+ // The receiver is either the global receiver or an object found by
+ // LoadContextSlot.
+ EmitCall(expr);
+
+ } else if (call_type == Call::PROPERTY_CALL) {
+ Property* property = callee->AsProperty();
+ { PreservePositionScope scope(masm()->positions_recorder());
+ VisitForStackValue(property->obj());
+ }
+ if (property->key()->IsPropertyName()) {
+ EmitCallWithLoadIC(expr);
+ } else {
+ EmitKeyedCallWithLoadIC(expr, property->key());
+ }
+
+ } else {
+ ASSERT(call_type == Call::OTHER_CALL);
+ // Call to an arbitrary expression not handled specially above.
+ { PreservePositionScope scope(masm()->positions_recorder());
+ VisitForStackValue(callee);
+ }
+ __ push(Immediate(isolate()->factory()->undefined_value()));
+ // Emit function call.
+ EmitCall(expr);
+ }
+
+#ifdef DEBUG
+ // RecordJSReturnSite should have been called.
+ ASSERT(expr->return_is_recorded_);
+#endif
+}
+
+
+void FullCodeGenerator::VisitCallNew(CallNew* expr) {
+ Comment cmnt(masm_, "[ CallNew");
+ // According to ECMA-262, section 11.2.2, page 44, the function
+ // expression in new calls must be evaluated before the
+ // arguments.
+
+ // Push constructor on the stack. If it's not a function it's used as
+ // receiver for CALL_NON_FUNCTION, otherwise the value on the stack is
+ // ignored.
+ VisitForStackValue(expr->expression());
+
+ // Push the arguments ("left-to-right") on the stack.
+ ZoneList<Expression*>* args = expr->arguments();
+ int arg_count = args->length();
+ for (int i = 0; i < arg_count; i++) {
+ VisitForStackValue(args->at(i));
+ }
+
+ // Call the construct call builtin that handles allocation and
+ // constructor invocation.
+ SetSourcePosition(expr->position());
+
+ // Load function and argument count into edi and eax.
+ __ Move(eax, Immediate(arg_count));
+ __ mov(edi, Operand(esp, arg_count * kPointerSize));
+
+ // Record call targets in unoptimized code.
+ if (FLAG_pretenuring_call_new) {
+ EnsureSlotContainsAllocationSite(expr->AllocationSiteFeedbackSlot());
+ ASSERT(expr->AllocationSiteFeedbackSlot() ==
+ expr->CallNewFeedbackSlot() + 1);
+ }
+
+ __ LoadHeapObject(ebx, FeedbackVector());
+ __ mov(edx, Immediate(Smi::FromInt(expr->CallNewFeedbackSlot())));
+
+ CallConstructStub stub(isolate(), RECORD_CONSTRUCTOR_TARGET);
+ __ call(stub.GetCode(), RelocInfo::CONSTRUCT_CALL);
+ PrepareForBailoutForId(expr->ReturnId(), TOS_REG);
+ context()->Plug(eax);
+}
+
+
+void FullCodeGenerator::EmitIsSmi(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT(args->length() == 1);
+
+ VisitForAccumulatorValue(args->at(0));
+
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+ context()->PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
+
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+ __ test(eax, Immediate(kSmiTagMask));
+ Split(zero, if_true, if_false, fall_through);
+
+ context()->Plug(if_true, if_false);
+}
+
+
+void FullCodeGenerator::EmitIsNonNegativeSmi(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT(args->length() == 1);
+
+ VisitForAccumulatorValue(args->at(0));
+
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+ context()->PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
+
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
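+ // A non-negative smi has both the smi tag bit (bit 0) and the sign bit
+ // clear, so a single test against (kSmiTagMask | 0x80000000) covers both.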
+ __ test(eax, Immediate(kSmiTagMask | 0x80000000));
+ Split(zero, if_true, if_false, fall_through);
+
+ context()->Plug(if_true, if_false);
+}
+
+
+void FullCodeGenerator::EmitIsObject(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT(args->length() == 1);
+
+ VisitForAccumulatorValue(args->at(0));
+
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+ context()->PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
+
+ __ JumpIfSmi(eax, if_false);
+ __ cmp(eax, isolate()->factory()->null_value());
+ __ j(equal, if_true);
+ __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
+ // Undetectable objects behave like undefined when tested with typeof.
+ __ movzx_b(ecx, FieldOperand(ebx, Map::kBitFieldOffset));
+ __ test(ecx, Immediate(1 << Map::kIsUndetectable));
+ __ j(not_zero, if_false);
+ __ movzx_b(ecx, FieldOperand(ebx, Map::kInstanceTypeOffset));
+ __ cmp(ecx, FIRST_NONCALLABLE_SPEC_OBJECT_TYPE);
+ __ j(below, if_false);
+ __ cmp(ecx, LAST_NONCALLABLE_SPEC_OBJECT_TYPE);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+ Split(below_equal, if_true, if_false, fall_through);
+
+ context()->Plug(if_true, if_false);
+}
+
+
+void FullCodeGenerator::EmitIsSpecObject(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT(args->length() == 1);
+
+ VisitForAccumulatorValue(args->at(0));
+
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+ context()->PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
+
+ __ JumpIfSmi(eax, if_false);
+ __ CmpObjectType(eax, FIRST_SPEC_OBJECT_TYPE, ebx);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+ Split(above_equal, if_true, if_false, fall_through);
+
+ context()->Plug(if_true, if_false);
+}
+
+
+void FullCodeGenerator::EmitIsUndetectableObject(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT(args->length() == 1);
+
+ VisitForAccumulatorValue(args->at(0));
+
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+ context()->PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
+
+ __ JumpIfSmi(eax, if_false);
+ __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
+ __ movzx_b(ebx, FieldOperand(ebx, Map::kBitFieldOffset));
+ __ test(ebx, Immediate(1 << Map::kIsUndetectable));
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+ Split(not_zero, if_true, if_false, fall_through);
+
+ context()->Plug(if_true, if_false);
+}
+
+
+void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
+ CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT(args->length() == 1);
+
+ VisitForAccumulatorValue(args->at(0));
+
+ Label materialize_true, materialize_false, skip_lookup;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+ context()->PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
+
+ __ AssertNotSmi(eax);
+
+ // Check whether this map has already been checked to be safe for default
+ // valueOf.
+ __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
+ __ test_b(FieldOperand(ebx, Map::kBitField2Offset),
+ 1 << Map::kStringWrapperSafeForDefaultValueOf);
+ __ j(not_zero, &skip_lookup);
+
+ // Check for fast case object. Return false for slow case objects.
+ __ mov(ecx, FieldOperand(eax, JSObject::kPropertiesOffset));
+ __ mov(ecx, FieldOperand(ecx, HeapObject::kMapOffset));
+ __ cmp(ecx, isolate()->factory()->hash_table_map());
+ __ j(equal, if_false);
+
+ // Look for valueOf string in the descriptor array, and indicate false if
+ // found. Since we omit an enumeration index check, if it is added via a
+ // transition that shares its descriptor array, this is a false positive.
+ Label entry, loop, done;
+
+ // Skip loop if no descriptors are valid.
+ __ NumberOfOwnDescriptors(ecx, ebx);
+ __ cmp(ecx, 0);
+ __ j(equal, &done);
+
+ __ LoadInstanceDescriptors(ebx, ebx);
+ // ebx: descriptor array.
+ // ecx: valid entries in the descriptor array.
+ // Calculate the end of the descriptor array.
+ STATIC_ASSERT(kSmiTag == 0);
+ STATIC_ASSERT(kSmiTagSize == 1);
+ STATIC_ASSERT(kPointerSize == 4);
+ __ imul(ecx, ecx, DescriptorArray::kDescriptorSize);
+ __ lea(ecx, Operand(ebx, ecx, times_4, DescriptorArray::kFirstOffset));
+ // Calculate location of the first key name.
+ __ add(ebx, Immediate(DescriptorArray::kFirstOffset));
+ // Loop through all the keys in the descriptor array. If one of these is the
+ // internalized string "valueOf" the result is false.
+ __ jmp(&entry);
+ __ bind(&loop);
+ __ mov(edx, FieldOperand(ebx, 0));
+ __ cmp(edx, isolate()->factory()->value_of_string());
+ __ j(equal, if_false);
+ __ add(ebx, Immediate(DescriptorArray::kDescriptorSize * kPointerSize));
+ __ bind(&entry);
+ __ cmp(ebx, ecx);
+ __ j(not_equal, &loop);
+
+ __ bind(&done);
+
+ // Reload map as register ebx was used as temporary above.
+ __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
+
+ // Set the bit in the map to indicate that there is no local valueOf field.
+ __ or_(FieldOperand(ebx, Map::kBitField2Offset),
+ Immediate(1 << Map::kStringWrapperSafeForDefaultValueOf));
+
+ __ bind(&skip_lookup);
+
+ // If a valueOf property is not found on the object, check that its
+ // prototype is the unmodified String prototype. If it is not, the result is false.
+ __ mov(ecx, FieldOperand(ebx, Map::kPrototypeOffset));
+ __ JumpIfSmi(ecx, if_false);
+ __ mov(ecx, FieldOperand(ecx, HeapObject::kMapOffset));
+ __ mov(edx, Operand(esi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
+ __ mov(edx,
+ FieldOperand(edx, GlobalObject::kNativeContextOffset));
+ __ cmp(ecx,
+ ContextOperand(edx,
+ Context::STRING_FUNCTION_PROTOTYPE_MAP_INDEX));
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+ Split(equal, if_true, if_false, fall_through);
+
+ context()->Plug(if_true, if_false);
+}
+
+
+void FullCodeGenerator::EmitIsFunction(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT(args->length() == 1);
+
+ VisitForAccumulatorValue(args->at(0));
+
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+ context()->PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
+
+ __ JumpIfSmi(eax, if_false);
+ __ CmpObjectType(eax, JS_FUNCTION_TYPE, ebx);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+ Split(equal, if_true, if_false, fall_through);
+
+ context()->Plug(if_true, if_false);
+}
+
+
+void FullCodeGenerator::EmitIsMinusZero(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT(args->length() == 1);
+
+ VisitForAccumulatorValue(args->at(0));
+
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+ context()->PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
+
+ Handle<Map> map = masm()->isolate()->factory()->heap_number_map();
+ __ CheckMap(eax, map, if_false, DO_SMI_CHECK);
+ // Check whether the exponent half is 0x80000000, the high word of -0.0.
+ // Comparing against 1 and checking for overflow is the shortest possible
+ // encoding: subtracting 1 overflows only for the value 0x80000000.
+ __ cmp(FieldOperand(eax, HeapNumber::kExponentOffset), Immediate(0x1));
+ __ j(no_overflow, if_false);
+ __ cmp(FieldOperand(eax, HeapNumber::kMantissaOffset), Immediate(0x0));
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+ Split(equal, if_true, if_false, fall_through);
+
+ context()->Plug(if_true, if_false);
+}
+
+
+void FullCodeGenerator::EmitIsArray(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT(args->length() == 1);
+
+ VisitForAccumulatorValue(args->at(0));
+
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+ context()->PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
+
+ __ JumpIfSmi(eax, if_false);
+ __ CmpObjectType(eax, JS_ARRAY_TYPE, ebx);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+ Split(equal, if_true, if_false, fall_through);
+
+ context()->Plug(if_true, if_false);
+}
+
+
+void FullCodeGenerator::EmitIsRegExp(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT(args->length() == 1);
+
+ VisitForAccumulatorValue(args->at(0));
+
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+ context()->PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
+
+ __ JumpIfSmi(eax, if_false);
+ __ CmpObjectType(eax, JS_REGEXP_TYPE, ebx);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+ Split(equal, if_true, if_false, fall_through);
+
+ context()->Plug(if_true, if_false);
+}
+
+
+void FullCodeGenerator::EmitIsConstructCall(CallRuntime* expr) {
+ ASSERT(expr->arguments()->length() == 0);
+
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+ context()->PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
+
+ // Get the frame pointer for the calling frame.
+ __ mov(eax, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
+
+ // Skip the arguments adaptor frame if it exists.
+ Label check_frame_marker;
+ __ cmp(Operand(eax, StandardFrameConstants::kContextOffset),
+ Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+ __ j(not_equal, &check_frame_marker);
+ __ mov(eax, Operand(eax, StandardFrameConstants::kCallerFPOffset));
+
+ // Check the marker in the calling frame.
+ __ bind(&check_frame_marker);
+ __ cmp(Operand(eax, StandardFrameConstants::kMarkerOffset),
+ Immediate(Smi::FromInt(StackFrame::CONSTRUCT)));
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+ Split(equal, if_true, if_false, fall_through);
+
+ context()->Plug(if_true, if_false);
+}
+
+
+void FullCodeGenerator::EmitObjectEquals(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT(args->length() == 2);
+
+ // Load the two objects into registers and perform the comparison.
+ VisitForStackValue(args->at(0));
+ VisitForAccumulatorValue(args->at(1));
+
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+ context()->PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
+
+ __ pop(ebx);
+ __ cmp(eax, ebx);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+ Split(equal, if_true, if_false, fall_through);
+
+ context()->Plug(if_true, if_false);
+}
+
+
+void FullCodeGenerator::EmitArguments(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT(args->length() == 1);
+
+ // ArgumentsAccessStub expects the key in edx and the formal
+ // parameter count in eax.
+ VisitForAccumulatorValue(args->at(0));
+ __ mov(edx, eax);
+ __ Move(eax, Immediate(Smi::FromInt(info_->scope()->num_parameters())));
+ ArgumentsAccessStub stub(isolate(), ArgumentsAccessStub::READ_ELEMENT);
+ __ CallStub(&stub);
+ context()->Plug(eax);
+}
+
+
+void FullCodeGenerator::EmitArgumentsLength(CallRuntime* expr) {
+ ASSERT(expr->arguments()->length() == 0);
+
+ Label exit;
+ // Get the number of formal parameters.
+ __ Move(eax, Immediate(Smi::FromInt(info_->scope()->num_parameters())));
+
+ // Check if the calling frame is an arguments adaptor frame.
+ __ mov(ebx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
+ __ cmp(Operand(ebx, StandardFrameConstants::kContextOffset),
+ Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+ __ j(not_equal, &exit);
+
+ // Arguments adaptor case: Read the arguments length from the
+ // adaptor frame.
+ __ mov(eax, Operand(ebx, ArgumentsAdaptorFrameConstants::kLengthOffset));
+
+ __ bind(&exit);
+ __ AssertSmi(eax);
+ context()->Plug(eax);
+}
+
+
+void FullCodeGenerator::EmitClassOf(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT(args->length() == 1);
+ Label done, null, function, non_function_constructor;
+
+ VisitForAccumulatorValue(args->at(0));
+
+ // If the object is a smi, we return null.
+ __ JumpIfSmi(eax, &null);
+
+ // Check that the object is a JS object but take special care of JS
+ // functions to make sure they have 'Function' as their class.
+ // Assume that there are only two callable types, and one of them is at
+ // either end of the type range for JS object types. Saves extra comparisons.
+ STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
+ __ CmpObjectType(eax, FIRST_SPEC_OBJECT_TYPE, eax);
+ // Map is now in eax.
+ __ j(below, &null);
+ STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE ==
+ FIRST_SPEC_OBJECT_TYPE + 1);
+ __ j(equal, &function);
+
+ __ CmpInstanceType(eax, LAST_SPEC_OBJECT_TYPE);
+ STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE ==
+ LAST_SPEC_OBJECT_TYPE - 1);
+ __ j(equal, &function);
+ // Assume that there is no larger type.
+ STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE == LAST_TYPE - 1);
+
+ // Check if the constructor in the map is a JS function.
+ __ mov(eax, FieldOperand(eax, Map::kConstructorOffset));
+ __ CmpObjectType(eax, JS_FUNCTION_TYPE, ebx);
+ __ j(not_equal, &non_function_constructor);
+
+ // eax now contains the constructor function. Grab the
+ // instance class name from there.
+ __ mov(eax, FieldOperand(eax, JSFunction::kSharedFunctionInfoOffset));
+ __ mov(eax, FieldOperand(eax, SharedFunctionInfo::kInstanceClassNameOffset));
+ __ jmp(&done);
+
+ // Functions have class 'Function'.
+ __ bind(&function);
+ __ mov(eax, isolate()->factory()->function_class_string());
+ __ jmp(&done);
+
+ // Objects with a non-function constructor have class 'Object'.
+ __ bind(&non_function_constructor);
+ __ mov(eax, isolate()->factory()->Object_string());
+ __ jmp(&done);
+
+ // Non-JS objects have class null.
+ __ bind(&null);
+ __ mov(eax, isolate()->factory()->null_value());
+
+ // All done.
+ __ bind(&done);
+
+ context()->Plug(eax);
+}
+
+
+void FullCodeGenerator::EmitSubString(CallRuntime* expr) {
+ // Load the arguments on the stack and call the stub.
+ SubStringStub stub(isolate());
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT(args->length() == 3);
+ VisitForStackValue(args->at(0));
+ VisitForStackValue(args->at(1));
+ VisitForStackValue(args->at(2));
+ __ CallStub(&stub);
+ context()->Plug(eax);
+}
+
+
+void FullCodeGenerator::EmitRegExpExec(CallRuntime* expr) {
+ // Load the arguments on the stack and call the stub.
+ RegExpExecStub stub(isolate());
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT(args->length() == 4);
+ VisitForStackValue(args->at(0));
+ VisitForStackValue(args->at(1));
+ VisitForStackValue(args->at(2));
+ VisitForStackValue(args->at(3));
+ __ CallStub(&stub);
+ context()->Plug(eax);
+}
+
+
+void FullCodeGenerator::EmitValueOf(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT(args->length() == 1);
+
+ VisitForAccumulatorValue(args->at(0)); // Load the object.
+
+ Label done;
+ // If the object is a smi return the object.
+ __ JumpIfSmi(eax, &done, Label::kNear);
+ // If the object is not a value type, return the object.
+ __ CmpObjectType(eax, JS_VALUE_TYPE, ebx);
+ __ j(not_equal, &done, Label::kNear);
+ __ mov(eax, FieldOperand(eax, JSValue::kValueOffset));
+
+ __ bind(&done);
+ context()->Plug(eax);
+}
+
+
+void FullCodeGenerator::EmitDateField(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT(args->length() == 2);
+ ASSERT_NE(NULL, args->at(1)->AsLiteral());
+ Smi* index = Smi::cast(*(args->at(1)->AsLiteral()->value()));
+
+ VisitForAccumulatorValue(args->at(0)); // Load the object.
+
+ Label runtime, done, not_date_object;
+ Register object = eax;
+ Register result = eax;
+ Register scratch = ecx;
+
+ __ JumpIfSmi(object, &not_date_object);
+ __ CmpObjectType(object, JS_DATE_TYPE, scratch);
+ __ j(not_equal, &not_date_object);
+
+ if (index->value() == 0) {
+ __ mov(result, FieldOperand(object, JSDate::kValueOffset));
+ __ jmp(&done);
+ } else {
+ if (index->value() < JSDate::kFirstUncachedField) {
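+ // Fields below kFirstUncachedField are cached on the JSDate object and can
+ // be read directly as long as the object's cache stamp still matches the
+ // isolate's date cache stamp; otherwise fall through to the runtime call.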
+ ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
+ __ mov(scratch, Operand::StaticVariable(stamp));
+ __ cmp(scratch, FieldOperand(object, JSDate::kCacheStampOffset));
+ __ j(not_equal, &runtime, Label::kNear);
+ __ mov(result, FieldOperand(object, JSDate::kValueOffset +
+ kPointerSize * index->value()));
+ __ jmp(&done);
+ }
+ __ bind(&runtime);
+ __ PrepareCallCFunction(2, scratch);
+ __ mov(Operand(esp, 0), object);
+ __ mov(Operand(esp, 1 * kPointerSize), Immediate(index));
+ __ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
+ __ jmp(&done);
+ }
+
+ __ bind(&not_date_object);
+ __ CallRuntime(Runtime::kHiddenThrowNotDateError, 0);
+ __ bind(&done);
+ context()->Plug(result);
+}
+
+
+void FullCodeGenerator::EmitOneByteSeqStringSetChar(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT_EQ(3, args->length());
+
+ Register string = eax;
+ Register index = ebx;
+ Register value = ecx;
+
+ VisitForStackValue(args->at(1)); // index
+ VisitForStackValue(args->at(2)); // value
+ VisitForAccumulatorValue(args->at(0)); // string
+
+ __ pop(value);
+ __ pop(index);
+
+ if (FLAG_debug_code) {
+ __ test(value, Immediate(kSmiTagMask));
+ __ Check(zero, kNonSmiValue);
+ __ test(index, Immediate(kSmiTagMask));
+ __ Check(zero, kNonSmiValue);
+ }
+
+ __ SmiUntag(value);
+ __ SmiUntag(index);
+
+ if (FLAG_debug_code) {
+ static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
+ __ EmitSeqStringSetCharCheck(string, index, value, one_byte_seq_type);
+ }
+
+ __ mov_b(FieldOperand(string, index, times_1, SeqOneByteString::kHeaderSize),
+ value);
+ context()->Plug(string);
+}
+
+
+void FullCodeGenerator::EmitTwoByteSeqStringSetChar(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT_EQ(3, args->length());
+
+ Register string = eax;
+ Register index = ebx;
+ Register value = ecx;
+
+ VisitForStackValue(args->at(1)); // index
+ VisitForStackValue(args->at(2)); // value
+ VisitForAccumulatorValue(args->at(0)); // string
+ __ pop(value);
+ __ pop(index);
+
+ if (FLAG_debug_code) {
+ __ test(value, Immediate(kSmiTagMask));
+ __ Check(zero, kNonSmiValue);
+ __ test(index, Immediate(kSmiTagMask));
+ __ Check(zero, kNonSmiValue);
+ __ SmiUntag(index);
+ static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
+ __ EmitSeqStringSetCharCheck(string, index, value, two_byte_seq_type);
+ __ SmiTag(index);
+ }
+
+ __ SmiUntag(value);
+ // No need to untag the smi index for two-byte addressing: the smi tag
+ // doubles the index, which matches the two bytes per character.
+ __ mov_w(FieldOperand(string, index, times_1, SeqTwoByteString::kHeaderSize),
+ value);
+ context()->Plug(string);
+}
+
+
+void FullCodeGenerator::EmitMathPow(CallRuntime* expr) {
+ // Load the arguments on the stack and call the runtime function.
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT(args->length() == 2);
+ VisitForStackValue(args->at(0));
+ VisitForStackValue(args->at(1));
+
+ __ CallRuntime(Runtime::kHiddenMathPowSlow, 2);
+ context()->Plug(eax);
+}
+
+
+void FullCodeGenerator::EmitSetValueOf(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT(args->length() == 2);
+
+ VisitForStackValue(args->at(0)); // Load the object.
+ VisitForAccumulatorValue(args->at(1)); // Load the value.
+ __ pop(ebx); // eax = value. ebx = object.
+
+ Label done;
+ // If the object is a smi, return the value.
+ __ JumpIfSmi(ebx, &done, Label::kNear);
+
+ // If the object is not a value type, return the value.
+ __ CmpObjectType(ebx, JS_VALUE_TYPE, ecx);
+ __ j(not_equal, &done, Label::kNear);
+
+ // Store the value.
+ __ mov(FieldOperand(ebx, JSValue::kValueOffset), eax);
+
+ // Update the write barrier. Save the value as it will be
+ // overwritten by the write barrier code and is needed afterward.
+ __ mov(edx, eax);
+ __ RecordWriteField(ebx, JSValue::kValueOffset, edx, ecx);
+
+ __ bind(&done);
+ context()->Plug(eax);
+}
+
+
+void FullCodeGenerator::EmitNumberToString(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT_EQ(args->length(), 1);
+
+ // Load the argument into eax and call the stub.
+ VisitForAccumulatorValue(args->at(0));
+
+ NumberToStringStub stub(isolate());
+ __ CallStub(&stub);
+ context()->Plug(eax);
+}
+
+
+void FullCodeGenerator::EmitStringCharFromCode(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT(args->length() == 1);
+
+ VisitForAccumulatorValue(args->at(0));
+
+ Label done;
+ StringCharFromCodeGenerator generator(eax, ebx);
+ generator.GenerateFast(masm_);
+ __ jmp(&done);
+
+ NopRuntimeCallHelper call_helper;
+ generator.GenerateSlow(masm_, call_helper);
+
+ __ bind(&done);
+ context()->Plug(ebx);
+}
+
+
+void FullCodeGenerator::EmitStringCharCodeAt(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT(args->length() == 2);
+
+ VisitForStackValue(args->at(0));
+ VisitForAccumulatorValue(args->at(1));
+
+ Register object = ebx;
+ Register index = eax;
+ Register result = edx;
+
+ __ pop(object);
+
+ Label need_conversion;
+ Label index_out_of_range;
+ Label done;
+ StringCharCodeAtGenerator generator(object,
+ index,
+ result,
+ &need_conversion,
+ &need_conversion,
+ &index_out_of_range,
+ STRING_INDEX_IS_NUMBER);
+ generator.GenerateFast(masm_);
+ __ jmp(&done);
+
+ __ bind(&index_out_of_range);
+ // When the index is out of range, the spec requires us to return
+ // NaN.
+ __ Move(result, Immediate(isolate()->factory()->nan_value()));
+ __ jmp(&done);
+
+ __ bind(&need_conversion);
+ // Move the undefined value into the result register, which will
+ // trigger conversion.
+ __ Move(result, Immediate(isolate()->factory()->undefined_value()));
+ __ jmp(&done);
+
+ NopRuntimeCallHelper call_helper;
+ generator.GenerateSlow(masm_, call_helper);
+
+ __ bind(&done);
+ context()->Plug(result);
+}
+
+
+void FullCodeGenerator::EmitStringCharAt(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT(args->length() == 2);
+
+ VisitForStackValue(args->at(0));
+ VisitForAccumulatorValue(args->at(1));
+
+ Register object = ebx;
+ Register index = eax;
+ Register scratch = edx;
+ Register result = eax;
+
+ __ pop(object);
+
+ Label need_conversion;
+ Label index_out_of_range;
+ Label done;
+ StringCharAtGenerator generator(object,
+ index,
+ scratch,
+ result,
+ &need_conversion,
+ &need_conversion,
+ &index_out_of_range,
+ STRING_INDEX_IS_NUMBER);
+ generator.GenerateFast(masm_);
+ __ jmp(&done);
+
+ __ bind(&index_out_of_range);
+ // When the index is out of range, the spec requires us to return
+ // the empty string.
+ __ Move(result, Immediate(isolate()->factory()->empty_string()));
+ __ jmp(&done);
+
+ __ bind(&need_conversion);
+ // Move smi zero into the result register, which will trigger
+ // conversion.
+ __ Move(result, Immediate(Smi::FromInt(0)));
+ __ jmp(&done);
+
+ NopRuntimeCallHelper call_helper;
+ generator.GenerateSlow(masm_, call_helper);
+
+ __ bind(&done);
+ context()->Plug(result);
+}
+
+
+void FullCodeGenerator::EmitStringAdd(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT_EQ(2, args->length());
+ VisitForStackValue(args->at(0));
+ VisitForAccumulatorValue(args->at(1));
+
+ __ pop(edx);
+ StringAddStub stub(isolate(), STRING_ADD_CHECK_BOTH, NOT_TENURED);
+ __ CallStub(&stub);
+ context()->Plug(eax);
+}
+
+
+void FullCodeGenerator::EmitStringCompare(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT_EQ(2, args->length());
+
+ VisitForStackValue(args->at(0));
+ VisitForStackValue(args->at(1));
+
+ StringCompareStub stub(isolate());
+ __ CallStub(&stub);
+ context()->Plug(eax);
+}
+
+
+void FullCodeGenerator::EmitCallFunction(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT(args->length() >= 2);
+
+ int arg_count = args->length() - 2; // 2 ~ receiver and function.
+ for (int i = 0; i < arg_count + 1; ++i) {
+ VisitForStackValue(args->at(i));
+ }
+ VisitForAccumulatorValue(args->last()); // Function.
+
+ Label runtime, done;
+ // Check for non-function argument (including proxy).
+ __ JumpIfSmi(eax, &runtime);
+ __ CmpObjectType(eax, JS_FUNCTION_TYPE, ebx);
+ __ j(not_equal, &runtime);
+
+ // InvokeFunction requires the function in edi. Move it in there.
+ __ mov(edi, result_register());
+ ParameterCount count(arg_count);
+ __ InvokeFunction(edi, count, CALL_FUNCTION, NullCallWrapper());
+ __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+ __ jmp(&done);
+
+ __ bind(&runtime);
+ __ push(eax);
+ __ CallRuntime(Runtime::kCall, args->length());
+ __ bind(&done);
+
+ context()->Plug(eax);
+}
+
+
+void FullCodeGenerator::EmitRegExpConstructResult(CallRuntime* expr) {
+ // Load the arguments on the stack and call the stub.
+ RegExpConstructResultStub stub(isolate());
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT(args->length() == 3);
+ VisitForStackValue(args->at(0));
+ VisitForStackValue(args->at(1));
+ VisitForAccumulatorValue(args->at(2));
+ __ pop(ebx);
+ __ pop(ecx);
+ __ CallStub(&stub);
+ context()->Plug(eax);
+}
+
+
+void FullCodeGenerator::EmitGetFromCache(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT_EQ(2, args->length());
+
+ ASSERT_NE(NULL, args->at(0)->AsLiteral());
+ int cache_id = Smi::cast(*(args->at(0)->AsLiteral()->value()))->value();
+
+ Handle<FixedArray> jsfunction_result_caches(
+ isolate()->native_context()->jsfunction_result_caches());
+ if (jsfunction_result_caches->length() <= cache_id) {
+ __ Abort(kAttemptToUseUndefinedCache);
+ __ mov(eax, isolate()->factory()->undefined_value());
+ context()->Plug(eax);
+ return;
+ }
+
+ VisitForAccumulatorValue(args->at(1));
+
+ Register key = eax;
+ Register cache = ebx;
+ Register tmp = ecx;
+ __ mov(cache, ContextOperand(esi, Context::GLOBAL_OBJECT_INDEX));
+ __ mov(cache,
+ FieldOperand(cache, GlobalObject::kNativeContextOffset));
+ __ mov(cache, ContextOperand(cache, Context::JSFUNCTION_RESULT_CACHES_INDEX));
+ __ mov(cache,
+ FieldOperand(cache, FixedArray::OffsetOfElementAt(cache_id)));
+
+ Label done, not_found;
+ STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
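+ // The cache stores (key, value) pairs in adjacent slots and the finger
+ // points at a key slot, so only that entry is probed inline; anything else
+ // falls back to the runtime lookup below.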
+ __ mov(tmp, FieldOperand(cache, JSFunctionResultCache::kFingerOffset));
+ // tmp now holds finger offset as a smi.
+ __ cmp(key, FixedArrayElementOperand(cache, tmp));
+ __ j(not_equal, &not_found);
+
+ __ mov(eax, FixedArrayElementOperand(cache, tmp, 1));
+ __ jmp(&done);
+
+ __ bind(&not_found);
+ // Call runtime to perform the lookup.
+ __ push(cache);
+ __ push(key);
+ __ CallRuntime(Runtime::kHiddenGetFromCache, 2);
+
+ __ bind(&done);
+ context()->Plug(eax);
+}
+
+
+void FullCodeGenerator::EmitHasCachedArrayIndex(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT(args->length() == 1);
+
+ VisitForAccumulatorValue(args->at(0));
+
+ __ AssertString(eax);
+
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+ context()->PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
+
+ __ test(FieldOperand(eax, String::kHashFieldOffset),
+ Immediate(String::kContainsCachedArrayIndexMask));
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+ Split(zero, if_true, if_false, fall_through);
+
+ context()->Plug(if_true, if_false);
+}
+
+
+void FullCodeGenerator::EmitGetCachedArrayIndex(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT(args->length() == 1);
+ VisitForAccumulatorValue(args->at(0));
+
+ __ AssertString(eax);
+
+ __ mov(eax, FieldOperand(eax, String::kHashFieldOffset));
+ __ IndexFromHash(eax, eax);
+
+ context()->Plug(eax);
+}
+
+
+void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
+ Label bailout, done, one_char_separator, long_separator,
+ non_trivial_array, not_size_one_array, loop,
+ loop_1, loop_1_condition, loop_2, loop_2_entry, loop_3, loop_3_entry;
+
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT(args->length() == 2);
+ // We will leave the separator on the stack until the end of the function.
+ VisitForStackValue(args->at(1));
+ // Load the array into eax.
+ VisitForAccumulatorValue(args->at(0));
+ // All aliases of the same register have disjoint lifetimes.
+ Register array = eax;
+ Register elements = no_reg; // Will be eax.
+
+ Register index = edx;
+
+ Register string_length = ecx;
+
+ Register string = esi;
+
+ Register scratch = ebx;
+
+ Register array_length = edi;
+ Register result_pos = no_reg; // Will be edi.
+
+ // Separator operand is already pushed.
+ Operand separator_operand = Operand(esp, 2 * kPointerSize);
+ Operand result_operand = Operand(esp, 1 * kPointerSize);
+ Operand array_length_operand = Operand(esp, 0);
+ __ sub(esp, Immediate(2 * kPointerSize));
+ __ cld();
+ // Check that the array is a JSArray.
+ __ JumpIfSmi(array, &bailout);
+ __ CmpObjectType(array, JS_ARRAY_TYPE, scratch);
+ __ j(not_equal, &bailout);
+
+ // Check that the array has fast elements.
+ __ CheckFastElements(scratch, &bailout);
+
+ // If the array has length zero, return the empty string.
+ __ mov(array_length, FieldOperand(array, JSArray::kLengthOffset));
+ __ SmiUntag(array_length);
+ __ j(not_zero, &non_trivial_array);
+ __ mov(result_operand, isolate()->factory()->empty_string());
+ __ jmp(&done);
+
+ // Save the array length.
+ __ bind(&non_trivial_array);
+ __ mov(array_length_operand, array_length);
+
+ // Save the FixedArray containing array's elements.
+ // End of array's live range.
+ elements = array;
+ __ mov(elements, FieldOperand(array, JSArray::kElementsOffset));
+ array = no_reg;
+
+
+ // Check that all array elements are sequential ASCII strings, and
+ // accumulate the sum of their lengths, as a smi-encoded value.
+ __ Move(index, Immediate(0));
+ __ Move(string_length, Immediate(0));
+ // Loop condition: while (index < length).
+ // Live loop registers: index, array_length, string,
+ // scratch, string_length, elements.
+ if (generate_debug_code_) {
+ __ cmp(index, array_length);
+ __ Assert(less, kNoEmptyArraysHereInEmitFastAsciiArrayJoin);
+ }
+ __ bind(&loop);
+ __ mov(string, FieldOperand(elements,
+ index,
+ times_pointer_size,
+ FixedArray::kHeaderSize));
+ __ JumpIfSmi(string, &bailout);
+ __ mov(scratch, FieldOperand(string, HeapObject::kMapOffset));
+ __ movzx_b(scratch, FieldOperand(scratch, Map::kInstanceTypeOffset));
+ __ and_(scratch, Immediate(
+ kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask));
+ __ cmp(scratch, kStringTag | kOneByteStringTag | kSeqStringTag);
+ __ j(not_equal, &bailout);
+ __ add(string_length,
+ FieldOperand(string, SeqOneByteString::kLengthOffset));
+ __ j(overflow, &bailout);
+ __ add(index, Immediate(1));
+ __ cmp(index, array_length);
+ __ j(less, &loop);
+
+ // If array_length is 1, return elements[0], a string.
+ __ cmp(array_length, 1);
+ __ j(not_equal, &not_size_one_array);
+ __ mov(scratch, FieldOperand(elements, FixedArray::kHeaderSize));
+ __ mov(result_operand, scratch);
+ __ jmp(&done);
+
+ __ bind(&not_size_one_array);
+
+ // End of array_length live range.
+ result_pos = array_length;
+ array_length = no_reg;
+
+ // Live registers:
+ // string_length: Sum of string lengths, as a smi.
+ // elements: FixedArray of strings.
+
+ // Check that the separator is a flat ASCII string.
+ __ mov(string, separator_operand);
+ __ JumpIfSmi(string, &bailout);
+ __ mov(scratch, FieldOperand(string, HeapObject::kMapOffset));
+ __ movzx_b(scratch, FieldOperand(scratch, Map::kInstanceTypeOffset));
+ __ and_(scratch, Immediate(
+ kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask));
+ __ cmp(scratch, kStringTag | kOneByteStringTag | kSeqStringTag);
+ __ j(not_equal, &bailout);
+
+ // Add (separator length times array_length) - separator length
+ // to string_length.
+ __ mov(scratch, separator_operand);
+ __ mov(scratch, FieldOperand(scratch, SeqOneByteString::kLengthOffset));
+ __ sub(string_length, scratch); // May be negative, temporarily.
+ __ imul(scratch, array_length_operand);
+ __ j(overflow, &bailout);
+ __ add(string_length, scratch);
+ __ j(overflow, &bailout);
+
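+ // string_length now holds the total result length as a smi; untag it to get
+ // the character count for the allocation below.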
+ __ shr(string_length, 1);
+ // Live registers and stack values:
+ // string_length
+ // elements
+ __ AllocateAsciiString(result_pos, string_length, scratch,
+ index, string, &bailout);
+ __ mov(result_operand, result_pos);
+ __ lea(result_pos, FieldOperand(result_pos, SeqOneByteString::kHeaderSize));
+
+
+ __ mov(string, separator_operand);
+ __ cmp(FieldOperand(string, SeqOneByteString::kLengthOffset),
+ Immediate(Smi::FromInt(1)));
+ __ j(equal, &one_char_separator);
+ __ j(greater, &long_separator);
+
+
+ // Empty separator case
+ __ mov(index, Immediate(0));
+ __ jmp(&loop_1_condition);
+ // Loop condition: while (index < length).
+ __ bind(&loop_1);
+ // Each iteration of the loop concatenates one string to the result.
+ // Live values in registers:
+ // index: which element of the elements array we are adding to the result.
+ // result_pos: the position to which we are currently copying characters.
+ // elements: the FixedArray of strings we are joining.
+
+ // Get string = array[index].
+ __ mov(string, FieldOperand(elements, index,
+ times_pointer_size,
+ FixedArray::kHeaderSize));
+ __ mov(string_length,
+ FieldOperand(string, String::kLengthOffset));
+ __ shr(string_length, 1);
+ __ lea(string,
+ FieldOperand(string, SeqOneByteString::kHeaderSize));
+ __ CopyBytes(string, result_pos, string_length, scratch);
+ __ add(index, Immediate(1));
+ __ bind(&loop_1_condition);
+ __ cmp(index, array_length_operand);
+ __ j(less, &loop_1); // End while (index < length).
+ __ jmp(&done);
+
+
+
+ // One-character separator case
+ __ bind(&one_char_separator);
+ // Replace separator with its ASCII character value.
+ __ mov_b(scratch, FieldOperand(string, SeqOneByteString::kHeaderSize));
+ __ mov_b(separator_operand, scratch);
+
+ __ Move(index, Immediate(0));
+ // Jump into the loop after the code that copies the separator, so the first
+ // element is not preceded by a separator.
+ __ jmp(&loop_2_entry);
+ // Loop condition: while (index < length).
+ __ bind(&loop_2);
+ // Each iteration of the loop concatenates one string to the result.
+ // Live values in registers:
+ // index: which element of the elements array we are adding to the result.
+ // result_pos: the position to which we are currently copying characters.
+
+ // Copy the separator character to the result.
+ __ mov_b(scratch, separator_operand);
+ __ mov_b(Operand(result_pos, 0), scratch);
+ __ inc(result_pos);
+
+ __ bind(&loop_2_entry);
+ // Get string = array[index].
+ __ mov(string, FieldOperand(elements, index,
+ times_pointer_size,
+ FixedArray::kHeaderSize));
+ __ mov(string_length,
+ FieldOperand(string, String::kLengthOffset));
+ __ shr(string_length, 1);
+ __ lea(string,
+ FieldOperand(string, SeqOneByteString::kHeaderSize));
+ __ CopyBytes(string, result_pos, string_length, scratch);
+ __ add(index, Immediate(1));
+
+ __ cmp(index, array_length_operand);
+ __ j(less, &loop_2); // End while (index < length).
+ __ jmp(&done);
+
+
+ // Long separator case (separator is more than one character).
+ __ bind(&long_separator);
+
+ __ Move(index, Immediate(0));
+ // Jump into the loop after the code that copies the separator, so the first
+ // element is not preceded by a separator.
+ __ jmp(&loop_3_entry);
+ // Loop condition: while (index < length).
+ __ bind(&loop_3);
+ // Each iteration of the loop concatenates one string to the result.
+ // Live values in registers:
+ // index: which element of the elements array we are adding to the result.
+ // result_pos: the position to which we are currently copying characters.
+
+ // Copy the separator to the result.
+ __ mov(string, separator_operand);
+ __ mov(string_length,
+ FieldOperand(string, String::kLengthOffset));
+ __ shr(string_length, 1);
+ __ lea(string,
+ FieldOperand(string, SeqOneByteString::kHeaderSize));
+ __ CopyBytes(string, result_pos, string_length, scratch);
+
+ __ bind(&loop_3_entry);
+ // Get string = array[index].
+ __ mov(string, FieldOperand(elements, index,
+ times_pointer_size,
+ FixedArray::kHeaderSize));
+ __ mov(string_length,
+ FieldOperand(string, String::kLengthOffset));
+ __ shr(string_length, 1);
+ __ lea(string,
+ FieldOperand(string, SeqOneByteString::kHeaderSize));
+ __ CopyBytes(string, result_pos, string_length, scratch);
+ __ add(index, Immediate(1));
+
+ __ cmp(index, array_length_operand);
+ __ j(less, &loop_3); // End while (index < length).
+ __ jmp(&done);
+
+
+ __ bind(&bailout);
+ __ mov(result_operand, isolate()->factory()->undefined_value());
+ __ bind(&done);
+ __ mov(eax, result_operand);
+ // Drop temp values from the stack, and restore context register.
+ __ add(esp, Immediate(3 * kPointerSize));
+
+ __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+ context()->Plug(eax);
+}
+
+
+void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
+ if (expr->function() != NULL &&
+ expr->function()->intrinsic_type == Runtime::INLINE) {
+ Comment cmnt(masm_, "[ InlineRuntimeCall");
+ EmitInlineRuntimeCall(expr);
+ return;
+ }
+
+ Comment cmnt(masm_, "[ CallRuntime");
+ ZoneList<Expression*>* args = expr->arguments();
+
+ if (expr->is_jsruntime()) {
+ // Push the builtins object as receiver.
+ __ mov(eax, GlobalObjectOperand());
+ __ push(FieldOperand(eax, GlobalObject::kBuiltinsOffset));
+
+ // Load the function from the receiver.
+ __ mov(edx, Operand(esp, 0));
+ __ mov(ecx, Immediate(expr->name()));
+ CallLoadIC(NOT_CONTEXTUAL, expr->CallRuntimeFeedbackId());
+
+ // Push the target function under the receiver.
+ __ push(Operand(esp, 0));
+ __ mov(Operand(esp, kPointerSize), eax);
+
+ // Code common for calls using the IC.
+ ZoneList<Expression*>* args = expr->arguments();
+ int arg_count = args->length();
+ for (int i = 0; i < arg_count; i++) {
+ VisitForStackValue(args->at(i));
+ }
+
+ // Record source position of the IC call.
+ SetSourcePosition(expr->position());
+ CallFunctionStub stub(isolate(), arg_count, NO_CALL_FUNCTION_FLAGS);
+ __ mov(edi, Operand(esp, (arg_count + 1) * kPointerSize));
+ __ CallStub(&stub);
+ // Restore context register.
+ __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+ context()->DropAndPlug(1, eax);
+
+ } else {
+ // Push the arguments ("left-to-right").
+ int arg_count = args->length();
+ for (int i = 0; i < arg_count; i++) {
+ VisitForStackValue(args->at(i));
+ }
+
+ // Call the C runtime function.
+ __ CallRuntime(expr->function(), arg_count);
+
+ context()->Plug(eax);
+ }
+}
+
+
+void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
+ switch (expr->op()) {
+ case Token::DELETE: {
+ Comment cmnt(masm_, "[ UnaryOperation (DELETE)");
+ Property* property = expr->expression()->AsProperty();
+ VariableProxy* proxy = expr->expression()->AsVariableProxy();
+
+ if (property != NULL) {
+ VisitForStackValue(property->obj());
+ VisitForStackValue(property->key());
+ __ push(Immediate(Smi::FromInt(strict_mode())));
+ __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION);
+ context()->Plug(eax);
+ } else if (proxy != NULL) {
+ Variable* var = proxy->var();
+ // Delete of an unqualified identifier is disallowed in strict mode
+ // but "delete this" is allowed.
+ ASSERT(strict_mode() == SLOPPY || var->is_this());
+ if (var->IsUnallocated()) {
+ __ push(GlobalObjectOperand());
+ __ push(Immediate(var->name()));
+ __ push(Immediate(Smi::FromInt(SLOPPY)));
+ __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION);
+ context()->Plug(eax);
+ } else if (var->IsStackAllocated() || var->IsContextSlot()) {
+ // Result of deleting non-global variables is false. 'this' is
+ // not really a variable, though we implement it as one. The
+ // subexpression does not have side effects.
+ context()->Plug(var->is_this());
+ } else {
+ // Non-global variable. Call the runtime to try to delete from the
+ // context where the variable was introduced.
+ __ push(context_register());
+ __ push(Immediate(var->name()));
+ __ CallRuntime(Runtime::kHiddenDeleteContextSlot, 2);
+ context()->Plug(eax);
+ }
+ } else {
+ // Result of deleting non-property, non-variable reference is true.
+ // The subexpression may have side effects.
+ VisitForEffect(expr->expression());
+ context()->Plug(true);
+ }
+ break;
+ }
+
+ case Token::VOID: {
+ Comment cmnt(masm_, "[ UnaryOperation (VOID)");
+ VisitForEffect(expr->expression());
+ context()->Plug(isolate()->factory()->undefined_value());
+ break;
+ }
+
+ case Token::NOT: {
+ Comment cmnt(masm_, "[ UnaryOperation (NOT)");
+ if (context()->IsEffect()) {
+ // Unary NOT has no side effects so it's only necessary to visit the
+ // subexpression. Match the optimizing compiler by not branching.
+ VisitForEffect(expr->expression());
+ } else if (context()->IsTest()) {
+ const TestContext* test = TestContext::cast(context());
+ // The labels are swapped for the recursive call.
+ VisitForControl(expr->expression(),
+ test->false_label(),
+ test->true_label(),
+ test->fall_through());
+ context()->Plug(test->true_label(), test->false_label());
+ } else {
+ // We handle value contexts explicitly rather than simply visiting
+ // for control and plugging the control flow into the context,
+ // because we need to prepare a pair of extra administrative AST ids
+ // for the optimizing compiler.
+ ASSERT(context()->IsAccumulatorValue() || context()->IsStackValue());
+ Label materialize_true, materialize_false, done;
+ VisitForControl(expr->expression(),
+ &materialize_false,
+ &materialize_true,
+ &materialize_true);
+ __ bind(&materialize_true);
+ PrepareForBailoutForId(expr->MaterializeTrueId(), NO_REGISTERS);
+ if (context()->IsAccumulatorValue()) {
+ __ mov(eax, isolate()->factory()->true_value());
+ } else {
+ __ Push(isolate()->factory()->true_value());
+ }
+ __ jmp(&done, Label::kNear);
+ __ bind(&materialize_false);
+ PrepareForBailoutForId(expr->MaterializeFalseId(), NO_REGISTERS);
+ if (context()->IsAccumulatorValue()) {
+ __ mov(eax, isolate()->factory()->false_value());
+ } else {
+ __ Push(isolate()->factory()->false_value());
+ }
+ __ bind(&done);
+ }
+ break;
+ }
+
+ case Token::TYPEOF: {
+ Comment cmnt(masm_, "[ UnaryOperation (TYPEOF)");
+ { StackValueContext context(this);
+ VisitForTypeofValue(expr->expression());
+ }
+ __ CallRuntime(Runtime::kTypeof, 1);
+ context()->Plug(eax);
+ break;
+ }
+
+ default:
+ UNREACHABLE();
+ }
+}
+
+
+void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
+ ASSERT(expr->expression()->IsValidReferenceExpression());
+
+ Comment cmnt(masm_, "[ CountOperation");
+ SetSourcePosition(expr->position());
+
+ // Expression can only be a property, a global or a (parameter or local)
+ // slot.
+ enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY };
+ LhsKind assign_type = VARIABLE;
+ Property* prop = expr->expression()->AsProperty();
+ // In case of a property we use the uninitialized expression context
+ // of the key to detect a named property.
+ if (prop != NULL) {
+ assign_type =
+ (prop->key()->IsPropertyName()) ? NAMED_PROPERTY : KEYED_PROPERTY;
+ }
+
+ // Evaluate expression and get value.
+ if (assign_type == VARIABLE) {
+ ASSERT(expr->expression()->AsVariableProxy()->var() != NULL);
+ AccumulatorValueContext context(this);
+ EmitVariableLoad(expr->expression()->AsVariableProxy());
+ } else {
+ // Reserve space for result of postfix operation.
+ if (expr->is_postfix() && !context()->IsEffect()) {
+ __ push(Immediate(Smi::FromInt(0)));
+ }
+ if (assign_type == NAMED_PROPERTY) {
+ // Put the object both on the stack and in edx.
+ VisitForAccumulatorValue(prop->obj());
+ __ push(eax);
+ __ mov(edx, eax);
+ EmitNamedPropertyLoad(prop);
+ } else {
+ VisitForStackValue(prop->obj());
+ VisitForStackValue(prop->key());
+ __ mov(edx, Operand(esp, kPointerSize)); // Object.
+ __ mov(ecx, Operand(esp, 0)); // Key.
+ EmitKeyedPropertyLoad(prop);
+ }
+ }
+
+ // We need a second deoptimization point after loading the value
+ // in case evaluating the property load has a side effect.
+ if (assign_type == VARIABLE) {
+ PrepareForBailout(expr->expression(), TOS_REG);
+ } else {
+ PrepareForBailoutForId(prop->LoadId(), TOS_REG);
+ }
+
+ // Inline smi case if we are in a loop.
+ Label done, stub_call;
+ JumpPatchSite patch_site(masm_);
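+ // The patch site records the inline smi check emitted below so that the IC
+ // machinery can patch it once type feedback is available.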
+ if (ShouldInlineSmiCase(expr->op())) {
+ Label slow;
+ patch_site.EmitJumpIfNotSmi(eax, &slow, Label::kNear);
+
+ // Save result for postfix expressions.
+ if (expr->is_postfix()) {
+ if (!context()->IsEffect()) {
+ // Save the result on the stack. If we have a named or keyed property
+ // we store the result under the receiver that is currently on top
+ // of the stack.
+ switch (assign_type) {
+ case VARIABLE:
+ __ push(eax);
+ break;
+ case NAMED_PROPERTY:
+ __ mov(Operand(esp, kPointerSize), eax);
+ break;
+ case KEYED_PROPERTY:
+ __ mov(Operand(esp, 2 * kPointerSize), eax);
+ break;
+ }
+ }
+ }
+
+ if (expr->op() == Token::INC) {
+ __ add(eax, Immediate(Smi::FromInt(1)));
+ } else {
+ __ sub(eax, Immediate(Smi::FromInt(1)));
+ }
+ __ j(no_overflow, &done, Label::kNear);
+ // Call stub. Undo operation first.
+ if (expr->op() == Token::INC) {
+ __ sub(eax, Immediate(Smi::FromInt(1)));
+ } else {
+ __ add(eax, Immediate(Smi::FromInt(1)));
+ }
+ __ jmp(&stub_call, Label::kNear);
+ __ bind(&slow);
+ }
+ ToNumberStub convert_stub(isolate());
+ __ CallStub(&convert_stub);
+
+ // Save result for postfix expressions.
+ if (expr->is_postfix()) {
+ if (!context()->IsEffect()) {
+ // Save the result on the stack. If we have a named or keyed property
+ // we store the result under the receiver that is currently on top
+ // of the stack.
+ switch (assign_type) {
+ case VARIABLE:
+ __ push(eax);
+ break;
+ case NAMED_PROPERTY:
+ __ mov(Operand(esp, kPointerSize), eax);
+ break;
+ case KEYED_PROPERTY:
+ __ mov(Operand(esp, 2 * kPointerSize), eax);
+ break;
+ }
+ }
+ }
+
+ // Record position before stub call.
+ SetSourcePosition(expr->position());
+
+ // Call stub for +1/-1.
+ __ bind(&stub_call);
+ __ mov(edx, eax);
+ __ mov(eax, Immediate(Smi::FromInt(1)));
+ BinaryOpICStub stub(isolate(), expr->binary_op(), NO_OVERWRITE);
+ CallIC(stub.GetCode(), expr->CountBinOpFeedbackId());
+ patch_site.EmitPatchInfo();
+ __ bind(&done);
+
+ // Store the value returned in eax.
+ switch (assign_type) {
+ case VARIABLE:
+ if (expr->is_postfix()) {
+ // Perform the assignment as if via '='.
+ { EffectContext context(this);
+ EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(),
+ Token::ASSIGN);
+ PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+ context.Plug(eax);
+ }
+ // For all contexts except EffectContext we have the result on
+ // top of the stack.
+ if (!context()->IsEffect()) {
+ context()->PlugTOS();
+ }
+ } else {
+ // Perform the assignment as if via '='.
+ EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(),
+ Token::ASSIGN);
+ PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+ context()->Plug(eax);
+ }
+ break;
+ case NAMED_PROPERTY: {
+ __ mov(ecx, prop->key()->AsLiteral()->value());
+ __ pop(edx);
+ CallStoreIC(expr->CountStoreFeedbackId());
+ PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+ if (expr->is_postfix()) {
+ if (!context()->IsEffect()) {
+ context()->PlugTOS();
+ }
+ } else {
+ context()->Plug(eax);
+ }
+ break;
+ }
+ case KEYED_PROPERTY: {
+ __ pop(ecx);
+ __ pop(edx);
+ Handle<Code> ic = strict_mode() == SLOPPY
+ ? isolate()->builtins()->KeyedStoreIC_Initialize()
+ : isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
+ CallIC(ic, expr->CountStoreFeedbackId());
+ PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+ if (expr->is_postfix()) {
+ // Result is on the stack
+ if (!context()->IsEffect()) {
+ context()->PlugTOS();
+ }
+ } else {
+ context()->Plug(eax);
+ }
+ break;
+ }
+ }
+}
+
+
+void FullCodeGenerator::VisitForTypeofValue(Expression* expr) {
+ VariableProxy* proxy = expr->AsVariableProxy();
+ ASSERT(!context()->IsEffect());
+ ASSERT(!context()->IsTest());
+
+ if (proxy != NULL && proxy->var()->IsUnallocated()) {
+ Comment cmnt(masm_, "[ Global variable");
+ __ mov(edx, GlobalObjectOperand());
+ __ mov(ecx, Immediate(proxy->name()));
+ // Use a regular load, not a contextual load, to avoid a reference
+ // error.
+ CallLoadIC(NOT_CONTEXTUAL);
+ PrepareForBailout(expr, TOS_REG);
+ context()->Plug(eax);
+ } else if (proxy != NULL && proxy->var()->IsLookupSlot()) {
+ Comment cmnt(masm_, "[ Lookup slot");
+ Label done, slow;
+
+ // Generate code for loading from variables potentially shadowed
+ // by eval-introduced variables.
+ EmitDynamicLookupFastCase(proxy->var(), INSIDE_TYPEOF, &slow, &done);
+
+ __ bind(&slow);
+ __ push(esi);
+ __ push(Immediate(proxy->name()));
+ __ CallRuntime(Runtime::kHiddenLoadContextSlotNoReferenceError, 2);
+ PrepareForBailout(expr, TOS_REG);
+ __ bind(&done);
+
+ context()->Plug(eax);
+ } else {
+ // This expression cannot throw a reference error at the top level.
+ VisitInDuplicateContext(expr);
+ }
+}
+
+
+void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
+ Expression* sub_expr,
+ Handle<String> check) {
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+ context()->PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
+
+ { AccumulatorValueContext context(this);
+ VisitForTypeofValue(sub_expr);
+ }
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+
+ Factory* factory = isolate()->factory();
+ if (String::Equals(check, factory->number_string())) {
+ __ JumpIfSmi(eax, if_true);
+ __ cmp(FieldOperand(eax, HeapObject::kMapOffset),
+ isolate()->factory()->heap_number_map());
+ Split(equal, if_true, if_false, fall_through);
+ } else if (String::Equals(check, factory->string_string())) {
+ __ JumpIfSmi(eax, if_false);
+ __ CmpObjectType(eax, FIRST_NONSTRING_TYPE, edx);
+ __ j(above_equal, if_false);
+ // Check for undetectable objects => false.
+ __ test_b(FieldOperand(edx, Map::kBitFieldOffset),
+ 1 << Map::kIsUndetectable);
+ Split(zero, if_true, if_false, fall_through);
+ } else if (String::Equals(check, factory->symbol_string())) {
+ __ JumpIfSmi(eax, if_false);
+ __ CmpObjectType(eax, SYMBOL_TYPE, edx);
+ Split(equal, if_true, if_false, fall_through);
+ } else if (String::Equals(check, factory->boolean_string())) {
+ __ cmp(eax, isolate()->factory()->true_value());
+ __ j(equal, if_true);
+ __ cmp(eax, isolate()->factory()->false_value());
+ Split(equal, if_true, if_false, fall_through);
+ } else if (FLAG_harmony_typeof &&
+ String::Equals(check, factory->null_string())) {
+ __ cmp(eax, isolate()->factory()->null_value());
+ Split(equal, if_true, if_false, fall_through);
+ } else if (String::Equals(check, factory->undefined_string())) {
+ __ cmp(eax, isolate()->factory()->undefined_value());
+ __ j(equal, if_true);
+ __ JumpIfSmi(eax, if_false);
+ // Check for undetectable objects => true.
+ __ mov(edx, FieldOperand(eax, HeapObject::kMapOffset));
+ __ movzx_b(ecx, FieldOperand(edx, Map::kBitFieldOffset));
+ __ test(ecx, Immediate(1 << Map::kIsUndetectable));
+ Split(not_zero, if_true, if_false, fall_through);
+ } else if (String::Equals(check, factory->function_string())) {
+ __ JumpIfSmi(eax, if_false);
+ STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
+ __ CmpObjectType(eax, JS_FUNCTION_TYPE, edx);
+ __ j(equal, if_true);
+ __ CmpInstanceType(edx, JS_FUNCTION_PROXY_TYPE);
+ Split(equal, if_true, if_false, fall_through);
+ } else if (String::Equals(check, factory->object_string())) {
+ __ JumpIfSmi(eax, if_false);
+ if (!FLAG_harmony_typeof) {
+ __ cmp(eax, isolate()->factory()->null_value());
+ __ j(equal, if_true);
+ }
+ __ CmpObjectType(eax, FIRST_NONCALLABLE_SPEC_OBJECT_TYPE, edx);
+ __ j(below, if_false);
+ __ CmpInstanceType(edx, LAST_NONCALLABLE_SPEC_OBJECT_TYPE);
+ __ j(above, if_false);
+ // Check for undetectable objects => false.
+ __ test_b(FieldOperand(edx, Map::kBitFieldOffset),
+ 1 << Map::kIsUndetectable);
+ Split(zero, if_true, if_false, fall_through);
+ } else {
+ if (if_false != fall_through) __ jmp(if_false);
+ }
+ context()->Plug(if_true, if_false);
+}
+
+
+void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
+ Comment cmnt(masm_, "[ CompareOperation");
+ SetSourcePosition(expr->position());
+
+ // First we try a fast inlined version of the compare when one of
+ // the operands is a literal.
+ if (TryLiteralCompare(expr)) return;
+
+ // Always perform the comparison for its control flow. Pack the result
+ // into the expression's context after the comparison is performed.
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+ context()->PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
+
+ Token::Value op = expr->op();
+ VisitForStackValue(expr->left());
+ switch (op) {
+ case Token::IN:
+ VisitForStackValue(expr->right());
+ __ InvokeBuiltin(Builtins::IN, CALL_FUNCTION);
+ PrepareForBailoutBeforeSplit(expr, false, NULL, NULL);
+ __ cmp(eax, isolate()->factory()->true_value());
+ Split(equal, if_true, if_false, fall_through);
+ break;
+
+ case Token::INSTANCEOF: {
+ VisitForStackValue(expr->right());
+ InstanceofStub stub(isolate(), InstanceofStub::kNoFlags);
+ __ CallStub(&stub);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+ __ test(eax, eax);
+ // The stub returns 0 for true.
+ Split(zero, if_true, if_false, fall_through);
+ break;
+ }
+
+ default: {
+ VisitForAccumulatorValue(expr->right());
+ Condition cc = CompareIC::ComputeCondition(op);
+ __ pop(edx);
+
+ bool inline_smi_code = ShouldInlineSmiCase(op);
+ JumpPatchSite patch_site(masm_);
+ if (inline_smi_code) {
+ Label slow_case;
+ __ mov(ecx, edx);
+ __ or_(ecx, eax);
+ patch_site.EmitJumpIfNotSmi(ecx, &slow_case, Label::kNear);
+ __ cmp(edx, eax);
+ Split(cc, if_true, if_false, NULL);
+ __ bind(&slow_case);
+ }
+
+ // Record position and call the compare IC.
+ SetSourcePosition(expr->position());
+ Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
+ CallIC(ic, expr->CompareOperationFeedbackId());
+ patch_site.EmitPatchInfo();
+
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+ __ test(eax, eax);
+ Split(cc, if_true, if_false, fall_through);
+ }
+ }
+
+ // Convert the result of the comparison into one expected for this
+ // expression's context.
+ context()->Plug(if_true, if_false);
+}
+
+
+void FullCodeGenerator::EmitLiteralCompareNil(CompareOperation* expr,
+ Expression* sub_expr,
+ NilValue nil) {
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+ context()->PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
+
+ VisitForAccumulatorValue(sub_expr);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+
+ Handle<Object> nil_value = nil == kNullValue
+ ? isolate()->factory()->null_value()
+ : isolate()->factory()->undefined_value();
+ if (expr->op() == Token::EQ_STRICT) {
+ __ cmp(eax, nil_value);
+ Split(equal, if_true, if_false, fall_through);
+ } else {
+ Handle<Code> ic = CompareNilICStub::GetUninitialized(isolate(), nil);
+ CallIC(ic, expr->CompareOperationFeedbackId());
+ __ test(eax, eax);
+ Split(not_zero, if_true, if_false, fall_through);
+ }
+ context()->Plug(if_true, if_false);
+}
+
+
+void FullCodeGenerator::VisitThisFunction(ThisFunction* expr) {
+ __ mov(eax, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
+ context()->Plug(eax);
+}
+
+
+Register FullCodeGenerator::result_register() {
+ return eax;
+}
+
+
+Register FullCodeGenerator::context_register() {
+ return esi;
+}
+
+
+void FullCodeGenerator::StoreToFrameField(int frame_offset, Register value) {
+ ASSERT_EQ(POINTER_SIZE_ALIGN(frame_offset), frame_offset);
+ __ mov(Operand(ebp, frame_offset), value);
+}
+
+
+void FullCodeGenerator::LoadContextField(Register dst, int context_index) {
+ __ mov(dst, ContextOperand(esi, context_index));
+}
+
+
+void FullCodeGenerator::PushFunctionArgumentForContextAllocation() {
+ Scope* declaration_scope = scope()->DeclarationScope();
+ if (declaration_scope->is_global_scope() ||
+ declaration_scope->is_module_scope()) {
+ // Contexts nested in the native context have a canonical empty function
+ // as their closure, not the anonymous closure containing the global
+ // code. Pass a smi sentinel and let the runtime look up the empty
+ // function.
+ __ push(Immediate(Smi::FromInt(0)));
+ } else if (declaration_scope->is_eval_scope()) {
+ // Contexts nested inside eval code have the same closure as the context
+ // calling eval, not the anonymous closure containing the eval code.
+ // Fetch it from the context.
+ __ push(ContextOperand(esi, Context::CLOSURE_INDEX));
+ } else {
+ ASSERT(declaration_scope->is_function_scope());
+ __ push(Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
+ }
+}
+
+
+// ----------------------------------------------------------------------------
+// Non-local control flow support.
+
+void FullCodeGenerator::EnterFinallyBlock() {
+ // Cook return address on top of stack (smi encoded Code* delta)
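+  // The return address is stored as a smi-tagged offset from the start of
+  // the code object, so the word left on the stack looks like a smi to the
+  // GC and stays valid even if the code object is moved.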
+ ASSERT(!result_register().is(edx));
+ __ pop(edx);
+ __ sub(edx, Immediate(masm_->CodeObject()));
+ STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
+ STATIC_ASSERT(kSmiTag == 0);
+ __ SmiTag(edx);
+ __ push(edx);
+
+ // Store result register while executing finally block.
+ __ push(result_register());
+
+ // Store pending message while executing finally block.
+ ExternalReference pending_message_obj =
+ ExternalReference::address_of_pending_message_obj(isolate());
+ __ mov(edx, Operand::StaticVariable(pending_message_obj));
+ __ push(edx);
+
+ ExternalReference has_pending_message =
+ ExternalReference::address_of_has_pending_message(isolate());
+ __ mov(edx, Operand::StaticVariable(has_pending_message));
+ __ SmiTag(edx);
+ __ push(edx);
+
+ ExternalReference pending_message_script =
+ ExternalReference::address_of_pending_message_script(isolate());
+ __ mov(edx, Operand::StaticVariable(pending_message_script));
+ __ push(edx);
+}
+
+
+void FullCodeGenerator::ExitFinallyBlock() {
+ ASSERT(!result_register().is(edx));
+ // Restore pending message from stack.
+ __ pop(edx);
+ ExternalReference pending_message_script =
+ ExternalReference::address_of_pending_message_script(isolate());
+ __ mov(Operand::StaticVariable(pending_message_script), edx);
+
+ __ pop(edx);
+ __ SmiUntag(edx);
+ ExternalReference has_pending_message =
+ ExternalReference::address_of_has_pending_message(isolate());
+ __ mov(Operand::StaticVariable(has_pending_message), edx);
+
+ __ pop(edx);
+ ExternalReference pending_message_obj =
+ ExternalReference::address_of_pending_message_obj(isolate());
+ __ mov(Operand::StaticVariable(pending_message_obj), edx);
+
+ // Restore result register from stack.
+ __ pop(result_register());
+
+ // Uncook return address.
+ __ pop(edx);
+ __ SmiUntag(edx);
+ __ add(edx, Immediate(masm_->CodeObject()));
+ __ jmp(edx);
+}
+
+
+#undef __
+
+#define __ ACCESS_MASM(masm())
+
+FullCodeGenerator::NestedStatement* FullCodeGenerator::TryFinally::Exit(
+ int* stack_depth,
+ int* context_length) {
+ // The macros used here must preserve the result register.
+
+ // Because the handler block contains the context of the finally
+ // code, we can restore it directly from there for the finally code
+ // rather than iteratively unwinding contexts via their previous
+ // links.
+ __ Drop(*stack_depth); // Down to the handler block.
+ if (*context_length > 0) {
+ // Restore the context to its dedicated register and the stack.
+ __ mov(esi, Operand(esp, StackHandlerConstants::kContextOffset));
+ __ mov(Operand(ebp, StandardFrameConstants::kContextOffset), esi);
+ }
+ __ PopTryHandler();
+ __ call(finally_entry_);
+
+ *stack_depth = 0;
+ *context_length = 0;
+ return previous_;
+}
+
+#undef __
+
+
+static const byte kJnsInstruction = 0x79;
+static const byte kJnsOffset = 0x11;
+static const byte kNopByteOne = 0x66;
+static const byte kNopByteTwo = 0x90;
+#ifdef DEBUG
+static const byte kCallInstruction = 0xe8;
+#endif
+
+
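+// The back edge site patched below has the layout
+//   sub <profiling_counter>, <delta>
+//   jns ok            ;; two bytes, replaced by a two-byte nop when patched
+//   call <stub>       ;; interrupt check or on-stack replacement
+//   ok:
+// pc points just behind the call, so the call target cell is at pc - kIntSize
+// and the jns (or nop) bytes immediately precede it.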
+void BackEdgeTable::PatchAt(Code* unoptimized_code,
+ Address pc,
+ BackEdgeState target_state,
+ Code* replacement_code) {
+ Address call_target_address = pc - kIntSize;
+ Address jns_instr_address = call_target_address - 3;
+ Address jns_offset_address = call_target_address - 2;
+
+ switch (target_state) {
+ case INTERRUPT:
+ // sub <profiling_counter>, <delta> ;; Not changed
+ // jns ok
+ // call <interrupt stub>
+ // ok:
+ *jns_instr_address = kJnsInstruction;
+ *jns_offset_address = kJnsOffset;
+ break;
+ case ON_STACK_REPLACEMENT:
+ case OSR_AFTER_STACK_CHECK:
+ // sub <profiling_counter>, <delta> ;; Not changed
+ // nop
+ // nop
+      // call <on-stack replacement>
+ // ok:
+ *jns_instr_address = kNopByteOne;
+ *jns_offset_address = kNopByteTwo;
+ break;
+ }
+
+ Assembler::set_target_address_at(call_target_address,
+ unoptimized_code,
+ replacement_code->entry());
+ unoptimized_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
+ unoptimized_code, call_target_address, replacement_code);
+}
+
+
+BackEdgeTable::BackEdgeState BackEdgeTable::GetBackEdgeState(
+ Isolate* isolate,
+ Code* unoptimized_code,
+ Address pc) {
+ Address call_target_address = pc - kIntSize;
+ Address jns_instr_address = call_target_address - 3;
+ ASSERT_EQ(kCallInstruction, *(call_target_address - 1));
+
+ if (*jns_instr_address == kJnsInstruction) {
+ ASSERT_EQ(kJnsOffset, *(call_target_address - 2));
+ ASSERT_EQ(isolate->builtins()->InterruptCheck()->entry(),
+ Assembler::target_address_at(call_target_address,
+ unoptimized_code));
+ return INTERRUPT;
+ }
+
+ ASSERT_EQ(kNopByteOne, *jns_instr_address);
+ ASSERT_EQ(kNopByteTwo, *(call_target_address - 2));
+
+ if (Assembler::target_address_at(call_target_address, unoptimized_code) ==
+ isolate->builtins()->OnStackReplacement()->entry()) {
+ return ON_STACK_REPLACEMENT;
+ }
+
+ ASSERT_EQ(isolate->builtins()->OsrAfterStackCheck()->entry(),
+ Assembler::target_address_at(call_target_address,
+ unoptimized_code));
+ return OSR_AFTER_STACK_CHECK;
+}
+
+
+} } // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_X87
--- /dev/null
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "v8.h"
+
+#if V8_TARGET_ARCH_X87
+
+#include "codegen.h"
+#include "ic-inl.h"
+#include "runtime.h"
+#include "stub-cache.h"
+
+namespace v8 {
+namespace internal {
+
+// ----------------------------------------------------------------------------
+// Static IC stub generators.
+//
+
+#define __ ACCESS_MASM(masm)
+
+
+static void GenerateGlobalInstanceTypeCheck(MacroAssembler* masm,
+ Register type,
+ Label* global_object) {
+ // Register usage:
+ // type: holds the receiver instance type on entry.
+ __ cmp(type, JS_GLOBAL_OBJECT_TYPE);
+ __ j(equal, global_object);
+ __ cmp(type, JS_BUILTINS_OBJECT_TYPE);
+ __ j(equal, global_object);
+ __ cmp(type, JS_GLOBAL_PROXY_TYPE);
+ __ j(equal, global_object);
+}
+
+
+// Generated code falls through if the receiver is a regular non-global
+// JS object with slow properties and no interceptors.
+static void GenerateNameDictionaryReceiverCheck(MacroAssembler* masm,
+ Register receiver,
+ Register r0,
+ Register r1,
+ Label* miss) {
+ // Register usage:
+ // receiver: holds the receiver on entry and is unchanged.
+ // r0: used to hold receiver instance type.
+ // Holds the property dictionary on fall through.
+  // r1: used to hold the receiver's map.
+
+ // Check that the receiver isn't a smi.
+ __ JumpIfSmi(receiver, miss);
+
+ // Check that the receiver is a valid JS object.
+ __ mov(r1, FieldOperand(receiver, HeapObject::kMapOffset));
+ __ movzx_b(r0, FieldOperand(r1, Map::kInstanceTypeOffset));
+ __ cmp(r0, FIRST_SPEC_OBJECT_TYPE);
+ __ j(below, miss);
+
+ // If this assert fails, we have to check upper bound too.
+ STATIC_ASSERT(LAST_TYPE == LAST_SPEC_OBJECT_TYPE);
+
+ GenerateGlobalInstanceTypeCheck(masm, r0, miss);
+
+ // Check for non-global object that requires access check.
+ __ test_b(FieldOperand(r1, Map::kBitFieldOffset),
+ (1 << Map::kIsAccessCheckNeeded) |
+ (1 << Map::kHasNamedInterceptor));
+ __ j(not_zero, miss);
+
+ __ mov(r0, FieldOperand(receiver, JSObject::kPropertiesOffset));
+ __ CheckMap(r0, masm->isolate()->factory()->hash_table_map(), miss,
+ DONT_DO_SMI_CHECK);
+}
+
+
+// Helper function used to load a property from a dictionary backing
+// storage. This function may fail to load a property even though it is
+// in the dictionary, so code at miss_label must always call a backup
+// property load that is complete. This function is safe to call if
+// name is not internalized, and will jump to the miss_label in that
+// case. The generated code assumes that the receiver has slow
+// properties, is not a global object and does not have interceptors.
+static void GenerateDictionaryLoad(MacroAssembler* masm,
+ Label* miss_label,
+ Register elements,
+ Register name,
+ Register r0,
+ Register r1,
+ Register result) {
+ // Register use:
+ //
+ // elements - holds the property dictionary on entry and is unchanged.
+ //
+ // name - holds the name of the property on entry and is unchanged.
+ //
+ // Scratch registers:
+ //
+ // r0 - used for the index into the property dictionary
+ //
+ // r1 - used to hold the capacity of the property dictionary.
+ //
+ // result - holds the result on exit.
+
+ Label done;
+
+ // Probe the dictionary.
+ NameDictionaryLookupStub::GeneratePositiveLookup(masm,
+ miss_label,
+ &done,
+ elements,
+ name,
+ r0,
+ r1);
+
+ // If probing finds an entry in the dictionary, r0 contains the
+ // index into the dictionary. Check that the value is a normal
+ // property.
+ __ bind(&done);
+ const int kElementsStartOffset =
+ NameDictionary::kHeaderSize +
+ NameDictionary::kElementsStartIndex * kPointerSize;
+ const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
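+  // A dictionary entry is a (key, value, details) triple, so the details
+  // word of the probed entry is two pointers past its start.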
+ __ test(Operand(elements, r0, times_4, kDetailsOffset - kHeapObjectTag),
+ Immediate(PropertyDetails::TypeField::kMask << kSmiTagSize));
+ __ j(not_zero, miss_label);
+
+ // Get the value at the masked, scaled index.
+ const int kValueOffset = kElementsStartOffset + kPointerSize;
+ __ mov(result, Operand(elements, r0, times_4, kValueOffset - kHeapObjectTag));
+}
+
+
+// Helper function used to store a property to a dictionary backing
+// storage. This function may fail to store a property even though it
+// is in the dictionary, so code at miss_label must always call a
+// backup property store that is complete. This function is safe to
+// call if name is not internalized, and will jump to the miss_label in
+// that case. The generated code assumes that the receiver has slow
+// properties, is not a global object and does not have interceptors.
+static void GenerateDictionaryStore(MacroAssembler* masm,
+ Label* miss_label,
+ Register elements,
+ Register name,
+ Register value,
+ Register r0,
+ Register r1) {
+ // Register use:
+ //
+ // elements - holds the property dictionary on entry and is clobbered.
+ //
+ // name - holds the name of the property on entry and is unchanged.
+ //
+ // value - holds the value to store and is unchanged.
+ //
+ // r0 - used for index into the property dictionary and is clobbered.
+ //
+ // r1 - used to hold the capacity of the property dictionary and is clobbered.
+ Label done;
+
+ // Probe the dictionary.
+ NameDictionaryLookupStub::GeneratePositiveLookup(masm,
+ miss_label,
+ &done,
+ elements,
+ name,
+ r0,
+ r1);
+
+ // If probing finds an entry in the dictionary, r0 contains the
+ // index into the dictionary. Check that the value is a normal
+ // property that is not read only.
+ __ bind(&done);
+ const int kElementsStartOffset =
+ NameDictionary::kHeaderSize +
+ NameDictionary::kElementsStartIndex * kPointerSize;
+ const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
+ const int kTypeAndReadOnlyMask =
+ (PropertyDetails::TypeField::kMask |
+ PropertyDetails::AttributesField::encode(READ_ONLY)) << kSmiTagSize;
+ __ test(Operand(elements, r0, times_4, kDetailsOffset - kHeapObjectTag),
+ Immediate(kTypeAndReadOnlyMask));
+ __ j(not_zero, miss_label);
+
+ // Store the value at the masked, scaled index.
+ const int kValueOffset = kElementsStartOffset + kPointerSize;
+ __ lea(r0, Operand(elements, r0, times_4, kValueOffset - kHeapObjectTag));
+ __ mov(Operand(r0, 0), value);
+
+ // Update write barrier. Make sure not to clobber the value.
+ __ mov(r1, value);
+ __ RecordWrite(elements, r0, r1);
+}
+
+
+// Checks the receiver for special cases (value type, slow case bits).
+// Falls through for regular JS object.
+static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm,
+ Register receiver,
+ Register map,
+ int interceptor_bit,
+ Label* slow) {
+ // Register use:
+ // receiver - holds the receiver and is unchanged.
+ // Scratch registers:
+ // map - used to hold the map of the receiver.
+
+ // Check that the object isn't a smi.
+ __ JumpIfSmi(receiver, slow);
+
+ // Get the map of the receiver.
+ __ mov(map, FieldOperand(receiver, HeapObject::kMapOffset));
+
+ // Check bit field.
+ __ test_b(FieldOperand(map, Map::kBitFieldOffset),
+ (1 << Map::kIsAccessCheckNeeded) | (1 << interceptor_bit));
+ __ j(not_zero, slow);
+ // Check that the object is some kind of JS object EXCEPT JS Value type.
+ // In the case that the object is a value-wrapper object,
+ // we enter the runtime system to make sure that indexing
+ // into string objects works as intended.
+ ASSERT(JS_OBJECT_TYPE > JS_VALUE_TYPE);
+
+ __ CmpInstanceType(map, JS_OBJECT_TYPE);
+ __ j(below, slow);
+}
+
+
+// Loads an indexed element from a fast case array.
+// If not_fast_array is NULL, doesn't perform the elements map check.
+static void GenerateFastArrayLoad(MacroAssembler* masm,
+ Register receiver,
+ Register key,
+ Register scratch,
+ Register result,
+ Label* not_fast_array,
+ Label* out_of_range) {
+ // Register use:
+ // receiver - holds the receiver and is unchanged.
+ // key - holds the key and is unchanged (must be a smi).
+ // Scratch registers:
+ // scratch - used to hold elements of the receiver and the loaded value.
+ // result - holds the result on exit if the load succeeds and
+ // we fall through.
+
+ __ mov(scratch, FieldOperand(receiver, JSObject::kElementsOffset));
+ if (not_fast_array != NULL) {
+ // Check that the object is in fast mode and writable.
+ __ CheckMap(scratch,
+ masm->isolate()->factory()->fixed_array_map(),
+ not_fast_array,
+ DONT_DO_SMI_CHECK);
+ } else {
+ __ AssertFastElements(scratch);
+ }
+ // Check that the key (index) is within bounds.
+ __ cmp(key, FieldOperand(scratch, FixedArray::kLengthOffset));
+ __ j(above_equal, out_of_range);
+ // Fast case: Do the load.
+ STATIC_ASSERT((kPointerSize == 4) && (kSmiTagSize == 1) && (kSmiTag == 0));
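+  // The key is a smi, i.e. the element index shifted left by one, so scaling
+  // it with times_2 yields index * kPointerSize into the FixedArray.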
+ __ mov(scratch, FieldOperand(scratch, key, times_2, FixedArray::kHeaderSize));
+ __ cmp(scratch, Immediate(masm->isolate()->factory()->the_hole_value()));
+ // In case the loaded value is the_hole we have to consult GetProperty
+ // to ensure the prototype chain is searched.
+ __ j(equal, out_of_range);
+ if (!result.is(scratch)) {
+ __ mov(result, scratch);
+ }
+}
+
+
+// Checks whether a key is an array index string or a unique name.
+// Falls through if the key is a unique name.
+static void GenerateKeyNameCheck(MacroAssembler* masm,
+ Register key,
+ Register map,
+ Register hash,
+ Label* index_string,
+ Label* not_unique) {
+ // Register use:
+ // key - holds the key and is unchanged. Assumed to be non-smi.
+ // Scratch registers:
+ // map - used to hold the map of the key.
+ // hash - used to hold the hash of the key.
+ Label unique;
+ __ CmpObjectType(key, LAST_UNIQUE_NAME_TYPE, map);
+ __ j(above, not_unique);
+ STATIC_ASSERT(LAST_UNIQUE_NAME_TYPE == FIRST_NONSTRING_TYPE);
+ __ j(equal, &unique);
+
+ // Is the string an array index, with cached numeric value?
+ __ mov(hash, FieldOperand(key, Name::kHashFieldOffset));
+ __ test(hash, Immediate(Name::kContainsCachedArrayIndexMask));
+ __ j(zero, index_string);
+
+ // Is the string internalized? We already know it's a string so a single
+ // bit test is enough.
+ STATIC_ASSERT(kNotInternalizedTag != 0);
+ __ test_b(FieldOperand(map, Map::kInstanceTypeOffset),
+ kIsNotInternalizedMask);
+ __ j(not_zero, not_unique);
+
+ __ bind(&unique);
+}
+
+
+static Operand GenerateMappedArgumentsLookup(MacroAssembler* masm,
+ Register object,
+ Register key,
+ Register scratch1,
+ Register scratch2,
+ Label* unmapped_case,
+ Label* slow_case) {
+ Heap* heap = masm->isolate()->heap();
+ Factory* factory = masm->isolate()->factory();
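+
+  // A sloppy arguments elements array is laid out as
+  //   [0] context, [1] arguments backing store, [2..] mapped entries,
+  // where a mapped entry is either the hole or the index of the context slot
+  // holding the argument's value.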
+
+ // Check that the receiver is a JSObject. Because of the elements
+ // map check later, we do not need to check for interceptors or
+ // whether it requires access checks.
+ __ JumpIfSmi(object, slow_case);
+ // Check that the object is some kind of JSObject.
+ __ CmpObjectType(object, FIRST_JS_RECEIVER_TYPE, scratch1);
+ __ j(below, slow_case);
+
+ // Check that the key is a positive smi.
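+  // (0x80000001 combines the smi tag bit and the sign bit, so a single test
+  // rejects both non-smis and negative smis.)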
+ __ test(key, Immediate(0x80000001));
+ __ j(not_zero, slow_case);
+
+ // Load the elements into scratch1 and check its map.
+ Handle<Map> arguments_map(heap->sloppy_arguments_elements_map());
+ __ mov(scratch1, FieldOperand(object, JSObject::kElementsOffset));
+ __ CheckMap(scratch1, arguments_map, slow_case, DONT_DO_SMI_CHECK);
+
+ // Check if element is in the range of mapped arguments. If not, jump
+ // to the unmapped lookup with the parameter map in scratch1.
+ __ mov(scratch2, FieldOperand(scratch1, FixedArray::kLengthOffset));
+ __ sub(scratch2, Immediate(Smi::FromInt(2)));
+ __ cmp(key, scratch2);
+ __ j(above_equal, unmapped_case);
+
+ // Load element index and check whether it is the hole.
+ const int kHeaderSize = FixedArray::kHeaderSize + 2 * kPointerSize;
+ __ mov(scratch2, FieldOperand(scratch1,
+ key,
+ times_half_pointer_size,
+ kHeaderSize));
+ __ cmp(scratch2, factory->the_hole_value());
+ __ j(equal, unmapped_case);
+
+ // Load value from context and return it. We can reuse scratch1 because
+ // we do not jump to the unmapped lookup (which requires the parameter
+ // map in scratch1).
+ const int kContextOffset = FixedArray::kHeaderSize;
+ __ mov(scratch1, FieldOperand(scratch1, kContextOffset));
+ return FieldOperand(scratch1,
+ scratch2,
+ times_half_pointer_size,
+ Context::kHeaderSize);
+}
+
+
+static Operand GenerateUnmappedArgumentsLookup(MacroAssembler* masm,
+ Register key,
+ Register parameter_map,
+ Register scratch,
+ Label* slow_case) {
+ // Element is in arguments backing store, which is referenced by the
+ // second element of the parameter_map.
+ const int kBackingStoreOffset = FixedArray::kHeaderSize + kPointerSize;
+ Register backing_store = parameter_map;
+ __ mov(backing_store, FieldOperand(parameter_map, kBackingStoreOffset));
+ Handle<Map> fixed_array_map(masm->isolate()->heap()->fixed_array_map());
+ __ CheckMap(backing_store, fixed_array_map, slow_case, DONT_DO_SMI_CHECK);
+ __ mov(scratch, FieldOperand(backing_store, FixedArray::kLengthOffset));
+ __ cmp(key, scratch);
+ __ j(greater_equal, slow_case);
+ return FieldOperand(backing_store,
+ key,
+ times_half_pointer_size,
+ FixedArray::kHeaderSize);
+}
+
+
+void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- ecx : key
+ // -- edx : receiver
+ // -- esp[0] : return address
+ // -----------------------------------
+ Label slow, check_name, index_smi, index_name, property_array_property;
+ Label probe_dictionary, check_number_dictionary;
+
+ // Check that the key is a smi.
+ __ JumpIfNotSmi(ecx, &check_name);
+ __ bind(&index_smi);
+ // Now the key is known to be a smi. This place is also jumped to from
+ // where a numeric string is converted to a smi.
+
+ GenerateKeyedLoadReceiverCheck(
+ masm, edx, eax, Map::kHasIndexedInterceptor, &slow);
+
+ // Check the receiver's map to see if it has fast elements.
+ __ CheckFastElements(eax, &check_number_dictionary);
+
+ GenerateFastArrayLoad(masm, edx, ecx, eax, eax, NULL, &slow);
+ Isolate* isolate = masm->isolate();
+ Counters* counters = isolate->counters();
+ __ IncrementCounter(counters->keyed_load_generic_smi(), 1);
+ __ ret(0);
+
+ __ bind(&check_number_dictionary);
+ __ mov(ebx, ecx);
+ __ SmiUntag(ebx);
+ __ mov(eax, FieldOperand(edx, JSObject::kElementsOffset));
+
+  // Check whether the elements object is a number dictionary.
+ // edx: receiver
+ // ebx: untagged index
+ // ecx: key
+ // eax: elements
+ __ CheckMap(eax,
+ isolate->factory()->hash_table_map(),
+ &slow,
+ DONT_DO_SMI_CHECK);
+ Label slow_pop_receiver;
+ // Push receiver on the stack to free up a register for the dictionary
+ // probing.
+ __ push(edx);
+ __ LoadFromNumberDictionary(&slow_pop_receiver, eax, ecx, ebx, edx, edi, eax);
+ // Pop receiver before returning.
+ __ pop(edx);
+ __ ret(0);
+
+ __ bind(&slow_pop_receiver);
+ // Pop the receiver from the stack and jump to runtime.
+ __ pop(edx);
+
+ __ bind(&slow);
+ // Slow case: jump to runtime.
+ // edx: receiver
+ // ecx: key
+ __ IncrementCounter(counters->keyed_load_generic_slow(), 1);
+ GenerateRuntimeGetProperty(masm);
+
+ __ bind(&check_name);
+ GenerateKeyNameCheck(masm, ecx, eax, ebx, &index_name, &slow);
+
+ GenerateKeyedLoadReceiverCheck(
+ masm, edx, eax, Map::kHasNamedInterceptor, &slow);
+
+ // If the receiver is a fast-case object, check the keyed lookup
+ // cache. Otherwise probe the dictionary.
+ __ mov(ebx, FieldOperand(edx, JSObject::kPropertiesOffset));
+ __ cmp(FieldOperand(ebx, HeapObject::kMapOffset),
+ Immediate(isolate->factory()->hash_table_map()));
+ __ j(equal, &probe_dictionary);
+
+ // The receiver's map is still in eax, compute the keyed lookup cache hash
+ // based on 32 bits of the map pointer and the string hash.
+ if (FLAG_debug_code) {
+ __ cmp(eax, FieldOperand(edx, HeapObject::kMapOffset));
+ __ Check(equal, kMapIsNoLongerInEax);
+ }
+ __ mov(ebx, eax); // Keep the map around for later.
+ __ shr(eax, KeyedLookupCache::kMapHashShift);
+ __ mov(edi, FieldOperand(ecx, String::kHashFieldOffset));
+ __ shr(edi, String::kHashShift);
+ __ xor_(eax, edi);
+ __ and_(eax, KeyedLookupCache::kCapacityMask & KeyedLookupCache::kHashMask);
+
+ // Load the key (consisting of map and internalized string) from the cache and
+ // check for match.
+ Label load_in_object_property;
+ static const int kEntriesPerBucket = KeyedLookupCache::kEntriesPerBucket;
+ Label hit_on_nth_entry[kEntriesPerBucket];
+ ExternalReference cache_keys =
+ ExternalReference::keyed_lookup_cache_keys(masm->isolate());
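+  // Each cache entry is a (map, name) pair of pointers, hence the scaling of
+  // the bucket index by kPointerSizeLog2 + 1 below.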
+
+ for (int i = 0; i < kEntriesPerBucket - 1; i++) {
+ Label try_next_entry;
+ __ mov(edi, eax);
+ __ shl(edi, kPointerSizeLog2 + 1);
+ if (i != 0) {
+ __ add(edi, Immediate(kPointerSize * i * 2));
+ }
+ __ cmp(ebx, Operand::StaticArray(edi, times_1, cache_keys));
+ __ j(not_equal, &try_next_entry);
+ __ add(edi, Immediate(kPointerSize));
+ __ cmp(ecx, Operand::StaticArray(edi, times_1, cache_keys));
+ __ j(equal, &hit_on_nth_entry[i]);
+ __ bind(&try_next_entry);
+ }
+
+ __ lea(edi, Operand(eax, 1));
+ __ shl(edi, kPointerSizeLog2 + 1);
+ __ add(edi, Immediate(kPointerSize * (kEntriesPerBucket - 1) * 2));
+ __ cmp(ebx, Operand::StaticArray(edi, times_1, cache_keys));
+ __ j(not_equal, &slow);
+ __ add(edi, Immediate(kPointerSize));
+ __ cmp(ecx, Operand::StaticArray(edi, times_1, cache_keys));
+ __ j(not_equal, &slow);
+
+ // Get field offset.
+ // edx : receiver
+ // ebx : receiver's map
+ // ecx : key
+ // eax : lookup cache index
+ ExternalReference cache_field_offsets =
+ ExternalReference::keyed_lookup_cache_field_offsets(masm->isolate());
+
+ // Hit on nth entry.
+ for (int i = kEntriesPerBucket - 1; i >= 0; i--) {
+ __ bind(&hit_on_nth_entry[i]);
+ if (i != 0) {
+ __ add(eax, Immediate(i));
+ }
+ __ mov(edi,
+ Operand::StaticArray(eax, times_pointer_size, cache_field_offsets));
+ __ movzx_b(eax, FieldOperand(ebx, Map::kInObjectPropertiesOffset));
+ __ sub(edi, eax);
+ __ j(above_equal, &property_array_property);
+ if (i != 0) {
+ __ jmp(&load_in_object_property);
+ }
+ }
+
+ // Load in-object property.
+ __ bind(&load_in_object_property);
+ __ movzx_b(eax, FieldOperand(ebx, Map::kInstanceSizeOffset));
+ __ add(eax, edi);
+ __ mov(eax, FieldOperand(edx, eax, times_pointer_size, 0));
+ __ IncrementCounter(counters->keyed_load_generic_lookup_cache(), 1);
+ __ ret(0);
+
+ // Load property array property.
+ __ bind(&property_array_property);
+ __ mov(eax, FieldOperand(edx, JSObject::kPropertiesOffset));
+ __ mov(eax, FieldOperand(eax, edi, times_pointer_size,
+ FixedArray::kHeaderSize));
+ __ IncrementCounter(counters->keyed_load_generic_lookup_cache(), 1);
+ __ ret(0);
+
+ // Do a quick inline probe of the receiver's dictionary, if it
+ // exists.
+ __ bind(&probe_dictionary);
+
+ __ mov(eax, FieldOperand(edx, JSObject::kMapOffset));
+ __ movzx_b(eax, FieldOperand(eax, Map::kInstanceTypeOffset));
+ GenerateGlobalInstanceTypeCheck(masm, eax, &slow);
+
+ GenerateDictionaryLoad(masm, &slow, ebx, ecx, eax, edi, eax);
+ __ IncrementCounter(counters->keyed_load_generic_symbol(), 1);
+ __ ret(0);
+
+ __ bind(&index_name);
+ __ IndexFromHash(ebx, ecx);
+ // Now jump to the place where smi keys are handled.
+ __ jmp(&index_smi);
+}
+
+
+void KeyedLoadIC::GenerateString(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- ecx : key (index)
+ // -- edx : receiver
+ // -- esp[0] : return address
+ // -----------------------------------
+ Label miss;
+
+ Register receiver = edx;
+ Register index = ecx;
+ Register scratch = ebx;
+ Register result = eax;
+
+ StringCharAtGenerator char_at_generator(receiver,
+ index,
+ scratch,
+ result,
+ &miss, // When not a string.
+ &miss, // When not a number.
+ &miss, // When index out of range.
+ STRING_INDEX_IS_ARRAY_INDEX);
+ char_at_generator.GenerateFast(masm);
+ __ ret(0);
+
+ StubRuntimeCallHelper call_helper;
+ char_at_generator.GenerateSlow(masm, call_helper);
+
+ __ bind(&miss);
+ GenerateMiss(masm);
+}
+
+
+void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- ecx : key
+ // -- edx : receiver
+ // -- esp[0] : return address
+ // -----------------------------------
+ Label slow;
+
+ // Check that the receiver isn't a smi.
+ __ JumpIfSmi(edx, &slow);
+
+ // Check that the key is an array index, that is Uint32.
+ __ test(ecx, Immediate(kSmiTagMask | kSmiSignMask));
+ __ j(not_zero, &slow);
+
+ // Get the map of the receiver.
+ __ mov(eax, FieldOperand(edx, HeapObject::kMapOffset));
+
+ // Check that it has indexed interceptor and access checks
+ // are not enabled for this object.
+ __ movzx_b(eax, FieldOperand(eax, Map::kBitFieldOffset));
+ __ and_(eax, Immediate(kSlowCaseBitFieldMask));
+ __ cmp(eax, Immediate(1 << Map::kHasIndexedInterceptor));
+ __ j(not_zero, &slow);
+
+ // Everything is fine, call runtime.
+ __ pop(eax);
+ __ push(edx); // receiver
+ __ push(ecx); // key
+ __ push(eax); // return address
+
+ // Perform tail call to the entry.
+ ExternalReference ref =
+ ExternalReference(IC_Utility(kKeyedLoadPropertyWithInterceptor),
+ masm->isolate());
+ __ TailCallExternalReference(ref, 2, 1);
+
+ __ bind(&slow);
+ GenerateMiss(masm);
+}
+
+
+void KeyedLoadIC::GenerateSloppyArguments(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- ecx : key
+ // -- edx : receiver
+ // -- esp[0] : return address
+ // -----------------------------------
+ Label slow, notin;
+ Factory* factory = masm->isolate()->factory();
+ Operand mapped_location =
+ GenerateMappedArgumentsLookup(masm, edx, ecx, ebx, eax, ¬in, &slow);
+ __ mov(eax, mapped_location);
+ __ Ret();
+ __ bind(¬in);
+ // The unmapped lookup expects that the parameter map is in ebx.
+ Operand unmapped_location =
+ GenerateUnmappedArgumentsLookup(masm, ecx, ebx, eax, &slow);
+ __ cmp(unmapped_location, factory->the_hole_value());
+ __ j(equal, &slow);
+ __ mov(eax, unmapped_location);
+ __ Ret();
+ __ bind(&slow);
+ GenerateMiss(masm);
+}
+
+
+void KeyedStoreIC::GenerateSloppyArguments(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- eax : value
+ // -- ecx : key
+ // -- edx : receiver
+ // -- esp[0] : return address
+ // -----------------------------------
+ Label slow, notin;
+ Operand mapped_location =
+ GenerateMappedArgumentsLookup(masm, edx, ecx, ebx, edi, ¬in, &slow);
+ __ mov(mapped_location, eax);
+ __ lea(ecx, mapped_location);
+ __ mov(edx, eax);
+ __ RecordWrite(ebx, ecx, edx);
+ __ Ret();
+ __ bind(¬in);
+ // The unmapped lookup expects that the parameter map is in ebx.
+ Operand unmapped_location =
+ GenerateUnmappedArgumentsLookup(masm, ecx, ebx, edi, &slow);
+ __ mov(unmapped_location, eax);
+ __ lea(edi, unmapped_location);
+ __ mov(edx, eax);
+ __ RecordWrite(ebx, edi, edx);
+ __ Ret();
+ __ bind(&slow);
+ GenerateMiss(masm);
+}
+
+
+static void KeyedStoreGenerateGenericHelper(
+ MacroAssembler* masm,
+ Label* fast_object,
+ Label* fast_double,
+ Label* slow,
+ KeyedStoreCheckMap check_map,
+ KeyedStoreIncrementLength increment_length) {
+ Label transition_smi_elements;
+ Label finish_object_store, non_double_value, transition_double_elements;
+ Label fast_double_without_map_check;
+ // eax: value
+ // ecx: key (a smi)
+ // edx: receiver
+ // ebx: FixedArray receiver->elements
+ // edi: receiver map
+  // Fast case: Do the store, could be either Object or double.
+ __ bind(fast_object);
+ if (check_map == kCheckMap) {
+ __ mov(edi, FieldOperand(ebx, HeapObject::kMapOffset));
+ __ cmp(edi, masm->isolate()->factory()->fixed_array_map());
+ __ j(not_equal, fast_double);
+ }
+
+ // HOLECHECK: guards "A[i] = V"
+ // We have to go to the runtime if the current value is the hole because
+ // there may be a callback on the element
+ Label holecheck_passed1;
+ __ cmp(FixedArrayElementOperand(ebx, ecx),
+ masm->isolate()->factory()->the_hole_value());
+ __ j(not_equal, &holecheck_passed1);
+ __ JumpIfDictionaryInPrototypeChain(edx, ebx, edi, slow);
+ __ mov(ebx, FieldOperand(edx, JSObject::kElementsOffset));
+
+ __ bind(&holecheck_passed1);
+
+ // Smi stores don't require further checks.
+ Label non_smi_value;
+ __ JumpIfNotSmi(eax, &non_smi_value);
+ if (increment_length == kIncrementLength) {
+ // Add 1 to receiver->length.
+ __ add(FieldOperand(edx, JSArray::kLengthOffset),
+ Immediate(Smi::FromInt(1)));
+ }
+ // It's irrelevant whether array is smi-only or not when writing a smi.
+ __ mov(FixedArrayElementOperand(ebx, ecx), eax);
+ __ ret(0);
+
+ __ bind(&non_smi_value);
+ // Escape to elements kind transition case.
+ __ mov(edi, FieldOperand(edx, HeapObject::kMapOffset));
+ __ CheckFastObjectElements(edi, &transition_smi_elements);
+
+ // Fast elements array, store the value to the elements backing store.
+ __ bind(&finish_object_store);
+ if (increment_length == kIncrementLength) {
+ // Add 1 to receiver->length.
+ __ add(FieldOperand(edx, JSArray::kLengthOffset),
+ Immediate(Smi::FromInt(1)));
+ }
+ __ mov(FixedArrayElementOperand(ebx, ecx), eax);
+ // Update write barrier for the elements array address.
+ __ mov(edx, eax); // Preserve the value which is returned.
+ __ RecordWriteArray(
+ ebx, edx, ecx, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
+ __ ret(0);
+
+ __ bind(fast_double);
+ if (check_map == kCheckMap) {
+ // Check for fast double array case. If this fails, call through to the
+ // runtime.
+ __ cmp(edi, masm->isolate()->factory()->fixed_double_array_map());
+ __ j(not_equal, slow);
+ // If the value is a number, store it as a double in the FastDoubleElements
+ // array.
+ }
+
+ // HOLECHECK: guards "A[i] double hole?"
+ // We have to see if the double version of the hole is present. If so
+ // go to the runtime.
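+  // The hole is encoded as a NaN with a distinguished upper word
+  // (kHoleNanUpper32), so comparing the high 32 bits of the element is
+  // enough to detect it.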
+ uint32_t offset = FixedDoubleArray::kHeaderSize + sizeof(kHoleNanLower32);
+ __ cmp(FieldOperand(ebx, ecx, times_4, offset), Immediate(kHoleNanUpper32));
+ __ j(not_equal, &fast_double_without_map_check);
+ __ JumpIfDictionaryInPrototypeChain(edx, ebx, edi, slow);
+ __ mov(ebx, FieldOperand(edx, JSObject::kElementsOffset));
+
+ __ bind(&fast_double_without_map_check);
+ __ StoreNumberToDoubleElements(eax, ebx, ecx, edi,
+ &transition_double_elements, false);
+ if (increment_length == kIncrementLength) {
+ // Add 1 to receiver->length.
+ __ add(FieldOperand(edx, JSArray::kLengthOffset),
+ Immediate(Smi::FromInt(1)));
+ }
+ __ ret(0);
+
+ __ bind(&transition_smi_elements);
+ __ mov(ebx, FieldOperand(edx, HeapObject::kMapOffset));
+
+ // Transition the array appropriately depending on the value type.
+ __ CheckMap(eax,
+ masm->isolate()->factory()->heap_number_map(),
+ &non_double_value,
+ DONT_DO_SMI_CHECK);
+
+ // Value is a double. Transition FAST_SMI_ELEMENTS -> FAST_DOUBLE_ELEMENTS
+ // and complete the store.
+ __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
+ FAST_DOUBLE_ELEMENTS,
+ ebx,
+ edi,
+ slow);
+ AllocationSiteMode mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS,
+ FAST_DOUBLE_ELEMENTS);
+ ElementsTransitionGenerator::GenerateSmiToDouble(masm, mode, slow);
+ __ mov(ebx, FieldOperand(edx, JSObject::kElementsOffset));
+ __ jmp(&fast_double_without_map_check);
+
+ __ bind(&non_double_value);
+ // Value is not a double, FAST_SMI_ELEMENTS -> FAST_ELEMENTS
+ __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
+ FAST_ELEMENTS,
+ ebx,
+ edi,
+ slow);
+ mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_ELEMENTS);
+ ElementsTransitionGenerator::GenerateMapChangeElementsTransition(masm, mode,
+ slow);
+ __ mov(ebx, FieldOperand(edx, JSObject::kElementsOffset));
+ __ jmp(&finish_object_store);
+
+ __ bind(&transition_double_elements);
+  // Elements are FAST_DOUBLE_ELEMENTS, but the value is an Object that is
+  // not a HeapNumber. Make sure that the receiver is an Array with
+  // FAST_ELEMENTS and transition the array from FAST_DOUBLE_ELEMENTS to
+  // FAST_ELEMENTS.
+ __ mov(ebx, FieldOperand(edx, HeapObject::kMapOffset));
+ __ LoadTransitionedArrayMapConditional(FAST_DOUBLE_ELEMENTS,
+ FAST_ELEMENTS,
+ ebx,
+ edi,
+ slow);
+ mode = AllocationSite::GetMode(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS);
+ ElementsTransitionGenerator::GenerateDoubleToObject(masm, mode, slow);
+ __ mov(ebx, FieldOperand(edx, JSObject::kElementsOffset));
+ __ jmp(&finish_object_store);
+}
+
+
+void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
+ StrictMode strict_mode) {
+ // ----------- S t a t e -------------
+ // -- eax : value
+ // -- ecx : key
+ // -- edx : receiver
+ // -- esp[0] : return address
+ // -----------------------------------
+ Label slow, fast_object, fast_object_grow;
+ Label fast_double, fast_double_grow;
+ Label array, extra, check_if_double_array;
+
+ // Check that the object isn't a smi.
+ __ JumpIfSmi(edx, &slow);
+ // Get the map from the receiver.
+ __ mov(edi, FieldOperand(edx, HeapObject::kMapOffset));
+ // Check that the receiver does not require access checks and is not observed.
+ // The generic stub does not perform map checks or handle observed objects.
+ __ test_b(FieldOperand(edi, Map::kBitFieldOffset),
+ 1 << Map::kIsAccessCheckNeeded | 1 << Map::kIsObserved);
+ __ j(not_zero, &slow);
+ // Check that the key is a smi.
+ __ JumpIfNotSmi(ecx, &slow);
+ __ CmpInstanceType(edi, JS_ARRAY_TYPE);
+ __ j(equal, &array);
+ // Check that the object is some kind of JSObject.
+ __ CmpInstanceType(edi, FIRST_JS_OBJECT_TYPE);
+ __ j(below, &slow);
+
+ // Object case: Check key against length in the elements array.
+ // eax: value
+ // edx: JSObject
+ // ecx: key (a smi)
+ // edi: receiver map
+ __ mov(ebx, FieldOperand(edx, JSObject::kElementsOffset));
+ // Check array bounds. Both the key and the length of FixedArray are smis.
+ __ cmp(ecx, FieldOperand(ebx, FixedArray::kLengthOffset));
+ __ j(below, &fast_object);
+
+ // Slow case: call runtime.
+ __ bind(&slow);
+ GenerateRuntimeSetProperty(masm, strict_mode);
+
+ // Extra capacity case: Check if there is extra capacity to
+ // perform the store and update the length. Used for adding one
+ // element to the array by writing to array[array.length].
+ __ bind(&extra);
+ // eax: value
+ // edx: receiver, a JSArray
+ // ecx: key, a smi.
+ // ebx: receiver->elements, a FixedArray
+ // edi: receiver map
+  // Flags still hold the result of comparing the key against the array
+  // length (see the array case below); anything but key == length would
+  // leave a hole in the array:
+  __ j(not_equal, &slow);
+ __ cmp(ecx, FieldOperand(ebx, FixedArray::kLengthOffset));
+ __ j(above_equal, &slow);
+ __ mov(edi, FieldOperand(ebx, HeapObject::kMapOffset));
+ __ cmp(edi, masm->isolate()->factory()->fixed_array_map());
+ __ j(not_equal, &check_if_double_array);
+ __ jmp(&fast_object_grow);
+
+ __ bind(&check_if_double_array);
+ __ cmp(edi, masm->isolate()->factory()->fixed_double_array_map());
+ __ j(not_equal, &slow);
+ __ jmp(&fast_double_grow);
+
+ // Array case: Get the length and the elements array from the JS
+ // array. Check that the array is in fast mode (and writable); if it
+  // is, the length is always a smi.
+ __ bind(&array);
+ // eax: value
+ // edx: receiver, a JSArray
+ // ecx: key, a smi.
+ // edi: receiver map
+ __ mov(ebx, FieldOperand(edx, JSObject::kElementsOffset));
+
+ // Check the key against the length in the array and fall through to the
+ // common store code.
+ __ cmp(ecx, FieldOperand(edx, JSArray::kLengthOffset)); // Compare smis.
+ __ j(above_equal, &extra);
+
+ KeyedStoreGenerateGenericHelper(masm, &fast_object, &fast_double,
+ &slow, kCheckMap, kDontIncrementLength);
+ KeyedStoreGenerateGenericHelper(masm, &fast_object_grow, &fast_double_grow,
+ &slow, kDontCheckMap, kIncrementLength);
+}
+
+
+void LoadIC::GenerateMegamorphic(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- ecx : name
+ // -- edx : receiver
+ // -- esp[0] : return address
+ // -----------------------------------
+
+ // Probe the stub cache.
+ Code::Flags flags = Code::ComputeHandlerFlags(Code::LOAD_IC);
+ masm->isolate()->stub_cache()->GenerateProbe(
+ masm, flags, edx, ecx, ebx, eax);
+
+ // Cache miss: Jump to runtime.
+ GenerateMiss(masm);
+}
+
+
+void LoadIC::GenerateNormal(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- ecx : name
+ // -- edx : receiver
+ // -- esp[0] : return address
+ // -----------------------------------
+ Label miss;
+
+ GenerateNameDictionaryReceiverCheck(masm, edx, eax, ebx, &miss);
+
+ // eax: elements
+ // Search the dictionary placing the result in eax.
+ GenerateDictionaryLoad(masm, &miss, eax, ecx, edi, ebx, eax);
+ __ ret(0);
+
+ // Cache miss: Jump to runtime.
+ __ bind(&miss);
+ GenerateMiss(masm);
+}
+
+
+void LoadIC::GenerateMiss(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- ecx : name
+ // -- edx : receiver
+ // -- esp[0] : return address
+ // -----------------------------------
+
+ __ IncrementCounter(masm->isolate()->counters()->load_miss(), 1);
+
+ __ pop(ebx);
+ __ push(edx); // receiver
+ __ push(ecx); // name
+ __ push(ebx); // return address
+
+ // Perform tail call to the entry.
+ ExternalReference ref =
+ ExternalReference(IC_Utility(kLoadIC_Miss), masm->isolate());
+ __ TailCallExternalReference(ref, 2, 1);
+}
+
+
+void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- ecx : key
+ // -- edx : receiver
+ // -- esp[0] : return address
+ // -----------------------------------
+
+ __ pop(ebx);
+ __ push(edx); // receiver
+ __ push(ecx); // name
+ __ push(ebx); // return address
+
+ // Perform tail call to the entry.
+ __ TailCallRuntime(Runtime::kGetProperty, 2, 1);
+}
+
+
+void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- ecx : key
+ // -- edx : receiver
+ // -- esp[0] : return address
+ // -----------------------------------
+
+ __ IncrementCounter(masm->isolate()->counters()->keyed_load_miss(), 1);
+
+ __ pop(ebx);
+ __ push(edx); // receiver
+ __ push(ecx); // name
+ __ push(ebx); // return address
+
+ // Perform tail call to the entry.
+ ExternalReference ref =
+ ExternalReference(IC_Utility(kKeyedLoadIC_Miss), masm->isolate());
+ __ TailCallExternalReference(ref, 2, 1);
+}
+
+
+void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- ecx : key
+ // -- edx : receiver
+ // -- esp[0] : return address
+ // -----------------------------------
+
+ __ pop(ebx);
+ __ push(edx); // receiver
+ __ push(ecx); // name
+ __ push(ebx); // return address
+
+ // Perform tail call to the entry.
+ __ TailCallRuntime(Runtime::kKeyedGetProperty, 2, 1);
+}
+
+
+void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- eax : value
+ // -- ecx : name
+ // -- edx : receiver
+ // -- esp[0] : return address
+ // -----------------------------------
+ Code::Flags flags = Code::ComputeHandlerFlags(Code::STORE_IC);
+ masm->isolate()->stub_cache()->GenerateProbe(
+ masm, flags, edx, ecx, ebx, no_reg);
+
+ // Cache miss: Jump to runtime.
+ GenerateMiss(masm);
+}
+
+
+void StoreIC::GenerateMiss(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- eax : value
+ // -- ecx : name
+ // -- edx : receiver
+ // -- esp[0] : return address
+ // -----------------------------------
+
+ __ pop(ebx);
+ __ push(edx);
+ __ push(ecx);
+ __ push(eax);
+ __ push(ebx);
+
+ // Perform tail call to the entry.
+ ExternalReference ref =
+ ExternalReference(IC_Utility(kStoreIC_Miss), masm->isolate());
+ __ TailCallExternalReference(ref, 3, 1);
+}
+
+
+void StoreIC::GenerateNormal(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- eax : value
+ // -- ecx : name
+ // -- edx : receiver
+ // -- esp[0] : return address
+ // -----------------------------------
+
+ Label miss, restore_miss;
+
+ GenerateNameDictionaryReceiverCheck(masm, edx, ebx, edi, &miss);
+
+ // A lot of registers are needed for storing to slow case
+ // objects. Push and restore receiver but rely on
+ // GenerateDictionaryStore preserving the value and name.
+ __ push(edx);
+ GenerateDictionaryStore(masm, &restore_miss, ebx, ecx, eax, edx, edi);
+ __ Drop(1);
+ Counters* counters = masm->isolate()->counters();
+ __ IncrementCounter(counters->store_normal_hit(), 1);
+ __ ret(0);
+
+ __ bind(&restore_miss);
+ __ pop(edx);
+
+ __ bind(&miss);
+ __ IncrementCounter(counters->store_normal_miss(), 1);
+ GenerateMiss(masm);
+}
+
+
+void StoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm,
+ StrictMode strict_mode) {
+ // ----------- S t a t e -------------
+ // -- eax : value
+ // -- ecx : name
+ // -- edx : receiver
+ // -- esp[0] : return address
+ // -----------------------------------
+ __ pop(ebx);
+ __ push(edx);
+ __ push(ecx);
+ __ push(eax);
+ __ push(Immediate(Smi::FromInt(NONE))); // PropertyAttributes
+ __ push(Immediate(Smi::FromInt(strict_mode)));
+ __ push(ebx); // return address
+
+ // Do tail-call to runtime routine.
+ __ TailCallRuntime(Runtime::kSetProperty, 5, 1);
+}
+
+
+void KeyedStoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm,
+ StrictMode strict_mode) {
+ // ----------- S t a t e -------------
+ // -- eax : value
+ // -- ecx : key
+ // -- edx : receiver
+ // -- esp[0] : return address
+ // -----------------------------------
+
+ __ pop(ebx);
+ __ push(edx);
+ __ push(ecx);
+ __ push(eax);
+ __ push(Immediate(Smi::FromInt(NONE))); // PropertyAttributes
+ __ push(Immediate(Smi::FromInt(strict_mode))); // Strict mode.
+ __ push(ebx); // return address
+
+ // Do tail-call to runtime routine.
+ __ TailCallRuntime(Runtime::kSetProperty, 5, 1);
+}
+
+
+void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- eax : value
+ // -- ecx : key
+ // -- edx : receiver
+ // -- esp[0] : return address
+ // -----------------------------------
+
+ __ pop(ebx);
+ __ push(edx);
+ __ push(ecx);
+ __ push(eax);
+ __ push(ebx);
+
+ // Do tail-call to runtime routine.
+ ExternalReference ref =
+ ExternalReference(IC_Utility(kKeyedStoreIC_Miss), masm->isolate());
+ __ TailCallExternalReference(ref, 3, 1);
+}
+
+
+void StoreIC::GenerateSlow(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- eax : value
+ // -- ecx : key
+ // -- edx : receiver
+ // -- esp[0] : return address
+ // -----------------------------------
+
+ __ pop(ebx);
+ __ push(edx);
+ __ push(ecx);
+ __ push(eax);
+ __ push(ebx); // return address
+
+ // Do tail-call to runtime routine.
+ ExternalReference ref(IC_Utility(kStoreIC_Slow), masm->isolate());
+ __ TailCallExternalReference(ref, 3, 1);
+}
+
+
+void KeyedStoreIC::GenerateSlow(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- eax : value
+ // -- ecx : key
+ // -- edx : receiver
+ // -- esp[0] : return address
+ // -----------------------------------
+
+ __ pop(ebx);
+ __ push(edx);
+ __ push(ecx);
+ __ push(eax);
+ __ push(ebx); // return address
+
+ // Do tail-call to runtime routine.
+ ExternalReference ref(IC_Utility(kKeyedStoreIC_Slow), masm->isolate());
+ __ TailCallExternalReference(ref, 3, 1);
+}
+
+
+#undef __
+
+
+Condition CompareIC::ComputeCondition(Token::Value op) {
+ switch (op) {
+ case Token::EQ_STRICT:
+ case Token::EQ:
+ return equal;
+ case Token::LT:
+ return less;
+ case Token::GT:
+ return greater;
+ case Token::LTE:
+ return less_equal;
+ case Token::GTE:
+ return greater_equal;
+ default:
+ UNREACHABLE();
+ return no_condition;
+ }
+}
+
+
+bool CompareIC::HasInlinedSmiCode(Address address) {
+ // The address of the instruction following the call.
+ Address test_instruction_address =
+ address + Assembler::kCallTargetAddressOffset;
+
+ // If the instruction following the call is not a test al, nothing
+ // was inlined.
+ return *test_instruction_address == Assembler::kTestAlByte;
+}
+
+
+void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) {
+ // The address of the instruction following the call.
+ Address test_instruction_address =
+ address + Assembler::kCallTargetAddressOffset;
+
+ // If the instruction following the call is not a test al, nothing
+ // was inlined.
+ if (*test_instruction_address != Assembler::kTestAlByte) {
+ ASSERT(*test_instruction_address == Assembler::kNopByte);
+ return;
+ }
+
+ Address delta_address = test_instruction_address + 1;
+  // The delta back to the short jump that gets patched; it is encoded as the
+  // 8-bit immediate of the test instruction that follows the IC call.
+ uint8_t delta = *reinterpret_cast<uint8_t*>(delta_address);
+ if (FLAG_trace_ic) {
+ PrintF("[ patching ic at %p, test=%p, delta=%d\n",
+ address, test_instruction_address, delta);
+ }
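+
+  // The patch site has the form
+  //   jc/jnc <target>      ;; short jump, becomes jz/jnz once enabled
+  //   ...
+  //   call <CompareIC>
+  //   test al, <delta>     ;; delta reaches back to the short jump above
+  // so only the condition byte of that short jump has to be rewritten.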
+
+ // Patch with a short conditional jump. Enabling means switching from a short
+ // jump-if-carry/not-carry to jump-if-zero/not-zero, whereas disabling is the
+ // reverse operation of that.
+ Address jmp_address = test_instruction_address - delta;
+ ASSERT((check == ENABLE_INLINED_SMI_CHECK)
+ ? (*jmp_address == Assembler::kJncShortOpcode ||
+ *jmp_address == Assembler::kJcShortOpcode)
+ : (*jmp_address == Assembler::kJnzShortOpcode ||
+ *jmp_address == Assembler::kJzShortOpcode));
+ Condition cc = (check == ENABLE_INLINED_SMI_CHECK)
+ ? (*jmp_address == Assembler::kJncShortOpcode ? not_zero : zero)
+ : (*jmp_address == Assembler::kJnzShortOpcode ? not_carry : carry);
+ *jmp_address = static_cast<byte>(Assembler::kJccShortPrefix | cc);
+}
+
+
+} } // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_X87
--- /dev/null
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "v8.h"
+
+#if V8_TARGET_ARCH_X87
+
+#include "x87/lithium-codegen-x87.h"
+#include "ic.h"
+#include "code-stubs.h"
+#include "deoptimizer.h"
+#include "stub-cache.h"
+#include "codegen.h"
+#include "hydrogen-osr.h"
+
+namespace v8 {
+namespace internal {
+
+
+// When invoking builtins, we need to record the safepoint in the middle of
+// the invoke instruction sequence generated by the macro assembler.
+class SafepointGenerator V8_FINAL : public CallWrapper {
+ public:
+ SafepointGenerator(LCodeGen* codegen,
+ LPointerMap* pointers,
+ Safepoint::DeoptMode mode)
+ : codegen_(codegen),
+ pointers_(pointers),
+ deopt_mode_(mode) {}
+ virtual ~SafepointGenerator() {}
+
+ virtual void BeforeCall(int call_size) const V8_OVERRIDE {}
+
+ virtual void AfterCall() const V8_OVERRIDE {
+ codegen_->RecordSafepoint(pointers_, deopt_mode_);
+ }
+
+ private:
+ LCodeGen* codegen_;
+ LPointerMap* pointers_;
+ Safepoint::DeoptMode deopt_mode_;
+};
+
+
+#define __ masm()->
+
+bool LCodeGen::GenerateCode() {
+ LPhase phase("Z_Code generation", chunk());
+ ASSERT(is_unused());
+ status_ = GENERATING;
+
+ // Open a frame scope to indicate that there is a frame on the stack. The
+ // MANUAL indicates that the scope shouldn't actually generate code to set up
+ // the frame (that is done in GeneratePrologue).
+ FrameScope frame_scope(masm_, StackFrame::MANUAL);
+
+ support_aligned_spilled_doubles_ = info()->IsOptimizing();
+
+ dynamic_frame_alignment_ = info()->IsOptimizing() &&
+ ((chunk()->num_double_slots() > 2 &&
+ !chunk()->graph()->is_recursive()) ||
+ !info()->osr_ast_id().IsNone());
+
+ return GeneratePrologue() &&
+ GenerateBody() &&
+ GenerateDeferredCode() &&
+ GenerateJumpTable() &&
+ GenerateSafepointTable();
+}
+
+
+void LCodeGen::FinishCode(Handle<Code> code) {
+ ASSERT(is_done());
+ code->set_stack_slots(GetStackSlotCount());
+ code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
+ if (code->is_optimized_code()) RegisterWeakObjectsInOptimizedCode(code);
+ PopulateDeoptimizationData(code);
+ if (!info()->IsStub()) {
+ Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(code);
+ }
+}
+
+
+#ifdef _MSC_VER
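+// Windows grows the stack one guard page at a time, so a frame allocated
+// with a single large sub can skip the guard page and fault. Touch every
+// page of the reserved area so it is mapped before it is stored to.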
+void LCodeGen::MakeSureStackPagesMapped(int offset) {
+ const int kPageSize = 4 * KB;
+ for (offset -= kPageSize; offset > 0; offset -= kPageSize) {
+ __ mov(Operand(esp, offset), eax);
+ }
+}
+#endif
+
+
+bool LCodeGen::GeneratePrologue() {
+ ASSERT(is_generating());
+
+ if (info()->IsOptimizing()) {
+ ProfileEntryHookStub::MaybeCallEntryHook(masm_);
+
+#ifdef DEBUG
+ if (strlen(FLAG_stop_at) > 0 &&
+ info_->function()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) {
+ __ int3();
+ }
+#endif
+
+ // Sloppy mode functions and builtins need to replace the receiver with the
+ // global proxy when called as functions (without an explicit receiver
+ // object).
+ if (info_->this_has_uses() &&
+ info_->strict_mode() == SLOPPY &&
+ !info_->is_native()) {
+ Label ok;
+ // +1 for return address.
+ int receiver_offset = (scope()->num_parameters() + 1) * kPointerSize;
+ __ mov(ecx, Operand(esp, receiver_offset));
+
+ __ cmp(ecx, isolate()->factory()->undefined_value());
+ __ j(not_equal, &ok, Label::kNear);
+
+ __ mov(ecx, GlobalObjectOperand());
+ __ mov(ecx, FieldOperand(ecx, GlobalObject::kGlobalReceiverOffset));
+
+ __ mov(Operand(esp, receiver_offset), ecx);
+
+ __ bind(&ok);
+ }
+
+ if (support_aligned_spilled_doubles_ && dynamic_frame_alignment_) {
+ // Move state of dynamic frame alignment into edx.
+ __ Move(edx, Immediate(kNoAlignmentPadding));
+
+ Label do_not_pad, align_loop;
+ STATIC_ASSERT(kDoubleSize == 2 * kPointerSize);
+ // Align esp + 4 to a multiple of 2 * kPointerSize.
+ __ test(esp, Immediate(kPointerSize));
+ __ j(not_zero, &do_not_pad, Label::kNear);
+ __ push(Immediate(0));
+ __ mov(ebx, esp);
+ __ mov(edx, Immediate(kAlignmentPaddingPushed));
+ // Copy arguments, receiver, and return address.
+ __ mov(ecx, Immediate(scope()->num_parameters() + 2));
+
+ __ bind(&align_loop);
+ __ mov(eax, Operand(ebx, 1 * kPointerSize));
+ __ mov(Operand(ebx, 0), eax);
+ __ add(Operand(ebx), Immediate(kPointerSize));
+ __ dec(ecx);
+ __ j(not_zero, &align_loop, Label::kNear);
+ __ mov(Operand(ebx, 0), Immediate(kAlignmentZapValue));
+ __ bind(&do_not_pad);
+ }
+ }
+
+ info()->set_prologue_offset(masm_->pc_offset());
+ if (NeedsEagerFrame()) {
+ ASSERT(!frame_is_built_);
+ frame_is_built_ = true;
+ if (info()->IsStub()) {
+ __ StubPrologue();
+ } else {
+ __ Prologue(info()->IsCodePreAgingActive());
+ }
+ info()->AddNoFrameRange(0, masm_->pc_offset());
+ }
+
+ if (info()->IsOptimizing() &&
+ dynamic_frame_alignment_ &&
+ FLAG_debug_code) {
+ __ test(esp, Immediate(kPointerSize));
+ __ Assert(zero, kFrameIsExpectedToBeAligned);
+ }
+
+ // Reserve space for the stack slots needed by the code.
+ int slots = GetStackSlotCount();
+ ASSERT(slots != 0 || !info()->IsOptimizing());
+ if (slots > 0) {
+ if (slots == 1) {
+ if (dynamic_frame_alignment_) {
+ __ push(edx);
+ } else {
+ __ push(Immediate(kNoAlignmentPadding));
+ }
+ } else {
+ if (FLAG_debug_code) {
+ __ sub(Operand(esp), Immediate(slots * kPointerSize));
+#ifdef _MSC_VER
+ MakeSureStackPagesMapped(slots * kPointerSize);
+#endif
+ __ push(eax);
+ __ mov(Operand(eax), Immediate(slots));
+ Label loop;
+ __ bind(&loop);
+ __ mov(MemOperand(esp, eax, times_4, 0),
+ Immediate(kSlotsZapValue));
+ __ dec(eax);
+ __ j(not_zero, &loop);
+ __ pop(eax);
+ } else {
+ __ sub(Operand(esp), Immediate(slots * kPointerSize));
+#ifdef _MSC_VER
+ MakeSureStackPagesMapped(slots * kPointerSize);
+#endif
+ }
+
+ if (support_aligned_spilled_doubles_) {
+ Comment(";;; Store dynamic frame alignment tag for spilled doubles");
+ // Store dynamic frame alignment state in the first local.
+ int offset = JavaScriptFrameConstants::kDynamicAlignmentStateOffset;
+ if (dynamic_frame_alignment_) {
+ __ mov(Operand(ebp, offset), edx);
+ } else {
+ __ mov(Operand(ebp, offset), Immediate(kNoAlignmentPadding));
+ }
+ }
+ }
+ }
+
+ // Possibly allocate a local context.
+ int heap_slots = info_->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
+ if (heap_slots > 0) {
+ Comment(";;; Allocate local context");
+ // Argument to NewContext is the function, which is still in edi.
+ if (heap_slots <= FastNewContextStub::kMaximumSlots) {
+ FastNewContextStub stub(isolate(), heap_slots);
+ __ CallStub(&stub);
+ } else {
+ __ push(edi);
+ __ CallRuntime(Runtime::kHiddenNewFunctionContext, 1);
+ }
+ RecordSafepoint(Safepoint::kNoLazyDeopt);
+ // Context is returned in eax. It replaces the context passed to us.
+ // It's saved in the stack and kept live in esi.
+ __ mov(esi, eax);
+ __ mov(Operand(ebp, StandardFrameConstants::kContextOffset), eax);
+
+ // Copy parameters into context if necessary.
+ int num_parameters = scope()->num_parameters();
+ for (int i = 0; i < num_parameters; i++) {
+ Variable* var = scope()->parameter(i);
+ if (var->IsContextSlot()) {
+ int parameter_offset = StandardFrameConstants::kCallerSPOffset +
+ (num_parameters - 1 - i) * kPointerSize;
+ // Load parameter from stack.
+ __ mov(eax, Operand(ebp, parameter_offset));
+ // Store it in the context.
+ int context_offset = Context::SlotOffset(var->index());
+ __ mov(Operand(esi, context_offset), eax);
+ // Update the write barrier. This clobbers eax and ebx.
+ __ RecordWriteContextSlot(esi,
+ context_offset,
+ eax,
+ ebx);
+ }
+ }
+ Comment(";;; End allocate local context");
+ }
+
+ // Trace the call.
+ if (FLAG_trace && info()->IsOptimizing()) {
+ // We have not executed any compiled code yet, so esi still holds the
+ // incoming context.
+ __ CallRuntime(Runtime::kTraceEnter, 0);
+ }
+ return !is_aborted();
+}
+
+
+void LCodeGen::GenerateOsrPrologue() {
+ // Generate the OSR entry prologue at the first unknown OSR value, or if there
+ // are none, at the OSR entrypoint instruction.
+ if (osr_pc_offset_ >= 0) return;
+
+ osr_pc_offset_ = masm()->pc_offset();
+
+ // Move state of dynamic frame alignment into edx.
+ __ Move(edx, Immediate(kNoAlignmentPadding));
+
+ if (support_aligned_spilled_doubles_ && dynamic_frame_alignment_) {
+ Label do_not_pad, align_loop;
+ // Align ebp + 4 to a multiple of 2 * kPointerSize.
+ __ test(ebp, Immediate(kPointerSize));
+ __ j(zero, &do_not_pad, Label::kNear);
+ __ push(Immediate(0));
+ __ mov(ebx, esp);
+ __ mov(edx, Immediate(kAlignmentPaddingPushed));
+
+ // Move all parts of the frame over one word. The frame consists of:
+ // unoptimized frame slots, alignment state, context, frame pointer, return
+ // address, receiver, and the arguments.
+ __ mov(ecx, Immediate(scope()->num_parameters() +
+ 5 + graph()->osr()->UnoptimizedFrameSlots()));
+
+ __ bind(&align_loop);
+ __ mov(eax, Operand(ebx, 1 * kPointerSize));
+ __ mov(Operand(ebx, 0), eax);
+ __ add(Operand(ebx), Immediate(kPointerSize));
+ __ dec(ecx);
+ __ j(not_zero, &align_loop, Label::kNear);
+ __ mov(Operand(ebx, 0), Immediate(kAlignmentZapValue));
+ __ sub(Operand(ebp), Immediate(kPointerSize));
+ __ bind(&do_not_pad);
+ }
+
+ // Save the first local, which is overwritten by the alignment state.
+ Operand alignment_loc = MemOperand(ebp, -3 * kPointerSize);
+ __ push(alignment_loc);
+
+ // Set the dynamic frame alignment state.
+ __ mov(alignment_loc, edx);
+
+ // Adjust the frame size, subsuming the unoptimized frame into the
+ // optimized frame.
+ int slots = GetStackSlotCount() - graph()->osr()->UnoptimizedFrameSlots();
+ ASSERT(slots >= 1);
+ __ sub(esp, Immediate((slots - 1) * kPointerSize));
+}
+
+
+void LCodeGen::GenerateBodyInstructionPre(LInstruction* instr) {
+ if (instr->IsCall()) {
+ EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
+ }
+ if (!instr->IsLazyBailout() && !instr->IsGap()) {
+ safepoints_.BumpLastLazySafepointIndex();
+ }
+ FlushX87StackIfNecessary(instr);
+}
+
+
+void LCodeGen::GenerateBodyInstructionPost(LInstruction* instr) {
+ if (instr->IsGoto()) {
+ x87_stack_.LeavingBlock(current_block_, LGoto::cast(instr));
+ } else if (FLAG_debug_code && FLAG_enable_slow_asserts &&
+ !instr->IsGap() && !instr->IsReturn()) {
+ if (instr->ClobbersDoubleRegisters(isolate())) {
+ if (instr->HasDoubleRegisterResult()) {
+ ASSERT_EQ(1, x87_stack_.depth());
+ } else {
+ ASSERT_EQ(0, x87_stack_.depth());
+ }
+ }
+ __ VerifyX87StackDepth(x87_stack_.depth());
+ }
+}
+
+
+bool LCodeGen::GenerateJumpTable() {
+ Label needs_frame;
+ if (jump_table_.length() > 0) {
+ Comment(";;; -------------------- Jump table --------------------");
+ }
+ for (int i = 0; i < jump_table_.length(); i++) {
+ __ bind(&jump_table_[i].label);
+ Address entry = jump_table_[i].address;
+ Deoptimizer::BailoutType type = jump_table_[i].bailout_type;
+ int id = Deoptimizer::GetDeoptimizationId(isolate(), entry, type);
+ if (id == Deoptimizer::kNotDeoptimizationEntry) {
+ Comment(";;; jump table entry %d.", i);
+ } else {
+ Comment(";;; jump table entry %d: deoptimization bailout %d.", i, id);
+ }
+ if (jump_table_[i].needs_frame) {
+ ASSERT(!info()->saves_caller_doubles());
+ __ push(Immediate(ExternalReference::ForDeoptEntry(entry)));
+ if (needs_frame.is_bound()) {
+ __ jmp(&needs_frame);
+ } else {
+ __ bind(&needs_frame);
+ __ push(MemOperand(ebp, StandardFrameConstants::kContextOffset));
+ // This variant of deopt can only be used with stubs. Since we don't
+ // have a function pointer to install in the stack frame that we're
+ // building, install a special marker there instead.
+ ASSERT(info()->IsStub());
+ __ push(Immediate(Smi::FromInt(StackFrame::STUB)));
+ // Push a PC inside the function so that the deopt code can find where
+ // the deopt comes from. It doesn't have to be the precise return
+ // address of a "calling" LAZY deopt, it only has to be somewhere
+ // inside the code body.
+ Label push_approx_pc;
+ __ call(&push_approx_pc);
+ __ bind(&push_approx_pc);
+ // Push the continuation which was stashed where the ebp should
+ // be. Replace it with the saved ebp.
+ __ push(MemOperand(esp, 3 * kPointerSize));
+ __ mov(MemOperand(esp, 4 * kPointerSize), ebp);
+ __ lea(ebp, MemOperand(esp, 4 * kPointerSize));
+ __ ret(0); // Call the continuation without clobbering registers.
+ }
+ } else {
+ __ call(entry, RelocInfo::RUNTIME_ENTRY);
+ }
+ }
+ return !is_aborted();
+}
+
+
+bool LCodeGen::GenerateDeferredCode() {
+ ASSERT(is_generating());
+ if (deferred_.length() > 0) {
+ for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
+ LDeferredCode* code = deferred_[i];
+ X87Stack copy(code->x87_stack());
+ x87_stack_ = copy;
+
+ HValue* value =
+ instructions_->at(code->instruction_index())->hydrogen_value();
+ RecordAndWritePosition(
+ chunk()->graph()->SourcePositionToScriptPosition(value->position()));
+
+ Comment(";;; <@%d,#%d> "
+ "-------------------- Deferred %s --------------------",
+ code->instruction_index(),
+ code->instr()->hydrogen_value()->id(),
+ code->instr()->Mnemonic());
+ __ bind(code->entry());
+ if (NeedsDeferredFrame()) {
+ Comment(";;; Build frame");
+ ASSERT(!frame_is_built_);
+ ASSERT(info()->IsStub());
+ frame_is_built_ = true;
+ // Build the frame in such a way that esi isn't trashed.
+ __ push(ebp); // Caller's frame pointer.
+ __ push(Operand(ebp, StandardFrameConstants::kContextOffset));
+ __ push(Immediate(Smi::FromInt(StackFrame::STUB)));
+ __ lea(ebp, Operand(esp, 2 * kPointerSize));
+ Comment(";;; Deferred code");
+ }
+ code->Generate();
+ if (NeedsDeferredFrame()) {
+ __ bind(code->done());
+ Comment(";;; Destroy frame");
+ ASSERT(frame_is_built_);
+ frame_is_built_ = false;
+ __ mov(esp, ebp);
+ __ pop(ebp);
+ }
+ __ jmp(code->exit());
+ }
+ }
+
+ // Deferred code is the last part of the instruction sequence. Mark
+ // the generated code as done unless we bailed out.
+ if (!is_aborted()) status_ = DONE;
+ return !is_aborted();
+}
+
+
+bool LCodeGen::GenerateSafepointTable() {
+ ASSERT(is_done());
+ if (!info()->IsStub()) {
+ // For lazy deoptimization we need space to patch a call after every call.
+ // Ensure there is always space for such patching, even if the code ends
+ // in a call.
+ int target_offset = masm()->pc_offset() + Deoptimizer::patch_size();
+ while (masm()->pc_offset() < target_offset) {
+ masm()->nop();
+ }
+ }
+ safepoints_.Emit(masm(), GetStackSlotCount());
+ return !is_aborted();
+}
+
+
+Register LCodeGen::ToRegister(int index) const {
+ return Register::FromAllocationIndex(index);
+}
+
+
+X87Register LCodeGen::ToX87Register(int index) const {
+ return X87Register::FromAllocationIndex(index);
+}
+
+
+void LCodeGen::X87LoadForUsage(X87Register reg) {
+ ASSERT(x87_stack_.Contains(reg));
+ x87_stack_.Fxch(reg);
+ x87_stack_.pop();
+}
+
+
+void LCodeGen::X87LoadForUsage(X87Register reg1, X87Register reg2) {
+ ASSERT(x87_stack_.Contains(reg1));
+ ASSERT(x87_stack_.Contains(reg2));
+ x87_stack_.Fxch(reg1, 1);
+ x87_stack_.Fxch(reg2);
+ x87_stack_.pop();
+ x87_stack_.pop();
+}
+
+
+void LCodeGen::X87Stack::Fxch(X87Register reg, int other_slot) {
+ ASSERT(is_mutable_);
+ ASSERT(Contains(reg) && stack_depth_ > other_slot);
+ int i = ArrayIndex(reg);
+ int st = st2idx(i);
+ if (st != other_slot) {
+ int other_i = st2idx(other_slot);
+ X87Register other = stack_[other_i];
+ stack_[other_i] = reg;
+ stack_[i] = other;
+ if (st == 0) {
+ __ fxch(other_slot);
+ } else if (other_slot == 0) {
+ __ fxch(st);
+ } else {
+ __ fxch(st);
+ __ fxch(other_slot);
+ __ fxch(st);
+ }
+ }
+}
+
+
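+ // stack_[0] is the bottom of the virtual stack and
+ // stack_[stack_depth_ - 1] mirrors st(0), so st2idx converts between an
+ // array index and its x87 stack position (the mapping is its own inverse).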
+int LCodeGen::X87Stack::st2idx(int pos) {
+ return stack_depth_ - pos - 1;
+}
+
+
+int LCodeGen::X87Stack::ArrayIndex(X87Register reg) {
+ for (int i = 0; i < stack_depth_; i++) {
+ if (stack_[i].is(reg)) return i;
+ }
+ UNREACHABLE();
+ return -1;
+}
+
+
+bool LCodeGen::X87Stack::Contains(X87Register reg) {
+ for (int i = 0; i < stack_depth_; i++) {
+ if (stack_[i].is(reg)) return true;
+ }
+ return false;
+}
+
+
+void LCodeGen::X87Stack::Free(X87Register reg) {
+ ASSERT(is_mutable_);
+ ASSERT(Contains(reg));
+ int i = ArrayIndex(reg);
+ int st = st2idx(i);
+ if (st > 0) {
+ // keep track of how fstp(i) changes the order of elements
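+ // fstp(st) stores st(0) into st and pops, so the value that was on top of
+ // the physical stack ends up in the freed slot; mirror that here.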
+ int tos_i = st2idx(0);
+ stack_[i] = stack_[tos_i];
+ }
+ pop();
+ __ fstp(st);
+}
+
+
+void LCodeGen::X87Mov(X87Register dst, Operand src, X87OperandType opts) {
+ if (x87_stack_.Contains(dst)) {
+ x87_stack_.Fxch(dst);
+ __ fstp(0);
+ } else {
+ x87_stack_.push(dst);
+ }
+ X87Fld(src, opts);
+}
+
+
+void LCodeGen::X87Fld(Operand src, X87OperandType opts) {
+ ASSERT(!src.is_reg_only());
+ switch (opts) {
+ case kX87DoubleOperand:
+ __ fld_d(src);
+ break;
+ case kX87FloatOperand:
+ __ fld_s(src);
+ break;
+ case kX87IntOperand:
+ __ fild_s(src);
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
+
+
+void LCodeGen::X87Mov(Operand dst, X87Register src, X87OperandType opts) {
+ ASSERT(!dst.is_reg_only());
+ x87_stack_.Fxch(src);
+ switch (opts) {
+ case kX87DoubleOperand:
+ __ fst_d(dst);
+ break;
+ case kX87IntOperand:
+ __ fist_s(dst);
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
+
+
+void LCodeGen::X87Stack::PrepareToWrite(X87Register reg) {
+ ASSERT(is_mutable_);
+ if (Contains(reg)) {
+ Free(reg);
+ }
+ // Mark this register as the next register to write to
+ stack_[stack_depth_] = reg;
+}
+
+
+void LCodeGen::X87Stack::CommitWrite(X87Register reg) {
+ ASSERT(is_mutable_);
+ // Assert the reg is prepared to write, but not on the virtual stack yet
+ ASSERT(!Contains(reg) && stack_[stack_depth_].is(reg) &&
+ stack_depth_ < X87Register::kMaxNumAllocatableRegisters);
+ stack_depth_++;
+}
+
+
+void LCodeGen::X87PrepareBinaryOp(
+ X87Register left, X87Register right, X87Register result) {
+ // You need to use DefineSameAsFirst for x87 instructions
+ ASSERT(result.is(left));
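+ // After the two exchanges below, left (aliasing result) is in st(0) and
+ // right is in st(1), the layout the binary instruction emitters rely on.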
+ x87_stack_.Fxch(right, 1);
+ x87_stack_.Fxch(left);
+}
+
+
+void LCodeGen::X87Stack::FlushIfNecessary(LInstruction* instr, LCodeGen* cgen) {
+ if (stack_depth_ > 0 && instr->ClobbersDoubleRegisters(isolate())) {
+ bool double_inputs = instr->HasDoubleRegisterInput();
+
+ // Flush the stack from tos down, since Free() moves the current tos value
+ // into the freed slot.
+ for (int i = stack_depth_-1; i >= 0; i--) {
+ X87Register reg = stack_[i];
+ // Skip registers which contain the inputs for the next instruction
+ // when flushing the stack
+ if (double_inputs && instr->IsDoubleInput(reg, cgen)) {
+ continue;
+ }
+ Free(reg);
+ if (i < stack_depth_-1) i++;
+ }
+ }
+ if (instr->IsReturn()) {
+ while (stack_depth_ > 0) {
+ __ fstp(0);
+ stack_depth_--;
+ }
+ if (FLAG_debug_code && FLAG_enable_slow_asserts) __ VerifyX87StackDepth(0);
+ }
+}
+
+
+void LCodeGen::X87Stack::LeavingBlock(int current_block_id, LGoto* goto_instr) {
+ ASSERT(stack_depth_ <= 1);
+ // If this is ever used for new stubs producing two pairs of doubles joined
+ // into two phis, this assert will hit. That situation is not handled, since
+ // the two stacks might have st0 and st1 swapped.
+ if (current_block_id + 1 != goto_instr->block_id()) {
+ // If we have a value on the x87 stack on leaving a block, it must be a
+ // phi input. If the next block we compile is not the join block, we have
+ // to discard the stack state.
+ stack_depth_ = 0;
+ }
+}
+
+
+void LCodeGen::EmitFlushX87ForDeopt() {
+ // The deoptimizer does not support X87 registers. But as long as we
+ // deopt from a stub it's not a problem, since we will re-materialize the
+ // original stub inputs, which can't be double registers.
+ ASSERT(info()->IsStub());
+ if (FLAG_debug_code && FLAG_enable_slow_asserts) {
+ __ pushfd();
+ __ VerifyX87StackDepth(x87_stack_.depth());
+ __ popfd();
+ }
+ for (int i = 0; i < x87_stack_.depth(); i++) __ fstp(0);
+}
+
+
+Register LCodeGen::ToRegister(LOperand* op) const {
+ ASSERT(op->IsRegister());
+ return ToRegister(op->index());
+}
+
+
+X87Register LCodeGen::ToX87Register(LOperand* op) const {
+ ASSERT(op->IsDoubleRegister());
+ return ToX87Register(op->index());
+}
+
+
+int32_t LCodeGen::ToInteger32(LConstantOperand* op) const {
+ return ToRepresentation(op, Representation::Integer32());
+}
+
+
+int32_t LCodeGen::ToRepresentation(LConstantOperand* op,
+ const Representation& r) const {
+ HConstant* constant = chunk_->LookupConstant(op);
+ int32_t value = constant->Integer32Value();
+ if (r.IsInteger32()) return value;
+ ASSERT(r.IsSmiOrTagged());
+ return reinterpret_cast<int32_t>(Smi::FromInt(value));
+}
+
+
+Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
+ HConstant* constant = chunk_->LookupConstant(op);
+ ASSERT(chunk_->LookupLiteralRepresentation(op).IsSmiOrTagged());
+ return constant->handle(isolate());
+}
+
+
+double LCodeGen::ToDouble(LConstantOperand* op) const {
+ HConstant* constant = chunk_->LookupConstant(op);
+ ASSERT(constant->HasDoubleValue());
+ return constant->DoubleValue();
+}
+
+
+ExternalReference LCodeGen::ToExternalReference(LConstantOperand* op) const {
+ HConstant* constant = chunk_->LookupConstant(op);
+ ASSERT(constant->HasExternalReferenceValue());
+ return constant->ExternalReferenceValue();
+}
+
+
+bool LCodeGen::IsInteger32(LConstantOperand* op) const {
+ return chunk_->LookupLiteralRepresentation(op).IsSmiOrInteger32();
+}
+
+
+bool LCodeGen::IsSmi(LConstantOperand* op) const {
+ return chunk_->LookupLiteralRepresentation(op).IsSmi();
+}
+
+
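+ // Negative operand indices denote incoming parameters; without an eager
+ // frame they live directly above the return address, hence the
+ // kPCOnStackSize offset from esp.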
+static int ArgumentsOffsetWithoutFrame(int index) {
+ ASSERT(index < 0);
+ return -(index + 1) * kPointerSize + kPCOnStackSize;
+}
+
+
+Operand LCodeGen::ToOperand(LOperand* op) const {
+ if (op->IsRegister()) return Operand(ToRegister(op));
+ ASSERT(!op->IsDoubleRegister());
+ ASSERT(op->IsStackSlot() || op->IsDoubleStackSlot());
+ if (NeedsEagerFrame()) {
+ return Operand(ebp, StackSlotOffset(op->index()));
+ } else {
+ // Retrieve the parameter relative to the stack pointer, since no eager
+ // frame has been built.
+ return Operand(esp, ArgumentsOffsetWithoutFrame(op->index()));
+ }
+}
+
+
+Operand LCodeGen::HighOperand(LOperand* op) {
+ ASSERT(op->IsDoubleStackSlot());
+ if (NeedsEagerFrame()) {
+ return Operand(ebp, StackSlotOffset(op->index()) + kPointerSize);
+ } else {
+ // Retrieve the parameter relative to the stack pointer, since no eager
+ // frame has been built.
+ return Operand(
+ esp, ArgumentsOffsetWithoutFrame(op->index()) + kPointerSize);
+ }
+}
+
+
+void LCodeGen::WriteTranslation(LEnvironment* environment,
+ Translation* translation) {
+ if (environment == NULL) return;
+
+ // The translation includes one command per value in the environment.
+ int translation_size = environment->translation_size();
+ // The output frame height does not include the parameters.
+ int height = translation_size - environment->parameter_count();
+
+ WriteTranslation(environment->outer(), translation);
+ bool has_closure_id = !info()->closure().is_null() &&
+ !info()->closure().is_identical_to(environment->closure());
+ int closure_id = has_closure_id
+ ? DefineDeoptimizationLiteral(environment->closure())
+ : Translation::kSelfLiteralId;
+ switch (environment->frame_type()) {
+ case JS_FUNCTION:
+ translation->BeginJSFrame(environment->ast_id(), closure_id, height);
+ break;
+ case JS_CONSTRUCT:
+ translation->BeginConstructStubFrame(closure_id, translation_size);
+ break;
+ case JS_GETTER:
+ ASSERT(translation_size == 1);
+ ASSERT(height == 0);
+ translation->BeginGetterStubFrame(closure_id);
+ break;
+ case JS_SETTER:
+ ASSERT(translation_size == 2);
+ ASSERT(height == 0);
+ translation->BeginSetterStubFrame(closure_id);
+ break;
+ case ARGUMENTS_ADAPTOR:
+ translation->BeginArgumentsAdaptorFrame(closure_id, translation_size);
+ break;
+ case STUB:
+ translation->BeginCompiledStubFrame();
+ break;
+ default:
+ UNREACHABLE();
+ }
+
+ int object_index = 0;
+ int dematerialized_index = 0;
+ for (int i = 0; i < translation_size; ++i) {
+ LOperand* value = environment->values()->at(i);
+ AddToTranslation(environment,
+ translation,
+ value,
+ environment->HasTaggedValueAt(i),
+ environment->HasUint32ValueAt(i),
+ &object_index,
+ &dematerialized_index);
+ }
+}
+
+
+void LCodeGen::AddToTranslation(LEnvironment* environment,
+ Translation* translation,
+ LOperand* op,
+ bool is_tagged,
+ bool is_uint32,
+ int* object_index_pointer,
+ int* dematerialized_index_pointer) {
+ if (op == LEnvironment::materialization_marker()) {
+ int object_index = (*object_index_pointer)++;
+ if (environment->ObjectIsDuplicateAt(object_index)) {
+ int dupe_of = environment->ObjectDuplicateOfAt(object_index);
+ translation->DuplicateObject(dupe_of);
+ return;
+ }
+ int object_length = environment->ObjectLengthAt(object_index);
+ if (environment->ObjectIsArgumentsAt(object_index)) {
+ translation->BeginArgumentsObject(object_length);
+ } else {
+ translation->BeginCapturedObject(object_length);
+ }
+ int dematerialized_index = *dematerialized_index_pointer;
+ int env_offset = environment->translation_size() + dematerialized_index;
+ *dematerialized_index_pointer += object_length;
+ for (int i = 0; i < object_length; ++i) {
+ LOperand* value = environment->values()->at(env_offset + i);
+ AddToTranslation(environment,
+ translation,
+ value,
+ environment->HasTaggedValueAt(env_offset + i),
+ environment->HasUint32ValueAt(env_offset + i),
+ object_index_pointer,
+ dematerialized_index_pointer);
+ }
+ return;
+ }
+
+ if (op->IsStackSlot()) {
+ if (is_tagged) {
+ translation->StoreStackSlot(op->index());
+ } else if (is_uint32) {
+ translation->StoreUint32StackSlot(op->index());
+ } else {
+ translation->StoreInt32StackSlot(op->index());
+ }
+ } else if (op->IsDoubleStackSlot()) {
+ translation->StoreDoubleStackSlot(op->index());
+ } else if (op->IsRegister()) {
+ Register reg = ToRegister(op);
+ if (is_tagged) {
+ translation->StoreRegister(reg);
+ } else if (is_uint32) {
+ translation->StoreUint32Register(reg);
+ } else {
+ translation->StoreInt32Register(reg);
+ }
+ } else if (op->IsConstantOperand()) {
+ HConstant* constant = chunk()->LookupConstant(LConstantOperand::cast(op));
+ int src_index = DefineDeoptimizationLiteral(constant->handle(isolate()));
+ translation->StoreLiteral(src_index);
+ } else {
+ UNREACHABLE();
+ }
+}
+
+
+void LCodeGen::CallCodeGeneric(Handle<Code> code,
+ RelocInfo::Mode mode,
+ LInstruction* instr,
+ SafepointMode safepoint_mode) {
+ ASSERT(instr != NULL);
+ __ call(code, mode);
+ RecordSafepointWithLazyDeopt(instr, safepoint_mode);
+
+ // Signal that we don't inline smi code before these stubs in the
+ // optimizing code generator.
+ if (code->kind() == Code::BINARY_OP_IC ||
+ code->kind() == Code::COMPARE_IC) {
+ __ nop();
+ }
+}
+
+
+void LCodeGen::CallCode(Handle<Code> code,
+ RelocInfo::Mode mode,
+ LInstruction* instr) {
+ CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT);
+}
+
+
+void LCodeGen::CallRuntime(const Runtime::Function* fun,
+ int argc,
+ LInstruction* instr) {
+ ASSERT(instr != NULL);
+ ASSERT(instr->HasPointerMap());
+
+ __ CallRuntime(fun, argc);
+
+ RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
+
+ ASSERT(info()->is_calling());
+}
+
+
+void LCodeGen::LoadContextFromDeferred(LOperand* context) {
+ if (context->IsRegister()) {
+ if (!ToRegister(context).is(esi)) {
+ __ mov(esi, ToRegister(context));
+ }
+ } else if (context->IsStackSlot()) {
+ __ mov(esi, ToOperand(context));
+ } else if (context->IsConstantOperand()) {
+ HConstant* constant =
+ chunk_->LookupConstant(LConstantOperand::cast(context));
+ __ LoadObject(esi, Handle<Object>::cast(constant->handle(isolate())));
+ } else {
+ UNREACHABLE();
+ }
+}
+
+void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id,
+ int argc,
+ LInstruction* instr,
+ LOperand* context) {
+ LoadContextFromDeferred(context);
+
+ __ CallRuntime(id);
+ RecordSafepointWithRegisters(
+ instr->pointer_map(), argc, Safepoint::kNoLazyDeopt);
+
+ ASSERT(info()->is_calling());
+}
+
+
+void LCodeGen::RegisterEnvironmentForDeoptimization(
+ LEnvironment* environment, Safepoint::DeoptMode mode) {
+ environment->set_has_been_used();
+ if (!environment->HasBeenRegistered()) {
+ // Physical stack frame layout:
+ // -x ............. -4 0 ..................................... y
+ // [incoming arguments] [spill slots] [pushed outgoing arguments]
+
+ // Layout of the environment:
+ // 0 ..................................................... size-1
+ // [parameters] [locals] [expression stack including arguments]
+
+ // Layout of the translation:
+ // 0 ........................................................ size - 1 + 4
+ // [expression stack including arguments] [locals] [4 words] [parameters]
+ // |>------------ translation_size ------------<|
+
+ int frame_count = 0;
+ int jsframe_count = 0;
+ for (LEnvironment* e = environment; e != NULL; e = e->outer()) {
+ ++frame_count;
+ if (e->frame_type() == JS_FUNCTION) {
+ ++jsframe_count;
+ }
+ }
+ Translation translation(&translations_, frame_count, jsframe_count, zone());
+ WriteTranslation(environment, &translation);
+ int deoptimization_index = deoptimizations_.length();
+ int pc_offset = masm()->pc_offset();
+ environment->Register(deoptimization_index,
+ translation.index(),
+ (mode == Safepoint::kLazyDeopt) ? pc_offset : -1);
+ deoptimizations_.Add(environment, zone());
+ }
+}
+
+
+void LCodeGen::DeoptimizeIf(Condition cc,
+ LEnvironment* environment,
+ Deoptimizer::BailoutType bailout_type) {
+ RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
+ ASSERT(environment->HasBeenRegistered());
+ int id = environment->deoptimization_index();
+ ASSERT(info()->IsOptimizing() || info()->IsStub());
+ Address entry =
+ Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type);
+ if (entry == NULL) {
+ Abort(kBailoutWasNotPrepared);
+ return;
+ }
+
+ if (DeoptEveryNTimes()) {
+ ExternalReference count = ExternalReference::stress_deopt_count(isolate());
+ Label no_deopt;
+ __ pushfd();
+ __ push(eax);
+ __ mov(eax, Operand::StaticVariable(count));
+ __ sub(eax, Immediate(1));
+ __ j(not_zero, &no_deopt, Label::kNear);
+ if (FLAG_trap_on_deopt) __ int3();
+ __ mov(eax, Immediate(FLAG_deopt_every_n_times));
+ __ mov(Operand::StaticVariable(count), eax);
+ __ pop(eax);
+ __ popfd();
+ ASSERT(frame_is_built_);
+ __ call(entry, RelocInfo::RUNTIME_ENTRY);
+ __ bind(&no_deopt);
+ __ mov(Operand::StaticVariable(count), eax);
+ __ pop(eax);
+ __ popfd();
+ }
+
+ // Before instructions that can deopt, we normally flush the x87 stack. But
+ // the inputs or outputs of the current instruction can still be on the
+ // stack, so we flush them here from the physical stack to leave it in a
+ // consistent state.
+ if (x87_stack_.depth() > 0) {
+ Label done;
+ if (cc != no_condition) __ j(NegateCondition(cc), &done, Label::kNear);
+ EmitFlushX87ForDeopt();
+ __ bind(&done);
+ }
+
+ if (info()->ShouldTrapOnDeopt()) {
+ Label done;
+ if (cc != no_condition) __ j(NegateCondition(cc), &done, Label::kNear);
+ __ int3();
+ __ bind(&done);
+ }
+
+ ASSERT(info()->IsStub() || frame_is_built_);
+ if (cc == no_condition && frame_is_built_) {
+ __ call(entry, RelocInfo::RUNTIME_ENTRY);
+ } else {
+ // We often have several deopts to the same entry, reuse the last
+ // jump entry if this is the case.
+ if (jump_table_.is_empty() ||
+ jump_table_.last().address != entry ||
+ jump_table_.last().needs_frame != !frame_is_built_ ||
+ jump_table_.last().bailout_type != bailout_type) {
+ Deoptimizer::JumpTableEntry table_entry(entry,
+ bailout_type,
+ !frame_is_built_);
+ jump_table_.Add(table_entry, zone());
+ }
+ if (cc == no_condition) {
+ __ jmp(&jump_table_.last().label);
+ } else {
+ __ j(cc, &jump_table_.last().label);
+ }
+ }
+}
+
+
+void LCodeGen::DeoptimizeIf(Condition cc,
+ LEnvironment* environment) {
+ Deoptimizer::BailoutType bailout_type = info()->IsStub()
+ ? Deoptimizer::LAZY
+ : Deoptimizer::EAGER;
+ DeoptimizeIf(cc, environment, bailout_type);
+}
+
+
+void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
+ int length = deoptimizations_.length();
+ if (length == 0) return;
+ Handle<DeoptimizationInputData> data =
+ DeoptimizationInputData::New(isolate(), length, TENURED);
+
+ Handle<ByteArray> translations =
+ translations_.CreateByteArray(isolate()->factory());
+ data->SetTranslationByteArray(*translations);
+ data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_));
+ data->SetOptimizationId(Smi::FromInt(info_->optimization_id()));
+ if (info_->IsOptimizing()) {
+ // Reference to shared function info does not change between phases.
+ AllowDeferredHandleDereference allow_handle_dereference;
+ data->SetSharedFunctionInfo(*info_->shared_info());
+ } else {
+ data->SetSharedFunctionInfo(Smi::FromInt(0));
+ }
+
+ Handle<FixedArray> literals =
+ factory()->NewFixedArray(deoptimization_literals_.length(), TENURED);
+ { AllowDeferredHandleDereference copy_handles;
+ for (int i = 0; i < deoptimization_literals_.length(); i++) {
+ literals->set(i, *deoptimization_literals_[i]);
+ }
+ data->SetLiteralArray(*literals);
+ }
+
+ data->SetOsrAstId(Smi::FromInt(info_->osr_ast_id().ToInt()));
+ data->SetOsrPcOffset(Smi::FromInt(osr_pc_offset_));
+
+ // Populate the deoptimization entries.
+ for (int i = 0; i < length; i++) {
+ LEnvironment* env = deoptimizations_[i];
+ data->SetAstId(i, env->ast_id());
+ data->SetTranslationIndex(i, Smi::FromInt(env->translation_index()));
+ data->SetArgumentsStackHeight(i,
+ Smi::FromInt(env->arguments_stack_height()));
+ data->SetPc(i, Smi::FromInt(env->pc_offset()));
+ }
+ code->set_deoptimization_data(*data);
+}
+
+
+int LCodeGen::DefineDeoptimizationLiteral(Handle<Object> literal) {
+ int result = deoptimization_literals_.length();
+ for (int i = 0; i < deoptimization_literals_.length(); ++i) {
+ if (deoptimization_literals_[i].is_identical_to(literal)) return i;
+ }
+ deoptimization_literals_.Add(literal, zone());
+ return result;
+}
+
+
+void LCodeGen::PopulateDeoptimizationLiteralsWithInlinedFunctions() {
+ ASSERT(deoptimization_literals_.length() == 0);
+
+ const ZoneList<Handle<JSFunction> >* inlined_closures =
+ chunk()->inlined_closures();
+
+ for (int i = 0, length = inlined_closures->length();
+ i < length;
+ i++) {
+ DefineDeoptimizationLiteral(inlined_closures->at(i));
+ }
+
+ inlined_function_count_ = deoptimization_literals_.length();
+}
+
+
+void LCodeGen::RecordSafepointWithLazyDeopt(
+ LInstruction* instr, SafepointMode safepoint_mode) {
+ if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
+ RecordSafepoint(instr->pointer_map(), Safepoint::kLazyDeopt);
+ } else {
+ ASSERT(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
+ RecordSafepointWithRegisters(
+ instr->pointer_map(), 0, Safepoint::kLazyDeopt);
+ }
+}
+
+
+void LCodeGen::RecordSafepoint(
+ LPointerMap* pointers,
+ Safepoint::Kind kind,
+ int arguments,
+ Safepoint::DeoptMode deopt_mode) {
+ ASSERT(kind == expected_safepoint_kind_);
+ const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands();
+ Safepoint safepoint =
+ safepoints_.DefineSafepoint(masm(), kind, arguments, deopt_mode);
+ for (int i = 0; i < operands->length(); i++) {
+ LOperand* pointer = operands->at(i);
+ if (pointer->IsStackSlot()) {
+ safepoint.DefinePointerSlot(pointer->index(), zone());
+ } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) {
+ safepoint.DefinePointerRegister(ToRegister(pointer), zone());
+ }
+ }
+}
+
+
+void LCodeGen::RecordSafepoint(LPointerMap* pointers,
+ Safepoint::DeoptMode mode) {
+ RecordSafepoint(pointers, Safepoint::kSimple, 0, mode);
+}
+
+
+void LCodeGen::RecordSafepoint(Safepoint::DeoptMode mode) {
+ LPointerMap empty_pointers(zone());
+ RecordSafepoint(&empty_pointers, mode);
+}
+
+
+void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
+ int arguments,
+ Safepoint::DeoptMode mode) {
+ RecordSafepoint(pointers, Safepoint::kWithRegisters, arguments, mode);
+}
+
+
+void LCodeGen::RecordAndWritePosition(int position) {
+ if (position == RelocInfo::kNoPosition) return;
+ masm()->positions_recorder()->RecordPosition(position);
+ masm()->positions_recorder()->WriteRecordedPositions();
+}
+
+
+static const char* LabelType(LLabel* label) {
+ if (label->is_loop_header()) return " (loop header)";
+ if (label->is_osr_entry()) return " (OSR entry)";
+ return "";
+}
+
+
+void LCodeGen::DoLabel(LLabel* label) {
+ Comment(";;; <@%d,#%d> -------------------- B%d%s --------------------",
+ current_instruction_,
+ label->hydrogen_value()->id(),
+ label->block_id(),
+ LabelType(label));
+ __ bind(label->label());
+ current_block_ = label->block_id();
+ DoGap(label);
+}
+
+
+void LCodeGen::DoParallelMove(LParallelMove* move) {
+ resolver_.Resolve(move);
+}
+
+
+void LCodeGen::DoGap(LGap* gap) {
+ for (int i = LGap::FIRST_INNER_POSITION;
+ i <= LGap::LAST_INNER_POSITION;
+ i++) {
+ LGap::InnerPosition inner_pos = static_cast<LGap::InnerPosition>(i);
+ LParallelMove* move = gap->GetParallelMove(inner_pos);
+ if (move != NULL) DoParallelMove(move);
+ }
+}
+
+
+void LCodeGen::DoInstructionGap(LInstructionGap* instr) {
+ DoGap(instr);
+}
+
+
+void LCodeGen::DoParameter(LParameter* instr) {
+ // Nothing to do.
+}
+
+
+void LCodeGen::DoCallStub(LCallStub* instr) {
+ ASSERT(ToRegister(instr->context()).is(esi));
+ ASSERT(ToRegister(instr->result()).is(eax));
+ switch (instr->hydrogen()->major_key()) {
+ case CodeStub::RegExpExec: {
+ RegExpExecStub stub(isolate());
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ break;
+ }
+ case CodeStub::SubString: {
+ SubStringStub stub(isolate());
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ break;
+ }
+ case CodeStub::StringCompare: {
+ StringCompareStub stub(isolate());
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
+}
+
+
+void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
+ GenerateOsrPrologue();
+}
+
+
+void LCodeGen::DoModByPowerOf2I(LModByPowerOf2I* instr) {
+ Register dividend = ToRegister(instr->dividend());
+ int32_t divisor = instr->divisor();
+ ASSERT(dividend.is(ToRegister(instr->result())));
+
+ // Theoretically, a variation of the branch-free code for integer division by
+ // a power of 2 (calculating the remainder via an additional multiplication
+ // (which gets simplified to an 'and') and subtraction) should be faster, and
+ // this is exactly what GCC and clang emit. Nevertheless, benchmarks seem to
+ // indicate that positive dividends are heavily favored, so the branching
+ // version performs better.
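+ // For example, for divisor == +/-8 the mask is 7: non-negative dividends
+ // are reduced with a single 'and', while negative dividends are negated
+ // around the 'and' so the result keeps the sign of the dividend, as JS %
+ // requires.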
+ HMod* hmod = instr->hydrogen();
+ int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
+ Label dividend_is_not_negative, done;
+ if (hmod->CheckFlag(HValue::kLeftCanBeNegative)) {
+ __ test(dividend, dividend);
+ __ j(not_sign, &dividend_is_not_negative, Label::kNear);
+ // Note that this is correct even for kMinInt operands.
+ __ neg(dividend);
+ __ and_(dividend, mask);
+ __ neg(dividend);
+ if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ DeoptimizeIf(zero, instr->environment());
+ }
+ __ jmp(&done, Label::kNear);
+ }
+
+ __ bind(&dividend_is_not_negative);
+ __ and_(dividend, mask);
+ __ bind(&done);
+}
+
+
+void LCodeGen::DoModByConstI(LModByConstI* instr) {
+ Register dividend = ToRegister(instr->dividend());
+ int32_t divisor = instr->divisor();
+ ASSERT(ToRegister(instr->result()).is(eax));
+
+ if (divisor == 0) {
+ DeoptimizeIf(no_condition, instr->environment());
+ return;
+ }
+
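+ // TruncatingDiv leaves the truncated quotient of dividend / |divisor| in
+ // edx; multiplying it back by |divisor| and subtracting from the dividend
+ // leaves the remainder in eax. Using |divisor| is fine because the sign of
+ // the divisor does not affect the remainder.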
+ __ TruncatingDiv(dividend, Abs(divisor));
+ __ imul(edx, edx, Abs(divisor));
+ __ mov(eax, dividend);
+ __ sub(eax, edx);
+
+ // Check for negative zero.
+ HMod* hmod = instr->hydrogen();
+ if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ Label remainder_not_zero;
+ __ j(not_zero, &remainder_not_zero, Label::kNear);
+ __ cmp(dividend, Immediate(0));
+ DeoptimizeIf(less, instr->environment());
+ __ bind(&remainder_not_zero);
+ }
+}
+
+
+void LCodeGen::DoModI(LModI* instr) {
+ HMod* hmod = instr->hydrogen();
+
+ Register left_reg = ToRegister(instr->left());
+ ASSERT(left_reg.is(eax));
+ Register right_reg = ToRegister(instr->right());
+ ASSERT(!right_reg.is(eax));
+ ASSERT(!right_reg.is(edx));
+ Register result_reg = ToRegister(instr->result());
+ ASSERT(result_reg.is(edx));
+
+ Label done;
+ // Check for x % 0, idiv would signal a divide error. We have to
+ // deopt in this case because we can't return a NaN.
+ if (hmod->CheckFlag(HValue::kCanBeDivByZero)) {
+ __ test(right_reg, Operand(right_reg));
+ DeoptimizeIf(zero, instr->environment());
+ }
+
+ // Check for kMinInt % -1, idiv would signal a divide error. We
+ // have to deopt if we care about -0, because we can't return that.
+ if (hmod->CheckFlag(HValue::kCanOverflow)) {
+ Label no_overflow_possible;
+ __ cmp(left_reg, kMinInt);
+ __ j(not_equal, &no_overflow_possible, Label::kNear);
+ __ cmp(right_reg, -1);
+ if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ DeoptimizeIf(equal, instr->environment());
+ } else {
+ __ j(not_equal, &no_overflow_possible, Label::kNear);
+ __ Move(result_reg, Immediate(0));
+ __ jmp(&done, Label::kNear);
+ }
+ __ bind(&no_overflow_possible);
+ }
+
+ // Sign extend dividend in eax into edx:eax.
+ __ cdq();
+
+ // If we care about -0, test if the dividend is <0 and the result is 0.
+ if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ Label positive_left;
+ __ test(left_reg, Operand(left_reg));
+ __ j(not_sign, &positive_left, Label::kNear);
+ __ idiv(right_reg);
+ __ test(result_reg, Operand(result_reg));
+ DeoptimizeIf(zero, instr->environment());
+ __ jmp(&done, Label::kNear);
+ __ bind(&positive_left);
+ }
+ __ idiv(right_reg);
+ __ bind(&done);
+}
+
+
+void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) {
+ Register dividend = ToRegister(instr->dividend());
+ int32_t divisor = instr->divisor();
+ Register result = ToRegister(instr->result());
+ ASSERT(divisor == kMinInt || IsPowerOf2(Abs(divisor)));
+ ASSERT(!result.is(dividend));
+
+ // Check for (0 / -x) that will produce negative zero.
+ HDiv* hdiv = instr->hydrogen();
+ if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
+ __ test(dividend, dividend);
+ DeoptimizeIf(zero, instr->environment());
+ }
+ // Check for (kMinInt / -1).
+ if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) {
+ __ cmp(dividend, kMinInt);
+ DeoptimizeIf(zero, instr->environment());
+ }
+ // Deoptimize if remainder will not be 0.
+ if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) &&
+ divisor != 1 && divisor != -1) {
+ int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
+ __ test(dividend, Immediate(mask));
+ DeoptimizeIf(not_zero, instr->environment());
+ }
+ __ Move(result, dividend);
+ int32_t shift = WhichPowerOf2Abs(divisor);
+ if (shift > 0) {
+ // The arithmetic shift is always OK, the 'if' is an optimization only.
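+ // This computes a truncating division: 'sar result, 31' yields -1 for
+ // negative dividends (0 otherwise), 'shr result, 32 - shift' turns that
+ // into the bias 2^shift - 1, and adding the bias before the final
+ // arithmetic shift rounds negative dividends towards zero.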
+ if (shift > 1) __ sar(result, 31);
+ __ shr(result, 32 - shift);
+ __ add(result, dividend);
+ __ sar(result, shift);
+ }
+ if (divisor < 0) __ neg(result);
+}
+
+
+void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
+ Register dividend = ToRegister(instr->dividend());
+ int32_t divisor = instr->divisor();
+ ASSERT(ToRegister(instr->result()).is(edx));
+
+ if (divisor == 0) {
+ DeoptimizeIf(no_condition, instr->environment());
+ return;
+ }
+
+ // Check for (0 / -x) that will produce negative zero.
+ HDiv* hdiv = instr->hydrogen();
+ if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
+ __ test(dividend, dividend);
+ DeoptimizeIf(zero, instr->environment());
+ }
+
+ __ TruncatingDiv(dividend, Abs(divisor));
+ if (divisor < 0) __ neg(edx);
+
+ if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
+ __ mov(eax, edx);
+ __ imul(eax, eax, divisor);
+ __ sub(eax, dividend);
+ DeoptimizeIf(not_equal, instr->environment());
+ }
+}
+
+
+// TODO(svenpanne) Refactor this to avoid code duplication with DoFlooringDivI.
+void LCodeGen::DoDivI(LDivI* instr) {
+ HBinaryOperation* hdiv = instr->hydrogen();
+ Register dividend = ToRegister(instr->dividend());
+ Register divisor = ToRegister(instr->divisor());
+ Register remainder = ToRegister(instr->temp());
+ ASSERT(dividend.is(eax));
+ ASSERT(remainder.is(edx));
+ ASSERT(ToRegister(instr->result()).is(eax));
+ ASSERT(!divisor.is(eax));
+ ASSERT(!divisor.is(edx));
+
+ // Check for x / 0.
+ if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
+ __ test(divisor, divisor);
+ DeoptimizeIf(zero, instr->environment());
+ }
+
+ // Check for (0 / -x) that will produce negative zero.
+ if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ Label dividend_not_zero;
+ __ test(dividend, dividend);
+ __ j(not_zero, &dividend_not_zero, Label::kNear);
+ __ test(divisor, divisor);
+ DeoptimizeIf(sign, instr->environment());
+ __ bind(&dividend_not_zero);
+ }
+
+ // Check for (kMinInt / -1).
+ if (hdiv->CheckFlag(HValue::kCanOverflow)) {
+ Label dividend_not_min_int;
+ __ cmp(dividend, kMinInt);
+ __ j(not_zero, &dividend_not_min_int, Label::kNear);
+ __ cmp(divisor, -1);
+ DeoptimizeIf(zero, instr->environment());
+ __ bind(&dividend_not_min_int);
+ }
+
+ // Sign extend to edx (= remainder).
+ __ cdq();
+ __ idiv(divisor);
+
+ if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
+ // Deoptimize if remainder is not 0.
+ __ test(remainder, remainder);
+ DeoptimizeIf(not_zero, instr->environment());
+ }
+}
+
+
+void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) {
+ Register dividend = ToRegister(instr->dividend());
+ int32_t divisor = instr->divisor();
+ ASSERT(dividend.is(ToRegister(instr->result())));
+
+ // If the divisor is positive, things are easy: There can be no deopts and we
+ // can simply do an arithmetic right shift.
+ if (divisor == 1) return;
+ int32_t shift = WhichPowerOf2Abs(divisor);
+ if (divisor > 1) {
+ __ sar(dividend, shift);
+ return;
+ }
+
+ // If the divisor is negative, we have to negate and handle edge cases.
+ __ neg(dividend);
+ if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ DeoptimizeIf(zero, instr->environment());
+ }
+
+ if (!instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
+ __ sar(dividend, shift);
+ return;
+ }
+
+ // Dividing by -1 is basically negation, unless we overflow.
+ if (divisor == -1) {
+ DeoptimizeIf(overflow, instr->environment());
+ return;
+ }
+
+ Label not_kmin_int, done;
+ __ j(no_overflow, &not_kmin_int, Label::kNear);
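+ // The negation only overflows for kMinInt, and kMinInt is exactly
+ // divisible by any power of two, so the flooring quotient can be loaded as
+ // a constant.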
+ __ mov(dividend, Immediate(kMinInt / divisor));
+ __ jmp(&done, Label::kNear);
+ __ bind(&not_kmin_int);
+ __ sar(dividend, shift);
+ __ bind(&done);
+}
+
+
+void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) {
+ Register dividend = ToRegister(instr->dividend());
+ int32_t divisor = instr->divisor();
+ ASSERT(ToRegister(instr->result()).is(edx));
+
+ if (divisor == 0) {
+ DeoptimizeIf(no_condition, instr->environment());
+ return;
+ }
+
+ // Check for (0 / -x) that will produce negative zero.
+ HMathFloorOfDiv* hdiv = instr->hydrogen();
+ if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
+ __ test(dividend, dividend);
+ DeoptimizeIf(zero, instr->environment());
+ }
+
+ // Easy case: We need no dynamic check for the dividend and the flooring
+ // division is the same as the truncating division.
+ if ((divisor > 0 && !hdiv->CheckFlag(HValue::kLeftCanBeNegative)) ||
+ (divisor < 0 && !hdiv->CheckFlag(HValue::kLeftCanBePositive))) {
+ __ TruncatingDiv(dividend, Abs(divisor));
+ if (divisor < 0) __ neg(edx);
+ return;
+ }
+
+ // In the general case we may need to adjust before and after the truncating
+ // division to get a flooring division.
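+ // When dividend and divisor have opposite signs, floor(x / d) equals
+ // trunc((x + 1) / d) - 1 for d > 0 and trunc((x - 1) / d) - 1 for d < 0,
+ // which is what the adjusted path below computes.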
+ Register temp = ToRegister(instr->temp3());
+ ASSERT(!temp.is(dividend) && !temp.is(eax) && !temp.is(edx));
+ Label needs_adjustment, done;
+ __ cmp(dividend, Immediate(0));
+ __ j(divisor > 0 ? less : greater, &needs_adjustment, Label::kNear);
+ __ TruncatingDiv(dividend, Abs(divisor));
+ if (divisor < 0) __ neg(edx);
+ __ jmp(&done, Label::kNear);
+ __ bind(&needs_adjustment);
+ __ lea(temp, Operand(dividend, divisor > 0 ? 1 : -1));
+ __ TruncatingDiv(temp, Abs(divisor));
+ if (divisor < 0) __ neg(edx);
+ __ dec(edx);
+ __ bind(&done);
+}
+
+
+// TODO(svenpanne) Refactor this to avoid code duplication with DoDivI.
+void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
+ HBinaryOperation* hdiv = instr->hydrogen();
+ Register dividend = ToRegister(instr->dividend());
+ Register divisor = ToRegister(instr->divisor());
+ Register remainder = ToRegister(instr->temp());
+ Register result = ToRegister(instr->result());
+ ASSERT(dividend.is(eax));
+ ASSERT(remainder.is(edx));
+ ASSERT(result.is(eax));
+ ASSERT(!divisor.is(eax));
+ ASSERT(!divisor.is(edx));
+
+ // Check for x / 0.
+ if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
+ __ test(divisor, divisor);
+ DeoptimizeIf(zero, instr->environment());
+ }
+
+ // Check for (0 / -x) that will produce negative zero.
+ if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ Label dividend_not_zero;
+ __ test(dividend, dividend);
+ __ j(not_zero, &dividend_not_zero, Label::kNear);
+ __ test(divisor, divisor);
+ DeoptimizeIf(sign, instr->environment());
+ __ bind(&dividend_not_zero);
+ }
+
+ // Check for (kMinInt / -1).
+ if (hdiv->CheckFlag(HValue::kCanOverflow)) {
+ Label dividend_not_min_int;
+ __ cmp(dividend, kMinInt);
+ __ j(not_zero, &dividend_not_min_int, Label::kNear);
+ __ cmp(divisor, -1);
+ DeoptimizeIf(zero, instr->environment());
+ __ bind(&dividend_not_min_int);
+ }
+
+ // Sign extend to edx (= remainder).
+ __ cdq();
+ __ idiv(divisor);
+
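+ // The truncated and floored quotients differ by one exactly when there is
+ // a non-zero remainder whose sign differs from the divisor's. The xor/sar
+ // sequence below turns that condition into 0 or -1 and adds it to the
+ // quotient.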
+ Label done;
+ __ test(remainder, remainder);
+ __ j(zero, &done, Label::kNear);
+ __ xor_(remainder, divisor);
+ __ sar(remainder, 31);
+ __ add(result, remainder);
+ __ bind(&done);
+}
+
+
+void LCodeGen::DoMulI(LMulI* instr) {
+ Register left = ToRegister(instr->left());
+ LOperand* right = instr->right();
+
+ if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ __ mov(ToRegister(instr->temp()), left);
+ }
+
+ if (right->IsConstantOperand()) {
+ // Try strength reductions on the multiplication.
+ // All replacement instructions are at most as long as the imul
+ // and have better latency.
+ int constant = ToInteger32(LConstantOperand::cast(right));
+ if (constant == -1) {
+ __ neg(left);
+ } else if (constant == 0) {
+ __ xor_(left, Operand(left));
+ } else if (constant == 2) {
+ __ add(left, Operand(left));
+ } else if (!instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
+ // If we know that the multiplication can't overflow, it's safe to
+ // use instructions that don't set the overflow flag for the
+ // multiplication.
+ switch (constant) {
+ case 1:
+ // Do nothing.
+ break;
+ case 3:
+ __ lea(left, Operand(left, left, times_2, 0));
+ break;
+ case 4:
+ __ shl(left, 2);
+ break;
+ case 5:
+ __ lea(left, Operand(left, left, times_4, 0));
+ break;
+ case 8:
+ __ shl(left, 3);
+ break;
+ case 9:
+ __ lea(left, Operand(left, left, times_8, 0));
+ break;
+ case 16:
+ __ shl(left, 4);
+ break;
+ default:
+ __ imul(left, left, constant);
+ break;
+ }
+ } else {
+ __ imul(left, left, constant);
+ }
+ } else {
+ if (instr->hydrogen()->representation().IsSmi()) {
+ __ SmiUntag(left);
+ }
+ __ imul(left, ToOperand(right));
+ }
+
+ if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
+ DeoptimizeIf(overflow, instr->environment());
+ }
+
+ if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ // Bail out if the result is supposed to be negative zero.
+ Label done;
+ __ test(left, Operand(left));
+ __ j(not_zero, &done, Label::kNear);
+ if (right->IsConstantOperand()) {
+ if (ToInteger32(LConstantOperand::cast(right)) < 0) {
+ DeoptimizeIf(no_condition, instr->environment());
+ } else if (ToInteger32(LConstantOperand::cast(right)) == 0) {
+ __ cmp(ToRegister(instr->temp()), Immediate(0));
+ DeoptimizeIf(less, instr->environment());
+ }
+ } else {
+ // Test the non-zero operand for negative sign.
+ __ or_(ToRegister(instr->temp()), ToOperand(right));
+ DeoptimizeIf(sign, instr->environment());
+ }
+ __ bind(&done);
+ }
+}
+
+
+void LCodeGen::DoBitI(LBitI* instr) {
+ LOperand* left = instr->left();
+ LOperand* right = instr->right();
+ ASSERT(left->Equals(instr->result()));
+ ASSERT(left->IsRegister());
+
+ if (right->IsConstantOperand()) {
+ int32_t right_operand =
+ ToRepresentation(LConstantOperand::cast(right),
+ instr->hydrogen()->representation());
+ switch (instr->op()) {
+ case Token::BIT_AND:
+ __ and_(ToRegister(left), right_operand);
+ break;
+ case Token::BIT_OR:
+ __ or_(ToRegister(left), right_operand);
+ break;
+ case Token::BIT_XOR:
+ if (right_operand == int32_t(~0)) {
+ __ not_(ToRegister(left));
+ } else {
+ __ xor_(ToRegister(left), right_operand);
+ }
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+ } else {
+ switch (instr->op()) {
+ case Token::BIT_AND:
+ __ and_(ToRegister(left), ToOperand(right));
+ break;
+ case Token::BIT_OR:
+ __ or_(ToRegister(left), ToOperand(right));
+ break;
+ case Token::BIT_XOR:
+ __ xor_(ToRegister(left), ToOperand(right));
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+ }
+}
+
+
+void LCodeGen::DoShiftI(LShiftI* instr) {
+ LOperand* left = instr->left();
+ LOperand* right = instr->right();
+ ASSERT(left->Equals(instr->result()));
+ ASSERT(left->IsRegister());
+ if (right->IsRegister()) {
+ ASSERT(ToRegister(right).is(ecx));
+
+ switch (instr->op()) {
+ case Token::ROR:
+ __ ror_cl(ToRegister(left));
+ if (instr->can_deopt()) {
+ __ test(ToRegister(left), ToRegister(left));
+ DeoptimizeIf(sign, instr->environment());
+ }
+ break;
+ case Token::SAR:
+ __ sar_cl(ToRegister(left));
+ break;
+ case Token::SHR:
+ __ shr_cl(ToRegister(left));
+ if (instr->can_deopt()) {
+ __ test(ToRegister(left), ToRegister(left));
+ DeoptimizeIf(sign, instr->environment());
+ }
+ break;
+ case Token::SHL:
+ __ shl_cl(ToRegister(left));
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+ } else {
+ int value = ToInteger32(LConstantOperand::cast(right));
+ uint8_t shift_count = static_cast<uint8_t>(value & 0x1F);
+ switch (instr->op()) {
+ case Token::ROR:
+ if (shift_count == 0 && instr->can_deopt()) {
+ __ test(ToRegister(left), ToRegister(left));
+ DeoptimizeIf(sign, instr->environment());
+ } else {
+ __ ror(ToRegister(left), shift_count);
+ }
+ break;
+ case Token::SAR:
+ if (shift_count != 0) {
+ __ sar(ToRegister(left), shift_count);
+ }
+ break;
+ case Token::SHR:
+ if (shift_count != 0) {
+ __ shr(ToRegister(left), shift_count);
+ } else if (instr->can_deopt()) {
+ __ test(ToRegister(left), ToRegister(left));
+ DeoptimizeIf(sign, instr->environment());
+ }
+ break;
+ case Token::SHL:
+ if (shift_count != 0) {
+ if (instr->hydrogen_value()->representation().IsSmi() &&
+ instr->can_deopt()) {
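+ // Shift by shift_count - 1 and let SmiTag supply the last bit, so that
+ // the overflow check below catches results that do not fit in a smi.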
+ if (shift_count != 1) {
+ __ shl(ToRegister(left), shift_count - 1);
+ }
+ __ SmiTag(ToRegister(left));
+ DeoptimizeIf(overflow, instr->environment());
+ } else {
+ __ shl(ToRegister(left), shift_count);
+ }
+ }
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+ }
+}
+
+
+void LCodeGen::DoSubI(LSubI* instr) {
+ LOperand* left = instr->left();
+ LOperand* right = instr->right();
+ ASSERT(left->Equals(instr->result()));
+
+ if (right->IsConstantOperand()) {
+ __ sub(ToOperand(left),
+ ToImmediate(right, instr->hydrogen()->representation()));
+ } else {
+ __ sub(ToRegister(left), ToOperand(right));
+ }
+ if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
+ DeoptimizeIf(overflow, instr->environment());
+ }
+}
+
+
+void LCodeGen::DoConstantI(LConstantI* instr) {
+ __ Move(ToRegister(instr->result()), Immediate(instr->value()));
+}
+
+
+void LCodeGen::DoConstantS(LConstantS* instr) {
+ __ Move(ToRegister(instr->result()), Immediate(instr->value()));
+}
+
+
+void LCodeGen::DoConstantD(LConstantD* instr) {
+ double v = instr->value();
+ uint64_t int_val = BitCast<uint64_t, double>(v);
+ int32_t lower = static_cast<int32_t>(int_val);
+ int32_t upper = static_cast<int32_t>(int_val >> (kBitsPerInt));
+ ASSERT(instr->result()->IsDoubleRegister());
+
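+ // Materialize the 64-bit double on the stack (the high word is pushed
+ // first so the low word ends up at the lower address) and load it onto
+ // the x87 stack.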
+ __ push(Immediate(upper));
+ __ push(Immediate(lower));
+ X87Register reg = ToX87Register(instr->result());
+ X87Mov(reg, Operand(esp, 0));
+ __ add(Operand(esp), Immediate(kDoubleSize));
+}
+
+
+void LCodeGen::DoConstantE(LConstantE* instr) {
+ __ lea(ToRegister(instr->result()), Operand::StaticVariable(instr->value()));
+}
+
+
+void LCodeGen::DoConstantT(LConstantT* instr) {
+ Register reg = ToRegister(instr->result());
+ Handle<Object> object = instr->value(isolate());
+ AllowDeferredHandleDereference smi_check;
+ __ LoadObject(reg, object);
+}
+
+
+void LCodeGen::DoMapEnumLength(LMapEnumLength* instr) {
+ Register result = ToRegister(instr->result());
+ Register map = ToRegister(instr->value());
+ __ EnumLength(result, map);
+}
+
+
+void LCodeGen::DoDateField(LDateField* instr) {
+ Register object = ToRegister(instr->date());
+ Register result = ToRegister(instr->result());
+ Register scratch = ToRegister(instr->temp());
+ Smi* index = instr->index();
+ Label runtime, done;
+ ASSERT(object.is(result));
+ ASSERT(object.is(eax));
+
+ __ test(object, Immediate(kSmiTagMask));
+ DeoptimizeIf(zero, instr->environment());
+ __ CmpObjectType(object, JS_DATE_TYPE, scratch);
+ DeoptimizeIf(not_equal, instr->environment());
+
+ if (index->value() == 0) {
+ __ mov(result, FieldOperand(object, JSDate::kValueOffset));
+ } else {
+ if (index->value() < JSDate::kFirstUncachedField) {
+ ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
+ __ mov(scratch, Operand::StaticVariable(stamp));
+ __ cmp(scratch, FieldOperand(object, JSDate::kCacheStampOffset));
+ __ j(not_equal, &runtime, Label::kNear);
+ __ mov(result, FieldOperand(object, JSDate::kValueOffset +
+ kPointerSize * index->value()));
+ __ jmp(&done, Label::kNear);
+ }
+ __ bind(&runtime);
+ __ PrepareCallCFunction(2, scratch);
+ __ mov(Operand(esp, 0), object);
+ __ mov(Operand(esp, 1 * kPointerSize), Immediate(index));
+ __ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
+ __ bind(&done);
+ }
+}
+
+
+Operand LCodeGen::BuildSeqStringOperand(Register string,
+ LOperand* index,
+ String::Encoding encoding) {
+ if (index->IsConstantOperand()) {
+ int offset = ToRepresentation(LConstantOperand::cast(index),
+ Representation::Integer32());
+ if (encoding == String::TWO_BYTE_ENCODING) {
+ offset *= kUC16Size;
+ }
+ STATIC_ASSERT(kCharSize == 1);
+ return FieldOperand(string, SeqString::kHeaderSize + offset);
+ }
+ return FieldOperand(
+ string, ToRegister(index),
+ encoding == String::ONE_BYTE_ENCODING ? times_1 : times_2,
+ SeqString::kHeaderSize);
+}
+
+
+void LCodeGen::DoSeqStringGetChar(LSeqStringGetChar* instr) {
+ String::Encoding encoding = instr->hydrogen()->encoding();
+ Register result = ToRegister(instr->result());
+ Register string = ToRegister(instr->string());
+
+ if (FLAG_debug_code) {
+ __ push(string);
+ __ mov(string, FieldOperand(string, HeapObject::kMapOffset));
+ __ movzx_b(string, FieldOperand(string, Map::kInstanceTypeOffset));
+
+ __ and_(string, Immediate(kStringRepresentationMask | kStringEncodingMask));
+ static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
+ static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
+ __ cmp(string, Immediate(encoding == String::ONE_BYTE_ENCODING
+ ? one_byte_seq_type : two_byte_seq_type));
+ __ Check(equal, kUnexpectedStringType);
+ __ pop(string);
+ }
+
+ Operand operand = BuildSeqStringOperand(string, instr->index(), encoding);
+ if (encoding == String::ONE_BYTE_ENCODING) {
+ __ movzx_b(result, operand);
+ } else {
+ __ movzx_w(result, operand);
+ }
+}
+
+
+void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) {
+ String::Encoding encoding = instr->hydrogen()->encoding();
+ Register string = ToRegister(instr->string());
+
+ if (FLAG_debug_code) {
+ Register value = ToRegister(instr->value());
+ Register index = ToRegister(instr->index());
+ static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
+ static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
+ int encoding_mask =
+ instr->hydrogen()->encoding() == String::ONE_BYTE_ENCODING
+ ? one_byte_seq_type : two_byte_seq_type;
+ __ EmitSeqStringSetCharCheck(string, index, value, encoding_mask);
+ }
+
+ Operand operand = BuildSeqStringOperand(string, instr->index(), encoding);
+ if (instr->value()->IsConstantOperand()) {
+ int value = ToRepresentation(LConstantOperand::cast(instr->value()),
+ Representation::Integer32());
+ ASSERT_LE(0, value);
+ if (encoding == String::ONE_BYTE_ENCODING) {
+ ASSERT_LE(value, String::kMaxOneByteCharCode);
+ __ mov_b(operand, static_cast<int8_t>(value));
+ } else {
+ ASSERT_LE(value, String::kMaxUtf16CodeUnit);
+ __ mov_w(operand, static_cast<int16_t>(value));
+ }
+ } else {
+ Register value = ToRegister(instr->value());
+ if (encoding == String::ONE_BYTE_ENCODING) {
+ __ mov_b(operand, value);
+ } else {
+ __ mov_w(operand, value);
+ }
+ }
+}
+
+
+void LCodeGen::DoAddI(LAddI* instr) {
+ LOperand* left = instr->left();
+ LOperand* right = instr->right();
+
+ if (LAddI::UseLea(instr->hydrogen()) && !left->Equals(instr->result())) {
+ if (right->IsConstantOperand()) {
+ int32_t offset = ToRepresentation(LConstantOperand::cast(right),
+ instr->hydrogen()->representation());
+ __ lea(ToRegister(instr->result()), MemOperand(ToRegister(left), offset));
+ } else {
+ Operand address(ToRegister(left), ToRegister(right), times_1, 0);
+ __ lea(ToRegister(instr->result()), address);
+ }
+ } else {
+ if (right->IsConstantOperand()) {
+ __ add(ToOperand(left),
+ ToImmediate(right, instr->hydrogen()->representation()));
+ } else {
+ __ add(ToRegister(left), ToOperand(right));
+ }
+ if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
+ DeoptimizeIf(overflow, instr->environment());
+ }
+ }
+}
+
+
+void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
+ LOperand* left = instr->left();
+ LOperand* right = instr->right();
+ ASSERT(left->Equals(instr->result()));
+ HMathMinMax::Operation operation = instr->hydrogen()->operation();
+ if (instr->hydrogen()->representation().IsSmiOrInteger32()) {
+ Label return_left;
+ Condition condition = (operation == HMathMinMax::kMathMin)
+ ? less_equal
+ : greater_equal;
+ if (right->IsConstantOperand()) {
+ Operand left_op = ToOperand(left);
+ Immediate immediate = ToImmediate(LConstantOperand::cast(instr->right()),
+ instr->hydrogen()->representation());
+ __ cmp(left_op, immediate);
+ __ j(condition, &return_left, Label::kNear);
+ __ mov(left_op, immediate);
+ } else {
+ Register left_reg = ToRegister(left);
+ Operand right_op = ToOperand(right);
+ __ cmp(left_reg, right_op);
+ __ j(condition, &return_left, Label::kNear);
+ __ mov(left_reg, right_op);
+ }
+ __ bind(&return_left);
+ } else {
+ // TODO(weiliang) use X87 for double representation.
+ UNIMPLEMENTED();
+ }
+}
+
+
+void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
+ X87Register left = ToX87Register(instr->left());
+ X87Register right = ToX87Register(instr->right());
+ X87Register result = ToX87Register(instr->result());
+ if (instr->op() != Token::MOD) {
+ X87PrepareBinaryOp(left, right, result);
+ }
+ switch (instr->op()) {
+ case Token::ADD:
+ __ fadd_i(1);
+ break;
+ case Token::SUB:
+ __ fsub_i(1);
+ break;
+ case Token::MUL:
+ __ fmul_i(1);
+ break;
+ case Token::DIV:
+ __ fdiv_i(1);
+ break;
+ case Token::MOD: {
+ // Pass two doubles as arguments on the stack.
+ __ PrepareCallCFunction(4, eax);
+ X87Mov(Operand(esp, 1 * kDoubleSize), right);
+ X87Mov(Operand(esp, 0), left);
+ X87Free(right);
+ ASSERT(left.is(result));
+ X87PrepareToWrite(result);
+ __ CallCFunction(
+ ExternalReference::mod_two_doubles_operation(isolate()),
+ 4);
+
+ // Return value is in st(0) on ia32.
+ X87CommitWrite(result);
+ break;
+ }
+ default:
+ UNREACHABLE();
+ break;
+ }
+}
+
+
+void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
+ ASSERT(ToRegister(instr->context()).is(esi));
+ ASSERT(ToRegister(instr->left()).is(edx));
+ ASSERT(ToRegister(instr->right()).is(eax));
+ ASSERT(ToRegister(instr->result()).is(eax));
+
+ BinaryOpICStub stub(isolate(), instr->op(), NO_OVERWRITE);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+}
+
+
+template<class InstrType>
+void LCodeGen::EmitBranch(InstrType instr, Condition cc) {
+ int left_block = instr->TrueDestination(chunk_);
+ int right_block = instr->FalseDestination(chunk_);
+
+ int next_block = GetNextEmittedBlock();
+
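+ // Avoid emitting a branch to a successor that is the next block in the
+ // emission order; it can simply fall through.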
+ if (right_block == left_block || cc == no_condition) {
+ EmitGoto(left_block);
+ } else if (left_block == next_block) {
+ __ j(NegateCondition(cc), chunk_->GetAssemblyLabel(right_block));
+ } else if (right_block == next_block) {
+ __ j(cc, chunk_->GetAssemblyLabel(left_block));
+ } else {
+ __ j(cc, chunk_->GetAssemblyLabel(left_block));
+ __ jmp(chunk_->GetAssemblyLabel(right_block));
+ }
+}
+
+
+template<class InstrType>
+void LCodeGen::EmitFalseBranch(InstrType instr, Condition cc) {
+ int false_block = instr->FalseDestination(chunk_);
+ if (cc == no_condition) {
+ __ jmp(chunk_->GetAssemblyLabel(false_block));
+ } else {
+ __ j(cc, chunk_->GetAssemblyLabel(false_block));
+ }
+}
+
+
+void LCodeGen::DoBranch(LBranch* instr) {
+ Representation r = instr->hydrogen()->value()->representation();
+ if (r.IsSmiOrInteger32()) {
+ Register reg = ToRegister(instr->value());
+ __ test(reg, Operand(reg));
+ EmitBranch(instr, not_zero);
+ } else if (r.IsDouble()) {
+ UNREACHABLE();
+ } else {
+ ASSERT(r.IsTagged());
+ Register reg = ToRegister(instr->value());
+ HType type = instr->hydrogen()->value()->type();
+ if (type.IsBoolean()) {
+ ASSERT(!info()->IsStub());
+ __ cmp(reg, factory()->true_value());
+ EmitBranch(instr, equal);
+ } else if (type.IsSmi()) {
+ ASSERT(!info()->IsStub());
+ __ test(reg, Operand(reg));
+ EmitBranch(instr, not_equal);
+ } else if (type.IsJSArray()) {
+ ASSERT(!info()->IsStub());
+ EmitBranch(instr, no_condition);
+ } else if (type.IsHeapNumber()) {
+ UNREACHABLE();
+ } else if (type.IsString()) {
+ ASSERT(!info()->IsStub());
+ __ cmp(FieldOperand(reg, String::kLengthOffset), Immediate(0));
+ EmitBranch(instr, not_equal);
+ } else {
+ ToBooleanStub::Types expected = instr->hydrogen()->expected_input_types();
+ if (expected.IsEmpty()) expected = ToBooleanStub::Types::Generic();
+
+ if (expected.Contains(ToBooleanStub::UNDEFINED)) {
+ // undefined -> false.
+ __ cmp(reg, factory()->undefined_value());
+ __ j(equal, instr->FalseLabel(chunk_));
+ }
+ if (expected.Contains(ToBooleanStub::BOOLEAN)) {
+ // true -> true.
+ __ cmp(reg, factory()->true_value());
+ __ j(equal, instr->TrueLabel(chunk_));
+ // false -> false.
+ __ cmp(reg, factory()->false_value());
+ __ j(equal, instr->FalseLabel(chunk_));
+ }
+ if (expected.Contains(ToBooleanStub::NULL_TYPE)) {
+ // 'null' -> false.
+ __ cmp(reg, factory()->null_value());
+ __ j(equal, instr->FalseLabel(chunk_));
+ }
+
+ if (expected.Contains(ToBooleanStub::SMI)) {
+ // Smis: 0 -> false, all other -> true.
+ __ test(reg, Operand(reg));
+ __ j(equal, instr->FalseLabel(chunk_));
+ __ JumpIfSmi(reg, instr->TrueLabel(chunk_));
+ } else if (expected.NeedsMap()) {
+ // If we need a map later and have a Smi -> deopt.
+ __ test(reg, Immediate(kSmiTagMask));
+ DeoptimizeIf(zero, instr->environment());
+ }
+
+ Register map = no_reg; // Keep the compiler happy.
+ if (expected.NeedsMap()) {
+ map = ToRegister(instr->temp());
+ ASSERT(!map.is(reg));
+ __ mov(map, FieldOperand(reg, HeapObject::kMapOffset));
+
+ if (expected.CanBeUndetectable()) {
+ // Undetectable -> false.
+ __ test_b(FieldOperand(map, Map::kBitFieldOffset),
+ 1 << Map::kIsUndetectable);
+ __ j(not_zero, instr->FalseLabel(chunk_));
+ }
+ }
+
+ if (expected.Contains(ToBooleanStub::SPEC_OBJECT)) {
+ // spec object -> true.
+ __ CmpInstanceType(map, FIRST_SPEC_OBJECT_TYPE);
+ __ j(above_equal, instr->TrueLabel(chunk_));
+ }
+
+ if (expected.Contains(ToBooleanStub::STRING)) {
+ // String value -> false iff empty.
+ Label not_string;
+ __ CmpInstanceType(map, FIRST_NONSTRING_TYPE);
+        __ j(above_equal, &not_string, Label::kNear);
+ __ cmp(FieldOperand(reg, String::kLengthOffset), Immediate(0));
+ __ j(not_zero, instr->TrueLabel(chunk_));
+ __ jmp(instr->FalseLabel(chunk_));
+        __ bind(&not_string);
+ }
+
+ if (expected.Contains(ToBooleanStub::SYMBOL)) {
+ // Symbol value -> true.
+ __ CmpInstanceType(map, SYMBOL_TYPE);
+ __ j(equal, instr->TrueLabel(chunk_));
+ }
+
+ if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) {
+ // heap number -> false iff +0, -0, or NaN.
+ Label not_heap_number;
+ __ cmp(FieldOperand(reg, HeapObject::kMapOffset),
+ factory()->heap_number_map());
+        __ j(not_equal, &not_heap_number, Label::kNear);
+ __ fldz();
+ __ fld_d(FieldOperand(reg, HeapNumber::kValueOffset));
+ __ FCmp();
+ __ j(zero, instr->FalseLabel(chunk_));
+ __ jmp(instr->TrueLabel(chunk_));
+        __ bind(&not_heap_number);
+ }
+
+ if (!expected.IsGeneric()) {
+ // We've seen something for the first time -> deopt.
+ // This can only happen if we are not generic already.
+ DeoptimizeIf(no_condition, instr->environment());
+ }
+ }
+ }
+}
+
+
+void LCodeGen::EmitGoto(int block) {
+ if (!IsNextEmittedBlock(block)) {
+ __ jmp(chunk_->GetAssemblyLabel(LookupDestination(block)));
+ }
+}
+
+
+void LCodeGen::DoClobberDoubles(LClobberDoubles* instr) {
+}
+
+
+void LCodeGen::DoGoto(LGoto* instr) {
+ EmitGoto(instr->block_id());
+}
+
+
+Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
+ Condition cond = no_condition;
+ switch (op) {
+ case Token::EQ:
+ case Token::EQ_STRICT:
+ cond = equal;
+ break;
+ case Token::NE:
+ case Token::NE_STRICT:
+ cond = not_equal;
+ break;
+ case Token::LT:
+ cond = is_unsigned ? below : less;
+ break;
+ case Token::GT:
+ cond = is_unsigned ? above : greater;
+ break;
+ case Token::LTE:
+ cond = is_unsigned ? below_equal : less_equal;
+ break;
+ case Token::GTE:
+ cond = is_unsigned ? above_equal : greater_equal;
+ break;
+ case Token::IN:
+ case Token::INSTANCEOF:
+ default:
+ UNREACHABLE();
+ }
+ return cond;
+}
+
+
+void LCodeGen::DoCompareNumericAndBranch(LCompareNumericAndBranch* instr) {
+ LOperand* left = instr->left();
+ LOperand* right = instr->right();
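+  // Double compares set the CPU flags like unsigned integer compares, so
+  // they use the unsigned condition codes as well.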
+ bool is_unsigned =
+ instr->is_double() || instr->hydrogen()->CheckFlag(HInstruction::kUint32);
+ Condition cc = TokenToCondition(instr->op(), is_unsigned);
+
+ if (left->IsConstantOperand() && right->IsConstantOperand()) {
+ // We can statically evaluate the comparison.
+ double left_val = ToDouble(LConstantOperand::cast(left));
+ double right_val = ToDouble(LConstantOperand::cast(right));
+ int next_block = EvalComparison(instr->op(), left_val, right_val) ?
+ instr->TrueDestination(chunk_) : instr->FalseDestination(chunk_);
+ EmitGoto(next_block);
+ } else {
+ if (instr->is_double()) {
+ X87LoadForUsage(ToX87Register(right), ToX87Register(left));
+ __ FCmp();
+ // Don't base result on EFLAGS when a NaN is involved. Instead
+ // jump to the false block.
+ __ j(parity_even, instr->FalseLabel(chunk_));
+ } else {
+ if (right->IsConstantOperand()) {
+ __ cmp(ToOperand(left),
+ ToImmediate(right, instr->hydrogen()->representation()));
+ } else if (left->IsConstantOperand()) {
+ __ cmp(ToOperand(right),
+ ToImmediate(left, instr->hydrogen()->representation()));
+ // We transposed the operands. Reverse the condition.
+ cc = ReverseCondition(cc);
+ } else {
+ __ cmp(ToRegister(left), ToOperand(right));
+ }
+ }
+ EmitBranch(instr, cc);
+ }
+}
+
+
+void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
+ Register left = ToRegister(instr->left());
+
+ if (instr->right()->IsConstantOperand()) {
+ Handle<Object> right = ToHandle(LConstantOperand::cast(instr->right()));
+ __ CmpObject(left, right);
+ } else {
+ Operand right = ToOperand(instr->right());
+ __ cmp(left, right);
+ }
+ EmitBranch(instr, equal);
+}
+
+
+void LCodeGen::DoCmpHoleAndBranch(LCmpHoleAndBranch* instr) {
+ if (instr->hydrogen()->representation().IsTagged()) {
+ Register input_reg = ToRegister(instr->object());
+ __ cmp(input_reg, factory()->the_hole_value());
+ EmitBranch(instr, equal);
+ return;
+ }
+
+  // Put the value on top of the x87 stack.
+ X87Register src = ToX87Register(instr->object());
+ X87LoadForUsage(src);
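+  // The hole is encoded as a NaN, so a value that compares ordered with
+  // itself cannot be the hole.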
+ __ fld(0);
+ __ fld(0);
+ __ FCmp();
+ Label ok;
+ __ j(parity_even, &ok, Label::kNear);
+ __ fstp(0);
+ EmitFalseBranch(instr, no_condition);
+ __ bind(&ok);
+
+
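+  // Spill the value as a double and compare its upper 32 bits against the
+  // hole NaN pattern.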
+ __ sub(esp, Immediate(kDoubleSize));
+ __ fstp_d(MemOperand(esp, 0));
+
+ __ add(esp, Immediate(kDoubleSize));
+ int offset = sizeof(kHoleNanUpper32);
+ __ cmp(MemOperand(esp, -offset), Immediate(kHoleNanUpper32));
+ EmitBranch(instr, equal);
+}
+
+
+void LCodeGen::DoCompareMinusZeroAndBranch(LCompareMinusZeroAndBranch* instr) {
+ Representation rep = instr->hydrogen()->value()->representation();
+ ASSERT(!rep.IsInteger32());
+
+ if (rep.IsDouble()) {
+ UNREACHABLE();
+ } else {
+ Register value = ToRegister(instr->value());
+ Handle<Map> map = masm()->isolate()->factory()->heap_number_map();
+ __ CheckMap(value, map, instr->FalseLabel(chunk()), DO_SMI_CHECK);
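+    // Only -0.0 has 0x80000000 in its upper word, and subtracting 1 from
+    // that value is the only case that sets the overflow flag.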
+ __ cmp(FieldOperand(value, HeapNumber::kExponentOffset),
+ Immediate(0x1));
+ EmitFalseBranch(instr, no_overflow);
+ __ cmp(FieldOperand(value, HeapNumber::kMantissaOffset),
+ Immediate(0x00000000));
+ EmitBranch(instr, equal);
+ }
+}
+
+
+Condition LCodeGen::EmitIsObject(Register input,
+ Register temp1,
+ Label* is_not_object,
+ Label* is_object) {
+ __ JumpIfSmi(input, is_not_object);
+
+ __ cmp(input, isolate()->factory()->null_value());
+ __ j(equal, is_object);
+
+ __ mov(temp1, FieldOperand(input, HeapObject::kMapOffset));
+ // Undetectable objects behave like undefined.
+ __ test_b(FieldOperand(temp1, Map::kBitFieldOffset),
+ 1 << Map::kIsUndetectable);
+ __ j(not_zero, is_not_object);
+
+ __ movzx_b(temp1, FieldOperand(temp1, Map::kInstanceTypeOffset));
+ __ cmp(temp1, FIRST_NONCALLABLE_SPEC_OBJECT_TYPE);
+ __ j(below, is_not_object);
+ __ cmp(temp1, LAST_NONCALLABLE_SPEC_OBJECT_TYPE);
+ return below_equal;
+}
+
+
+void LCodeGen::DoIsObjectAndBranch(LIsObjectAndBranch* instr) {
+ Register reg = ToRegister(instr->value());
+ Register temp = ToRegister(instr->temp());
+
+ Condition true_cond = EmitIsObject(
+ reg, temp, instr->FalseLabel(chunk_), instr->TrueLabel(chunk_));
+
+ EmitBranch(instr, true_cond);
+}
+
+
+Condition LCodeGen::EmitIsString(Register input,
+ Register temp1,
+ Label* is_not_string,
+ SmiCheck check_needed = INLINE_SMI_CHECK) {
+ if (check_needed == INLINE_SMI_CHECK) {
+ __ JumpIfSmi(input, is_not_string);
+ }
+
+ Condition cond = masm_->IsObjectStringType(input, temp1, temp1);
+
+ return cond;
+}
+
+
+void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) {
+ Register reg = ToRegister(instr->value());
+ Register temp = ToRegister(instr->temp());
+
+ SmiCheck check_needed =
+ instr->hydrogen()->value()->IsHeapObject()
+ ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
+
+ Condition true_cond = EmitIsString(
+ reg, temp, instr->FalseLabel(chunk_), check_needed);
+
+ EmitBranch(instr, true_cond);
+}
+
+
+void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
+ Operand input = ToOperand(instr->value());
+
+ __ test(input, Immediate(kSmiTagMask));
+ EmitBranch(instr, zero);
+}
+
+
+void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
+ Register input = ToRegister(instr->value());
+ Register temp = ToRegister(instr->temp());
+
+ if (!instr->hydrogen()->value()->IsHeapObject()) {
+ STATIC_ASSERT(kSmiTag == 0);
+ __ JumpIfSmi(input, instr->FalseLabel(chunk_));
+ }
+ __ mov(temp, FieldOperand(input, HeapObject::kMapOffset));
+ __ test_b(FieldOperand(temp, Map::kBitFieldOffset),
+ 1 << Map::kIsUndetectable);
+ EmitBranch(instr, not_zero);
+}
+
+
+static Condition ComputeCompareCondition(Token::Value op) {
+ switch (op) {
+ case Token::EQ_STRICT:
+ case Token::EQ:
+ return equal;
+ case Token::LT:
+ return less;
+ case Token::GT:
+ return greater;
+ case Token::LTE:
+ return less_equal;
+ case Token::GTE:
+ return greater_equal;
+ default:
+ UNREACHABLE();
+ return no_condition;
+ }
+}
+
+
+void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
+ Token::Value op = instr->op();
+
+ Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
+ CallCode(ic, RelocInfo::CODE_TARGET, instr);
+
+ Condition condition = ComputeCompareCondition(op);
+ __ test(eax, Operand(eax));
+
+ EmitBranch(instr, condition);
+}
+
+
+static InstanceType TestType(HHasInstanceTypeAndBranch* instr) {
+ InstanceType from = instr->from();
+ InstanceType to = instr->to();
+ if (from == FIRST_TYPE) return to;
+ ASSERT(from == to || to == LAST_TYPE);
+ return from;
+}
+
+
+static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) {
+ InstanceType from = instr->from();
+ InstanceType to = instr->to();
+ if (from == to) return equal;
+ if (to == LAST_TYPE) return above_equal;
+ if (from == FIRST_TYPE) return below_equal;
+ UNREACHABLE();
+ return equal;
+}
+
+
+void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
+ Register input = ToRegister(instr->value());
+ Register temp = ToRegister(instr->temp());
+
+ if (!instr->hydrogen()->value()->IsHeapObject()) {
+ __ JumpIfSmi(input, instr->FalseLabel(chunk_));
+ }
+
+ __ CmpObjectType(input, TestType(instr->hydrogen()), temp);
+ EmitBranch(instr, BranchCondition(instr->hydrogen()));
+}
+
+
+void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) {
+ Register input = ToRegister(instr->value());
+ Register result = ToRegister(instr->result());
+
+ __ AssertString(input);
+
+ __ mov(result, FieldOperand(input, String::kHashFieldOffset));
+ __ IndexFromHash(result, result);
+}
+
+
+void LCodeGen::DoHasCachedArrayIndexAndBranch(
+ LHasCachedArrayIndexAndBranch* instr) {
+ Register input = ToRegister(instr->value());
+
+ __ test(FieldOperand(input, String::kHashFieldOffset),
+ Immediate(String::kContainsCachedArrayIndexMask));
+ EmitBranch(instr, equal);
+}
+
+
+// Branches to a label or falls through with the answer in the z flag. Trashes
+// the temp registers, but not the input.
+void LCodeGen::EmitClassOfTest(Label* is_true,
+ Label* is_false,
+                               Handle<String> class_name,
+ Register input,
+ Register temp,
+ Register temp2) {
+ ASSERT(!input.is(temp));
+ ASSERT(!input.is(temp2));
+ ASSERT(!temp.is(temp2));
+ __ JumpIfSmi(input, is_false);
+
+ if (class_name->IsOneByteEqualTo(STATIC_ASCII_VECTOR("Function"))) {
+ // Assuming the following assertions, we can use the same compares to test
+ // for both being a function type and being in the object type range.
+ STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
+ STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE ==
+ FIRST_SPEC_OBJECT_TYPE + 1);
+ STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE ==
+ LAST_SPEC_OBJECT_TYPE - 1);
+ STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
+ __ CmpObjectType(input, FIRST_SPEC_OBJECT_TYPE, temp);
+ __ j(below, is_false);
+ __ j(equal, is_true);
+ __ CmpInstanceType(temp, LAST_SPEC_OBJECT_TYPE);
+ __ j(equal, is_true);
+ } else {
+ // Faster code path to avoid two compares: subtract lower bound from the
+ // actual type and do a signed compare with the width of the type range.
+ __ mov(temp, FieldOperand(input, HeapObject::kMapOffset));
+ __ movzx_b(temp2, FieldOperand(temp, Map::kInstanceTypeOffset));
+ __ sub(Operand(temp2), Immediate(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
+ __ cmp(Operand(temp2), Immediate(LAST_NONCALLABLE_SPEC_OBJECT_TYPE -
+ FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
+ __ j(above, is_false);
+ }
+
+ // Now we are in the FIRST-LAST_NONCALLABLE_SPEC_OBJECT_TYPE range.
+ // Check if the constructor in the map is a function.
+ __ mov(temp, FieldOperand(temp, Map::kConstructorOffset));
+ // Objects with a non-function constructor have class 'Object'.
+ __ CmpObjectType(temp, JS_FUNCTION_TYPE, temp2);
+ if (class_name->IsOneByteEqualTo(STATIC_ASCII_VECTOR("Object"))) {
+ __ j(not_equal, is_true);
+ } else {
+ __ j(not_equal, is_false);
+ }
+
+ // temp now contains the constructor function. Grab the
+ // instance class name from there.
+ __ mov(temp, FieldOperand(temp, JSFunction::kSharedFunctionInfoOffset));
+ __ mov(temp, FieldOperand(temp,
+ SharedFunctionInfo::kInstanceClassNameOffset));
+ // The class name we are testing against is internalized since it's a literal.
+ // The name in the constructor is internalized because of the way the context
+ // is booted. This routine isn't expected to work for random API-created
+ // classes and it doesn't have to because you can't access it with natives
+ // syntax. Since both sides are internalized it is sufficient to use an
+ // identity comparison.
+ __ cmp(temp, class_name);
+ // End with the answer in the z flag.
+}
+
+
+void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
+ Register input = ToRegister(instr->value());
+ Register temp = ToRegister(instr->temp());
+ Register temp2 = ToRegister(instr->temp2());
+
+ Handle<String> class_name = instr->hydrogen()->class_name();
+
+ EmitClassOfTest(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_),
+ class_name, input, temp, temp2);
+
+ EmitBranch(instr, equal);
+}
+
+
+void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
+ Register reg = ToRegister(instr->value());
+ __ cmp(FieldOperand(reg, HeapObject::kMapOffset), instr->map());
+ EmitBranch(instr, equal);
+}
+
+
+void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
+ // Object and function are in fixed registers defined by the stub.
+ ASSERT(ToRegister(instr->context()).is(esi));
+ InstanceofStub stub(isolate(), InstanceofStub::kArgsInRegisters);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+
+ Label true_value, done;
+ __ test(eax, Operand(eax));
+ __ j(zero, &true_value, Label::kNear);
+ __ mov(ToRegister(instr->result()), factory()->false_value());
+ __ jmp(&done, Label::kNear);
+ __ bind(&true_value);
+ __ mov(ToRegister(instr->result()), factory()->true_value());
+ __ bind(&done);
+}
+
+
+void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
+ class DeferredInstanceOfKnownGlobal V8_FINAL : public LDeferredCode {
+ public:
+ DeferredInstanceOfKnownGlobal(LCodeGen* codegen,
+ LInstanceOfKnownGlobal* instr,
+ const X87Stack& x87_stack)
+ : LDeferredCode(codegen, x87_stack), instr_(instr) { }
+ virtual void Generate() V8_OVERRIDE {
+ codegen()->DoDeferredInstanceOfKnownGlobal(instr_, &map_check_);
+ }
+ virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+ Label* map_check() { return &map_check_; }
+ private:
+ LInstanceOfKnownGlobal* instr_;
+ Label map_check_;
+ };
+
+ DeferredInstanceOfKnownGlobal* deferred;
+ deferred = new(zone()) DeferredInstanceOfKnownGlobal(this, instr, x87_stack_);
+
+ Label done, false_result;
+ Register object = ToRegister(instr->value());
+ Register temp = ToRegister(instr->temp());
+
+ // A Smi is not an instance of anything.
+ __ JumpIfSmi(object, &false_result, Label::kNear);
+
+  // This is the inlined call site instanceof cache. The two occurrences of the
+ // hole value will be patched to the last map/result pair generated by the
+ // instanceof stub.
+ Label cache_miss;
+ Register map = ToRegister(instr->temp());
+ __ mov(map, FieldOperand(object, HeapObject::kMapOffset));
+ __ bind(deferred->map_check()); // Label for calculating code patching.
+ Handle<Cell> cache_cell = factory()->NewCell(factory()->the_hole_value());
+ __ cmp(map, Operand::ForCell(cache_cell)); // Patched to cached map.
+ __ j(not_equal, &cache_miss, Label::kNear);
+ __ mov(eax, factory()->the_hole_value()); // Patched to either true or false.
+ __ jmp(&done, Label::kNear);
+
+ // The inlined call site cache did not match. Check for null and string
+ // before calling the deferred code.
+ __ bind(&cache_miss);
+ // Null is not an instance of anything.
+ __ cmp(object, factory()->null_value());
+ __ j(equal, &false_result, Label::kNear);
+
+ // String values are not instances of anything.
+ Condition is_string = masm_->IsObjectStringType(object, temp, temp);
+ __ j(is_string, &false_result, Label::kNear);
+
+ // Go to the deferred code.
+ __ jmp(deferred->entry());
+
+ __ bind(&false_result);
+ __ mov(ToRegister(instr->result()), factory()->false_value());
+
+ // Here result has either true or false. Deferred code also produces true or
+ // false object.
+ __ bind(deferred->exit());
+ __ bind(&done);
+}
+
+
+void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
+ Label* map_check) {
+ PushSafepointRegistersScope scope(this);
+
+ InstanceofStub::Flags flags = InstanceofStub::kNoFlags;
+ flags = static_cast<InstanceofStub::Flags>(
+ flags | InstanceofStub::kArgsInRegisters);
+ flags = static_cast<InstanceofStub::Flags>(
+ flags | InstanceofStub::kCallSiteInlineCheck);
+ flags = static_cast<InstanceofStub::Flags>(
+ flags | InstanceofStub::kReturnTrueFalseObject);
+ InstanceofStub stub(isolate(), flags);
+
+ // Get the temp register reserved by the instruction. This needs to be a
+ // register which is pushed last by PushSafepointRegisters as top of the
+ // stack is used to pass the offset to the location of the map check to
+ // the stub.
+ Register temp = ToRegister(instr->temp());
+ ASSERT(MacroAssembler::SafepointRegisterStackIndex(temp) == 0);
+ __ LoadHeapObject(InstanceofStub::right(), instr->function());
+ static const int kAdditionalDelta = 13;
+ int delta = masm_->SizeOfCodeGeneratedSince(map_check) + kAdditionalDelta;
+ __ mov(temp, Immediate(delta));
+ __ StoreToSafepointRegisterSlot(temp, temp);
+ CallCodeGeneric(stub.GetCode(),
+ RelocInfo::CODE_TARGET,
+ instr,
+ RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
+ // Get the deoptimization index of the LLazyBailout-environment that
+ // corresponds to this instruction.
+ LEnvironment* env = instr->GetDeferredLazyDeoptimizationEnvironment();
+ safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
+
+ // Put the result value into the eax slot and restore all registers.
+ __ StoreToSafepointRegisterSlot(eax, eax);
+}
+
+
+void LCodeGen::DoCmpT(LCmpT* instr) {
+ Token::Value op = instr->op();
+
+ Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
+ CallCode(ic, RelocInfo::CODE_TARGET, instr);
+
+ Condition condition = ComputeCompareCondition(op);
+ Label true_value, done;
+ __ test(eax, Operand(eax));
+ __ j(condition, &true_value, Label::kNear);
+ __ mov(ToRegister(instr->result()), factory()->false_value());
+ __ jmp(&done, Label::kNear);
+ __ bind(&true_value);
+ __ mov(ToRegister(instr->result()), factory()->true_value());
+ __ bind(&done);
+}
+
+
+void LCodeGen::EmitReturn(LReturn* instr, bool dynamic_frame_alignment) {
+ int extra_value_count = dynamic_frame_alignment ? 2 : 1;
+
+ if (instr->has_constant_parameter_count()) {
+ int parameter_count = ToInteger32(instr->constant_parameter_count());
+ if (dynamic_frame_alignment && FLAG_debug_code) {
+ __ cmp(Operand(esp,
+ (parameter_count + extra_value_count) * kPointerSize),
+ Immediate(kAlignmentZapValue));
+ __ Assert(equal, kExpectedAlignmentMarker);
+ }
+ __ Ret((parameter_count + extra_value_count) * kPointerSize, ecx);
+ } else {
+ Register reg = ToRegister(instr->parameter_count());
+ // The argument count parameter is a smi
+ __ SmiUntag(reg);
+ Register return_addr_reg = reg.is(ecx) ? ebx : ecx;
+ if (dynamic_frame_alignment && FLAG_debug_code) {
+ ASSERT(extra_value_count == 2);
+ __ cmp(Operand(esp, reg, times_pointer_size,
+ extra_value_count * kPointerSize),
+ Immediate(kAlignmentZapValue));
+ __ Assert(equal, kExpectedAlignmentMarker);
+ }
+
+    // Emit code to restore the stack based on instr->parameter_count().
+    __ pop(return_addr_reg);  // Save the return address.
+ if (dynamic_frame_alignment) {
+ __ inc(reg); // 1 more for alignment
+ }
+ __ shl(reg, kPointerSizeLog2);
+ __ add(esp, reg);
+ __ jmp(return_addr_reg);
+ }
+}
+
+
+void LCodeGen::DoReturn(LReturn* instr) {
+ if (FLAG_trace && info()->IsOptimizing()) {
+ // Preserve the return value on the stack and rely on the runtime call
+ // to return the value in the same register. We're leaving the code
+ // managed by the register allocator and tearing down the frame, it's
+ // safe to write to the context register.
+ __ push(eax);
+ __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+ __ CallRuntime(Runtime::kTraceExit, 1);
+ }
+ if (dynamic_frame_alignment_) {
+ // Fetch the state of the dynamic frame alignment.
+ __ mov(edx, Operand(ebp,
+ JavaScriptFrameConstants::kDynamicAlignmentStateOffset));
+ }
+ int no_frame_start = -1;
+ if (NeedsEagerFrame()) {
+ __ mov(esp, ebp);
+ __ pop(ebp);
+ no_frame_start = masm_->pc_offset();
+ }
+ if (dynamic_frame_alignment_) {
+ Label no_padding;
+ __ cmp(edx, Immediate(kNoAlignmentPadding));
+ __ j(equal, &no_padding, Label::kNear);
+
+ EmitReturn(instr, true);
+ __ bind(&no_padding);
+ }
+
+ EmitReturn(instr, false);
+ if (no_frame_start != -1) {
+ info()->AddNoFrameRange(no_frame_start, masm_->pc_offset());
+ }
+}
+
+
+void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
+ Register result = ToRegister(instr->result());
+ __ mov(result, Operand::ForCell(instr->hydrogen()->cell().handle()));
+ if (instr->hydrogen()->RequiresHoleCheck()) {
+ __ cmp(result, factory()->the_hole_value());
+ DeoptimizeIf(equal, instr->environment());
+ }
+}
+
+
+void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
+ ASSERT(ToRegister(instr->context()).is(esi));
+ ASSERT(ToRegister(instr->global_object()).is(edx));
+ ASSERT(ToRegister(instr->result()).is(eax));
+
+ __ mov(ecx, instr->name());
+ ContextualMode mode = instr->for_typeof() ? NOT_CONTEXTUAL : CONTEXTUAL;
+ Handle<Code> ic = LoadIC::initialize_stub(isolate(), mode);
+ CallCode(ic, RelocInfo::CODE_TARGET, instr);
+}
+
+
+void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
+ Register value = ToRegister(instr->value());
+ Handle<PropertyCell> cell_handle = instr->hydrogen()->cell().handle();
+
+ // If the cell we are storing to contains the hole it could have
+ // been deleted from the property dictionary. In that case, we need
+ // to update the property details in the property dictionary to mark
+ // it as no longer deleted. We deoptimize in that case.
+ if (instr->hydrogen()->RequiresHoleCheck()) {
+ __ cmp(Operand::ForCell(cell_handle), factory()->the_hole_value());
+ DeoptimizeIf(equal, instr->environment());
+ }
+
+ // Store the value.
+ __ mov(Operand::ForCell(cell_handle), value);
+ // Cells are always rescanned, so no write barrier here.
+}
+
+
+void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
+ Register context = ToRegister(instr->context());
+ Register result = ToRegister(instr->result());
+ __ mov(result, ContextOperand(context, instr->slot_index()));
+
+ if (instr->hydrogen()->RequiresHoleCheck()) {
+ __ cmp(result, factory()->the_hole_value());
+ if (instr->hydrogen()->DeoptimizesOnHole()) {
+ DeoptimizeIf(equal, instr->environment());
+ } else {
+ Label is_not_hole;
+ __ j(not_equal, &is_not_hole, Label::kNear);
+ __ mov(result, factory()->undefined_value());
+ __ bind(&is_not_hole);
+ }
+ }
+}
+
+
+void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
+ Register context = ToRegister(instr->context());
+ Register value = ToRegister(instr->value());
+
+ Label skip_assignment;
+
+ Operand target = ContextOperand(context, instr->slot_index());
+ if (instr->hydrogen()->RequiresHoleCheck()) {
+ __ cmp(target, factory()->the_hole_value());
+ if (instr->hydrogen()->DeoptimizesOnHole()) {
+ DeoptimizeIf(equal, instr->environment());
+ } else {
+ __ j(not_equal, &skip_assignment, Label::kNear);
+ }
+ }
+
+ __ mov(target, value);
+ if (instr->hydrogen()->NeedsWriteBarrier()) {
+ SmiCheck check_needed =
+ instr->hydrogen()->value()->IsHeapObject()
+ ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
+ Register temp = ToRegister(instr->temp());
+ int offset = Context::SlotOffset(instr->slot_index());
+ __ RecordWriteContextSlot(context,
+ offset,
+ value,
+ temp,
+ EMIT_REMEMBERED_SET,
+ check_needed);
+ }
+
+ __ bind(&skip_assignment);
+}
+
+
+void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
+ HObjectAccess access = instr->hydrogen()->access();
+ int offset = access.offset();
+
+ if (access.IsExternalMemory()) {
+ Register result = ToRegister(instr->result());
+ MemOperand operand = instr->object()->IsConstantOperand()
+ ? MemOperand::StaticVariable(ToExternalReference(
+ LConstantOperand::cast(instr->object())))
+ : MemOperand(ToRegister(instr->object()), offset);
+ __ Load(result, operand, access.representation());
+ return;
+ }
+
+ Register object = ToRegister(instr->object());
+ if (instr->hydrogen()->representation().IsDouble()) {
+ X87Mov(ToX87Register(instr->result()), FieldOperand(object, offset));
+ return;
+ }
+
+ Register result = ToRegister(instr->result());
+ if (!access.IsInobject()) {
+ __ mov(result, FieldOperand(object, JSObject::kPropertiesOffset));
+ object = result;
+ }
+ __ Load(result, FieldOperand(object, offset), access.representation());
+}
+
+
+void LCodeGen::EmitPushTaggedOperand(LOperand* operand) {
+ ASSERT(!operand->IsDoubleRegister());
+ if (operand->IsConstantOperand()) {
+ Handle<Object> object = ToHandle(LConstantOperand::cast(operand));
+ AllowDeferredHandleDereference smi_check;
+ if (object->IsSmi()) {
+ __ Push(Handle<Smi>::cast(object));
+ } else {
+ __ PushHeapObject(Handle<HeapObject>::cast(object));
+ }
+ } else if (operand->IsRegister()) {
+ __ push(ToRegister(operand));
+ } else {
+ __ push(ToOperand(operand));
+ }
+}
+
+
+void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
+ ASSERT(ToRegister(instr->context()).is(esi));
+ ASSERT(ToRegister(instr->object()).is(edx));
+ ASSERT(ToRegister(instr->result()).is(eax));
+
+ __ mov(ecx, instr->name());
+ Handle<Code> ic = LoadIC::initialize_stub(isolate(), NOT_CONTEXTUAL);
+ CallCode(ic, RelocInfo::CODE_TARGET, instr);
+}
+
+
+void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
+ Register function = ToRegister(instr->function());
+ Register temp = ToRegister(instr->temp());
+ Register result = ToRegister(instr->result());
+
+ // Check that the function really is a function.
+ __ CmpObjectType(function, JS_FUNCTION_TYPE, result);
+ DeoptimizeIf(not_equal, instr->environment());
+
+ // Check whether the function has an instance prototype.
+ Label non_instance;
+ __ test_b(FieldOperand(result, Map::kBitFieldOffset),
+ 1 << Map::kHasNonInstancePrototype);
+ __ j(not_zero, &non_instance, Label::kNear);
+
+ // Get the prototype or initial map from the function.
+ __ mov(result,
+ FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
+
+ // Check that the function has a prototype or an initial map.
+ __ cmp(Operand(result), Immediate(factory()->the_hole_value()));
+ DeoptimizeIf(equal, instr->environment());
+
+ // If the function does not have an initial map, we're done.
+ Label done;
+ __ CmpObjectType(result, MAP_TYPE, temp);
+ __ j(not_equal, &done, Label::kNear);
+
+ // Get the prototype from the initial map.
+ __ mov(result, FieldOperand(result, Map::kPrototypeOffset));
+ __ jmp(&done, Label::kNear);
+
+ // Non-instance prototype: Fetch prototype from constructor field
+ // in the function's map.
+ __ bind(&non_instance);
+ __ mov(result, FieldOperand(result, Map::kConstructorOffset));
+
+ // All done.
+ __ bind(&done);
+}
+
+
+void LCodeGen::DoLoadRoot(LLoadRoot* instr) {
+ Register result = ToRegister(instr->result());
+ __ LoadRoot(result, instr->index());
+}
+
+
+void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
+ Register arguments = ToRegister(instr->arguments());
+ Register result = ToRegister(instr->result());
+ if (instr->length()->IsConstantOperand() &&
+ instr->index()->IsConstantOperand()) {
+ int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
+ int const_length = ToInteger32(LConstantOperand::cast(instr->length()));
+ int index = (const_length - const_index) + 1;
+ __ mov(result, Operand(arguments, index * kPointerSize));
+ } else {
+ Register length = ToRegister(instr->length());
+ Operand index = ToOperand(instr->index());
+ // There are two words between the frame pointer and the last argument.
+    // Subtracting from length accounts for one of them; add one more.
+ __ sub(length, index);
+ __ mov(result, Operand(arguments, length, times_4, kPointerSize));
+ }
+}
+
+
+void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
+ ElementsKind elements_kind = instr->elements_kind();
+ LOperand* key = instr->key();
+ if (!key->IsConstantOperand() &&
+ ExternalArrayOpRequiresTemp(instr->hydrogen()->key()->representation(),
+ elements_kind)) {
+ __ SmiUntag(ToRegister(key));
+ }
+ Operand operand(BuildFastArrayOperand(
+ instr->elements(),
+ key,
+ instr->hydrogen()->key()->representation(),
+ elements_kind,
+ instr->base_offset()));
+ if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
+ elements_kind == FLOAT32_ELEMENTS) {
+ X87Mov(ToX87Register(instr->result()), operand, kX87FloatOperand);
+ } else if (elements_kind == EXTERNAL_FLOAT64_ELEMENTS ||
+ elements_kind == FLOAT64_ELEMENTS) {
+ X87Mov(ToX87Register(instr->result()), operand);
+ } else {
+ Register result(ToRegister(instr->result()));
+ switch (elements_kind) {
+ case EXTERNAL_INT8_ELEMENTS:
+ case INT8_ELEMENTS:
+ __ movsx_b(result, operand);
+ break;
+ case EXTERNAL_UINT8_CLAMPED_ELEMENTS:
+ case EXTERNAL_UINT8_ELEMENTS:
+ case UINT8_ELEMENTS:
+ case UINT8_CLAMPED_ELEMENTS:
+ __ movzx_b(result, operand);
+ break;
+ case EXTERNAL_INT16_ELEMENTS:
+ case INT16_ELEMENTS:
+ __ movsx_w(result, operand);
+ break;
+ case EXTERNAL_UINT16_ELEMENTS:
+ case UINT16_ELEMENTS:
+ __ movzx_w(result, operand);
+ break;
+ case EXTERNAL_INT32_ELEMENTS:
+ case INT32_ELEMENTS:
+ __ mov(result, operand);
+ break;
+ case EXTERNAL_UINT32_ELEMENTS:
+ case UINT32_ELEMENTS:
+ __ mov(result, operand);
+ if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
+ __ test(result, Operand(result));
+ DeoptimizeIf(negative, instr->environment());
+ }
+ break;
+ case EXTERNAL_FLOAT32_ELEMENTS:
+ case EXTERNAL_FLOAT64_ELEMENTS:
+ case FLOAT32_ELEMENTS:
+ case FLOAT64_ELEMENTS:
+ case FAST_SMI_ELEMENTS:
+ case FAST_ELEMENTS:
+ case FAST_DOUBLE_ELEMENTS:
+ case FAST_HOLEY_SMI_ELEMENTS:
+ case FAST_HOLEY_ELEMENTS:
+ case FAST_HOLEY_DOUBLE_ELEMENTS:
+ case DICTIONARY_ELEMENTS:
+ case SLOPPY_ARGUMENTS_ELEMENTS:
+ UNREACHABLE();
+ break;
+ }
+ }
+}
+
+
+void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
+ if (instr->hydrogen()->RequiresHoleCheck()) {
+ Operand hole_check_operand = BuildFastArrayOperand(
+ instr->elements(), instr->key(),
+ instr->hydrogen()->key()->representation(),
+ FAST_DOUBLE_ELEMENTS,
+ instr->base_offset() + sizeof(kHoleNanLower32));
+ __ cmp(hole_check_operand, Immediate(kHoleNanUpper32));
+ DeoptimizeIf(equal, instr->environment());
+ }
+
+ Operand double_load_operand = BuildFastArrayOperand(
+ instr->elements(),
+ instr->key(),
+ instr->hydrogen()->key()->representation(),
+ FAST_DOUBLE_ELEMENTS,
+ instr->base_offset());
+ X87Mov(ToX87Register(instr->result()), double_load_operand);
+}
+
+
+void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
+ Register result = ToRegister(instr->result());
+
+ // Load the result.
+ __ mov(result,
+ BuildFastArrayOperand(instr->elements(),
+ instr->key(),
+ instr->hydrogen()->key()->representation(),
+ FAST_ELEMENTS,
+ instr->base_offset()));
+
+ // Check for the hole value.
+ if (instr->hydrogen()->RequiresHoleCheck()) {
+ if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) {
+ __ test(result, Immediate(kSmiTagMask));
+ DeoptimizeIf(not_equal, instr->environment());
+ } else {
+ __ cmp(result, factory()->the_hole_value());
+ DeoptimizeIf(equal, instr->environment());
+ }
+ }
+}
+
+
+void LCodeGen::DoLoadKeyed(LLoadKeyed* instr) {
+ if (instr->is_typed_elements()) {
+ DoLoadKeyedExternalArray(instr);
+ } else if (instr->hydrogen()->representation().IsDouble()) {
+ DoLoadKeyedFixedDoubleArray(instr);
+ } else {
+ DoLoadKeyedFixedArray(instr);
+ }
+}
+
+
+Operand LCodeGen::BuildFastArrayOperand(
+ LOperand* elements_pointer,
+ LOperand* key,
+ Representation key_representation,
+ ElementsKind elements_kind,
+ uint32_t base_offset) {
+ Register elements_pointer_reg = ToRegister(elements_pointer);
+ int element_shift_size = ElementsKindToShiftSize(elements_kind);
+ int shift_size = element_shift_size;
+ if (key->IsConstantOperand()) {
+ int constant_value = ToInteger32(LConstantOperand::cast(key));
+ if (constant_value & 0xF0000000) {
+ Abort(kArrayIndexConstantValueTooBig);
+ }
+ return Operand(elements_pointer_reg,
+ ((constant_value) << shift_size)
+ + base_offset);
+ } else {
+ // Take the tag bit into account while computing the shift size.
+ if (key_representation.IsSmi() && (shift_size >= 1)) {
+ shift_size -= kSmiTagSize;
+ }
+ ScaleFactor scale_factor = static_cast<ScaleFactor>(shift_size);
+ return Operand(elements_pointer_reg,
+ ToRegister(key),
+ scale_factor,
+ base_offset);
+ }
+}
+
+
+void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
+ ASSERT(ToRegister(instr->context()).is(esi));
+ ASSERT(ToRegister(instr->object()).is(edx));
+ ASSERT(ToRegister(instr->key()).is(ecx));
+
+ Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
+ CallCode(ic, RelocInfo::CODE_TARGET, instr);
+}
+
+
+void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
+ Register result = ToRegister(instr->result());
+
+ if (instr->hydrogen()->from_inlined()) {
+ __ lea(result, Operand(esp, -2 * kPointerSize));
+ } else {
+    // Check for an arguments adaptor frame.
+ Label done, adapted;
+ __ mov(result, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
+ __ mov(result, Operand(result, StandardFrameConstants::kContextOffset));
+ __ cmp(Operand(result),
+ Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+ __ j(equal, &adapted, Label::kNear);
+
+ // No arguments adaptor frame.
+ __ mov(result, Operand(ebp));
+ __ jmp(&done, Label::kNear);
+
+ // Arguments adaptor frame present.
+ __ bind(&adapted);
+ __ mov(result, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
+
+ // Result is the frame pointer for the frame if not adapted and for the real
+ // frame below the adaptor frame if adapted.
+ __ bind(&done);
+ }
+}
+
+
+void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
+ Operand elem = ToOperand(instr->elements());
+ Register result = ToRegister(instr->result());
+
+ Label done;
+
+  // If there is no arguments adaptor frame, the number of arguments is fixed.
+ __ cmp(ebp, elem);
+ __ mov(result, Immediate(scope()->num_parameters()));
+ __ j(equal, &done, Label::kNear);
+
+ // Arguments adaptor frame present. Get argument length from there.
+ __ mov(result, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
+ __ mov(result, Operand(result,
+ ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ SmiUntag(result);
+
+ // Argument length is in result register.
+ __ bind(&done);
+}
+
+
+void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
+ Register receiver = ToRegister(instr->receiver());
+ Register function = ToRegister(instr->function());
+
+ // If the receiver is null or undefined, we have to pass the global
+ // object as a receiver to normal functions. Values have to be
+ // passed unchanged to builtins and strict-mode functions.
+ Label receiver_ok, global_object;
+ Label::Distance dist = DeoptEveryNTimes() ? Label::kFar : Label::kNear;
+ Register scratch = ToRegister(instr->temp());
+
+ if (!instr->hydrogen()->known_function()) {
+ // Do not transform the receiver to object for strict mode
+ // functions.
+ __ mov(scratch,
+ FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
+ __ test_b(FieldOperand(scratch, SharedFunctionInfo::kStrictModeByteOffset),
+ 1 << SharedFunctionInfo::kStrictModeBitWithinByte);
+ __ j(not_equal, &receiver_ok, dist);
+
+ // Do not transform the receiver to object for builtins.
+ __ test_b(FieldOperand(scratch, SharedFunctionInfo::kNativeByteOffset),
+ 1 << SharedFunctionInfo::kNativeBitWithinByte);
+ __ j(not_equal, &receiver_ok, dist);
+ }
+
+ // Normal function. Replace undefined or null with global receiver.
+ __ cmp(receiver, factory()->null_value());
+ __ j(equal, &global_object, Label::kNear);
+ __ cmp(receiver, factory()->undefined_value());
+ __ j(equal, &global_object, Label::kNear);
+
+ // The receiver should be a JS object.
+ __ test(receiver, Immediate(kSmiTagMask));
+ DeoptimizeIf(equal, instr->environment());
+ __ CmpObjectType(receiver, FIRST_SPEC_OBJECT_TYPE, scratch);
+ DeoptimizeIf(below, instr->environment());
+
+ __ jmp(&receiver_ok, Label::kNear);
+ __ bind(&global_object);
+ __ mov(receiver, FieldOperand(function, JSFunction::kContextOffset));
+ const int global_offset = Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX);
+ __ mov(receiver, Operand(receiver, global_offset));
+ const int receiver_offset = GlobalObject::kGlobalReceiverOffset;
+ __ mov(receiver, FieldOperand(receiver, receiver_offset));
+ __ bind(&receiver_ok);
+}
+
+
+void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
+ Register receiver = ToRegister(instr->receiver());
+ Register function = ToRegister(instr->function());
+ Register length = ToRegister(instr->length());
+ Register elements = ToRegister(instr->elements());
+ ASSERT(receiver.is(eax)); // Used for parameter count.
+ ASSERT(function.is(edi)); // Required by InvokeFunction.
+ ASSERT(ToRegister(instr->result()).is(eax));
+
+ // Copy the arguments to this function possibly from the
+ // adaptor frame below it.
+ const uint32_t kArgumentsLimit = 1 * KB;
+ __ cmp(length, kArgumentsLimit);
+ DeoptimizeIf(above, instr->environment());
+
+ __ push(receiver);
+ __ mov(receiver, length);
+
+ // Loop through the arguments pushing them onto the execution
+ // stack.
+ Label invoke, loop;
+ // length is a small non-negative integer, due to the test above.
+ __ test(length, Operand(length));
+ __ j(zero, &invoke, Label::kNear);
+ __ bind(&loop);
+ __ push(Operand(elements, length, times_pointer_size, 1 * kPointerSize));
+ __ dec(length);
+ __ j(not_zero, &loop);
+
+ // Invoke the function.
+ __ bind(&invoke);
+ ASSERT(instr->HasPointerMap());
+ LPointerMap* pointers = instr->pointer_map();
+ SafepointGenerator safepoint_generator(
+ this, pointers, Safepoint::kLazyDeopt);
+ ParameterCount actual(eax);
+ __ InvokeFunction(function, actual, CALL_FUNCTION, safepoint_generator);
+}
+
+
+void LCodeGen::DoDebugBreak(LDebugBreak* instr) {
+ __ int3();
+}
+
+
+void LCodeGen::DoPushArgument(LPushArgument* instr) {
+ LOperand* argument = instr->value();
+ EmitPushTaggedOperand(argument);
+}
+
+
+void LCodeGen::DoDrop(LDrop* instr) {
+ __ Drop(instr->count());
+}
+
+
+void LCodeGen::DoThisFunction(LThisFunction* instr) {
+ Register result = ToRegister(instr->result());
+ __ mov(result, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
+}
+
+
+void LCodeGen::DoContext(LContext* instr) {
+ Register result = ToRegister(instr->result());
+ if (info()->IsOptimizing()) {
+ __ mov(result, Operand(ebp, StandardFrameConstants::kContextOffset));
+ } else {
+ // If there is no frame, the context must be in esi.
+ ASSERT(result.is(esi));
+ }
+}
+
+
+void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
+ ASSERT(ToRegister(instr->context()).is(esi));
+ __ push(esi); // The context is the first argument.
+ __ push(Immediate(instr->hydrogen()->pairs()));
+ __ push(Immediate(Smi::FromInt(instr->hydrogen()->flags())));
+ CallRuntime(Runtime::kHiddenDeclareGlobals, 3, instr);
+}
+
+
+void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
+ int formal_parameter_count,
+ int arity,
+ LInstruction* instr,
+ EDIState edi_state) {
+ bool dont_adapt_arguments =
+ formal_parameter_count == SharedFunctionInfo::kDontAdaptArgumentsSentinel;
+ bool can_invoke_directly =
+ dont_adapt_arguments || formal_parameter_count == arity;
+
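+  // With matching arity (or no adaptation required) the code entry can be
+  // called directly instead of going through InvokeFunction.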
+ if (can_invoke_directly) {
+ if (edi_state == EDI_UNINITIALIZED) {
+ __ LoadHeapObject(edi, function);
+ }
+
+ // Change context.
+ __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
+
+    // Set eax to arguments count if adaptation is not needed. Assumes that eax
+ // is available to write to at this point.
+ if (dont_adapt_arguments) {
+ __ mov(eax, arity);
+ }
+
+ // Invoke function directly.
+ if (function.is_identical_to(info()->closure())) {
+ __ CallSelf();
+ } else {
+ __ call(FieldOperand(edi, JSFunction::kCodeEntryOffset));
+ }
+ RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
+ } else {
+ // We need to adapt arguments.
+ LPointerMap* pointers = instr->pointer_map();
+ SafepointGenerator generator(
+ this, pointers, Safepoint::kLazyDeopt);
+ ParameterCount count(arity);
+ ParameterCount expected(formal_parameter_count);
+ __ InvokeFunction(function, expected, count, CALL_FUNCTION, generator);
+ }
+}
+
+
+void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
+ ASSERT(ToRegister(instr->result()).is(eax));
+
+ LPointerMap* pointers = instr->pointer_map();
+ SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
+
+ if (instr->target()->IsConstantOperand()) {
+ LConstantOperand* target = LConstantOperand::cast(instr->target());
+ Handle<Code> code = Handle<Code>::cast(ToHandle(target));
+ generator.BeforeCall(__ CallSize(code, RelocInfo::CODE_TARGET));
+ __ call(code, RelocInfo::CODE_TARGET);
+ } else {
+ ASSERT(instr->target()->IsRegister());
+ Register target = ToRegister(instr->target());
+ generator.BeforeCall(__ CallSize(Operand(target)));
+ __ add(target, Immediate(Code::kHeaderSize - kHeapObjectTag));
+ __ call(target);
+ }
+ generator.AfterCall();
+}
+
+
+void LCodeGen::DoCallJSFunction(LCallJSFunction* instr) {
+ ASSERT(ToRegister(instr->function()).is(edi));
+ ASSERT(ToRegister(instr->result()).is(eax));
+
+ if (instr->hydrogen()->pass_argument_count()) {
+ __ mov(eax, instr->arity());
+ }
+
+ // Change context.
+ __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
+
+ bool is_self_call = false;
+ if (instr->hydrogen()->function()->IsConstant()) {
+ HConstant* fun_const = HConstant::cast(instr->hydrogen()->function());
+ Handle<JSFunction> jsfun =
+ Handle<JSFunction>::cast(fun_const->handle(isolate()));
+ is_self_call = jsfun.is_identical_to(info()->closure());
+ }
+
+ if (is_self_call) {
+ __ CallSelf();
+ } else {
+ __ call(FieldOperand(edi, JSFunction::kCodeEntryOffset));
+ }
+
+ RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
+}
+
+
+void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
+ Register input_reg = ToRegister(instr->value());
+ __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
+ factory()->heap_number_map());
+ DeoptimizeIf(not_equal, instr->environment());
+
+ Label slow, allocated, done;
+ Register tmp = input_reg.is(eax) ? ecx : eax;
+ Register tmp2 = tmp.is(ecx) ? edx : input_reg.is(ecx) ? edx : ecx;
+
+ // Preserve the value of all registers.
+ PushSafepointRegistersScope scope(this);
+
+ __ mov(tmp, FieldOperand(input_reg, HeapNumber::kExponentOffset));
+ // Check the sign of the argument. If the argument is positive, just
+ // return it. We do not need to patch the stack since |input| and
+ // |result| are the same register and |input| will be restored
+ // unchanged by popping safepoint registers.
+ __ test(tmp, Immediate(HeapNumber::kSignMask));
+ __ j(zero, &done, Label::kNear);
+
+ __ AllocateHeapNumber(tmp, tmp2, no_reg, &slow);
+ __ jmp(&allocated, Label::kNear);
+
+ // Slow case: Call the runtime system to do the number allocation.
+ __ bind(&slow);
+ CallRuntimeFromDeferred(Runtime::kHiddenAllocateHeapNumber, 0,
+ instr, instr->context());
+ // Set the pointer to the new heap number in tmp.
+ if (!tmp.is(eax)) __ mov(tmp, eax);
+ // Restore input_reg after call to runtime.
+ __ LoadFromSafepointRegisterSlot(input_reg, input_reg);
+
+ __ bind(&allocated);
+ __ mov(tmp2, FieldOperand(input_reg, HeapNumber::kExponentOffset));
+ __ and_(tmp2, ~HeapNumber::kSignMask);
+ __ mov(FieldOperand(tmp, HeapNumber::kExponentOffset), tmp2);
+ __ mov(tmp2, FieldOperand(input_reg, HeapNumber::kMantissaOffset));
+ __ mov(FieldOperand(tmp, HeapNumber::kMantissaOffset), tmp2);
+ __ StoreToSafepointRegisterSlot(input_reg, tmp);
+
+ __ bind(&done);
+}
+
+
+void LCodeGen::EmitIntegerMathAbs(LMathAbs* instr) {
+ Register input_reg = ToRegister(instr->value());
+ __ test(input_reg, Operand(input_reg));
+ Label is_positive;
+ __ j(not_sign, &is_positive, Label::kNear);
+ __ neg(input_reg); // Sets flags.
+ DeoptimizeIf(negative, instr->environment());
+ __ bind(&is_positive);
+}
+
+
+void LCodeGen::DoMathAbs(LMathAbs* instr) {
+ // Class for deferred case.
+ class DeferredMathAbsTaggedHeapNumber V8_FINAL : public LDeferredCode {
+ public:
+ DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen,
+ LMathAbs* instr,
+ const X87Stack& x87_stack)
+ : LDeferredCode(codegen, x87_stack), instr_(instr) { }
+ virtual void Generate() V8_OVERRIDE {
+ codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
+ }
+ virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+ private:
+ LMathAbs* instr_;
+ };
+
+ ASSERT(instr->value()->Equals(instr->result()));
+ Representation r = instr->hydrogen()->value()->representation();
+
+ if (r.IsDouble()) {
+ UNIMPLEMENTED();
+ } else if (r.IsSmiOrInteger32()) {
+ EmitIntegerMathAbs(instr);
+ } else { // Tagged case.
+ DeferredMathAbsTaggedHeapNumber* deferred =
+ new(zone()) DeferredMathAbsTaggedHeapNumber(this, instr, x87_stack_);
+ Register input_reg = ToRegister(instr->value());
+ // Smi check.
+ __ JumpIfNotSmi(input_reg, deferred->entry());
+ EmitIntegerMathAbs(instr);
+ __ bind(deferred->exit());
+ }
+}
+
+
+void LCodeGen::DoMathFloor(LMathFloor* instr) {
+ UNIMPLEMENTED();
+}
+
+
+void LCodeGen::DoMathRound(LMathRound* instr) {
+ UNIMPLEMENTED();
+}
+
+
+void LCodeGen::DoMathSqrt(LMathSqrt* instr) {
+ UNIMPLEMENTED();
+}
+
+
+void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) {
+ UNIMPLEMENTED();
+}
+
+
+void LCodeGen::DoPower(LPower* instr) {
+ UNIMPLEMENTED();
+}
+
+
+void LCodeGen::DoMathLog(LMathLog* instr) {
+ UNIMPLEMENTED();
+}
+
+
+void LCodeGen::DoMathClz32(LMathClz32* instr) {
+ UNIMPLEMENTED();
+}
+
+
+void LCodeGen::DoMathExp(LMathExp* instr) {
+ UNIMPLEMENTED();
+}
+
+
+void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
+ ASSERT(ToRegister(instr->context()).is(esi));
+ ASSERT(ToRegister(instr->function()).is(edi));
+ ASSERT(instr->HasPointerMap());
+
+ Handle<JSFunction> known_function = instr->hydrogen()->known_function();
+ if (known_function.is_null()) {
+ LPointerMap* pointers = instr->pointer_map();
+ SafepointGenerator generator(
+ this, pointers, Safepoint::kLazyDeopt);
+ ParameterCount count(instr->arity());
+ __ InvokeFunction(edi, count, CALL_FUNCTION, generator);
+ } else {
+ CallKnownFunction(known_function,
+ instr->hydrogen()->formal_parameter_count(),
+ instr->arity(),
+ instr,
+ EDI_CONTAINS_TARGET);
+ }
+}
+
+
+void LCodeGen::DoCallFunction(LCallFunction* instr) {
+ ASSERT(ToRegister(instr->context()).is(esi));
+ ASSERT(ToRegister(instr->function()).is(edi));
+ ASSERT(ToRegister(instr->result()).is(eax));
+
+ int arity = instr->arity();
+ CallFunctionStub stub(isolate(), arity, instr->hydrogen()->function_flags());
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+}
+
+
+void LCodeGen::DoCallNew(LCallNew* instr) {
+ ASSERT(ToRegister(instr->context()).is(esi));
+ ASSERT(ToRegister(instr->constructor()).is(edi));
+ ASSERT(ToRegister(instr->result()).is(eax));
+
+ // No cell in ebx for construct type feedback in optimized code
+ __ mov(ebx, isolate()->factory()->undefined_value());
+ CallConstructStub stub(isolate(), NO_CALL_CONSTRUCTOR_FLAGS);
+ __ Move(eax, Immediate(instr->arity()));
+ CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
+}
+
+
+void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
+ ASSERT(ToRegister(instr->context()).is(esi));
+ ASSERT(ToRegister(instr->constructor()).is(edi));
+ ASSERT(ToRegister(instr->result()).is(eax));
+
+ __ Move(eax, Immediate(instr->arity()));
+ __ mov(ebx, isolate()->factory()->undefined_value());
+ ElementsKind kind = instr->hydrogen()->elements_kind();
+ AllocationSiteOverrideMode override_mode =
+ (AllocationSite::GetMode(kind) == TRACK_ALLOCATION_SITE)
+ ? DISABLE_ALLOCATION_SITES
+ : DONT_OVERRIDE;
+
+ if (instr->arity() == 0) {
+ ArrayNoArgumentConstructorStub stub(isolate(), kind, override_mode);
+ CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
+ } else if (instr->arity() == 1) {
+ Label done;
+ if (IsFastPackedElementsKind(kind)) {
+ Label packed_case;
+      // We might need a change here; look at the first argument.
+ __ mov(ecx, Operand(esp, 0));
+ __ test(ecx, ecx);
+ __ j(zero, &packed_case, Label::kNear);
+
+ ElementsKind holey_kind = GetHoleyElementsKind(kind);
+ ArraySingleArgumentConstructorStub stub(isolate(),
+ holey_kind,
+ override_mode);
+ CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
+ __ jmp(&done, Label::kNear);
+ __ bind(&packed_case);
+ }
+
+ ArraySingleArgumentConstructorStub stub(isolate(), kind, override_mode);
+ CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
+ __ bind(&done);
+ } else {
+ ArrayNArgumentsConstructorStub stub(isolate(), kind, override_mode);
+ CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
+ }
+}
+
+
+void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
+ ASSERT(ToRegister(instr->context()).is(esi));
+ CallRuntime(instr->function(), instr->arity(), instr);
+}
+
+
+void LCodeGen::DoStoreCodeEntry(LStoreCodeEntry* instr) {
+ Register function = ToRegister(instr->function());
+ Register code_object = ToRegister(instr->code_object());
+ __ lea(code_object, FieldOperand(code_object, Code::kHeaderSize));
+ __ mov(FieldOperand(function, JSFunction::kCodeEntryOffset), code_object);
+}
+
+
+void LCodeGen::DoInnerAllocatedObject(LInnerAllocatedObject* instr) {
+ Register result = ToRegister(instr->result());
+ Register base = ToRegister(instr->base_object());
+ if (instr->offset()->IsConstantOperand()) {
+ LConstantOperand* offset = LConstantOperand::cast(instr->offset());
+ __ lea(result, Operand(base, ToInteger32(offset)));
+ } else {
+ Register offset = ToRegister(instr->offset());
+ __ lea(result, Operand(base, offset, times_1, 0));
+ }
+}
+
+
+void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
+ Representation representation = instr->hydrogen()->field_representation();
+
+ HObjectAccess access = instr->hydrogen()->access();
+ int offset = access.offset();
+
+ if (access.IsExternalMemory()) {
+ ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
+ MemOperand operand = instr->object()->IsConstantOperand()
+ ? MemOperand::StaticVariable(
+ ToExternalReference(LConstantOperand::cast(instr->object())))
+ : MemOperand(ToRegister(instr->object()), offset);
+ if (instr->value()->IsConstantOperand()) {
+ LConstantOperand* operand_value = LConstantOperand::cast(instr->value());
+ __ mov(operand, Immediate(ToInteger32(operand_value)));
+ } else {
+ Register value = ToRegister(instr->value());
+ __ Store(value, operand, representation);
+ }
+ return;
+ }
+
+ Register object = ToRegister(instr->object());
+ SmiCheck check_needed =
+ instr->hydrogen()->value()->IsHeapObject()
+ ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
+
+ ASSERT(!(representation.IsSmi() &&
+ instr->value()->IsConstantOperand() &&
+ !IsSmi(LConstantOperand::cast(instr->value()))));
+ if (representation.IsHeapObject()) {
+ if (instr->value()->IsConstantOperand()) {
+ LConstantOperand* operand_value = LConstantOperand::cast(instr->value());
+ if (chunk_->LookupConstant(operand_value)->HasSmiValue()) {
+ DeoptimizeIf(no_condition, instr->environment());
+ }
+ } else {
+ if (!instr->hydrogen()->value()->type().IsHeapObject()) {
+ Register value = ToRegister(instr->value());
+ __ test(value, Immediate(kSmiTagMask));
+ DeoptimizeIf(zero, instr->environment());
+
+ // We know now that value is not a smi, so we can omit the check below.
+ check_needed = OMIT_SMI_CHECK;
+ }
+ }
+ } else if (representation.IsDouble()) {
+ ASSERT(access.IsInobject());
+ ASSERT(!instr->hydrogen()->has_transition());
+ ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
+ X87Register value = ToX87Register(instr->value());
+ X87Mov(FieldOperand(object, offset), value);
+ return;
+ }
+
+ if (instr->hydrogen()->has_transition()) {
+ Handle<Map> transition = instr->hydrogen()->transition_map();
+ AddDeprecationDependency(transition);
+ if (!instr->hydrogen()->NeedsWriteBarrierForMap()) {
+ __ mov(FieldOperand(object, HeapObject::kMapOffset), transition);
+ } else {
+ Register temp = ToRegister(instr->temp());
+ Register temp_map = ToRegister(instr->temp_map());
+ __ mov(temp_map, transition);
+ __ mov(FieldOperand(object, HeapObject::kMapOffset), temp_map);
+ // Update the write barrier for the map field.
+ __ RecordWriteField(object,
+ HeapObject::kMapOffset,
+ temp_map,
+ temp,
+ OMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
+ }
+ }
+
+ // Do the store.
+ Register write_register = object;
+ if (!access.IsInobject()) {
+ write_register = ToRegister(instr->temp());
+ __ mov(write_register, FieldOperand(object, JSObject::kPropertiesOffset));
+ }
+
+ MemOperand operand = FieldOperand(write_register, offset);
+ if (instr->value()->IsConstantOperand()) {
+ LConstantOperand* operand_value = LConstantOperand::cast(instr->value());
+ if (operand_value->IsRegister()) {
+ Register value = ToRegister(operand_value);
+ __ Store(value, operand, representation);
+ } else if (representation.IsInteger32()) {
+ Immediate immediate = ToImmediate(operand_value, representation);
+ ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
+ __ mov(operand, immediate);
+ } else {
+ Handle<Object> handle_value = ToHandle(operand_value);
+ ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
+ __ mov(operand, handle_value);
+ }
+ } else {
+ Register value = ToRegister(instr->value());
+ __ Store(value, operand, representation);
+ }
+
+ if (instr->hydrogen()->NeedsWriteBarrier()) {
+ Register value = ToRegister(instr->value());
+ Register temp = access.IsInobject() ? ToRegister(instr->temp()) : object;
+ // Update the write barrier for the object for in-object properties.
+ __ RecordWriteField(write_register,
+ offset,
+ value,
+ temp,
+ EMIT_REMEMBERED_SET,
+ check_needed);
+ }
+}
+
+
+void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
+ ASSERT(ToRegister(instr->context()).is(esi));
+ ASSERT(ToRegister(instr->object()).is(edx));
+ ASSERT(ToRegister(instr->value()).is(eax));
+
+ __ mov(ecx, instr->name());
+ Handle<Code> ic = StoreIC::initialize_stub(isolate(), instr->strict_mode());
+ CallCode(ic, RelocInfo::CODE_TARGET, instr);
+}
+
+
+void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
+ Condition cc = instr->hydrogen()->allow_equality() ? above : above_equal;
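+  // cc is the condition under which the bounds check fails and we deoptimize.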
+ if (instr->index()->IsConstantOperand()) {
+ __ cmp(ToOperand(instr->length()),
+ ToImmediate(LConstantOperand::cast(instr->index()),
+ instr->hydrogen()->length()->representation()));
+ cc = ReverseCondition(cc);
+ } else if (instr->length()->IsConstantOperand()) {
+ __ cmp(ToOperand(instr->index()),
+ ToImmediate(LConstantOperand::cast(instr->length()),
+ instr->hydrogen()->index()->representation()));
+ } else {
+ __ cmp(ToRegister(instr->index()), ToOperand(instr->length()));
+ }
+ if (FLAG_debug_code && instr->hydrogen()->skip_check()) {
+ Label done;
+ __ j(NegateCondition(cc), &done, Label::kNear);
+ __ int3();
+ __ bind(&done);
+ } else {
+ DeoptimizeIf(cc, instr->environment());
+ }
+}
+
+
+void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
+ ElementsKind elements_kind = instr->elements_kind();
+ LOperand* key = instr->key();
+ if (!key->IsConstantOperand() &&
+ ExternalArrayOpRequiresTemp(instr->hydrogen()->key()->representation(),
+ elements_kind)) {
+ __ SmiUntag(ToRegister(key));
+ }
+ Operand operand(BuildFastArrayOperand(
+ instr->elements(),
+ key,
+ instr->hydrogen()->key()->representation(),
+ elements_kind,
+ instr->base_offset()));
+ if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
+ elements_kind == FLOAT32_ELEMENTS) {
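+    // fld(0) duplicates the value on top of the X87 stack; fstp_s then stores
+    // it as a 32-bit float and pops the copy, leaving the original in place.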
+ __ fld(0);
+ __ fstp_s(operand);
+ } else if (elements_kind == EXTERNAL_FLOAT64_ELEMENTS ||
+ elements_kind == FLOAT64_ELEMENTS) {
+ X87Mov(operand, ToX87Register(instr->value()));
+ } else {
+ Register value = ToRegister(instr->value());
+ switch (elements_kind) {
+ case EXTERNAL_UINT8_CLAMPED_ELEMENTS:
+ case EXTERNAL_UINT8_ELEMENTS:
+ case EXTERNAL_INT8_ELEMENTS:
+ case UINT8_ELEMENTS:
+ case INT8_ELEMENTS:
+ case UINT8_CLAMPED_ELEMENTS:
+ __ mov_b(operand, value);
+ break;
+ case EXTERNAL_INT16_ELEMENTS:
+ case EXTERNAL_UINT16_ELEMENTS:
+ case UINT16_ELEMENTS:
+ case INT16_ELEMENTS:
+ __ mov_w(operand, value);
+ break;
+ case EXTERNAL_INT32_ELEMENTS:
+ case EXTERNAL_UINT32_ELEMENTS:
+ case UINT32_ELEMENTS:
+ case INT32_ELEMENTS:
+ __ mov(operand, value);
+ break;
+ case EXTERNAL_FLOAT32_ELEMENTS:
+ case EXTERNAL_FLOAT64_ELEMENTS:
+ case FLOAT32_ELEMENTS:
+ case FLOAT64_ELEMENTS:
+ case FAST_SMI_ELEMENTS:
+ case FAST_ELEMENTS:
+ case FAST_DOUBLE_ELEMENTS:
+ case FAST_HOLEY_SMI_ELEMENTS:
+ case FAST_HOLEY_ELEMENTS:
+ case FAST_HOLEY_DOUBLE_ELEMENTS:
+ case DICTIONARY_ELEMENTS:
+ case SLOPPY_ARGUMENTS_ELEMENTS:
+ UNREACHABLE();
+ break;
+ }
+ }
+}
+
+
+void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
+ ExternalReference canonical_nan_reference =
+ ExternalReference::address_of_canonical_non_hole_nan();
+ Operand double_store_operand = BuildFastArrayOperand(
+ instr->elements(),
+ instr->key(),
+ instr->hydrogen()->key()->representation(),
+ FAST_DOUBLE_ELEMENTS,
+ instr->base_offset());
+
+  if (instr->hydrogen()->IsConstantHoleStore()) {
+    // This means we should store the (double) hole. No floating-point
+    // registers are required.
+ double nan_double = FixedDoubleArray::hole_nan_as_double();
+ uint64_t int_val = BitCast<uint64_t, double>(nan_double);
+ int32_t lower = static_cast<int32_t>(int_val);
+ int32_t upper = static_cast<int32_t>(int_val >> (kBitsPerInt));
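+    // x86 is little-endian, so the low word is written at base_offset and the
+    // high word at base_offset + kPointerSize.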
+
+ __ mov(double_store_operand, Immediate(lower));
+ Operand double_store_operand2 = BuildFastArrayOperand(
+ instr->elements(),
+ instr->key(),
+ instr->hydrogen()->key()->representation(),
+ FAST_DOUBLE_ELEMENTS,
+ instr->base_offset() + kPointerSize);
+ __ mov(double_store_operand2, Immediate(upper));
+ } else {
+ Label no_special_nan_handling;
+ X87Register value = ToX87Register(instr->value());
+ X87Fxch(value);
+
+ if (instr->NeedsCanonicalization()) {
+ __ fld(0);
+ __ fld(0);
+ __ FCmp();
+
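+      // FCmp sets the parity flag when the comparison is unordered, i.e. when
+      // the value is NaN; for ordered values skip the canonicalization checks.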
+ __ j(parity_odd, &no_special_nan_handling, Label::kNear);
+ __ sub(esp, Immediate(kDoubleSize));
+ __ fst_d(MemOperand(esp, 0));
+ __ cmp(MemOperand(esp, sizeof(kHoleNanLower32)),
+ Immediate(kHoleNanUpper32));
+ __ add(esp, Immediate(kDoubleSize));
+ Label canonicalize;
+ __ j(not_equal, &canonicalize, Label::kNear);
+ __ jmp(&no_special_nan_handling, Label::kNear);
+ __ bind(&canonicalize);
+ __ fstp(0);
+ __ fld_d(Operand::StaticVariable(canonical_nan_reference));
+ }
+
+ __ bind(&no_special_nan_handling);
+ __ fst_d(double_store_operand);
+ }
+}
+
+
+void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) {
+ Register elements = ToRegister(instr->elements());
+ Register key = instr->key()->IsRegister() ? ToRegister(instr->key()) : no_reg;
+
+ Operand operand = BuildFastArrayOperand(
+ instr->elements(),
+ instr->key(),
+ instr->hydrogen()->key()->representation(),
+ FAST_ELEMENTS,
+ instr->base_offset());
+ if (instr->value()->IsRegister()) {
+ __ mov(operand, ToRegister(instr->value()));
+ } else {
+ LConstantOperand* operand_value = LConstantOperand::cast(instr->value());
+ if (IsSmi(operand_value)) {
+ Immediate immediate = ToImmediate(operand_value, Representation::Smi());
+ __ mov(operand, immediate);
+ } else {
+ ASSERT(!IsInteger32(operand_value));
+ Handle<Object> handle_value = ToHandle(operand_value);
+ __ mov(operand, handle_value);
+ }
+ }
+
+ if (instr->hydrogen()->NeedsWriteBarrier()) {
+ ASSERT(instr->value()->IsRegister());
+ Register value = ToRegister(instr->value());
+ ASSERT(!instr->key()->IsConstantOperand());
+ SmiCheck check_needed =
+ instr->hydrogen()->value()->IsHeapObject()
+ ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
+ // Compute address of modified element and store it into key register.
+ __ lea(key, operand);
+ __ RecordWrite(elements,
+ key,
+ value,
+ EMIT_REMEMBERED_SET,
+ check_needed);
+ }
+}
+
+
+void LCodeGen::DoStoreKeyed(LStoreKeyed* instr) {
+  // Dispatch by case: external/typed elements, fast double elements, or
+  // fast elements.
+ if (instr->is_typed_elements()) {
+ DoStoreKeyedExternalArray(instr);
+ } else if (instr->hydrogen()->value()->representation().IsDouble()) {
+ DoStoreKeyedFixedDoubleArray(instr);
+ } else {
+ DoStoreKeyedFixedArray(instr);
+ }
+}
+
+
+void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
+ ASSERT(ToRegister(instr->context()).is(esi));
+ ASSERT(ToRegister(instr->object()).is(edx));
+ ASSERT(ToRegister(instr->key()).is(ecx));
+ ASSERT(ToRegister(instr->value()).is(eax));
+
+ Handle<Code> ic = instr->strict_mode() == STRICT
+ ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
+ : isolate()->builtins()->KeyedStoreIC_Initialize();
+ CallCode(ic, RelocInfo::CODE_TARGET, instr);
+}
+
+
+void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
+ Register object = ToRegister(instr->object());
+ Register temp = ToRegister(instr->temp());
+ Label no_memento_found;
+ __ TestJSArrayForAllocationMemento(object, temp, &no_memento_found);
+ DeoptimizeIf(equal, instr->environment());
+ __ bind(&no_memento_found);
+}
+
+
+void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
+ Register object_reg = ToRegister(instr->object());
+
+ Handle<Map> from_map = instr->original_map();
+ Handle<Map> to_map = instr->transitioned_map();
+ ElementsKind from_kind = instr->from_kind();
+ ElementsKind to_kind = instr->to_kind();
+
+ Label not_applicable;
+ bool is_simple_map_transition =
+ IsSimpleMapChangeTransition(from_kind, to_kind);
+ Label::Distance branch_distance =
+ is_simple_map_transition ? Label::kNear : Label::kFar;
+ __ cmp(FieldOperand(object_reg, HeapObject::kMapOffset), from_map);
+ __ j(not_equal, ¬_applicable, branch_distance);
+ if (is_simple_map_transition) {
+ Register new_map_reg = ToRegister(instr->new_map_temp());
+ __ mov(FieldOperand(object_reg, HeapObject::kMapOffset),
+ Immediate(to_map));
+ // Write barrier.
+ ASSERT_NE(instr->temp(), NULL);
+ __ RecordWriteForMap(object_reg, to_map, new_map_reg,
+ ToRegister(instr->temp()));
+ } else {
+ ASSERT(ToRegister(instr->context()).is(esi));
+ ASSERT(object_reg.is(eax));
+ PushSafepointRegistersScope scope(this);
+ __ mov(ebx, to_map);
+ bool is_js_array = from_map->instance_type() == JS_ARRAY_TYPE;
+ TransitionElementsKindStub stub(isolate(), from_kind, to_kind, is_js_array);
+ __ CallStub(&stub);
+ RecordSafepointWithLazyDeopt(instr,
+ RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
+ }
+ __ bind(¬_applicable);
+}
+
+
+void LCodeGen::DoArrayShift(LArrayShift* instr) {
+ ASSERT(ToRegister(instr->context()).is(esi));
+ ASSERT(ToRegister(instr->object()).is(eax));
+ ASSERT(ToRegister(instr->result()).is(eax));
+ ArrayShiftStub stub(isolate(), instr->hydrogen()->kind());
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+}
+
+
+void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
+ class DeferredStringCharCodeAt V8_FINAL : public LDeferredCode {
+ public:
+ DeferredStringCharCodeAt(LCodeGen* codegen,
+ LStringCharCodeAt* instr,
+ const X87Stack& x87_stack)
+ : LDeferredCode(codegen, x87_stack), instr_(instr) { }
+ virtual void Generate() V8_OVERRIDE {
+ codegen()->DoDeferredStringCharCodeAt(instr_);
+ }
+ virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+ private:
+ LStringCharCodeAt* instr_;
+ };
+
+ DeferredStringCharCodeAt* deferred =
+ new(zone()) DeferredStringCharCodeAt(this, instr, x87_stack_);
+
+ StringCharLoadGenerator::Generate(masm(),
+ factory(),
+ ToRegister(instr->string()),
+ ToRegister(instr->index()),
+ ToRegister(instr->result()),
+ deferred->entry());
+ __ bind(deferred->exit());
+}
+
+
+void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
+ Register string = ToRegister(instr->string());
+ Register result = ToRegister(instr->result());
+
+ // TODO(3095996): Get rid of this. For now, we need to make the
+ // result register contain a valid pointer because it is already
+ // contained in the register pointer map.
+ __ Move(result, Immediate(0));
+
+ PushSafepointRegistersScope scope(this);
+ __ push(string);
+ // Push the index as a smi. This is safe because of the checks in
+ // DoStringCharCodeAt above.
+ STATIC_ASSERT(String::kMaxLength <= Smi::kMaxValue);
+ if (instr->index()->IsConstantOperand()) {
+ Immediate immediate = ToImmediate(LConstantOperand::cast(instr->index()),
+ Representation::Smi());
+ __ push(immediate);
+ } else {
+ Register index = ToRegister(instr->index());
+ __ SmiTag(index);
+ __ push(index);
+ }
+ CallRuntimeFromDeferred(Runtime::kHiddenStringCharCodeAt, 2,
+ instr, instr->context());
+ __ AssertSmi(eax);
+ __ SmiUntag(eax);
+ __ StoreToSafepointRegisterSlot(result, eax);
+}
+
+
+void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
+ class DeferredStringCharFromCode V8_FINAL : public LDeferredCode {
+ public:
+ DeferredStringCharFromCode(LCodeGen* codegen,
+ LStringCharFromCode* instr,
+ const X87Stack& x87_stack)
+ : LDeferredCode(codegen, x87_stack), instr_(instr) { }
+ virtual void Generate() V8_OVERRIDE {
+ codegen()->DoDeferredStringCharFromCode(instr_);
+ }
+ virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+ private:
+ LStringCharFromCode* instr_;
+ };
+
+ DeferredStringCharFromCode* deferred =
+ new(zone()) DeferredStringCharFromCode(this, instr, x87_stack_);
+
+ ASSERT(instr->hydrogen()->value()->representation().IsInteger32());
+ Register char_code = ToRegister(instr->char_code());
+ Register result = ToRegister(instr->result());
+ ASSERT(!char_code.is(result));
+
+ __ cmp(char_code, String::kMaxOneByteCharCode);
+ __ j(above, deferred->entry());
+ __ Move(result, Immediate(factory()->single_character_string_cache()));
+ __ mov(result, FieldOperand(result,
+ char_code, times_pointer_size,
+ FixedArray::kHeaderSize));
+ __ cmp(result, factory()->undefined_value());
+ __ j(equal, deferred->entry());
+ __ bind(deferred->exit());
+}
+
+
+void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
+ Register char_code = ToRegister(instr->char_code());
+ Register result = ToRegister(instr->result());
+
+ // TODO(3095996): Get rid of this. For now, we need to make the
+ // result register contain a valid pointer because it is already
+ // contained in the register pointer map.
+ __ Move(result, Immediate(0));
+
+ PushSafepointRegistersScope scope(this);
+ __ SmiTag(char_code);
+ __ push(char_code);
+ CallRuntimeFromDeferred(Runtime::kCharFromCode, 1, instr, instr->context());
+ __ StoreToSafepointRegisterSlot(result, eax);
+}
+
+
+void LCodeGen::DoStringAdd(LStringAdd* instr) {
+ ASSERT(ToRegister(instr->context()).is(esi));
+ ASSERT(ToRegister(instr->left()).is(edx));
+ ASSERT(ToRegister(instr->right()).is(eax));
+ StringAddStub stub(isolate(),
+ instr->hydrogen()->flags(),
+ instr->hydrogen()->pretenure_flag());
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+}
+
+
+void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
+ LOperand* input = instr->value();
+ LOperand* output = instr->result();
+ ASSERT(input->IsRegister() || input->IsStackSlot());
+ ASSERT(output->IsDoubleRegister());
+ if (input->IsRegister()) {
+ Register input_reg = ToRegister(input);
+ __ push(input_reg);
+ X87Mov(ToX87Register(output), Operand(esp, 0), kX87IntOperand);
+ __ pop(input_reg);
+ } else {
+ X87Mov(ToX87Register(output), ToOperand(input), kX87IntOperand);
+ }
+}
+
+
+void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
+ LOperand* input = instr->value();
+ LOperand* output = instr->result();
+ X87Register res = ToX87Register(output);
+ X87PrepareToWrite(res);
+ __ LoadUint32NoSSE2(ToRegister(input));
+ X87CommitWrite(res);
+}
+
+
+void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
+ class DeferredNumberTagI V8_FINAL : public LDeferredCode {
+ public:
+ DeferredNumberTagI(LCodeGen* codegen,
+ LNumberTagI* instr,
+ const X87Stack& x87_stack)
+ : LDeferredCode(codegen, x87_stack), instr_(instr) { }
+ virtual void Generate() V8_OVERRIDE {
+ codegen()->DoDeferredNumberTagIU(instr_, instr_->value(), instr_->temp(),
+ SIGNED_INT32);
+ }
+ virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+ private:
+ LNumberTagI* instr_;
+ };
+
+ LOperand* input = instr->value();
+ ASSERT(input->IsRegister() && input->Equals(instr->result()));
+ Register reg = ToRegister(input);
+
+ DeferredNumberTagI* deferred =
+ new(zone()) DeferredNumberTagI(this, instr, x87_stack_);
+ __ SmiTag(reg);
+ __ j(overflow, deferred->entry());
+ __ bind(deferred->exit());
+}
+
+
+void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
+ class DeferredNumberTagU V8_FINAL : public LDeferredCode {
+ public:
+ DeferredNumberTagU(LCodeGen* codegen,
+ LNumberTagU* instr,
+ const X87Stack& x87_stack)
+ : LDeferredCode(codegen, x87_stack), instr_(instr) { }
+ virtual void Generate() V8_OVERRIDE {
+ codegen()->DoDeferredNumberTagIU(instr_, instr_->value(), instr_->temp(),
+ UNSIGNED_INT32);
+ }
+ virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+ private:
+ LNumberTagU* instr_;
+ };
+
+ LOperand* input = instr->value();
+ ASSERT(input->IsRegister() && input->Equals(instr->result()));
+ Register reg = ToRegister(input);
+
+ DeferredNumberTagU* deferred =
+ new(zone()) DeferredNumberTagU(this, instr, x87_stack_);
+ __ cmp(reg, Immediate(Smi::kMaxValue));
+ __ j(above, deferred->entry());
+ __ SmiTag(reg);
+ __ bind(deferred->exit());
+}
+
+
+void LCodeGen::DoDeferredNumberTagIU(LInstruction* instr,
+ LOperand* value,
+ LOperand* temp,
+ IntegerSignedness signedness) {
+ Label done, slow;
+ Register reg = ToRegister(value);
+ Register tmp = ToRegister(temp);
+
+ if (signedness == SIGNED_INT32) {
+ // There was overflow, so bits 30 and 31 of the original integer
+ // disagree. Try to allocate a heap number in new space and store
+ // the value in there. If that fails, call the runtime system.
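+    // For example, tagging 0x40000000 produces 0x80000000 with the overflow
+    // flag set; SmiUntag below yields 0xC0000000, and the xor with 0x80000000
+    // restores the original 0x40000000.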
+ __ SmiUntag(reg);
+ __ xor_(reg, 0x80000000);
+ __ push(reg);
+ __ fild_s(Operand(esp, 0));
+ __ pop(reg);
+ } else {
+ // There's no fild variant for unsigned values, so zero-extend to a 64-bit
+ // int manually.
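+    // Pushing zero first and the value second places the value at the lower
+    // address, so fild_d reads a 64-bit integer whose low word is the value
+    // and whose high word is zero.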
+ __ push(Immediate(0));
+ __ push(reg);
+ __ fild_d(Operand(esp, 0));
+ __ pop(reg);
+ __ pop(reg);
+ }
+
+ if (FLAG_inline_new) {
+ __ AllocateHeapNumber(reg, tmp, no_reg, &slow);
+ __ jmp(&done, Label::kNear);
+ }
+
+ // Slow case: Call the runtime system to do the number allocation.
+ __ bind(&slow);
+ {
+ // TODO(3095996): Put a valid pointer value in the stack slot where the
+ // result register is stored, as this register is in the pointer map, but
+ // contains an integer value.
+ __ Move(reg, Immediate(0));
+
+ // Preserve the value of all registers.
+ PushSafepointRegistersScope scope(this);
+
+ // NumberTagI and NumberTagD use the context from the frame, rather than
+ // the environment's HContext or HInlinedContext value.
+ // They only call Runtime::kHiddenAllocateHeapNumber.
+ // The corresponding HChange instructions are added in a phase that does
+ // not have easy access to the local context.
+ __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+ __ CallRuntime(Runtime::kHiddenAllocateHeapNumber);
+ RecordSafepointWithRegisters(
+ instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
+ __ StoreToSafepointRegisterSlot(reg, eax);
+ }
+
+ __ bind(&done);
+ __ fstp_d(FieldOperand(reg, HeapNumber::kValueOffset));
+}
+
+
+void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
+ class DeferredNumberTagD V8_FINAL : public LDeferredCode {
+ public:
+ DeferredNumberTagD(LCodeGen* codegen,
+ LNumberTagD* instr,
+ const X87Stack& x87_stack)
+ : LDeferredCode(codegen, x87_stack), instr_(instr) { }
+ virtual void Generate() V8_OVERRIDE {
+ codegen()->DoDeferredNumberTagD(instr_);
+ }
+ virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+ private:
+ LNumberTagD* instr_;
+ };
+
+ Register reg = ToRegister(instr->result());
+
+  // Put the value on top of the X87 stack.
+ X87Register src = ToX87Register(instr->value());
+ X87LoadForUsage(src);
+
+ DeferredNumberTagD* deferred =
+ new(zone()) DeferredNumberTagD(this, instr, x87_stack_);
+ if (FLAG_inline_new) {
+ Register tmp = ToRegister(instr->temp());
+ __ AllocateHeapNumber(reg, tmp, no_reg, deferred->entry());
+ } else {
+ __ jmp(deferred->entry());
+ }
+ __ bind(deferred->exit());
+ __ fstp_d(FieldOperand(reg, HeapNumber::kValueOffset));
+}
+
+
+void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
+ // TODO(3095996): Get rid of this. For now, we need to make the
+ // result register contain a valid pointer because it is already
+ // contained in the register pointer map.
+ Register reg = ToRegister(instr->result());
+ __ Move(reg, Immediate(0));
+
+ PushSafepointRegistersScope scope(this);
+ // NumberTagI and NumberTagD use the context from the frame, rather than
+ // the environment's HContext or HInlinedContext value.
+ // They only call Runtime::kHiddenAllocateHeapNumber.
+ // The corresponding HChange instructions are added in a phase that does
+ // not have easy access to the local context.
+ __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+ __ CallRuntime(Runtime::kHiddenAllocateHeapNumber);
+ RecordSafepointWithRegisters(
+ instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
+ __ StoreToSafepointRegisterSlot(reg, eax);
+}
+
+
+void LCodeGen::DoSmiTag(LSmiTag* instr) {
+ HChange* hchange = instr->hydrogen();
+ Register input = ToRegister(instr->value());
+ if (hchange->CheckFlag(HValue::kCanOverflow) &&
+ hchange->value()->CheckFlag(HValue::kUint32)) {
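+    // Unsigned values with bit 30 or 31 set do not fit in a smi, so deopt.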
+ __ test(input, Immediate(0xc0000000));
+ DeoptimizeIf(not_zero, instr->environment());
+ }
+ __ SmiTag(input);
+ if (hchange->CheckFlag(HValue::kCanOverflow) &&
+ !hchange->value()->CheckFlag(HValue::kUint32)) {
+ DeoptimizeIf(overflow, instr->environment());
+ }
+}
+
+
+void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
+ LOperand* input = instr->value();
+ Register result = ToRegister(input);
+ ASSERT(input->IsRegister() && input->Equals(instr->result()));
+ if (instr->needs_check()) {
+ __ test(result, Immediate(kSmiTagMask));
+ DeoptimizeIf(not_zero, instr->environment());
+ } else {
+ __ AssertSmi(result);
+ }
+ __ SmiUntag(result);
+}
+
+
+void LCodeGen::EmitNumberUntagDNoSSE2(Register input_reg,
+ Register temp_reg,
+ X87Register res_reg,
+ bool can_convert_undefined_to_nan,
+ bool deoptimize_on_minus_zero,
+ LEnvironment* env,
+ NumberUntagDMode mode) {
+ Label load_smi, done;
+
+ X87PrepareToWrite(res_reg);
+ if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
+ // Smi check.
+ __ JumpIfSmi(input_reg, &load_smi, Label::kNear);
+
+ // Heap number map check.
+ __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
+ factory()->heap_number_map());
+ if (!can_convert_undefined_to_nan) {
+ DeoptimizeIf(not_equal, env);
+ } else {
+ Label heap_number, convert;
+ __ j(equal, &heap_number, Label::kNear);
+
+ // Convert undefined (or hole) to NaN.
+ __ cmp(input_reg, factory()->undefined_value());
+ DeoptimizeIf(not_equal, env);
+
+ __ bind(&convert);
+ ExternalReference nan =
+ ExternalReference::address_of_canonical_non_hole_nan();
+ __ fld_d(Operand::StaticVariable(nan));
+ __ jmp(&done, Label::kNear);
+
+ __ bind(&heap_number);
+ }
+ // Heap number to x87 conversion.
+ __ fld_d(FieldOperand(input_reg, HeapNumber::kValueOffset));
+ if (deoptimize_on_minus_zero) {
+ __ fldz();
+ __ FCmp();
+ __ fld_d(FieldOperand(input_reg, HeapNumber::kValueOffset));
+ __ j(not_zero, &done, Label::kNear);
+
+      // Use general-purpose registers to check for -0.0, which the FPU
+      // compare above cannot distinguish from +0.0.
+ __ mov(temp_reg, FieldOperand(input_reg, HeapNumber::kExponentOffset));
+ __ test(temp_reg, Immediate(HeapNumber::kSignMask));
+ __ j(zero, &done, Label::kNear);
+
+ // Pop FPU stack before deoptimizing.
+ __ fstp(0);
+ DeoptimizeIf(not_zero, env);
+ }
+ __ jmp(&done, Label::kNear);
+ } else {
+ ASSERT(mode == NUMBER_CANDIDATE_IS_SMI);
+ }
+
+ __ bind(&load_smi);
+ // Clobbering a temp is faster than re-tagging the
+ // input register since we avoid dependencies.
+ __ mov(temp_reg, input_reg);
+ __ SmiUntag(temp_reg); // Untag smi before converting to float.
+ __ push(temp_reg);
+ __ fild_s(Operand(esp, 0));
+ __ add(esp, Immediate(kPointerSize));
+ __ bind(&done);
+ X87CommitWrite(res_reg);
+}
+
+
+void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr, Label* done) {
+ Register input_reg = ToRegister(instr->value());
+
+ // The input was optimistically untagged; revert it.
+ STATIC_ASSERT(kSmiTagSize == 1);
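+  // SmiUntag halved the register; scaling by two and adding kHeapObjectTag
+  // reconstructs the original tagged pointer.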
+ __ lea(input_reg, Operand(input_reg, times_2, kHeapObjectTag));
+
+ if (instr->truncating()) {
+ Label no_heap_number, check_bools, check_false;
+
+ // Heap number map check.
+ __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
+ factory()->heap_number_map());
+ __ j(not_equal, &no_heap_number, Label::kNear);
+ __ TruncateHeapNumberToI(input_reg, input_reg);
+ __ jmp(done);
+
+ __ bind(&no_heap_number);
+    // Check for Oddballs. Undefined and False are converted to zero and True
+    // to one for truncating conversions.
+ __ cmp(input_reg, factory()->undefined_value());
+ __ j(not_equal, &check_bools, Label::kNear);
+ __ Move(input_reg, Immediate(0));
+ __ jmp(done);
+
+ __ bind(&check_bools);
+ __ cmp(input_reg, factory()->true_value());
+ __ j(not_equal, &check_false, Label::kNear);
+ __ Move(input_reg, Immediate(1));
+ __ jmp(done);
+
+ __ bind(&check_false);
+ __ cmp(input_reg, factory()->false_value());
+ __ RecordComment("Deferred TaggedToI: cannot truncate");
+ DeoptimizeIf(not_equal, instr->environment());
+ __ Move(input_reg, Immediate(0));
+ } else {
+ Label bailout;
+ __ TaggedToI(input_reg, input_reg,
+ instr->hydrogen()->GetMinusZeroMode(), &bailout);
+ __ jmp(done);
+ __ bind(&bailout);
+ DeoptimizeIf(no_condition, instr->environment());
+ }
+}
+
+
+void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
+ class DeferredTaggedToI V8_FINAL : public LDeferredCode {
+ public:
+ DeferredTaggedToI(LCodeGen* codegen,
+ LTaggedToI* instr,
+ const X87Stack& x87_stack)
+ : LDeferredCode(codegen, x87_stack), instr_(instr) { }
+ virtual void Generate() V8_OVERRIDE {
+ codegen()->DoDeferredTaggedToI(instr_, done());
+ }
+ virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+ private:
+ LTaggedToI* instr_;
+ };
+
+ LOperand* input = instr->value();
+ ASSERT(input->IsRegister());
+ Register input_reg = ToRegister(input);
+ ASSERT(input_reg.is(ToRegister(instr->result())));
+
+ if (instr->hydrogen()->value()->representation().IsSmi()) {
+ __ SmiUntag(input_reg);
+ } else {
+ DeferredTaggedToI* deferred =
+ new(zone()) DeferredTaggedToI(this, instr, x87_stack_);
+ // Optimistically untag the input.
+ // If the input is a HeapObject, SmiUntag will set the carry flag.
+ STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
+ __ SmiUntag(input_reg);
+ // Branch to deferred code if the input was tagged.
+ // The deferred code will take care of restoring the tag.
+ __ j(carry, deferred->entry());
+ __ bind(deferred->exit());
+ }
+}
+
+
+void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
+ LOperand* input = instr->value();
+ ASSERT(input->IsRegister());
+ LOperand* temp = instr->temp();
+ ASSERT(temp->IsRegister());
+ LOperand* result = instr->result();
+ ASSERT(result->IsDoubleRegister());
+
+ Register input_reg = ToRegister(input);
+ bool deoptimize_on_minus_zero =
+ instr->hydrogen()->deoptimize_on_minus_zero();
+ Register temp_reg = ToRegister(temp);
+
+ HValue* value = instr->hydrogen()->value();
+ NumberUntagDMode mode = value->representation().IsSmi()
+ ? NUMBER_CANDIDATE_IS_SMI : NUMBER_CANDIDATE_IS_ANY_TAGGED;
+
+ EmitNumberUntagDNoSSE2(input_reg,
+ temp_reg,
+ ToX87Register(result),
+ instr->hydrogen()->can_convert_undefined_to_nan(),
+ deoptimize_on_minus_zero,
+ instr->environment(),
+ mode);
+}
+
+
+void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
+ LOperand* input = instr->value();
+ ASSERT(input->IsDoubleRegister());
+ LOperand* result = instr->result();
+ ASSERT(result->IsRegister());
+ Register result_reg = ToRegister(result);
+
+ if (instr->truncating()) {
+ X87Register input_reg = ToX87Register(input);
+ X87Fxch(input_reg);
+ __ TruncateX87TOSToI(result_reg);
+ } else {
+ Label bailout, done;
+ X87Register input_reg = ToX87Register(input);
+ X87Fxch(input_reg);
+ __ X87TOSToI(result_reg, instr->hydrogen()->GetMinusZeroMode(),
+ &bailout, Label::kNear);
+ __ jmp(&done, Label::kNear);
+ __ bind(&bailout);
+ DeoptimizeIf(no_condition, instr->environment());
+ __ bind(&done);
+ }
+}
+
+
+void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) {
+ LOperand* input = instr->value();
+ ASSERT(input->IsDoubleRegister());
+ LOperand* result = instr->result();
+ ASSERT(result->IsRegister());
+ Register result_reg = ToRegister(result);
+
+ Label bailout, done;
+ X87Register input_reg = ToX87Register(input);
+ X87Fxch(input_reg);
+ __ X87TOSToI(result_reg, instr->hydrogen()->GetMinusZeroMode(),
+ &bailout, Label::kNear);
+ __ jmp(&done, Label::kNear);
+ __ bind(&bailout);
+ DeoptimizeIf(no_condition, instr->environment());
+ __ bind(&done);
+
+ __ SmiTag(result_reg);
+ DeoptimizeIf(overflow, instr->environment());
+}
+
+
+void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
+ LOperand* input = instr->value();
+ __ test(ToOperand(input), Immediate(kSmiTagMask));
+ DeoptimizeIf(not_zero, instr->environment());
+}
+
+
+void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
+ if (!instr->hydrogen()->value()->IsHeapObject()) {
+ LOperand* input = instr->value();
+ __ test(ToOperand(input), Immediate(kSmiTagMask));
+ DeoptimizeIf(zero, instr->environment());
+ }
+}
+
+
+void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
+ Register input = ToRegister(instr->value());
+ Register temp = ToRegister(instr->temp());
+
+ __ mov(temp, FieldOperand(input, HeapObject::kMapOffset));
+
+ if (instr->hydrogen()->is_interval_check()) {
+ InstanceType first;
+ InstanceType last;
+ instr->hydrogen()->GetCheckInterval(&first, &last);
+
+ __ cmpb(FieldOperand(temp, Map::kInstanceTypeOffset),
+ static_cast<int8_t>(first));
+
+ // If there is only one type in the interval check for equality.
+ if (first == last) {
+ DeoptimizeIf(not_equal, instr->environment());
+ } else {
+ DeoptimizeIf(below, instr->environment());
+ // Omit check for the last type.
+ if (last != LAST_TYPE) {
+ __ cmpb(FieldOperand(temp, Map::kInstanceTypeOffset),
+ static_cast<int8_t>(last));
+ DeoptimizeIf(above, instr->environment());
+ }
+ }
+ } else {
+ uint8_t mask;
+ uint8_t tag;
+ instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag);
+
+ if (IsPowerOf2(mask)) {
+ ASSERT(tag == 0 || IsPowerOf2(tag));
+ __ test_b(FieldOperand(temp, Map::kInstanceTypeOffset), mask);
+ DeoptimizeIf(tag == 0 ? not_zero : zero, instr->environment());
+ } else {
+ __ movzx_b(temp, FieldOperand(temp, Map::kInstanceTypeOffset));
+ __ and_(temp, mask);
+ __ cmp(temp, tag);
+ DeoptimizeIf(not_equal, instr->environment());
+ }
+ }
+}
+
+
+void LCodeGen::DoCheckValue(LCheckValue* instr) {
+ Handle<HeapObject> object = instr->hydrogen()->object().handle();
+ if (instr->hydrogen()->object_in_new_space()) {
+ Register reg = ToRegister(instr->value());
+ Handle<Cell> cell = isolate()->factory()->NewCell(object);
+ __ cmp(reg, Operand::ForCell(cell));
+ } else {
+ Operand operand = ToOperand(instr->value());
+ __ cmp(operand, object);
+ }
+ DeoptimizeIf(not_equal, instr->environment());
+}
+
+
+void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
+ {
+ PushSafepointRegistersScope scope(this);
+ __ push(object);
+ __ xor_(esi, esi);
+ __ CallRuntime(Runtime::kTryMigrateInstance);
+ RecordSafepointWithRegisters(
+ instr->pointer_map(), 1, Safepoint::kNoLazyDeopt);
+
+ __ test(eax, Immediate(kSmiTagMask));
+ }
+ DeoptimizeIf(zero, instr->environment());
+}
+
+
+void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
+ class DeferredCheckMaps V8_FINAL : public LDeferredCode {
+ public:
+ DeferredCheckMaps(LCodeGen* codegen,
+ LCheckMaps* instr,
+ Register object,
+ const X87Stack& x87_stack)
+ : LDeferredCode(codegen, x87_stack), instr_(instr), object_(object) {
+ SetExit(check_maps());
+ }
+ virtual void Generate() V8_OVERRIDE {
+ codegen()->DoDeferredInstanceMigration(instr_, object_);
+ }
+ Label* check_maps() { return &check_maps_; }
+ virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+ private:
+ LCheckMaps* instr_;
+ Label check_maps_;
+ Register object_;
+ };
+
+ if (instr->hydrogen()->IsStabilityCheck()) {
+ const UniqueSet<Map>* maps = instr->hydrogen()->maps();
+ for (int i = 0; i < maps->size(); ++i) {
+ AddStabilityDependency(maps->at(i).handle());
+ }
+ return;
+ }
+
+ LOperand* input = instr->value();
+ ASSERT(input->IsRegister());
+ Register reg = ToRegister(input);
+
+ DeferredCheckMaps* deferred = NULL;
+ if (instr->hydrogen()->HasMigrationTarget()) {
+ deferred = new(zone()) DeferredCheckMaps(this, instr, reg, x87_stack_);
+ __ bind(deferred->check_maps());
+ }
+
+ const UniqueSet<Map>* maps = instr->hydrogen()->maps();
+ Label success;
+ for (int i = 0; i < maps->size() - 1; i++) {
+ Handle<Map> map = maps->at(i).handle();
+ __ CompareMap(reg, map);
+ __ j(equal, &success, Label::kNear);
+ }
+
+ Handle<Map> map = maps->at(maps->size() - 1).handle();
+ __ CompareMap(reg, map);
+ if (instr->hydrogen()->HasMigrationTarget()) {
+ __ j(not_equal, deferred->entry());
+ } else {
+ DeoptimizeIf(not_equal, instr->environment());
+ }
+
+ __ bind(&success);
+}
+
+
+void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
+ UNREACHABLE();
+}
+
+
+void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
+ ASSERT(instr->unclamped()->Equals(instr->result()));
+ Register value_reg = ToRegister(instr->result());
+ __ ClampUint8(value_reg);
+}
+
+
+void LCodeGen::DoClampTToUint8NoSSE2(LClampTToUint8NoSSE2* instr) {
+ Register input_reg = ToRegister(instr->unclamped());
+ Register result_reg = ToRegister(instr->result());
+ Register scratch = ToRegister(instr->scratch());
+ Register scratch2 = ToRegister(instr->scratch2());
+ Register scratch3 = ToRegister(instr->scratch3());
+ Label is_smi, done, heap_number, valid_exponent,
+ largest_value, zero_result, maybe_nan_or_infinity;
+
+ __ JumpIfSmi(input_reg, &is_smi);
+
+ // Check for heap number
+ __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
+ factory()->heap_number_map());
+ __ j(equal, &heap_number, Label::kNear);
+
+ // Check for undefined. Undefined is converted to zero for clamping
+ // conversions.
+ __ cmp(input_reg, factory()->undefined_value());
+ DeoptimizeIf(not_equal, instr->environment());
+ __ jmp(&zero_result, Label::kNear);
+
+ // Heap number
+ __ bind(&heap_number);
+
+  // Surprisingly, all of the hand-crafted bit manipulation below is much
+  // faster than the x86 FPU's built-in rounding instructions, especially
+  // since "banker's rounding" would additionally be very expensive.
+
+ // Get exponent word.
+ __ mov(scratch, FieldOperand(input_reg, HeapNumber::kExponentOffset));
+ __ mov(scratch3, FieldOperand(input_reg, HeapNumber::kMantissaOffset));
+
+ // Test for negative values --> clamp to zero
+ __ test(scratch, scratch);
+ __ j(negative, &zero_result, Label::kNear);
+
+ // Get exponent alone in scratch2.
+ __ mov(scratch2, scratch);
+ __ and_(scratch2, HeapNumber::kExponentMask);
+ __ shr(scratch2, HeapNumber::kExponentShift);
+ __ j(zero, &zero_result, Label::kNear);
+ __ sub(scratch2, Immediate(HeapNumber::kExponentBias - 1));
+ __ j(negative, &zero_result, Label::kNear);
+
+ const uint32_t non_int8_exponent = 7;
+ __ cmp(scratch2, Immediate(non_int8_exponent + 1));
+ // If the exponent is too big, check for special values.
+ __ j(greater, &maybe_nan_or_infinity, Label::kNear);
+
+ __ bind(&valid_exponent);
+  // Exponent word in scratch, exponent in scratch2. We know that 0 <=
+  // exponent < 7. The shift bias is the number of bits to shift the mantissa
+  // such that, with an exponent of 7, the top-most one ends up in bit 30,
+  // allowing detection of the rounding overflow of a 255.5 to 256 (bit 31
+  // goes from 0 to 1).
+ int shift_bias = (30 - HeapNumber::kExponentShift) - 7 - 1;
+ __ lea(result_reg, MemOperand(scratch2, shift_bias));
+ // Here result_reg (ecx) is the shift, scratch is the exponent word. Get the
+ // top bits of the mantissa.
+ __ and_(scratch, HeapNumber::kMantissaMask);
+ // Put back the implicit 1 of the mantissa
+ __ or_(scratch, 1 << HeapNumber::kExponentShift);
+ // Shift up to round
+ __ shl_cl(scratch);
+  // Use "banker's rounding", as the spec requires: if the fractional part of
+  // the number is exactly 0.5, take the bit in the "ones" place and add it to
+  // the "halves" place, which has the effect of rounding to even.
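+  // For example, a value of 2.5 rounds to 2 while 3.5 rounds to 4
+  // (round half to even).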
+ __ mov(scratch2, scratch);
+ const uint32_t one_half_bit_shift = 30 - sizeof(uint8_t) * 8;
+ const uint32_t one_bit_shift = one_half_bit_shift + 1;
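+  // For 8-bit results, one_half_bit_shift is 22 and one_bit_shift is 23.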
+ __ and_(scratch2, Immediate((1 << one_bit_shift) - 1));
+ __ cmp(scratch2, Immediate(1 << one_half_bit_shift));
+ Label no_round;
+ __ j(less, &no_round, Label::kNear);
+ Label round_up;
+ __ mov(scratch2, Immediate(1 << one_half_bit_shift));
+ __ j(greater, &round_up, Label::kNear);
+ __ test(scratch3, scratch3);
+ __ j(not_zero, &round_up, Label::kNear);
+ __ mov(scratch2, scratch);
+ __ and_(scratch2, Immediate(1 << one_bit_shift));
+ __ shr(scratch2, 1);
+ __ bind(&round_up);
+ __ add(scratch, scratch2);
+ __ j(overflow, &largest_value, Label::kNear);
+ __ bind(&no_round);
+ __ shr(scratch, 23);
+ __ mov(result_reg, scratch);
+ __ jmp(&done, Label::kNear);
+
+ __ bind(&maybe_nan_or_infinity);
+ // Check for NaN/Infinity, all other values map to 255
+ __ cmp(scratch2, Immediate(HeapNumber::kInfinityOrNanExponent + 1));
+ __ j(not_equal, &largest_value, Label::kNear);
+
+ // Check for NaN, which differs from Infinity in that at least one mantissa
+ // bit is set.
+ __ and_(scratch, HeapNumber::kMantissaMask);
+ __ or_(scratch, FieldOperand(input_reg, HeapNumber::kMantissaOffset));
+ __ j(not_zero, &zero_result, Label::kNear); // M!=0 --> NaN
+ // Infinity -> Fall through to map to 255.
+
+ __ bind(&largest_value);
+ __ mov(result_reg, Immediate(255));
+ __ jmp(&done, Label::kNear);
+
+ __ bind(&zero_result);
+ __ xor_(result_reg, result_reg);
+ __ jmp(&done, Label::kNear);
+
+ // smi
+ __ bind(&is_smi);
+ if (!input_reg.is(result_reg)) {
+ __ mov(result_reg, input_reg);
+ }
+ __ SmiUntag(result_reg);
+ __ ClampUint8(result_reg);
+ __ bind(&done);
+}
+
+
+void LCodeGen::DoDoubleBits(LDoubleBits* instr) {
+ UNREACHABLE();
+}
+
+
+void LCodeGen::DoConstructDouble(LConstructDouble* instr) {
+ UNREACHABLE();
+}
+
+
+void LCodeGen::DoAllocate(LAllocate* instr) {
+ class DeferredAllocate V8_FINAL : public LDeferredCode {
+ public:
+ DeferredAllocate(LCodeGen* codegen,
+ LAllocate* instr,
+ const X87Stack& x87_stack)
+ : LDeferredCode(codegen, x87_stack), instr_(instr) { }
+ virtual void Generate() V8_OVERRIDE {
+ codegen()->DoDeferredAllocate(instr_);
+ }
+ virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+ private:
+ LAllocate* instr_;
+ };
+
+ DeferredAllocate* deferred =
+ new(zone()) DeferredAllocate(this, instr, x87_stack_);
+
+ Register result = ToRegister(instr->result());
+ Register temp = ToRegister(instr->temp());
+
+ // Allocate memory for the object.
+ AllocationFlags flags = TAG_OBJECT;
+ if (instr->hydrogen()->MustAllocateDoubleAligned()) {
+ flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
+ }
+ if (instr->hydrogen()->IsOldPointerSpaceAllocation()) {
+ ASSERT(!instr->hydrogen()->IsOldDataSpaceAllocation());
+ ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
+ flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_POINTER_SPACE);
+ } else if (instr->hydrogen()->IsOldDataSpaceAllocation()) {
+ ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
+ flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_DATA_SPACE);
+ }
+
+ if (instr->size()->IsConstantOperand()) {
+ int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
+ if (size <= Page::kMaxRegularHeapObjectSize) {
+ __ Allocate(size, result, temp, no_reg, deferred->entry(), flags);
+ } else {
+ __ jmp(deferred->entry());
+ }
+ } else {
+ Register size = ToRegister(instr->size());
+ __ Allocate(size, result, temp, no_reg, deferred->entry(), flags);
+ }
+
+ __ bind(deferred->exit());
+
+ if (instr->hydrogen()->MustPrefillWithFiller()) {
+ if (instr->size()->IsConstantOperand()) {
+ int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
+ __ mov(temp, (size / kPointerSize) - 1);
+ } else {
+ temp = ToRegister(instr->size());
+ __ shr(temp, kPointerSizeLog2);
+ __ dec(temp);
+ }
+ Label loop;
+ __ bind(&loop);
+ __ mov(FieldOperand(result, temp, times_pointer_size, 0),
+ isolate()->factory()->one_pointer_filler_map());
+ __ dec(temp);
+ __ j(not_zero, &loop);
+ }
+}
+
+
+void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
+ Register result = ToRegister(instr->result());
+
+ // TODO(3095996): Get rid of this. For now, we need to make the
+ // result register contain a valid pointer because it is already
+ // contained in the register pointer map.
+ __ Move(result, Immediate(Smi::FromInt(0)));
+
+ PushSafepointRegistersScope scope(this);
+ if (instr->size()->IsRegister()) {
+ Register size = ToRegister(instr->size());
+ ASSERT(!size.is(result));
+ __ SmiTag(ToRegister(instr->size()));
+ __ push(size);
+ } else {
+ int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
+ if (size >= 0 && size <= Smi::kMaxValue) {
+ __ push(Immediate(Smi::FromInt(size)));
+ } else {
+ // We should never get here at runtime => abort
+ __ int3();
+ return;
+ }
+ }
+
+ int flags = AllocateDoubleAlignFlag::encode(
+ instr->hydrogen()->MustAllocateDoubleAligned());
+ if (instr->hydrogen()->IsOldPointerSpaceAllocation()) {
+ ASSERT(!instr->hydrogen()->IsOldDataSpaceAllocation());
+ ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
+ flags = AllocateTargetSpace::update(flags, OLD_POINTER_SPACE);
+ } else if (instr->hydrogen()->IsOldDataSpaceAllocation()) {
+ ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
+ flags = AllocateTargetSpace::update(flags, OLD_DATA_SPACE);
+ } else {
+ flags = AllocateTargetSpace::update(flags, NEW_SPACE);
+ }
+ __ push(Immediate(Smi::FromInt(flags)));
+
+ CallRuntimeFromDeferred(
+ Runtime::kHiddenAllocateInTargetSpace, 2, instr, instr->context());
+ __ StoreToSafepointRegisterSlot(result, eax);
+}
+
+
+void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
+ ASSERT(ToRegister(instr->value()).is(eax));
+ __ push(eax);
+ CallRuntime(Runtime::kToFastProperties, 1, instr);
+}
+
+
+void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
+ ASSERT(ToRegister(instr->context()).is(esi));
+ Label materialized;
+ // Registers will be used as follows:
+ // ecx = literals array.
+ // ebx = regexp literal.
+ // eax = regexp literal clone.
+ // esi = context.
+ int literal_offset =
+ FixedArray::OffsetOfElementAt(instr->hydrogen()->literal_index());
+ __ LoadHeapObject(ecx, instr->hydrogen()->literals());
+ __ mov(ebx, FieldOperand(ecx, literal_offset));
+ __ cmp(ebx, factory()->undefined_value());
+ __ j(not_equal, &materialized, Label::kNear);
+
+ // Create regexp literal using runtime function
+ // Result will be in eax.
+ __ push(ecx);
+ __ push(Immediate(Smi::FromInt(instr->hydrogen()->literal_index())));
+ __ push(Immediate(instr->hydrogen()->pattern()));
+ __ push(Immediate(instr->hydrogen()->flags()));
+ CallRuntime(Runtime::kHiddenMaterializeRegExpLiteral, 4, instr);
+ __ mov(ebx, eax);
+
+ __ bind(&materialized);
+ int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
+ Label allocated, runtime_allocate;
+ __ Allocate(size, eax, ecx, edx, &runtime_allocate, TAG_OBJECT);
+ __ jmp(&allocated, Label::kNear);
+
+ __ bind(&runtime_allocate);
+ __ push(ebx);
+ __ push(Immediate(Smi::FromInt(size)));
+ CallRuntime(Runtime::kHiddenAllocateInNewSpace, 1, instr);
+ __ pop(ebx);
+
+ __ bind(&allocated);
+ // Copy the content into the newly allocated memory.
+ // (Unroll copy loop once for better throughput).
+ for (int i = 0; i < size - kPointerSize; i += 2 * kPointerSize) {
+ __ mov(edx, FieldOperand(ebx, i));
+ __ mov(ecx, FieldOperand(ebx, i + kPointerSize));
+ __ mov(FieldOperand(eax, i), edx);
+ __ mov(FieldOperand(eax, i + kPointerSize), ecx);
+ }
+ if ((size % (2 * kPointerSize)) != 0) {
+ __ mov(edx, FieldOperand(ebx, size - kPointerSize));
+ __ mov(FieldOperand(eax, size - kPointerSize), edx);
+ }
+}
+
+
+void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
+ ASSERT(ToRegister(instr->context()).is(esi));
+ // Use the fast case closure allocation code that allocates in new
+ // space for nested functions that don't need literals cloning.
+ bool pretenure = instr->hydrogen()->pretenure();
+ if (!pretenure && instr->hydrogen()->has_no_literals()) {
+ FastNewClosureStub stub(isolate(),
+ instr->hydrogen()->strict_mode(),
+ instr->hydrogen()->is_generator());
+ __ mov(ebx, Immediate(instr->hydrogen()->shared_info()));
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ } else {
+ __ push(esi);
+ __ push(Immediate(instr->hydrogen()->shared_info()));
+ __ push(Immediate(pretenure ? factory()->true_value()
+ : factory()->false_value()));
+ CallRuntime(Runtime::kHiddenNewClosure, 3, instr);
+ }
+}
+
+
+void LCodeGen::DoTypeof(LTypeof* instr) {
+ ASSERT(ToRegister(instr->context()).is(esi));
+ LOperand* input = instr->value();
+ EmitPushTaggedOperand(input);
+ CallRuntime(Runtime::kTypeof, 1, instr);
+}
+
+
+void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
+ Register input = ToRegister(instr->value());
+ Condition final_branch_condition = EmitTypeofIs(instr, input);
+ if (final_branch_condition != no_condition) {
+ EmitBranch(instr, final_branch_condition);
+ }
+}
+
+
+Condition LCodeGen::EmitTypeofIs(LTypeofIsAndBranch* instr, Register input) {
+ Label* true_label = instr->TrueLabel(chunk_);
+ Label* false_label = instr->FalseLabel(chunk_);
+ Handle<String> type_name = instr->type_literal();
+ int left_block = instr->TrueDestination(chunk_);
+ int right_block = instr->FalseDestination(chunk_);
+ int next_block = GetNextEmittedBlock();
+
+ Label::Distance true_distance = left_block == next_block ? Label::kNear
+ : Label::kFar;
+ Label::Distance false_distance = right_block == next_block ? Label::kNear
+ : Label::kFar;
+ Condition final_branch_condition = no_condition;
+ if (String::Equals(type_name, factory()->number_string())) {
+ __ JumpIfSmi(input, true_label, true_distance);
+ __ cmp(FieldOperand(input, HeapObject::kMapOffset),
+ factory()->heap_number_map());
+ final_branch_condition = equal;
+
+ } else if (String::Equals(type_name, factory()->string_string())) {
+ __ JumpIfSmi(input, false_label, false_distance);
+ __ CmpObjectType(input, FIRST_NONSTRING_TYPE, input);
+ __ j(above_equal, false_label, false_distance);
+ __ test_b(FieldOperand(input, Map::kBitFieldOffset),
+ 1 << Map::kIsUndetectable);
+ final_branch_condition = zero;
+
+ } else if (String::Equals(type_name, factory()->symbol_string())) {
+ __ JumpIfSmi(input, false_label, false_distance);
+ __ CmpObjectType(input, SYMBOL_TYPE, input);
+ final_branch_condition = equal;
+
+ } else if (String::Equals(type_name, factory()->boolean_string())) {
+ __ cmp(input, factory()->true_value());
+ __ j(equal, true_label, true_distance);
+ __ cmp(input, factory()->false_value());
+ final_branch_condition = equal;
+
+ } else if (FLAG_harmony_typeof &&
+ String::Equals(type_name, factory()->null_string())) {
+ __ cmp(input, factory()->null_value());
+ final_branch_condition = equal;
+
+ } else if (String::Equals(type_name, factory()->undefined_string())) {
+ __ cmp(input, factory()->undefined_value());
+ __ j(equal, true_label, true_distance);
+ __ JumpIfSmi(input, false_label, false_distance);
+ // Check for undetectable objects => true.
+ __ mov(input, FieldOperand(input, HeapObject::kMapOffset));
+ __ test_b(FieldOperand(input, Map::kBitFieldOffset),
+ 1 << Map::kIsUndetectable);
+ final_branch_condition = not_zero;
+
+ } else if (String::Equals(type_name, factory()->function_string())) {
+ STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
+ __ JumpIfSmi(input, false_label, false_distance);
+ __ CmpObjectType(input, JS_FUNCTION_TYPE, input);
+ __ j(equal, true_label, true_distance);
+ __ CmpInstanceType(input, JS_FUNCTION_PROXY_TYPE);
+ final_branch_condition = equal;
+
+ } else if (String::Equals(type_name, factory()->object_string())) {
+ __ JumpIfSmi(input, false_label, false_distance);
+ if (!FLAG_harmony_typeof) {
+ __ cmp(input, factory()->null_value());
+ __ j(equal, true_label, true_distance);
+ }
+ __ CmpObjectType(input, FIRST_NONCALLABLE_SPEC_OBJECT_TYPE, input);
+ __ j(below, false_label, false_distance);
+ __ CmpInstanceType(input, LAST_NONCALLABLE_SPEC_OBJECT_TYPE);
+ __ j(above, false_label, false_distance);
+ // Check for undetectable objects => false.
+ __ test_b(FieldOperand(input, Map::kBitFieldOffset),
+ 1 << Map::kIsUndetectable);
+ final_branch_condition = zero;
+
+ } else {
+ __ jmp(false_label, false_distance);
+ }
+ return final_branch_condition;
+}
+
+
+void LCodeGen::DoIsConstructCallAndBranch(LIsConstructCallAndBranch* instr) {
+ Register temp = ToRegister(instr->temp());
+
+ EmitIsConstructCall(temp);
+ EmitBranch(instr, equal);
+}
+
+
+void LCodeGen::EmitIsConstructCall(Register temp) {
+ // Get the frame pointer for the calling frame.
+ __ mov(temp, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
+
+ // Skip the arguments adaptor frame if it exists.
+ Label check_frame_marker;
+ __ cmp(Operand(temp, StandardFrameConstants::kContextOffset),
+ Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+ __ j(not_equal, &check_frame_marker, Label::kNear);
+ __ mov(temp, Operand(temp, StandardFrameConstants::kCallerFPOffset));
+
+ // Check the marker in the calling frame.
+ __ bind(&check_frame_marker);
+ __ cmp(Operand(temp, StandardFrameConstants::kMarkerOffset),
+ Immediate(Smi::FromInt(StackFrame::CONSTRUCT)));
+}
+
+
+void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) {
+ if (!info()->IsStub()) {
+ // Ensure that we have enough space after the previous lazy-bailout
+ // instruction for patching the code here.
+ int current_pc = masm()->pc_offset();
+ if (current_pc < last_lazy_deopt_pc_ + space_needed) {
+ int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
+ __ Nop(padding_size);
+ }
+ }
+ last_lazy_deopt_pc_ = masm()->pc_offset();
+}
+
+
+void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
+ last_lazy_deopt_pc_ = masm()->pc_offset();
+ ASSERT(instr->HasEnvironment());
+ LEnvironment* env = instr->environment();
+ RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
+ safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
+}
+
+
+void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
+ Deoptimizer::BailoutType type = instr->hydrogen()->type();
+ // TODO(danno): Stubs expect all deopts to be lazy for historical reasons (the
+ // needed return address), even though the implementation of LAZY and EAGER is
+ // now identical. When LAZY is eventually completely folded into EAGER, remove
+ // the special case below.
+ if (info()->IsStub() && type == Deoptimizer::EAGER) {
+ type = Deoptimizer::LAZY;
+ }
+ Comment(";;; deoptimize: %s", instr->hydrogen()->reason());
+ DeoptimizeIf(no_condition, instr->environment(), type);
+}
+
+
+void LCodeGen::DoDummy(LDummy* instr) {
+ // Nothing to see here, move on!
+}
+
+
+void LCodeGen::DoDummyUse(LDummyUse* instr) {
+ // Nothing to see here, move on!
+}
+
+
+void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
+ PushSafepointRegistersScope scope(this);
+ __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+ __ CallRuntime(Runtime::kHiddenStackGuard);
+ RecordSafepointWithLazyDeopt(
+ instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
+ ASSERT(instr->HasEnvironment());
+ LEnvironment* env = instr->environment();
+ safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
+}
+
+
+void LCodeGen::DoStackCheck(LStackCheck* instr) {
+ class DeferredStackCheck V8_FINAL : public LDeferredCode {
+ public:
+ DeferredStackCheck(LCodeGen* codegen,
+ LStackCheck* instr,
+ const X87Stack& x87_stack)
+ : LDeferredCode(codegen, x87_stack), instr_(instr) { }
+ virtual void Generate() V8_OVERRIDE {
+ codegen()->DoDeferredStackCheck(instr_);
+ }
+ virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+ private:
+ LStackCheck* instr_;
+ };
+
+ ASSERT(instr->HasEnvironment());
+ LEnvironment* env = instr->environment();
+ // There is no LLazyBailout instruction for stack-checks. We have to
+ // prepare for lazy deoptimization explicitly here.
+ if (instr->hydrogen()->is_function_entry()) {
+ // Perform stack overflow check.
+ Label done;
+ ExternalReference stack_limit =
+ ExternalReference::address_of_stack_limit(isolate());
+ __ cmp(esp, Operand::StaticVariable(stack_limit));
+ __ j(above_equal, &done, Label::kNear);
+
+ ASSERT(instr->context()->IsRegister());
+ ASSERT(ToRegister(instr->context()).is(esi));
+ CallCode(isolate()->builtins()->StackCheck(),
+ RelocInfo::CODE_TARGET,
+ instr);
+ __ bind(&done);
+ } else {
+ ASSERT(instr->hydrogen()->is_backwards_branch());
+ // Perform stack overflow check if this goto needs it before jumping.
+ DeferredStackCheck* deferred_stack_check =
+ new(zone()) DeferredStackCheck(this, instr, x87_stack_);
+ ExternalReference stack_limit =
+ ExternalReference::address_of_stack_limit(isolate());
+ __ cmp(esp, Operand::StaticVariable(stack_limit));
+ __ j(below, deferred_stack_check->entry());
+ EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
+ __ bind(instr->done_label());
+ deferred_stack_check->SetExit(instr->done_label());
+ RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
+ // Don't record a deoptimization index for the safepoint here.
+ // This will be done explicitly when emitting call and the safepoint in
+    // This will be done explicitly when emitting the call and the safepoint
+    // in the deferred code.
+}
+
+
+void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
+ // This is a pseudo-instruction that ensures that the environment here is
+ // properly registered for deoptimization and records the assembler's PC
+ // offset.
+ LEnvironment* environment = instr->environment();
+
+ // If the environment were already registered, we would have no way of
+ // backpatching it with the spill slot operands.
+ ASSERT(!environment->HasBeenRegistered());
+ RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
+
+ GenerateOsrPrologue();
+}
+
+
+void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
+ ASSERT(ToRegister(instr->context()).is(esi));
+ __ cmp(eax, isolate()->factory()->undefined_value());
+ DeoptimizeIf(equal, instr->environment());
+
+ __ cmp(eax, isolate()->factory()->null_value());
+ DeoptimizeIf(equal, instr->environment());
+
+ __ test(eax, Immediate(kSmiTagMask));
+ DeoptimizeIf(zero, instr->environment());
+
+ STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
+ __ CmpObjectType(eax, LAST_JS_PROXY_TYPE, ecx);
+ DeoptimizeIf(below_equal, instr->environment());
+
+ Label use_cache, call_runtime;
+ __ CheckEnumCache(&call_runtime);
+
+ __ mov(eax, FieldOperand(eax, HeapObject::kMapOffset));
+ __ jmp(&use_cache, Label::kNear);
+
+ // Get the set of properties to enumerate.
+ __ bind(&call_runtime);
+ __ push(eax);
+ CallRuntime(Runtime::kGetPropertyNamesFast, 1, instr);
+
+ __ cmp(FieldOperand(eax, HeapObject::kMapOffset),
+ isolate()->factory()->meta_map());
+ DeoptimizeIf(not_equal, instr->environment());
+ __ bind(&use_cache);
+}
+
+
+void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
+ Register map = ToRegister(instr->map());
+ Register result = ToRegister(instr->result());
+ Label load_cache, done;
+ __ EnumLength(result, map);
+ __ cmp(result, Immediate(Smi::FromInt(0)));
+ __ j(not_equal, &load_cache, Label::kNear);
+ __ mov(result, isolate()->factory()->empty_fixed_array());
+ __ jmp(&done, Label::kNear);
+
+ __ bind(&load_cache);
+ __ LoadInstanceDescriptors(map, result);
+ __ mov(result,
+ FieldOperand(result, DescriptorArray::kEnumCacheOffset));
+ __ mov(result,
+ FieldOperand(result, FixedArray::SizeFor(instr->idx())));
+ __ bind(&done);
+ __ test(result, result);
+ DeoptimizeIf(equal, instr->environment());
+}
+
+
+void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
+ Register object = ToRegister(instr->value());
+ __ cmp(ToRegister(instr->map()),
+ FieldOperand(object, HeapObject::kMapOffset));
+ DeoptimizeIf(not_equal, instr->environment());
+}
+
+
+void LCodeGen::DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr,
+ Register object,
+ Register index) {
+ PushSafepointRegistersScope scope(this);
+ __ push(object);
+ __ push(index);
+ __ xor_(esi, esi);
+ __ CallRuntime(Runtime::kLoadMutableDouble);
+ RecordSafepointWithRegisters(
+ instr->pointer_map(), 2, Safepoint::kNoLazyDeopt);
+ __ StoreToSafepointRegisterSlot(object, eax);
+}
+
+
+void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
+ class DeferredLoadMutableDouble V8_FINAL : public LDeferredCode {
+ public:
+ DeferredLoadMutableDouble(LCodeGen* codegen,
+ LLoadFieldByIndex* instr,
+ Register object,
+ Register index,
+ const X87Stack& x87_stack)
+ : LDeferredCode(codegen, x87_stack),
+ instr_(instr),
+ object_(object),
+ index_(index) {
+ }
+ virtual void Generate() V8_OVERRIDE {
+ codegen()->DoDeferredLoadMutableDouble(instr_, object_, index_);
+ }
+ virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+ private:
+ LLoadFieldByIndex* instr_;
+ Register object_;
+ Register index_;
+ };
+
+ Register object = ToRegister(instr->object());
+ Register index = ToRegister(instr->index());
+
+ DeferredLoadMutableDouble* deferred;
+ deferred = new(zone()) DeferredLoadMutableDouble(
+ this, instr, object, index, x87_stack_);
+
+ Label out_of_object, done;
+ __ test(index, Immediate(Smi::FromInt(1)));
+ __ j(not_zero, deferred->entry());
+
+ __ sar(index, 1);
+
+ __ cmp(index, Immediate(0));
+ __ j(less, &out_of_object, Label::kNear);
+ __ mov(object, FieldOperand(object,
+ index,
+ times_half_pointer_size,
+ JSObject::kHeaderSize));
+ __ jmp(&done, Label::kNear);
+
+ __ bind(&out_of_object);
+ __ mov(object, FieldOperand(object, JSObject::kPropertiesOffset));
+ __ neg(index);
+  // Index is now equal to the out-of-object property index plus 1.
+ __ mov(object, FieldOperand(object,
+ index,
+ times_half_pointer_size,
+ FixedArray::kHeaderSize - kPointerSize));
+ __ bind(deferred->exit());
+ __ bind(&done);
+}
+
+
+#undef __
+
+} } // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_X87
--- /dev/null
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_X87_LITHIUM_CODEGEN_X87_H_
+#define V8_X87_LITHIUM_CODEGEN_X87_H_
+
+#include "x87/lithium-x87.h"
+
+#include "checks.h"
+#include "deoptimizer.h"
+#include "x87/lithium-gap-resolver-x87.h"
+#include "lithium-codegen.h"
+#include "safepoint-table.h"
+#include "scopes.h"
+#include "utils.h"
+
+namespace v8 {
+namespace internal {
+
+// Forward declarations.
+class LDeferredCode;
+class LGapNode;
+class SafepointGenerator;
+
+class LCodeGen: public LCodeGenBase {
+ public:
+ LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info)
+ : LCodeGenBase(chunk, assembler, info),
+ deoptimizations_(4, info->zone()),
+ jump_table_(4, info->zone()),
+ deoptimization_literals_(8, info->zone()),
+ inlined_function_count_(0),
+ scope_(info->scope()),
+ translations_(info->zone()),
+ deferred_(8, info->zone()),
+ dynamic_frame_alignment_(false),
+ support_aligned_spilled_doubles_(false),
+ osr_pc_offset_(-1),
+ frame_is_built_(false),
+ x87_stack_(assembler),
+ safepoints_(info->zone()),
+ resolver_(this),
+ expected_safepoint_kind_(Safepoint::kSimple) {
+ PopulateDeoptimizationLiteralsWithInlinedFunctions();
+ }
+
+ int LookupDestination(int block_id) const {
+ return chunk()->LookupDestination(block_id);
+ }
+
+ bool IsNextEmittedBlock(int block_id) const {
+ return LookupDestination(block_id) == GetNextEmittedBlock();
+ }
+
+ bool NeedsEagerFrame() const {
+ return GetStackSlotCount() > 0 ||
+ info()->is_non_deferred_calling() ||
+ !info()->IsStub() ||
+ info()->requires_frame();
+ }
+ bool NeedsDeferredFrame() const {
+ return !NeedsEagerFrame() && info()->is_deferred_calling();
+ }
+
+ // Support for converting LOperands to assembler types.
+ Operand ToOperand(LOperand* op) const;
+ Register ToRegister(LOperand* op) const;
+ X87Register ToX87Register(LOperand* op) const;
+
+ bool IsInteger32(LConstantOperand* op) const;
+ bool IsSmi(LConstantOperand* op) const;
+ Immediate ToImmediate(LOperand* op, const Representation& r) const {
+ return Immediate(ToRepresentation(LConstantOperand::cast(op), r));
+ }
+ double ToDouble(LConstantOperand* op) const;
+
+  // Support for non-SSE2 (x87) floating point stack handling.
+ // These functions maintain the mapping of physical stack registers to our
+ // virtual registers between instructions.
+ enum X87OperandType { kX87DoubleOperand, kX87FloatOperand, kX87IntOperand };
+
+ void X87Mov(X87Register reg, Operand src,
+ X87OperandType operand = kX87DoubleOperand);
+ void X87Mov(Operand src, X87Register reg,
+ X87OperandType operand = kX87DoubleOperand);
+
+ void X87PrepareBinaryOp(
+ X87Register left, X87Register right, X87Register result);
+
+ void X87LoadForUsage(X87Register reg);
+ void X87LoadForUsage(X87Register reg1, X87Register reg2);
+ void X87PrepareToWrite(X87Register reg) { x87_stack_.PrepareToWrite(reg); }
+ void X87CommitWrite(X87Register reg) { x87_stack_.CommitWrite(reg); }
+
+ void X87Fxch(X87Register reg, int other_slot = 0) {
+ x87_stack_.Fxch(reg, other_slot);
+ }
+ void X87Free(X87Register reg) {
+ x87_stack_.Free(reg);
+ }
+
+
+ bool X87StackEmpty() {
+ return x87_stack_.depth() == 0;
+ }
+
+ Handle<Object> ToHandle(LConstantOperand* op) const;
+
+ // The operand denoting the second word (the one with a higher address) of
+ // a double stack slot.
+ Operand HighOperand(LOperand* op);
+
+ // Try to generate code for the entire chunk, but it may fail if the
+ // chunk contains constructs we cannot handle. Returns true if the
+ // code generation attempt succeeded.
+ bool GenerateCode();
+
+ // Finish the code by setting stack height, safepoint, and bailout
+ // information on it.
+ void FinishCode(Handle<Code> code);
+
+ // Deferred code support.
+ void DoDeferredNumberTagD(LNumberTagD* instr);
+
+ enum IntegerSignedness { SIGNED_INT32, UNSIGNED_INT32 };
+ void DoDeferredNumberTagIU(LInstruction* instr,
+ LOperand* value,
+ LOperand* temp,
+ IntegerSignedness signedness);
+
+ void DoDeferredTaggedToI(LTaggedToI* instr, Label* done);
+ void DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr);
+ void DoDeferredStackCheck(LStackCheck* instr);
+ void DoDeferredStringCharCodeAt(LStringCharCodeAt* instr);
+ void DoDeferredStringCharFromCode(LStringCharFromCode* instr);
+ void DoDeferredAllocate(LAllocate* instr);
+ void DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
+ Label* map_check);
+ void DoDeferredInstanceMigration(LCheckMaps* instr, Register object);
+ void DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr,
+ Register object,
+ Register index);
+
+ // Parallel move support.
+ void DoParallelMove(LParallelMove* move);
+ void DoGap(LGap* instr);
+
+ // Emit frame translation commands for an environment.
+ void WriteTranslation(LEnvironment* environment, Translation* translation);
+
+ void EnsureRelocSpaceForDeoptimization();
+
+ // Declare methods that deal with the individual node types.
+#define DECLARE_DO(type) void Do##type(L##type* node);
+ LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_DO)
+#undef DECLARE_DO
+
+ private:
+ StrictMode strict_mode() const { return info()->strict_mode(); }
+
+ Scope* scope() const { return scope_; }
+
+ void EmitClassOfTest(Label* if_true,
+ Label* if_false,
+ Handle<String> class_name,
+ Register input,
+ Register temporary,
+ Register temporary2);
+
+ int GetStackSlotCount() const { return chunk()->spill_slot_count(); }
+
+ void AddDeferredCode(LDeferredCode* code) { deferred_.Add(code, zone()); }
+
+ // Code generation passes. Returns true if code generation should
+ // continue.
+ void GenerateBodyInstructionPre(LInstruction* instr) V8_OVERRIDE;
+ void GenerateBodyInstructionPost(LInstruction* instr) V8_OVERRIDE;
+ bool GeneratePrologue();
+ bool GenerateDeferredCode();
+ bool GenerateJumpTable();
+ bool GenerateSafepointTable();
+
+ // Generates the custom OSR entrypoint and sets the osr_pc_offset.
+ void GenerateOsrPrologue();
+
+ enum SafepointMode {
+ RECORD_SIMPLE_SAFEPOINT,
+ RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS
+ };
+
+ void CallCode(Handle<Code> code,
+ RelocInfo::Mode mode,
+ LInstruction* instr);
+
+ void CallCodeGeneric(Handle<Code> code,
+ RelocInfo::Mode mode,
+ LInstruction* instr,
+ SafepointMode safepoint_mode);
+
+ void CallRuntime(const Runtime::Function* fun,
+ int argc,
+ LInstruction* instr);
+
+ void CallRuntime(Runtime::FunctionId id,
+ int argc,
+ LInstruction* instr) {
+ const Runtime::Function* function = Runtime::FunctionForId(id);
+ CallRuntime(function, argc, instr);
+ }
+
+ void CallRuntimeFromDeferred(Runtime::FunctionId id,
+ int argc,
+ LInstruction* instr,
+ LOperand* context);
+
+ void LoadContextFromDeferred(LOperand* context);
+
+ enum EDIState {
+ EDI_UNINITIALIZED,
+ EDI_CONTAINS_TARGET
+ };
+
+ // Generate a direct call to a known function. Expects the function
+ // to be in edi.
+ void CallKnownFunction(Handle<JSFunction> function,
+ int formal_parameter_count,
+ int arity,
+ LInstruction* instr,
+ EDIState edi_state);
+
+ void RecordSafepointWithLazyDeopt(LInstruction* instr,
+ SafepointMode safepoint_mode);
+
+ void RegisterEnvironmentForDeoptimization(LEnvironment* environment,
+ Safepoint::DeoptMode mode);
+ void DeoptimizeIf(Condition cc,
+ LEnvironment* environment,
+ Deoptimizer::BailoutType bailout_type);
+ void DeoptimizeIf(Condition cc, LEnvironment* environment);
+
+ bool DeoptEveryNTimes() {
+ return FLAG_deopt_every_n_times != 0 && !info()->IsStub();
+ }
+
+ void AddToTranslation(LEnvironment* environment,
+ Translation* translation,
+ LOperand* op,
+ bool is_tagged,
+ bool is_uint32,
+ int* object_index_pointer,
+ int* dematerialized_index_pointer);
+ void PopulateDeoptimizationData(Handle<Code> code);
+ int DefineDeoptimizationLiteral(Handle<Object> literal);
+
+ void PopulateDeoptimizationLiteralsWithInlinedFunctions();
+
+ Register ToRegister(int index) const;
+ X87Register ToX87Register(int index) const;
+ int32_t ToRepresentation(LConstantOperand* op, const Representation& r) const;
+ int32_t ToInteger32(LConstantOperand* op) const;
+ ExternalReference ToExternalReference(LConstantOperand* op) const;
+
+ Operand BuildFastArrayOperand(LOperand* elements_pointer,
+ LOperand* key,
+ Representation key_representation,
+ ElementsKind elements_kind,
+ uint32_t base_offset);
+
+ Operand BuildSeqStringOperand(Register string,
+ LOperand* index,
+ String::Encoding encoding);
+
+ void EmitIntegerMathAbs(LMathAbs* instr);
+
+ // Support for recording safepoint and position information.
+ void RecordSafepoint(LPointerMap* pointers,
+ Safepoint::Kind kind,
+ int arguments,
+ Safepoint::DeoptMode mode);
+ void RecordSafepoint(LPointerMap* pointers, Safepoint::DeoptMode mode);
+ void RecordSafepoint(Safepoint::DeoptMode mode);
+ void RecordSafepointWithRegisters(LPointerMap* pointers,
+ int arguments,
+ Safepoint::DeoptMode mode);
+
+ void RecordAndWritePosition(int position) V8_OVERRIDE;
+
+ static Condition TokenToCondition(Token::Value op, bool is_unsigned);
+ void EmitGoto(int block);
+
+ // EmitBranch expects to be the last instruction of a block.
+ template<class InstrType>
+ void EmitBranch(InstrType instr, Condition cc);
+ template<class InstrType>
+ void EmitFalseBranch(InstrType instr, Condition cc);
+ void EmitNumberUntagDNoSSE2(
+ Register input,
+ Register temp,
+ X87Register res_reg,
+ bool allow_undefined_as_nan,
+ bool deoptimize_on_minus_zero,
+ LEnvironment* env,
+ NumberUntagDMode mode = NUMBER_CANDIDATE_IS_ANY_TAGGED);
+
+ // Emits optimized code for typeof x == "y". Modifies input register.
+ // Returns the condition on which a final split to
+ // true and false label should be made, to optimize fallthrough.
+ Condition EmitTypeofIs(LTypeofIsAndBranch* instr, Register input);
+
+ // Emits optimized code for %_IsObject(x). Preserves input register.
+ // Returns the condition on which a final split to
+ // true and false label should be made, to optimize fallthrough.
+ Condition EmitIsObject(Register input,
+ Register temp1,
+ Label* is_not_object,
+ Label* is_object);
+
+ // Emits optimized code for %_IsString(x). Preserves input register.
+ // Returns the condition on which a final split to
+ // true and false label should be made, to optimize fallthrough.
+ Condition EmitIsString(Register input,
+ Register temp1,
+ Label* is_not_string,
+ SmiCheck check_needed);
+
+ // Emits optimized code for %_IsConstructCall().
+ // Caller should branch on equal condition.
+ void EmitIsConstructCall(Register temp);
+
+ // Emits optimized code to deep-copy the contents of statically known
+ // object graphs (e.g. object literal boilerplate).
+ void EmitDeepCopy(Handle<JSObject> object,
+ Register result,
+ Register source,
+ int* offset,
+ AllocationSiteMode mode);
+
+ void EnsureSpaceForLazyDeopt(int space_needed) V8_OVERRIDE;
+ void DoLoadKeyedExternalArray(LLoadKeyed* instr);
+ void DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr);
+ void DoLoadKeyedFixedArray(LLoadKeyed* instr);
+ void DoStoreKeyedExternalArray(LStoreKeyed* instr);
+ void DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr);
+ void DoStoreKeyedFixedArray(LStoreKeyed* instr);
+
+ void EmitReturn(LReturn* instr, bool dynamic_frame_alignment);
+
+ // Emits code for pushing either a tagged constant, a (non-double)
+ // register, or a stack slot operand.
+ void EmitPushTaggedOperand(LOperand* operand);
+
+ void X87Fld(Operand src, X87OperandType opts);
+
+ void EmitFlushX87ForDeopt();
+ void FlushX87StackIfNecessary(LInstruction* instr) {
+ x87_stack_.FlushIfNecessary(instr, this);
+ }
+ friend class LGapResolver;
+
+#ifdef _MSC_VER
+  // On Windows, you may not access the stack more than one page below
+ // the most recently mapped page. To make the allocated area randomly
+ // accessible, we write an arbitrary value to each page in range
+ // esp + offset - page_size .. esp in turn.
+ void MakeSureStackPagesMapped(int offset);
+#endif
+
+ ZoneList<LEnvironment*> deoptimizations_;
+ ZoneList<Deoptimizer::JumpTableEntry> jump_table_;
+ ZoneList<Handle<Object> > deoptimization_literals_;
+ int inlined_function_count_;
+ Scope* const scope_;
+ TranslationBuffer translations_;
+ ZoneList<LDeferredCode*> deferred_;
+ bool dynamic_frame_alignment_;
+ bool support_aligned_spilled_doubles_;
+ int osr_pc_offset_;
+ bool frame_is_built_;
+
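+  // Tracks which virtual X87 registers currently occupy which slots of the
+  // physical FPU register stack, so instructions can locate their operands.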
+ class X87Stack {
+ public:
+ explicit X87Stack(MacroAssembler* masm)
+ : stack_depth_(0), is_mutable_(true), masm_(masm) { }
+ explicit X87Stack(const X87Stack& other)
+    explicit X87Stack(const X87Stack& other)
+        : stack_depth_(other.stack_depth_),
+          is_mutable_(false),
+          masm_(other.masm_) {
+ for (int i = 0; i < stack_depth_; i++) {
+ stack_[i] = other.stack_[i];
+ }
+ }
+ bool operator==(const X87Stack& other) const {
+ if (stack_depth_ != other.stack_depth_) return false;
+ for (int i = 0; i < stack_depth_; i++) {
+ if (!stack_[i].is(other.stack_[i])) return false;
+ }
+ return true;
+ }
+ bool Contains(X87Register reg);
+ void Fxch(X87Register reg, int other_slot = 0);
+ void Free(X87Register reg);
+ void PrepareToWrite(X87Register reg);
+ void CommitWrite(X87Register reg);
+ void FlushIfNecessary(LInstruction* instr, LCodeGen* cgen);
+ void LeavingBlock(int current_block_id, LGoto* goto_instr);
+ int depth() const { return stack_depth_; }
+ void pop() {
+ ASSERT(is_mutable_);
+ stack_depth_--;
+ }
+ void push(X87Register reg) {
+ ASSERT(is_mutable_);
+ ASSERT(stack_depth_ < X87Register::kMaxNumAllocatableRegisters);
+ stack_[stack_depth_] = reg;
+ stack_depth_++;
+ }
+
+ MacroAssembler* masm() const { return masm_; }
+ Isolate* isolate() const { return masm_->isolate(); }
+
+ private:
+ int ArrayIndex(X87Register reg);
+ int st2idx(int pos);
+
+ X87Register stack_[X87Register::kMaxNumAllocatableRegisters];
+ int stack_depth_;
+ bool is_mutable_;
+ MacroAssembler* masm_;
+ };
+ X87Stack x87_stack_;
+
+ // Builder that keeps track of safepoints in the code. The table
+ // itself is emitted at the end of the generated code.
+ SafepointTableBuilder safepoints_;
+
+ // Compiler from a set of parallel moves to a sequential list of moves.
+ LGapResolver resolver_;
+
+ Safepoint::Kind expected_safepoint_kind_;
+
+ class PushSafepointRegistersScope V8_FINAL BASE_EMBEDDED {
+ public:
+ explicit PushSafepointRegistersScope(LCodeGen* codegen)
+ : codegen_(codegen) {
+ ASSERT(codegen_->expected_safepoint_kind_ == Safepoint::kSimple);
+ codegen_->masm_->PushSafepointRegisters();
+ codegen_->expected_safepoint_kind_ = Safepoint::kWithRegisters;
+ ASSERT(codegen_->info()->is_calling());
+ }
+
+ ~PushSafepointRegistersScope() {
+ ASSERT(codegen_->expected_safepoint_kind_ == Safepoint::kWithRegisters);
+ codegen_->masm_->PopSafepointRegisters();
+ codegen_->expected_safepoint_kind_ = Safepoint::kSimple;
+ }
+
+ private:
+ LCodeGen* codegen_;
+ };
+
+ friend class LDeferredCode;
+ friend class LEnvironment;
+ friend class SafepointGenerator;
+ DISALLOW_COPY_AND_ASSIGN(LCodeGen);
+};
+
+
+class LDeferredCode : public ZoneObject {
+ public:
+ explicit LDeferredCode(LCodeGen* codegen, const LCodeGen::X87Stack& x87_stack)
+ : codegen_(codegen),
+ external_exit_(NULL),
+ instruction_index_(codegen->current_instruction_),
+ x87_stack_(x87_stack) {
+ codegen->AddDeferredCode(this);
+ }
+
+ virtual ~LDeferredCode() {}
+ virtual void Generate() = 0;
+ virtual LInstruction* instr() = 0;
+
+ void SetExit(Label* exit) { external_exit_ = exit; }
+ Label* entry() { return &entry_; }
+ Label* exit() { return external_exit_ != NULL ? external_exit_ : &exit_; }
+ Label* done() { return codegen_->NeedsDeferredFrame() ? &done_ : exit(); }
+ int instruction_index() const { return instruction_index_; }
+ const LCodeGen::X87Stack& x87_stack() const { return x87_stack_; }
+
+ protected:
+ LCodeGen* codegen() const { return codegen_; }
+ MacroAssembler* masm() const { return codegen_->masm(); }
+
+ private:
+ LCodeGen* codegen_;
+ Label entry_;
+ Label exit_;
+ Label* external_exit_;
+ Label done_;
+ int instruction_index_;
+ LCodeGen::X87Stack x87_stack_;
+};
+
+} } // namespace v8::internal
+
+#endif // V8_X87_LITHIUM_CODEGEN_X87_H_
--- /dev/null
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "v8.h"
+
+#if V8_TARGET_ARCH_X87
+
+#include "x87/lithium-gap-resolver-x87.h"
+#include "x87/lithium-codegen-x87.h"
+
+namespace v8 {
+namespace internal {
+
+LGapResolver::LGapResolver(LCodeGen* owner)
+ : cgen_(owner),
+ moves_(32, owner->zone()),
+ source_uses_(),
+ destination_uses_(),
+ spilled_register_(-1) {}
+
+
+void LGapResolver::Resolve(LParallelMove* parallel_move) {
+ ASSERT(HasBeenReset());
+ // Build up a worklist of moves.
+ BuildInitialMoveList(parallel_move);
+
+ for (int i = 0; i < moves_.length(); ++i) {
+ LMoveOperands move = moves_[i];
+ // Skip constants to perform them last. They don't block other moves
+ // and skipping such moves with register destinations keeps those
+ // registers free for the whole algorithm.
+ if (!move.IsEliminated() && !move.source()->IsConstantOperand()) {
+ PerformMove(i);
+ }
+ }
+
+ // Perform the moves with constant sources.
+ for (int i = 0; i < moves_.length(); ++i) {
+ if (!moves_[i].IsEliminated()) {
+ ASSERT(moves_[i].source()->IsConstantOperand());
+ EmitMove(i);
+ }
+ }
+
+ Finish();
+ ASSERT(HasBeenReset());
+}
+
+
+void LGapResolver::BuildInitialMoveList(LParallelMove* parallel_move) {
+ // Perform a linear sweep of the moves to add them to the initial list of
+ // moves to perform, ignoring any move that is redundant (the source is
+ // the same as the destination, the destination is ignored and
+ // unallocated, or the move was already eliminated).
+ const ZoneList<LMoveOperands>* moves = parallel_move->move_operands();
+ for (int i = 0; i < moves->length(); ++i) {
+ LMoveOperands move = moves->at(i);
+ if (!move.IsRedundant()) AddMove(move);
+ }
+ Verify();
+}
+
+
+void LGapResolver::PerformMove(int index) {
+ // Each call to this function performs a move and deletes it from the move
+ // graph. We first recursively perform any move blocking this one. We
+ // mark a move as "pending" on entry to PerformMove in order to detect
+ // cycles in the move graph. We use operand swaps to resolve cycles,
+ // which means that a call to PerformMove could change any source operand
+ // in the move graph.
+
+ ASSERT(!moves_[index].IsPending());
+ ASSERT(!moves_[index].IsRedundant());
+
+ // Clear this move's destination to indicate a pending move. The actual
+ // destination is saved on the side.
+ ASSERT(moves_[index].source() != NULL); // Or else it will look eliminated.
+ LOperand* destination = moves_[index].destination();
+ moves_[index].set_destination(NULL);
+
+ // Perform a depth-first traversal of the move graph to resolve
+ // dependencies. Any unperformed, unpending move with a source the same
+ // as this one's destination blocks this one so recursively perform all
+ // such moves.
+ for (int i = 0; i < moves_.length(); ++i) {
+ LMoveOperands other_move = moves_[i];
+ if (other_move.Blocks(destination) && !other_move.IsPending()) {
+ // Though PerformMove can change any source operand in the move graph,
+ // this call cannot create a blocking move via a swap (this loop does
+ // not miss any). Assume there is a non-blocking move with source A
+ // and this move is blocked on source B and there is a swap of A and
+ // B. Then A and B must be involved in the same cycle (or they would
+ // not be swapped). Since this move's destination is B and there is
+ // only a single incoming edge to an operand, this move must also be
+ // involved in the same cycle. In that case, the blocking move will
+ // be created but will be "pending" when we return from PerformMove.
+ PerformMove(i);
+ }
+ }
+
+ // We are about to resolve this move and don't need it marked as
+ // pending, so restore its destination.
+ moves_[index].set_destination(destination);
+
+ // This move's source may have changed due to swaps to resolve cycles and
+ // so it may now be the last move in the cycle. If so remove it.
+ if (moves_[index].source()->Equals(destination)) {
+ RemoveMove(index);
+ return;
+ }
+
+ // The move may be blocked on a (at most one) pending move, in which case
+ // we have a cycle. Search for such a blocking move and perform a swap to
+ // resolve it.
+ for (int i = 0; i < moves_.length(); ++i) {
+ LMoveOperands other_move = moves_[i];
+ if (other_move.Blocks(destination)) {
+ ASSERT(other_move.IsPending());
+ EmitSwap(index);
+ return;
+ }
+ }
+
+ // This move is not blocked.
+ EmitMove(index);
+}
+
+
+void LGapResolver::AddMove(LMoveOperands move) {
+ LOperand* source = move.source();
+ if (source->IsRegister()) ++source_uses_[source->index()];
+
+ LOperand* destination = move.destination();
+ if (destination->IsRegister()) ++destination_uses_[destination->index()];
+
+ moves_.Add(move, cgen_->zone());
+}
+
+
+void LGapResolver::RemoveMove(int index) {
+ LOperand* source = moves_[index].source();
+ if (source->IsRegister()) {
+ --source_uses_[source->index()];
+ ASSERT(source_uses_[source->index()] >= 0);
+ }
+
+ LOperand* destination = moves_[index].destination();
+ if (destination->IsRegister()) {
+ --destination_uses_[destination->index()];
+ ASSERT(destination_uses_[destination->index()] >= 0);
+ }
+
+ moves_[index].Eliminate();
+}
+
+
+int LGapResolver::CountSourceUses(LOperand* operand) {
+ int count = 0;
+ for (int i = 0; i < moves_.length(); ++i) {
+ if (!moves_[i].IsEliminated() && moves_[i].source()->Equals(operand)) {
+ ++count;
+ }
+ }
+ return count;
+}
+
+
+Register LGapResolver::GetFreeRegisterNot(Register reg) {
+ int skip_index = reg.is(no_reg) ? -1 : Register::ToAllocationIndex(reg);
+ for (int i = 0; i < Register::NumAllocatableRegisters(); ++i) {
+ if (source_uses_[i] == 0 && destination_uses_[i] > 0 && i != skip_index) {
+ return Register::FromAllocationIndex(i);
+ }
+ }
+ return no_reg;
+}
+
+
+bool LGapResolver::HasBeenReset() {
+ if (!moves_.is_empty()) return false;
+ if (spilled_register_ >= 0) return false;
+
+ for (int i = 0; i < Register::NumAllocatableRegisters(); ++i) {
+ if (source_uses_[i] != 0) return false;
+ if (destination_uses_[i] != 0) return false;
+ }
+ return true;
+}
+
+
+void LGapResolver::Verify() {
+#ifdef ENABLE_SLOW_ASSERTS
+ // No operand should be the destination for more than one move.
+ for (int i = 0; i < moves_.length(); ++i) {
+ LOperand* destination = moves_[i].destination();
+ for (int j = i + 1; j < moves_.length(); ++j) {
+ SLOW_ASSERT(!destination->Equals(moves_[j].destination()));
+ }
+ }
+#endif
+}
+
+
+#define __ ACCESS_MASM(cgen_->masm())
+
+void LGapResolver::Finish() {
+ if (spilled_register_ >= 0) {
+ __ pop(Register::FromAllocationIndex(spilled_register_));
+ spilled_register_ = -1;
+ }
+ moves_.Rewind(0);
+}
+
+
+void LGapResolver::EnsureRestored(LOperand* operand) {
+ if (operand->IsRegister() && operand->index() == spilled_register_) {
+ __ pop(Register::FromAllocationIndex(spilled_register_));
+ spilled_register_ = -1;
+ }
+}
+
+
+Register LGapResolver::EnsureTempRegister() {
+ // 1. We may have already spilled to create a temp register.
+ if (spilled_register_ >= 0) {
+ return Register::FromAllocationIndex(spilled_register_);
+ }
+
+ // 2. We may have a free register that we can use without spilling.
+ Register free = GetFreeRegisterNot(no_reg);
+ if (!free.is(no_reg)) return free;
+
+ // 3. Prefer to spill a register that is not used in any remaining move
+ // because it will not need to be restored until the end.
+ for (int i = 0; i < Register::NumAllocatableRegisters(); ++i) {
+ if (source_uses_[i] == 0 && destination_uses_[i] == 0) {
+ Register scratch = Register::FromAllocationIndex(i);
+ __ push(scratch);
+ spilled_register_ = i;
+ return scratch;
+ }
+ }
+
+ // 4. Use an arbitrary register. Register 0 is as arbitrary as any other.
+ Register scratch = Register::FromAllocationIndex(0);
+ __ push(scratch);
+ spilled_register_ = 0;
+ return scratch;
+}
+
+
+void LGapResolver::EmitMove(int index) {
+ LOperand* source = moves_[index].source();
+ LOperand* destination = moves_[index].destination();
+ EnsureRestored(source);
+ EnsureRestored(destination);
+
+ // Dispatch on the source and destination operand kinds. Not all
+ // combinations are possible.
+ if (source->IsRegister()) {
+ ASSERT(destination->IsRegister() || destination->IsStackSlot());
+ Register src = cgen_->ToRegister(source);
+ Operand dst = cgen_->ToOperand(destination);
+ __ mov(dst, src);
+
+ } else if (source->IsStackSlot()) {
+ ASSERT(destination->IsRegister() || destination->IsStackSlot());
+ Operand src = cgen_->ToOperand(source);
+ if (destination->IsRegister()) {
+ Register dst = cgen_->ToRegister(destination);
+ __ mov(dst, src);
+ } else {
+ // Spill on demand to use a temporary register for memory-to-memory
+ // moves.
+ Register tmp = EnsureTempRegister();
+ Operand dst = cgen_->ToOperand(destination);
+ __ mov(tmp, src);
+ __ mov(dst, tmp);
+ }
+
+ } else if (source->IsConstantOperand()) {
+ LConstantOperand* constant_source = LConstantOperand::cast(source);
+ if (destination->IsRegister()) {
+ Register dst = cgen_->ToRegister(destination);
+ Representation r = cgen_->IsSmi(constant_source)
+ ? Representation::Smi() : Representation::Integer32();
+ if (cgen_->IsInteger32(constant_source)) {
+ __ Move(dst, cgen_->ToImmediate(constant_source, r));
+ } else {
+ __ LoadObject(dst, cgen_->ToHandle(constant_source));
+ }
+ } else if (destination->IsDoubleRegister()) {
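+      // Materialize the double constant through memory: push its two 32-bit
+      // halves, load the value onto the FPU stack, then pop the scratch
+      // space.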
+ double v = cgen_->ToDouble(constant_source);
+ uint64_t int_val = BitCast<uint64_t, double>(v);
+ int32_t lower = static_cast<int32_t>(int_val);
+ int32_t upper = static_cast<int32_t>(int_val >> kBitsPerInt);
+ __ push(Immediate(upper));
+ __ push(Immediate(lower));
+ X87Register dst = cgen_->ToX87Register(destination);
+ cgen_->X87Mov(dst, MemOperand(esp, 0));
+ __ add(esp, Immediate(kDoubleSize));
+ } else {
+ ASSERT(destination->IsStackSlot());
+ Operand dst = cgen_->ToOperand(destination);
+ Representation r = cgen_->IsSmi(constant_source)
+ ? Representation::Smi() : Representation::Integer32();
+ if (cgen_->IsInteger32(constant_source)) {
+ __ Move(dst, cgen_->ToImmediate(constant_source, r));
+ } else {
+ Register tmp = EnsureTempRegister();
+ __ LoadObject(tmp, cgen_->ToHandle(constant_source));
+ __ mov(dst, tmp);
+ }
+ }
+
+ } else if (source->IsDoubleRegister()) {
+    // The source value already lives on the FPU stack; store it into the
+    // destination, which must be a double stack slot in the non-SSE2 case.
+ ASSERT(destination->IsDoubleStackSlot());
+ Operand dst = cgen_->ToOperand(destination);
+ X87Register src = cgen_->ToX87Register(source);
+ cgen_->X87Mov(dst, src);
+ } else if (source->IsDoubleStackSlot()) {
+    // If the destination is another double stack slot, copy the two words of
+    // the source via a temporary register. If the destination is a double
+    // register, load the slot onto the top of the floating point stack.
+ if (destination->IsDoubleStackSlot()) {
+ Register tmp = EnsureTempRegister();
+ Operand src0 = cgen_->ToOperand(source);
+ Operand src1 = cgen_->HighOperand(source);
+ Operand dst0 = cgen_->ToOperand(destination);
+ Operand dst1 = cgen_->HighOperand(destination);
+      __ mov(tmp, src0);  // Use tmp to copy the source to the destination.
+ __ mov(dst0, tmp);
+ __ mov(tmp, src1);
+ __ mov(dst1, tmp);
+ } else {
+ Operand src = cgen_->ToOperand(source);
+ X87Register dst = cgen_->ToX87Register(destination);
+ cgen_->X87Mov(dst, src);
+ }
+ } else {
+ UNREACHABLE();
+ }
+
+ RemoveMove(index);
+}
+
+
+void LGapResolver::EmitSwap(int index) {
+ LOperand* source = moves_[index].source();
+ LOperand* destination = moves_[index].destination();
+ EnsureRestored(source);
+ EnsureRestored(destination);
+
+ // Dispatch on the source and destination operand kinds. Not all
+ // combinations are possible.
+ if (source->IsRegister() && destination->IsRegister()) {
+ // Register-register.
+ Register src = cgen_->ToRegister(source);
+ Register dst = cgen_->ToRegister(destination);
+ __ xchg(dst, src);
+
+ } else if ((source->IsRegister() && destination->IsStackSlot()) ||
+ (source->IsStackSlot() && destination->IsRegister())) {
+ // Register-memory. Use a free register as a temp if possible. Do not
+ // spill on demand because the simple spill implementation cannot avoid
+ // spilling src at this point.
+ Register tmp = GetFreeRegisterNot(no_reg);
+ Register reg =
+ cgen_->ToRegister(source->IsRegister() ? source : destination);
+ Operand mem =
+ cgen_->ToOperand(source->IsRegister() ? destination : source);
+ if (tmp.is(no_reg)) {
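+      // No free register is available, so swap the register and the memory
+      // operand in place with the classic triple-XOR trick.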
+ __ xor_(reg, mem);
+ __ xor_(mem, reg);
+ __ xor_(reg, mem);
+ } else {
+ __ mov(tmp, mem);
+ __ mov(mem, reg);
+ __ mov(reg, tmp);
+ }
+
+ } else if (source->IsStackSlot() && destination->IsStackSlot()) {
+ // Memory-memory. Spill on demand to use a temporary. If there is a
+ // free register after that, use it as a second temporary.
+ Register tmp0 = EnsureTempRegister();
+ Register tmp1 = GetFreeRegisterNot(tmp0);
+ Operand src = cgen_->ToOperand(source);
+ Operand dst = cgen_->ToOperand(destination);
+ if (tmp1.is(no_reg)) {
+ // Only one temp register available to us.
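+      // XOR-swap tmp0 (a copy of dst) with src, then write the old source
+      // value back to dst.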
+ __ mov(tmp0, dst);
+ __ xor_(tmp0, src);
+ __ xor_(src, tmp0);
+ __ xor_(tmp0, src);
+ __ mov(dst, tmp0);
+ } else {
+ __ mov(tmp0, dst);
+ __ mov(tmp1, src);
+ __ mov(dst, tmp1);
+ __ mov(src, tmp0);
+ }
+ } else {
+ // No other combinations are possible.
+ UNREACHABLE();
+ }
+
+ // The swap of source and destination has executed a move from source to
+ // destination.
+ RemoveMove(index);
+
+ // Any unperformed (including pending) move with a source of either
+ // this move's source or destination needs to have their source
+ // changed to reflect the state of affairs after the swap.
+ for (int i = 0; i < moves_.length(); ++i) {
+ LMoveOperands other_move = moves_[i];
+ if (other_move.Blocks(source)) {
+ moves_[i].set_source(destination);
+ } else if (other_move.Blocks(destination)) {
+ moves_[i].set_source(source);
+ }
+ }
+
+ // In addition to swapping the actual uses as sources, we need to update
+ // the use counts.
+ if (source->IsRegister() && destination->IsRegister()) {
+ int temp = source_uses_[source->index()];
+ source_uses_[source->index()] = source_uses_[destination->index()];
+ source_uses_[destination->index()] = temp;
+ } else if (source->IsRegister()) {
+ // We don't have use counts for non-register operands like destination.
+ // Compute those counts now.
+ source_uses_[source->index()] = CountSourceUses(source);
+ } else if (destination->IsRegister()) {
+ source_uses_[destination->index()] = CountSourceUses(destination);
+ }
+}
+
+#undef __
+
+} } // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_X87
--- /dev/null
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_X87_LITHIUM_GAP_RESOLVER_X87_H_
+#define V8_X87_LITHIUM_GAP_RESOLVER_X87_H_
+
+#include "v8.h"
+
+#include "lithium.h"
+
+namespace v8 {
+namespace internal {
+
+class LCodeGen;
+class LGapResolver;
+
+class LGapResolver V8_FINAL BASE_EMBEDDED {
+ public:
+ explicit LGapResolver(LCodeGen* owner);
+
+ // Resolve a set of parallel moves, emitting assembler instructions.
+ void Resolve(LParallelMove* parallel_move);
+
+ private:
+ // Build the initial list of moves.
+ void BuildInitialMoveList(LParallelMove* parallel_move);
+
+ // Perform the move at the moves_ index in question (possibly requiring
+ // other moves to satisfy dependencies).
+ void PerformMove(int index);
+
+ // Emit any code necessary at the end of a gap move.
+ void Finish();
+
+ // Add or delete a move from the move graph without emitting any code.
+ // Used to build up the graph and remove trivial moves.
+ void AddMove(LMoveOperands move);
+ void RemoveMove(int index);
+
+ // Report the count of uses of operand as a source in a not-yet-performed
+ // move. Used to rebuild use counts.
+ int CountSourceUses(LOperand* operand);
+
+ // Emit a move and remove it from the move graph.
+ void EmitMove(int index);
+
+ // Execute a move by emitting a swap of two operands. The move from
+ // source to destination is removed from the move graph.
+ void EmitSwap(int index);
+
+ // Ensure that the given operand is not spilled.
+ void EnsureRestored(LOperand* operand);
+
+ // Return a register that can be used as a temp register, spilling
+ // something if necessary.
+ Register EnsureTempRegister();
+
+ // Return a known free register different from the given one (which could
+ // be no_reg---returning any free register), or no_reg if there is no such
+ // register.
+ Register GetFreeRegisterNot(Register reg);
+
+ // Verify that the state is the initial one, ready to resolve a single
+ // parallel move.
+ bool HasBeenReset();
+
+ // Verify the move list before performing moves.
+ void Verify();
+
+ LCodeGen* cgen_;
+
+ // List of moves not yet resolved.
+ ZoneList<LMoveOperands> moves_;
+
+ // Source and destination use counts for the general purpose registers.
+ int source_uses_[Register::kMaxNumAllocatableRegisters];
+ int destination_uses_[Register::kMaxNumAllocatableRegisters];
+
+ // If we had to spill on demand, the currently spilled register's
+ // allocation index.
+ int spilled_register_;
+};
+
+} } // namespace v8::internal
+
+#endif // V8_X87_LITHIUM_GAP_RESOLVER_X87_H_
--- /dev/null
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "v8.h"
+
+#if V8_TARGET_ARCH_X87
+
+#include "lithium-allocator-inl.h"
+#include "x87/lithium-x87.h"
+#include "x87/lithium-codegen-x87.h"
+#include "hydrogen-osr.h"
+
+namespace v8 {
+namespace internal {
+
+#define DEFINE_COMPILE(type) \
+ void L##type::CompileToNative(LCodeGen* generator) { \
+ generator->Do##type(this); \
+ }
+LITHIUM_CONCRETE_INSTRUCTION_LIST(DEFINE_COMPILE)
+#undef DEFINE_COMPILE
+
+
+#ifdef DEBUG
+void LInstruction::VerifyCall() {
+ // Call instructions can use only fixed registers as temporaries and
+ // outputs because all registers are blocked by the calling convention.
+  // Input operands must use a fixed register or use-at-start policy or
+ // a non-register policy.
+ ASSERT(Output() == NULL ||
+ LUnallocated::cast(Output())->HasFixedPolicy() ||
+ !LUnallocated::cast(Output())->HasRegisterPolicy());
+ for (UseIterator it(this); !it.Done(); it.Advance()) {
+ LUnallocated* operand = LUnallocated::cast(it.Current());
+ ASSERT(operand->HasFixedPolicy() ||
+ operand->IsUsedAtStart());
+ }
+ for (TempIterator it(this); !it.Done(); it.Advance()) {
+ LUnallocated* operand = LUnallocated::cast(it.Current());
+    ASSERT(operand->HasFixedPolicy() || !operand->HasRegisterPolicy());
+ }
+}
+#endif
+
+
+bool LInstruction::HasDoubleRegisterResult() {
+ return HasResult() && result()->IsDoubleRegister();
+}
+
+
+bool LInstruction::HasDoubleRegisterInput() {
+ for (int i = 0; i < InputCount(); i++) {
+ LOperand* op = InputAt(i);
+ if (op != NULL && op->IsDoubleRegister()) {
+ return true;
+ }
+ }
+ return false;
+}
+
+
+bool LInstruction::IsDoubleInput(X87Register reg, LCodeGen* cgen) {
+ for (int i = 0; i < InputCount(); i++) {
+ LOperand* op = InputAt(i);
+ if (op != NULL && op->IsDoubleRegister()) {
+ if (cgen->ToX87Register(op).is(reg)) return true;
+ }
+ }
+ return false;
+}
+
+
+void LInstruction::PrintTo(StringStream* stream) {
+ stream->Add("%s ", this->Mnemonic());
+
+ PrintOutputOperandTo(stream);
+
+ PrintDataTo(stream);
+
+ if (HasEnvironment()) {
+ stream->Add(" ");
+ environment()->PrintTo(stream);
+ }
+
+ if (HasPointerMap()) {
+ stream->Add(" ");
+ pointer_map()->PrintTo(stream);
+ }
+}
+
+
+void LInstruction::PrintDataTo(StringStream* stream) {
+ stream->Add("= ");
+ for (int i = 0; i < InputCount(); i++) {
+ if (i > 0) stream->Add(" ");
+ if (InputAt(i) == NULL) {
+ stream->Add("NULL");
+ } else {
+ InputAt(i)->PrintTo(stream);
+ }
+ }
+}
+
+
+void LInstruction::PrintOutputOperandTo(StringStream* stream) {
+ if (HasResult()) result()->PrintTo(stream);
+}
+
+
+void LLabel::PrintDataTo(StringStream* stream) {
+ LGap::PrintDataTo(stream);
+ LLabel* rep = replacement();
+ if (rep != NULL) {
+ stream->Add(" Dead block replaced with B%d", rep->block_id());
+ }
+}
+
+
+bool LGap::IsRedundant() const {
+ for (int i = 0; i < 4; i++) {
+ if (parallel_moves_[i] != NULL && !parallel_moves_[i]->IsRedundant()) {
+ return false;
+ }
+ }
+
+ return true;
+}
+
+
+void LGap::PrintDataTo(StringStream* stream) {
+ for (int i = 0; i < 4; i++) {
+ stream->Add("(");
+ if (parallel_moves_[i] != NULL) {
+ parallel_moves_[i]->PrintDataTo(stream);
+ }
+ stream->Add(") ");
+ }
+}
+
+
+const char* LArithmeticD::Mnemonic() const {
+ switch (op()) {
+ case Token::ADD: return "add-d";
+ case Token::SUB: return "sub-d";
+ case Token::MUL: return "mul-d";
+ case Token::DIV: return "div-d";
+ case Token::MOD: return "mod-d";
+ default:
+ UNREACHABLE();
+ return NULL;
+ }
+}
+
+
+const char* LArithmeticT::Mnemonic() const {
+ switch (op()) {
+ case Token::ADD: return "add-t";
+ case Token::SUB: return "sub-t";
+ case Token::MUL: return "mul-t";
+ case Token::MOD: return "mod-t";
+ case Token::DIV: return "div-t";
+ case Token::BIT_AND: return "bit-and-t";
+ case Token::BIT_OR: return "bit-or-t";
+ case Token::BIT_XOR: return "bit-xor-t";
+ case Token::ROR: return "ror-t";
+ case Token::SHL: return "sal-t";
+ case Token::SAR: return "sar-t";
+ case Token::SHR: return "shr-t";
+ default:
+ UNREACHABLE();
+ return NULL;
+ }
+}
+
+
+bool LGoto::HasInterestingComment(LCodeGen* gen) const {
+ return !gen->IsNextEmittedBlock(block_id());
+}
+
+
+void LGoto::PrintDataTo(StringStream* stream) {
+ stream->Add("B%d", block_id());
+}
+
+
+void LBranch::PrintDataTo(StringStream* stream) {
+ stream->Add("B%d | B%d on ", true_block_id(), false_block_id());
+ value()->PrintTo(stream);
+}
+
+
+void LCompareNumericAndBranch::PrintDataTo(StringStream* stream) {
+ stream->Add("if ");
+ left()->PrintTo(stream);
+ stream->Add(" %s ", Token::String(op()));
+ right()->PrintTo(stream);
+ stream->Add(" then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
+void LIsObjectAndBranch::PrintDataTo(StringStream* stream) {
+ stream->Add("if is_object(");
+ value()->PrintTo(stream);
+ stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
+void LIsStringAndBranch::PrintDataTo(StringStream* stream) {
+ stream->Add("if is_string(");
+ value()->PrintTo(stream);
+ stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
+void LIsSmiAndBranch::PrintDataTo(StringStream* stream) {
+ stream->Add("if is_smi(");
+ value()->PrintTo(stream);
+ stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
+void LIsUndetectableAndBranch::PrintDataTo(StringStream* stream) {
+ stream->Add("if is_undetectable(");
+ value()->PrintTo(stream);
+ stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
+void LStringCompareAndBranch::PrintDataTo(StringStream* stream) {
+ stream->Add("if string_compare(");
+ left()->PrintTo(stream);
+ right()->PrintTo(stream);
+ stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
+void LHasInstanceTypeAndBranch::PrintDataTo(StringStream* stream) {
+ stream->Add("if has_instance_type(");
+ value()->PrintTo(stream);
+ stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
+void LHasCachedArrayIndexAndBranch::PrintDataTo(StringStream* stream) {
+ stream->Add("if has_cached_array_index(");
+ value()->PrintTo(stream);
+ stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
+void LClassOfTestAndBranch::PrintDataTo(StringStream* stream) {
+ stream->Add("if class_of_test(");
+ value()->PrintTo(stream);
+ stream->Add(", \"%o\") then B%d else B%d",
+ *hydrogen()->class_name(),
+ true_block_id(),
+ false_block_id());
+}
+
+
+void LTypeofIsAndBranch::PrintDataTo(StringStream* stream) {
+ stream->Add("if typeof ");
+ value()->PrintTo(stream);
+ stream->Add(" == \"%s\" then B%d else B%d",
+ hydrogen()->type_literal()->ToCString().get(),
+ true_block_id(), false_block_id());
+}
+
+
+void LStoreCodeEntry::PrintDataTo(StringStream* stream) {
+ stream->Add(" = ");
+ function()->PrintTo(stream);
+ stream->Add(".code_entry = ");
+ code_object()->PrintTo(stream);
+}
+
+
+void LInnerAllocatedObject::PrintDataTo(StringStream* stream) {
+ stream->Add(" = ");
+ base_object()->PrintTo(stream);
+ stream->Add(" + ");
+ offset()->PrintTo(stream);
+}
+
+
+void LCallJSFunction::PrintDataTo(StringStream* stream) {
+ stream->Add("= ");
+ function()->PrintTo(stream);
+ stream->Add("#%d / ", arity());
+}
+
+
+void LCallWithDescriptor::PrintDataTo(StringStream* stream) {
+ for (int i = 0; i < InputCount(); i++) {
+ InputAt(i)->PrintTo(stream);
+ stream->Add(" ");
+ }
+ stream->Add("#%d / ", arity());
+}
+
+
+void LLoadContextSlot::PrintDataTo(StringStream* stream) {
+ context()->PrintTo(stream);
+ stream->Add("[%d]", slot_index());
+}
+
+
+void LStoreContextSlot::PrintDataTo(StringStream* stream) {
+ context()->PrintTo(stream);
+ stream->Add("[%d] <- ", slot_index());
+ value()->PrintTo(stream);
+}
+
+
+void LInvokeFunction::PrintDataTo(StringStream* stream) {
+ stream->Add("= ");
+ context()->PrintTo(stream);
+ stream->Add(" ");
+ function()->PrintTo(stream);
+ stream->Add(" #%d / ", arity());
+}
+
+
+void LCallNew::PrintDataTo(StringStream* stream) {
+ stream->Add("= ");
+ context()->PrintTo(stream);
+ stream->Add(" ");
+ constructor()->PrintTo(stream);
+ stream->Add(" #%d / ", arity());
+}
+
+
+void LCallNewArray::PrintDataTo(StringStream* stream) {
+ stream->Add("= ");
+ context()->PrintTo(stream);
+ stream->Add(" ");
+ constructor()->PrintTo(stream);
+ stream->Add(" #%d / ", arity());
+ ElementsKind kind = hydrogen()->elements_kind();
+ stream->Add(" (%s) ", ElementsKindToString(kind));
+}
+
+
+void LAccessArgumentsAt::PrintDataTo(StringStream* stream) {
+ arguments()->PrintTo(stream);
+
+ stream->Add(" length ");
+ length()->PrintTo(stream);
+
+ stream->Add(" index ");
+ index()->PrintTo(stream);
+}
+
+
+int LPlatformChunk::GetNextSpillIndex(RegisterKind kind) {
+  // Skip a slot if necessary for a double-width slot.
+ if (kind == DOUBLE_REGISTERS) {
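+    // Bump the count and force it odd; presumably this keeps double spill
+    // slots aligned within the frame.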
+ spill_slot_count_++;
+ spill_slot_count_ |= 1;
+ num_double_slots_++;
+ }
+ return spill_slot_count_++;
+}
+
+
+LOperand* LPlatformChunk::GetNextSpillSlot(RegisterKind kind) {
+ int index = GetNextSpillIndex(kind);
+ if (kind == DOUBLE_REGISTERS) {
+ return LDoubleStackSlot::Create(index, zone());
+ } else {
+ ASSERT(kind == GENERAL_REGISTERS);
+ return LStackSlot::Create(index, zone());
+ }
+}
+
+
+void LStoreNamedField::PrintDataTo(StringStream* stream) {
+ object()->PrintTo(stream);
+ hydrogen()->access().PrintTo(stream);
+ stream->Add(" <- ");
+ value()->PrintTo(stream);
+}
+
+
+void LStoreNamedGeneric::PrintDataTo(StringStream* stream) {
+ object()->PrintTo(stream);
+ stream->Add(".");
+ stream->Add(String::cast(*name())->ToCString().get());
+ stream->Add(" <- ");
+ value()->PrintTo(stream);
+}
+
+
+void LLoadKeyed::PrintDataTo(StringStream* stream) {
+ elements()->PrintTo(stream);
+ stream->Add("[");
+ key()->PrintTo(stream);
+ if (hydrogen()->IsDehoisted()) {
+ stream->Add(" + %d]", base_offset());
+ } else {
+ stream->Add("]");
+ }
+}
+
+
+void LStoreKeyed::PrintDataTo(StringStream* stream) {
+ elements()->PrintTo(stream);
+ stream->Add("[");
+ key()->PrintTo(stream);
+ if (hydrogen()->IsDehoisted()) {
+ stream->Add(" + %d] <-", base_offset());
+ } else {
+ stream->Add("] <- ");
+ }
+
+ if (value() == NULL) {
+ ASSERT(hydrogen()->IsConstantHoleStore() &&
+ hydrogen()->value()->representation().IsDouble());
+ stream->Add("<the hole(nan)>");
+ } else {
+ value()->PrintTo(stream);
+ }
+}
+
+
+void LStoreKeyedGeneric::PrintDataTo(StringStream* stream) {
+ object()->PrintTo(stream);
+ stream->Add("[");
+ key()->PrintTo(stream);
+ stream->Add("] <- ");
+ value()->PrintTo(stream);
+}
+
+
+void LTransitionElementsKind::PrintDataTo(StringStream* stream) {
+ object()->PrintTo(stream);
+ stream->Add(" %p -> %p", *original_map(), *transitioned_map());
+}
+
+
+LPlatformChunk* LChunkBuilder::Build() {
+ ASSERT(is_unused());
+ chunk_ = new(zone()) LPlatformChunk(info(), graph());
+ LPhase phase("L_Building chunk", chunk_);
+ status_ = BUILDING;
+
+ // Reserve the first spill slot for the state of dynamic alignment.
+ if (info()->IsOptimizing()) {
+ int alignment_state_index = chunk_->GetNextSpillIndex(GENERAL_REGISTERS);
+ ASSERT_EQ(alignment_state_index, 0);
+ USE(alignment_state_index);
+ }
+
+ // If compiling for OSR, reserve space for the unoptimized frame,
+ // which will be subsumed into this frame.
+ if (graph()->has_osr()) {
+ for (int i = graph()->osr()->UnoptimizedFrameSlots(); i > 0; i--) {
+ chunk_->GetNextSpillIndex(GENERAL_REGISTERS);
+ }
+ }
+
+ const ZoneList<HBasicBlock*>* blocks = graph()->blocks();
+ for (int i = 0; i < blocks->length(); i++) {
+ HBasicBlock* next = NULL;
+ if (i < blocks->length() - 1) next = blocks->at(i + 1);
+ DoBasicBlock(blocks->at(i), next);
+ if (is_aborted()) return NULL;
+ }
+ status_ = DONE;
+ return chunk_;
+}
+
+
+void LChunkBuilder::Abort(BailoutReason reason) {
+ info()->set_bailout_reason(reason);
+ status_ = ABORTED;
+}
+
+
+LUnallocated* LChunkBuilder::ToUnallocated(Register reg) {
+ return new(zone()) LUnallocated(LUnallocated::FIXED_REGISTER,
+ Register::ToAllocationIndex(reg));
+}
+
+
+LOperand* LChunkBuilder::UseFixed(HValue* value, Register fixed_register) {
+ return Use(value, ToUnallocated(fixed_register));
+}
+
+
+LOperand* LChunkBuilder::UseRegister(HValue* value) {
+ return Use(value, new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER));
+}
+
+
+LOperand* LChunkBuilder::UseRegisterAtStart(HValue* value) {
+ return Use(value,
+ new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER,
+ LUnallocated::USED_AT_START));
+}
+
+
+LOperand* LChunkBuilder::UseTempRegister(HValue* value) {
+ return Use(value, new(zone()) LUnallocated(LUnallocated::WRITABLE_REGISTER));
+}
+
+
+LOperand* LChunkBuilder::Use(HValue* value) {
+ return Use(value, new(zone()) LUnallocated(LUnallocated::NONE));
+}
+
+
+LOperand* LChunkBuilder::UseAtStart(HValue* value) {
+ return Use(value, new(zone()) LUnallocated(LUnallocated::NONE,
+ LUnallocated::USED_AT_START));
+}
+
+
+static inline bool CanBeImmediateConstant(HValue* value) {
+ return value->IsConstant() && HConstant::cast(value)->NotInNewSpace();
+}
+
+
+LOperand* LChunkBuilder::UseOrConstant(HValue* value) {
+ return CanBeImmediateConstant(value)
+ ? chunk_->DefineConstantOperand(HConstant::cast(value))
+ : Use(value);
+}
+
+
+LOperand* LChunkBuilder::UseOrConstantAtStart(HValue* value) {
+ return CanBeImmediateConstant(value)
+ ? chunk_->DefineConstantOperand(HConstant::cast(value))
+ : UseAtStart(value);
+}
+
+
+LOperand* LChunkBuilder::UseFixedOrConstant(HValue* value,
+ Register fixed_register) {
+ return CanBeImmediateConstant(value)
+ ? chunk_->DefineConstantOperand(HConstant::cast(value))
+ : UseFixed(value, fixed_register);
+}
+
+
+LOperand* LChunkBuilder::UseRegisterOrConstant(HValue* value) {
+ return CanBeImmediateConstant(value)
+ ? chunk_->DefineConstantOperand(HConstant::cast(value))
+ : UseRegister(value);
+}
+
+
+LOperand* LChunkBuilder::UseRegisterOrConstantAtStart(HValue* value) {
+ return CanBeImmediateConstant(value)
+ ? chunk_->DefineConstantOperand(HConstant::cast(value))
+ : UseRegisterAtStart(value);
+}
+
+
+LOperand* LChunkBuilder::UseConstant(HValue* value) {
+ return chunk_->DefineConstantOperand(HConstant::cast(value));
+}
+
+
+LOperand* LChunkBuilder::UseAny(HValue* value) {
+ return value->IsConstant()
+ ? chunk_->DefineConstantOperand(HConstant::cast(value))
+ : Use(value, new(zone()) LUnallocated(LUnallocated::ANY));
+}
+
+
+LOperand* LChunkBuilder::Use(HValue* value, LUnallocated* operand) {
+ if (value->EmitAtUses()) {
+ HInstruction* instr = HInstruction::cast(value);
+ VisitInstruction(instr);
+ }
+ operand->set_virtual_register(value->id());
+ return operand;
+}
+
+
+LInstruction* LChunkBuilder::Define(LTemplateResultInstruction<1>* instr,
+ LUnallocated* result) {
+ result->set_virtual_register(current_instruction_->id());
+ instr->set_result(result);
+ return instr;
+}
+
+
+LInstruction* LChunkBuilder::DefineAsRegister(
+ LTemplateResultInstruction<1>* instr) {
+ return Define(instr,
+ new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER));
+}
+
+
+LInstruction* LChunkBuilder::DefineAsSpilled(
+ LTemplateResultInstruction<1>* instr,
+ int index) {
+ return Define(instr,
+ new(zone()) LUnallocated(LUnallocated::FIXED_SLOT, index));
+}
+
+
+LInstruction* LChunkBuilder::DefineSameAsFirst(
+ LTemplateResultInstruction<1>* instr) {
+ return Define(instr,
+ new(zone()) LUnallocated(LUnallocated::SAME_AS_FIRST_INPUT));
+}
+
+
+LInstruction* LChunkBuilder::DefineFixed(LTemplateResultInstruction<1>* instr,
+ Register reg) {
+ return Define(instr, ToUnallocated(reg));
+}
+
+
+LInstruction* LChunkBuilder::AssignEnvironment(LInstruction* instr) {
+ HEnvironment* hydrogen_env = current_block_->last_environment();
+ int argument_index_accumulator = 0;
+ ZoneList<HValue*> objects_to_materialize(0, zone());
+ instr->set_environment(CreateEnvironment(hydrogen_env,
+ &argument_index_accumulator,
+ &objects_to_materialize));
+ return instr;
+}
+
+
+LInstruction* LChunkBuilder::MarkAsCall(LInstruction* instr,
+ HInstruction* hinstr,
+ CanDeoptimize can_deoptimize) {
+ info()->MarkAsNonDeferredCalling();
+
+#ifdef DEBUG
+ instr->VerifyCall();
+#endif
+ instr->MarkAsCall();
+ instr = AssignPointerMap(instr);
+
+  // If the instruction does not have side effects, lazy deoptimization
+  // after the call will try to deoptimize to the point before the call.
+  // Thus we still need to attach an environment to this call even if
+  // the call sequence cannot deoptimize eagerly.
+ bool needs_environment =
+ (can_deoptimize == CAN_DEOPTIMIZE_EAGERLY) ||
+ !hinstr->HasObservableSideEffects();
+ if (needs_environment && !instr->HasEnvironment()) {
+ instr = AssignEnvironment(instr);
+ // We can't really figure out if the environment is needed or not.
+ instr->environment()->set_has_been_used();
+ }
+
+ return instr;
+}
+
+
+LInstruction* LChunkBuilder::AssignPointerMap(LInstruction* instr) {
+ ASSERT(!instr->HasPointerMap());
+ instr->set_pointer_map(new(zone()) LPointerMap(zone()));
+ return instr;
+}
+
+
+LUnallocated* LChunkBuilder::TempRegister() {
+ LUnallocated* operand =
+ new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER);
+ int vreg = allocator_->GetVirtualRegister();
+ if (!allocator_->AllocationOk()) {
+ Abort(kOutOfVirtualRegistersWhileTryingToAllocateTempRegister);
+ vreg = 0;
+ }
+ operand->set_virtual_register(vreg);
+ return operand;
+}
+
+
+LOperand* LChunkBuilder::FixedTemp(Register reg) {
+ LUnallocated* operand = ToUnallocated(reg);
+ ASSERT(operand->HasFixedPolicy());
+ return operand;
+}
+
+
+LInstruction* LChunkBuilder::DoBlockEntry(HBlockEntry* instr) {
+ return new(zone()) LLabel(instr->block());
+}
+
+
+LInstruction* LChunkBuilder::DoDummyUse(HDummyUse* instr) {
+ return DefineAsRegister(new(zone()) LDummyUse(UseAny(instr->value())));
+}
+
+
+LInstruction* LChunkBuilder::DoEnvironmentMarker(HEnvironmentMarker* instr) {
+ UNREACHABLE();
+ return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoDeoptimize(HDeoptimize* instr) {
+ return AssignEnvironment(new(zone()) LDeoptimize);
+}
+
+
+LInstruction* LChunkBuilder::DoShift(Token::Value op,
+ HBitwiseBinaryOperation* instr) {
+ if (instr->representation().IsSmiOrInteger32()) {
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+ LOperand* left = UseRegisterAtStart(instr->left());
+
+ HValue* right_value = instr->right();
+ LOperand* right = NULL;
+ int constant_value = 0;
+ bool does_deopt = false;
+ if (right_value->IsConstant()) {
+ HConstant* constant = HConstant::cast(right_value);
+ right = chunk_->DefineConstantOperand(constant);
+ constant_value = constant->Integer32Value() & 0x1f;
+ // Left shifts can deoptimize if we shift by > 0 and the result cannot be
+ // truncated to smi.
+ if (instr->representation().IsSmi() && constant_value > 0) {
+ does_deopt = !instr->CheckUsesForFlag(HValue::kTruncatingToSmi);
+ }
+ } else {
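+      // A variable shift count must live in ecx; the x86 shift instructions
+      // take their count from cl.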
+ right = UseFixed(right_value, ecx);
+ }
+
+ // Shift operations can only deoptimize if we do a logical shift by 0 and
+ // the result cannot be truncated to int32.
+ if (op == Token::SHR && constant_value == 0) {
+ if (FLAG_opt_safe_uint32_operations) {
+ does_deopt = !instr->CheckFlag(HInstruction::kUint32);
+ } else {
+ does_deopt = !instr->CheckUsesForFlag(HValue::kTruncatingToInt32);
+ }
+ }
+
+ LInstruction* result =
+ DefineSameAsFirst(new(zone()) LShiftI(op, left, right, does_deopt));
+ return does_deopt ? AssignEnvironment(result) : result;
+ } else {
+ return DoArithmeticT(op, instr);
+ }
+}
+
+
+LInstruction* LChunkBuilder::DoArithmeticD(Token::Value op,
+ HArithmeticBinaryOperation* instr) {
+ ASSERT(instr->representation().IsDouble());
+ ASSERT(instr->left()->representation().IsDouble());
+ ASSERT(instr->right()->representation().IsDouble());
+ if (op == Token::MOD) {
+ LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
+ LOperand* right = UseRegisterAtStart(instr->BetterRightOperand());
+ LArithmeticD* result = new(zone()) LArithmeticD(op, left, right);
+ return MarkAsCall(DefineSameAsFirst(result), instr);
+ } else {
+ LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
+ LOperand* right = UseRegisterAtStart(instr->BetterRightOperand());
+ LArithmeticD* result = new(zone()) LArithmeticD(op, left, right);
+ return DefineSameAsFirst(result);
+ }
+}
+
+
+LInstruction* LChunkBuilder::DoArithmeticT(Token::Value op,
+ HBinaryOperation* instr) {
+ HValue* left = instr->left();
+ HValue* right = instr->right();
+ ASSERT(left->representation().IsTagged());
+ ASSERT(right->representation().IsTagged());
+ LOperand* context = UseFixed(instr->context(), esi);
+ LOperand* left_operand = UseFixed(left, edx);
+ LOperand* right_operand = UseFixed(right, eax);
+ LArithmeticT* result =
+ new(zone()) LArithmeticT(op, context, left_operand, right_operand);
+ return MarkAsCall(DefineFixed(result, eax), instr);
+}
+
+
+void LChunkBuilder::DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block) {
+ ASSERT(is_building());
+ current_block_ = block;
+ next_block_ = next_block;
+ if (block->IsStartBlock()) {
+ block->UpdateEnvironment(graph_->start_environment());
+ argument_count_ = 0;
+ } else if (block->predecessors()->length() == 1) {
+ // We have a single predecessor => copy environment and outgoing
+ // argument count from the predecessor.
+ ASSERT(block->phis()->length() == 0);
+ HBasicBlock* pred = block->predecessors()->at(0);
+ HEnvironment* last_environment = pred->last_environment();
+ ASSERT(last_environment != NULL);
+    // Only copy the environment if it is used again later.
+ if (pred->end()->SecondSuccessor() == NULL) {
+ ASSERT(pred->end()->FirstSuccessor() == block);
+ } else {
+ if (pred->end()->FirstSuccessor()->block_id() > block->block_id() ||
+ pred->end()->SecondSuccessor()->block_id() > block->block_id()) {
+ last_environment = last_environment->Copy();
+ }
+ }
+ block->UpdateEnvironment(last_environment);
+ ASSERT(pred->argument_count() >= 0);
+ argument_count_ = pred->argument_count();
+ } else {
+ // We are at a state join => process phis.
+ HBasicBlock* pred = block->predecessors()->at(0);
+ // No need to copy the environment, it cannot be used later.
+ HEnvironment* last_environment = pred->last_environment();
+ for (int i = 0; i < block->phis()->length(); ++i) {
+ HPhi* phi = block->phis()->at(i);
+ if (phi->HasMergedIndex()) {
+ last_environment->SetValueAt(phi->merged_index(), phi);
+ }
+ }
+ for (int i = 0; i < block->deleted_phis()->length(); ++i) {
+ if (block->deleted_phis()->at(i) < last_environment->length()) {
+ last_environment->SetValueAt(block->deleted_phis()->at(i),
+ graph_->GetConstantUndefined());
+ }
+ }
+ block->UpdateEnvironment(last_environment);
+ // Pick up the outgoing argument count of one of the predecessors.
+ argument_count_ = pred->argument_count();
+ }
+ HInstruction* current = block->first();
+ int start = chunk_->instructions()->length();
+ while (current != NULL && !is_aborted()) {
+ // Code for constants in registers is generated lazily.
+ if (!current->EmitAtUses()) {
+ VisitInstruction(current);
+ }
+ current = current->next();
+ }
+ int end = chunk_->instructions()->length() - 1;
+ if (end >= start) {
+ block->set_first_instruction_index(start);
+ block->set_last_instruction_index(end);
+ }
+ block->set_argument_count(argument_count_);
+ next_block_ = NULL;
+ current_block_ = NULL;
+}
+
+
+void LChunkBuilder::VisitInstruction(HInstruction* current) {
+ HInstruction* old_current = current_instruction_;
+ current_instruction_ = current;
+
+ LInstruction* instr = NULL;
+ if (current->CanReplaceWithDummyUses()) {
+ if (current->OperandCount() == 0) {
+ instr = DefineAsRegister(new(zone()) LDummy());
+ } else {
+ ASSERT(!current->OperandAt(0)->IsControlInstruction());
+ instr = DefineAsRegister(new(zone())
+ LDummyUse(UseAny(current->OperandAt(0))));
+ }
+ for (int i = 1; i < current->OperandCount(); ++i) {
+ if (current->OperandAt(i)->IsControlInstruction()) continue;
+ LInstruction* dummy =
+ new(zone()) LDummyUse(UseAny(current->OperandAt(i)));
+ dummy->set_hydrogen_value(current);
+ chunk_->AddInstruction(dummy, current_block_);
+ }
+ } else {
+ instr = current->CompileToLithium(this);
+ }
+
+ argument_count_ += current->argument_delta();
+ ASSERT(argument_count_ >= 0);
+
+ if (instr != NULL) {
+ AddInstruction(instr, current);
+ }
+
+ current_instruction_ = old_current;
+}
+
+
+void LChunkBuilder::AddInstruction(LInstruction* instr,
+ HInstruction* hydrogen_val) {
+ // Associate the hydrogen instruction first, since we may need it for
+ // the ClobbersRegisters() or ClobbersDoubleRegisters() calls below.
+ instr->set_hydrogen_value(hydrogen_val);
+
+#if DEBUG
+ // Make sure that the lithium instruction has either no fixed register
+ // constraints in temps or the result OR no uses that are only used at
+ // start. If this invariant doesn't hold, the register allocator can decide
+ // to insert a split of a range immediately before the instruction due to an
+ // already allocated register needing to be used for the instruction's fixed
+ // register constraint. In this case, the register allocator won't see an
+ // interference between the split child and the use-at-start (it would if
+ // it was just a plain use), so it is free to move the split child into
+ // the same register that is used for the use-at-start.
+ // See https://code.google.com/p/chromium/issues/detail?id=201590
+ if (!(instr->ClobbersRegisters() &&
+ instr->ClobbersDoubleRegisters(isolate()))) {
+ int fixed = 0;
+ int used_at_start = 0;
+ for (UseIterator it(instr); !it.Done(); it.Advance()) {
+ LUnallocated* operand = LUnallocated::cast(it.Current());
+ if (operand->IsUsedAtStart()) ++used_at_start;
+ }
+ if (instr->Output() != NULL) {
+ if (LUnallocated::cast(instr->Output())->HasFixedPolicy()) ++fixed;
+ }
+ for (TempIterator it(instr); !it.Done(); it.Advance()) {
+ LUnallocated* operand = LUnallocated::cast(it.Current());
+ if (operand->HasFixedPolicy()) ++fixed;
+ }
+ ASSERT(fixed == 0 || used_at_start == 0);
+ }
+#endif
+
+ if (FLAG_stress_pointer_maps && !instr->HasPointerMap()) {
+ instr = AssignPointerMap(instr);
+ }
+ if (FLAG_stress_environments && !instr->HasEnvironment()) {
+ instr = AssignEnvironment(instr);
+ }
+ if (instr->IsGoto() && LGoto::cast(instr)->jumps_to_join()) {
+ // TODO(olivf) Since phis of spilled values are joined as registers
+ // (not in their stack slots), we need to allow the goto gaps to keep one
+ // x87 register alive. To ensure all other values are still spilled, we
+ // insert an FPU register barrier right before.
+ LClobberDoubles* clobber = new(zone()) LClobberDoubles(isolate());
+ clobber->set_hydrogen_value(hydrogen_val);
+ chunk_->AddInstruction(clobber, current_block_);
+ }
+ chunk_->AddInstruction(instr, current_block_);
+
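+ // Every call can trigger a lazy deoptimization, so follow it with an
+ // LLazyBailout. If the call has observable side effects, the environment
+ // of the HSimulate that follows it is replayed and used for the bailout.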
+ if (instr->IsCall()) {
+ HValue* hydrogen_value_for_lazy_bailout = hydrogen_val;
+ LInstruction* instruction_needing_environment = NULL;
+ if (hydrogen_val->HasObservableSideEffects()) {
+ HSimulate* sim = HSimulate::cast(hydrogen_val->next());
+ instruction_needing_environment = instr;
+ sim->ReplayEnvironment(current_block_->last_environment());
+ hydrogen_value_for_lazy_bailout = sim;
+ }
+ LInstruction* bailout = AssignEnvironment(new(zone()) LLazyBailout());
+ bailout->set_hydrogen_value(hydrogen_value_for_lazy_bailout);
+ chunk_->AddInstruction(bailout, current_block_);
+ if (instruction_needing_environment != NULL) {
+ // Store the lazy deopt environment with the instruction if needed.
+ // Right now it is only used for LInstanceOfKnownGlobal.
+ instruction_needing_environment->
+ SetDeferredLazyDeoptimizationEnvironment(bailout->environment());
+ }
+ }
+}
+
+
+LInstruction* LChunkBuilder::DoGoto(HGoto* instr) {
+ return new(zone()) LGoto(instr->FirstSuccessor());
+}
+
+
+LInstruction* LChunkBuilder::DoBranch(HBranch* instr) {
+ LInstruction* goto_instr = CheckElideControlInstruction(instr);
+ if (goto_instr != NULL) return goto_instr;
+
+ HValue* value = instr->value();
+ Representation r = value->representation();
+ HType type = value->type();
+ ToBooleanStub::Types expected = instr->expected_input_types();
+ if (expected.IsEmpty()) expected = ToBooleanStub::Types::Generic();
+
+ bool easy_case = !r.IsTagged() || type.IsBoolean() || type.IsSmi() ||
+ type.IsJSArray() || type.IsHeapNumber() || type.IsString();
+ LOperand* temp = !easy_case && expected.NeedsMap() ? TempRegister() : NULL;
+ LInstruction* branch = new(zone()) LBranch(UseRegister(value), temp);
+ if (!easy_case &&
+ ((!expected.Contains(ToBooleanStub::SMI) && expected.NeedsMap()) ||
+ !expected.IsGeneric())) {
+ branch = AssignEnvironment(branch);
+ }
+ return branch;
+}
+
+
+LInstruction* LChunkBuilder::DoDebugBreak(HDebugBreak* instr) {
+ return new(zone()) LDebugBreak();
+}
+
+
+LInstruction* LChunkBuilder::DoCompareMap(HCompareMap* instr) {
+ LInstruction* goto_instr = CheckElideControlInstruction(instr);
+ if (goto_instr != NULL) return goto_instr;
+
+ ASSERT(instr->value()->representation().IsTagged());
+ LOperand* value = UseRegisterAtStart(instr->value());
+ return new(zone()) LCmpMapAndBranch(value);
+}
+
+
+LInstruction* LChunkBuilder::DoArgumentsLength(HArgumentsLength* length) {
+ info()->MarkAsRequiresFrame();
+ return DefineAsRegister(new(zone()) LArgumentsLength(Use(length->value())));
+}
+
+
+LInstruction* LChunkBuilder::DoArgumentsElements(HArgumentsElements* elems) {
+ info()->MarkAsRequiresFrame();
+ return DefineAsRegister(new(zone()) LArgumentsElements);
+}
+
+
+LInstruction* LChunkBuilder::DoInstanceOf(HInstanceOf* instr) {
+ LOperand* left = UseFixed(instr->left(), InstanceofStub::left());
+ LOperand* right = UseFixed(instr->right(), InstanceofStub::right());
+ LOperand* context = UseFixed(instr->context(), esi);
+ LInstanceOf* result = new(zone()) LInstanceOf(context, left, right);
+ return MarkAsCall(DefineFixed(result, eax), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoInstanceOfKnownGlobal(
+ HInstanceOfKnownGlobal* instr) {
+ LInstanceOfKnownGlobal* result =
+ new(zone()) LInstanceOfKnownGlobal(
+ UseFixed(instr->context(), esi),
+ UseFixed(instr->left(), InstanceofStub::left()),
+ FixedTemp(edi));
+ return MarkAsCall(DefineFixed(result, eax), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoWrapReceiver(HWrapReceiver* instr) {
+ LOperand* receiver = UseRegister(instr->receiver());
+ LOperand* function = UseRegister(instr->function());
+ LOperand* temp = TempRegister();
+ LWrapReceiver* result =
+ new(zone()) LWrapReceiver(receiver, function, temp);
+ return AssignEnvironment(DefineSameAsFirst(result));
+}
+
+
+LInstruction* LChunkBuilder::DoApplyArguments(HApplyArguments* instr) {
+ LOperand* function = UseFixed(instr->function(), edi);
+ LOperand* receiver = UseFixed(instr->receiver(), eax);
+ LOperand* length = UseFixed(instr->length(), ebx);
+ LOperand* elements = UseFixed(instr->elements(), ecx);
+ LApplyArguments* result = new(zone()) LApplyArguments(function,
+ receiver,
+ length,
+ elements);
+ return MarkAsCall(DefineFixed(result, eax), instr, CAN_DEOPTIMIZE_EAGERLY);
+}
+
+
+LInstruction* LChunkBuilder::DoPushArguments(HPushArguments* instr) {
+ int argc = instr->OperandCount();
+ for (int i = 0; i < argc; ++i) {
+ LOperand* argument = UseAny(instr->argument(i));
+ AddInstruction(new(zone()) LPushArgument(argument), instr);
+ }
+ return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoStoreCodeEntry(
+ HStoreCodeEntry* store_code_entry) {
+ LOperand* function = UseRegister(store_code_entry->function());
+ LOperand* code_object = UseTempRegister(store_code_entry->code_object());
+ return new(zone()) LStoreCodeEntry(function, code_object);
+}
+
+
+LInstruction* LChunkBuilder::DoInnerAllocatedObject(
+ HInnerAllocatedObject* instr) {
+ LOperand* base_object = UseRegisterAtStart(instr->base_object());
+ LOperand* offset = UseRegisterOrConstantAtStart(instr->offset());
+ return DefineAsRegister(
+ new(zone()) LInnerAllocatedObject(base_object, offset));
+}
+
+
+LInstruction* LChunkBuilder::DoThisFunction(HThisFunction* instr) {
+ return instr->HasNoUses()
+ ? NULL
+ : DefineAsRegister(new(zone()) LThisFunction);
+}
+
+
+LInstruction* LChunkBuilder::DoContext(HContext* instr) {
+ if (instr->HasNoUses()) return NULL;
+
+ if (info()->IsStub()) {
+ return DefineFixed(new(zone()) LContext, esi);
+ }
+
+ return DefineAsRegister(new(zone()) LContext);
+}
+
+
+LInstruction* LChunkBuilder::DoDeclareGlobals(HDeclareGlobals* instr) {
+ LOperand* context = UseFixed(instr->context(), esi);
+ return MarkAsCall(new(zone()) LDeclareGlobals(context), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoCallJSFunction(
+ HCallJSFunction* instr) {
+ LOperand* function = UseFixed(instr->function(), edi);
+
+ LCallJSFunction* result = new(zone()) LCallJSFunction(function);
+
+ return MarkAsCall(DefineFixed(result, eax), instr, CANNOT_DEOPTIMIZE_EAGERLY);
+}
+
+
+LInstruction* LChunkBuilder::DoCallWithDescriptor(
+ HCallWithDescriptor* instr) {
+ const CallInterfaceDescriptor* descriptor = instr->descriptor();
+
+ LOperand* target = UseRegisterOrConstantAtStart(instr->target());
+ ZoneList<LOperand*> ops(instr->OperandCount(), zone());
+ ops.Add(target, zone());
+ for (int i = 1; i < instr->OperandCount(); i++) {
+ LOperand* op = UseFixed(instr->OperandAt(i),
+ descriptor->GetParameterRegister(i - 1));
+ ops.Add(op, zone());
+ }
+
+ LCallWithDescriptor* result = new(zone()) LCallWithDescriptor(
+ descriptor, ops, zone());
+ return MarkAsCall(DefineFixed(result, eax), instr, CANNOT_DEOPTIMIZE_EAGERLY);
+}
+
+
+LInstruction* LChunkBuilder::DoInvokeFunction(HInvokeFunction* instr) {
+ LOperand* context = UseFixed(instr->context(), esi);
+ LOperand* function = UseFixed(instr->function(), edi);
+ LInvokeFunction* result = new(zone()) LInvokeFunction(context, function);
+ return MarkAsCall(DefineFixed(result, eax), instr, CANNOT_DEOPTIMIZE_EAGERLY);
+}
+
+
+LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) {
+ switch (instr->op()) {
+ case kMathFloor: return DoMathFloor(instr);
+ case kMathRound: return DoMathRound(instr);
+ case kMathAbs: return DoMathAbs(instr);
+ case kMathLog: return DoMathLog(instr);
+ case kMathExp: return DoMathExp(instr);
+ case kMathSqrt: return DoMathSqrt(instr);
+ case kMathPowHalf: return DoMathPowHalf(instr);
+ case kMathClz32: return DoMathClz32(instr);
+ default:
+ UNREACHABLE();
+ return NULL;
+ }
+}
+
+
+LInstruction* LChunkBuilder::DoMathFloor(HUnaryMathOperation* instr) {
+ LOperand* input = UseRegisterAtStart(instr->value());
+ LMathFloor* result = new(zone()) LMathFloor(input);
+ return AssignEnvironment(DefineAsRegister(result));
+}
+
+
+LInstruction* LChunkBuilder::DoMathRound(HUnaryMathOperation* instr) {
+ // Crankshaft is turned off for nosse2.
+ UNREACHABLE();
+ return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoMathAbs(HUnaryMathOperation* instr) {
+ LOperand* context = UseAny(instr->context()); // Deferred use.
+ LOperand* input = UseRegisterAtStart(instr->value());
+ LInstruction* result =
+ DefineSameAsFirst(new(zone()) LMathAbs(context, input));
+ Representation r = instr->value()->representation();
+ if (!r.IsDouble() && !r.IsSmiOrInteger32()) result = AssignPointerMap(result);
+ if (!r.IsDouble()) result = AssignEnvironment(result);
+ return result;
+}
+
+
+LInstruction* LChunkBuilder::DoMathLog(HUnaryMathOperation* instr) {
+ ASSERT(instr->representation().IsDouble());
+ ASSERT(instr->value()->representation().IsDouble());
+ LOperand* input = UseRegisterAtStart(instr->value());
+ return MarkAsCall(DefineSameAsFirst(new(zone()) LMathLog(input)), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoMathClz32(HUnaryMathOperation* instr) {
+ LOperand* input = UseRegisterAtStart(instr->value());
+ LMathClz32* result = new(zone()) LMathClz32(input);
+ return DefineAsRegister(result);
+}
+
+
+LInstruction* LChunkBuilder::DoMathExp(HUnaryMathOperation* instr) {
+ ASSERT(instr->representation().IsDouble());
+ ASSERT(instr->value()->representation().IsDouble());
+ LOperand* value = UseTempRegister(instr->value());
+ LOperand* temp1 = TempRegister();
+ LOperand* temp2 = TempRegister();
+ LMathExp* result = new(zone()) LMathExp(value, temp1, temp2);
+ return DefineAsRegister(result);
+}
+
+
+LInstruction* LChunkBuilder::DoMathSqrt(HUnaryMathOperation* instr) {
+ LOperand* input = UseRegisterAtStart(instr->value());
+ LMathSqrt* result = new(zone()) LMathSqrt(input);
+ return DefineSameAsFirst(result);
+}
+
+
+LInstruction* LChunkBuilder::DoMathPowHalf(HUnaryMathOperation* instr) {
+ LOperand* input = UseRegisterAtStart(instr->value());
+ LOperand* temp = TempRegister();
+ LMathPowHalf* result = new(zone()) LMathPowHalf(input, temp);
+ return DefineSameAsFirst(result);
+}
+
+
+LInstruction* LChunkBuilder::DoCallNew(HCallNew* instr) {
+ LOperand* context = UseFixed(instr->context(), esi);
+ LOperand* constructor = UseFixed(instr->constructor(), edi);
+ LCallNew* result = new(zone()) LCallNew(context, constructor);
+ return MarkAsCall(DefineFixed(result, eax), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoCallNewArray(HCallNewArray* instr) {
+ LOperand* context = UseFixed(instr->context(), esi);
+ LOperand* constructor = UseFixed(instr->constructor(), edi);
+ LCallNewArray* result = new(zone()) LCallNewArray(context, constructor);
+ return MarkAsCall(DefineFixed(result, eax), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoCallFunction(HCallFunction* instr) {
+ LOperand* context = UseFixed(instr->context(), esi);
+ LOperand* function = UseFixed(instr->function(), edi);
+ LCallFunction* call = new(zone()) LCallFunction(context, function);
+ return MarkAsCall(DefineFixed(call, eax), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoCallRuntime(HCallRuntime* instr) {
+ LOperand* context = UseFixed(instr->context(), esi);
+ return MarkAsCall(DefineFixed(new(zone()) LCallRuntime(context), eax), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoRor(HRor* instr) {
+ return DoShift(Token::ROR, instr);
+}
+
+
+LInstruction* LChunkBuilder::DoShr(HShr* instr) {
+ return DoShift(Token::SHR, instr);
+}
+
+
+LInstruction* LChunkBuilder::DoSar(HSar* instr) {
+ return DoShift(Token::SAR, instr);
+}
+
+
+LInstruction* LChunkBuilder::DoShl(HShl* instr) {
+ return DoShift(Token::SHL, instr);
+}
+
+
+LInstruction* LChunkBuilder::DoBitwise(HBitwise* instr) {
+ if (instr->representation().IsSmiOrInteger32()) {
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+ ASSERT(instr->CheckFlag(HValue::kTruncatingToInt32));
+
+ LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
+ LOperand* right = UseOrConstantAtStart(instr->BetterRightOperand());
+ return DefineSameAsFirst(new(zone()) LBitI(left, right));
+ } else {
+ return DoArithmeticT(instr->op(), instr);
+ }
+}
+
+
+LInstruction* LChunkBuilder::DoDivByPowerOf2I(HDiv* instr) {
+ ASSERT(instr->representation().IsSmiOrInteger32());
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+ LOperand* dividend = UseRegister(instr->left());
+ int32_t divisor = instr->right()->GetInteger32Constant();
+ LInstruction* result = DefineAsRegister(new(zone()) LDivByPowerOf2I(
+ dividend, divisor));
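+ // Deoptimize if the quotient could be -0 (negative divisor), if
+ // kMinInt / -1 could overflow, or if a non-truncating use could observe
+ // a lost remainder.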
+ if ((instr->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) ||
+ (instr->CheckFlag(HValue::kCanOverflow) && divisor == -1) ||
+ (!instr->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) &&
+ divisor != 1 && divisor != -1)) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+}
+
+
+LInstruction* LChunkBuilder::DoDivByConstI(HDiv* instr) {
+ ASSERT(instr->representation().IsInteger32());
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+ LOperand* dividend = UseRegister(instr->left());
+ int32_t divisor = instr->right()->GetInteger32Constant();
+ LOperand* temp1 = FixedTemp(eax);
+ LOperand* temp2 = FixedTemp(edx);
+ LInstruction* result = DefineFixed(new(zone()) LDivByConstI(
+ dividend, divisor, temp1, temp2), edx);
+ if (divisor == 0 ||
+ (instr->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) ||
+ !instr->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+}
+
+
+LInstruction* LChunkBuilder::DoDivI(HDiv* instr) {
+ ASSERT(instr->representation().IsSmiOrInteger32());
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+ LOperand* dividend = UseFixed(instr->left(), eax);
+ LOperand* divisor = UseRegister(instr->right());
+ LOperand* temp = FixedTemp(edx);
+ LInstruction* result = DefineFixed(new(zone()) LDivI(
+ dividend, divisor, temp), eax);
+ if (instr->CheckFlag(HValue::kCanBeDivByZero) ||
+ instr->CheckFlag(HValue::kBailoutOnMinusZero) ||
+ instr->CheckFlag(HValue::kCanOverflow) ||
+ !instr->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+}
+
+
+LInstruction* LChunkBuilder::DoDiv(HDiv* instr) {
+ if (instr->representation().IsSmiOrInteger32()) {
+ if (instr->RightIsPowerOf2()) {
+ return DoDivByPowerOf2I(instr);
+ } else if (instr->right()->IsConstant()) {
+ return DoDivByConstI(instr);
+ } else {
+ return DoDivI(instr);
+ }
+ } else if (instr->representation().IsDouble()) {
+ return DoArithmeticD(Token::DIV, instr);
+ } else {
+ return DoArithmeticT(Token::DIV, instr);
+ }
+}
+
+
+LInstruction* LChunkBuilder::DoFlooringDivByPowerOf2I(HMathFloorOfDiv* instr) {
+ LOperand* dividend = UseRegisterAtStart(instr->left());
+ int32_t divisor = instr->right()->GetInteger32Constant();
+ LInstruction* result = DefineSameAsFirst(new(zone()) LFlooringDivByPowerOf2I(
+ dividend, divisor));
+ if ((instr->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) ||
+ (instr->CheckFlag(HValue::kLeftCanBeMinInt) && divisor == -1)) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+}
+
+
+LInstruction* LChunkBuilder::DoFlooringDivByConstI(HMathFloorOfDiv* instr) {
+ ASSERT(instr->representation().IsInteger32());
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+ LOperand* dividend = UseRegister(instr->left());
+ int32_t divisor = instr->right()->GetInteger32Constant();
+ LOperand* temp1 = FixedTemp(eax);
+ LOperand* temp2 = FixedTemp(edx);
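+ // An extra temp is only needed when the dividend can have the opposite
+ // sign of the divisor, in which case the truncating quotient must be
+ // corrected towards negative infinity.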
+ LOperand* temp3 =
+ ((divisor > 0 && !instr->CheckFlag(HValue::kLeftCanBeNegative)) ||
+ (divisor < 0 && !instr->CheckFlag(HValue::kLeftCanBePositive))) ?
+ NULL : TempRegister();
+ LInstruction* result =
+ DefineFixed(new(zone()) LFlooringDivByConstI(dividend,
+ divisor,
+ temp1,
+ temp2,
+ temp3),
+ edx);
+ if (divisor == 0 ||
+ (instr->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0)) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+}
+
+
+LInstruction* LChunkBuilder::DoFlooringDivI(HMathFloorOfDiv* instr) {
+ ASSERT(instr->representation().IsSmiOrInteger32());
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+ LOperand* dividend = UseFixed(instr->left(), eax);
+ LOperand* divisor = UseRegister(instr->right());
+ LOperand* temp = FixedTemp(edx);
+ LInstruction* result = DefineFixed(new(zone()) LFlooringDivI(
+ dividend, divisor, temp), eax);
+ if (instr->CheckFlag(HValue::kCanBeDivByZero) ||
+ instr->CheckFlag(HValue::kBailoutOnMinusZero) ||
+ instr->CheckFlag(HValue::kCanOverflow)) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+}
+
+
+LInstruction* LChunkBuilder::DoMathFloorOfDiv(HMathFloorOfDiv* instr) {
+ if (instr->RightIsPowerOf2()) {
+ return DoFlooringDivByPowerOf2I(instr);
+ } else if (instr->right()->IsConstant()) {
+ return DoFlooringDivByConstI(instr);
+ } else {
+ return DoFlooringDivI(instr);
+ }
+}
+
+
+LInstruction* LChunkBuilder::DoModByPowerOf2I(HMod* instr) {
+ ASSERT(instr->representation().IsSmiOrInteger32());
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+ LOperand* dividend = UseRegisterAtStart(instr->left());
+ int32_t divisor = instr->right()->GetInteger32Constant();
+ LInstruction* result = DefineSameAsFirst(new(zone()) LModByPowerOf2I(
+ dividend, divisor));
+ if (instr->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+}
+
+
+LInstruction* LChunkBuilder::DoModByConstI(HMod* instr) {
+ ASSERT(instr->representation().IsSmiOrInteger32());
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+ LOperand* dividend = UseRegister(instr->left());
+ int32_t divisor = instr->right()->GetInteger32Constant();
+ LOperand* temp1 = FixedTemp(eax);
+ LOperand* temp2 = FixedTemp(edx);
+ LInstruction* result = DefineFixed(new(zone()) LModByConstI(
+ dividend, divisor, temp1, temp2), eax);
+ if (divisor == 0 || instr->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+}
+
+
+LInstruction* LChunkBuilder::DoModI(HMod* instr) {
+ ASSERT(instr->representation().IsSmiOrInteger32());
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+ LOperand* dividend = UseFixed(instr->left(), eax);
+ LOperand* divisor = UseRegister(instr->right());
+ LOperand* temp = FixedTemp(edx);
+ LInstruction* result = DefineFixed(new(zone()) LModI(
+ dividend, divisor, temp), edx);
+ if (instr->CheckFlag(HValue::kCanBeDivByZero) ||
+ instr->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+}
+
+
+LInstruction* LChunkBuilder::DoMod(HMod* instr) {
+ if (instr->representation().IsSmiOrInteger32()) {
+ if (instr->RightIsPowerOf2()) {
+ return DoModByPowerOf2I(instr);
+ } else if (instr->right()->IsConstant()) {
+ return DoModByConstI(instr);
+ } else {
+ return DoModI(instr);
+ }
+ } else if (instr->representation().IsDouble()) {
+ return DoArithmeticD(Token::MOD, instr);
+ } else {
+ return DoArithmeticT(Token::MOD, instr);
+ }
+}
+
+
+LInstruction* LChunkBuilder::DoMul(HMul* instr) {
+ if (instr->representation().IsSmiOrInteger32()) {
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+ LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
+ LOperand* right = UseOrConstant(instr->BetterRightOperand());
+ LOperand* temp = NULL;
+ if (instr->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ temp = TempRegister();
+ }
+ LMulI* mul = new(zone()) LMulI(left, right, temp);
+ if (instr->CheckFlag(HValue::kCanOverflow) ||
+ instr->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ AssignEnvironment(mul);
+ }
+ return DefineSameAsFirst(mul);
+ } else if (instr->representation().IsDouble()) {
+ return DoArithmeticD(Token::MUL, instr);
+ } else {
+ return DoArithmeticT(Token::MUL, instr);
+ }
+}
+
+
+LInstruction* LChunkBuilder::DoSub(HSub* instr) {
+ if (instr->representation().IsSmiOrInteger32()) {
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+ LOperand* left = UseRegisterAtStart(instr->left());
+ LOperand* right = UseOrConstantAtStart(instr->right());
+ LSubI* sub = new(zone()) LSubI(left, right);
+ LInstruction* result = DefineSameAsFirst(sub);
+ if (instr->CheckFlag(HValue::kCanOverflow)) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+ } else if (instr->representation().IsDouble()) {
+ return DoArithmeticD(Token::SUB, instr);
+ } else {
+ return DoArithmeticT(Token::SUB, instr);
+ }
+}
+
+
+LInstruction* LChunkBuilder::DoAdd(HAdd* instr) {
+ if (instr->representation().IsSmiOrInteger32()) {
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+ // Check to see if it would be advantageous to use an lea instruction rather
+ // than an add. This is the case when no overflow check is needed and there
+ // are multiple uses of the add's inputs, so using a 3-register add will
+ // preserve all input values for later uses.
+ bool use_lea = LAddI::UseLea(instr);
+ LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
+ HValue* right_candidate = instr->BetterRightOperand();
+ LOperand* right = use_lea
+ ? UseRegisterOrConstantAtStart(right_candidate)
+ : UseOrConstantAtStart(right_candidate);
+ LAddI* add = new(zone()) LAddI(left, right);
+ bool can_overflow = instr->CheckFlag(HValue::kCanOverflow);
+ LInstruction* result = use_lea
+ ? DefineAsRegister(add)
+ : DefineSameAsFirst(add);
+ if (can_overflow) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+ } else if (instr->representation().IsDouble()) {
+ return DoArithmeticD(Token::ADD, instr);
+ } else if (instr->representation().IsExternal()) {
+ ASSERT(instr->left()->representation().IsExternal());
+ ASSERT(instr->right()->representation().IsInteger32());
+ ASSERT(!instr->CheckFlag(HValue::kCanOverflow));
+ bool use_lea = LAddI::UseLea(instr);
+ LOperand* left = UseRegisterAtStart(instr->left());
+ HValue* right_candidate = instr->right();
+ LOperand* right = use_lea
+ ? UseRegisterOrConstantAtStart(right_candidate)
+ : UseOrConstantAtStart(right_candidate);
+ LAddI* add = new(zone()) LAddI(left, right);
+ LInstruction* result = use_lea
+ ? DefineAsRegister(add)
+ : DefineSameAsFirst(add);
+ return result;
+ } else {
+ return DoArithmeticT(Token::ADD, instr);
+ }
+}
+
+
+LInstruction* LChunkBuilder::DoMathMinMax(HMathMinMax* instr) {
+ LOperand* left = NULL;
+ LOperand* right = NULL;
+ if (instr->representation().IsSmiOrInteger32()) {
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+ left = UseRegisterAtStart(instr->BetterLeftOperand());
+ right = UseOrConstantAtStart(instr->BetterRightOperand());
+ } else {
+ ASSERT(instr->representation().IsDouble());
+ ASSERT(instr->left()->representation().IsDouble());
+ ASSERT(instr->right()->representation().IsDouble());
+ left = UseRegisterAtStart(instr->left());
+ right = UseRegisterAtStart(instr->right());
+ }
+ LMathMinMax* minmax = new(zone()) LMathMinMax(left, right);
+ return DefineSameAsFirst(minmax);
+}
+
+
+LInstruction* LChunkBuilder::DoPower(HPower* instr) {
+ // Crankshaft is turned off for nosse2.
+ UNREACHABLE();
+ return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoCompareGeneric(HCompareGeneric* instr) {
+ ASSERT(instr->left()->representation().IsSmiOrTagged());
+ ASSERT(instr->right()->representation().IsSmiOrTagged());
+ LOperand* context = UseFixed(instr->context(), esi);
+ LOperand* left = UseFixed(instr->left(), edx);
+ LOperand* right = UseFixed(instr->right(), eax);
+ LCmpT* result = new(zone()) LCmpT(context, left, right);
+ return MarkAsCall(DefineFixed(result, eax), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoCompareNumericAndBranch(
+ HCompareNumericAndBranch* instr) {
+ LInstruction* goto_instr = CheckElideControlInstruction(instr);
+ if (goto_instr != NULL) return goto_instr;
+ Representation r = instr->representation();
+ if (r.IsSmiOrInteger32()) {
+ ASSERT(instr->left()->representation().Equals(r));
+ ASSERT(instr->right()->representation().Equals(r));
+ LOperand* left = UseRegisterOrConstantAtStart(instr->left());
+ LOperand* right = UseOrConstantAtStart(instr->right());
+ return new(zone()) LCompareNumericAndBranch(left, right);
+ } else {
+ ASSERT(r.IsDouble());
+ ASSERT(instr->left()->representation().IsDouble());
+ ASSERT(instr->right()->representation().IsDouble());
+ LOperand* left;
+ LOperand* right;
+ if (CanBeImmediateConstant(instr->left()) &&
+ CanBeImmediateConstant(instr->right())) {
+ // The code generator requires either both inputs to be constant
+ // operands, or neither.
+ left = UseConstant(instr->left());
+ right = UseConstant(instr->right());
+ } else {
+ left = UseRegisterAtStart(instr->left());
+ right = UseRegisterAtStart(instr->right());
+ }
+ return new(zone()) LCompareNumericAndBranch(left, right);
+ }
+}
+
+
+LInstruction* LChunkBuilder::DoCompareObjectEqAndBranch(
+ HCompareObjectEqAndBranch* instr) {
+ LInstruction* goto_instr = CheckElideControlInstruction(instr);
+ if (goto_instr != NULL) return goto_instr;
+ LOperand* left = UseRegisterAtStart(instr->left());
+ LOperand* right = UseOrConstantAtStart(instr->right());
+ return new(zone()) LCmpObjectEqAndBranch(left, right);
+}
+
+
+LInstruction* LChunkBuilder::DoCompareHoleAndBranch(
+ HCompareHoleAndBranch* instr) {
+ LOperand* value = UseRegisterAtStart(instr->value());
+ return new(zone()) LCmpHoleAndBranch(value);
+}
+
+
+LInstruction* LChunkBuilder::DoCompareMinusZeroAndBranch(
+ HCompareMinusZeroAndBranch* instr) {
+ LInstruction* goto_instr = CheckElideControlInstruction(instr);
+ if (goto_instr != NULL) return goto_instr;
+ LOperand* value = UseRegister(instr->value());
+ LOperand* scratch = TempRegister();
+ return new(zone()) LCompareMinusZeroAndBranch(value, scratch);
+}
+
+
+LInstruction* LChunkBuilder::DoIsObjectAndBranch(HIsObjectAndBranch* instr) {
+ ASSERT(instr->value()->representation().IsSmiOrTagged());
+ LOperand* temp = TempRegister();
+ return new(zone()) LIsObjectAndBranch(UseRegister(instr->value()), temp);
+}
+
+
+LInstruction* LChunkBuilder::DoIsStringAndBranch(HIsStringAndBranch* instr) {
+ ASSERT(instr->value()->representation().IsTagged());
+ LOperand* temp = TempRegister();
+ return new(zone()) LIsStringAndBranch(UseRegister(instr->value()), temp);
+}
+
+
+LInstruction* LChunkBuilder::DoIsSmiAndBranch(HIsSmiAndBranch* instr) {
+ ASSERT(instr->value()->representation().IsTagged());
+ return new(zone()) LIsSmiAndBranch(Use(instr->value()));
+}
+
+
+LInstruction* LChunkBuilder::DoIsUndetectableAndBranch(
+ HIsUndetectableAndBranch* instr) {
+ ASSERT(instr->value()->representation().IsTagged());
+ return new(zone()) LIsUndetectableAndBranch(
+ UseRegisterAtStart(instr->value()), TempRegister());
+}
+
+
+LInstruction* LChunkBuilder::DoStringCompareAndBranch(
+ HStringCompareAndBranch* instr) {
+ ASSERT(instr->left()->representation().IsTagged());
+ ASSERT(instr->right()->representation().IsTagged());
+ LOperand* context = UseFixed(instr->context(), esi);
+ LOperand* left = UseFixed(instr->left(), edx);
+ LOperand* right = UseFixed(instr->right(), eax);
+
+ LStringCompareAndBranch* result = new(zone())
+ LStringCompareAndBranch(context, left, right);
+
+ return MarkAsCall(result, instr);
+}
+
+
+LInstruction* LChunkBuilder::DoHasInstanceTypeAndBranch(
+ HHasInstanceTypeAndBranch* instr) {
+ ASSERT(instr->value()->representation().IsTagged());
+ return new(zone()) LHasInstanceTypeAndBranch(
+ UseRegisterAtStart(instr->value()),
+ TempRegister());
+}
+
+
+LInstruction* LChunkBuilder::DoGetCachedArrayIndex(
+ HGetCachedArrayIndex* instr) {
+ ASSERT(instr->value()->representation().IsTagged());
+ LOperand* value = UseRegisterAtStart(instr->value());
+
+ return DefineAsRegister(new(zone()) LGetCachedArrayIndex(value));
+}
+
+
+LInstruction* LChunkBuilder::DoHasCachedArrayIndexAndBranch(
+ HHasCachedArrayIndexAndBranch* instr) {
+ ASSERT(instr->value()->representation().IsTagged());
+ return new(zone()) LHasCachedArrayIndexAndBranch(
+ UseRegisterAtStart(instr->value()));
+}
+
+
+LInstruction* LChunkBuilder::DoClassOfTestAndBranch(
+ HClassOfTestAndBranch* instr) {
+ ASSERT(instr->value()->representation().IsTagged());
+ return new(zone()) LClassOfTestAndBranch(UseRegister(instr->value()),
+ TempRegister(),
+ TempRegister());
+}
+
+
+LInstruction* LChunkBuilder::DoMapEnumLength(HMapEnumLength* instr) {
+ LOperand* map = UseRegisterAtStart(instr->value());
+ return DefineAsRegister(new(zone()) LMapEnumLength(map));
+}
+
+
+LInstruction* LChunkBuilder::DoDateField(HDateField* instr) {
+ LOperand* date = UseFixed(instr->value(), eax);
+ LDateField* result =
+ new(zone()) LDateField(date, FixedTemp(ecx), instr->index());
+ return MarkAsCall(DefineFixed(result, eax), instr, CAN_DEOPTIMIZE_EAGERLY);
+}
+
+
+LInstruction* LChunkBuilder::DoSeqStringGetChar(HSeqStringGetChar* instr) {
+ LOperand* string = UseRegisterAtStart(instr->string());
+ LOperand* index = UseRegisterOrConstantAtStart(instr->index());
+ return DefineAsRegister(new(zone()) LSeqStringGetChar(string, index));
+}
+
+
+LOperand* LChunkBuilder::GetSeqStringSetCharOperand(HSeqStringSetChar* instr) {
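+ // One-byte stores use a byte move, so the value is forced into a byte
+ // register (eax); with --debug-code the value is checked at runtime and
+ // therefore may not be a constant.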
+ if (instr->encoding() == String::ONE_BYTE_ENCODING) {
+ if (FLAG_debug_code) {
+ return UseFixed(instr->value(), eax);
+ } else {
+ return UseFixedOrConstant(instr->value(), eax);
+ }
+ } else {
+ if (FLAG_debug_code) {
+ return UseRegisterAtStart(instr->value());
+ } else {
+ return UseRegisterOrConstantAtStart(instr->value());
+ }
+ }
+}
+
+
+LInstruction* LChunkBuilder::DoSeqStringSetChar(HSeqStringSetChar* instr) {
+ LOperand* string = UseRegisterAtStart(instr->string());
+ LOperand* index = FLAG_debug_code
+ ? UseRegisterAtStart(instr->index())
+ : UseRegisterOrConstantAtStart(instr->index());
+ LOperand* value = GetSeqStringSetCharOperand(instr);
+ LOperand* context = FLAG_debug_code ? UseFixed(instr->context(), esi) : NULL;
+ LInstruction* result = new(zone()) LSeqStringSetChar(context, string,
+ index, value);
+ if (FLAG_debug_code) {
+ result = MarkAsCall(result, instr);
+ }
+ return result;
+}
+
+
+LInstruction* LChunkBuilder::DoBoundsCheck(HBoundsCheck* instr) {
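+ // Eliminated checks are normally dropped entirely; under --debug-code they
+ // are still emitted for verification but get no environment, since they
+ // never deoptimize.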
+ if (!FLAG_debug_code && instr->skip_check()) return NULL;
+ LOperand* index = UseRegisterOrConstantAtStart(instr->index());
+ LOperand* length = !index->IsConstantOperand()
+ ? UseOrConstantAtStart(instr->length())
+ : UseAtStart(instr->length());
+ LInstruction* result = new(zone()) LBoundsCheck(index, length);
+ if (!FLAG_debug_code || !instr->skip_check()) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+}
+
+
+LInstruction* LChunkBuilder::DoBoundsCheckBaseIndexInformation(
+ HBoundsCheckBaseIndexInformation* instr) {
+ UNREACHABLE();
+ return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoAbnormalExit(HAbnormalExit* instr) {
+ // The control instruction marking the end of a block that completed
+ // abruptly (e.g., threw an exception). There is nothing specific to do.
+ return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoUseConst(HUseConst* instr) {
+ return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoForceRepresentation(HForceRepresentation* bad) {
+ // All HForceRepresentation instructions should be eliminated in the
+ // representation change phase of Hydrogen.
+ UNREACHABLE();
+ return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoChange(HChange* instr) {
+ Representation from = instr->from();
+ Representation to = instr->to();
+ HValue* val = instr->value();
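+ // Dispatch on the (from, to) representation pair. Smi inputs are widened
+ // to Tagged first so that they reuse the tagged conversion paths below.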
+ if (from.IsSmi()) {
+ if (to.IsTagged()) {
+ LOperand* value = UseRegister(val);
+ return DefineSameAsFirst(new(zone()) LDummyUse(value));
+ }
+ from = Representation::Tagged();
+ }
+ if (from.IsTagged()) {
+ if (to.IsDouble()) {
+ LOperand* value = UseRegister(val);
+ LOperand* temp = TempRegister();
+ LInstruction* result =
+ DefineAsRegister(new(zone()) LNumberUntagD(value, temp));
+ if (!val->representation().IsSmi()) result = AssignEnvironment(result);
+ return result;
+ } else if (to.IsSmi()) {
+ LOperand* value = UseRegister(val);
+ if (val->type().IsSmi()) {
+ return DefineSameAsFirst(new(zone()) LDummyUse(value));
+ }
+ return AssignEnvironment(DefineSameAsFirst(new(zone()) LCheckSmi(value)));
+ } else {
+ ASSERT(to.IsInteger32());
+ if (val->type().IsSmi() || val->representation().IsSmi()) {
+ LOperand* value = UseRegister(val);
+ return DefineSameAsFirst(new(zone()) LSmiUntag(value, false));
+ } else {
+ LOperand* value = UseRegister(val);
+ LInstruction* result = DefineSameAsFirst(new(zone()) LTaggedToI(value));
+ if (!val->representation().IsSmi()) result = AssignEnvironment(result);
+ return result;
+ }
+ }
+ } else if (from.IsDouble()) {
+ if (to.IsTagged()) {
+ info()->MarkAsDeferredCalling();
+ LOperand* value = UseRegisterAtStart(val);
+ LOperand* temp = FLAG_inline_new ? TempRegister() : NULL;
+ LUnallocated* result_temp = TempRegister();
+ LNumberTagD* result = new(zone()) LNumberTagD(value, temp);
+ return AssignPointerMap(Define(result, result_temp));
+ } else if (to.IsSmi()) {
+ LOperand* value = UseRegister(val);
+ return AssignEnvironment(
+ DefineAsRegister(new(zone()) LDoubleToSmi(value)));
+ } else {
+ ASSERT(to.IsInteger32());
+ bool truncating = instr->CanTruncateToInt32();
+ LOperand* value = UseRegister(val);
+ LInstruction* result = DefineAsRegister(new(zone()) LDoubleToI(value));
+ if (!truncating) result = AssignEnvironment(result);
+ return result;
+ }
+ } else if (from.IsInteger32()) {
+ info()->MarkAsDeferredCalling();
+ if (to.IsTagged()) {
+ if (!instr->CheckFlag(HValue::kCanOverflow)) {
+ LOperand* value = UseRegister(val);
+ return DefineSameAsFirst(new(zone()) LSmiTag(value));
+ } else if (val->CheckFlag(HInstruction::kUint32)) {
+ LOperand* value = UseRegister(val);
+ LOperand* temp = TempRegister();
+ LNumberTagU* result = new(zone()) LNumberTagU(value, temp);
+ return AssignPointerMap(DefineSameAsFirst(result));
+ } else {
+ LOperand* value = UseRegister(val);
+ LOperand* temp = TempRegister();
+ LNumberTagI* result = new(zone()) LNumberTagI(value, temp);
+ return AssignPointerMap(DefineSameAsFirst(result));
+ }
+ } else if (to.IsSmi()) {
+ LOperand* value = UseRegister(val);
+ LInstruction* result = DefineSameAsFirst(new(zone()) LSmiTag(value));
+ if (instr->CheckFlag(HValue::kCanOverflow)) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+ } else {
+ ASSERT(to.IsDouble());
+ if (val->CheckFlag(HInstruction::kUint32)) {
+ return DefineAsRegister(new(zone()) LUint32ToDouble(UseRegister(val)));
+ } else {
+ return DefineAsRegister(new(zone()) LInteger32ToDouble(Use(val)));
+ }
+ }
+ }
+ UNREACHABLE();
+ return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoCheckHeapObject(HCheckHeapObject* instr) {
+ LOperand* value = UseAtStart(instr->value());
+ LInstruction* result = new(zone()) LCheckNonSmi(value);
+ if (!instr->value()->IsHeapObject()) result = AssignEnvironment(result);
+ return result;
+}
+
+
+LInstruction* LChunkBuilder::DoCheckSmi(HCheckSmi* instr) {
+ LOperand* value = UseRegisterAtStart(instr->value());
+ return AssignEnvironment(new(zone()) LCheckSmi(value));
+}
+
+
+LInstruction* LChunkBuilder::DoCheckInstanceType(HCheckInstanceType* instr) {
+ LOperand* value = UseRegisterAtStart(instr->value());
+ LOperand* temp = TempRegister();
+ LCheckInstanceType* result = new(zone()) LCheckInstanceType(value, temp);
+ return AssignEnvironment(result);
+}
+
+
+LInstruction* LChunkBuilder::DoCheckValue(HCheckValue* instr) {
+ // If the object is in new space, we'll emit a global cell compare and so
+ // want the value in a register. If the object gets promoted before we
+ // emit code, we will still get the register but will do an immediate
+ // compare instead of the cell compare. This is safe.
+ LOperand* value = instr->object_in_new_space()
+ ? UseRegisterAtStart(instr->value()) : UseAtStart(instr->value());
+ return AssignEnvironment(new(zone()) LCheckValue(value));
+}
+
+
+LInstruction* LChunkBuilder::DoCheckMaps(HCheckMaps* instr) {
+ if (instr->IsStabilityCheck()) return new(zone()) LCheckMaps;
+ LOperand* value = UseRegisterAtStart(instr->value());
+ LInstruction* result = AssignEnvironment(new(zone()) LCheckMaps(value));
+ if (instr->HasMigrationTarget()) {
+ info()->MarkAsDeferredCalling();
+ result = AssignPointerMap(result);
+ }
+ return result;
+}
+
+
+LInstruction* LChunkBuilder::DoClampToUint8(HClampToUint8* instr) {
+ HValue* value = instr->value();
+ Representation input_rep = value->representation();
+ if (input_rep.IsDouble()) {
+ UNREACHABLE();
+ return NULL;
+ } else if (input_rep.IsInteger32()) {
+ LOperand* reg = UseFixed(value, eax);
+ return DefineFixed(new(zone()) LClampIToUint8(reg), eax);
+ } else {
+ ASSERT(input_rep.IsSmiOrTagged());
+ LOperand* value = UseRegister(instr->value());
+ LClampTToUint8NoSSE2* res =
+ new(zone()) LClampTToUint8NoSSE2(value, TempRegister(),
+ TempRegister(), TempRegister());
+ return AssignEnvironment(DefineFixed(res, ecx));
+ }
+}
+
+
+LInstruction* LChunkBuilder::DoDoubleBits(HDoubleBits* instr) {
+ HValue* value = instr->value();
+ ASSERT(value->representation().IsDouble());
+ return DefineAsRegister(new(zone()) LDoubleBits(UseRegister(value)));
+}
+
+
+LInstruction* LChunkBuilder::DoConstructDouble(HConstructDouble* instr) {
+ LOperand* lo = UseRegister(instr->lo());
+ LOperand* hi = UseRegister(instr->hi());
+ return DefineAsRegister(new(zone()) LConstructDouble(hi, lo));
+}
+
+
+LInstruction* LChunkBuilder::DoReturn(HReturn* instr) {
+ LOperand* context = info()->IsStub() ? UseFixed(instr->context(), esi) : NULL;
+ LOperand* parameter_count = UseRegisterOrConstant(instr->parameter_count());
+ return new(zone()) LReturn(
+ UseFixed(instr->value(), eax), context, parameter_count);
+}
+
+
+LInstruction* LChunkBuilder::DoConstant(HConstant* instr) {
+ Representation r = instr->representation();
+ if (r.IsSmi()) {
+ return DefineAsRegister(new(zone()) LConstantS);
+ } else if (r.IsInteger32()) {
+ return DefineAsRegister(new(zone()) LConstantI);
+ } else if (r.IsDouble()) {
+ double value = instr->DoubleValue();
+ bool value_is_zero = BitCast<uint64_t, double>(value) == 0;
+ LOperand* temp = value_is_zero ? NULL : TempRegister();
+ return DefineAsRegister(new(zone()) LConstantD(temp));
+ } else if (r.IsExternal()) {
+ return DefineAsRegister(new(zone()) LConstantE);
+ } else if (r.IsTagged()) {
+ return DefineAsRegister(new(zone()) LConstantT);
+ } else {
+ UNREACHABLE();
+ return NULL;
+ }
+}
+
+
+LInstruction* LChunkBuilder::DoLoadGlobalCell(HLoadGlobalCell* instr) {
+ LLoadGlobalCell* result = new(zone()) LLoadGlobalCell;
+ return instr->RequiresHoleCheck()
+ ? AssignEnvironment(DefineAsRegister(result))
+ : DefineAsRegister(result);
+}
+
+
+LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) {
+ LOperand* context = UseFixed(instr->context(), esi);
+ LOperand* global_object = UseFixed(instr->global_object(), edx);
+ LLoadGlobalGeneric* result =
+ new(zone()) LLoadGlobalGeneric(context, global_object);
+ return MarkAsCall(DefineFixed(result, eax), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoStoreGlobalCell(HStoreGlobalCell* instr) {
+ LStoreGlobalCell* result =
+ new(zone()) LStoreGlobalCell(UseRegister(instr->value()));
+ return instr->RequiresHoleCheck() ? AssignEnvironment(result) : result;
+}
+
+
+LInstruction* LChunkBuilder::DoLoadContextSlot(HLoadContextSlot* instr) {
+ LOperand* context = UseRegisterAtStart(instr->value());
+ LInstruction* result =
+ DefineAsRegister(new(zone()) LLoadContextSlot(context));
+ if (instr->RequiresHoleCheck() && instr->DeoptimizesOnHole()) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+}
+
+
+LInstruction* LChunkBuilder::DoStoreContextSlot(HStoreContextSlot* instr) {
+ LOperand* value;
+ LOperand* temp;
+ LOperand* context = UseRegister(instr->context());
+ if (instr->NeedsWriteBarrier()) {
+ value = UseTempRegister(instr->value());
+ temp = TempRegister();
+ } else {
+ value = UseRegister(instr->value());
+ temp = NULL;
+ }
+ LInstruction* result = new(zone()) LStoreContextSlot(context, value, temp);
+ if (instr->RequiresHoleCheck() && instr->DeoptimizesOnHole()) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+}
+
+
+LInstruction* LChunkBuilder::DoLoadNamedField(HLoadNamedField* instr) {
+ LOperand* obj = (instr->access().IsExternalMemory() &&
+ instr->access().offset() == 0)
+ ? UseRegisterOrConstantAtStart(instr->object())
+ : UseRegisterAtStart(instr->object());
+ return DefineAsRegister(new(zone()) LLoadNamedField(obj));
+}
+
+
+LInstruction* LChunkBuilder::DoLoadNamedGeneric(HLoadNamedGeneric* instr) {
+ LOperand* context = UseFixed(instr->context(), esi);
+ LOperand* object = UseFixed(instr->object(), edx);
+ LLoadNamedGeneric* result = new(zone()) LLoadNamedGeneric(context, object);
+ return MarkAsCall(DefineFixed(result, eax), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoLoadFunctionPrototype(
+ HLoadFunctionPrototype* instr) {
+ return AssignEnvironment(DefineAsRegister(
+ new(zone()) LLoadFunctionPrototype(UseRegister(instr->function()),
+ TempRegister())));
+}
+
+
+LInstruction* LChunkBuilder::DoLoadRoot(HLoadRoot* instr) {
+ return DefineAsRegister(new(zone()) LLoadRoot);
+}
+
+
+LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) {
+ ASSERT(instr->key()->representation().IsSmiOrInteger32());
+ ElementsKind elements_kind = instr->elements_kind();
+ bool clobbers_key = ExternalArrayOpRequiresTemp(
+ instr->key()->representation(), elements_kind);
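+ // A Smi key may have to be untagged or re-scaled in place for some
+ // element kinds, which clobbers the register, hence the temp use.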
+ LOperand* key = clobbers_key
+ ? UseTempRegister(instr->key())
+ : UseRegisterOrConstantAtStart(instr->key());
+ LInstruction* result = NULL;
+
+ if (!instr->is_typed_elements()) {
+ LOperand* obj = UseRegisterAtStart(instr->elements());
+ result = DefineAsRegister(new(zone()) LLoadKeyed(obj, key));
+ } else {
+ ASSERT(
+ (instr->representation().IsInteger32() &&
+ !(IsDoubleOrFloatElementsKind(instr->elements_kind()))) ||
+ (instr->representation().IsDouble() &&
+ (IsDoubleOrFloatElementsKind(instr->elements_kind()))));
+ LOperand* backing_store = UseRegister(instr->elements());
+ result = DefineAsRegister(new(zone()) LLoadKeyed(backing_store, key));
+ }
+
+ if ((instr->is_external() || instr->is_fixed_typed_array()) ?
+ // see LCodeGen::DoLoadKeyedExternalArray
+ ((instr->elements_kind() == EXTERNAL_UINT32_ELEMENTS ||
+ instr->elements_kind() == UINT32_ELEMENTS) &&
+ !instr->CheckFlag(HInstruction::kUint32)) :
+ // see LCodeGen::DoLoadKeyedFixedDoubleArray and
+ // LCodeGen::DoLoadKeyedFixedArray
+ instr->RequiresHoleCheck()) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+}
+
+
+LInstruction* LChunkBuilder::DoLoadKeyedGeneric(HLoadKeyedGeneric* instr) {
+ LOperand* context = UseFixed(instr->context(), esi);
+ LOperand* object = UseFixed(instr->object(), edx);
+ LOperand* key = UseFixed(instr->key(), ecx);
+
+ LLoadKeyedGeneric* result =
+ new(zone()) LLoadKeyedGeneric(context, object, key);
+ return MarkAsCall(DefineFixed(result, eax), instr);
+}
+
+
+LOperand* LChunkBuilder::GetStoreKeyedValueOperand(HStoreKeyed* instr) {
+ ElementsKind elements_kind = instr->elements_kind();
+
+ // Determine if we need a byte register in this case for the value.
+ bool val_is_fixed_register =
+ elements_kind == EXTERNAL_INT8_ELEMENTS ||
+ elements_kind == EXTERNAL_UINT8_ELEMENTS ||
+ elements_kind == EXTERNAL_UINT8_CLAMPED_ELEMENTS ||
+ elements_kind == UINT8_ELEMENTS ||
+ elements_kind == INT8_ELEMENTS ||
+ elements_kind == UINT8_CLAMPED_ELEMENTS;
+ if (val_is_fixed_register) {
+ return UseFixed(instr->value(), eax);
+ }
+
+ if (IsDoubleOrFloatElementsKind(elements_kind)) {
+ return UseRegisterAtStart(instr->value());
+ }
+
+ return UseRegister(instr->value());
+}
+
+
+LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
+ if (!instr->is_typed_elements()) {
+ ASSERT(instr->elements()->representation().IsTagged());
+ ASSERT(instr->key()->representation().IsInteger32() ||
+ instr->key()->representation().IsSmi());
+
+ if (instr->value()->representation().IsDouble()) {
+ LOperand* object = UseRegisterAtStart(instr->elements());
+ LOperand* val = NULL;
+ val = UseRegisterAtStart(instr->value());
+ LOperand* key = UseRegisterOrConstantAtStart(instr->key());
+ return new(zone()) LStoreKeyed(object, key, val);
+ } else {
+ ASSERT(instr->value()->representation().IsSmiOrTagged());
+ bool needs_write_barrier = instr->NeedsWriteBarrier();
+
+ LOperand* obj = UseRegister(instr->elements());
+ LOperand* val;
+ LOperand* key;
+ if (needs_write_barrier) {
+ val = UseTempRegister(instr->value());
+ key = UseTempRegister(instr->key());
+ } else {
+ val = UseRegisterOrConstantAtStart(instr->value());
+ key = UseRegisterOrConstantAtStart(instr->key());
+ }
+ return new(zone()) LStoreKeyed(obj, key, val);
+ }
+ }
+
+ ElementsKind elements_kind = instr->elements_kind();
+ ASSERT(
+ (instr->value()->representation().IsInteger32() &&
+ !IsDoubleOrFloatElementsKind(elements_kind)) ||
+ (instr->value()->representation().IsDouble() &&
+ IsDoubleOrFloatElementsKind(elements_kind)));
+ ASSERT((instr->is_fixed_typed_array() &&
+ instr->elements()->representation().IsTagged()) ||
+ (instr->is_external() &&
+ instr->elements()->representation().IsExternal()));
+
+ LOperand* backing_store = UseRegister(instr->elements());
+ LOperand* val = GetStoreKeyedValueOperand(instr);
+ bool clobbers_key = ExternalArrayOpRequiresTemp(
+ instr->key()->representation(), elements_kind);
+ LOperand* key = clobbers_key
+ ? UseTempRegister(instr->key())
+ : UseRegisterOrConstantAtStart(instr->key());
+ return new(zone()) LStoreKeyed(backing_store, key, val);
+}
+
+
+LInstruction* LChunkBuilder::DoStoreKeyedGeneric(HStoreKeyedGeneric* instr) {
+ LOperand* context = UseFixed(instr->context(), esi);
+ LOperand* object = UseFixed(instr->object(), edx);
+ LOperand* key = UseFixed(instr->key(), ecx);
+ LOperand* value = UseFixed(instr->value(), eax);
+
+ ASSERT(instr->object()->representation().IsTagged());
+ ASSERT(instr->key()->representation().IsTagged());
+ ASSERT(instr->value()->representation().IsTagged());
+
+ LStoreKeyedGeneric* result =
+ new(zone()) LStoreKeyedGeneric(context, object, key, value);
+ return MarkAsCall(result, instr);
+}
+
+
+LInstruction* LChunkBuilder::DoTransitionElementsKind(
+ HTransitionElementsKind* instr) {
+ if (IsSimpleMapChangeTransition(instr->from_kind(), instr->to_kind())) {
+ LOperand* object = UseRegister(instr->object());
+ LOperand* new_map_reg = TempRegister();
+ LOperand* temp_reg = TempRegister();
+ LTransitionElementsKind* result =
+ new(zone()) LTransitionElementsKind(object, NULL,
+ new_map_reg, temp_reg);
+ return result;
+ } else {
+ LOperand* object = UseFixed(instr->object(), eax);
+ LOperand* context = UseFixed(instr->context(), esi);
+ LTransitionElementsKind* result =
+ new(zone()) LTransitionElementsKind(object, context, NULL, NULL);
+ return MarkAsCall(result, instr);
+ }
+}
+
+
+LInstruction* LChunkBuilder::DoArrayShift(HArrayShift* instr) {
+ LOperand* object = UseFixed(instr->object(), eax);
+ LOperand* context = UseFixed(instr->context(), esi);
+ LArrayShift* result = new(zone()) LArrayShift(context, object);
+ return MarkAsCall(DefineFixed(result, eax), instr, CANNOT_DEOPTIMIZE_EAGERLY);
+}
+
+
+LInstruction* LChunkBuilder::DoTrapAllocationMemento(
+ HTrapAllocationMemento* instr) {
+ LOperand* object = UseRegister(instr->object());
+ LOperand* temp = TempRegister();
+ LTrapAllocationMemento* result =
+ new(zone()) LTrapAllocationMemento(object, temp);
+ return AssignEnvironment(result);
+}
+
+
+LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
+ bool is_in_object = instr->access().IsInobject();
+ bool is_external_location = instr->access().IsExternalMemory() &&
+ instr->access().offset() == 0;
+ bool needs_write_barrier = instr->NeedsWriteBarrier();
+ bool needs_write_barrier_for_map = instr->has_transition() &&
+ instr->NeedsWriteBarrierForMap();
+
+ LOperand* obj;
+ if (needs_write_barrier) {
+ obj = is_in_object
+ ? UseRegister(instr->object())
+ : UseTempRegister(instr->object());
+ } else if (is_external_location) {
+ ASSERT(!is_in_object);
+ ASSERT(!needs_write_barrier);
+ ASSERT(!needs_write_barrier_for_map);
+ obj = UseRegisterOrConstant(instr->object());
+ } else {
+ obj = needs_write_barrier_for_map
+ ? UseRegister(instr->object())
+ : UseRegisterAtStart(instr->object());
+ }
+
+ bool can_be_constant = instr->value()->IsConstant() &&
+ HConstant::cast(instr->value())->NotInNewSpace() &&
+ !instr->field_representation().IsDouble();
+
+ LOperand* val;
+ if (instr->field_representation().IsInteger8() ||
+ instr->field_representation().IsUInteger8()) {
+ // mov_b requires a byte register (i.e. any of eax, ebx, ecx, edx).
+ // Just force the value to be in eax and we're safe here.
+ val = UseFixed(instr->value(), eax);
+ } else if (needs_write_barrier) {
+ val = UseTempRegister(instr->value());
+ } else if (can_be_constant) {
+ val = UseRegisterOrConstant(instr->value());
+ } else if (instr->field_representation().IsSmi()) {
+ val = UseTempRegister(instr->value());
+ } else if (instr->field_representation().IsDouble()) {
+ val = UseRegisterAtStart(instr->value());
+ } else {
+ val = UseRegister(instr->value());
+ }
+
+ // We only need a scratch register if we have a write barrier or we
+ // have a store into the properties array (not an in-object property).
+ LOperand* temp = (!is_in_object || needs_write_barrier ||
+ needs_write_barrier_for_map) ? TempRegister() : NULL;
+
+ // We need a temporary register for write barrier of the map field.
+ LOperand* temp_map = needs_write_barrier_for_map ? TempRegister() : NULL;
+
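+ // Stores to fields that must hold heap objects deoptimize when the value
+ // might actually be a Smi.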
+ LInstruction* result =
+ new(zone()) LStoreNamedField(obj, val, temp, temp_map);
+ if (!instr->access().IsExternalMemory() &&
+ instr->field_representation().IsHeapObject() &&
+ (val->IsConstantOperand()
+ ? HConstant::cast(instr->value())->HasSmiValue()
+ : !instr->value()->type().IsHeapObject())) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+}
+
+
+LInstruction* LChunkBuilder::DoStoreNamedGeneric(HStoreNamedGeneric* instr) {
+ LOperand* context = UseFixed(instr->context(), esi);
+ LOperand* object = UseFixed(instr->object(), edx);
+ LOperand* value = UseFixed(instr->value(), eax);
+
+ LStoreNamedGeneric* result =
+ new(zone()) LStoreNamedGeneric(context, object, value);
+ return MarkAsCall(result, instr);
+}
+
+
+LInstruction* LChunkBuilder::DoStringAdd(HStringAdd* instr) {
+ LOperand* context = UseFixed(instr->context(), esi);
+ LOperand* left = UseFixed(instr->left(), edx);
+ LOperand* right = UseFixed(instr->right(), eax);
+ LStringAdd* string_add = new(zone()) LStringAdd(context, left, right);
+ return MarkAsCall(DefineFixed(string_add, eax), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoStringCharCodeAt(HStringCharCodeAt* instr) {
+ LOperand* string = UseTempRegister(instr->string());
+ LOperand* index = UseTempRegister(instr->index());
+ LOperand* context = UseAny(instr->context());
+ LStringCharCodeAt* result =
+ new(zone()) LStringCharCodeAt(context, string, index);
+ return AssignPointerMap(DefineAsRegister(result));
+}
+
+
+LInstruction* LChunkBuilder::DoStringCharFromCode(HStringCharFromCode* instr) {
+ LOperand* char_code = UseRegister(instr->value());
+ LOperand* context = UseAny(instr->context());
+ LStringCharFromCode* result =
+ new(zone()) LStringCharFromCode(context, char_code);
+ return AssignPointerMap(DefineAsRegister(result));
+}
+
+
+LInstruction* LChunkBuilder::DoAllocate(HAllocate* instr) {
+ info()->MarkAsDeferredCalling();
+ LOperand* context = UseAny(instr->context());
+ LOperand* size = instr->size()->IsConstant()
+ ? UseConstant(instr->size())
+ : UseTempRegister(instr->size());
+ LOperand* temp = TempRegister();
+ LAllocate* result = new(zone()) LAllocate(context, size, temp);
+ return AssignPointerMap(DefineAsRegister(result));
+}
+
+
+LInstruction* LChunkBuilder::DoRegExpLiteral(HRegExpLiteral* instr) {
+ LOperand* context = UseFixed(instr->context(), esi);
+ return MarkAsCall(
+ DefineFixed(new(zone()) LRegExpLiteral(context), eax), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoFunctionLiteral(HFunctionLiteral* instr) {
+ LOperand* context = UseFixed(instr->context(), esi);
+ return MarkAsCall(
+ DefineFixed(new(zone()) LFunctionLiteral(context), eax), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoOsrEntry(HOsrEntry* instr) {
+ ASSERT(argument_count_ == 0);
+ allocator_->MarkAsOsrEntry();
+ current_block_->last_environment()->set_ast_id(instr->ast_id());
+ return AssignEnvironment(new(zone()) LOsrEntry);
+}
+
+
+LInstruction* LChunkBuilder::DoParameter(HParameter* instr) {
+ LParameter* result = new(zone()) LParameter;
+ if (instr->kind() == HParameter::STACK_PARAMETER) {
+ int spill_index = chunk()->GetParameterStackSlot(instr->index());
+ return DefineAsSpilled(result, spill_index);
+ } else {
+ ASSERT(info()->IsStub());
+ CodeStubInterfaceDescriptor* descriptor =
+ info()->code_stub()->GetInterfaceDescriptor();
+ int index = static_cast<int>(instr->index());
+ Register reg = descriptor->GetParameterRegister(index);
+ return DefineFixed(result, reg);
+ }
+}
+
+
+LInstruction* LChunkBuilder::DoUnknownOSRValue(HUnknownOSRValue* instr) {
+ // Use an index that corresponds to the location in the unoptimized frame,
+ // which the optimized frame will subsume.
+ int env_index = instr->index();
+ int spill_index = 0;
+ if (instr->environment()->is_parameter_index(env_index)) {
+ spill_index = chunk()->GetParameterStackSlot(env_index);
+ } else {
+ spill_index = env_index - instr->environment()->first_local_index();
+ if (spill_index > LUnallocated::kMaxFixedSlotIndex) {
+ Abort(kNotEnoughSpillSlotsForOsr);
+ spill_index = 0;
+ }
+ if (spill_index == 0) {
+ // The dynamic frame alignment state overwrites the first local.
+ // The first local is saved at the end of the unoptimized frame.
+ spill_index = graph()->osr()->UnoptimizedFrameSlots();
+ }
+ }
+ return DefineAsSpilled(new(zone()) LUnknownOSRValue, spill_index);
+}
+
+
+LInstruction* LChunkBuilder::DoCallStub(HCallStub* instr) {
+ LOperand* context = UseFixed(instr->context(), esi);
+ LCallStub* result = new(zone()) LCallStub(context);
+ return MarkAsCall(DefineFixed(result, eax), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoArgumentsObject(HArgumentsObject* instr) {
+ // There are no real uses of the arguments object.
+ // arguments.length and element access are supported directly on
+ // stack arguments, and any real arguments object use causes a bailout.
+ // So this value is never used.
+ return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoCapturedObject(HCapturedObject* instr) {
+ instr->ReplayEnvironment(current_block_->last_environment());
+
+ // There are no real uses of a captured object.
+ return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoAccessArgumentsAt(HAccessArgumentsAt* instr) {
+ info()->MarkAsRequiresFrame();
+ LOperand* args = UseRegister(instr->arguments());
+ LOperand* length;
+ LOperand* index;
+ if (instr->length()->IsConstant() && instr->index()->IsConstant()) {
+ length = UseRegisterOrConstant(instr->length());
+ index = UseOrConstant(instr->index());
+ } else {
+ length = UseTempRegister(instr->length());
+ index = Use(instr->index());
+ }
+ return DefineAsRegister(new(zone()) LAccessArgumentsAt(args, length, index));
+}
+
+
+LInstruction* LChunkBuilder::DoToFastProperties(HToFastProperties* instr) {
+ LOperand* object = UseFixed(instr->value(), eax);
+ LToFastProperties* result = new(zone()) LToFastProperties(object);
+ return MarkAsCall(DefineFixed(result, eax), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoTypeof(HTypeof* instr) {
+ LOperand* context = UseFixed(instr->context(), esi);
+ LOperand* value = UseAtStart(instr->value());
+ LTypeof* result = new(zone()) LTypeof(context, value);
+ return MarkAsCall(DefineFixed(result, eax), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoTypeofIsAndBranch(HTypeofIsAndBranch* instr) {
+ LInstruction* goto_instr = CheckElideControlInstruction(instr);
+ if (goto_instr != NULL) return goto_instr;
+ return new(zone()) LTypeofIsAndBranch(UseTempRegister(instr->value()));
+}
+
+
+LInstruction* LChunkBuilder::DoIsConstructCallAndBranch(
+ HIsConstructCallAndBranch* instr) {
+ return new(zone()) LIsConstructCallAndBranch(TempRegister());
+}
+
+
+LInstruction* LChunkBuilder::DoSimulate(HSimulate* instr) {
+ instr->ReplayEnvironment(current_block_->last_environment());
+ return NULL;
+}
+
+
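+// A function-entry stack check is lowered to a call with the context fixed
+// in esi; a back-edge check instead carries an environment and pointer map.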
+LInstruction* LChunkBuilder::DoStackCheck(HStackCheck* instr) {
+ info()->MarkAsDeferredCalling();
+ if (instr->is_function_entry()) {
+ LOperand* context = UseFixed(instr->context(), esi);
+ return MarkAsCall(new(zone()) LStackCheck(context), instr);
+ } else {
+ ASSERT(instr->is_backwards_branch());
+ LOperand* context = UseAny(instr->context());
+ return AssignEnvironment(
+ AssignPointerMap(new(zone()) LStackCheck(context)));
+ }
+}
+
+
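+// Entering an inlined function only manipulates the builder's environment:
+// the outer environment is copied for inlining and becomes the current
+// block's environment; no Lithium instruction is emitted.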
+LInstruction* LChunkBuilder::DoEnterInlined(HEnterInlined* instr) {
+ HEnvironment* outer = current_block_->last_environment();
+ outer->set_ast_id(instr->ReturnId());
+ HConstant* undefined = graph()->GetConstantUndefined();
+ HEnvironment* inner = outer->CopyForInlining(instr->closure(),
+ instr->arguments_count(),
+ instr->function(),
+ undefined,
+ instr->inlining_kind());
+ // Only replay binding of arguments object if it wasn't removed from graph.
+ if (instr->arguments_var() != NULL && instr->arguments_object()->IsLinked()) {
+ inner->Bind(instr->arguments_var(), instr->arguments_object());
+ }
+ inner->set_entry(instr);
+ current_block_->UpdateEnvironment(inner);
+ chunk_->AddInlinedClosure(instr->closure());
+ return NULL;
+}
+
+
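+// Leaving an inlined function discards the inner environment; an LDrop is
+// emitted only if the inlined call pushed its arguments on the stack.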
+LInstruction* LChunkBuilder::DoLeaveInlined(HLeaveInlined* instr) {
+ LInstruction* pop = NULL;
+
+ HEnvironment* env = current_block_->last_environment();
+
+ if (env->entry()->arguments_pushed()) {
+ int argument_count = env->arguments_environment()->parameter_count();
+ pop = new(zone()) LDrop(argument_count);
+ ASSERT(instr->argument_delta() == -argument_count);
+ }
+
+ HEnvironment* outer = current_block_->last_environment()->
+ DiscardInlined(false);
+ current_block_->UpdateEnvironment(outer);
+ return pop;
+}
+
+
+LInstruction* LChunkBuilder::DoForInPrepareMap(HForInPrepareMap* instr) {
+ LOperand* context = UseFixed(instr->context(), esi);
+ LOperand* object = UseFixed(instr->enumerable(), eax);
+ LForInPrepareMap* result = new(zone()) LForInPrepareMap(context, object);
+ return MarkAsCall(DefineFixed(result, eax), instr, CAN_DEOPTIMIZE_EAGERLY);
+}
+
+
+LInstruction* LChunkBuilder::DoForInCacheArray(HForInCacheArray* instr) {
+ LOperand* map = UseRegister(instr->map());
+ return AssignEnvironment(DefineAsRegister(
+ new(zone()) LForInCacheArray(map)));
+}
+
+
+LInstruction* LChunkBuilder::DoCheckMapValue(HCheckMapValue* instr) {
+ LOperand* value = UseRegisterAtStart(instr->value());
+ LOperand* map = UseRegisterAtStart(instr->map());
+ return AssignEnvironment(new(zone()) LCheckMapValue(value, map));
+}
+
+
+LInstruction* LChunkBuilder::DoLoadFieldByIndex(HLoadFieldByIndex* instr) {
+ LOperand* object = UseRegister(instr->object());
+ LOperand* index = UseTempRegister(instr->index());
+ LLoadFieldByIndex* load = new(zone()) LLoadFieldByIndex(object, index);
+ LInstruction* result = DefineSameAsFirst(load);
+ return AssignPointerMap(result);
+}
+
+
+} } // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_X87
--- /dev/null
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_X87_LITHIUM_X87_H_
+#define V8_X87_LITHIUM_X87_H_
+
+#include "hydrogen.h"
+#include "lithium-allocator.h"
+#include "lithium.h"
+#include "safepoint-table.h"
+#include "utils.h"
+
+namespace v8 {
+namespace internal {
+
+// Forward declarations.
+class LCodeGen;
+
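+// X-macro listing every concrete Lithium instruction in the x87 port. It is
+// expanded below to generate the Opcode enum and the Is##type() predicates
+// on LInstruction.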
+#define LITHIUM_CONCRETE_INSTRUCTION_LIST(V) \
+ V(AccessArgumentsAt) \
+ V(AddI) \
+ V(Allocate) \
+ V(ApplyArguments) \
+ V(ArgumentsElements) \
+ V(ArgumentsLength) \
+ V(ArithmeticD) \
+ V(ArithmeticT) \
+ V(ArrayShift) \
+ V(BitI) \
+ V(BoundsCheck) \
+ V(Branch) \
+ V(CallJSFunction) \
+ V(CallWithDescriptor) \
+ V(CallFunction) \
+ V(CallNew) \
+ V(CallNewArray) \
+ V(CallRuntime) \
+ V(CallStub) \
+ V(CheckInstanceType) \
+ V(CheckMaps) \
+ V(CheckMapValue) \
+ V(CheckNonSmi) \
+ V(CheckSmi) \
+ V(CheckValue) \
+ V(ClampDToUint8) \
+ V(ClampIToUint8) \
+ V(ClampTToUint8NoSSE2) \
+ V(ClassOfTestAndBranch) \
+ V(ClobberDoubles) \
+ V(CompareMinusZeroAndBranch) \
+ V(CompareNumericAndBranch) \
+ V(CmpObjectEqAndBranch) \
+ V(CmpHoleAndBranch) \
+ V(CmpMapAndBranch) \
+ V(CmpT) \
+ V(ConstantD) \
+ V(ConstantE) \
+ V(ConstantI) \
+ V(ConstantS) \
+ V(ConstantT) \
+ V(ConstructDouble) \
+ V(Context) \
+ V(DateField) \
+ V(DebugBreak) \
+ V(DeclareGlobals) \
+ V(Deoptimize) \
+ V(DivByConstI) \
+ V(DivByPowerOf2I) \
+ V(DivI) \
+ V(DoubleBits) \
+ V(DoubleToI) \
+ V(DoubleToSmi) \
+ V(Drop) \
+ V(Dummy) \
+ V(DummyUse) \
+ V(FlooringDivByConstI) \
+ V(FlooringDivByPowerOf2I) \
+ V(FlooringDivI) \
+ V(ForInCacheArray) \
+ V(ForInPrepareMap) \
+ V(FunctionLiteral) \
+ V(GetCachedArrayIndex) \
+ V(Goto) \
+ V(HasCachedArrayIndexAndBranch) \
+ V(HasInstanceTypeAndBranch) \
+ V(InnerAllocatedObject) \
+ V(InstanceOf) \
+ V(InstanceOfKnownGlobal) \
+ V(InstructionGap) \
+ V(Integer32ToDouble) \
+ V(InvokeFunction) \
+ V(IsConstructCallAndBranch) \
+ V(IsObjectAndBranch) \
+ V(IsStringAndBranch) \
+ V(IsSmiAndBranch) \
+ V(IsUndetectableAndBranch) \
+ V(Label) \
+ V(LazyBailout) \
+ V(LoadContextSlot) \
+ V(LoadFieldByIndex) \
+ V(LoadFunctionPrototype) \
+ V(LoadGlobalCell) \
+ V(LoadGlobalGeneric) \
+ V(LoadKeyed) \
+ V(LoadKeyedGeneric) \
+ V(LoadNamedField) \
+ V(LoadNamedGeneric) \
+ V(LoadRoot) \
+ V(MapEnumLength) \
+ V(MathAbs) \
+ V(MathClz32) \
+ V(MathExp) \
+ V(MathFloor) \
+ V(MathLog) \
+ V(MathMinMax) \
+ V(MathPowHalf) \
+ V(MathRound) \
+ V(MathSqrt) \
+ V(ModByConstI) \
+ V(ModByPowerOf2I) \
+ V(ModI) \
+ V(MulI) \
+ V(NumberTagD) \
+ V(NumberTagI) \
+ V(NumberTagU) \
+ V(NumberUntagD) \
+ V(OsrEntry) \
+ V(Parameter) \
+ V(Power) \
+ V(PushArgument) \
+ V(RegExpLiteral) \
+ V(Return) \
+ V(SeqStringGetChar) \
+ V(SeqStringSetChar) \
+ V(ShiftI) \
+ V(SmiTag) \
+ V(SmiUntag) \
+ V(StackCheck) \
+ V(StoreCodeEntry) \
+ V(StoreContextSlot) \
+ V(StoreGlobalCell) \
+ V(StoreKeyed) \
+ V(StoreKeyedGeneric) \
+ V(StoreNamedField) \
+ V(StoreNamedGeneric) \
+ V(StringAdd) \
+ V(StringCharCodeAt) \
+ V(StringCharFromCode) \
+ V(StringCompareAndBranch) \
+ V(SubI) \
+ V(TaggedToI) \
+ V(ThisFunction) \
+ V(ToFastProperties) \
+ V(TransitionElementsKind) \
+ V(TrapAllocationMemento) \
+ V(Typeof) \
+ V(TypeofIsAndBranch) \
+ V(Uint32ToDouble) \
+ V(UnknownOSRValue) \
+ V(WrapReceiver)
+
+
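+// Declares, for a concrete instruction class, its opcode, its native-code
+// emitter (CompileToNative), its mnemonic, and a checked downcast from
+// LInstruction*.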
+#define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic) \
+ virtual Opcode opcode() const V8_FINAL V8_OVERRIDE { \
+ return LInstruction::k##type; \
+ } \
+ virtual void CompileToNative(LCodeGen* generator) V8_FINAL V8_OVERRIDE; \
+ virtual const char* Mnemonic() const V8_FINAL V8_OVERRIDE { \
+ return mnemonic; \
+ } \
+ static L##type* cast(LInstruction* instr) { \
+ ASSERT(instr->Is##type()); \
+ return reinterpret_cast<L##type*>(instr); \
+ }
+
+
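+// Gives an instruction typed access to the Hydrogen value it was built from.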
+#define DECLARE_HYDROGEN_ACCESSOR(type) \
+ H##type* hydrogen() const { \
+ return H##type::cast(hydrogen_value()); \
+ }
+
+
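+// Base class of all Lithium instructions. It carries the optional
+// deoptimization environment, pointer map and hydrogen value, and records
+// whether the instruction is a call, which the register allocator uses to
+// decide what gets clobbered.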
+class LInstruction : public ZoneObject {
+ public:
+ LInstruction()
+ : environment_(NULL),
+ hydrogen_value_(NULL),
+ bit_field_(IsCallBits::encode(false)) {
+ }
+
+ virtual ~LInstruction() {}
+
+ virtual void CompileToNative(LCodeGen* generator) = 0;
+ virtual const char* Mnemonic() const = 0;
+ virtual void PrintTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintOutputOperandTo(StringStream* stream);
+
+ enum Opcode {
+ // Declare a unique enum value for each instruction.
+#define DECLARE_OPCODE(type) k##type,
+ LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_OPCODE)
+ kNumberOfInstructions
+#undef DECLARE_OPCODE
+ };
+
+ virtual Opcode opcode() const = 0;
+
+ // Declare non-virtual type testers for all leaf IR classes.
+#define DECLARE_PREDICATE(type) \
+ bool Is##type() const { return opcode() == k##type; }
+ LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_PREDICATE)
+#undef DECLARE_PREDICATE
+
+ // Declare virtual predicates for instructions that don't have
+ // an opcode.
+ virtual bool IsGap() const { return false; }
+
+ virtual bool IsControl() const { return false; }
+
+ void set_environment(LEnvironment* env) { environment_ = env; }
+ LEnvironment* environment() const { return environment_; }
+ bool HasEnvironment() const { return environment_ != NULL; }
+
+ void set_pointer_map(LPointerMap* p) { pointer_map_.set(p); }
+ LPointerMap* pointer_map() const { return pointer_map_.get(); }
+ bool HasPointerMap() const { return pointer_map_.is_set(); }
+
+ void set_hydrogen_value(HValue* value) { hydrogen_value_ = value; }
+ HValue* hydrogen_value() const { return hydrogen_value_; }
+
+ virtual void SetDeferredLazyDeoptimizationEnvironment(LEnvironment* env) { }
+
+ void MarkAsCall() { bit_field_ = IsCallBits::update(bit_field_, true); }
+ bool IsCall() const { return IsCallBits::decode(bit_field_); }
+
+ // Interface to the register allocator and iterators.
+ bool ClobbersTemps() const { return IsCall(); }
+ bool ClobbersRegisters() const { return IsCall(); }
+ virtual bool ClobbersDoubleRegisters(Isolate* isolate) const {
+ return IsCall() ||
+ // We only have rudimentary X87Stack tracking, thus in general
+ // cannot handle phi-nodes.
+ (IsControl());
+ }
+
+ virtual bool HasResult() const = 0;
+ virtual LOperand* result() const = 0;
+
+ bool HasDoubleRegisterResult();
+ bool HasDoubleRegisterInput();
+ bool IsDoubleInput(X87Register reg, LCodeGen* cgen);
+
+ LOperand* FirstInput() { return InputAt(0); }
+ LOperand* Output() { return HasResult() ? result() : NULL; }
+
+ virtual bool HasInterestingComment(LCodeGen* gen) const { return true; }
+
+#ifdef DEBUG
+ void VerifyCall();
+#endif
+
+ private:
+ // Iterator support.
+ friend class InputIterator;
+ virtual int InputCount() = 0;
+ virtual LOperand* InputAt(int i) = 0;
+
+ friend class TempIterator;
+ virtual int TempCount() = 0;
+ virtual LOperand* TempAt(int i) = 0;
+
+ class IsCallBits: public BitField<bool, 0, 1> {};
+
+ LEnvironment* environment_;
+ SetOncePointer<LPointerMap> pointer_map_;
+ HValue* hydrogen_value_;
+ int bit_field_;
+};
+
+
+// R = number of result operands (0 or 1).
+template<int R>
+class LTemplateResultInstruction : public LInstruction {
+ public:
+ // Allow 0 or 1 output operands.
+ STATIC_ASSERT(R == 0 || R == 1);
+ virtual bool HasResult() const V8_FINAL V8_OVERRIDE {
+ return R != 0 && result() != NULL;
+ }
+ void set_result(LOperand* operand) { results_[0] = operand; }
+ LOperand* result() const { return results_[0]; }
+
+ protected:
+ EmbeddedContainer<LOperand*, R> results_;
+};
+
+
+// R = number of result operands (0 or 1).
+// I = number of input operands.
+// T = number of temporary operands.
+template<int R, int I, int T>
+class LTemplateInstruction : public LTemplateResultInstruction<R> {
+ protected:
+ EmbeddedContainer<LOperand*, I> inputs_;
+ EmbeddedContainer<LOperand*, T> temps_;
+
+ private:
+ // Iterator support.
+ virtual int InputCount() V8_FINAL V8_OVERRIDE { return I; }
+ virtual LOperand* InputAt(int i) V8_FINAL V8_OVERRIDE { return inputs_[i]; }
+
+ virtual int TempCount() V8_FINAL V8_OVERRIDE { return T; }
+ virtual LOperand* TempAt(int i) V8_FINAL V8_OVERRIDE { return temps_[i]; }
+};
+
+
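+// A gap holds the parallel moves the register allocator inserts around an
+// instruction; LInstructionGap and LLabel are its concrete subclasses.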
+class LGap : public LTemplateInstruction<0, 0, 0> {
+ public:
+ explicit LGap(HBasicBlock* block) : block_(block) {
+ parallel_moves_[BEFORE] = NULL;
+ parallel_moves_[START] = NULL;
+ parallel_moves_[END] = NULL;
+ parallel_moves_[AFTER] = NULL;
+ }
+
+ // Can't use the DECLARE-macro here because of sub-classes.
+ virtual bool IsGap() const V8_FINAL V8_OVERRIDE { return true; }
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ static LGap* cast(LInstruction* instr) {
+ ASSERT(instr->IsGap());
+ return reinterpret_cast<LGap*>(instr);
+ }
+
+ bool IsRedundant() const;
+
+ HBasicBlock* block() const { return block_; }
+
+ enum InnerPosition {
+ BEFORE,
+ START,
+ END,
+ AFTER,
+ FIRST_INNER_POSITION = BEFORE,
+ LAST_INNER_POSITION = AFTER
+ };
+
+ LParallelMove* GetOrCreateParallelMove(InnerPosition pos, Zone* zone) {
+ if (parallel_moves_[pos] == NULL) {
+ parallel_moves_[pos] = new(zone) LParallelMove(zone);
+ }
+ return parallel_moves_[pos];
+ }
+
+ LParallelMove* GetParallelMove(InnerPosition pos) {
+ return parallel_moves_[pos];
+ }
+
+ private:
+ LParallelMove* parallel_moves_[LAST_INNER_POSITION + 1];
+ HBasicBlock* block_;
+};
+
+
+class LInstructionGap V8_FINAL : public LGap {
+ public:
+ explicit LInstructionGap(HBasicBlock* block) : LGap(block) { }
+
+ virtual bool HasInterestingComment(LCodeGen* gen) const V8_OVERRIDE {
+ return !IsRedundant();
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(InstructionGap, "gap")
+};
+
+
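+// Pseudo-instruction whose only effect is to clobber all double registers.
+// The x87 port uses it where its rudimentary X87 stack tracking cannot carry
+// doubles across control flow (see LInstruction::ClobbersDoubleRegisters).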
+class LClobberDoubles V8_FINAL : public LTemplateInstruction<0, 0, 0> {
+ public:
+ explicit LClobberDoubles(Isolate* isolate) { }
+
+ virtual bool ClobbersDoubleRegisters(Isolate* isolate) const V8_OVERRIDE {
+ return true;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(ClobberDoubles, "clobber-d")
+};
+
+
+class LGoto V8_FINAL : public LTemplateInstruction<0, 0, 0> {
+ public:
+ explicit LGoto(HBasicBlock* block) : block_(block) { }
+
+ virtual bool HasInterestingComment(LCodeGen* gen) const V8_OVERRIDE;
+ DECLARE_CONCRETE_INSTRUCTION(Goto, "goto")
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ virtual bool IsControl() const V8_OVERRIDE { return true; }
+
+ int block_id() const { return block_->block_id(); }
+ virtual bool ClobbersDoubleRegisters(Isolate* isolate) const V8_OVERRIDE {
+ return false;
+ }
+
+ bool jumps_to_join() const { return block_->predecessors()->length() > 1; }
+
+ private:
+ HBasicBlock* block_;
+};
+
+
+class LLazyBailout V8_FINAL : public LTemplateInstruction<0, 0, 0> {
+ public:
+ DECLARE_CONCRETE_INSTRUCTION(LazyBailout, "lazy-bailout")
+};
+
+
+class LDummy V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+ public:
+ explicit LDummy() { }
+ DECLARE_CONCRETE_INSTRUCTION(Dummy, "dummy")
+};
+
+
+class LDummyUse V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LDummyUse(LOperand* value) {
+ inputs_[0] = value;
+ }
+ DECLARE_CONCRETE_INSTRUCTION(DummyUse, "dummy-use")
+};
+
+
+class LDeoptimize V8_FINAL : public LTemplateInstruction<0, 0, 0> {
+ public:
+ DECLARE_CONCRETE_INSTRUCTION(Deoptimize, "deoptimize")
+ DECLARE_HYDROGEN_ACCESSOR(Deoptimize)
+};
+
+
+class LLabel V8_FINAL : public LGap {
+ public:
+ explicit LLabel(HBasicBlock* block)
+ : LGap(block), replacement_(NULL) { }
+
+ virtual bool HasInterestingComment(LCodeGen* gen) const V8_OVERRIDE {
+ return false;
+ }
+ DECLARE_CONCRETE_INSTRUCTION(Label, "label")
+
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+
+ int block_id() const { return block()->block_id(); }
+ bool is_loop_header() const { return block()->IsLoopHeader(); }
+ bool is_osr_entry() const { return block()->is_osr_entry(); }
+ Label* label() { return &label_; }
+ LLabel* replacement() const { return replacement_; }
+ void set_replacement(LLabel* label) { replacement_ = label; }
+ bool HasReplacement() const { return replacement_ != NULL; }
+
+ private:
+ Label label_;
+ LLabel* replacement_;
+};
+
+
+class LParameter V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+ public:
+ virtual bool HasInterestingComment(LCodeGen* gen) const V8_OVERRIDE {
+ return false;
+ }
+ DECLARE_CONCRETE_INSTRUCTION(Parameter, "parameter")
+};
+
+
+class LCallStub V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LCallStub(LOperand* context) {
+ inputs_[0] = context;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(CallStub, "call-stub")
+ DECLARE_HYDROGEN_ACCESSOR(CallStub)
+};
+
+
+class LUnknownOSRValue V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+ public:
+ virtual bool HasInterestingComment(LCodeGen* gen) const V8_OVERRIDE {
+ return false;
+ }
+ DECLARE_CONCRETE_INSTRUCTION(UnknownOSRValue, "unknown-osr-value")
+};
+
+
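+// Base class for instructions that end a basic block with a two-way branch.
+// The true/false successors are resolved to chunk labels lazily.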
+template<int I, int T>
+class LControlInstruction: public LTemplateInstruction<0, I, T> {
+ public:
+ LControlInstruction() : false_label_(NULL), true_label_(NULL) { }
+
+ virtual bool IsControl() const V8_FINAL V8_OVERRIDE { return true; }
+
+ int SuccessorCount() { return hydrogen()->SuccessorCount(); }
+ HBasicBlock* SuccessorAt(int i) { return hydrogen()->SuccessorAt(i); }
+
+ int TrueDestination(LChunk* chunk) {
+ return chunk->LookupDestination(true_block_id());
+ }
+ int FalseDestination(LChunk* chunk) {
+ return chunk->LookupDestination(false_block_id());
+ }
+
+ Label* TrueLabel(LChunk* chunk) {
+ if (true_label_ == NULL) {
+ true_label_ = chunk->GetAssemblyLabel(TrueDestination(chunk));
+ }
+ return true_label_;
+ }
+ Label* FalseLabel(LChunk* chunk) {
+ if (false_label_ == NULL) {
+ false_label_ = chunk->GetAssemblyLabel(FalseDestination(chunk));
+ }
+ return false_label_;
+ }
+
+ protected:
+ int true_block_id() { return SuccessorAt(0)->block_id(); }
+ int false_block_id() { return SuccessorAt(1)->block_id(); }
+
+ private:
+ HControlInstruction* hydrogen() {
+ return HControlInstruction::cast(this->hydrogen_value());
+ }
+
+ Label* false_label_;
+ Label* true_label_;
+};
+
+
+class LWrapReceiver V8_FINAL : public LTemplateInstruction<1, 2, 1> {
+ public:
+ LWrapReceiver(LOperand* receiver,
+ LOperand* function,
+ LOperand* temp) {
+ inputs_[0] = receiver;
+ inputs_[1] = function;
+ temps_[0] = temp;
+ }
+
+ LOperand* receiver() { return inputs_[0]; }
+ LOperand* function() { return inputs_[1]; }
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(WrapReceiver, "wrap-receiver")
+ DECLARE_HYDROGEN_ACCESSOR(WrapReceiver)
+};
+
+
+class LApplyArguments V8_FINAL : public LTemplateInstruction<1, 4, 0> {
+ public:
+ LApplyArguments(LOperand* function,
+ LOperand* receiver,
+ LOperand* length,
+ LOperand* elements) {
+ inputs_[0] = function;
+ inputs_[1] = receiver;
+ inputs_[2] = length;
+ inputs_[3] = elements;
+ }
+
+ LOperand* function() { return inputs_[0]; }
+ LOperand* receiver() { return inputs_[1]; }
+ LOperand* length() { return inputs_[2]; }
+ LOperand* elements() { return inputs_[3]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(ApplyArguments, "apply-arguments")
+};
+
+
+class LAccessArgumentsAt V8_FINAL : public LTemplateInstruction<1, 3, 0> {
+ public:
+ LAccessArgumentsAt(LOperand* arguments, LOperand* length, LOperand* index) {
+ inputs_[0] = arguments;
+ inputs_[1] = length;
+ inputs_[2] = index;
+ }
+
+ LOperand* arguments() { return inputs_[0]; }
+ LOperand* length() { return inputs_[1]; }
+ LOperand* index() { return inputs_[2]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(AccessArgumentsAt, "access-arguments-at")
+
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+};
+
+
+class LArgumentsLength V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LArgumentsLength(LOperand* elements) {
+ inputs_[0] = elements;
+ }
+
+ LOperand* elements() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(ArgumentsLength, "arguments-length")
+};
+
+
+class LArgumentsElements V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+ public:
+ DECLARE_CONCRETE_INSTRUCTION(ArgumentsElements, "arguments-elements")
+ DECLARE_HYDROGEN_ACCESSOR(ArgumentsElements)
+};
+
+
+class LDebugBreak V8_FINAL : public LTemplateInstruction<0, 0, 0> {
+ public:
+ DECLARE_CONCRETE_INSTRUCTION(DebugBreak, "break")
+};
+
+
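+// Division/modulus family: the *ByPowerOf2I and *ByConstI variants carry the
+// divisor as immediate data, while the generic DivI/ModI take it as an
+// operand.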
+class LModByPowerOf2I V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ LModByPowerOf2I(LOperand* dividend, int32_t divisor) {
+ inputs_[0] = dividend;
+ divisor_ = divisor;
+ }
+
+ LOperand* dividend() { return inputs_[0]; }
+ int32_t divisor() const { return divisor_; }
+
+ DECLARE_CONCRETE_INSTRUCTION(ModByPowerOf2I, "mod-by-power-of-2-i")
+ DECLARE_HYDROGEN_ACCESSOR(Mod)
+
+ private:
+ int32_t divisor_;
+};
+
+
+class LModByConstI V8_FINAL : public LTemplateInstruction<1, 1, 2> {
+ public:
+ LModByConstI(LOperand* dividend,
+ int32_t divisor,
+ LOperand* temp1,
+ LOperand* temp2) {
+ inputs_[0] = dividend;
+ divisor_ = divisor;
+ temps_[0] = temp1;
+ temps_[1] = temp2;
+ }
+
+ LOperand* dividend() { return inputs_[0]; }
+ int32_t divisor() const { return divisor_; }
+ LOperand* temp1() { return temps_[0]; }
+ LOperand* temp2() { return temps_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(ModByConstI, "mod-by-const-i")
+ DECLARE_HYDROGEN_ACCESSOR(Mod)
+
+ private:
+ int32_t divisor_;
+};
+
+
+class LModI V8_FINAL : public LTemplateInstruction<1, 2, 1> {
+ public:
+ LModI(LOperand* left, LOperand* right, LOperand* temp) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ temps_[0] = temp;
+ }
+
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(ModI, "mod-i")
+ DECLARE_HYDROGEN_ACCESSOR(Mod)
+};
+
+
+class LDivByPowerOf2I V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ LDivByPowerOf2I(LOperand* dividend, int32_t divisor) {
+ inputs_[0] = dividend;
+ divisor_ = divisor;
+ }
+
+ LOperand* dividend() { return inputs_[0]; }
+ int32_t divisor() const { return divisor_; }
+
+ DECLARE_CONCRETE_INSTRUCTION(DivByPowerOf2I, "div-by-power-of-2-i")
+ DECLARE_HYDROGEN_ACCESSOR(Div)
+
+ private:
+ int32_t divisor_;
+};
+
+
+class LDivByConstI V8_FINAL : public LTemplateInstruction<1, 1, 2> {
+ public:
+ LDivByConstI(LOperand* dividend,
+ int32_t divisor,
+ LOperand* temp1,
+ LOperand* temp2) {
+ inputs_[0] = dividend;
+ divisor_ = divisor;
+ temps_[0] = temp1;
+ temps_[1] = temp2;
+ }
+
+ LOperand* dividend() { return inputs_[0]; }
+ int32_t divisor() const { return divisor_; }
+ LOperand* temp1() { return temps_[0]; }
+ LOperand* temp2() { return temps_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(DivByConstI, "div-by-const-i")
+ DECLARE_HYDROGEN_ACCESSOR(Div)
+
+ private:
+ int32_t divisor_;
+};
+
+
+class LDivI V8_FINAL : public LTemplateInstruction<1, 2, 1> {
+ public:
+ LDivI(LOperand* dividend, LOperand* divisor, LOperand* temp) {
+ inputs_[0] = dividend;
+ inputs_[1] = divisor;
+ temps_[0] = temp;
+ }
+
+ LOperand* dividend() { return inputs_[0]; }
+ LOperand* divisor() { return inputs_[1]; }
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(DivI, "div-i")
+ DECLARE_HYDROGEN_ACCESSOR(BinaryOperation)
+};
+
+
+class LFlooringDivByPowerOf2I V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ LFlooringDivByPowerOf2I(LOperand* dividend, int32_t divisor) {
+ inputs_[0] = dividend;
+ divisor_ = divisor;
+ }
+
+ LOperand* dividend() { return inputs_[0]; }
+ int32_t divisor() const { return divisor_; }
+
+ DECLARE_CONCRETE_INSTRUCTION(FlooringDivByPowerOf2I,
+ "flooring-div-by-power-of-2-i")
+ DECLARE_HYDROGEN_ACCESSOR(MathFloorOfDiv)
+
+ private:
+ int32_t divisor_;
+};
+
+
+class LFlooringDivByConstI V8_FINAL : public LTemplateInstruction<1, 1, 3> {
+ public:
+ LFlooringDivByConstI(LOperand* dividend,
+ int32_t divisor,
+ LOperand* temp1,
+ LOperand* temp2,
+ LOperand* temp3) {
+ inputs_[0] = dividend;
+ divisor_ = divisor;
+ temps_[0] = temp1;
+ temps_[1] = temp2;
+ temps_[2] = temp3;
+ }
+
+ LOperand* dividend() { return inputs_[0]; }
+ int32_t divisor() const { return divisor_; }
+ LOperand* temp1() { return temps_[0]; }
+ LOperand* temp2() { return temps_[1]; }
+ LOperand* temp3() { return temps_[2]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(FlooringDivByConstI, "flooring-div-by-const-i")
+ DECLARE_HYDROGEN_ACCESSOR(MathFloorOfDiv)
+
+ private:
+ int32_t divisor_;
+};
+
+
+class LFlooringDivI V8_FINAL : public LTemplateInstruction<1, 2, 1> {
+ public:
+ LFlooringDivI(LOperand* dividend, LOperand* divisor, LOperand* temp) {
+ inputs_[0] = dividend;
+ inputs_[1] = divisor;
+ temps_[0] = temp;
+ }
+
+ LOperand* dividend() { return inputs_[0]; }
+ LOperand* divisor() { return inputs_[1]; }
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(FlooringDivI, "flooring-div-i")
+ DECLARE_HYDROGEN_ACCESSOR(MathFloorOfDiv)
+};
+
+
+class LMulI V8_FINAL : public LTemplateInstruction<1, 2, 1> {
+ public:
+ LMulI(LOperand* left, LOperand* right, LOperand* temp) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ temps_[0] = temp;
+ }
+
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(MulI, "mul-i")
+ DECLARE_HYDROGEN_ACCESSOR(Mul)
+};
+
+
+class LCompareNumericAndBranch V8_FINAL : public LControlInstruction<2, 0> {
+ public:
+ LCompareNumericAndBranch(LOperand* left, LOperand* right) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
+
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(CompareNumericAndBranch,
+ "compare-numeric-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(CompareNumericAndBranch)
+
+ Token::Value op() const { return hydrogen()->token(); }
+ bool is_double() const {
+ return hydrogen()->representation().IsDouble();
+ }
+
+ virtual void PrintDataTo(StringStream* stream);
+};
+
+
+class LMathFloor V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LMathFloor(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(MathFloor, "math-floor")
+ DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation)
+};
+
+
+class LMathRound V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LMathRound(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(MathRound, "math-round")
+ DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation)
+};
+
+
+class LMathAbs V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LMathAbs(LOperand* context, LOperand* value) {
+ inputs_[1] = context;
+ inputs_[0] = value;
+ }
+
+ LOperand* context() { return inputs_[1]; }
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(MathAbs, "math-abs")
+ DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation)
+};
+
+
+class LMathLog V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LMathLog(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(MathLog, "math-log")
+};
+
+
+class LMathClz32 V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LMathClz32(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(MathClz32, "math-clz32")
+};
+
+
+class LMathExp V8_FINAL : public LTemplateInstruction<1, 1, 2> {
+ public:
+ LMathExp(LOperand* value,
+ LOperand* temp1,
+ LOperand* temp2) {
+ inputs_[0] = value;
+ temps_[0] = temp1;
+ temps_[1] = temp2;
+ ExternalReference::InitializeMathExpData();
+ }
+
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp1() { return temps_[0]; }
+ LOperand* temp2() { return temps_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(MathExp, "math-exp")
+};
+
+
+class LMathSqrt V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LMathSqrt(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(MathSqrt, "math-sqrt")
+};
+
+
+class LMathPowHalf V8_FINAL : public LTemplateInstruction<1, 1, 1> {
+ public:
+ LMathPowHalf(LOperand* value, LOperand* temp) {
+ inputs_[0] = value;
+ temps_[0] = temp;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(MathPowHalf, "math-pow-half")
+};
+
+
+class LCmpObjectEqAndBranch V8_FINAL : public LControlInstruction<2, 0> {
+ public:
+ LCmpObjectEqAndBranch(LOperand* left, LOperand* right) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
+
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(CmpObjectEqAndBranch, "cmp-object-eq-and-branch")
+};
+
+
+class LCmpHoleAndBranch V8_FINAL : public LControlInstruction<1, 0> {
+ public:
+ explicit LCmpHoleAndBranch(LOperand* object) {
+ inputs_[0] = object;
+ }
+
+ LOperand* object() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(CmpHoleAndBranch, "cmp-hole-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(CompareHoleAndBranch)
+};
+
+
+class LCompareMinusZeroAndBranch V8_FINAL : public LControlInstruction<1, 1> {
+ public:
+ LCompareMinusZeroAndBranch(LOperand* value, LOperand* temp) {
+ inputs_[0] = value;
+ temps_[0] = temp;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(CompareMinusZeroAndBranch,
+ "cmp-minus-zero-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(CompareMinusZeroAndBranch)
+};
+
+
+class LIsObjectAndBranch V8_FINAL : public LControlInstruction<1, 1> {
+ public:
+ LIsObjectAndBranch(LOperand* value, LOperand* temp) {
+ inputs_[0] = value;
+ temps_[0] = temp;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(IsObjectAndBranch, "is-object-and-branch")
+
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+};
+
+
+class LIsStringAndBranch V8_FINAL : public LControlInstruction<1, 1> {
+ public:
+ LIsStringAndBranch(LOperand* value, LOperand* temp) {
+ inputs_[0] = value;
+ temps_[0] = temp;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(IsStringAndBranch, "is-string-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(IsStringAndBranch)
+
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+};
+
+
+class LIsSmiAndBranch V8_FINAL : public LControlInstruction<1, 0> {
+ public:
+ explicit LIsSmiAndBranch(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(IsSmiAndBranch, "is-smi-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(IsSmiAndBranch)
+
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+};
+
+
+class LIsUndetectableAndBranch V8_FINAL : public LControlInstruction<1, 1> {
+ public:
+ LIsUndetectableAndBranch(LOperand* value, LOperand* temp) {
+ inputs_[0] = value;
+ temps_[0] = temp;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(IsUndetectableAndBranch,
+ "is-undetectable-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(IsUndetectableAndBranch)
+
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+};
+
+
+class LStringCompareAndBranch V8_FINAL : public LControlInstruction<3, 0> {
+ public:
+ LStringCompareAndBranch(LOperand* context, LOperand* left, LOperand* right) {
+ inputs_[0] = context;
+ inputs_[1] = left;
+ inputs_[2] = right;
+ }
+
+  LOperand* context() { return inputs_[0]; }
+ LOperand* left() { return inputs_[1]; }
+ LOperand* right() { return inputs_[2]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(StringCompareAndBranch,
+ "string-compare-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(StringCompareAndBranch)
+
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+
+ Token::Value op() const { return hydrogen()->token(); }
+};
+
+
+class LHasInstanceTypeAndBranch V8_FINAL : public LControlInstruction<1, 1> {
+ public:
+ LHasInstanceTypeAndBranch(LOperand* value, LOperand* temp) {
+ inputs_[0] = value;
+ temps_[0] = temp;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(HasInstanceTypeAndBranch,
+ "has-instance-type-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(HasInstanceTypeAndBranch)
+
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+};
+
+
+class LGetCachedArrayIndex V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LGetCachedArrayIndex(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(GetCachedArrayIndex, "get-cached-array-index")
+ DECLARE_HYDROGEN_ACCESSOR(GetCachedArrayIndex)
+};
+
+
+class LHasCachedArrayIndexAndBranch V8_FINAL
+ : public LControlInstruction<1, 0> {
+ public:
+ explicit LHasCachedArrayIndexAndBranch(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(HasCachedArrayIndexAndBranch,
+ "has-cached-array-index-and-branch")
+
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+};
+
+
+class LIsConstructCallAndBranch V8_FINAL : public LControlInstruction<0, 1> {
+ public:
+ explicit LIsConstructCallAndBranch(LOperand* temp) {
+ temps_[0] = temp;
+ }
+
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(IsConstructCallAndBranch,
+ "is-construct-call-and-branch")
+};
+
+
+class LClassOfTestAndBranch V8_FINAL : public LControlInstruction<1, 2> {
+ public:
+ LClassOfTestAndBranch(LOperand* value, LOperand* temp, LOperand* temp2) {
+ inputs_[0] = value;
+ temps_[0] = temp;
+ temps_[1] = temp2;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+ LOperand* temp2() { return temps_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(ClassOfTestAndBranch,
+ "class-of-test-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(ClassOfTestAndBranch)
+
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+};
+
+
+class LCmpT V8_FINAL : public LTemplateInstruction<1, 3, 0> {
+ public:
+ LCmpT(LOperand* context, LOperand* left, LOperand* right) {
+ inputs_[0] = context;
+ inputs_[1] = left;
+ inputs_[2] = right;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(CmpT, "cmp-t")
+ DECLARE_HYDROGEN_ACCESSOR(CompareGeneric)
+
+ LOperand* context() { return inputs_[0]; }
+ Token::Value op() const { return hydrogen()->token(); }
+};
+
+
+class LInstanceOf V8_FINAL : public LTemplateInstruction<1, 3, 0> {
+ public:
+ LInstanceOf(LOperand* context, LOperand* left, LOperand* right) {
+ inputs_[0] = context;
+ inputs_[1] = left;
+ inputs_[2] = right;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(InstanceOf, "instance-of")
+};
+
+
+class LInstanceOfKnownGlobal V8_FINAL : public LTemplateInstruction<1, 2, 1> {
+ public:
+ LInstanceOfKnownGlobal(LOperand* context, LOperand* value, LOperand* temp) {
+ inputs_[0] = context;
+ inputs_[1] = value;
+ temps_[0] = temp;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* value() { return inputs_[1]; }
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(InstanceOfKnownGlobal,
+ "instance-of-known-global")
+ DECLARE_HYDROGEN_ACCESSOR(InstanceOfKnownGlobal)
+
+ Handle<JSFunction> function() const { return hydrogen()->function(); }
+ LEnvironment* GetDeferredLazyDeoptimizationEnvironment() {
+ return lazy_deopt_env_;
+ }
+ virtual void SetDeferredLazyDeoptimizationEnvironment(
+ LEnvironment* env) V8_OVERRIDE {
+ lazy_deopt_env_ = env;
+ }
+
+ private:
+ LEnvironment* lazy_deopt_env_;
+};
+
+
+class LBoundsCheck V8_FINAL : public LTemplateInstruction<0, 2, 0> {
+ public:
+ LBoundsCheck(LOperand* index, LOperand* length) {
+ inputs_[0] = index;
+ inputs_[1] = length;
+ }
+
+ LOperand* index() { return inputs_[0]; }
+ LOperand* length() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(BoundsCheck, "bounds-check")
+ DECLARE_HYDROGEN_ACCESSOR(BoundsCheck)
+};
+
+
+class LBitI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LBitI(LOperand* left, LOperand* right) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
+
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(BitI, "bit-i")
+ DECLARE_HYDROGEN_ACCESSOR(Bitwise)
+
+ Token::Value op() const { return hydrogen()->op(); }
+};
+
+
+class LShiftI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LShiftI(Token::Value op, LOperand* left, LOperand* right, bool can_deopt)
+ : op_(op), can_deopt_(can_deopt) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
+
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(ShiftI, "shift-i")
+
+ Token::Value op() const { return op_; }
+ bool can_deopt() const { return can_deopt_; }
+
+ private:
+ Token::Value op_;
+ bool can_deopt_;
+};
+
+
+class LSubI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LSubI(LOperand* left, LOperand* right) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
+
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(SubI, "sub-i")
+ DECLARE_HYDROGEN_ACCESSOR(Sub)
+};
+
+
+class LConstantI V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+ public:
+ DECLARE_CONCRETE_INSTRUCTION(ConstantI, "constant-i")
+ DECLARE_HYDROGEN_ACCESSOR(Constant)
+
+ int32_t value() const { return hydrogen()->Integer32Value(); }
+};
+
+
+class LConstantS V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+ public:
+ DECLARE_CONCRETE_INSTRUCTION(ConstantS, "constant-s")
+ DECLARE_HYDROGEN_ACCESSOR(Constant)
+
+ Smi* value() const { return Smi::FromInt(hydrogen()->Integer32Value()); }
+};
+
+
+class LConstantD V8_FINAL : public LTemplateInstruction<1, 0, 1> {
+ public:
+ explicit LConstantD(LOperand* temp) {
+ temps_[0] = temp;
+ }
+
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(ConstantD, "constant-d")
+ DECLARE_HYDROGEN_ACCESSOR(Constant)
+
+ double value() const { return hydrogen()->DoubleValue(); }
+};
+
+
+class LConstantE V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+ public:
+ DECLARE_CONCRETE_INSTRUCTION(ConstantE, "constant-e")
+ DECLARE_HYDROGEN_ACCESSOR(Constant)
+
+ ExternalReference value() const {
+ return hydrogen()->ExternalReferenceValue();
+ }
+};
+
+
+class LConstantT V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+ public:
+ DECLARE_CONCRETE_INSTRUCTION(ConstantT, "constant-t")
+ DECLARE_HYDROGEN_ACCESSOR(Constant)
+
+ Handle<Object> value(Isolate* isolate) const {
+ return hydrogen()->handle(isolate);
+ }
+};
+
+
+class LBranch V8_FINAL : public LControlInstruction<1, 1> {
+ public:
+ LBranch(LOperand* value, LOperand* temp) {
+ inputs_[0] = value;
+ temps_[0] = temp;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(Branch, "branch")
+ DECLARE_HYDROGEN_ACCESSOR(Branch)
+
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+};
+
+
+class LCmpMapAndBranch V8_FINAL : public LControlInstruction<1, 0> {
+ public:
+ explicit LCmpMapAndBranch(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(CmpMapAndBranch, "cmp-map-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(CompareMap)
+
+ Handle<Map> map() const { return hydrogen()->map().handle(); }
+};
+
+
+class LMapEnumLength V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LMapEnumLength(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(MapEnumLength, "map-enum-length")
+};
+
+
+class LDateField V8_FINAL : public LTemplateInstruction<1, 1, 1> {
+ public:
+ LDateField(LOperand* date, LOperand* temp, Smi* index)
+ : index_(index) {
+ inputs_[0] = date;
+ temps_[0] = temp;
+ }
+
+ LOperand* date() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(DateField, "date-field")
+ DECLARE_HYDROGEN_ACCESSOR(DateField)
+
+ Smi* index() const { return index_; }
+
+ private:
+ Smi* index_;
+};
+
+
+class LSeqStringGetChar V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LSeqStringGetChar(LOperand* string, LOperand* index) {
+ inputs_[0] = string;
+ inputs_[1] = index;
+ }
+
+ LOperand* string() const { return inputs_[0]; }
+ LOperand* index() const { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(SeqStringGetChar, "seq-string-get-char")
+ DECLARE_HYDROGEN_ACCESSOR(SeqStringGetChar)
+};
+
+
+class LSeqStringSetChar V8_FINAL : public LTemplateInstruction<1, 4, 0> {
+ public:
+ LSeqStringSetChar(LOperand* context,
+ LOperand* string,
+ LOperand* index,
+ LOperand* value) {
+ inputs_[0] = context;
+ inputs_[1] = string;
+ inputs_[2] = index;
+ inputs_[3] = value;
+ }
+
+ LOperand* string() { return inputs_[1]; }
+ LOperand* index() { return inputs_[2]; }
+ LOperand* value() { return inputs_[3]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(SeqStringSetChar, "seq-string-set-char")
+ DECLARE_HYDROGEN_ACCESSOR(SeqStringSetChar)
+};
+
+
+class LAddI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LAddI(LOperand* left, LOperand* right) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
+
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+
+ static bool UseLea(HAdd* add) {
+ return !add->CheckFlag(HValue::kCanOverflow) &&
+ add->BetterLeftOperand()->UseCount() > 1;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(AddI, "add-i")
+ DECLARE_HYDROGEN_ACCESSOR(Add)
+};
+
+
+class LMathMinMax V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LMathMinMax(LOperand* left, LOperand* right) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
+
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(MathMinMax, "math-min-max")
+ DECLARE_HYDROGEN_ACCESSOR(MathMinMax)
+};
+
+
+class LPower V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LPower(LOperand* left, LOperand* right) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
+
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(Power, "power")
+ DECLARE_HYDROGEN_ACCESSOR(Power)
+};
+
+
+class LArithmeticD V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LArithmeticD(Token::Value op, LOperand* left, LOperand* right)
+ : op_(op) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
+
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+
+ Token::Value op() const { return op_; }
+
+ virtual Opcode opcode() const V8_OVERRIDE {
+ return LInstruction::kArithmeticD;
+ }
+ virtual void CompileToNative(LCodeGen* generator) V8_OVERRIDE;
+ virtual const char* Mnemonic() const V8_OVERRIDE;
+
+ private:
+ Token::Value op_;
+};
+
+
+class LArithmeticT V8_FINAL : public LTemplateInstruction<1, 3, 0> {
+ public:
+ LArithmeticT(Token::Value op,
+ LOperand* context,
+ LOperand* left,
+ LOperand* right)
+ : op_(op) {
+ inputs_[0] = context;
+ inputs_[1] = left;
+ inputs_[2] = right;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* left() { return inputs_[1]; }
+ LOperand* right() { return inputs_[2]; }
+
+ virtual Opcode opcode() const V8_OVERRIDE {
+ return LInstruction::kArithmeticT;
+ }
+ virtual void CompileToNative(LCodeGen* generator) V8_OVERRIDE;
+ virtual const char* Mnemonic() const V8_OVERRIDE;
+
+ Token::Value op() const { return op_; }
+
+ private:
+ Token::Value op_;
+};
+
+
+class LReturn V8_FINAL : public LTemplateInstruction<0, 3, 0> {
+ public:
+ explicit LReturn(LOperand* value,
+ LOperand* context,
+ LOperand* parameter_count) {
+ inputs_[0] = value;
+ inputs_[1] = context;
+ inputs_[2] = parameter_count;
+ }
+
+ bool has_constant_parameter_count() {
+ return parameter_count()->IsConstantOperand();
+ }
+ LConstantOperand* constant_parameter_count() {
+ ASSERT(has_constant_parameter_count());
+ return LConstantOperand::cast(parameter_count());
+ }
+ LOperand* parameter_count() { return inputs_[2]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(Return, "return")
+ DECLARE_HYDROGEN_ACCESSOR(Return)
+};
+
+
+class LLoadNamedField V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LLoadNamedField(LOperand* object) {
+ inputs_[0] = object;
+ }
+
+ LOperand* object() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(LoadNamedField, "load-named-field")
+ DECLARE_HYDROGEN_ACCESSOR(LoadNamedField)
+};
+
+
+class LLoadNamedGeneric V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LLoadNamedGeneric(LOperand* context, LOperand* object) {
+ inputs_[0] = context;
+ inputs_[1] = object;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* object() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(LoadNamedGeneric, "load-named-generic")
+ DECLARE_HYDROGEN_ACCESSOR(LoadNamedGeneric)
+
+ Handle<Object> name() const { return hydrogen()->name(); }
+};
+
+
+class LLoadFunctionPrototype V8_FINAL : public LTemplateInstruction<1, 1, 1> {
+ public:
+ LLoadFunctionPrototype(LOperand* function, LOperand* temp) {
+ inputs_[0] = function;
+ temps_[0] = temp;
+ }
+
+ LOperand* function() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(LoadFunctionPrototype, "load-function-prototype")
+ DECLARE_HYDROGEN_ACCESSOR(LoadFunctionPrototype)
+};
+
+
+class LLoadRoot V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+ public:
+ DECLARE_CONCRETE_INSTRUCTION(LoadRoot, "load-root")
+ DECLARE_HYDROGEN_ACCESSOR(LoadRoot)
+
+ Heap::RootListIndex index() const { return hydrogen()->index(); }
+};
+
+
+class LLoadKeyed V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LLoadKeyed(LOperand* elements, LOperand* key) {
+ inputs_[0] = elements;
+ inputs_[1] = key;
+ }
+ LOperand* elements() { return inputs_[0]; }
+ LOperand* key() { return inputs_[1]; }
+ ElementsKind elements_kind() const {
+ return hydrogen()->elements_kind();
+ }
+ bool is_external() const {
+ return hydrogen()->is_external();
+ }
+ bool is_fixed_typed_array() const {
+ return hydrogen()->is_fixed_typed_array();
+ }
+ bool is_typed_elements() const {
+ return is_external() || is_fixed_typed_array();
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(LoadKeyed, "load-keyed")
+ DECLARE_HYDROGEN_ACCESSOR(LoadKeyed)
+
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ uint32_t base_offset() const { return hydrogen()->base_offset(); }
+ bool key_is_smi() {
+ return hydrogen()->key()->representation().IsTagged();
+ }
+};
+
+
+inline static bool ExternalArrayOpRequiresTemp(
+ Representation key_representation,
+ ElementsKind elements_kind) {
+ // Operations that require the key to be divided by two to be converted into
+ // an index cannot fold the scale operation into a load and need an extra
+ // temp register to do the work.
+ return key_representation.IsSmi() &&
+ (elements_kind == EXTERNAL_INT8_ELEMENTS ||
+ elements_kind == EXTERNAL_UINT8_ELEMENTS ||
+ elements_kind == EXTERNAL_UINT8_CLAMPED_ELEMENTS ||
+ elements_kind == UINT8_ELEMENTS ||
+ elements_kind == INT8_ELEMENTS ||
+ elements_kind == UINT8_CLAMPED_ELEMENTS);
+}
+
+
+class LLoadKeyedGeneric V8_FINAL : public LTemplateInstruction<1, 3, 0> {
+ public:
+ LLoadKeyedGeneric(LOperand* context, LOperand* obj, LOperand* key) {
+ inputs_[0] = context;
+ inputs_[1] = obj;
+ inputs_[2] = key;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* object() { return inputs_[1]; }
+ LOperand* key() { return inputs_[2]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(LoadKeyedGeneric, "load-keyed-generic")
+};
+
+
+class LLoadGlobalCell V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+ public:
+ DECLARE_CONCRETE_INSTRUCTION(LoadGlobalCell, "load-global-cell")
+ DECLARE_HYDROGEN_ACCESSOR(LoadGlobalCell)
+};
+
+
+class LLoadGlobalGeneric V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LLoadGlobalGeneric(LOperand* context, LOperand* global_object) {
+ inputs_[0] = context;
+ inputs_[1] = global_object;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* global_object() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(LoadGlobalGeneric, "load-global-generic")
+ DECLARE_HYDROGEN_ACCESSOR(LoadGlobalGeneric)
+
+ Handle<Object> name() const { return hydrogen()->name(); }
+ bool for_typeof() const { return hydrogen()->for_typeof(); }
+};
+
+
+class LStoreGlobalCell V8_FINAL : public LTemplateInstruction<0, 1, 0> {
+ public:
+ explicit LStoreGlobalCell(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(StoreGlobalCell, "store-global-cell")
+ DECLARE_HYDROGEN_ACCESSOR(StoreGlobalCell)
+};
+
+
+class LLoadContextSlot V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LLoadContextSlot(LOperand* context) {
+ inputs_[0] = context;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(LoadContextSlot, "load-context-slot")
+ DECLARE_HYDROGEN_ACCESSOR(LoadContextSlot)
+
+ int slot_index() { return hydrogen()->slot_index(); }
+
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+};
+
+
+class LStoreContextSlot V8_FINAL : public LTemplateInstruction<0, 2, 1> {
+ public:
+ LStoreContextSlot(LOperand* context, LOperand* value, LOperand* temp) {
+ inputs_[0] = context;
+ inputs_[1] = value;
+ temps_[0] = temp;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* value() { return inputs_[1]; }
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(StoreContextSlot, "store-context-slot")
+ DECLARE_HYDROGEN_ACCESSOR(StoreContextSlot)
+
+ int slot_index() { return hydrogen()->slot_index(); }
+
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+};
+
+
+class LPushArgument V8_FINAL : public LTemplateInstruction<0, 1, 0> {
+ public:
+ explicit LPushArgument(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(PushArgument, "push-argument")
+};
+
+
+class LDrop V8_FINAL : public LTemplateInstruction<0, 0, 0> {
+ public:
+ explicit LDrop(int count) : count_(count) { }
+
+ int count() const { return count_; }
+
+ DECLARE_CONCRETE_INSTRUCTION(Drop, "drop")
+
+ private:
+ int count_;
+};
+
+
+class LStoreCodeEntry V8_FINAL: public LTemplateInstruction<0, 1, 1> {
+ public:
+ LStoreCodeEntry(LOperand* function, LOperand* code_object) {
+ inputs_[0] = function;
+ temps_[0] = code_object;
+ }
+
+ LOperand* function() { return inputs_[0]; }
+ LOperand* code_object() { return temps_[0]; }
+
+ virtual void PrintDataTo(StringStream* stream);
+
+ DECLARE_CONCRETE_INSTRUCTION(StoreCodeEntry, "store-code-entry")
+ DECLARE_HYDROGEN_ACCESSOR(StoreCodeEntry)
+};
+
+
+class LInnerAllocatedObject V8_FINAL: public LTemplateInstruction<1, 2, 0> {
+ public:
+ LInnerAllocatedObject(LOperand* base_object, LOperand* offset) {
+ inputs_[0] = base_object;
+ inputs_[1] = offset;
+ }
+
+ LOperand* base_object() const { return inputs_[0]; }
+ LOperand* offset() const { return inputs_[1]; }
+
+ virtual void PrintDataTo(StringStream* stream);
+
+ DECLARE_CONCRETE_INSTRUCTION(InnerAllocatedObject, "inner-allocated-object")
+};
+
+
+class LThisFunction V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+ public:
+ DECLARE_CONCRETE_INSTRUCTION(ThisFunction, "this-function")
+ DECLARE_HYDROGEN_ACCESSOR(ThisFunction)
+};
+
+
+class LContext V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+ public:
+ DECLARE_CONCRETE_INSTRUCTION(Context, "context")
+ DECLARE_HYDROGEN_ACCESSOR(Context)
+};
+
+
+class LDeclareGlobals V8_FINAL : public LTemplateInstruction<0, 1, 0> {
+ public:
+ explicit LDeclareGlobals(LOperand* context) {
+ inputs_[0] = context;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(DeclareGlobals, "declare-globals")
+ DECLARE_HYDROGEN_ACCESSOR(DeclareGlobals)
+};
+
+
+class LCallJSFunction V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LCallJSFunction(LOperand* function) {
+ inputs_[0] = function;
+ }
+
+ LOperand* function() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(CallJSFunction, "call-js-function")
+ DECLARE_HYDROGEN_ACCESSOR(CallJSFunction)
+
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+
+ int arity() const { return hydrogen()->argument_count() - 1; }
+};
+
+
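+// Call through a CallInterfaceDescriptor. The operand count depends on the
+// descriptor, so the inputs live in a ZoneList rather than the fixed-size
+// containers used by LTemplateInstruction.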
+class LCallWithDescriptor V8_FINAL : public LTemplateResultInstruction<1> {
+ public:
+ LCallWithDescriptor(const CallInterfaceDescriptor* descriptor,
+ ZoneList<LOperand*>& operands,
+ Zone* zone)
+ : inputs_(descriptor->environment_length() + 1, zone) {
+ ASSERT(descriptor->environment_length() + 1 == operands.length());
+ inputs_.AddAll(operands, zone);
+ }
+
+ LOperand* target() const { return inputs_[0]; }
+
+ private:
+ DECLARE_CONCRETE_INSTRUCTION(CallWithDescriptor, "call-with-descriptor")
+ DECLARE_HYDROGEN_ACCESSOR(CallWithDescriptor)
+
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+
+ int arity() const { return hydrogen()->argument_count() - 1; }
+
+ ZoneList<LOperand*> inputs_;
+
+ // Iterator support.
+ virtual int InputCount() V8_FINAL V8_OVERRIDE { return inputs_.length(); }
+ virtual LOperand* InputAt(int i) V8_FINAL V8_OVERRIDE { return inputs_[i]; }
+
+ virtual int TempCount() V8_FINAL V8_OVERRIDE { return 0; }
+ virtual LOperand* TempAt(int i) V8_FINAL V8_OVERRIDE { return NULL; }
+};
+
+
+class LInvokeFunction V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LInvokeFunction(LOperand* context, LOperand* function) {
+ inputs_[0] = context;
+ inputs_[1] = function;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* function() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(InvokeFunction, "invoke-function")
+ DECLARE_HYDROGEN_ACCESSOR(InvokeFunction)
+
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+
+ int arity() const { return hydrogen()->argument_count() - 1; }
+};
+
+
+class LCallFunction V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+ explicit LCallFunction(LOperand* context, LOperand* function) {
+ inputs_[0] = context;
+ inputs_[1] = function;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* function() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(CallFunction, "call-function")
+ DECLARE_HYDROGEN_ACCESSOR(CallFunction)
+
+ int arity() const { return hydrogen()->argument_count() - 1; }
+};
+
+
+class LCallNew V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LCallNew(LOperand* context, LOperand* constructor) {
+ inputs_[0] = context;
+ inputs_[1] = constructor;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* constructor() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(CallNew, "call-new")
+ DECLARE_HYDROGEN_ACCESSOR(CallNew)
+
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+
+ int arity() const { return hydrogen()->argument_count() - 1; }
+};
+
+
+class LCallNewArray V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LCallNewArray(LOperand* context, LOperand* constructor) {
+ inputs_[0] = context;
+ inputs_[1] = constructor;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* constructor() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(CallNewArray, "call-new-array")
+ DECLARE_HYDROGEN_ACCESSOR(CallNewArray)
+
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+
+ int arity() const { return hydrogen()->argument_count() - 1; }
+};
+
+
+class LCallRuntime V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LCallRuntime(LOperand* context) {
+ inputs_[0] = context;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(CallRuntime, "call-runtime")
+ DECLARE_HYDROGEN_ACCESSOR(CallRuntime)
+
+ virtual bool ClobbersDoubleRegisters(Isolate* isolate) const V8_OVERRIDE {
+ return true;
+ }
+
+ const Runtime::Function* function() const { return hydrogen()->function(); }
+ int arity() const { return hydrogen()->argument_count(); }
+};
+
+
+class LInteger32ToDouble V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LInteger32ToDouble(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(Integer32ToDouble, "int32-to-double")
+};
+
+
+class LUint32ToDouble V8_FINAL : public LTemplateInstruction<1, 1, 1> {
+ public:
+ explicit LUint32ToDouble(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(Uint32ToDouble, "uint32-to-double")
+};
+
+
+class LNumberTagI V8_FINAL : public LTemplateInstruction<1, 1, 1> {
+ public:
+ LNumberTagI(LOperand* value, LOperand* temp) {
+ inputs_[0] = value;
+ temps_[0] = temp;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(NumberTagI, "number-tag-i")
+};
+
+
+class LNumberTagU V8_FINAL : public LTemplateInstruction<1, 1, 1> {
+ public:
+ LNumberTagU(LOperand* value, LOperand* temp) {
+ inputs_[0] = value;
+ temps_[0] = temp;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(NumberTagU, "number-tag-u")
+};
+
+
+class LNumberTagD V8_FINAL : public LTemplateInstruction<1, 1, 1> {
+ public:
+ LNumberTagD(LOperand* value, LOperand* temp) {
+ inputs_[0] = value;
+ temps_[0] = temp;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(NumberTagD, "number-tag-d")
+ DECLARE_HYDROGEN_ACCESSOR(Change)
+};
+
+
+// Sometimes truncating conversion from a double to an int32.
+class LDoubleToI V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LDoubleToI(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(DoubleToI, "double-to-i")
+ DECLARE_HYDROGEN_ACCESSOR(UnaryOperation)
+
+ bool truncating() { return hydrogen()->CanTruncateToInt32(); }
+};
+
+
+class LDoubleToSmi V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LDoubleToSmi(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(DoubleToSmi, "double-to-smi")
+ DECLARE_HYDROGEN_ACCESSOR(UnaryOperation)
+};
+
+
+// Truncating conversion from a tagged value to an int32.
+class LTaggedToI V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LTaggedToI(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(TaggedToI, "tagged-to-i")
+ DECLARE_HYDROGEN_ACCESSOR(Change)
+
+ bool truncating() { return hydrogen()->CanTruncateToInt32(); }
+};
+
+
+class LSmiTag V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LSmiTag(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(SmiTag, "smi-tag")
+ DECLARE_HYDROGEN_ACCESSOR(Change)
+};
+
+
+class LNumberUntagD V8_FINAL : public LTemplateInstruction<1, 1, 1> {
+ public:
+  LNumberUntagD(LOperand* value, LOperand* temp) {
+ inputs_[0] = value;
+ temps_[0] = temp;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(NumberUntagD, "double-untag")
+  DECLARE_HYDROGEN_ACCESSOR(Change)
+};
+
+
+class LSmiUntag V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ LSmiUntag(LOperand* value, bool needs_check)
+ : needs_check_(needs_check) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(SmiUntag, "smi-untag")
+
+ bool needs_check() const { return needs_check_; }
+
+ private:
+ bool needs_check_;
+};
+
+
+class LStoreNamedField V8_FINAL : public LTemplateInstruction<0, 2, 2> {
+ public:
+ LStoreNamedField(LOperand* obj,
+ LOperand* val,
+ LOperand* temp,
+ LOperand* temp_map) {
+ inputs_[0] = obj;
+ inputs_[1] = val;
+ temps_[0] = temp;
+ temps_[1] = temp_map;
+ }
+
+ LOperand* object() { return inputs_[0]; }
+ LOperand* value() { return inputs_[1]; }
+ LOperand* temp() { return temps_[0]; }
+ LOperand* temp_map() { return temps_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(StoreNamedField, "store-named-field")
+ DECLARE_HYDROGEN_ACCESSOR(StoreNamedField)
+
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+};
+
+
+class LStoreNamedGeneric V8_FINAL : public LTemplateInstruction<0, 3, 0> {
+ public:
+ LStoreNamedGeneric(LOperand* context, LOperand* object, LOperand* value) {
+ inputs_[0] = context;
+ inputs_[1] = object;
+ inputs_[2] = value;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* object() { return inputs_[1]; }
+ LOperand* value() { return inputs_[2]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(StoreNamedGeneric, "store-named-generic")
+ DECLARE_HYDROGEN_ACCESSOR(StoreNamedGeneric)
+
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ Handle<Object> name() const { return hydrogen()->name(); }
+ StrictMode strict_mode() { return hydrogen()->strict_mode(); }
+};
+
+
+class LStoreKeyed V8_FINAL : public LTemplateInstruction<0, 3, 0> {
+ public:
+ LStoreKeyed(LOperand* obj, LOperand* key, LOperand* val) {
+ inputs_[0] = obj;
+ inputs_[1] = key;
+ inputs_[2] = val;
+ }
+
+ bool is_external() const { return hydrogen()->is_external(); }
+ bool is_fixed_typed_array() const {
+ return hydrogen()->is_fixed_typed_array();
+ }
+ bool is_typed_elements() const {
+ return is_external() || is_fixed_typed_array();
+ }
+ LOperand* elements() { return inputs_[0]; }
+ LOperand* key() { return inputs_[1]; }
+ LOperand* value() { return inputs_[2]; }
+ ElementsKind elements_kind() const {
+ return hydrogen()->elements_kind();
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(StoreKeyed, "store-keyed")
+ DECLARE_HYDROGEN_ACCESSOR(StoreKeyed)
+
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ uint32_t base_offset() const { return hydrogen()->base_offset(); }
+ bool NeedsCanonicalization() { return hydrogen()->NeedsCanonicalization(); }
+};
+
+
+class LStoreKeyedGeneric V8_FINAL : public LTemplateInstruction<0, 4, 0> {
+ public:
+ LStoreKeyedGeneric(LOperand* context,
+ LOperand* object,
+ LOperand* key,
+ LOperand* value) {
+ inputs_[0] = context;
+ inputs_[1] = object;
+ inputs_[2] = key;
+ inputs_[3] = value;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* object() { return inputs_[1]; }
+ LOperand* key() { return inputs_[2]; }
+ LOperand* value() { return inputs_[3]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(StoreKeyedGeneric, "store-keyed-generic")
+ DECLARE_HYDROGEN_ACCESSOR(StoreKeyedGeneric)
+
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+
+ StrictMode strict_mode() { return hydrogen()->strict_mode(); }
+};
+
+
+class LTransitionElementsKind V8_FINAL : public LTemplateInstruction<0, 2, 2> {
+ public:
+ LTransitionElementsKind(LOperand* object,
+ LOperand* context,
+ LOperand* new_map_temp,
+ LOperand* temp) {
+ inputs_[0] = object;
+ inputs_[1] = context;
+ temps_[0] = new_map_temp;
+ temps_[1] = temp;
+ }
+
+ LOperand* context() { return inputs_[1]; }
+ LOperand* object() { return inputs_[0]; }
+ LOperand* new_map_temp() { return temps_[0]; }
+ LOperand* temp() { return temps_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(TransitionElementsKind,
+ "transition-elements-kind")
+ DECLARE_HYDROGEN_ACCESSOR(TransitionElementsKind)
+
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+
+ Handle<Map> original_map() { return hydrogen()->original_map().handle(); }
+ Handle<Map> transitioned_map() {
+ return hydrogen()->transitioned_map().handle();
+ }
+ ElementsKind from_kind() { return hydrogen()->from_kind(); }
+ ElementsKind to_kind() { return hydrogen()->to_kind(); }
+};
+
+
+class LArrayShift V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LArrayShift(LOperand* context, LOperand* object) {
+ inputs_[0] = context;
+ inputs_[1] = object;
+ }
+
+ LOperand* context() const { return inputs_[0]; }
+ LOperand* object() const { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(ArrayShift, "array-shift")
+ DECLARE_HYDROGEN_ACCESSOR(ArrayShift)
+};
+
+
+class LTrapAllocationMemento V8_FINAL : public LTemplateInstruction<0, 1, 1> {
+ public:
+ LTrapAllocationMemento(LOperand* object,
+ LOperand* temp) {
+ inputs_[0] = object;
+ temps_[0] = temp;
+ }
+
+ LOperand* object() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(TrapAllocationMemento,
+ "trap-allocation-memento")
+};
+
+
+class LStringAdd V8_FINAL : public LTemplateInstruction<1, 3, 0> {
+ public:
+ LStringAdd(LOperand* context, LOperand* left, LOperand* right) {
+ inputs_[0] = context;
+ inputs_[1] = left;
+ inputs_[2] = right;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* left() { return inputs_[1]; }
+ LOperand* right() { return inputs_[2]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(StringAdd, "string-add")
+ DECLARE_HYDROGEN_ACCESSOR(StringAdd)
+};
+
+
+class LStringCharCodeAt V8_FINAL : public LTemplateInstruction<1, 3, 0> {
+ public:
+ LStringCharCodeAt(LOperand* context, LOperand* string, LOperand* index) {
+ inputs_[0] = context;
+ inputs_[1] = string;
+ inputs_[2] = index;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* string() { return inputs_[1]; }
+ LOperand* index() { return inputs_[2]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(StringCharCodeAt, "string-char-code-at")
+ DECLARE_HYDROGEN_ACCESSOR(StringCharCodeAt)
+};
+
+
+class LStringCharFromCode V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LStringCharFromCode(LOperand* context, LOperand* char_code) {
+ inputs_[0] = context;
+ inputs_[1] = char_code;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* char_code() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(StringCharFromCode, "string-char-from-code")
+ DECLARE_HYDROGEN_ACCESSOR(StringCharFromCode)
+};
+
+
+class LCheckValue V8_FINAL : public LTemplateInstruction<0, 1, 0> {
+ public:
+ explicit LCheckValue(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(CheckValue, "check-value")
+ DECLARE_HYDROGEN_ACCESSOR(CheckValue)
+};
+
+
+class LCheckInstanceType V8_FINAL : public LTemplateInstruction<0, 1, 1> {
+ public:
+ LCheckInstanceType(LOperand* value, LOperand* temp) {
+ inputs_[0] = value;
+ temps_[0] = temp;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(CheckInstanceType, "check-instance-type")
+ DECLARE_HYDROGEN_ACCESSOR(CheckInstanceType)
+};
+
+
+class LCheckMaps V8_FINAL : public LTemplateInstruction<0, 1, 0> {
+ public:
+ explicit LCheckMaps(LOperand* value = NULL) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(CheckMaps, "check-maps")
+ DECLARE_HYDROGEN_ACCESSOR(CheckMaps)
+};
+
+
+class LCheckSmi V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LCheckSmi(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(CheckSmi, "check-smi")
+};
+
+
+class LClampDToUint8 V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LClampDToUint8(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* unclamped() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(ClampDToUint8, "clamp-d-to-uint8")
+};
+
+
+class LClampIToUint8 V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LClampIToUint8(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* unclamped() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(ClampIToUint8, "clamp-i-to-uint8")
+};
+
+
+// Clamps a tagged value to uint8, without using SSE2 instructions.
+class LClampTToUint8NoSSE2 V8_FINAL : public LTemplateInstruction<1, 1, 3> {
+ public:
+ LClampTToUint8NoSSE2(LOperand* unclamped,
+ LOperand* temp1,
+ LOperand* temp2,
+ LOperand* temp3) {
+ inputs_[0] = unclamped;
+ temps_[0] = temp1;
+ temps_[1] = temp2;
+ temps_[2] = temp3;
+ }
+
+ LOperand* unclamped() { return inputs_[0]; }
+ LOperand* scratch() { return temps_[0]; }
+ LOperand* scratch2() { return temps_[1]; }
+ LOperand* scratch3() { return temps_[2]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(ClampTToUint8NoSSE2,
+ "clamp-t-to-uint8-nosse2")
+ DECLARE_HYDROGEN_ACCESSOR(UnaryOperation)
+};
+
+
+class LCheckNonSmi V8_FINAL : public LTemplateInstruction<0, 1, 0> {
+ public:
+ explicit LCheckNonSmi(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(CheckNonSmi, "check-non-smi")
+ DECLARE_HYDROGEN_ACCESSOR(CheckHeapObject)
+};
+
+
+class LDoubleBits V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LDoubleBits(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(DoubleBits, "double-bits")
+ DECLARE_HYDROGEN_ACCESSOR(DoubleBits)
+};
+
+
+class LConstructDouble V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LConstructDouble(LOperand* hi, LOperand* lo) {
+ inputs_[0] = hi;
+ inputs_[1] = lo;
+ }
+
+ LOperand* hi() { return inputs_[0]; }
+ LOperand* lo() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(ConstructDouble, "construct-double")
+};
+
+
+class LAllocate V8_FINAL : public LTemplateInstruction<1, 2, 1> {
+ public:
+ LAllocate(LOperand* context, LOperand* size, LOperand* temp) {
+ inputs_[0] = context;
+ inputs_[1] = size;
+ temps_[0] = temp;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* size() { return inputs_[1]; }
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(Allocate, "allocate")
+ DECLARE_HYDROGEN_ACCESSOR(Allocate)
+};
+
+
+class LRegExpLiteral V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LRegExpLiteral(LOperand* context) {
+ inputs_[0] = context;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(RegExpLiteral, "regexp-literal")
+ DECLARE_HYDROGEN_ACCESSOR(RegExpLiteral)
+};
+
+
+class LFunctionLiteral V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LFunctionLiteral(LOperand* context) {
+ inputs_[0] = context;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(FunctionLiteral, "function-literal")
+ DECLARE_HYDROGEN_ACCESSOR(FunctionLiteral)
+};
+
+
+class LToFastProperties V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LToFastProperties(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(ToFastProperties, "to-fast-properties")
+ DECLARE_HYDROGEN_ACCESSOR(ToFastProperties)
+};
+
+
+class LTypeof V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LTypeof(LOperand* context, LOperand* value) {
+ inputs_[0] = context;
+ inputs_[1] = value;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* value() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(Typeof, "typeof")
+};
+
+
+class LTypeofIsAndBranch V8_FINAL : public LControlInstruction<1, 0> {
+ public:
+ explicit LTypeofIsAndBranch(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(TypeofIsAndBranch, "typeof-is-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(TypeofIsAndBranch)
+
+ Handle<String> type_literal() { return hydrogen()->type_literal(); }
+
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+};
+
+
+class LOsrEntry V8_FINAL : public LTemplateInstruction<0, 0, 0> {
+ public:
+ virtual bool HasInterestingComment(LCodeGen* gen) const V8_OVERRIDE {
+ return false;
+ }
+ DECLARE_CONCRETE_INSTRUCTION(OsrEntry, "osr-entry")
+};
+
+
+class LStackCheck V8_FINAL : public LTemplateInstruction<0, 1, 0> {
+ public:
+ explicit LStackCheck(LOperand* context) {
+ inputs_[0] = context;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(StackCheck, "stack-check")
+ DECLARE_HYDROGEN_ACCESSOR(StackCheck)
+
+ Label* done_label() { return &done_label_; }
+
+ private:
+ Label done_label_;
+};
+
+
+class LForInPrepareMap V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LForInPrepareMap(LOperand* context, LOperand* object) {
+ inputs_[0] = context;
+ inputs_[1] = object;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* object() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(ForInPrepareMap, "for-in-prepare-map")
+};
+
+
+class LForInCacheArray V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LForInCacheArray(LOperand* map) {
+ inputs_[0] = map;
+ }
+
+ LOperand* map() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(ForInCacheArray, "for-in-cache-array")
+
+ int idx() {
+ return HForInCacheArray::cast(this->hydrogen_value())->idx();
+ }
+};
+
+
+class LCheckMapValue V8_FINAL : public LTemplateInstruction<0, 2, 0> {
+ public:
+ LCheckMapValue(LOperand* value, LOperand* map) {
+ inputs_[0] = value;
+ inputs_[1] = map;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+ LOperand* map() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(CheckMapValue, "check-map-value")
+};
+
+
+class LLoadFieldByIndex V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LLoadFieldByIndex(LOperand* object, LOperand* index) {
+ inputs_[0] = object;
+ inputs_[1] = index;
+ }
+
+ LOperand* object() { return inputs_[0]; }
+ LOperand* index() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(LoadFieldByIndex, "load-field-by-index")
+};
+
+
+class LChunkBuilder;
+class LPlatformChunk V8_FINAL : public LChunk {
+ public:
+ LPlatformChunk(CompilationInfo* info, HGraph* graph)
+ : LChunk(info, graph),
+ num_double_slots_(0) { }
+
+ int GetNextSpillIndex(RegisterKind kind);
+ LOperand* GetNextSpillSlot(RegisterKind kind);
+
+ int num_double_slots() const { return num_double_slots_; }
+
+ private:
+ int num_double_slots_;
+};
+
+
+class LChunkBuilder V8_FINAL : public LChunkBuilderBase {
+ public:
+ LChunkBuilder(CompilationInfo* info, HGraph* graph, LAllocator* allocator)
+ : LChunkBuilderBase(graph->zone()),
+ chunk_(NULL),
+ info_(info),
+ graph_(graph),
+ status_(UNUSED),
+ current_instruction_(NULL),
+ current_block_(NULL),
+ next_block_(NULL),
+ allocator_(allocator) { }
+
+ Isolate* isolate() const { return graph_->isolate(); }
+
+ // Build the sequence for the graph.
+ LPlatformChunk* Build();
+
+ LInstruction* CheckElideControlInstruction(HControlInstruction* instr);
+
+ // Declare methods that deal with the individual node types.
+#define DECLARE_DO(type) LInstruction* Do##type(H##type* node);
+ HYDROGEN_CONCRETE_INSTRUCTION_LIST(DECLARE_DO)
+#undef DECLARE_DO
+
+ LInstruction* DoMathFloor(HUnaryMathOperation* instr);
+ LInstruction* DoMathRound(HUnaryMathOperation* instr);
+ LInstruction* DoMathAbs(HUnaryMathOperation* instr);
+ LInstruction* DoMathLog(HUnaryMathOperation* instr);
+ LInstruction* DoMathExp(HUnaryMathOperation* instr);
+ LInstruction* DoMathSqrt(HUnaryMathOperation* instr);
+ LInstruction* DoMathPowHalf(HUnaryMathOperation* instr);
+ LInstruction* DoMathClz32(HUnaryMathOperation* instr);
+ LInstruction* DoDivByPowerOf2I(HDiv* instr);
+ LInstruction* DoDivByConstI(HDiv* instr);
+ LInstruction* DoDivI(HDiv* instr);
+ LInstruction* DoModByPowerOf2I(HMod* instr);
+ LInstruction* DoModByConstI(HMod* instr);
+ LInstruction* DoModI(HMod* instr);
+ LInstruction* DoFlooringDivByPowerOf2I(HMathFloorOfDiv* instr);
+ LInstruction* DoFlooringDivByConstI(HMathFloorOfDiv* instr);
+ LInstruction* DoFlooringDivI(HMathFloorOfDiv* instr);
+
+ private:
+ enum Status {
+ UNUSED,
+ BUILDING,
+ DONE,
+ ABORTED
+ };
+
+ LPlatformChunk* chunk() const { return chunk_; }
+ CompilationInfo* info() const { return info_; }
+ HGraph* graph() const { return graph_; }
+
+ bool is_unused() const { return status_ == UNUSED; }
+ bool is_building() const { return status_ == BUILDING; }
+ bool is_done() const { return status_ == DONE; }
+ bool is_aborted() const { return status_ == ABORTED; }
+
+ void Abort(BailoutReason reason);
+
+ // Methods for getting operands for Use / Define / Temp.
+ LUnallocated* ToUnallocated(Register reg);
+ LUnallocated* ToUnallocated(X87Register reg);
+
+ // Methods for setting up define-use relationships.
+ MUST_USE_RESULT LOperand* Use(HValue* value, LUnallocated* operand);
+ MUST_USE_RESULT LOperand* UseFixed(HValue* value, Register fixed_register);
+
+  // A value that is guaranteed to be allocated to a register.
+  // An operand created by UseRegister is guaranteed to be live until the end
+  // of the instruction. This means that the register allocator will not reuse
+  // its register for any other operand inside the instruction.
+  // An operand created by UseRegisterAtStart is guaranteed to be live only at
+  // instruction start. The register allocator is free to assign the same
+  // register to some other operand used inside the instruction (i.e. a
+  // temporary or an output).
+ MUST_USE_RESULT LOperand* UseRegister(HValue* value);
+ MUST_USE_RESULT LOperand* UseRegisterAtStart(HValue* value);
+
+ // An input operand in a register that may be trashed.
+ MUST_USE_RESULT LOperand* UseTempRegister(HValue* value);
+
+ // An input operand in a register or stack slot.
+ MUST_USE_RESULT LOperand* Use(HValue* value);
+ MUST_USE_RESULT LOperand* UseAtStart(HValue* value);
+
+ // An input operand in a register, stack slot or a constant operand.
+ MUST_USE_RESULT LOperand* UseOrConstant(HValue* value);
+ MUST_USE_RESULT LOperand* UseOrConstantAtStart(HValue* value);
+
+ // An input operand in a fixed register or a constant operand.
+ MUST_USE_RESULT LOperand* UseFixedOrConstant(HValue* value,
+ Register fixed_register);
+
+ // An input operand in a register or a constant operand.
+ MUST_USE_RESULT LOperand* UseRegisterOrConstant(HValue* value);
+ MUST_USE_RESULT LOperand* UseRegisterOrConstantAtStart(HValue* value);
+
+ // An input operand in a constant operand.
+ MUST_USE_RESULT LOperand* UseConstant(HValue* value);
+
+ // An input operand in register, stack slot or a constant operand.
+ // Will not be moved to a register even if one is freely available.
+ virtual MUST_USE_RESULT LOperand* UseAny(HValue* value) V8_OVERRIDE;
+
+ // Temporary operand that must be in a register.
+ MUST_USE_RESULT LUnallocated* TempRegister();
+ MUST_USE_RESULT LOperand* FixedTemp(Register reg);
+
+ // Methods for setting up define-use relationships.
+ // Return the same instruction that they are passed.
+ LInstruction* Define(LTemplateResultInstruction<1>* instr,
+ LUnallocated* result);
+ LInstruction* DefineAsRegister(LTemplateResultInstruction<1>* instr);
+ LInstruction* DefineAsSpilled(LTemplateResultInstruction<1>* instr,
+ int index);
+ LInstruction* DefineSameAsFirst(LTemplateResultInstruction<1>* instr);
+ LInstruction* DefineFixed(LTemplateResultInstruction<1>* instr,
+ Register reg);
+ LInstruction* DefineX87TOS(LTemplateResultInstruction<1>* instr);
+ // Assigns an environment to an instruction. An instruction which can
+ // deoptimize must have an environment.
+ LInstruction* AssignEnvironment(LInstruction* instr);
+ // Assigns a pointer map to an instruction. An instruction which can
+ // trigger a GC or a lazy deoptimization must have a pointer map.
+ LInstruction* AssignPointerMap(LInstruction* instr);
+
+ enum CanDeoptimize { CAN_DEOPTIMIZE_EAGERLY, CANNOT_DEOPTIMIZE_EAGERLY };
+
+ LOperand* GetSeqStringSetCharOperand(HSeqStringSetChar* instr);
+
+ // Marks a call for the register allocator. Assigns a pointer map to
+ // support GC and lazy deoptimization. Assigns an environment to support
+ // eager deoptimization if CAN_DEOPTIMIZE_EAGERLY.
+ LInstruction* MarkAsCall(
+ LInstruction* instr,
+ HInstruction* hinstr,
+ CanDeoptimize can_deoptimize = CANNOT_DEOPTIMIZE_EAGERLY);
+
+ void VisitInstruction(HInstruction* current);
+ void AddInstruction(LInstruction* instr, HInstruction* current);
+
+ void DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block);
+ LInstruction* DoShift(Token::Value op, HBitwiseBinaryOperation* instr);
+ LInstruction* DoArithmeticD(Token::Value op,
+ HArithmeticBinaryOperation* instr);
+ LInstruction* DoArithmeticT(Token::Value op,
+ HBinaryOperation* instr);
+
+ LOperand* GetStoreKeyedValueOperand(HStoreKeyed* instr);
+
+ LPlatformChunk* chunk_;
+ CompilationInfo* info_;
+ HGraph* const graph_;
+ Status status_;
+ HInstruction* current_instruction_;
+ HBasicBlock* current_block_;
+ HBasicBlock* next_block_;
+ LAllocator* allocator_;
+
+ DISALLOW_COPY_AND_ASSIGN(LChunkBuilder);
+};
+
+#undef DECLARE_HYDROGEN_ACCESSOR
+#undef DECLARE_CONCRETE_INSTRUCTION
+
+} } // namespace v8::internal
+
+#endif // V8_X87_LITHIUM_X87_H_
--- /dev/null
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "v8.h"
+
+#if V8_TARGET_ARCH_X87
+
+#include "bootstrapper.h"
+#include "codegen.h"
+#include "cpu-profiler.h"
+#include "debug.h"
+#include "isolate-inl.h"
+#include "runtime.h"
+#include "serialize.h"
+
+namespace v8 {
+namespace internal {
+
+// -------------------------------------------------------------------------
+// MacroAssembler implementation.
+
+MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size)
+ : Assembler(arg_isolate, buffer, size),
+ generating_stub_(false),
+ has_frame_(false) {
+ if (isolate() != NULL) {
+ // TODO(titzer): should we just use a null handle here instead?
+ code_object_ = Handle<Object>(isolate()->heap()->undefined_value(),
+ isolate());
+ }
+}
+
+
+void MacroAssembler::Load(Register dst, const Operand& src, Representation r) {
+ ASSERT(!r.IsDouble());
+ if (r.IsInteger8()) {
+ movsx_b(dst, src);
+ } else if (r.IsUInteger8()) {
+ movzx_b(dst, src);
+ } else if (r.IsInteger16()) {
+ movsx_w(dst, src);
+ } else if (r.IsUInteger16()) {
+ movzx_w(dst, src);
+ } else {
+ mov(dst, src);
+ }
+}
+
+
+void MacroAssembler::Store(Register src, const Operand& dst, Representation r) {
+ ASSERT(!r.IsDouble());
+ if (r.IsInteger8() || r.IsUInteger8()) {
+ mov_b(dst, src);
+ } else if (r.IsInteger16() || r.IsUInteger16()) {
+ mov_w(dst, src);
+ } else {
+ mov(dst, src);
+ }
+}
+
+
+void MacroAssembler::LoadRoot(Register destination, Heap::RootListIndex index) {
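+  // Roots that never change after initialization can be embedded as
+  // constants; all other roots are loaded indirectly from the roots array,
+  // using the index as a scaled offset.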
+ if (isolate()->heap()->RootCanBeTreatedAsConstant(index)) {
+ Handle<Object> value(&isolate()->heap()->roots_array_start()[index]);
+ mov(destination, value);
+ return;
+ }
+ ExternalReference roots_array_start =
+ ExternalReference::roots_array_start(isolate());
+ mov(destination, Immediate(index));
+ mov(destination, Operand::StaticArray(destination,
+ times_pointer_size,
+ roots_array_start));
+}
+
+
+void MacroAssembler::StoreRoot(Register source,
+ Register scratch,
+ Heap::RootListIndex index) {
+ ASSERT(Heap::RootCanBeWrittenAfterInitialization(index));
+ ExternalReference roots_array_start =
+ ExternalReference::roots_array_start(isolate());
+ mov(scratch, Immediate(index));
+ mov(Operand::StaticArray(scratch, times_pointer_size, roots_array_start),
+ source);
+}
+
+
+void MacroAssembler::CompareRoot(Register with,
+ Register scratch,
+ Heap::RootListIndex index) {
+ ExternalReference roots_array_start =
+ ExternalReference::roots_array_start(isolate());
+ mov(scratch, Immediate(index));
+ cmp(with, Operand::StaticArray(scratch,
+ times_pointer_size,
+ roots_array_start));
+}
+
+
+void MacroAssembler::CompareRoot(Register with, Heap::RootListIndex index) {
+ ASSERT(isolate()->heap()->RootCanBeTreatedAsConstant(index));
+ Handle<Object> value(&isolate()->heap()->roots_array_start()[index]);
+ cmp(with, value);
+}
+
+
+void MacroAssembler::CompareRoot(const Operand& with,
+ Heap::RootListIndex index) {
+ ASSERT(isolate()->heap()->RootCanBeTreatedAsConstant(index));
+ Handle<Object> value(&isolate()->heap()->roots_array_start()[index]);
+ cmp(with, value);
+}
+
+
+void MacroAssembler::InNewSpace(
+ Register object,
+ Register scratch,
+ Condition cc,
+ Label* condition_met,
+ Label::Distance condition_met_distance) {
+ ASSERT(cc == equal || cc == not_equal);
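+  // Mask the low bits of the object's address to obtain its page header, then
+  // test the page flags for the from-space/to-space bits that are set on
+  // new-space pages.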
+ if (scratch.is(object)) {
+ and_(scratch, Immediate(~Page::kPageAlignmentMask));
+ } else {
+ mov(scratch, Immediate(~Page::kPageAlignmentMask));
+ and_(scratch, object);
+ }
+ // Check that we can use a test_b.
+ ASSERT(MemoryChunk::IN_FROM_SPACE < 8);
+ ASSERT(MemoryChunk::IN_TO_SPACE < 8);
+ int mask = (1 << MemoryChunk::IN_FROM_SPACE)
+ | (1 << MemoryChunk::IN_TO_SPACE);
+ // If non-zero, the page belongs to new-space.
+ test_b(Operand(scratch, MemoryChunk::kFlagsOffset),
+ static_cast<uint8_t>(mask));
+ j(cc, condition_met, condition_met_distance);
+}
+
+
+void MacroAssembler::RememberedSetHelper(
+ Register object, // Only used for debug checks.
+ Register addr,
+ Register scratch,
+ MacroAssembler::RememberedSetFinalAction and_then) {
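+  // Records |addr| in the store buffer. If the buffer overflows, the
+  // StoreBufferOverflowStub is called; depending on |and_then| the code then
+  // either returns or falls through.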
+ Label done;
+ if (emit_debug_code()) {
+ Label ok;
+ JumpIfNotInNewSpace(object, scratch, &ok, Label::kNear);
+ int3();
+ bind(&ok);
+ }
+ // Load store buffer top.
+ ExternalReference store_buffer =
+ ExternalReference::store_buffer_top(isolate());
+ mov(scratch, Operand::StaticVariable(store_buffer));
+ // Store pointer to buffer.
+ mov(Operand(scratch, 0), addr);
+ // Increment buffer top.
+ add(scratch, Immediate(kPointerSize));
+ // Write back new top of buffer.
+ mov(Operand::StaticVariable(store_buffer), scratch);
+ // Call stub on end of buffer.
+ // Check for end of buffer.
+ test(scratch, Immediate(StoreBuffer::kStoreBufferOverflowBit));
+ if (and_then == kReturnAtEnd) {
+ Label buffer_overflowed;
+ j(not_equal, &buffer_overflowed, Label::kNear);
+ ret(0);
+ bind(&buffer_overflowed);
+ } else {
+ ASSERT(and_then == kFallThroughAtEnd);
+ j(equal, &done, Label::kNear);
+ }
+ StoreBufferOverflowStub store_buffer_overflow =
+ StoreBufferOverflowStub(isolate());
+ CallStub(&store_buffer_overflow);
+ if (and_then == kReturnAtEnd) {
+ ret(0);
+ } else {
+ ASSERT(and_then == kFallThroughAtEnd);
+ bind(&done);
+ }
+}
+
+
+void MacroAssembler::ClampUint8(Register reg) {
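+  // Clamps the value in |reg| to the range [0, 255]: values that already fit
+  // in a byte are left unchanged, negative values become 0 and values above
+  // 255 become 255 (e.g. -5 -> 0, 300 -> 255).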
+ Label done;
+ test(reg, Immediate(0xFFFFFF00));
+ j(zero, &done, Label::kNear);
+ setcc(negative, reg); // 1 if negative, 0 if positive.
+ dec_b(reg); // 0 if negative, 255 if positive.
+ bind(&done);
+}
+
+
+void MacroAssembler::SlowTruncateToI(Register result_reg,
+ Register input_reg,
+ int offset) {
+ DoubleToIStub stub(isolate(), input_reg, result_reg, offset, true);
+ call(stub.GetCode(), RelocInfo::CODE_TARGET);
+}
+
+
+void MacroAssembler::TruncateX87TOSToI(Register result_reg) {
+ sub(esp, Immediate(kDoubleSize));
+ fst_d(MemOperand(esp, 0));
+ SlowTruncateToI(result_reg, esp, 0);
+ add(esp, Immediate(kDoubleSize));
+}
+
+
+void MacroAssembler::X87TOSToI(Register result_reg,
+ MinusZeroMode minus_zero_mode,
+ Label* conversion_failed,
+ Label::Distance dst) {
+ Label done;
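+  // Duplicate the top of the FPU stack, store it as an int32 and reload that
+  // int32 as a double. Comparing the reloaded value against the original
+  // detects conversions that lost precision; NaN is detected via the parity
+  // flag.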
+ sub(esp, Immediate(kPointerSize));
+ fld(0);
+ fist_s(MemOperand(esp, 0));
+ fild_s(MemOperand(esp, 0));
+ pop(result_reg);
+ FCmp();
+ j(not_equal, conversion_failed, dst);
+ j(parity_even, conversion_failed, dst);
+ if (minus_zero_mode == FAIL_ON_MINUS_ZERO) {
+ test(result_reg, Operand(result_reg));
+ j(not_zero, &done, Label::kNear);
+ // To check for minus zero, we load the value again as float, and check
+ // if that is still 0.
+ sub(esp, Immediate(kPointerSize));
+ fst_s(MemOperand(esp, 0));
+ pop(result_reg);
+ test(result_reg, Operand(result_reg));
+ j(not_zero, conversion_failed, dst);
+ }
+ bind(&done);
+}
+
+
+void MacroAssembler::TruncateHeapNumberToI(Register result_reg,
+ Register input_reg) {
+ Label done, slow_case;
+
+ SlowTruncateToI(result_reg, input_reg);
+ bind(&done);
+}
+
+
+void MacroAssembler::TaggedToI(Register result_reg,
+ Register input_reg,
+ MinusZeroMode minus_zero_mode,
+ Label* lost_precision) {
+ Label done;
+
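+  // The input must be a heap number; any other object bails out through
+  // |lost_precision|.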
+ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
+ isolate()->factory()->heap_number_map());
+ j(not_equal, lost_precision, Label::kNear);
+
+ // TODO(olivf) Converting a number on the fpu is actually quite slow. We
+ // should first try a fast conversion and then bailout to this slow case.
+ Label lost_precision_pop, zero_check;
+ Label* lost_precision_int = (minus_zero_mode == FAIL_ON_MINUS_ZERO)
+ ? &lost_precision_pop : lost_precision;
+ sub(esp, Immediate(kPointerSize));
+ fld_d(FieldOperand(input_reg, HeapNumber::kValueOffset));
+ if (minus_zero_mode == FAIL_ON_MINUS_ZERO) fld(0);
+ fist_s(MemOperand(esp, 0));
+ fild_s(MemOperand(esp, 0));
+ FCmp();
+ pop(result_reg);
+ j(not_equal, lost_precision_int, Label::kNear);
+ j(parity_even, lost_precision_int, Label::kNear); // NaN.
+ if (minus_zero_mode == FAIL_ON_MINUS_ZERO) {
+ test(result_reg, Operand(result_reg));
+ j(zero, &zero_check, Label::kNear);
+ fstp(0);
+ jmp(&done, Label::kNear);
+ bind(&zero_check);
+ // To check for minus zero, we load the value again as float, and check
+ // if that is still 0.
+ sub(esp, Immediate(kPointerSize));
+ fstp_s(Operand(esp, 0));
+ pop(result_reg);
+ test(result_reg, Operand(result_reg));
+ j(zero, &done, Label::kNear);
+ jmp(lost_precision, Label::kNear);
+
+ bind(&lost_precision_pop);
+ fstp(0);
+ jmp(lost_precision, Label::kNear);
+ }
+ bind(&done);
+}
+
+
+void MacroAssembler::LoadUint32NoSSE2(Register src) {
+ Label done;
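+  // fild_s interprets the 32-bit value as a signed integer. If the sign bit
+  // is set, add 2^32 (the uint32 bias) to recover the unsigned value.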
+ push(src);
+ fild_s(Operand(esp, 0));
+ cmp(src, Immediate(0));
+ j(not_sign, &done, Label::kNear);
+ ExternalReference uint32_bias =
+ ExternalReference::address_of_uint32_bias();
+ fld_d(Operand::StaticVariable(uint32_bias));
+ faddp(1);
+ bind(&done);
+ add(esp, Immediate(kPointerSize));
+}
+
+
+void MacroAssembler::RecordWriteArray(Register object,
+ Register value,
+ Register index,
+ RememberedSetAction remembered_set_action,
+ SmiCheck smi_check) {
+ // First, check if a write barrier is even needed. The tests below
+ // catch stores of Smis.
+ Label done;
+
+ // Skip barrier if writing a smi.
+ if (smi_check == INLINE_SMI_CHECK) {
+ ASSERT_EQ(0, kSmiTag);
+ test(value, Immediate(kSmiTagMask));
+ j(zero, &done);
+ }
+
+ // Array access: calculate the destination address in the same manner as
+ // KeyedStoreIC::GenerateGeneric. Multiply a smi by 2 to get an offset
+ // into an array of words.
+ Register dst = index;
+ lea(dst, Operand(object, index, times_half_pointer_size,
+ FixedArray::kHeaderSize - kHeapObjectTag));
+
+ RecordWrite(
+ object, dst, value, remembered_set_action, OMIT_SMI_CHECK);
+
+ bind(&done);
+
+ // Clobber clobbered input registers when running with the debug-code flag
+ // turned on to provoke errors.
+ if (emit_debug_code()) {
+ mov(value, Immediate(BitCast<int32_t>(kZapValue)));
+ mov(index, Immediate(BitCast<int32_t>(kZapValue)));
+ }
+}
+
+
+void MacroAssembler::RecordWriteField(
+ Register object,
+ int offset,
+ Register value,
+ Register dst,
+ RememberedSetAction remembered_set_action,
+ SmiCheck smi_check) {
+ // First, check if a write barrier is even needed. The tests below
+ // catch stores of Smis.
+ Label done;
+
+ // Skip barrier if writing a smi.
+ if (smi_check == INLINE_SMI_CHECK) {
+ JumpIfSmi(value, &done, Label::kNear);
+ }
+
+  // Although the object register is tagged, the offset is relative to the
+  // start of the object, so the offset must be a multiple of kPointerSize.
+ ASSERT(IsAligned(offset, kPointerSize));
+
+ lea(dst, FieldOperand(object, offset));
+ if (emit_debug_code()) {
+ Label ok;
+ test_b(dst, (1 << kPointerSizeLog2) - 1);
+ j(zero, &ok, Label::kNear);
+ int3();
+ bind(&ok);
+ }
+
+ RecordWrite(
+ object, dst, value, remembered_set_action, OMIT_SMI_CHECK);
+
+ bind(&done);
+
+ // Clobber clobbered input registers when running with the debug-code flag
+ // turned on to provoke errors.
+ if (emit_debug_code()) {
+ mov(value, Immediate(BitCast<int32_t>(kZapValue)));
+ mov(dst, Immediate(BitCast<int32_t>(kZapValue)));
+ }
+}
+
+
+void MacroAssembler::RecordWriteForMap(
+ Register object,
+ Handle<Map> map,
+ Register scratch1,
+ Register scratch2) {
+ Label done;
+
+ Register address = scratch1;
+ Register value = scratch2;
+ if (emit_debug_code()) {
+ Label ok;
+ lea(address, FieldOperand(object, HeapObject::kMapOffset));
+ test_b(address, (1 << kPointerSizeLog2) - 1);
+ j(zero, &ok, Label::kNear);
+ int3();
+ bind(&ok);
+ }
+
+ ASSERT(!object.is(value));
+ ASSERT(!object.is(address));
+ ASSERT(!value.is(address));
+ AssertNotSmi(object);
+
+ if (!FLAG_incremental_marking) {
+ return;
+ }
+
+ // Count number of write barriers in generated code.
+ isolate()->counters()->write_barriers_static()->Increment();
+ IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1);
+
+  // A single check of the map page's interesting flag suffices, since it is
+ // only set during incremental collection, and then it's also guaranteed that
+ // the from object's page's interesting flag is also set. This optimization
+ // relies on the fact that maps can never be in new space.
+ ASSERT(!isolate()->heap()->InNewSpace(*map));
+ CheckPageFlagForMap(map,
+ MemoryChunk::kPointersToHereAreInterestingMask,
+ zero,
+ &done,
+ Label::kNear);
+
+  // Delay the initialization of |address| and |value| for the stub until it's
+  // known that they will be needed. Up until this point their values are not
+ // needed since they are embedded in the operands of instructions that need
+ // them.
+ lea(address, FieldOperand(object, HeapObject::kMapOffset));
+ mov(value, Immediate(map));
+ RecordWriteStub stub(isolate(), object, value, address, OMIT_REMEMBERED_SET);
+ CallStub(&stub);
+
+ bind(&done);
+
+ // Clobber clobbered input registers when running with the debug-code flag
+ // turned on to provoke errors.
+ if (emit_debug_code()) {
+ mov(value, Immediate(BitCast<int32_t>(kZapValue)));
+ mov(scratch1, Immediate(BitCast<int32_t>(kZapValue)));
+ mov(scratch2, Immediate(BitCast<int32_t>(kZapValue)));
+ }
+}
+
+
+void MacroAssembler::RecordWrite(Register object,
+ Register address,
+ Register value,
+ RememberedSetAction remembered_set_action,
+ SmiCheck smi_check) {
+ ASSERT(!object.is(value));
+ ASSERT(!object.is(address));
+ ASSERT(!value.is(address));
+ AssertNotSmi(object);
+
+ if (remembered_set_action == OMIT_REMEMBERED_SET &&
+ !FLAG_incremental_marking) {
+ return;
+ }
+
+ if (emit_debug_code()) {
+ Label ok;
+ cmp(value, Operand(address, 0));
+ j(equal, &ok, Label::kNear);
+ int3();
+ bind(&ok);
+ }
+
+ // Count number of write barriers in generated code.
+ isolate()->counters()->write_barriers_static()->Increment();
+ IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1);
+
+ // First, check if a write barrier is even needed. The tests below
+ // catch stores of Smis and stores into young gen.
+ Label done;
+
+ if (smi_check == INLINE_SMI_CHECK) {
+ // Skip barrier if writing a smi.
+ JumpIfSmi(value, &done, Label::kNear);
+ }
+
+ CheckPageFlag(value,
+ value, // Used as scratch.
+ MemoryChunk::kPointersToHereAreInterestingMask,
+ zero,
+ &done,
+ Label::kNear);
+ CheckPageFlag(object,
+ value, // Used as scratch.
+ MemoryChunk::kPointersFromHereAreInterestingMask,
+ zero,
+ &done,
+ Label::kNear);
+
+ RecordWriteStub stub(isolate(), object, value, address,
+ remembered_set_action);
+ CallStub(&stub);
+
+ bind(&done);
+
+ // Clobber clobbered registers when running with the debug-code flag
+ // turned on to provoke errors.
+ if (emit_debug_code()) {
+ mov(address, Immediate(BitCast<int32_t>(kZapValue)));
+ mov(value, Immediate(BitCast<int32_t>(kZapValue)));
+ }
+}
+
+
+void MacroAssembler::DebugBreak() {
+ Move(eax, Immediate(0));
+ mov(ebx, Immediate(ExternalReference(Runtime::kDebugBreak, isolate())));
+ CEntryStub ces(isolate(), 1);
+ call(ces.GetCode(), RelocInfo::DEBUG_BREAK);
+}
+
+
+bool MacroAssembler::IsUnsafeImmediate(const Immediate& x) {
+ static const int kMaxImmediateBits = 17;
+ if (!RelocInfo::IsNone(x.rmode_)) return false;
+ return !is_intn(x.x_, kMaxImmediateBits);
+}
+
+
+void MacroAssembler::SafeMove(Register dst, const Immediate& x) {
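+  // Large immediates are split using the JIT cookie so that attacker-chosen
+  // constants never appear verbatim in generated code: emit (x ^ cookie) and
+  // then xor the cookie back in.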
+ if (IsUnsafeImmediate(x) && jit_cookie() != 0) {
+ Move(dst, Immediate(x.x_ ^ jit_cookie()));
+ xor_(dst, jit_cookie());
+ } else {
+ Move(dst, x);
+ }
+}
+
+
+void MacroAssembler::SafePush(const Immediate& x) {
+ if (IsUnsafeImmediate(x) && jit_cookie() != 0) {
+ push(Immediate(x.x_ ^ jit_cookie()));
+ xor_(Operand(esp, 0), Immediate(jit_cookie()));
+ } else {
+ push(x);
+ }
+}
+
+
+void MacroAssembler::CmpObjectType(Register heap_object,
+ InstanceType type,
+ Register map) {
+ mov(map, FieldOperand(heap_object, HeapObject::kMapOffset));
+ CmpInstanceType(map, type);
+}
+
+
+void MacroAssembler::CmpInstanceType(Register map, InstanceType type) {
+ cmpb(FieldOperand(map, Map::kInstanceTypeOffset),
+ static_cast<int8_t>(type));
+}
+
+
+void MacroAssembler::CheckFastElements(Register map,
+ Label* fail,
+ Label::Distance distance) {
+ STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
+ STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
+ STATIC_ASSERT(FAST_ELEMENTS == 2);
+ STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
+ cmpb(FieldOperand(map, Map::kBitField2Offset),
+ Map::kMaximumBitField2FastHoleyElementValue);
+ j(above, fail, distance);
+}
+
+
+void MacroAssembler::CheckFastObjectElements(Register map,
+ Label* fail,
+ Label::Distance distance) {
+ STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
+ STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
+ STATIC_ASSERT(FAST_ELEMENTS == 2);
+ STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
+ cmpb(FieldOperand(map, Map::kBitField2Offset),
+ Map::kMaximumBitField2FastHoleySmiElementValue);
+ j(below_equal, fail, distance);
+ cmpb(FieldOperand(map, Map::kBitField2Offset),
+ Map::kMaximumBitField2FastHoleyElementValue);
+ j(above, fail, distance);
+}
+
+
+void MacroAssembler::CheckFastSmiElements(Register map,
+ Label* fail,
+ Label::Distance distance) {
+ STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
+ STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
+ cmpb(FieldOperand(map, Map::kBitField2Offset),
+ Map::kMaximumBitField2FastHoleySmiElementValue);
+ j(above, fail, distance);
+}
+
+
+void MacroAssembler::StoreNumberToDoubleElements(
+ Register maybe_number,
+ Register elements,
+ Register key,
+ Register scratch,
+ Label* fail,
+ int elements_offset) {
+ Label smi_value, done, maybe_nan, not_nan, is_nan, have_double_value;
+ JumpIfSmi(maybe_number, &smi_value, Label::kNear);
+
+ CheckMap(maybe_number,
+ isolate()->factory()->heap_number_map(),
+ fail,
+ DONT_DO_SMI_CHECK);
+
+ // Double value, canonicalize NaN.
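+  // (The hole in a FixedDoubleArray is encoded as a special NaN bit pattern,
+  // so other NaN values must not be stored verbatim.)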
+ uint32_t offset = HeapNumber::kValueOffset + sizeof(kHoleNanLower32);
+ cmp(FieldOperand(maybe_number, offset),
+ Immediate(kNaNOrInfinityLowerBoundUpper32));
+ j(greater_equal, &maybe_nan, Label::kNear);
+
+ bind(¬_nan);
+ ExternalReference canonical_nan_reference =
+ ExternalReference::address_of_canonical_non_hole_nan();
+ fld_d(FieldOperand(maybe_number, HeapNumber::kValueOffset));
+ bind(&have_double_value);
+ fstp_d(FieldOperand(elements, key, times_4,
+ FixedDoubleArray::kHeaderSize - elements_offset));
+ jmp(&done);
+
+ bind(&maybe_nan);
+ // Could be NaN or Infinity. If fraction is not zero, it's NaN, otherwise
+ // it's an Infinity, and the non-NaN code path applies.
+ j(greater, &is_nan, Label::kNear);
+ cmp(FieldOperand(maybe_number, HeapNumber::kValueOffset), Immediate(0));
+ j(zero, ¬_nan);
+ bind(&is_nan);
+ fld_d(Operand::StaticVariable(canonical_nan_reference));
+ jmp(&have_double_value, Label::kNear);
+
+ bind(&smi_value);
+ // Value is a smi. Convert to a double and store.
+ // Preserve original value.
+ mov(scratch, maybe_number);
+ SmiUntag(scratch);
+ push(scratch);
+ fild_s(Operand(esp, 0));
+ pop(scratch);
+ fstp_d(FieldOperand(elements, key, times_4,
+ FixedDoubleArray::kHeaderSize - elements_offset));
+ bind(&done);
+}
+
+
+void MacroAssembler::CompareMap(Register obj, Handle<Map> map) {
+ cmp(FieldOperand(obj, HeapObject::kMapOffset), map);
+}
+
+
+void MacroAssembler::CheckMap(Register obj,
+ Handle<Map> map,
+ Label* fail,
+ SmiCheckType smi_check_type) {
+ if (smi_check_type == DO_SMI_CHECK) {
+ JumpIfSmi(obj, fail);
+ }
+
+ CompareMap(obj, map);
+ j(not_equal, fail);
+}
+
+
+void MacroAssembler::DispatchMap(Register obj,
+ Register unused,
+ Handle<Map> map,
+ Handle<Code> success,
+ SmiCheckType smi_check_type) {
+ Label fail;
+ if (smi_check_type == DO_SMI_CHECK) {
+ JumpIfSmi(obj, &fail);
+ }
+ cmp(FieldOperand(obj, HeapObject::kMapOffset), Immediate(map));
+ j(equal, success);
+
+ bind(&fail);
+}
+
+
+Condition MacroAssembler::IsObjectStringType(Register heap_object,
+ Register map,
+ Register instance_type) {
+ mov(map, FieldOperand(heap_object, HeapObject::kMapOffset));
+ movzx_b(instance_type, FieldOperand(map, Map::kInstanceTypeOffset));
+ STATIC_ASSERT(kNotStringTag != 0);
+ test(instance_type, Immediate(kIsNotStringMask));
+ return zero;
+}
+
+
+Condition MacroAssembler::IsObjectNameType(Register heap_object,
+ Register map,
+ Register instance_type) {
+ mov(map, FieldOperand(heap_object, HeapObject::kMapOffset));
+ movzx_b(instance_type, FieldOperand(map, Map::kInstanceTypeOffset));
+ cmpb(instance_type, static_cast<uint8_t>(LAST_NAME_TYPE));
+ return below_equal;
+}
+
+
+void MacroAssembler::IsObjectJSObjectType(Register heap_object,
+ Register map,
+ Register scratch,
+ Label* fail) {
+ mov(map, FieldOperand(heap_object, HeapObject::kMapOffset));
+ IsInstanceJSObjectType(map, scratch, fail);
+}
+
+
+void MacroAssembler::IsInstanceJSObjectType(Register map,
+ Register scratch,
+ Label* fail) {
+ movzx_b(scratch, FieldOperand(map, Map::kInstanceTypeOffset));
+ sub(scratch, Immediate(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
+ cmp(scratch,
+ LAST_NONCALLABLE_SPEC_OBJECT_TYPE - FIRST_NONCALLABLE_SPEC_OBJECT_TYPE);
+ j(above, fail);
+}
+
+
+void MacroAssembler::FCmp() {
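+  // Compare ST(0) with ST(1) and pop both, then copy the FPU status word into
+  // EFLAGS via AX so that ordinary conditional jumps can be used. eax is
+  // preserved around the transfer.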
+ fucompp();
+ push(eax);
+ fnstsw_ax();
+ sahf();
+ pop(eax);
+}
+
+
+void MacroAssembler::AssertNumber(Register object) {
+ if (emit_debug_code()) {
+ Label ok;
+ JumpIfSmi(object, &ok);
+ cmp(FieldOperand(object, HeapObject::kMapOffset),
+ isolate()->factory()->heap_number_map());
+ Check(equal, kOperandNotANumber);
+ bind(&ok);
+ }
+}
+
+
+void MacroAssembler::AssertSmi(Register object) {
+ if (emit_debug_code()) {
+ test(object, Immediate(kSmiTagMask));
+ Check(equal, kOperandIsNotASmi);
+ }
+}
+
+
+void MacroAssembler::AssertString(Register object) {
+ if (emit_debug_code()) {
+ test(object, Immediate(kSmiTagMask));
+ Check(not_equal, kOperandIsASmiAndNotAString);
+ push(object);
+ mov(object, FieldOperand(object, HeapObject::kMapOffset));
+ CmpInstanceType(object, FIRST_NONSTRING_TYPE);
+ pop(object);
+ Check(below, kOperandIsNotAString);
+ }
+}
+
+
+void MacroAssembler::AssertName(Register object) {
+ if (emit_debug_code()) {
+ test(object, Immediate(kSmiTagMask));
+ Check(not_equal, kOperandIsASmiAndNotAName);
+ push(object);
+ mov(object, FieldOperand(object, HeapObject::kMapOffset));
+ CmpInstanceType(object, LAST_NAME_TYPE);
+ pop(object);
+ Check(below_equal, kOperandIsNotAName);
+ }
+}
+
+
+void MacroAssembler::AssertUndefinedOrAllocationSite(Register object) {
+ if (emit_debug_code()) {
+ Label done_checking;
+ AssertNotSmi(object);
+ cmp(object, isolate()->factory()->undefined_value());
+ j(equal, &done_checking);
+ cmp(FieldOperand(object, 0),
+ Immediate(isolate()->factory()->allocation_site_map()));
+ Assert(equal, kExpectedUndefinedOrCell);
+ bind(&done_checking);
+ }
+}
+
+
+void MacroAssembler::AssertNotSmi(Register object) {
+ if (emit_debug_code()) {
+ test(object, Immediate(kSmiTagMask));
+ Check(not_equal, kOperandIsASmi);
+ }
+}
+
+
+void MacroAssembler::StubPrologue() {
+ push(ebp); // Caller's frame pointer.
+ mov(ebp, esp);
+ push(esi); // Callee's context.
+ push(Immediate(Smi::FromInt(StackFrame::STUB)));
+}
+
+
+void MacroAssembler::Prologue(bool code_pre_aging) {
+ PredictableCodeSizeScope predictible_code_size_scope(this,
+ kNoCodeAgeSequenceLength);
+ if (code_pre_aging) {
+ // Pre-age the code.
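+    // The call plus the trailing nops occupy exactly kNoCodeAgeSequenceLength
+    // bytes, so the code-aging machinery can later patch this prologue in
+    // place.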
+ call(isolate()->builtins()->MarkCodeAsExecutedOnce(),
+ RelocInfo::CODE_AGE_SEQUENCE);
+ Nop(kNoCodeAgeSequenceLength - Assembler::kCallInstructionLength);
+ } else {
+ push(ebp); // Caller's frame pointer.
+ mov(ebp, esp);
+ push(esi); // Callee's context.
+ push(edi); // Callee's JS function.
+ }
+}
+
+
+void MacroAssembler::EnterFrame(StackFrame::Type type) {
+ push(ebp);
+ mov(ebp, esp);
+ push(esi);
+ push(Immediate(Smi::FromInt(type)));
+ push(Immediate(CodeObject()));
+ if (emit_debug_code()) {
+ cmp(Operand(esp, 0), Immediate(isolate()->factory()->undefined_value()));
+ Check(not_equal, kCodeObjectNotProperlyPatched);
+ }
+}
+
+
+void MacroAssembler::LeaveFrame(StackFrame::Type type) {
+ if (emit_debug_code()) {
+ cmp(Operand(ebp, StandardFrameConstants::kMarkerOffset),
+ Immediate(Smi::FromInt(type)));
+ Check(equal, kStackFrameTypesMustMatch);
+ }
+ leave();
+}
+
+
+void MacroAssembler::EnterExitFramePrologue() {
+ // Set up the frame structure on the stack.
+ ASSERT(ExitFrameConstants::kCallerSPDisplacement == +2 * kPointerSize);
+ ASSERT(ExitFrameConstants::kCallerPCOffset == +1 * kPointerSize);
+ ASSERT(ExitFrameConstants::kCallerFPOffset == 0 * kPointerSize);
+ push(ebp);
+ mov(ebp, esp);
+
+ // Reserve room for entry stack pointer and push the code object.
+ ASSERT(ExitFrameConstants::kSPOffset == -1 * kPointerSize);
+ push(Immediate(0)); // Saved entry sp, patched before call.
+ push(Immediate(CodeObject())); // Accessed from ExitFrame::code_slot.
+
+ // Save the frame pointer and the context in top.
+ ExternalReference c_entry_fp_address(Isolate::kCEntryFPAddress, isolate());
+ ExternalReference context_address(Isolate::kContextAddress, isolate());
+ mov(Operand::StaticVariable(c_entry_fp_address), ebp);
+ mov(Operand::StaticVariable(context_address), esi);
+}
+
+
+void MacroAssembler::EnterExitFrameEpilogue(int argc) {
+ sub(esp, Immediate(argc * kPointerSize));
+
+ // Get the required frame alignment for the OS.
+ const int kFrameAlignment = OS::ActivationFrameAlignment();
+ if (kFrameAlignment > 0) {
+ ASSERT(IsPowerOf2(kFrameAlignment));
+ and_(esp, -kFrameAlignment);
+ }
+
+ // Patch the saved entry sp.
+ mov(Operand(ebp, ExitFrameConstants::kSPOffset), esp);
+}
+
+
+void MacroAssembler::EnterExitFrame() {
+ EnterExitFramePrologue();
+
+ // Set up argc and argv in callee-saved registers.
+ int offset = StandardFrameConstants::kCallerSPOffset - kPointerSize;
+ mov(edi, eax);
+ lea(esi, Operand(ebp, eax, times_4, offset));
+
+ // Reserve space for argc, argv and isolate.
+ EnterExitFrameEpilogue(3);
+}
+
+
+void MacroAssembler::EnterApiExitFrame(int argc) {
+ EnterExitFramePrologue();
+ EnterExitFrameEpilogue(argc);
+}
+
+
+void MacroAssembler::LeaveExitFrame() {
+ // Get the return address from the stack and restore the frame pointer.
+ mov(ecx, Operand(ebp, 1 * kPointerSize));
+ mov(ebp, Operand(ebp, 0 * kPointerSize));
+
+ // Pop the arguments and the receiver from the caller stack.
+ lea(esp, Operand(esi, 1 * kPointerSize));
+
+ // Push the return address to get ready to return.
+ push(ecx);
+
+ LeaveExitFrameEpilogue(true);
+}
+
+
+void MacroAssembler::LeaveExitFrameEpilogue(bool restore_context) {
+ // Restore current context from top and clear it in debug mode.
+ ExternalReference context_address(Isolate::kContextAddress, isolate());
+ if (restore_context) {
+ mov(esi, Operand::StaticVariable(context_address));
+ }
+#ifdef DEBUG
+ mov(Operand::StaticVariable(context_address), Immediate(0));
+#endif
+
+ // Clear the top frame.
+ ExternalReference c_entry_fp_address(Isolate::kCEntryFPAddress,
+ isolate());
+ mov(Operand::StaticVariable(c_entry_fp_address), Immediate(0));
+}
+
+
+void MacroAssembler::LeaveApiExitFrame(bool restore_context) {
+ mov(esp, ebp);
+ pop(ebp);
+
+ LeaveExitFrameEpilogue(restore_context);
+}
+
+
+void MacroAssembler::PushTryHandler(StackHandler::Kind kind,
+ int handler_index) {
+ // Adjust this code if not the case.
+ STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
+ STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
+
+ // We will build up the handler from the bottom by pushing on the stack.
+ // First push the frame pointer and context.
+ if (kind == StackHandler::JS_ENTRY) {
+ // The frame pointer does not point to a JS frame so we save NULL for
+ // ebp. We expect the code throwing an exception to check ebp before
+ // dereferencing it to restore the context.
+ push(Immediate(0)); // NULL frame pointer.
+ push(Immediate(Smi::FromInt(0))); // No context.
+ } else {
+ push(ebp);
+ push(esi);
+ }
+ // Push the state and the code object.
+ unsigned state =
+ StackHandler::IndexField::encode(handler_index) |
+ StackHandler::KindField::encode(kind);
+ push(Immediate(state));
+ Push(CodeObject());
+
+ // Link the current handler as the next handler.
+ ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
+ push(Operand::StaticVariable(handler_address));
+ // Set this new handler as the current one.
+ mov(Operand::StaticVariable(handler_address), esp);
+}
+
+
+void MacroAssembler::PopTryHandler() {
+ STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
+ ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
+ pop(Operand::StaticVariable(handler_address));
+ add(esp, Immediate(StackHandlerConstants::kSize - kPointerSize));
+}
+
+
+void MacroAssembler::JumpToHandlerEntry() {
+ // Compute the handler entry address and jump to it. The handler table is
+ // a fixed array of (smi-tagged) code offsets.
+ // eax = exception, edi = code object, edx = state.
+ mov(ebx, FieldOperand(edi, Code::kHandlerTableOffset));
+ shr(edx, StackHandler::kKindWidth);
+ mov(edx, FieldOperand(ebx, edx, times_4, FixedArray::kHeaderSize));
+ SmiUntag(edx);
+ lea(edi, FieldOperand(edi, edx, times_1, Code::kHeaderSize));
+ jmp(edi);
+}
+
+
+void MacroAssembler::Throw(Register value) {
+ // Adjust this code if not the case.
+ STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
+ STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
+
+ // The exception is expected in eax.
+ if (!value.is(eax)) {
+ mov(eax, value);
+ }
+ // Drop the stack pointer to the top of the top handler.
+ ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
+ mov(esp, Operand::StaticVariable(handler_address));
+ // Restore the next handler.
+ pop(Operand::StaticVariable(handler_address));
+
+ // Remove the code object and state, compute the handler address in edi.
+ pop(edi); // Code object.
+ pop(edx); // Index and state.
+
+ // Restore the context and frame pointer.
+ pop(esi); // Context.
+ pop(ebp); // Frame pointer.
+
+ // If the handler is a JS frame, restore the context to the frame.
+  // (kind == JS_ENTRY) == (ebp == 0) == (esi == 0), so we could test either
+ // ebp or esi.
+ Label skip;
+ test(esi, esi);
+ j(zero, &skip, Label::kNear);
+ mov(Operand(ebp, StandardFrameConstants::kContextOffset), esi);
+ bind(&skip);
+
+ JumpToHandlerEntry();
+}
+
+
+void MacroAssembler::ThrowUncatchable(Register value) {
+ // Adjust this code if not the case.
+ STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
+ STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
+
+ // The exception is expected in eax.
+ if (!value.is(eax)) {
+ mov(eax, value);
+ }
+ // Drop the stack pointer to the top of the top stack handler.
+ ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
+ mov(esp, Operand::StaticVariable(handler_address));
+
+ // Unwind the handlers until the top ENTRY handler is found.
+ Label fetch_next, check_kind;
+ jmp(&check_kind, Label::kNear);
+ bind(&fetch_next);
+ mov(esp, Operand(esp, StackHandlerConstants::kNextOffset));
+
+ bind(&check_kind);
+ STATIC_ASSERT(StackHandler::JS_ENTRY == 0);
+ test(Operand(esp, StackHandlerConstants::kStateOffset),
+ Immediate(StackHandler::KindField::kMask));
+ j(not_zero, &fetch_next);
+
+ // Set the top handler address to next handler past the top ENTRY handler.
+ pop(Operand::StaticVariable(handler_address));
+
+ // Remove the code object and state, compute the handler address in edi.
+ pop(edi); // Code object.
+ pop(edx); // Index and state.
+
+ // Clear the context pointer and frame pointer (0 was saved in the handler).
+ pop(esi);
+ pop(ebp);
+
+ JumpToHandlerEntry();
+}
+
+
+void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
+ Register scratch1,
+ Register scratch2,
+ Label* miss) {
+ Label same_contexts;
+
+ ASSERT(!holder_reg.is(scratch1));
+ ASSERT(!holder_reg.is(scratch2));
+ ASSERT(!scratch1.is(scratch2));
+
+ // Load current lexical context from the stack frame.
+ mov(scratch1, Operand(ebp, StandardFrameConstants::kContextOffset));
+
+ // When generating debug code, make sure the lexical context is set.
+ if (emit_debug_code()) {
+ cmp(scratch1, Immediate(0));
+ Check(not_equal, kWeShouldNotHaveAnEmptyLexicalContext);
+ }
+ // Load the native context of the current context.
+ int offset =
+ Context::kHeaderSize + Context::GLOBAL_OBJECT_INDEX * kPointerSize;
+ mov(scratch1, FieldOperand(scratch1, offset));
+ mov(scratch1, FieldOperand(scratch1, GlobalObject::kNativeContextOffset));
+
+ // Check the context is a native context.
+ if (emit_debug_code()) {
+ // Read the first word and compare to native_context_map.
+ cmp(FieldOperand(scratch1, HeapObject::kMapOffset),
+ isolate()->factory()->native_context_map());
+ Check(equal, kJSGlobalObjectNativeContextShouldBeANativeContext);
+ }
+
+ // Check if both contexts are the same.
+ cmp(scratch1, FieldOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
+ j(equal, &same_contexts);
+
+  // Compare security tokens, using scratch2 as a temporary register.
+ //
+ // Check that the security token in the calling global object is
+ // compatible with the security token in the receiving global
+ // object.
+ mov(scratch2,
+ FieldOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
+
+ // Check the context is a native context.
+ if (emit_debug_code()) {
+ cmp(scratch2, isolate()->factory()->null_value());
+ Check(not_equal, kJSGlobalProxyContextShouldNotBeNull);
+
+    // Read the first word and compare to native_context_map().
+ cmp(FieldOperand(scratch2, HeapObject::kMapOffset),
+ isolate()->factory()->native_context_map());
+ Check(equal, kJSGlobalObjectNativeContextShouldBeANativeContext);
+ }
+
+ int token_offset = Context::kHeaderSize +
+ Context::SECURITY_TOKEN_INDEX * kPointerSize;
+ mov(scratch1, FieldOperand(scratch1, token_offset));
+ cmp(scratch1, FieldOperand(scratch2, token_offset));
+ j(not_equal, miss);
+
+ bind(&same_contexts);
+}
+
+
+// Compute the hash code from the untagged key. This must be kept in sync with
+// ComputeIntegerHash in utils.h and KeyedLoadGenericElementStub in
+// code-stub-hydrogen.cc
+//
+// Note: r0 will contain hash code
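+//
+// Roughly, the sequence below computes (a sketch of the same steps in C,
+// assuming a 32-bit uint32_t hash; ComputeIntegerHash in utils.h remains
+// the authoritative version):
+//   hash ^= seed;
+//   hash = ~hash + (hash << 15);
+//   hash ^= hash >> 12;
+//   hash += hash << 2;
+//   hash ^= hash >> 4;
+//   hash *= 2057;
+//   hash ^= hash >> 16;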
+void MacroAssembler::GetNumberHash(Register r0, Register scratch) {
+ // Xor original key with a seed.
+ if (serializer_enabled()) {
+ ExternalReference roots_array_start =
+ ExternalReference::roots_array_start(isolate());
+ mov(scratch, Immediate(Heap::kHashSeedRootIndex));
+ mov(scratch,
+ Operand::StaticArray(scratch, times_pointer_size, roots_array_start));
+ SmiUntag(scratch);
+ xor_(r0, scratch);
+ } else {
+ int32_t seed = isolate()->heap()->HashSeed();
+ xor_(r0, Immediate(seed));
+ }
+
+ // hash = ~hash + (hash << 15);
+ mov(scratch, r0);
+ not_(r0);
+ shl(scratch, 15);
+ add(r0, scratch);
+ // hash = hash ^ (hash >> 12);
+ mov(scratch, r0);
+ shr(scratch, 12);
+ xor_(r0, scratch);
+ // hash = hash + (hash << 2);
+ lea(r0, Operand(r0, r0, times_4, 0));
+ // hash = hash ^ (hash >> 4);
+ mov(scratch, r0);
+ shr(scratch, 4);
+ xor_(r0, scratch);
+ // hash = hash * 2057;
+ imul(r0, r0, 2057);
+ // hash = hash ^ (hash >> 16);
+ mov(scratch, r0);
+ shr(scratch, 16);
+ xor_(r0, scratch);
+}
+
+
+
+void MacroAssembler::LoadFromNumberDictionary(Label* miss,
+ Register elements,
+ Register key,
+ Register r0,
+ Register r1,
+ Register r2,
+ Register result) {
+ // Register use:
+ //
+ // elements - holds the slow-case elements of the receiver and is unchanged.
+ //
+ // key - holds the smi key on entry and is unchanged.
+ //
+ // Scratch registers:
+ //
+ // r0 - holds the untagged key on entry and holds the hash once computed.
+ //
+ // r1 - used to hold the capacity mask of the dictionary
+ //
+ // r2 - used for the index into the dictionary.
+ //
+ // result - holds the result on exit if the load succeeds and we fall through.
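+  //
+  // In rough C terms the lookup is (a sketch, not the exact helpers):
+  //   mask = capacity - 1;
+  //   for (i = 0; i < kNumberDictionaryProbes; i++) {
+  //     index = (hash + GetProbeOffset(i)) & mask;   // quadratic probing
+  //     if (elements[start + index * kEntrySize] == key) break;
+  //   }
+  //   check that the entry's details are NORMAL, then load the entry's value;
+  //   otherwise jump to the miss label.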
+
+ Label done;
+
+ GetNumberHash(r0, r1);
+
+ // Compute capacity mask.
+ mov(r1, FieldOperand(elements, SeededNumberDictionary::kCapacityOffset));
+ shr(r1, kSmiTagSize); // convert smi to int
+ dec(r1);
+
+ // Generate an unrolled loop that performs a few probes before giving up.
+ for (int i = 0; i < kNumberDictionaryProbes; i++) {
+ // Use r2 for index calculations and keep the hash intact in r0.
+ mov(r2, r0);
+ // Compute the masked index: (hash + i + i * i) & mask.
+ if (i > 0) {
+ add(r2, Immediate(SeededNumberDictionary::GetProbeOffset(i)));
+ }
+ and_(r2, r1);
+
+ // Scale the index by multiplying by the entry size.
+ ASSERT(SeededNumberDictionary::kEntrySize == 3);
+ lea(r2, Operand(r2, r2, times_2, 0)); // r2 = r2 * 3
+
+ // Check if the key matches.
+ cmp(key, FieldOperand(elements,
+ r2,
+ times_pointer_size,
+ SeededNumberDictionary::kElementsStartOffset));
+ if (i != (kNumberDictionaryProbes - 1)) {
+ j(equal, &done);
+ } else {
+ j(not_equal, miss);
+ }
+ }
+
+ bind(&done);
+  // Check that the value is a normal property.
+ const int kDetailsOffset =
+ SeededNumberDictionary::kElementsStartOffset + 2 * kPointerSize;
+ ASSERT_EQ(NORMAL, 0);
+ test(FieldOperand(elements, r2, times_pointer_size, kDetailsOffset),
+ Immediate(PropertyDetails::TypeField::kMask << kSmiTagSize));
+ j(not_zero, miss);
+
+ // Get the value at the masked, scaled index.
+ const int kValueOffset =
+ SeededNumberDictionary::kElementsStartOffset + kPointerSize;
+ mov(result, FieldOperand(elements, r2, times_pointer_size, kValueOffset));
+}
+
+
+void MacroAssembler::LoadAllocationTopHelper(Register result,
+ Register scratch,
+ AllocationFlags flags) {
+ ExternalReference allocation_top =
+ AllocationUtils::GetAllocationTopReference(isolate(), flags);
+
+ // Just return if allocation top is already known.
+ if ((flags & RESULT_CONTAINS_TOP) != 0) {
+ // No use of scratch if allocation top is provided.
+ ASSERT(scratch.is(no_reg));
+#ifdef DEBUG
+ // Assert that result actually contains top on entry.
+ cmp(result, Operand::StaticVariable(allocation_top));
+ Check(equal, kUnexpectedAllocationTop);
+#endif
+ return;
+ }
+
+ // Move address of new object to result. Use scratch register if available.
+ if (scratch.is(no_reg)) {
+ mov(result, Operand::StaticVariable(allocation_top));
+ } else {
+ mov(scratch, Immediate(allocation_top));
+ mov(result, Operand(scratch, 0));
+ }
+}
+
+
+void MacroAssembler::UpdateAllocationTopHelper(Register result_end,
+ Register scratch,
+ AllocationFlags flags) {
+ if (emit_debug_code()) {
+ test(result_end, Immediate(kObjectAlignmentMask));
+ Check(zero, kUnalignedAllocationInNewSpace);
+ }
+
+ ExternalReference allocation_top =
+ AllocationUtils::GetAllocationTopReference(isolate(), flags);
+
+ // Update new top. Use scratch if available.
+ if (scratch.is(no_reg)) {
+ mov(Operand::StaticVariable(allocation_top), result_end);
+ } else {
+ mov(Operand(scratch, 0), result_end);
+ }
+}
+
+
+void MacroAssembler::Allocate(int object_size,
+ Register result,
+ Register result_end,
+ Register scratch,
+ Label* gc_required,
+ AllocationFlags flags) {
+ ASSERT((flags & (RESULT_CONTAINS_TOP | SIZE_IN_WORDS)) == 0);
+ ASSERT(object_size <= Page::kMaxRegularHeapObjectSize);
+ if (!FLAG_inline_new) {
+ if (emit_debug_code()) {
+ // Trash the registers to simulate an allocation failure.
+ mov(result, Immediate(0x7091));
+ if (result_end.is_valid()) {
+ mov(result_end, Immediate(0x7191));
+ }
+ if (scratch.is_valid()) {
+ mov(scratch, Immediate(0x7291));
+ }
+ }
+ jmp(gc_required);
+ return;
+ }
+ ASSERT(!result.is(result_end));
+
+ // Load address of new object into result.
+ LoadAllocationTopHelper(result, scratch, flags);
+
+ ExternalReference allocation_limit =
+ AllocationUtils::GetAllocationLimitReference(isolate(), flags);
+
+ // Align the next allocation. Storing the filler map without checking top is
+ // safe in new-space because the limit of the heap is aligned there.
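+  // In effect (a sketch of the block below):
+  //   if (result & kDoubleAlignmentMask) {
+  //     *result = one_pointer_filler_map;  // plug the 4-byte hole
+  //     result += kDoubleSize / 2;
+  //   }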
+ if ((flags & DOUBLE_ALIGNMENT) != 0) {
+ ASSERT((flags & PRETENURE_OLD_POINTER_SPACE) == 0);
+ ASSERT(kPointerAlignment * 2 == kDoubleAlignment);
+ Label aligned;
+ test(result, Immediate(kDoubleAlignmentMask));
+ j(zero, &aligned, Label::kNear);
+ if ((flags & PRETENURE_OLD_DATA_SPACE) != 0) {
+ cmp(result, Operand::StaticVariable(allocation_limit));
+ j(above_equal, gc_required);
+ }
+ mov(Operand(result, 0),
+ Immediate(isolate()->factory()->one_pointer_filler_map()));
+ add(result, Immediate(kDoubleSize / 2));
+ bind(&aligned);
+ }
+
+ // Calculate new top and bail out if space is exhausted.
+ Register top_reg = result_end.is_valid() ? result_end : result;
+ if (!top_reg.is(result)) {
+ mov(top_reg, result);
+ }
+ add(top_reg, Immediate(object_size));
+ j(carry, gc_required);
+ cmp(top_reg, Operand::StaticVariable(allocation_limit));
+ j(above, gc_required);
+
+ // Update allocation top.
+ UpdateAllocationTopHelper(top_reg, scratch, flags);
+
+ // Tag result if requested.
+ bool tag_result = (flags & TAG_OBJECT) != 0;
+ if (top_reg.is(result)) {
+ if (tag_result) {
+ sub(result, Immediate(object_size - kHeapObjectTag));
+ } else {
+ sub(result, Immediate(object_size));
+ }
+ } else if (tag_result) {
+ ASSERT(kHeapObjectTag == 1);
+ inc(result);
+ }
+}
+
+
+void MacroAssembler::Allocate(int header_size,
+ ScaleFactor element_size,
+ Register element_count,
+ RegisterValueType element_count_type,
+ Register result,
+ Register result_end,
+ Register scratch,
+ Label* gc_required,
+ AllocationFlags flags) {
+ ASSERT((flags & SIZE_IN_WORDS) == 0);
+ if (!FLAG_inline_new) {
+ if (emit_debug_code()) {
+ // Trash the registers to simulate an allocation failure.
+ mov(result, Immediate(0x7091));
+ mov(result_end, Immediate(0x7191));
+ if (scratch.is_valid()) {
+ mov(scratch, Immediate(0x7291));
+ }
+ // Register element_count is not modified by the function.
+ }
+ jmp(gc_required);
+ return;
+ }
+ ASSERT(!result.is(result_end));
+
+ // Load address of new object into result.
+ LoadAllocationTopHelper(result, scratch, flags);
+
+ ExternalReference allocation_limit =
+ AllocationUtils::GetAllocationLimitReference(isolate(), flags);
+
+ // Align the next allocation. Storing the filler map without checking top is
+ // safe in new-space because the limit of the heap is aligned there.
+ if ((flags & DOUBLE_ALIGNMENT) != 0) {
+ ASSERT((flags & PRETENURE_OLD_POINTER_SPACE) == 0);
+ ASSERT(kPointerAlignment * 2 == kDoubleAlignment);
+ Label aligned;
+ test(result, Immediate(kDoubleAlignmentMask));
+ j(zero, &aligned, Label::kNear);
+ if ((flags & PRETENURE_OLD_DATA_SPACE) != 0) {
+ cmp(result, Operand::StaticVariable(allocation_limit));
+ j(above_equal, gc_required);
+ }
+ mov(Operand(result, 0),
+ Immediate(isolate()->factory()->one_pointer_filler_map()));
+ add(result, Immediate(kDoubleSize / 2));
+ bind(&aligned);
+ }
+
+ // Calculate new top and bail out if space is exhausted.
+ // We assume that element_count*element_size + header_size does not
+ // overflow.
+ if (element_count_type == REGISTER_VALUE_IS_SMI) {
+ STATIC_ASSERT(static_cast<ScaleFactor>(times_2 - 1) == times_1);
+ STATIC_ASSERT(static_cast<ScaleFactor>(times_4 - 1) == times_2);
+ STATIC_ASSERT(static_cast<ScaleFactor>(times_8 - 1) == times_4);
+ ASSERT(element_size >= times_2);
+ ASSERT(kSmiTagSize == 1);
+ element_size = static_cast<ScaleFactor>(element_size - 1);
+ } else {
+ ASSERT(element_count_type == REGISTER_VALUE_IS_INT32);
+ }
+ lea(result_end, Operand(element_count, element_size, header_size));
+ add(result_end, result);
+ j(carry, gc_required);
+ cmp(result_end, Operand::StaticVariable(allocation_limit));
+ j(above, gc_required);
+
+ if ((flags & TAG_OBJECT) != 0) {
+ ASSERT(kHeapObjectTag == 1);
+ inc(result);
+ }
+
+ // Update allocation top.
+ UpdateAllocationTopHelper(result_end, scratch, flags);
+}
+
+
+void MacroAssembler::Allocate(Register object_size,
+ Register result,
+ Register result_end,
+ Register scratch,
+ Label* gc_required,
+ AllocationFlags flags) {
+ ASSERT((flags & (RESULT_CONTAINS_TOP | SIZE_IN_WORDS)) == 0);
+ if (!FLAG_inline_new) {
+ if (emit_debug_code()) {
+ // Trash the registers to simulate an allocation failure.
+ mov(result, Immediate(0x7091));
+ mov(result_end, Immediate(0x7191));
+ if (scratch.is_valid()) {
+ mov(scratch, Immediate(0x7291));
+ }
+ // object_size is left unchanged by this function.
+ }
+ jmp(gc_required);
+ return;
+ }
+ ASSERT(!result.is(result_end));
+
+ // Load address of new object into result.
+ LoadAllocationTopHelper(result, scratch, flags);
+
+ ExternalReference allocation_limit =
+ AllocationUtils::GetAllocationLimitReference(isolate(), flags);
+
+ // Align the next allocation. Storing the filler map without checking top is
+ // safe in new-space because the limit of the heap is aligned there.
+ if ((flags & DOUBLE_ALIGNMENT) != 0) {
+ ASSERT((flags & PRETENURE_OLD_POINTER_SPACE) == 0);
+ ASSERT(kPointerAlignment * 2 == kDoubleAlignment);
+ Label aligned;
+ test(result, Immediate(kDoubleAlignmentMask));
+ j(zero, &aligned, Label::kNear);
+ if ((flags & PRETENURE_OLD_DATA_SPACE) != 0) {
+ cmp(result, Operand::StaticVariable(allocation_limit));
+ j(above_equal, gc_required);
+ }
+ mov(Operand(result, 0),
+ Immediate(isolate()->factory()->one_pointer_filler_map()));
+ add(result, Immediate(kDoubleSize / 2));
+ bind(&aligned);
+ }
+
+ // Calculate new top and bail out if space is exhausted.
+ if (!object_size.is(result_end)) {
+ mov(result_end, object_size);
+ }
+ add(result_end, result);
+ j(carry, gc_required);
+ cmp(result_end, Operand::StaticVariable(allocation_limit));
+ j(above, gc_required);
+
+ // Tag result if requested.
+ if ((flags & TAG_OBJECT) != 0) {
+ ASSERT(kHeapObjectTag == 1);
+ inc(result);
+ }
+
+ // Update allocation top.
+ UpdateAllocationTopHelper(result_end, scratch, flags);
+}
+
+
+void MacroAssembler::UndoAllocationInNewSpace(Register object) {
+ ExternalReference new_space_allocation_top =
+ ExternalReference::new_space_allocation_top_address(isolate());
+
+ // Make sure the object has no tag before resetting top.
+ and_(object, Immediate(~kHeapObjectTagMask));
+#ifdef DEBUG
+ cmp(object, Operand::StaticVariable(new_space_allocation_top));
+ Check(below, kUndoAllocationOfNonAllocatedMemory);
+#endif
+ mov(Operand::StaticVariable(new_space_allocation_top), object);
+}
+
+
+void MacroAssembler::AllocateHeapNumber(Register result,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required) {
+ // Allocate heap number in new space.
+ Allocate(HeapNumber::kSize, result, scratch1, scratch2, gc_required,
+ TAG_OBJECT);
+
+ // Set the map.
+ mov(FieldOperand(result, HeapObject::kMapOffset),
+ Immediate(isolate()->factory()->heap_number_map()));
+}
+
+
+void MacroAssembler::AllocateTwoByteString(Register result,
+ Register length,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Label* gc_required) {
+ // Calculate the number of bytes needed for the characters in the string while
+ // observing object alignment.
+ ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
+ ASSERT(kShortSize == 2);
+ // scratch1 = length * 2 + kObjectAlignmentMask.
+ lea(scratch1, Operand(length, length, times_1, kObjectAlignmentMask));
+ and_(scratch1, Immediate(~kObjectAlignmentMask));
+
+ // Allocate two byte string in new space.
+ Allocate(SeqTwoByteString::kHeaderSize,
+ times_1,
+ scratch1,
+ REGISTER_VALUE_IS_INT32,
+ result,
+ scratch2,
+ scratch3,
+ gc_required,
+ TAG_OBJECT);
+
+ // Set the map, length and hash field.
+ mov(FieldOperand(result, HeapObject::kMapOffset),
+ Immediate(isolate()->factory()->string_map()));
+ mov(scratch1, length);
+ SmiTag(scratch1);
+ mov(FieldOperand(result, String::kLengthOffset), scratch1);
+ mov(FieldOperand(result, String::kHashFieldOffset),
+ Immediate(String::kEmptyHashField));
+}
+
+
+void MacroAssembler::AllocateAsciiString(Register result,
+ Register length,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Label* gc_required) {
+ // Calculate the number of bytes needed for the characters in the string while
+ // observing object alignment.
+ ASSERT((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
+ mov(scratch1, length);
+ ASSERT(kCharSize == 1);
+ add(scratch1, Immediate(kObjectAlignmentMask));
+ and_(scratch1, Immediate(~kObjectAlignmentMask));
+
+ // Allocate ASCII string in new space.
+ Allocate(SeqOneByteString::kHeaderSize,
+ times_1,
+ scratch1,
+ REGISTER_VALUE_IS_INT32,
+ result,
+ scratch2,
+ scratch3,
+ gc_required,
+ TAG_OBJECT);
+
+ // Set the map, length and hash field.
+ mov(FieldOperand(result, HeapObject::kMapOffset),
+ Immediate(isolate()->factory()->ascii_string_map()));
+ mov(scratch1, length);
+ SmiTag(scratch1);
+ mov(FieldOperand(result, String::kLengthOffset), scratch1);
+ mov(FieldOperand(result, String::kHashFieldOffset),
+ Immediate(String::kEmptyHashField));
+}
+
+
+void MacroAssembler::AllocateAsciiString(Register result,
+ int length,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required) {
+ ASSERT(length > 0);
+
+ // Allocate ASCII string in new space.
+ Allocate(SeqOneByteString::SizeFor(length), result, scratch1, scratch2,
+ gc_required, TAG_OBJECT);
+
+ // Set the map, length and hash field.
+ mov(FieldOperand(result, HeapObject::kMapOffset),
+ Immediate(isolate()->factory()->ascii_string_map()));
+ mov(FieldOperand(result, String::kLengthOffset),
+ Immediate(Smi::FromInt(length)));
+ mov(FieldOperand(result, String::kHashFieldOffset),
+ Immediate(String::kEmptyHashField));
+}
+
+
+void MacroAssembler::AllocateTwoByteConsString(Register result,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required) {
+  // Allocate a cons string object in new space.
+ Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
+ TAG_OBJECT);
+
+ // Set the map. The other fields are left uninitialized.
+ mov(FieldOperand(result, HeapObject::kMapOffset),
+ Immediate(isolate()->factory()->cons_string_map()));
+}
+
+
+void MacroAssembler::AllocateAsciiConsString(Register result,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required) {
+ Label allocate_new_space, install_map;
+ AllocationFlags flags = TAG_OBJECT;
+
+ ExternalReference high_promotion_mode = ExternalReference::
+ new_space_high_promotion_mode_active_address(isolate());
+
+ test(Operand::StaticVariable(high_promotion_mode), Immediate(1));
+ j(zero, &allocate_new_space);
+
+ Allocate(ConsString::kSize,
+ result,
+ scratch1,
+ scratch2,
+ gc_required,
+ static_cast<AllocationFlags>(flags | PRETENURE_OLD_POINTER_SPACE));
+ jmp(&install_map);
+
+ bind(&allocate_new_space);
+ Allocate(ConsString::kSize,
+ result,
+ scratch1,
+ scratch2,
+ gc_required,
+ flags);
+
+ bind(&install_map);
+ // Set the map. The other fields are left uninitialized.
+ mov(FieldOperand(result, HeapObject::kMapOffset),
+ Immediate(isolate()->factory()->cons_ascii_string_map()));
+}
+
+
+void MacroAssembler::AllocateTwoByteSlicedString(Register result,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required) {
+  // Allocate a two-byte sliced string object in new space.
+ Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
+ TAG_OBJECT);
+
+ // Set the map. The other fields are left uninitialized.
+ mov(FieldOperand(result, HeapObject::kMapOffset),
+ Immediate(isolate()->factory()->sliced_string_map()));
+}
+
+
+void MacroAssembler::AllocateAsciiSlicedString(Register result,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required) {
+  // Allocate an ASCII sliced string object in new space.
+ Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
+ TAG_OBJECT);
+
+ // Set the map. The other fields are left uninitialized.
+ mov(FieldOperand(result, HeapObject::kMapOffset),
+ Immediate(isolate()->factory()->sliced_ascii_string_map()));
+}
+
+
+// Copy memory, byte-by-byte, from source to destination. Not optimized for
+// long or aligned copies. The contents of scratch and length are destroyed.
+// Source and destination are incremented by length.
+// Many variants of movsb, loop unrolling, word moves, and indexed operands
+// have been tried here already, and this is fastest.
+// A simpler loop is faster on small copies, but 30% slower on large ones.
+// The cld() instruction must have been emitted, to clear the direction flag,
+// before calling this function.
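+// The strategy below is, roughly (a sketch, not the exact code):
+//   if (length < 4)      copy byte by byte;
+//   else {
+//     copy the 4 bytes ending at source + length - 4;  // unaligned tail
+//     if (length <= 16)  copy the leading 4/8/12 bytes with dword moves;
+//     else               rep movsd for length / 4 dwords, then advance
+//                        destination by the leftover length & 3 bytes.
+//   }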
+void MacroAssembler::CopyBytes(Register source,
+ Register destination,
+ Register length,
+ Register scratch) {
+ Label short_loop, len4, len8, len12, done, short_string;
+ ASSERT(source.is(esi));
+ ASSERT(destination.is(edi));
+ ASSERT(length.is(ecx));
+ cmp(length, Immediate(4));
+ j(below, &short_string, Label::kNear);
+
+ // Because source is 4-byte aligned in our uses of this function,
+ // we keep source aligned for the rep_movs call by copying the odd bytes
+ // at the end of the ranges.
+ mov(scratch, Operand(source, length, times_1, -4));
+ mov(Operand(destination, length, times_1, -4), scratch);
+
+ cmp(length, Immediate(8));
+ j(below_equal, &len4, Label::kNear);
+ cmp(length, Immediate(12));
+ j(below_equal, &len8, Label::kNear);
+ cmp(length, Immediate(16));
+ j(below_equal, &len12, Label::kNear);
+
+ mov(scratch, ecx);
+ shr(ecx, 2);
+ rep_movs();
+ and_(scratch, Immediate(0x3));
+ add(destination, scratch);
+ jmp(&done, Label::kNear);
+
+ bind(&len12);
+ mov(scratch, Operand(source, 8));
+ mov(Operand(destination, 8), scratch);
+ bind(&len8);
+ mov(scratch, Operand(source, 4));
+ mov(Operand(destination, 4), scratch);
+ bind(&len4);
+ mov(scratch, Operand(source, 0));
+ mov(Operand(destination, 0), scratch);
+ add(destination, length);
+ jmp(&done, Label::kNear);
+
+ bind(&short_string);
+ test(length, length);
+ j(zero, &done, Label::kNear);
+
+ bind(&short_loop);
+ mov_b(scratch, Operand(source, 0));
+ mov_b(Operand(destination, 0), scratch);
+ inc(source);
+ inc(destination);
+ dec(length);
+ j(not_zero, &short_loop);
+
+ bind(&done);
+}
+
+
+void MacroAssembler::InitializeFieldsWithFiller(Register start_offset,
+ Register end_offset,
+ Register filler) {
+ Label loop, entry;
+ jmp(&entry);
+ bind(&loop);
+ mov(Operand(start_offset, 0), filler);
+ add(start_offset, Immediate(kPointerSize));
+ bind(&entry);
+ cmp(start_offset, end_offset);
+ j(less, &loop);
+}
+
+
+void MacroAssembler::BooleanBitTest(Register object,
+ int field_offset,
+ int bit_index) {
+ bit_index += kSmiTagSize + kSmiShiftSize;
+ ASSERT(IsPowerOf2(kBitsPerByte));
+ int byte_index = bit_index / kBitsPerByte;
+ int byte_bit_index = bit_index & (kBitsPerByte - 1);
+ test_b(FieldOperand(object, field_offset + byte_index),
+ static_cast<byte>(1 << byte_bit_index));
+}
+
+
+
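+// Jumps to then_label when result is zero while op (or op1 | op2 in the
+// two-register variant below) has its sign bit set, i.e. when an integer
+// zero actually stands for -0 (a rough description of both overloads).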
+void MacroAssembler::NegativeZeroTest(Register result,
+ Register op,
+ Label* then_label) {
+ Label ok;
+ test(result, result);
+ j(not_zero, &ok);
+ test(op, op);
+ j(sign, then_label);
+ bind(&ok);
+}
+
+
+void MacroAssembler::NegativeZeroTest(Register result,
+ Register op1,
+ Register op2,
+ Register scratch,
+ Label* then_label) {
+ Label ok;
+ test(result, result);
+ j(not_zero, &ok);
+ mov(scratch, op1);
+ or_(scratch, op2);
+ j(sign, then_label);
+ bind(&ok);
+}
+
+
+void MacroAssembler::TryGetFunctionPrototype(Register function,
+ Register result,
+ Register scratch,
+ Label* miss,
+ bool miss_on_bound_function) {
+ // Check that the receiver isn't a smi.
+ JumpIfSmi(function, miss);
+
+ // Check that the function really is a function.
+ CmpObjectType(function, JS_FUNCTION_TYPE, result);
+ j(not_equal, miss);
+
+ if (miss_on_bound_function) {
+ // If a bound function, go to miss label.
+ mov(scratch,
+ FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
+ BooleanBitTest(scratch, SharedFunctionInfo::kCompilerHintsOffset,
+ SharedFunctionInfo::kBoundFunction);
+ j(not_zero, miss);
+ }
+
+ // Make sure that the function has an instance prototype.
+ Label non_instance;
+ movzx_b(scratch, FieldOperand(result, Map::kBitFieldOffset));
+ test(scratch, Immediate(1 << Map::kHasNonInstancePrototype));
+ j(not_zero, &non_instance);
+
+ // Get the prototype or initial map from the function.
+ mov(result,
+ FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
+
+ // If the prototype or initial map is the hole, don't return it and
+ // simply miss the cache instead. This will allow us to allocate a
+ // prototype object on-demand in the runtime system.
+ cmp(result, Immediate(isolate()->factory()->the_hole_value()));
+ j(equal, miss);
+
+ // If the function does not have an initial map, we're done.
+ Label done;
+ CmpObjectType(result, MAP_TYPE, scratch);
+ j(not_equal, &done);
+
+ // Get the prototype from the initial map.
+ mov(result, FieldOperand(result, Map::kPrototypeOffset));
+ jmp(&done);
+
+ // Non-instance prototype: Fetch prototype from constructor field
+ // in initial map.
+ bind(&non_instance);
+ mov(result, FieldOperand(result, Map::kConstructorOffset));
+
+ // All done.
+ bind(&done);
+}
+
+
+void MacroAssembler::CallStub(CodeStub* stub, TypeFeedbackId ast_id) {
+ ASSERT(AllowThisStubCall(stub)); // Calls are not allowed in some stubs.
+ call(stub->GetCode(), RelocInfo::CODE_TARGET, ast_id);
+}
+
+
+void MacroAssembler::TailCallStub(CodeStub* stub) {
+ jmp(stub->GetCode(), RelocInfo::CODE_TARGET);
+}
+
+
+void MacroAssembler::StubReturn(int argc) {
+ ASSERT(argc >= 1 && generating_stub());
+ ret((argc - 1) * kPointerSize);
+}
+
+
+bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
+ return has_frame_ || !stub->SometimesSetsUpAFrame();
+}
+
+
+void MacroAssembler::IndexFromHash(Register hash, Register index) {
+ // The assert checks that the constants for the maximum number of digits
+ // for an array index cached in the hash field and the number of bits
+  // reserved for it do not conflict.
+ ASSERT(TenToThe(String::kMaxCachedArrayIndexLength) <
+ (1 << String::kArrayIndexValueBits));
+ // We want the smi-tagged index in key. kArrayIndexValueMask has zeros in
+ // the low kHashShift bits.
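+  // In effect (a sketch): index = (hash & kArrayIndexValueMask) >> kHashShift,
+  // re-tagged as a smi; the single shift by (kHashShift - kSmiTagSize) below
+  // does both steps at once.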
+ and_(hash, String::kArrayIndexValueMask);
+ STATIC_ASSERT(String::kHashShift >= kSmiTagSize && kSmiTag == 0);
+ if (String::kHashShift > kSmiTagSize) {
+ shr(hash, String::kHashShift - kSmiTagSize);
+ }
+ if (!index.is(hash)) {
+ mov(index, hash);
+ }
+}
+
+
+void MacroAssembler::CallRuntime(const Runtime::Function* f,
+ int num_arguments) {
+ // If the expected number of arguments of the runtime function is
+ // constant, we check that the actual number of arguments match the
+ // expectation.
+ CHECK(f->nargs < 0 || f->nargs == num_arguments);
+
+ // TODO(1236192): Most runtime routines don't need the number of
+ // arguments passed in because it is constant. At some point we
+ // should remove this need and make the runtime routine entry code
+ // smarter.
+ Move(eax, Immediate(num_arguments));
+ mov(ebx, Immediate(ExternalReference(f, isolate())));
+ CEntryStub ces(isolate(), 1);
+ CallStub(&ces);
+}
+
+
+void MacroAssembler::CallExternalReference(ExternalReference ref,
+ int num_arguments) {
+ mov(eax, Immediate(num_arguments));
+ mov(ebx, Immediate(ref));
+
+ CEntryStub stub(isolate(), 1);
+ CallStub(&stub);
+}
+
+
+void MacroAssembler::TailCallExternalReference(const ExternalReference& ext,
+ int num_arguments,
+ int result_size) {
+ // TODO(1236192): Most runtime routines don't need the number of
+ // arguments passed in because it is constant. At some point we
+ // should remove this need and make the runtime routine entry code
+ // smarter.
+ Move(eax, Immediate(num_arguments));
+ JumpToExternalReference(ext);
+}
+
+
+void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid,
+ int num_arguments,
+ int result_size) {
+ TailCallExternalReference(ExternalReference(fid, isolate()),
+ num_arguments,
+ result_size);
+}
+
+
+Operand ApiParameterOperand(int index) {
+ return Operand(esp, index * kPointerSize);
+}
+
+
+void MacroAssembler::PrepareCallApiFunction(int argc) {
+ EnterApiExitFrame(argc);
+ if (emit_debug_code()) {
+ mov(esi, Immediate(BitCast<int32_t>(kZapValue)));
+ }
+}
+
+
+void MacroAssembler::CallApiFunctionAndReturn(
+ Register function_address,
+ ExternalReference thunk_ref,
+ Operand thunk_last_arg,
+ int stack_space,
+ Operand return_value_operand,
+ Operand* context_restore_operand) {
+ ExternalReference next_address =
+ ExternalReference::handle_scope_next_address(isolate());
+ ExternalReference limit_address =
+ ExternalReference::handle_scope_limit_address(isolate());
+ ExternalReference level_address =
+ ExternalReference::handle_scope_level_address(isolate());
+
+ ASSERT(edx.is(function_address));
+ // Allocate HandleScope in callee-save registers.
+ mov(ebx, Operand::StaticVariable(next_address));
+ mov(edi, Operand::StaticVariable(limit_address));
+ add(Operand::StaticVariable(level_address), Immediate(1));
+
+ if (FLAG_log_timer_events) {
+ FrameScope frame(this, StackFrame::MANUAL);
+ PushSafepointRegisters();
+ PrepareCallCFunction(1, eax);
+ mov(Operand(esp, 0),
+ Immediate(ExternalReference::isolate_address(isolate())));
+ CallCFunction(ExternalReference::log_enter_external_function(isolate()), 1);
+ PopSafepointRegisters();
+ }
+
+
+ Label profiler_disabled;
+ Label end_profiler_check;
+ mov(eax, Immediate(ExternalReference::is_profiling_address(isolate())));
+ cmpb(Operand(eax, 0), 0);
+ j(zero, &profiler_disabled);
+
+ // Additional parameter is the address of the actual getter function.
+ mov(thunk_last_arg, function_address);
+ // Call the api function.
+ mov(eax, Immediate(thunk_ref));
+ call(eax);
+ jmp(&end_profiler_check);
+
+ bind(&profiler_disabled);
+ // Call the api function.
+ call(function_address);
+ bind(&end_profiler_check);
+
+ if (FLAG_log_timer_events) {
+ FrameScope frame(this, StackFrame::MANUAL);
+ PushSafepointRegisters();
+ PrepareCallCFunction(1, eax);
+ mov(Operand(esp, 0),
+ Immediate(ExternalReference::isolate_address(isolate())));
+ CallCFunction(ExternalReference::log_leave_external_function(isolate()), 1);
+ PopSafepointRegisters();
+ }
+
+ Label prologue;
+ // Load the value from ReturnValue
+ mov(eax, return_value_operand);
+
+ Label promote_scheduled_exception;
+ Label exception_handled;
+ Label delete_allocated_handles;
+ Label leave_exit_frame;
+
+ bind(&prologue);
+ // No more valid handles (the result handle was the last one). Restore
+ // previous handle scope.
+ mov(Operand::StaticVariable(next_address), ebx);
+ sub(Operand::StaticVariable(level_address), Immediate(1));
+ Assert(above_equal, kInvalidHandleScopeLevel);
+ cmp(edi, Operand::StaticVariable(limit_address));
+ j(not_equal, &delete_allocated_handles);
+ bind(&leave_exit_frame);
+
+ // Check if the function scheduled an exception.
+ ExternalReference scheduled_exception_address =
+ ExternalReference::scheduled_exception_address(isolate());
+ cmp(Operand::StaticVariable(scheduled_exception_address),
+ Immediate(isolate()->factory()->the_hole_value()));
+ j(not_equal, &promote_scheduled_exception);
+ bind(&exception_handled);
+
+#if ENABLE_EXTRA_CHECKS
+ // Check if the function returned a valid JavaScript value.
+ Label ok;
+ Register return_value = eax;
+ Register map = ecx;
+
+ JumpIfSmi(return_value, &ok, Label::kNear);
+ mov(map, FieldOperand(return_value, HeapObject::kMapOffset));
+
+ CmpInstanceType(map, FIRST_NONSTRING_TYPE);
+ j(below, &ok, Label::kNear);
+
+ CmpInstanceType(map, FIRST_SPEC_OBJECT_TYPE);
+ j(above_equal, &ok, Label::kNear);
+
+ cmp(map, isolate()->factory()->heap_number_map());
+ j(equal, &ok, Label::kNear);
+
+ cmp(return_value, isolate()->factory()->undefined_value());
+ j(equal, &ok, Label::kNear);
+
+ cmp(return_value, isolate()->factory()->true_value());
+ j(equal, &ok, Label::kNear);
+
+ cmp(return_value, isolate()->factory()->false_value());
+ j(equal, &ok, Label::kNear);
+
+ cmp(return_value, isolate()->factory()->null_value());
+ j(equal, &ok, Label::kNear);
+
+ Abort(kAPICallReturnedInvalidObject);
+
+ bind(&ok);
+#endif
+
+ bool restore_context = context_restore_operand != NULL;
+ if (restore_context) {
+ mov(esi, *context_restore_operand);
+ }
+ LeaveApiExitFrame(!restore_context);
+ ret(stack_space * kPointerSize);
+
+ bind(&promote_scheduled_exception);
+ {
+ FrameScope frame(this, StackFrame::INTERNAL);
+ CallRuntime(Runtime::kHiddenPromoteScheduledException, 0);
+ }
+ jmp(&exception_handled);
+
+ // HandleScope limit has changed. Delete allocated extensions.
+ ExternalReference delete_extensions =
+ ExternalReference::delete_handle_scope_extensions(isolate());
+ bind(&delete_allocated_handles);
+ mov(Operand::StaticVariable(limit_address), edi);
+ mov(edi, eax);
+ mov(Operand(esp, 0),
+ Immediate(ExternalReference::isolate_address(isolate())));
+ mov(eax, Immediate(delete_extensions));
+ call(eax);
+ mov(eax, edi);
+ jmp(&leave_exit_frame);
+}
+
+
+void MacroAssembler::JumpToExternalReference(const ExternalReference& ext) {
+ // Set the entry point and jump to the C entry runtime stub.
+ mov(ebx, Immediate(ext));
+ CEntryStub ces(isolate(), 1);
+ jmp(ces.GetCode(), RelocInfo::CODE_TARGET);
+}
+
+
+void MacroAssembler::InvokePrologue(const ParameterCount& expected,
+ const ParameterCount& actual,
+ Handle<Code> code_constant,
+ const Operand& code_operand,
+ Label* done,
+ bool* definitely_mismatches,
+ InvokeFlag flag,
+ Label::Distance done_near,
+ const CallWrapper& call_wrapper) {
+ bool definitely_matches = false;
+ *definitely_mismatches = false;
+ Label invoke;
+ if (expected.is_immediate()) {
+ ASSERT(actual.is_immediate());
+ if (expected.immediate() == actual.immediate()) {
+ definitely_matches = true;
+ } else {
+ mov(eax, actual.immediate());
+ const int sentinel = SharedFunctionInfo::kDontAdaptArgumentsSentinel;
+ if (expected.immediate() == sentinel) {
+ // Don't worry about adapting arguments for builtins that
+          // don't want that done. Skip the adaptation code by making it look
+ // like we have a match between expected and actual number of
+ // arguments.
+ definitely_matches = true;
+ } else {
+ *definitely_mismatches = true;
+ mov(ebx, expected.immediate());
+ }
+ }
+ } else {
+ if (actual.is_immediate()) {
+ // Expected is in register, actual is immediate. This is the
+ // case when we invoke function values without going through the
+ // IC mechanism.
+ cmp(expected.reg(), actual.immediate());
+ j(equal, &invoke);
+ ASSERT(expected.reg().is(ebx));
+ mov(eax, actual.immediate());
+ } else if (!expected.reg().is(actual.reg())) {
+ // Both expected and actual are in (different) registers. This
+ // is the case when we invoke functions using call and apply.
+ cmp(expected.reg(), actual.reg());
+ j(equal, &invoke);
+ ASSERT(actual.reg().is(eax));
+ ASSERT(expected.reg().is(ebx));
+ }
+ }
+
+ if (!definitely_matches) {
+ Handle<Code> adaptor =
+ isolate()->builtins()->ArgumentsAdaptorTrampoline();
+ if (!code_constant.is_null()) {
+ mov(edx, Immediate(code_constant));
+ add(edx, Immediate(Code::kHeaderSize - kHeapObjectTag));
+ } else if (!code_operand.is_reg(edx)) {
+ mov(edx, code_operand);
+ }
+
+ if (flag == CALL_FUNCTION) {
+ call_wrapper.BeforeCall(CallSize(adaptor, RelocInfo::CODE_TARGET));
+ call(adaptor, RelocInfo::CODE_TARGET);
+ call_wrapper.AfterCall();
+ if (!*definitely_mismatches) {
+ jmp(done, done_near);
+ }
+ } else {
+ jmp(adaptor, RelocInfo::CODE_TARGET);
+ }
+ bind(&invoke);
+ }
+}
+
+
+void MacroAssembler::InvokeCode(const Operand& code,
+ const ParameterCount& expected,
+ const ParameterCount& actual,
+ InvokeFlag flag,
+ const CallWrapper& call_wrapper) {
+ // You can't call a function without a valid frame.
+ ASSERT(flag == JUMP_FUNCTION || has_frame());
+
+ Label done;
+ bool definitely_mismatches = false;
+ InvokePrologue(expected, actual, Handle<Code>::null(), code,
+ &done, &definitely_mismatches, flag, Label::kNear,
+ call_wrapper);
+ if (!definitely_mismatches) {
+ if (flag == CALL_FUNCTION) {
+ call_wrapper.BeforeCall(CallSize(code));
+ call(code);
+ call_wrapper.AfterCall();
+ } else {
+ ASSERT(flag == JUMP_FUNCTION);
+ jmp(code);
+ }
+ bind(&done);
+ }
+}
+
+
+void MacroAssembler::InvokeFunction(Register fun,
+ const ParameterCount& actual,
+ InvokeFlag flag,
+ const CallWrapper& call_wrapper) {
+ // You can't call a function without a valid frame.
+ ASSERT(flag == JUMP_FUNCTION || has_frame());
+
+ ASSERT(fun.is(edi));
+ mov(edx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
+ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
+ mov(ebx, FieldOperand(edx, SharedFunctionInfo::kFormalParameterCountOffset));
+ SmiUntag(ebx);
+
+ ParameterCount expected(ebx);
+ InvokeCode(FieldOperand(edi, JSFunction::kCodeEntryOffset),
+ expected, actual, flag, call_wrapper);
+}
+
+
+void MacroAssembler::InvokeFunction(Register fun,
+ const ParameterCount& expected,
+ const ParameterCount& actual,
+ InvokeFlag flag,
+ const CallWrapper& call_wrapper) {
+ // You can't call a function without a valid frame.
+ ASSERT(flag == JUMP_FUNCTION || has_frame());
+
+ ASSERT(fun.is(edi));
+ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
+
+ InvokeCode(FieldOperand(edi, JSFunction::kCodeEntryOffset),
+ expected, actual, flag, call_wrapper);
+}
+
+
+void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
+ const ParameterCount& expected,
+ const ParameterCount& actual,
+ InvokeFlag flag,
+ const CallWrapper& call_wrapper) {
+ LoadHeapObject(edi, function);
+ InvokeFunction(edi, expected, actual, flag, call_wrapper);
+}
+
+
+void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
+ InvokeFlag flag,
+ const CallWrapper& call_wrapper) {
+ // You can't call a builtin without a valid frame.
+ ASSERT(flag == JUMP_FUNCTION || has_frame());
+
+ // Rely on the assertion to check that the number of provided
+  // arguments matches the expected number of arguments. Fake a
+ // parameter count to avoid emitting code to do the check.
+ ParameterCount expected(0);
+ GetBuiltinFunction(edi, id);
+ InvokeCode(FieldOperand(edi, JSFunction::kCodeEntryOffset),
+ expected, expected, flag, call_wrapper);
+}
+
+
+void MacroAssembler::GetBuiltinFunction(Register target,
+ Builtins::JavaScript id) {
+ // Load the JavaScript builtin function from the builtins object.
+ mov(target, Operand(esi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
+ mov(target, FieldOperand(target, GlobalObject::kBuiltinsOffset));
+ mov(target, FieldOperand(target,
+ JSBuiltinsObject::OffsetOfFunctionWithId(id)));
+}
+
+
+void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) {
+ ASSERT(!target.is(edi));
+ // Load the JavaScript builtin function from the builtins object.
+ GetBuiltinFunction(edi, id);
+ // Load the code entry point from the function into the target register.
+ mov(target, FieldOperand(edi, JSFunction::kCodeEntryOffset));
+}
+
+
+void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
+ if (context_chain_length > 0) {
+ // Move up the chain of contexts to the context containing the slot.
+ mov(dst, Operand(esi, Context::SlotOffset(Context::PREVIOUS_INDEX)));
+ for (int i = 1; i < context_chain_length; i++) {
+ mov(dst, Operand(dst, Context::SlotOffset(Context::PREVIOUS_INDEX)));
+ }
+ } else {
+ // Slot is in the current function context. Move it into the
+ // destination register in case we store into it (the write barrier
+ // cannot be allowed to destroy the context in esi).
+ mov(dst, esi);
+ }
+
+ // We should not have found a with context by walking the context chain
+ // (i.e., the static scope chain and runtime context chain do not agree).
+ // A variable occurring in such a scope should have slot type LOOKUP and
+ // not CONTEXT.
+ if (emit_debug_code()) {
+ cmp(FieldOperand(dst, HeapObject::kMapOffset),
+ isolate()->factory()->with_context_map());
+ Check(not_equal, kVariableResolvedToWithContext);
+ }
+}
+
+
+void MacroAssembler::LoadTransitionedArrayMapConditional(
+ ElementsKind expected_kind,
+ ElementsKind transitioned_kind,
+ Register map_in_out,
+ Register scratch,
+ Label* no_map_match) {
+ // Load the global or builtins object from the current context.
+ mov(scratch, Operand(esi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
+ mov(scratch, FieldOperand(scratch, GlobalObject::kNativeContextOffset));
+
+ // Check that the function's map is the same as the expected cached map.
+ mov(scratch, Operand(scratch,
+ Context::SlotOffset(Context::JS_ARRAY_MAPS_INDEX)));
+
+ size_t offset = expected_kind * kPointerSize +
+ FixedArrayBase::kHeaderSize;
+ cmp(map_in_out, FieldOperand(scratch, offset));
+ j(not_equal, no_map_match);
+
+ // Use the transitioned cached map.
+ offset = transitioned_kind * kPointerSize +
+ FixedArrayBase::kHeaderSize;
+ mov(map_in_out, FieldOperand(scratch, offset));
+}
+
+
+void MacroAssembler::LoadGlobalFunction(int index, Register function) {
+ // Load the global or builtins object from the current context.
+ mov(function,
+ Operand(esi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
+ // Load the native context from the global or builtins object.
+ mov(function,
+ FieldOperand(function, GlobalObject::kNativeContextOffset));
+ // Load the function from the native context.
+ mov(function, Operand(function, Context::SlotOffset(index)));
+}
+
+
+void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
+ Register map) {
+ // Load the initial map. The global functions all have initial maps.
+ mov(map, FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
+ if (emit_debug_code()) {
+ Label ok, fail;
+ CheckMap(map, isolate()->factory()->meta_map(), &fail, DO_SMI_CHECK);
+ jmp(&ok);
+ bind(&fail);
+ Abort(kGlobalFunctionsMustHaveInitialMap);
+ bind(&ok);
+ }
+}
+
+
+// Store the value in register src in the safepoint register stack
+// slot for register dst.
+void MacroAssembler::StoreToSafepointRegisterSlot(Register dst, Register src) {
+ mov(SafepointRegisterSlot(dst), src);
+}
+
+
+void MacroAssembler::StoreToSafepointRegisterSlot(Register dst, Immediate src) {
+ mov(SafepointRegisterSlot(dst), src);
+}
+
+
+void MacroAssembler::LoadFromSafepointRegisterSlot(Register dst, Register src) {
+ mov(dst, SafepointRegisterSlot(src));
+}
+
+
+Operand MacroAssembler::SafepointRegisterSlot(Register reg) {
+ return Operand(esp, SafepointRegisterStackIndex(reg.code()) * kPointerSize);
+}
+
+
+int MacroAssembler::SafepointRegisterStackIndex(int reg_code) {
+ // The registers are pushed starting with the lowest encoding,
+ // which means that lowest encodings are furthest away from
+ // the stack pointer.
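+  // E.g. with kNumSafepointRegisters == 8 (a sketch of the mapping):
+  //   eax (code 0) -> slot 7, ..., edi (code 7) -> slot 0.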
+ ASSERT(reg_code >= 0 && reg_code < kNumSafepointRegisters);
+ return kNumSafepointRegisters - reg_code - 1;
+}
+
+
+void MacroAssembler::LoadHeapObject(Register result,
+ Handle<HeapObject> object) {
+ AllowDeferredHandleDereference embedding_raw_address;
+ if (isolate()->heap()->InNewSpace(*object)) {
+ Handle<Cell> cell = isolate()->factory()->NewCell(object);
+ mov(result, Operand::ForCell(cell));
+ } else {
+ mov(result, object);
+ }
+}
+
+
+void MacroAssembler::CmpHeapObject(Register reg, Handle<HeapObject> object) {
+ AllowDeferredHandleDereference using_raw_address;
+ if (isolate()->heap()->InNewSpace(*object)) {
+ Handle<Cell> cell = isolate()->factory()->NewCell(object);
+ cmp(reg, Operand::ForCell(cell));
+ } else {
+ cmp(reg, object);
+ }
+}
+
+
+void MacroAssembler::PushHeapObject(Handle<HeapObject> object) {
+ AllowDeferredHandleDereference using_raw_address;
+ if (isolate()->heap()->InNewSpace(*object)) {
+ Handle<Cell> cell = isolate()->factory()->NewCell(object);
+ push(Operand::ForCell(cell));
+ } else {
+ Push(object);
+ }
+}
+
+
+void MacroAssembler::Ret() {
+ ret(0);
+}
+
+
+void MacroAssembler::Ret(int bytes_dropped, Register scratch) {
+ if (is_uint16(bytes_dropped)) {
+ ret(bytes_dropped);
+ } else {
+ pop(scratch);
+ add(esp, Immediate(bytes_dropped));
+ push(scratch);
+ ret(0);
+ }
+}
+
+
+void MacroAssembler::VerifyX87StackDepth(uint32_t depth) {
+ // Make sure the floating point stack is either empty or has depth items.
+ ASSERT(depth <= 7);
+ // This is very expensive.
+ ASSERT(FLAG_debug_code && FLAG_enable_slow_asserts);
+
+ // The top-of-stack (tos) is 7 if there is one item pushed.
+ int tos = (8 - depth) % 8;
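+  // TOP lives in bits 11..13 of the x87 status word, hence the 0x3800 mask
+  // and the shift by 11 below; roughly: tos_actual = (status & 0x3800) >> 11.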
+ const int kTopMask = 0x3800;
+ push(eax);
+ fwait();
+ fnstsw_ax();
+ and_(eax, kTopMask);
+ shr(eax, 11);
+ cmp(eax, Immediate(tos));
+ Check(equal, kUnexpectedFPUStackDepthAfterInstruction);
+ fnclex();
+ pop(eax);
+}
+
+
+void MacroAssembler::Drop(int stack_elements) {
+ if (stack_elements > 0) {
+ add(esp, Immediate(stack_elements * kPointerSize));
+ }
+}
+
+
+void MacroAssembler::Move(Register dst, Register src) {
+ if (!dst.is(src)) {
+ mov(dst, src);
+ }
+}
+
+
+void MacroAssembler::Move(Register dst, const Immediate& x) {
+ if (x.is_zero()) {
+ xor_(dst, dst); // Shorter than mov of 32-bit immediate 0.
+ } else {
+ mov(dst, x);
+ }
+}
+
+
+void MacroAssembler::Move(const Operand& dst, const Immediate& x) {
+ mov(dst, x);
+}
+
+
+void MacroAssembler::SetCounter(StatsCounter* counter, int value) {
+ if (FLAG_native_code_counters && counter->Enabled()) {
+ mov(Operand::StaticVariable(ExternalReference(counter)), Immediate(value));
+ }
+}
+
+
+void MacroAssembler::IncrementCounter(StatsCounter* counter, int value) {
+ ASSERT(value > 0);
+ if (FLAG_native_code_counters && counter->Enabled()) {
+ Operand operand = Operand::StaticVariable(ExternalReference(counter));
+ if (value == 1) {
+ inc(operand);
+ } else {
+ add(operand, Immediate(value));
+ }
+ }
+}
+
+
+void MacroAssembler::DecrementCounter(StatsCounter* counter, int value) {
+ ASSERT(value > 0);
+ if (FLAG_native_code_counters && counter->Enabled()) {
+ Operand operand = Operand::StaticVariable(ExternalReference(counter));
+ if (value == 1) {
+ dec(operand);
+ } else {
+ sub(operand, Immediate(value));
+ }
+ }
+}
+
+
+void MacroAssembler::IncrementCounter(Condition cc,
+ StatsCounter* counter,
+ int value) {
+ ASSERT(value > 0);
+ if (FLAG_native_code_counters && counter->Enabled()) {
+ Label skip;
+ j(NegateCondition(cc), &skip);
+ pushfd();
+ IncrementCounter(counter, value);
+ popfd();
+ bind(&skip);
+ }
+}
+
+
+void MacroAssembler::DecrementCounter(Condition cc,
+ StatsCounter* counter,
+ int value) {
+ ASSERT(value > 0);
+ if (FLAG_native_code_counters && counter->Enabled()) {
+ Label skip;
+ j(NegateCondition(cc), &skip);
+ pushfd();
+ DecrementCounter(counter, value);
+ popfd();
+ bind(&skip);
+ }
+}
+
+
+void MacroAssembler::Assert(Condition cc, BailoutReason reason) {
+ if (emit_debug_code()) Check(cc, reason);
+}
+
+
+void MacroAssembler::AssertFastElements(Register elements) {
+ if (emit_debug_code()) {
+ Factory* factory = isolate()->factory();
+ Label ok;
+ cmp(FieldOperand(elements, HeapObject::kMapOffset),
+ Immediate(factory->fixed_array_map()));
+ j(equal, &ok);
+ cmp(FieldOperand(elements, HeapObject::kMapOffset),
+ Immediate(factory->fixed_double_array_map()));
+ j(equal, &ok);
+ cmp(FieldOperand(elements, HeapObject::kMapOffset),
+ Immediate(factory->fixed_cow_array_map()));
+ j(equal, &ok);
+ Abort(kJSObjectWithFastElementsMapHasSlowElements);
+ bind(&ok);
+ }
+}
+
+
+void MacroAssembler::Check(Condition cc, BailoutReason reason) {
+ Label L;
+ j(cc, &L);
+ Abort(reason);
+ // will not return here
+ bind(&L);
+}
+
+
+void MacroAssembler::CheckStackAlignment() {
+ int frame_alignment = OS::ActivationFrameAlignment();
+ int frame_alignment_mask = frame_alignment - 1;
+ if (frame_alignment > kPointerSize) {
+ ASSERT(IsPowerOf2(frame_alignment));
+ Label alignment_as_expected;
+ test(esp, Immediate(frame_alignment_mask));
+ j(zero, &alignment_as_expected);
+ // Abort if stack is not aligned.
+ int3();
+ bind(&alignment_as_expected);
+ }
+}
+
+
+void MacroAssembler::Abort(BailoutReason reason) {
+#ifdef DEBUG
+ const char* msg = GetBailoutReason(reason);
+ if (msg != NULL) {
+ RecordComment("Abort message: ");
+ RecordComment(msg);
+ }
+
+ if (FLAG_trap_on_abort) {
+ int3();
+ return;
+ }
+#endif
+
+ push(eax);
+ push(Immediate(reinterpret_cast<intptr_t>(Smi::FromInt(reason))));
+ // Disable stub call restrictions to always allow calls to abort.
+ if (!has_frame_) {
+ // We don't actually want to generate a pile of code for this, so just
+ // claim there is a stack frame, without generating one.
+ FrameScope scope(this, StackFrame::NONE);
+ CallRuntime(Runtime::kAbort, 1);
+ } else {
+ CallRuntime(Runtime::kAbort, 1);
+ }
+ // will not return here
+ int3();
+}
+
+
+void MacroAssembler::Throw(BailoutReason reason) {
+#ifdef DEBUG
+ const char* msg = GetBailoutReason(reason);
+ if (msg != NULL) {
+ RecordComment("Throw message: ");
+ RecordComment(msg);
+ }
+#endif
+
+ push(eax);
+ push(Immediate(Smi::FromInt(reason)));
+ // Disable stub call restrictions to always allow calls to throw.
+ if (!has_frame_) {
+ // We don't actually want to generate a pile of code for this, so just
+ // claim there is a stack frame, without generating one.
+ FrameScope scope(this, StackFrame::NONE);
+ CallRuntime(Runtime::kHiddenThrowMessage, 1);
+ } else {
+ CallRuntime(Runtime::kHiddenThrowMessage, 1);
+ }
+ // will not return here
+ int3();
+}
+
+
+void MacroAssembler::ThrowIf(Condition cc, BailoutReason reason) {
+ Label L;
+ j(NegateCondition(cc), &L);
+ Throw(reason);
+ // will not return here
+ bind(&L);
+}
+
+
+void MacroAssembler::LoadInstanceDescriptors(Register map,
+ Register descriptors) {
+ mov(descriptors, FieldOperand(map, Map::kDescriptorsOffset));
+}
+
+
+void MacroAssembler::NumberOfOwnDescriptors(Register dst, Register map) {
+ mov(dst, FieldOperand(map, Map::kBitField3Offset));
+ DecodeField<Map::NumberOfOwnDescriptorsBits>(dst);
+}
+
+
+void MacroAssembler::LookupNumberStringCache(Register object,
+ Register result,
+ Register scratch1,
+ Register scratch2,
+ Label* not_found) {
+ // Use of registers. Register result is used as a temporary.
+ Register number_string_cache = result;
+ Register mask = scratch1;
+ Register scratch = scratch2;
+
+ // Load the number string cache.
+ LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex);
+ // Make the hash mask from the length of the number string cache. It
+ // contains two elements (number and string) for each cache entry.
+ mov(mask, FieldOperand(number_string_cache, FixedArray::kLengthOffset));
+ shr(mask, kSmiTagSize + 1); // Untag length and divide it by two.
+ sub(mask, Immediate(1)); // Make mask.
+
+ // Calculate the entry in the number string cache. The hash value in the
+ // number string cache for smis is just the smi value, and the hash for
+ // doubles is the xor of the upper and lower words. See
+ // Heap::GetNumberStringCache.
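+  // In rough C terms (a sketch):
+  //   hash = IsSmi(obj) ? SmiValue(obj) : low_word(d) ^ high_word(d);
+  //   index = hash & mask;  // each entry is a (number, string) pair of slots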
+ Label smi_hash_calculated;
+ Label load_result_from_cache;
+ Label not_smi;
+ STATIC_ASSERT(kSmiTag == 0);
+ JumpIfNotSmi(object, ¬_smi, Label::kNear);
+ mov(scratch, object);
+ SmiUntag(scratch);
+ jmp(&smi_hash_calculated, Label::kNear);
+ bind(¬_smi);
+ cmp(FieldOperand(object, HeapObject::kMapOffset),
+ isolate()->factory()->heap_number_map());
+ j(not_equal, not_found);
+ STATIC_ASSERT(8 == kDoubleSize);
+ mov(scratch, FieldOperand(object, HeapNumber::kValueOffset));
+ xor_(scratch, FieldOperand(object, HeapNumber::kValueOffset + 4));
+ // Object is heap number and hash is now in scratch. Calculate cache index.
+ and_(scratch, mask);
+ Register index = scratch;
+ Register probe = mask;
+ mov(probe,
+ FieldOperand(number_string_cache,
+ index,
+ times_twice_pointer_size,
+ FixedArray::kHeaderSize));
+ JumpIfSmi(probe, not_found);
+ fld_d(FieldOperand(object, HeapNumber::kValueOffset));
+ fld_d(FieldOperand(probe, HeapNumber::kValueOffset));
+ FCmp();
+ j(parity_even, not_found); // Bail out if NaN is involved.
+ j(not_equal, not_found); // The cache did not contain this value.
+ jmp(&load_result_from_cache, Label::kNear);
+
+ bind(&smi_hash_calculated);
+ // Object is smi and hash is now in scratch. Calculate cache index.
+ and_(scratch, mask);
+ // Check if the entry is the smi we are looking for.
+ cmp(object,
+ FieldOperand(number_string_cache,
+ index,
+ times_twice_pointer_size,
+ FixedArray::kHeaderSize));
+ j(not_equal, not_found);
+
+ // Get the result from the cache.
+ bind(&load_result_from_cache);
+ mov(result,
+ FieldOperand(number_string_cache,
+ index,
+ times_twice_pointer_size,
+ FixedArray::kHeaderSize + kPointerSize));
+ IncrementCounter(isolate()->counters()->number_to_string_native(), 1);
+}
+
+
+void MacroAssembler::JumpIfInstanceTypeIsNotSequentialAscii(
+ Register instance_type,
+ Register scratch,
+ Label* failure) {
+ if (!scratch.is(instance_type)) {
+ mov(scratch, instance_type);
+ }
+ and_(scratch,
+ kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask);
+ cmp(scratch, kStringTag | kSeqStringTag | kOneByteStringTag);
+ j(not_equal, failure);
+}
+
+
+void MacroAssembler::JumpIfNotBothSequentialAsciiStrings(Register object1,
+ Register object2,
+ Register scratch1,
+ Register scratch2,
+ Label* failure) {
+ // Check that both objects are not smis.
+ STATIC_ASSERT(kSmiTag == 0);
+ mov(scratch1, object1);
+ and_(scratch1, object2);
+ JumpIfSmi(scratch1, failure);
+
+ // Load instance type for both strings.
+ mov(scratch1, FieldOperand(object1, HeapObject::kMapOffset));
+ mov(scratch2, FieldOperand(object2, HeapObject::kMapOffset));
+ movzx_b(scratch1, FieldOperand(scratch1, Map::kInstanceTypeOffset));
+ movzx_b(scratch2, FieldOperand(scratch2, Map::kInstanceTypeOffset));
+
+ // Check that both are flat ASCII strings.
+ const int kFlatAsciiStringMask =
+ kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
+ const int kFlatAsciiStringTag =
+ kStringTag | kOneByteStringTag | kSeqStringTag;
+ // Interleave bits from both instance types and compare them in one check.
+ ASSERT_EQ(0, kFlatAsciiStringMask & (kFlatAsciiStringMask << 3));
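+  // Roughly: compare (type1 & mask) | ((type2 & mask) << 3) against
+  // tag | (tag << 3); this works because mask and mask << 3 do not overlap
+  // (asserted above), so a single cmp checks both strings.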
+ and_(scratch1, kFlatAsciiStringMask);
+ and_(scratch2, kFlatAsciiStringMask);
+ lea(scratch1, Operand(scratch1, scratch2, times_8, 0));
+ cmp(scratch1, kFlatAsciiStringTag | (kFlatAsciiStringTag << 3));
+ j(not_equal, failure);
+}
+
+
+void MacroAssembler::JumpIfNotUniqueName(Operand operand,
+ Label* not_unique_name,
+ Label::Distance distance) {
+ STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
+ Label succeed;
+ test(operand, Immediate(kIsNotStringMask | kIsNotInternalizedMask));
+ j(zero, &succeed);
+ cmpb(operand, static_cast<uint8_t>(SYMBOL_TYPE));
+ j(not_equal, not_unique_name, distance);
+
+ bind(&succeed);
+}
+
+
+void MacroAssembler::EmitSeqStringSetCharCheck(Register string,
+ Register index,
+ Register value,
+ uint32_t encoding_mask) {
+ Label is_object;
+ JumpIfNotSmi(string, &is_object, Label::kNear);
+ Abort(kNonObject);
+ bind(&is_object);
+
+ push(value);
+ mov(value, FieldOperand(string, HeapObject::kMapOffset));
+ movzx_b(value, FieldOperand(value, Map::kInstanceTypeOffset));
+
+ and_(value, Immediate(kStringRepresentationMask | kStringEncodingMask));
+ cmp(value, Immediate(encoding_mask));
+ pop(value);
+ Check(equal, kUnexpectedStringType);
+
+  // The index is assumed to be untagged coming in; tag it to compare with the
+  // string length without using a temp register. It is restored at the end of
+  // this function.
+ SmiTag(index);
+ Check(no_overflow, kIndexIsTooLarge);
+
+ cmp(index, FieldOperand(string, String::kLengthOffset));
+ Check(less, kIndexIsTooLarge);
+
+ cmp(index, Immediate(Smi::FromInt(0)));
+ Check(greater_equal, kIndexIsNegative);
+
+ // Restore the index
+ SmiUntag(index);
+}
+
+
+void MacroAssembler::PrepareCallCFunction(int num_arguments, Register scratch) {
+ int frame_alignment = OS::ActivationFrameAlignment();
+ if (frame_alignment != 0) {
+ // Make stack end at alignment and make room for num_arguments words
+ // and the original value of esp.
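+    // Resulting layout, low to high addresses (a sketch):
+    //   [esp + 0 .. num_arguments * kPointerSize)  argument slots
+    //   [esp + num_arguments * kPointerSize]       saved original esp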
+ mov(scratch, esp);
+ sub(esp, Immediate((num_arguments + 1) * kPointerSize));
+ ASSERT(IsPowerOf2(frame_alignment));
+ and_(esp, -frame_alignment);
+ mov(Operand(esp, num_arguments * kPointerSize), scratch);
+ } else {
+ sub(esp, Immediate(num_arguments * kPointerSize));
+ }
+}
+
+
+void MacroAssembler::CallCFunction(ExternalReference function,
+ int num_arguments) {
+ // Trashing eax is ok as it will be the return value.
+ mov(eax, Immediate(function));
+ CallCFunction(eax, num_arguments);
+}
+
+
+void MacroAssembler::CallCFunction(Register function,
+ int num_arguments) {
+ ASSERT(has_frame());
+ // Check stack alignment.
+ if (emit_debug_code()) {
+ CheckStackAlignment();
+ }
+
+ call(function);
+ if (OS::ActivationFrameAlignment() != 0) {
+ mov(esp, Operand(esp, num_arguments * kPointerSize));
+ } else {
+ add(esp, Immediate(num_arguments * kPointerSize));
+ }
+}
+
+
+bool AreAliased(Register r1, Register r2, Register r3, Register r4) {
+ if (r1.is(r2)) return true;
+ if (r1.is(r3)) return true;
+ if (r1.is(r4)) return true;
+ if (r2.is(r3)) return true;
+ if (r2.is(r4)) return true;
+ if (r3.is(r4)) return true;
+ return false;
+}
+
+
+CodePatcher::CodePatcher(byte* address, int size)
+ : address_(address),
+ size_(size),
+ masm_(NULL, address, size + Assembler::kGap) {
+ // Create a new macro assembler pointing to the address of the code to patch.
+ // The size is adjusted with kGap in order for the assembler to generate size
+ // bytes of instructions without failing with buffer size constraints.
+ ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
+}
+
+
+CodePatcher::~CodePatcher() {
+ // Indicate that code has changed.
+ CPU::FlushICache(address_, size_);
+
+ // Check that the code was patched as expected.
+ ASSERT(masm_.pc_ == address_ + size_);
+ ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
+}
+
+
+void MacroAssembler::CheckPageFlag(
+ Register object,
+ Register scratch,
+ int mask,
+ Condition cc,
+ Label* condition_met,
+ Label::Distance condition_met_distance) {
+ ASSERT(cc == zero || cc == not_zero);
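+ // Masking off the low bits of the object's address yields the start of its
+ // page (MemoryChunk), where the flags word lives.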
+ if (scratch.is(object)) {
+ and_(scratch, Immediate(~Page::kPageAlignmentMask));
+ } else {
+ mov(scratch, Immediate(~Page::kPageAlignmentMask));
+ and_(scratch, object);
+ }
+ if (mask < (1 << kBitsPerByte)) {
+ test_b(Operand(scratch, MemoryChunk::kFlagsOffset),
+ static_cast<uint8_t>(mask));
+ } else {
+ test(Operand(scratch, MemoryChunk::kFlagsOffset), Immediate(mask));
+ }
+ j(cc, condition_met, condition_met_distance);
+}
+
+
+void MacroAssembler::CheckPageFlagForMap(
+ Handle<Map> map,
+ int mask,
+ Condition cc,
+ Label* condition_met,
+ Label::Distance condition_met_distance) {
+ ASSERT(cc == zero || cc == not_zero);
+ Page* page = Page::FromAddress(map->address());
+ ExternalReference reference(ExternalReference::page_flags(page));
+ // The inlined static address check of the page's flags relies
+ // on maps never being compacted.
+ ASSERT(!isolate()->heap()->mark_compact_collector()->
+ IsOnEvacuationCandidate(*map));
+ if (mask < (1 << kBitsPerByte)) {
+ test_b(Operand::StaticVariable(reference), static_cast<uint8_t>(mask));
+ } else {
+ test(Operand::StaticVariable(reference), Immediate(mask));
+ }
+ j(cc, condition_met, condition_met_distance);
+}
+
+
+void MacroAssembler::CheckMapDeprecated(Handle<Map> map,
+ Register scratch,
+ Label* if_deprecated) {
+ if (map->CanBeDeprecated()) {
+ mov(scratch, map);
+ mov(scratch, FieldOperand(scratch, Map::kBitField3Offset));
+ and_(scratch, Immediate(Map::Deprecated::kMask));
+ j(not_zero, if_deprecated);
+ }
+}
+
+
+void MacroAssembler::JumpIfBlack(Register object,
+ Register scratch0,
+ Register scratch1,
+ Label* on_black,
+ Label::Distance on_black_near) {
+ HasColor(object, scratch0, scratch1,
+ on_black, on_black_near,
+ 1, 0); // kBlackBitPattern.
+ ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
+}
+
+
+void MacroAssembler::HasColor(Register object,
+ Register bitmap_scratch,
+ Register mask_scratch,
+ Label* has_color,
+ Label::Distance has_color_distance,
+ int first_bit,
+ int second_bit) {
+ ASSERT(!AreAliased(object, bitmap_scratch, mask_scratch, ecx));
+
+ GetMarkBits(object, bitmap_scratch, mask_scratch);
+
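+ // An object's color is encoded in two consecutive bits of the page's marking
+ // bitmap; mask_scratch has a single bit set selecting the first of them.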
+ Label other_color, word_boundary;
+ test(mask_scratch, Operand(bitmap_scratch, MemoryChunk::kHeaderSize));
+ j(first_bit == 1 ? zero : not_zero, &other_color, Label::kNear);
+ add(mask_scratch, mask_scratch); // Shift left 1 by adding.
+ j(zero, &word_boundary, Label::kNear);
+ test(mask_scratch, Operand(bitmap_scratch, MemoryChunk::kHeaderSize));
+ j(second_bit == 1 ? not_zero : zero, has_color, has_color_distance);
+ jmp(&other_color, Label::kNear);
+
+ bind(&word_boundary);
+ test_b(Operand(bitmap_scratch, MemoryChunk::kHeaderSize + kPointerSize), 1);
+
+ j(second_bit == 1 ? not_zero : zero, has_color, has_color_distance);
+ bind(&other_color);
+}
+
+
+void MacroAssembler::GetMarkBits(Register addr_reg,
+ Register bitmap_reg,
+ Register mask_reg) {
+ ASSERT(!AreAliased(addr_reg, mask_reg, bitmap_reg, ecx));
+ mov(bitmap_reg, Immediate(~Page::kPageAlignmentMask));
+ and_(bitmap_reg, addr_reg);
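+ // bitmap_reg now holds the page start; the code below computes the byte
+ // offset of addr_reg's mark-bit cell and a single-bit mask within that cell.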
+ mov(ecx, addr_reg);
+ int shift =
+ Bitmap::kBitsPerCellLog2 + kPointerSizeLog2 - Bitmap::kBytesPerCellLog2;
+ shr(ecx, shift);
+ and_(ecx,
+ (Page::kPageAlignmentMask >> shift) & ~(Bitmap::kBytesPerCell - 1));
+
+ add(bitmap_reg, ecx);
+ mov(ecx, addr_reg);
+ shr(ecx, kPointerSizeLog2);
+ and_(ecx, (1 << Bitmap::kBitsPerCellLog2) - 1);
+ mov(mask_reg, Immediate(1));
+ shl_cl(mask_reg);
+}
+
+
+void MacroAssembler::EnsureNotWhite(
+ Register value,
+ Register bitmap_scratch,
+ Register mask_scratch,
+ Label* value_is_white_and_not_data,
+ Label::Distance distance) {
+ ASSERT(!AreAliased(value, bitmap_scratch, mask_scratch, ecx));
+ GetMarkBits(value, bitmap_scratch, mask_scratch);
+
+ // If the value is black or grey we don't need to do anything.
+ ASSERT(strcmp(Marking::kWhiteBitPattern, "00") == 0);
+ ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
+ ASSERT(strcmp(Marking::kGreyBitPattern, "11") == 0);
+ ASSERT(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
+
+ Label done;
+
+ // Since both black and grey have a 1 in the first position and white does
+ // not have a 1 there we only need to check one bit.
+ test(mask_scratch, Operand(bitmap_scratch, MemoryChunk::kHeaderSize));
+ j(not_zero, &done, Label::kNear);
+
+ if (emit_debug_code()) {
+ // Check for impossible bit pattern.
+ Label ok;
+ push(mask_scratch);
+ // Shift left (the add below doubles the mask). May overflow, making the
+ // check conservative.
+ add(mask_scratch, mask_scratch);
+ test(mask_scratch, Operand(bitmap_scratch, MemoryChunk::kHeaderSize));
+ j(zero, &ok, Label::kNear);
+ int3();
+ bind(&ok);
+ pop(mask_scratch);
+ }
+
+ // Value is white. We check whether it is data that doesn't need scanning.
+ // Currently only checks for HeapNumber and non-cons strings.
+ Register map = ecx; // Holds map while checking type.
+ Register length = ecx; // Holds length of object after checking type.
+ Label not_heap_number;
+ Label is_data_object;
+
+ // Check for heap-number
+ mov(map, FieldOperand(value, HeapObject::kMapOffset));
+ cmp(map, isolate()->factory()->heap_number_map());
+ j(not_equal, &not_heap_number, Label::kNear);
+ mov(length, Immediate(HeapNumber::kSize));
+ jmp(&is_data_object, Label::kNear);
+
+ bind(&not_heap_number);
+ // Check for strings.
+ ASSERT(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
+ ASSERT(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
+ // If it's a string and it's not a cons string then it's an object containing
+ // no GC pointers.
+ Register instance_type = ecx;
+ movzx_b(instance_type, FieldOperand(map, Map::kInstanceTypeOffset));
+ test_b(instance_type, kIsIndirectStringMask | kIsNotStringMask);
+ j(not_zero, value_is_white_and_not_data);
+ // It's a non-indirect (non-cons and non-slice) string.
+ // If it's external, the length is just ExternalString::kSize.
+ // Otherwise it's String::kHeaderSize + string->length() * (1 or 2).
+ Label not_external;
+ // External strings are the only ones with the kExternalStringTag bit
+ // set.
+ ASSERT_EQ(0, kSeqStringTag & kExternalStringTag);
+ ASSERT_EQ(0, kConsStringTag & kExternalStringTag);
+ test_b(instance_type, kExternalStringTag);
+ j(zero, &not_external, Label::kNear);
+ mov(length, Immediate(ExternalString::kSize));
+ jmp(&is_data_object, Label::kNear);
+
+ bind(&not_external);
+ // Sequential string, either ASCII or UC16.
+ ASSERT(kOneByteStringTag == 0x04);
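+ // Branch-free mapping of the encoding bit: one-byte strings (tag 0x04)
+ // become 4, two-byte strings (tag 0x00) become 8.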
+ and_(length, Immediate(kStringEncodingMask));
+ xor_(length, Immediate(kStringEncodingMask));
+ add(length, Immediate(0x04));
+ // Value now either 4 (if ASCII) or 8 (if UC16), i.e., char-size shifted
+ // by 2. If we multiply the string length as smi by this, it still
+ // won't overflow a 32-bit value.
+ ASSERT_EQ(SeqOneByteString::kMaxSize, SeqTwoByteString::kMaxSize);
+ ASSERT(SeqOneByteString::kMaxSize <=
+ static_cast<int>(0xffffffffu >> (2 + kSmiTagSize)));
+ imul(length, FieldOperand(value, String::kLengthOffset));
+ shr(length, 2 + kSmiTagSize + kSmiShiftSize);
+ add(length, Immediate(SeqString::kHeaderSize + kObjectAlignmentMask));
+ and_(length, Immediate(~kObjectAlignmentMask));
+
+ bind(&is_data_object);
+ // Value is a data object, and it is white. Mark it black. Since we know
+ // that the object is white we can make it black by flipping one bit.
+ or_(Operand(bitmap_scratch, MemoryChunk::kHeaderSize), mask_scratch);
+
+ and_(bitmap_scratch, Immediate(~Page::kPageAlignmentMask));
+ add(Operand(bitmap_scratch, MemoryChunk::kLiveBytesOffset),
+ length);
+ if (emit_debug_code()) {
+ mov(length, Operand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));
+ cmp(length, Operand(bitmap_scratch, MemoryChunk::kSizeOffset));
+ Check(less_equal, kLiveBytesCountOverflowChunkSize);
+ }
+
+ bind(&done);
+}
+
+
+void MacroAssembler::EnumLength(Register dst, Register map) {
+ STATIC_ASSERT(Map::EnumLengthBits::kShift == 0);
+ mov(dst, FieldOperand(map, Map::kBitField3Offset));
+ and_(dst, Immediate(Map::EnumLengthBits::kMask));
+ SmiTag(dst);
+}
+
+
+void MacroAssembler::CheckEnumCache(Label* call_runtime) {
+ Label next, start;
+ mov(ecx, eax);
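+ // ecx walks the prototype chain starting at the receiver (eax), ebx holds
+ // the current map and edx its enum length.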
+
+ // Check if the enum length field is properly initialized, indicating that
+ // there is an enum cache.
+ mov(ebx, FieldOperand(ecx, HeapObject::kMapOffset));
+
+ EnumLength(edx, ebx);
+ cmp(edx, Immediate(Smi::FromInt(kInvalidEnumCacheSentinel)));
+ j(equal, call_runtime);
+
+ jmp(&start);
+
+ bind(&next);
+ mov(ebx, FieldOperand(ecx, HeapObject::kMapOffset));
+
+ // For all objects but the receiver, check that the cache is empty.
+ EnumLength(edx, ebx);
+ cmp(edx, Immediate(Smi::FromInt(0)));
+ j(not_equal, call_runtime);
+
+ bind(&start);
+
+ // Check that there are no elements. Register ecx contains the current JS
+ // object we've reached through the prototype chain.
+ Label no_elements;
+ mov(ecx, FieldOperand(ecx, JSObject::kElementsOffset));
+ cmp(ecx, isolate()->factory()->empty_fixed_array());
+ j(equal, &no_elements);
+
+ // Second chance, the object may be using the empty slow element dictionary.
+ cmp(ecx, isolate()->factory()->empty_slow_element_dictionary());
+ j(not_equal, call_runtime);
+
+ bind(&no_elements);
+ mov(ecx, FieldOperand(ebx, Map::kPrototypeOffset));
+ cmp(ecx, isolate()->factory()->null_value());
+ j(not_equal, &next);
+}
+
+
+void MacroAssembler::TestJSArrayForAllocationMemento(
+ Register receiver_reg,
+ Register scratch_reg,
+ Label* no_memento_found) {
+ ExternalReference new_space_start =
+ ExternalReference::new_space_start(isolate());
+ ExternalReference new_space_allocation_top =
+ ExternalReference::new_space_allocation_top_address(isolate());
+
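+ // Compute the address just past where an AllocationMemento would sit,
+ // directly behind the JSArray, and check that it lies in new space below
+ // the current allocation top.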
+ lea(scratch_reg, Operand(receiver_reg,
+ JSArray::kSize + AllocationMemento::kSize - kHeapObjectTag));
+ cmp(scratch_reg, Immediate(new_space_start));
+ j(less, no_memento_found);
+ cmp(scratch_reg, Operand::StaticVariable(new_space_allocation_top));
+ j(greater, no_memento_found);
+ cmp(MemOperand(scratch_reg, -AllocationMemento::kSize),
+ Immediate(isolate()->factory()->allocation_memento_map()));
+}
+
+
+void MacroAssembler::JumpIfDictionaryInPrototypeChain(
+ Register object,
+ Register scratch0,
+ Register scratch1,
+ Label* found) {
+ ASSERT(!scratch1.is(scratch0));
+ Factory* factory = isolate()->factory();
+ Register current = scratch0;
+ Label loop_again;
+
+ // current (scratch0) starts at the object itself.
+ mov(current, object);
+
+ // Loop based on the map going up the prototype chain.
+ bind(&loop_again);
+ mov(current, FieldOperand(current, HeapObject::kMapOffset));
+ mov(scratch1, FieldOperand(current, Map::kBitField2Offset));
+ and_(scratch1, Map::kElementsKindMask);
+ shr(scratch1, Map::kElementsKindShift);
+ cmp(scratch1, Immediate(DICTIONARY_ELEMENTS));
+ j(equal, found);
+ mov(current, FieldOperand(current, Map::kPrototypeOffset));
+ cmp(current, Immediate(factory->null_value()));
+ j(not_equal, &loop_again);
+}
+
+
+void MacroAssembler::TruncatingDiv(Register dividend, int32_t divisor) {
+ ASSERT(!dividend.is(eax));
+ ASSERT(!dividend.is(edx));
+ MultiplierAndShift ms(divisor);
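+ // Magic-number division: multiply by the precomputed multiplier, keep the
+ // high 32 bits (edx), then apply the shift and sign corrections below to get
+ // the quotient truncated toward zero.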
+ mov(eax, Immediate(ms.multiplier()));
+ imul(dividend);
+ if (divisor > 0 && ms.multiplier() < 0) add(edx, dividend);
+ if (divisor < 0 && ms.multiplier() > 0) sub(edx, dividend);
+ if (ms.shift() > 0) sar(edx, ms.shift());
+ mov(eax, dividend);
+ shr(eax, 31);
+ add(edx, eax);
+}
+
+
+} } // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_X87
--- /dev/null
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_X87_MACRO_ASSEMBLER_X87_H_
+#define V8_X87_MACRO_ASSEMBLER_X87_H_
+
+#include "assembler.h"
+#include "frames.h"
+#include "v8globals.h"
+
+namespace v8 {
+namespace internal {
+
+// Convenience for platform-independent signatures. We do not normally
+// distinguish memory operands from other operands on ia32.
+typedef Operand MemOperand;
+
+enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET };
+enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK };
+
+
+enum RegisterValueType {
+ REGISTER_VALUE_IS_SMI,
+ REGISTER_VALUE_IS_INT32
+};
+
+
+bool AreAliased(Register r1, Register r2, Register r3, Register r4);
+
+
+// MacroAssembler implements a collection of frequently used macros.
+class MacroAssembler: public Assembler {
+ public:
+ // The isolate parameter can be NULL if the macro assembler should
+ // not use isolate-dependent functionality. In this case, it's the
+ // responsibility of the caller to never invoke such function on the
+ // macro assembler.
+ MacroAssembler(Isolate* isolate, void* buffer, int size);
+
+ void Load(Register dst, const Operand& src, Representation r);
+ void Store(Register src, const Operand& dst, Representation r);
+
+ // Operations on roots in the root-array.
+ void LoadRoot(Register destination, Heap::RootListIndex index);
+ void StoreRoot(Register source, Register scratch, Heap::RootListIndex index);
+ void CompareRoot(Register with, Register scratch, Heap::RootListIndex index);
+ // These methods can only be used with constant roots (i.e. non-writable
+ // and not in new space).
+ void CompareRoot(Register with, Heap::RootListIndex index);
+ void CompareRoot(const Operand& with, Heap::RootListIndex index);
+
+ // ---------------------------------------------------------------------------
+ // GC Support
+ enum RememberedSetFinalAction {
+ kReturnAtEnd,
+ kFallThroughAtEnd
+ };
+
+ // Record in the remembered set the fact that we have a pointer to new space
+ // at the address pointed to by the addr register. Only works if addr is not
+ // in new space.
+ void RememberedSetHelper(Register object, // Used for debug code.
+ Register addr,
+ Register scratch,
+ RememberedSetFinalAction and_then);
+
+ void CheckPageFlag(Register object,
+ Register scratch,
+ int mask,
+ Condition cc,
+ Label* condition_met,
+ Label::Distance condition_met_distance = Label::kFar);
+
+ void CheckPageFlagForMap(
+ Handle<Map> map,
+ int mask,
+ Condition cc,
+ Label* condition_met,
+ Label::Distance condition_met_distance = Label::kFar);
+
+ void CheckMapDeprecated(Handle<Map> map,
+ Register scratch,
+ Label* if_deprecated);
+
+ // Check if object is in new space. Jumps if the object is not in new space.
+ // The register scratch can be object itself, but scratch will be clobbered.
+ void JumpIfNotInNewSpace(Register object,
+ Register scratch,
+ Label* branch,
+ Label::Distance distance = Label::kFar) {
+ InNewSpace(object, scratch, zero, branch, distance);
+ }
+
+ // Check if object is in new space. Jumps if the object is in new space.
+ // The register scratch can be object itself, but it will be clobbered.
+ void JumpIfInNewSpace(Register object,
+ Register scratch,
+ Label* branch,
+ Label::Distance distance = Label::kFar) {
+ InNewSpace(object, scratch, not_zero, branch, distance);
+ }
+
+ // Check if an object has a given incremental marking color. Also uses ecx!
+ void HasColor(Register object,
+ Register scratch0,
+ Register scratch1,
+ Label* has_color,
+ Label::Distance has_color_distance,
+ int first_bit,
+ int second_bit);
+
+ void JumpIfBlack(Register object,
+ Register scratch0,
+ Register scratch1,
+ Label* on_black,
+ Label::Distance on_black_distance = Label::kFar);
+
+ // Checks the color of an object. If the object is already grey or black
+ // then we just fall through, since it is already live. If it is white and
+ // we can determine that it doesn't need to be scanned, then we just mark it
+ // black and fall through. For the rest we jump to the label so the
+ // incremental marker can fix its assumptions.
+ void EnsureNotWhite(Register object,
+ Register scratch1,
+ Register scratch2,
+ Label* object_is_white_and_not_data,
+ Label::Distance distance);
+
+ // Notify the garbage collector that we wrote a pointer into an object.
+ // |object| is the object being stored into, |value| is the object being
+ // stored. value and scratch registers are clobbered by the operation.
+ // The offset is the offset from the start of the object, not the offset from
+ // the tagged HeapObject pointer. For use with FieldOperand(reg, off).
+ void RecordWriteField(
+ Register object,
+ int offset,
+ Register value,
+ Register scratch,
+ RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
+ SmiCheck smi_check = INLINE_SMI_CHECK);
+
+ // As above, but the offset has the tag presubtracted. For use with
+ // Operand(reg, off).
+ void RecordWriteContextSlot(
+ Register context,
+ int offset,
+ Register value,
+ Register scratch,
+ RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
+ SmiCheck smi_check = INLINE_SMI_CHECK) {
+ RecordWriteField(context,
+ offset + kHeapObjectTag,
+ value,
+ scratch,
+ remembered_set_action,
+ smi_check);
+ }
+
+ // Notify the garbage collector that we wrote a pointer into a fixed array.
+ // |array| is the array being stored into, |value| is the
+ // object being stored. |index| is the array index represented as a
+ // Smi. All registers are clobbered by the operation. RecordWriteArray
+ // filters out smis so it does not update the write barrier if the
+ // value is a smi.
+ void RecordWriteArray(
+ Register array,
+ Register value,
+ Register index,
+ RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
+ SmiCheck smi_check = INLINE_SMI_CHECK);
+
+ // For page containing |object| mark region covering |address|
+ // dirty. |object| is the object being stored into, |value| is the
+ // object being stored. The address and value registers are clobbered by the
+ // operation. RecordWrite filters out smis so it does not update the
+ // write barrier if the value is a smi.
+ void RecordWrite(
+ Register object,
+ Register address,
+ Register value,
+ RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
+ SmiCheck smi_check = INLINE_SMI_CHECK);
+
+ // For page containing |object| mark the region covering the object's map
+ // dirty. |object| is the object being stored into, |map| is the Map object
+ // that was stored.
+ void RecordWriteForMap(
+ Register object,
+ Handle<Map> map,
+ Register scratch1,
+ Register scratch2);
+
+ // ---------------------------------------------------------------------------
+ // Debugger Support
+
+ void DebugBreak();
+
+ // Generates function and stub prologue code.
+ void StubPrologue();
+ void Prologue(bool code_pre_aging);
+
+ // Enter specific kind of exit frame. Expects the number of
+ // arguments in register eax and sets up the number of arguments in
+ // register edi and the pointer to the first argument in register
+ // esi.
+ void EnterExitFrame();
+
+ void EnterApiExitFrame(int argc);
+
+ // Leave the current exit frame. Expects the return value in
+ // register eax:edx (untouched) and the pointer to the first
+ // argument in register esi.
+ void LeaveExitFrame();
+
+ // Leave the current exit frame. Expects the return value in
+ // register eax (untouched).
+ void LeaveApiExitFrame(bool restore_context);
+
+ // Find the function context up the context chain.
+ void LoadContext(Register dst, int context_chain_length);
+
+ // Conditionally load the cached Array transitioned map of type
+ // transitioned_kind from the native context if the map in register
+ // map_in_out is the cached Array map in the native context of
+ // expected_kind.
+ void LoadTransitionedArrayMapConditional(
+ ElementsKind expected_kind,
+ ElementsKind transitioned_kind,
+ Register map_in_out,
+ Register scratch,
+ Label* no_map_match);
+
+ // Load the global function with the given index.
+ void LoadGlobalFunction(int index, Register function);
+
+ // Load the initial map from the global function. The registers
+ // function and map can be the same.
+ void LoadGlobalFunctionInitialMap(Register function, Register map);
+
+ // Push and pop the registers that can hold pointers.
+ void PushSafepointRegisters() { pushad(); }
+ void PopSafepointRegisters() { popad(); }
+ // Store the value in register/immediate src in the safepoint
+ // register stack slot for register dst.
+ void StoreToSafepointRegisterSlot(Register dst, Register src);
+ void StoreToSafepointRegisterSlot(Register dst, Immediate src);
+ void LoadFromSafepointRegisterSlot(Register dst, Register src);
+
+ void LoadHeapObject(Register result, Handle<HeapObject> object);
+ void CmpHeapObject(Register reg, Handle<HeapObject> object);
+ void PushHeapObject(Handle<HeapObject> object);
+
+ void LoadObject(Register result, Handle<Object> object) {
+ AllowDeferredHandleDereference heap_object_check;
+ if (object->IsHeapObject()) {
+ LoadHeapObject(result, Handle<HeapObject>::cast(object));
+ } else {
+ Move(result, Immediate(object));
+ }
+ }
+
+ void CmpObject(Register reg, Handle<Object> object) {
+ AllowDeferredHandleDereference heap_object_check;
+ if (object->IsHeapObject()) {
+ CmpHeapObject(reg, Handle<HeapObject>::cast(object));
+ } else {
+ cmp(reg, Immediate(object));
+ }
+ }
+
+ // ---------------------------------------------------------------------------
+ // JavaScript invokes
+
+ // Invoke the JavaScript function code by either calling or jumping.
+ void InvokeCode(Register code,
+ const ParameterCount& expected,
+ const ParameterCount& actual,
+ InvokeFlag flag,
+ const CallWrapper& call_wrapper) {
+ InvokeCode(Operand(code), expected, actual, flag, call_wrapper);
+ }
+
+ void InvokeCode(const Operand& code,
+ const ParameterCount& expected,
+ const ParameterCount& actual,
+ InvokeFlag flag,
+ const CallWrapper& call_wrapper);
+
+ // Invoke the JavaScript function in the given register. Changes the
+ // current context to the context in the function before invoking.
+ void InvokeFunction(Register function,
+ const ParameterCount& actual,
+ InvokeFlag flag,
+ const CallWrapper& call_wrapper);
+
+ void InvokeFunction(Register function,
+ const ParameterCount& expected,
+ const ParameterCount& actual,
+ InvokeFlag flag,
+ const CallWrapper& call_wrapper);
+
+ void InvokeFunction(Handle<JSFunction> function,
+ const ParameterCount& expected,
+ const ParameterCount& actual,
+ InvokeFlag flag,
+ const CallWrapper& call_wrapper);
+
+ // Invoke specified builtin JavaScript function. Adds an entry to
+ // the unresolved list if the name does not resolve.
+ void InvokeBuiltin(Builtins::JavaScript id,
+ InvokeFlag flag,
+ const CallWrapper& call_wrapper = NullCallWrapper());
+
+ // Store the function for the given builtin in the target register.
+ void GetBuiltinFunction(Register target, Builtins::JavaScript id);
+
+ // Store the code object for the given builtin in the target register.
+ void GetBuiltinEntry(Register target, Builtins::JavaScript id);
+
+ // Expression support
+ // Support for constant splitting.
+ bool IsUnsafeImmediate(const Immediate& x);
+ void SafeMove(Register dst, const Immediate& x);
+ void SafePush(const Immediate& x);
+
+ // Compare object type for heap object.
+ // Incoming register is heap_object and outgoing register is map.
+ void CmpObjectType(Register heap_object, InstanceType type, Register map);
+
+ // Compare instance type for map.
+ void CmpInstanceType(Register map, InstanceType type);
+
+ // Check if a map for a JSObject indicates that the object has fast elements.
+ // Jump to the specified label if it does not.
+ void CheckFastElements(Register map,
+ Label* fail,
+ Label::Distance distance = Label::kFar);
+
+ // Check if a map for a JSObject indicates that the object can have both smi
+ // and HeapObject elements. Jump to the specified label if it does not.
+ void CheckFastObjectElements(Register map,
+ Label* fail,
+ Label::Distance distance = Label::kFar);
+
+ // Check if a map for a JSObject indicates that the object has fast smi only
+ // elements. Jump to the specified label if it does not.
+ void CheckFastSmiElements(Register map,
+ Label* fail,
+ Label::Distance distance = Label::kFar);
+
+ // Check to see if maybe_number can be stored as a double in
+ // FastDoubleElements. If it can, store it at the index specified by key in
+ // the FastDoubleElements array elements, otherwise jump to fail.
+ void StoreNumberToDoubleElements(Register maybe_number,
+ Register elements,
+ Register key,
+ Register scratch,
+ Label* fail,
+ int offset = 0);
+
+ // Compare an object's map with the specified map.
+ void CompareMap(Register obj, Handle<Map> map);
+
+ // Check if the map of an object is equal to a specified map and branch to
+ // label if not. Skip the smi check if not required (object is known to be a
+ // heap object). If mode is ALLOW_ELEMENT_TRANSITION_MAPS, then also match
+ // against maps that are ElementsKind transition maps of the specified map.
+ void CheckMap(Register obj,
+ Handle<Map> map,
+ Label* fail,
+ SmiCheckType smi_check_type);
+
+ // Check if the map of an object is equal to a specified map and branch to a
+ // specified target if equal. Skip the smi check if not required (object is
+ // known to be a heap object)
+ void DispatchMap(Register obj,
+ Register unused,
+ Handle<Map> map,
+ Handle<Code> success,
+ SmiCheckType smi_check_type);
+
+ // Check if the object in register heap_object is a string. Afterwards the
+ // register map contains the object map and the register instance_type
+ // contains the instance_type. The registers map and instance_type can be the
+ // same in which case it contains the instance type afterwards. Either of the
+ // registers map and instance_type can be the same as heap_object.
+ Condition IsObjectStringType(Register heap_object,
+ Register map,
+ Register instance_type);
+
+ // Check if the object in register heap_object is a name. Afterwards the
+ // register map contains the object map and the register instance_type
+ // contains the instance_type. The registers map and instance_type can be the
+ // same in which case it contains the instance type afterwards. Either of the
+ // registers map and instance_type can be the same as heap_object.
+ Condition IsObjectNameType(Register heap_object,
+ Register map,
+ Register instance_type);
+
+ // Check if a heap object's type is in the JSObject range, not including
+ // JSFunction. The object's map will be loaded in the map register.
+ // Any or all of the three registers may be the same.
+ // The contents of the scratch register will always be overwritten.
+ void IsObjectJSObjectType(Register heap_object,
+ Register map,
+ Register scratch,
+ Label* fail);
+
+ // The contents of the scratch register will be overwritten.
+ void IsInstanceJSObjectType(Register map, Register scratch, Label* fail);
+
+ // FCmp is similar to integer cmp, but requires unsigned
+ // jcc instructions (je, ja, jae, jb, jbe, and jz).
+ void FCmp();
+
+ void ClampUint8(Register reg);
+
+ void SlowTruncateToI(Register result_reg, Register input_reg,
+ int offset = HeapNumber::kValueOffset - kHeapObjectTag);
+
+ void TruncateHeapNumberToI(Register result_reg, Register input_reg);
+ void TruncateX87TOSToI(Register result_reg);
+
+ void X87TOSToI(Register result_reg, MinusZeroMode minus_zero_mode,
+ Label* conversion_failed, Label::Distance dst = Label::kFar);
+
+ void TaggedToI(Register result_reg, Register input_reg,
+ MinusZeroMode minus_zero_mode, Label* lost_precision);
+
+ // Smi tagging support.
+ void SmiTag(Register reg) {
+ STATIC_ASSERT(kSmiTag == 0);
+ STATIC_ASSERT(kSmiTagSize == 1);
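+ // Tagging is a left shift by one; add(reg, reg) does this and sets the
+ // overflow flag if the value does not fit in a smi.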
+ add(reg, reg);
+ }
+ void SmiUntag(Register reg) {
+ sar(reg, kSmiTagSize);
+ }
+
+ // Modifies the register even if it does not contain a Smi!
+ void SmiUntag(Register reg, Label* is_smi) {
+ STATIC_ASSERT(kSmiTagSize == 1);
+ sar(reg, kSmiTagSize);
+ STATIC_ASSERT(kSmiTag == 0);
+ j(not_carry, is_smi);
+ }
+
+ void LoadUint32NoSSE2(Register src);
+
+ // Jump if the register contains a smi.
+ inline void JumpIfSmi(Register value,
+ Label* smi_label,
+ Label::Distance distance = Label::kFar) {
+ test(value, Immediate(kSmiTagMask));
+ j(zero, smi_label, distance);
+ }
+ // Jump if the operand is a smi.
+ inline void JumpIfSmi(Operand value,
+ Label* smi_label,
+ Label::Distance distance = Label::kFar) {
+ test(value, Immediate(kSmiTagMask));
+ j(zero, smi_label, distance);
+ }
+ // Jump if the register contains a non-smi.
+ inline void JumpIfNotSmi(Register value,
+ Label* not_smi_label,
+ Label::Distance distance = Label::kFar) {
+ test(value, Immediate(kSmiTagMask));
+ j(not_zero, not_smi_label, distance);
+ }
+
+ void LoadInstanceDescriptors(Register map, Register descriptors);
+ void EnumLength(Register dst, Register map);
+ void NumberOfOwnDescriptors(Register dst, Register map);
+
+ template<typename Field>
+ void DecodeField(Register reg) {
+ static const int shift = Field::kShift;
+ static const int mask = Field::kMask >> Field::kShift;
+ sar(reg, shift);
+ and_(reg, Immediate(mask));
+ }
+
+ // Abort execution if argument is not a number, enabled via --debug-code.
+ void AssertNumber(Register object);
+
+ // Abort execution if argument is not a smi, enabled via --debug-code.
+ void AssertSmi(Register object);
+
+ // Abort execution if argument is a smi, enabled via --debug-code.
+ void AssertNotSmi(Register object);
+
+ // Abort execution if argument is not a string, enabled via --debug-code.
+ void AssertString(Register object);
+
+ // Abort execution if argument is not a name, enabled via --debug-code.
+ void AssertName(Register object);
+
+ // Abort execution if argument is not undefined or an AllocationSite, enabled
+ // via --debug-code.
+ void AssertUndefinedOrAllocationSite(Register object);
+
+ // ---------------------------------------------------------------------------
+ // Exception handling
+
+ // Push a new try handler and link it into try handler chain.
+ void PushTryHandler(StackHandler::Kind kind, int handler_index);
+
+ // Unlink the stack handler on top of the stack from the try handler chain.
+ void PopTryHandler();
+
+ // Throw to the top handler in the try handler chain.
+ void Throw(Register value);
+
+ // Throw past all JS frames to the top JS entry frame.
+ void ThrowUncatchable(Register value);
+
+ // Throw a message string as an exception.
+ void Throw(BailoutReason reason);
+
+ // Throw a message string as an exception if a condition is not true.
+ void ThrowIf(Condition cc, BailoutReason reason);
+
+ // ---------------------------------------------------------------------------
+ // Inline caching support
+
+ // Generate code for checking access rights - used for security checks
+ // on access to global objects across environments. The holder register
+ // is left untouched, but the scratch register is clobbered.
+ void CheckAccessGlobalProxy(Register holder_reg,
+ Register scratch1,
+ Register scratch2,
+ Label* miss);
+
+ void GetNumberHash(Register r0, Register scratch);
+
+ void LoadFromNumberDictionary(Label* miss,
+ Register elements,
+ Register key,
+ Register r0,
+ Register r1,
+ Register r2,
+ Register result);
+
+
+ // ---------------------------------------------------------------------------
+ // Allocation support
+
+ // Allocate an object in new space or old pointer space. If the given space
+ // is exhausted control continues at the gc_required label. The allocated
+ // object is returned in result and end of the new object is returned in
+ // result_end. The register scratch can be passed as no_reg in which case
+ // an additional object reference will be added to the reloc info. The
+ // returned pointers in result and result_end have not yet been tagged as
+ // heap objects. If result_contains_top_on_entry is true the content of
+ // result is known to be the allocation top on entry (could be result_end
+ // from a previous call). If result_contains_top_on_entry is true scratch
+ // should be no_reg as it is never used.
+ void Allocate(int object_size,
+ Register result,
+ Register result_end,
+ Register scratch,
+ Label* gc_required,
+ AllocationFlags flags);
+
+ void Allocate(int header_size,
+ ScaleFactor element_size,
+ Register element_count,
+ RegisterValueType element_count_type,
+ Register result,
+ Register result_end,
+ Register scratch,
+ Label* gc_required,
+ AllocationFlags flags);
+
+ void Allocate(Register object_size,
+ Register result,
+ Register result_end,
+ Register scratch,
+ Label* gc_required,
+ AllocationFlags flags);
+
+ // Undo allocation in new space. The object passed and objects allocated after
+ // it will no longer be allocated. Make sure that no pointers are left to the
+ // object(s) no longer allocated as they would be invalid when allocation is
+ // un-done.
+ void UndoAllocationInNewSpace(Register object);
+
+ // Allocate a heap number in new space with undefined value. The
+ // register scratch2 can be passed as no_reg; the others must be
+ // valid registers. Returns tagged pointer in result register, or
+ // jumps to gc_required if new space is full.
+ void AllocateHeapNumber(Register result,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required);
+
+ // Allocate a sequential string. All the header fields of the string object
+ // are initialized.
+ void AllocateTwoByteString(Register result,
+ Register length,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Label* gc_required);
+ void AllocateAsciiString(Register result,
+ Register length,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Label* gc_required);
+ void AllocateAsciiString(Register result,
+ int length,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required);
+
+ // Allocate a raw cons string object. Only the map field of the result is
+ // initialized.
+ void AllocateTwoByteConsString(Register result,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required);
+ void AllocateAsciiConsString(Register result,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required);
+
+ // Allocate a raw sliced string object. Only the map field of the result is
+ // initialized.
+ void AllocateTwoByteSlicedString(Register result,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required);
+ void AllocateAsciiSlicedString(Register result,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required);
+
+ // Copy memory, byte-by-byte, from source to destination. Not optimized for
+ // long or aligned copies.
+ // The contents of length and scratch are destroyed.
+ void CopyBytes(Register source,
+ Register destination,
+ Register length,
+ Register scratch);
+
+ // Initialize fields with filler values. Fields starting at |start_offset| up
+ // to but not including |end_offset| are overwritten with the value in
+ // |filler|. At the end of the loop, |start_offset| takes the value of
+ // |end_offset|.
+ void InitializeFieldsWithFiller(Register start_offset,
+ Register end_offset,
+ Register filler);
+
+ // ---------------------------------------------------------------------------
+ // Support functions.
+
+ // Check a boolean-bit of a Smi field.
+ void BooleanBitTest(Register object, int field_offset, int bit_index);
+
+ // Check if result is zero and op is negative.
+ void NegativeZeroTest(Register result, Register op, Label* then_label);
+
+ // Check if result is zero and any of op1 and op2 are negative.
+ // Register scratch is destroyed, and it must be different from op2.
+ void NegativeZeroTest(Register result, Register op1, Register op2,
+ Register scratch, Label* then_label);
+
+ // Try to get the prototype of a function and put the value in
+ // the result register. Checks that the function really is a
+ // function and jumps to the miss label if the fast checks fail. The
+ // function register will be untouched; the other registers may be
+ // clobbered.
+ void TryGetFunctionPrototype(Register function,
+ Register result,
+ Register scratch,
+ Label* miss,
+ bool miss_on_bound_function = false);
+
+ // Picks out an array index from the hash field.
+ // Register use:
+ // hash - holds the index's hash. Clobbered.
+ // index - holds the overwritten index on exit.
+ void IndexFromHash(Register hash, Register index);
+
+ // ---------------------------------------------------------------------------
+ // Runtime calls
+
+ // Call a code stub. Generate the code if necessary.
+ void CallStub(CodeStub* stub, TypeFeedbackId ast_id = TypeFeedbackId::None());
+
+ // Tail call a code stub (jump). Generate the code if necessary.
+ void TailCallStub(CodeStub* stub);
+
+ // Return from a code stub after popping its arguments.
+ void StubReturn(int argc);
+
+ // Call a runtime routine.
+ void CallRuntime(const Runtime::Function* f, int num_arguments);
+ // Convenience function: Same as above, but takes the fid instead.
+ void CallRuntime(Runtime::FunctionId id) {
+ const Runtime::Function* function = Runtime::FunctionForId(id);
+ CallRuntime(function, function->nargs);
+ }
+ void CallRuntime(Runtime::FunctionId id, int num_arguments) {
+ CallRuntime(Runtime::FunctionForId(id), num_arguments);
+ }
+
+ // Convenience function: call an external reference.
+ void CallExternalReference(ExternalReference ref, int num_arguments);
+
+ // Tail call of a runtime routine (jump).
+ // Like JumpToExternalReference, but also takes care of passing the number
+ // of parameters.
+ void TailCallExternalReference(const ExternalReference& ext,
+ int num_arguments,
+ int result_size);
+
+ // Convenience function: tail call a runtime routine (jump).
+ void TailCallRuntime(Runtime::FunctionId fid,
+ int num_arguments,
+ int result_size);
+
+ // Before calling a C-function from generated code, align arguments on stack.
+ // After aligning the frame, arguments must be stored in esp[0], esp[4],
+ // etc., not pushed. The argument count assumes all arguments are word sized.
+ // Some compilers/platforms require the stack to be aligned when calling
+ // C++ code.
+ // Needs a scratch register to do some arithmetic. This register will be
+ // trashed.
+ void PrepareCallCFunction(int num_arguments, Register scratch);
+
+ // Calls a C function and cleans up the space for arguments allocated
+ // by PrepareCallCFunction. The called function is not allowed to trigger a
+ // garbage collection, since that might move the code and invalidate the
+ // return address (unless this is somehow accounted for by the called
+ // function).
+ void CallCFunction(ExternalReference function, int num_arguments);
+ void CallCFunction(Register function, int num_arguments);
+
+ // Prepares stack to put arguments (aligns and so on). Reserves
+ // space for return value if needed (assumes the return value is a handle).
+ // Arguments must be stored in ApiParameterOperand(0), ApiParameterOperand(1)
+ // etc. Saves context (esi). If space was reserved for return value then
+ // stores the pointer to the reserved slot into esi.
+ void PrepareCallApiFunction(int argc);
+
+ // Calls an API function. Allocates HandleScope, extracts returned value
+ // from handle and propagates exceptions. Clobbers ebx, edi and
+ // caller-save registers. Restores context. On return removes
+ // stack_space * kPointerSize (GCed).
+ void CallApiFunctionAndReturn(Register function_address,
+ ExternalReference thunk_ref,
+ Operand thunk_last_arg,
+ int stack_space,
+ Operand return_value_operand,
+ Operand* context_restore_operand);
+
+ // Jump to a runtime routine.
+ void JumpToExternalReference(const ExternalReference& ext);
+
+ // ---------------------------------------------------------------------------
+ // Utilities
+
+ void Ret();
+
+ // Return and drop arguments from stack, where the number of arguments
+ // may be bigger than 2^16 - 1. Requires a scratch register.
+ void Ret(int bytes_dropped, Register scratch);
+
+ // Emit code to discard a non-negative number of pointer-sized elements
+ // from the stack, clobbering only the esp register.
+ void Drop(int element_count);
+
+ void Call(Label* target) { call(target); }
+ void Push(Register src) { push(src); }
+ void Pop(Register dst) { pop(dst); }
+
+ // Emit call to the code we are currently generating.
+ void CallSelf() {
+ Handle<Code> self(reinterpret_cast<Code**>(CodeObject().location()));
+ call(self, RelocInfo::CODE_TARGET);
+ }
+
+ // Move if the registers are not identical.
+ void Move(Register target, Register source);
+
+ // Move a constant into a destination using the most efficient encoding.
+ void Move(Register dst, const Immediate& x);
+ void Move(const Operand& dst, const Immediate& x);
+
+ // Push a handle value.
+ void Push(Handle<Object> handle) { push(Immediate(handle)); }
+ void Push(Smi* smi) { Push(Handle<Smi>(smi, isolate())); }
+
+ Handle<Object> CodeObject() {
+ ASSERT(!code_object_.is_null());
+ return code_object_;
+ }
+
+ // Insert code to verify that the x87 stack has the specified depth (0-7)
+ void VerifyX87StackDepth(uint32_t depth);
+
+ // Emit code for a truncating division by a constant. The dividend register is
+ // unchanged, the result is in edx, and eax gets clobbered.
+ void TruncatingDiv(Register dividend, int32_t divisor);
+
+ // ---------------------------------------------------------------------------
+ // StatsCounter support
+
+ void SetCounter(StatsCounter* counter, int value);
+ void IncrementCounter(StatsCounter* counter, int value);
+ void DecrementCounter(StatsCounter* counter, int value);
+ void IncrementCounter(Condition cc, StatsCounter* counter, int value);
+ void DecrementCounter(Condition cc, StatsCounter* counter, int value);
+
+
+ // ---------------------------------------------------------------------------
+ // Debugging
+
+ // Calls Abort(msg) if the condition cc is not satisfied.
+ // Use --debug_code to enable.
+ void Assert(Condition cc, BailoutReason reason);
+
+ void AssertFastElements(Register elements);
+
+ // Like Assert(), but always enabled.
+ void Check(Condition cc, BailoutReason reason);
+
+ // Print a message to stdout and abort execution.
+ void Abort(BailoutReason reason);
+
+ // Check that the stack is aligned.
+ void CheckStackAlignment();
+
+ // Verify restrictions about code generated in stubs.
+ void set_generating_stub(bool value) { generating_stub_ = value; }
+ bool generating_stub() { return generating_stub_; }
+ void set_has_frame(bool value) { has_frame_ = value; }
+ bool has_frame() { return has_frame_; }
+ inline bool AllowThisStubCall(CodeStub* stub);
+
+ // ---------------------------------------------------------------------------
+ // String utilities.
+
+ // Generate code to do a lookup in the number string cache. If the number in
+ // the register object is found in the cache the generated code falls through
+ // with the result in the result register. The object and the result register
+ // can be the same. If the number is not found in the cache the code jumps to
+ // the label not_found with only the content of register object unchanged.
+ void LookupNumberStringCache(Register object,
+ Register result,
+ Register scratch1,
+ Register scratch2,
+ Label* not_found);
+
+ // Check whether the instance type represents a flat ASCII string. Jump to the
+ // label if not. If the instance type can be scratched, specify the same
+ // register for both instance type and scratch.
+ void JumpIfInstanceTypeIsNotSequentialAscii(Register instance_type,
+ Register scratch,
+ Label* on_not_flat_ascii_string);
+
+ // Checks if both objects are sequential ASCII strings, and jumps to label
+ // if either is not.
+ void JumpIfNotBothSequentialAsciiStrings(Register object1,
+ Register object2,
+ Register scratch1,
+ Register scratch2,
+ Label* on_not_flat_ascii_strings);
+
+ // Checks if the given register or operand is a unique name
+ void JumpIfNotUniqueName(Register reg, Label* not_unique_name,
+ Label::Distance distance = Label::kFar) {
+ JumpIfNotUniqueName(Operand(reg), not_unique_name, distance);
+ }
+
+ void JumpIfNotUniqueName(Operand operand, Label* not_unique_name,
+ Label::Distance distance = Label::kFar);
+
+ void EmitSeqStringSetCharCheck(Register string,
+ Register index,
+ Register value,
+ uint32_t encoding_mask);
+
+ static int SafepointRegisterStackIndex(Register reg) {
+ return SafepointRegisterStackIndex(reg.code());
+ }
+
+ // Activation support.
+ void EnterFrame(StackFrame::Type type);
+ void LeaveFrame(StackFrame::Type type);
+
+ // Expects object in eax and returns map with validated enum cache
+ // in eax. Assumes that any other register can be used as a scratch.
+ void CheckEnumCache(Label* call_runtime);
+
+ // AllocationMemento support. Arrays may have an associated
+ // AllocationMemento object that can be checked for in order to pretransition
+ // to another type.
+ // On entry, receiver_reg should point to the array object.
+ // scratch_reg gets clobbered.
+ // If allocation info is present, conditional code is set to equal.
+ void TestJSArrayForAllocationMemento(Register receiver_reg,
+ Register scratch_reg,
+ Label* no_memento_found);
+
+ void JumpIfJSArrayHasAllocationMemento(Register receiver_reg,
+ Register scratch_reg,
+ Label* memento_found) {
+ Label no_memento_found;
+ TestJSArrayForAllocationMemento(receiver_reg, scratch_reg,
+ &no_memento_found);
+ j(equal, memento_found);
+ bind(&no_memento_found);
+ }
+
+ // Jumps to found label if a prototype map has dictionary elements.
+ void JumpIfDictionaryInPrototypeChain(Register object, Register scratch0,
+ Register scratch1, Label* found);
+
+ private:
+ bool generating_stub_;
+ bool has_frame_;
+ // This handle will be patched with the code object on installation.
+ Handle<Object> code_object_;
+
+ // Helper functions for generating invokes.
+ void InvokePrologue(const ParameterCount& expected,
+ const ParameterCount& actual,
+ Handle<Code> code_constant,
+ const Operand& code_operand,
+ Label* done,
+ bool* definitely_mismatches,
+ InvokeFlag flag,
+ Label::Distance done_distance,
+ const CallWrapper& call_wrapper = NullCallWrapper());
+
+ void EnterExitFramePrologue();
+ void EnterExitFrameEpilogue(int argc);
+
+ void LeaveExitFrameEpilogue(bool restore_context);
+
+ // Allocation support helpers.
+ void LoadAllocationTopHelper(Register result,
+ Register scratch,
+ AllocationFlags flags);
+
+ void UpdateAllocationTopHelper(Register result_end,
+ Register scratch,
+ AllocationFlags flags);
+
+ // Helper for implementing JumpIfNotInNewSpace and JumpIfInNewSpace.
+ void InNewSpace(Register object,
+ Register scratch,
+ Condition cc,
+ Label* condition_met,
+ Label::Distance condition_met_distance = Label::kFar);
+
+ // Helper for finding the mark bits for an address. Afterwards, the
+ // bitmap register points at the word with the mark bits and the mask
+ // register has a single bit set at the position of the first mark bit. Uses
+ // ecx as scratch and leaves addr_reg unchanged.
+ inline void GetMarkBits(Register addr_reg,
+ Register bitmap_reg,
+ Register mask_reg);
+
+ // Helper for throwing exceptions. Compute a handler address and jump to
+ // it. See the implementation for register usage.
+ void JumpToHandlerEntry();
+
+ // Compute memory operands for safepoint stack slots.
+ Operand SafepointRegisterSlot(Register reg);
+ static int SafepointRegisterStackIndex(int reg_code);
+
+ // Needs access to SafepointRegisterStackIndex for compiled frame
+ // traversal.
+ friend class StandardFrame;
+};
+
+
+// The code patcher is used to patch (typically) small parts of code e.g. for
+// debugging and other types of instrumentation. When using the code patcher
+// the exact number of bytes specified must be emitted. It is not legal to emit
+// relocation information. If any of these constraints are violated it causes
+// an assertion to fail.
+class CodePatcher {
+ public:
+ CodePatcher(byte* address, int size);
+ virtual ~CodePatcher();
+
+ // Macro assembler to emit code.
+ MacroAssembler* masm() { return &masm_; }
+
+ private:
+ byte* address_; // The address of the code being patched.
+ int size_; // Number of bytes of the expected patch size.
+ MacroAssembler masm_; // Macro assembler used to generate the code.
+};
+
+
+// -----------------------------------------------------------------------------
+// Static helper functions.
+
+// Generate an Operand for loading a field from an object.
+inline Operand FieldOperand(Register object, int offset) {
+ return Operand(object, offset - kHeapObjectTag);
+}
+
+
+// Generate an Operand for loading an indexed field from an object.
+inline Operand FieldOperand(Register object,
+ Register index,
+ ScaleFactor scale,
+ int offset) {
+ return Operand(object, index, scale, offset - kHeapObjectTag);
+}
+
+
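+// Generate an Operand for a FixedArray element addressed by a smi index: the
+// smi is the value shifted left by one, so times_half_pointer_size rescales
+// it to a byte offset of index * kPointerSize.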
+inline Operand FixedArrayElementOperand(Register array,
+ Register index_as_smi,
+ int additional_offset = 0) {
+ int offset = FixedArray::kHeaderSize + additional_offset * kPointerSize;
+ return FieldOperand(array, index_as_smi, times_half_pointer_size, offset);
+}
+
+
+inline Operand ContextOperand(Register context, int index) {
+ return Operand(context, Context::SlotOffset(index));
+}
+
+
+inline Operand GlobalObjectOperand() {
+ return ContextOperand(esi, Context::GLOBAL_OBJECT_INDEX);
+}
+
+
+// Generates an Operand for saving parameters after PrepareCallApiFunction.
+Operand ApiParameterOperand(int index);
+
+
+#ifdef GENERATED_CODE_COVERAGE
+extern void LogGeneratedCodeCoverage(const char* file_line);
+#define CODE_COVERAGE_STRINGIFY(x) #x
+#define CODE_COVERAGE_TOSTRING(x) CODE_COVERAGE_STRINGIFY(x)
+#define __FILE_LINE__ __FILE__ ":" CODE_COVERAGE_TOSTRING(__LINE__)
+#define ACCESS_MASM(masm) { \
+ byte* ia32_coverage_function = \
+ reinterpret_cast<byte*>(FUNCTION_ADDR(LogGeneratedCodeCoverage)); \
+ masm->pushfd(); \
+ masm->pushad(); \
+ masm->push(Immediate(reinterpret_cast<int>(&__FILE_LINE__))); \
+ masm->call(ia32_coverage_function, RelocInfo::RUNTIME_ENTRY); \
+ masm->pop(eax); \
+ masm->popad(); \
+ masm->popfd(); \
+ } \
+ masm->
+#else
+#define ACCESS_MASM(masm) masm->
+#endif
+
+
+} } // namespace v8::internal
+
+#endif // V8_X87_MACRO_ASSEMBLER_X87_H_
--- /dev/null
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "v8.h"
+
+#if V8_TARGET_ARCH_X87
+
+#include "cpu-profiler.h"
+#include "unicode.h"
+#include "log.h"
+#include "regexp-stack.h"
+#include "macro-assembler.h"
+#include "regexp-macro-assembler.h"
+#include "x87/regexp-macro-assembler-x87.h"
+
+namespace v8 {
+namespace internal {
+
+#ifndef V8_INTERPRETED_REGEXP
+/*
+ * This assembler uses the following register assignment convention
+ * - edx : Current character. Must be loaded using LoadCurrentCharacter
+ * before using any of the dispatch methods. Temporarily stores the
+ * index of capture start after a matching pass for a global regexp.
+ * - edi : Current position in input, as negative offset from end of string.
+ * Please notice that this is the byte offset, not the character offset!
+ * - esi : end of input (points to byte after last character in input).
+ * - ebp : Frame pointer. Used to access arguments, local variables and
+ * RegExp registers.
+ * - esp : Points to tip of C stack.
+ * - ecx : Points to tip of backtrack stack
+ *
+ * The registers eax and ebx are free to use for computations.
+ *
+ * Each call to a public method should retain this convention.
+ * The stack will have the following structure:
+ * - Isolate* isolate (address of the current isolate)
+ * - direct_call (if 1, direct call from JavaScript code, if 0
+ * call through the runtime system)
+ * - stack_area_base (high end of the memory area to use as
+ * backtracking stack)
+ * - capture array size (may fit multiple sets of matches)
+ * - int* capture_array (int[num_saved_registers_], for output).
+ * - end of input (address of end of string)
+ * - start of input (address of first character in string)
+ * - start index (character index of start)
+ * - String* input_string (location of a handle containing the string)
+ * --- frame alignment (if applicable) ---
+ * - return address
+ * ebp-> - old ebp
+ * - backup of caller esi
+ * - backup of caller edi
+ * - backup of caller ebx
+ * - success counter (only for global regexps to count matches).
+ * - Offset of location before start of input (effectively character
+ * position -1). Used to initialize capture registers to a non-position.
+ * - register 0 ebp[-4] (only positions must be stored in the first
+ * - register 1 ebp[-8] num_saved_registers_ registers)
+ * - ...
+ *
+ * The first num_saved_registers_ registers are initialized to point to
+ * "character -1" in the string (i.e., char_size() bytes before the first
+ * character of the string). The remaining registers start out as garbage.
+ *
+ * The data up to the return address must be placed there by the calling
+ * code, by calling the code entry as cast to a function with the signature:
+ * int (*match)(String* input_string,
+ * int start_index,
+ * Address start,
+ * Address end,
+ * int* capture_output_array,
+ * bool at_start,
+ * byte* stack_area_base,
+ * bool direct_call)
+ */
+
+#define __ ACCESS_MASM(masm_)
+
+RegExpMacroAssemblerX87::RegExpMacroAssemblerX87(
+ Mode mode,
+ int registers_to_save,
+ Zone* zone)
+ : NativeRegExpMacroAssembler(zone),
+ masm_(new MacroAssembler(zone->isolate(), NULL, kRegExpCodeSize)),
+ mode_(mode),
+ num_registers_(registers_to_save),
+ num_saved_registers_(registers_to_save),
+ entry_label_(),
+ start_label_(),
+ success_label_(),
+ backtrack_label_(),
+ exit_label_() {
+ ASSERT_EQ(0, registers_to_save % 2);
+ __ jmp(&entry_label_); // We'll write the entry code later.
+ __ bind(&start_label_); // And then continue from here.
+}
+
+
+RegExpMacroAssemblerX87::~RegExpMacroAssemblerX87() {
+ delete masm_;
+ // Unuse labels in case we throw away the assembler without calling GetCode.
+ entry_label_.Unuse();
+ start_label_.Unuse();
+ success_label_.Unuse();
+ backtrack_label_.Unuse();
+ exit_label_.Unuse();
+ check_preempt_label_.Unuse();
+ stack_overflow_label_.Unuse();
+}
+
+
+int RegExpMacroAssemblerX87::stack_limit_slack() {
+ return RegExpStack::kStackLimitSlack;
+}
+
+
+void RegExpMacroAssemblerX87::AdvanceCurrentPosition(int by) {
+ if (by != 0) {
+ __ add(edi, Immediate(by * char_size()));
+ }
+}
+
+
+void RegExpMacroAssemblerX87::AdvanceRegister(int reg, int by) {
+ ASSERT(reg >= 0);
+ ASSERT(reg < num_registers_);
+ if (by != 0) {
+ __ add(register_location(reg), Immediate(by));
+ }
+}
+
+
+void RegExpMacroAssemblerX87::Backtrack() {
+ CheckPreemption();
+ // Pop Code* offset from backtrack stack, add Code* and jump to location.
+ Pop(ebx);
+ __ add(ebx, Immediate(masm_->CodeObject()));
+ __ jmp(ebx);
+}
+
+
+void RegExpMacroAssemblerX87::Bind(Label* label) {
+ __ bind(label);
+}
+
+
+void RegExpMacroAssemblerX87::CheckCharacter(uint32_t c, Label* on_equal) {
+ __ cmp(current_character(), c);
+ BranchOrBacktrack(equal, on_equal);
+}
+
+
+void RegExpMacroAssemblerX87::CheckCharacterGT(uc16 limit, Label* on_greater) {
+ __ cmp(current_character(), limit);
+ BranchOrBacktrack(greater, on_greater);
+}
+
+
+void RegExpMacroAssemblerX87::CheckAtStart(Label* on_at_start) {
+ Label not_at_start;
+ // Did we start the match at the start of the string at all?
+ __ cmp(Operand(ebp, kStartIndex), Immediate(0));
+ BranchOrBacktrack(not_equal, &not_at_start);
+ // If we did, are we still at the start of the input?
+ __ lea(eax, Operand(esi, edi, times_1, 0));
+ __ cmp(eax, Operand(ebp, kInputStart));
+ BranchOrBacktrack(equal, on_at_start);
+ __ bind(&not_at_start);
+}
+
+
+void RegExpMacroAssemblerX87::CheckNotAtStart(Label* on_not_at_start) {
+ // Did we start the match at the start of the string at all?
+ __ cmp(Operand(ebp, kStartIndex), Immediate(0));
+ BranchOrBacktrack(not_equal, on_not_at_start);
+ // If we did, are we still at the start of the input?
+ __ lea(eax, Operand(esi, edi, times_1, 0));
+ __ cmp(eax, Operand(ebp, kInputStart));
+ BranchOrBacktrack(not_equal, on_not_at_start);
+}
+
+
+void RegExpMacroAssemblerX87::CheckCharacterLT(uc16 limit, Label* on_less) {
+ __ cmp(current_character(), limit);
+ BranchOrBacktrack(less, on_less);
+}
+
+
+void RegExpMacroAssemblerX87::CheckGreedyLoop(Label* on_equal) {
+ Label fallthrough;
+ __ cmp(edi, Operand(backtrack_stackpointer(), 0));
+ __ j(not_equal, &fallthrough);
+ __ add(backtrack_stackpointer(), Immediate(kPointerSize)); // Pop.
+ BranchOrBacktrack(no_condition, on_equal);
+ __ bind(&fallthrough);
+}
+
+
+void RegExpMacroAssemblerX87::CheckNotBackReferenceIgnoreCase(
+ int start_reg,
+ Label* on_no_match) {
+ Label fallthrough;
+ __ mov(edx, register_location(start_reg)); // Index of start of capture
+ __ mov(ebx, register_location(start_reg + 1)); // Index of end of capture
+ __ sub(ebx, edx); // Length of capture.
+
+ // The length of a capture should not be negative. This can only happen
+ // if the end of the capture is unrecorded, or at a point earlier than
+ // the start of the capture.
+ BranchOrBacktrack(less, on_no_match);
+
+ // If length is zero, either the capture is empty or it is completely
+ // uncaptured. In either case succeed immediately.
+ __ j(equal, &fallthrough);
+
+ // Check that there are sufficient characters left in the input.
+ __ mov(eax, edi);
+ __ add(eax, ebx);
+ BranchOrBacktrack(greater, on_no_match);
+
+ if (mode_ == ASCII) {
+ Label success;
+ Label fail;
+ Label loop_increment;
+ // Save register contents to make the registers available below.
+ __ push(edi);
+ __ push(backtrack_stackpointer());
+ // After this, the eax, ecx, and edi registers are available.
+
+ __ add(edx, esi); // Start of capture
+ __ add(edi, esi); // Start of text to match against capture.
+ __ add(ebx, edi); // End of text to match against capture.
+
+ Label loop;
+ __ bind(&loop);
+ __ movzx_b(eax, Operand(edi, 0));
+ __ cmpb_al(Operand(edx, 0));
+ __ j(equal, &loop_increment);
+
+ // Mismatch, try case-insensitive match (converting letters to lower-case).
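+  // (In ASCII/Latin-1 the upper- and lower-case letters differ only in bit
+  // 0x20, so OR-ing 0x20 into a letter yields its lower-case form.)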
+ __ or_(eax, 0x20); // Convert match character to lower-case.
+ __ lea(ecx, Operand(eax, -'a'));
+ __ cmp(ecx, static_cast<int32_t>('z' - 'a')); // Is eax a lowercase letter?
+ Label convert_capture;
+ __ j(below_equal, &convert_capture); // In range 'a'-'z'.
+ // Latin-1: Check for values in range [224,254] but not 247.
+ __ sub(ecx, Immediate(224 - 'a'));
+ __ cmp(ecx, Immediate(254 - 224));
+ __ j(above, &fail); // Weren't Latin-1 letters.
+ __ cmp(ecx, Immediate(247 - 224)); // Check for 247.
+ __ j(equal, &fail);
+ __ bind(&convert_capture);
+ // Also convert capture character.
+ __ movzx_b(ecx, Operand(edx, 0));
+ __ or_(ecx, 0x20);
+
+ __ cmp(eax, ecx);
+ __ j(not_equal, &fail);
+
+ __ bind(&loop_increment);
+ // Increment pointers into match and capture strings.
+ __ add(edx, Immediate(1));
+ __ add(edi, Immediate(1));
+ // Compare to end of match, and loop if not done.
+ __ cmp(edi, ebx);
+ __ j(below, &loop);
+ __ jmp(&success);
+
+ __ bind(&fail);
+ // Restore original values before failing.
+ __ pop(backtrack_stackpointer());
+ __ pop(edi);
+ BranchOrBacktrack(no_condition, on_no_match);
+
+ __ bind(&success);
+ // Restore original value before continuing.
+ __ pop(backtrack_stackpointer());
+ // Drop original value of character position.
+ __ add(esp, Immediate(kPointerSize));
+ // Compute new value of character position after the matched part.
+ __ sub(edi, esi);
+ } else {
+ ASSERT(mode_ == UC16);
+ // Save registers before calling C function.
+ __ push(esi);
+ __ push(edi);
+ __ push(backtrack_stackpointer());
+ __ push(ebx);
+
+ static const int argument_count = 4;
+ __ PrepareCallCFunction(argument_count, ecx);
+ // Put arguments into allocated stack area, last argument highest on stack.
+ // Parameters are
+ // Address byte_offset1 - Address captured substring's start.
+ // Address byte_offset2 - Address of current character position.
+ // size_t byte_length - length of capture in bytes(!)
+ // Isolate* isolate
+
+ // Set isolate.
+ __ mov(Operand(esp, 3 * kPointerSize),
+ Immediate(ExternalReference::isolate_address(isolate())));
+ // Set byte_length.
+ __ mov(Operand(esp, 2 * kPointerSize), ebx);
+ // Set byte_offset2.
+ // Found by adding negative string-end offset of current position (edi)
+ // to end of string.
+ __ add(edi, esi);
+ __ mov(Operand(esp, 1 * kPointerSize), edi);
+ // Set byte_offset1.
+ // Start of capture, where edx already holds string-end negative offset.
+ __ add(edx, esi);
+ __ mov(Operand(esp, 0 * kPointerSize), edx);
+
+ {
+ AllowExternalCallThatCantCauseGC scope(masm_);
+ ExternalReference compare =
+ ExternalReference::re_case_insensitive_compare_uc16(isolate());
+ __ CallCFunction(compare, argument_count);
+ }
+ // Pop original values before reacting on result value.
+ __ pop(ebx);
+ __ pop(backtrack_stackpointer());
+ __ pop(edi);
+ __ pop(esi);
+
+ // Check if function returned non-zero for success or zero for failure.
+ __ or_(eax, eax);
+ BranchOrBacktrack(zero, on_no_match);
+ // On success, increment position by length of capture.
+ __ add(edi, ebx);
+ }
+ __ bind(&fallthrough);
+}
+
+
+void RegExpMacroAssemblerX87::CheckNotBackReference(
+ int start_reg,
+ Label* on_no_match) {
+ Label fallthrough;
+ Label success;
+ Label fail;
+
+ // Find length of back-referenced capture.
+ __ mov(edx, register_location(start_reg));
+ __ mov(eax, register_location(start_reg + 1));
+ __ sub(eax, edx); // Length to check.
+ // Fail on partial or illegal capture (start of capture after end of capture).
+ BranchOrBacktrack(less, on_no_match);
+ // Succeed on empty capture (including no capture)
+ __ j(equal, &fallthrough);
+
+ // Check that there are sufficient characters left in the input.
+ __ mov(ebx, edi);
+ __ add(ebx, eax);
+ BranchOrBacktrack(greater, on_no_match);
+
+ // Save register to make it available below.
+ __ push(backtrack_stackpointer());
+
+ // Compute pointers to match string and capture string
+ __ lea(ebx, Operand(esi, edi, times_1, 0)); // Start of match.
+ __ add(edx, esi); // Start of capture.
+ __ lea(ecx, Operand(eax, ebx, times_1, 0)); // End of match
+
+ Label loop;
+ __ bind(&loop);
+ if (mode_ == ASCII) {
+ __ movzx_b(eax, Operand(edx, 0));
+ __ cmpb_al(Operand(ebx, 0));
+ } else {
+ ASSERT(mode_ == UC16);
+ __ movzx_w(eax, Operand(edx, 0));
+ __ cmpw_ax(Operand(ebx, 0));
+ }
+ __ j(not_equal, &fail);
+ // Increment pointers into capture and match string.
+ __ add(edx, Immediate(char_size()));
+ __ add(ebx, Immediate(char_size()));
+ // Check if we have reached end of match area.
+ __ cmp(ebx, ecx);
+ __ j(below, &loop);
+ __ jmp(&success);
+
+ __ bind(&fail);
+ // Restore backtrack stackpointer.
+ __ pop(backtrack_stackpointer());
+ BranchOrBacktrack(no_condition, on_no_match);
+
+ __ bind(&success);
+ // Move current character position to position after match.
+ __ mov(edi, ecx);
+ __ sub(edi, esi);
+ // Restore backtrack stackpointer.
+ __ pop(backtrack_stackpointer());
+
+ __ bind(&fallthrough);
+}
+
+
+void RegExpMacroAssemblerX87::CheckNotCharacter(uint32_t c,
+ Label* on_not_equal) {
+ __ cmp(current_character(), c);
+ BranchOrBacktrack(not_equal, on_not_equal);
+}
+
+
+void RegExpMacroAssemblerX87::CheckCharacterAfterAnd(uint32_t c,
+ uint32_t mask,
+ Label* on_equal) {
+ if (c == 0) {
+ __ test(current_character(), Immediate(mask));
+ } else {
+ __ mov(eax, mask);
+ __ and_(eax, current_character());
+ __ cmp(eax, c);
+ }
+ BranchOrBacktrack(equal, on_equal);
+}
+
+
+void RegExpMacroAssemblerX87::CheckNotCharacterAfterAnd(uint32_t c,
+ uint32_t mask,
+ Label* on_not_equal) {
+ if (c == 0) {
+ __ test(current_character(), Immediate(mask));
+ } else {
+ __ mov(eax, mask);
+ __ and_(eax, current_character());
+ __ cmp(eax, c);
+ }
+ BranchOrBacktrack(not_equal, on_not_equal);
+}
+
+
+void RegExpMacroAssemblerX87::CheckNotCharacterAfterMinusAnd(
+ uc16 c,
+ uc16 minus,
+ uc16 mask,
+ Label* on_not_equal) {
+ ASSERT(minus < String::kMaxUtf16CodeUnit);
+ __ lea(eax, Operand(current_character(), -minus));
+ if (c == 0) {
+ __ test(eax, Immediate(mask));
+ } else {
+ __ and_(eax, mask);
+ __ cmp(eax, c);
+ }
+ BranchOrBacktrack(not_equal, on_not_equal);
+}
+
+
+void RegExpMacroAssemblerX87::CheckCharacterInRange(
+ uc16 from,
+ uc16 to,
+ Label* on_in_range) {
+ __ lea(eax, Operand(current_character(), -from));
+ __ cmp(eax, to - from);
+ BranchOrBacktrack(below_equal, on_in_range);
+}
+
+
+void RegExpMacroAssemblerX87::CheckCharacterNotInRange(
+ uc16 from,
+ uc16 to,
+ Label* on_not_in_range) {
+ __ lea(eax, Operand(current_character(), -from));
+ __ cmp(eax, to - from);
+ BranchOrBacktrack(above, on_not_in_range);
+}
+
+
+void RegExpMacroAssemblerX87::CheckBitInTable(
+ Handle<ByteArray> table,
+ Label* on_bit_set) {
+ __ mov(eax, Immediate(table));
+ Register index = current_character();
+ if (mode_ != ASCII || kTableMask != String::kMaxOneByteCharCode) {
+ __ mov(ebx, kTableSize - 1);
+ __ and_(ebx, current_character());
+ index = ebx;
+ }
+ __ cmpb(FieldOperand(eax, index, times_1, ByteArray::kHeaderSize), 0);
+ BranchOrBacktrack(not_equal, on_bit_set);
+}
+
+
+bool RegExpMacroAssemblerX87::CheckSpecialCharacterClass(uc16 type,
+ Label* on_no_match) {
+ // Range checks (c in min..max) are generally implemented by an unsigned
+ // (c - min) <= (max - min) check
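+  // For example, "c is an ASCII digit" becomes the single unsigned comparison
+  // (c - '0') <= ('9' - '0'); characters below '0' wrap around to large
+  // unsigned values and fail the same check.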
+ switch (type) {
+ case 's':
+ // Match space-characters
+ if (mode_ == ASCII) {
+ // One byte space characters are '\t'..'\r', ' ' and \u00a0.
+ Label success;
+ __ cmp(current_character(), ' ');
+ __ j(equal, &success, Label::kNear);
+ // Check range 0x09..0x0d
+ __ lea(eax, Operand(current_character(), -'\t'));
+ __ cmp(eax, '\r' - '\t');
+ __ j(below_equal, &success, Label::kNear);
+ // \u00a0 (NBSP).
+ __ cmp(eax, 0x00a0 - '\t');
+ BranchOrBacktrack(not_equal, on_no_match);
+ __ bind(&success);
+ return true;
+ }
+ return false;
+ case 'S':
+ // The emitted code for generic character classes is good enough.
+ return false;
+ case 'd':
+ // Match ASCII digits ('0'..'9')
+ __ lea(eax, Operand(current_character(), -'0'));
+ __ cmp(eax, '9' - '0');
+ BranchOrBacktrack(above, on_no_match);
+ return true;
+ case 'D':
+ // Match non ASCII-digits
+ __ lea(eax, Operand(current_character(), -'0'));
+ __ cmp(eax, '9' - '0');
+ BranchOrBacktrack(below_equal, on_no_match);
+ return true;
+ case '.': {
+ // Match non-newlines (not 0x0a('\n'), 0x0d('\r'), 0x2028 and 0x2029)
+ __ mov(eax, current_character());
+ __ xor_(eax, Immediate(0x01));
+ // See if current character is '\n'^1 or '\r'^1, i.e., 0x0b or 0x0c
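+      // ('\n' is 0x0a and '\r' is 0x0d; XOR-ing with 0x01 maps them to the
+      // adjacent values 0x0b and 0x0c, so one range check covers both.)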
+ __ sub(eax, Immediate(0x0b));
+ __ cmp(eax, 0x0c - 0x0b);
+ BranchOrBacktrack(below_equal, on_no_match);
+ if (mode_ == UC16) {
+ // Compare original value to 0x2028 and 0x2029, using the already
+ // computed (current_char ^ 0x01 - 0x0b). I.e., check for
+ // 0x201d (0x2028 - 0x0b) or 0x201e.
+ __ sub(eax, Immediate(0x2028 - 0x0b));
+ __ cmp(eax, 0x2029 - 0x2028);
+ BranchOrBacktrack(below_equal, on_no_match);
+ }
+ return true;
+ }
+ case 'w': {
+ if (mode_ != ASCII) {
+ // Table is 128 entries, so all ASCII characters can be tested.
+ __ cmp(current_character(), Immediate('z'));
+ BranchOrBacktrack(above, on_no_match);
+ }
+ ASSERT_EQ(0, word_character_map[0]); // Character '\0' is not a word char.
+ ExternalReference word_map = ExternalReference::re_word_character_map();
+ __ test_b(current_character(),
+ Operand::StaticArray(current_character(), times_1, word_map));
+ BranchOrBacktrack(zero, on_no_match);
+ return true;
+ }
+ case 'W': {
+ Label done;
+ if (mode_ != ASCII) {
+ // Table is 128 entries, so all ASCII characters can be tested.
+ __ cmp(current_character(), Immediate('z'));
+ __ j(above, &done);
+ }
+ ASSERT_EQ(0, word_character_map[0]); // Character '\0' is not a word char.
+ ExternalReference word_map = ExternalReference::re_word_character_map();
+ __ test_b(current_character(),
+ Operand::StaticArray(current_character(), times_1, word_map));
+ BranchOrBacktrack(not_zero, on_no_match);
+ if (mode_ != ASCII) {
+ __ bind(&done);
+ }
+ return true;
+ }
+ // Non-standard classes (with no syntactic shorthand) used internally.
+ case '*':
+ // Match any character.
+ return true;
+ case 'n': {
+ // Match newlines (0x0a('\n'), 0x0d('\r'), 0x2028 or 0x2029).
+ // The opposite of '.'.
+ __ mov(eax, current_character());
+ __ xor_(eax, Immediate(0x01));
+ // See if current character is '\n'^1 or '\r'^1, i.e., 0x0b or 0x0c
+ __ sub(eax, Immediate(0x0b));
+ __ cmp(eax, 0x0c - 0x0b);
+ if (mode_ == ASCII) {
+ BranchOrBacktrack(above, on_no_match);
+ } else {
+ Label done;
+ BranchOrBacktrack(below_equal, &done);
+ ASSERT_EQ(UC16, mode_);
+ // Compare original value to 0x2028 and 0x2029, using the already
+ // computed (current_char ^ 0x01 - 0x0b). I.e., check for
+ // 0x201d (0x2028 - 0x0b) or 0x201e.
+ __ sub(eax, Immediate(0x2028 - 0x0b));
+ __ cmp(eax, 1);
+ BranchOrBacktrack(above, on_no_match);
+ __ bind(&done);
+ }
+ return true;
+ }
+ // No custom implementation (yet): s(UC16), S(UC16).
+ default:
+ return false;
+ }
+}
+
+
+void RegExpMacroAssemblerX87::Fail() {
+ STATIC_ASSERT(FAILURE == 0); // Return value for failure is zero.
+ if (!global()) {
+ __ Move(eax, Immediate(FAILURE));
+ }
+ __ jmp(&exit_label_);
+}
+
+
+Handle<HeapObject> RegExpMacroAssemblerX87::GetCode(Handle<String> source) {
+ Label return_eax;
+ // Finalize code - write the entry point code now we know how many
+ // registers we need.
+
+ // Entry code:
+ __ bind(&entry_label_);
+
+ // Tell the system that we have a stack frame. Because the type is MANUAL, no
+ // code is generated.
+ FrameScope scope(masm_, StackFrame::MANUAL);
+
+ // Actually emit code to start a new stack frame.
+ __ push(ebp);
+ __ mov(ebp, esp);
+ // Save callee-save registers. Order here should correspond to order of
+ // kBackup_ebx etc.
+ __ push(esi);
+ __ push(edi);
+ __ push(ebx); // Callee-save on MacOS.
+ __ push(Immediate(0)); // Number of successful matches in a global regexp.
+ __ push(Immediate(0)); // Make room for "input start - 1" constant.
+
+ // Check if we have space on the stack for registers.
+ Label stack_limit_hit;
+ Label stack_ok;
+
+ ExternalReference stack_limit =
+ ExternalReference::address_of_stack_limit(isolate());
+ __ mov(ecx, esp);
+ __ sub(ecx, Operand::StaticVariable(stack_limit));
+ // Handle it if the stack pointer is already below the stack limit.
+ __ j(below_equal, &stack_limit_hit);
+ // Check if there is room for the variable number of registers above
+ // the stack limit.
+ __ cmp(ecx, num_registers_ * kPointerSize);
+ __ j(above_equal, &stack_ok);
+ // Exit with OutOfMemory exception. There is not enough space on the stack
+ // for our working registers.
+ __ mov(eax, EXCEPTION);
+ __ jmp(&return_eax);
+
+ __ bind(&stack_limit_hit);
+ CallCheckStackGuardState(ebx);
+ __ or_(eax, eax);
+ // If returned value is non-zero, we exit with the returned value as result.
+ __ j(not_zero, &return_eax);
+
+ __ bind(&stack_ok);
+ // Load start index for later use.
+ __ mov(ebx, Operand(ebp, kStartIndex));
+
+ // Allocate space on stack for registers.
+ __ sub(esp, Immediate(num_registers_ * kPointerSize));
+ // Load string length.
+ __ mov(esi, Operand(ebp, kInputEnd));
+ // Load input position.
+ __ mov(edi, Operand(ebp, kInputStart));
+ // Set up edi to be negative offset from string end.
+ __ sub(edi, esi);
+
+ // Set eax to address of char before start of the string.
+ // (effectively string position -1).
+ __ neg(ebx);
+ if (mode_ == UC16) {
+ __ lea(eax, Operand(edi, ebx, times_2, -char_size()));
+ } else {
+ __ lea(eax, Operand(edi, ebx, times_1, -char_size()));
+ }
+ // Store this value in a local variable, for use when clearing
+ // position registers.
+ __ mov(Operand(ebp, kInputStartMinusOne), eax);
+
+#if V8_OS_WIN
+ // Ensure that we write to each stack page, in order. Skipping a page
+ // on Windows can cause segmentation faults. Assuming page size is 4k.
+ const int kPageSize = 4096;
+ const int kRegistersPerPage = kPageSize / kPointerSize;
+ for (int i = num_saved_registers_ + kRegistersPerPage - 1;
+ i < num_registers_;
+ i += kRegistersPerPage) {
+ __ mov(register_location(i), eax); // One write every page.
+ }
+#endif // V8_OS_WIN
+
+ Label load_char_start_regexp, start_regexp;
+ // Load newline if index is at start, previous character otherwise.
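+  // ('\n' is used as the "previous character" at position 0 so that
+  // multiline '^' and word-boundary checks treat the start of input like
+  // the start of a line.)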
+ __ cmp(Operand(ebp, kStartIndex), Immediate(0));
+ __ j(not_equal, &load_char_start_regexp, Label::kNear);
+ __ mov(current_character(), '\n');
+ __ jmp(&start_regexp, Label::kNear);
+
+ // Global regexp restarts matching here.
+ __ bind(&load_char_start_regexp);
+ // Load previous char as initial value of current character register.
+ LoadCurrentCharacterUnchecked(-1, 1);
+ __ bind(&start_regexp);
+
+ // Initialize on-stack registers.
+ if (num_saved_registers_ > 0) { // Always is, if generated from a regexp.
+ // Fill saved registers with initial value = start offset - 1
+ // Fill in stack push order, to avoid accessing across an unwritten
+ // page (a problem on Windows).
+ if (num_saved_registers_ > 8) {
+ __ mov(ecx, kRegisterZero);
+ Label init_loop;
+ __ bind(&init_loop);
+ __ mov(Operand(ebp, ecx, times_1, 0), eax);
+ __ sub(ecx, Immediate(kPointerSize));
+ __ cmp(ecx, kRegisterZero - num_saved_registers_ * kPointerSize);
+ __ j(greater, &init_loop);
+ } else { // Unroll the loop.
+ for (int i = 0; i < num_saved_registers_; i++) {
+ __ mov(register_location(i), eax);
+ }
+ }
+ }
+
+ // Initialize backtrack stack pointer.
+ __ mov(backtrack_stackpointer(), Operand(ebp, kStackHighEnd));
+
+ __ jmp(&start_label_);
+
+ // Exit code:
+ if (success_label_.is_linked()) {
+ // Save captures when successful.
+ __ bind(&success_label_);
+ if (num_saved_registers_ > 0) {
+ // copy captures to output
+ __ mov(ebx, Operand(ebp, kRegisterOutput));
+ __ mov(ecx, Operand(ebp, kInputEnd));
+ __ mov(edx, Operand(ebp, kStartIndex));
+ __ sub(ecx, Operand(ebp, kInputStart));
+ if (mode_ == UC16) {
+ __ lea(ecx, Operand(ecx, edx, times_2, 0));
+ } else {
+ __ add(ecx, edx);
+ }
+ for (int i = 0; i < num_saved_registers_; i++) {
+ __ mov(eax, register_location(i));
+ if (i == 0 && global_with_zero_length_check()) {
+ // Keep capture start in edx for the zero-length check later.
+ __ mov(edx, eax);
+ }
+ // Convert to index from start of string, not end.
+ __ add(eax, ecx);
+ if (mode_ == UC16) {
+ __ sar(eax, 1); // Convert byte index to character index.
+ }
+ __ mov(Operand(ebx, i * kPointerSize), eax);
+ }
+ }
+
+ if (global()) {
+ // Restart matching if the regular expression is flagged as global.
+ // Increment success counter.
+ __ inc(Operand(ebp, kSuccessfulCaptures));
+ // Capture results have been stored, so the number of remaining global
+ // output registers is reduced by the number of stored captures.
+ __ mov(ecx, Operand(ebp, kNumOutputRegisters));
+ __ sub(ecx, Immediate(num_saved_registers_));
+ // Check whether we have enough room for another set of capture results.
+ __ cmp(ecx, Immediate(num_saved_registers_));
+ __ j(less, &exit_label_);
+
+ __ mov(Operand(ebp, kNumOutputRegisters), ecx);
+ // Advance the location for output.
+ __ add(Operand(ebp, kRegisterOutput),
+ Immediate(num_saved_registers_ * kPointerSize));
+
+ // Prepare eax to initialize registers with its value in the next run.
+ __ mov(eax, Operand(ebp, kInputStartMinusOne));
+
+ if (global_with_zero_length_check()) {
+ // Special case for zero-length matches.
+ // edx: capture start index
+ __ cmp(edi, edx);
+ // Not a zero-length match, restart.
+ __ j(not_equal, &load_char_start_regexp);
+ // edi (offset from the end) is zero if we already reached the end.
+ __ test(edi, edi);
+ __ j(zero, &exit_label_, Label::kNear);
+ // Advance current position after a zero-length match.
+ if (mode_ == UC16) {
+ __ add(edi, Immediate(2));
+ } else {
+ __ inc(edi);
+ }
+ }
+
+ __ jmp(&load_char_start_regexp);
+ } else {
+ __ mov(eax, Immediate(SUCCESS));
+ }
+ }
+
+ __ bind(&exit_label_);
+ if (global()) {
+ // Return the number of successful captures.
+ __ mov(eax, Operand(ebp, kSuccessfulCaptures));
+ }
+
+ __ bind(&return_eax);
+ // Skip esp past regexp registers.
+ __ lea(esp, Operand(ebp, kBackup_ebx));
+ // Restore callee-save registers.
+ __ pop(ebx);
+ __ pop(edi);
+ __ pop(esi);
+ // Exit function frame, restore previous one.
+ __ pop(ebp);
+ __ ret(0);
+
+ // Backtrack code (branch target for conditional backtracks).
+ if (backtrack_label_.is_linked()) {
+ __ bind(&backtrack_label_);
+ Backtrack();
+ }
+
+ Label exit_with_exception;
+
+ // Preempt-code
+ if (check_preempt_label_.is_linked()) {
+ SafeCallTarget(&check_preempt_label_);
+
+ __ push(backtrack_stackpointer());
+ __ push(edi);
+
+ CallCheckStackGuardState(ebx);
+ __ or_(eax, eax);
+ // If returning non-zero, we should end execution with the given
+ // result as return value.
+ __ j(not_zero, &return_eax);
+
+ __ pop(edi);
+ __ pop(backtrack_stackpointer());
+ // String might have moved: Reload esi from frame.
+ __ mov(esi, Operand(ebp, kInputEnd));
+ SafeReturn();
+ }
+
+ // Backtrack stack overflow code.
+ if (stack_overflow_label_.is_linked()) {
+ SafeCallTarget(&stack_overflow_label_);
+ // Reached if the backtrack-stack limit has been hit.
+
+ Label grow_failed;
+ // Save registers before calling C function
+ __ push(esi);
+ __ push(edi);
+
+ // Call GrowStack(backtrack_stackpointer())
+ static const int num_arguments = 3;
+ __ PrepareCallCFunction(num_arguments, ebx);
+ __ mov(Operand(esp, 2 * kPointerSize),
+ Immediate(ExternalReference::isolate_address(isolate())));
+ __ lea(eax, Operand(ebp, kStackHighEnd));
+ __ mov(Operand(esp, 1 * kPointerSize), eax);
+ __ mov(Operand(esp, 0 * kPointerSize), backtrack_stackpointer());
+ ExternalReference grow_stack =
+ ExternalReference::re_grow_stack(isolate());
+ __ CallCFunction(grow_stack, num_arguments);
+  // If the call returns NULL, we have failed to grow the stack, and
+ // must exit with a stack-overflow exception.
+ __ or_(eax, eax);
+ __ j(equal, &exit_with_exception);
+ // Otherwise use return value as new stack pointer.
+ __ mov(backtrack_stackpointer(), eax);
+ // Restore saved registers and continue.
+ __ pop(edi);
+ __ pop(esi);
+ SafeReturn();
+ }
+
+ if (exit_with_exception.is_linked()) {
+ // If any of the code above needed to exit with an exception.
+ __ bind(&exit_with_exception);
+ // Exit with Result EXCEPTION(-1) to signal thrown exception.
+ __ mov(eax, EXCEPTION);
+ __ jmp(&return_eax);
+ }
+
+ CodeDesc code_desc;
+ masm_->GetCode(&code_desc);
+ Handle<Code> code =
+ isolate()->factory()->NewCode(code_desc,
+ Code::ComputeFlags(Code::REGEXP),
+ masm_->CodeObject());
+ PROFILE(isolate(), RegExpCodeCreateEvent(*code, *source));
+ return Handle<HeapObject>::cast(code);
+}
+
+
+void RegExpMacroAssemblerX87::GoTo(Label* to) {
+ BranchOrBacktrack(no_condition, to);
+}
+
+
+void RegExpMacroAssemblerX87::IfRegisterGE(int reg,
+ int comparand,
+ Label* if_ge) {
+ __ cmp(register_location(reg), Immediate(comparand));
+ BranchOrBacktrack(greater_equal, if_ge);
+}
+
+
+void RegExpMacroAssemblerX87::IfRegisterLT(int reg,
+ int comparand,
+ Label* if_lt) {
+ __ cmp(register_location(reg), Immediate(comparand));
+ BranchOrBacktrack(less, if_lt);
+}
+
+
+void RegExpMacroAssemblerX87::IfRegisterEqPos(int reg,
+ Label* if_eq) {
+ __ cmp(edi, register_location(reg));
+ BranchOrBacktrack(equal, if_eq);
+}
+
+
+RegExpMacroAssembler::IrregexpImplementation
+ RegExpMacroAssemblerX87::Implementation() {
+ return kX87Implementation;
+}
+
+
+void RegExpMacroAssemblerX87::LoadCurrentCharacter(int cp_offset,
+ Label* on_end_of_input,
+ bool check_bounds,
+ int characters) {
+ ASSERT(cp_offset >= -1); // ^ and \b can look behind one character.
+ ASSERT(cp_offset < (1<<30)); // Be sane! (And ensure negation works)
+ if (check_bounds) {
+ CheckPosition(cp_offset + characters - 1, on_end_of_input);
+ }
+ LoadCurrentCharacterUnchecked(cp_offset, characters);
+}
+
+
+void RegExpMacroAssemblerX87::PopCurrentPosition() {
+ Pop(edi);
+}
+
+
+void RegExpMacroAssemblerX87::PopRegister(int register_index) {
+ Pop(eax);
+ __ mov(register_location(register_index), eax);
+}
+
+
+void RegExpMacroAssemblerX87::PushBacktrack(Label* label) {
+ Push(Immediate::CodeRelativeOffset(label));
+ CheckStackLimit();
+}
+
+
+void RegExpMacroAssemblerX87::PushCurrentPosition() {
+ Push(edi);
+}
+
+
+void RegExpMacroAssemblerX87::PushRegister(int register_index,
+ StackCheckFlag check_stack_limit) {
+ __ mov(eax, register_location(register_index));
+ Push(eax);
+ if (check_stack_limit) CheckStackLimit();
+}
+
+
+void RegExpMacroAssemblerX87::ReadCurrentPositionFromRegister(int reg) {
+ __ mov(edi, register_location(reg));
+}
+
+
+void RegExpMacroAssemblerX87::ReadStackPointerFromRegister(int reg) {
+ __ mov(backtrack_stackpointer(), register_location(reg));
+ __ add(backtrack_stackpointer(), Operand(ebp, kStackHighEnd));
+}
+
+void RegExpMacroAssemblerX87::SetCurrentPositionFromEnd(int by) {
+ Label after_position;
+ __ cmp(edi, -by * char_size());
+ __ j(greater_equal, &after_position, Label::kNear);
+ __ mov(edi, -by * char_size());
+ // On RegExp code entry (where this operation is used), the character before
+ // the current position is expected to be already loaded.
+ // We have advanced the position, so it's safe to read backwards.
+ LoadCurrentCharacterUnchecked(-1, 1);
+ __ bind(&after_position);
+}
+
+
+void RegExpMacroAssemblerX87::SetRegister(int register_index, int to) {
+ ASSERT(register_index >= num_saved_registers_); // Reserved for positions!
+ __ mov(register_location(register_index), Immediate(to));
+}
+
+
+bool RegExpMacroAssemblerX87::Succeed() {
+ __ jmp(&success_label_);
+ return global();
+}
+
+
+void RegExpMacroAssemblerX87::WriteCurrentPositionToRegister(int reg,
+ int cp_offset) {
+ if (cp_offset == 0) {
+ __ mov(register_location(reg), edi);
+ } else {
+ __ lea(eax, Operand(edi, cp_offset * char_size()));
+ __ mov(register_location(reg), eax);
+ }
+}
+
+
+void RegExpMacroAssemblerX87::ClearRegisters(int reg_from, int reg_to) {
+ ASSERT(reg_from <= reg_to);
+ __ mov(eax, Operand(ebp, kInputStartMinusOne));
+ for (int reg = reg_from; reg <= reg_to; reg++) {
+ __ mov(register_location(reg), eax);
+ }
+}
+
+
+void RegExpMacroAssemblerX87::WriteStackPointerToRegister(int reg) {
+ __ mov(eax, backtrack_stackpointer());
+ __ sub(eax, Operand(ebp, kStackHighEnd));
+ __ mov(register_location(reg), eax);
+}
+
+
+// Private methods:
+
+void RegExpMacroAssemblerX87::CallCheckStackGuardState(Register scratch) {
+ static const int num_arguments = 3;
+ __ PrepareCallCFunction(num_arguments, scratch);
+ // RegExp code frame pointer.
+ __ mov(Operand(esp, 2 * kPointerSize), ebp);
+ // Code* of self.
+ __ mov(Operand(esp, 1 * kPointerSize), Immediate(masm_->CodeObject()));
+ // Next address on the stack (will be address of return address).
+ __ lea(eax, Operand(esp, -kPointerSize));
+ __ mov(Operand(esp, 0 * kPointerSize), eax);
+ ExternalReference check_stack_guard =
+ ExternalReference::re_check_stack_guard_state(isolate());
+ __ CallCFunction(check_stack_guard, num_arguments);
+}
+
+
+// Helper function for reading a value out of a stack frame.
+template <typename T>
+static T& frame_entry(Address re_frame, int frame_offset) {
+ return reinterpret_cast<T&>(Memory::int32_at(re_frame + frame_offset));
+}
+
+
+int RegExpMacroAssemblerX87::CheckStackGuardState(Address* return_address,
+ Code* re_code,
+ Address re_frame) {
+ Isolate* isolate = frame_entry<Isolate*>(re_frame, kIsolate);
+ if (isolate->stack_guard()->IsStackOverflow()) {
+ isolate->StackOverflow();
+ return EXCEPTION;
+ }
+
+  // If it is not a real stack overflow, the stack guard was used to interrupt
+ // execution for another purpose.
+
+ // If this is a direct call from JavaScript retry the RegExp forcing the call
+ // through the runtime system. Currently the direct call cannot handle a GC.
+ if (frame_entry<int>(re_frame, kDirectCall) == 1) {
+ return RETRY;
+ }
+
+ // Prepare for possible GC.
+ HandleScope handles(isolate);
+ Handle<Code> code_handle(re_code);
+
+ Handle<String> subject(frame_entry<String*>(re_frame, kInputString));
+
+ // Current string.
+ bool is_ascii = subject->IsOneByteRepresentationUnderneath();
+
+ ASSERT(re_code->instruction_start() <= *return_address);
+ ASSERT(*return_address <=
+ re_code->instruction_start() + re_code->instruction_size());
+
+ Object* result = isolate->stack_guard()->HandleInterrupts();
+
+ if (*code_handle != re_code) { // Return address no longer valid
+ int delta = code_handle->address() - re_code->address();
+ // Overwrite the return address on the stack.
+ *return_address += delta;
+ }
+
+ if (result->IsException()) {
+ return EXCEPTION;
+ }
+
+ Handle<String> subject_tmp = subject;
+ int slice_offset = 0;
+
+ // Extract the underlying string and the slice offset.
+ if (StringShape(*subject_tmp).IsCons()) {
+ subject_tmp = Handle<String>(ConsString::cast(*subject_tmp)->first());
+ } else if (StringShape(*subject_tmp).IsSliced()) {
+ SlicedString* slice = SlicedString::cast(*subject_tmp);
+ subject_tmp = Handle<String>(slice->parent());
+ slice_offset = slice->offset();
+ }
+
+ // String might have changed.
+ if (subject_tmp->IsOneByteRepresentation() != is_ascii) {
+ // If we changed between an ASCII and an UC16 string, the specialized
+ // code cannot be used, and we need to restart regexp matching from
+ // scratch (including, potentially, compiling a new version of the code).
+ return RETRY;
+ }
+
+ // Otherwise, the content of the string might have moved. It must still
+ // be a sequential or external string with the same content.
+ // Update the start and end pointers in the stack frame to the current
+ // location (whether it has actually moved or not).
+ ASSERT(StringShape(*subject_tmp).IsSequential() ||
+ StringShape(*subject_tmp).IsExternal());
+
+ // The original start address of the characters to match.
+ const byte* start_address = frame_entry<const byte*>(re_frame, kInputStart);
+
+ // Find the current start address of the same character at the current string
+ // position.
+ int start_index = frame_entry<int>(re_frame, kStartIndex);
+ const byte* new_address = StringCharacterPosition(*subject_tmp,
+ start_index + slice_offset);
+
+ if (start_address != new_address) {
+ // If there is a difference, update the object pointer and start and end
+ // addresses in the RegExp stack frame to match the new value.
+ const byte* end_address = frame_entry<const byte* >(re_frame, kInputEnd);
+ int byte_length = static_cast<int>(end_address - start_address);
+ frame_entry<const String*>(re_frame, kInputString) = *subject;
+ frame_entry<const byte*>(re_frame, kInputStart) = new_address;
+ frame_entry<const byte*>(re_frame, kInputEnd) = new_address + byte_length;
+ } else if (frame_entry<const String*>(re_frame, kInputString) != *subject) {
+ // Subject string might have been a ConsString that underwent
+ // short-circuiting during GC. That will not change start_address but
+ // will change pointer inside the subject handle.
+ frame_entry<const String*>(re_frame, kInputString) = *subject;
+ }
+
+ return 0;
+}
+
+
+Operand RegExpMacroAssemblerX87::register_location(int register_index) {
+ ASSERT(register_index < (1<<30));
+ if (num_registers_ <= register_index) {
+ num_registers_ = register_index + 1;
+ }
+ return Operand(ebp, kRegisterZero - register_index * kPointerSize);
+}
+
+
+void RegExpMacroAssemblerX87::CheckPosition(int cp_offset,
+ Label* on_outside_input) {
+ __ cmp(edi, -cp_offset * char_size());
+ BranchOrBacktrack(greater_equal, on_outside_input);
+}
+
+
+void RegExpMacroAssemblerX87::BranchOrBacktrack(Condition condition,
+ Label* to) {
+ if (condition < 0) { // No condition
+ if (to == NULL) {
+ Backtrack();
+ return;
+ }
+ __ jmp(to);
+ return;
+ }
+ if (to == NULL) {
+ __ j(condition, &backtrack_label_);
+ return;
+ }
+ __ j(condition, to);
+}
+
+
+void RegExpMacroAssemblerX87::SafeCall(Label* to) {
+ Label return_to;
+ __ push(Immediate::CodeRelativeOffset(&return_to));
+ __ jmp(to);
+ __ bind(&return_to);
+}
+
+
+void RegExpMacroAssemblerX87::SafeReturn() {
+ __ pop(ebx);
+ __ add(ebx, Immediate(masm_->CodeObject()));
+ __ jmp(ebx);
+}
+
+
+void RegExpMacroAssemblerX87::SafeCallTarget(Label* name) {
+ __ bind(name);
+}
+
+
+void RegExpMacroAssemblerX87::Push(Register source) {
+ ASSERT(!source.is(backtrack_stackpointer()));
+ // Notice: This updates flags, unlike normal Push.
+ __ sub(backtrack_stackpointer(), Immediate(kPointerSize));
+ __ mov(Operand(backtrack_stackpointer(), 0), source);
+}
+
+
+void RegExpMacroAssemblerX87::Push(Immediate value) {
+ // Notice: This updates flags, unlike normal Push.
+ __ sub(backtrack_stackpointer(), Immediate(kPointerSize));
+ __ mov(Operand(backtrack_stackpointer(), 0), value);
+}
+
+
+void RegExpMacroAssemblerX87::Pop(Register target) {
+ ASSERT(!target.is(backtrack_stackpointer()));
+ __ mov(target, Operand(backtrack_stackpointer(), 0));
+ // Notice: This updates flags, unlike normal Pop.
+ __ add(backtrack_stackpointer(), Immediate(kPointerSize));
+}
+
+
+void RegExpMacroAssemblerX87::CheckPreemption() {
+ // Check for preemption.
+ Label no_preempt;
+ ExternalReference stack_limit =
+ ExternalReference::address_of_stack_limit(isolate());
+ __ cmp(esp, Operand::StaticVariable(stack_limit));
+ __ j(above, &no_preempt);
+
+ SafeCall(&check_preempt_label_);
+
+ __ bind(&no_preempt);
+}
+
+
+void RegExpMacroAssemblerX87::CheckStackLimit() {
+ Label no_stack_overflow;
+ ExternalReference stack_limit =
+ ExternalReference::address_of_regexp_stack_limit(isolate());
+ __ cmp(backtrack_stackpointer(), Operand::StaticVariable(stack_limit));
+ __ j(above, &no_stack_overflow);
+
+ SafeCall(&stack_overflow_label_);
+
+ __ bind(&no_stack_overflow);
+}
+
+
+void RegExpMacroAssemblerX87::LoadCurrentCharacterUnchecked(int cp_offset,
+ int characters) {
+ if (mode_ == ASCII) {
+ if (characters == 4) {
+ __ mov(current_character(), Operand(esi, edi, times_1, cp_offset));
+ } else if (characters == 2) {
+ __ movzx_w(current_character(), Operand(esi, edi, times_1, cp_offset));
+ } else {
+ ASSERT(characters == 1);
+ __ movzx_b(current_character(), Operand(esi, edi, times_1, cp_offset));
+ }
+ } else {
+ ASSERT(mode_ == UC16);
+ if (characters == 2) {
+ __ mov(current_character(),
+ Operand(esi, edi, times_1, cp_offset * sizeof(uc16)));
+ } else {
+ ASSERT(characters == 1);
+ __ movzx_w(current_character(),
+ Operand(esi, edi, times_1, cp_offset * sizeof(uc16)));
+ }
+ }
+}
+
+
+#undef __
+
+#endif // V8_INTERPRETED_REGEXP
+
+}} // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_X87
--- /dev/null
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_X87_REGEXP_MACRO_ASSEMBLER_X87_H_
+#define V8_X87_REGEXP_MACRO_ASSEMBLER_X87_H_
+
+#include "x87/assembler-x87.h"
+#include "x87/assembler-x87-inl.h"
+#include "macro-assembler.h"
+
+namespace v8 {
+namespace internal {
+
+#ifndef V8_INTERPRETED_REGEXP
+class RegExpMacroAssemblerX87: public NativeRegExpMacroAssembler {
+ public:
+ RegExpMacroAssemblerX87(Mode mode, int registers_to_save, Zone* zone);
+ virtual ~RegExpMacroAssemblerX87();
+ virtual int stack_limit_slack();
+ virtual void AdvanceCurrentPosition(int by);
+ virtual void AdvanceRegister(int reg, int by);
+ virtual void Backtrack();
+ virtual void Bind(Label* label);
+ virtual void CheckAtStart(Label* on_at_start);
+ virtual void CheckCharacter(uint32_t c, Label* on_equal);
+ virtual void CheckCharacterAfterAnd(uint32_t c,
+ uint32_t mask,
+ Label* on_equal);
+ virtual void CheckCharacterGT(uc16 limit, Label* on_greater);
+ virtual void CheckCharacterLT(uc16 limit, Label* on_less);
+ // A "greedy loop" is a loop that is both greedy and with a simple
+ // body. It has a particularly simple implementation.
+ virtual void CheckGreedyLoop(Label* on_tos_equals_current_position);
+ virtual void CheckNotAtStart(Label* on_not_at_start);
+ virtual void CheckNotBackReference(int start_reg, Label* on_no_match);
+ virtual void CheckNotBackReferenceIgnoreCase(int start_reg,
+ Label* on_no_match);
+ virtual void CheckNotCharacter(uint32_t c, Label* on_not_equal);
+ virtual void CheckNotCharacterAfterAnd(uint32_t c,
+ uint32_t mask,
+ Label* on_not_equal);
+ virtual void CheckNotCharacterAfterMinusAnd(uc16 c,
+ uc16 minus,
+ uc16 mask,
+ Label* on_not_equal);
+ virtual void CheckCharacterInRange(uc16 from,
+ uc16 to,
+ Label* on_in_range);
+ virtual void CheckCharacterNotInRange(uc16 from,
+ uc16 to,
+ Label* on_not_in_range);
+ virtual void CheckBitInTable(Handle<ByteArray> table, Label* on_bit_set);
+
+ // Checks whether the given offset from the current position is before
+ // the end of the string.
+ virtual void CheckPosition(int cp_offset, Label* on_outside_input);
+ virtual bool CheckSpecialCharacterClass(uc16 type, Label* on_no_match);
+ virtual void Fail();
+ virtual Handle<HeapObject> GetCode(Handle<String> source);
+ virtual void GoTo(Label* label);
+ virtual void IfRegisterGE(int reg, int comparand, Label* if_ge);
+ virtual void IfRegisterLT(int reg, int comparand, Label* if_lt);
+ virtual void IfRegisterEqPos(int reg, Label* if_eq);
+ virtual IrregexpImplementation Implementation();
+ virtual void LoadCurrentCharacter(int cp_offset,
+ Label* on_end_of_input,
+ bool check_bounds = true,
+ int characters = 1);
+ virtual void PopCurrentPosition();
+ virtual void PopRegister(int register_index);
+ virtual void PushBacktrack(Label* label);
+ virtual void PushCurrentPosition();
+ virtual void PushRegister(int register_index,
+ StackCheckFlag check_stack_limit);
+ virtual void ReadCurrentPositionFromRegister(int reg);
+ virtual void ReadStackPointerFromRegister(int reg);
+ virtual void SetCurrentPositionFromEnd(int by);
+ virtual void SetRegister(int register_index, int to);
+ virtual bool Succeed();
+ virtual void WriteCurrentPositionToRegister(int reg, int cp_offset);
+ virtual void ClearRegisters(int reg_from, int reg_to);
+ virtual void WriteStackPointerToRegister(int reg);
+
+ // Called from RegExp if the stack-guard is triggered.
+ // If the code object is relocated, the return address is fixed before
+ // returning.
+ static int CheckStackGuardState(Address* return_address,
+ Code* re_code,
+ Address re_frame);
+
+ private:
+ // Offsets from ebp of function parameters and stored registers.
+ static const int kFramePointer = 0;
+ // Above the frame pointer - function parameters and return address.
+ static const int kReturn_eip = kFramePointer + kPointerSize;
+ static const int kFrameAlign = kReturn_eip + kPointerSize;
+ // Parameters.
+ static const int kInputString = kFrameAlign;
+ static const int kStartIndex = kInputString + kPointerSize;
+ static const int kInputStart = kStartIndex + kPointerSize;
+ static const int kInputEnd = kInputStart + kPointerSize;
+ static const int kRegisterOutput = kInputEnd + kPointerSize;
+ // For the case of global regular expression, we have room to store at least
+ // one set of capture results. For the case of non-global regexp, we ignore
+ // this value.
+ static const int kNumOutputRegisters = kRegisterOutput + kPointerSize;
+ static const int kStackHighEnd = kNumOutputRegisters + kPointerSize;
+ static const int kDirectCall = kStackHighEnd + kPointerSize;
+ static const int kIsolate = kDirectCall + kPointerSize;
+ // Below the frame pointer - local stack variables.
+ // When adding local variables remember to push space for them in
+ // the frame in GetCode.
+ static const int kBackup_esi = kFramePointer - kPointerSize;
+ static const int kBackup_edi = kBackup_esi - kPointerSize;
+ static const int kBackup_ebx = kBackup_edi - kPointerSize;
+ static const int kSuccessfulCaptures = kBackup_ebx - kPointerSize;
+ static const int kInputStartMinusOne = kSuccessfulCaptures - kPointerSize;
+ // First register address. Following registers are below it on the stack.
+ static const int kRegisterZero = kInputStartMinusOne - kPointerSize;
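+  // With kPointerSize == 4 this places the nine incoming parameters at
+  // ebp+8 .. ebp+40 and the five locals pushed in GetCode at ebp-4 .. ebp-20;
+  // regexp register i then lives at ebp + kRegisterZero - i * kPointerSize.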
+
+ // Initial size of code buffer.
+ static const size_t kRegExpCodeSize = 1024;
+
+ // Load a number of characters at the given offset from the
+ // current position, into the current-character register.
+ void LoadCurrentCharacterUnchecked(int cp_offset, int character_count);
+
+ // Check whether preemption has been requested.
+ void CheckPreemption();
+
+ // Check whether we are exceeding the stack limit on the backtrack stack.
+ void CheckStackLimit();
+
+ // Generate a call to CheckStackGuardState.
+ void CallCheckStackGuardState(Register scratch);
+
+ // The ebp-relative location of a regexp register.
+ Operand register_location(int register_index);
+
+ // The register containing the current character after LoadCurrentCharacter.
+ inline Register current_character() { return edx; }
+
+ // The register containing the backtrack stack top. Provides a meaningful
+ // name to the register.
+ inline Register backtrack_stackpointer() { return ecx; }
+
+ // Byte size of chars in the string to match (decided by the Mode argument)
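+  // (Mode is defined with ASCII == 1 and UC16 == 2, so the enum value is the
+  // character width in bytes.)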
+ inline int char_size() { return static_cast<int>(mode_); }
+
+ // Equivalent to a conditional branch to the label, unless the label
+ // is NULL, in which case it is a conditional Backtrack.
+ void BranchOrBacktrack(Condition condition, Label* to);
+
+ // Call and return internally in the generated code in a way that
+ // is GC-safe (i.e., doesn't leave absolute code addresses on the stack)
+ inline void SafeCall(Label* to);
+ inline void SafeReturn();
+ inline void SafeCallTarget(Label* name);
+
+ // Pushes the value of a register on the backtrack stack. Decrements the
+ // stack pointer (ecx) by a word size and stores the register's value there.
+ inline void Push(Register source);
+
+ // Pushes a value on the backtrack stack. Decrements the stack pointer (ecx)
+ // by a word size and stores the value there.
+ inline void Push(Immediate value);
+
+ // Pops a value from the backtrack stack. Reads the word at the stack pointer
+ // (ecx) and increments it by a word size.
+ inline void Pop(Register target);
+
+ Isolate* isolate() const { return masm_->isolate(); }
+
+ MacroAssembler* masm_;
+
+ // Which mode to generate code for (ASCII or UC16).
+ Mode mode_;
+
+ // One greater than maximal register index actually used.
+ int num_registers_;
+
+ // Number of registers to output at the end (the saved registers
+ // are always 0..num_saved_registers_-1)
+ int num_saved_registers_;
+
+ // Labels used internally.
+ Label entry_label_;
+ Label start_label_;
+ Label success_label_;
+ Label backtrack_label_;
+ Label exit_label_;
+ Label check_preempt_label_;
+ Label stack_overflow_label_;
+};
+#endif // V8_INTERPRETED_REGEXP
+
+}} // namespace v8::internal
+
+#endif // V8_X87_REGEXP_MACRO_ASSEMBLER_X87_H_
--- /dev/null
+// Copyright 2008 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+
+// Since there is no simulator for the ia32 architecture this file is empty.
--- /dev/null
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_X87_SIMULATOR_X87_H_
+#define V8_X87_SIMULATOR_X87_H_
+
+#include "allocation.h"
+
+namespace v8 {
+namespace internal {
+
+// Since there is no simulator for the ia32 architecture the only thing we can
+// do is to call the entry directly.
+#define CALL_GENERATED_CODE(entry, p0, p1, p2, p3, p4) \
+ (entry(p0, p1, p2, p3, p4))
+
+
+typedef int (*regexp_matcher)(String*, int, const byte*,
+ const byte*, int*, int, Address, int, Isolate*);
+
+// Call the generated regexp code directly. The code at the entry address should
+// expect nine int/pointer sized arguments and return an int.
+#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7, p8) \
+ (FUNCTION_CAST<regexp_matcher>(entry)(p0, p1, p2, p3, p4, p5, p6, p7, p8))
+
+
+// The stack limit beyond which we will throw stack overflow errors in
+// generated code. Because generated code on ia32 uses the C stack, we
+// just use the C stack limit.
+class SimulatorStack : public v8::internal::AllStatic {
+ public:
+ static inline uintptr_t JsLimitFromCLimit(Isolate* isolate,
+ uintptr_t c_limit) {
+ USE(isolate);
+ return c_limit;
+ }
+
+ static inline uintptr_t RegisterCTryCatch(uintptr_t try_catch_address) {
+ return try_catch_address;
+ }
+
+ static inline void UnregisterCTryCatch() { }
+};
+
+} } // namespace v8::internal
+
+#endif // V8_X87_SIMULATOR_X87_H_
--- /dev/null
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "v8.h"
+
+#if V8_TARGET_ARCH_X87
+
+#include "ic-inl.h"
+#include "codegen.h"
+#include "stub-cache.h"
+
+namespace v8 {
+namespace internal {
+
+#define __ ACCESS_MASM(masm)
+
+
+static void ProbeTable(Isolate* isolate,
+ MacroAssembler* masm,
+ Code::Flags flags,
+ StubCache::Table table,
+ Register name,
+ Register receiver,
+ // Number of the cache entry pointer-size scaled.
+ Register offset,
+ Register extra) {
+ ExternalReference key_offset(isolate->stub_cache()->key_reference(table));
+ ExternalReference value_offset(isolate->stub_cache()->value_reference(table));
+ ExternalReference map_offset(isolate->stub_cache()->map_reference(table));
+
+ Label miss;
+
+ // Multiply by 3 because there are 3 fields per entry (name, code, map).
+ __ lea(offset, Operand(offset, offset, times_2, 0));
+
+ if (extra.is_valid()) {
+ // Get the code entry from the cache.
+ __ mov(extra, Operand::StaticArray(offset, times_1, value_offset));
+
+ // Check that the key in the entry matches the name.
+ __ cmp(name, Operand::StaticArray(offset, times_1, key_offset));
+ __ j(not_equal, &miss);
+
+ // Check the map matches.
+ __ mov(offset, Operand::StaticArray(offset, times_1, map_offset));
+ __ cmp(offset, FieldOperand(receiver, HeapObject::kMapOffset));
+ __ j(not_equal, &miss);
+
+ // Check that the flags match what we're looking for.
+ __ mov(offset, FieldOperand(extra, Code::kFlagsOffset));
+ __ and_(offset, ~Code::kFlagsNotUsedInLookup);
+ __ cmp(offset, flags);
+ __ j(not_equal, &miss);
+
+#ifdef DEBUG
+ if (FLAG_test_secondary_stub_cache && table == StubCache::kPrimary) {
+ __ jmp(&miss);
+ } else if (FLAG_test_primary_stub_cache && table == StubCache::kSecondary) {
+ __ jmp(&miss);
+ }
+#endif
+
+ // Jump to the first instruction in the code stub.
+ __ add(extra, Immediate(Code::kHeaderSize - kHeapObjectTag));
+ __ jmp(extra);
+
+ __ bind(&miss);
+ } else {
+ // Save the offset on the stack.
+ __ push(offset);
+
+ // Check that the key in the entry matches the name.
+ __ cmp(name, Operand::StaticArray(offset, times_1, key_offset));
+ __ j(not_equal, &miss);
+
+ // Check the map matches.
+ __ mov(offset, Operand::StaticArray(offset, times_1, map_offset));
+ __ cmp(offset, FieldOperand(receiver, HeapObject::kMapOffset));
+ __ j(not_equal, &miss);
+
+ // Restore offset register.
+ __ mov(offset, Operand(esp, 0));
+
+ // Get the code entry from the cache.
+ __ mov(offset, Operand::StaticArray(offset, times_1, value_offset));
+
+ // Check that the flags match what we're looking for.
+ __ mov(offset, FieldOperand(offset, Code::kFlagsOffset));
+ __ and_(offset, ~Code::kFlagsNotUsedInLookup);
+ __ cmp(offset, flags);
+ __ j(not_equal, &miss);
+
+#ifdef DEBUG
+ if (FLAG_test_secondary_stub_cache && table == StubCache::kPrimary) {
+ __ jmp(&miss);
+ } else if (FLAG_test_primary_stub_cache && table == StubCache::kSecondary) {
+ __ jmp(&miss);
+ }
+#endif
+
+ // Restore offset and re-load code entry from cache.
+ __ pop(offset);
+ __ mov(offset, Operand::StaticArray(offset, times_1, value_offset));
+
+ // Jump to the first instruction in the code stub.
+ __ add(offset, Immediate(Code::kHeaderSize - kHeapObjectTag));
+ __ jmp(offset);
+
+ // Pop at miss.
+ __ bind(&miss);
+ __ pop(offset);
+ }
+}
+
+
+void StubCompiler::GenerateDictionaryNegativeLookup(MacroAssembler* masm,
+ Label* miss_label,
+ Register receiver,
+ Handle<Name> name,
+ Register scratch0,
+ Register scratch1) {
+ ASSERT(name->IsUniqueName());
+ ASSERT(!receiver.is(scratch0));
+ Counters* counters = masm->isolate()->counters();
+ __ IncrementCounter(counters->negative_lookups(), 1);
+ __ IncrementCounter(counters->negative_lookups_miss(), 1);
+
+ __ mov(scratch0, FieldOperand(receiver, HeapObject::kMapOffset));
+
+ const int kInterceptorOrAccessCheckNeededMask =
+ (1 << Map::kHasNamedInterceptor) | (1 << Map::kIsAccessCheckNeeded);
+
+ // Bail out if the receiver has a named interceptor or requires access checks.
+ __ test_b(FieldOperand(scratch0, Map::kBitFieldOffset),
+ kInterceptorOrAccessCheckNeededMask);
+ __ j(not_zero, miss_label);
+
+ // Check that receiver is a JSObject.
+ __ CmpInstanceType(scratch0, FIRST_SPEC_OBJECT_TYPE);
+ __ j(below, miss_label);
+
+ // Load properties array.
+ Register properties = scratch0;
+ __ mov(properties, FieldOperand(receiver, JSObject::kPropertiesOffset));
+
+ // Check that the properties array is a dictionary.
+ __ cmp(FieldOperand(properties, HeapObject::kMapOffset),
+ Immediate(masm->isolate()->factory()->hash_table_map()));
+ __ j(not_equal, miss_label);
+
+ Label done;
+ NameDictionaryLookupStub::GenerateNegativeLookup(masm,
+ miss_label,
+ &done,
+ properties,
+ name,
+ scratch1);
+ __ bind(&done);
+ __ DecrementCounter(counters->negative_lookups_miss(), 1);
+}
+
+
+void StubCache::GenerateProbe(MacroAssembler* masm,
+ Code::Flags flags,
+ Register receiver,
+ Register name,
+ Register scratch,
+ Register extra,
+ Register extra2,
+ Register extra3) {
+ Label miss;
+
+ // Assert that code is valid. The multiplying code relies on the entry size
+ // being 12.
+ ASSERT(sizeof(Entry) == 12);
+
+ // Assert the flags do not name a specific type.
+ ASSERT(Code::ExtractTypeFromFlags(flags) == 0);
+
+ // Assert that there are no register conflicts.
+ ASSERT(!scratch.is(receiver));
+ ASSERT(!scratch.is(name));
+ ASSERT(!extra.is(receiver));
+ ASSERT(!extra.is(name));
+ ASSERT(!extra.is(scratch));
+
+ // Assert scratch and extra registers are valid, and extra2/3 are unused.
+ ASSERT(!scratch.is(no_reg));
+ ASSERT(extra2.is(no_reg));
+ ASSERT(extra3.is(no_reg));
+
+ Register offset = scratch;
+ scratch = no_reg;
+
+ Counters* counters = masm->isolate()->counters();
+ __ IncrementCounter(counters->megamorphic_stub_cache_probes(), 1);
+
+ // Check that the receiver isn't a smi.
+ __ JumpIfSmi(receiver, &miss);
+
+ // Get the map of the receiver and compute the hash.
+ __ mov(offset, FieldOperand(name, Name::kHashFieldOffset));
+ __ add(offset, FieldOperand(receiver, HeapObject::kMapOffset));
+ __ xor_(offset, flags);
+ // We mask out the last two bits because they are not part of the hash and
+ // they are always 01 for maps. Also in the two 'and' instructions below.
+ __ and_(offset, (kPrimaryTableSize - 1) << kHeapObjectTagSize);
+ // ProbeTable expects the offset to be pointer scaled, which it is, because
+ // the heap object tag size is 2 and the pointer size log 2 is also 2.
+ ASSERT(kHeapObjectTagSize == kPointerSizeLog2);
+
+ // Probe the primary table.
+ ProbeTable(isolate(), masm, flags, kPrimary, name, receiver, offset, extra);
+
+ // Primary miss: Compute hash for secondary probe.
+ __ mov(offset, FieldOperand(name, Name::kHashFieldOffset));
+ __ add(offset, FieldOperand(receiver, HeapObject::kMapOffset));
+ __ xor_(offset, flags);
+ __ and_(offset, (kPrimaryTableSize - 1) << kHeapObjectTagSize);
+ __ sub(offset, name);
+ __ add(offset, Immediate(flags));
+ __ and_(offset, (kSecondaryTableSize - 1) << kHeapObjectTagSize);
+
+ // Probe the secondary table.
+ ProbeTable(
+ isolate(), masm, flags, kSecondary, name, receiver, offset, extra);
+
+ // Cache miss: Fall-through and let caller handle the miss by
+ // entering the runtime system.
+ __ bind(&miss);
+ __ IncrementCounter(counters->megamorphic_stub_cache_misses(), 1);
+}
+
+
+void StubCompiler::GenerateLoadGlobalFunctionPrototype(MacroAssembler* masm,
+ int index,
+ Register prototype) {
+ __ LoadGlobalFunction(index, prototype);
+ __ LoadGlobalFunctionInitialMap(prototype, prototype);
+ // Load the prototype from the initial map.
+ __ mov(prototype, FieldOperand(prototype, Map::kPrototypeOffset));
+}
+
+
+void StubCompiler::GenerateDirectLoadGlobalFunctionPrototype(
+ MacroAssembler* masm,
+ int index,
+ Register prototype,
+ Label* miss) {
+ // Get the global function with the given index.
+ Handle<JSFunction> function(
+ JSFunction::cast(masm->isolate()->native_context()->get(index)));
+ // Check we're still in the same context.
+ Register scratch = prototype;
+ const int offset = Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX);
+ __ mov(scratch, Operand(esi, offset));
+ __ mov(scratch, FieldOperand(scratch, GlobalObject::kNativeContextOffset));
+ __ cmp(Operand(scratch, Context::SlotOffset(index)), function);
+ __ j(not_equal, miss);
+
+ // Load its initial map. The global functions all have initial maps.
+ __ Move(prototype, Immediate(Handle<Map>(function->initial_map())));
+ // Load the prototype from the initial map.
+ __ mov(prototype, FieldOperand(prototype, Map::kPrototypeOffset));
+}
+
+
+void StubCompiler::GenerateLoadArrayLength(MacroAssembler* masm,
+ Register receiver,
+ Register scratch,
+ Label* miss_label) {
+ // Check that the receiver isn't a smi.
+ __ JumpIfSmi(receiver, miss_label);
+
+ // Check that the object is a JS array.
+ __ CmpObjectType(receiver, JS_ARRAY_TYPE, scratch);
+ __ j(not_equal, miss_label);
+
+ // Load length directly from the JS array.
+ __ mov(eax, FieldOperand(receiver, JSArray::kLengthOffset));
+ __ ret(0);
+}
+
+
+void StubCompiler::GenerateLoadFunctionPrototype(MacroAssembler* masm,
+ Register receiver,
+ Register scratch1,
+ Register scratch2,
+ Label* miss_label) {
+ __ TryGetFunctionPrototype(receiver, scratch1, scratch2, miss_label);
+ __ mov(eax, scratch1);
+ __ ret(0);
+}
+
+
+void StubCompiler::GenerateFastPropertyLoad(MacroAssembler* masm,
+ Register dst,
+ Register src,
+ bool inobject,
+ int index,
+ Representation representation) {
+ ASSERT(!representation.IsDouble());
+ int offset = index * kPointerSize;
+ if (!inobject) {
+ // Calculate the offset into the properties array.
+ offset = offset + FixedArray::kHeaderSize;
+ __ mov(dst, FieldOperand(src, JSObject::kPropertiesOffset));
+ src = dst;
+ }
+ __ mov(dst, FieldOperand(src, offset));
+}
+
+
+static void PushInterceptorArguments(MacroAssembler* masm,
+ Register receiver,
+ Register holder,
+ Register name,
+ Handle<JSObject> holder_obj) {
+ STATIC_ASSERT(StubCache::kInterceptorArgsNameIndex == 0);
+ STATIC_ASSERT(StubCache::kInterceptorArgsInfoIndex == 1);
+ STATIC_ASSERT(StubCache::kInterceptorArgsThisIndex == 2);
+ STATIC_ASSERT(StubCache::kInterceptorArgsHolderIndex == 3);
+ STATIC_ASSERT(StubCache::kInterceptorArgsLength == 4);
+ __ push(name);
+ Handle<InterceptorInfo> interceptor(holder_obj->GetNamedInterceptor());
+ ASSERT(!masm->isolate()->heap()->InNewSpace(*interceptor));
+ Register scratch = name;
+ __ mov(scratch, Immediate(interceptor));
+ __ push(scratch);
+ __ push(receiver);
+ __ push(holder);
+}
+
+
+static void CompileCallLoadPropertyWithInterceptor(
+ MacroAssembler* masm,
+ Register receiver,
+ Register holder,
+ Register name,
+ Handle<JSObject> holder_obj,
+ IC::UtilityId id) {
+ PushInterceptorArguments(masm, receiver, holder, name, holder_obj);
+ __ CallExternalReference(
+ ExternalReference(IC_Utility(id), masm->isolate()),
+ StubCache::kInterceptorArgsLength);
+}
+
+
+// Generate call to api function.
+// This function uses push() to generate smaller, faster code than
+// the version above. It is an optimization that will be removed
+// when api call ICs are generated in hydrogen.
+void StubCompiler::GenerateFastApiCall(MacroAssembler* masm,
+ const CallOptimization& optimization,
+ Handle<Map> receiver_map,
+ Register receiver,
+ Register scratch_in,
+ bool is_store,
+ int argc,
+ Register* values) {
+ // Copy return value.
+ __ pop(scratch_in);
+ // receiver
+ __ push(receiver);
+ // Write the arguments to stack frame.
+ for (int i = 0; i < argc; i++) {
+ Register arg = values[argc-1-i];
+ ASSERT(!receiver.is(arg));
+ ASSERT(!scratch_in.is(arg));
+ __ push(arg);
+ }
+ __ push(scratch_in);
+ // Stack now matches JSFunction abi.
+ ASSERT(optimization.is_simple_api_call());
+
+ // Abi for CallApiFunctionStub.
+ Register callee = eax;
+ Register call_data = ebx;
+ Register holder = ecx;
+ Register api_function_address = edx;
+ Register scratch = edi; // scratch_in is no longer valid.
+
+ // Put holder in place.
+ CallOptimization::HolderLookup holder_lookup;
+ Handle<JSObject> api_holder = optimization.LookupHolderOfExpectedType(
+ receiver_map,
+ &holder_lookup);
+ switch (holder_lookup) {
+ case CallOptimization::kHolderIsReceiver:
+ __ Move(holder, receiver);
+ break;
+ case CallOptimization::kHolderFound:
+ __ LoadHeapObject(holder, api_holder);
+ break;
+ case CallOptimization::kHolderNotFound:
+ UNREACHABLE();
+ break;
+ }
+
+ Isolate* isolate = masm->isolate();
+ Handle<JSFunction> function = optimization.constant_function();
+ Handle<CallHandlerInfo> api_call_info = optimization.api_call_info();
+ Handle<Object> call_data_obj(api_call_info->data(), isolate);
+
+ // Put callee in place.
+ __ LoadHeapObject(callee, function);
+
+ bool call_data_undefined = false;
+ // Put call_data in place.
+ if (isolate->heap()->InNewSpace(*call_data_obj)) {
+ __ mov(scratch, api_call_info);
+ __ mov(call_data, FieldOperand(scratch, CallHandlerInfo::kDataOffset));
+ } else if (call_data_obj->IsUndefined()) {
+ call_data_undefined = true;
+ __ mov(call_data, Immediate(isolate->factory()->undefined_value()));
+ } else {
+ __ mov(call_data, call_data_obj);
+ }
+
+ // Put api_function_address in place.
+ Address function_address = v8::ToCData<Address>(api_call_info->callback());
+ __ mov(api_function_address, Immediate(function_address));
+
+ // Jump to stub.
+ CallApiFunctionStub stub(isolate, is_store, call_data_undefined, argc);
+ __ TailCallStub(&stub);
+}
+
+
+void StoreStubCompiler::GenerateRestoreName(MacroAssembler* masm,
+ Label* label,
+ Handle<Name> name) {
+ if (!label->is_unused()) {
+ __ bind(label);
+ __ mov(this->name(), Immediate(name));
+ }
+}
+
+
+// Generate code to check that a global property cell is empty. Create
+// the property cell at compilation time if no cell exists for the
+// property.
+void StubCompiler::GenerateCheckPropertyCell(MacroAssembler* masm,
+ Handle<JSGlobalObject> global,
+ Handle<Name> name,
+ Register scratch,
+ Label* miss) {
+ Handle<PropertyCell> cell =
+ JSGlobalObject::EnsurePropertyCell(global, name);
+ ASSERT(cell->value()->IsTheHole());
+ Handle<Oddball> the_hole = masm->isolate()->factory()->the_hole_value();
+ if (masm->serializer_enabled()) {
+ __ mov(scratch, Immediate(cell));
+ __ cmp(FieldOperand(scratch, PropertyCell::kValueOffset),
+ Immediate(the_hole));
+ } else {
+ __ cmp(Operand::ForCell(cell), Immediate(the_hole));
+ }
+ __ j(not_equal, miss);
+}
+
+
+void StoreStubCompiler::GenerateNegativeHolderLookup(
+ MacroAssembler* masm,
+ Handle<JSObject> holder,
+ Register holder_reg,
+ Handle<Name> name,
+ Label* miss) {
+ if (holder->IsJSGlobalObject()) {
+ GenerateCheckPropertyCell(
+ masm, Handle<JSGlobalObject>::cast(holder), name, scratch1(), miss);
+ } else if (!holder->HasFastProperties() && !holder->IsJSGlobalProxy()) {
+ GenerateDictionaryNegativeLookup(
+ masm, miss, holder_reg, name, scratch1(), scratch2());
+ }
+}
+
+
+// Receiver_reg is preserved on jumps to miss_label, but may be destroyed if
+// store is successful.
+void StoreStubCompiler::GenerateStoreTransition(MacroAssembler* masm,
+ Handle<JSObject> object,
+ LookupResult* lookup,
+ Handle<Map> transition,
+ Handle<Name> name,
+ Register receiver_reg,
+ Register storage_reg,
+ Register value_reg,
+ Register scratch1,
+ Register scratch2,
+ Register unused,
+ Label* miss_label,
+ Label* slow) {
+ int descriptor = transition->LastAdded();
+ DescriptorArray* descriptors = transition->instance_descriptors();
+ PropertyDetails details = descriptors->GetDetails(descriptor);
+ Representation representation = details.representation();
+ ASSERT(!representation.IsNone());
+
+ if (details.type() == CONSTANT) {
+ Handle<Object> constant(descriptors->GetValue(descriptor), masm->isolate());
+ __ CmpObject(value_reg, constant);
+ __ j(not_equal, miss_label);
+ } else if (representation.IsSmi()) {
+ __ JumpIfNotSmi(value_reg, miss_label);
+ } else if (representation.IsHeapObject()) {
+ __ JumpIfSmi(value_reg, miss_label);
+ HeapType* field_type = descriptors->GetFieldType(descriptor);
+ HeapType::Iterator<Map> it = field_type->Classes();
+ if (!it.Done()) {
+ Label do_store;
+ while (true) {
+ __ CompareMap(value_reg, it.Current());
+ it.Advance();
+ if (it.Done()) {
+ __ j(not_equal, miss_label);
+ break;
+ }
+ __ j(equal, &do_store, Label::kNear);
+ }
+ __ bind(&do_store);
+ }
+ } else if (representation.IsDouble()) {
+ Label do_store, heap_number;
+ __ AllocateHeapNumber(storage_reg, scratch1, scratch2, slow);
+
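+ // Convert the incoming value to a double on the x87 stack: a smi is
+ // untagged and loaded with fild_s, a heap number is loaded with fld_d
+ // below; either way the result is written into the freshly allocated
+ // heap number with fstp_d at do_store.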
+ __ JumpIfNotSmi(value_reg, &heap_number);
+ __ SmiUntag(value_reg);
+ __ push(value_reg);
+ __ fild_s(Operand(esp, 0));
+ __ pop(value_reg);
+ __ SmiTag(value_reg);
+ __ jmp(&do_store);
+
+ __ bind(&heap_number);
+ __ CheckMap(value_reg, masm->isolate()->factory()->heap_number_map(),
+ miss_label, DONT_DO_SMI_CHECK);
+ __ fld_d(FieldOperand(value_reg, HeapNumber::kValueOffset));
+
+ __ bind(&do_store);
+ __ fstp_d(FieldOperand(storage_reg, HeapNumber::kValueOffset));
+ }
+
+ // Stub never generated for non-global objects that require access
+ // checks.
+ ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
+
+ // Perform map transition for the receiver if necessary.
+ if (details.type() == FIELD &&
+ object->map()->unused_property_fields() == 0) {
+ // The properties must be extended before we can store the value.
+ // We jump to a runtime call that extends the properties array.
+ __ pop(scratch1); // Return address.
+ __ push(receiver_reg);
+ __ push(Immediate(transition));
+ __ push(value_reg);
+ __ push(scratch1);
+ __ TailCallExternalReference(
+ ExternalReference(IC_Utility(IC::kSharedStoreIC_ExtendStorage),
+ masm->isolate()),
+ 3,
+ 1);
+ return;
+ }
+
+ // Update the map of the object.
+ __ mov(scratch1, Immediate(transition));
+ __ mov(FieldOperand(receiver_reg, HeapObject::kMapOffset), scratch1);
+
+ // Update the write barrier for the map field.
+ __ RecordWriteField(receiver_reg,
+ HeapObject::kMapOffset,
+ scratch1,
+ scratch2,
+ OMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
+
+ if (details.type() == CONSTANT) {
+ ASSERT(value_reg.is(eax));
+ __ ret(0);
+ return;
+ }
+
+ int index = transition->instance_descriptors()->GetFieldIndex(
+ transition->LastAdded());
+
+ // Adjust for the number of properties stored in the object. Even in the
+ // face of a transition we can use the old map here because the size of the
+ // object and the number of in-object properties is not going to change.
+ index -= object->map()->inobject_properties();
+
+ SmiCheck smi_check = representation.IsTagged()
+ ? INLINE_SMI_CHECK : OMIT_SMI_CHECK;
+ // TODO(verwaest): Share this code as a code stub.
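+ // A negative index denotes an in-object property (counted back from the
+ // end of the object); non-negative indices go into the out-of-object
+ // properties array.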
+ if (index < 0) {
+ // Set the property straight into the object.
+ int offset = object->map()->instance_size() + (index * kPointerSize);
+ if (representation.IsDouble()) {
+ __ mov(FieldOperand(receiver_reg, offset), storage_reg);
+ } else {
+ __ mov(FieldOperand(receiver_reg, offset), value_reg);
+ }
+
+ if (!representation.IsSmi()) {
+ // Update the write barrier for the array address.
+ if (!representation.IsDouble()) {
+ __ mov(storage_reg, value_reg);
+ }
+ __ RecordWriteField(receiver_reg,
+ offset,
+ storage_reg,
+ scratch1,
+ EMIT_REMEMBERED_SET,
+ smi_check);
+ }
+ } else {
+ // Write to the properties array.
+ int offset = index * kPointerSize + FixedArray::kHeaderSize;
+ // Get the properties array (optimistically).
+ __ mov(scratch1, FieldOperand(receiver_reg, JSObject::kPropertiesOffset));
+ if (representation.IsDouble()) {
+ __ mov(FieldOperand(scratch1, offset), storage_reg);
+ } else {
+ __ mov(FieldOperand(scratch1, offset), value_reg);
+ }
+
+ if (!representation.IsSmi()) {
+ // Update the write barrier for the array address.
+ if (!representation.IsDouble()) {
+ __ mov(storage_reg, value_reg);
+ }
+ __ RecordWriteField(scratch1,
+ offset,
+ storage_reg,
+ receiver_reg,
+ EMIT_REMEMBERED_SET,
+ smi_check);
+ }
+ }
+
+ // Return the value (register eax).
+ ASSERT(value_reg.is(eax));
+ __ ret(0);
+}
+
+
+// Both name_reg and receiver_reg are preserved on jumps to miss_label,
+// but may be destroyed if store is successful.
+void StoreStubCompiler::GenerateStoreField(MacroAssembler* masm,
+ Handle<JSObject> object,
+ LookupResult* lookup,
+ Register receiver_reg,
+ Register name_reg,
+ Register value_reg,
+ Register scratch1,
+ Register scratch2,
+ Label* miss_label) {
+ // Stub never generated for non-global objects that require access
+ // checks.
+ ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
+
+ int index = lookup->GetFieldIndex().field_index();
+
+ // Adjust for the number of properties stored in the object. Even in the
+ // face of a transition we can use the old map here because the size of the
+ // object and the number of in-object properties is not going to change.
+ index -= object->map()->inobject_properties();
+
+ Representation representation = lookup->representation();
+ ASSERT(!representation.IsNone());
+ if (representation.IsSmi()) {
+ __ JumpIfNotSmi(value_reg, miss_label);
+ } else if (representation.IsHeapObject()) {
+ __ JumpIfSmi(value_reg, miss_label);
+ HeapType* field_type = lookup->GetFieldType();
+ HeapType::Iterator<Map> it = field_type->Classes();
+ if (!it.Done()) {
+ Label do_store;
+ while (true) {
+ __ CompareMap(value_reg, it.Current());
+ it.Advance();
+ if (it.Done()) {
+ __ j(not_equal, miss_label);
+ break;
+ }
+ __ j(equal, &do_store, Label::kNear);
+ }
+ __ bind(&do_store);
+ }
+ } else if (representation.IsDouble()) {
+ // Load the double storage.
+ if (index < 0) {
+ int offset = object->map()->instance_size() + (index * kPointerSize);
+ __ mov(scratch1, FieldOperand(receiver_reg, offset));
+ } else {
+ __ mov(scratch1, FieldOperand(receiver_reg, JSObject::kPropertiesOffset));
+ int offset = index * kPointerSize + FixedArray::kHeaderSize;
+ __ mov(scratch1, FieldOperand(scratch1, offset));
+ }
+
+ // Store the value into the storage.
+ Label do_store, heap_number;
+ __ JumpIfNotSmi(value_reg, &heap_number);
+ __ SmiUntag(value_reg);
+ __ push(value_reg);
+ __ fild_s(Operand(esp, 0));
+ __ pop(value_reg);
+ __ SmiTag(value_reg);
+ __ jmp(&do_store);
+ __ bind(&heap_number);
+ __ CheckMap(value_reg, masm->isolate()->factory()->heap_number_map(),
+ miss_label, DONT_DO_SMI_CHECK);
+ __ fld_d(FieldOperand(value_reg, HeapNumber::kValueOffset));
+ __ bind(&do_store);
+ __ fstp_d(FieldOperand(scratch1, HeapNumber::kValueOffset));
+ // Return the value (register eax).
+ ASSERT(value_reg.is(eax));
+ __ ret(0);
+ return;
+ }
+
+ ASSERT(!representation.IsDouble());
+ // TODO(verwaest): Share this code as a code stub.
+ SmiCheck smi_check = representation.IsTagged()
+ ? INLINE_SMI_CHECK : OMIT_SMI_CHECK;
+ if (index < 0) {
+ // Set the property straight into the object.
+ int offset = object->map()->instance_size() + (index * kPointerSize);
+ __ mov(FieldOperand(receiver_reg, offset), value_reg);
+
+ if (!representation.IsSmi()) {
+ // Update the write barrier for the array address.
+ // Pass the value being stored in the now unused name_reg.
+ __ mov(name_reg, value_reg);
+ __ RecordWriteField(receiver_reg,
+ offset,
+ name_reg,
+ scratch1,
+ EMIT_REMEMBERED_SET,
+ smi_check);
+ }
+ } else {
+ // Write to the properties array.
+ int offset = index * kPointerSize + FixedArray::kHeaderSize;
+ // Get the properties array (optimistically).
+ __ mov(scratch1, FieldOperand(receiver_reg, JSObject::kPropertiesOffset));
+ __ mov(FieldOperand(scratch1, offset), value_reg);
+
+ if (!representation.IsSmi()) {
+ // Update the write barrier for the array address.
+ // Pass the value being stored in the now unused name_reg.
+ __ mov(name_reg, value_reg);
+ __ RecordWriteField(scratch1,
+ offset,
+ name_reg,
+ receiver_reg,
+ EMIT_REMEMBERED_SET,
+ smi_check);
+ }
+ }
+
+ // Return the value (register eax).
+ ASSERT(value_reg.is(eax));
+ __ ret(0);
+}
+
+
+void StubCompiler::GenerateTailCall(MacroAssembler* masm, Handle<Code> code) {
+ __ jmp(code, RelocInfo::CODE_TARGET);
+}
+
+
+#undef __
+#define __ ACCESS_MASM(masm())
+
+
+Register StubCompiler::CheckPrototypes(Handle<HeapType> type,
+ Register object_reg,
+ Handle<JSObject> holder,
+ Register holder_reg,
+ Register scratch1,
+ Register scratch2,
+ Handle<Name> name,
+ Label* miss,
+ PrototypeCheckType check) {
+ Handle<Map> receiver_map(IC::TypeToMap(*type, isolate()));
+
+ // Make sure there's no overlap between holder and object registers.
+ ASSERT(!scratch1.is(object_reg) && !scratch1.is(holder_reg));
+ ASSERT(!scratch2.is(object_reg) && !scratch2.is(holder_reg)
+ && !scratch2.is(scratch1));
+
+ // Keep track of the current object in register reg.
+ Register reg = object_reg;
+ int depth = 0;
+
+ Handle<JSObject> current = Handle<JSObject>::null();
+ if (type->IsConstant()) current =
+ Handle<JSObject>::cast(type->AsConstant()->Value());
+ Handle<JSObject> prototype = Handle<JSObject>::null();
+ Handle<Map> current_map = receiver_map;
+ Handle<Map> holder_map(holder->map());
+ // Traverse the prototype chain and check the maps in the prototype chain for
+ // fast and global objects or do negative lookup for normal objects.
+ while (!current_map.is_identical_to(holder_map)) {
+ ++depth;
+
+ // Only global objects and objects that do not require access
+ // checks are allowed in stubs.
+ ASSERT(current_map->IsJSGlobalProxyMap() ||
+ !current_map->is_access_check_needed());
+
+ prototype = handle(JSObject::cast(current_map->prototype()));
+ if (current_map->is_dictionary_map() &&
+ !current_map->IsJSGlobalObjectMap() &&
+ !current_map->IsJSGlobalProxyMap()) {
+ if (!name->IsUniqueName()) {
+ ASSERT(name->IsString());
+ name = factory()->InternalizeString(Handle<String>::cast(name));
+ }
+ ASSERT(current.is_null() ||
+ current->property_dictionary()->FindEntry(name) ==
+ NameDictionary::kNotFound);
+
+ GenerateDictionaryNegativeLookup(masm(), miss, reg, name,
+ scratch1, scratch2);
+
+ __ mov(scratch1, FieldOperand(reg, HeapObject::kMapOffset));
+ reg = holder_reg; // From now on the object will be in holder_reg.
+ __ mov(reg, FieldOperand(scratch1, Map::kPrototypeOffset));
+ } else {
+ bool in_new_space = heap()->InNewSpace(*prototype);
+ if (depth != 1 || check == CHECK_ALL_MAPS) {
+ __ CheckMap(reg, current_map, miss, DONT_DO_SMI_CHECK);
+ }
+
+ // Check access rights to the global object. This has to happen after
+ // the map check so that we know that the object is actually a global
+ // object.
+ if (current_map->IsJSGlobalProxyMap()) {
+ __ CheckAccessGlobalProxy(reg, scratch1, scratch2, miss);
+ } else if (current_map->IsJSGlobalObjectMap()) {
+ GenerateCheckPropertyCell(
+ masm(), Handle<JSGlobalObject>::cast(current), name,
+ scratch2, miss);
+ }
+
+ if (in_new_space) {
+ // Save the map in scratch1 for later.
+ __ mov(scratch1, FieldOperand(reg, HeapObject::kMapOffset));
+ }
+
+ reg = holder_reg; // From now on the object will be in holder_reg.
+
+ if (in_new_space) {
+ // The prototype is in new space; we cannot store a reference to it
+ // in the code. Load it from the map.
+ __ mov(reg, FieldOperand(scratch1, Map::kPrototypeOffset));
+ } else {
+ // The prototype is in old space; load it directly.
+ __ mov(reg, prototype);
+ }
+ }
+
+ // Go to the next object in the prototype chain.
+ current = prototype;
+ current_map = handle(current->map());
+ }
+
+ // Log the check depth.
+ LOG(isolate(), IntEvent("check-maps-depth", depth + 1));
+
+ if (depth != 0 || check == CHECK_ALL_MAPS) {
+ // Check the holder map.
+ __ CheckMap(reg, current_map, miss, DONT_DO_SMI_CHECK);
+ }
+
+ // Perform security check for access to the global object.
+ ASSERT(current_map->IsJSGlobalProxyMap() ||
+ !current_map->is_access_check_needed());
+ if (current_map->IsJSGlobalProxyMap()) {
+ __ CheckAccessGlobalProxy(reg, scratch1, scratch2, miss);
+ }
+
+ // Return the register containing the holder.
+ return reg;
+}
+
+
+void LoadStubCompiler::HandlerFrontendFooter(Handle<Name> name, Label* miss) {
+ if (!miss->is_unused()) {
+ Label success;
+ __ jmp(&success);
+ __ bind(miss);
+ TailCallBuiltin(masm(), MissBuiltin(kind()));
+ __ bind(&success);
+ }
+}
+
+
+void StoreStubCompiler::HandlerFrontendFooter(Handle<Name> name, Label* miss) {
+ if (!miss->is_unused()) {
+ Label success;
+ __ jmp(&success);
+ GenerateRestoreName(masm(), miss, name);
+ TailCallBuiltin(masm(), MissBuiltin(kind()));
+ __ bind(&success);
+ }
+}
+
+
+Register LoadStubCompiler::CallbackHandlerFrontend(
+ Handle<HeapType> type,
+ Register object_reg,
+ Handle<JSObject> holder,
+ Handle<Name> name,
+ Handle<Object> callback) {
+ Label miss;
+
+ Register reg = HandlerFrontendHeader(type, object_reg, holder, name, &miss);
+
+ if (!holder->HasFastProperties() && !holder->IsJSGlobalObject()) {
+ ASSERT(!reg.is(scratch2()));
+ ASSERT(!reg.is(scratch3()));
+ Register dictionary = scratch1();
+ bool must_preserve_dictionary_reg = reg.is(dictionary);
+
+ // Load the properties dictionary.
+ if (must_preserve_dictionary_reg) {
+ __ push(dictionary);
+ }
+ __ mov(dictionary, FieldOperand(reg, JSObject::kPropertiesOffset));
+
+ // Probe the dictionary.
+ Label probe_done, pop_and_miss;
+ NameDictionaryLookupStub::GeneratePositiveLookup(masm(),
+ &pop_and_miss,
+ &probe_done,
+ dictionary,
+ this->name(),
+ scratch2(),
+ scratch3());
+ __ bind(&pop_and_miss);
+ if (must_preserve_dictionary_reg) {
+ __ pop(dictionary);
+ }
+ __ jmp(&miss);
+ __ bind(&probe_done);
+
+ // If probing finds an entry in the dictionary, scratch2 contains the
+ // index into the dictionary. Check that the value is the callback.
+ Register index = scratch2();
+ const int kElementsStartOffset =
+ NameDictionary::kHeaderSize +
+ NameDictionary::kElementsStartIndex * kPointerSize;
+ const int kValueOffset = kElementsStartOffset + kPointerSize;
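+ // The value of a dictionary entry follows its key, hence the extra
+ // kPointerSize past the probe result.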
+ __ mov(scratch3(),
+ Operand(dictionary, index, times_4, kValueOffset - kHeapObjectTag));
+ if (must_preserve_dictionary_reg) {
+ __ pop(dictionary);
+ }
+ __ cmp(scratch3(), callback);
+ __ j(not_equal, &miss);
+ }
+
+ HandlerFrontendFooter(name, &miss);
+ return reg;
+}
+
+
+void LoadStubCompiler::GenerateLoadField(Register reg,
+ Handle<JSObject> holder,
+ PropertyIndex field,
+ Representation representation) {
+ if (!reg.is(receiver())) __ mov(receiver(), reg);
+ if (kind() == Code::LOAD_IC) {
+ LoadFieldStub stub(isolate(),
+ field.is_inobject(holder),
+ field.translate(holder),
+ representation);
+ GenerateTailCall(masm(), stub.GetCode());
+ } else {
+ KeyedLoadFieldStub stub(isolate(),
+ field.is_inobject(holder),
+ field.translate(holder),
+ representation);
+ GenerateTailCall(masm(), stub.GetCode());
+ }
+}
+
+
+void LoadStubCompiler::GenerateLoadCallback(
+ Register reg,
+ Handle<ExecutableAccessorInfo> callback) {
+ // Insert additional parameters into the stack frame above return address.
+ ASSERT(!scratch3().is(reg));
+ __ pop(scratch3()); // Get return address to place it below.
+
+ STATIC_ASSERT(PropertyCallbackArguments::kHolderIndex == 0);
+ STATIC_ASSERT(PropertyCallbackArguments::kIsolateIndex == 1);
+ STATIC_ASSERT(PropertyCallbackArguments::kReturnValueDefaultValueIndex == 2);
+ STATIC_ASSERT(PropertyCallbackArguments::kReturnValueOffset == 3);
+ STATIC_ASSERT(PropertyCallbackArguments::kDataIndex == 4);
+ STATIC_ASSERT(PropertyCallbackArguments::kThisIndex == 5);
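+ // Push the PropertyCallbackArguments fields from the highest index
+ // (receiver/this) down to the lowest (holder), so they sit in index order
+ // above esp once these pushes are done.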
+ __ push(receiver()); // receiver
+ // Push data from ExecutableAccessorInfo.
+ if (isolate()->heap()->InNewSpace(callback->data())) {
+ ASSERT(!scratch2().is(reg));
+ __ mov(scratch2(), Immediate(callback));
+ __ push(FieldOperand(scratch2(), ExecutableAccessorInfo::kDataOffset));
+ } else {
+ __ push(Immediate(Handle<Object>(callback->data(), isolate())));
+ }
+ __ push(Immediate(isolate()->factory()->undefined_value())); // ReturnValue
+ // ReturnValue default value
+ __ push(Immediate(isolate()->factory()->undefined_value()));
+ __ push(Immediate(reinterpret_cast<int>(isolate())));
+ __ push(reg); // holder
+
+ // Save a pointer to where we pushed the arguments. This will be
+ // passed as the const PropertyAccessorInfo& to the C++ callback.
+ __ push(esp);
+
+ __ push(name()); // name
+
+ __ push(scratch3()); // Restore return address.
+
+ // ABI for CallApiGetter.
+ Register getter_address = edx;
+ Address function_address = v8::ToCData<Address>(callback->getter());
+ __ mov(getter_address, Immediate(function_address));
+
+ CallApiGetterStub stub(isolate());
+ __ TailCallStub(&stub);
+}
+
+
+void LoadStubCompiler::GenerateLoadConstant(Handle<Object> value) {
+ // Return the constant value.
+ __ LoadObject(eax, value);
+ __ ret(0);
+}
+
+
+void LoadStubCompiler::GenerateLoadInterceptor(
+ Register holder_reg,
+ Handle<Object> object,
+ Handle<JSObject> interceptor_holder,
+ LookupResult* lookup,
+ Handle<Name> name) {
+ ASSERT(interceptor_holder->HasNamedInterceptor());
+ ASSERT(!interceptor_holder->GetNamedInterceptor()->getter()->IsUndefined());
+
+ // So far the most popular follow ups for interceptor loads are FIELD
+ // and CALLBACKS, so inline only them, other cases may be added
+ // later.
+ bool compile_followup_inline = false;
+ if (lookup->IsFound() && lookup->IsCacheable()) {
+ if (lookup->IsField()) {
+ compile_followup_inline = true;
+ } else if (lookup->type() == CALLBACKS &&
+ lookup->GetCallbackObject()->IsExecutableAccessorInfo()) {
+ ExecutableAccessorInfo* callback =
+ ExecutableAccessorInfo::cast(lookup->GetCallbackObject());
+ compile_followup_inline = callback->getter() != NULL &&
+ callback->IsCompatibleReceiver(*object);
+ }
+ }
+
+ if (compile_followup_inline) {
+ // Compile the interceptor call, followed by inline code to load the
+ // property from further up the prototype chain if the call fails.
+ // Check that the maps haven't changed.
+ ASSERT(holder_reg.is(receiver()) || holder_reg.is(scratch1()));
+
+ // Preserve the receiver register explicitly whenever it is different from
+ // the holder and it is needed should the interceptor return without any
+ // result. The CALLBACKS case needs the receiver to be passed into C++ code;
+ // the FIELD case might cause a miss during the prototype check.
+ bool must_perform_prototype_check = *interceptor_holder != lookup->holder();
+ bool must_preserve_receiver_reg = !receiver().is(holder_reg) &&
+ (lookup->type() == CALLBACKS || must_perform_prototype_check);
+
+ // Save necessary data before invoking an interceptor.
+ // Requires a frame to make GC aware of pushed pointers.
+ {
+ FrameScope frame_scope(masm(), StackFrame::INTERNAL);
+
+ if (must_preserve_receiver_reg) {
+ __ push(receiver());
+ }
+ __ push(holder_reg);
+ __ push(this->name());
+
+ // Invoke an interceptor. Note: map checks from receiver to
+ // interceptor's holder have been compiled before (see a caller
+ // of this method).
+ CompileCallLoadPropertyWithInterceptor(
+ masm(), receiver(), holder_reg, this->name(), interceptor_holder,
+ IC::kLoadPropertyWithInterceptorOnly);
+
+ // Check if the interceptor provided a value for the property. If it
+ // did, return immediately.
+ Label interceptor_failed;
+ __ cmp(eax, factory()->no_interceptor_result_sentinel());
+ __ j(equal, &interceptor_failed);
+ frame_scope.GenerateLeaveFrame();
+ __ ret(0);
+
+ // Clobber registers when generating debug-code to provoke errors.
+ __ bind(&interceptor_failed);
+ if (FLAG_debug_code) {
+ __ mov(receiver(), Immediate(BitCast<int32_t>(kZapValue)));
+ __ mov(holder_reg, Immediate(BitCast<int32_t>(kZapValue)));
+ __ mov(this->name(), Immediate(BitCast<int32_t>(kZapValue)));
+ }
+
+ __ pop(this->name());
+ __ pop(holder_reg);
+ if (must_preserve_receiver_reg) {
+ __ pop(receiver());
+ }
+
+ // Leave the internal frame.
+ }
+
+ GenerateLoadPostInterceptor(holder_reg, interceptor_holder, name, lookup);
+ } else { // !compile_followup_inline
+ // Call the runtime system to load the interceptor.
+ // Check that the maps haven't changed.
+ __ pop(scratch2()); // save old return address
+ PushInterceptorArguments(masm(), receiver(), holder_reg,
+ this->name(), interceptor_holder);
+ __ push(scratch2()); // restore old return address
+
+ ExternalReference ref =
+ ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorForLoad),
+ isolate());
+ __ TailCallExternalReference(ref, StubCache::kInterceptorArgsLength, 1);
+ }
+}
+
+
+Handle<Code> StoreStubCompiler::CompileStoreCallback(
+ Handle<JSObject> object,
+ Handle<JSObject> holder,
+ Handle<Name> name,
+ Handle<ExecutableAccessorInfo> callback) {
+ Register holder_reg = HandlerFrontend(
+ IC::CurrentTypeOf(object, isolate()), receiver(), holder, name);
+
+ __ pop(scratch1()); // remove the return address
+ __ push(receiver());
+ __ push(holder_reg);
+ __ Push(callback);
+ __ Push(name);
+ __ push(value());
+ __ push(scratch1()); // restore return address
+
+ // Do tail-call to the runtime system.
+ ExternalReference store_callback_property =
+ ExternalReference(IC_Utility(IC::kStoreCallbackProperty), isolate());
+ __ TailCallExternalReference(store_callback_property, 5, 1);
+
+ // Return the generated code.
+ return GetCode(kind(), Code::FAST, name);
+}
+
+
+#undef __
+#define __ ACCESS_MASM(masm)
+
+
+void StoreStubCompiler::GenerateStoreViaSetter(
+ MacroAssembler* masm,
+ Handle<HeapType> type,
+ Register receiver,
+ Handle<JSFunction> setter) {
+ // ----------- S t a t e -------------
+ // -- esp[0] : return address
+ // -----------------------------------
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+
+ // Save value register, so we can restore it later.
+ __ push(value());
+
+ if (!setter.is_null()) {
+ // Call the JavaScript setter with receiver and value on the stack.
+ if (IC::TypeToMap(*type, masm->isolate())->IsJSGlobalObjectMap()) {
+ // Swap in the global receiver.
+ __ mov(receiver,
+ FieldOperand(receiver, JSGlobalObject::kGlobalReceiverOffset));
+ }
+ __ push(receiver);
+ __ push(value());
+ ParameterCount actual(1);
+ ParameterCount expected(setter);
+ __ InvokeFunction(setter, expected, actual,
+ CALL_FUNCTION, NullCallWrapper());
+ } else {
+ // If we generate a global code snippet for deoptimization only, remember
+ // the place to continue after deoptimization.
+ masm->isolate()->heap()->SetSetterStubDeoptPCOffset(masm->pc_offset());
+ }
+
+ // We have to return the passed value, not the return value of the setter.
+ __ pop(eax);
+
+ // Restore context register.
+ __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+ }
+ __ ret(0);
+}
+
+
+#undef __
+#define __ ACCESS_MASM(masm())
+
+
+Handle<Code> StoreStubCompiler::CompileStoreInterceptor(
+ Handle<JSObject> object,
+ Handle<Name> name) {
+ __ pop(scratch1()); // remove the return address
+ __ push(receiver());
+ __ push(this->name());
+ __ push(value());
+ __ push(scratch1()); // restore return address
+
+ // Do tail-call to the runtime system.
+ ExternalReference store_ic_property =
+ ExternalReference(IC_Utility(IC::kStoreInterceptorProperty), isolate());
+ __ TailCallExternalReference(store_ic_property, 3, 1);
+
+ // Return the generated code.
+ return GetCode(kind(), Code::FAST, name);
+}
+
+
+void StoreStubCompiler::GenerateStoreArrayLength() {
+ // Prepare tail call to StoreIC_ArrayLength.
+ __ pop(scratch1()); // remove the return address
+ __ push(receiver());
+ __ push(value());
+ __ push(scratch1()); // restore return address
+
+ ExternalReference ref =
+ ExternalReference(IC_Utility(IC::kStoreIC_ArrayLength),
+ masm()->isolate());
+ __ TailCallExternalReference(ref, 2, 1);
+}
+
+
+Handle<Code> KeyedStoreStubCompiler::CompileStorePolymorphic(
+ MapHandleList* receiver_maps,
+ CodeHandleList* handler_stubs,
+ MapHandleList* transitioned_maps) {
+ Label miss;
+ __ JumpIfSmi(receiver(), &miss, Label::kNear);
+ __ mov(scratch1(), FieldOperand(receiver(), HeapObject::kMapOffset));
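+ // Compare the receiver map against each handled map; entries with a
+ // transition map load it into transition_map() before jumping to the
+ // handler.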
+ for (int i = 0; i < receiver_maps->length(); ++i) {
+ __ cmp(scratch1(), receiver_maps->at(i));
+ if (transitioned_maps->at(i).is_null()) {
+ __ j(equal, handler_stubs->at(i));
+ } else {
+ Label next_map;
+ __ j(not_equal, &next_map, Label::kNear);
+ __ mov(transition_map(), Immediate(transitioned_maps->at(i)));
+ __ jmp(handler_stubs->at(i), RelocInfo::CODE_TARGET);
+ __ bind(&next_map);
+ }
+ }
+ __ bind(&miss);
+ TailCallBuiltin(masm(), MissBuiltin(kind()));
+
+ // Return the generated code.
+ return GetICCode(
+ kind(), Code::NORMAL, factory()->empty_string(), POLYMORPHIC);
+}
+
+
+Handle<Code> LoadStubCompiler::CompileLoadNonexistent(Handle<HeapType> type,
+ Handle<JSObject> last,
+ Handle<Name> name) {
+ NonexistentHandlerFrontend(type, last, name);
+
+ // Return undefined if maps of the full prototype chain are still the
+ // same and no global property with this name contains a value.
+ __ mov(eax, isolate()->factory()->undefined_value());
+ __ ret(0);
+
+ // Return the generated code.
+ return GetCode(kind(), Code::FAST, name);
+}
+
+
+Register* LoadStubCompiler::registers() {
+ // receiver, name, scratch1, scratch2, scratch3, scratch4.
+ static Register registers[] = { edx, ecx, ebx, eax, edi, no_reg };
+ return registers;
+}
+
+
+Register* KeyedLoadStubCompiler::registers() {
+ // receiver, name, scratch1, scratch2, scratch3, scratch4.
+ static Register registers[] = { edx, ecx, ebx, eax, edi, no_reg };
+ return registers;
+}
+
+
+Register StoreStubCompiler::value() {
+ return eax;
+}
+
+
+Register* StoreStubCompiler::registers() {
+ // receiver, name, scratch1, scratch2, scratch3.
+ static Register registers[] = { edx, ecx, ebx, edi, no_reg };
+ return registers;
+}
+
+
+Register* KeyedStoreStubCompiler::registers() {
+ // receiver, name, scratch1, scratch2, scratch3.
+ static Register registers[] = { edx, ecx, ebx, edi, no_reg };
+ return registers;
+}
+
+
+#undef __
+#define __ ACCESS_MASM(masm)
+
+
+void LoadStubCompiler::GenerateLoadViaGetter(MacroAssembler* masm,
+ Handle<HeapType> type,
+ Register receiver,
+ Handle<JSFunction> getter) {
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+
+ if (!getter.is_null()) {
+ // Call the JavaScript getter with the receiver on the stack.
+ if (IC::TypeToMap(*type, masm->isolate())->IsJSGlobalObjectMap()) {
+ // Swap in the global receiver.
+ __ mov(receiver,
+ FieldOperand(receiver, JSGlobalObject::kGlobalReceiverOffset));
+ }
+ __ push(receiver);
+ ParameterCount actual(0);
+ ParameterCount expected(getter);
+ __ InvokeFunction(getter, expected, actual,
+ CALL_FUNCTION, NullCallWrapper());
+ } else {
+ // If we generate a global code snippet for deoptimization only, remember
+ // the place to continue after deoptimization.
+ masm->isolate()->heap()->SetGetterStubDeoptPCOffset(masm->pc_offset());
+ }
+
+ // Restore context register.
+ __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+ }
+ __ ret(0);
+}
+
+
+#undef __
+#define __ ACCESS_MASM(masm())
+
+
+Handle<Code> LoadStubCompiler::CompileLoadGlobal(
+ Handle<HeapType> type,
+ Handle<GlobalObject> global,
+ Handle<PropertyCell> cell,
+ Handle<Name> name,
+ bool is_dont_delete) {
+ Label miss;
+
+ HandlerFrontendHeader(type, receiver(), global, name, &miss);
+ // Get the value from the cell.
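+ // With the serializer enabled the cell is not referenced through
+ // Operand::ForCell; instead the cell handle is loaded as an immediate and
+ // its value field read.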
+ if (masm()->serializer_enabled()) {
+ __ mov(eax, Immediate(cell));
+ __ mov(eax, FieldOperand(eax, PropertyCell::kValueOffset));
+ } else {
+ __ mov(eax, Operand::ForCell(cell));
+ }
+
+ // Check for deleted property if property can actually be deleted.
+ if (!is_dont_delete) {
+ __ cmp(eax, factory()->the_hole_value());
+ __ j(equal, &miss);
+ } else if (FLAG_debug_code) {
+ __ cmp(eax, factory()->the_hole_value());
+ __ Check(not_equal, kDontDeleteCellsCannotContainTheHole);
+ }
+
+ Counters* counters = isolate()->counters();
+ __ IncrementCounter(counters->named_load_global_stub(), 1);
+ // The code above already loads the result into the return register.
+ __ ret(0);
+
+ HandlerFrontendFooter(name, &miss);
+
+ // Return the generated code.
+ return GetCode(kind(), Code::NORMAL, name);
+}
+
+
+Handle<Code> BaseLoadStoreStubCompiler::CompilePolymorphicIC(
+ TypeHandleList* types,
+ CodeHandleList* handlers,
+ Handle<Name> name,
+ Code::StubType type,
+ IcCheckType check) {
+ Label miss;
+
+ if (check == PROPERTY &&
+ (kind() == Code::KEYED_LOAD_IC || kind() == Code::KEYED_STORE_IC)) {
+ __ cmp(this->name(), Immediate(name));
+ __ j(not_equal, &miss);
+ }
+
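+ // A smi receiver is dispatched to the handler for the Number map when one
+ // of the polymorphic types is Number; otherwise it falls through to miss.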
+ Label number_case;
+ Label* smi_target = IncludesNumberType(types) ? &number_case : &miss;
+ __ JumpIfSmi(receiver(), smi_target);
+
+ Register map_reg = scratch1();
+ __ mov(map_reg, FieldOperand(receiver(), HeapObject::kMapOffset));
+ int receiver_count = types->length();
+ int number_of_handled_maps = 0;
+ for (int current = 0; current < receiver_count; ++current) {
+ Handle<HeapType> type = types->at(current);
+ Handle<Map> map = IC::TypeToMap(*type, isolate());
+ if (!map->is_deprecated()) {
+ number_of_handled_maps++;
+ __ cmp(map_reg, map);
+ if (type->Is(HeapType::Number())) {
+ ASSERT(!number_case.is_unused());
+ __ bind(&number_case);
+ }
+ __ j(equal, handlers->at(current));
+ }
+ }
+ ASSERT(number_of_handled_maps != 0);
+
+ __ bind(&miss);
+ TailCallBuiltin(masm(), MissBuiltin(kind()));
+
+ // Return the generated code.
+ InlineCacheState state =
+ number_of_handled_maps > 1 ? POLYMORPHIC : MONOMORPHIC;
+ return GetICCode(kind(), type, name, state);
+}
+
+
+#undef __
+#define __ ACCESS_MASM(masm)
+
+
+void KeyedLoadStubCompiler::GenerateLoadDictionaryElement(
+ MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- ecx : key
+ // -- edx : receiver
+ // -- esp[0] : return address
+ // -----------------------------------
+ Label slow, miss;
+
+ // This stub is meant to be tail-jumped to; the receiver must already
+ // have been verified by the caller to not be a smi.
+ __ JumpIfNotSmi(ecx, &miss);
+ __ mov(ebx, ecx);
+ __ SmiUntag(ebx);
+ __ mov(eax, FieldOperand(edx, JSObject::kElementsOffset));
+
+ // Push receiver on the stack to free up a register for the dictionary
+ // probing.
+ __ push(edx);
+ __ LoadFromNumberDictionary(&slow, eax, ecx, ebx, edx, edi, eax);
+ // Pop receiver before returning.
+ __ pop(edx);
+ __ ret(0);
+
+ __ bind(&slow);
+ __ pop(edx);
+
+ // ----------- S t a t e -------------
+ // -- ecx : key
+ // -- edx : receiver
+ // -- esp[0] : return address
+ // -----------------------------------
+ TailCallBuiltin(masm, Builtins::kKeyedLoadIC_Slow);
+
+ __ bind(&miss);
+ // ----------- S t a t e -------------
+ // -- ecx : key
+ // -- edx : receiver
+ // -- esp[0] : return address
+ // -----------------------------------
+ TailCallBuiltin(masm, Builtins::kKeyedLoadIC_Miss);
+}
+
+
+#undef __
+
+} } // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_X87
'test-macro-assembler-mips.cc'
],
}],
+ ['v8_target_arch=="x87"', {
+ 'sources': [ ### gcmole(arch:x87) ###
+ 'test-assembler-x87.cc',
+ 'test-code-stubs.cc',
+ 'test-code-stubs-x87.cc',
+ 'test-cpu-x87.cc',
+ 'test-disasm-x87.cc',
+ 'test-macro-assembler-x87.cc',
+ 'test-log-stack-tracer.cc'
+ ],
+ }],
[ 'OS=="linux" or OS=="qnx"', {
'sources': [
'test-platform-linux.cc',
--- /dev/null
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include <stdlib.h>
+
+#include "v8.h"
+
+#include "disassembler.h"
+#include "factory.h"
+#include "macro-assembler.h"
+#include "platform.h"
+#include "serialize.h"
+#include "cctest.h"
+
+using namespace v8::internal;
+
+
+typedef int (*F0)();
+typedef int (*F1)(int x);
+typedef int (*F2)(int x, int y);
+
+
+#define __ assm.
+
+TEST(AssemblerIa320) {
+ CcTest::InitializeVM();
+ Isolate* isolate = reinterpret_cast<Isolate*>(CcTest::isolate());
+ HandleScope scope(isolate);
+
+ v8::internal::byte buffer[256];
+ Assembler assm(isolate, buffer, sizeof buffer);
+
+ __ mov(eax, Operand(esp, 4));
+ __ add(eax, Operand(esp, 8));
+ __ ret(0);
+
+ CodeDesc desc;
+ assm.GetCode(&desc);
+ Handle<Code> code = isolate->factory()->NewCode(
+ desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+#ifdef OBJECT_PRINT
+ code->Print();
+#endif
+ F2 f = FUNCTION_CAST<F2>(code->entry());
+ int res = f(3, 4);
+ ::printf("f() = %d\n", res);
+ CHECK_EQ(7, res);
+}
+
+
+TEST(AssemblerIa321) {
+ CcTest::InitializeVM();
+ Isolate* isolate = reinterpret_cast<Isolate*>(CcTest::isolate());
+ HandleScope scope(isolate);
+
+ v8::internal::byte buffer[256];
+ Assembler assm(isolate, buffer, sizeof buffer);
+ Label L, C;
+
+ __ mov(edx, Operand(esp, 4));
+ __ xor_(eax, eax); // clear eax
+ __ jmp(&C);
+
+ __ bind(&L);
+ __ add(eax, edx);
+ __ sub(edx, Immediate(1));
+
+ __ bind(&C);
+ __ test(edx, edx);
+ __ j(not_zero, &L);
+ __ ret(0);
+
+ CodeDesc desc;
+ assm.GetCode(&desc);
+ Handle<Code> code = isolate->factory()->NewCode(
+ desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+#ifdef OBJECT_PRINT
+ code->Print();
+#endif
+ F1 f = FUNCTION_CAST<F1>(code->entry());
+ int res = f(100);
+ ::printf("f() = %d\n", res);
+ CHECK_EQ(5050, res);
+}
+
+
+TEST(AssemblerIa322) {
+ CcTest::InitializeVM();
+ Isolate* isolate = reinterpret_cast<Isolate*>(CcTest::isolate());
+ HandleScope scope(isolate);
+
+ v8::internal::byte buffer[256];
+ Assembler assm(isolate, buffer, sizeof buffer);
+ Label L, C;
+
+ __ mov(edx, Operand(esp, 4));
+ __ mov(eax, 1);
+ __ jmp(&C);
+
+ __ bind(&L);
+ __ imul(eax, edx);
+ __ sub(edx, Immediate(1));
+
+ __ bind(&C);
+ __ test(edx, edx);
+ __ j(not_zero, &L);
+ __ ret(0);
+
+ // some relocated stuff here, not executed
+ __ mov(eax, isolate->factory()->true_value());
+ __ jmp(NULL, RelocInfo::RUNTIME_ENTRY);
+
+ CodeDesc desc;
+ assm.GetCode(&desc);
+ Handle<Code> code = isolate->factory()->NewCode(
+ desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+#ifdef OBJECT_PRINT
+ code->Print();
+#endif
+ F1 f = FUNCTION_CAST<F1>(code->entry());
+ int res = f(10);
+ ::printf("f() = %d\n", res);
+ CHECK_EQ(3628800, res);
+}
+
+
+typedef int (*F3)(float x);
+
+typedef int (*F4)(double x);
+
+static int baz = 42;
+TEST(AssemblerIa325) {
+ CcTest::InitializeVM();
+ Isolate* isolate = reinterpret_cast<Isolate*>(CcTest::isolate());
+ HandleScope scope(isolate);
+
+ v8::internal::byte buffer[256];
+ Assembler assm(isolate, buffer, sizeof buffer);
+
+ __ mov(eax, Operand(reinterpret_cast<intptr_t>(&baz), RelocInfo::NONE32));
+ __ ret(0);
+
+ CodeDesc desc;
+ assm.GetCode(&desc);
+ Handle<Code> code = isolate->factory()->NewCode(
+ desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ F0 f = FUNCTION_CAST<F0>(code->entry());
+ int res = f();
+ CHECK_EQ(42, res);
+}
+
+
+typedef int (*F7)(double x, double y);
+
+TEST(AssemblerIa329) {
+ CcTest::InitializeVM();
+ Isolate* isolate = reinterpret_cast<Isolate*>(CcTest::isolate());
+ HandleScope scope(isolate);
+ v8::internal::byte buffer[256];
+ MacroAssembler assm(isolate, buffer, sizeof buffer);
+ enum { kEqual = 0, kGreater = 1, kLess = 2, kNaN = 3, kUndefined = 4 };
+ Label equal_l, less_l, greater_l, nan_l;
+ __ fld_d(Operand(esp, 3 * kPointerSize));
+ __ fld_d(Operand(esp, 1 * kPointerSize));
+ __ FCmp();
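+ // FCmp leaves the result of the double comparison in the CPU flags;
+ // parity_even signals an unordered (NaN) operand.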
+ __ j(parity_even, &nan_l);
+ __ j(equal, &equal_l);
+ __ j(below, &less_l);
+ __ j(above, &greater_l);
+
+ __ mov(eax, kUndefined);
+ __ ret(0);
+
+ __ bind(&equal_l);
+ __ mov(eax, kEqual);
+ __ ret(0);
+
+ __ bind(&greater_l);
+ __ mov(eax, kGreater);
+ __ ret(0);
+
+ __ bind(&less_l);
+ __ mov(eax, kLess);
+ __ ret(0);
+
+ __ bind(&nan_l);
+ __ mov(eax, kNaN);
+ __ ret(0);
+
+
+ CodeDesc desc;
+ assm.GetCode(&desc);
+ Handle<Code> code = isolate->factory()->NewCode(
+ desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+#ifdef OBJECT_PRINT
+ code->Print();
+#endif
+
+ F7 f = FUNCTION_CAST<F7>(code->entry());
+ CHECK_EQ(kLess, f(1.1, 2.2));
+ CHECK_EQ(kEqual, f(2.2, 2.2));
+ CHECK_EQ(kGreater, f(3.3, 2.2));
+ CHECK_EQ(kNaN, f(OS::nan_value(), 1.1));
+}
+
+
+TEST(AssemblerIa3210) {
+ // Test chaining of label usages within instructions (issue 1644).
+ CcTest::InitializeVM();
+ Isolate* isolate = reinterpret_cast<Isolate*>(CcTest::isolate());
+ HandleScope scope(isolate);
+ Assembler assm(isolate, NULL, 0);
+
+ Label target;
+ __ j(equal, &target);
+ __ j(not_equal, &target);
+ __ bind(&target);
+ __ nop();
+}
+
+
+TEST(AssemblerMultiByteNop) {
+ CcTest::InitializeVM();
+ Isolate* isolate = reinterpret_cast<Isolate*>(CcTest::isolate());
+ HandleScope scope(isolate);
+ v8::internal::byte buffer[1024];
+ Assembler assm(isolate, buffer, sizeof(buffer));
+ __ push(ebx);
+ __ push(ecx);
+ __ push(edx);
+ __ push(edi);
+ __ push(esi);
+ __ mov(eax, 1);
+ __ mov(ebx, 2);
+ __ mov(ecx, 3);
+ __ mov(edx, 4);
+ __ mov(edi, 5);
+ __ mov(esi, 6);
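+ // Emit multi-byte nops of every length from 0 to 15 and check that each
+ // advances the pc by exactly the requested number of bytes.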
+ for (int i = 0; i < 16; i++) {
+ int before = assm.pc_offset();
+ __ Nop(i);
+ CHECK_EQ(assm.pc_offset() - before, i);
+ }
+
+ Label fail;
+ __ cmp(eax, 1);
+ __ j(not_equal, &fail);
+ __ cmp(ebx, 2);
+ __ j(not_equal, &fail);
+ __ cmp(ecx, 3);
+ __ j(not_equal, &fail);
+ __ cmp(edx, 4);
+ __ j(not_equal, &fail);
+ __ cmp(edi, 5);
+ __ j(not_equal, &fail);
+ __ cmp(esi, 6);
+ __ j(not_equal, &fail);
+ __ mov(eax, 42);
+ __ pop(esi);
+ __ pop(edi);
+ __ pop(edx);
+ __ pop(ecx);
+ __ pop(ebx);
+ __ ret(0);
+ __ bind(&fail);
+ __ mov(eax, 13);
+ __ pop(esi);
+ __ pop(edi);
+ __ pop(edx);
+ __ pop(ecx);
+ __ pop(ebx);
+ __ ret(0);
+
+ CodeDesc desc;
+ assm.GetCode(&desc);
+ Handle<Code> code = isolate->factory()->NewCode(
+ desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ CHECK(code->IsCode());
+
+ F0 f = FUNCTION_CAST<F0>(code->entry());
+ int res = f();
+ CHECK_EQ(42, res);
+}
+
+
+#undef __
--- /dev/null
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include <stdlib.h>
+
+#include <limits>
+
+#include "v8.h"
+
+#include "cctest.h"
+#include "code-stubs.h"
+#include "test-code-stubs.h"
+#include "factory.h"
+#include "macro-assembler.h"
+#include "platform.h"
+
+using namespace v8::internal;
+
+#define __ assm.
+
+ConvertDToIFunc MakeConvertDToIFuncTrampoline(Isolate* isolate,
+ Register source_reg,
+ Register destination_reg) {
+ // Allocate an executable page of memory.
+ size_t actual_size;
+ byte* buffer = static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
+ &actual_size,
+ true));
+ CHECK(buffer);
+ HandleScope handles(isolate);
+ MacroAssembler assm(isolate, buffer, static_cast<int>(actual_size));
+ int offset =
+ source_reg.is(esp) ? 0 : (HeapNumber::kValueOffset - kSmiTagSize);
+ DoubleToIStub stub(isolate, source_reg, destination_reg, offset, true);
+ byte* start = stub.GetCode()->instruction_start();
+
+ __ push(ebx);
+ __ push(ecx);
+ __ push(edx);
+ __ push(esi);
+ __ push(edi);
+
+ if (!source_reg.is(esp)) {
+ __ lea(source_reg, MemOperand(esp, 6 * kPointerSize - offset));
+ }
+
+ int param_offset = 7 * kPointerSize;
+ // Save registers to make sure they don't get clobbered.
+ int reg_num = 0;
+ for (;reg_num < Register::NumAllocatableRegisters(); ++reg_num) {
+ Register reg = Register::FromAllocationIndex(reg_num);
+ if (!reg.is(esp) && !reg.is(ebp) && !reg.is(destination_reg)) {
+ __ push(reg);
+ param_offset += kPointerSize;
+ }
+ }
+
+ // Re-push the double argument
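+ // (two pushes, since a double occupies two 32-bit stack slots)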
+ __ push(MemOperand(esp, param_offset));
+ __ push(MemOperand(esp, param_offset));
+
+ // Call through to the actual stub
+ __ call(start, RelocInfo::EXTERNAL_REFERENCE);
+
+ __ add(esp, Immediate(kDoubleSize));
+
+ // Make sure no registers have been unexpectedly clobbered
+ for (--reg_num; reg_num >= 0; --reg_num) {
+ Register reg = Register::FromAllocationIndex(reg_num);
+ if (!reg.is(esp) && !reg.is(ebp) && !reg.is(destination_reg)) {
+ __ cmp(reg, MemOperand(esp, 0));
+ __ Assert(equal, kRegisterWasClobbered);
+ __ add(esp, Immediate(kPointerSize));
+ }
+ }
+
+ __ mov(eax, destination_reg);
+
+ __ pop(edi);
+ __ pop(esi);
+ __ pop(edx);
+ __ pop(ecx);
+ __ pop(ebx);
+
+ __ ret(kDoubleSize);
+
+ CodeDesc desc;
+ assm.GetCode(&desc);
+ return reinterpret_cast<ConvertDToIFunc>(
+ reinterpret_cast<intptr_t>(buffer));
+}
+
+#undef __
+
+
+static Isolate* GetIsolateFrom(LocalContext* context) {
+ return reinterpret_cast<Isolate*>((*context)->GetIsolate());
+}
+
+
+TEST(ConvertDToI) {
+ CcTest::InitializeVM();
+ LocalContext context;
+ Isolate* isolate = GetIsolateFrom(&context);
+ HandleScope scope(isolate);
+
+#if DEBUG
+ // Verify that the tests actually work with the C version. In the release
+ // build the compiler optimizes it away because it's all constant, but it
+ // does so incorrectly, triggering an assert on gcc.
+ RunAllTruncationTests(&ConvertDToICVersion);
+#endif
+
+ Register source_registers[] = {esp, eax, ebx, ecx, edx, edi, esi};
+ Register dest_registers[] = {eax, ebx, ecx, edx, edi, esi};
+
+ for (size_t s = 0; s < sizeof(source_registers) / sizeof(Register); s++) {
+ for (size_t d = 0; d < sizeof(dest_registers) / sizeof(Register); d++) {
+ RunAllTruncationTests(
+ MakeConvertDToIFuncTrampoline(isolate,
+ source_registers[s],
+ dest_registers[d]));
+ }
+ }
+}
#ifndef V8_TEST_CODE_STUBS_H_
#define V8_TEST_CODE_STUBS_H_
-#if V8_TARGET_ARCH_IA32
+#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X87
#if __GNUC__
#define STDCALL __attribute__((stdcall))
#else
--- /dev/null
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "cctest.h"
+#include "cpu.h"
+
+using namespace v8::internal;
+
+
+TEST(RequiredFeaturesX64) {
+ // Test for the features required by every x86 CPU in compat/legacy mode.
+ CPU cpu;
+ CHECK(cpu.has_sahf());
+}
--- /dev/null
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include <stdlib.h>
+
+#include "v8.h"
+
+#include "debug.h"
+#include "disasm.h"
+#include "disassembler.h"
+#include "macro-assembler.h"
+#include "serialize.h"
+#include "stub-cache.h"
+#include "cctest.h"
+
+using namespace v8::internal;
+
+
+#define __ assm.
+
+
+static void DummyStaticFunction(Object* result) {
+}
+
+
+TEST(DisasmIa320) {
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+ v8::internal::byte buffer[2048];
+ Assembler assm(isolate, buffer, sizeof buffer);
+ DummyStaticFunction(NULL); // just bloody use it (DELETE; debugging)
+
+ // Short immediate instructions
+ __ adc(eax, 12345678);
+ __ add(eax, Immediate(12345678));
+ __ or_(eax, 12345678);
+ __ sub(eax, Immediate(12345678));
+ __ xor_(eax, 12345678);
+ __ and_(eax, 12345678);
+ Handle<FixedArray> foo = isolate->factory()->NewFixedArray(10, TENURED);
+ __ cmp(eax, foo);
+
+ // ---- This one caused crash
+ __ mov(ebx, Operand(esp, ecx, times_2, 0)); // [esp+ecx*2]
+
+ // ---- All instructions that I can think of
+ __ add(edx, ebx);
+ __ add(edx, Operand(12, RelocInfo::NONE32));
+ __ add(edx, Operand(ebx, 0));
+ __ add(edx, Operand(ebx, 16));
+ __ add(edx, Operand(ebx, 1999));
+ __ add(edx, Operand(ebx, -4));
+ __ add(edx, Operand(ebx, -1999));
+ __ add(edx, Operand(esp, 0));
+ __ add(edx, Operand(esp, 16));
+ __ add(edx, Operand(esp, 1999));
+ __ add(edx, Operand(esp, -4));
+ __ add(edx, Operand(esp, -1999));
+ __ nop();
+ __ add(esi, Operand(ecx, times_4, 0));
+ __ add(esi, Operand(ecx, times_4, 24));
+ __ add(esi, Operand(ecx, times_4, -4));
+ __ add(esi, Operand(ecx, times_4, -1999));
+ __ nop();
+ __ add(edi, Operand(ebp, ecx, times_4, 0));
+ __ add(edi, Operand(ebp, ecx, times_4, 12));
+ __ add(edi, Operand(ebp, ecx, times_4, -8));
+ __ add(edi, Operand(ebp, ecx, times_4, -3999));
+ __ add(Operand(ebp, ecx, times_4, 12), Immediate(12));
+
+ __ nop();
+ __ add(ebx, Immediate(12));
+ __ nop();
+ __ adc(ecx, 12);
+ __ adc(ecx, 1000);
+ __ nop();
+ __ and_(edx, 3);
+ __ and_(edx, Operand(esp, 4));
+ __ cmp(edx, 3);
+ __ cmp(edx, Operand(esp, 4));
+ __ cmp(Operand(ebp, ecx, times_4, 0), Immediate(1000));
+ Handle<FixedArray> foo2 = isolate->factory()->NewFixedArray(10, TENURED);
+ __ cmp(ebx, foo2);
+ __ cmpb(ebx, Operand(ebp, ecx, times_2, 0));
+ __ cmpb(Operand(ebp, ecx, times_2, 0), ebx);
+ __ or_(edx, 3);
+ __ xor_(edx, 3);
+ __ nop();
+ __ cpuid();
+ __ movsx_b(edx, ecx);
+ __ movsx_w(edx, ecx);
+ __ movzx_b(edx, ecx);
+ __ movzx_w(edx, ecx);
+
+ __ nop();
+ __ imul(edx, ecx);
+ __ shld(edx, ecx);
+ __ shrd(edx, ecx);
+ __ bts(edx, ecx);
+ __ bts(Operand(ebx, ecx, times_4, 0), ecx);
+ __ nop();
+ __ pushad();
+ __ popad();
+ __ pushfd();
+ __ popfd();
+ __ push(Immediate(12));
+ __ push(Immediate(23456));
+ __ push(ecx);
+ __ push(esi);
+ __ push(Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
+ __ push(Operand(ebx, ecx, times_4, 0));
+ __ push(Operand(ebx, ecx, times_4, 0));
+ __ push(Operand(ebx, ecx, times_4, 10000));
+ __ pop(edx);
+ __ pop(eax);
+ __ pop(Operand(ebx, ecx, times_4, 0));
+ __ nop();
+
+ __ add(edx, Operand(esp, 16));
+ __ add(edx, ecx);
+ __ mov_b(edx, ecx);
+ __ mov_b(ecx, 6);
+ __ mov_b(Operand(ebx, ecx, times_4, 10000), 6);
+ __ mov_b(Operand(esp, 16), edx);
+ __ mov_w(edx, Operand(esp, 16));
+ __ mov_w(Operand(esp, 16), edx);
+ __ nop();
+ __ movsx_w(edx, Operand(esp, 12));
+ __ movsx_b(edx, Operand(esp, 12));
+ __ movzx_w(edx, Operand(esp, 12));
+ __ movzx_b(edx, Operand(esp, 12));
+ __ nop();
+ __ mov(edx, 1234567);
+ __ mov(edx, Operand(esp, 12));
+ __ mov(Operand(ebx, ecx, times_4, 10000), Immediate(12345));
+ __ mov(Operand(ebx, ecx, times_4, 10000), edx);
+ __ nop();
+ __ dec_b(edx);
+ __ dec_b(Operand(eax, 10));
+ __ dec_b(Operand(ebx, ecx, times_4, 10000));
+ __ dec(edx);
+ __ cdq();
+
+ __ nop();
+ __ idiv(edx);
+ __ mul(edx);
+ __ neg(edx);
+ __ not_(edx);
+ __ test(Operand(ebx, ecx, times_4, 10000), Immediate(123456));
+
+ __ imul(edx, Operand(ebx, ecx, times_4, 10000));
+ __ imul(edx, ecx, 12);
+ __ imul(edx, ecx, 1000);
+
+ __ inc(edx);
+ __ inc(Operand(ebx, ecx, times_4, 10000));
+ __ push(Operand(ebx, ecx, times_4, 10000));
+ __ pop(Operand(ebx, ecx, times_4, 10000));
+ __ call(Operand(ebx, ecx, times_4, 10000));
+ __ jmp(Operand(ebx, ecx, times_4, 10000));
+
+ __ lea(edx, Operand(ebx, ecx, times_4, 10000));
+ __ or_(edx, 12345);
+ __ or_(edx, Operand(ebx, ecx, times_4, 10000));
+
+ __ nop();
+
+ __ rcl(edx, 1);
+ __ rcl(edx, 7);
+ __ rcr(edx, 1);
+ __ rcr(edx, 7);
+ __ sar(edx, 1);
+ __ sar(edx, 6);
+ __ sar_cl(edx);
+ __ sbb(edx, Operand(ebx, ecx, times_4, 10000));
+ __ shld(edx, Operand(ebx, ecx, times_4, 10000));
+ __ shl(edx, 1);
+ __ shl(edx, 6);
+ __ shl_cl(edx);
+ __ shrd(edx, Operand(ebx, ecx, times_4, 10000));
+ __ shr(edx, 1);
+ __ shr(edx, 7);
+ __ shr_cl(edx);
+
+
+ // Immediates
+
+ __ adc(edx, 12345);
+
+ __ add(ebx, Immediate(12));
+ __ add(Operand(edx, ecx, times_4, 10000), Immediate(12));
+
+ __ and_(ebx, 12345);
+
+ __ cmp(ebx, 12345);
+ __ cmp(ebx, Immediate(12));
+ __ cmp(Operand(edx, ecx, times_4, 10000), Immediate(12));
+ __ cmpb(eax, 100);
+
+ __ or_(ebx, 12345);
+
+ __ sub(ebx, Immediate(12));
+ __ sub(Operand(edx, ecx, times_4, 10000), Immediate(12));
+
+ __ xor_(ebx, 12345);
+
+ __ imul(edx, ecx, 12);
+ __ imul(edx, ecx, 1000);
+
+ __ cld();
+ __ rep_movs();
+ __ rep_stos();
+ __ stos();
+
+ __ sub(edx, Operand(ebx, ecx, times_4, 10000));
+ __ sub(edx, ebx);
+
+ __ test(edx, Immediate(12345));
+ __ test(edx, Operand(ebx, ecx, times_8, 10000));
+ __ test(Operand(esi, edi, times_1, -20000000), Immediate(300000000));
+ __ test_b(edx, Operand(ecx, ebx, times_2, 1000));
+ __ test_b(Operand(eax, -20), 0x9A);
+ __ nop();
+
+ __ xor_(edx, 12345);
+ __ xor_(edx, Operand(ebx, ecx, times_8, 10000));
+ __ bts(Operand(ebx, ecx, times_8, 10000), edx);
+ __ hlt();
+ __ int3();
+ __ ret(0);
+ __ ret(8);
+
+ // Calls
+
+ Label L1, L2;
+ __ bind(&L1);
+ __ nop();
+ __ call(&L1);
+ __ call(&L2);
+ __ nop();
+ __ bind(&L2);
+ __ call(Operand(ebx, ecx, times_4, 10000));
+ __ nop();
+ Handle<Code> ic(LoadIC::initialize_stub(isolate, NOT_CONTEXTUAL));
+ __ call(ic, RelocInfo::CODE_TARGET);
+ __ nop();
+ __ call(FUNCTION_ADDR(DummyStaticFunction), RelocInfo::RUNTIME_ENTRY);
+ __ nop();
+
+ __ jmp(&L1);
+ __ jmp(Operand(ebx, ecx, times_4, 10000));
+ ExternalReference after_break_target =
+ ExternalReference::debug_after_break_target_address(isolate);
+ __ jmp(Operand::StaticVariable(after_break_target));
+ __ jmp(ic, RelocInfo::CODE_TARGET);
+ __ nop();
+
+
+ Label Ljcc;
+ __ nop();
+ // Long-form (32-bit displacement) conditional jumps: the label is not yet bound.
+ __ j(overflow, &Ljcc);
+ __ j(no_overflow, &Ljcc);
+ __ j(below, &Ljcc);
+ __ j(above_equal, &Ljcc);
+ __ j(equal, &Ljcc);
+ __ j(not_equal, &Ljcc);
+ __ j(below_equal, &Ljcc);
+ __ j(above, &Ljcc);
+ __ j(sign, &Ljcc);
+ __ j(not_sign, &Ljcc);
+ __ j(parity_even, &Ljcc);
+ __ j(parity_odd, &Ljcc);
+ __ j(less, &Ljcc);
+ __ j(greater_equal, &Ljcc);
+ __ j(less_equal, &Ljcc);
+ __ j(greater, &Ljcc);
+ __ nop();
+ __ bind(&Ljcc);
+ // Short-form (8-bit displacement) conditional jumps back to the label bound above.
+ __ j(overflow, &Ljcc);
+ __ j(no_overflow, &Ljcc);
+ __ j(below, &Ljcc);
+ __ j(above_equal, &Ljcc);
+ __ j(equal, &Ljcc);
+ __ j(not_equal, &Ljcc);
+ __ j(below_equal, &Ljcc);
+ __ j(above, &Ljcc);
+ __ j(sign, &Ljcc);
+ __ j(not_sign, &Ljcc);
+ __ j(parity_even, &Ljcc);
+ __ j(parity_odd, &Ljcc);
+ __ j(less, &Ljcc);
+ __ j(greater_equal, &Ljcc);
+ __ j(less_equal, &Ljcc);
+ __ j(greater, &Ljcc);
+
+ // x87 FPU instructions (0xD9, 0xDB, 0xDC, 0xDD, 0xDE and 0xDF opcode groups).
+ __ nop();
+
+ __ fld(1);
+ __ fld1();
+ __ fldz();
+ __ fldpi();
+ __ fabs();
+ __ fchs();
+ __ fprem();
+ __ fprem1();
+ __ fincstp();
+ __ ftst();
+ __ fxch(3);
+ __ fld_s(Operand(ebx, ecx, times_4, 10000));
+ __ fstp_s(Operand(ebx, ecx, times_4, 10000));
+ __ ffree(3);
+ __ fld_d(Operand(ebx, ecx, times_4, 10000));
+ __ fstp_d(Operand(ebx, ecx, times_4, 10000));
+ __ nop();
+
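+ // FPU integer load/store, register arithmetic and control instructions.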
+ __ fild_s(Operand(ebx, ecx, times_4, 10000));
+ __ fistp_s(Operand(ebx, ecx, times_4, 10000));
+ __ fild_d(Operand(ebx, ecx, times_4, 10000));
+ __ fistp_d(Operand(ebx, ecx, times_4, 10000));
+ __ fnstsw_ax();
+ __ nop();
+ __ fadd(3);
+ __ fsub(3);
+ __ fmul(3);
+ __ fdiv(3);
+
+ __ faddp(3);
+ __ fsubp(3);
+ __ fmulp(3);
+ __ fdivp(3);
+ __ fcompp();
+ __ fwait();
+ __ frndint();
+ __ fninit();
+ __ nop();
+
+ // Multi-byte NOP encodings, 0 through 15 bytes.
+ for (int i = 0; i < 16; i++) {
+ __ Nop(i);
+ }
+
+ __ ret(0);
+
+ CodeDesc desc;
+ assm.GetCode(&desc);
+ Handle<Code> code = isolate->factory()->NewCode(
+ desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ USE(code);
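+ // With object printing enabled, print the Code object and disassemble the emitted bytes for inspection.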
+#ifdef OBJECT_PRINT
+ code->Print();
+ byte* begin = code->instruction_start();
+ byte* end = begin + code->instruction_size();
+ disasm::Disassembler::Disassemble(stdout, begin, end);
+#endif
+}
+
+#undef __
// GenerateHashInit takes the first character as an argument so it can't
// handle the zero length string.
ASSERT(string.length() > 0);
-#if V8_TARGET_ARCH_IA32
+#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X87
__ push(ebx);
__ push(ecx);
__ mov(eax, Immediate(0));
void generate(MacroAssembler* masm, uint32_t key) {
-#if V8_TARGET_ARCH_IA32
+#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X87
__ push(ebx);
__ mov(eax, Immediate(key));
__ GetNumberHash(eax, ebx);
--- /dev/null
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include <stdlib.h>
+
+#include "v8.h"
+
+#include "macro-assembler.h"
+#include "factory.h"
+#include "platform.h"
+#include "serialize.h"
+#include "cctest.h"
+
+using namespace v8::internal;
+
+#if __GNUC__
+#define STDCALL __attribute__((stdcall))
+#else
+#define STDCALL __stdcall
+#endif
+
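+ // The generated code is cast to this zero-argument function type and called directly from C++ at the end of the test.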
+typedef int STDCALL F0Type();
+typedef F0Type* F0;
+
+#define __ masm->
+
+
+TEST(LoadAndStoreWithRepresentation) {
+ v8::internal::V8::Initialize(NULL);
+
+ // Allocate an executable page of memory.
+ size_t actual_size;
+ byte* buffer = static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
+ &actual_size,
+ true));
+ CHECK(buffer);
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope handles(isolate);
+ MacroAssembler assembler(isolate, buffer, static_cast<int>(actual_size));
+ MacroAssembler* masm = &assembler; // Create a pointer for the __ macro.
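+ // Preserve ebx and edx, then reserve one stack slot to serve as the memory cell exercised by the Store/Load tests below.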
+ __ push(ebx);
+ __ push(edx);
+ __ sub(esp, Immediate(1 * kPointerSize));
+ Label exit;
+
+ // Test 1: store -1 as UInteger8; the raw slot and a zero-extending UInteger8 load both read back 255.
+ __ mov(eax, Immediate(1)); // Test number.
+ __ mov(Operand(esp, 0 * kPointerSize), Immediate(0));
+ __ mov(ebx, Immediate(-1));
+ __ Store(ebx, Operand(esp, 0 * kPointerSize), Representation::UInteger8());
+ __ mov(ebx, Operand(esp, 0 * kPointerSize));
+ __ mov(edx, Immediate(255));
+ __ cmp(ebx, edx);
+ __ j(not_equal, &exit);
+ __ Load(ebx, Operand(esp, 0 * kPointerSize), Representation::UInteger8());
+ __ cmp(ebx, edx);
+ __ j(not_equal, &exit);
+
+
+ // Test 2: store -1 as Integer8; the raw slot reads 255, but a sign-extending Integer8 load reads back -1.
+ __ mov(eax, Immediate(2)); // Test number.
+ __ mov(Operand(esp, 0 * kPointerSize), Immediate(0));
+ __ mov(ebx, Immediate(-1));
+ __ Store(ebx, Operand(esp, 0 * kPointerSize), Representation::Integer8());
+ __ mov(ebx, Operand(esp, 0 * kPointerSize));
+ __ mov(edx, Immediate(255));
+ __ cmp(ebx, edx);
+ __ j(not_equal, &exit);
+ __ Load(ebx, Operand(esp, 0 * kPointerSize), Representation::Integer8());
+ __ mov(edx, Immediate(-1));
+ __ cmp(ebx, edx);
+ __ j(not_equal, &exit);
+
+ // Test 3: store -1 as Integer16; the raw slot reads 65535, but a sign-extending Integer16 load reads back -1.
+ __ mov(eax, Immediate(3)); // Test number.
+ __ mov(Operand(esp, 0 * kPointerSize), Immediate(0));
+ __ mov(ebx, Immediate(-1));
+ __ Store(ebx, Operand(esp, 0 * kPointerSize), Representation::Integer16());
+ __ mov(ebx, Operand(esp, 0 * kPointerSize));
+ __ mov(edx, Immediate(65535));
+ __ cmp(ebx, edx);
+ __ j(not_equal, &exit);
+ __ Load(edx, Operand(esp, 0 * kPointerSize), Representation::Integer16());
+ __ mov(ebx, Immediate(-1));
+ __ cmp(ebx, edx);
+ __ j(not_equal, &exit);
+
+ // Test 4: store -1 as UInteger16; the raw slot and a zero-extending UInteger16 load both read 65535.
+ __ mov(eax, Immediate(4)); // Test number.
+ __ mov(Operand(esp, 0 * kPointerSize), Immediate(0));
+ __ mov(ebx, Immediate(-1));
+ __ Store(ebx, Operand(esp, 0 * kPointerSize), Representation::UInteger16());
+ __ mov(ebx, Operand(esp, 0 * kPointerSize));
+ __ mov(edx, Immediate(65535));
+ __ cmp(ebx, edx);
+ __ j(not_equal, &exit);
+ __ Load(edx, Operand(esp, 0 * kPointerSize), Representation::UInteger16());
+ __ cmp(ebx, edx);
+ __ j(not_equal, &exit);
+
+ // Test 5: Move() with zero and small non-zero immediates.
+ __ mov(eax, Immediate(5)); // Test number.
+ __ Move(edx, Immediate(0));
+ __ cmp(edx, Immediate(0));
+ __ j(not_equal, &exit);
+ __ Move(ecx, Immediate(-1));
+ __ cmp(ecx, Immediate(-1));
+ __ j(not_equal, &exit);
+ __ Move(ebx, Immediate(0x77));
+ __ cmp(ebx, Immediate(0x77));
+ __ j(not_equal, &exit);
+
+ __ xor_(eax, eax); // Success.
+ __ bind(&exit);
+ __ add(esp, Immediate(1 * kPointerSize));
+ __ pop(edx);
+ __ pop(ebx);
+ __ ret(0);
+
+ CodeDesc desc;
+ masm->GetCode(&desc);
+ // Call the function from C++.
+ int result = FUNCTION_CAST<F0>(buffer)();
+ CHECK_EQ(0, result);
+}
+
+#undef __
#include "ia32/macro-assembler-ia32.h"
#include "ia32/regexp-macro-assembler-ia32.h"
#endif
+#if V8_TARGET_ARCH_X87
+#include "x87/assembler-x87.h"
+#include "x87/macro-assembler-x87.h"
+#include "x87/regexp-macro-assembler-x87.h"
+#endif
#endif // V8_INTERPRETED_REGEXP
using namespace v8::internal;
typedef RegExpMacroAssemblerARM64 ArchRegExpMacroAssembler;
#elif V8_TARGET_ARCH_MIPS
typedef RegExpMacroAssemblerMIPS ArchRegExpMacroAssembler;
+#elif V8_TARGET_ARCH_X87
+typedef RegExpMacroAssemblerX87 ArchRegExpMacroAssembler;
#endif
class ContextInitializer {
# BUG(v8:2989). PASS/FAIL on linux32 because crankshaft is turned off for
# nosse2. Also for arm novfp3.
- 'regress/regress-2989': [FAIL, NO_VARIANTS, ['system == linux and arch == ia32 or arch == arm and simulator == True', PASS]],
+ 'regress/regress-2989': [FAIL, NO_VARIANTS, ['system == linux and arch == x87 or arch == arm and simulator == True', PASS]],
# Skip endian dependent test for mips due to different typed views of the same
# array buffer.
'../../src/ia32/stub-cache-ia32.cc',
],
}],
+ ['v8_target_arch=="x87"', {
+ 'sources': [ ### gcmole(arch:x87) ###
+ '../../src/x87/assembler-x87-inl.h',
+ '../../src/x87/assembler-x87.cc',
+ '../../src/x87/assembler-x87.h',
+ '../../src/x87/builtins-x87.cc',
+ '../../src/x87/code-stubs-x87.cc',
+ '../../src/x87/code-stubs-x87.h',
+ '../../src/x87/codegen-x87.cc',
+ '../../src/x87/codegen-x87.h',
+ '../../src/x87/cpu-x87.cc',
+ '../../src/x87/debug-x87.cc',
+ '../../src/x87/deoptimizer-x87.cc',
+ '../../src/x87/disasm-x87.cc',
+ '../../src/x87/frames-x87.cc',
+ '../../src/x87/frames-x87.h',
+ '../../src/x87/full-codegen-x87.cc',
+ '../../src/x87/ic-x87.cc',
+ '../../src/x87/lithium-codegen-x87.cc',
+ '../../src/x87/lithium-codegen-x87.h',
+ '../../src/x87/lithium-gap-resolver-x87.cc',
+ '../../src/x87/lithium-gap-resolver-x87.h',
+ '../../src/x87/lithium-x87.cc',
+ '../../src/x87/lithium-x87.h',
+ '../../src/x87/macro-assembler-x87.cc',
+ '../../src/x87/macro-assembler-x87.h',
+ '../../src/x87/regexp-macro-assembler-x87.cc',
+ '../../src/x87/regexp-macro-assembler-x87.h',
+ '../../src/x87/stub-cache-x87.cc',
+ ],
+ }],
['v8_target_arch=="mips" or v8_target_arch=="mipsel"', {
'sources': [ ### gcmole(arch:mipsel) ###
'../../src/mips/assembler-mips.cc',
"android_ia32",
"arm",
"ia32",
+ "x87",
"mips",
"mipsel",
"nacl_ia32",
"mipsel",
"nacl_ia32",
"nacl_x64",
+ "x87",
"arm64"]
# Support arches, modes to be written as keywords instead of strings.
VARIABLES = {ALWAYS: True}
-for var in ["debug", "release", "android_arm", "android_arm64", "android_ia32",
- "arm", "arm64", "ia32", "mips", "mipsel", "x64", "nacl_ia32",
+for var in ["debug", "release", "android_arm", "android_arm64", "android_ia32", "android_x87",
+ "arm", "arm64", "ia32", "mips", "mipsel", "x64", "x87", "nacl_ia32",
"nacl_x64", "macos", "windows", "linux"]:
VARIABLES[var] = var