# Architectures and modes to be compiled. Consider these to be internal
# variables, don't override them (use the targets instead).
-ARCHES = ia32 x64 x32 arm arm64 mips mipsel mips64el x87
+ARCHES = ia32 x64 x32 arm arm64 mips mipsel mips64el x87 ppc ppc64
DEFAULT_ARCHES = ia32 x64 arm
MODES = release debug optdebug
DEFAULT_MODES = release debug
'msvs_use_common_release': 0,
'clang%': 0,
'v8_target_arch%': '<(target_arch)',
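+ # Endianness of the build host ("little" or "big"); used below to choose
+ # between the V8_TARGET_ARCH_PPC_LE and V8_TARGET_ARCH_PPC_BE defines.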
+ 'v8_host_byteorder%': '<!(python -c "import sys; print sys.byteorder")',
# Native Client builds currently use the V8 ARM JIT and
# arm/simulator-arm.cc to defer the significant effort required
# for NaCl JIT support. The nacl_target_arch variable provides
'android_webview_build%': '<(android_webview_build)',
},
'conditions': [
- ['host_arch=="ia32" or host_arch=="x64" or clang==1', {
+ ['host_arch=="ia32" or host_arch=="x64" or \
+ host_arch=="ppc" or host_arch=="ppc64" or \
+ clang==1', {
'variables': {
'host_cxx_is_biarch%': 1,
},
},
}],
['target_arch=="ia32" or target_arch=="x64" or target_arch=="x87" or \
+ target_arch=="ppc" or target_arch=="ppc64" or \
clang==1', {
'variables': {
'target_cxx_is_biarch%': 1,
'V8_TARGET_ARCH_ARM64',
],
}],
+ ['v8_target_arch=="ppc" or v8_target_arch=="ppc64"', {
+ 'defines': [
+ 'V8_TARGET_ARCH_PPC',
+ ],
+ 'conditions': [
+ ['v8_target_arch=="ppc64"', {
+ 'defines': [
+ 'V8_TARGET_ARCH_PPC64',
+ ],
+ }],
+ ['v8_host_byteorder=="little"', {
+ 'defines': [
+ 'V8_TARGET_ARCH_PPC_LE',
+ ],
+ }],
+ ['v8_host_byteorder=="big"', {
+ 'defines': [
+ 'V8_TARGET_ARCH_PPC_BE',
+ ],
+ }],
+ ],
+ }], # ppc
['v8_target_arch=="ia32"', {
'defines': [
'V8_TARGET_ARCH_IA32',
},
},
}],
- ['(OS=="linux" or OS=="freebsd" or OS=="openbsd" or OS=="solaris" \
+ ['(OS=="linux" or OS=="freebsd" or OS=="openbsd" or OS=="solaris" \
or OS=="netbsd" or OS=="mac" or OS=="android" or OS=="qnx") and \
(v8_target_arch=="arm" or v8_target_arch=="ia32" or \
v8_target_arch=="x87" or v8_target_arch=="mips" or \
- v8_target_arch=="mipsel")', {
+ v8_target_arch=="mipsel" or v8_target_arch=="ppc")', {
'target_conditions': [
['_toolset=="host"', {
'conditions': [
],
}],
['(OS=="linux" or OS=="android") and \
- (v8_target_arch=="x64" or v8_target_arch=="arm64")', {
+ (v8_target_arch=="x64" or v8_target_arch=="arm64" or \
+ v8_target_arch=="ppc64")', {
'target_conditions': [
['_toolset=="host"', {
'conditions': [
#include "src/arm64/assembler-arm64-inl.h" // NOLINT
#elif V8_TARGET_ARCH_ARM
#include "src/arm/assembler-arm-inl.h" // NOLINT
+#elif V8_TARGET_ARCH_PPC
+#include "src/ppc/assembler-ppc-inl.h" // NOLINT
#elif V8_TARGET_ARCH_MIPS
#include "src/mips/assembler-mips-inl.h" // NOLINT
#elif V8_TARGET_ARCH_MIPS64
#include "src/arm64/regexp-macro-assembler-arm64.h" // NOLINT
#elif V8_TARGET_ARCH_ARM
#include "src/arm/regexp-macro-assembler-arm.h" // NOLINT
+#elif V8_TARGET_ARCH_PPC
+#include "src/ppc/regexp-macro-assembler-ppc.h" // NOLINT
#elif V8_TARGET_ARCH_MIPS
#include "src/mips/regexp-macro-assembler-mips.h" // NOLINT
#elif V8_TARGET_ARCH_MIPS64
function = FUNCTION_ADDR(RegExpMacroAssemblerARM64::CheckStackGuardState);
#elif V8_TARGET_ARCH_ARM
function = FUNCTION_ADDR(RegExpMacroAssemblerARM::CheckStackGuardState);
+#elif V8_TARGET_ARCH_PPC
+ function = FUNCTION_ADDR(RegExpMacroAssemblerPPC::CheckStackGuardState);
#elif V8_TARGET_ARCH_MIPS
function = FUNCTION_ADDR(RegExpMacroAssemblerMIPS::CheckStackGuardState);
#elif V8_TARGET_ARCH_MIPS64
namespace v8 {
namespace internal {
+// TODO(svenpanne) introduce an AbortReason and partition this list
#define ERROR_MESSAGES_LIST(V) \
V(kNoReason, "no reason") \
\
V(kStackFrameTypesMustMatch, "Stack frame types must match") \
V(kTheCurrentStackPointerIsBelowCsp, \
"The current stack pointer is below csp") \
+ V(kTheInstructionShouldBeALis, "The instruction should be a lis") \
V(kTheInstructionShouldBeALui, "The instruction should be a lui") \
V(kTheInstructionShouldBeAnOri, "The instruction should be an ori") \
+ V(kTheInstructionShouldBeAnOris, "The instruction should be an oris") \
+ V(kTheInstructionShouldBeALi, "The instruction should be a li") \
+ V(kTheInstructionShouldBeASldi, "The instruction should be a sldi") \
V(kTheInstructionToPatchShouldBeALoadFromConstantPool, \
"The instruction to patch should be a load from the constant pool") \
V(kTheInstructionToPatchShouldBeAnLdrLiteral, \
"The instruction to patch should be a ldr literal") \
+ V(kTheInstructionToPatchShouldBeALis, \
+ "The instruction to patch should be a lis") \
V(kTheInstructionToPatchShouldBeALui, \
"The instruction to patch should be a lui") \
V(kTheInstructionToPatchShouldBeAnOri, \
#include "src/base/atomicops_internals_arm64_gcc.h"
#elif defined(__GNUC__) && V8_HOST_ARCH_ARM
#include "src/base/atomicops_internals_arm_gcc.h"
+#elif defined(__GNUC__) && V8_HOST_ARCH_PPC
+#include "src/base/atomicops_internals_ppc_gcc.h"
#elif defined(__GNUC__) && (V8_HOST_ARCH_IA32 || V8_HOST_ARCH_X64)
#include "src/base/atomicops_internals_x86_gcc.h"
#elif defined(__GNUC__) && V8_HOST_ARCH_MIPS
--- /dev/null
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file is an internal atomic implementation, use atomicops.h instead.
+//
+
+#ifndef V8_BASE_ATOMICOPS_INTERNALS_PPC_GCC_H_
+#define V8_BASE_ATOMICOPS_INTERNALS_PPC_GCC_H_
+
+namespace v8 {
+namespace base {
+
+inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
+ Atomic32 old_value,
+ Atomic32 new_value) {
+ return (__sync_val_compare_and_swap(ptr, old_value, new_value));
+}
+
+inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
+ Atomic32 new_value) {
+ Atomic32 old_value;
+ do {
+ old_value = *ptr;
+ } while (__sync_bool_compare_and_swap(ptr, old_value, new_value) == false);
+ return old_value;
+}
+
+inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
+ Atomic32 increment) {
+ return Barrier_AtomicIncrement(ptr, increment);
+}
+
+inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
+ Atomic32 increment) {
+ for (;;) {
+ Atomic32 old_value = *ptr;
+ Atomic32 new_value = old_value + increment;
+ if (__sync_bool_compare_and_swap(ptr, old_value, new_value)) {
+ return new_value;
+ // The exchange took place as expected.
+ }
+ // Otherwise, *ptr changed mid-loop and we need to retry.
+ }
+}
+
+inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
+ Atomic32 old_value, Atomic32 new_value) {
+ return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+}
+
+inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
+ Atomic32 old_value, Atomic32 new_value) {
+ return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+}
+
+inline void NoBarrier_Store(volatile Atomic8* ptr, Atomic8 value) {
+ *ptr = value;
+}
+
+inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
+ *ptr = value;
+}
+
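+// "sync" is the Power ISA heavyweight barrier: it orders all earlier loads
+// and stores before all later ones, i.e. a full fence.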
+inline void MemoryBarrier() {
+ __asm__ __volatile__("sync" : : : "memory");
+}
+
+inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
+ *ptr = value;
+ MemoryBarrier();
+}
+
+inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
+ MemoryBarrier();
+ *ptr = value;
+}
+
+inline Atomic8 NoBarrier_Load(volatile const Atomic8* ptr) { return *ptr; }
+
+inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) { return *ptr; }
+
+inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
+ Atomic32 value = *ptr;
+ MemoryBarrier();
+ return value;
+}
+
+inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
+ MemoryBarrier();
+ return *ptr;
+}
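+
+// A minimal usage sketch (hypothetical `ready` and `payload` variables, not
+// part of this header). Writer side:
+//   NoBarrier_Store(&payload, 42);
+//   Release_Store(&ready, 1);   // barrier, then store: payload visible first
+// Reader side:
+//   while (Acquire_Load(&ready) == 0) {}    // load, then barrier
+//   Atomic32 v = NoBarrier_Load(&payload);  // guaranteed to observe 42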
+
+#ifdef V8_TARGET_ARCH_PPC64
+inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
+ Atomic64 old_value,
+ Atomic64 new_value) {
+ return (__sync_val_compare_and_swap(ptr, old_value, new_value));
+}
+
+inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
+ Atomic64 new_value) {
+ Atomic64 old_value;
+ do {
+ old_value = *ptr;
+ } while (__sync_bool_compare_and_swap(ptr, old_value, new_value) == false);
+ return old_value;
+}
+
+inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
+ Atomic64 increment) {
+ return Barrier_AtomicIncrement(ptr, increment);
+}
+
+inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
+ Atomic64 increment) {
+ for (;;) {
+ Atomic64 old_value = *ptr;
+ Atomic64 new_value = old_value + increment;
+ if (__sync_bool_compare_and_swap(ptr, old_value, new_value)) {
+ return new_value;
+ // The exchange took place as expected.
+ }
+ // Otherwise, *ptr changed mid-loop and we need to retry.
+ }
+}
+
+inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
+ Atomic64 old_value, Atomic64 new_value) {
+ return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+}
+
+inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
+ Atomic64 old_value, Atomic64 new_value) {
+ return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+}
+
+inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
+ *ptr = value;
+}
+
+inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
+ *ptr = value;
+ MemoryBarrier();
+}
+
+inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
+ MemoryBarrier();
+ *ptr = value;
+}
+
+inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) { return *ptr; }
+
+inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
+ Atomic64 value = *ptr;
+ MemoryBarrier();
+ return value;
+}
+
+inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
+ MemoryBarrier();
+ return *ptr;
+}
+
+#endif  // V8_TARGET_ARCH_PPC64
+
+} }  // namespace v8::base
+
+#endif // V8_BASE_ATOMICOPS_INTERNALS_PPC_GCC_H_
#elif defined(__MIPSEB__) || defined(__MIPSEL__)
#define V8_HOST_ARCH_MIPS 1
#define V8_HOST_ARCH_32_BIT 1
+#elif defined(__PPC__) || defined(_ARCH_PPC)
+#define V8_HOST_ARCH_PPC 1
+#if defined(__PPC64__) || defined(_ARCH_PPC64)
+#define V8_HOST_ARCH_64_BIT 1
+#else
+#define V8_HOST_ARCH_32_BIT 1
+#endif
#else
#error "Host architecture was not detected as supported by v8"
#endif
// Target architecture detection. This may be set externally. If not, detect
// in the same way as the host architecture, that is, target the native
// environment as presented by the compiler.
-#if !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_X87 && \
+#if !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_X87 && \
!V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS && \
- !V8_TARGET_ARCH_MIPS64
+ !V8_TARGET_ARCH_MIPS64 && !V8_TARGET_ARCH_PPC
#if defined(_M_X64) || defined(__x86_64__)
#define V8_TARGET_ARCH_X64 1
#elif defined(_M_IX86) || defined(__i386__)
#define V8_TARGET_ARCH_32_BIT 1
#elif V8_TARGET_ARCH_MIPS64
#define V8_TARGET_ARCH_64_BIT 1
+#elif V8_TARGET_ARCH_PPC
+#if V8_TARGET_ARCH_PPC64
+#define V8_TARGET_ARCH_64_BIT 1
+#else
+#define V8_TARGET_ARCH_32_BIT 1
+#endif
#elif V8_TARGET_ARCH_X87
#define V8_TARGET_ARCH_32_BIT 1
#else
#define V8_TARGET_LITTLE_ENDIAN 1
#elif V8_TARGET_ARCH_X87
#define V8_TARGET_LITTLE_ENDIAN 1
+#elif V8_TARGET_ARCH_PPC_LE
+#define V8_TARGET_LITTLE_ENDIAN 1
+#elif V8_TARGET_ARCH_PPC_BE
+#define V8_TARGET_BIG_ENDIAN 1
#else
#error Unknown target architecture endianness
#endif
#if V8_OS_QNX
#include <sys/syspage.h> // cpuinfo
#endif
+#if V8_OS_LINUX && V8_HOST_ARCH_PPC
+#include <elf.h>
+#endif
#if V8_OS_POSIX
#include <unistd.h> // sysconf()
#endif
delete[] part;
}
+#elif V8_HOST_ARCH_PPC
+
+#ifndef USE_SIMULATOR
+#if V8_OS_LINUX
+ // Read processor info from /proc/self/auxv.
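+ // Each auxv record is a (type, value) pair; AT_PLATFORM's value points to
+ // a NUL-terminated platform string such as "power7".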
+ char* auxv_cpu_type = NULL;
+ FILE* fp = fopen("/proc/self/auxv", "r");
+ if (fp != NULL) {
+#if V8_TARGET_ARCH_PPC64
+ Elf64_auxv_t entry;
+#else
+ Elf32_auxv_t entry;
+#endif
+ for (;;) {
+ size_t n = fread(&entry, sizeof(entry), 1, fp);
+ if (n == 0 || entry.a_type == AT_NULL) {
+ break;
+ }
+ if (entry.a_type == AT_PLATFORM) {
+ auxv_cpu_type = reinterpret_cast<char*>(entry.a_un.a_val);
+ break;
+ }
+ }
+ fclose(fp);
+ }
+
+ part_ = -1;
+ if (auxv_cpu_type) {
+ if (strcmp(auxv_cpu_type, "power8") == 0) {
+ part_ = PPC_POWER8;
+ } else if (strcmp(auxv_cpu_type, "power7") == 0) {
+ part_ = PPC_POWER7;
+ } else if (strcmp(auxv_cpu_type, "power6") == 0) {
+ part_ = PPC_POWER6;
+ } else if (strcmp(auxv_cpu_type, "power5") == 0) {
+ part_ = PPC_POWER5;
+ } else if (strcmp(auxv_cpu_type, "ppc970") == 0) {
+ part_ = PPC_G5;
+ } else if (strcmp(auxv_cpu_type, "ppc7450") == 0) {
+ part_ = PPC_G4;
+ } else if (strcmp(auxv_cpu_type, "pa6t") == 0) {
+ part_ = PPC_PA6T;
+ }
+ }
+
+#endif // V8_OS_LINUX
+#endif // !USE_SIMULATOR
+#endif // V8_HOST_ARCH_PPC
}
} } // namespace v8::base
int variant() const { return variant_; }
static const int NVIDIA_DENVER = 0x0;
int part() const { return part_; }
+
+ // ARM-specific part codes
static const int ARM_CORTEX_A5 = 0xc05;
static const int ARM_CORTEX_A7 = 0xc07;
static const int ARM_CORTEX_A8 = 0xc08;
static const int ARM_CORTEX_A12 = 0xc0c;
static const int ARM_CORTEX_A15 = 0xc0f;
+ // PPC-specific part codes
+ enum {
+ PPC_POWER5,
+ PPC_POWER6,
+ PPC_POWER7,
+ PPC_POWER8,
+ PPC_G4,
+ PPC_G5,
+ PPC_PA6T
+ };
+
// General features
bool has_fpu() const { return has_fpu_; }
// Otherwise we just assume 16 byte alignment, i.e.:
// - With gcc 4.4 the tree vectorization optimizer can generate code
// that requires 16 byte alignment such as movdqa on x86.
- // - Mac OS X and Solaris (64-bit) activation frames must be 16 byte-aligned;
- // see "Mac OS X ABI Function Call Guide"
+ // - Mac OS X, PPC and Solaris (64-bit) activation frames must
+ // be 16 byte-aligned; see "Mac OS X ABI Function Call Guide"
return 16;
#endif
}
// the hint address to 46 bits to give the kernel a fighting chance of
// fulfilling our placement request.
raw_addr &= V8_UINT64_C(0x3ffffffff000);
+#elif V8_TARGET_ARCH_PPC64
+#if V8_TARGET_BIG_ENDIAN
+ // Big-endian Linux: 44 bits of virtual addressing.
+ raw_addr &= V8_UINT64_C(0x03fffffff000);
+#else
+ // Little-endian Linux: 48 bits of virtual addressing.
+ raw_addr &= V8_UINT64_C(0x3ffffffff000);
+#endif
#else
raw_addr &= 0x3ffff000;
asm("break");
#elif V8_HOST_ARCH_MIPS64
asm("break");
+#elif V8_HOST_ARCH_PPC
+ asm("twge 2,2");
#elif V8_HOST_ARCH_IA32
#if V8_OS_NACL
asm("hlt");
#define CODE_STUB_LIST_ARM64(V)
#endif
+// List of code stubs only used on PPC platforms.
+#ifdef V8_TARGET_ARCH_PPC
+#define CODE_STUB_LIST_PPC(V) \
+ V(DirectCEntry) \
+ V(StoreRegistersState) \
+ V(RestoreRegistersState)
+#else
+#define CODE_STUB_LIST_PPC(V)
+#endif
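+// Each V(Name) entry contributes one stub to whatever list macro the caller
+// supplies, so the three PPC-only stubs above enter the combined
+// CODE_STUB_LIST expansion only when V8_TARGET_ARCH_PPC is defined.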
+
// List of code stubs only used on MIPS platforms.
#if V8_TARGET_ARCH_MIPS
#define CODE_STUB_LIST_MIPS(V) \
#endif
// Combined list of code stubs.
-#define CODE_STUB_LIST(V) \
- CODE_STUB_LIST_ALL_PLATFORMS(V) \
- CODE_STUB_LIST_ARM(V) \
- CODE_STUB_LIST_ARM64(V) \
+#define CODE_STUB_LIST(V) \
+ CODE_STUB_LIST_ALL_PLATFORMS(V) \
+ CODE_STUB_LIST_ARM(V) \
+ CODE_STUB_LIST_ARM64(V) \
+ CODE_STUB_LIST_PPC(V) \
CODE_STUB_LIST_MIPS(V)
// Stub is base classes of all stubs.
#include "src/arm64/code-stubs-arm64.h"
#elif V8_TARGET_ARCH_ARM
#include "src/arm/code-stubs-arm.h"
+#elif V8_TARGET_ARCH_PPC
+#include "src/ppc/code-stubs-ppc.h"
#elif V8_TARGET_ARCH_MIPS
#include "src/mips/code-stubs-mips.h"
#elif V8_TARGET_ARCH_MIPS64
: PlatformCodeStub(isolate) {
minor_key_ = SaveDoublesBits::encode(save_doubles == kSaveFPRegs);
DCHECK(result_size == 1 || result_size == 2);
-#ifdef _WIN64
+#if _WIN64 || (V8_TARGET_ARCH_PPC64 && !ABI_RETURNS_OBJECT_PAIRS_IN_REGS)
minor_key_ = ResultSizeBits::update(minor_key_, result_size);
-#endif // _WIN64
+#endif
}
private:
bool save_doubles() const { return SaveDoublesBits::decode(minor_key_); }
-#ifdef _WIN64
+#if _WIN64 || (V8_TARGET_ARCH_PPC64 && !ABI_RETURNS_OBJECT_PAIRS_IN_REGS)
int result_size() const { return ResultSizeBits::decode(minor_key_); }
-#endif // _WIN64
+#endif
#include "src/arm64/codegen-arm64.h" // NOLINT
#elif V8_TARGET_ARCH_ARM
#include "src/arm/codegen-arm.h" // NOLINT
+#elif V8_TARGET_ARCH_PPC
+#include "src/ppc/codegen-ppc.h" // NOLINT
#elif V8_TARGET_ARCH_MIPS
#include "src/mips/codegen-mips.h" // NOLINT
#elif V8_TARGET_ARCH_MIPS64
#include "src/compiler/mips64/instruction-codes-mips64.h"
#elif V8_TARGET_ARCH_X64
#include "src/compiler/x64/instruction-codes-x64.h"
+#elif V8_TARGET_ARCH_PPC
+#include "src/compiler/ppc/instruction-codes-ppc.h"
#else
#define TARGET_ARCH_OPCODE_LIST(V)
#define TARGET_ADDRESSING_MODE_LIST(V)
DEFINE_BOOL(enable_vldr_imm, false,
"enable use of constant pools for double immediate (ARM only)")
DEFINE_BOOL(force_long_branches, false,
- "force all emitted branches to be in long mode (MIPS only)")
+ "force all emitted branches to be in long mode (MIPS/PPC only)")
// bootstrapper.cc
DEFINE_STRING(expose_natives_as, NULL, "expose natives in global object")
DEFINE_BOOL(check_icache, false,
"Check icache flushes in ARM and MIPS simulator")
DEFINE_INT(stop_sim_at, 0, "Simulator stop after x number of instructions")
-#if defined(V8_TARGET_ARCH_ARM64) || defined(V8_TARGET_ARCH_MIPS64)
+#if defined(V8_TARGET_ARCH_ARM64) || defined(V8_TARGET_ARCH_MIPS64) || \
+ defined(V8_TARGET_ARCH_PPC64)
DEFINE_INT(sim_stack_alignment, 16,
"Stack alignment in bytes in simulator. This must be a power of two "
"and it must be at least 16. 16 is default.")
#include "src/arm64/frames-arm64.h" // NOLINT
#elif V8_TARGET_ARCH_ARM
#include "src/arm/frames-arm.h" // NOLINT
+#elif V8_TARGET_ARCH_PPC
+#include "src/ppc/frames-ppc.h" // NOLINT
#elif V8_TARGET_ARCH_MIPS
#include "src/mips/frames-mips.h" // NOLINT
#elif V8_TARGET_ARCH_MIPS64
// TODO(all): Copied ARM value. Check this is sensible for ARM64.
static const int kCodeSizeMultiplier = 149;
static const int kBootCodeSizeMultiplier = 110;
+#elif V8_TARGET_ARCH_PPC64
+ static const int kCodeSizeMultiplier = 200;
+ static const int kBootCodeSizeMultiplier = 120;
+#elif V8_TARGET_ARCH_PPC
+ static const int kCodeSizeMultiplier = 200;
+ static const int kBootCodeSizeMultiplier = 120;
#elif V8_TARGET_ARCH_MIPS
static const int kCodeSizeMultiplier = 149;
static const int kBootCodeSizeMultiplier = 120;
Label* if_true,
Label* if_false,
Label* fall_through);
-#else // All non-mips arch.
+#elif V8_TARGET_ARCH_PPC
+ void Split(Condition cc, Label* if_true, Label* if_false, Label* fall_through,
+ CRegister cr = cr7);
+#else // All other arch.
void Split(Condition cc,
Label* if_true,
Label* if_false,
Label* fall_through);
-#endif // V8_TARGET_ARCH_MIPS
+#endif
// Load the value of a known (PARAMETER, LOCAL, or CONTEXT) variable into
// a register. Emits a context chain walk if if necessary (so does
#if V8_TARGET_ARCH_IA32 || (V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_32_BIT) || \
V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_MIPS || \
- V8_TARGET_ARCH_MIPS64
+ V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_PPC
#define V8_TURBOFAN_BACKEND 1
#else
#define V8_TURBOFAN_BACKEND 0
#if (V8_TARGET_ARCH_ARM && !V8_HOST_ARCH_ARM)
#define USE_SIMULATOR 1
#endif
+#if (V8_TARGET_ARCH_PPC && !V8_HOST_ARCH_PPC)
+#define USE_SIMULATOR 1
+#endif
#if (V8_TARGET_ARCH_MIPS && !V8_HOST_ARCH_MIPS)
#define USE_SIMULATOR 1
#endif
// Determine whether double field unboxing feature is enabled.
-#if (V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64)
+#if V8_TARGET_ARCH_64_BIT
#define V8_DOUBLE_FIELDS_UNBOXING 1
#else
#define V8_DOUBLE_FIELDS_UNBOXING 0
// ARM64
ALWAYS_ALIGN_CSP,
COHERENT_CACHE,
+ // PPC
+ FPR_GPR_MOV,
+ LWSYNC,
+ ISELECT,
NUMBER_OF_CPU_FEATURES
};
#include "src/v8threads.h"
#include "src/vm-state-inl.h"
+#if V8_TARGET_ARCH_PPC && !V8_INTERPRETED_REGEXP
+#include "src/regexp-macro-assembler.h" // NOLINT
+#include "src/ppc/regexp-macro-assembler-ppc.h" // NOLINT
+#endif
#if V8_TARGET_ARCH_ARM && !V8_INTERPRETED_REGEXP
#include "src/regexp-macro-assembler.h" // NOLINT
#include "src/arm/regexp-macro-assembler-arm.h" // NOLINT
#include "src/arm64/lithium-arm64.h" // NOLINT
#elif V8_TARGET_ARCH_ARM
#include "src/arm/lithium-arm.h" // NOLINT
+#elif V8_TARGET_ARCH_PPC
+#include "src/ppc/lithium-ppc.h" // NOLINT
#elif V8_TARGET_ARCH_MIPS
#include "src/mips/lithium-mips.h" // NOLINT
#elif V8_TARGET_ARCH_MIPS64
#include "src/arm64/lithium-codegen-arm64.h" // NOLINT
#elif V8_TARGET_ARCH_ARM
#include "src/arm/lithium-codegen-arm.h" // NOLINT
+#elif V8_TARGET_ARCH_PPC
+#include "src/ppc/lithium-codegen-ppc.h" // NOLINT
#elif V8_TARGET_ARCH_MIPS
#include "src/mips/lithium-codegen-mips.h" // NOLINT
#elif V8_TARGET_ARCH_MIPS64
// Initialize other runtime facilities
#if defined(USE_SIMULATOR)
-#if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || \
- V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64
+#if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_MIPS || \
+ V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_PPC
Simulator::Initialize(this);
#endif
#endif
class Debugger;
class PromiseOnStack;
-#if !defined(__arm__) && V8_TARGET_ARCH_ARM || \
+#if !defined(__arm__) && V8_TARGET_ARCH_ARM || \
!defined(__aarch64__) && V8_TARGET_ARCH_ARM64 || \
- !defined(__mips__) && V8_TARGET_ARCH_MIPS || \
+ !defined(__PPC__) && V8_TARGET_ARCH_PPC || \
+ !defined(__mips__) && V8_TARGET_ARCH_MIPS || \
!defined(__mips__) && V8_TARGET_ARCH_MIPS64
class Redirection;
class Simulator;
};
-#if V8_TARGET_ARCH_ARM && !defined(__arm__) || \
+#if V8_TARGET_ARCH_ARM && !defined(__arm__) || \
V8_TARGET_ARCH_ARM64 && !defined(__aarch64__) || \
- V8_TARGET_ARCH_MIPS && !defined(__mips__) || \
+ V8_TARGET_ARCH_PPC && !defined(__PPC__) || \
+ V8_TARGET_ARCH_MIPS && !defined(__mips__) || \
V8_TARGET_ARCH_MIPS64 && !defined(__mips__)
#define ISOLATE_INIT_SIMULATOR_LIST(V) \
thread_id_(thread_id),
stack_limit_(0),
thread_state_(NULL),
-#if !defined(__arm__) && V8_TARGET_ARCH_ARM || \
+#if !defined(__arm__) && V8_TARGET_ARCH_ARM || \
!defined(__aarch64__) && V8_TARGET_ARCH_ARM64 || \
- !defined(__mips__) && V8_TARGET_ARCH_MIPS || \
+ !defined(__PPC__) && V8_TARGET_ARCH_PPC || \
+ !defined(__mips__) && V8_TARGET_ARCH_MIPS || \
!defined(__mips__) && V8_TARGET_ARCH_MIPS64
simulator_(NULL),
#endif
FIELD_ACCESSOR(uintptr_t, stack_limit)
FIELD_ACCESSOR(ThreadState*, thread_state)
-#if !defined(__arm__) && V8_TARGET_ARCH_ARM || \
+#if !defined(__arm__) && V8_TARGET_ARCH_ARM || \
!defined(__aarch64__) && V8_TARGET_ARCH_ARM64 || \
- !defined(__mips__) && V8_TARGET_ARCH_MIPS || \
+ !defined(__PPC__) && V8_TARGET_ARCH_PPC || \
+ !defined(__mips__) && V8_TARGET_ARCH_MIPS || \
!defined(__mips__) && V8_TARGET_ARCH_MIPS64
FIELD_ACCESSOR(Simulator*, simulator)
#endif
uintptr_t stack_limit_;
ThreadState* thread_state_;
-#if !defined(__arm__) && V8_TARGET_ARCH_ARM || \
+#if !defined(__arm__) && V8_TARGET_ARCH_ARM || \
!defined(__aarch64__) && V8_TARGET_ARCH_ARM64 || \
- !defined(__mips__) && V8_TARGET_ARCH_MIPS || \
+ !defined(__PPC__) && V8_TARGET_ARCH_PPC || \
+ !defined(__mips__) && V8_TARGET_ARCH_MIPS || \
!defined(__mips__) && V8_TARGET_ARCH_MIPS64
Simulator* simulator_;
#endif
#include "src/arm64/regexp-macro-assembler-arm64.h" // NOLINT
#elif V8_TARGET_ARCH_ARM
#include "src/arm/regexp-macro-assembler-arm.h" // NOLINT
+#elif V8_TARGET_ARCH_PPC
+#include "src/ppc/regexp-macro-assembler-ppc.h" // NOLINT
#elif V8_TARGET_ARCH_MIPS
#include "src/mips/regexp-macro-assembler-mips.h" // NOLINT
#elif V8_TARGET_ARCH_MIPS64
#elif V8_TARGET_ARCH_ARM64
RegExpMacroAssemblerARM64 macro_assembler(mode, (data->capture_count + 1) * 2,
zone);
+#elif V8_TARGET_ARCH_PPC
+ RegExpMacroAssemblerPPC macro_assembler(mode, (data->capture_count + 1) * 2,
+ zone);
#elif V8_TARGET_ARCH_MIPS
RegExpMacroAssemblerMIPS macro_assembler(mode, (data->capture_count + 1) * 2,
zone);
#include "src/arm64/lithium-arm64.h" // NOLINT
#elif V8_TARGET_ARCH_ARM
#include "src/arm/lithium-arm.h" // NOLINT
+#elif V8_TARGET_ARCH_PPC
+#include "src/ppc/lithium-ppc.h" // NOLINT
#elif V8_TARGET_ARCH_MIPS
#include "src/mips/lithium-mips.h" // NOLINT
#elif V8_TARGET_ARCH_MIPS64
#elif V8_TARGET_ARCH_X87
#include "src/x87/lithium-x87.h" // NOLINT
#include "src/x87/lithium-codegen-x87.h" // NOLINT
+#elif V8_TARGET_ARCH_PPC
+#include "src/ppc/lithium-ppc.h" // NOLINT
+#include "src/ppc/lithium-codegen-ppc.h" // NOLINT
#else
#error Unsupported target architecture.
#endif
#include "src/mips/lithium-mips.h" // NOLINT
#elif V8_TARGET_ARCH_MIPS64
#include "src/mips64/lithium-mips64.h" // NOLINT
+#elif V8_TARGET_ARCH_PPC
+#include "src/ppc/lithium-ppc.h" // NOLINT
#elif V8_TARGET_ARCH_X87
#include "src/x87/lithium-x87.h" // NOLINT
#else
#elif V8_TARGET_ARCH_ARM
#include "src/arm/lithium-arm.h" // NOLINT
#include "src/arm/lithium-codegen-arm.h" // NOLINT
+#elif V8_TARGET_ARCH_PPC
+#include "src/ppc/lithium-ppc.h" // NOLINT
+#include "src/ppc/lithium-codegen-ppc.h" // NOLINT
#elif V8_TARGET_ARCH_MIPS
#include "src/mips/lithium-mips.h" // NOLINT
#include "src/mips/lithium-codegen-mips.h" // NOLINT
const char arch[] = "x32";
#elif V8_TARGET_ARCH_ARM
const char arch[] = "arm";
+#elif V8_TARGET_ARCH_PPC
+ const char arch[] = "ppc";
#elif V8_TARGET_ARCH_MIPS
const char arch[] = "mips";
#elif V8_TARGET_ARCH_X87
#include "src/arm/assembler-arm-inl.h"
#include "src/code.h" // NOLINT, must be after assembler_*.h
#include "src/arm/macro-assembler-arm.h" // NOLINT
+#elif V8_TARGET_ARCH_PPC
+#include "src/ppc/constants-ppc.h"
+#include "src/assembler.h" // NOLINT
+#include "src/ppc/assembler-ppc.h" // NOLINT
+#include "src/ppc/assembler-ppc-inl.h"
+#include "src/code.h" // NOLINT, must be after assembler_*.h
+#include "src/ppc/macro-assembler-ppc.h"
#elif V8_TARGET_ARCH_MIPS
#include "src/mips/constants-mips.h"
#include "src/assembler.h" // NOLINT
#include "src/mips/constants-mips.h" // NOLINT
#elif V8_TARGET_ARCH_MIPS64
#include "src/mips64/constants-mips64.h" // NOLINT
+#elif V8_TARGET_ARCH_PPC
+#include "src/ppc/constants-ppc.h" // NOLINT
#endif
// Assume support
supported_ |= (1u << FPU);
}
- if (cpu.cache_line_size() != 0) {
- cache_line_size_ = cpu.cache_line_size();
- }
#elif V8_OS_AIX
// Assume FP support and a default cache line size
supported_ |= (1u << FPU);
// Code address skips the function descriptor "header".
// TOC and static chain are ignored and set to 0.
void Assembler::function_descriptor() {
+#if ABI_USES_FUNCTION_DESCRIPTORS
DCHECK(pc_offset() == 0);
RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
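+ // An ELF PPC64 function descriptor is three pointers: the code entry, the
+ // TOC base, and the environment; the entry is pointed just past the
+ // descriptor itself (3 * kPointerSize ahead).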
emit_ptr(reinterpret_cast<uintptr_t>(pc_) + 3 * kPointerSize);
emit_ptr(0);
emit_ptr(0);
+#endif
}
Label invoke, handler_entry, exit;
// Called from C
-#if ABI_USES_FUNCTION_DESCRIPTORS
__ function_descriptor();
-#endif
ProfileEntryHookStub::MaybeCallEntryHook(masm);
Register temp3 = r9;
// Called from C
-#if ABI_USES_FUNCTION_DESCRIPTORS
__ function_descriptor();
-#endif
__ Push(temp3, temp2, temp1);
MathExpGenerator::EmitMathExp(&masm, input, result, double_scratch1,
MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
// Called from C
-#if ABI_USES_FUNCTION_DESCRIPTORS
__ function_descriptor();
-#endif
__ MovFromFloatParameter(d1);
__ fsqrt(d1, d1);
DCHECK_EQ(0, registers_to_save % 2);
// Called from C
-#if ABI_USES_FUNCTION_DESCRIPTORS
__ function_descriptor();
-#endif
__ b(&entry_label_); // We'll write the entry code later.
// If the code gets too big or corrupted, an internal exception will be
kARMImplementation,
kARM64Implementation,
kMIPSImplementation,
+ kPPCImplementation,
kX64Implementation,
kX87Implementation,
kBytecodeImplementation
Simulator::sp));
state->fp = reinterpret_cast<Address>(simulator_->get_register(
Simulator::fp));
+#elif V8_TARGET_ARCH_PPC
+ state->pc = reinterpret_cast<Address>(simulator_->get_pc());
+ state->sp =
+ reinterpret_cast<Address>(simulator_->get_register(Simulator::sp));
+ state->fp =
+ reinterpret_cast<Address>(simulator_->get_register(Simulator::fp));
#endif
}
#else
// Extracting the sample from the context is extremely machine dependent.
ucontext_t* ucontext = reinterpret_cast<ucontext_t*>(context);
-#if !V8_OS_OPENBSD
+#if !(V8_OS_OPENBSD || (V8_OS_LINUX && V8_HOST_ARCH_PPC))
mcontext_t& mcontext = ucontext->uc_mcontext;
#endif
#if V8_OS_LINUX
state.pc = reinterpret_cast<Address>(mcontext.pc);
state.sp = reinterpret_cast<Address>(mcontext.gregs[29]);
state.fp = reinterpret_cast<Address>(mcontext.gregs[30]);
+#elif V8_HOST_ARCH_PPC
+ state.pc = reinterpret_cast<Address>(ucontext->uc_mcontext.regs->nip);
+ state.sp = reinterpret_cast<Address>(ucontext->uc_mcontext.regs->gpr[PT_R1]);
+ state.fp = reinterpret_cast<Address>(ucontext->uc_mcontext.regs->gpr[PT_R31]);
#endif // V8_HOST_ARCH_*
#elif V8_OS_MACOSX
#if V8_HOST_ARCH_X64
// allocation point and write a pointer to it to the current object.
ALL_SPACES(kBackref, kPlain, kStartOfObject)
ALL_SPACES(kBackrefWithSkip, kPlain, kStartOfObject)
-#if defined(V8_TARGET_ARCH_MIPS) || V8_OOL_CONSTANT_POOL || \
- defined(V8_TARGET_ARCH_MIPS64)
+#if defined(V8_TARGET_ARCH_MIPS) || defined(V8_TARGET_ARCH_MIPS64) || \
+ defined(V8_TARGET_ARCH_PPC) || V8_OOL_CONSTANT_POOL
// Deserialize a new object from pointer found in code and write
- // a pointer to it to the current object. Required only for MIPS or ARM
- // with ool constant pool, and omitted on the other architectures because
- // it is fully unrolled and would cause bloat.
+ // a pointer to it to the current object. Required only for MIPS, PPC or
+ // ARM with ool constant pool, and omitted on the other architectures
+ // because it is fully unrolled and would cause bloat.
ALL_SPACES(kNewObject, kFromCode, kStartOfObject)
// Find a recently deserialized code object using its offset from the
// current allocation point and write a pointer to it to the current
- // object. Required only for MIPS or ARM with ool constant pool.
+ // object. Required only for MIPS, PPC or ARM with ool constant pool.
ALL_SPACES(kBackref, kFromCode, kStartOfObject)
ALL_SPACES(kBackrefWithSkip, kFromCode, kStartOfObject)
#endif
CASE_STATEMENT(kRootArray, kPlain, kStartOfObject, 0)
CASE_BODY(kRootArray, kPlain, kStartOfObject, 0)
#if defined(V8_TARGET_ARCH_MIPS) || V8_OOL_CONSTANT_POOL || \
- defined(V8_TARGET_ARCH_MIPS64)
+ defined(V8_TARGET_ARCH_MIPS64) || defined(V8_TARGET_ARCH_PPC)
// Find an object in the roots array and write a pointer to it to in code.
CASE_STATEMENT(kRootArray, kFromCode, kStartOfObject, 0)
CASE_BODY(kRootArray, kFromCode, kStartOfObject, 0)
#include "src/arm64/simulator-arm64.h"
#elif V8_TARGET_ARCH_ARM
#include "src/arm/simulator-arm.h"
+#elif V8_TARGET_ARCH_PPC
+#include "src/ppc/simulator-ppc.h"
#elif V8_TARGET_ARCH_MIPS
#include "src/mips/simulator-mips.h"
#elif V8_TARGET_ARCH_MIPS64
#elif defined(V8_HOST_ARCH_MIPS)
INLINE(void CopyCharsUnsigned(uint8_t* dest, const uint8_t* src, int chars));
INLINE(void CopyCharsUnsigned(uint16_t* dest, const uint16_t* src, int chars));
+#elif defined(V8_HOST_ARCH_PPC)
+INLINE(void CopyCharsUnsigned(uint8_t* dest, const uint8_t* src, int chars));
+INLINE(void CopyCharsUnsigned(uint16_t* dest, const uint16_t* src, int chars));
#endif
// Copy from 8bit/16bit chars to 8bit/16bit chars.
MemCopy(dest, src, chars * sizeof(*dest));
}
}
+#elif defined(V8_HOST_ARCH_PPC)
+#define CASE(n) \
+ case n: \
+ memcpy(dest, src, n); \
+ break
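+// Each CASE(n) expands to "case n: memcpy(dest, src, n); break", giving the
+// compiler a compile-time-constant length it can lower to straight-line
+// loads and stores instead of a generic memcpy call.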
+void CopyCharsUnsigned(uint8_t* dest, const uint8_t* src, int chars) {
+ switch (static_cast<unsigned>(chars)) {
+ case 0:
+ break;
+ case 1:
+ *dest = *src;
+ break;
+ CASE(2);
+ CASE(3);
+ CASE(4);
+ CASE(5);
+ CASE(6);
+ CASE(7);
+ CASE(8);
+ CASE(9);
+ CASE(10);
+ CASE(11);
+ CASE(12);
+ CASE(13);
+ CASE(14);
+ CASE(15);
+ CASE(16);
+ CASE(17);
+ CASE(18);
+ CASE(19);
+ CASE(20);
+ CASE(21);
+ CASE(22);
+ CASE(23);
+ CASE(24);
+ CASE(25);
+ CASE(26);
+ CASE(27);
+ CASE(28);
+ CASE(29);
+ CASE(30);
+ CASE(31);
+ CASE(32);
+ CASE(33);
+ CASE(34);
+ CASE(35);
+ CASE(36);
+ CASE(37);
+ CASE(38);
+ CASE(39);
+ CASE(40);
+ CASE(41);
+ CASE(42);
+ CASE(43);
+ CASE(44);
+ CASE(45);
+ CASE(46);
+ CASE(47);
+ CASE(48);
+ CASE(49);
+ CASE(50);
+ CASE(51);
+ CASE(52);
+ CASE(53);
+ CASE(54);
+ CASE(55);
+ CASE(56);
+ CASE(57);
+ CASE(58);
+ CASE(59);
+ CASE(60);
+ CASE(61);
+ CASE(62);
+ CASE(63);
+ CASE(64);
+ default:
+ memcpy(dest, src, chars);
+ break;
+ }
+}
+#undef CASE
+
+#define CASE(n) \
+ case n: \
+ memcpy(dest, src, n * 2); \
+ break
+void CopyCharsUnsigned(uint16_t* dest, const uint16_t* src, int chars) {
+ switch (static_cast<unsigned>(chars)) {
+ case 0:
+ break;
+ case 1:
+ *dest = *src;
+ break;
+ CASE(2);
+ CASE(3);
+ CASE(4);
+ CASE(5);
+ CASE(6);
+ CASE(7);
+ CASE(8);
+ CASE(9);
+ CASE(10);
+ CASE(11);
+ CASE(12);
+ CASE(13);
+ CASE(14);
+ CASE(15);
+ CASE(16);
+ CASE(17);
+ CASE(18);
+ CASE(19);
+ CASE(20);
+ CASE(21);
+ CASE(22);
+ CASE(23);
+ CASE(24);
+ CASE(25);
+ CASE(26);
+ CASE(27);
+ CASE(28);
+ CASE(29);
+ CASE(30);
+ CASE(31);
+ CASE(32);
+ default:
+ memcpy(dest, src, chars * 2);
+ break;
+ }
+}
+#undef CASE
#endif
'test-js-arm64-variables.cc'
],
}],
+ ['v8_target_arch=="ppc"', {
+ 'sources': [ ### gcmole(arch:ppc) ###
+ 'test-assembler-ppc.cc',
+ 'test-code-stubs.cc',
+ 'test-disasm-ppc.cc'
+ ],
+ }],
+ ['v8_target_arch=="ppc64"', {
+ 'sources': [ ### gcmole(arch:ppc64) ###
+ 'test-assembler-ppc.cc',
+ 'test-code-stubs.cc',
+ 'test-disasm-ppc.cc'
+ ],
+ }],
['v8_target_arch=="mipsel"', {
'sources': [ ### gcmole(arch:mipsel) ###
'test-assembler-mips.cc',
Simulator::CallArgument::End()};
return ReturnValueTraits<R>::Cast(CallSimulator(FUNCTION_ADDR(f), args));
}
-#elif USE_SIMULATOR && V8_TARGET_ARCH_MIPS64
+#elif USE_SIMULATOR && (V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_PPC64)
uintptr_t CallSimulator(byte* f, int64_t p1 = 0, int64_t p2 = 0,
int64_t p3 = 0, int64_t p4 = 0) {
Simulator* simulator = Simulator::current(isolate_);
ParameterTraits<P2>::Cast(p2), ParameterTraits<P3>::Cast(p3),
ParameterTraits<P4>::Cast(p4)));
}
-#elif USE_SIMULATOR && (V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_MIPS)
+#elif USE_SIMULATOR && \
+ (V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_PPC)
uintptr_t CallSimulator(byte* f, int32_t p1 = 0, int32_t p2 = 0,
int32_t p3 = 0, int32_t p4 = 0) {
Simulator* simulator = Simulator::current(isolate_);
--- /dev/null
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "src/v8.h"
+
+#include "src/disassembler.h"
+#include "src/factory.h"
+#include "src/ppc/assembler-ppc-inl.h"
+#include "src/ppc/simulator-ppc.h"
+#include "test/cctest/cctest.h"
+
+using namespace v8::internal;
+
+
+// Define these function prototypes to match JSEntryFunction in execution.cc.
+typedef Object* (*F1)(int x, int p1, int p2, int p3, int p4);
+typedef Object* (*F2)(int x, int y, int p2, int p3, int p4);
+typedef Object* (*F3)(void* p0, int p1, int p2, int p3, int p4);
+typedef Object* (*F4)(void* p0, void* p1, int p2, int p3, int p4);
+
+
+#define __ assm.
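+
+// Note: in the PPC ABI, r3 and r4 carry the first two integer arguments and
+// r3 carries the return value, which is why the tests below operate on r3/r4.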
+
+// Simple add parameter 1 to parameter 2 and return
+TEST(0) {
+ CcTest::InitializeVM();
+ Isolate* isolate = Isolate::Current();
+ HandleScope scope(isolate);
+
+ Assembler assm(isolate, NULL, 0);
+
+ __ function_descriptor();
+
+ __ add(r3, r3, r4);
+ __ blr();
+
+ CodeDesc desc;
+ assm.GetCode(&desc);
+ Handle<Code> code = isolate->factory()->NewCode(
+ desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+#ifdef DEBUG
+ code->Print();
+#endif
+ F2 f = FUNCTION_CAST<F2>(code->entry());
+ intptr_t res =
+ reinterpret_cast<intptr_t>(CALL_GENERATED_CODE(f, 3, 4, 0, 0, 0));
+ ::printf("f() = %" V8PRIdPTR "\n", res);
+ CHECK_EQ(7, static_cast<int>(res));
+}
+
+
+// Loop 100 times, adding loop counter to result
+TEST(1) {
+ CcTest::InitializeVM();
+ Isolate* isolate = Isolate::Current();
+ HandleScope scope(isolate);
+
+ Assembler assm(isolate, NULL, 0);
+ Label L, C;
+
+ __ function_descriptor();
+
+ __ mr(r4, r3);
+ __ li(r3, Operand::Zero());
+ __ b(&C);
+
+ __ bind(&L);
+ __ add(r3, r3, r4);
+ __ subi(r4, r4, Operand(1));
+
+ __ bind(&C);
+ __ cmpi(r4, Operand::Zero());
+ __ bne(&L);
+ __ blr();
+
+ CodeDesc desc;
+ assm.GetCode(&desc);
+ Handle<Code> code = isolate->factory()->NewCode(
+ desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+#ifdef DEBUG
+ code->Print();
+#endif
+ F1 f = FUNCTION_CAST<F1>(code->entry());
+ intptr_t res =
+ reinterpret_cast<intptr_t>(CALL_GENERATED_CODE(f, 100, 0, 0, 0, 0));
+ ::printf("f() = %" V8PRIdPTR "\n", res);
+ CHECK_EQ(5050, static_cast<int>(res));
+}
+
+
+TEST(2) {
+ CcTest::InitializeVM();
+ Isolate* isolate = Isolate::Current();
+ HandleScope scope(isolate);
+
+ Assembler assm(isolate, NULL, 0);
+ Label L, C;
+
+ __ function_descriptor();
+
+ __ mr(r4, r3);
+ __ li(r3, Operand(1));
+ __ b(&C);
+
+ __ bind(&L);
+#if defined(V8_TARGET_ARCH_PPC64)
+ __ mulld(r3, r4, r3);
+#else
+ __ mullw(r3, r4, r3);
+#endif
+ __ subi(r4, r4, Operand(1));
+
+ __ bind(&C);
+ __ cmpi(r4, Operand::Zero());
+ __ bne(&L);
+ __ blr();
+
+ // some relocated stuff here, not executed
+ __ RecordComment("dead code, just testing relocations");
+ __ mov(r0, Operand(isolate->factory()->true_value()));
+ __ RecordComment("dead code, just testing immediate operands");
+ __ mov(r0, Operand(-1));
+ __ mov(r0, Operand(0xFF000000));
+ __ mov(r0, Operand(0xF0F0F0F0));
+ __ mov(r0, Operand(0xFFF0FFFF));
+
+ CodeDesc desc;
+ assm.GetCode(&desc);
+ Handle<Code> code = isolate->factory()->NewCode(
+ desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+#ifdef DEBUG
+ code->Print();
+#endif
+ F1 f = FUNCTION_CAST<F1>(code->entry());
+ intptr_t res =
+ reinterpret_cast<intptr_t>(CALL_GENERATED_CODE(f, 10, 0, 0, 0, 0));
+ ::printf("f() = %" V8PRIdPTR "\n", res);
+ CHECK_EQ(3628800, static_cast<int>(res));
+}
+
+
+TEST(3) {
+ CcTest::InitializeVM();
+ Isolate* isolate = Isolate::Current();
+ HandleScope scope(isolate);
+
+ typedef struct {
+ int i;
+ char c;
+ int16_t s;
+ } T;
+ T t;
+
+ Assembler assm(isolate, NULL, 0);
+ Label L, C;
+
+ __ function_descriptor();
+
+// build a frame
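+// (stdu/stwu store the old sp at the new stack pointer, forming the ABI back
+// chain, while decrementing sp in a single instruction.)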
+#if V8_TARGET_ARCH_PPC64
+ __ stdu(sp, MemOperand(sp, -32));
+ __ std(fp, MemOperand(sp, 24));
+#else
+ __ stwu(sp, MemOperand(sp, -16));
+ __ stw(fp, MemOperand(sp, 12));
+#endif
+ __ mr(fp, sp);
+
+ // r4 points to our struct
+ __ mr(r4, r3);
+
+ // modify field int i of struct
+ __ lwz(r3, MemOperand(r4, OFFSET_OF(T, i)));
+ __ srwi(r5, r3, Operand(1));
+ __ stw(r5, MemOperand(r4, OFFSET_OF(T, i)));
+
+ // modify field char c of struct
+ __ lbz(r5, MemOperand(r4, OFFSET_OF(T, c)));
+ __ add(r3, r5, r3);
+ __ slwi(r5, r5, Operand(2));
+ __ stb(r5, MemOperand(r4, OFFSET_OF(T, c)));
+
+ // modify field int16_t s of struct
+ __ lhz(r5, MemOperand(r4, OFFSET_OF(T, s)));
+ __ add(r3, r5, r3);
+ __ srwi(r5, r5, Operand(3));
+ __ sth(r5, MemOperand(r4, OFFSET_OF(T, s)));
+
+// restore frame
+#if V8_TARGET_ARCH_PPC64
+ __ addi(r11, fp, Operand(32));
+ __ ld(fp, MemOperand(r11, -8));
+#else
+ __ addi(r11, fp, Operand(16));
+ __ lwz(fp, MemOperand(r11, -4));
+#endif
+ __ mr(sp, r11);
+ __ blr();
+
+ CodeDesc desc;
+ assm.GetCode(&desc);
+ Handle<Code> code = isolate->factory()->NewCode(
+ desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+#ifdef DEBUG
+ code->Print();
+#endif
+ F3 f = FUNCTION_CAST<F3>(code->entry());
+ t.i = 100000;
+ t.c = 10;
+ t.s = 1000;
+ intptr_t res =
+ reinterpret_cast<intptr_t>(CALL_GENERATED_CODE(f, &t, 0, 0, 0, 0));
+ ::printf("f() = %" V8PRIdPTR "\n", res);
+ CHECK_EQ(101010, static_cast<int>(res));
+ CHECK_EQ(100000 / 2, t.i);
+ CHECK_EQ(10 * 4, t.c);
+ CHECK_EQ(1000 / 8, t.s);
+}
+
+#if 0
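+// NOTE: the disabled tests below still use ARM/VFP instructions and appear
+// to be carried over from test-assembler-arm.cc as templates; they need PPC
+// rewrites before they can be enabled.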
+TEST(4) {
+ // Test the VFP floating point instructions.
+ CcTest::InitializeVM();
+ Isolate* isolate = Isolate::Current();
+ HandleScope scope(isolate);
+
+ typedef struct {
+ double a;
+ double b;
+ double c;
+ double d;
+ double e;
+ double f;
+ double g;
+ double h;
+ int i;
+ double m;
+ double n;
+ float x;
+ float y;
+ } T;
+ T t;
+
+ // Create a function that accepts &t, and loads, manipulates, and stores
+ // the doubles and floats.
+ Assembler assm(Isolate::Current(), NULL, 0);
+ Label L, C;
+
+ if (CpuFeatures::IsSupported(VFP3)) {
+ CpuFeatures::Scope scope(VFP3);
+
+ __ mov(ip, Operand(sp));
+ __ stm(db_w, sp, r4.bit() | fp.bit() | lr.bit());
+ __ sub(fp, ip, Operand(4));
+
+ __ mov(r4, Operand(r0));
+ __ vldr(d6, r4, OFFSET_OF(T, a));
+ __ vldr(d7, r4, OFFSET_OF(T, b));
+ __ vadd(d5, d6, d7);
+ __ vstr(d5, r4, OFFSET_OF(T, c));
+
+ __ vmov(r2, r3, d5);
+ __ vmov(d4, r2, r3);
+ __ vstr(d4, r4, OFFSET_OF(T, b));
+
+ // Load t.x and t.y, switch values, and store back to the struct.
+ __ vldr(s0, r4, OFFSET_OF(T, x));
+ __ vldr(s31, r4, OFFSET_OF(T, y));
+ __ vmov(s16, s0);
+ __ vmov(s0, s31);
+ __ vmov(s31, s16);
+ __ vstr(s0, r4, OFFSET_OF(T, x));
+ __ vstr(s31, r4, OFFSET_OF(T, y));
+
+ // Move a literal into a register that can be encoded in the instruction.
+ __ vmov(d4, 1.0);
+ __ vstr(d4, r4, OFFSET_OF(T, e));
+
+ // Move a literal into a register that requires 64 bits to encode.
+ // 0x3ff0000010000000 = 1.000000059604644775390625
+ __ vmov(d4, 1.000000059604644775390625);
+ __ vstr(d4, r4, OFFSET_OF(T, d));
+
+ // Convert from floating point to integer.
+ __ vmov(d4, 2.0);
+ __ vcvt_s32_f64(s31, d4);
+ __ vstr(s31, r4, OFFSET_OF(T, i));
+
+ // Convert from integer to floating point.
+ __ mov(lr, Operand(42));
+ __ vmov(s31, lr);
+ __ vcvt_f64_s32(d4, s31);
+ __ vstr(d4, r4, OFFSET_OF(T, f));
+
+ // Test vabs.
+ __ vldr(d1, r4, OFFSET_OF(T, g));
+ __ vabs(d0, d1);
+ __ vstr(d0, r4, OFFSET_OF(T, g));
+ __ vldr(d2, r4, OFFSET_OF(T, h));
+ __ vabs(d0, d2);
+ __ vstr(d0, r4, OFFSET_OF(T, h));
+
+ // Test vneg.
+ __ vldr(d1, r4, OFFSET_OF(T, m));
+ __ vneg(d0, d1);
+ __ vstr(d0, r4, OFFSET_OF(T, m));
+ __ vldr(d1, r4, OFFSET_OF(T, n));
+ __ vneg(d0, d1);
+ __ vstr(d0, r4, OFFSET_OF(T, n));
+
+ __ ldm(ia_w, sp, r4.bit() | fp.bit() | pc.bit());
+
+ CodeDesc desc;
+ assm.GetCode(&desc);
+ Object* code = isolate->heap()->CreateCode(
+ desc,
+ Code::ComputeFlags(Code::STUB),
+ Handle<Code>())->ToObjectChecked();
+ CHECK(code->IsCode());
+#ifdef DEBUG
+ Code::cast(code)->Print();
+#endif
+ F3 f = FUNCTION_CAST<F3>(Code::cast(code)->entry());
+ t.a = 1.5;
+ t.b = 2.75;
+ t.c = 17.17;
+ t.d = 0.0;
+ t.e = 0.0;
+ t.f = 0.0;
+ t.g = -2718.2818;
+ t.h = 31415926.5;
+ t.i = 0;
+ t.m = -2718.2818;
+ t.n = 123.456;
+ t.x = 4.5;
+ t.y = 9.0;
+ Object* dummy = CALL_GENERATED_CODE(f, &t, 0, 0, 0, 0);
+ USE(dummy);
+ CHECK_EQ(4.5, t.y);
+ CHECK_EQ(9.0, t.x);
+ CHECK_EQ(-123.456, t.n);
+ CHECK_EQ(2718.2818, t.m);
+ CHECK_EQ(2, t.i);
+ CHECK_EQ(2718.2818, t.g);
+ CHECK_EQ(31415926.5, t.h);
+ CHECK_EQ(42.0, t.f);
+ CHECK_EQ(1.0, t.e);
+ CHECK_EQ(1.000000059604644775390625, t.d);
+ CHECK_EQ(4.25, t.c);
+ CHECK_EQ(4.25, t.b);
+ CHECK_EQ(1.5, t.a);
+ }
+}
+
+
+TEST(5) {
+ // Test the ARMv7 bitfield instructions.
+ CcTest::InitializeVM();
+ Isolate* isolate = Isolate::Current();
+ HandleScope scope(isolate);
+
+ Assembler assm(isolate, NULL, 0);
+
+ if (CpuFeatures::IsSupported(ARMv7)) {
+ CpuFeatures::Scope scope(ARMv7);
+ // On entry, r0 = 0xAAAAAAAA = 0b10..10101010.
+ __ ubfx(r0, r0, 1, 12); // 0b00..010101010101 = 0x555
+ __ sbfx(r0, r0, 0, 5); // 0b11..111111110101 = -11
+ __ bfc(r0, 1, 3); // 0b11..111111110001 = -15
+ __ mov(r1, Operand(7));
+ __ bfi(r0, r1, 3, 3); // 0b11..111111111001 = -7
+ __ mov(pc, Operand(lr));
+
+ CodeDesc desc;
+ assm.GetCode(&desc);
+ Object* code = isolate->heap()->CreateCode(
+ desc,
+ Code::ComputeFlags(Code::STUB),
+ Handle<Code>())->ToObjectChecked();
+ CHECK(code->IsCode());
+#ifdef DEBUG
+ Code::cast(code)->Print();
+#endif
+ F1 f = FUNCTION_CAST<F1>(Code::cast(code)->entry());
+ int res = reinterpret_cast<int>(
+ CALL_GENERATED_CODE(f, 0xAAAAAAAA, 0, 0, 0, 0));
+ ::printf("f() = %d\n", res);
+ CHECK_EQ(-7, res);
+ }
+}
+
+
+TEST(6) {
+ // Test saturating instructions.
+ CcTest::InitializeVM();
+ Isolate* isolate = Isolate::Current();
+ HandleScope scope(isolate);
+
+ Assembler assm(isolate, NULL, 0);
+
+ if (CpuFeatures::IsSupported(ARMv7)) {
+ CpuFeatures::Scope scope(ARMv7);
+ __ usat(r1, 8, Operand(r0)); // Sat 0xFFFF to 0-255 = 0xFF.
+ __ usat(r2, 12, Operand(r0, ASR, 9)); // Sat (0xFFFF>>9) to 0-4095 = 0x7F.
+ __ usat(r3, 1, Operand(r0, LSL, 16)); // Sat (0xFFFF<<16) to 0-1 = 0x0.
+ __ addi(r0, r1, Operand(r2));
+ __ addi(r0, r0, Operand(r3));
+ __ mov(pc, Operand(lr));
+
+ CodeDesc desc;
+ assm.GetCode(&desc);
+ Object* code = isolate->heap()->CreateCode(
+ desc,
+ Code::ComputeFlags(Code::STUB),
+ Handle<Code>())->ToObjectChecked();
+ CHECK(code->IsCode());
+#ifdef DEBUG
+ Code::cast(code)->Print();
+#endif
+ F1 f = FUNCTION_CAST<F1>(Code::cast(code)->entry());
+ int res = reinterpret_cast<int>(
+ CALL_GENERATED_CODE(f, 0xFFFF, 0, 0, 0, 0));
+ ::printf("f() = %d\n", res);
+ CHECK_EQ(382, res);
+ }
+}
+
+enum VCVTTypes {
+ s32_f64,
+ u32_f64
+};
+
+static void TestRoundingMode(VCVTTypes types,
+ VFPRoundingMode mode,
+ double value,
+ int expected,
+ bool expected_exception = false) {
+ CcTest::InitializeVM();
+ Isolate* isolate = Isolate::Current();
+ HandleScope scope(isolate);
+
+ Assembler assm(isolate, NULL, 0);
+
+ if (CpuFeatures::IsSupported(VFP3)) {
+ CpuFeatures::Scope scope(VFP3);
+
+ Label wrong_exception;
+
+ __ vmrs(r1);
+ // Set custom FPSCR.
+ __ bic(r2, r1, Operand(kVFPRoundingModeMask | kVFPExceptionMask));
+ __ orr(r2, r2, Operand(mode));
+ __ vmsr(r2);
+
+ // Load value, convert, and move back result to r0 if everything went well.
+ __ vmov(d1, value);
+ switch (types) {
+ case s32_f64:
+ __ vcvt_s32_f64(s0, d1, kFPSCRRounding);
+ break;
+
+ case u32_f64:
+ __ vcvt_u32_f64(s0, d1, kFPSCRRounding);
+ break;
+
+ default:
+ UNREACHABLE();
+ break;
+ }
+ // Check for vfp exceptions
+ __ vmrs(r2);
+ __ tst(r2, Operand(kVFPExceptionMask));
+ // Check that we behaved as expected.
+ __ b(&wrong_exception,
+ expected_exception ? eq : ne);
+ // There was no exception. Retrieve the result and return.
+ __ vmov(r0, s0);
+ __ mov(pc, Operand(lr));
+
+ // The exception behaviour is not what we expected.
+ // Load a special value and return.
+ __ bind(&wrong_exception);
+ __ mov(r0, Operand(11223344));
+ __ mov(pc, Operand(lr));
+
+ CodeDesc desc;
+ assm.GetCode(&desc);
+ Object* code = isolate->heap()->CreateCode(
+ desc,
+ Code::ComputeFlags(Code::STUB),
+ Handle<Code>())->ToObjectChecked();
+ CHECK(code->IsCode());
+#ifdef DEBUG
+ Code::cast(code)->Print();
+#endif
+ F1 f = FUNCTION_CAST<F1>(Code::cast(code)->entry());
+ int res = reinterpret_cast<int>(
+ CALL_GENERATED_CODE(f, 0, 0, 0, 0, 0));
+ ::printf("res = %d\n", res);
+ CHECK_EQ(expected, res);
+ }
+}
+
+
+TEST(7) {
+ // Test vfp rounding modes.
+
+ // s32_f64 (double to integer).
+
+ TestRoundingMode(s32_f64, RN, 0, 0);
+ TestRoundingMode(s32_f64, RN, 0.5, 0);
+ TestRoundingMode(s32_f64, RN, -0.5, 0);
+ TestRoundingMode(s32_f64, RN, 1.5, 2);
+ TestRoundingMode(s32_f64, RN, -1.5, -2);
+ TestRoundingMode(s32_f64, RN, 123.7, 124);
+ TestRoundingMode(s32_f64, RN, -123.7, -124);
+ TestRoundingMode(s32_f64, RN, 123456.2, 123456);
+ TestRoundingMode(s32_f64, RN, -123456.2, -123456);
+ TestRoundingMode(s32_f64, RN, static_cast<double>(kMaxInt), kMaxInt);
+ TestRoundingMode(s32_f64, RN, (kMaxInt + 0.49), kMaxInt);
+ TestRoundingMode(s32_f64, RN, (kMaxInt + 1.0), kMaxInt, true);
+ TestRoundingMode(s32_f64, RN, (kMaxInt + 0.5), kMaxInt, true);
+ TestRoundingMode(s32_f64, RN, static_cast<double>(kMinInt), kMinInt);
+ TestRoundingMode(s32_f64, RN, (kMinInt - 0.5), kMinInt);
+ TestRoundingMode(s32_f64, RN, (kMinInt - 1.0), kMinInt, true);
+ TestRoundingMode(s32_f64, RN, (kMinInt - 0.51), kMinInt, true);
+
+ TestRoundingMode(s32_f64, RM, 0, 0);
+ TestRoundingMode(s32_f64, RM, 0.5, 0);
+ TestRoundingMode(s32_f64, RM, -0.5, -1);
+ TestRoundingMode(s32_f64, RM, 123.7, 123);
+ TestRoundingMode(s32_f64, RM, -123.7, -124);
+ TestRoundingMode(s32_f64, RM, 123456.2, 123456);
+ TestRoundingMode(s32_f64, RM, -123456.2, -123457);
+ TestRoundingMode(s32_f64, RM, static_cast<double>(kMaxInt), kMaxInt);
+ TestRoundingMode(s32_f64, RM, (kMaxInt + 0.5), kMaxInt);
+ TestRoundingMode(s32_f64, RM, (kMaxInt + 1.0), kMaxInt, true);
+ TestRoundingMode(s32_f64, RM, static_cast<double>(kMinInt), kMinInt);
+ TestRoundingMode(s32_f64, RM, (kMinInt - 0.5), kMinInt, true);
+ TestRoundingMode(s32_f64, RM, (kMinInt + 0.5), kMinInt);
+
+ TestRoundingMode(s32_f64, RZ, 0, 0);
+ TestRoundingMode(s32_f64, RZ, 0.5, 0);
+ TestRoundingMode(s32_f64, RZ, -0.5, 0);
+ TestRoundingMode(s32_f64, RZ, 123.7, 123);
+ TestRoundingMode(s32_f64, RZ, -123.7, -123);
+ TestRoundingMode(s32_f64, RZ, 123456.2, 123456);
+ TestRoundingMode(s32_f64, RZ, -123456.2, -123456);
+ TestRoundingMode(s32_f64, RZ, static_cast<double>(kMaxInt), kMaxInt);
+ TestRoundingMode(s32_f64, RZ, (kMaxInt + 0.5), kMaxInt);
+ TestRoundingMode(s32_f64, RZ, (kMaxInt + 1.0), kMaxInt, true);
+ TestRoundingMode(s32_f64, RZ, static_cast<double>(kMinInt), kMinInt);
+ TestRoundingMode(s32_f64, RZ, (kMinInt - 0.5), kMinInt);
+ TestRoundingMode(s32_f64, RZ, (kMinInt - 1.0), kMinInt, true);
+
+
+ // u32_f64 (double to integer).
+
+ // Negative values.
+ TestRoundingMode(u32_f64, RN, -0.5, 0);
+ TestRoundingMode(u32_f64, RN, -123456.7, 0, true);
+ TestRoundingMode(u32_f64, RN, static_cast<double>(kMinInt), 0, true);
+ TestRoundingMode(u32_f64, RN, kMinInt - 1.0, 0, true);
+
+ TestRoundingMode(u32_f64, RM, -0.5, 0, true);
+ TestRoundingMode(u32_f64, RM, -123456.7, 0, true);
+ TestRoundingMode(u32_f64, RM, static_cast<double>(kMinInt), 0, true);
+ TestRoundingMode(u32_f64, RM, kMinInt - 1.0, 0, true);
+
+ TestRoundingMode(u32_f64, RZ, -0.5, 0);
+ TestRoundingMode(u32_f64, RZ, -123456.7, 0, true);
+ TestRoundingMode(u32_f64, RZ, static_cast<double>(kMinInt), 0, true);
+ TestRoundingMode(u32_f64, RZ, kMinInt - 1.0, 0, true);
+
+ // Positive values.
+ // kMaxInt is the maximum *signed* integer: 0x7fffffff.
+ static const uint32_t kMaxUInt = 0xffffffffu;
+ TestRoundingMode(u32_f64, RZ, 0, 0);
+ TestRoundingMode(u32_f64, RZ, 0.5, 0);
+ TestRoundingMode(u32_f64, RZ, 123.7, 123);
+ TestRoundingMode(u32_f64, RZ, 123456.2, 123456);
+ TestRoundingMode(u32_f64, RZ, static_cast<double>(kMaxInt), kMaxInt);
+ TestRoundingMode(u32_f64, RZ, (kMaxInt + 0.5), kMaxInt);
+ TestRoundingMode(u32_f64, RZ, (kMaxInt + 1.0),
+ static_cast<uint32_t>(kMaxInt) + 1);
+ TestRoundingMode(u32_f64, RZ, (kMaxUInt + 0.5), kMaxUInt);
+ TestRoundingMode(u32_f64, RZ, (kMaxUInt + 1.0), kMaxUInt, true);
+
+ TestRoundingMode(u32_f64, RM, 0, 0);
+ TestRoundingMode(u32_f64, RM, 0.5, 0);
+ TestRoundingMode(u32_f64, RM, 123.7, 123);
+ TestRoundingMode(u32_f64, RM, 123456.2, 123456);
+ TestRoundingMode(u32_f64, RM, static_cast<double>(kMaxInt), kMaxInt);
+ TestRoundingMode(u32_f64, RM, (kMaxInt + 0.5), kMaxInt);
+ TestRoundingMode(u32_f64, RM, (kMaxInt + 1.0),
+ static_cast<uint32_t>(kMaxInt) + 1);
+ TestRoundingMode(u32_f64, RM, (kMaxUInt + 0.5), kMaxUInt);
+ TestRoundingMode(u32_f64, RM, (kMaxUInt + 1.0), kMaxUInt, true);
+
+ TestRoundingMode(u32_f64, RN, 0, 0);
+ TestRoundingMode(u32_f64, RN, 0.5, 0);
+ TestRoundingMode(u32_f64, RN, 1.5, 2);
+ TestRoundingMode(u32_f64, RN, 123.7, 124);
+ TestRoundingMode(u32_f64, RN, 123456.2, 123456);
+ TestRoundingMode(u32_f64, RN, static_cast<double>(kMaxInt), kMaxInt);
+ TestRoundingMode(u32_f64, RN, (kMaxInt + 0.49), kMaxInt);
+ TestRoundingMode(u32_f64, RN, (kMaxInt + 0.5),
+ static_cast<uint32_t>(kMaxInt) + 1);
+ TestRoundingMode(u32_f64, RN, (kMaxUInt + 0.49), kMaxUInt);
+ TestRoundingMode(u32_f64, RN, (kMaxUInt + 0.5), kMaxUInt, true);
+ TestRoundingMode(u32_f64, RN, (kMaxUInt + 1.0), kMaxUInt, true);
+}
+
+
+TEST(8) {
+ // Test VFP multi load/store with ia_w.
+ CcTest::InitializeVM();
+ Isolate* isolate = Isolate::Current();
+ HandleScope scope(isolate);
+
+ typedef struct {
+ double a;
+ double b;
+ double c;
+ double d;
+ double e;
+ double f;
+ double g;
+ double h;
+ } D;
+ D d;
+
+ typedef struct {
+ float a;
+ float b;
+ float c;
+ float d;
+ float e;
+ float f;
+ float g;
+ float h;
+ } F;
+ F f;
+
+ // Create a function that uses vldm/vstm to move some double and
+ // single precision values around in memory.
+ Assembler assm(isolate, NULL, 0);
+
+ if (CpuFeatures::IsSupported(VFP2)) {
+ CpuFeatures::Scope scope(VFP2);
+
+ __ mov(ip, Operand(sp));
+ __ stm(db_w, sp, r4.bit() | fp.bit() | lr.bit());
+ __ sub(fp, ip, Operand(4));
+
+ __ addi(r4, r0, Operand(OFFSET_OF(D, a)));
+ __ vldm(ia_w, r4, d0, d3);
+ __ vldm(ia_w, r4, d4, d7);
+
+ __ addi(r4, r0, Operand(OFFSET_OF(D, a)));
+ __ vstm(ia_w, r4, d6, d7);
+ __ vstm(ia_w, r4, d0, d5);
+
+ __ addi(r4, r1, Operand(OFFSET_OF(F, a)));
+ __ vldm(ia_w, r4, s0, s3);
+ __ vldm(ia_w, r4, s4, s7);
+
+ __ addi(r4, r1, Operand(OFFSET_OF(F, a)));
+ __ vstm(ia_w, r4, s6, s7);
+ __ vstm(ia_w, r4, s0, s5);
+
+ __ ldm(ia_w, sp, r4.bit() | fp.bit() | pc.bit());
+
+ CodeDesc desc;
+ assm.GetCode(&desc);
+ Object* code = isolate->heap()->CreateCode(
+ desc,
+ Code::ComputeFlags(Code::STUB),
+ Handle<Code>())->ToObjectChecked();
+ CHECK(code->IsCode());
+#ifdef DEBUG
+ Code::cast(code)->Print();
+#endif
+ F4 fn = FUNCTION_CAST<F4>(Code::cast(code)->entry());
+ d.a = 1.1;
+ d.b = 2.2;
+ d.c = 3.3;
+ d.d = 4.4;
+ d.e = 5.5;
+ d.f = 6.6;
+ d.g = 7.7;
+ d.h = 8.8;
+
+ f.a = 1.0;
+ f.b = 2.0;
+ f.c = 3.0;
+ f.d = 4.0;
+ f.e = 5.0;
+ f.f = 6.0;
+ f.g = 7.0;
+ f.h = 8.0;
+
+ Object* dummy = CALL_GENERATED_CODE(fn, &d, &f, 0, 0, 0);
+ USE(dummy);
+
+ CHECK_EQ(7.7, d.a);
+ CHECK_EQ(8.8, d.b);
+ CHECK_EQ(1.1, d.c);
+ CHECK_EQ(2.2, d.d);
+ CHECK_EQ(3.3, d.e);
+ CHECK_EQ(4.4, d.f);
+ CHECK_EQ(5.5, d.g);
+ CHECK_EQ(6.6, d.h);
+
+ CHECK_EQ(7.0, f.a);
+ CHECK_EQ(8.0, f.b);
+ CHECK_EQ(1.0, f.c);
+ CHECK_EQ(2.0, f.d);
+ CHECK_EQ(3.0, f.e);
+ CHECK_EQ(4.0, f.f);
+ CHECK_EQ(5.0, f.g);
+ CHECK_EQ(6.0, f.h);
+ }
+}
+
+
+TEST(9) {
+ // Test VFP multi load/store with ia.
+ CcTest::InitializeVM();
+ Isolate* isolate = Isolate::Current();
+ HandleScope scope(isolate);
+
+ typedef struct {
+ double a;
+ double b;
+ double c;
+ double d;
+ double e;
+ double f;
+ double g;
+ double h;
+ } D;
+ D d;
+
+ typedef struct {
+ float a;
+ float b;
+ float c;
+ float d;
+ float e;
+ float f;
+ float g;
+ float h;
+ } F;
+ F f;
+
+ // Create a function that uses vldm/vstm to move some double and
+ // single precision values around in memory.
+ Assembler assm(isolate, NULL, 0);
+
+ if (CpuFeatures::IsSupported(VFP2)) {
+ CpuFeatures::Scope scope(VFP2);
+
+ __ mov(ip, Operand(sp));
+ __ stm(db_w, sp, r4.bit() | fp.bit() | lr.bit());
+ __ sub(fp, ip, Operand(4));
+
+ __ addi(r4, r0, Operand(OFFSET_OF(D, a)));
+ __ vldm(ia, r4, d0, d3);
+ __ addi(r4, r4, Operand(4 * 8));
+ __ vldm(ia, r4, d4, d7);
+
+ __ addi(r4, r0, Operand(OFFSET_OF(D, a)));
+ __ vstm(ia, r4, d6, d7);
+ __ addi(r4, r4, Operand(2 * 8));
+ __ vstm(ia, r4, d0, d5);
+
+ __ addi(r4, r1, Operand(OFFSET_OF(F, a)));
+ __ vldm(ia, r4, s0, s3);
+ __ addi(r4, r4, Operand(4 * 4));
+ __ vldm(ia, r4, s4, s7);
+
+ __ addi(r4, r1, Operand(OFFSET_OF(F, a)));
+ __ vstm(ia, r4, s6, s7);
+ __ addi(r4, r4, Operand(2 * 4));
+ __ vstm(ia, r4, s0, s5);
+
+ __ ldm(ia_w, sp, r4.bit() | fp.bit() | pc.bit());
+
+ CodeDesc desc;
+ assm.GetCode(&desc);
+ Object* code = isolate->heap()->CreateCode(
+ desc,
+ Code::ComputeFlags(Code::STUB),
+ Handle<Code>())->ToObjectChecked();
+ CHECK(code->IsCode());
+#ifdef DEBUG
+ Code::cast(code)->Print();
+#endif
+ F4 fn = FUNCTION_CAST<F4>(Code::cast(code)->entry());
+ d.a = 1.1;
+ d.b = 2.2;
+ d.c = 3.3;
+ d.d = 4.4;
+ d.e = 5.5;
+ d.f = 6.6;
+ d.g = 7.7;
+ d.h = 8.8;
+
+ f.a = 1.0;
+ f.b = 2.0;
+ f.c = 3.0;
+ f.d = 4.0;
+ f.e = 5.0;
+ f.f = 6.0;
+ f.g = 7.0;
+ f.h = 8.0;
+
+ Object* dummy = CALL_GENERATED_CODE(fn, &d, &f, 0, 0, 0);
+ USE(dummy);
+
+ CHECK_EQ(7.7, d.a);
+ CHECK_EQ(8.8, d.b);
+ CHECK_EQ(1.1, d.c);
+ CHECK_EQ(2.2, d.d);
+ CHECK_EQ(3.3, d.e);
+ CHECK_EQ(4.4, d.f);
+ CHECK_EQ(5.5, d.g);
+ CHECK_EQ(6.6, d.h);
+
+ CHECK_EQ(7.0, f.a);
+ CHECK_EQ(8.0, f.b);
+ CHECK_EQ(1.0, f.c);
+ CHECK_EQ(2.0, f.d);
+ CHECK_EQ(3.0, f.e);
+ CHECK_EQ(4.0, f.f);
+ CHECK_EQ(5.0, f.g);
+ CHECK_EQ(6.0, f.h);
+ }
+}
+
+
+TEST(10) {
+ // Test VFP multi load/store with db_w.
+ CcTest::InitializeVM();
+ Isolate* isolate = Isolate::Current();
+ HandleScope scope(isolate);
+
+ typedef struct {
+ double a;
+ double b;
+ double c;
+ double d;
+ double e;
+ double f;
+ double g;
+ double h;
+ } D;
+ D d;
+
+ typedef struct {
+ float a;
+ float b;
+ float c;
+ float d;
+ float e;
+ float f;
+ float g;
+ float h;
+ } F;
+ F f;
+
+  // Create a function that uses vldm/vstm to move some double- and
+  // single-precision values around in memory.
+ Assembler assm(isolate, NULL, 0);
+
+ if (CpuFeatures::IsSupported(VFP2)) {
+ CpuFeatures::Scope scope(VFP2);
+
+ __ mov(ip, Operand(sp));
+ __ stm(db_w, sp, r4.bit() | fp.bit() | lr.bit());
+ __ sub(fp, ip, Operand(4));
+
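+  // "db_w" (decrement before, writeback) lowers the base register before
+  // each transfer, so r4 starts one element past the end of the block.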
+ __ addi(r4, r0, Operand(OFFSET_OF(D, h) + 8));
+ __ vldm(db_w, r4, d4, d7);
+ __ vldm(db_w, r4, d0, d3);
+
+ __ addi(r4, r0, Operand(OFFSET_OF(D, h) + 8));
+ __ vstm(db_w, r4, d0, d5);
+ __ vstm(db_w, r4, d6, d7);
+
+ __ addi(r4, r1, Operand(OFFSET_OF(F, h) + 4));
+ __ vldm(db_w, r4, s4, s7);
+ __ vldm(db_w, r4, s0, s3);
+
+ __ addi(r4, r1, Operand(OFFSET_OF(F, h) + 4));
+ __ vstm(db_w, r4, s0, s5);
+ __ vstm(db_w, r4, s6, s7);
+
+ __ ldm(ia_w, sp, r4.bit() | fp.bit() | pc.bit());
+
+ CodeDesc desc;
+ assm.GetCode(&desc);
+ Object* code = isolate->heap()->CreateCode(
+ desc,
+ Code::ComputeFlags(Code::STUB),
+ Handle<Code>())->ToObjectChecked();
+ CHECK(code->IsCode());
+#ifdef DEBUG
+ Code::cast(code)->Print();
+#endif
+ F4 fn = FUNCTION_CAST<F4>(Code::cast(code)->entry());
+ d.a = 1.1;
+ d.b = 2.2;
+ d.c = 3.3;
+ d.d = 4.4;
+ d.e = 5.5;
+ d.f = 6.6;
+ d.g = 7.7;
+ d.h = 8.8;
+
+ f.a = 1.0;
+ f.b = 2.0;
+ f.c = 3.0;
+ f.d = 4.0;
+ f.e = 5.0;
+ f.f = 6.0;
+ f.g = 7.0;
+ f.h = 8.0;
+
+ Object* dummy = CALL_GENERATED_CODE(fn, &d, &f, 0, 0, 0);
+ USE(dummy);
+
+ CHECK_EQ(7.7, d.a);
+ CHECK_EQ(8.8, d.b);
+ CHECK_EQ(1.1, d.c);
+ CHECK_EQ(2.2, d.d);
+ CHECK_EQ(3.3, d.e);
+ CHECK_EQ(4.4, d.f);
+ CHECK_EQ(5.5, d.g);
+ CHECK_EQ(6.6, d.h);
+
+ CHECK_EQ(7.0, f.a);
+ CHECK_EQ(8.0, f.b);
+ CHECK_EQ(1.0, f.c);
+ CHECK_EQ(2.0, f.d);
+ CHECK_EQ(3.0, f.e);
+ CHECK_EQ(4.0, f.f);
+ CHECK_EQ(5.0, f.g);
+ CHECK_EQ(6.0, f.h);
+ }
+}
+
+
+TEST(11) {
+ // Test instructions using the carry flag.
+ CcTest::InitializeVM();
+ Isolate* isolate = Isolate::Current();
+ HandleScope scope(isolate);
+
+ typedef struct {
+ int32_t a;
+ int32_t b;
+ int32_t c;
+ int32_t d;
+ } I;
+ I i;
+
+ i.a = 0xabcd0001;
+ i.b = 0xabcd0000;
+
+ Assembler assm(isolate, NULL, 0);
+
+ // Test HeapObject untagging.
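+  // ASR #1 shifts the low (tag) bit into the carry flag; the adc, executed
+  // only when the carry is set, computes r + r + 1 and so restores the
+  // original odd value, while an even value is left halved.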
+ __ ldr(r1, MemOperand(r0, OFFSET_OF(I, a)));
+ __ mov(r1, Operand(r1, ASR, 1), SetCC);
+ __ adc(r1, r1, Operand(r1), LeaveCC, cs);
+ __ str(r1, MemOperand(r0, OFFSET_OF(I, a)));
+
+ __ ldr(r2, MemOperand(r0, OFFSET_OF(I, b)));
+ __ mov(r2, Operand(r2, ASR, 1), SetCC);
+ __ adc(r2, r2, Operand(r2), LeaveCC, cs);
+ __ str(r2, MemOperand(r0, OFFSET_OF(I, b)));
+
+ // Test corner cases.
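+  // Shifting 0xffffffff right by one sets the carry, so the adc yields
+  // 0xffffffff + 0 + 1 == 0; with the carry clear it yields 0xffffffff.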
+ __ mov(r1, Operand(0xffffffff));
+ __ mov(r2, Operand::Zero());
+ __ mov(r3, Operand(r1, ASR, 1), SetCC); // Set the carry.
+ __ adc(r3, r1, Operand(r2));
+ __ str(r3, MemOperand(r0, OFFSET_OF(I, c)));
+
+ __ mov(r1, Operand(0xffffffff));
+ __ mov(r2, Operand::Zero());
+  __ mov(r3, Operand(r2, ASR, 1), SetCC);  // Clear the carry.
+ __ adc(r3, r1, Operand(r2));
+ __ str(r3, MemOperand(r0, OFFSET_OF(I, d)));
+
+ __ mov(pc, Operand(lr));
+
+ CodeDesc desc;
+ assm.GetCode(&desc);
+ Object* code = isolate->heap()->CreateCode(
+ desc,
+ Code::ComputeFlags(Code::STUB),
+ Handle<Code>())->ToObjectChecked();
+ CHECK(code->IsCode());
+#ifdef DEBUG
+ Code::cast(code)->Print();
+#endif
+ F3 f = FUNCTION_CAST<F3>(Code::cast(code)->entry());
+ Object* dummy = CALL_GENERATED_CODE(f, &i, 0, 0, 0, 0);
+ USE(dummy);
+
+ CHECK_EQ(0xabcd0001, i.a);
+ CHECK_EQ(static_cast<int32_t>(0xabcd0000) >> 1, i.b);
+ CHECK_EQ(0x00000000, i.c);
+ CHECK_EQ(0xffffffff, i.d);
+}
+
+
+TEST(12) {
+ // Test chaining of label usages within instructions (issue 1644).
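+  // Until a label is bound, every forward branch to it is linked into a
+  // chain threaded through the branch instructions themselves; bind() walks
+  // that chain and patches each use. Two branches to one unbound label
+  // exercise the chaining path.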
+ CcTest::InitializeVM();
+ Isolate* isolate = Isolate::Current();
+ HandleScope scope(isolate);
+
+ Assembler assm(isolate, NULL, 0);
+ Label target;
+ __ b(eq, &target);
+ __ b(ne, &target);
+ __ bind(&target);
+ __ nop();
+}
+#endif
+
+#undef __
--- /dev/null
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+
+#include <stdlib.h>
+
+#include "src/v8.h"
+
+#include "src/debug.h"
+#include "src/disasm.h"
+#include "src/disassembler.h"
+#include "src/macro-assembler.h"
+#include "src/serialize.h"
+#include "test/cctest/cctest.h"
+
+using namespace v8::internal;
+
+
+bool DisassembleAndCompare(byte* pc, const char* compare_string) {
+ disasm::NameConverter converter;
+ disasm::Disassembler disasm(converter);
+ EmbeddedVector<char, 128> disasm_buffer;
+
+ disasm.InstructionDecode(disasm_buffer, pc);
+
+ if (strcmp(compare_string, disasm_buffer.start()) != 0) {
+ fprintf(stderr,
+ "expected: \n"
+ "%s\n"
+ "disassembled: \n"
+ "%s\n\n",
+ compare_string, disasm_buffer.start());
+ return false;
+ }
+ return true;
+}
+
+
+// Set up V8 to a state where we can at least run the assembler and
+// disassembler. Declare the variables and allocate the data structures used
+// in the rest of the macros.
+#define SET_UP() \
+ CcTest::InitializeVM(); \
+ Isolate* isolate = Isolate::Current(); \
+ HandleScope scope(isolate); \
+ byte* buffer = reinterpret_cast<byte*>(malloc(4 * 1024)); \
+ Assembler assm(isolate, buffer, 4 * 1024); \
+ bool failure = false;
+
+
+// This macro assembles one instruction using the preallocated assembler and
+// disassembles the generated instruction, comparing the output to the expected
+// value. If the comparison fails, an error message is printed, but the test
+// continues to run until the end.
+#define COMPARE(asm_, compare_string) \
+ { \
+ int pc_offset = assm.pc_offset(); \
+ byte* progcounter = &buffer[pc_offset]; \
+ assm.asm_; \
+ if (!DisassembleAndCompare(progcounter, compare_string)) failure = true; \
+ }
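+
+// For example, COMPARE(addi(r0, ip, Operand(63)), "380c003f addi r0, r12, 63")
+// checks both the emitted instruction word and its disassembled form.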
+
+// Force emission of any pending literals into a pool.
+#define EMIT_PENDING_LITERALS() assm.CheckConstPool(true, false)
+
+
+// Verify that all invocations of the COMPARE macro passed successfully.
+// Exit with a failure if at least one of the tests failed.
+#define VERIFY_RUN() \
+ if (failure) { \
+ V8_Fatal(__FILE__, __LINE__, "PPC Disassembler tests failed.\n"); \
+ }
+
+TEST(DisasmPPC) {
+ SET_UP();
+
+ COMPARE(addc(r9, r7, r9), "7d274814 addc r9, r7, r9");
+ COMPARE(addic(r3, r5, Operand(20)), "30650014 addic r3, r5, 20");
+ COMPARE(addi(r0, ip, Operand(63)), "380c003f addi r0, r12, 63");
+ COMPARE(add(r5, r7, r0), "7ca70214 add r5, r7, r0");
+ COMPARE(addze(r0, r0, LeaveOE, SetRC), "7c000195 addze. r0, r0");
+ COMPARE(andi(r0, r3, Operand(4)), "70600004 andi. r0, r3, 4");
+ COMPARE(and_(r3, r6, r5), "7cc32838 and r3, r6, r5");
+ COMPARE(and_(r6, r0, r6, SetRC), "7c063039 and. r6, r0, r6");
+  // Skipping branches for now.
+ COMPARE(bctr(), "4e800420 bctr");
+ COMPARE(blr(), "4e800020 blr");
+ COMPARE(bclr(BA, SetLK), "4e800021 blrl");
+  // Skipping call; it is only used by the simulator.
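+  // On PPC64 the compares below default to doubleword width: bit 10 of the
+  // encoding (the L field) is set, which is why each 64-bit expected word
+  // differs from its 32-bit counterpart by 0x00200000.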
+#if V8_TARGET_ARCH_PPC64
+ COMPARE(cmpi(r0, Operand(5)), "2fa00005 cmpi r0, 5");
+#else
+ COMPARE(cmpi(r0, Operand(5)), "2f800005 cmpi r0, 5");
+#endif
+#if V8_TARGET_ARCH_PPC64
+ COMPARE(cmpl(r6, r7), "7fa63840 cmpl r6, r7");
+#else
+ COMPARE(cmpl(r6, r7), "7f863840 cmpl r6, r7");
+#endif
+#if V8_TARGET_ARCH_PPC64
+ COMPARE(cmp(r5, r11), "7fa55800 cmp r5, r11");
+#else
+ COMPARE(cmp(r5, r11), "7f855800 cmp r5, r11");
+#endif
+  // Skipping crxor; its disassembly is still incomplete.
+ COMPARE(lbz(r4, MemOperand(r4, 7)), "88840007 lbz r4, 7(r4)");
+ COMPARE(lfd(d0, MemOperand(sp, 128)), "c8010080 lfd d0, 128(sp)");
+ COMPARE(li(r0, Operand(16)), "38000010 li r0, 16");
+ COMPARE(lis(r8, Operand(22560)), "3d005820 lis r8, 22560");
+ COMPARE(lwz(ip, MemOperand(r19, 44)), "8193002c lwz r12, 44(r19)");
+ COMPARE(lwzx(r0, MemOperand(r5, ip)), "7c05602e lwzx r0, r5, r12");
+ COMPARE(mflr(r0), "7c0802a6 mflr r0");
+ COMPARE(mr(r15, r4), "7c8f2378 mr r15, r4");
+ COMPARE(mtctr(r0), "7c0903a6 mtctr r0");
+ COMPARE(mtlr(r15), "7de803a6 mtlr r15");
+ COMPARE(ori(r8, r8, Operand(42849)), "6108a761 ori r8, r8, 42849");
+ COMPARE(orx(r5, r3, r4), "7c652378 or r5, r3, r4");
+ COMPARE(rlwinm(r4, r3, 2, 0, 29), "5464103a rlwinm r4, r3, 2, 0, 29");
+ COMPARE(rlwinm(r0, r3, 0, 31, 31, SetRC),
+ "546007ff rlwinm. r0, r3, 0, 31, 31");
+ COMPARE(srawi(r3, r6, 1), "7cc30e70 srawi r3,r6,1");
+ COMPARE(stb(r5, MemOperand(r11, 11)), "98ab000b stb r5, 11(r11)");
+ COMPARE(stfd(d2, MemOperand(sp, 8)), "d8410008 stfd d2, 8(sp)");
+ COMPARE(stw(r16, MemOperand(sp, 64)), "92010040 stw r16, 64(sp)");
+ COMPARE(stwu(r3, MemOperand(sp, -4)), "9461fffc stwu r3, -4(sp)");
+ COMPARE(sub(r3, r3, r4), "7c641850 subf r3, r4, r3");
+ COMPARE(sub(r0, r9, r8, LeaveOE, SetRC), "7c084851 subf. r0, r8, r9");
+ COMPARE(xor_(r6, r5, r4), "7ca62278 xor r6, r5, r4");
+
+ VERIFY_RUN();
+}
__ pop(kRootRegister);
__ jr(ra);
__ nop();
+#elif V8_TARGET_ARCH_PPC
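+  // On PPC ABIs that call through function descriptors (64-bit big-endian
+  // ELF), function_descriptor() emits the descriptor needed to invoke this
+  // code via a C function pointer; the hash is returned in r3, the PPC
+  // return-value register.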
+ __ function_descriptor();
+ __ push(kRootRegister);
+ __ InitializeRootRegister();
+ __ li(r3, Operand(key));
+ __ GetNumberHash(r3, ip);
+ __ pop(kRootRegister);
+ __ blr();
#else
#error Unsupported architecture.
#endif
CHECK(value->IsNumber());
CHECK_EQ(Smi::kMaxValue, Handle<Smi>::cast(value)->value());
-#if !defined(V8_TARGET_ARCH_X64) && !defined(V8_TARGET_ARCH_ARM64) && \
- !defined(V8_TARGET_ARCH_MIPS64)
+#if !defined(V8_TARGET_ARCH_64_BIT)
// TODO(lrn): We need a NumberFromIntptr function in order to test this.
value = factory->NewNumberFromInt(Smi::kMinValue - 1);
CHECK(value->IsHeapNumber());
__asm__ __volatile__("sw $sp, %0" : "=g"(sp_addr));
#elif V8_HOST_ARCH_MIPS64
__asm__ __volatile__("sd $sp, %0" : "=g"(sp_addr));
+#elif defined(__PPC64__) || defined(_ARCH_PPC64)
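+  // GPR 1 is the stack pointer in the PPC ABI; "std" stores it as a
+  // doubleword on 64-bit targets, "stw" as a word on 32-bit targets.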
+ __asm__ __volatile__("std 1, %0" : "=g"(sp_addr));
+#elif defined(__PPC__) || defined(_ARCH_PPC)
+ __asm__ __volatile__("stw 1, %0" : "=g"(sp_addr));
#else
#error Host architecture was not detected as supported by v8
#endif
#include "src/arm64/macro-assembler-arm64.h"
#include "src/arm64/regexp-macro-assembler-arm64.h"
#endif
+#if V8_TARGET_ARCH_PPC
+#include "src/ppc/assembler-ppc.h"
+#include "src/ppc/macro-assembler-ppc.h"
+#include "src/ppc/regexp-macro-assembler-ppc.h"
+#endif
#if V8_TARGET_ARCH_MIPS
#include "src/mips/assembler-mips.h"
#include "src/mips/macro-assembler-mips.h"
typedef RegExpMacroAssemblerARM ArchRegExpMacroAssembler;
#elif V8_TARGET_ARCH_ARM64
typedef RegExpMacroAssemblerARM64 ArchRegExpMacroAssembler;
+#elif V8_TARGET_ARCH_PPC
+typedef RegExpMacroAssemblerPPC ArchRegExpMacroAssembler;
#elif V8_TARGET_ARCH_MIPS
typedef RegExpMacroAssemblerMIPS ArchRegExpMacroAssembler;
#elif V8_TARGET_ARCH_MIPS64
static const int invalid = String::kMaxLength + 1; \
HandleScope scope(isolate); \
Vector<TYPE> dummy = Vector<TYPE>::New(invalid); \
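+  /* Zero the buffer so the FUN call below never reads uninitialized */      \
+  /* memory while handling the over-long input. */                           \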
+ memset(dummy.start(), 0x0, dummy.length() * sizeof(TYPE)); \
CHECK(isolate->factory()->FUN(Vector<const TYPE>::cast(dummy)).is_null()); \
memset(dummy.start(), 0x20, dummy.length() * sizeof(TYPE)); \
CHECK(isolate->has_pending_exception()); \
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// On MacOS X 10.7.5, this test needs a stack size of at least 788 kBytes.
+// On PPC64, this test needs a stack size of at least 698 kBytes.
// Flags: --stack-size=800
// Flags: --turbo-deoptimization
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Flags: --stack-size=1300
+
// Create a regexp in the form of a?a?...a? so that fully
// traversing the entire graph would be prohibitively expensive.
// This should not cause time out.
'compiler/x64/instruction-selector-x64-unittest.cc',
],
}],
+ ['v8_target_arch=="ppc" or v8_target_arch=="ppc64"', {
+ 'sources': [ ### gcmole(arch:ppc) ###
+ 'compiler/ppc/instruction-selector-ppc-unittest.cc',
+ ],
+ }],
['component=="shared_library"', {
# compiler-unittests can't be built against a shared library, so we
# need to depend on the underlying static target in that case.
'../../src/compiler/x64/linkage-x64.cc',
],
}],
+ ['v8_target_arch=="ppc" or v8_target_arch=="ppc64"', {
+ 'sources': [ ### gcmole(arch:ppc) ###
+ '../../src/ppc/assembler-ppc-inl.h',
+ '../../src/ppc/assembler-ppc.cc',
+ '../../src/ppc/assembler-ppc.h',
+ '../../src/ppc/builtins-ppc.cc',
+ '../../src/ppc/code-stubs-ppc.cc',
+ '../../src/ppc/code-stubs-ppc.h',
+ '../../src/ppc/codegen-ppc.cc',
+ '../../src/ppc/codegen-ppc.h',
+ '../../src/ppc/constants-ppc.h',
+ '../../src/ppc/constants-ppc.cc',
+ '../../src/ppc/cpu-ppc.cc',
+ '../../src/ppc/debug-ppc.cc',
+ '../../src/ppc/deoptimizer-ppc.cc',
+ '../../src/ppc/disasm-ppc.cc',
+ '../../src/ppc/frames-ppc.cc',
+ '../../src/ppc/frames-ppc.h',
+ '../../src/ppc/full-codegen-ppc.cc',
+ '../../src/ppc/interface-descriptors-ppc.cc',
+ '../../src/ppc/interface-descriptors-ppc.h',
+ '../../src/ppc/lithium-ppc.cc',
+ '../../src/ppc/lithium-ppc.h',
+ '../../src/ppc/lithium-codegen-ppc.cc',
+ '../../src/ppc/lithium-codegen-ppc.h',
+ '../../src/ppc/lithium-gap-resolver-ppc.cc',
+ '../../src/ppc/lithium-gap-resolver-ppc.h',
+ '../../src/ppc/macro-assembler-ppc.cc',
+ '../../src/ppc/macro-assembler-ppc.h',
+ '../../src/ppc/regexp-macro-assembler-ppc.cc',
+ '../../src/ppc/regexp-macro-assembler-ppc.h',
+ '../../src/ppc/simulator-ppc.cc',
+ '../../src/compiler/ppc/code-generator-ppc.cc',
+ '../../src/compiler/ppc/instruction-codes-ppc.h',
+ '../../src/compiler/ppc/instruction-selector-ppc.cc',
+ '../../src/compiler/ppc/linkage-ppc.cc',
+ '../../src/ic/ppc/access-compiler-ppc.cc',
+ '../../src/ic/ppc/handler-compiler-ppc.cc',
+ '../../src/ic/ppc/ic-ppc.cc',
+ '../../src/ic/ppc/ic-compiler-ppc.cc',
+ '../../src/ic/ppc/stub-cache-ppc.cc',
+ ],
+ }],
['OS=="win"', {
'variables': {
'gyp_generators': '<!(echo $GYP_GENERATORS)',
'../../src/base/atomicops_internals_atomicword_compat.h',
'../../src/base/atomicops_internals_mac.h',
'../../src/base/atomicops_internals_mips_gcc.h',
+ '../../src/base/atomicops_internals_ppc_gcc.h',
'../../src/base/atomicops_internals_tsan.h',
'../../src/base/atomicops_internals_x86_gcc.cc',
'../../src/base/atomicops_internals_x86_gcc.h',
"android_ia32",
"arm",
"ia32",
       "mipsel",
       "nacl_ia32",
       "nacl_x64",
+      "ppc",
+      "ppc64",
"mips64el",
"nacl_ia32",
"nacl_x64",
+ "ppc",
+ "ppc64",
"x64",
"x32",
"arm64"]
VARIABLES = {ALWAYS: True}
for var in ["debug", "release", "android_arm", "android_arm64", "android_ia32", "android_x87",
"arm", "arm64", "ia32", "mips", "mipsel", "mips64el", "x64", "x87", "nacl_ia32",
- "nacl_x64", "macos", "windows", "linux"]:
+ "nacl_x64", "ppc", "ppc64", "macos", "windows", "linux", "aix"]:
VARIABLES[var] = var
return 'ia32'
elif machine == 'amd64':
return 'ia32'
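+  # 64-bit PowerPC hosts report a machine type of 'ppc64'; treat them as
+  # 'ppc'.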
+  elif machine == 'ppc64':
+    return 'ppc'
else:
return None