From e4c5b84652cd5f5b610665f7adbb4f9dfbb2b0be Mon Sep 17 00:00:00 2001 From: Sven Panne Date: Fri, 16 Jan 2015 08:42:00 +0100 Subject: [PATCH] Contribution of PowerPC port (continuation of 422063005) Contribution of PowerPC port (continuation of 422063005). The inital patch covers the core changes to the common files. Subsequent patches will cover changes to common files to support AIX and to update the ppc directories so they are current with the changes in the rest of the project. This is based off of the GitHub repository https://github.com/andrewlow/v8ppc BUG= R=svenpanne@chromium.org, danno@chromium.org, sevnpanne@chromium.org Review URL: https://codereview.chromium.org/817143002 Cr-Commit-Position: refs/heads/master@{#26091} --- Makefile | 2 +- build/toolchain.gypi | 35 +- src/assembler.cc | 6 + src/bailout-reason.h | 7 + src/base/atomicops.h | 2 + src/base/atomicops_internals_ppc_gcc.h | 168 +++ src/base/build_config.h | 21 +- src/base/cpu.cc | 50 + src/base/cpu.h | 13 + src/base/platform/platform-posix.cc | 14 +- src/code-stubs.h | 25 +- src/codegen.h | 2 + src/compiler/instruction-codes.h | 2 + src/flag-definitions.h | 5 +- src/frames-inl.h | 2 + src/full-codegen.h | 13 +- src/globals.h | 11 +- src/heap/heap.cc | 4 + src/hydrogen-instructions.cc | 2 + src/hydrogen.cc | 2 + src/isolate.cc | 4 +- src/isolate.h | 25 +- src/jsregexp.cc | 5 + src/lithium-allocator-inl.h | 2 + src/lithium-codegen.cc | 3 + src/lithium-inl.h | 2 + src/lithium.cc | 3 + src/log.cc | 2 + src/macro-assembler.h | 7 + src/objects.h | 2 + src/ppc/assembler-ppc.cc | 5 +- src/ppc/code-stubs-ppc.cc | 2 - src/ppc/codegen-ppc.cc | 4 - src/ppc/regexp-macro-assembler-ppc.cc | 2 - src/regexp-macro-assembler.h | 1 + src/sampler.cc | 12 +- src/serialize.cc | 14 +- src/simulator.h | 2 + src/utils.h | 133 +++ test/cctest/cctest.gyp | 14 + test/cctest/compiler/call-tester.h | 5 +- test/cctest/test-assembler-ppc.cc | 1060 ++++++++++++++++++ test/cctest/test-disasm-ppc.cc | 155 +++ 
test/cctest/test-hashing.cc | 8 + test/cctest/test-heap.cc | 3 +- test/cctest/test-platform.cc | 4 + test/cctest/test-regexp.cc | 7 + test/cctest/test-strings.cc | 1 + test/mjsunit/big-array-literal.js | 1 + test/mjsunit/regress/regress-crbug-178790.js | 2 + test/unittests/unittests.gyp | 5 + tools/gyp/v8.gyp | 44 + tools/run-deopt-fuzzer.py | 2 + tools/run-tests.py | 2 + tools/testrunner/local/statusfile.py | 2 +- tools/testrunner/local/utils.py | 2 + 56 files changed, 1876 insertions(+), 57 deletions(-) create mode 100644 src/base/atomicops_internals_ppc_gcc.h create mode 100644 test/cctest/test-assembler-ppc.cc create mode 100644 test/cctest/test-disasm-ppc.cc diff --git a/Makefile b/Makefile index e2edd5093..6f29e76e1 100644 --- a/Makefile +++ b/Makefile @@ -232,7 +232,7 @@ endif # Architectures and modes to be compiled. Consider these to be internal # variables, don't override them (use the targets instead). -ARCHES = ia32 x64 x32 arm arm64 mips mipsel mips64el x87 +ARCHES = ia32 x64 x32 arm arm64 mips mipsel mips64el x87 ppc ppc64 DEFAULT_ARCHES = ia32 x64 arm MODES = release debug optdebug DEFAULT_MODES = release debug diff --git a/build/toolchain.gypi b/build/toolchain.gypi index 80856ce78..ee8efe13c 100644 --- a/build/toolchain.gypi +++ b/build/toolchain.gypi @@ -32,6 +32,7 @@ 'msvs_use_common_release': 0, 'clang%': 0, 'v8_target_arch%': '<(target_arch)', + 'v8_host_byteorder%': ' // cpuinfo #endif +#if V8_OS_LINUX && V8_HOST_ARCH_PPC +#include +#endif #if V8_OS_POSIX #include // sysconf() #endif @@ -580,7 +583,54 @@ CPU::CPU() delete[] part; } +#elif V8_HOST_ARCH_PPC + +#ifndef USE_SIMULATOR +#if V8_OS_LINUX + // Read processor info from /proc/self/auxv. 
+ char* auxv_cpu_type = NULL; + FILE* fp = fopen("/proc/self/auxv", "r"); + if (fp != NULL) { +#if V8_TARGET_ARCH_PPC64 + Elf64_auxv_t entry; +#else + Elf32_auxv_t entry; #endif + for (;;) { + size_t n = fread(&entry, sizeof(entry), 1, fp); + if (n == 0 || entry.a_type == AT_NULL) { + break; + } + if (entry.a_type == AT_PLATFORM) { + auxv_cpu_type = reinterpret_cast(entry.a_un.a_val); + break; + } + } + fclose(fp); + } + + part_ = -1; + if (auxv_cpu_type) { + if (strcmp(auxv_cpu_type, "power8") == 0) { + part_ = PPC_POWER8; + } else if (strcmp(auxv_cpu_type, "power7") == 0) { + part_ = PPC_POWER7; + } else if (strcmp(auxv_cpu_type, "power6") == 0) { + part_ = PPC_POWER6; + } else if (strcmp(auxv_cpu_type, "power5") == 0) { + part_ = PPC_POWER5; + } else if (strcmp(auxv_cpu_type, "ppc970") == 0) { + part_ = PPC_G5; + } else if (strcmp(auxv_cpu_type, "ppc7450") == 0) { + part_ = PPC_G4; + } else if (strcmp(auxv_cpu_type, "pa6t") == 0) { + part_ = PPC_PA6T; + } + } + +#endif // V8_OS_LINUX +#endif // !USE_SIMULATOR +#endif // V8_HOST_ARCH_PPC } } } // namespace v8::base diff --git a/src/base/cpu.h b/src/base/cpu.h index 8c41f9d77..38465f5b3 100644 --- a/src/base/cpu.h +++ b/src/base/cpu.h @@ -50,6 +50,8 @@ class CPU FINAL { int variant() const { return variant_; } static const int NVIDIA_DENVER = 0x0; int part() const { return part_; } + + // ARM-specific part codes static const int ARM_CORTEX_A5 = 0xc05; static const int ARM_CORTEX_A7 = 0xc07; static const int ARM_CORTEX_A8 = 0xc08; @@ -57,6 +59,17 @@ class CPU FINAL { static const int ARM_CORTEX_A12 = 0xc0c; static const int ARM_CORTEX_A15 = 0xc0f; + // PPC-specific part codes + enum { + PPC_POWER5, + PPC_POWER6, + PPC_POWER7, + PPC_POWER8, + PPC_G4, + PPC_G5, + PPC_PA6T + }; + // General features bool has_fpu() const { return has_fpu_; } diff --git a/src/base/platform/platform-posix.cc b/src/base/platform/platform-posix.cc index 64aed2b8d..ebf726124 100644 --- a/src/base/platform/platform-posix.cc +++ 
b/src/base/platform/platform-posix.cc @@ -85,8 +85,8 @@ int OS::ActivationFrameAlignment() { // Otherwise we just assume 16 byte alignment, i.e.: // - With gcc 4.4 the tree vectorization optimizer can generate code // that requires 16 byte alignment such as movdqa on x86. - // - Mac OS X and Solaris (64-bit) activation frames must be 16 byte-aligned; - // see "Mac OS X ABI Function Call Guide" + // - Mac OS X, PPC and Solaris (64-bit) activation frames must + // be 16 byte-aligned; see "Mac OS X ABI Function Call Guide" return 16; #endif } @@ -171,6 +171,14 @@ void* OS::GetRandomMmapAddr() { // the hint address to 46 bits to give the kernel a fighting chance of // fulfilling our placement request. raw_addr &= V8_UINT64_C(0x3ffffffff000); +#elif V8_TARGET_ARCH_PPC64 +#if V8_TARGET_BIG_ENDIAN + // Big-endian Linux: 44 bits of virtual addressing. + raw_addr &= V8_UINT64_C(0x03fffffff000); +#else + // Little-endian Linux: 48 bits of virtual addressing. + raw_addr &= V8_UINT64_C(0x3ffffffff000); +#endif #else raw_addr &= 0x3ffff000; @@ -225,6 +233,8 @@ void OS::DebugBreak() { asm("break"); #elif V8_HOST_ARCH_MIPS64 asm("break"); +#elif V8_HOST_ARCH_PPC + asm("twge 2,2"); #elif V8_HOST_ARCH_IA32 #if V8_OS_NACL asm("hlt"); diff --git a/src/code-stubs.h b/src/code-stubs.h index 8448e557f..ab56f3424 100644 --- a/src/code-stubs.h +++ b/src/code-stubs.h @@ -111,6 +111,16 @@ namespace internal { #define CODE_STUB_LIST_ARM64(V) #endif +// List of code stubs only used on PPC platforms. +#ifdef V8_TARGET_ARCH_PPC +#define CODE_STUB_LIST_PPC(V) \ + V(DirectCEntry) \ + V(StoreRegistersState) \ + V(RestoreRegistersState) +#else +#define CODE_STUB_LIST_PPC(V) +#endif + // List of code stubs only used on MIPS platforms. #if V8_TARGET_ARCH_MIPS #define CODE_STUB_LIST_MIPS(V) \ @@ -127,10 +137,11 @@ namespace internal { #endif // Combined list of code stubs. 
-#define CODE_STUB_LIST(V) \ - CODE_STUB_LIST_ALL_PLATFORMS(V) \ - CODE_STUB_LIST_ARM(V) \ - CODE_STUB_LIST_ARM64(V) \ +#define CODE_STUB_LIST(V) \ + CODE_STUB_LIST_ALL_PLATFORMS(V) \ + CODE_STUB_LIST_ARM(V) \ + CODE_STUB_LIST_ARM64(V) \ + CODE_STUB_LIST_PPC(V) \ CODE_STUB_LIST_MIPS(V) // Stub is base classes of all stubs. @@ -504,6 +515,8 @@ class RuntimeCallHelper { #include "src/arm64/code-stubs-arm64.h" #elif V8_TARGET_ARCH_ARM #include "src/arm/code-stubs-arm.h" +#elif V8_TARGET_ARCH_PPC +#include "src/ppc/code-stubs-ppc.h" #elif V8_TARGET_ARCH_MIPS #include "src/mips/code-stubs-mips.h" #elif V8_TARGET_ARCH_MIPS64 @@ -1460,7 +1473,7 @@ class CEntryStub : public PlatformCodeStub { : PlatformCodeStub(isolate) { minor_key_ = SaveDoublesBits::encode(save_doubles == kSaveFPRegs); DCHECK(result_size == 1 || result_size == 2); -#ifdef _WIN64 +#if _WIN64 || (V8_TARGET_ARCH_PPC64 && !ABI_RETURNS_OBJECT_PAIRS_IN_REGS) minor_key_ = ResultSizeBits::update(minor_key_, result_size); #endif // _WIN64 } @@ -1473,7 +1486,7 @@ class CEntryStub : public PlatformCodeStub { private: bool save_doubles() const { return SaveDoublesBits::decode(minor_key_); } -#ifdef _WIN64 +#if _WIN64 || (V8_TARGET_ARCH_PPC64 && !ABI_RETURNS_OBJECT_PAIRS_IN_REGS) int result_size() const { return ResultSizeBits::decode(minor_key_); } #endif // _WIN64 diff --git a/src/codegen.h b/src/codegen.h index ba99a404a..0e0cf1d29 100644 --- a/src/codegen.h +++ b/src/codegen.h @@ -53,6 +53,8 @@ enum TypeofState { INSIDE_TYPEOF, NOT_INSIDE_TYPEOF }; #include "src/arm64/codegen-arm64.h" // NOLINT #elif V8_TARGET_ARCH_ARM #include "src/arm/codegen-arm.h" // NOLINT +#elif V8_TARGET_ARCH_PPC +#include "src/ppc/codegen-ppc.h" // NOLINT #elif V8_TARGET_ARCH_MIPS #include "src/mips/codegen-mips.h" // NOLINT #elif V8_TARGET_ARCH_MIPS64 diff --git a/src/compiler/instruction-codes.h b/src/compiler/instruction-codes.h index daf00afb3..fdb46ec6c 100644 --- a/src/compiler/instruction-codes.h +++ 
b/src/compiler/instruction-codes.h @@ -19,6 +19,8 @@ #include "src/compiler/mips64/instruction-codes-mips64.h" #elif V8_TARGET_ARCH_X64 #include "src/compiler/x64/instruction-codes-x64.h" +#elif V8_TARGET_ARCH_PPC +#include "src/compiler/ppc/instruction-codes-ppc.h" #else #define TARGET_ARCH_OPCODE_LIST(V) #define TARGET_ADDRESSING_MODE_LIST(V) diff --git a/src/flag-definitions.h b/src/flag-definitions.h index 5f65c7c2d..08ebad82d 100644 --- a/src/flag-definitions.h +++ b/src/flag-definitions.h @@ -464,7 +464,7 @@ DEFINE_BOOL(enable_32dregs, ENABLE_32DREGS_DEFAULT, DEFINE_BOOL(enable_vldr_imm, false, "enable use of constant pools for double immediate (ARM only)") DEFINE_BOOL(force_long_branches, false, - "force all emitted branches to be in long mode (MIPS only)") + "force all emitted branches to be in long mode (MIPS/PPC only)") // bootstrapper.cc DEFINE_STRING(expose_natives_as, NULL, "expose natives in global object") @@ -667,7 +667,8 @@ DEFINE_BOOL(debug_sim, false, "Enable debugging the simulator") DEFINE_BOOL(check_icache, false, "Check icache flushes in ARM and MIPS simulator") DEFINE_INT(stop_sim_at, 0, "Simulator stop after x number of instructions") -#if defined(V8_TARGET_ARCH_ARM64) || defined(V8_TARGET_ARCH_MIPS64) +#if defined(V8_TARGET_ARCH_ARM64) || defined(V8_TARGET_ARCH_MIPS64) || \ + defined(V8_TARGET_ARCH_PPC64) DEFINE_INT(sim_stack_alignment, 16, "Stack alignment in bytes in simulator. This must be a power of two " "and it must be at least 16. 
16 is default.") diff --git a/src/frames-inl.h b/src/frames-inl.h index d7f2f75d3..824c1a762 100644 --- a/src/frames-inl.h +++ b/src/frames-inl.h @@ -17,6 +17,8 @@ #include "src/arm64/frames-arm64.h" // NOLINT #elif V8_TARGET_ARCH_ARM #include "src/arm/frames-arm.h" // NOLINT +#elif V8_TARGET_ARCH_PPC +#include "src/ppc/frames-ppc.h" // NOLINT #elif V8_TARGET_ARCH_MIPS #include "src/mips/frames-mips.h" // NOLINT #elif V8_TARGET_ARCH_MIPS64 diff --git a/src/full-codegen.h b/src/full-codegen.h index f870caefd..9681c8abe 100644 --- a/src/full-codegen.h +++ b/src/full-codegen.h @@ -112,6 +112,12 @@ class FullCodeGenerator: public AstVisitor { // TODO(all): Copied ARM value. Check this is sensible for ARM64. static const int kCodeSizeMultiplier = 149; static const int kBootCodeSizeMultiplier = 110; +#elif V8_TARGET_ARCH_PPC64 + static const int kCodeSizeMultiplier = 200; + static const int kBootCodeSizeMultiplier = 120; +#elif V8_TARGET_ARCH_PPC + static const int kCodeSizeMultiplier = 200; + static const int kBootCodeSizeMultiplier = 120; #elif V8_TARGET_ARCH_MIPS static const int kCodeSizeMultiplier = 149; static const int kBootCodeSizeMultiplier = 120; @@ -330,12 +336,15 @@ class FullCodeGenerator: public AstVisitor { Label* if_true, Label* if_false, Label* fall_through); -#else // All non-mips arch. +#elif V8_TARGET_ARCH_PPC + void Split(Condition cc, Label* if_true, Label* if_false, Label* fall_through, + CRegister cr = cr7); +#else // All other arch. void Split(Condition cc, Label* if_true, Label* if_false, Label* fall_through); -#endif // V8_TARGET_ARCH_MIPS +#endif // Load the value of a known (PARAMETER, LOCAL, or CONTEXT) variable into // a register. 
Emits a context chain walk if if necessary (so does diff --git a/src/globals.h b/src/globals.h index 48bb030b8..9db432e51 100644 --- a/src/globals.h +++ b/src/globals.h @@ -28,7 +28,7 @@ #if V8_TARGET_ARCH_IA32 || (V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_32_BIT) || \ V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_MIPS || \ - V8_TARGET_ARCH_MIPS64 + V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_PPC #define V8_TURBOFAN_BACKEND 1 #else #define V8_TURBOFAN_BACKEND 0 @@ -59,6 +59,9 @@ namespace internal { #if (V8_TARGET_ARCH_ARM && !V8_HOST_ARCH_ARM) #define USE_SIMULATOR 1 #endif +#if (V8_TARGET_ARCH_PPC && !V8_HOST_ARCH_PPC) +#define USE_SIMULATOR 1 +#endif #if (V8_TARGET_ARCH_MIPS && !V8_HOST_ARCH_MIPS) #define USE_SIMULATOR 1 #endif @@ -83,7 +86,7 @@ namespace internal { // Determine whether double field unboxing feature is enabled. -#if (V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64) +#if V8_TARGET_ARCH_64_BIT #define V8_DOUBLE_FIELDS_UNBOXING 1 #else #define V8_DOUBLE_FIELDS_UNBOXING 0 @@ -625,6 +628,10 @@ enum CpuFeature { // ARM64 ALWAYS_ALIGN_CSP, COHERENT_CACHE, + // PPC + FPR_GPR_MOV, + LWSYNC, + ISELECT, NUMBER_OF_CPU_FEATURES }; diff --git a/src/heap/heap.cc b/src/heap/heap.cc index a49806fd9..79218a32b 100644 --- a/src/heap/heap.cc +++ b/src/heap/heap.cc @@ -34,6 +34,10 @@ #include "src/v8threads.h" #include "src/vm-state-inl.h" +#if V8_TARGET_ARCH_PPC && !V8_INTERPRETED_REGEXP +#include "src/regexp-macro-assembler.h" // NOLINT +#include "src/ppc/regexp-macro-assembler-ppc.h" // NOLINT +#endif #if V8_TARGET_ARCH_ARM && !V8_INTERPRETED_REGEXP #include "src/regexp-macro-assembler.h" // NOLINT #include "src/arm/regexp-macro-assembler-arm.h" // NOLINT diff --git a/src/hydrogen-instructions.cc b/src/hydrogen-instructions.cc index 0e6a03d71..287fc5639 100644 --- a/src/hydrogen-instructions.cc +++ b/src/hydrogen-instructions.cc @@ -18,6 +18,8 @@ #include "src/arm64/lithium-arm64.h" // NOLINT #elif V8_TARGET_ARCH_ARM #include "src/arm/lithium-arm.h" // NOLINT 
+#elif V8_TARGET_ARCH_PPC +#include "src/ppc/lithium-ppc.h" // NOLINT #elif V8_TARGET_ARCH_MIPS #include "src/mips/lithium-mips.h" // NOLINT #elif V8_TARGET_ARCH_MIPS64 diff --git a/src/hydrogen.cc b/src/hydrogen.cc index 8000d8f10..acbc1919e 100644 --- a/src/hydrogen.cc +++ b/src/hydrogen.cc @@ -51,6 +51,8 @@ #include "src/arm64/lithium-codegen-arm64.h" // NOLINT #elif V8_TARGET_ARCH_ARM #include "src/arm/lithium-codegen-arm.h" // NOLINT +#elif V8_TARGET_ARCH_PPC +#include "src/ppc/lithium-codegen-ppc.h" // NOLINT #elif V8_TARGET_ARCH_MIPS #include "src/mips/lithium-codegen-mips.h" // NOLINT #elif V8_TARGET_ARCH_MIPS64 diff --git a/src/isolate.cc b/src/isolate.cc index 6ca993306..9cad13f77 100644 --- a/src/isolate.cc +++ b/src/isolate.cc @@ -2027,8 +2027,8 @@ bool Isolate::Init(Deserializer* des) { // Initialize other runtime facilities #if defined(USE_SIMULATOR) -#if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || \ - V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64 +#if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_MIPS || \ + V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_PPC Simulator::Initialize(this); #endif #endif diff --git a/src/isolate.h b/src/isolate.h index 6a273c6ef..27e637701 100644 --- a/src/isolate.h +++ b/src/isolate.h @@ -82,9 +82,10 @@ class Debug; class Debugger; class PromiseOnStack; -#if !defined(__arm__) && V8_TARGET_ARCH_ARM || \ +#if !defined(__arm__) && V8_TARGET_ARCH_ARM || \ !defined(__aarch64__) && V8_TARGET_ARCH_ARM64 || \ - !defined(__mips__) && V8_TARGET_ARCH_MIPS || \ + !defined(__PPC__) && V8_TARGET_ARCH_PPC || \ + !defined(__mips__) && V8_TARGET_ARCH_MIPS || \ !defined(__mips__) && V8_TARGET_ARCH_MIPS64 class Redirection; class Simulator; @@ -322,9 +323,10 @@ class ThreadLocalTop BASE_EMBEDDED { }; -#if V8_TARGET_ARCH_ARM && !defined(__arm__) || \ +#if V8_TARGET_ARCH_ARM && !defined(__arm__) || \ V8_TARGET_ARCH_ARM64 && !defined(__aarch64__) || \ - V8_TARGET_ARCH_MIPS && !defined(__mips__) || \ + V8_TARGET_ARCH_PPC && 
!defined(__PPC__) || \ + V8_TARGET_ARCH_MIPS && !defined(__mips__) || \ V8_TARGET_ARCH_MIPS64 && !defined(__mips__) #define ISOLATE_INIT_SIMULATOR_LIST(V) \ @@ -417,9 +419,10 @@ class Isolate { thread_id_(thread_id), stack_limit_(0), thread_state_(NULL), -#if !defined(__arm__) && V8_TARGET_ARCH_ARM || \ +#if !defined(__arm__) && V8_TARGET_ARCH_ARM || \ !defined(__aarch64__) && V8_TARGET_ARCH_ARM64 || \ - !defined(__mips__) && V8_TARGET_ARCH_MIPS || \ + !defined(__PPC__) && V8_TARGET_ARCH_PPC || \ + !defined(__mips__) && V8_TARGET_ARCH_MIPS || \ !defined(__mips__) && V8_TARGET_ARCH_MIPS64 simulator_(NULL), #endif @@ -432,9 +435,10 @@ class Isolate { FIELD_ACCESSOR(uintptr_t, stack_limit) FIELD_ACCESSOR(ThreadState*, thread_state) -#if !defined(__arm__) && V8_TARGET_ARCH_ARM || \ +#if !defined(__arm__) && V8_TARGET_ARCH_ARM || \ !defined(__aarch64__) && V8_TARGET_ARCH_ARM64 || \ - !defined(__mips__) && V8_TARGET_ARCH_MIPS || \ + !defined(__PPC__) && V8_TARGET_ARCH_PPC || \ + !defined(__mips__) && V8_TARGET_ARCH_MIPS || \ !defined(__mips__) && V8_TARGET_ARCH_MIPS64 FIELD_ACCESSOR(Simulator*, simulator) #endif @@ -449,9 +453,10 @@ class Isolate { uintptr_t stack_limit_; ThreadState* thread_state_; -#if !defined(__arm__) && V8_TARGET_ARCH_ARM || \ +#if !defined(__arm__) && V8_TARGET_ARCH_ARM || \ !defined(__aarch64__) && V8_TARGET_ARCH_ARM64 || \ - !defined(__mips__) && V8_TARGET_ARCH_MIPS || \ + !defined(__PPC__) && V8_TARGET_ARCH_PPC || \ + !defined(__mips__) && V8_TARGET_ARCH_MIPS || \ !defined(__mips__) && V8_TARGET_ARCH_MIPS64 Simulator* simulator_; #endif diff --git a/src/jsregexp.cc b/src/jsregexp.cc index a5b9fb5ab..d5a7775b8 100644 --- a/src/jsregexp.cc +++ b/src/jsregexp.cc @@ -31,6 +31,8 @@ #include "src/arm64/regexp-macro-assembler-arm64.h" // NOLINT #elif V8_TARGET_ARCH_ARM #include "src/arm/regexp-macro-assembler-arm.h" // NOLINT +#elif V8_TARGET_ARCH_PPC +#include "src/ppc/regexp-macro-assembler-ppc.h" // NOLINT #elif V8_TARGET_ARCH_MIPS #include 
"src/mips/regexp-macro-assembler-mips.h" // NOLINT #elif V8_TARGET_ARCH_MIPS64 @@ -6094,6 +6096,9 @@ RegExpEngine::CompilationResult RegExpEngine::Compile( #elif V8_TARGET_ARCH_ARM64 RegExpMacroAssemblerARM64 macro_assembler(mode, (data->capture_count + 1) * 2, zone); +#elif V8_TARGET_ARCH_PPC + RegExpMacroAssemblerPPC macro_assembler(mode, (data->capture_count + 1) * 2, + zone); #elif V8_TARGET_ARCH_MIPS RegExpMacroAssemblerMIPS macro_assembler(mode, (data->capture_count + 1) * 2, zone); diff --git a/src/lithium-allocator-inl.h b/src/lithium-allocator-inl.h index bafa00f07..98923ae3a 100644 --- a/src/lithium-allocator-inl.h +++ b/src/lithium-allocator-inl.h @@ -15,6 +15,8 @@ #include "src/arm64/lithium-arm64.h" // NOLINT #elif V8_TARGET_ARCH_ARM #include "src/arm/lithium-arm.h" // NOLINT +#elif V8_TARGET_ARCH_PPC +#include "src/ppc/lithium-ppc.h" // NOLINT #elif V8_TARGET_ARCH_MIPS #include "src/mips/lithium-mips.h" // NOLINT #elif V8_TARGET_ARCH_MIPS64 diff --git a/src/lithium-codegen.cc b/src/lithium-codegen.cc index 4534b46d8..27c5c01b0 100644 --- a/src/lithium-codegen.cc +++ b/src/lithium-codegen.cc @@ -29,6 +29,9 @@ #elif V8_TARGET_ARCH_X87 #include "src/x87/lithium-x87.h" // NOLINT #include "src/x87/lithium-codegen-x87.h" // NOLINT +#elif V8_TARGET_ARCH_PPC +#include "src/ppc/lithium-ppc.h" // NOLINT +#include "src/ppc/lithium-codegen-ppc.h" // NOLINT #else #error Unsupported target architecture. 
#endif diff --git a/src/lithium-inl.h b/src/lithium-inl.h index 36e166e92..1a1077339 100644 --- a/src/lithium-inl.h +++ b/src/lithium-inl.h @@ -19,6 +19,8 @@ #include "src/mips/lithium-mips.h" // NOLINT #elif V8_TARGET_ARCH_MIPS64 #include "src/mips64/lithium-mips64.h" // NOLINT +#elif V8_TARGET_ARCH_PPC +#include "src/ppc/lithium-ppc.h" // NOLINT #elif V8_TARGET_ARCH_X87 #include "src/x87/lithium-x87.h" // NOLINT #else diff --git a/src/lithium.cc b/src/lithium.cc index d57a2dd4a..7e2f1a62b 100644 --- a/src/lithium.cc +++ b/src/lithium.cc @@ -18,6 +18,9 @@ #elif V8_TARGET_ARCH_ARM #include "src/arm/lithium-arm.h" // NOLINT #include "src/arm/lithium-codegen-arm.h" // NOLINT +#elif V8_TARGET_ARCH_PPC +#include "src/ppc/lithium-ppc.h" // NOLINT +#include "src/ppc/lithium-codegen-ppc.h" // NOLINT #elif V8_TARGET_ARCH_MIPS #include "src/mips/lithium-mips.h" // NOLINT #include "src/mips/lithium-codegen-mips.h" // NOLINT diff --git a/src/log.cc b/src/log.cc index 3eede365b..f0ca8de75 100644 --- a/src/log.cc +++ b/src/log.cc @@ -402,6 +402,8 @@ void LowLevelLogger::LogCodeInfo() { const char arch[] = "x32"; #elif V8_TARGET_ARCH_ARM const char arch[] = "arm"; +#elif V8_TARGET_ARCH_PPC + const char arch[] = "ppc"; #elif V8_TARGET_ARCH_MIPS const char arch[] = "mips"; #elif V8_TARGET_ARCH_X87 diff --git a/src/macro-assembler.h b/src/macro-assembler.h index 2501f806c..166ac428b 100644 --- a/src/macro-assembler.h +++ b/src/macro-assembler.h @@ -64,6 +64,13 @@ const int kInvalidProtoDepth = -1; #include "src/arm/assembler-arm-inl.h" #include "src/code.h" // NOLINT, must be after assembler_*.h #include "src/arm/macro-assembler-arm.h" // NOLINT +#elif V8_TARGET_ARCH_PPC +#include "src/ppc/constants-ppc.h" +#include "src/assembler.h" // NOLINT +#include "src/ppc/assembler-ppc.h" // NOLINT +#include "src/ppc/assembler-ppc-inl.h" +#include "src/code.h" // NOLINT, must be after assembler_*.h +#include "src/ppc/macro-assembler-ppc.h" #elif V8_TARGET_ARCH_MIPS #include 
"src/mips/constants-mips.h" #include "src/assembler.h" // NOLINT diff --git a/src/objects.h b/src/objects.h index 7eeedfa76..5b0e714cf 100644 --- a/src/objects.h +++ b/src/objects.h @@ -31,6 +31,8 @@ #include "src/mips/constants-mips.h" // NOLINT #elif V8_TARGET_ARCH_MIPS64 #include "src/mips64/constants-mips64.h" // NOLINT +#elif V8_TARGET_ARCH_PPC +#include "src/ppc/constants-ppc.h" // NOLINT #endif diff --git a/src/ppc/assembler-ppc.cc b/src/ppc/assembler-ppc.cc index 4b8b16565..d91052873 100644 --- a/src/ppc/assembler-ppc.cc +++ b/src/ppc/assembler-ppc.cc @@ -82,9 +82,6 @@ void CpuFeatures::ProbeImpl(bool cross_compile) { // Assume support supported_ |= (1u << FPU); } - if (cpu.cache_line_size() != 0) { - cache_line_size_ = cpu.cache_line_size(); - } #elif V8_OS_AIX // Assume support FP support and default cache line size supported_ |= (1u << FPU); @@ -1422,11 +1419,13 @@ void Assembler::marker_asm(int mcode) { // Code address skips the function descriptor "header". // TOC and static chain are ignored and set to 0. 
void Assembler::function_descriptor() { +#if ABI_USES_FUNCTION_DESCRIPTORS DCHECK(pc_offset() == 0); RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE); emit_ptr(reinterpret_cast(pc_) + 3 * kPointerSize); emit_ptr(0); emit_ptr(0); +#endif } diff --git a/src/ppc/code-stubs-ppc.cc b/src/ppc/code-stubs-ppc.cc index 3e84a2143..2c65b4497 100644 --- a/src/ppc/code-stubs-ppc.cc +++ b/src/ppc/code-stubs-ppc.cc @@ -1150,9 +1150,7 @@ void JSEntryStub::Generate(MacroAssembler* masm) { Label invoke, handler_entry, exit; // Called from C -#if ABI_USES_FUNCTION_DESCRIPTORS __ function_descriptor(); -#endif ProfileEntryHookStub::MaybeCallEntryHook(masm); diff --git a/src/ppc/codegen-ppc.cc b/src/ppc/codegen-ppc.cc index 1074e872b..deca69664 100644 --- a/src/ppc/codegen-ppc.cc +++ b/src/ppc/codegen-ppc.cc @@ -46,9 +46,7 @@ UnaryMathFunction CreateExpFunction() { Register temp3 = r9; // Called from C -#if ABI_USES_FUNCTION_DESCRIPTORS __ function_descriptor(); -#endif __ Push(temp3, temp2, temp1); MathExpGenerator::EmitMathExp(&masm, input, result, double_scratch1, @@ -88,9 +86,7 @@ UnaryMathFunction CreateSqrtFunction() { MacroAssembler masm(NULL, buffer, static_cast(actual_size)); // Called from C -#if ABI_USES_FUNCTION_DESCRIPTORS __ function_descriptor(); -#endif __ MovFromFloatParameter(d1); __ fsqrt(d1, d1); diff --git a/src/ppc/regexp-macro-assembler-ppc.cc b/src/ppc/regexp-macro-assembler-ppc.cc index 54acce16f..fb8f1ee99 100644 --- a/src/ppc/regexp-macro-assembler-ppc.cc +++ b/src/ppc/regexp-macro-assembler-ppc.cc @@ -115,9 +115,7 @@ RegExpMacroAssemblerPPC::RegExpMacroAssemblerPPC(Mode mode, DCHECK_EQ(0, registers_to_save % 2); // Called from C -#if ABI_USES_FUNCTION_DESCRIPTORS __ function_descriptor(); -#endif __ b(&entry_label_); // We'll write the entry code later. 
// If the code gets too big or corrupted, an internal exception will be diff --git a/src/regexp-macro-assembler.h b/src/regexp-macro-assembler.h index f72cc4d42..c2f8f4297 100644 --- a/src/regexp-macro-assembler.h +++ b/src/regexp-macro-assembler.h @@ -32,6 +32,7 @@ class RegExpMacroAssembler { kARMImplementation, kARM64Implementation, kMIPSImplementation, + kPPCImplementation, kX64Implementation, kX87Implementation, kBytecodeImplementation diff --git a/src/sampler.cc b/src/sampler.cc index 760df8070..19c5cacee 100644 --- a/src/sampler.cc +++ b/src/sampler.cc @@ -256,6 +256,12 @@ class SimulatorHelper { Simulator::sp)); state->fp = reinterpret_cast
(simulator_->get_register( Simulator::fp)); +#elif V8_TARGET_ARCH_PPC + state->pc = reinterpret_cast
(simulator_->get_pc()); + state->sp = + reinterpret_cast
(simulator_->get_register(Simulator::sp)); + state->fp = + reinterpret_cast
(simulator_->get_register(Simulator::fp)); #endif } @@ -361,7 +367,7 @@ void SignalHandler::HandleProfilerSignal(int signal, siginfo_t* info, #else // Extracting the sample from the context is extremely machine dependent. ucontext_t* ucontext = reinterpret_cast(context); -#if !V8_OS_OPENBSD +#if !(V8_OS_OPENBSD || (V8_OS_LINUX && V8_HOST_ARCH_PPC)) mcontext_t& mcontext = ucontext->uc_mcontext; #endif #if V8_OS_LINUX @@ -398,6 +404,10 @@ void SignalHandler::HandleProfilerSignal(int signal, siginfo_t* info, state.pc = reinterpret_cast
(mcontext.pc); state.sp = reinterpret_cast
(mcontext.gregs[29]); state.fp = reinterpret_cast
(mcontext.gregs[30]); +#elif V8_HOST_ARCH_PPC + state.pc = reinterpret_cast
(ucontext->uc_mcontext.regs->nip); + state.sp = reinterpret_cast
(ucontext->uc_mcontext.regs->gpr[PT_R1]); + state.fp = reinterpret_cast
(ucontext->uc_mcontext.regs->gpr[PT_R31]); #endif // V8_HOST_ARCH_* #elif V8_OS_MACOSX #if V8_HOST_ARCH_X64 diff --git a/src/serialize.cc b/src/serialize.cc index 8d6d55082..085f4e5ee 100644 --- a/src/serialize.cc +++ b/src/serialize.cc @@ -1193,16 +1193,16 @@ void Deserializer::ReadData(Object** current, Object** limit, int source_space, // allocation point and write a pointer to it to the current object. ALL_SPACES(kBackref, kPlain, kStartOfObject) ALL_SPACES(kBackrefWithSkip, kPlain, kStartOfObject) -#if defined(V8_TARGET_ARCH_MIPS) || V8_OOL_CONSTANT_POOL || \ - defined(V8_TARGET_ARCH_MIPS64) +#if defined(V8_TARGET_ARCH_MIPS) || defined(V8_TARGET_ARCH_MIPS64) || \ + defined(V8_TARGET_ARCH_PPC) || V8_OOL_CONSTANT_POOL // Deserialize a new object from pointer found in code and write - // a pointer to it to the current object. Required only for MIPS or ARM - // with ool constant pool, and omitted on the other architectures because - // it is fully unrolled and would cause bloat. + // a pointer to it to the current object. Required only for MIPS, PPC or + // ARM with ool constant pool, and omitted on the other architectures + // because it is fully unrolled and would cause bloat. ALL_SPACES(kNewObject, kFromCode, kStartOfObject) // Find a recently deserialized code object using its offset from the // current allocation point and write a pointer to it to the current - // object. Required only for MIPS or ARM with ool constant pool. + // object. Required only for MIPS, PPC or ARM with ool constant pool. 
ALL_SPACES(kBackref, kFromCode, kStartOfObject) ALL_SPACES(kBackrefWithSkip, kFromCode, kStartOfObject) #endif @@ -1219,7 +1219,7 @@ void Deserializer::ReadData(Object** current, Object** limit, int source_space, CASE_STATEMENT(kRootArray, kPlain, kStartOfObject, 0) CASE_BODY(kRootArray, kPlain, kStartOfObject, 0) #if defined(V8_TARGET_ARCH_MIPS) || V8_OOL_CONSTANT_POOL || \ - defined(V8_TARGET_ARCH_MIPS64) + defined(V8_TARGET_ARCH_MIPS64) || defined(V8_TARGET_ARCH_PPC) // Find an object in the roots array and write a pointer to it to in code. CASE_STATEMENT(kRootArray, kFromCode, kStartOfObject, 0) CASE_BODY(kRootArray, kFromCode, kStartOfObject, 0) diff --git a/src/simulator.h b/src/simulator.h index 6dd08f4a5..d19829190 100644 --- a/src/simulator.h +++ b/src/simulator.h @@ -13,6 +13,8 @@ #include "src/arm64/simulator-arm64.h" #elif V8_TARGET_ARCH_ARM #include "src/arm/simulator-arm.h" +#elif V8_TARGET_ARCH_PPC +#include "src/ppc/simulator-ppc.h" #elif V8_TARGET_ARCH_MIPS #include "src/mips/simulator-mips.h" #elif V8_TARGET_ARCH_MIPS64 diff --git a/src/utils.h b/src/utils.h index 87276c129..4491f5584 100644 --- a/src/utils.h +++ b/src/utils.h @@ -1327,6 +1327,9 @@ INLINE(void CopyCharsUnsigned(uint16_t* dest, const uint16_t* src, int chars)); #elif defined(V8_HOST_ARCH_MIPS) INLINE(void CopyCharsUnsigned(uint8_t* dest, const uint8_t* src, int chars)); INLINE(void CopyCharsUnsigned(uint16_t* dest, const uint16_t* src, int chars)); +#elif defined(V8_HOST_ARCH_PPC) +INLINE(void CopyCharsUnsigned(uint8_t* dest, const uint8_t* src, int chars)); +INLINE(void CopyCharsUnsigned(uint16_t* dest, const uint16_t* src, int chars)); #endif // Copy from 8bit/16bit chars to 8bit/16bit chars. 
@@ -1486,6 +1489,136 @@ void CopyCharsUnsigned(uint16_t* dest, const uint16_t* src, int chars) { MemCopy(dest, src, chars * sizeof(*dest)); } } +#elif defined(V8_HOST_ARCH_PPC) +#define CASE(n) \ + case n: \ + memcpy(dest, src, n); \ + break +void CopyCharsUnsigned(uint8_t* dest, const uint8_t* src, int chars) { + switch (static_cast(chars)) { + case 0: + break; + case 1: + *dest = *src; + break; + CASE(2); + CASE(3); + CASE(4); + CASE(5); + CASE(6); + CASE(7); + CASE(8); + CASE(9); + CASE(10); + CASE(11); + CASE(12); + CASE(13); + CASE(14); + CASE(15); + CASE(16); + CASE(17); + CASE(18); + CASE(19); + CASE(20); + CASE(21); + CASE(22); + CASE(23); + CASE(24); + CASE(25); + CASE(26); + CASE(27); + CASE(28); + CASE(29); + CASE(30); + CASE(31); + CASE(32); + CASE(33); + CASE(34); + CASE(35); + CASE(36); + CASE(37); + CASE(38); + CASE(39); + CASE(40); + CASE(41); + CASE(42); + CASE(43); + CASE(44); + CASE(45); + CASE(46); + CASE(47); + CASE(48); + CASE(49); + CASE(50); + CASE(51); + CASE(52); + CASE(53); + CASE(54); + CASE(55); + CASE(56); + CASE(57); + CASE(58); + CASE(59); + CASE(60); + CASE(61); + CASE(62); + CASE(63); + CASE(64); + default: + memcpy(dest, src, chars); + break; + } +} +#undef CASE + +#define CASE(n) \ + case n: \ + memcpy(dest, src, n * 2); \ + break +void CopyCharsUnsigned(uint16_t* dest, const uint16_t* src, int chars) { + switch (static_cast(chars)) { + case 0: + break; + case 1: + *dest = *src; + break; + CASE(2); + CASE(3); + CASE(4); + CASE(5); + CASE(6); + CASE(7); + CASE(8); + CASE(9); + CASE(10); + CASE(11); + CASE(12); + CASE(13); + CASE(14); + CASE(15); + CASE(16); + CASE(17); + CASE(18); + CASE(19); + CASE(20); + CASE(21); + CASE(22); + CASE(23); + CASE(24); + CASE(25); + CASE(26); + CASE(27); + CASE(28); + CASE(29); + CASE(30); + CASE(31); + CASE(32); + default: + memcpy(dest, src, chars * 2); + break; + } +} +#undef CASE #endif diff --git a/test/cctest/cctest.gyp b/test/cctest/cctest.gyp index 7d3ace84f..a962c4729 100644 --- 
a/test/cctest/cctest.gyp +++ b/test/cctest/cctest.gyp @@ -212,6 +212,20 @@ 'test-js-arm64-variables.cc' ], }], + ['v8_target_arch=="ppc"', { + 'sources': [ ### gcmole(arch:ppc) ### + 'test-assembler-ppc.cc', + 'test-code-stubs.cc', + 'test-disasm-ppc.cc' + ], + }], + ['v8_target_arch=="ppc64"', { + 'sources': [ ### gcmole(arch:ppc64) ### + 'test-assembler-ppc.cc', + 'test-code-stubs.cc', + 'test-disasm-ppc.cc' + ], + }], ['v8_target_arch=="mipsel"', { 'sources': [ ### gcmole(arch:mipsel) ### 'test-assembler-mips.cc', diff --git a/test/cctest/compiler/call-tester.h b/test/cctest/compiler/call-tester.h index cad171e60..ffafaf080 100644 --- a/test/cctest/compiler/call-tester.h +++ b/test/cctest/compiler/call-tester.h @@ -207,7 +207,7 @@ class CallHelper { Simulator::CallArgument::End()}; return ReturnValueTraits::Cast(CallSimulator(FUNCTION_ADDR(f), args)); } -#elif USE_SIMULATOR && V8_TARGET_ARCH_MIPS64 +#elif USE_SIMULATOR && (V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_PPC64) uintptr_t CallSimulator(byte* f, int64_t p1 = 0, int64_t p2 = 0, int64_t p3 = 0, int64_t p4 = 0) { Simulator* simulator = Simulator::current(isolate_); @@ -243,7 +243,8 @@ class CallHelper { ParameterTraits::Cast(p2), ParameterTraits::Cast(p3), ParameterTraits::Cast(p4))); } -#elif USE_SIMULATOR && (V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_MIPS) +#elif USE_SIMULATOR && \ + (V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_PPC) uintptr_t CallSimulator(byte* f, int32_t p1 = 0, int32_t p2 = 0, int32_t p3 = 0, int32_t p4 = 0) { Simulator* simulator = Simulator::current(isolate_); diff --git a/test/cctest/test-assembler-ppc.cc b/test/cctest/test-assembler-ppc.cc new file mode 100644 index 000000000..4a2e7d398 --- /dev/null +++ b/test/cctest/test-assembler-ppc.cc @@ -0,0 +1,1060 @@ +// Copyright 2012 the V8 project authors. All rights reserved. 
+// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#include "src/v8.h" + +#include "src/disassembler.h" +#include "src/factory.h" +#include "src/ppc/assembler-ppc-inl.h" +#include "src/ppc/simulator-ppc.h" +#include "test/cctest/cctest.h" + +using namespace v8::internal; + + +// Define these function prototypes to match JSEntryFunction in execution.cc. 
+typedef Object* (*F1)(int x, int p1, int p2, int p3, int p4); +typedef Object* (*F2)(int x, int y, int p2, int p3, int p4); +typedef Object* (*F3)(void* p0, int p1, int p2, int p3, int p4); +typedef Object* (*F4)(void* p0, void* p1, int p2, int p3, int p4); + + +#define __ assm. + +// Simple add parameter 1 to parameter 2 and return +TEST(0) { + CcTest::InitializeVM(); + Isolate* isolate = Isolate::Current(); + HandleScope scope(isolate); + + Assembler assm(isolate, NULL, 0); + + __ function_descriptor(); + + __ add(r3, r3, r4); + __ blr(); + + CodeDesc desc; + assm.GetCode(&desc); + Handle code = isolate->factory()->NewCode( + desc, Code::ComputeFlags(Code::STUB), Handle()); +#ifdef DEBUG + code->Print(); +#endif + F2 f = FUNCTION_CAST(code->entry()); + intptr_t res = + reinterpret_cast(CALL_GENERATED_CODE(f, 3, 4, 0, 0, 0)); + ::printf("f() = %" V8PRIdPTR "\n", res); + CHECK_EQ(7, static_cast(res)); +} + + +// Loop 100 times, adding loop counter to result +TEST(1) { + CcTest::InitializeVM(); + Isolate* isolate = Isolate::Current(); + HandleScope scope(isolate); + + Assembler assm(isolate, NULL, 0); + Label L, C; + + __ function_descriptor(); + + __ mr(r4, r3); + __ li(r3, Operand::Zero()); + __ b(&C); + + __ bind(&L); + __ add(r3, r3, r4); + __ subi(r4, r4, Operand(1)); + + __ bind(&C); + __ cmpi(r4, Operand::Zero()); + __ bne(&L); + __ blr(); + + CodeDesc desc; + assm.GetCode(&desc); + Handle code = isolate->factory()->NewCode( + desc, Code::ComputeFlags(Code::STUB), Handle()); +#ifdef DEBUG + code->Print(); +#endif + F1 f = FUNCTION_CAST(code->entry()); + intptr_t res = + reinterpret_cast(CALL_GENERATED_CODE(f, 100, 0, 0, 0, 0)); + ::printf("f() = %" V8PRIdPTR "\n", res); + CHECK_EQ(5050, static_cast(res)); +} + + +TEST(2) { + CcTest::InitializeVM(); + Isolate* isolate = Isolate::Current(); + HandleScope scope(isolate); + + Assembler assm(isolate, NULL, 0); + Label L, C; + + __ function_descriptor(); + + __ mr(r4, r3); + __ li(r3, Operand(1)); + __ b(&C); + + 
__ bind(&L); +#if defined(V8_TARGET_ARCH_PPC64) + __ mulld(r3, r4, r3); +#else + __ mullw(r3, r4, r3); +#endif + __ subi(r4, r4, Operand(1)); + + __ bind(&C); + __ cmpi(r4, Operand::Zero()); + __ bne(&L); + __ blr(); + + // some relocated stuff here, not executed + __ RecordComment("dead code, just testing relocations"); + __ mov(r0, Operand(isolate->factory()->true_value())); + __ RecordComment("dead code, just testing immediate operands"); + __ mov(r0, Operand(-1)); + __ mov(r0, Operand(0xFF000000)); + __ mov(r0, Operand(0xF0F0F0F0)); + __ mov(r0, Operand(0xFFF0FFFF)); + + CodeDesc desc; + assm.GetCode(&desc); + Handle code = isolate->factory()->NewCode( + desc, Code::ComputeFlags(Code::STUB), Handle()); +#ifdef DEBUG + code->Print(); +#endif + F1 f = FUNCTION_CAST(code->entry()); + intptr_t res = + reinterpret_cast(CALL_GENERATED_CODE(f, 10, 0, 0, 0, 0)); + ::printf("f() = %" V8PRIdPTR "\n", res); + CHECK_EQ(3628800, static_cast(res)); +} + + +TEST(3) { + CcTest::InitializeVM(); + Isolate* isolate = Isolate::Current(); + HandleScope scope(isolate); + + typedef struct { + int i; + char c; + int16_t s; + } T; + T t; + + Assembler assm(Isolate::Current(), NULL, 0); + Label L, C; + + __ function_descriptor(); + +// build a frame +#if V8_TARGET_ARCH_PPC64 + __ stdu(sp, MemOperand(sp, -32)); + __ std(fp, MemOperand(sp, 24)); +#else + __ stwu(sp, MemOperand(sp, -16)); + __ stw(fp, MemOperand(sp, 12)); +#endif + __ mr(fp, sp); + + // r4 points to our struct + __ mr(r4, r3); + + // modify field int i of struct + __ lwz(r3, MemOperand(r4, OFFSET_OF(T, i))); + __ srwi(r5, r3, Operand(1)); + __ stw(r5, MemOperand(r4, OFFSET_OF(T, i))); + + // modify field char c of struct + __ lbz(r5, MemOperand(r4, OFFSET_OF(T, c))); + __ add(r3, r5, r3); + __ slwi(r5, r5, Operand(2)); + __ stb(r5, MemOperand(r4, OFFSET_OF(T, c))); + + // modify field int16_t s of struct + __ lhz(r5, MemOperand(r4, OFFSET_OF(T, s))); + __ add(r3, r5, r3); + __ srwi(r5, r5, Operand(3)); + __ sth(r5, 
MemOperand(r4, OFFSET_OF(T, s))); + +// restore frame +#if V8_TARGET_ARCH_PPC64 + __ addi(r11, fp, Operand(32)); + __ ld(fp, MemOperand(r11, -8)); +#else + __ addi(r11, fp, Operand(16)); + __ lwz(fp, MemOperand(r11, -4)); +#endif + __ mr(sp, r11); + __ blr(); + + CodeDesc desc; + assm.GetCode(&desc); + Handle code = isolate->factory()->NewCode( + desc, Code::ComputeFlags(Code::STUB), Handle()); +#ifdef DEBUG + code->Print(); +#endif + F3 f = FUNCTION_CAST(code->entry()); + t.i = 100000; + t.c = 10; + t.s = 1000; + intptr_t res = + reinterpret_cast(CALL_GENERATED_CODE(f, &t, 0, 0, 0, 0)); + ::printf("f() = %" V8PRIdPTR "\n", res); + CHECK_EQ(101010, static_cast(res)); + CHECK_EQ(100000 / 2, t.i); + CHECK_EQ(10 * 4, t.c); + CHECK_EQ(1000 / 8, t.s); +} + +#if 0 +TEST(4) { + // Test the VFP floating point instructions. + CcTest::InitializeVM(); + Isolate* isolate = Isolate::Current(); + HandleScope scope(isolate); + + typedef struct { + double a; + double b; + double c; + double d; + double e; + double f; + double g; + double h; + int i; + double m; + double n; + float x; + float y; + } T; + T t; + + // Create a function that accepts &t, and loads, manipulates, and stores + // the doubles and floats. + Assembler assm(Isolate::Current(), NULL, 0); + Label L, C; + + if (CpuFeatures::IsSupported(VFP3)) { + CpuFeatures::Scope scope(VFP3); + + __ mov(ip, Operand(sp)); + __ stm(db_w, sp, r4.bit() | fp.bit() | lr.bit()); + __ sub(fp, ip, Operand(4)); + + __ mov(r4, Operand(r0)); + __ vldr(d6, r4, OFFSET_OF(T, a)); + __ vldr(d7, r4, OFFSET_OF(T, b)); + __ vadd(d5, d6, d7); + __ vstr(d5, r4, OFFSET_OF(T, c)); + + __ vmov(r2, r3, d5); + __ vmov(d4, r2, r3); + __ vstr(d4, r4, OFFSET_OF(T, b)); + + // Load t.x and t.y, switch values, and store back to the struct. 
+ __ vldr(s0, r4, OFFSET_OF(T, x)); + __ vldr(s31, r4, OFFSET_OF(T, y)); + __ vmov(s16, s0); + __ vmov(s0, s31); + __ vmov(s31, s16); + __ vstr(s0, r4, OFFSET_OF(T, x)); + __ vstr(s31, r4, OFFSET_OF(T, y)); + + // Move a literal into a register that can be encoded in the instruction. + __ vmov(d4, 1.0); + __ vstr(d4, r4, OFFSET_OF(T, e)); + + // Move a literal into a register that requires 64 bits to encode. + // 0x3ff0000010000000 = 1.000000059604644775390625 + __ vmov(d4, 1.000000059604644775390625); + __ vstr(d4, r4, OFFSET_OF(T, d)); + + // Convert from floating point to integer. + __ vmov(d4, 2.0); + __ vcvt_s32_f64(s31, d4); + __ vstr(s31, r4, OFFSET_OF(T, i)); + + // Convert from integer to floating point. + __ mov(lr, Operand(42)); + __ vmov(s31, lr); + __ vcvt_f64_s32(d4, s31); + __ vstr(d4, r4, OFFSET_OF(T, f)); + + // Test vabs. + __ vldr(d1, r4, OFFSET_OF(T, g)); + __ vabs(d0, d1); + __ vstr(d0, r4, OFFSET_OF(T, g)); + __ vldr(d2, r4, OFFSET_OF(T, h)); + __ vabs(d0, d2); + __ vstr(d0, r4, OFFSET_OF(T, h)); + + // Test vneg. 
+ __ vldr(d1, r4, OFFSET_OF(T, m)); + __ vneg(d0, d1); + __ vstr(d0, r4, OFFSET_OF(T, m)); + __ vldr(d1, r4, OFFSET_OF(T, n)); + __ vneg(d0, d1); + __ vstr(d0, r4, OFFSET_OF(T, n)); + + __ ldm(ia_w, sp, r4.bit() | fp.bit() | pc.bit()); + + CodeDesc desc; + assm.GetCode(&desc); + Object* code = isolate->heap()->CreateCode( + desc, + Code::ComputeFlags(Code::STUB), + Handle())->ToObjectChecked(); + CHECK(code->IsCode()); +#ifdef DEBUG + Code::cast(code)->Print(); +#endif + F3 f = FUNCTION_CAST(Code::cast(code)->entry()); + t.a = 1.5; + t.b = 2.75; + t.c = 17.17; + t.d = 0.0; + t.e = 0.0; + t.f = 0.0; + t.g = -2718.2818; + t.h = 31415926.5; + t.i = 0; + t.m = -2718.2818; + t.n = 123.456; + t.x = 4.5; + t.y = 9.0; + Object* dummy = CALL_GENERATED_CODE(f, &t, 0, 0, 0, 0); + USE(dummy); + CHECK_EQ(4.5, t.y); + CHECK_EQ(9.0, t.x); + CHECK_EQ(-123.456, t.n); + CHECK_EQ(2718.2818, t.m); + CHECK_EQ(2, t.i); + CHECK_EQ(2718.2818, t.g); + CHECK_EQ(31415926.5, t.h); + CHECK_EQ(42.0, t.f); + CHECK_EQ(1.0, t.e); + CHECK_EQ(1.000000059604644775390625, t.d); + CHECK_EQ(4.25, t.c); + CHECK_EQ(4.25, t.b); + CHECK_EQ(1.5, t.a); + } +} + + +TEST(5) { + // Test the ARMv7 bitfield instructions. + CcTest::InitializeVM(); + Isolate* isolate = Isolate::Current(); + HandleScope scope(isolate); + + Assembler assm(isolate, NULL, 0); + + if (CpuFeatures::IsSupported(ARMv7)) { + CpuFeatures::Scope scope(ARMv7); + // On entry, r0 = 0xAAAAAAAA = 0b10..10101010. 
+ __ ubfx(r0, r0, 1, 12); // 0b00..010101010101 = 0x555 + __ sbfx(r0, r0, 0, 5); // 0b11..111111110101 = -11 + __ bfc(r0, 1, 3); // 0b11..111111110001 = -15 + __ mov(r1, Operand(7)); + __ bfi(r0, r1, 3, 3); // 0b11..111111111001 = -7 + __ mov(pc, Operand(lr)); + + CodeDesc desc; + assm.GetCode(&desc); + Object* code = isolate->heap()->CreateCode( + desc, + Code::ComputeFlags(Code::STUB), + Handle())->ToObjectChecked(); + CHECK(code->IsCode()); +#ifdef DEBUG + Code::cast(code)->Print(); +#endif + F1 f = FUNCTION_CAST(Code::cast(code)->entry()); + int res = reinterpret_cast( + CALL_GENERATED_CODE(f, 0xAAAAAAAA, 0, 0, 0, 0)); + ::printf("f() = %d\n", res); + CHECK_EQ(-7, res); + } +} + + +TEST(6) { + // Test saturating instructions. + CcTest::InitializeVM(); + Isolate* isolate = Isolate::Current(); + HandleScope scope(isolate); + + Assembler assm(isolate, NULL, 0); + + if (CpuFeatures::IsSupported(ARMv7)) { + CpuFeatures::Scope scope(ARMv7); + __ usat(r1, 8, Operand(r0)); // Sat 0xFFFF to 0-255 = 0xFF. + __ usat(r2, 12, Operand(r0, ASR, 9)); // Sat (0xFFFF>>9) to 0-4095 = 0x7F. + __ usat(r3, 1, Operand(r0, LSL, 16)); // Sat (0xFFFF<<16) to 0-1 = 0x0. 
+ __ addi(r0, r1, Operand(r2)); + __ addi(r0, r0, Operand(r3)); + __ mov(pc, Operand(lr)); + + CodeDesc desc; + assm.GetCode(&desc); + Object* code = isolate->heap()->CreateCode( + desc, + Code::ComputeFlags(Code::STUB), + Handle())->ToObjectChecked(); + CHECK(code->IsCode()); +#ifdef DEBUG + Code::cast(code)->Print(); +#endif + F1 f = FUNCTION_CAST(Code::cast(code)->entry()); + int res = reinterpret_cast( + CALL_GENERATED_CODE(f, 0xFFFF, 0, 0, 0, 0)); + ::printf("f() = %d\n", res); + CHECK_EQ(382, res); + } +} + +enum VCVTTypes { + s32_f64, + u32_f64 +}; + +static void TestRoundingMode(VCVTTypes types, + VFPRoundingMode mode, + double value, + int expected, + bool expected_exception = false) { + CcTest::InitializeVM(); + Isolate* isolate = Isolate::Current(); + HandleScope scope(isolate); + + Assembler assm(isolate, NULL, 0); + + if (CpuFeatures::IsSupported(VFP3)) { + CpuFeatures::Scope scope(VFP3); + + Label wrong_exception; + + __ vmrs(r1); + // Set custom FPSCR. + __ bic(r2, r1, Operand(kVFPRoundingModeMask | kVFPExceptionMask)); + __ orr(r2, r2, Operand(mode)); + __ vmsr(r2); + + // Load value, convert, and move back result to r0 if everything went well. + __ vmov(d1, value); + switch (types) { + case s32_f64: + __ vcvt_s32_f64(s0, d1, kFPSCRRounding); + break; + + case u32_f64: + __ vcvt_u32_f64(s0, d1, kFPSCRRounding); + break; + + default: + UNREACHABLE(); + break; + } + // Check for vfp exceptions + __ vmrs(r2); + __ tst(r2, Operand(kVFPExceptionMask)); + // Check that we behaved as expected. + __ b(&wrong_exception, + expected_exception ? eq : ne); + // There was no exception. Retrieve the result and return. + __ vmov(r0, s0); + __ mov(pc, Operand(lr)); + + // The exception behaviour is not what we expected. + // Load a special value and return. 
+ __ bind(&wrong_exception); + __ mov(r0, Operand(11223344)); + __ mov(pc, Operand(lr)); + + CodeDesc desc; + assm.GetCode(&desc); + Object* code = isolate->heap()->CreateCode( + desc, + Code::ComputeFlags(Code::STUB), + Handle())->ToObjectChecked(); + CHECK(code->IsCode()); +#ifdef DEBUG + Code::cast(code)->Print(); +#endif + F1 f = FUNCTION_CAST(Code::cast(code)->entry()); + int res = reinterpret_cast( + CALL_GENERATED_CODE(f, 0, 0, 0, 0, 0)); + ::printf("res = %d\n", res); + CHECK_EQ(expected, res); + } +} + + +TEST(7) { + // Test vfp rounding modes. + + // s32_f64 (double to integer). + + TestRoundingMode(s32_f64, RN, 0, 0); + TestRoundingMode(s32_f64, RN, 0.5, 0); + TestRoundingMode(s32_f64, RN, -0.5, 0); + TestRoundingMode(s32_f64, RN, 1.5, 2); + TestRoundingMode(s32_f64, RN, -1.5, -2); + TestRoundingMode(s32_f64, RN, 123.7, 124); + TestRoundingMode(s32_f64, RN, -123.7, -124); + TestRoundingMode(s32_f64, RN, 123456.2, 123456); + TestRoundingMode(s32_f64, RN, -123456.2, -123456); + TestRoundingMode(s32_f64, RN, static_cast(kMaxInt), kMaxInt); + TestRoundingMode(s32_f64, RN, (kMaxInt + 0.49), kMaxInt); + TestRoundingMode(s32_f64, RN, (kMaxInt + 1.0), kMaxInt, true); + TestRoundingMode(s32_f64, RN, (kMaxInt + 0.5), kMaxInt, true); + TestRoundingMode(s32_f64, RN, static_cast(kMinInt), kMinInt); + TestRoundingMode(s32_f64, RN, (kMinInt - 0.5), kMinInt); + TestRoundingMode(s32_f64, RN, (kMinInt - 1.0), kMinInt, true); + TestRoundingMode(s32_f64, RN, (kMinInt - 0.51), kMinInt, true); + + TestRoundingMode(s32_f64, RM, 0, 0); + TestRoundingMode(s32_f64, RM, 0.5, 0); + TestRoundingMode(s32_f64, RM, -0.5, -1); + TestRoundingMode(s32_f64, RM, 123.7, 123); + TestRoundingMode(s32_f64, RM, -123.7, -124); + TestRoundingMode(s32_f64, RM, 123456.2, 123456); + TestRoundingMode(s32_f64, RM, -123456.2, -123457); + TestRoundingMode(s32_f64, RM, static_cast(kMaxInt), kMaxInt); + TestRoundingMode(s32_f64, RM, (kMaxInt + 0.5), kMaxInt); + TestRoundingMode(s32_f64, RM, (kMaxInt + 
1.0), kMaxInt, true); + TestRoundingMode(s32_f64, RM, static_cast(kMinInt), kMinInt); + TestRoundingMode(s32_f64, RM, (kMinInt - 0.5), kMinInt, true); + TestRoundingMode(s32_f64, RM, (kMinInt + 0.5), kMinInt); + + TestRoundingMode(s32_f64, RZ, 0, 0); + TestRoundingMode(s32_f64, RZ, 0.5, 0); + TestRoundingMode(s32_f64, RZ, -0.5, 0); + TestRoundingMode(s32_f64, RZ, 123.7, 123); + TestRoundingMode(s32_f64, RZ, -123.7, -123); + TestRoundingMode(s32_f64, RZ, 123456.2, 123456); + TestRoundingMode(s32_f64, RZ, -123456.2, -123456); + TestRoundingMode(s32_f64, RZ, static_cast(kMaxInt), kMaxInt); + TestRoundingMode(s32_f64, RZ, (kMaxInt + 0.5), kMaxInt); + TestRoundingMode(s32_f64, RZ, (kMaxInt + 1.0), kMaxInt, true); + TestRoundingMode(s32_f64, RZ, static_cast(kMinInt), kMinInt); + TestRoundingMode(s32_f64, RZ, (kMinInt - 0.5), kMinInt); + TestRoundingMode(s32_f64, RZ, (kMinInt - 1.0), kMinInt, true); + + + // u32_f64 (double to integer). + + // Negative values. + TestRoundingMode(u32_f64, RN, -0.5, 0); + TestRoundingMode(u32_f64, RN, -123456.7, 0, true); + TestRoundingMode(u32_f64, RN, static_cast(kMinInt), 0, true); + TestRoundingMode(u32_f64, RN, kMinInt - 1.0, 0, true); + + TestRoundingMode(u32_f64, RM, -0.5, 0, true); + TestRoundingMode(u32_f64, RM, -123456.7, 0, true); + TestRoundingMode(u32_f64, RM, static_cast(kMinInt), 0, true); + TestRoundingMode(u32_f64, RM, kMinInt - 1.0, 0, true); + + TestRoundingMode(u32_f64, RZ, -0.5, 0); + TestRoundingMode(u32_f64, RZ, -123456.7, 0, true); + TestRoundingMode(u32_f64, RZ, static_cast(kMinInt), 0, true); + TestRoundingMode(u32_f64, RZ, kMinInt - 1.0, 0, true); + + // Positive values. + // kMaxInt is the maximum *signed* integer: 0x7fffffff. 
+ static const uint32_t kMaxUInt = 0xffffffffu; + TestRoundingMode(u32_f64, RZ, 0, 0); + TestRoundingMode(u32_f64, RZ, 0.5, 0); + TestRoundingMode(u32_f64, RZ, 123.7, 123); + TestRoundingMode(u32_f64, RZ, 123456.2, 123456); + TestRoundingMode(u32_f64, RZ, static_cast(kMaxInt), kMaxInt); + TestRoundingMode(u32_f64, RZ, (kMaxInt + 0.5), kMaxInt); + TestRoundingMode(u32_f64, RZ, (kMaxInt + 1.0), + static_cast(kMaxInt) + 1); + TestRoundingMode(u32_f64, RZ, (kMaxUInt + 0.5), kMaxUInt); + TestRoundingMode(u32_f64, RZ, (kMaxUInt + 1.0), kMaxUInt, true); + + TestRoundingMode(u32_f64, RM, 0, 0); + TestRoundingMode(u32_f64, RM, 0.5, 0); + TestRoundingMode(u32_f64, RM, 123.7, 123); + TestRoundingMode(u32_f64, RM, 123456.2, 123456); + TestRoundingMode(u32_f64, RM, static_cast(kMaxInt), kMaxInt); + TestRoundingMode(u32_f64, RM, (kMaxInt + 0.5), kMaxInt); + TestRoundingMode(u32_f64, RM, (kMaxInt + 1.0), + static_cast(kMaxInt) + 1); + TestRoundingMode(u32_f64, RM, (kMaxUInt + 0.5), kMaxUInt); + TestRoundingMode(u32_f64, RM, (kMaxUInt + 1.0), kMaxUInt, true); + + TestRoundingMode(u32_f64, RN, 0, 0); + TestRoundingMode(u32_f64, RN, 0.5, 0); + TestRoundingMode(u32_f64, RN, 1.5, 2); + TestRoundingMode(u32_f64, RN, 123.7, 124); + TestRoundingMode(u32_f64, RN, 123456.2, 123456); + TestRoundingMode(u32_f64, RN, static_cast(kMaxInt), kMaxInt); + TestRoundingMode(u32_f64, RN, (kMaxInt + 0.49), kMaxInt); + TestRoundingMode(u32_f64, RN, (kMaxInt + 0.5), + static_cast(kMaxInt) + 1); + TestRoundingMode(u32_f64, RN, (kMaxUInt + 0.49), kMaxUInt); + TestRoundingMode(u32_f64, RN, (kMaxUInt + 0.5), kMaxUInt, true); + TestRoundingMode(u32_f64, RN, (kMaxUInt + 1.0), kMaxUInt, true); +} + + +TEST(8) { + // Test VFP multi load/store with ia_w. 
+ CcTest::InitializeVM(); + Isolate* isolate = Isolate::Current(); + HandleScope scope(isolate); + + typedef struct { + double a; + double b; + double c; + double d; + double e; + double f; + double g; + double h; + } D; + D d; + + typedef struct { + float a; + float b; + float c; + float d; + float e; + float f; + float g; + float h; + } F; + F f; + + // Create a function that uses vldm/vstm to move some double and + // single precision values around in memory. + Assembler assm(isolate, NULL, 0); + + if (CpuFeatures::IsSupported(VFP2)) { + CpuFeatures::Scope scope(VFP2); + + __ mov(ip, Operand(sp)); + __ stm(db_w, sp, r4.bit() | fp.bit() | lr.bit()); + __ sub(fp, ip, Operand(4)); + + __ addi(r4, r0, Operand(OFFSET_OF(D, a))); + __ vldm(ia_w, r4, d0, d3); + __ vldm(ia_w, r4, d4, d7); + + __ addi(r4, r0, Operand(OFFSET_OF(D, a))); + __ vstm(ia_w, r4, d6, d7); + __ vstm(ia_w, r4, d0, d5); + + __ addi(r4, r1, Operand(OFFSET_OF(F, a))); + __ vldm(ia_w, r4, s0, s3); + __ vldm(ia_w, r4, s4, s7); + + __ addi(r4, r1, Operand(OFFSET_OF(F, a))); + __ vstm(ia_w, r4, s6, s7); + __ vstm(ia_w, r4, s0, s5); + + __ ldm(ia_w, sp, r4.bit() | fp.bit() | pc.bit()); + + CodeDesc desc; + assm.GetCode(&desc); + Object* code = isolate->heap()->CreateCode( + desc, + Code::ComputeFlags(Code::STUB), + Handle())->ToObjectChecked(); + CHECK(code->IsCode()); +#ifdef DEBUG + Code::cast(code)->Print(); +#endif + F4 fn = FUNCTION_CAST(Code::cast(code)->entry()); + d.a = 1.1; + d.b = 2.2; + d.c = 3.3; + d.d = 4.4; + d.e = 5.5; + d.f = 6.6; + d.g = 7.7; + d.h = 8.8; + + f.a = 1.0; + f.b = 2.0; + f.c = 3.0; + f.d = 4.0; + f.e = 5.0; + f.f = 6.0; + f.g = 7.0; + f.h = 8.0; + + Object* dummy = CALL_GENERATED_CODE(fn, &d, &f, 0, 0, 0); + USE(dummy); + + CHECK_EQ(7.7, d.a); + CHECK_EQ(8.8, d.b); + CHECK_EQ(1.1, d.c); + CHECK_EQ(2.2, d.d); + CHECK_EQ(3.3, d.e); + CHECK_EQ(4.4, d.f); + CHECK_EQ(5.5, d.g); + CHECK_EQ(6.6, d.h); + + CHECK_EQ(7.0, f.a); + CHECK_EQ(8.0, f.b); + CHECK_EQ(1.0, f.c); + 
CHECK_EQ(2.0, f.d); + CHECK_EQ(3.0, f.e); + CHECK_EQ(4.0, f.f); + CHECK_EQ(5.0, f.g); + CHECK_EQ(6.0, f.h); + } +} + + +TEST(9) { + // Test VFP multi load/store with ia. + CcTest::InitializeVM(); + Isolate* isolate = Isolate::Current(); + HandleScope scope(isolate); + + typedef struct { + double a; + double b; + double c; + double d; + double e; + double f; + double g; + double h; + } D; + D d; + + typedef struct { + float a; + float b; + float c; + float d; + float e; + float f; + float g; + float h; + } F; + F f; + + // Create a function that uses vldm/vstm to move some double and + // single precision values around in memory. + Assembler assm(isolate, NULL, 0); + + if (CpuFeatures::IsSupported(VFP2)) { + CpuFeatures::Scope scope(VFP2); + + __ mov(ip, Operand(sp)); + __ stm(db_w, sp, r4.bit() | fp.bit() | lr.bit()); + __ sub(fp, ip, Operand(4)); + + __ addi(r4, r0, Operand(OFFSET_OF(D, a))); + __ vldm(ia, r4, d0, d3); + __ addi(r4, r4, Operand(4 * 8)); + __ vldm(ia, r4, d4, d7); + + __ addi(r4, r0, Operand(OFFSET_OF(D, a))); + __ vstm(ia, r4, d6, d7); + __ addi(r4, r4, Operand(2 * 8)); + __ vstm(ia, r4, d0, d5); + + __ addi(r4, r1, Operand(OFFSET_OF(F, a))); + __ vldm(ia, r4, s0, s3); + __ addi(r4, r4, Operand(4 * 4)); + __ vldm(ia, r4, s4, s7); + + __ addi(r4, r1, Operand(OFFSET_OF(F, a))); + __ vstm(ia, r4, s6, s7); + __ addi(r4, r4, Operand(2 * 4)); + __ vstm(ia, r4, s0, s5); + + __ ldm(ia_w, sp, r4.bit() | fp.bit() | pc.bit()); + + CodeDesc desc; + assm.GetCode(&desc); + Object* code = isolate->heap()->CreateCode( + desc, + Code::ComputeFlags(Code::STUB), + Handle())->ToObjectChecked(); + CHECK(code->IsCode()); +#ifdef DEBUG + Code::cast(code)->Print(); +#endif + F4 fn = FUNCTION_CAST(Code::cast(code)->entry()); + d.a = 1.1; + d.b = 2.2; + d.c = 3.3; + d.d = 4.4; + d.e = 5.5; + d.f = 6.6; + d.g = 7.7; + d.h = 8.8; + + f.a = 1.0; + f.b = 2.0; + f.c = 3.0; + f.d = 4.0; + f.e = 5.0; + f.f = 6.0; + f.g = 7.0; + f.h = 8.0; + + Object* dummy = 
CALL_GENERATED_CODE(fn, &d, &f, 0, 0, 0); + USE(dummy); + + CHECK_EQ(7.7, d.a); + CHECK_EQ(8.8, d.b); + CHECK_EQ(1.1, d.c); + CHECK_EQ(2.2, d.d); + CHECK_EQ(3.3, d.e); + CHECK_EQ(4.4, d.f); + CHECK_EQ(5.5, d.g); + CHECK_EQ(6.6, d.h); + + CHECK_EQ(7.0, f.a); + CHECK_EQ(8.0, f.b); + CHECK_EQ(1.0, f.c); + CHECK_EQ(2.0, f.d); + CHECK_EQ(3.0, f.e); + CHECK_EQ(4.0, f.f); + CHECK_EQ(5.0, f.g); + CHECK_EQ(6.0, f.h); + } +} + + +TEST(10) { + // Test VFP multi load/store with db_w. + CcTest::InitializeVM(); + Isolate* isolate = Isolate::Current(); + HandleScope scope(isolate); + + typedef struct { + double a; + double b; + double c; + double d; + double e; + double f; + double g; + double h; + } D; + D d; + + typedef struct { + float a; + float b; + float c; + float d; + float e; + float f; + float g; + float h; + } F; + F f; + + // Create a function that uses vldm/vstm to move some double and + // single precision values around in memory. + Assembler assm(isolate, NULL, 0); + + if (CpuFeatures::IsSupported(VFP2)) { + CpuFeatures::Scope scope(VFP2); + + __ mov(ip, Operand(sp)); + __ stm(db_w, sp, r4.bit() | fp.bit() | lr.bit()); + __ sub(fp, ip, Operand(4)); + + __ addi(r4, r0, Operand(OFFSET_OF(D, h) + 8)); + __ vldm(db_w, r4, d4, d7); + __ vldm(db_w, r4, d0, d3); + + __ addi(r4, r0, Operand(OFFSET_OF(D, h) + 8)); + __ vstm(db_w, r4, d0, d5); + __ vstm(db_w, r4, d6, d7); + + __ addi(r4, r1, Operand(OFFSET_OF(F, h) + 4)); + __ vldm(db_w, r4, s4, s7); + __ vldm(db_w, r4, s0, s3); + + __ addi(r4, r1, Operand(OFFSET_OF(F, h) + 4)); + __ vstm(db_w, r4, s0, s5); + __ vstm(db_w, r4, s6, s7); + + __ ldm(ia_w, sp, r4.bit() | fp.bit() | pc.bit()); + + CodeDesc desc; + assm.GetCode(&desc); + Object* code = isolate->heap()->CreateCode( + desc, + Code::ComputeFlags(Code::STUB), + Handle())->ToObjectChecked(); + CHECK(code->IsCode()); +#ifdef DEBUG + Code::cast(code)->Print(); +#endif + F4 fn = FUNCTION_CAST(Code::cast(code)->entry()); + d.a = 1.1; + d.b = 2.2; + d.c = 3.3; + d.d = 4.4; 
+ d.e = 5.5; + d.f = 6.6; + d.g = 7.7; + d.h = 8.8; + + f.a = 1.0; + f.b = 2.0; + f.c = 3.0; + f.d = 4.0; + f.e = 5.0; + f.f = 6.0; + f.g = 7.0; + f.h = 8.0; + + Object* dummy = CALL_GENERATED_CODE(fn, &d, &f, 0, 0, 0); + USE(dummy); + + CHECK_EQ(7.7, d.a); + CHECK_EQ(8.8, d.b); + CHECK_EQ(1.1, d.c); + CHECK_EQ(2.2, d.d); + CHECK_EQ(3.3, d.e); + CHECK_EQ(4.4, d.f); + CHECK_EQ(5.5, d.g); + CHECK_EQ(6.6, d.h); + + CHECK_EQ(7.0, f.a); + CHECK_EQ(8.0, f.b); + CHECK_EQ(1.0, f.c); + CHECK_EQ(2.0, f.d); + CHECK_EQ(3.0, f.e); + CHECK_EQ(4.0, f.f); + CHECK_EQ(5.0, f.g); + CHECK_EQ(6.0, f.h); + } +} + + +TEST(11) { + // Test instructions using the carry flag. + CcTest::InitializeVM(); + Isolate* isolate = Isolate::Current(); + HandleScope scope(isolate); + + typedef struct { + int32_t a; + int32_t b; + int32_t c; + int32_t d; + } I; + I i; + + i.a = 0xabcd0001; + i.b = 0xabcd0000; + + Assembler assm(isolate, NULL, 0); + + // Test HeapObject untagging. + __ ldr(r1, MemOperand(r0, OFFSET_OF(I, a))); + __ mov(r1, Operand(r1, ASR, 1), SetCC); + __ adc(r1, r1, Operand(r1), LeaveCC, cs); + __ str(r1, MemOperand(r0, OFFSET_OF(I, a))); + + __ ldr(r2, MemOperand(r0, OFFSET_OF(I, b))); + __ mov(r2, Operand(r2, ASR, 1), SetCC); + __ adc(r2, r2, Operand(r2), LeaveCC, cs); + __ str(r2, MemOperand(r0, OFFSET_OF(I, b))); + + // Test corner cases. + __ mov(r1, Operand(0xffffffff)); + __ mov(r2, Operand::Zero()); + __ mov(r3, Operand(r1, ASR, 1), SetCC); // Set the carry. + __ adc(r3, r1, Operand(r2)); + __ str(r3, MemOperand(r0, OFFSET_OF(I, c))); + + __ mov(r1, Operand(0xffffffff)); + __ mov(r2, Operand::Zero()); + __ mov(r3, Operand(r2, ASR, 1), SetCC); // Unset the carry. 
+ __ adc(r3, r1, Operand(r2)); + __ str(r3, MemOperand(r0, OFFSET_OF(I, d))); + + __ mov(pc, Operand(lr)); + + CodeDesc desc; + assm.GetCode(&desc); + Object* code = isolate->heap()->CreateCode( + desc, + Code::ComputeFlags(Code::STUB), + Handle())->ToObjectChecked(); + CHECK(code->IsCode()); +#ifdef DEBUG + Code::cast(code)->Print(); +#endif + F3 f = FUNCTION_CAST(Code::cast(code)->entry()); + Object* dummy = CALL_GENERATED_CODE(f, &i, 0, 0, 0, 0); + USE(dummy); + + CHECK_EQ(0xabcd0001, i.a); + CHECK_EQ(static_cast(0xabcd0000) >> 1, i.b); + CHECK_EQ(0x00000000, i.c); + CHECK_EQ(0xffffffff, i.d); +} + + +TEST(12) { + // Test chaining of label usages within instructions (issue 1644). + CcTest::InitializeVM(); + Isolate* isolate = Isolate::Current(); + HandleScope scope(isolate); + + Assembler assm(isolate, NULL, 0); + Label target; + __ b(eq, &target); + __ b(ne, &target); + __ bind(&target); + __ nop(); +} +#endif + +#undef __ diff --git a/test/cctest/test-disasm-ppc.cc b/test/cctest/test-disasm-ppc.cc new file mode 100644 index 000000000..87b9ade05 --- /dev/null +++ b/test/cctest/test-disasm-ppc.cc @@ -0,0 +1,155 @@ +// Copyright 2011 the V8 project authors. All rights reserved. +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// + +#include + +#include "src/v8.h" + +#include "src/debug.h" +#include "src/disasm.h" +#include "src/disassembler.h" +#include "src/macro-assembler.h" +#include "src/serialize.h" +#include "test/cctest/cctest.h" + +using namespace v8::internal; + + +bool DisassembleAndCompare(byte* pc, const char* compare_string) { + disasm::NameConverter converter; + disasm::Disassembler disasm(converter); + EmbeddedVector disasm_buffer; + + disasm.InstructionDecode(disasm_buffer, pc); + + if (strcmp(compare_string, disasm_buffer.start()) != 0) { + fprintf(stderr, + "expected: \n" + "%s\n" + "disassembled: \n" + "%s\n\n", + compare_string, disasm_buffer.start()); + return false; + } + return true; +} + + +// Set up V8 to a state where we can at least run the assembler and +// disassembler. Declare the variables and allocate the data structures used +// in the rest of the macros. 
+#define SET_UP()                                             \
+  CcTest::InitializeVM();                                    \
+  Isolate* isolate = Isolate::Current();                     \
+  HandleScope scope(isolate);                                \
+  byte* buffer = reinterpret_cast<byte*>(malloc(4 * 1024));  \
+  Assembler assm(isolate, buffer, 4 * 1024);                 \
+  bool failure = false;
+
+
+// This macro assembles one instruction using the preallocated assembler and
+// disassembles the generated instruction, comparing the output to the expected
+// value. If the comparison fails an error message is printed, but the test
+// continues to run until the end.
+#define COMPARE(asm_, compare_string)                                        \
+  {                                                                          \
+    int pc_offset = assm.pc_offset();                                        \
+    byte* progcounter = &buffer[pc_offset];                                  \
+    assm.asm_;                                                               \
+    if (!DisassembleAndCompare(progcounter, compare_string)) failure = true; \
+  }
+
+// Force emission of any pending literals into a pool.
+#define EMIT_PENDING_LITERALS() assm.CheckConstPool(true, false)
+
+
+// Verify that all invocations of the COMPARE macro passed successfully.
+// Exit with a failure if at least one of the tests failed.
+#define VERIFY_RUN()                                                  \
+  if (failure) {                                                      \
+    V8_Fatal(__FILE__, __LINE__, "PPC Disassembler tests failed.\n"); \
+  }
+
+TEST(DisasmPPC) {
+  SET_UP();
+
+  COMPARE(addc(r9, r7, r9), "7d274814       addc    r9, r7, r9");
+  COMPARE(addic(r3, r5, Operand(20)), "30650014       addic   r3, r5, 20");
+  COMPARE(addi(r0, ip, Operand(63)), "380c003f       addi    r0, r12, 63");
+  COMPARE(add(r5, r7, r0), "7ca70214       add     r5, r7, r0");
+  COMPARE(addze(r0, r0, LeaveOE, SetRC), "7c000195       addze.  r0, r0");
+  COMPARE(andi(r0, r3, Operand(4)), "70600004       andi.   r0, r3, 4");
+  COMPARE(and_(r3, r6, r5), "7cc32838       and     r3, r6, r5");
+  COMPARE(and_(r6, r0, r6, SetRC), "7c063039       and.    r6, r0, r6");
+  // skipping branches (for now?)
+ COMPARE(bctr(), "4e800420 bctr"); + COMPARE(blr(), "4e800020 blr"); + COMPARE(bclr(BA, SetLK), "4e800021 blrl"); +// skipping call - only used in simulator +#if V8_TARGET_ARCH_PPC64 + COMPARE(cmpi(r0, Operand(5)), "2fa00005 cmpi r0, 5"); +#else + COMPARE(cmpi(r0, Operand(5)), "2f800005 cmpi r0, 5"); +#endif +#if V8_TARGET_ARCH_PPC64 + COMPARE(cmpl(r6, r7), "7fa63840 cmpl r6, r7"); +#else + COMPARE(cmpl(r6, r7), "7f863840 cmpl r6, r7"); +#endif +#if V8_TARGET_ARCH_PPC64 + COMPARE(cmp(r5, r11), "7fa55800 cmp r5, r11"); +#else + COMPARE(cmp(r5, r11), "7f855800 cmp r5, r11"); +#endif + // skipping crxor - incomplete disassembly + COMPARE(lbz(r4, MemOperand(r4, 7)), "88840007 lbz r4, 7(r4)"); + COMPARE(lfd(d0, MemOperand(sp, 128)), "c8010080 lfd d0, 128(sp)"); + COMPARE(li(r0, Operand(16)), "38000010 li r0, 16"); + COMPARE(lis(r8, Operand(22560)), "3d005820 lis r8, 22560"); + COMPARE(lwz(ip, MemOperand(r19, 44)), "8193002c lwz r12, 44(r19)"); + COMPARE(lwzx(r0, MemOperand(r5, ip)), "7c05602e lwzx r0, r5, r12"); + COMPARE(mflr(r0), "7c0802a6 mflr r0"); + COMPARE(mr(r15, r4), "7c8f2378 mr r15, r4"); + COMPARE(mtctr(r0), "7c0903a6 mtctr r0"); + COMPARE(mtlr(r15), "7de803a6 mtlr r15"); + COMPARE(ori(r8, r8, Operand(42849)), "6108a761 ori r8, r8, 42849"); + COMPARE(orx(r5, r3, r4), "7c652378 or r5, r3, r4"); + COMPARE(rlwinm(r4, r3, 2, 0, 29), "5464103a rlwinm r4, r3, 2, 0, 29"); + COMPARE(rlwinm(r0, r3, 0, 31, 31, SetRC), + "546007ff rlwinm. r0, r3, 0, 31, 31"); + COMPARE(srawi(r3, r6, 1), "7cc30e70 srawi r3,r6,1"); + COMPARE(stb(r5, MemOperand(r11, 11)), "98ab000b stb r5, 11(r11)"); + COMPARE(stfd(d2, MemOperand(sp, 8)), "d8410008 stfd d2, 8(sp)"); + COMPARE(stw(r16, MemOperand(sp, 64)), "92010040 stw r16, 64(sp)"); + COMPARE(stwu(r3, MemOperand(sp, -4)), "9461fffc stwu r3, -4(sp)"); + COMPARE(sub(r3, r3, r4), "7c641850 subf r3, r4, r3"); + COMPARE(sub(r0, r9, r8, LeaveOE, SetRC), "7c084851 subf. 
r0, r8, r9"); + COMPARE(xor_(r6, r5, r4), "7ca62278 xor r6, r5, r4"); + + VERIFY_RUN(); +} diff --git a/test/cctest/test-hashing.cc b/test/cctest/test-hashing.cc index 692861cfe..c8ae4f30e 100644 --- a/test/cctest/test-hashing.cc +++ b/test/cctest/test-hashing.cc @@ -90,6 +90,14 @@ void generate(MacroAssembler* masm, uint32_t key) { __ pop(kRootRegister); __ jr(ra); __ nop(); +#elif V8_TARGET_ARCH_PPC + __ function_descriptor(); + __ push(kRootRegister); + __ InitializeRootRegister(); + __ li(r3, Operand(key)); + __ GetNumberHash(r3, ip); + __ pop(kRootRegister); + __ blr(); #else #error Unsupported architecture. #endif diff --git a/test/cctest/test-heap.cc b/test/cctest/test-heap.cc index 5a01e12f1..aca5d1a77 100644 --- a/test/cctest/test-heap.cc +++ b/test/cctest/test-heap.cc @@ -161,8 +161,7 @@ TEST(HeapObjects) { CHECK(value->IsNumber()); CHECK_EQ(Smi::kMaxValue, Handle::cast(value)->value()); -#if !defined(V8_TARGET_ARCH_X64) && !defined(V8_TARGET_ARCH_ARM64) && \ - !defined(V8_TARGET_ARCH_MIPS64) +#if !defined(V8_TARGET_ARCH_64_BIT) // TODO(lrn): We need a NumberFromIntptr function in order to test this. 
value = factory->NewNumberFromInt(Smi::kMinValue - 1); CHECK(value->IsHeapNumber()); diff --git a/test/cctest/test-platform.cc b/test/cctest/test-platform.cc index 90926d1a9..a786d368e 100644 --- a/test/cctest/test-platform.cc +++ b/test/cctest/test-platform.cc @@ -24,6 +24,10 @@ void GetStackPointer(const v8::FunctionCallbackInfo& args) { __asm__ __volatile__("sw $sp, %0" : "=g"(sp_addr)); #elif V8_HOST_ARCH_MIPS64 __asm__ __volatile__("sd $sp, %0" : "=g"(sp_addr)); +#elif defined(__PPC64__) || defined(_ARCH_PPC64) + __asm__ __volatile__("std 1, %0" : "=g"(sp_addr)); +#elif defined(__PPC__) || defined(_ARCH_PPC) + __asm__ __volatile__("stw 1, %0" : "=g"(sp_addr)); #else #error Host architecture was not detected as supported by v8 #endif diff --git a/test/cctest/test-regexp.cc b/test/cctest/test-regexp.cc index 4ca5c3626..8afde4635 100644 --- a/test/cctest/test-regexp.cc +++ b/test/cctest/test-regexp.cc @@ -53,6 +53,11 @@ #include "src/arm64/macro-assembler-arm64.h" #include "src/arm64/regexp-macro-assembler-arm64.h" #endif +#if V8_TARGET_ARCH_PPC +#include "src/ppc/assembler-ppc.h" +#include "src/ppc/macro-assembler-ppc.h" +#include "src/ppc/regexp-macro-assembler-ppc.h" +#endif #if V8_TARGET_ARCH_MIPS #include "src/mips/assembler-mips.h" #include "src/mips/macro-assembler-mips.h" @@ -687,6 +692,8 @@ typedef RegExpMacroAssemblerX64 ArchRegExpMacroAssembler; typedef RegExpMacroAssemblerARM ArchRegExpMacroAssembler; #elif V8_TARGET_ARCH_ARM64 typedef RegExpMacroAssemblerARM64 ArchRegExpMacroAssembler; +#elif V8_TARGET_ARCH_PPC +typedef RegExpMacroAssemblerPPC ArchRegExpMacroAssembler; #elif V8_TARGET_ARCH_MIPS typedef RegExpMacroAssemblerMIPS ArchRegExpMacroAssembler; #elif V8_TARGET_ARCH_MIPS64 diff --git a/test/cctest/test-strings.cc b/test/cctest/test-strings.cc index d1f23f75a..c77fc36a0 100644 --- a/test/cctest/test-strings.cc +++ b/test/cctest/test-strings.cc @@ -1445,6 +1445,7 @@ TEST(InvalidExternalString) { static const int invalid = String::kMaxLength + 1; 
\ HandleScope scope(isolate); \ Vector dummy = Vector::New(invalid); \ + memset(dummy.start(), 0x0, dummy.length() * sizeof(TYPE)); \ CHECK(isolate->factory()->FUN(Vector::cast(dummy)).is_null()); \ memset(dummy.start(), 0x20, dummy.length() * sizeof(TYPE)); \ CHECK(isolate->has_pending_exception()); \ diff --git a/test/mjsunit/big-array-literal.js b/test/mjsunit/big-array-literal.js index 401807f68..7e19c0a2d 100644 --- a/test/mjsunit/big-array-literal.js +++ b/test/mjsunit/big-array-literal.js @@ -26,6 +26,7 @@ // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // On MacOS X 10.7.5, this test needs a stack size of at least 788 kBytes. +// On PPC64, this test needs a stack size of at least 698 kBytes. // Flags: --stack-size=800 // Flags: --turbo-deoptimization diff --git a/test/mjsunit/regress/regress-crbug-178790.js b/test/mjsunit/regress/regress-crbug-178790.js index 25cc96b85..6b5c77b9a 100644 --- a/test/mjsunit/regress/regress-crbug-178790.js +++ b/test/mjsunit/regress/regress-crbug-178790.js @@ -25,6 +25,8 @@ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Flags: --stack-size=1300 + // Create a regexp in the form of a?a?...a? so that fully // traversing the entire graph would be prohibitively expensive. // This should not cause time out. diff --git a/test/unittests/unittests.gyp b/test/unittests/unittests.gyp index fe4a12795..fd47b56a5 100644 --- a/test/unittests/unittests.gyp +++ b/test/unittests/unittests.gyp @@ -106,6 +106,11 @@ 'compiler/x64/instruction-selector-x64-unittest.cc', ], }], + ['v8_target_arch=="ppc" or v8_target_arch=="ppc64"', { + 'sources': [ ### gcmole(arch:ppc) ### + 'compiler/ppc/instruction-selector-ppc-unittest.cc', + ], + }], ['component=="shared_library"', { # compiler-unittests can't be built against a shared library, so we # need to depend on the underlying static target in that case. 
diff --git a/tools/gyp/v8.gyp b/tools/gyp/v8.gyp index 2472b3335..c9b1eb3b1 100644 --- a/tools/gyp/v8.gyp +++ b/tools/gyp/v8.gyp @@ -1203,6 +1203,49 @@ '../../src/compiler/x64/linkage-x64.cc', ], }], + ['v8_target_arch=="ppc" or v8_target_arch=="ppc64"', { + 'sources': [ ### gcmole(arch:ppc) ### + '../../src/ppc/assembler-ppc-inl.h', + '../../src/ppc/assembler-ppc.cc', + '../../src/ppc/assembler-ppc.h', + '../../src/ppc/builtins-ppc.cc', + '../../src/ppc/code-stubs-ppc.cc', + '../../src/ppc/code-stubs-ppc.h', + '../../src/ppc/codegen-ppc.cc', + '../../src/ppc/codegen-ppc.h', + '../../src/ppc/constants-ppc.h', + '../../src/ppc/constants-ppc.cc', + '../../src/ppc/cpu-ppc.cc', + '../../src/ppc/debug-ppc.cc', + '../../src/ppc/deoptimizer-ppc.cc', + '../../src/ppc/disasm-ppc.cc', + '../../src/ppc/frames-ppc.cc', + '../../src/ppc/frames-ppc.h', + '../../src/ppc/full-codegen-ppc.cc', + '../../src/ppc/interface-descriptors-ppc.cc', + '../../src/ppc/interface-descriptors-ppc.h', + '../../src/ppc/lithium-ppc.cc', + '../../src/ppc/lithium-ppc.h', + '../../src/ppc/lithium-codegen-ppc.cc', + '../../src/ppc/lithium-codegen-ppc.h', + '../../src/ppc/lithium-gap-resolver-ppc.cc', + '../../src/ppc/lithium-gap-resolver-ppc.h', + '../../src/ppc/macro-assembler-ppc.cc', + '../../src/ppc/macro-assembler-ppc.h', + '../../src/ppc/regexp-macro-assembler-ppc.cc', + '../../src/ppc/regexp-macro-assembler-ppc.h', + '../../src/ppc/simulator-ppc.cc', + '../../src/compiler/ppc/code-generator-ppc.cc', + '../../src/compiler/ppc/instruction-codes-ppc.h', + '../../src/compiler/ppc/instruction-selector-ppc.cc', + '../../src/compiler/ppc/linkage-ppc.cc', + '../../src/ic/ppc/access-compiler-ppc.cc', + '../../src/ic/ppc/handler-compiler-ppc.cc', + '../../src/ic/ppc/ic-ppc.cc', + '../../src/ic/ppc/ic-compiler-ppc.cc', + '../../src/ic/ppc/stub-cache-ppc.cc', + ], + }], ['OS=="win"', { 'variables': { 'gyp_generators': '