--- /dev/null
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/code-generator.h"
+
+#include "src/compiler/code-generator-impl.h"
+#include "src/compiler/gap-resolver.h"
+#include "src/compiler/node-matchers.h"
+#include "src/ppc/macro-assembler-ppc.h"
+#include "src/scopes.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+#define __ masm()->
+
+
+#define kScratchReg r11
+
+
+// Adds PPC-specific methods to convert InstructionOperands.
+class PPCOperandConverter FINAL : public InstructionOperandConverter {
+ public:
+ PPCOperandConverter(CodeGenerator* gen, Instruction* instr)
+ : InstructionOperandConverter(gen, instr) {}
+
+ RCBit OutputRCBit() const {
+ switch (instr_->flags_mode()) {
+ case kFlags_branch:
+ case kFlags_set:
+ return SetRC;
+ case kFlags_none:
+ return LeaveRC;
+ }
+ UNREACHABLE();
+ return LeaveRC;
+ }
+
+ bool CompareLogical() const {
+ switch (instr_->flags_condition()) {
+ case kUnsignedLessThan:
+ case kUnsignedGreaterThanOrEqual:
+ case kUnsignedLessThanOrEqual:
+ case kUnsignedGreaterThan:
+ return true;
+ default:
+ return false;
+ }
+ }
+
+ Operand InputImmediate(int index) {
+ Constant constant = ToConstant(instr_->InputAt(index));
+ switch (constant.type()) {
+ case Constant::kInt32:
+ return Operand(constant.ToInt32());
+ case Constant::kFloat32:
+ return Operand(
+ isolate()->factory()->NewNumber(constant.ToFloat32(), TENURED));
+ case Constant::kFloat64:
+ return Operand(
+ isolate()->factory()->NewNumber(constant.ToFloat64(), TENURED));
+ case Constant::kInt64:
+#if V8_TARGET_ARCH_PPC64
+ return Operand(constant.ToInt64());
+#endif
+ case Constant::kExternalReference:
+ case Constant::kHeapObject:
+ case Constant::kRpoNumber:
+ break;
+ }
+ UNREACHABLE();
+ return Operand::Zero();
+ }
+
+ MemOperand MemoryOperand(AddressingMode* mode, int* first_index) {
+ const int index = *first_index;
+ *mode = AddressingModeField::decode(instr_->opcode());
+ switch (*mode) {
+ case kMode_None:
+ break;
+ case kMode_MRI:
+ *first_index += 2;
+ return MemOperand(InputRegister(index + 0), InputInt32(index + 1));
+ case kMode_MRR:
+ *first_index += 2;
+ return MemOperand(InputRegister(index + 0), InputRegister(index + 1));
+ }
+ UNREACHABLE();
+ return MemOperand(r0);
+ }
+
+ MemOperand MemoryOperand(AddressingMode* mode, int first_index = 0) {
+ return MemoryOperand(mode, &first_index);
+ }
+
+ MemOperand ToMemOperand(InstructionOperand* op) const {
+ DCHECK(op != NULL);
+ DCHECK(!op->IsRegister());
+ DCHECK(!op->IsDoubleRegister());
+ DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
+ // The linkage computes where all spill slots are located.
+ FrameOffset offset = linkage()->GetFrameOffset(op->index(), frame(), 0);
+ return MemOperand(offset.from_stack_pointer() ? sp : fp, offset.offset());
+ }
+};
+
+
+static inline bool HasRegisterInput(Instruction* instr, int index) {
+ return instr->InputAt(index)->IsRegister();
+}
+
+
+namespace {
+
+class OutOfLineLoadNAN32 FINAL : public OutOfLineCode {
+ public:
+ OutOfLineLoadNAN32(CodeGenerator* gen, DoubleRegister result)
+ : OutOfLineCode(gen), result_(result) {}
+
+ void Generate() FINAL {
+ __ LoadDoubleLiteral(result_, std::numeric_limits<float>::quiet_NaN(),
+ kScratchReg);
+ }
+
+ private:
+ DoubleRegister const result_;
+};
+
+
+class OutOfLineLoadNAN64 FINAL : public OutOfLineCode {
+ public:
+ OutOfLineLoadNAN64(CodeGenerator* gen, DoubleRegister result)
+ : OutOfLineCode(gen), result_(result) {}
+
+ void Generate() FINAL {
+ __ LoadDoubleLiteral(result_, std::numeric_limits<double>::quiet_NaN(),
+ kScratchReg);
+ }
+
+ private:
+ DoubleRegister const result_;
+};
+
+
+class OutOfLineLoadZero FINAL : public OutOfLineCode {
+ public:
+ OutOfLineLoadZero(CodeGenerator* gen, Register result)
+ : OutOfLineCode(gen), result_(result) {}
+
+ void Generate() FINAL { __ li(result_, Operand::Zero()); }
+
+ private:
+ Register const result_;
+};
+
+
+Condition FlagsConditionToCondition(FlagsCondition condition) {
+ switch (condition) {
+ case kEqual:
+ return eq;
+ case kNotEqual:
+ return ne;
+ case kSignedLessThan:
+ case kUnsignedLessThan:
+ return lt;
+ case kSignedGreaterThanOrEqual:
+ case kUnsignedGreaterThanOrEqual:
+ return ge;
+ case kSignedLessThanOrEqual:
+ case kUnsignedLessThanOrEqual:
+ return le;
+ case kSignedGreaterThan:
+ case kUnsignedGreaterThan:
+ return gt;
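+    // The overflow conditions are realized differently per target: on PPC64
+    // the add/sub is followed by TestIfInt32, which compares the 64-bit
+    // result against its 32-bit sign extension (ne on mismatch), while on
+    // 32-bit the Add/SubAndCheckForOverflow helpers leave a negative value
+    // in cr0's source register on overflow (lt).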
+ case kOverflow:
+#if V8_TARGET_ARCH_PPC64
+ return ne;
+#else
+ return lt;
+#endif
+ case kNotOverflow:
+#if V8_TARGET_ARCH_PPC64
+ return eq;
+#else
+ return ge;
+#endif
+ case kUnorderedEqual:
+ case kUnorderedNotEqual:
+ break;
+ }
+ UNREACHABLE();
+ return kNoCondition;
+}
+
+} // namespace
+
+#define ASSEMBLE_FLOAT_UNOP_RC(asm_instr) \
+ do { \
+ __ asm_instr(i.OutputDoubleRegister(), i.InputDoubleRegister(0), \
+ i.OutputRCBit()); \
+ } while (0)
+
+
+#define ASSEMBLE_FLOAT_BINOP_RC(asm_instr) \
+ do { \
+ __ asm_instr(i.OutputDoubleRegister(), i.InputDoubleRegister(0), \
+ i.InputDoubleRegister(1), i.OutputRCBit()); \
+ } while (0)
+
+
+#define ASSEMBLE_BINOP(asm_instr_reg, asm_instr_imm) \
+ do { \
+ if (HasRegisterInput(instr, 1)) { \
+ __ asm_instr_reg(i.OutputRegister(), i.InputRegister(0), \
+ i.InputRegister(1)); \
+ } else { \
+ __ asm_instr_imm(i.OutputRegister(), i.InputRegister(0), \
+ i.InputImmediate(1)); \
+ } \
+ } while (0)
+
+
+#define ASSEMBLE_BINOP_RC(asm_instr_reg, asm_instr_imm) \
+ do { \
+ if (HasRegisterInput(instr, 1)) { \
+ __ asm_instr_reg(i.OutputRegister(), i.InputRegister(0), \
+ i.InputRegister(1), i.OutputRCBit()); \
+ } else { \
+ __ asm_instr_imm(i.OutputRegister(), i.InputRegister(0), \
+ i.InputImmediate(1), i.OutputRCBit()); \
+ } \
+ } while (0)
+
+
+#define ASSEMBLE_BINOP_INT_RC(asm_instr_reg, asm_instr_imm) \
+ do { \
+ if (HasRegisterInput(instr, 1)) { \
+ __ asm_instr_reg(i.OutputRegister(), i.InputRegister(0), \
+ i.InputRegister(1), i.OutputRCBit()); \
+ } else { \
+ __ asm_instr_imm(i.OutputRegister(), i.InputRegister(0), \
+ i.InputInt32(1), i.OutputRCBit()); \
+ } \
+ } while (0)
+
+
+#if V8_TARGET_ARCH_PPC64
+#define ASSEMBLE_ADD_WITH_OVERFLOW() \
+ do { \
+ ASSEMBLE_BINOP(add, addi); \
+ __ TestIfInt32(i.OutputRegister(), r0, cr0); \
+ } while (0)
+#else
+#define ASSEMBLE_ADD_WITH_OVERFLOW() \
+ do { \
+ if (HasRegisterInput(instr, 1)) { \
+ __ AddAndCheckForOverflow(i.OutputRegister(), i.InputRegister(0), \
+ i.InputRegister(1), kScratchReg, r0); \
+ } else { \
+ __ AddAndCheckForOverflow(i.OutputRegister(), i.InputRegister(0), \
+ i.InputInt32(1), kScratchReg, r0); \
+ } \
+ } while (0)
+#endif
+
+
+#if V8_TARGET_ARCH_PPC64
+#define ASSEMBLE_SUB_WITH_OVERFLOW() \
+ do { \
+ ASSEMBLE_BINOP(sub, subi); \
+ __ TestIfInt32(i.OutputRegister(), r0, cr0); \
+ } while (0)
+#else
+#define ASSEMBLE_SUB_WITH_OVERFLOW() \
+ do { \
+ if (HasRegisterInput(instr, 1)) { \
+ __ SubAndCheckForOverflow(i.OutputRegister(), i.InputRegister(0), \
+ i.InputRegister(1), kScratchReg, r0); \
+ } else { \
+ __ AddAndCheckForOverflow(i.OutputRegister(), i.InputRegister(0), \
+ -i.InputInt32(1), kScratchReg, r0); \
+ } \
+ } while (0)
+#endif
+
+
+#define ASSEMBLE_COMPARE(cmp_instr, cmpl_instr) \
+ do { \
+ const CRegister cr = cr0; \
+ if (HasRegisterInput(instr, 1)) { \
+ if (i.CompareLogical()) { \
+ __ cmpl_instr(i.InputRegister(0), i.InputRegister(1), cr); \
+ } else { \
+ __ cmp_instr(i.InputRegister(0), i.InputRegister(1), cr); \
+ } \
+ } else { \
+ if (i.CompareLogical()) { \
+ __ cmpl_instr##i(i.InputRegister(0), i.InputImmediate(1), cr); \
+ } else { \
+ __ cmp_instr##i(i.InputRegister(0), i.InputImmediate(1), cr); \
+ } \
+ } \
+ DCHECK_EQ(SetRC, i.OutputRCBit()); \
+ } while (0)
+
+
+#define ASSEMBLE_FLOAT_COMPARE(cmp_instr) \
+ do { \
+ const CRegister cr = cr0; \
+ __ cmp_instr(i.InputDoubleRegister(0), i.InputDoubleRegister(1), cr); \
+ DCHECK_EQ(SetRC, i.OutputRCBit()); \
+ } while (0)
+
+
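+// PPC has no integer remainder instruction (modsw/modsd only arrived with
+// ISA 3.0), so the remainder is computed as
+// dividend - (dividend / divisor) * divisor.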
+#define ASSEMBLE_MODULO(div_instr, mul_instr) \
+ do { \
+ const Register scratch = kScratchReg; \
+ __ div_instr(scratch, i.InputRegister(0), i.InputRegister(1)); \
+ __ mul_instr(scratch, scratch, i.InputRegister(1)); \
+ __ sub(i.OutputRegister(), i.InputRegister(0), scratch, LeaveOE, \
+ i.OutputRCBit()); \
+ } while (0)
+
+
+#define ASSEMBLE_FLOAT_MODULO() \
+ do { \
+ FrameScope scope(masm(), StackFrame::MANUAL); \
+ __ PrepareCallCFunction(0, 2, kScratchReg); \
+ __ MovToFloatParameters(i.InputDoubleRegister(0), \
+ i.InputDoubleRegister(1)); \
+ __ CallCFunction(ExternalReference::mod_two_doubles_operation(isolate()), \
+ 0, 2); \
+ __ MovFromFloatResult(i.OutputDoubleRegister()); \
+ DCHECK_EQ(LeaveRC, i.OutputRCBit()); \
+ } while (0)
+
+
+#define ASSEMBLE_LOAD_FLOAT(asm_instr, asm_instrx) \
+ do { \
+ DoubleRegister result = i.OutputDoubleRegister(); \
+ AddressingMode mode = kMode_None; \
+ MemOperand operand = i.MemoryOperand(&mode); \
+ if (mode == kMode_MRI) { \
+ __ asm_instr(result, operand); \
+ } else { \
+ __ asm_instrx(result, operand); \
+ } \
+ DCHECK_EQ(LeaveRC, i.OutputRCBit()); \
+ } while (0)
+
+
+#define ASSEMBLE_LOAD_INTEGER(asm_instr, asm_instrx) \
+ do { \
+ Register result = i.OutputRegister(); \
+ AddressingMode mode = kMode_None; \
+ MemOperand operand = i.MemoryOperand(&mode); \
+ if (mode == kMode_MRI) { \
+ __ asm_instr(result, operand); \
+ } else { \
+ __ asm_instrx(result, operand); \
+ } \
+ DCHECK_EQ(LeaveRC, i.OutputRCBit()); \
+ } while (0)
+
+
+#define ASSEMBLE_STORE_FLOAT(asm_instr, asm_instrx) \
+ do { \
+ int index = 0; \
+ AddressingMode mode = kMode_None; \
+ MemOperand operand = i.MemoryOperand(&mode, &index); \
+ DoubleRegister value = i.InputDoubleRegister(index); \
+ if (mode == kMode_MRI) { \
+ __ asm_instr(value, operand); \
+ } else { \
+ __ asm_instrx(value, operand); \
+ } \
+ DCHECK_EQ(LeaveRC, i.OutputRCBit()); \
+ } while (0)
+
+
+#define ASSEMBLE_STORE_INTEGER(asm_instr, asm_instrx) \
+ do { \
+ int index = 0; \
+ AddressingMode mode = kMode_None; \
+ MemOperand operand = i.MemoryOperand(&mode, &index); \
+ Register value = i.InputRegister(index); \
+ if (mode == kMode_MRI) { \
+ __ asm_instr(value, operand); \
+ } else { \
+ __ asm_instrx(value, operand); \
+ } \
+ DCHECK_EQ(LeaveRC, i.OutputRCBit()); \
+ } while (0)
+
+
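+// The checked memory access macros below bounds-check the offset against the
+// length operand with an unsigned 32-bit compare; out-of-bounds loads branch
+// to out-of-line code that produces NaN (floats) or zero (integers), and
+// out-of-bounds stores are simply skipped.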
+// TODO(mbrandy): fix paths that produce garbage in offset's upper 32-bits.
+#define ASSEMBLE_CHECKED_LOAD_FLOAT(asm_instr, asm_instrx, width) \
+ do { \
+ DoubleRegister result = i.OutputDoubleRegister(); \
+ AddressingMode mode = kMode_None; \
+ MemOperand operand = i.MemoryOperand(&mode, 0); \
+ DCHECK_EQ(kMode_MRR, mode); \
+ Register offset = operand.rb(); \
+ __ extsw(offset, offset); \
+ if (HasRegisterInput(instr, 2)) { \
+ __ cmplw(offset, i.InputRegister(2)); \
+ } else { \
+ __ cmplwi(offset, i.InputImmediate(2)); \
+ } \
+ auto ool = new (zone()) OutOfLineLoadNAN##width(this, result); \
+ __ bge(ool->entry()); \
+ if (mode == kMode_MRI) { \
+ __ asm_instr(result, operand); \
+ } else { \
+ __ asm_instrx(result, operand); \
+ } \
+ __ bind(ool->exit()); \
+ DCHECK_EQ(LeaveRC, i.OutputRCBit()); \
+ } while (0)
+
+
+// TODO(mbrandy): fix paths that produce garbage in offset's upper 32-bits.
+#define ASSEMBLE_CHECKED_LOAD_INTEGER(asm_instr, asm_instrx) \
+ do { \
+ Register result = i.OutputRegister(); \
+ AddressingMode mode = kMode_None; \
+ MemOperand operand = i.MemoryOperand(&mode, 0); \
+ DCHECK_EQ(kMode_MRR, mode); \
+ Register offset = operand.rb(); \
+ __ extsw(offset, offset); \
+ if (HasRegisterInput(instr, 2)) { \
+ __ cmplw(offset, i.InputRegister(2)); \
+ } else { \
+ __ cmplwi(offset, i.InputImmediate(2)); \
+ } \
+ auto ool = new (zone()) OutOfLineLoadZero(this, result); \
+ __ bge(ool->entry()); \
+ if (mode == kMode_MRI) { \
+ __ asm_instr(result, operand); \
+ } else { \
+ __ asm_instrx(result, operand); \
+ } \
+ __ bind(ool->exit()); \
+ DCHECK_EQ(LeaveRC, i.OutputRCBit()); \
+ } while (0)
+
+
+// TODO(mbrandy): fix paths that produce garbage in offset's upper 32-bits.
+#define ASSEMBLE_CHECKED_STORE_FLOAT(asm_instr, asm_instrx) \
+ do { \
+ Label done; \
+ AddressingMode mode = kMode_None; \
+ MemOperand operand = i.MemoryOperand(&mode, 0); \
+ DCHECK_EQ(kMode_MRR, mode); \
+ Register offset = operand.rb(); \
+ __ extsw(offset, offset); \
+ if (HasRegisterInput(instr, 2)) { \
+ __ cmplw(offset, i.InputRegister(2)); \
+ } else { \
+ __ cmplwi(offset, i.InputImmediate(2)); \
+ } \
+ __ bge(&done); \
+ DoubleRegister value = i.InputDoubleRegister(3); \
+ if (mode == kMode_MRI) { \
+ __ asm_instr(value, operand); \
+ } else { \
+ __ asm_instrx(value, operand); \
+ } \
+ __ bind(&done); \
+ DCHECK_EQ(LeaveRC, i.OutputRCBit()); \
+ } while (0)
+
+
+// TODO(mbrandy): fix paths that produce garbage in offset's upper 32-bits.
+#define ASSEMBLE_CHECKED_STORE_INTEGER(asm_instr, asm_instrx) \
+ do { \
+ Label done; \
+ AddressingMode mode = kMode_None; \
+ MemOperand operand = i.MemoryOperand(&mode, 0); \
+ DCHECK_EQ(kMode_MRR, mode); \
+ Register offset = operand.rb(); \
+ __ extsw(offset, offset); \
+ if (HasRegisterInput(instr, 2)) { \
+ __ cmplw(offset, i.InputRegister(2)); \
+ } else { \
+ __ cmplwi(offset, i.InputImmediate(2)); \
+ } \
+ __ bge(&done); \
+ Register value = i.InputRegister(3); \
+ if (mode == kMode_MRI) { \
+ __ asm_instr(value, operand); \
+ } else { \
+ __ asm_instrx(value, operand); \
+ } \
+ __ bind(&done); \
+ DCHECK_EQ(LeaveRC, i.OutputRCBit()); \
+ } while (0)
+
+
+#define ASSEMBLE_STORE_WRITE_BARRIER() \
+ do { \
+ Register object = i.InputRegister(0); \
+ Register index = i.InputRegister(1); \
+ Register value = i.InputRegister(2); \
+ __ add(index, object, index); \
+ __ StoreP(value, MemOperand(index)); \
+ SaveFPRegsMode mode = \
+ frame()->DidAllocateDoubleRegisters() ? kSaveFPRegs : kDontSaveFPRegs; \
+ LinkRegisterStatus lr_status = kLRHasNotBeenSaved; \
+ __ RecordWrite(object, index, value, lr_status, mode); \
+ DCHECK_EQ(LeaveRC, i.OutputRCBit()); \
+ } while (0)
+
+
+// Assembles an instruction after register allocation, producing machine code.
+void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
+ PPCOperandConverter i(this, instr);
+ ArchOpcode opcode = ArchOpcodeField::decode(instr->opcode());
+
+ switch (opcode) {
+ case kArchCallCodeObject: {
+ EnsureSpaceForLazyDeopt();
+ if (HasRegisterInput(instr, 0)) {
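+        // Compute the entry point: skip the Code object header, removing the
+        // heap object tag in the process.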
+ __ addi(ip, i.InputRegister(0),
+ Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ Call(ip);
+ } else {
+ __ Call(Handle<Code>::cast(i.InputHeapObject(0)),
+ RelocInfo::CODE_TARGET);
+ }
+ AddSafepointAndDeopt(instr);
+ DCHECK_EQ(LeaveRC, i.OutputRCBit());
+ break;
+ }
+ case kArchCallJSFunction: {
+ EnsureSpaceForLazyDeopt();
+ Register func = i.InputRegister(0);
+ if (FLAG_debug_code) {
+ // Check the function's context matches the context argument.
+ __ LoadP(kScratchReg,
+ FieldMemOperand(func, JSFunction::kContextOffset));
+ __ cmp(cp, kScratchReg);
+ __ Assert(eq, kWrongFunctionContext);
+ }
+ __ LoadP(ip, FieldMemOperand(func, JSFunction::kCodeEntryOffset));
+ __ Call(ip);
+ AddSafepointAndDeopt(instr);
+ DCHECK_EQ(LeaveRC, i.OutputRCBit());
+ break;
+ }
+ case kArchJmp:
+ AssembleArchJump(i.InputRpo(0));
+ DCHECK_EQ(LeaveRC, i.OutputRCBit());
+ break;
+ case kArchNop:
+      // Don't emit code for nops.
+ DCHECK_EQ(LeaveRC, i.OutputRCBit());
+ break;
+ case kArchRet:
+ AssembleReturn();
+ DCHECK_EQ(LeaveRC, i.OutputRCBit());
+ break;
+ case kArchStackPointer:
+ __ mr(i.OutputRegister(), sp);
+ DCHECK_EQ(LeaveRC, i.OutputRCBit());
+ break;
+ case kArchTruncateDoubleToI:
+ // TODO(mbrandy): move slow call to stub out of line.
+ __ TruncateDoubleToI(i.OutputRegister(), i.InputDoubleRegister(0));
+ DCHECK_EQ(LeaveRC, i.OutputRCBit());
+ break;
+ case kPPC_And32:
+ case kPPC_And64:
+ if (HasRegisterInput(instr, 1)) {
+ __ and_(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
+ i.OutputRCBit());
+ } else {
+ __ andi(i.OutputRegister(), i.InputRegister(0), i.InputImmediate(1));
+ }
+ break;
+ case kPPC_AndComplement32:
+ case kPPC_AndComplement64:
+ __ andc(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
+ i.OutputRCBit());
+ break;
+ case kPPC_Or32:
+ case kPPC_Or64:
+ if (HasRegisterInput(instr, 1)) {
+ __ orx(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
+ i.OutputRCBit());
+ } else {
+ __ ori(i.OutputRegister(), i.InputRegister(0), i.InputImmediate(1));
+ DCHECK_EQ(LeaveRC, i.OutputRCBit());
+ }
+ break;
+ case kPPC_OrComplement32:
+ case kPPC_OrComplement64:
+ __ orc(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
+ i.OutputRCBit());
+ break;
+ case kPPC_Xor32:
+ case kPPC_Xor64:
+ if (HasRegisterInput(instr, 1)) {
+ __ xor_(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
+ i.OutputRCBit());
+ } else {
+ __ xori(i.OutputRegister(), i.InputRegister(0), i.InputImmediate(1));
+ DCHECK_EQ(LeaveRC, i.OutputRCBit());
+ }
+ break;
+ case kPPC_ShiftLeft32:
+ ASSEMBLE_BINOP_RC(slw, slwi);
+ break;
+#if V8_TARGET_ARCH_PPC64
+ case kPPC_ShiftLeft64:
+ ASSEMBLE_BINOP_RC(sld, sldi);
+ break;
+#endif
+ case kPPC_ShiftRight32:
+ ASSEMBLE_BINOP_RC(srw, srwi);
+ break;
+#if V8_TARGET_ARCH_PPC64
+ case kPPC_ShiftRight64:
+ ASSEMBLE_BINOP_RC(srd, srdi);
+ break;
+#endif
+ case kPPC_ShiftRightAlg32:
+ ASSEMBLE_BINOP_INT_RC(sraw, srawi);
+ break;
+#if V8_TARGET_ARCH_PPC64
+ case kPPC_ShiftRightAlg64:
+ ASSEMBLE_BINOP_INT_RC(srad, sradi);
+ break;
+#endif
+ case kPPC_RotRight32:
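+      // PPC only rotates left; rotate right by n == rotate left by (32 - n).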
+ if (HasRegisterInput(instr, 1)) {
+ __ subfic(kScratchReg, i.InputRegister(1), Operand(32));
+ __ rotlw(i.OutputRegister(), i.InputRegister(0), kScratchReg,
+ i.OutputRCBit());
+ } else {
+ int sh = i.InputInt32(1);
+ __ rotrwi(i.OutputRegister(), i.InputRegister(0), sh, i.OutputRCBit());
+ }
+ break;
+#if V8_TARGET_ARCH_PPC64
+ case kPPC_RotRight64:
+ if (HasRegisterInput(instr, 1)) {
+ __ subfic(kScratchReg, i.InputRegister(1), Operand(64));
+ __ rotld(i.OutputRegister(), i.InputRegister(0), kScratchReg,
+ i.OutputRCBit());
+ } else {
+ int sh = i.InputInt32(1);
+ __ rotrdi(i.OutputRegister(), i.InputRegister(0), sh, i.OutputRCBit());
+ }
+ break;
+#endif
+ case kPPC_Not32:
+ case kPPC_Not64:
+ __ notx(i.OutputRegister(), i.InputRegister(0), i.OutputRCBit());
+ break;
+ case kPPC_RotLeftAndMask32:
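+      // The mask bounds are supplied counted from the LSB; rlwinm expects
+      // PPC's MSB-0 bit numbering, hence the 31 - x conversions.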
+ __ rlwinm(i.OutputRegister(), i.InputRegister(0), i.InputInt32(1),
+ 31 - i.InputInt32(2), 31 - i.InputInt32(3), i.OutputRCBit());
+ break;
+#if V8_TARGET_ARCH_PPC64
+ case kPPC_RotLeftAndClear64:
+ __ rldic(i.OutputRegister(), i.InputRegister(0), i.InputInt32(1),
+ 63 - i.InputInt32(2), i.OutputRCBit());
+ break;
+ case kPPC_RotLeftAndClearLeft64:
+ __ rldicl(i.OutputRegister(), i.InputRegister(0), i.InputInt32(1),
+ 63 - i.InputInt32(2), i.OutputRCBit());
+ break;
+ case kPPC_RotLeftAndClearRight64:
+ __ rldicr(i.OutputRegister(), i.InputRegister(0), i.InputInt32(1),
+ 63 - i.InputInt32(2), i.OutputRCBit());
+ break;
+#endif
+ case kPPC_Add32:
+ case kPPC_Add64:
+ if (HasRegisterInput(instr, 1)) {
+ __ add(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
+ LeaveOE, i.OutputRCBit());
+ } else {
+ __ addi(i.OutputRegister(), i.InputRegister(0), i.InputImmediate(1));
+ DCHECK_EQ(LeaveRC, i.OutputRCBit());
+ }
+ break;
+ case kPPC_AddWithOverflow32:
+ ASSEMBLE_ADD_WITH_OVERFLOW();
+ break;
+ case kPPC_AddFloat64:
+ ASSEMBLE_FLOAT_BINOP_RC(fadd);
+ break;
+ case kPPC_Sub32:
+ case kPPC_Sub64:
+ if (HasRegisterInput(instr, 1)) {
+ __ sub(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
+ LeaveOE, i.OutputRCBit());
+ } else {
+ __ subi(i.OutputRegister(), i.InputRegister(0), i.InputImmediate(1));
+ DCHECK_EQ(LeaveRC, i.OutputRCBit());
+ }
+ break;
+ case kPPC_SubWithOverflow32:
+ ASSEMBLE_SUB_WITH_OVERFLOW();
+ break;
+ case kPPC_SubFloat64:
+ ASSEMBLE_FLOAT_BINOP_RC(fsub);
+ break;
+ case kPPC_Mul32:
+ __ mullw(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
+ LeaveOE, i.OutputRCBit());
+ break;
+#if V8_TARGET_ARCH_PPC64
+ case kPPC_Mul64:
+ __ mulld(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
+ LeaveOE, i.OutputRCBit());
+ break;
+#endif
+ case kPPC_MulHigh32:
+ __ mulhw(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
+ i.OutputRCBit());
+ break;
+ case kPPC_MulHighU32:
+ __ mulhwu(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
+ i.OutputRCBit());
+ break;
+ case kPPC_MulFloat64:
+ ASSEMBLE_FLOAT_BINOP_RC(fmul);
+ break;
+ case kPPC_Div32:
+ __ divw(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
+ DCHECK_EQ(LeaveRC, i.OutputRCBit());
+ break;
+#if V8_TARGET_ARCH_PPC64
+ case kPPC_Div64:
+ __ divd(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
+ DCHECK_EQ(LeaveRC, i.OutputRCBit());
+ break;
+#endif
+ case kPPC_DivU32:
+ __ divwu(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
+ DCHECK_EQ(LeaveRC, i.OutputRCBit());
+ break;
+#if V8_TARGET_ARCH_PPC64
+ case kPPC_DivU64:
+ __ divdu(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
+ DCHECK_EQ(LeaveRC, i.OutputRCBit());
+ break;
+#endif
+ case kPPC_DivFloat64:
+ ASSEMBLE_FLOAT_BINOP_RC(fdiv);
+ break;
+ case kPPC_Mod32:
+ ASSEMBLE_MODULO(divw, mullw);
+ break;
+#if V8_TARGET_ARCH_PPC64
+ case kPPC_Mod64:
+ ASSEMBLE_MODULO(divd, mulld);
+ break;
+#endif
+ case kPPC_ModU32:
+ ASSEMBLE_MODULO(divwu, mullw);
+ break;
+#if V8_TARGET_ARCH_PPC64
+ case kPPC_ModU64:
+ ASSEMBLE_MODULO(divdu, mulld);
+ break;
+#endif
+ case kPPC_ModFloat64:
+ // TODO(bmeurer): We should really get rid of this special instruction,
+ // and generate a CallAddress instruction instead.
+ ASSEMBLE_FLOAT_MODULO();
+ break;
+ case kPPC_Neg32:
+ case kPPC_Neg64:
+ __ neg(i.OutputRegister(), i.InputRegister(0), LeaveOE, i.OutputRCBit());
+ break;
+ case kPPC_SqrtFloat64:
+ ASSEMBLE_FLOAT_UNOP_RC(fsqrt);
+ break;
+ case kPPC_FloorFloat64:
+ ASSEMBLE_FLOAT_UNOP_RC(frim);
+ break;
+ case kPPC_CeilFloat64:
+ ASSEMBLE_FLOAT_UNOP_RC(frip);
+ break;
+ case kPPC_TruncateFloat64:
+ ASSEMBLE_FLOAT_UNOP_RC(friz);
+ break;
+ case kPPC_RoundFloat64:
+ ASSEMBLE_FLOAT_UNOP_RC(frin);
+ break;
+ case kPPC_NegFloat64:
+ ASSEMBLE_FLOAT_UNOP_RC(fneg);
+ break;
+ case kPPC_Cmp32:
+ ASSEMBLE_COMPARE(cmpw, cmplw);
+ break;
+#if V8_TARGET_ARCH_PPC64
+ case kPPC_Cmp64:
+ ASSEMBLE_COMPARE(cmp, cmpl);
+ break;
+#endif
+ case kPPC_CmpFloat64:
+ ASSEMBLE_FLOAT_COMPARE(fcmpu);
+ break;
+ case kPPC_Tst32:
+ if (HasRegisterInput(instr, 1)) {
+ __ and_(r0, i.InputRegister(0), i.InputRegister(1), i.OutputRCBit());
+ } else {
+ __ andi(r0, i.InputRegister(0), i.InputImmediate(1));
+ }
+#if V8_TARGET_ARCH_PPC64
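+      // Re-evaluate cr0 on the sign-extended 32-bit result.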
+ __ extsw(r0, r0, i.OutputRCBit());
+#endif
+ DCHECK_EQ(SetRC, i.OutputRCBit());
+ break;
+#if V8_TARGET_ARCH_PPC64
+ case kPPC_Tst64:
+ if (HasRegisterInput(instr, 1)) {
+ __ and_(r0, i.InputRegister(0), i.InputRegister(1), i.OutputRCBit());
+ } else {
+ __ andi(r0, i.InputRegister(0), i.InputImmediate(1));
+ }
+ DCHECK_EQ(SetRC, i.OutputRCBit());
+ break;
+#endif
+ case kPPC_Push:
+ __ Push(i.InputRegister(0));
+ DCHECK_EQ(LeaveRC, i.OutputRCBit());
+ break;
+ case kPPC_ExtendSignWord8:
+ __ extsb(i.OutputRegister(), i.InputRegister(0));
+ DCHECK_EQ(LeaveRC, i.OutputRCBit());
+ break;
+ case kPPC_ExtendSignWord16:
+ __ extsh(i.OutputRegister(), i.InputRegister(0));
+ DCHECK_EQ(LeaveRC, i.OutputRCBit());
+ break;
+#if V8_TARGET_ARCH_PPC64
+ case kPPC_ExtendSignWord32:
+ __ extsw(i.OutputRegister(), i.InputRegister(0));
+ DCHECK_EQ(LeaveRC, i.OutputRCBit());
+ break;
+ case kPPC_Uint32ToUint64:
+      // Zero-extend.
+ __ clrldi(i.OutputRegister(), i.InputRegister(0), Operand(32));
+ DCHECK_EQ(LeaveRC, i.OutputRCBit());
+ break;
+ case kPPC_Int64ToInt32:
+ // TODO(mbrandy): sign extend?
+ __ Move(i.OutputRegister(), i.InputRegister(0));
+ DCHECK_EQ(LeaveRC, i.OutputRCBit());
+ break;
+#endif
+ case kPPC_Int32ToFloat64:
+ __ ConvertIntToDouble(i.InputRegister(0), i.OutputDoubleRegister());
+ DCHECK_EQ(LeaveRC, i.OutputRCBit());
+ break;
+ case kPPC_Uint32ToFloat64:
+ __ ConvertUnsignedIntToDouble(i.InputRegister(0),
+ i.OutputDoubleRegister());
+ DCHECK_EQ(LeaveRC, i.OutputRCBit());
+ break;
+ case kPPC_Float64ToInt32:
+ case kPPC_Float64ToUint32:
+ __ ConvertDoubleToInt64(i.InputDoubleRegister(0),
+#if !V8_TARGET_ARCH_PPC64
+ kScratchReg,
+#endif
+ i.OutputRegister(), kScratchDoubleReg);
+ DCHECK_EQ(LeaveRC, i.OutputRCBit());
+ break;
+ case kPPC_Float64ToFloat32:
+ ASSEMBLE_FLOAT_UNOP_RC(frsp);
+ break;
+ case kPPC_Float32ToFloat64:
+      // Nothing to convert; single-precision values are already held in
+      // double format in FP registers, so a register move suffices.
+ __ Move(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+ DCHECK_EQ(LeaveRC, i.OutputRCBit());
+ break;
+ case kPPC_LoadWordU8:
+ ASSEMBLE_LOAD_INTEGER(lbz, lbzx);
+ break;
+ case kPPC_LoadWordS8:
+ ASSEMBLE_LOAD_INTEGER(lbz, lbzx);
+ __ extsb(i.OutputRegister(), i.OutputRegister());
+ break;
+ case kPPC_LoadWordU16:
+ ASSEMBLE_LOAD_INTEGER(lhz, lhzx);
+ break;
+ case kPPC_LoadWordS16:
+ ASSEMBLE_LOAD_INTEGER(lha, lhax);
+ break;
+ case kPPC_LoadWordS32:
+ ASSEMBLE_LOAD_INTEGER(lwa, lwax);
+ break;
+#if V8_TARGET_ARCH_PPC64
+ case kPPC_LoadWord64:
+ ASSEMBLE_LOAD_INTEGER(ld, ldx);
+ break;
+#endif
+ case kPPC_LoadFloat32:
+ ASSEMBLE_LOAD_FLOAT(lfs, lfsx);
+ break;
+ case kPPC_LoadFloat64:
+ ASSEMBLE_LOAD_FLOAT(lfd, lfdx);
+ break;
+ case kPPC_StoreWord8:
+ ASSEMBLE_STORE_INTEGER(stb, stbx);
+ break;
+ case kPPC_StoreWord16:
+ ASSEMBLE_STORE_INTEGER(sth, sthx);
+ break;
+ case kPPC_StoreWord32:
+ ASSEMBLE_STORE_INTEGER(stw, stwx);
+ break;
+#if V8_TARGET_ARCH_PPC64
+ case kPPC_StoreWord64:
+ ASSEMBLE_STORE_INTEGER(std, stdx);
+ break;
+#endif
+ case kPPC_StoreFloat32:
+ ASSEMBLE_STORE_FLOAT(stfs, stfsx);
+ break;
+ case kPPC_StoreFloat64:
+ ASSEMBLE_STORE_FLOAT(stfd, stfdx);
+ break;
+ case kPPC_StoreWriteBarrier:
+ ASSEMBLE_STORE_WRITE_BARRIER();
+ break;
+ case kCheckedLoadInt8:
+ ASSEMBLE_CHECKED_LOAD_INTEGER(lbz, lbzx);
+ __ extsb(i.OutputRegister(), i.OutputRegister());
+ break;
+ case kCheckedLoadUint8:
+ ASSEMBLE_CHECKED_LOAD_INTEGER(lbz, lbzx);
+ break;
+ case kCheckedLoadInt16:
+ ASSEMBLE_CHECKED_LOAD_INTEGER(lha, lhax);
+ break;
+ case kCheckedLoadUint16:
+ ASSEMBLE_CHECKED_LOAD_INTEGER(lhz, lhzx);
+ break;
+ case kCheckedLoadWord32:
+ ASSEMBLE_CHECKED_LOAD_INTEGER(lwa, lwax);
+ break;
+ case kCheckedLoadFloat32:
+ ASSEMBLE_CHECKED_LOAD_FLOAT(lfs, lfsx, 32);
+ break;
+ case kCheckedLoadFloat64:
+ ASSEMBLE_CHECKED_LOAD_FLOAT(lfd, lfdx, 64);
+ break;
+ case kCheckedStoreWord8:
+ ASSEMBLE_CHECKED_STORE_INTEGER(stb, stbx);
+ break;
+ case kCheckedStoreWord16:
+ ASSEMBLE_CHECKED_STORE_INTEGER(sth, sthx);
+ break;
+ case kCheckedStoreWord32:
+ ASSEMBLE_CHECKED_STORE_INTEGER(stw, stwx);
+ break;
+ case kCheckedStoreFloat32:
+ ASSEMBLE_CHECKED_STORE_FLOAT(stfs, stfsx);
+ break;
+ case kCheckedStoreFloat64:
+ ASSEMBLE_CHECKED_STORE_FLOAT(stfd, stfdx);
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+}
+
+
+// Assembles branches after an instruction.
+void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
+ PPCOperandConverter i(this, instr);
+ Label* tlabel = branch->true_label;
+ Label* flabel = branch->false_label;
+ ArchOpcode op = instr->arch_opcode();
+ FlagsCondition condition = branch->condition;
+ CRegister cr = cr0;
+
+ // Overflow checked for add/sub only.
+ DCHECK((condition != kOverflow && condition != kNotOverflow) ||
+ (op == kPPC_AddWithOverflow32 || op == kPPC_SubWithOverflow32));
+
+ Condition cond = FlagsConditionToCondition(condition);
+ if (op == kPPC_CmpFloat64) {
+    // Check for unordered if necessary.
+ if (cond == le) {
+ __ bunordered(flabel, cr);
+ // Unnecessary for eq/lt since only FU bit will be set.
+ } else if (cond == gt) {
+ __ bunordered(tlabel, cr);
+ // Unnecessary for ne/ge since only FU bit will be set.
+ }
+ }
+ __ b(cond, tlabel, cr);
+ if (!branch->fallthru) __ b(flabel); // no fallthru to flabel.
+}
+
+
+void CodeGenerator::AssembleArchJump(BasicBlock::RpoNumber target) {
+ if (!IsNextInAssemblyOrder(target)) __ b(GetLabel(target));
+}
+
+
+// Assembles boolean materializations after an instruction.
+void CodeGenerator::AssembleArchBoolean(Instruction* instr,
+ FlagsCondition condition) {
+ PPCOperandConverter i(this, instr);
+ Label done;
+ ArchOpcode op = instr->arch_opcode();
+ bool check_unordered = (op == kPPC_CmpFloat64);
+ CRegister cr = cr0;
+
+ // Overflow checked for add/sub only.
+ DCHECK((condition != kOverflow && condition != kNotOverflow) ||
+ (op == kPPC_AddWithOverflow32 || op == kPPC_SubWithOverflow32));
+
+ // Materialize a full 32-bit 1 or 0 value. The result register is always the
+ // last output of the instruction.
+ DCHECK_NE(0u, instr->OutputCount());
+ Register reg = i.OutputRegister(instr->OutputCount() - 1);
+
+ Condition cond = FlagsConditionToCondition(condition);
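+  // Each case materializes the result branchlessly via isel(cond, dst, a, b),
+  // which selects dst = a if cond holds in cr and dst = b otherwise; passing
+  // r0 as "a" reads the constant zero, per the isel definition.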
+ switch (cond) {
+ case eq:
+ case lt:
+ __ li(reg, Operand::Zero());
+ __ li(kScratchReg, Operand(1));
+ __ isel(cond, reg, kScratchReg, reg, cr);
+ break;
+ case ne:
+ case ge:
+ __ li(reg, Operand(1));
+ __ isel(NegateCondition(cond), reg, r0, reg, cr);
+ break;
+ case gt:
+ if (check_unordered) {
+ __ li(reg, Operand(1));
+ __ li(kScratchReg, Operand::Zero());
+ __ bunordered(&done, cr);
+ __ isel(cond, reg, reg, kScratchReg, cr);
+ } else {
+ __ li(reg, Operand::Zero());
+ __ li(kScratchReg, Operand(1));
+ __ isel(cond, reg, kScratchReg, reg, cr);
+ }
+ break;
+ case le:
+ if (check_unordered) {
+ __ li(reg, Operand::Zero());
+ __ li(kScratchReg, Operand(1));
+ __ bunordered(&done, cr);
+ __ isel(NegateCondition(cond), reg, r0, kScratchReg, cr);
+ } else {
+ __ li(reg, Operand(1));
+ __ isel(NegateCondition(cond), reg, r0, reg, cr);
+ }
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+ __ bind(&done);
+}
+
+
+void CodeGenerator::AssembleDeoptimizerCall(int deoptimization_id) {
+ Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
+ isolate(), deoptimization_id, Deoptimizer::LAZY);
+ __ Call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
+}
+
+
+void CodeGenerator::AssemblePrologue() {
+ CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
+ if (descriptor->kind() == CallDescriptor::kCallAddress) {
+#if ABI_USES_FUNCTION_DESCRIPTORS
+ __ function_descriptor();
+#endif
+ int register_save_area_size = 0;
+ RegList frame_saves = fp.bit();
+ __ mflr(r0);
+#if V8_OOL_CONSTANT_POOL
+ __ Push(r0, fp, kConstantPoolRegister);
+ // Adjust FP to point to saved FP.
+ __ subi(fp, sp, Operand(StandardFrameConstants::kConstantPoolOffset));
+ register_save_area_size += kPointerSize;
+ frame_saves |= kConstantPoolRegister.bit();
+#else
+ __ Push(r0, fp);
+ __ mr(fp, sp);
+#endif
+ // Save callee-saved registers.
+ const RegList saves = descriptor->CalleeSavedRegisters() & ~frame_saves;
+ for (int i = Register::kNumRegisters - 1; i >= 0; i--) {
+ if (!((1 << i) & saves)) continue;
+ register_save_area_size += kPointerSize;
+ }
+ frame()->SetRegisterSaveAreaSize(register_save_area_size);
+ __ MultiPush(saves);
+ } else if (descriptor->IsJSFunctionCall()) {
+ CompilationInfo* info = this->info();
+ __ Prologue(info->IsCodePreAgingActive());
+ frame()->SetRegisterSaveAreaSize(
+ StandardFrameConstants::kFixedFrameSizeFromFp);
+ } else {
+ __ StubPrologue();
+ frame()->SetRegisterSaveAreaSize(
+ StandardFrameConstants::kFixedFrameSizeFromFp);
+ }
+ int stack_slots = frame()->GetSpillSlotCount();
+
+ if (info()->is_osr()) {
+ // TurboFan OSR-compiled functions cannot be entered directly.
+ __ Abort(kShouldNotDirectlyEnterOsrFunction);
+
+ // Unoptimized code jumps directly to this entrypoint while the unoptimized
+ // frame is still on the stack. Optimized code uses OSR values directly from
+ // the unoptimized frame. Thus, all that needs to be done is to allocate the
+ // remaining stack slots.
+ if (FLAG_code_comments) __ RecordComment("-- OSR entrypoint --");
+ osr_pc_offset_ = __ pc_offset();
+ DCHECK(stack_slots >= frame()->GetOsrStackSlotCount());
+ stack_slots -= frame()->GetOsrStackSlotCount();
+ }
+
+ if (stack_slots > 0) {
+ __ Add(sp, sp, -stack_slots * kPointerSize, r0);
+ }
+}
+
+
+void CodeGenerator::AssembleReturn() {
+ CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
+ if (descriptor->kind() == CallDescriptor::kCallAddress) {
+ if (frame()->GetRegisterSaveAreaSize() > 0) {
+ // Remove this frame's spill slots first.
+ int stack_slots = frame()->GetSpillSlotCount();
+ if (stack_slots > 0) {
+ __ Add(sp, sp, stack_slots * kPointerSize, r0);
+ }
+ // Restore registers.
+ RegList frame_saves = fp.bit();
+#if V8_OOL_CONSTANT_POOL
+ frame_saves |= kConstantPoolRegister.bit();
+#endif
+ const RegList saves = descriptor->CalleeSavedRegisters() & ~frame_saves;
+ if (saves != 0) {
+ __ MultiPop(saves);
+ }
+ }
+ __ LeaveFrame(StackFrame::MANUAL);
+ __ Ret();
+ } else {
+ int pop_count = descriptor->IsJSFunctionCall()
+ ? static_cast<int>(descriptor->JSParameterCount())
+ : 0;
+ __ LeaveFrame(StackFrame::MANUAL, pop_count * kPointerSize);
+ __ Ret();
+ }
+}
+
+
+void CodeGenerator::AssembleMove(InstructionOperand* source,
+ InstructionOperand* destination) {
+ PPCOperandConverter g(this, NULL);
+ // Dispatch on the source and destination operand kinds. Not all
+ // combinations are possible.
+ if (source->IsRegister()) {
+ DCHECK(destination->IsRegister() || destination->IsStackSlot());
+ Register src = g.ToRegister(source);
+ if (destination->IsRegister()) {
+ __ Move(g.ToRegister(destination), src);
+ } else {
+ __ StoreP(src, g.ToMemOperand(destination), r0);
+ }
+ } else if (source->IsStackSlot()) {
+ DCHECK(destination->IsRegister() || destination->IsStackSlot());
+ MemOperand src = g.ToMemOperand(source);
+ if (destination->IsRegister()) {
+ __ LoadP(g.ToRegister(destination), src, r0);
+ } else {
+ Register temp = kScratchReg;
+ __ LoadP(temp, src, r0);
+ __ StoreP(temp, g.ToMemOperand(destination), r0);
+ }
+ } else if (source->IsConstant()) {
+ Constant src = g.ToConstant(source);
+ if (destination->IsRegister() || destination->IsStackSlot()) {
+ Register dst =
+ destination->IsRegister() ? g.ToRegister(destination) : kScratchReg;
+ switch (src.type()) {
+ case Constant::kInt32:
+ __ mov(dst, Operand(src.ToInt32()));
+ break;
+ case Constant::kInt64:
+ __ mov(dst, Operand(src.ToInt64()));
+ break;
+ case Constant::kFloat32:
+ __ Move(dst,
+ isolate()->factory()->NewNumber(src.ToFloat32(), TENURED));
+ break;
+ case Constant::kFloat64:
+ __ Move(dst,
+ isolate()->factory()->NewNumber(src.ToFloat64(), TENURED));
+ break;
+ case Constant::kExternalReference:
+ __ mov(dst, Operand(src.ToExternalReference()));
+ break;
+ case Constant::kHeapObject:
+ __ Move(dst, src.ToHeapObject());
+ break;
+ case Constant::kRpoNumber:
+ UNREACHABLE(); // TODO(dcarney): loading RPO constants on PPC.
+ break;
+ }
+ if (destination->IsStackSlot()) {
+ __ StoreP(dst, g.ToMemOperand(destination), r0);
+ }
+ } else {
+ DoubleRegister dst = destination->IsDoubleRegister()
+ ? g.ToDoubleRegister(destination)
+ : kScratchDoubleReg;
+ double value = (src.type() == Constant::kFloat32) ? src.ToFloat32()
+ : src.ToFloat64();
+ __ LoadDoubleLiteral(dst, value, kScratchReg);
+ if (destination->IsDoubleStackSlot()) {
+ __ StoreDouble(dst, g.ToMemOperand(destination), r0);
+ }
+ }
+ } else if (source->IsDoubleRegister()) {
+ DoubleRegister src = g.ToDoubleRegister(source);
+ if (destination->IsDoubleRegister()) {
+ DoubleRegister dst = g.ToDoubleRegister(destination);
+ __ Move(dst, src);
+ } else {
+ DCHECK(destination->IsDoubleStackSlot());
+ __ StoreDouble(src, g.ToMemOperand(destination), r0);
+ }
+ } else if (source->IsDoubleStackSlot()) {
+ DCHECK(destination->IsDoubleRegister() || destination->IsDoubleStackSlot());
+ MemOperand src = g.ToMemOperand(source);
+ if (destination->IsDoubleRegister()) {
+ __ LoadDouble(g.ToDoubleRegister(destination), src, r0);
+ } else {
+ DoubleRegister temp = kScratchDoubleReg;
+ __ LoadDouble(temp, src, r0);
+ __ StoreDouble(temp, g.ToMemOperand(destination), r0);
+ }
+ } else {
+ UNREACHABLE();
+ }
+}
+
+
+void CodeGenerator::AssembleSwap(InstructionOperand* source,
+ InstructionOperand* destination) {
+ PPCOperandConverter g(this, NULL);
+ // Dispatch on the source and destination operand kinds. Not all
+ // combinations are possible.
+ if (source->IsRegister()) {
+ // Register-register.
+ Register temp = kScratchReg;
+ Register src = g.ToRegister(source);
+ if (destination->IsRegister()) {
+ Register dst = g.ToRegister(destination);
+ __ mr(temp, src);
+ __ mr(src, dst);
+ __ mr(dst, temp);
+ } else {
+ DCHECK(destination->IsStackSlot());
+ MemOperand dst = g.ToMemOperand(destination);
+ __ mr(temp, src);
+ __ LoadP(src, dst);
+ __ StoreP(temp, dst);
+ }
+#if V8_TARGET_ARCH_PPC64
+ } else if (source->IsStackSlot() || source->IsDoubleStackSlot()) {
+#else
+ } else if (source->IsStackSlot()) {
+#endif
+ DCHECK(destination->IsStackSlot());
+ Register temp_0 = kScratchReg;
+ Register temp_1 = r0;
+ MemOperand src = g.ToMemOperand(source);
+ MemOperand dst = g.ToMemOperand(destination);
+ __ LoadP(temp_0, src);
+ __ LoadP(temp_1, dst);
+ __ StoreP(temp_0, dst);
+ __ StoreP(temp_1, src);
+ } else if (source->IsDoubleRegister()) {
+ DoubleRegister temp = kScratchDoubleReg;
+ DoubleRegister src = g.ToDoubleRegister(source);
+ if (destination->IsDoubleRegister()) {
+ DoubleRegister dst = g.ToDoubleRegister(destination);
+ __ fmr(temp, src);
+ __ fmr(src, dst);
+ __ fmr(dst, temp);
+ } else {
+ DCHECK(destination->IsDoubleStackSlot());
+ MemOperand dst = g.ToMemOperand(destination);
+ __ fmr(temp, src);
+ __ lfd(src, dst);
+ __ stfd(temp, dst);
+ }
+#if !V8_TARGET_ARCH_PPC64
+ } else if (source->IsDoubleStackSlot()) {
+ DCHECK(destination->IsDoubleStackSlot());
+ DoubleRegister temp_0 = kScratchDoubleReg;
+ DoubleRegister temp_1 = d0;
+ MemOperand src = g.ToMemOperand(source);
+ MemOperand dst = g.ToMemOperand(destination);
+ __ lfd(temp_0, src);
+ __ lfd(temp_1, dst);
+ __ stfd(temp_0, dst);
+ __ stfd(temp_1, src);
+#endif
+ } else {
+ // No other combinations are possible.
+ UNREACHABLE();
+ }
+}
+
+
+void CodeGenerator::AddNopForSmiCodeInlining() {
+ // We do not insert nops for inlined Smi code.
+}
+
+
+void CodeGenerator::EnsureSpaceForLazyDeopt() {
+ int space_needed = Deoptimizer::patch_size();
+ if (!info()->IsStub()) {
+ // Ensure that we have enough space after the previous lazy-bailout
+ // instruction for patching the code here.
+ int current_pc = masm()->pc_offset();
+ if (current_pc < last_lazy_deopt_pc_ + space_needed) {
+ int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
+ DCHECK_EQ(0, padding_size % v8::internal::Assembler::kInstrSize);
+ while (padding_size > 0) {
+ __ nop();
+ padding_size -= v8::internal::Assembler::kInstrSize;
+ }
+ }
+ }
+ MarkLazyDeoptSite();
+}
+
+#undef __
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
--- /dev/null
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_PPC_INSTRUCTION_CODES_PPC_H_
+#define V8_COMPILER_PPC_INSTRUCTION_CODES_PPC_H_
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// PPC-specific opcodes that specify which assembly sequence to emit.
+// Most opcodes specify a single instruction.
+#define TARGET_ARCH_OPCODE_LIST(V) \
+ V(PPC_And32) \
+ V(PPC_And64) \
+ V(PPC_AndComplement32) \
+ V(PPC_AndComplement64) \
+ V(PPC_Or32) \
+ V(PPC_Or64) \
+ V(PPC_OrComplement32) \
+ V(PPC_OrComplement64) \
+ V(PPC_Xor32) \
+ V(PPC_Xor64) \
+ V(PPC_ShiftLeft32) \
+ V(PPC_ShiftLeft64) \
+ V(PPC_ShiftRight32) \
+ V(PPC_ShiftRight64) \
+ V(PPC_ShiftRightAlg32) \
+ V(PPC_ShiftRightAlg64) \
+ V(PPC_RotRight32) \
+ V(PPC_RotRight64) \
+ V(PPC_Not32) \
+ V(PPC_Not64) \
+ V(PPC_RotLeftAndMask32) \
+ V(PPC_RotLeftAndClear64) \
+ V(PPC_RotLeftAndClearLeft64) \
+ V(PPC_RotLeftAndClearRight64) \
+ V(PPC_Add32) \
+ V(PPC_AddWithOverflow32) \
+ V(PPC_Add64) \
+ V(PPC_AddFloat64) \
+ V(PPC_Sub32) \
+ V(PPC_SubWithOverflow32) \
+ V(PPC_Sub64) \
+ V(PPC_SubFloat64) \
+ V(PPC_Mul32) \
+ V(PPC_Mul64) \
+ V(PPC_MulHigh32) \
+ V(PPC_MulHighU32) \
+ V(PPC_MulFloat64) \
+ V(PPC_Div32) \
+ V(PPC_Div64) \
+ V(PPC_DivU32) \
+ V(PPC_DivU64) \
+ V(PPC_DivFloat64) \
+ V(PPC_Mod32) \
+ V(PPC_Mod64) \
+ V(PPC_ModU32) \
+ V(PPC_ModU64) \
+ V(PPC_ModFloat64) \
+ V(PPC_Neg32) \
+ V(PPC_Neg64) \
+ V(PPC_NegFloat64) \
+ V(PPC_SqrtFloat64) \
+ V(PPC_FloorFloat64) \
+ V(PPC_CeilFloat64) \
+ V(PPC_TruncateFloat64) \
+ V(PPC_RoundFloat64) \
+ V(PPC_Cmp32) \
+ V(PPC_Cmp64) \
+ V(PPC_CmpFloat64) \
+ V(PPC_Tst32) \
+ V(PPC_Tst64) \
+ V(PPC_Push) \
+ V(PPC_ExtendSignWord8) \
+ V(PPC_ExtendSignWord16) \
+ V(PPC_ExtendSignWord32) \
+ V(PPC_Uint32ToUint64) \
+ V(PPC_Int64ToInt32) \
+ V(PPC_Int32ToFloat64) \
+ V(PPC_Uint32ToFloat64) \
+ V(PPC_Float32ToFloat64) \
+ V(PPC_Float64ToInt32) \
+ V(PPC_Float64ToUint32) \
+ V(PPC_Float64ToFloat32) \
+ V(PPC_LoadWordS8) \
+ V(PPC_LoadWordU8) \
+ V(PPC_LoadWordS16) \
+ V(PPC_LoadWordU16) \
+ V(PPC_LoadWordS32) \
+ V(PPC_LoadWord64) \
+ V(PPC_LoadFloat32) \
+ V(PPC_LoadFloat64) \
+ V(PPC_StoreWord8) \
+ V(PPC_StoreWord16) \
+ V(PPC_StoreWord32) \
+ V(PPC_StoreWord64) \
+ V(PPC_StoreFloat32) \
+ V(PPC_StoreFloat64) \
+ V(PPC_StoreWriteBarrier)
+
+
+// Addressing modes represent the "shape" of inputs to an instruction.
+// Many instructions support multiple addressing modes. Addressing modes
+// are encoded into the InstructionCode of the instruction and tell the
+// code generator after register allocation which assembler method to call.
+//
+// We use the following local notation for addressing modes:
+//
+// R = register
+// O = register or stack slot
+// D = double register
+// I = immediate (handle, external, int32)
+// MRI = [register + immediate]
+// MRR = [register + register]
+#define TARGET_ADDRESSING_MODE_LIST(V) \
+ V(MRI) /* [%r0 + K] */ \
+ V(MRR) /* [%r0 + %r1] */
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+
+#endif // V8_COMPILER_PPC_INSTRUCTION_CODES_PPC_H_
--- /dev/null
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/instruction-selector-impl.h"
+#include "src/compiler/node-matchers.h"
+#include "src/compiler/node-properties.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
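+// Immediate operand classes, reflecting what the PPC encodings can hold:
+// 16-bit signed/unsigned D-form fields, negatable 16-bit values (for subi),
+// 4-byte-aligned DS-form fields, and 5- or 6-bit shift amounts.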
+enum ImmediateMode {
+ kInt16Imm,
+ kInt16Imm_Unsigned,
+ kInt16Imm_Negate,
+ kInt16Imm_4ByteAligned,
+ kShift32Imm,
+ kShift64Imm,
+ kNoImmediate
+};
+
+
+// Adds PPC-specific methods for generating operands.
+class PPCOperandGenerator FINAL : public OperandGenerator {
+ public:
+ explicit PPCOperandGenerator(InstructionSelector* selector)
+ : OperandGenerator(selector) {}
+
+ InstructionOperand UseOperand(Node* node, ImmediateMode mode) {
+ if (CanBeImmediate(node, mode)) {
+ return UseImmediate(node);
+ }
+ return UseRegister(node);
+ }
+
+ bool CanBeImmediate(Node* node, ImmediateMode mode) {
+ int64_t value;
+ if (node->opcode() == IrOpcode::kInt32Constant)
+ value = OpParameter<int32_t>(node);
+ else if (node->opcode() == IrOpcode::kInt64Constant)
+ value = OpParameter<int64_t>(node);
+ else
+ return false;
+ return CanBeImmediate(value, mode);
+ }
+
+ bool CanBeImmediate(int64_t value, ImmediateMode mode) {
+ switch (mode) {
+ case kInt16Imm:
+ return is_int16(value);
+ case kInt16Imm_Unsigned:
+ return is_uint16(value);
+ case kInt16Imm_Negate:
+ return is_int16(-value);
+ case kInt16Imm_4ByteAligned:
+ return is_int16(value) && !(value & 3);
+ case kShift32Imm:
+ return 0 <= value && value < 32;
+ case kShift64Imm:
+ return 0 <= value && value < 64;
+ case kNoImmediate:
+ return false;
+ }
+ return false;
+ }
+};
+
+
+static void VisitRRFloat64(InstructionSelector* selector, ArchOpcode opcode,
+ Node* node) {
+ PPCOperandGenerator g(selector);
+ selector->Emit(opcode, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)));
+}
+
+
+static void VisitRRR(InstructionSelector* selector, Node* node,
+ ArchOpcode opcode) {
+ PPCOperandGenerator g(selector);
+ selector->Emit(opcode, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)),
+ g.UseRegister(node->InputAt(1)));
+}
+
+
+static void VisitRRRFloat64(InstructionSelector* selector, Node* node,
+ ArchOpcode opcode) {
+ PPCOperandGenerator g(selector);
+ selector->Emit(opcode, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)),
+ g.UseRegister(node->InputAt(1)));
+}
+
+
+static void VisitRRO(InstructionSelector* selector, Node* node,
+ ArchOpcode opcode, ImmediateMode operand_mode) {
+ PPCOperandGenerator g(selector);
+ selector->Emit(opcode, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)),
+ g.UseOperand(node->InputAt(1), operand_mode));
+}
+
+
+// Shared routine for multiple binary operations.
+template <typename Matcher>
+static void VisitBinop(InstructionSelector* selector, Node* node,
+ InstructionCode opcode, ImmediateMode operand_mode,
+ FlagsContinuation* cont) {
+ PPCOperandGenerator g(selector);
+ Matcher m(node);
+ InstructionOperand inputs[4];
+ size_t input_count = 0;
+ InstructionOperand outputs[2];
+ size_t output_count = 0;
+
+ inputs[input_count++] = g.UseRegister(m.left().node());
+ inputs[input_count++] = g.UseOperand(m.right().node(), operand_mode);
+
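+  // A branch continuation consumes the true/false block labels as extra
+  // inputs; a set continuation defines an extra boolean output register.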
+ if (cont->IsBranch()) {
+ inputs[input_count++] = g.Label(cont->true_block());
+ inputs[input_count++] = g.Label(cont->false_block());
+ }
+
+ outputs[output_count++] = g.DefineAsRegister(node);
+ if (cont->IsSet()) {
+ outputs[output_count++] = g.DefineAsRegister(cont->result());
+ }
+
+ DCHECK_NE(0u, input_count);
+ DCHECK_NE(0u, output_count);
+ DCHECK_GE(arraysize(inputs), input_count);
+ DCHECK_GE(arraysize(outputs), output_count);
+
+ Instruction* instr = selector->Emit(cont->Encode(opcode), output_count,
+ outputs, input_count, inputs);
+ if (cont->IsBranch()) instr->MarkAsControl();
+}
+
+
+// Shared routine for multiple binary operations.
+template <typename Matcher>
+static void VisitBinop(InstructionSelector* selector, Node* node,
+ ArchOpcode opcode, ImmediateMode operand_mode) {
+ FlagsContinuation cont;
+ VisitBinop<Matcher>(selector, node, opcode, operand_mode, &cont);
+}
+
+
+void InstructionSelector::VisitLoad(Node* node) {
+ MachineType rep = RepresentationOf(OpParameter<LoadRepresentation>(node));
+ MachineType typ = TypeOf(OpParameter<LoadRepresentation>(node));
+ PPCOperandGenerator g(this);
+ Node* base = node->InputAt(0);
+ Node* offset = node->InputAt(1);
+
+ ArchOpcode opcode;
+ ImmediateMode mode = kInt16Imm;
+ switch (rep) {
+ case kRepFloat32:
+ opcode = kPPC_LoadFloat32;
+ break;
+ case kRepFloat64:
+ opcode = kPPC_LoadFloat64;
+ break;
+ case kRepBit: // Fall through.
+ case kRepWord8:
+ opcode = (typ == kTypeInt32) ? kPPC_LoadWordS8 : kPPC_LoadWordU8;
+ break;
+ case kRepWord16:
+ opcode = (typ == kTypeInt32) ? kPPC_LoadWordS16 : kPPC_LoadWordU16;
+ break;
+#if !V8_TARGET_ARCH_PPC64
+ case kRepTagged: // Fall through.
+#endif
+ case kRepWord32:
+ opcode = kPPC_LoadWordS32;
+#if V8_TARGET_ARCH_PPC64
+ // TODO(mbrandy): this applies to signed loads only (lwa)
+ mode = kInt16Imm_4ByteAligned;
+#endif
+ break;
+#if V8_TARGET_ARCH_PPC64
+ case kRepTagged: // Fall through.
+ case kRepWord64:
+ opcode = kPPC_LoadWord64;
+ mode = kInt16Imm_4ByteAligned;
+ break;
+#endif
+ default:
+ UNREACHABLE();
+ return;
+ }
+ if (g.CanBeImmediate(offset, mode)) {
+ Emit(opcode | AddressingModeField::encode(kMode_MRI),
+ g.DefineAsRegister(node), g.UseRegister(base), g.UseImmediate(offset));
+ } else if (g.CanBeImmediate(base, mode)) {
+ Emit(opcode | AddressingModeField::encode(kMode_MRI),
+ g.DefineAsRegister(node), g.UseRegister(offset), g.UseImmediate(base));
+ } else {
+ Emit(opcode | AddressingModeField::encode(kMode_MRR),
+ g.DefineAsRegister(node), g.UseRegister(base), g.UseRegister(offset));
+ }
+}
+
+
+void InstructionSelector::VisitStore(Node* node) {
+ PPCOperandGenerator g(this);
+ Node* base = node->InputAt(0);
+ Node* offset = node->InputAt(1);
+ Node* value = node->InputAt(2);
+
+ StoreRepresentation store_rep = OpParameter<StoreRepresentation>(node);
+ MachineType rep = RepresentationOf(store_rep.machine_type());
+ if (store_rep.write_barrier_kind() == kFullWriteBarrier) {
+ DCHECK(rep == kRepTagged);
+ // TODO(dcarney): refactor RecordWrite function to take temp registers
+ // and pass them here instead of using fixed regs
+ // TODO(dcarney): handle immediate indices.
+ InstructionOperand temps[] = {g.TempRegister(r8), g.TempRegister(r9)};
+ Emit(kPPC_StoreWriteBarrier, g.NoOutput(), g.UseFixed(base, r7),
+ g.UseFixed(offset, r8), g.UseFixed(value, r9), arraysize(temps),
+ temps);
+ return;
+ }
+ DCHECK_EQ(kNoWriteBarrier, store_rep.write_barrier_kind());
+ ArchOpcode opcode;
+ ImmediateMode mode = kInt16Imm;
+ switch (rep) {
+ case kRepFloat32:
+ opcode = kPPC_StoreFloat32;
+ break;
+ case kRepFloat64:
+ opcode = kPPC_StoreFloat64;
+ break;
+ case kRepBit: // Fall through.
+ case kRepWord8:
+ opcode = kPPC_StoreWord8;
+ break;
+ case kRepWord16:
+ opcode = kPPC_StoreWord16;
+ break;
+#if !V8_TARGET_ARCH_PPC64
+ case kRepTagged: // Fall through.
+#endif
+ case kRepWord32:
+ opcode = kPPC_StoreWord32;
+ break;
+#if V8_TARGET_ARCH_PPC64
+ case kRepTagged: // Fall through.
+ case kRepWord64:
+ opcode = kPPC_StoreWord64;
+ mode = kInt16Imm_4ByteAligned;
+ break;
+#endif
+ default:
+ UNREACHABLE();
+ return;
+ }
+ if (g.CanBeImmediate(offset, mode)) {
+ Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
+ g.UseRegister(base), g.UseImmediate(offset), g.UseRegister(value));
+ } else if (g.CanBeImmediate(base, mode)) {
+ Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
+ g.UseRegister(offset), g.UseImmediate(base), g.UseRegister(value));
+ } else {
+ Emit(opcode | AddressingModeField::encode(kMode_MRR), g.NoOutput(),
+ g.UseRegister(base), g.UseRegister(offset), g.UseRegister(value));
+ }
+}
+
+
+void InstructionSelector::VisitCheckedLoad(Node* node) {
+ MachineType rep = RepresentationOf(OpParameter<MachineType>(node));
+ MachineType typ = TypeOf(OpParameter<MachineType>(node));
+ PPCOperandGenerator g(this);
+ Node* const base = node->InputAt(0);
+ Node* const offset = node->InputAt(1);
+ Node* const length = node->InputAt(2);
+ ArchOpcode opcode;
+ switch (rep) {
+ case kRepWord8:
+ opcode = typ == kTypeInt32 ? kCheckedLoadInt8 : kCheckedLoadUint8;
+ break;
+ case kRepWord16:
+ opcode = typ == kTypeInt32 ? kCheckedLoadInt16 : kCheckedLoadUint16;
+ break;
+ case kRepWord32:
+ opcode = kCheckedLoadWord32;
+ break;
+ case kRepFloat32:
+ opcode = kCheckedLoadFloat32;
+ break;
+ case kRepFloat64:
+ opcode = kCheckedLoadFloat64;
+ break;
+ default:
+ UNREACHABLE();
+ return;
+ }
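+  // Checked accesses always use MRR addressing so that the code generator
+  // can bounds-check the offset register (the checked macros DCHECK this).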
+ AddressingMode addressingMode = kMode_MRR;
+ Emit(opcode | AddressingModeField::encode(addressingMode),
+ g.DefineAsRegister(node), g.UseRegister(base), g.UseRegister(offset),
+ g.UseOperand(length, kInt16Imm_Unsigned));
+}
+
+
+void InstructionSelector::VisitCheckedStore(Node* node) {
+ MachineType rep = RepresentationOf(OpParameter<MachineType>(node));
+ PPCOperandGenerator g(this);
+ Node* const base = node->InputAt(0);
+ Node* const offset = node->InputAt(1);
+ Node* const length = node->InputAt(2);
+ Node* const value = node->InputAt(3);
+ ArchOpcode opcode;
+ switch (rep) {
+ case kRepWord8:
+ opcode = kCheckedStoreWord8;
+ break;
+ case kRepWord16:
+ opcode = kCheckedStoreWord16;
+ break;
+ case kRepWord32:
+ opcode = kCheckedStoreWord32;
+ break;
+ case kRepFloat32:
+ opcode = kCheckedStoreFloat32;
+ break;
+ case kRepFloat64:
+ opcode = kCheckedStoreFloat64;
+ break;
+ default:
+ UNREACHABLE();
+ return;
+ }
+ AddressingMode addressingMode = kMode_MRR;
+ Emit(opcode | AddressingModeField::encode(addressingMode), g.NoOutput(),
+ g.UseRegister(base), g.UseRegister(offset),
+ g.UseOperand(length, kInt16Imm_Unsigned), g.UseRegister(value));
+}
+
+
+template <typename Matcher>
+static void VisitLogical(InstructionSelector* selector, Node* node, Matcher* m,
+ ArchOpcode opcode, bool left_can_cover,
+ bool right_can_cover, ImmediateMode imm_mode) {
+ PPCOperandGenerator g(selector);
+
+ // Map instruction to equivalent operation with inverted right input.
+ ArchOpcode inv_opcode = opcode;
+ switch (opcode) {
+ case kPPC_And32:
+ inv_opcode = kPPC_AndComplement32;
+ break;
+ case kPPC_And64:
+ inv_opcode = kPPC_AndComplement64;
+ break;
+ case kPPC_Or32:
+ inv_opcode = kPPC_OrComplement32;
+ break;
+ case kPPC_Or64:
+ inv_opcode = kPPC_OrComplement64;
+ break;
+ default:
+ UNREACHABLE();
+ }
+
+ // Select Logical(y, ~x) for Logical(Xor(x, -1), y).
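+  // E.g. (x ^ -1) & y becomes andc(y, x), i.e. y & ~x.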
+ if ((m->left().IsWord32Xor() || m->left().IsWord64Xor()) && left_can_cover) {
+ Matcher mleft(m->left().node());
+ if (mleft.right().Is(-1)) {
+ selector->Emit(inv_opcode, g.DefineAsRegister(node),
+ g.UseRegister(m->right().node()),
+ g.UseRegister(mleft.left().node()));
+ return;
+ }
+ }
+
+ // Select Logical(x, ~y) for Logical(x, Xor(y, -1)).
+ if ((m->right().IsWord32Xor() || m->right().IsWord64Xor()) &&
+ right_can_cover) {
+ Matcher mright(m->right().node());
+ if (mright.right().Is(-1)) {
+ // TODO(all): support shifted operand on right.
+ selector->Emit(inv_opcode, g.DefineAsRegister(node),
+ g.UseRegister(m->left().node()),
+ g.UseRegister(mright.left().node()));
+ return;
+ }
+ }
+
+ VisitBinop<Matcher>(selector, node, opcode, imm_mode);
+}
+
+
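+// Detects a single contiguous run of 1 bits and reports its boundaries in
+// *mb/*me, counted from bit 0 = LSB (the code generator converts these to
+// PPC's MSB-0 numbering). Example: 0x00ff0000 yields *mb = 23, *me = 16.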
+static inline bool IsContiguousMask32(uint32_t value, int* mb, int* me) {
+ int mask_width = base::bits::CountPopulation32(value);
+ int mask_msb = base::bits::CountLeadingZeros32(value);
+ int mask_lsb = base::bits::CountTrailingZeros32(value);
+ if ((mask_width == 0) || (mask_msb + mask_width + mask_lsb != 32))
+ return false;
+ *mb = mask_lsb + mask_width - 1;
+ *me = mask_lsb;
+ return true;
+}
+
+
+#if V8_TARGET_ARCH_PPC64
+static inline bool IsContiguousMask64(uint64_t value, int* mb, int* me) {
+ int mask_width = base::bits::CountPopulation64(value);
+ int mask_msb = base::bits::CountLeadingZeros64(value);
+ int mask_lsb = base::bits::CountTrailingZeros64(value);
+ if ((mask_width == 0) || (mask_msb + mask_width + mask_lsb != 64))
+ return false;
+ *mb = mask_lsb + mask_width - 1;
+ *me = mask_lsb;
+ return true;
+}
+#endif
+
+
+// TODO(mbrandy): Absorb rotate-right into rlwinm?
+void InstructionSelector::VisitWord32And(Node* node) {
+ PPCOperandGenerator g(this);
+ Int32BinopMatcher m(node);
+ int mb;
+ int me;
+ if (m.right().HasValue() && IsContiguousMask32(m.right().Value(), &mb, &me)) {
+ int sh = 0;
+ Node* left = m.left().node();
+ if ((m.left().IsWord32Shr() || m.left().IsWord32Shl()) &&
+ CanCover(node, left)) {
+      // Try to absorb left/right shift into rlwinm.
+ Int32BinopMatcher mleft(m.left().node());
+ if (mleft.right().IsInRange(0, 31)) {
+ left = mleft.left().node();
+ sh = mleft.right().Value();
+ if (m.left().IsWord32Shr()) {
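+          // A right shift by sh equals a rotate left by (32 - sh), provided
+          // the mask excludes the sh bits rotated in at the top.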
+ // Adjust the mask such that it doesn't include any rotated bits.
+ if (mb > 31 - sh) mb = 31 - sh;
+ sh = (32 - sh) & 0x1f;
+ } else {
+ // Adjust the mask such that it doesn't include any rotated bits.
+ if (me < sh) me = sh;
+ }
+ }
+ }
+ if (mb >= me) {
+ Emit(kPPC_RotLeftAndMask32, g.DefineAsRegister(node), g.UseRegister(left),
+ g.TempImmediate(sh), g.TempImmediate(mb), g.TempImmediate(me));
+ return;
+ }
+ }
+ VisitLogical<Int32BinopMatcher>(
+ this, node, &m, kPPC_And32, CanCover(node, m.left().node()),
+ CanCover(node, m.right().node()), kInt16Imm_Unsigned);
+}
+
+
+#if V8_TARGET_ARCH_PPC64
+// TODO(mbrandy): Absorb rotate-right into rldic?
+void InstructionSelector::VisitWord64And(Node* node) {
+ PPCOperandGenerator g(this);
+ Int64BinopMatcher m(node);
+ int mb;
+ int me;
+ if (m.right().HasValue() && IsContiguousMask64(m.right().Value(), &mb, &me)) {
+ int sh = 0;
+ Node* left = m.left().node();
+ if ((m.left().IsWord64Shr() || m.left().IsWord64Shl()) &&
+ CanCover(node, left)) {
+      // Try to absorb left/right shift into rldic.
+ Int64BinopMatcher mleft(m.left().node());
+ if (mleft.right().IsInRange(0, 63)) {
+ left = mleft.left().node();
+ sh = mleft.right().Value();
+ if (m.left().IsWord64Shr()) {
+ // Adjust the mask such that it doesn't include any rotated bits.
+ if (mb > 63 - sh) mb = 63 - sh;
+ sh = (64 - sh) & 0x3f;
+ } else {
+ // Adjust the mask such that it doesn't include any rotated bits.
+ if (me < sh) me = sh;
+ }
+ }
+ }
+ if (mb >= me) {
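+      // Pick the rotate-and-clear form matching the mask shape: rldicl
+      // clears high bits (mask reaches the LSB), rldicr clears low bits
+      // (mask reaches the MSB), and rldic clears both sides of a
+      // left-shifted field.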
+ bool match = false;
+ ArchOpcode opcode;
+ int mask;
+ if (me == 0) {
+ match = true;
+ opcode = kPPC_RotLeftAndClearLeft64;
+ mask = mb;
+ } else if (mb == 63) {
+ match = true;
+ opcode = kPPC_RotLeftAndClearRight64;
+ mask = me;
+ } else if (sh && me <= sh && m.left().IsWord64Shl()) {
+ match = true;
+ opcode = kPPC_RotLeftAndClear64;
+ mask = mb;
+ }
+ if (match) {
+ Emit(opcode, g.DefineAsRegister(node), g.UseRegister(left),
+ g.TempImmediate(sh), g.TempImmediate(mask));
+ return;
+ }
+ }
+ }
+ VisitLogical<Int64BinopMatcher>(
+ this, node, &m, kPPC_And64, CanCover(node, m.left().node()),
+ CanCover(node, m.right().node()), kInt16Imm_Unsigned);
+}
+#endif
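+
+// The three 64-bit forms presumably correspond to the rldicl/rldicr/rldic
+// family: me == 0 keeps a mask that ends at bit 0 (clearing everything
+// above mb), mb == 63 keeps a mask that starts at bit 63 (clearing
+// everything below me), and the remaining case clears both ends, which is
+// only expressible when the mask's low end is covered by the left-shift
+// amount (me <= sh).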
+
+
+void InstructionSelector::VisitWord32Or(Node* node) {
+ Int32BinopMatcher m(node);
+ VisitLogical<Int32BinopMatcher>(
+ this, node, &m, kPPC_Or32, CanCover(node, m.left().node()),
+ CanCover(node, m.right().node()), kInt16Imm_Unsigned);
+}
+
+
+#if V8_TARGET_ARCH_PPC64
+void InstructionSelector::VisitWord64Or(Node* node) {
+ Int64BinopMatcher m(node);
+ VisitLogical<Int64BinopMatcher>(
+ this, node, &m, kPPC_Or64, CanCover(node, m.left().node()),
+ CanCover(node, m.right().node()), kInt16Imm_Unsigned);
+}
+#endif
+
+
+void InstructionSelector::VisitWord32Xor(Node* node) {
+ PPCOperandGenerator g(this);
+ Int32BinopMatcher m(node);
+ if (m.right().Is(-1)) {
+ Emit(kPPC_Not32, g.DefineAsRegister(node), g.UseRegister(m.left().node()));
+ } else {
+ VisitBinop<Int32BinopMatcher>(this, node, kPPC_Xor32, kInt16Imm_Unsigned);
+ }
+}
+
+
+#if V8_TARGET_ARCH_PPC64
+void InstructionSelector::VisitWord64Xor(Node* node) {
+ PPCOperandGenerator g(this);
+ Int64BinopMatcher m(node);
+ if (m.right().Is(-1)) {
+ Emit(kPPC_Not64, g.DefineAsRegister(node), g.UseRegister(m.left().node()));
+ } else {
+ VisitBinop<Int64BinopMatcher>(this, node, kPPC_Xor64, kInt16Imm_Unsigned);
+ }
+}
+#endif
+
+
+void InstructionSelector::VisitWord32Shl(Node* node) {
+ PPCOperandGenerator g(this);
+ Int32BinopMatcher m(node);
+ if (m.left().IsWord32And() && m.right().IsInRange(0, 31)) {
+ // Try to absorb logical-and into rlwinm
+ Int32BinopMatcher mleft(m.left().node());
+ int sh = m.right().Value();
+ int mb;
+ int me;
+ if (mleft.right().HasValue() &&
+ IsContiguousMask32(mleft.right().Value() << sh, &mb, &me)) {
+ // Adjust the mask such that it doesn't include any rotated bits.
+ if (me < sh) me = sh;
+ if (mb >= me) {
+ Emit(kPPC_RotLeftAndMask32, g.DefineAsRegister(node),
+ g.UseRegister(mleft.left().node()), g.TempImmediate(sh),
+ g.TempImmediate(mb), g.TempImmediate(me));
+ return;
+ }
+ }
+ }
+ VisitRRO(this, node, kPPC_ShiftLeft32, kShift32Imm);
+}
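+
+// Illustration: for (x & 0xff) << 8 the shifted mask 0xff00 gives mb = 15,
+// me = 8; me is not below sh, so the logical-and and the shift fold into a
+// single kPPC_RotLeftAndMask32 with sh = 8, mb = 15, me = 8.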
+
+
+#if V8_TARGET_ARCH_PPC64
+void InstructionSelector::VisitWord64Shl(Node* node) {
+ PPCOperandGenerator g(this);
+ Int64BinopMatcher m(node);
+ // TODO(mbrandy): eliminate left sign extension if right >= 32
+ if (m.left().IsWord64And() && m.right().IsInRange(0, 63)) {
+ // Try to absorb logical-and into rldic
+ Int64BinopMatcher mleft(m.left().node());
+ int sh = m.right().Value();
+ int mb;
+ int me;
+ if (mleft.right().HasValue() &&
+ IsContiguousMask64(mleft.right().Value() << sh, &mb, &me)) {
+ // Adjust the mask such that it doesn't include any rotated bits.
+ if (me < sh) me = sh;
+ if (mb >= me) {
+ bool match = false;
+ ArchOpcode opcode;
+ int mask;
+ if (me == 0) {
+ match = true;
+ opcode = kPPC_RotLeftAndClearLeft64;
+ mask = mb;
+ } else if (mb == 63) {
+ match = true;
+ opcode = kPPC_RotLeftAndClearRight64;
+ mask = me;
+ } else if (sh && me <= sh) {
+ match = true;
+ opcode = kPPC_RotLeftAndClear64;
+ mask = mb;
+ }
+ if (match) {
+ Emit(opcode, g.DefineAsRegister(node),
+ g.UseRegister(mleft.left().node()), g.TempImmediate(sh),
+ g.TempImmediate(mask));
+ return;
+ }
+ }
+ }
+ }
+ VisitRRO(this, node, kPPC_ShiftLeft64, kShift64Imm);
+}
+#endif
+
+
+void InstructionSelector::VisitWord32Shr(Node* node) {
+ PPCOperandGenerator g(this);
+ Int32BinopMatcher m(node);
+ if (m.left().IsWord32And() && m.right().IsInRange(0, 31)) {
+ // Try to absorb logical-and into rlwinm
+ Int32BinopMatcher mleft(m.left().node());
+ int sh = m.right().Value();
+ int mb;
+ int me;
+ if (mleft.right().HasValue() &&
+ IsContiguousMask32((uint32_t)(mleft.right().Value()) >> sh, &mb, &me)) {
+ // Adjust the mask such that it doesn't include any rotated bits.
+ if (mb > 31 - sh) mb = 31 - sh;
+ sh = (32 - sh) & 0x1f;
+ if (mb >= me) {
+ Emit(kPPC_RotLeftAndMask32, g.DefineAsRegister(node),
+ g.UseRegister(mleft.left().node()), g.TempImmediate(sh),
+ g.TempImmediate(mb), g.TempImmediate(me));
+ return;
+ }
+ }
+ }
+ VisitRRO(this, node, kPPC_ShiftRight32, kShift32Imm);
+}
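+
+// Illustration: for (x & 0xff00) >>> 8 the shifted mask 0xff gives mb = 7,
+// me = 0; the rotate amount becomes sh = (32 - 8) & 0x1f = 24, so the
+// masked right shift is emitted as one left rotate by 24 keeping bits 0..7.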
+
+
+#if V8_TARGET_ARCH_PPC64
+void InstructionSelector::VisitWord64Shr(Node* node) {
+ PPCOperandGenerator g(this);
+ Int64BinopMatcher m(node);
+ if (m.left().IsWord64And() && m.right().IsInRange(0, 63)) {
+ // Try to absorb logical-and into rldic
+ Int64BinopMatcher mleft(m.left().node());
+ int sh = m.right().Value();
+ int mb;
+ int me;
+ if (mleft.right().HasValue() &&
+ IsContiguousMask64((uint64_t)(mleft.right().Value()) >> sh, &mb, &me)) {
+ // Adjust the mask such that it doesn't include any rotated bits.
+ if (mb > 63 - sh) mb = 63 - sh;
+ sh = (64 - sh) & 0x3f;
+ if (mb >= me) {
+ bool match = false;
+ ArchOpcode opcode;
+ int mask;
+ if (me == 0) {
+ match = true;
+ opcode = kPPC_RotLeftAndClearLeft64;
+ mask = mb;
+ } else if (mb == 63) {
+ match = true;
+ opcode = kPPC_RotLeftAndClearRight64;
+ mask = me;
+ }
+ if (match) {
+ Emit(opcode, g.DefineAsRegister(node),
+ g.UseRegister(mleft.left().node()), g.TempImmediate(sh),
+ g.TempImmediate(mask));
+ return;
+ }
+ }
+ }
+ }
+ VisitRRO(this, node, kPPC_ShiftRight64, kShift64Imm);
+}
+#endif
+
+
+void InstructionSelector::VisitWord32Sar(Node* node) {
+ PPCOperandGenerator g(this);
+ Int32BinopMatcher m(node);
+ // Replace with sign extension for (x << K) >> K where K is 16 or 24.
+ if (CanCover(node, m.left().node()) && m.left().IsWord32Shl()) {
+ Int32BinopMatcher mleft(m.left().node());
+ if (mleft.right().Is(16) && m.right().Is(16)) {
+ Emit(kPPC_ExtendSignWord16, g.DefineAsRegister(node),
+ g.UseRegister(mleft.left().node()));
+ return;
+ } else if (mleft.right().Is(24) && m.right().Is(24)) {
+ Emit(kPPC_ExtendSignWord8, g.DefineAsRegister(node),
+ g.UseRegister(mleft.left().node()));
+ return;
+ }
+ }
+ VisitRRO(this, node, kPPC_ShiftRightAlg32, kShift32Imm);
+}
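+
+// The equivalence relied on above: with arithmetic shifts, (x << 16) >> 16
+// reproduces the low 16 bits of x as a signed value, and (x << 24) >> 24
+// does the same for the low byte; these presumably lower to PPC's extsh
+// and extsb sign-extension instructions.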
+
+
+#if V8_TARGET_ARCH_PPC64
+void InstructionSelector::VisitWord64Sar(Node* node) {
+ VisitRRO(this, node, kPPC_ShiftRightAlg64, kShift64Imm);
+}
+#endif
+
+
+// TODO(mbrandy): Absorb logical-and into rlwinm?
+void InstructionSelector::VisitWord32Ror(Node* node) {
+ VisitRRO(this, node, kPPC_RotRight32, kShift32Imm);
+}
+
+
+#if V8_TARGET_ARCH_PPC64
+// TODO(mbrandy): Absorb logical-and into rldic?
+void InstructionSelector::VisitWord64Ror(Node* node) {
+ VisitRRO(this, node, kPPC_RotRight64, kShift64Imm);
+}
+#endif
+
+
+void InstructionSelector::VisitInt32Add(Node* node) {
+ VisitBinop<Int32BinopMatcher>(this, node, kPPC_Add32, kInt16Imm);
+}
+
+
+#if V8_TARGET_ARCH_PPC64
+void InstructionSelector::VisitInt64Add(Node* node) {
+ VisitBinop<Int64BinopMatcher>(this, node, kPPC_Add64, kInt16Imm);
+}
+#endif
+
+
+void InstructionSelector::VisitInt32Sub(Node* node) {
+ PPCOperandGenerator g(this);
+ Int32BinopMatcher m(node);
+ if (m.left().Is(0)) {
+ Emit(kPPC_Neg32, g.DefineAsRegister(node), g.UseRegister(m.right().node()));
+ } else {
+ VisitBinop<Int32BinopMatcher>(this, node, kPPC_Sub32, kInt16Imm_Negate);
+ }
+}
+
+
+#if V8_TARGET_ARCH_PPC64
+void InstructionSelector::VisitInt64Sub(Node* node) {
+ PPCOperandGenerator g(this);
+ Int64BinopMatcher m(node);
+ if (m.left().Is(0)) {
+ Emit(kPPC_Neg64, g.DefineAsRegister(node), g.UseRegister(m.right().node()));
+ } else {
+ VisitBinop<Int64BinopMatcher>(this, node, kPPC_Sub64, kInt16Imm_Negate);
+ }
+}
+#endif
+
+
+void InstructionSelector::VisitInt32Mul(Node* node) {
+ VisitRRR(this, node, kPPC_Mul32);
+}
+
+
+#if V8_TARGET_ARCH_PPC64
+void InstructionSelector::VisitInt64Mul(Node* node) {
+ VisitRRR(this, node, kPPC_Mul64);
+}
+#endif
+
+
+void InstructionSelector::VisitInt32MulHigh(Node* node) {
+ PPCOperandGenerator g(this);
+ Emit(kPPC_MulHigh32, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
+}
+
+
+void InstructionSelector::VisitUint32MulHigh(Node* node) {
+ PPCOperandGenerator g(this);
+ Emit(kPPC_MulHighU32, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
+}
+
+
+void InstructionSelector::VisitInt32Div(Node* node) {
+ VisitRRR(this, node, kPPC_Div32);
+}
+
+
+#if V8_TARGET_ARCH_PPC64
+void InstructionSelector::VisitInt64Div(Node* node) {
+ VisitRRR(this, node, kPPC_Div64);
+}
+#endif
+
+
+void InstructionSelector::VisitUint32Div(Node* node) {
+ VisitRRR(this, node, kPPC_DivU32);
+}
+
+
+#if V8_TARGET_ARCH_PPC64
+void InstructionSelector::VisitUint64Div(Node* node) {
+ VisitRRR(this, node, kPPC_DivU64);
+}
+#endif
+
+
+void InstructionSelector::VisitInt32Mod(Node* node) {
+ VisitRRR(this, node, kPPC_Mod32);
+}
+
+
+#if V8_TARGET_ARCH_PPC64
+void InstructionSelector::VisitInt64Mod(Node* node) {
+ VisitRRR(this, node, kPPC_Mod64);
+}
+#endif
+
+
+void InstructionSelector::VisitUint32Mod(Node* node) {
+ VisitRRR(this, node, kPPC_ModU32);
+}
+
+
+#if V8_TARGET_ARCH_PPC64
+void InstructionSelector::VisitUint64Mod(Node* node) {
+ VisitRRR(this, node, kPPC_ModU64);
+}
+#endif
+
+
+void InstructionSelector::VisitChangeFloat32ToFloat64(Node* node) {
+ PPCOperandGenerator g(this);
+ Emit(kPPC_Float32ToFloat64, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitChangeInt32ToFloat64(Node* node) {
+ PPCOperandGenerator g(this);
+ Emit(kPPC_Int32ToFloat64, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitChangeUint32ToFloat64(Node* node) {
+ PPCOperandGenerator g(this);
+ Emit(kPPC_Uint32ToFloat64, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitChangeFloat64ToInt32(Node* node) {
+ PPCOperandGenerator g(this);
+ Emit(kPPC_Float64ToInt32, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitChangeFloat64ToUint32(Node* node) {
+ PPCOperandGenerator g(this);
+ Emit(kPPC_Float64ToUint32, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)));
+}
+
+
+#if V8_TARGET_ARCH_PPC64
+void InstructionSelector::VisitChangeInt32ToInt64(Node* node) {
+ // TODO(mbrandy): inspect input to see if nop is appropriate.
+ PPCOperandGenerator g(this);
+ Emit(kPPC_ExtendSignWord32, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitChangeUint32ToUint64(Node* node) {
+ // TODO(mbrandy): inspect input to see if nop is appropriate.
+ PPCOperandGenerator g(this);
+ Emit(kPPC_Uint32ToUint64, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)));
+}
+#endif
+
+
+void InstructionSelector::VisitTruncateFloat64ToFloat32(Node* node) {
+ PPCOperandGenerator g(this);
+ Emit(kPPC_Float64ToFloat32, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)));
+}
+
+
+#if V8_TARGET_ARCH_PPC64
+void InstructionSelector::VisitTruncateInt64ToInt32(Node* node) {
+ PPCOperandGenerator g(this);
+ // TODO(mbrandy): inspect input to see if nop is appropriate.
+ Emit(kPPC_Int64ToInt32, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)));
+}
+#endif
+
+
+void InstructionSelector::VisitFloat64Add(Node* node) {
+ // TODO(mbrandy): detect multiply-add
+ VisitRRRFloat64(this, node, kPPC_AddFloat64);
+}
+
+
+void InstructionSelector::VisitFloat64Sub(Node* node) {
+ // TODO(mbrandy): detect multiply-subtract
+ VisitRRRFloat64(this, node, kPPC_SubFloat64);
+}
+
+
+void InstructionSelector::VisitFloat64Mul(Node* node) {
+ // TODO(mbrandy): detect negate
+ VisitRRRFloat64(this, node, kPPC_MulFloat64);
+}
+
+
+void InstructionSelector::VisitFloat64Div(Node* node) {
+ VisitRRRFloat64(this, node, kPPC_DivFloat64);
+}
+
+
+void InstructionSelector::VisitFloat64Mod(Node* node) {
+ PPCOperandGenerator g(this);
+ Emit(kPPC_ModFloat64, g.DefineAsFixed(node, d1),
+ g.UseFixed(node->InputAt(0), d1),
+ g.UseFixed(node->InputAt(1), d2))->MarkAsCall();
+}
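+
+// kPPC_ModFloat64 has no single-instruction lowering; the fixed d1/d2
+// operands together with MarkAsCall suggest an out-of-line, fmod-style
+// call whose calling convention expects the arguments in those
+// floating-point registers.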
+
+
+void InstructionSelector::VisitFloat64Sqrt(Node* node) {
+ VisitRRFloat64(this, kPPC_SqrtFloat64, node);
+}
+
+
+void InstructionSelector::VisitFloat64Floor(Node* node) {
+ VisitRRFloat64(this, kPPC_FloorFloat64, node);
+}
+
+
+void InstructionSelector::VisitFloat64Ceil(Node* node) {
+ VisitRRFloat64(this, kPPC_CeilFloat64, node);
+}
+
+
+void InstructionSelector::VisitFloat64RoundTruncate(Node* node) {
+ VisitRRFloat64(this, kPPC_TruncateFloat64, node);
+}
+
+
+void InstructionSelector::VisitFloat64RoundTiesAway(Node* node) {
+ VisitRRFloat64(this, kPPC_RoundFloat64, node);
+}
+
+
+void InstructionSelector::VisitInt32AddWithOverflow(Node* node) {
+ if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
+ FlagsContinuation cont(kOverflow, ovf);
+ return VisitBinop<Int32BinopMatcher>(this, node, kPPC_AddWithOverflow32,
+ kInt16Imm, &cont);
+ }
+ FlagsContinuation cont;
+ VisitBinop<Int32BinopMatcher>(this, node, kPPC_AddWithOverflow32, kInt16Imm,
+ &cont);
+}
+
+
+void InstructionSelector::VisitInt32SubWithOverflow(Node* node) {
+ if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
+ FlagsContinuation cont(kOverflow, ovf);
+ return VisitBinop<Int32BinopMatcher>(this, node, kPPC_SubWithOverflow32,
+ kInt16Imm_Negate, &cont);
+ }
+ FlagsContinuation cont;
+ VisitBinop<Int32BinopMatcher>(this, node, kPPC_SubWithOverflow32,
+ kInt16Imm_Negate, &cont);
+}
+
+
+static bool CompareLogical(FlagsContinuation* cont) {
+ switch (cont->condition()) {
+ case kUnsignedLessThan:
+ case kUnsignedGreaterThanOrEqual:
+ case kUnsignedLessThanOrEqual:
+ case kUnsignedGreaterThan:
+ return true;
+ default:
+ return false;
+ }
+ UNREACHABLE();
+ return false;
+}
+
+
+// Shared routine for multiple compare operations.
+static void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
+ InstructionOperand left, InstructionOperand right,
+ FlagsContinuation* cont) {
+ PPCOperandGenerator g(selector);
+ opcode = cont->Encode(opcode);
+ if (cont->IsBranch()) {
+ selector->Emit(opcode, g.NoOutput(), left, right,
+ g.Label(cont->true_block()),
+ g.Label(cont->false_block()))->MarkAsControl();
+ } else {
+ DCHECK(cont->IsSet());
+ selector->Emit(opcode, g.DefineAsRegister(cont->result()), left, right);
+ }
+}
+
+
+// Shared routine for multiple word compare operations.
+static void VisitWordCompare(InstructionSelector* selector, Node* node,
+ InstructionCode opcode, FlagsContinuation* cont,
+ bool commutative, ImmediateMode immediate_mode) {
+ PPCOperandGenerator g(selector);
+ Node* left = node->InputAt(0);
+ Node* right = node->InputAt(1);
+
+ // Match immediates on left or right side of comparison.
+ if (g.CanBeImmediate(right, immediate_mode)) {
+ VisitCompare(selector, opcode, g.UseRegister(left), g.UseImmediate(right),
+ cont);
+ } else if (g.CanBeImmediate(left, immediate_mode)) {
+ if (!commutative) cont->Commute();
+ VisitCompare(selector, opcode, g.UseRegister(right), g.UseImmediate(left),
+ cont);
+ } else {
+ VisitCompare(selector, opcode, g.UseRegister(left), g.UseRegister(right),
+ cont);
+ }
+}
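+
+// Illustration: for a comparison like 5 < x the constant sits on the left,
+// so the continuation is commuted (5 < x is treated as x > 5), letting the
+// constant still be encoded as the immediate operand of the compare.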
+
+
+static void VisitWord32Compare(InstructionSelector* selector, Node* node,
+ FlagsContinuation* cont) {
+ ImmediateMode mode = (CompareLogical(cont) ? kInt16Imm_Unsigned : kInt16Imm);
+ VisitWordCompare(selector, node, kPPC_Cmp32, cont, false, mode);
+}
+
+
+#if V8_TARGET_ARCH_PPC64
+static void VisitWord64Compare(InstructionSelector* selector, Node* node,
+ FlagsContinuation* cont) {
+ ImmediateMode mode = (CompareLogical(cont) ? kInt16Imm_Unsigned : kInt16Imm);
+ VisitWordCompare(selector, node, kPPC_Cmp64, cont, false, mode);
+}
+#endif
+
+
+// Shared routine for multiple float compare operations.
+static void VisitFloat64Compare(InstructionSelector* selector, Node* node,
+ FlagsContinuation* cont) {
+ PPCOperandGenerator g(selector);
+ Node* left = node->InputAt(0);
+ Node* right = node->InputAt(1);
+ VisitCompare(selector, kPPC_CmpFloat64, g.UseRegister(left),
+ g.UseRegister(right), cont);
+}
+
+
+// Shared routine for word comparisons against zero.
+static void VisitWordCompareZero(InstructionSelector* selector, Node* user,
+ Node* value, InstructionCode opcode,
+ FlagsContinuation* cont) {
+ while (selector->CanCover(user, value)) {
+ switch (value->opcode()) {
+ case IrOpcode::kWord32Equal: {
+ // Combine with comparisons against 0 by simply inverting the
+ // continuation.
+ Int32BinopMatcher m(value);
+ if (m.right().Is(0)) {
+ user = value;
+ value = m.left().node();
+ cont->Negate();
+ continue;
+ }
+ cont->OverwriteAndNegateIfEqual(kEqual);
+ return VisitWord32Compare(selector, value, cont);
+ }
+ case IrOpcode::kInt32LessThan:
+ cont->OverwriteAndNegateIfEqual(kSignedLessThan);
+ return VisitWord32Compare(selector, value, cont);
+ case IrOpcode::kInt32LessThanOrEqual:
+ cont->OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
+ return VisitWord32Compare(selector, value, cont);
+ case IrOpcode::kUint32LessThan:
+ cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
+ return VisitWord32Compare(selector, value, cont);
+ case IrOpcode::kUint32LessThanOrEqual:
+ cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
+ return VisitWord32Compare(selector, value, cont);
+#if V8_TARGET_ARCH_PPC64
+ case IrOpcode::kWord64Equal: {
+ // Combine with comparisons against 0 by simply inverting the
+ // continuation.
+ Int64BinopMatcher m(value);
+ if (m.right().Is(0)) {
+ user = value;
+ value = m.left().node();
+ cont->Negate();
+ continue;
+ }
+ cont->OverwriteAndNegateIfEqual(kEqual);
+ return VisitWord64Compare(selector, value, cont);
+ }
+ case IrOpcode::kInt64LessThan:
+ cont->OverwriteAndNegateIfEqual(kSignedLessThan);
+ return VisitWord64Compare(selector, value, cont);
+ case IrOpcode::kInt64LessThanOrEqual:
+ cont->OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
+ return VisitWord64Compare(selector, value, cont);
+ case IrOpcode::kUint64LessThan:
+ cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
+ return VisitWord64Compare(selector, value, cont);
+#endif
+ case IrOpcode::kFloat64Equal:
+ cont->OverwriteAndNegateIfEqual(kEqual);
+ return VisitFloat64Compare(selector, value, cont);
+ case IrOpcode::kFloat64LessThan:
+ cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
+ return VisitFloat64Compare(selector, value, cont);
+ case IrOpcode::kFloat64LessThanOrEqual:
+ cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
+ return VisitFloat64Compare(selector, value, cont);
+ case IrOpcode::kProjection:
+ // Check if this is the overflow output projection of an
+ // <Operation>WithOverflow node.
+ if (ProjectionIndexOf(value->op()) == 1u) {
+ // We cannot combine the <Operation>WithOverflow with this branch
+ // unless the 0th projection (the use of the actual value of the
+ // <Operation>) is either NULL, which means there is no use of the
+ // actual value, or is already defined, which means it is scheduled
+ // *AFTER* this branch.
+ Node* const node = value->InputAt(0);
+ Node* const result = NodeProperties::FindProjection(node, 0);
+ if (result == NULL || selector->IsDefined(result)) {
+ switch (node->opcode()) {
+ case IrOpcode::kInt32AddWithOverflow:
+ cont->OverwriteAndNegateIfEqual(kOverflow);
+ return VisitBinop<Int32BinopMatcher>(
+ selector, node, kPPC_AddWithOverflow32, kInt16Imm, cont);
+ case IrOpcode::kInt32SubWithOverflow:
+ cont->OverwriteAndNegateIfEqual(kOverflow);
+ return VisitBinop<Int32BinopMatcher>(selector, node,
+ kPPC_SubWithOverflow32,
+ kInt16Imm_Negate, cont);
+ default:
+ break;
+ }
+ }
+ }
+ break;
+ case IrOpcode::kInt32Sub:
+ return VisitWord32Compare(selector, value, cont);
+ case IrOpcode::kWord32And:
+ // TODO(mbrandy): opportunity for rlwinm?
+ return VisitWordCompare(selector, value, kPPC_Tst32, cont, true,
+ kInt16Imm_Unsigned);
+// TODO(mbrandy): Handle?
+// case IrOpcode::kInt32Add:
+// case IrOpcode::kWord32Or:
+// case IrOpcode::kWord32Xor:
+// case IrOpcode::kWord32Sar:
+// case IrOpcode::kWord32Shl:
+// case IrOpcode::kWord32Shr:
+// case IrOpcode::kWord32Ror:
+#if V8_TARGET_ARCH_PPC64
+ case IrOpcode::kInt64Sub:
+ return VisitWord64Compare(selector, value, cont);
+ case IrOpcode::kWord64And:
+ // TODO(mbrandy): opportunity for rldic?
+ return VisitWordCompare(selector, value, kPPC_Tst64, cont, true,
+ kInt16Imm_Unsigned);
+// TODO(mbrandy): Handle?
+// case IrOpcode::kInt64Add:
+// case IrOpcode::kWord64Or:
+// case IrOpcode::kWord64Xor:
+// case IrOpcode::kWord64Sar:
+// case IrOpcode::kWord64Shl:
+// case IrOpcode::kWord64Shr:
+// case IrOpcode::kWord64Ror:
+#endif
+ default:
+ break;
+ }
+ break;
+ }
+
+ // Branch could not be combined with a compare, emit compare against 0.
+ PPCOperandGenerator g(selector);
+ VisitCompare(selector, opcode, g.UseRegister(value), g.TempImmediate(0),
+ cont);
+}
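+
+// Illustration: Branch(Word32Equal(Int32LessThan(a, b), 0)) first strips
+// the outer equal-to-zero by negating the continuation, then combines with
+// the inner comparison, so a single compare of a against b is emitted with
+// the branch condition adjusted accordingly and no boolean materialized.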
+
+
+static void VisitWord32CompareZero(InstructionSelector* selector, Node* user,
+ Node* value, FlagsContinuation* cont) {
+ VisitWordCompareZero(selector, user, value, kPPC_Cmp32, cont);
+}
+
+
+#if V8_TARGET_ARCH_PPC64
+static void VisitWord64CompareZero(InstructionSelector* selector, Node* user,
+ Node* value, FlagsContinuation* cont) {
+ VisitWordCompareZero(selector, user, value, kPPC_Cmp64, cont);
+}
+#endif
+
+
+void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
+ BasicBlock* fbranch) {
+ FlagsContinuation cont(kNotEqual, tbranch, fbranch);
+ VisitWord32CompareZero(this, branch, branch->InputAt(0), &cont);
+}
+
+
+void InstructionSelector::VisitWord32Equal(Node* const node) {
+ FlagsContinuation cont(kEqual, node);
+ Int32BinopMatcher m(node);
+ if (m.right().Is(0)) {
+ return VisitWord32CompareZero(this, m.node(), m.left().node(), &cont);
+ }
+ VisitWord32Compare(this, node, &cont);
+}
+
+
+void InstructionSelector::VisitInt32LessThan(Node* node) {
+ FlagsContinuation cont(kSignedLessThan, node);
+ VisitWord32Compare(this, node, &cont);
+}
+
+
+void InstructionSelector::VisitInt32LessThanOrEqual(Node* node) {
+ FlagsContinuation cont(kSignedLessThanOrEqual, node);
+ VisitWord32Compare(this, node, &cont);
+}
+
+
+void InstructionSelector::VisitUint32LessThan(Node* node) {
+ FlagsContinuation cont(kUnsignedLessThan, node);
+ VisitWord32Compare(this, node, &cont);
+}
+
+
+void InstructionSelector::VisitUint32LessThanOrEqual(Node* node) {
+ FlagsContinuation cont(kUnsignedLessThanOrEqual, node);
+ VisitWord32Compare(this, node, &cont);
+}
+
+
+#if V8_TARGET_ARCH_PPC64
+void InstructionSelector::VisitWord64Equal(Node* const node) {
+ FlagsContinuation cont(kEqual, node);
+ Int64BinopMatcher m(node);
+ if (m.right().Is(0)) {
+ return VisitWord64CompareZero(this, m.node(), m.left().node(), &cont);
+ }
+ VisitWord64Compare(this, node, &cont);
+}
+
+
+void InstructionSelector::VisitInt64LessThan(Node* node) {
+ FlagsContinuation cont(kSignedLessThan, node);
+ VisitWord64Compare(this, node, &cont);
+}
+
+
+void InstructionSelector::VisitInt64LessThanOrEqual(Node* node) {
+ FlagsContinuation cont(kSignedLessThanOrEqual, node);
+ VisitWord64Compare(this, node, &cont);
+}
+
+
+void InstructionSelector::VisitUint64LessThan(Node* node) {
+ FlagsContinuation cont(kUnsignedLessThan, node);
+ VisitWord64Compare(this, node, &cont);
+}
+#endif
+
+
+void InstructionSelector::VisitFloat64Equal(Node* node) {
+ FlagsContinuation cont(kEqual, node);
+ VisitFloat64Compare(this, node, &cont);
+}
+
+
+void InstructionSelector::VisitFloat64LessThan(Node* node) {
+ FlagsContinuation cont(kUnsignedLessThan, node);
+ VisitFloat64Compare(this, node, &cont);
+}
+
+
+void InstructionSelector::VisitFloat64LessThanOrEqual(Node* node) {
+ FlagsContinuation cont(kUnsignedLessThanOrEqual, node);
+ VisitFloat64Compare(this, node, &cont);
+}
+
+
+void InstructionSelector::VisitCall(Node* node) {
+ PPCOperandGenerator g(this);
+ const CallDescriptor* descriptor = OpParameter<CallDescriptor*>(node);
+
+ FrameStateDescriptor* frame_state_descriptor = NULL;
+ if (descriptor->NeedsFrameState()) {
+ frame_state_descriptor =
+ GetFrameStateDescriptor(node->InputAt(descriptor->InputCount()));
+ }
+
+ CallBuffer buffer(zone(), descriptor, frame_state_descriptor);
+
+ // Compute InstructionOperands for inputs and outputs.
+ // TODO(turbofan): on PPC it's probably better to use the code object in a
+ // register if there are multiple uses of it. Improve constant pool and the
+ // heuristics in the register allocator for where to emit constants.
+ InitializeCallBuffer(node, &buffer, true, false);
+
+ // Push any stack arguments.
+ // TODO(mbrandy): reverse order and use push only for first
+ for (auto i = buffer.pushed_nodes.rbegin(); i != buffer.pushed_nodes.rend();
+ i++) {
+ Emit(kPPC_Push, g.NoOutput(), g.UseRegister(*i));
+ }
+
+ // Select the appropriate opcode based on the call type.
+ InstructionCode opcode;
+ switch (descriptor->kind()) {
+ case CallDescriptor::kCallCodeObject: {
+ opcode = kArchCallCodeObject;
+ break;
+ }
+ case CallDescriptor::kCallJSFunction:
+ opcode = kArchCallJSFunction;
+ break;
+ default:
+ UNREACHABLE();
+ return;
+ }
+ opcode |= MiscField::encode(descriptor->flags());
+
+ // Emit the call instruction.
+ InstructionOperand* first_output =
+ buffer.outputs.size() > 0 ? &buffer.outputs.front() : NULL;
+ Instruction* call_instr =
+ Emit(opcode, buffer.outputs.size(), first_output,
+ buffer.instruction_args.size(), &buffer.instruction_args.front());
+ call_instr->MarkAsCall();
+}
+
+
+// static
+MachineOperatorBuilder::Flags
+InstructionSelector::SupportedMachineOperatorFlags() {
+ return MachineOperatorBuilder::kFloat64Floor |
+ MachineOperatorBuilder::kFloat64Ceil |
+ MachineOperatorBuilder::kFloat64RoundTruncate |
+ MachineOperatorBuilder::kFloat64RoundTiesAway;
+ // We omit kWord32ShiftIsSafe as s[rl]w use 0x3f as a mask rather than 0x1f.
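+ // (With a 6-bit mask, srw given a count of 32 produces 0, whereas
+ // kWord32ShiftIsSafe asserts that the hardware already masks the count
+ // to five bits, so that a shift by 32 behaves like a shift by 0.)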
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
--- /dev/null
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "src/assembler.h"
+#include "src/code-stubs.h"
+#include "src/compiler/linkage.h"
+#include "src/compiler/linkage-impl.h"
+#include "src/zone.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+struct PPCLinkageHelperTraits {
+ static Register ReturnValueReg() { return r3; }
+ static Register ReturnValue2Reg() { return r4; }
+ static Register JSCallFunctionReg() { return r4; }
+ static Register ContextReg() { return cp; }
+ static Register RuntimeCallFunctionReg() { return r4; }
+ static Register RuntimeCallArgCountReg() { return r3; }
+ static RegList CCalleeSaveRegisters() {
+ return r14.bit() | r15.bit() | r16.bit() | r17.bit() | r18.bit() |
+ r19.bit() | r20.bit() | r21.bit() | r22.bit() | r23.bit() |
+ r24.bit() | r25.bit() | r26.bit() | r27.bit() | r28.bit() |
+ r29.bit() | r30.bit() | fp.bit();
+ }
+ static Register CRegisterParameter(int i) {
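+ // r3..r10 are the integer argument registers of the PPC ELF calling
+ // conventions; CRegisterParametersLength below is presumably fixed at 8
+ // to match this table.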
+ static Register register_parameters[] = {r3, r4, r5, r6, r7, r8, r9, r10};
+ return register_parameters[i];
+ }
+ static int CRegisterParametersLength() { return 8; }
+};
+
+
+typedef LinkageHelper<PPCLinkageHelperTraits> LH;
+
+CallDescriptor* Linkage::GetJSCallDescriptor(Zone* zone, bool is_osr,
+ int parameter_count,
+ CallDescriptor::Flags flags) {
+ return LH::GetJSCallDescriptor(zone, is_osr, parameter_count, flags);
+}
+
+
+CallDescriptor* Linkage::GetRuntimeCallDescriptor(
+ Zone* zone, Runtime::FunctionId function, int parameter_count,
+ Operator::Properties properties) {
+ return LH::GetRuntimeCallDescriptor(zone, function, parameter_count,
+ properties);
+}
+
+
+CallDescriptor* Linkage::GetStubCallDescriptor(
+ Isolate* isolate, Zone* zone, const CallInterfaceDescriptor& descriptor,
+ int stack_parameter_count, CallDescriptor::Flags flags,
+ Operator::Properties properties) {
+ return LH::GetStubCallDescriptor(isolate, zone, descriptor,
+ stack_parameter_count, flags, properties);
+}
+
+
+CallDescriptor* Linkage::GetSimplifiedCDescriptor(Zone* zone,
+ MachineSignature* sig) {
+ return LH::GetSimplifiedCDescriptor(zone, sig);
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
void NamedLoadHandlerCompiler::GenerateLoadViaGetter(
MacroAssembler* masm, Handle<HeapType> type, Register receiver,
- Handle<JSFunction> getter) {
+ Register holder, int accessor_index, int expected_arguments,
+ Register scratch) {
// ----------- S t a t e -------------
// -- r3 : receiver
// -- r5 : name
{
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
- if (!getter.is_null()) {
+ if (accessor_index >= 0) {
+ DCHECK(!holder.is(scratch));
+ DCHECK(!receiver.is(scratch));
// Call the JavaScript getter with the receiver on the stack.
if (IC::TypeToMap(*type, masm->isolate())->IsJSGlobalObjectMap()) {
// Swap in the global receiver.
- __ LoadP(receiver,
+ __ LoadP(scratch,
FieldMemOperand(receiver, JSGlobalObject::kGlobalProxyOffset));
+ receiver = scratch;
}
__ push(receiver);
ParameterCount actual(0);
- ParameterCount expected(getter);
- __ InvokeFunction(getter, expected, actual, CALL_FUNCTION,
- NullCallWrapper());
+ ParameterCount expected(expected_arguments);
+ __ LoadAccessor(r4, holder, accessor_index, ACCESSOR_GETTER);
+ __ InvokeFunction(r4, expected, actual, CALL_FUNCTION, NullCallWrapper());
} else {
// If we generate a global code snippet for deoptimization only, remember
// the place to continue after deoptimization.
void NamedStoreHandlerCompiler::GenerateStoreViaSetter(
MacroAssembler* masm, Handle<HeapType> type, Register receiver,
- Handle<JSFunction> setter) {
+ Register holder, int accessor_index, int expected_arguments,
+ Register scratch) {
// ----------- S t a t e -------------
// -- lr : return address
// -----------------------------------
// Save value register, so we can restore it later.
__ push(value());
- if (!setter.is_null()) {
+ if (accessor_index >= 0) {
+ DCHECK(!holder.is(scratch));
+ DCHECK(!receiver.is(scratch));
+ DCHECK(!value().is(scratch));
// Call the JavaScript setter with receiver and value on the stack.
if (IC::TypeToMap(*type, masm->isolate())->IsJSGlobalObjectMap()) {
// Swap in the global receiver.
- __ LoadP(receiver,
+ __ LoadP(scratch,
FieldMemOperand(receiver, JSGlobalObject::kGlobalProxyOffset));
+ receiver = scratch;
}
__ Push(receiver, value());
ParameterCount actual(1);
- ParameterCount expected(setter);
- __ InvokeFunction(setter, expected, actual, CALL_FUNCTION,
- NullCallWrapper());
+ ParameterCount expected(expected_arguments);
+ __ LoadAccessor(r4, holder, accessor_index, ACCESSOR_SETTER);
+ __ InvokeFunction(r4, expected, actual, CALL_FUNCTION, NullCallWrapper());
} else {
// If we generate a global code snippet for deoptimization only, remember
// the place to continue after deoptimization.
}
+void PropertyHandlerCompiler::PushVectorAndSlot(Register vector,
+ Register slot) {
+ MacroAssembler* masm = this->masm();
+ __ Push(vector, slot);
+}
+
+
+void PropertyHandlerCompiler::PopVectorAndSlot(Register vector, Register slot) {
+ MacroAssembler* masm = this->masm();
+ __ Pop(vector, slot);
+}
+
+
+void PropertyHandlerCompiler::DiscardVectorAndSlot() {
+ MacroAssembler* masm = this->masm();
+ // Remove vector and slot.
+ __ addi(sp, sp, Operand(2 * kPointerSize));
+}
+
+
void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup(
MacroAssembler* masm, Label* miss_label, Register receiver,
Handle<Name> name, Register scratch0, Register scratch1) {
void NamedLoadHandlerCompiler::GenerateDirectLoadGlobalFunctionPrototype(
- MacroAssembler* masm, int index, Register prototype, Label* miss) {
- Isolate* isolate = masm->isolate();
- // Get the global function with the given index.
- Handle<JSFunction> function(
- JSFunction::cast(isolate->native_context()->get(index)));
-
- // Check we're still in the same context.
- Register scratch = prototype;
+ MacroAssembler* masm, int index, Register result, Label* miss) {
const int offset = Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX);
- __ LoadP(scratch, MemOperand(cp, offset));
- __ LoadP(scratch,
- FieldMemOperand(scratch, GlobalObject::kNativeContextOffset));
- __ LoadP(scratch, MemOperand(scratch, Context::SlotOffset(index)));
- __ Move(ip, function);
- __ cmp(ip, scratch);
- __ bne(miss);
-
+ __ LoadP(result, MemOperand(cp, offset));
+ __ LoadP(result, FieldMemOperand(result, GlobalObject::kNativeContextOffset));
+ __ LoadP(result, MemOperand(result, Context::SlotOffset(index)));
// Load its initial map. The global functions all have initial maps.
- __ Move(prototype, Handle<Map>(function->initial_map()));
+ __ LoadP(result,
+ FieldMemOperand(result, JSFunction::kPrototypeOrInitialMapOffset));
// Load the prototype from the initial map.
- __ LoadP(prototype, FieldMemOperand(prototype, Map::kPrototypeOffset));
+ __ LoadP(result, FieldMemOperand(result, Map::kPrototypeOffset));
}
Register scratch, Label* miss) {
Handle<Cell> cell = JSGlobalObject::EnsurePropertyCell(global, name);
DCHECK(cell->value()->IsTheHole());
- __ mov(scratch, Operand(cell));
+ Handle<WeakCell> weak_cell = masm->isolate()->factory()->NewWeakCell(cell);
+ __ LoadWeakValue(scratch, weak_cell, miss);
__ LoadP(scratch, FieldMemOperand(scratch, Cell::kValueOffset));
__ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
__ cmp(scratch, ip);
Register holder, Register name,
Handle<JSObject> holder_obj) {
STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsNameIndex == 0);
- STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsInfoIndex == 1);
- STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsThisIndex == 2);
- STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsHolderIndex == 3);
- STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsLength == 4);
+ STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsThisIndex == 1);
+ STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsHolderIndex == 2);
+ STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsLength == 3);
__ push(name);
- Handle<InterceptorInfo> interceptor(holder_obj->GetNamedInterceptor());
- DCHECK(!masm->isolate()->heap()->InNewSpace(*interceptor));
- Register scratch = name;
- __ mov(scratch, Operand(interceptor));
- __ push(scratch);
__ push(receiver);
__ push(holder);
}
// Generate call to api function.
-void PropertyHandlerCompiler::GenerateFastApiCall(
+void PropertyHandlerCompiler::GenerateApiAccessorCall(
MacroAssembler* masm, const CallOptimization& optimization,
Handle<Map> receiver_map, Register receiver, Register scratch_in,
- bool is_store, int argc, Register* values) {
+ bool is_store, Register store_parameter, Register accessor_holder,
+ int accessor_index) {
+ DCHECK(!accessor_holder.is(scratch_in));
DCHECK(!receiver.is(scratch_in));
__ push(receiver);
// Write the arguments to stack frame.
- for (int i = 0; i < argc; i++) {
- Register arg = values[argc - 1 - i];
- DCHECK(!receiver.is(arg));
- DCHECK(!scratch_in.is(arg));
- __ push(arg);
+ if (is_store) {
+ DCHECK(!receiver.is(store_parameter));
+ DCHECK(!scratch_in.is(store_parameter));
+ __ push(store_parameter);
}
DCHECK(optimization.is_simple_api_call());
// Abi for CallApiFunctionStub.
Register callee = r3;
- Register call_data = r7;
+ Register data = r7;
Register holder = r5;
Register api_function_address = r4;
+ // Put callee in place.
+ __ LoadAccessor(callee, accessor_holder, accessor_index,
+ is_store ? ACCESSOR_SETTER : ACCESSOR_GETTER);
+
// Put holder in place.
CallOptimization::HolderLookup holder_lookup;
- Handle<JSObject> api_holder =
- optimization.LookupHolderOfExpectedType(receiver_map, &holder_lookup);
+ int holder_depth = 0;
+ optimization.LookupHolderOfExpectedType(receiver_map, &holder_lookup,
+ &holder_depth);
switch (holder_lookup) {
case CallOptimization::kHolderIsReceiver:
__ Move(holder, receiver);
break;
case CallOptimization::kHolderFound:
- __ Move(holder, api_holder);
+ __ LoadP(holder, FieldMemOperand(receiver, HeapObject::kMapOffset));
+ __ LoadP(holder, FieldMemOperand(holder, Map::kPrototypeOffset));
+ for (int i = 1; i < holder_depth; i++) {
+ __ LoadP(holder, FieldMemOperand(holder, HeapObject::kMapOffset));
+ __ LoadP(holder, FieldMemOperand(holder, Map::kPrototypeOffset));
+ }
break;
case CallOptimization::kHolderNotFound:
UNREACHABLE();
}
Isolate* isolate = masm->isolate();
- Handle<JSFunction> function = optimization.constant_function();
Handle<CallHandlerInfo> api_call_info = optimization.api_call_info();
- Handle<Object> call_data_obj(api_call_info->data(), isolate);
-
- // Put callee in place.
- __ Move(callee, function);
-
bool call_data_undefined = false;
- // Put call_data in place.
- if (isolate->heap()->InNewSpace(*call_data_obj)) {
- __ Move(call_data, api_call_info);
- __ LoadP(call_data,
- FieldMemOperand(call_data, CallHandlerInfo::kDataOffset));
- } else if (call_data_obj->IsUndefined()) {
+ // Put call data in place.
+ if (api_call_info->data()->IsUndefined()) {
call_data_undefined = true;
- __ LoadRoot(call_data, Heap::kUndefinedValueRootIndex);
+ __ LoadRoot(data, Heap::kUndefinedValueRootIndex);
} else {
- __ Move(call_data, call_data_obj);
+ __ LoadP(data,
+ FieldMemOperand(callee, JSFunction::kSharedFunctionInfoOffset));
+ __ LoadP(data,
+ FieldMemOperand(data, SharedFunctionInfo::kFunctionDataOffset));
+ __ LoadP(data,
+ FieldMemOperand(data, FunctionTemplateInfo::kCallCodeOffset));
+ __ LoadP(data, FieldMemOperand(data, CallHandlerInfo::kDataOffset));
}
// Put api_function_address in place.
__ mov(api_function_address, Operand(ref));
// Jump to stub.
- CallApiFunctionStub stub(isolate, is_store, call_data_undefined, argc);
+ CallApiAccessorStub stub(isolate, is_store, call_data_undefined);
__ TailCallStub(&stub);
}
}
-void NamedStoreHandlerCompiler::GenerateRestoreNameAndMap(
- Handle<Name> name, Handle<Map> transition) {
+void NamedStoreHandlerCompiler::GenerateRestoreName(Handle<Name> name) {
__ mov(this->name(), Operand(name));
- __ mov(StoreTransitionDescriptor::MapRegister(), Operand(transition));
}
-void NamedStoreHandlerCompiler::GenerateConstantCheck(Object* constant,
+void NamedStoreHandlerCompiler::GenerateRestoreMap(Handle<Map> transition,
+ Register scratch,
+ Label* miss) {
+ Handle<WeakCell> cell = Map::WeakCellForMap(transition);
+ Register map_reg = StoreTransitionDescriptor::MapRegister();
+ DCHECK(!map_reg.is(scratch));
+ __ LoadWeakValue(map_reg, cell, miss);
+ if (transition->CanBeDeprecated()) {
+ __ lwz(scratch, FieldMemOperand(map_reg, Map::kBitField3Offset));
+ __ DecodeField<Map::Deprecated>(r0, scratch, SetRC);
+ __ bne(miss, cr0);
+ }
+}
+
+
+void NamedStoreHandlerCompiler::GenerateConstantCheck(Register map_reg,
+ int descriptor,
Register value_reg,
+ Register scratch,
Label* miss_label) {
- __ Move(scratch1(), handle(constant, isolate()));
- __ cmp(value_reg, scratch1());
+ DCHECK(!map_reg.is(scratch));
+ DCHECK(!map_reg.is(value_reg));
+ DCHECK(!value_reg.is(scratch));
+ __ LoadInstanceDescriptors(map_reg, scratch);
+ __ LoadP(scratch, FieldMemOperand(
+ scratch, DescriptorArray::GetValueOffset(descriptor)));
+ __ cmp(value_reg, scratch);
__ bne(miss_label);
}
void NamedStoreHandlerCompiler::GenerateFieldTypeChecks(HeapType* field_type,
Register value_reg,
Label* miss_label) {
+ Register map_reg = scratch1();
+ Register scratch = scratch2();
+ DCHECK(!value_reg.is(map_reg));
+ DCHECK(!value_reg.is(scratch));
__ JumpIfSmi(value_reg, miss_label);
HeapType::Iterator<Map> it = field_type->Classes();
if (!it.Done()) {
- __ LoadP(scratch1(), FieldMemOperand(value_reg, HeapObject::kMapOffset));
+ __ LoadP(map_reg, FieldMemOperand(value_reg, HeapObject::kMapOffset));
Label do_store;
while (true) {
- __ CompareMap(scratch1(), it.Current(), &do_store);
+ __ CmpWeakValue(map_reg, Map::WeakCellForMap(it.Current()), scratch);
it.Advance();
if (it.Done()) {
__ bne(miss_label);
__ LoadP(reg, FieldMemOperand(scratch1, Map::kPrototypeOffset));
} else {
Register map_reg = scratch1;
+ __ LoadP(map_reg, FieldMemOperand(reg, HeapObject::kMapOffset));
if (depth != 1 || check == CHECK_ALL_MAPS) {
- // CheckMap implicitly loads the map of |reg| into |map_reg|.
- __ CheckMap(reg, map_reg, current_map, miss, DONT_DO_SMI_CHECK);
- } else {
- __ LoadP(map_reg, FieldMemOperand(reg, HeapObject::kMapOffset));
+ Handle<WeakCell> cell = Map::WeakCellForMap(current_map);
+ __ CmpWeakValue(map_reg, cell, scratch2);
+ __ bne(miss);
}
// Check access rights to the global object. This has to happen after
reg = holder_reg; // From now on the object will be in holder_reg.
- // Two possible reasons for loading the prototype from the map:
- // (1) Can't store references to new space in code.
- // (2) Handler is shared for all receivers with the same prototype
- // map (but not necessarily the same prototype instance).
- bool load_prototype_from_map =
- heap()->InNewSpace(*prototype) || depth == 1;
- if (load_prototype_from_map) {
- __ LoadP(reg, FieldMemOperand(map_reg, Map::kPrototypeOffset));
- } else {
- __ mov(reg, Operand(prototype));
- }
+ __ LoadP(reg, FieldMemOperand(map_reg, Map::kPrototypeOffset));
}
// Go to the next object in the prototype chain.
if (depth != 0 || check == CHECK_ALL_MAPS) {
// Check the holder map.
- __ CheckMap(reg, scratch1, current_map, miss, DONT_DO_SMI_CHECK);
+ __ LoadP(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset));
+ Handle<WeakCell> cell = Map::WeakCellForMap(current_map);
+ __ CmpWeakValue(scratch1, cell, scratch2);
+ __ bne(miss);
}
// Perform security check for access to the global object.
Label success;
__ b(&success);
__ bind(miss);
+ if (IC::ICUseVector(kind())) {
+ DCHECK(kind() == Code::LOAD_IC);
+ PopVectorAndSlot();
+ }
TailCallBuiltin(masm(), MissBuiltin(kind()));
__ bind(&success);
}
DCHECK(!scratch3().is(reg));
DCHECK(!scratch4().is(reg));
__ push(receiver());
- if (heap()->InNewSpace(callback->data())) {
- __ Move(scratch3(), callback);
- __ LoadP(scratch3(),
- FieldMemOperand(scratch3(), ExecutableAccessorInfo::kDataOffset));
+ // Push data from ExecutableAccessorInfo.
+ Handle<Object> data(callback->data(), isolate());
+ if (data->IsUndefined() || data->IsSmi()) {
+ __ Move(scratch3(), data);
} else {
- __ Move(scratch3(), Handle<Object>(callback->data(), isolate()));
+ Handle<WeakCell> cell =
+ isolate()->factory()->NewWeakCell(Handle<HeapObject>::cast(data));
+ // The callback is alive if this instruction is executed,
+ // so the weak cell is not cleared and points to data.
+ __ GetWeakValue(scratch3(), cell);
}
__ push(scratch3());
__ LoadRoot(scratch3(), Heap::kUndefinedValueRootIndex);
} else {
__ Push(holder_reg, this->name());
}
+ InterceptorVectorSlotPush(holder_reg);
// Invoke an interceptor. Note: map checks from receiver to
// interceptor's holder have been compiled before (see a caller
// of this method).
__ Ret();
__ bind(&interceptor_failed);
+ InterceptorVectorSlotPop(holder_reg);
__ pop(this->name());
__ pop(holder_reg);
if (must_preserve_receiver_reg) {
Handle<Code> NamedStoreHandlerCompiler::CompileStoreCallback(
- Handle<JSObject> object, Handle<Name> name,
- Handle<ExecutableAccessorInfo> callback) {
- Register holder_reg = Frontend(receiver(), name);
+ Handle<JSObject> object, Handle<Name> name, int accessor_index) {
+ Register holder_reg = Frontend(name);
__ Push(receiver(), holder_reg); // receiver
- __ mov(ip, Operand(callback)); // callback info
+ __ LoadSmiLiteral(ip, Smi::FromInt(accessor_index));
__ push(ip);
__ mov(ip, Operand(name));
__ Push(ip, value());
Handle<Code> NamedLoadHandlerCompiler::CompileLoadGlobal(
Handle<PropertyCell> cell, Handle<Name> name, bool is_configurable) {
Label miss;
+ if (IC::ICUseVector(kind())) {
+ PushVectorAndSlot();
+ }
FrontendHeader(receiver(), name, &miss);
// Get the value from the cell.
Register result = StoreDescriptor::ValueRegister();
- __ mov(result, Operand(cell));
+ Handle<WeakCell> weak_cell = factory()->NewWeakCell(cell);
+ __ LoadWeakValue(result, weak_cell, &miss);
__ LoadP(result, FieldMemOperand(result, Cell::kValueOffset));
// Check for deleted property if property can actually be deleted.
Counters* counters = isolate()->counters();
__ IncrementCounter(counters->named_load_global_stub(), 1, r4, r6);
+ if (IC::ICUseVector(kind())) {
+ DiscardVectorAndSlot();
+ }
__ Ret();
FrontendFooter(name, &miss);
#define __ ACCESS_MASM(masm)
-void PropertyICCompiler::GenerateRuntimeSetProperty(MacroAssembler* masm,
- StrictMode strict_mode) {
+void PropertyICCompiler::GenerateRuntimeSetProperty(
+ MacroAssembler* masm, LanguageMode language_mode) {
+ __ mov(r0, Operand(Smi::FromInt(language_mode)));
__ Push(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister(),
- StoreDescriptor::ValueRegister());
-
- __ mov(r0, Operand(Smi::FromInt(strict_mode)));
- __ Push(r0);
+ StoreDescriptor::ValueRegister(), r0);
// Do tail-call to runtime routine.
__ TailCallRuntime(Runtime::kSetProperty, 4, 1);
if (check == PROPERTY &&
(kind() == Code::KEYED_LOAD_IC || kind() == Code::KEYED_STORE_IC)) {
- // In case we are compiling an IC for dictionary loads and stores, just
+ // In case we are compiling an IC for dictionary loads or stores, just
// check whether the name is unique.
if (name.is_identical_to(isolate()->factory()->normal_ic_symbol())) {
+ // Keyed loads with dictionaries shouldn't be here; they go generic.
+ // The DCHECK is to protect assumptions when --vector-ics is on.
+ DCHECK(kind() != Code::KEYED_LOAD_IC);
Register tmp = scratch1();
__ JumpIfSmi(this->name(), &miss);
__ LoadP(tmp, FieldMemOperand(this->name(), HeapObject::kMapOffset));
Handle<Map> map = IC::TypeToMap(*type, isolate());
if (!map->is_deprecated()) {
number_of_handled_maps++;
- __ mov(ip, Operand(map));
- __ cmp(map_reg, ip);
+ Handle<WeakCell> cell = Map::WeakCellForMap(map);
+ __ CmpWeakValue(map_reg, cell, scratch2());
if (type->Is(HeapType::Number())) {
DCHECK(!number_case.is_unused());
__ bind(&number_case);
__ JumpIfSmi(receiver(), &miss);
int receiver_count = receiver_maps->length();
- __ LoadP(scratch1(), FieldMemOperand(receiver(), HeapObject::kMapOffset));
+ Register map_reg = scratch1();
+ __ LoadP(map_reg, FieldMemOperand(receiver(), HeapObject::kMapOffset));
for (int i = 0; i < receiver_count; ++i) {
- __ mov(ip, Operand(receiver_maps->at(i)));
- __ cmp(scratch1(), ip);
+ Handle<WeakCell> cell = Map::WeakCellForMap(receiver_maps->at(i));
+ __ CmpWeakValue(map_reg, cell, scratch2());
if (transitioned_maps->at(i).is_null()) {
__ Jump(handler_stubs->at(i), RelocInfo::CODE_TARGET, eq);
} else {
Label next_map;
__ bne(&next_map);
- __ mov(transition_map(), Operand(transitioned_maps->at(i)));
+ Handle<WeakCell> cell = Map::WeakCellForMap(transitioned_maps->at(i));
+ __ LoadWeakValue(transition_map(), cell, &miss);
__ Jump(handler_stubs->at(i), RelocInfo::CODE_TARGET, al);
__ bind(&next_map);
}
// Loads an indexed element from a fast case array.
-// If not_fast_array is NULL, doesn't perform the elements map check.
static void GenerateFastArrayLoad(MacroAssembler* masm, Register receiver,
Register key, Register elements,
Register scratch1, Register scratch2,
- Register result, Label* not_fast_array,
- Label* out_of_range) {
+ Register result, Label* slow) {
// Register use:
//
// receiver - holds the receiver on entry.
// key - holds the smi key on entry.
// Unchanged unless 'result' is the same register.
//
- // elements - holds the elements of the receiver on exit.
- //
// result - holds the result on exit if the load succeeded.
// Allowed to be the same as 'receiver' or 'key'.
// Unchanged on bailout so 'receiver' and 'key' can be safely
//
// Scratch registers:
//
- // scratch1 - used to hold elements map and elements length.
- // Holds the elements map if not_fast_array branch is taken.
+ // elements - holds the elements of the receiver and its prototypes.
+ //
+ // scratch1 - used to hold elements length, bit fields, base addresses.
//
- // scratch2 - used to hold the loaded value.
+ // scratch2 - used to hold maps, prototypes, and the loaded value.
+ Label check_prototypes, check_next_prototype;
+ Label done, in_bounds, return_undefined;
__ LoadP(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
- if (not_fast_array != NULL) {
- // Check that the object is in fast mode and writable.
- __ LoadP(scratch1, FieldMemOperand(elements, HeapObject::kMapOffset));
- __ LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
- __ cmp(scratch1, ip);
- __ bne(not_fast_array);
- } else {
- __ AssertFastElements(elements);
- }
+ __ AssertFastElements(elements);
+
// Check that the key (index) is within bounds.
__ LoadP(scratch1, FieldMemOperand(elements, FixedArray::kLengthOffset));
__ cmpl(key, scratch1);
- __ bge(out_of_range);
+ __ blt(&in_bounds);
+ // Out-of-bounds. Check the prototype chain to see if we can just return
+ // 'undefined'.
+ __ cmpi(key, Operand::Zero());
+ __ blt(slow); // Negative keys can't take the fast OOB path.
+ __ bind(&check_prototypes);
+ __ LoadP(scratch2, FieldMemOperand(receiver, HeapObject::kMapOffset));
+ __ bind(&check_next_prototype);
+ __ LoadP(scratch2, FieldMemOperand(scratch2, Map::kPrototypeOffset));
+ // scratch2: current prototype
+ __ CompareRoot(scratch2, Heap::kNullValueRootIndex);
+ __ beq(&return_undefined);
+ __ LoadP(elements, FieldMemOperand(scratch2, JSObject::kElementsOffset));
+ __ LoadP(scratch2, FieldMemOperand(scratch2, HeapObject::kMapOffset));
+ // elements: elements of current prototype
+ // scratch2: map of current prototype
+ __ CompareInstanceType(scratch2, scratch1, JS_OBJECT_TYPE);
+ __ blt(slow);
+ __ lbz(scratch1, FieldMemOperand(scratch2, Map::kBitFieldOffset));
+ __ andi(r0, scratch1, Operand((1 << Map::kIsAccessCheckNeeded) |
+ (1 << Map::kHasIndexedInterceptor)));
+ __ bne(slow, cr0);
+ __ CompareRoot(elements, Heap::kEmptyFixedArrayRootIndex);
+ __ bne(slow);
+ __ jmp(&check_next_prototype);
+
+ __ bind(&return_undefined);
+ __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
+ __ jmp(&done);
+
+ __ bind(&in_bounds);
// Fast case: Do the load.
__ addi(scratch1, elements,
Operand(FixedArray::kHeaderSize - kHeapObjectTag));
// The key is a smi.
__ SmiToPtrArrayOffset(scratch2, key);
__ LoadPX(scratch2, MemOperand(scratch2, scratch1));
- __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
- __ cmp(scratch2, ip);
- // In case the loaded value is the_hole we have to consult GetProperty
- // to ensure the prototype chain is searched.
- __ beq(out_of_range);
+ __ CompareRoot(scratch2, Heap::kTheHoleValueRootIndex);
+ // In case the loaded value is the_hole we have to check the prototype chain.
+ __ beq(&check_prototypes);
__ mr(result, scratch2);
+ __ bind(&done);
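+ // Net effect: an out-of-bounds index (or a hole) no longer bails straight
+ // to the runtime; as long as every object on the prototype chain is a
+ // plain JSObject with empty elements and no access checks or indexed
+ // interceptors, the handler returns undefined inline.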
}
static const Register LoadIC_TempRegister() { return r6; }
+static void LoadIC_PushArgs(MacroAssembler* masm) {
+ Register receiver = LoadDescriptor::ReceiverRegister();
+ Register name = LoadDescriptor::NameRegister();
+ if (FLAG_vector_ics) {
+ Register slot = VectorLoadICDescriptor::SlotRegister();
+ Register vector = VectorLoadICDescriptor::VectorRegister();
+
+ __ Push(receiver, name, slot, vector);
+ } else {
+ __ Push(receiver, name);
+ }
+}
+
+
void LoadIC::GenerateMiss(MacroAssembler* masm) {
// The return address is in lr.
Isolate* isolate = masm->isolate();
- __ IncrementCounter(isolate->counters()->load_miss(), 1, r6, r7);
+ DCHECK(!FLAG_vector_ics ||
+ !AreAliased(r7, r8, VectorLoadICDescriptor::SlotRegister(),
+ VectorLoadICDescriptor::VectorRegister()));
+ __ IncrementCounter(isolate->counters()->load_miss(), 1, r7, r8);
- __ mr(LoadIC_TempRegister(), LoadDescriptor::ReceiverRegister());
- __ Push(LoadIC_TempRegister(), LoadDescriptor::NameRegister());
+ LoadIC_PushArgs(masm);
// Perform tail call to the entry.
ExternalReference ref = ExternalReference(IC_Utility(kLoadIC_Miss), isolate);
- __ TailCallExternalReference(ref, 2, 1);
+ int arg_count = FLAG_vector_ics ? 4 : 2;
+ __ TailCallExternalReference(ref, arg_count, 1);
}
// The return address is in lr.
Isolate* isolate = masm->isolate();
- __ IncrementCounter(isolate->counters()->keyed_load_miss(), 1, r6, r7);
+ DCHECK(!FLAG_vector_ics ||
+ !AreAliased(r7, r8, VectorLoadICDescriptor::SlotRegister(),
+ VectorLoadICDescriptor::VectorRegister()));
+ __ IncrementCounter(isolate->counters()->keyed_load_miss(), 1, r7, r8);
- __ Push(LoadDescriptor::ReceiverRegister(), LoadDescriptor::NameRegister());
+ LoadIC_PushArgs(masm);
// Perform tail call to the entry.
ExternalReference ref =
ExternalReference(IC_Utility(kKeyedLoadIC_Miss), isolate);
-
- __ TailCallExternalReference(ref, 2, 1);
+ int arg_count = FLAG_vector_ics ? 4 : 2;
+ __ TailCallExternalReference(ref, arg_count, 1);
}
}
-void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
+void KeyedLoadIC::GenerateMegamorphic(MacroAssembler* masm) {
// The return address is in lr.
Label slow, check_name, index_smi, index_name, property_array_property;
Label probe_dictionary, check_number_dictionary;
// Check the receiver's map to see if it has fast elements.
__ CheckFastElements(r3, r6, &check_number_dictionary);
- GenerateFastArrayLoad(masm, receiver, key, r3, r6, r7, r3, NULL, &slow);
+ GenerateFastArrayLoad(masm, receiver, key, r3, r6, r7, r3, &slow);
__ IncrementCounter(isolate->counters()->keyed_load_generic_smi(), 1, r7, r6);
__ Ret();
GenerateKeyedLoadReceiverCheck(masm, receiver, r3, r6,
Map::kHasNamedInterceptor, &slow);
- // If the receiver is a fast-case object, check the keyed lookup
- // cache. Otherwise probe the dictionary.
+ // If the receiver is a fast-case object, check the stub cache. Otherwise
+ // probe the dictionary.
__ LoadP(r6, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
__ LoadP(r7, FieldMemOperand(r6, HeapObject::kMapOffset));
__ LoadRoot(ip, Heap::kHashTableMapRootIndex);
__ cmp(r7, ip);
__ beq(&probe_dictionary);
- // Load the map of the receiver, compute the keyed lookup cache hash
- // based on 32 bits of the map pointer and the name hash.
- __ LoadP(r3, FieldMemOperand(receiver, HeapObject::kMapOffset));
- __ srawi(r6, r3, KeyedLookupCache::kMapHashShift);
- __ lwz(r7, FieldMemOperand(key, Name::kHashFieldOffset));
- __ srawi(r7, r7, Name::kHashShift);
- __ xor_(r6, r6, r7);
- int mask = KeyedLookupCache::kCapacityMask & KeyedLookupCache::kHashMask;
- __ mov(r7, Operand(mask));
- __ and_(r6, r6, r7, LeaveRC);
-
- // Load the key (consisting of map and unique name) from the cache and
- // check for match.
- Label load_in_object_property;
- static const int kEntriesPerBucket = KeyedLookupCache::kEntriesPerBucket;
- Label hit_on_nth_entry[kEntriesPerBucket];
- ExternalReference cache_keys =
- ExternalReference::keyed_lookup_cache_keys(isolate);
-
- __ mov(r7, Operand(cache_keys));
- __ mr(r0, r5);
- __ ShiftLeftImm(r5, r6, Operand(kPointerSizeLog2 + 1));
- __ add(r7, r7, r5);
- __ mr(r5, r0);
-
- for (int i = 0; i < kEntriesPerBucket - 1; i++) {
- Label try_next_entry;
- // Load map and move r7 to next entry.
- __ LoadP(r8, MemOperand(r7));
- __ addi(r7, r7, Operand(kPointerSize * 2));
- __ cmp(r3, r8);
- __ bne(&try_next_entry);
- __ LoadP(r8, MemOperand(r7, -kPointerSize)); // Load name
- __ cmp(key, r8);
- __ beq(&hit_on_nth_entry[i]);
- __ bind(&try_next_entry);
- }
-
- // Last entry: Load map and move r7 to name.
- __ LoadP(r8, MemOperand(r7));
- __ addi(r7, r7, Operand(kPointerSize));
- __ cmp(r3, r8);
- __ bne(&slow);
- __ LoadP(r8, MemOperand(r7));
- __ cmp(key, r8);
- __ bne(&slow);
- // Get field offset.
- // r3 : receiver's map
- // r6 : lookup cache index
- ExternalReference cache_field_offsets =
- ExternalReference::keyed_lookup_cache_field_offsets(isolate);
-
- // Hit on nth entry.
- for (int i = kEntriesPerBucket - 1; i >= 0; i--) {
- __ bind(&hit_on_nth_entry[i]);
- __ mov(r7, Operand(cache_field_offsets));
- if (i != 0) {
- __ addi(r6, r6, Operand(i));
- }
- __ ShiftLeftImm(r8, r6, Operand(2));
- __ lwzx(r8, MemOperand(r8, r7));
- __ lbz(r9, FieldMemOperand(r3, Map::kInObjectPropertiesOffset));
- __ sub(r8, r8, r9);
- __ cmpi(r8, Operand::Zero());
- __ bge(&property_array_property);
- if (i != 0) {
- __ b(&load_in_object_property);
- }
+ if (FLAG_vector_ics) {
+ // When vector ics are in use, the handlers in the stub cache expect a
+ // vector and slot. Since we won't change the IC from any downstream
+ // misses, a dummy vector can be used.
+ Register vector = VectorLoadICDescriptor::VectorRegister();
+ Register slot = VectorLoadICDescriptor::SlotRegister();
+ DCHECK(!AreAliased(vector, slot, r7, r8, r9, r10));
+ Handle<TypeFeedbackVector> dummy_vector = Handle<TypeFeedbackVector>::cast(
+ masm->isolate()->factory()->keyed_load_dummy_vector());
+ int int_slot = dummy_vector->GetIndex(FeedbackVectorICSlot(0));
+ __ LoadRoot(vector, Heap::kKeyedLoadDummyVectorRootIndex);
+ __ LoadSmiLiteral(slot, Smi::FromInt(int_slot));
}
- // Load in-object property.
- __ bind(&load_in_object_property);
- __ lbz(r9, FieldMemOperand(r3, Map::kInstanceSizeOffset));
- __ add(r9, r9, r8); // Index from start of object.
- __ subi(receiver, receiver, Operand(kHeapObjectTag)); // Remove the heap tag.
- __ ShiftLeftImm(r3, r9, Operand(kPointerSizeLog2));
- __ LoadPX(r3, MemOperand(r3, receiver));
- __ IncrementCounter(isolate->counters()->keyed_load_generic_lookup_cache(), 1,
- r7, r6);
- __ Ret();
-
- // Load property array property.
- __ bind(&property_array_property);
- __ LoadP(receiver, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
- __ addi(receiver, receiver,
- Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ ShiftLeftImm(r3, r8, Operand(kPointerSizeLog2));
- __ LoadPX(r3, MemOperand(r3, receiver));
- __ IncrementCounter(isolate->counters()->keyed_load_generic_lookup_cache(), 1,
- r7, r6);
- __ Ret();
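+ // Probe the megamorphic stub cache for a KEYED_LOAD handler. A hit jumps
+ // to the handler; a miss falls through to GenerateMiss below.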
+ Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
+ Code::ComputeHandlerFlags(Code::LOAD_IC));
+ masm->isolate()->stub_cache()->GenerateProbe(
+ masm, Code::KEYED_LOAD_IC, flags, false, receiver, key, r7, r8, r9, r10);
+ // Cache miss.
+ GenerateMiss(masm);
// Do a quick inline probe of the receiver's dictionary, if it
// exists.
void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm,
- StrictMode strict_mode) {
+ LanguageMode language_mode) {
// ---------- S t a t e --------------
// -- r3 : value
// -- r4 : key
// r3: value.
// r4: key.
// r5: receiver.
- PropertyICCompiler::GenerateRuntimeSetProperty(masm, strict_mode);
+ PropertyICCompiler::GenerateRuntimeSetProperty(masm, language_mode);
// Never returns to this point.
__ bind(&maybe_name_key);
__ JumpIfNotUniqueNameInstanceType(r7, &slow);
Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
Code::ComputeHandlerFlags(Code::STORE_IC));
- masm->isolate()->stub_cache()->GenerateProbe(masm, flags, false, receiver,
- key, r6, r7, r8, r9);
+ masm->isolate()->stub_cache()->GenerateProbe(
+ masm, Code::STORE_IC, flags, false, receiver, key, r6, r7, r8, r9);
// Cache miss.
__ b(&miss);
Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
Code::ComputeHandlerFlags(Code::STORE_IC));
- masm->isolate()->stub_cache()->GenerateProbe(masm, flags, false, receiver,
- name, r6, r7, r8, r9);
+ masm->isolate()->stub_cache()->GenerateProbe(
+ masm, Code::STORE_IC, flags, false, receiver, name, r6, r7, r8, r9);
// Cache miss: Jump to runtime.
GenerateMiss(masm);
#if V8_TARGET_ARCH_PPC
#include "src/codegen.h"
+#include "src/ic/ic.h"
#include "src/ic/stub-cache.h"
+#include "src/interface-descriptors.h"
namespace v8 {
namespace internal {
static void ProbeTable(Isolate* isolate, MacroAssembler* masm,
- Code::Flags flags, bool leave_frame,
+ Code::Kind ic_kind, Code::Flags flags, bool leave_frame,
StubCache::Table table, Register receiver, Register name,
// Number of the cache entry, not scaled.
Register offset, Register scratch, Register scratch2,
// Calculate the base address of the entry.
__ mov(base_addr, Operand(key_offset));
- __ ShiftLeftImm(scratch2, offset_scratch, Operand(kPointerSizeLog2));
- __ add(base_addr, base_addr, scratch2);
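+ // offset_scratch holds the entry index already scaled by kCacheIndexShift;
+ // rescale it to pointer-size units where the two shifts differ.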
+#if V8_TARGET_ARCH_PPC64
+ DCHECK(kPointerSizeLog2 > StubCache::kCacheIndexShift);
+ __ ShiftLeftImm(offset_scratch, offset_scratch,
+ Operand(kPointerSizeLog2 - StubCache::kCacheIndexShift));
+#else
+ DCHECK(kPointerSizeLog2 == StubCache::kCacheIndexShift);
+#endif
+ __ add(base_addr, base_addr, offset_scratch);
// Check that the key in the entry matches the name.
__ LoadP(ip, MemOperand(base_addr, 0));
}
-void StubCache::GenerateProbe(MacroAssembler* masm, Code::Flags flags,
- bool leave_frame, Register receiver,
- Register name, Register scratch, Register extra,
- Register extra2, Register extra3) {
+void StubCache::GenerateProbe(MacroAssembler* masm, Code::Kind ic_kind,
+ Code::Flags flags, bool leave_frame,
+ Register receiver, Register name,
+ Register scratch, Register extra, Register extra2,
+ Register extra3) {
Isolate* isolate = masm->isolate();
Label miss;
DCHECK(Code::ExtractTypeFromFlags(flags) == 0);
// Make sure that there are no register conflicts.
- DCHECK(!scratch.is(receiver));
- DCHECK(!scratch.is(name));
- DCHECK(!extra.is(receiver));
- DCHECK(!extra.is(name));
- DCHECK(!extra.is(scratch));
- DCHECK(!extra2.is(receiver));
- DCHECK(!extra2.is(name));
- DCHECK(!extra2.is(scratch));
- DCHECK(!extra2.is(extra));
+ DCHECK(!AreAliased(receiver, name, scratch, extra, extra2, extra3));
// Check that the scratch, extra2 and extra3 registers are valid.
DCHECK(!scratch.is(no_reg));
DCHECK(!extra2.is(no_reg));
DCHECK(!extra3.is(no_reg));
+#ifdef DEBUG
+ // If vector-based ics are in use, ensure that scratch, extra, extra2 and
+ // extra3 don't conflict with the vector and slot registers, which need
+ // to be preserved for a handler call or miss.
+ if (IC::ICUseVector(ic_kind)) {
+ Register vector = VectorLoadICDescriptor::VectorRegister();
+ Register slot = VectorLoadICDescriptor::SlotRegister();
+ DCHECK(!AreAliased(vector, slot, scratch, extra, extra2, extra3));
+ }
+#endif
+
Counters* counters = masm->isolate()->counters();
__ IncrementCounter(counters->megamorphic_stub_cache_probes(), 1, extra2,
extra3);
__ lwz(scratch, FieldMemOperand(name, Name::kHashFieldOffset));
__ LoadP(ip, FieldMemOperand(receiver, HeapObject::kMapOffset));
__ add(scratch, scratch, ip);
-#if V8_TARGET_ARCH_PPC64
- // Use only the low 32 bits of the map pointer.
- __ rldicl(scratch, scratch, 0, 32);
-#endif
- uint32_t mask = kPrimaryTableSize - 1;
- // We shift out the last two bits because they are not part of the hash and
- // they are always 01 for maps.
- __ ShiftRightImm(scratch, scratch, Operand(kCacheIndexShift));
- // Mask down the eor argument to the minimum to keep the immediate
- // encodable.
- __ xori(scratch, scratch, Operand((flags >> kCacheIndexShift) & mask));
- // Prefer and_ to ubfx here because ubfx takes 2 cycles.
- __ andi(scratch, scratch, Operand(mask));
+ __ xori(scratch, scratch, Operand(flags));
+ // The mask omits the last two bits because they are not part of the hash.
+ __ andi(scratch, scratch,
+ Operand((kPrimaryTableSize - 1) << kCacheIndexShift));
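+ // The mask is applied pre-shifted, so the resulting entry offset keeps
+ // its kCacheIndexShift scaling.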
// Probe the primary table.
- ProbeTable(isolate, masm, flags, leave_frame, kPrimary, receiver, name,
- scratch, extra, extra2, extra3);
+ ProbeTable(isolate, masm, ic_kind, flags, leave_frame, kPrimary, receiver,
+ name, scratch, extra, extra2, extra3);
// Primary miss: Compute hash for secondary probe.
- __ ShiftRightImm(extra, name, Operand(kCacheIndexShift));
- __ sub(scratch, scratch, extra);
- uint32_t mask2 = kSecondaryTableSize - 1;
- __ addi(scratch, scratch, Operand((flags >> kCacheIndexShift) & mask2));
- __ andi(scratch, scratch, Operand(mask2));
+ __ sub(scratch, scratch, name);
+ __ addi(scratch, scratch, Operand(flags));
+ __ andi(scratch, scratch,
+ Operand((kSecondaryTableSize - 1) << kCacheIndexShift));
// Probe the secondary table.
- ProbeTable(isolate, masm, flags, leave_frame, kSecondary, receiver, name,
- scratch, extra, extra2, extra3);
+ ProbeTable(isolate, masm, ic_kind, flags, leave_frame, kSecondary, receiver,
+ name, scratch, extra, extra2, extra3);
// Cache miss: Fall-through and let caller handle the miss by
// entering the runtime system.
cpu.part() == base::CPU::PPC_POWER8) {
supported_ |= (1u << LWSYNC);
}
+ if (cpu.part() == base::CPU::PPC_POWER7 ||
+ cpu.part() == base::CPU::PPC_POWER8) {
+ supported_ |= (1u << ISELECT);
+ }
#if V8_OS_LINUX
if (!(cpu.part() == base::CPU::PPC_G5 || cpu.part() == base::CPU::PPC_G4)) {
// Assume support
#else // Simulator
supported_ |= (1u << FPU);
supported_ |= (1u << LWSYNC);
+ supported_ |= (1u << ISELECT);
#if V8_TARGET_ARCH_PPC64
supported_ |= (1u << FPR_GPR_MOV);
#endif
// -----------------------------------------------------------------------------
// Specific instructions, constants, and masks.
-// Spare buffer.
-static const int kMinimalBufferSize = 4 * KB;
-
Assembler::Assembler(Isolate* isolate, void* buffer, int buffer_size)
: AssemblerBase(isolate, buffer, buffer_size),
}
}
- PPCPORT_UNIMPLEMENTED();
DCHECK(false);
return -1;
}
// Check which type of branch this is: a 16- or 26-bit offset.
if (BX == opcode) {
int imm26 = target_pos - pos;
- DCHECK((imm26 & (kAAMask | kLKMask)) == 0);
- instr &= ((~kImm26Mask) | kAAMask | kLKMask);
- DCHECK(is_int26(imm26));
- instr_at_put(pos, instr | (imm26 & kImm26Mask));
+ DCHECK(is_int26(imm26) && (imm26 & (kAAMask | kLKMask)) == 0);
+ if (imm26 == kInstrSize && !(instr & kLKMask)) {
+ // Branch to next instr without link.
+ instr = ORI; // nop: ori, 0,0,0
+ } else {
+ instr &= ((~kImm26Mask) | kAAMask | kLKMask);
+ instr |= (imm26 & kImm26Mask);
+ }
+ instr_at_put(pos, instr);
return;
} else if (BCX == opcode) {
int imm16 = target_pos - pos;
- DCHECK((imm16 & (kAAMask | kLKMask)) == 0);
- instr &= ((~kImm16Mask) | kAAMask | kLKMask);
- DCHECK(is_int16(imm16));
- instr_at_put(pos, instr | (imm16 & kImm16Mask));
+ DCHECK(is_int16(imm16) && (imm16 & (kAAMask | kLKMask)) == 0);
+ if (imm16 == kInstrSize && !(instr & kLKMask)) {
+ // Branch to next instr without link.
+ instr = ORI; // nop: ori, 0,0,0
+ } else {
+ instr &= ((~kImm16Mask) | kAAMask | kLKMask);
+ instr |= (imm16 & kImm16Mask);
+ }
+ instr_at_put(pos, instr);
return;
} else if ((instr & ~kImm26Mask) == 0) {
DCHECK(target_pos == kEndOfChain || target_pos >= 0);
// Multiply hi word
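+// Note: mulhw/mulhwu have no OE form in the Power ISA, so no OEBit
+// parameter is taken.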
-void Assembler::mulhw(Register dst, Register src1, Register src2, OEBit o,
- RCBit r) {
- xo_form(EXT2 | MULHWX, dst, src1, src2, o, r);
+void Assembler::mulhw(Register dst, Register src1, Register src2, RCBit r) {
+ xo_form(EXT2 | MULHWX, dst, src1, src2, LeaveOE, r);
+}
+
+
+// Multiply hi word unsigned
+void Assembler::mulhwu(Register dst, Register src1, Register src2, RCBit r) {
+ xo_form(EXT2 | MULHWUX, dst, src1, src2, LeaveOE, r);
}
}
+// Divide word unsigned
+void Assembler::divwu(Register dst, Register src1, Register src2, OEBit o,
+ RCBit r) {
+ xo_form(EXT2 | DIVWU, dst, src1, src2, o, r);
+}
+
+
void Assembler::addi(Register dst, Register src, const Operand& imm) {
DCHECK(!src.is(r0)); // use li instead to show intent
d_form(ADDI, dst, src, imm.imm_, true);
}
+void Assembler::orc(Register dst, Register src1, Register src2, RCBit rc) {
+ x_form(EXT2 | ORC, dst, src1, src2, rc);
+}
+
+
void Assembler::cmpi(Register src1, const Operand& src2, CRegister cr) {
intptr_t imm16 = src2.imm_;
#if V8_TARGET_ARCH_PPC64
}
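+// Integer select: rt = (CR bit cb set) ? ra : rb. An RA field of 0 (r0)
+// reads as the value 0 rather than the contents of r0.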
+void Assembler::isel(Register rt, Register ra, Register rb, int cb) {
+ emit(EXT2 | ISEL | rt.code() * B21 | ra.code() * B16 | rb.code() * B11 |
+ cb * B6);
+}
+
+
// Pseudo op - load immediate
void Assembler::li(Register dst, const Operand& imm) {
d_form(ADDI, dst, r0, imm.imm_, true);
}
+void Assembler::lhax(Register rt, const MemOperand& src) {
+ Register ra = src.ra();
+ Register rb = src.rb();
+ DCHECK(!ra.is(r0));
+ emit(EXT2 | LHAX | rt.code() * B21 | ra.code() * B16 | rb.code() * B11);
+}
+
+
void Assembler::lwz(Register dst, const MemOperand& src) {
DCHECK(!src.ra_.is(r0));
d_form(LWZ, dst, src.ra(), src.offset(), true);
}
+void Assembler::lha(Register dst, const MemOperand& src) {
+ DCHECK(!src.ra_.is(r0));
+ d_form(LHA, dst, src.ra(), src.offset(), true);
+}
+
+
void Assembler::lwa(Register dst, const MemOperand& src) {
#if V8_TARGET_ARCH_PPC64
int offset = src.offset();
}
+void Assembler::lwax(Register rt, const MemOperand& src) {
+#if V8_TARGET_ARCH_PPC64
+ Register ra = src.ra();
+ Register rb = src.rb();
+ DCHECK(!ra.is(r0));
+ emit(EXT2 | LWAX | rt.code() * B21 | ra.code() * B16 | rb.code() * B11);
+#else
+ lwzx(rt, src);
+#endif
+}
+
+
void Assembler::stb(Register dst, const MemOperand& src) {
DCHECK(!src.ra_.is(r0));
d_form(STB, dst, src.ra(), src.offset(), true);
}
+void Assembler::extsw(Register rs, Register ra, RCBit rc) {
+#if V8_TARGET_ARCH_PPC64
+ emit(EXT2 | EXTSW | ra.code() * B21 | rs.code() * B16 | rc);
+#else
+ // nop on 32-bit
+ DCHECK(rs.is(ra) && rc == LeaveRC);
+#endif
+}
+
+
void Assembler::neg(Register rt, Register ra, OEBit o, RCBit r) {
emit(EXT2 | NEGX | rt.code() * B21 | ra.code() * B16 | o | r);
}
}
-void Assembler::extsw(Register rs, Register ra, RCBit rc) {
- emit(EXT2 | EXTSW | ra.code() * B21 | rs.code() * B16 | rc);
-}
-
-
void Assembler::mulld(Register dst, Register src1, Register src2, OEBit o,
RCBit r) {
xo_form(EXT2 | MULLD, dst, src1, src2, o, r);
RCBit r) {
xo_form(EXT2 | DIVD, dst, src1, src2, o, r);
}
-#endif
-void Assembler::fake_asm(enum FAKE_OPCODE_T fopcode) {
- DCHECK(fopcode < fLastFaker);
- emit(FAKE_OPCODE | FAKER_SUBOPCODE | fopcode);
-}
-
-
-void Assembler::marker_asm(int mcode) {
- if (::v8::internal::FLAG_trace_sim_stubs) {
- DCHECK(mcode < F_NEXT_AVAILABLE_STUB_MARKER);
- emit(FAKE_OPCODE | MARKER_SUBOPCODE | mcode);
- }
+void Assembler::divdu(Register dst, Register src1, Register src2, OEBit o,
+ RCBit r) {
+ xo_form(EXT2 | DIVDU, dst, src1, src2, o, r);
}
+#endif
// Function descriptor for AIX.
RecordRelocInfo(rinfo);
}
- canOptimize =
- !(src.must_output_reloc_info(this) || is_trampoline_pool_blocked());
+ canOptimize = !(src.must_output_reloc_info(this) ||
+ (is_trampoline_pool_blocked() && !is_int16(value)));
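+ // A value that fits in 16 bits loads with a single instruction, so it can
+ // still be optimized while the trampoline pool is blocked.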
#if V8_OOL_CONSTANT_POOL
if (use_constant_pool_for_mov(src, canOptimize)) {
void Assembler::bkpt(uint32_t imm16) { emit(0x7d821008); }
-void Assembler::info(const char* msg, Condition cond, int32_t code,
- CRegister cr) {
- if (::v8::internal::FLAG_trace_sim_stubs) {
- emit(0x7d9ff808);
-#if V8_TARGET_ARCH_PPC64
- uint64_t value = reinterpret_cast<uint64_t>(msg);
- emit(static_cast<uint32_t>(value >> 32));
- emit(static_cast<uint32_t>(value & 0xFFFFFFFF));
-#else
- emit(reinterpret_cast<Instr>(msg));
-#endif
- }
-}
-
-
void Assembler::dcbf(Register ra, Register rb) {
emit(EXT2 | DCBF | ra.code() * B16 | rb.code() * B11);
}
}
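+// Floating-point round to integer: frin rounds to nearest, friz toward
+// zero, frip toward +infinity and frim toward -infinity.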
-void Assembler::frim(const DoubleRegister frt, const DoubleRegister frb) {
- emit(EXT4 | FRIM | frt.code() * B21 | frb.code() * B11);
+void Assembler::frin(const DoubleRegister frt, const DoubleRegister frb,
+ RCBit rc) {
+ emit(EXT4 | FRIN | frt.code() * B21 | frb.code() * B11 | rc);
+}
+
+
+void Assembler::friz(const DoubleRegister frt, const DoubleRegister frb,
+ RCBit rc) {
+ emit(EXT4 | FRIZ | frt.code() * B21 | frb.code() * B11 | rc);
+}
+
+
+void Assembler::frip(const DoubleRegister frt, const DoubleRegister frb,
+ RCBit rc) {
+ emit(EXT4 | FRIP | frt.code() * B21 | frb.code() * B11 | rc);
+}
+
+
+void Assembler::frim(const DoubleRegister frt, const DoubleRegister frb,
+ RCBit rc) {
+ emit(EXT4 | FRIM | frt.code() * B21 | frb.code() * B11 | rc);
}
}
+void Assembler::RecordDeoptReason(const int reason, const int raw_position) {
+ if (FLAG_trace_deopt) {
+ EnsureSpace ensure_space(this);
+ RecordRelocInfo(RelocInfo::POSITION, raw_position);
+ RecordRelocInfo(RelocInfo::DEOPT_REASON, reason);
+ }
+}
+
+
void Assembler::GrowBuffer() {
if (!own_buffer_) FATAL("external code buffer is too small");
#define ABI_PASSES_HANDLES_IN_REGS \
(!V8_HOST_ARCH_PPC || V8_OS_AIX || V8_TARGET_ARCH_PPC64)
-#define ABI_RETURNS_HANDLES_IN_REGS \
- (!V8_HOST_ARCH_PPC || V8_TARGET_LITTLE_ENDIAN)
-
#define ABI_RETURNS_OBJECT_PAIRS_IN_REGS \
- (!V8_HOST_ARCH_PPC || V8_TARGET_LITTLE_ENDIAN)
+ (!V8_HOST_ARCH_PPC || !V8_TARGET_ARCH_PPC64 || V8_TARGET_LITTLE_ENDIAN)
#define ABI_TOC_ADDRESSABILITY_VIA_IP \
(V8_HOST_ARCH_PPC && V8_TARGET_ARCH_PPC64 && V8_TARGET_LITTLE_ENDIAN)
}
}
+ void isel(Register rt, Register ra, Register rb, int cb);
+ void isel(Condition cond, Register rt, Register ra, Register rb,
+ CRegister cr = cr7) {
+ DCHECK(cond != al);
+ DCHECK(cr.code() >= 0 && cr.code() <= 7);
+
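+ // isel selects ra when the tested CR bit is set and rb when it is clear,
+ // so the negated conditions swap the two source operands.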
+ switch (cond) {
+ case eq:
+ isel(rt, ra, rb, encode_crbit(cr, CR_EQ));
+ break;
+ case ne:
+ isel(rt, rb, ra, encode_crbit(cr, CR_EQ));
+ break;
+ case gt:
+ isel(rt, ra, rb, encode_crbit(cr, CR_GT));
+ break;
+ case le:
+ isel(rt, rb, ra, encode_crbit(cr, CR_GT));
+ break;
+ case lt:
+ isel(rt, ra, rb, encode_crbit(cr, CR_LT));
+ break;
+ case ge:
+ isel(rt, rb, ra, encode_crbit(cr, CR_LT));
+ break;
+ case unordered:
+ isel(rt, ra, rb, encode_crbit(cr, CR_FU));
+ break;
+ case ordered:
+ isel(rt, rb, ra, encode_crbit(cr, CR_FU));
+ break;
+ case overflow:
+ isel(rt, ra, rb, encode_crbit(cr, CR_SO));
+ break;
+ case nooverflow:
+ isel(rt, rb, ra, encode_crbit(cr, CR_SO));
+ break;
+ default:
+ UNIMPLEMENTED();
+ }
+ }
+
void b(Condition cond, Label* L, CRegister cr = cr7, LKBit lk = LeaveLK) {
if (cond == al) {
b(L, lk);
void mullw(Register dst, Register src1, Register src2, OEBit o = LeaveOE,
RCBit r = LeaveRC);
- void mulhw(Register dst, Register src1, Register src2, OEBit o = LeaveOE,
- RCBit r = LeaveRC);
+ void mulhw(Register dst, Register src1, Register src2, RCBit r = LeaveRC);
+ void mulhwu(Register dst, Register src1, Register src2, RCBit r = LeaveRC);
void divw(Register dst, Register src1, Register src2, OEBit o = LeaveOE,
RCBit r = LeaveRC);
+ void divwu(Register dst, Register src1, Register src2, OEBit o = LeaveOE,
+ RCBit r = LeaveRC);
void addi(Register dst, Register src, const Operand& imm);
void addis(Register dst, Register src, const Operand& imm);
void ori(Register dst, Register src, const Operand& imm);
void oris(Register dst, Register src, const Operand& imm);
void orx(Register dst, Register src1, Register src2, RCBit rc = LeaveRC);
+ void orc(Register dst, Register src1, Register src2, RCBit rc = LeaveRC);
void xori(Register dst, Register src, const Operand& imm);
void xoris(Register ra, Register rs, const Operand& imm);
void xor_(Register dst, Register src1, Register src2, RCBit rc = LeaveRC);
void lhz(Register dst, const MemOperand& src);
void lhzx(Register dst, const MemOperand& src);
void lhzux(Register dst, const MemOperand& src);
+ void lha(Register dst, const MemOperand& src);
+ void lhax(Register dst, const MemOperand& src);
void lwz(Register dst, const MemOperand& src);
void lwzu(Register dst, const MemOperand& src);
void lwzx(Register dst, const MemOperand& src);
void lwzux(Register dst, const MemOperand& src);
void lwa(Register dst, const MemOperand& src);
+ void lwax(Register dst, const MemOperand& src);
void stb(Register dst, const MemOperand& src);
void stbx(Register dst, const MemOperand& src);
void stbux(Register dst, const MemOperand& src);
void extsb(Register rs, Register ra, RCBit r = LeaveRC);
void extsh(Register rs, Register ra, RCBit r = LeaveRC);
+ void extsw(Register rs, Register ra, RCBit r = LeaveRC);
void neg(Register rt, Register ra, OEBit o = LeaveOE, RCBit c = LeaveRC);
void rotldi(Register ra, Register rs, int sh, RCBit r = LeaveRC);
void rotrdi(Register ra, Register rs, int sh, RCBit r = LeaveRC);
void cntlzd_(Register dst, Register src, RCBit rc = LeaveRC);
- void extsw(Register rs, Register ra, RCBit r = LeaveRC);
void mulld(Register dst, Register src1, Register src2, OEBit o = LeaveOE,
RCBit r = LeaveRC);
void divd(Register dst, Register src1, Register src2, OEBit o = LeaveOE,
RCBit r = LeaveRC);
+ void divdu(Register dst, Register src1, Register src2, OEBit o = LeaveOE,
+ RCBit r = LeaveRC);
#endif
void rlwinm(Register ra, Register rs, int sh, int mb, int me,
void mtfprwa(DoubleRegister dst, Register src);
#endif
- void fake_asm(enum FAKE_OPCODE_T fopcode);
- void marker_asm(int mcode);
void function_descriptor();
// Exception-generating instructions and debugging support
void bkpt(uint32_t imm16); // v5 and above
- // Informational messages when simulating
- void info(const char* msg, Condition cond = al,
- int32_t code = kDefaultStopCode, CRegister cr = cr7);
-
void dcbf(Register ra, Register rb);
void sync();
void lwsync();
RCBit rc = LeaveRC);
void fctiwz(const DoubleRegister frt, const DoubleRegister frb);
void fctiw(const DoubleRegister frt, const DoubleRegister frb);
- void frim(const DoubleRegister frt, const DoubleRegister frb);
+ void frin(const DoubleRegister frt, const DoubleRegister frb,
+ RCBit rc = LeaveRC);
+ void friz(const DoubleRegister frt, const DoubleRegister frb,
+ RCBit rc = LeaveRC);
+ void frip(const DoubleRegister frt, const DoubleRegister frb,
+ RCBit rc = LeaveRC);
+ void frim(const DoubleRegister frt, const DoubleRegister frb,
+ RCBit rc = LeaveRC);
void frsp(const DoubleRegister frt, const DoubleRegister frb,
RCBit rc = LeaveRC);
void fcfid(const DoubleRegister frt, const DoubleRegister frb,
// Use --code-comments to enable.
void RecordComment(const char* msg);
+ // Record a deoptimization reason that can be used by a log or cpu profiler.
+ // Use --trace-deopt to enable.
+ void RecordDeoptReason(const int reason, const int raw_position);
+
// Writes a single byte or word of data in the code stream. Used
// for inline tables, e.g., jump-tables.
void db(uint8_t data);
bool is_trampoline_emitted() const { return trampoline_emitted_; }
-#if V8_OOL_CONSTANT_POOL
- void set_constant_pool_available(bool available) {
- constant_pool_available_ = available;
- }
-#endif
-
private:
// Code generation
// The relocation writer's position is at least kGap bytes below the end of
}
+static void Generate_Runtime_NewObject(MacroAssembler* masm,
+ bool create_memento,
+ Register original_constructor,
+ Label* count_incremented,
+ Label* allocated) {
+ // ----------- S t a t e -------------
+ // -- r4: argument for Runtime_NewObject
+ // -----------------------------------
+ Register result = r7;
+
+ if (create_memento) {
+ // Get the cell or allocation site.
+ __ LoadP(r5, MemOperand(sp, 2 * kPointerSize));
+ __ Push(r5, r4, original_constructor);
+ __ CallRuntime(Runtime::kNewObjectWithAllocationSite, 3);
+ __ mr(result, r3);
+ // Runtime_NewObjectWithAllocationSite increments allocation count.
+ // Skip the increment.
+ __ b(count_incremented);
+ } else {
+ __ Push(r4, original_constructor);
+ __ CallRuntime(Runtime::kNewObject, 2);
+ __ mr(result, r3);
+ __ b(allocated);
+ }
+}
+
+
static void Generate_JSConstructStubHelper(MacroAssembler* masm,
bool is_api_function,
bool create_memento) {
// -- r3 : number of arguments
// -- r4 : constructor function
// -- r5 : allocation site or undefined
+ // -- r6 : original constructor
// -- lr : return address
// -- sp[...]: constructor arguments
// -----------------------------------
FrameAndConstantPoolScope scope(masm, StackFrame::CONSTRUCT);
if (create_memento) {
- __ AssertUndefinedOrAllocationSite(r5, r6);
+ __ AssertUndefinedOrAllocationSite(r5, r7);
__ push(r5);
}
// Preserve the two incoming parameters on the stack.
__ SmiTag(r3);
- __ push(r3); // Smi-tagged arguments count.
- __ push(r4); // Constructor function.
+ __ Push(r3, r4);
+
+ Label rt_call, allocated, normal_new, count_incremented;
+ __ cmp(r4, r6);
+ __ beq(&normal_new);
+
+ // Original constructor and function are different.
+ Generate_Runtime_NewObject(masm, create_memento, r6, &count_incremented,
+ &allocated);
+ __ bind(&normal_new);
// Try to allocate the object without transitioning into C code. If any of
// the preconditions is not met, the code bails out to the runtime call.
- Label rt_call, allocated;
if (FLAG_inline_new) {
Label undo_allocation;
ExternalReference debug_step_in_fp =
MemOperand bit_field3 = FieldMemOperand(r5, Map::kBitField3Offset);
// Check if slack tracking is enabled.
__ lwz(r7, bit_field3);
- __ DecodeField<Map::ConstructionCount>(r11, r7);
- STATIC_ASSERT(JSFunction::kNoSlackTracking == 0);
- __ cmpi(r11, Operand::Zero()); // JSFunction::kNoSlackTracking
- __ beq(&allocate);
+ __ DecodeField<Map::Counter>(r11, r7);
+ __ cmpi(r11, Operand(Map::kSlackTrackingCounterEnd));
+ __ blt(&allocate);
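+ // Counter values below kSlackTrackingCounterEnd mean slack tracking is
+ // not in progress for this map.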
// Decrease generous allocation count.
- __ Add(r7, r7, -(1 << Map::ConstructionCount::kShift), r0);
+ __ Add(r7, r7, -(1 << Map::Counter::kShift), r0);
__ stw(r7, bit_field3);
- __ cmpi(r11, Operand(JSFunction::kFinishSlackTracking));
+ __ cmpi(r11, Operand(Map::kSlackTrackingCounterEnd));
__ bne(&allocate);
__ push(r4);
Label no_inobject_slack_tracking;
// Check if slack tracking is enabled.
- STATIC_ASSERT(JSFunction::kNoSlackTracking == 0);
- __ cmpi(r11, Operand::Zero()); // JSFunction::kNoSlackTracking
- __ beq(&no_inobject_slack_tracking);
+ __ cmpi(r11, Operand(Map::kSlackTrackingCounterEnd));
+ __ blt(&no_inobject_slack_tracking);
// Allocate object with a slack.
__ lbz(r3, FieldMemOperand(r5, Map::kPreAllocatedPropertyFieldsOffset));
// Allocate the new receiver object using the runtime call.
// r4: constructor function
__ bind(&rt_call);
- if (create_memento) {
- // Get the cell or allocation site.
- __ LoadP(r5, MemOperand(sp, 2 * kPointerSize));
- __ push(r5);
- }
-
- __ push(r4); // argument for Runtime_NewObject
- if (create_memento) {
- __ CallRuntime(Runtime::kNewObjectWithAllocationSite, 2);
- } else {
- __ CallRuntime(Runtime::kNewObject, 1);
- }
- __ mr(r7, r3);
-
- // If we ended up using the runtime, and we want a memento, then the
- // runtime call made it for us, and we shouldn't do create count
- // increment.
- Label count_incremented;
- if (create_memento) {
- __ b(&count_incremented);
- }
+ Generate_Runtime_NewObject(masm, create_memento, r4, &count_incremented,
+ &allocated);
// Receiver for constructor call allocated.
// r7: JSObject
}
+void Builtins::Generate_JSConstructStubForDerived(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- r3 : number of arguments
+ // -- r4 : constructor function
+ // -- r5 : allocation site or undefined
+ // -- r6 : original constructor
+ // -- lr : return address
+ // -- sp[...]: constructor arguments
+ // -----------------------------------
+
+ // TODO(dslomov): support pretenuring
+ CHECK(!FLAG_pretenuring_call_new);
+
+ {
+ FrameAndConstantPoolScope scope(masm, StackFrame::CONSTRUCT);
+
+ // Smi-tagged arguments count.
+ __ mr(r7, r3);
+ __ SmiTag(r7, SetRC);
+
+ // receiver is the hole.
+ __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
+ __ Push(r7, ip);
+
+ // Set up pointer to last argument.
+ __ addi(r5, fp, Operand(StandardFrameConstants::kCallerSPOffset));
+
+ // Copy arguments and receiver to the expression stack.
+ // r3: number of arguments
+ // r4: constructor function
+ // r5: address of last argument (caller sp)
+ // r7: number of arguments (smi-tagged)
+ // cr0: result of comparing the argument count against zero
+ // sp[0]: receiver
+ // sp[1]: number of arguments (smi-tagged)
+ Label loop, no_args;
+ __ beq(&no_args, cr0);
+ __ ShiftLeftImm(ip, r3, Operand(kPointerSizeLog2));
+ __ mtctr(r3);
+ __ bind(&loop);
+ __ subi(ip, ip, Operand(kPointerSize));
+ __ LoadPX(r0, MemOperand(r5, ip));
+ __ push(r0);
+ __ bdnz(&loop);
+ __ bind(&no_args);
+
+ // Call the function.
+ // r3: number of arguments
+ // r4: constructor function
+ ParameterCount actual(r3);
+ __ InvokeFunction(r4, actual, CALL_FUNCTION, NullCallWrapper());
+
+ // Restore context from the frame.
+ // r3: result
+ // sp[0]: number of arguments (smi-tagged)
+ __ LoadP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ __ LoadP(r4, MemOperand(sp, 0));
+
+ // Leave construct frame.
+ }
+
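+ // Remove caller arguments and the receiver from the stack and return.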
+ __ SmiToPtrArrayOffset(r4, r4);
+ __ add(sp, sp, r4);
+ __ addi(sp, sp, Operand(kPointerSize));
+ __ blr();
+}
+
+
static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
bool is_construct) {
// Called from Generate_JS_Entry
// Test for overflow
#if V8_TARGET_ARCH_PPC64
- __ TestIfInt32(result_reg, scratch, r0);
+ __ TestIfInt32(result_reg, r0);
#else
__ TestIfInt32(scratch, result_reg, r0);
#endif
// not (it's a NaN). For <= and >= we need to load r3 with the failing
// value if it's a NaN.
if (cond != eq) {
- Label not_equal;
- __ bne(&not_equal);
- // All-zero means Infinity means equal.
- __ Ret();
- __ bind(&not_equal);
- if (cond == le) {
- __ li(r3, Operand(GREATER)); // NaN <= NaN should fail.
+ if (CpuFeatures::IsSupported(ISELECT)) {
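+ // On equality keep r3; otherwise select the value that fails the
+ // comparison.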
+ __ li(r4, Operand((cond == le) ? GREATER : LESS));
+ __ isel(eq, r3, r3, r4);
} else {
- __ li(r3, Operand(LESS)); // NaN >= NaN should fail.
+ Label not_equal;
+ __ bne(&not_equal);
+ // All-zero means Infinity means equal.
+ __ Ret();
+ __ bind(&not_equal);
+ if (cond == le) {
+ __ li(r3, Operand(GREATER)); // NaN <= NaN should fail.
+ } else {
+ __ li(r3, Operand(LESS)); // NaN >= NaN should fail.
+ }
}
}
__ Ret();
// If either is a Smi (we know that not both are), then they can only
// be strictly equal if the other is a HeapNumber.
STATIC_ASSERT(kSmiTag == 0);
- DCHECK_EQ(0, Smi::FromInt(0));
+ DCHECK_EQ(static_cast<Smi*>(0), Smi::FromInt(0));
__ and_(r5, lhs, rhs);
__ JumpIfNotSmi(r5, ¬_smis);
// One operand is a smi. EmitSmiNonsmiComparison generates code that can:
Label nan, equal, less_than;
__ bunordered(&nan);
- __ beq(&equal);
- __ blt(&less_than);
- __ li(r3, Operand(GREATER));
- __ Ret();
- __ bind(&equal);
- __ li(r3, Operand(EQUAL));
- __ Ret();
- __ bind(&less_than);
- __ li(r3, Operand(LESS));
- __ Ret();
+ if (CpuFeatures::IsSupported(ISELECT)) {
+ DCHECK(EQUAL == 0);
+ __ li(r4, Operand(GREATER));
+ __ li(r5, Operand(LESS));
+ __ isel(eq, r3, r0, r4);
+ __ isel(lt, r3, r5, r3);
+ __ Ret();
+ } else {
+ __ beq(&equal);
+ __ blt(&less_than);
+ __ li(r3, Operand(GREATER));
+ __ Ret();
+ __ bind(&equal);
+ __ li(r3, Operand(EQUAL));
+ __ Ret();
+ __ bind(&less_than);
+ __ li(r3, Operand(LESS));
+ __ Ret();
+ }
__ bind(&nan);
// If one of the sides was a NaN then the unordered bit is set. Load r3 with
__ ConvertIntToDouble(scratch2, double_result);
// Get absolute value of exponent.
- Label positive_exponent;
__ cmpi(scratch, Operand::Zero());
- __ bge(&positive_exponent);
- __ neg(scratch, scratch);
- __ bind(&positive_exponent);
+ if (CpuFeatures::IsSupported(ISELECT)) {
+ __ neg(scratch2, scratch);
+ __ isel(lt, scratch, scratch2, scratch);
+ } else {
+ Label positive_exponent;
+ __ bge(&positive_exponent);
+ __ neg(scratch, scratch);
+ __ bind(&positive_exponent);
+ }
Label while_true, no_carry, loop_end;
__ bind(&while_true);
void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
CEntryStub::GenerateAheadOfTime(isolate);
- // WriteInt32ToHeapNumberStub::GenerateFixedRegStubsAheadOfTime(isolate);
StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(isolate);
StubFailureTrampolineStub::GenerateAheadOfTime(isolate);
ArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate);
CreateAllocationSiteStub::GenerateAheadOfTime(isolate);
+ CreateWeakCellStub::GenerateAheadOfTime(isolate);
BinaryOpICStub::GenerateAheadOfTime(isolate);
StoreRegistersStateStub::GenerateAheadOfTime(isolate);
RestoreRegistersStateStub::GenerateAheadOfTime(isolate);
int arg_stack_space = 1;
// PPC LINUX ABI:
-#if V8_TARGET_ARCH_PPC64 && !ABI_RETURNS_OBJECT_PAIRS_IN_REGS
+#if !ABI_RETURNS_OBJECT_PAIRS_IN_REGS
// Pass buffer for return value on stack if necessary
if (result_size() > 1) {
DCHECK_EQ(2, result_size());
// Result returned in registers or stack, depending on result size and ABI.
Register isolate_reg = r5;
-#if V8_TARGET_ARCH_PPC64 && !ABI_RETURNS_OBJECT_PAIRS_IN_REGS
+#if !ABI_RETURNS_OBJECT_PAIRS_IN_REGS
if (result_size() > 1) {
// The return value is a 16-byte non-scalar value.
// Use frame storage reserved by calling function to pass return
__ Call(target);
}
-#if V8_TARGET_ARCH_PPC64 && !ABI_RETURNS_OBJECT_PAIRS_IN_REGS
+#if !ABI_RETURNS_OBJECT_PAIRS_IN_REGS
// If return value is on the stack, pop it to registers.
if (result_size() > 1) {
__ LoadP(r4, MemOperand(r3, kPointerSize));
__ Push(r3, r4);
__ InvokeBuiltin(Builtins::INSTANCE_OF, CALL_FUNCTION);
}
- Label true_value, done;
- __ cmpi(r3, Operand::Zero());
- __ beq(&true_value);
+ if (CpuFeatures::IsSupported(ISELECT)) {
+ __ cmpi(r3, Operand::Zero());
+ __ LoadRoot(r3, Heap::kTrueValueRootIndex);
+ __ LoadRoot(r4, Heap::kFalseValueRootIndex);
+ __ isel(eq, r3, r3, r4);
+ } else {
+ Label true_value, done;
+ __ cmpi(r3, Operand::Zero());
+ __ beq(&true_value);
- __ LoadRoot(r3, Heap::kFalseValueRootIndex);
- __ b(&done);
+ __ LoadRoot(r3, Heap::kFalseValueRootIndex);
+ __ b(&done);
- __ bind(&true_value);
- __ LoadRoot(r3, Heap::kTrueValueRootIndex);
+ __ bind(&true_value);
+ __ LoadRoot(r3, Heap::kTrueValueRootIndex);
- __ bind(&done);
+ __ bind(&done);
+ }
__ Ret(HasArgsInRegisters() ? 0 : 2);
}
}
void FunctionPrototypeStub::Generate(MacroAssembler* masm) {
Label miss;
Register receiver = LoadDescriptor::ReceiverRegister();
-
- NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype(masm, receiver, r6,
- r7, &miss);
+ // Ensure that the vector and slot registers won't be clobbered before
+ // calling the miss handler.
+ DCHECK(!FLAG_vector_ics ||
+ !AreAliased(r7, r8, VectorLoadICDescriptor::VectorRegister(),
+ VectorLoadICDescriptor::SlotRegister()));
+
+ NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype(masm, receiver, r7,
+ r8, &miss);
__ bind(&miss);
PropertyAccessCompiler::TailCallBuiltin(
masm, PropertyAccessCompiler::MissBuiltin(Code::LOAD_IC));
Register receiver = LoadDescriptor::ReceiverRegister();
Register index = LoadDescriptor::NameRegister();
- Register scratch = r6;
+ Register scratch = r8;
Register result = r3;
DCHECK(!scratch.is(receiver) && !scratch.is(index));
+ DCHECK(!FLAG_vector_ics ||
+ (!scratch.is(VectorLoadICDescriptor::VectorRegister()) &&
+ result.is(VectorLoadICDescriptor::SlotRegister())));
+ // StringCharAtGenerator doesn't use the result register until it's passed
+ // the different miss possibilities. If it did, we would have a conflict
+ // when FLAG_vector_ics is true.
StringCharAtGenerator char_at_generator(receiver, index, scratch, result,
&miss, // When not a string.
&miss, // When not a number.
// r4 = parameter count (tagged)
// r5 = argument count (tagged)
// Compute the mapped parameter count = min(r4, r5) in r4.
- Label skip;
__ cmp(r4, r5);
- __ blt(&skip);
- __ mr(r4, r5);
- __ bind(&skip);
+ if (CpuFeatures::IsSupported(ISELECT)) {
+ __ isel(lt, r4, r4, r5);
+ } else {
+ Label skip;
+ __ blt(&skip);
+ __ mr(r4, r5);
+ __ bind(&skip);
+ }
__ bind(&try_allocate);
const int kParameterMapHeaderSize =
FixedArray::kHeaderSize + 2 * kPointerSize;
// If there are no mapped parameters, we do not need the parameter_map.
- Label skip2, skip3;
__ CmpSmiLiteral(r4, Smi::FromInt(0), r0);
- __ bne(&skip2);
- __ li(r11, Operand::Zero());
- __ b(&skip3);
- __ bind(&skip2);
- __ SmiToPtrArrayOffset(r11, r4);
- __ addi(r11, r11, Operand(kParameterMapHeaderSize));
- __ bind(&skip3);
+ if (CpuFeatures::IsSupported(ISELECT)) {
+ __ SmiToPtrArrayOffset(r11, r4);
+ __ addi(r11, r11, Operand(kParameterMapHeaderSize));
+ __ isel(eq, r11, r0, r11);
+ } else {
+ Label skip2, skip3;
+ __ bne(&skip2);
+ __ li(r11, Operand::Zero());
+ __ b(&skip3);
+ __ bind(&skip2);
+ __ SmiToPtrArrayOffset(r11, r4);
+ __ addi(r11, r11, Operand(kParameterMapHeaderSize));
+ __ bind(&skip3);
+ }
// 2. Backing store.
__ SmiToPtrArrayOffset(r7, r5);
__ LoadP(r7,
MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
__ LoadP(r7, FieldMemOperand(r7, GlobalObject::kNativeContextOffset));
- Label skip4, skip5;
__ cmpi(r4, Operand::Zero());
- __ bne(&skip4);
- __ LoadP(r7, MemOperand(r7, kNormalOffset));
- __ b(&skip5);
- __ bind(&skip4);
- __ LoadP(r7, MemOperand(r7, kAliasedOffset));
- __ bind(&skip5);
+ if (CpuFeatures::IsSupported(ISELECT)) {
+ __ LoadP(r11, MemOperand(r7, kNormalOffset));
+ __ LoadP(r7, MemOperand(r7, kAliasedOffset));
+ __ isel(eq, r7, r11, r7);
+ } else {
+ Label skip4, skip5;
+ __ bne(&skip4);
+ __ LoadP(r7, MemOperand(r7, kNormalOffset));
+ __ b(&skip5);
+ __ bind(&skip4);
+ __ LoadP(r7, MemOperand(r7, kAliasedOffset));
+ __ bind(&skip5);
+ }
// r3 = address of new object (tagged)
// r4 = mapped parameter count (tagged)
// r5 = argument count (tagged)
// r7 = address of parameter map or backing store (tagged)
// Initialize parameter map. If there are no mapped arguments, we're done.
- Label skip_parameter_map, skip6;
+ Label skip_parameter_map;
__ CmpSmiLiteral(r4, Smi::FromInt(0), r0);
- __ bne(&skip6);
- // Move backing store address to r6, because it is
- // expected there when filling in the unmapped arguments.
- __ mr(r6, r7);
- __ b(&skip_parameter_map);
- __ bind(&skip6);
+ if (CpuFeatures::IsSupported(ISELECT)) {
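+ // The backing store address is expected in r6 when filling in the
+ // unmapped arguments.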
+ __ isel(eq, r6, r7, r6);
+ __ beq(&skip_parameter_map);
+ } else {
+ Label skip6;
+ __ bne(&skip6);
+ // Move backing store address to r6, because it is
+ // expected there when filling in the unmapped arguments.
+ __ mr(r6, r7);
+ __ b(&skip_parameter_map);
+ __ bind(&skip6);
+ }
__ LoadRoot(r9, Heap::kSloppyArgumentsElementsMapRootIndex);
__ StoreP(r9, FieldMemOperand(r7, FixedArray::kMapOffset), r0);
// entry is at the feedback vector slot given by r6 + 1.
__ LoadP(r5, FieldMemOperand(r8, FixedArray::kHeaderSize + kPointerSize));
} else {
- Label feedback_register_initialized;
// Put the AllocationSite from the feedback vector into r5, or undefined.
__ LoadP(r5, FieldMemOperand(r8, FixedArray::kHeaderSize));
__ LoadP(r8, FieldMemOperand(r5, AllocationSite::kMapOffset));
__ CompareRoot(r8, Heap::kAllocationSiteMapRootIndex);
- __ beq(&feedback_register_initialized);
- __ LoadRoot(r5, Heap::kUndefinedValueRootIndex);
- __ bind(&feedback_register_initialized);
+ if (CpuFeatures::IsSupported(ISELECT)) {
+ __ LoadRoot(r8, Heap::kUndefinedValueRootIndex);
+ __ isel(eq, r5, r5, r8);
+ } else {
+ Label feedback_register_initialized;
+ __ beq(&feedback_register_initialized);
+ __ LoadRoot(r5, Heap::kUndefinedValueRootIndex);
+ __ bind(&feedback_register_initialized);
+ }
}
__ AssertUndefinedOrAllocationSite(r5, r8);
}
+ // Pass function as original constructor.
+ __ mr(r6, r4);
+
// Jump to the function-specific construct stub.
Register jmp_reg = r7;
__ LoadP(jmp_reg, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
void CallIC_ArrayStub::Generate(MacroAssembler* masm) {
// r4 - function
// r6 - slot id
+ // r5 - vector
Label miss;
int argc = arg_count();
ParameterCount actual(argc);
- EmitLoadTypeFeedbackVector(masm, r5);
-
__ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, r7);
__ cmp(r4, r7);
__ bne(&miss);
void CallICStub::Generate(MacroAssembler* masm) {
// r4 - function
// r6 - slot id (Smi)
+ // r5 - vector
+ const int with_types_offset =
+ FixedArray::OffsetOfElementAt(TypeFeedbackVector::kWithTypesIndex);
+ const int generic_offset =
+ FixedArray::OffsetOfElementAt(TypeFeedbackVector::kGenericCountIndex);
Label extra_checks_or_miss, slow_start;
Label slow, non_function, wrap, cont;
Label have_js_function;
int argc = arg_count();
ParameterCount actual(argc);
- EmitLoadTypeFeedbackVector(masm, r5);
-
// The checks. First, does r4 match the recorded monomorphic target?
__ SmiToPtrArrayOffset(r7, r6);
__ add(r7, r5, r7);
__ LoadP(r7, FieldMemOperand(r7, FixedArray::kHeaderSize));
- __ cmp(r4, r7);
+
+ // We don't know that we have a weak cell. We might have a private symbol
+ // or an AllocationSite, but the memory is safe to examine.
+ // AllocationSite::kTransitionInfoOffset - contains a Smi or pointer to
+ // FixedArray.
+ // WeakCell::kValueOffset - contains a JSFunction or Smi(0)
+ // Symbol::kHashFieldSlot - if the low bit is 1, then the hash is not
+ // computed, meaning that it can't appear to be a pointer. If the low bit is
+ // 0, then hash is computed, but the 0 bit prevents the field from appearing
+ // to be a pointer.
+ STATIC_ASSERT(WeakCell::kSize >= kPointerSize);
+ STATIC_ASSERT(AllocationSite::kTransitionInfoOffset ==
+ WeakCell::kValueOffset &&
+ WeakCell::kValueOffset == Symbol::kHashFieldSlot);
+
+ __ LoadP(r8, FieldMemOperand(r7, WeakCell::kValueOffset));
+ __ cmp(r4, r8);
__ bne(&extra_checks_or_miss);
+ // The compare above could have been a SMI/SMI comparison. Guard against this
+ // convincing us that we have a monomorphic JSFunction.
+ __ JumpIfSmi(r4, &extra_checks_or_miss);
+
__ bind(&have_js_function);
if (CallAsMethod()) {
EmitContinueIfStrictOrNative(masm, &cont);
}
__ bind(&extra_checks_or_miss);
- Label miss;
+ Label uninitialized, miss;
__ CompareRoot(r7, Heap::kmegamorphic_symbolRootIndex);
__ beq(&slow_start);
+
+ // The following cases attempt to handle MISS cases without going to the
+ // runtime.
+ if (FLAG_trace_ic) {
+ __ b(&miss);
+ }
+
__ CompareRoot(r7, Heap::kuninitialized_symbolRootIndex);
+ __ beq(&uninitialized);
+
+ // We are going megamorphic. If the feedback is a JSFunction, it is fine
+ // to handle it here. More complex cases are dealt with in the runtime.
+ __ AssertNotSmi(r7);
+ __ CompareObjectType(r7, r8, r8, JS_FUNCTION_TYPE);
+ __ bne(&miss);
+ __ SmiToPtrArrayOffset(r7, r6);
+ __ add(r7, r5, r7);
+ __ LoadRoot(ip, Heap::kmegamorphic_symbolRootIndex);
+ __ StoreP(ip, FieldMemOperand(r7, FixedArray::kHeaderSize), r0);
+ // We have to update statistics for runtime profiling.
+ __ LoadP(r7, FieldMemOperand(r5, with_types_offset));
+ __ SubSmiLiteral(r7, r7, Smi::FromInt(1), r0);
+ __ StoreP(r7, FieldMemOperand(r5, with_types_offset), r0);
+ __ LoadP(r7, FieldMemOperand(r5, generic_offset));
+ __ AddSmiLiteral(r7, r7, Smi::FromInt(1), r0);
+ __ StoreP(r7, FieldMemOperand(r5, generic_offset), r0);
+ __ b(&slow_start);
+
+ __ bind(&uninitialized);
+
+ // We are going monomorphic, provided we actually have a JSFunction.
+ __ JumpIfSmi(r4, &miss);
+
+ // Go to the miss case if we do not have a function.
+ __ CompareObjectType(r4, r7, r7, JS_FUNCTION_TYPE);
+ __ bne(&miss);
+
+ // Make sure the function is not the Array() function, which requires special
+ // behavior on MISS.
+ __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, r7);
+ __ cmp(r4, r7);
__ beq(&miss);
- if (!FLAG_trace_ic) {
- // We are going megamorphic. If the feedback is a JSFunction, it is fine
- // to handle it here. More complex cases are dealt with in the runtime.
- __ AssertNotSmi(r7);
- __ CompareObjectType(r7, r8, r8, JS_FUNCTION_TYPE);
- __ bne(&miss);
- __ SmiToPtrArrayOffset(r7, r6);
- __ add(r7, r5, r7);
- __ LoadRoot(ip, Heap::kmegamorphic_symbolRootIndex);
- __ StoreP(ip, FieldMemOperand(r7, FixedArray::kHeaderSize), r0);
- // We have to update statistics for runtime profiling.
- const int with_types_offset =
- FixedArray::OffsetOfElementAt(TypeFeedbackVector::kWithTypesIndex);
- __ LoadP(r7, FieldMemOperand(r5, with_types_offset));
- __ SubSmiLiteral(r7, r7, Smi::FromInt(1), r0);
- __ StoreP(r7, FieldMemOperand(r5, with_types_offset), r0);
- const int generic_offset =
- FixedArray::OffsetOfElementAt(TypeFeedbackVector::kGenericCountIndex);
- __ LoadP(r7, FieldMemOperand(r5, generic_offset));
- __ AddSmiLiteral(r7, r7, Smi::FromInt(1), r0);
- __ StoreP(r7, FieldMemOperand(r5, generic_offset), r0);
- __ jmp(&slow_start);
+ // Update stats.
+ __ LoadP(r7, FieldMemOperand(r5, with_types_offset));
+ __ AddSmiLiteral(r7, r7, Smi::FromInt(1), r0);
+ __ StoreP(r7, FieldMemOperand(r5, with_types_offset), r0);
+
+ // Store the function. Use a stub since we need a frame for allocation.
+ // r5 - vector
+ // r6 - slot
+ // r4 - function
+ {
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+ CreateWeakCellStub create_stub(masm->isolate());
+ __ Push(r4);
+ __ CallStub(&create_stub);
+ __ Pop(r4);
}
- // We are here because tracing is on or we are going monomorphic.
+ __ b(&have_js_function);
+
+ // We are here because tracing is on or we encountered a MISS case we can't
+ // handle here.
__ bind(&miss);
GenerateMiss(masm);
void CallICStub::GenerateMiss(MacroAssembler* masm) {
- // Get the receiver of the function from the stack; 1 ~ return address.
- __ LoadP(r7, MemOperand(sp, (arg_count() + 1) * kPointerSize), r0);
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
- {
- FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
-
- // Push the receiver and the function and feedback info.
- __ Push(r7, r4, r5, r6);
+ // Push the function and feedback info.
+ __ Push(r4, r5, r6);
- // Call the entry.
- IC::UtilityId id = GetICState() == DEFAULT ? IC::kCallIC_Miss
- : IC::kCallIC_Customization_Miss;
+ // Call the entry.
+ IC::UtilityId id = GetICState() == DEFAULT ? IC::kCallIC_Miss
+ : IC::kCallIC_Customization_Miss;
- ExternalReference miss = ExternalReference(IC_Utility(id), masm->isolate());
- __ CallExternalReference(miss, 4);
+ ExternalReference miss = ExternalReference(IC_Utility(id), masm->isolate());
+ __ CallExternalReference(miss, 3);
- // Move result to r4 and exit the internal frame.
- __ mr(r4, r3);
- }
+ // Move result to r4 and exit the internal frame.
+ __ mr(r4, r3);
}
}
+void ToNumberStub::Generate(MacroAssembler* masm) {
+ // The ToNumber stub takes one argument in r3.
+ Label not_smi;
+ __ JumpIfNotSmi(r3, &not_smi);
+ __ blr();
+ __ bind(&not_smi);
+
+ Label not_heap_number;
+ __ LoadP(r4, FieldMemOperand(r3, HeapObject::kMapOffset));
+ __ lbz(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset));
+ // r3: object
+ // r4: instance type.
+ __ cmpi(r4, Operand(HEAP_NUMBER_TYPE));
+ __ bne(&not_heap_number);
+ __ blr();
+ __ bind(&not_heap_number);
+
+ Label not_string, slow_string;
+ __ cmpli(r4, Operand(FIRST_NONSTRING_TYPE));
+ __ bge(&not_string);
+ // Check if string has a cached array index.
+ __ lwz(r5, FieldMemOperand(r3, String::kHashFieldOffset));
+ __ And(r0, r5, Operand(String::kContainsCachedArrayIndexMask), SetRC);
+ __ bne(&slow_string, cr0);
+ __ IndexFromHash(r5, r3);
+ __ blr();
+ __ bind(&slow_string);
+ __ push(r3); // Push argument.
+ __ TailCallRuntime(Runtime::kStringToNumber, 1, 1);
+ __ bind(&not_string);
+
+ Label not_oddball;
+ __ cmpi(r4, Operand(ODDBALL_TYPE));
+ __ bne(&not_oddball);
+ __ LoadP(r3, FieldMemOperand(r3, Oddball::kToNumberOffset));
+ __ blr();
+ __ bind(&not_oddball);
+
+ __ push(r3); // Push argument.
+ __ InvokeBuiltin(Builtins::TO_NUMBER, JUMP_FUNCTION);
+}
+
+
void StringHelper::GenerateFlatOneByteStringEquals(MacroAssembler* masm,
Register left,
Register right,
void StringHelper::GenerateCompareFlatOneByteStrings(
MacroAssembler* masm, Register left, Register right, Register scratch1,
Register scratch2, Register scratch3) {
- Label skip, result_not_equal, compare_lengths;
+ Label result_not_equal, compare_lengths;
// Find minimum length and length difference.
__ LoadP(scratch1, FieldMemOperand(left, String::kLengthOffset));
__ LoadP(scratch2, FieldMemOperand(right, String::kLengthOffset));
__ sub(scratch3, scratch1, scratch2, LeaveOE, SetRC);
Register length_delta = scratch3;
- __ ble(&skip, cr0);
- __ mr(scratch1, scratch2);
- __ bind(&skip);
+ if (CpuFeatures::IsSupported(ISELECT)) {
+ __ isel(gt, scratch1, scratch2, scratch1, cr0);
+ } else {
+ Label skip;
+ __ ble(&skip, cr0);
+ __ mr(scratch1, scratch2);
+ __ bind(&skip);
+ }
Register min_length = scratch1;
STATIC_ASSERT(kSmiTag == 0);
__ cmpi(min_length, Operand::Zero());
__ bind(&result_not_equal);
// Conditionally update the result based either on length_delta or
// the last comparison performed in the loop above.
- Label less_equal, equal;
- __ ble(&less_equal);
- __ LoadSmiLiteral(r3, Smi::FromInt(GREATER));
- __ Ret();
- __ bind(&less_equal);
- __ beq(&equal);
- __ LoadSmiLiteral(r3, Smi::FromInt(LESS));
- __ bind(&equal);
- __ Ret();
+ if (CpuFeatures::IsSupported(ISELECT)) {
+ __ li(r4, Operand(GREATER));
+ __ li(r5, Operand(LESS));
+ __ isel(eq, r3, r0, r4);
+ __ isel(lt, r3, r5, r3);
+ __ Ret();
+ } else {
+ Label less_equal, equal;
+ __ ble(&less_equal);
+ __ LoadSmiLiteral(r3, Smi::FromInt(GREATER));
+ __ Ret();
+ __ bind(&less_equal);
+ __ beq(&equal);
+ __ LoadSmiLiteral(r3, Smi::FromInt(LESS));
+ __ bind(&equal);
+ __ Ret();
+ }
}
__ bunordered(&unordered);
// Return a result of -1, 0, or 1, based on status bits.
- __ beq(&equal);
- __ blt(&less_than);
- // assume greater than
- __ li(r3, Operand(GREATER));
- __ Ret();
- __ bind(&equal);
- __ li(r3, Operand(EQUAL));
- __ Ret();
- __ bind(&less_than);
- __ li(r3, Operand(LESS));
- __ Ret();
+ if (CpuFeatures::IsSupported(ISELECT)) {
+ DCHECK(EQUAL == 0);
+ __ li(r4, Operand(GREATER));
+ __ li(r5, Operand(LESS));
+ __ isel(eq, r3, r0, r4);
+ __ isel(lt, r3, r5, r3);
+ __ Ret();
+ } else {
+ __ beq(&equal);
+ __ blt(&less_than);
+ // Assume greater than.
+ __ li(r3, Operand(GREATER));
+ __ Ret();
+ __ bind(&equal);
+ __ li(r3, Operand(EQUAL));
+ __ Ret();
+ __ bind(&less_than);
+ __ li(r3, Operand(LESS));
+ __ Ret();
+ }
__ bind(&unordered);
__ bind(&generic_stub);
void CompareICStub::GenerateKnownObjects(MacroAssembler* masm) {
Label miss;
+ Handle<WeakCell> cell = Map::WeakCellForMap(known_map_);
__ and_(r5, r4, r3);
__ JumpIfSmi(r5, &miss);
+ __ GetWeakValue(r7, cell);
__ LoadP(r5, FieldMemOperand(r3, HeapObject::kMapOffset));
__ LoadP(r6, FieldMemOperand(r4, HeapObject::kMapOffset));
- __ Cmpi(r5, Operand(known_map_), r0);
+ __ cmp(r5, r7);
__ bne(&miss);
- __ Cmpi(r6, Operand(known_map_), r0);
+ __ cmp(r6, r7);
__ bne(&miss);
__ sub(r3, r3, r4);
__ and_(index, index, ip);
// Scale the index by multiplying by the entry size.
- DCHECK(NameDictionary::kEntrySize == 3);
+ STATIC_ASSERT(NameDictionary::kEntrySize == 3);
__ ShiftLeftImm(ip, index, Operand(1));
__ add(index, index, ip); // index *= 3.
__ ShiftLeftImm(scratch, index, Operand(1));
__ add(index, index, scratch); // index *= 3.
- DCHECK_EQ(kSmiTagSize, 1);
__ ShiftLeftImm(scratch, index, Operand(kPointerSizeLog2));
__ add(index, dictionary, scratch);
__ LoadP(entry_key, FieldMemOperand(index, kElementsStartOffset));
}
+void CallICTrampolineStub::Generate(MacroAssembler* masm) {
+ EmitLoadTypeFeedbackVector(masm, r5);
+ CallICStub stub(isolate(), state());
+ __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
+}
+
+
+void CallIC_ArrayTrampolineStub::Generate(MacroAssembler* masm) {
+ EmitLoadTypeFeedbackVector(masm, r5);
+ CallIC_ArrayStub stub(isolate(), state());
+ __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
+}
+
+
void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
if (masm->isolate()->function_entry_hook() != NULL) {
PredictableCodeSizeScope predictable(masm,
}
-void CallApiFunctionStub::Generate(MacroAssembler* masm) {
+static int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
+ return ref0.address() - ref1.address();
+}
+
+
+// Calls an API function. Allocates HandleScope, extracts returned value
+// from handle and propagates exceptions. Restores context. stack_space is
+// the space to be unwound on exit (it includes the call JS arguments space
+// and the additional space allocated for the fast call).
+static void CallApiFunctionAndReturn(MacroAssembler* masm,
+ Register function_address,
+ ExternalReference thunk_ref,
+ int stack_space,
+ MemOperand* stack_space_operand,
+ MemOperand return_value_operand,
+ MemOperand* context_restore_operand) {
+ Isolate* isolate = masm->isolate();
+ ExternalReference next_address =
+ ExternalReference::handle_scope_next_address(isolate);
+ const int kNextOffset = 0;
+ const int kLimitOffset = AddressOffset(
+ ExternalReference::handle_scope_limit_address(isolate), next_address);
+ const int kLevelOffset = AddressOffset(
+ ExternalReference::handle_scope_level_address(isolate), next_address);
+
+ // Additional parameter is the address of the actual callback.
+ DCHECK(function_address.is(r4) || function_address.is(r5));
+ Register scratch = r6;
+
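+ // Route the call through the profiling thunk when the profiler is active
+ // so it can observe the callback address; otherwise call the callback
+ // directly.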
+ __ mov(scratch, Operand(ExternalReference::is_profiling_address(isolate)));
+ __ lbz(scratch, MemOperand(scratch, 0));
+ __ cmpi(scratch, Operand::Zero());
+
+ if (CpuFeatures::IsSupported(ISELECT)) {
+ __ mov(scratch, Operand(thunk_ref));
+ __ isel(eq, scratch, function_address, scratch);
+ } else {
+ Label profiler_disabled;
+ Label end_profiler_check;
+ __ beq(&profiler_disabled);
+ __ mov(scratch, Operand(thunk_ref));
+ __ b(&end_profiler_check);
+ __ bind(&profiler_disabled);
+ __ mr(scratch, function_address);
+ __ bind(&end_profiler_check);
+ }
+
+ // Allocate HandleScope in callee-save registers.
+ // r17 - next_address
+ // r14 - next_address->kNextOffset
+ // r15 - next_address->kLimitOffset
+ // r16 - next_address->kLevelOffset
+ __ mov(r17, Operand(next_address));
+ __ LoadP(r14, MemOperand(r17, kNextOffset));
+ __ LoadP(r15, MemOperand(r17, kLimitOffset));
+ __ lwz(r16, MemOperand(r17, kLevelOffset));
+ __ addi(r16, r16, Operand(1));
+ __ stw(r16, MemOperand(r17, kLevelOffset));
+
+ if (FLAG_log_timer_events) {
+ FrameScope frame(masm, StackFrame::MANUAL);
+ __ PushSafepointRegisters();
+ __ PrepareCallCFunction(1, r3);
+ __ mov(r3, Operand(ExternalReference::isolate_address(isolate)));
+ __ CallCFunction(ExternalReference::log_enter_external_function(isolate),
+ 1);
+ __ PopSafepointRegisters();
+ }
+
+ // Native call returns to the DirectCEntry stub which redirects to the
+ // return address pushed on stack (could have moved after GC).
+ // DirectCEntry stub itself is generated early and never moves.
+ DirectCEntryStub stub(isolate);
+ stub.GenerateCall(masm, scratch);
+
+ if (FLAG_log_timer_events) {
+ FrameScope frame(masm, StackFrame::MANUAL);
+ __ PushSafepointRegisters();
+ __ PrepareCallCFunction(1, r3);
+ __ mov(r3, Operand(ExternalReference::isolate_address(isolate)));
+ __ CallCFunction(ExternalReference::log_leave_external_function(isolate),
+ 1);
+ __ PopSafepointRegisters();
+ }
+
+ Label promote_scheduled_exception;
+ Label exception_handled;
+ Label delete_allocated_handles;
+ Label leave_exit_frame;
+ Label return_value_loaded;
+
+ // Load the value from ReturnValue.
+ __ LoadP(r3, return_value_operand);
+ __ bind(&return_value_loaded);
+ // No more valid handles (the result handle was the last one). Restore
+ // previous handle scope.
+ __ StoreP(r14, MemOperand(r17, kNextOffset));
+ if (__ emit_debug_code()) {
+ __ lwz(r4, MemOperand(r17, kLevelOffset));
+ __ cmp(r4, r16);
+ __ Check(eq, kUnexpectedLevelAfterReturnFromApiCall);
+ }
+ __ subi(r16, r16, Operand(1));
+ __ stw(r16, MemOperand(r17, kLevelOffset));
+ __ LoadP(r0, MemOperand(r17, kLimitOffset));
+ __ cmp(r15, r0);
+ __ bne(&delete_allocated_handles);
+
+ // Check if the function scheduled an exception.
+ __ bind(&leave_exit_frame);
+ __ LoadRoot(r14, Heap::kTheHoleValueRootIndex);
+ __ mov(r15, Operand(ExternalReference::scheduled_exception_address(isolate)));
+ __ LoadP(r15, MemOperand(r15));
+ __ cmp(r14, r15);
+ __ bne(&promote_scheduled_exception);
+ __ bind(&exception_handled);
+
+ bool restore_context = context_restore_operand != NULL;
+ if (restore_context) {
+ __ LoadP(cp, *context_restore_operand);
+ }
+ // LeaveExitFrame expects unwind space to be in a register.
+ if (stack_space_operand != NULL) {
+ __ lwz(r14, *stack_space_operand);
+ } else {
+ __ mov(r14, Operand(stack_space));
+ }
+ __ LeaveExitFrame(false, r14, !restore_context, stack_space_operand != NULL);
+ __ blr();
+
+ __ bind(&promote_scheduled_exception);
+ {
+ FrameScope frame(masm, StackFrame::INTERNAL);
+ __ CallExternalReference(
+ ExternalReference(Runtime::kPromoteScheduledException, isolate), 0);
+ }
+ __ jmp(&exception_handled);
+
+ // HandleScope limit has changed. Delete allocated extensions.
+ __ bind(&delete_allocated_handles);
+ __ StoreP(r15, MemOperand(r17, kLimitOffset));
+ __ mr(r14, r3);
+ __ PrepareCallCFunction(1, r15);
+ __ mov(r3, Operand(ExternalReference::isolate_address(isolate)));
+ __ CallCFunction(ExternalReference::delete_handle_scope_extensions(isolate),
+ 1);
+ __ mr(r3, r14);
+ __ b(&leave_exit_frame);
+}
+
+
+static void CallApiFunctionStubHelper(MacroAssembler* masm,
+ const ParameterCount& argc,
+ bool return_first_arg,
+ bool call_data_undefined) {
// ----------- S t a t e -------------
// -- r3 : callee
// -- r7 : call_data
// -- r5 : holder
// -- r4 : api_function_address
+ // -- r6 : number of arguments if argc is a register
// -- cp : context
// --
// -- sp[0] : last argument
Register api_function_address = r4;
Register context = cp;
- int argc = this->argc();
- bool is_store = this->is_store();
- bool call_data_undefined = this->call_data_undefined();
-
typedef FunctionCallbackArguments FCA;
STATIC_ASSERT(FCA::kContextSaveIndex == 6);
STATIC_ASSERT(FCA::kHolderIndex == 0);
STATIC_ASSERT(FCA::kArgsLength == 7);
+ DCHECK(argc.is_immediate() || r3.is(argc.reg()));
+
// context save
__ push(context);
// load context from callee
// return value default
__ push(scratch);
// isolate
- __ mov(scratch, Operand(ExternalReference::isolate_address(isolate())));
+ __ mov(scratch, Operand(ExternalReference::isolate_address(masm->isolate())));
__ push(scratch);
// holder
__ push(holder);
// [0] space for DirectCEntryStub's LR save
// [1-4] FunctionCallbackInfo
const int kApiStackSpace = 5;
+ const int kFunctionCallbackInfoOffset =
+ (kStackFrameExtraParamSlot + 1) * kPointerSize;
FrameScope frame_scope(masm, StackFrame::MANUAL);
__ EnterExitFrame(false, kApiStackSpace);
DCHECK(!api_function_address.is(r3) && !scratch.is(r3));
// r3 = FunctionCallbackInfo&
// Arguments is after the return address.
- __ addi(r3, sp, Operand((kStackFrameExtraParamSlot + 1) * kPointerSize));
+ __ addi(r3, sp, Operand(kFunctionCallbackInfoOffset));
// FunctionCallbackInfo::implicit_args_
__ StoreP(scratch, MemOperand(r3, 0 * kPointerSize));
- // FunctionCallbackInfo::values_
- __ addi(ip, scratch, Operand((FCA::kArgsLength - 1 + argc) * kPointerSize));
- __ StoreP(ip, MemOperand(r3, 1 * kPointerSize));
- // FunctionCallbackInfo::length_ = argc
- __ li(ip, Operand(argc));
- __ stw(ip, MemOperand(r3, 2 * kPointerSize));
- // FunctionCallbackInfo::is_construct_call = 0
- __ li(ip, Operand::Zero());
- __ stw(ip, MemOperand(r3, 2 * kPointerSize + kIntSize));
-
- const int kStackUnwindSpace = argc + FCA::kArgsLength + 1;
+ if (argc.is_immediate()) {
+ // FunctionCallbackInfo::values_
+ __ addi(ip, scratch,
+ Operand((FCA::kArgsLength - 1 + argc.immediate()) * kPointerSize));
+ __ StoreP(ip, MemOperand(r3, 1 * kPointerSize));
+ // FunctionCallbackInfo::length_ = argc
+ __ li(ip, Operand(argc.immediate()));
+ __ stw(ip, MemOperand(r3, 2 * kPointerSize));
+ // FunctionCallbackInfo::is_construct_call_ = 0
+ __ li(ip, Operand::Zero());
+ __ stw(ip, MemOperand(r3, 2 * kPointerSize + kIntSize));
+ } else {
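+ // argc is in a register: compute values_ dynamically, and stash the byte
+ // offset of the last argument in is_construct_call_, which also serves as
+ // the stack space to unwind (read back via stack_space_operand).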
+ __ ShiftLeftImm(ip, argc.reg(), Operand(kPointerSizeLog2));
+ __ addi(ip, ip, Operand((FCA::kArgsLength - 1) * kPointerSize));
+ // FunctionCallbackInfo::values_
+ __ add(r0, scratch, ip);
+ __ StoreP(r0, MemOperand(r3, 1 * kPointerSize));
+ // FunctionCallbackInfo::length_ = argc
+ __ stw(argc.reg(), MemOperand(r3, 2 * kPointerSize));
+ // FunctionCallbackInfo::is_construct_call_
+ __ stw(ip, MemOperand(r3, 2 * kPointerSize + kIntSize));
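+    // When argc is dynamic, this slot doubles as the stack-unwind amount:
+    // it is read back below through stack_space_operand by
+    // CallApiFunctionAndReturn.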
+ }
+
ExternalReference thunk_ref =
- ExternalReference::invoke_function_callback(isolate());
+ ExternalReference::invoke_function_callback(masm->isolate());
AllowExternalCallThatCantCauseGC scope(masm);
MemOperand context_restore_operand(
fp, (2 + FCA::kContextSaveIndex) * kPointerSize);
  // Stores return the first JS argument
int return_value_offset = 0;
- if (is_store) {
+ if (return_first_arg) {
return_value_offset = 2 + FCA::kArgsLength;
} else {
return_value_offset = 2 + FCA::kReturnValueOffset;
}
MemOperand return_value_operand(fp, return_value_offset * kPointerSize);
+ int stack_space = 0;
+ MemOperand is_construct_call_operand =
+ MemOperand(sp, kFunctionCallbackInfoOffset + 2 * kPointerSize + kIntSize);
+ MemOperand* stack_space_operand = &is_construct_call_operand;
+ if (argc.is_immediate()) {
+ stack_space = argc.immediate() + FCA::kArgsLength + 1;
+ stack_space_operand = NULL;
+ }
+ CallApiFunctionAndReturn(masm, api_function_address, thunk_ref, stack_space,
+ stack_space_operand, return_value_operand,
+ &context_restore_operand);
+}
+
+
+void CallApiFunctionStub::Generate(MacroAssembler* masm) {
+ bool call_data_undefined = this->call_data_undefined();
+ CallApiFunctionStubHelper(masm, ParameterCount(r6), false,
+ call_data_undefined);
+}
+
- __ CallApiFunctionAndReturn(api_function_address, thunk_ref,
- kStackUnwindSpace, return_value_operand,
- &context_restore_operand);
+void CallApiAccessorStub::Generate(MacroAssembler* masm) {
+ bool is_store = this->is_store();
+ int argc = this->argc();
+ bool call_data_undefined = this->call_data_undefined();
+ CallApiFunctionStubHelper(masm, ParameterCount(argc), is_store,
+ call_data_undefined);
}
ExternalReference thunk_ref =
ExternalReference::invoke_accessor_getter_callback(isolate());
- __ CallApiFunctionAndReturn(api_function_address, thunk_ref,
- kStackUnwindSpace,
- MemOperand(fp, 6 * kPointerSize), NULL);
+ CallApiFunctionAndReturn(masm, api_function_address, thunk_ref,
+ kStackUnwindSpace, NULL,
+ MemOperand(fp, 6 * kPointerSize), NULL);
}
enum Mode { STORE_BUFFER_ONLY, INCREMENTAL, INCREMENTAL_COMPACTION };
- virtual bool SometimesSetsUpAFrame() { return false; }
+ bool SometimesSetsUpAFrame() OVERRIDE { return false; }
static void PatchBranchIntoNop(MacroAssembler* masm, int pos) {
// Consider adding DCHECK here to catch bad patching
Mode mode);
void InformIncrementalMarker(MacroAssembler* masm);
- void Activate(Code* code) {
+ void Activate(Code* code) OVERRIDE {
code->GetHeap()->incremental_marking()->ActivateGeneratedStub(code);
}
void GenerateCall(MacroAssembler* masm, Register target);
private:
- bool NeedsImmovableCode() { return true; }
+ bool NeedsImmovableCode() OVERRIDE { return true; }
DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
DEFINE_PLATFORM_CODE_STUB(DirectCEntry, PlatformCodeStub);
Label* done, Register elements,
Register name, Register r0, Register r1);
- virtual bool SometimesSetsUpAFrame() { return false; }
+ bool SometimesSetsUpAFrame() OVERRIDE { return false; }
private:
static const int kInlinedProbes = 4;
MacroAssembler* masm, Register receiver, Register key, Register value,
Register target_map, AllocationSiteMode mode, Label* fail) {
// lr contains the return address
- Label loop, entry, convert_hole, gc_required, only_change_map, done;
+ Label loop, entry, convert_hole, only_change_map, done;
Register elements = r7;
Register length = r8;
Register array = r9;
// target_map parameter can be clobbered.
Register scratch1 = target_map;
- Register scratch2 = r11;
+ Register scratch2 = r10;
+ Register scratch3 = r11;
+ Register scratch4 = r14;
// Verify input registers don't conflict with locals.
DCHECK(!AreAliased(receiver, key, value, target_map, elements, length, array,
__ CompareRoot(elements, Heap::kEmptyFixedArrayRootIndex);
__ beq(&only_change_map);
- // Preserve lr and use r17 as a temporary register.
- __ mflr(r0);
- __ Push(r0);
-
__ LoadP(length, FieldMemOperand(elements, FixedArray::kLengthOffset));
// length: number of elements (smi-tagged)
// Allocate new FixedDoubleArray.
- __ SmiToDoubleArrayOffset(r17, length);
- __ addi(r17, r17, Operand(FixedDoubleArray::kHeaderSize));
- __ Allocate(r17, array, r10, scratch2, &gc_required, DOUBLE_ALIGNMENT);
+ __ SmiToDoubleArrayOffset(scratch3, length);
+ __ addi(scratch3, scratch3, Operand(FixedDoubleArray::kHeaderSize));
+ __ Allocate(scratch3, array, scratch4, scratch2, fail, DOUBLE_ALIGNMENT);
+ // array: destination FixedDoubleArray, not tagged as heap object.
+ // elements: source FixedArray.
// Set destination FixedDoubleArray's length and map.
__ LoadRoot(scratch2, Heap::kFixedDoubleArrayMapRootIndex);
__ StoreP(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset), r0);
__ RecordWriteField(receiver, HeapObject::kMapOffset, target_map, scratch2,
- kLRHasBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET,
+ kLRHasNotBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
// Replace receiver's backing store with newly created FixedDoubleArray.
__ addi(scratch1, array, Operand(kHeapObjectTag));
__ StoreP(scratch1, FieldMemOperand(receiver, JSObject::kElementsOffset), r0);
__ RecordWriteField(receiver, JSObject::kElementsOffset, scratch1, scratch2,
- kLRHasBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
+ kLRHasNotBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
// Prepare for conversion loop.
- __ addi(target_map, elements,
+ __ addi(scratch1, elements,
Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ addi(r10, array, Operand(FixedDoubleArray::kHeaderSize));
- __ SmiToDoubleArrayOffset(array, length);
- __ add(array_end, r10, array);
+ __ addi(scratch2, array, Operand(FixedDoubleArray::kHeaderSize));
+ __ SmiToDoubleArrayOffset(array_end, length);
+ __ add(array_end, scratch2, array_end);
// Repurpose registers no longer in use.
#if V8_TARGET_ARCH_PPC64
Register hole_int64 = elements;
+ __ mov(hole_int64, Operand(kHoleNanInt64));
#else
Register hole_lower = elements;
Register hole_upper = length;
+ __ mov(hole_lower, Operand(kHoleNanLower32));
+ __ mov(hole_upper, Operand(kHoleNanUpper32));
#endif
// scratch1: begin of source FixedArray element fields, not tagged
  // hole_lower: kHoleNanLower32 OR hole_int64
OMIT_SMI_CHECK);
__ b(&done);
- // Call into runtime if GC is required.
- __ bind(&gc_required);
- __ Pop(r0);
- __ mtlr(r0);
- __ b(fail);
-
// Convert and copy elements.
__ bind(&loop);
- __ LoadP(r11, MemOperand(scratch1));
+ __ LoadP(scratch3, MemOperand(scratch1));
__ addi(scratch1, scratch1, Operand(kPointerSize));
- // r11: current element
- __ UntagAndJumpIfNotSmi(r11, r11, &convert_hole);
+ // scratch3: current element
+ __ UntagAndJumpIfNotSmi(scratch3, scratch3, &convert_hole);
// Normal smi, convert to double and store.
- __ ConvertIntToDouble(r11, d0);
+ __ ConvertIntToDouble(scratch3, d0);
__ stfd(d0, MemOperand(scratch2, 0));
- __ addi(r10, r10, Operand(8));
-
+ __ addi(scratch2, scratch2, Operand(8));
__ b(&entry);
// Hole found, store the-hole NaN.
__ bind(&convert_hole);
if (FLAG_debug_code) {
- // Restore a "smi-untagged" heap object.
- __ LoadP(r11, MemOperand(r6, -kPointerSize));
- __ CompareRoot(r11, Heap::kTheHoleValueRootIndex);
+ __ LoadP(scratch3, MemOperand(scratch1, -kPointerSize));
+ __ CompareRoot(scratch3, Heap::kTheHoleValueRootIndex);
__ Assert(eq, kObjectFoundInSmiOnlyArray);
}
#if V8_TARGET_ARCH_PPC64
- __ std(hole_int64, MemOperand(r10, 0));
+ __ std(hole_int64, MemOperand(scratch2, 0));
#else
- __ stw(hole_upper, MemOperand(r10, Register::kExponentOffset));
- __ stw(hole_lower, MemOperand(r10, Register::kMantissaOffset));
+ __ stw(hole_upper, MemOperand(scratch2, Register::kExponentOffset));
+ __ stw(hole_lower, MemOperand(scratch2, Register::kMantissaOffset));
#endif
- __ addi(r10, r10, Operand(8));
+ __ addi(scratch2, scratch2, Operand(8));
__ bind(&entry);
- __ cmp(r10, array_end);
+ __ cmp(scratch2, array_end);
__ blt(&loop);
- __ Pop(r0);
- __ mtlr(r0);
__ bind(&done);
}
MacroAssembler* masm, Register receiver, Register key, Register value,
Register target_map, AllocationSiteMode mode, Label* fail) {
// Register lr contains the return address.
- Label entry, loop, convert_hole, gc_required, only_change_map;
+ Label loop, convert_hole, gc_required, only_change_map;
Register elements = r7;
Register array = r9;
Register length = r8;
- Register scratch = r11;
+ Register scratch = r10;
+ Register scratch3 = r11;
+ Register hole_value = r14;
// Verify input registers don't conflict with locals.
DCHECK(!AreAliased(receiver, key, value, target_map, elements, array, length,
__ addi(src_elements, elements,
Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag));
__ SmiToPtrArrayOffset(length, length);
- __ LoadRoot(r10, Heap::kTheHoleValueRootIndex);
+ __ LoadRoot(hole_value, Heap::kTheHoleValueRootIndex);
Label initialization_loop, loop_done;
__ ShiftRightImm(r0, length, Operand(kPointerSizeLog2), SetRC);
__ addi(dst_elements, array,
Operand(FixedArray::kHeaderSize - kHeapObjectTag - kPointerSize));
__ bind(&initialization_loop);
- __ StorePU(r10, MemOperand(dst_elements, kPointerSize));
+ __ StorePU(hole_value, MemOperand(dst_elements, kPointerSize));
__ bdnz(&initialization_loop);
__ addi(dst_elements, array,
// not tagged, +4
// dst_end: end of destination FixedArray, not tagged
// array: destination FixedArray
- // r10: the-hole pointer
+ // hole_value: the-hole pointer
// heap_number_map: heap number map
__ b(&loop);
// Replace the-hole NaN with the-hole pointer.
__ bind(&convert_hole);
- __ StoreP(r10, MemOperand(dst_elements));
+ __ StoreP(hole_value, MemOperand(dst_elements));
__ addi(dst_elements, dst_elements, Operand(kPointerSize));
__ cmpl(dst_elements, dst_end);
__ bge(&loop_done);
// Non-hole double, copy value into a heap number.
Register heap_number = receiver;
Register scratch2 = value;
- __ AllocateHeapNumber(heap_number, scratch2, r11, heap_number_map,
+ __ AllocateHeapNumber(heap_number, scratch2, scratch3, heap_number_map,
&gc_required);
// heap_number: new heap number
#if V8_TARGET_ARCH_PPC64
__ addi(dst_elements, dst_elements, Operand(kPointerSize));
__ RecordWrite(array, scratch2, heap_number, kLRHasNotBeenSaved,
kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
- __ b(&entry);
-
- // Replace the-hole NaN with the-hole pointer.
- __ bind(&convert_hole);
- __ StoreP(r10, MemOperand(dst_elements));
- __ addi(dst_elements, dst_elements, Operand(kPointerSize));
-
- __ bind(&entry);
__ cmpl(dst_elements, dst_end);
__ blt(&loop);
__ bind(&loop_done);
SUBFCX = 8 << 1,
ADDCX = 10 << 1,
MULHWUX = 11 << 1,
+ ISEL = 15 << 1,
MFCR = 19 << 1,
LWARX = 20 << 1,
LDX = 21 << 1,
STWX = 151 << 1, // store word w/ x-form
MTVSRD = 179 << 1, // Move To VSR Doubleword
STDUX = 181 << 1,
- STWUX = 183 << 1, // store word w/ update x-form
- /*
- MTCRF
- MTMSR
- STWCXx
- SUBFZEX
- */
- ADDZEX = 202 << 1, // Add to Zero Extended
- /*
- MTSR
- */
+ STWUX = 183 << 1, // store word w/ update x-form
+ /*
+ MTCRF
+ MTMSR
+ STWCXx
+ SUBFZEX
+ */
+ ADDZEX = 202 << 1, // Add to Zero Extended
+ /*
+ MTSR
+ */
MTVSRWA = 211 << 1, // Move To VSR Word Algebraic
STBX = 215 << 1, // store byte w/ x-form
MULLD = 233 << 1, // Multiply Low Double Word
ADDX = 266 << 1, // Add
LHZX = 279 << 1, // load half-word zero w/ x-form
LHZUX = 311 << 1, // load half-word zero w/ update x-form
+ LWAX = 341 << 1, // load word algebraic w/ x-form
LHAX = 343 << 1, // load half-word algebraic w/ x-form
LHAUX = 375 << 1, // load half-word algebraic w/ update x-form
XORX = 316 << 1, // Exclusive OR
MFSPR = 339 << 1, // Move from Special-Purpose-Register
STHX = 407 << 1, // store half-word w/ x-form
+ ORC = 412 << 1, // Or with Complement
STHUX = 439 << 1, // store half-word w/ update x-form
ORX = 444 << 1, // Or
+ DIVDU = 457 << 1, // Divide Double Word Unsigned
+ DIVWU = 459 << 1, // Divide Word Unsigned
MTSPR = 467 << 1, // Move to Special-Purpose-Register
DIVD = 489 << 1, // Divide Double Word
DIVW = 491 << 1, // Divide Word
FMR = 72 << 1, // Floating Move Register
MTFSFI = 134 << 1, // Move to FPSCR Field Immediate
FABS = 264 << 1, // Floating Absolute Value
+ FRIN = 392 << 1, // Floating Round to Integer Nearest
+ FRIZ = 424 << 1, // Floating Round to Integer Toward Zero
+ FRIP = 456 << 1, // Floating Round to Integer Plus
FRIM = 488 << 1, // Floating Round to Integer Minus
MFFS = 583 << 1, // move from FPSCR x-form
MTFSF = 711 << 1, // move to FPSCR fields XFL-form
kTOMask = 0x1f << 21
};
-// the following is to differentiate different faked opcodes for
-// the BOGUS PPC instruction we invented (when bit 25 is 0) or to mark
-// different stub code (when bit 25 is 1)
-// - use primary opcode 1 for undefined instruction
-// - use bit 25 to indicate whether the opcode is for fake-arm
-// instr or stub-marker
-// - use the least significant 6-bit to indicate FAKE_OPCODE_T or
-// MARKER_T
-#define FAKE_OPCODE 1 << 26
-#define MARKER_SUBOPCODE_BIT 25
-#define MARKER_SUBOPCODE 1 << MARKER_SUBOPCODE_BIT
-#define FAKER_SUBOPCODE 0 << MARKER_SUBOPCODE_BIT
-
-enum FAKE_OPCODE_T {
- fBKPT = 14,
- fLastFaker // can't be more than 128 (2^^7)
-};
-#define FAKE_OPCODE_HIGH_BIT 7 // fake opcode has to fall into bit 0~7
-#define F_NEXT_AVAILABLE_STUB_MARKER 369 // must be less than 2^^9 (512)
-#define STUB_MARKER_HIGH_BIT 9 // stub marker has to fall into bit 0~9
// -----------------------------------------------------------------------------
// Addressing modes and instruction variants.
// break point
kBreakpoint = 0x821008, // bits23-0 of 0x7d821008 = twge r2, r2
// stop
- kStopCode = 1 << 23,
- // info
- kInfo = 0x9ff808 // bits23-0 of 0x7d9ff808 = twge r31, r31
+ kStopCode = 1 << 23
};
const uint32_t kStopCodeMask = kStopCode - 1;
const uint32_t kMaxStopCode = kStopCode - 1;
}
+void Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(Handle<Code> code) {
+ // Empty because there is no need for relocation information for the code
+ // patching in Deoptimizer::PatchCodeForDeoptimization below.
+}
+
+
void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) {
Address code_start_address = code->instruction_start();
// Set the register values. The values are not important as there are no
// callee saved registers in JavaScript frames, so all registers are
// spilled. Registers fp and sp are set to the correct values though.
+ // We ensure the values are Smis to avoid confusing the garbage
+  // collector in the event that any values are retrieved and stored
+ // elsewhere.
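+  // (Smis carry a zero low tag bit, unlike tagged heap pointers, so these
+  // register values can never be mistaken for references into the heap.)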
for (int i = 0; i < Register::kNumRegisters; i++) {
- input_->SetRegister(i, i * 4);
+ input_->SetRegister(i, reinterpret_cast<intptr_t>(Smi::FromInt(i)));
}
input_->SetRegister(sp.code(), reinterpret_cast<intptr_t>(frame->sp()));
input_->SetRegister(fp.code(), reinterpret_cast<intptr_t>(frame->fp()));
void Format(Instruction* instr, const char* format);
void Unknown(Instruction* instr);
void UnknownFormat(Instruction* instr, const char* opcname);
- void MarkerFormat(Instruction* instr, const char* opcname, int id);
void DecodeExt1(Instruction* instr);
void DecodeExt2(Instruction* instr);
}
-void Decoder::MarkerFormat(Instruction* instr, const char* name, int id) {
- char buffer[100];
- snprintf(buffer, sizeof(buffer), "%s %d", name, id);
- Format(instr, buffer);
-}
-
-
void Decoder::DecodeExt1(Instruction* instr) {
switch (instr->Bits(10, 1) << 1) {
case MCRF: {
Format(instr, "cmpw 'ra, 'rb");
}
#endif
- break;
+ return;
}
case SLWX: {
Format(instr, "slw'. 'ra, 'rs, 'rb");
- break;
+ return;
}
#if V8_TARGET_ARCH_PPC64
case SLDX: {
Format(instr, "sld'. 'ra, 'rs, 'rb");
- break;
+ return;
}
#endif
case SUBFCX: {
Format(instr, "subfc'. 'rt, 'ra, 'rb");
- break;
+ return;
}
case ADDCX: {
Format(instr, "addc'. 'rt, 'ra, 'rb");
- break;
+ return;
}
case CNTLZWX: {
Format(instr, "cntlzw'. 'ra, 'rs");
- break;
+ return;
}
#if V8_TARGET_ARCH_PPC64
case CNTLZDX: {
Format(instr, "cntlzd'. 'ra, 'rs");
- break;
+ return;
}
#endif
case ANDX: {
Format(instr, "and'. 'ra, 'rs, 'rb");
- break;
+ return;
}
case ANDCX: {
Format(instr, "andc'. 'ra, 'rs, 'rb");
- break;
+ return;
}
case CMPL: {
#if V8_TARGET_ARCH_PPC64
Format(instr, "cmplw 'ra, 'rb");
}
#endif
- break;
+ return;
}
case NEGX: {
Format(instr, "neg'. 'rt, 'ra");
- break;
+ return;
}
case NORX: {
Format(instr, "nor'. 'rt, 'ra, 'rb");
- break;
+ return;
}
case SUBFX: {
Format(instr, "subf'. 'rt, 'ra, 'rb");
- break;
+ return;
}
case MULHWX: {
Format(instr, "mulhw'o'. 'rt, 'ra, 'rb");
- break;
+ return;
}
case ADDZEX: {
Format(instr, "addze'. 'rt, 'ra");
- break;
+ return;
}
case MULLW: {
Format(instr, "mullw'o'. 'rt, 'ra, 'rb");
- break;
+ return;
}
#if V8_TARGET_ARCH_PPC64
case MULLD: {
Format(instr, "mulld'o'. 'rt, 'ra, 'rb");
- break;
+ return;
}
#endif
case DIVW: {
Format(instr, "divw'o'. 'rt, 'ra, 'rb");
- break;
+ return;
+ }
+ case DIVWU: {
+ Format(instr, "divwu'o'. 'rt, 'ra, 'rb");
+ return;
}
#if V8_TARGET_ARCH_PPC64
case DIVD: {
Format(instr, "divd'o'. 'rt, 'ra, 'rb");
- break;
+ return;
}
#endif
case ADDX: {
Format(instr, "add'o 'rt, 'ra, 'rb");
- break;
+ return;
}
case XORX: {
Format(instr, "xor'. 'ra, 'rs, 'rb");
- break;
+ return;
}
case ORX: {
if (instr->RTValue() == instr->RBValue()) {
} else {
Format(instr, "or 'ra, 'rs, 'rb");
}
- break;
+ return;
}
case MFSPR: {
int spr = instr->Bits(20, 11);
} else {
Format(instr, "mfspr 'rt ??");
}
- break;
+ return;
}
case MTSPR: {
int spr = instr->Bits(20, 11);
} else {
Format(instr, "mtspr 'rt ??");
}
- break;
+ return;
}
case MFCR: {
Format(instr, "mfcr 'rt");
- break;
+ return;
}
case STWX: {
Format(instr, "stwx 'rs, 'ra, 'rb");
- break;
+ return;
}
case STWUX: {
Format(instr, "stwux 'rs, 'ra, 'rb");
- break;
+ return;
}
case STBX: {
Format(instr, "stbx 'rs, 'ra, 'rb");
- break;
+ return;
}
case STBUX: {
Format(instr, "stbux 'rs, 'ra, 'rb");
- break;
+ return;
}
case STHX: {
Format(instr, "sthx 'rs, 'ra, 'rb");
- break;
+ return;
}
case STHUX: {
Format(instr, "sthux 'rs, 'ra, 'rb");
- break;
+ return;
}
case LWZX: {
Format(instr, "lwzx 'rt, 'ra, 'rb");
- break;
+ return;
}
case LWZUX: {
Format(instr, "lwzux 'rt, 'ra, 'rb");
- break;
+ return;
+ }
+ case LWAX: {
+ Format(instr, "lwax 'rt, 'ra, 'rb");
+ return;
}
case LBZX: {
Format(instr, "lbzx 'rt, 'ra, 'rb");
- break;
+ return;
}
case LBZUX: {
Format(instr, "lbzux 'rt, 'ra, 'rb");
- break;
+ return;
}
case LHZX: {
Format(instr, "lhzx 'rt, 'ra, 'rb");
- break;
+ return;
}
case LHZUX: {
Format(instr, "lhzux 'rt, 'ra, 'rb");
- break;
+ return;
+ }
+ case LHAX: {
+ Format(instr, "lhax 'rt, 'ra, 'rb");
+ return;
}
#if V8_TARGET_ARCH_PPC64
case LDX: {
Format(instr, "ldx 'rt, 'ra, 'rb");
- break;
+ return;
}
case LDUX: {
Format(instr, "ldux 'rt, 'ra, 'rb");
- break;
+ return;
}
case STDX: {
Format(instr, "stdx 'rt, 'ra, 'rb");
- break;
+ return;
}
case STDUX: {
Format(instr, "stdux 'rt, 'ra, 'rb");
- break;
+ return;
}
case MFVSRD: {
Format(instr, "mffprd 'ra, 'Dt");
- break;
+ return;
}
case MFVSRWZ: {
Format(instr, "mffprwz 'ra, 'Dt");
- break;
+ return;
}
case MTVSRD: {
Format(instr, "mtfprd 'Dt, 'ra");
- break;
+ return;
}
case MTVSRWA: {
Format(instr, "mtfprwa 'Dt, 'ra");
- break;
+ return;
}
case MTVSRWZ: {
Format(instr, "mtfprwz 'Dt, 'ra");
- break;
+ return;
}
#endif
+ }
+
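+  // ISEL encodes its extended opcode in bits 5-1 rather than bits 10-1, so
+  // it is decoded in a second switch over the narrower field.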
+ switch (instr->Bits(5, 1) << 1) {
+ case ISEL: {
+ Format(instr, "isel 'rt, 'ra, 'rb");
+ return;
+ }
default: {
Unknown(instr); // not used by V8
}
Format(instr, "fabs'. 'Dt, 'Db");
break;
}
+ case FRIN: {
+ Format(instr, "frin. 'Dt, 'Db");
+ break;
+ }
+ case FRIZ: {
+ Format(instr, "friz. 'Dt, 'Db");
+ break;
+ }
+ case FRIP: {
+ Format(instr, "frip. 'Dt, 'Db");
+ break;
+ }
case FRIM: {
- Format(instr, "frim 'Dt, 'Db");
+ Format(instr, "frim. 'Dt, 'Db");
break;
}
case FNEG: {
break;
}
#endif
-
- case FAKE_OPCODE: {
- if (instr->Bits(MARKER_SUBOPCODE_BIT, MARKER_SUBOPCODE_BIT) == 1) {
- int marker_code = instr->Bits(STUB_MARKER_HIGH_BIT, 0);
- DCHECK(marker_code < F_NEXT_AVAILABLE_STUB_MARKER);
- MarkerFormat(instr, "stub-marker ", marker_code);
- } else {
- int fake_opcode = instr->Bits(FAKE_OPCODE_HIGH_BIT, 0);
- MarkerFormat(instr, "faker-opcode ", fake_opcode);
- }
- break;
- }
default: {
Unknown(instr);
break;
// Sloppy mode functions and builtins need to replace the receiver with the
// global proxy when called as functions (without an explicit receiver
// object).
- if (info->strict_mode() == SLOPPY && !info->is_native()) {
+ if (is_sloppy(info->language_mode()) && !info->is_native()) {
Label ok;
int receiver_offset = info->scope()->num_parameters() * kPointerSize;
__ LoadP(r5, MemOperand(sp, receiver_offset), r0);
  // The stub will rewrite receiver and parameter count if the previous
// stack frame was an arguments adapter frame.
ArgumentsAccessStub::Type type;
- if (strict_mode() == STRICT) {
+ if (is_strict(language_mode())) {
type = ArgumentsAccessStub::NEW_STRICT;
} else if (function()->has_duplicate_parameters()) {
type = ArgumentsAccessStub::NEW_SLOPPY_SLOW;
// Get the object to enumerate over. If the object is null or undefined, skip
// over the loop. See ECMA-262 version 5, section 12.6.4.
+ SetExpressionPosition(stmt->enumerable());
VisitForAccumulatorValue(stmt->enumerable());
__ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
__ cmp(r3, ip);
// Generate code for doing the condition check.
PrepareForBailoutForId(stmt->BodyId(), NO_REGISTERS);
__ bind(&loop);
+ SetExpressionPosition(stmt->each());
+
// Load the current count to r3, load the length to r4.
__ LoadP(r3, MemOperand(sp, 0 * kPointerSize));
__ LoadP(r4, MemOperand(sp, 1 * kPointerSize));
}
-void FullCodeGenerator::VisitForOfStatement(ForOfStatement* stmt) {
- Comment cmnt(masm_, "[ ForOfStatement");
- SetStatementPosition(stmt);
-
- Iteration loop_statement(this, stmt);
- increment_loop_depth();
-
- // var iterator = iterable[Symbol.iterator]();
- VisitForEffect(stmt->assign_iterator());
-
- // Loop entry.
- __ bind(loop_statement.continue_label());
-
- // result = iterator.next()
- VisitForEffect(stmt->next_result());
-
- // if (result.done) break;
- Label result_not_done;
- VisitForControl(stmt->result_done(), loop_statement.break_label(),
- &result_not_done, &result_not_done);
- __ bind(&result_not_done);
-
- // each = result.value
- VisitForEffect(stmt->assign_each());
-
- // Generate code for the body of the loop.
- Visit(stmt->body());
-
- // Check stack before looping.
- PrepareForBailoutForId(stmt->BackEdgeId(), NO_REGISTERS);
- EmitBackEdgeBookkeeping(stmt, loop_statement.continue_label());
- __ b(loop_statement.continue_label());
-
- // Exit and decrement the loop depth.
- PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
- __ bind(loop_statement.break_label());
- decrement_loop_depth();
-}
-
-
void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info,
bool pretenure) {
// Use the fast case closure allocation code that allocates in new
// doesn't just get a copy of the existing unoptimized code.
if (!FLAG_always_opt && !FLAG_prepare_always_opt && !pretenure &&
scope()->is_function_scope() && info->num_literals() == 0) {
- FastNewClosureStub stub(isolate(), info->strict_mode(), info->kind());
+ FastNewClosureStub stub(isolate(), info->language_mode(), info->kind());
__ mov(r5, Operand(info));
__ CallStub(&stub);
} else {
}
+void FullCodeGenerator::EmitSetHomeObjectIfNeeded(Expression* initializer,
+ int offset) {
+ if (NeedsHomeObject(initializer)) {
+ __ LoadP(StoreDescriptor::ReceiverRegister(), MemOperand(sp));
+ __ mov(StoreDescriptor::NameRegister(),
+ Operand(isolate()->factory()->home_object_symbol()));
+ __ LoadP(StoreDescriptor::ValueRegister(),
+ MemOperand(sp, offset * kPointerSize));
+ CallStoreIC();
+ }
+}
+
+
void FullCodeGenerator::EmitLoadGlobalCheckExtensions(VariableProxy* proxy,
TypeofState typeof_state,
Label* slow) {
bool skip_init_check;
if (var->scope()->DeclarationScope() != scope()->DeclarationScope()) {
skip_init_check = false;
+ } else if (var->is_this()) {
+ CHECK(info_->function() != nullptr &&
+ (info_->function()->kind() & kSubclassConstructor) != 0);
+ // TODO(dslomov): implement 'this' hole check elimination.
+ skip_init_check = false;
} else {
// Check that we always have valid source position.
DCHECK(var->initializer_position() != RelocInfo::kNoPosition);
expr->CalculateEmitStore(zone());
AccessorTable accessor_table(zone());
- for (int i = 0; i < expr->properties()->length(); i++) {
- ObjectLiteral::Property* property = expr->properties()->at(i);
+ int property_index = 0;
+ for (; property_index < expr->properties()->length(); property_index++) {
+ ObjectLiteral::Property* property = expr->properties()->at(property_index);
+ if (property->is_computed_name()) break;
if (property->IsCompileTimeValue()) continue;
- Literal* key = property->key();
+ Literal* key = property->key()->AsLiteral();
Expression* value = property->value();
if (!result_saved) {
__ push(r3); // Save result on stack
__ LoadP(StoreDescriptor::ReceiverRegister(), MemOperand(sp));
CallStoreIC(key->LiteralFeedbackId());
PrepareForBailoutForId(key->id(), NO_REGISTERS);
+
+ if (NeedsHomeObject(value)) {
+ __ Move(StoreDescriptor::ReceiverRegister(), r3);
+ __ mov(StoreDescriptor::NameRegister(),
+ Operand(isolate()->factory()->home_object_symbol()));
+ __ LoadP(StoreDescriptor::ValueRegister(), MemOperand(sp));
+ CallStoreIC();
+ }
} else {
VisitForEffect(value);
}
VisitForStackValue(key);
VisitForStackValue(value);
if (property->emit_store()) {
+ EmitSetHomeObjectIfNeeded(value, 2);
__ LoadSmiLiteral(r3, Smi::FromInt(SLOPPY)); // PropertyAttributes
__ push(r3);
__ CallRuntime(Runtime::kSetProperty, 4);
__ push(r3);
VisitForStackValue(it->first);
EmitAccessor(it->second->getter);
+ EmitSetHomeObjectIfNeeded(it->second->getter, 2);
EmitAccessor(it->second->setter);
+ EmitSetHomeObjectIfNeeded(it->second->setter, 3);
__ LoadSmiLiteral(r3, Smi::FromInt(NONE));
__ push(r3);
__ CallRuntime(Runtime::kDefineAccessorPropertyUnchecked, 5);
}
+ // Object literals have two parts. The "static" part on the left contains no
+ // computed property names, and so we can compute its map ahead of time; see
+ // runtime.cc::CreateObjectLiteralBoilerplate. The second "dynamic" part
+ // starts with the first computed property name, and continues with all
+ // properties to its right. All the code from above initializes the static
+ // component of the object literal, and arranges for the map of the result to
+ // reflect the static order in which the keys appear. For the dynamic
+ // properties, we compile them into a series of "SetOwnProperty" runtime
+ // calls. This will preserve insertion order.
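+  // For example (illustrative):
+  //   var o = { a: 1, [key]: 2, b: 3 };
+  // 'a' is initialized by the static path above, while '[key]' and every
+  // later property ('b') are defined by the runtime calls emitted below.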
+ for (; property_index < expr->properties()->length(); property_index++) {
+ ObjectLiteral::Property* property = expr->properties()->at(property_index);
+
+ Expression* value = property->value();
+ if (!result_saved) {
+ __ push(r3); // Save result on the stack
+ result_saved = true;
+ }
+
+ __ LoadP(r3, MemOperand(sp)); // Duplicate receiver.
+ __ push(r3);
+
+ if (property->kind() == ObjectLiteral::Property::PROTOTYPE) {
+ DCHECK(!property->is_computed_name());
+ VisitForStackValue(value);
+ DCHECK(property->emit_store());
+ __ CallRuntime(Runtime::kInternalSetPrototype, 2);
+ } else {
+ EmitPropertyKey(property, expr->GetIdForProperty(property_index));
+ VisitForStackValue(value);
+
+ switch (property->kind()) {
+ case ObjectLiteral::Property::CONSTANT:
+ case ObjectLiteral::Property::MATERIALIZED_LITERAL:
+ case ObjectLiteral::Property::COMPUTED:
+ if (property->emit_store()) {
+ __ LoadSmiLiteral(r3, Smi::FromInt(NONE));
+ __ push(r3);
+ __ CallRuntime(Runtime::kDefineDataPropertyUnchecked, 4);
+ } else {
+ __ Drop(3);
+ }
+ break;
+
+ case ObjectLiteral::Property::PROTOTYPE:
+ UNREACHABLE();
+ break;
+
+ case ObjectLiteral::Property::GETTER:
+ __ mov(r3, Operand(Smi::FromInt(NONE)));
+ __ push(r3);
+ __ CallRuntime(Runtime::kDefineGetterPropertyUnchecked, 4);
+ break;
+
+ case ObjectLiteral::Property::SETTER:
+ __ mov(r3, Operand(Smi::FromInt(NONE)));
+ __ push(r3);
+ __ CallRuntime(Runtime::kDefineSetterPropertyUnchecked, 4);
+ break;
+ }
+ }
+ }
+
if (expr->has_function()) {
DCHECK(result_saved);
__ LoadP(r3, MemOperand(sp));
__ push(r3); // Left operand goes on the stack.
VisitForAccumulatorValue(expr->value());
- OverwriteMode mode = expr->value()->ResultOverwriteAllowed()
- ? OVERWRITE_RIGHT
- : NO_OVERWRITE;
SetSourcePosition(expr->position() + 1);
AccumulatorValueContext context(this);
if (ShouldInlineSmiCase(op)) {
- EmitInlineSmiBinaryOp(expr->binary_operation(), op, mode, expr->target(),
+ EmitInlineSmiBinaryOp(expr->binary_operation(), op, expr->target(),
expr->value());
} else {
- EmitBinaryOp(expr->binary_operation(), op, mode);
+ EmitBinaryOp(expr->binary_operation(), op);
}
// Deoptimization point in case the binary operation may have side effects.
VisitForAccumulatorValue(value);
__ pop(r4);
- // Check generator state.
- Label wrong_state, closed_state, done;
- __ LoadP(r6, FieldMemOperand(r4, JSGeneratorObject::kContinuationOffset));
- STATIC_ASSERT(JSGeneratorObject::kGeneratorExecuting < 0);
- STATIC_ASSERT(JSGeneratorObject::kGeneratorClosed == 0);
- __ CmpSmiLiteral(r6, Smi::FromInt(0), r0);
- __ beq(&closed_state);
- __ blt(&wrong_state);
-
// Load suspended function and context.
__ LoadP(cp, FieldMemOperand(r4, JSGeneratorObject::kContextOffset));
__ LoadP(r7, FieldMemOperand(r4, JSGeneratorObject::kFunctionOffset));
// Enter a new JavaScript frame, and initialize its slots as they were when
// the generator was suspended.
- Label resume_frame;
+ Label resume_frame, done;
__ bind(&push_frame);
__ b(&resume_frame, SetLK);
__ b(&done);
// Not reached: the runtime call returns elsewhere.
__ stop("not-reached");
- // Reach here when generator is closed.
- __ bind(&closed_state);
- if (resume_mode == JSGeneratorObject::NEXT) {
- // Return completed iterator result when generator is closed.
- __ LoadRoot(r5, Heap::kUndefinedValueRootIndex);
- __ push(r5);
- // Pop value from top-of-stack slot; box result into result register.
- EmitCreateIteratorResult(true);
- } else {
- // Throw the provided value.
- __ push(r3);
- __ CallRuntime(Runtime::kThrow, 1);
- }
- __ b(&done);
-
- // Throw error if we attempt to operate on a running generator.
- __ bind(&wrong_state);
- __ push(r4);
- __ CallRuntime(Runtime::kThrowGeneratorStateError, 1);
-
__ bind(&done);
context()->Plug(result_register());
}
void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
Token::Value op,
- OverwriteMode mode,
Expression* left_expr,
Expression* right_expr) {
Label done, smi_case, stub_call;
patch_site.EmitJumpIfSmi(scratch1, &smi_case);
__ bind(&stub_call);
- Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), op, mode).code();
+ Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), op).code();
CallIC(code, expr->BinaryOperationFeedbackId());
patch_site.EmitPatchInfo();
__ b(&done);
}
case Token::ADD: {
__ AddAndCheckForOverflow(scratch1, left, right, scratch2, r0);
- __ bne(&stub_call, cr0);
+ __ BranchOnOverflow(&stub_call);
__ mr(right, scratch1);
break;
}
case Token::SUB: {
__ SubAndCheckForOverflow(scratch1, left, right, scratch2, r0);
- __ bne(&stub_call, cr0);
+ __ BranchOnOverflow(&stub_call);
__ mr(right, scratch1);
break;
}
__ Mul(scratch1, r0, ip);
// Check for overflowing the smi range - no overflow if higher 33 bits of
// the result are identical.
- __ TestIfInt32(scratch1, scratch2, ip);
+ __ TestIfInt32(scratch1, r0);
__ bne(&stub_call);
#else
__ SmiUntag(ip, right);
for (int i = 0; i < lit->properties()->length(); i++) {
ObjectLiteral::Property* property = lit->properties()->at(i);
- Literal* key = property->key()->AsLiteral();
Expression* value = property->value();
- DCHECK(key != NULL);
if (property->is_static()) {
__ LoadP(scratch, MemOperand(sp, kPointerSize)); // constructor
__ LoadP(scratch, MemOperand(sp, 0)); // prototype
}
__ push(scratch);
- VisitForStackValue(key);
+ EmitPropertyKey(property, lit->GetIdForProperty(i));
VisitForStackValue(value);
+ EmitSetHomeObjectIfNeeded(value, 2);
switch (property->kind()) {
case ObjectLiteral::Property::CONSTANT:
break;
case ObjectLiteral::Property::GETTER:
- __ CallRuntime(Runtime::kDefineClassGetter, 3);
+ __ mov(r3, Operand(Smi::FromInt(DONT_ENUM)));
+ __ push(r3);
+ __ CallRuntime(Runtime::kDefineGetterPropertyUnchecked, 4);
break;
case ObjectLiteral::Property::SETTER:
- __ CallRuntime(Runtime::kDefineClassSetter, 3);
+ __ mov(r3, Operand(Smi::FromInt(DONT_ENUM)));
+ __ push(r3);
+ __ CallRuntime(Runtime::kDefineSetterPropertyUnchecked, 4);
break;
default:
}
-void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr, Token::Value op,
- OverwriteMode mode) {
+void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr, Token::Value op) {
__ pop(r4);
- Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), op, mode).code();
+ Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), op).code();
JumpPatchSite patch_site(masm_); // unbound, signals no inlined smi code.
CallIC(code, expr->BinaryOperationFeedbackId());
patch_site.EmitPatchInfo();
__ Pop(StoreDescriptor::ValueRegister(),
StoreDescriptor::ReceiverRegister());
Handle<Code> ic =
- CodeFactory::KeyedStoreIC(isolate(), strict_mode()).code();
+ CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
CallIC(ic);
break;
}
// Assignment to var.
__ push(r3); // Value.
__ mov(r4, Operand(var->name()));
- __ mov(r3, Operand(Smi::FromInt(strict_mode())));
- __ Push(cp, r4, r3); // Context, name, strict mode.
+ __ mov(r3, Operand(Smi::FromInt(language_mode())));
+ __ Push(cp, r4, r3); // Context, name, language mode.
__ CallRuntime(Runtime::kStoreLookupSlot, 4);
} else {
// Assignment to var or initializing assignment to let/const in harmony
}
EmitStoreToStackLocalOrContextSlot(var, location);
}
+ } else if (IsSignallingAssignmentToConst(var, op, language_mode())) {
+ __ CallRuntime(Runtime::kThrowConstAssignError, 0);
}
- // Non-initializing assignments to consts are ignored.
}
__ Push(key->value());
__ Push(r3);
- __ CallRuntime((strict_mode() == STRICT ? Runtime::kStoreToSuper_Strict
- : Runtime::kStoreToSuper_Sloppy),
+ __ CallRuntime((is_strict(language_mode()) ? Runtime::kStoreToSuper_Strict
+ : Runtime::kStoreToSuper_Sloppy),
4);
}
DCHECK(prop != NULL);
__ Push(r3);
- __ CallRuntime((strict_mode() == STRICT ? Runtime::kStoreKeyedToSuper_Strict
- : Runtime::kStoreKeyedToSuper_Sloppy),
- 4);
+ __ CallRuntime(
+ (is_strict(language_mode()) ? Runtime::kStoreKeyedToSuper_Strict
+ : Runtime::kStoreKeyedToSuper_Sloppy),
+ 4);
}
__ Pop(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister());
DCHECK(StoreDescriptor::ValueRegister().is(r3));
- Handle<Code> ic = CodeFactory::KeyedStoreIC(isolate(), strict_mode()).code();
+ Handle<Code> ic =
+ CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
CallIC(ic, expr->AssignmentFeedbackId());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
// Record source position of the IC call.
SetSourcePosition(expr->position());
- Handle<Code> ic = CallIC::initialize_stub(isolate(), arg_count, call_type);
- __ LoadSmiLiteral(r6, SmiFromSlot(expr->CallFeedbackSlot()));
+ Handle<Code> ic = CodeFactory::CallIC(isolate(), arg_count, call_type).code();
+ __ LoadSmiLiteral(r6, SmiFromSlot(expr->CallFeedbackICSlot()));
__ LoadP(r4, MemOperand(sp, (arg_count + 1) * kPointerSize), r0);
// Don't assign a type feedback id to the IC, since type feedback is provided
// by the vector above.
int receiver_offset = 2 + info_->scope()->num_parameters();
__ LoadP(r6, MemOperand(fp, receiver_offset * kPointerSize), r0);
- // r5: strict mode.
- __ LoadSmiLiteral(r5, Smi::FromInt(strict_mode()));
+ // r5: language mode.
+ __ LoadSmiLiteral(r5, Smi::FromInt(language_mode()));
// r4: the start position of the scope the calls resides in.
__ LoadSmiLiteral(r4, Smi::FromInt(scope()->start_position()));
}
}
} else if (call_type == Call::SUPER_CALL) {
- SuperReference* super_ref = callee->AsSuperReference();
- EmitLoadSuperConstructor(super_ref);
- __ Push(result_register());
- VisitForStackValue(super_ref->this_var());
- EmitCall(expr, CallICState::METHOD);
+ if (FLAG_experimental_classes) {
+ EmitSuperConstructorCall(expr);
+ } else {
+ SuperReference* super_ref = callee->AsSuperReference();
+ EmitLoadSuperConstructor(super_ref);
+ __ Push(result_register());
+ VisitForStackValue(super_ref->this_var());
+ EmitCall(expr, CallICState::METHOD);
+ }
} else {
DCHECK(call_type == Call::OTHER_CALL);
// Call to an arbitrary expression not handled specially above.
}
+void FullCodeGenerator::EmitSuperConstructorCall(Call* expr) {
+ SuperReference* super_ref = expr->expression()->AsSuperReference();
+ EmitLoadSuperConstructor(super_ref);
+ __ push(result_register());
+
+ Variable* this_var = super_ref->this_var()->var();
+
+ GetVar(r3, this_var);
+ __ CompareRoot(r3, Heap::kTheHoleValueRootIndex);
+ Label uninitialized_this;
+ __ beq(&uninitialized_this);
+ __ mov(r3, Operand(this_var->name()));
+ __ push(r3);
+ __ CallRuntime(Runtime::kThrowReferenceError, 1);
+ __ bind(&uninitialized_this);
+
+ // Push the arguments ("left-to-right") on the stack.
+ ZoneList<Expression*>* args = expr->arguments();
+ int arg_count = args->length();
+ for (int i = 0; i < arg_count; i++) {
+ VisitForStackValue(args->at(i));
+ }
+
+ // Call the construct call builtin that handles allocation and
+ // constructor invocation.
+ SetSourcePosition(expr->position());
+
+  // Load function and argument count into r4 and r3.
+ __ mov(r3, Operand(arg_count));
+ __ LoadP(r4, MemOperand(sp, arg_count * kPointerSize));
+
+ // Record call targets in unoptimized code.
+ if (FLAG_pretenuring_call_new) {
+ UNREACHABLE();
+ /* TODO(dslomov): support pretenuring.
+ EnsureSlotContainsAllocationSite(expr->AllocationSiteFeedbackSlot());
+ DCHECK(expr->AllocationSiteFeedbackSlot().ToInt() ==
+ expr->CallNewFeedbackSlot().ToInt() + 1);
+ */
+ }
+
+ __ Move(r5, FeedbackVector());
+ __ LoadSmiLiteral(r6, SmiFromSlot(expr->CallFeedbackSlot()));
+
+ // TODO(dslomov): use a different stub and propagate new.target.
+ CallConstructStub stub(isolate(), RECORD_CONSTRUCTOR_TARGET);
+ __ Call(stub.GetCode(), RelocInfo::CONSTRUCT_CALL);
+
+ RecordJSReturnSite(expr);
+
+ EmitVariableAssignment(this_var, Token::INIT_CONST);
+ context()->Plug(r3);
+}
+
+
void FullCodeGenerator::EmitIsSmi(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() == 1);
if (property != NULL) {
VisitForStackValue(property->obj());
VisitForStackValue(property->key());
- __ LoadSmiLiteral(r4, Smi::FromInt(strict_mode()));
+ __ LoadSmiLiteral(r4, Smi::FromInt(language_mode()));
__ push(r4);
__ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION);
context()->Plug(r3);
Variable* var = proxy->var();
// Delete of an unqualified identifier is disallowed in strict mode
// but "delete this" is allowed.
- DCHECK(strict_mode() == SLOPPY || var->is_this());
+ DCHECK(is_sloppy(language_mode()) || var->is_this());
if (var->IsUnallocated()) {
__ LoadP(r5, GlobalObjectOperand());
__ mov(r4, Operand(var->name()));
// Record position before stub call.
SetSourcePosition(expr->position());
- Handle<Code> code =
- CodeFactory::BinaryOpIC(isolate(), Token::ADD, NO_OVERWRITE).code();
+ Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), Token::ADD).code();
CallIC(code, expr->CountBinOpFeedbackId());
patch_site.EmitPatchInfo();
__ bind(&done);
__ Pop(StoreDescriptor::ReceiverRegister(),
StoreDescriptor::NameRegister());
Handle<Code> ic =
- CodeFactory::KeyedStoreIC(isolate(), strict_mode()).code();
+ CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
CallIC(ic, expr->CountStoreFeedbackId());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
if (expr->is_postfix()) {
void CreateAllocationSiteDescriptor::Initialize(
CallInterfaceDescriptorData* data) {
Register registers[] = {cp, r5, r6};
- data->Initialize(arraysize(registers), registers, NULL);
+ Representation representations[] = {Representation::Tagged(),
+ Representation::Tagged(),
+ Representation::Smi()};
+ data->Initialize(arraysize(registers), registers, representations);
+}
+
+
+void CreateWeakCellDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+ Register registers[] = {cp, r5, r6, r4};
+ Representation representations[] = {
+ Representation::Tagged(), Representation::Tagged(), Representation::Smi(),
+ Representation::Tagged()};
+ data->Initialize(arraysize(registers), registers, representations);
}
}
+void CallFunctionWithFeedbackAndVectorDescriptor::Initialize(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {cp, r4, r6, r5};
+ Representation representations[] = {
+ Representation::Tagged(), Representation::Tagged(), Representation::Smi(),
+ Representation::Tagged()};
+ data->Initialize(arraysize(registers), registers, representations);
+}
+
+
void CallConstructDescriptor::Initialize(CallInterfaceDescriptorData* data) {
// r3 : number of arguments
// r4 : the function to call
}
+void AllocateHeapNumberDescriptor::Initialize(
+ CallInterfaceDescriptorData* data) {
+ // register state
+ // cp -- context
+ Register registers[] = {cp};
+ data->Initialize(arraysize(registers), registers, nullptr);
+}
+
+
void ArrayConstructorConstantArgCountDescriptor::Initialize(
CallInterfaceDescriptorData* data) {
// register state
r7, // call_data
r5, // holder
r4, // api_function_address
+ r6, // actual number of arguments
+ };
+ Representation representations[] = {
+ Representation::Tagged(), // context
+ Representation::Tagged(), // callee
+ Representation::Tagged(), // call_data
+ Representation::Tagged(), // holder
+ Representation::External(), // api_function_address
+ Representation::Integer32(), // actual number of arguments
+ };
+ data->Initialize(arraysize(registers), registers, representations);
+}
+
+
+void ApiAccessorDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+ Register registers[] = {
+ cp, // context
+ r3, // callee
+ r7, // call_data
+ r5, // holder
+ r4, // api_function_address
};
Representation representations[] = {
Representation::Tagged(), // context
DCHECK(is_done());
code->set_stack_slots(GetStackSlotCount());
code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
- if (code->is_optimized_code()) RegisterWeakObjectsInOptimizedCode(code);
PopulateDeoptimizationData(code);
}
// Sloppy mode functions and builtins need to replace the receiver with the
// global proxy when called as functions (without an explicit receiver
// object).
- if (info_->this_has_uses() && info_->strict_mode() == SLOPPY &&
+ if (info_->this_has_uses() && is_sloppy(info_->language_mode()) &&
!info_->is_native()) {
Label ok;
int receiver_offset = info_->scope()->num_parameters() * kPointerSize;
void LCodeGen::DeoptimizeIf(Condition cond, LInstruction* instr,
- const char* detail,
+ Deoptimizer::DeoptReason deopt_reason,
Deoptimizer::BailoutType bailout_type,
CRegister cr) {
LEnvironment* environment = instr->environment();
}
Deoptimizer::Reason reason(instr->hydrogen_value()->position().raw(),
- instr->Mnemonic(), detail);
+ instr->Mnemonic(), deopt_reason);
DCHECK(info()->IsStub() || frame_is_built_);
// Go through jump table if we need to handle condition, build frame, or
// restore caller doubles.
void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr,
- const char* detail, CRegister cr) {
+ Deoptimizer::DeoptReason deopt_reason,
+ CRegister cr) {
Deoptimizer::BailoutType bailout_type =
info()->IsStub() ? Deoptimizer::LAZY : Deoptimizer::EAGER;
- DeoptimizeIf(condition, instr, detail, bailout_type, cr);
+ DeoptimizeIf(condition, instr, deopt_reason, bailout_type, cr);
}
} else {
data->SetSharedFunctionInfo(Smi::FromInt(0));
}
+ data->SetWeakCellCache(Smi::FromInt(0));
Handle<FixedArray> literals =
factory()->NewFixedArray(deoptimization_literals_.length(), TENURED);
Register right_reg = ToRegister(instr->right());
Register result_reg = ToRegister(instr->result());
Register scratch = scratch0();
+ bool can_overflow = hmod->CheckFlag(HValue::kCanOverflow);
Label done;
- if (hmod->CheckFlag(HValue::kCanOverflow)) {
+ if (can_overflow) {
__ li(r0, Operand::Zero()); // clear xer
__ mtxer(r0);
}
  // Check for kMinInt % -1; divw will return undefined, which is not what we
// want. We have to deopt if we care about -0, because we can't return that.
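+  // (kMinInt % -1 is mathematically 0, but the dividend is negative, so the
+  // JS result is -0, which an int32 cannot represent.)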
- if (hmod->CheckFlag(HValue::kCanOverflow)) {
- Label no_overflow_possible;
+ if (can_overflow) {
if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
DeoptimizeIf(overflow, instr, Deoptimizer::kMinusZero, cr0);
} else {
- __ bnooverflow(&no_overflow_possible, cr0);
- __ li(result_reg, Operand::Zero());
- __ b(&done);
+ if (CpuFeatures::IsSupported(ISELECT)) {
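+      // isel rt, ra, rb: rt = cond ? ra : rb (branchless select); r0 still
+      // holds zero from the xer clear above.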
+ __ isel(overflow, result_reg, r0, result_reg, cr0);
+ __ boverflow(&done, cr0);
+ } else {
+ Label no_overflow_possible;
+ __ bnooverflow(&no_overflow_possible, cr0);
+ __ li(result_reg, Operand::Zero());
+ __ b(&done);
+ __ bind(&no_overflow_possible);
+ }
}
- __ bind(&no_overflow_possible);
}
__ mullw(scratch, right_reg, scratch);
const Register dividend = ToRegister(instr->dividend());
const Register divisor = ToRegister(instr->divisor());
Register result = ToRegister(instr->result());
+ bool can_overflow = hdiv->CheckFlag(HValue::kCanOverflow);
DCHECK(!dividend.is(result));
DCHECK(!divisor.is(result));
- if (hdiv->CheckFlag(HValue::kCanOverflow)) {
+ if (can_overflow) {
__ li(r0, Operand::Zero()); // clear xer
__ mtxer(r0);
}
}
// Check for (kMinInt / -1).
- if (hdiv->CheckFlag(HValue::kCanOverflow)) {
- Label no_overflow_possible;
+ if (can_overflow) {
if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow, cr0);
} else {
// When truncating, we want kMinInt / -1 = kMinInt.
- __ bnooverflow(&no_overflow_possible, cr0);
- __ mr(result, dividend);
+ if (CpuFeatures::IsSupported(ISELECT)) {
+ __ isel(overflow, result, dividend, result, cr0);
+ } else {
+ Label no_overflow_possible;
+ __ bnooverflow(&no_overflow_possible, cr0);
+ __ mr(result, dividend);
+ __ bind(&no_overflow_possible);
+ }
}
- __ bind(&no_overflow_possible);
}
if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
Register dividend = ToRegister(instr->dividend());
Register result = ToRegister(instr->result());
int32_t divisor = instr->divisor();
+ bool can_overflow = hdiv->CheckFlag(HValue::kLeftCanBeMinInt);
// If the divisor is positive, things are easy: There can be no deopts and we
// can simply do an arithmetic right shift.
// If the divisor is negative, we have to negate and handle edge cases.
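+  // (The edge case is dividend == kMinInt with divisor == -1: the negation
+  // 2^31 does not fit in an int32, hence the overflow handling below.)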
OEBit oe = LeaveOE;
#if V8_TARGET_ARCH_PPC64
- if (divisor == -1 && hdiv->CheckFlag(HValue::kLeftCanBeMinInt)) {
+ if (divisor == -1 && can_overflow) {
__ lis(r0, Operand(SIGN_EXT_IMM16(0x8000)));
__ cmpw(dividend, r0);
DeoptimizeIf(eq, instr, Deoptimizer::kOverflow);
}
#else
- if (hdiv->CheckFlag(HValue::kLeftCanBeMinInt)) {
+ if (can_overflow) {
__ li(r0, Operand::Zero()); // clear xer
__ mtxer(r0);
oe = SetOE;
// If the negation could not overflow, simply shifting is OK.
#if !V8_TARGET_ARCH_PPC64
- if (!instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
+ if (!can_overflow) {
#endif
if (shift) {
__ ShiftRightArithImm(result, result, shift);
const Register dividend = ToRegister(instr->dividend());
const Register divisor = ToRegister(instr->divisor());
Register result = ToRegister(instr->result());
+ bool can_overflow = hdiv->CheckFlag(HValue::kCanOverflow);
DCHECK(!dividend.is(result));
DCHECK(!divisor.is(result));
- if (hdiv->CheckFlag(HValue::kCanOverflow)) {
+ if (can_overflow) {
__ li(r0, Operand::Zero()); // clear xer
__ mtxer(r0);
}
}
// Check for (kMinInt / -1).
- if (hdiv->CheckFlag(HValue::kCanOverflow)) {
- Label no_overflow_possible;
+ if (can_overflow) {
if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow, cr0);
} else {
// When truncating, we want kMinInt / -1 = kMinInt.
- __ bnooverflow(&no_overflow_possible, cr0);
- __ mr(result, dividend);
+ if (CpuFeatures::IsSupported(ISELECT)) {
+ __ isel(overflow, result, dividend, result, cr0);
+ } else {
+ Label no_overflow_possible;
+ __ bnooverflow(&no_overflow_possible, cr0);
+ __ mr(result, dividend);
+ __ bind(&no_overflow_possible);
+ }
}
- __ bind(&no_overflow_possible);
}
Label done;
#if V8_TARGET_ARCH_PPC64
} else {
__ neg(result, left);
- __ TestIfInt32(result, scratch, r0);
+ __ TestIfInt32(result, r0);
DeoptimizeIf(ne, instr, Deoptimizer::kOverflow);
}
#endif
} else {
__ Mul(result, left, right);
}
- __ TestIfInt32(result, scratch, r0);
+ __ TestIfInt32(result, r0);
DeoptimizeIf(ne, instr, Deoptimizer::kOverflow);
if (instr->hydrogen()->representation().IsSmi()) {
__ SmiTag(result);
Register left = ToRegister(instr->left());
Register result = ToRegister(instr->result());
bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
- if (!can_overflow) {
+#if V8_TARGET_ARCH_PPC64
+ const bool isInteger = !instr->hydrogen()->representation().IsSmi();
+#else
+ const bool isInteger = false;
+#endif
+ if (!can_overflow || isInteger) {
if (right->IsConstantOperand()) {
__ Add(result, left, -(ToOperand(right).immediate()), r0);
} else {
__ sub(result, left, EmitLoadRegister(right, ip));
}
+#if V8_TARGET_ARCH_PPC64
+ if (can_overflow) {
+ __ TestIfInt32(result, r0);
+ DeoptimizeIf(ne, instr, Deoptimizer::kOverflow);
+ }
+#endif
} else {
if (right->IsConstantOperand()) {
__ AddAndCheckForOverflow(result, left, -(ToOperand(right).immediate()),
__ SubAndCheckForOverflow(result, left, EmitLoadRegister(right, ip),
scratch0(), r0);
}
-// Doptimize on overflow
-#if V8_TARGET_ARCH_PPC64
- if (!instr->hydrogen()->representation().IsSmi()) {
- __ extsw(scratch0(), scratch0(), SetRC);
- }
-#endif
DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, cr0);
}
-
-#if V8_TARGET_ARCH_PPC64
- if (!instr->hydrogen()->representation().IsSmi()) {
- __ extsw(result, result);
- }
-#endif
}
}
-// TODO(penguin): put const to constant pool instead
-// of storing double to stack
void LCodeGen::DoConstantD(LConstantD* instr) {
DCHECK(instr->result()->IsDoubleRegister());
DoubleRegister result = ToDoubleRegister(instr->result());
+#if V8_HOST_ARCH_IA32
+ // Need some crappy work-around for x87 sNaN -> qNaN breakage in simulator
+ // builds.
+ uint64_t bits = instr->bits();
+ if ((bits & V8_UINT64_C(0x7FF8000000000000)) ==
+ V8_UINT64_C(0x7FF0000000000000)) {
+ uint32_t lo = static_cast<uint32_t>(bits);
+ uint32_t hi = static_cast<uint32_t>(bits >> 32);
+ __ mov(ip, Operand(lo));
+ __ mov(scratch0(), Operand(hi));
+ __ MovInt64ToDouble(result, scratch0(), ip);
+ return;
+ }
+#endif
double v = instr->value();
__ LoadDoubleLiteral(result, v, scratch0());
}
Register result = ToRegister(instr->result());
bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
#if V8_TARGET_ARCH_PPC64
- bool isInteger = !(instr->hydrogen()->representation().IsSmi() ||
- instr->hydrogen()->representation().IsExternal());
+ const bool isInteger = !(instr->hydrogen()->representation().IsSmi() ||
+ instr->hydrogen()->representation().IsExternal());
+#else
+ const bool isInteger = false;
#endif
- if (!can_overflow) {
+ if (!can_overflow || isInteger) {
if (right->IsConstantOperand()) {
__ Add(result, left, ToOperand(right).immediate(), r0);
} else {
__ add(result, left, EmitLoadRegister(right, ip));
}
+#if V8_TARGET_ARCH_PPC64
+ if (can_overflow) {
+ __ TestIfInt32(result, r0);
+ DeoptimizeIf(ne, instr, Deoptimizer::kOverflow);
+ }
+#endif
} else {
if (right->IsConstantOperand()) {
__ AddAndCheckForOverflow(result, left, ToOperand(right).immediate(),
__ AddAndCheckForOverflow(result, left, EmitLoadRegister(right, ip),
scratch0(), r0);
}
-// Doptimize on overflow
-#if V8_TARGET_ARCH_PPC64
- if (isInteger) {
- __ extsw(scratch0(), scratch0(), SetRC);
- }
-#endif
DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, cr0);
}
-
-#if V8_TARGET_ARCH_PPC64
- if (isInteger) {
- __ extsw(result, result);
- }
-#endif
}
__ cmpw(left_reg, right_reg);
}
#endif
- __ b(cond, &return_left);
- __ Move(result_reg, right_reg);
- __ b(&done);
- __ bind(&return_left);
- __ Move(result_reg, left_reg);
- __ bind(&done);
+ if (CpuFeatures::IsSupported(ISELECT)) {
+ __ isel(cond, result_reg, left_reg, right_reg);
+ } else {
+ __ b(cond, &return_left);
+ __ Move(result_reg, right_reg);
+ __ b(&done);
+ __ bind(&return_left);
+ __ Move(result_reg, left_reg);
+ __ bind(&done);
+ }
} else {
DCHECK(instr->hydrogen()->representation().IsDouble());
DoubleRegister left_reg = ToDoubleRegister(left);
DCHECK(ToRegister(instr->right()).is(r3));
DCHECK(ToRegister(instr->result()).is(r3));
- Handle<Code> code =
- CodeFactory::BinaryOpIC(isolate(), instr->op(), NO_OVERWRITE).code();
+ Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), instr->op()).code();
CallCode(code, RelocInfo::CODE_TARGET, instr);
}
InstanceofStub stub(isolate(), InstanceofStub::kArgsInRegisters);
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
- Label equal, done;
- __ cmpi(r3, Operand::Zero());
- __ beq(&equal);
- __ mov(r3, Operand(factory()->false_value()));
- __ b(&done);
+ if (CpuFeatures::IsSupported(ISELECT)) {
+ __ mov(r4, Operand(factory()->true_value()));
+ __ mov(r5, Operand(factory()->false_value()));
+ __ cmpi(r3, Operand::Zero());
+ __ isel(eq, r3, r4, r5);
+ } else {
+ Label equal, done;
+ __ cmpi(r3, Operand::Zero());
+ __ beq(&equal);
+ __ mov(r3, Operand(factory()->false_value()));
+ __ b(&done);
- __ bind(&equal);
- __ mov(r3, Operand(factory()->true_value()));
- __ bind(&done);
+ __ bind(&equal);
+ __ mov(r3, Operand(factory()->true_value()));
+ __ bind(&done);
+ }
}
__ Move(InstanceofStub::right(), instr->function());
// Include instructions below in delta: mov + call = mov + (mov + 2)
- static const int kAdditionalDelta = (2 * Assembler::kMovInstructions) + 2;
+ static const int kAdditionalDelta = 2 * Assembler::kMovInstructions + 2;
int delta = masm_->InstructionsGeneratedSince(map_check) + kAdditionalDelta;
{
Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
+ if (Assembler::kMovInstructions != 1 &&
+ is_int16(delta * Instruction::kInstrSize)) {
+ // The following mov will be an li rather than a multi-instruction form
+ delta -= Assembler::kMovInstructions - 1;
+ }
// r8 is used to communicate the offset to the location of the map check.
__ mov(r8, Operand(delta * Instruction::kInstrSize));
}
__ cmpi(r3, Operand::Zero());
Condition condition = ComputeCompareCondition(op);
- Label true_value, done;
+ if (CpuFeatures::IsSupported(ISELECT)) {
+ __ LoadRoot(r4, Heap::kTrueValueRootIndex);
+ __ LoadRoot(r5, Heap::kFalseValueRootIndex);
+ __ isel(condition, ToRegister(instr->result()), r4, r5);
+ } else {
+ Label true_value, done;
- __ b(condition, &true_value);
+ __ b(condition, &true_value);
- __ LoadRoot(ToRegister(instr->result()), Heap::kFalseValueRootIndex);
- __ b(&done);
+ __ LoadRoot(ToRegister(instr->result()), Heap::kFalseValueRootIndex);
+ __ b(&done);
- __ bind(&true_value);
- __ LoadRoot(ToRegister(instr->result()), Heap::kTrueValueRootIndex);
+ __ bind(&true_value);
+ __ LoadRoot(ToRegister(instr->result()), Heap::kTrueValueRootIndex);
- __ bind(&done);
+ __ bind(&done);
+ }
}
__ addi(sp, sp, Operand(sp_delta));
}
} else {
+ DCHECK(info()->IsStub()); // Functions would need to drop one more value.
Register reg = ToRegister(instr->parameter_count());
// The argument count parameter is a smi
if (NeedsEagerFrame()) {
template <class T>
void LCodeGen::EmitVectorLoadICRegisters(T* instr) {
DCHECK(FLAG_vector_ics);
- Register vector = ToRegister(instr->temp_vector());
- DCHECK(vector.is(VectorLoadICDescriptor::VectorRegister()));
- __ Move(vector, instr->hydrogen()->feedback_vector());
+ Register vector_register = ToRegister(instr->temp_vector());
+ Register slot_register = VectorLoadICDescriptor::SlotRegister();
+ DCHECK(vector_register.is(VectorLoadICDescriptor::VectorRegister()));
+ DCHECK(slot_register.is(r3));
+
+ AllowDeferredHandleDereference vector_structure_check;
+ Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
+ __ Move(vector_register, vector);
// No need to allocate this register.
- DCHECK(VectorLoadICDescriptor::SlotRegister().is(r3));
- __ mov(VectorLoadICDescriptor::SlotRegister(),
- Operand(Smi::FromInt(instr->hydrogen()->slot())));
+ FeedbackVectorICSlot slot = instr->hydrogen()->slot();
+ int index = vector->GetIndex(slot);
+ __ mov(slot_register, Operand(Smi::FromInt(index)));
}
__ LoadP(result, ContextOperand(context, instr->slot_index()));
if (instr->hydrogen()->RequiresHoleCheck()) {
__ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
- __ cmp(result, ip);
if (instr->hydrogen()->DeoptimizesOnHole()) {
+ __ cmp(result, ip);
DeoptimizeIf(eq, instr, Deoptimizer::kHole);
} else {
- Label skip;
- __ bne(&skip);
- __ mov(result, Operand(factory()->undefined_value()));
- __ bind(&skip);
+ if (CpuFeatures::IsSupported(ISELECT)) {
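+      // Branchless form: materialize undefined, then select it only when
+      // the slot holds the hole.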
+ Register scratch = scratch0();
+ __ mov(scratch, Operand(factory()->undefined_value()));
+ __ cmp(result, ip);
+ __ isel(eq, result, scratch, result);
+ } else {
+ Label skip;
+ __ cmp(result, ip);
+ __ bne(&skip);
+ __ mov(result, Operand(factory()->undefined_value()));
+ __ bind(&skip);
+ }
}
}
}
}
if (instr->hydrogen()->representation().IsDouble()) {
+ DCHECK(access.IsInobject());
DoubleRegister result = ToDoubleRegister(instr->result());
__ lfd(result, FieldMemOperand(object, offset));
return;
if (representation.IsSmi() &&
instr->hydrogen()->representation().IsInteger32()) {
// Read int value directly from upper half of the smi.
- STATIC_ASSERT(kSmiTag == 0);
- STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 32);
-#if V8_TARGET_LITTLE_ENDIAN
- offset += kPointerSize / 2;
-#endif
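+    // SmiWordOffset addresses the word that holds the untagged int32
+    // payload (endian-dependent; see its definition in
+    // macro-assembler-ppc.h).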
+ offset = SmiWordOffset(offset);
representation = Representation::Integer32();
}
#endif
DeoptimizeIf(eq, instr, Deoptimizer::kHole);
// If the function does not have an initial map, we're done.
- Label done;
- __ CompareObjectType(result, scratch, scratch, MAP_TYPE);
- __ bne(&done);
+ if (CpuFeatures::IsSupported(ISELECT)) {
+ // Get the prototype from the initial map (optimistic).
+ __ LoadP(ip, FieldMemOperand(result, Map::kPrototypeOffset));
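+    // If result is not a map, the speculative load fetched an unrelated
+    // field; the isel below keeps the original result in that case.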
+ __ CompareObjectType(result, scratch, scratch, MAP_TYPE);
+ __ isel(eq, result, ip, result);
+ } else {
+ Label done;
+ __ CompareObjectType(result, scratch, scratch, MAP_TYPE);
+ __ bne(&done);
- // Get the prototype from the initial map.
- __ LoadP(result, FieldMemOperand(result, Map::kPrototypeOffset));
+ // Get the prototype from the initial map.
+ __ LoadP(result, FieldMemOperand(result, Map::kPrototypeOffset));
- // All done.
- __ bind(&done);
+ // All done.
+ __ bind(&done);
+ }
}
case EXTERNAL_INT16_ELEMENTS:
case INT16_ELEMENTS:
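+      // The algebraic halfword loads (lha/lhax) sign-extend as they load,
+      // so no separate extsh is needed.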
if (key_is_constant) {
- __ LoadHalfWord(result, mem_operand, r0);
+ __ LoadHalfWordArith(result, mem_operand, r0);
} else {
- __ lhzx(result, mem_operand);
+ __ lhax(result, mem_operand);
}
- __ extsh(result, result);
break;
case EXTERNAL_UINT16_ELEMENTS:
case UINT16_ELEMENTS:
case EXTERNAL_INT32_ELEMENTS:
case INT32_ELEMENTS:
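+      // Likewise, lwa/lwax sign-extend the loaded word, so the PPC64-only
+      // extsw is no longer needed.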
if (key_is_constant) {
- __ LoadWord(result, mem_operand, r0);
+ __ LoadWordArith(result, mem_operand, r0);
} else {
- __ lwzx(result, mem_operand);
+ __ lwax(result, mem_operand);
}
-#if V8_TARGET_ARCH_PPC64
- __ extsw(result, result);
-#endif
break;
case EXTERNAL_UINT32_ELEMENTS:
case UINT32_ELEMENTS:
hinstr->elements_kind() == FAST_SMI_ELEMENTS) {
DCHECK(!requires_hole_check);
// Read int value directly from upper half of the smi.
- STATIC_ASSERT(kSmiTag == 0);
- STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 32);
-#if V8_TARGET_LITTLE_ENDIAN
- offset += kPointerSize / 2;
-#endif
+ offset = SmiWordOffset(offset);
}
#endif
__ subi(result, sp, Operand(2 * kPointerSize));
} else {
// Check if the calling frame is an arguments adaptor frame.
- Label done, adapted;
__ LoadP(scratch, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
__ LoadP(result,
MemOperand(scratch, StandardFrameConstants::kContextOffset));
// Result is the frame pointer for the frame if not adapted and for the real
// frame below the adaptor frame if adapted.
- __ beq(&adapted);
- __ mr(result, fp);
- __ b(&done);
+ if (CpuFeatures::IsSupported(ISELECT)) {
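+    // eq is set when the caller frame is an arguments adaptor: select its
+    // frame pointer (in scratch), otherwise the current fp.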
+ __ isel(eq, result, scratch, fp);
+ } else {
+ Label done, adapted;
+ __ beq(&adapted);
+ __ mr(result, fp);
+ __ b(&done);
- __ bind(&adapted);
- __ mr(result, scratch);
- __ bind(&done);
+ __ bind(&adapted);
+ __ mr(result, scratch);
+ __ bind(&done);
+ }
}
}
// Deoptimize if the receiver is not a JS object.
__ TestIfSmi(receiver, r0);
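+  // TestIfSmi uses a record-form instruction, which sets cr0 rather than
+  // the default cr7.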
- DeoptimizeIf(eq, instr, Deoptimizer::kSmi);
+ DeoptimizeIf(eq, instr, Deoptimizer::kSmi, cr0);
__ CompareObjectType(receiver, scratch, scratch, FIRST_SPEC_OBJECT_TYPE);
DeoptimizeIf(lt, instr, Deoptimizer::kNotAJavaScriptObject);
void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
int formal_parameter_count, int arity,
- LInstruction* instr, R4State r4_state) {
+ LInstruction* instr) {
bool dont_adapt_arguments =
formal_parameter_count == SharedFunctionInfo::kDontAdaptArgumentsSentinel;
bool can_invoke_directly =
dont_adapt_arguments || formal_parameter_count == arity;
+ Register function_reg = r4;
+
LPointerMap* pointers = instr->pointer_map();
if (can_invoke_directly) {
- if (r4_state == R4_UNINITIALIZED) {
- __ Move(r4, function);
- }
-
// Change context.
- __ LoadP(cp, FieldMemOperand(r4, JSFunction::kContextOffset));
+ __ LoadP(cp, FieldMemOperand(function_reg, JSFunction::kContextOffset));
    // Set r3 to arguments count if adaptation is not needed. Assumes that r3
// is available to write to at this point.
if (is_self_call) {
__ CallSelf();
} else {
- __ LoadP(ip, FieldMemOperand(r4, JSFunction::kCodeEntryOffset));
+ __ LoadP(ip, FieldMemOperand(function_reg, JSFunction::kCodeEntryOffset));
__ CallJSEntry(ip);
}
SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
ParameterCount count(arity);
ParameterCount expected(formal_parameter_count);
- __ InvokeFunction(function, expected, count, CALL_FUNCTION, generator);
+ __ InvokeFunction(function_reg, expected, count, CALL_FUNCTION, generator);
}
}
// [-0.5, -0].
DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
}
- Label return_zero;
__ fcmpu(input, dot_five);
- __ bne(&return_zero);
- __ li(result, Operand(1)); // +0.5.
- __ b(&done);
- // Remaining cases: [+0, +0.5[ or [-0.5, +0.5[, depending on
- // flag kBailoutOnMinusZero.
- __ bind(&return_zero);
- __ li(result, Operand::Zero());
- __ b(&done);
+ if (CpuFeatures::IsSupported(ISELECT)) {
+ __ li(result, Operand(1));
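+      // Encoding r0 as isel's first source reads the constant zero, so
+      // values below 0.5 yield 0 while the equal case keeps the 1.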
+ __ isel(lt, result, r0, result);
+ __ b(&done);
+ } else {
+ Label return_zero;
+ __ bne(&return_zero);
+ __ li(result, Operand(1)); // +0.5.
+ __ b(&done);
+ // Remaining cases: [+0, +0.5[ or [-0.5, +0.5[, depending on
+ // flag kBailoutOnMinusZero.
+ __ bind(&return_zero);
+ __ li(result, Operand::Zero());
+ __ b(&done);
+ }
__ bind(&convert);
__ fadd(input_plus_dot_five, input, dot_five);
Representation exponent_type = instr->hydrogen()->right()->representation();
// Having marked this as a call, we can use any registers.
// Just make sure that the input/output registers are the expected ones.
-#ifdef DEBUG
Register tagged_exponent = MathPowTaggedDescriptor::exponent();
-#endif
DCHECK(!instr->right()->IsDoubleRegister() ||
ToDoubleRegister(instr->right()).is(d2));
DCHECK(!instr->right()->IsRegister() ||
__ CallStub(&stub);
} else if (exponent_type.IsTagged()) {
Label no_deopt;
- __ JumpIfSmi(r5, &no_deopt);
- __ LoadP(r10, FieldMemOperand(r5, HeapObject::kMapOffset));
+ __ JumpIfSmi(tagged_exponent, &no_deopt);
+ DCHECK(!r10.is(tagged_exponent));
+ __ LoadP(r10, FieldMemOperand(tagged_exponent, HeapObject::kMapOffset));
__ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
__ cmp(r10, ip);
DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber);
} else {
CallKnownFunction(known_function,
instr->hydrogen()->formal_parameter_count(),
- instr->arity(), instr, R4_CONTAINS_TARGET);
+ instr->arity(), instr);
}
}
DCHECK(name.is(LoadDescriptor::NameRegister()));
DCHECK(receiver.is(r4));
DCHECK(name.is(r5));
+ Register scratch = r7;
+ Register extra = r8;
+ Register extra2 = r9;
+ Register extra3 = r10;
- Register scratch = r6;
- Register extra = r7;
- Register extra2 = r8;
- Register extra3 = r9;
+#ifdef DEBUG
+ Register slot = FLAG_vector_ics ? ToRegister(instr->slot()) : no_reg;
+ Register vector = FLAG_vector_ics ? ToRegister(instr->vector()) : no_reg;
+ DCHECK(!FLAG_vector_ics ||
+ !AreAliased(slot, vector, scratch, extra, extra2, extra3));
+#endif
// Important for the tail-call.
bool must_teardown_frame = NeedsEagerFrame();
- // The probe will tail call to a handler if found.
- isolate()->stub_cache()->GenerateProbe(masm(), instr->hydrogen()->flags(),
- must_teardown_frame, receiver, name,
- scratch, extra, extra2, extra3);
+ if (!instr->hydrogen()->is_just_miss()) {
+ DCHECK(!instr->hydrogen()->is_keyed_load());
+
+ // The probe will tail call to a handler if found.
+ isolate()->stub_cache()->GenerateProbe(
+ masm(), Code::LOAD_IC, instr->hydrogen()->flags(), must_teardown_frame,
+ receiver, name, scratch, extra, extra2, extra3);
+ }
// Tail call to miss if we ended up here.
if (must_teardown_frame) __ LeaveFrame(StackFrame::INTERNAL);
- LoadIC::GenerateMiss(masm());
+ if (instr->hydrogen()->is_keyed_load()) {
+ KeyedLoadIC::GenerateMiss(masm());
+ } else {
+ LoadIC::GenerateMiss(masm());
+ }
}
void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
DCHECK(ToRegister(instr->result()).is(r3));
- LPointerMap* pointers = instr->pointer_map();
- SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
+ if (instr->hydrogen()->IsTailCall()) {
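+    // A tail call never returns here, so tear down the frame and jump;
+    // no safepoint or lazy-deopt bookkeeping is recorded.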
+ if (NeedsEagerFrame()) __ LeaveFrame(StackFrame::INTERNAL);
- if (instr->target()->IsConstantOperand()) {
- LConstantOperand* target = LConstantOperand::cast(instr->target());
- Handle<Code> code = Handle<Code>::cast(ToHandle(target));
- generator.BeforeCall(__ CallSize(code, RelocInfo::CODE_TARGET));
- __ Call(code, RelocInfo::CODE_TARGET);
+ if (instr->target()->IsConstantOperand()) {
+ LConstantOperand* target = LConstantOperand::cast(instr->target());
+ Handle<Code> code = Handle<Code>::cast(ToHandle(target));
+ __ Jump(code, RelocInfo::CODE_TARGET);
+ } else {
+ DCHECK(instr->target()->IsRegister());
+ Register target = ToRegister(instr->target());
+ __ addi(ip, target, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ JumpToJSEntry(ip);
+ }
} else {
- DCHECK(instr->target()->IsRegister());
- Register target = ToRegister(instr->target());
- generator.BeforeCall(__ CallSize(target));
- __ addi(ip, target, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ CallJSEntry(ip);
+ LPointerMap* pointers = instr->pointer_map();
+ SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
+
+ if (instr->target()->IsConstantOperand()) {
+ LConstantOperand* target = LConstantOperand::cast(instr->target());
+ Handle<Code> code = Handle<Code>::cast(ToHandle(target));
+ generator.BeforeCall(__ CallSize(code, RelocInfo::CODE_TARGET));
+ __ Call(code, RelocInfo::CODE_TARGET);
+ } else {
+ DCHECK(instr->target()->IsRegister());
+ Register target = ToRegister(instr->target());
+ generator.BeforeCall(__ CallSize(target));
+ __ addi(ip, target, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ CallJSEntry(ip);
+ }
+ generator.AfterCall();
}
- generator.AfterCall();
}
DCHECK(ToRegister(instr->result()).is(r3));
int arity = instr->arity();
- CallFunctionStub stub(isolate(), arity, instr->hydrogen()->function_flags());
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ CallFunctionFlags flags = instr->hydrogen()->function_flags();
+ if (instr->hydrogen()->HasVectorAndSlot()) {
+ Register slot_register = ToRegister(instr->temp_slot());
+ Register vector_register = ToRegister(instr->temp_vector());
+ DCHECK(slot_register.is(r6));
+ DCHECK(vector_register.is(r5));
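+    // The CallIC convention (checked above) takes the feedback vector in
+    // r5 and the Smi-tagged slot index in r6.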
+
+ AllowDeferredHandleDereference vector_structure_check;
+ Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
+ int index = vector->GetIndex(instr->hydrogen()->slot());
+
+ __ Move(vector_register, vector);
+ __ LoadSmiLiteral(slot_register, Smi::FromInt(index));
+
+ CallICState::CallType call_type =
+ (flags & CALL_AS_METHOD) ? CallICState::METHOD : CallICState::FUNCTION;
+
+ Handle<Code> ic =
+ CodeFactory::CallICInOptimizedCode(isolate(), arity, call_type).code();
+ CallCode(ic, RelocInfo::CODE_TARGET, instr);
+ } else {
+ CallFunctionStub stub(isolate(), arity, flags);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ }
}
DCHECK(!representation.IsSmi() || !instr->value()->IsConstantOperand() ||
IsSmi(LConstantOperand::cast(instr->value())));
#endif
- if (representation.IsDouble()) {
+ if (!FLAG_unbox_double_fields && representation.IsDouble()) {
DCHECK(access.IsInobject());
DCHECK(!hinstr->has_transition());
DCHECK(!hinstr->NeedsWriteBarrier());
}
// Do the store.
- Register value = ToRegister(instr->value());
-
+ Register record_dest = object;
+ Register record_value = no_reg;
+ Register record_scratch = scratch;
#if V8_TARGET_ARCH_PPC64
- // 64-bit Smi optimization
- if (representation.IsSmi() &&
- hinstr->value()->representation().IsInteger32()) {
- DCHECK(hinstr->store_mode() == STORE_TO_INITIALIZED_ENTRY);
- // Store int value directly to upper half of the smi.
- STATIC_ASSERT(kSmiTag == 0);
- STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 32);
-#if V8_TARGET_LITTLE_ENDIAN
- offset += kPointerSize / 2;
-#endif
- representation = Representation::Integer32();
- }
-#endif
-
- if (access.IsInobject()) {
- MemOperand operand = FieldMemOperand(object, offset);
- __ StoreRepresentation(value, operand, representation, r0);
+ if (FLAG_unbox_double_fields && representation.IsDouble()) {
+ DCHECK(access.IsInobject());
+ DoubleRegister value = ToDoubleRegister(instr->value());
+ __ stfd(value, FieldMemOperand(object, offset));
if (hinstr->NeedsWriteBarrier()) {
- // Update the write barrier for the object for in-object properties.
- __ RecordWriteField(
- object, offset, value, scratch, GetLinkRegisterState(), kSaveFPRegs,
- EMIT_REMEMBERED_SET, hinstr->SmiCheckForWriteBarrier(),
- hinstr->PointersToHereCheckForValue());
+ record_value = ToRegister(instr->value());
}
} else {
- __ LoadP(scratch, FieldMemOperand(object, JSObject::kPropertiesOffset));
- MemOperand operand = FieldMemOperand(scratch, offset);
- __ StoreRepresentation(value, operand, representation, r0);
- if (hinstr->NeedsWriteBarrier()) {
- // Update the write barrier for the properties array.
- // object is used as a scratch register.
- __ RecordWriteField(
- scratch, offset, value, object, GetLinkRegisterState(), kSaveFPRegs,
- EMIT_REMEMBERED_SET, hinstr->SmiCheckForWriteBarrier(),
- hinstr->PointersToHereCheckForValue());
+ if (representation.IsSmi() &&
+ hinstr->value()->representation().IsInteger32()) {
+ DCHECK(hinstr->store_mode() == STORE_TO_INITIALIZED_ENTRY);
+ // 64-bit Smi optimization
+ // Store int value directly to upper half of the smi.
+ offset = SmiWordOffset(offset);
+ representation = Representation::Integer32();
+ }
+#endif
+ if (access.IsInobject()) {
+ Register value = ToRegister(instr->value());
+ MemOperand operand = FieldMemOperand(object, offset);
+ __ StoreRepresentation(value, operand, representation, r0);
+ record_value = value;
+ } else {
+ Register value = ToRegister(instr->value());
+ __ LoadP(scratch, FieldMemOperand(object, JSObject::kPropertiesOffset));
+ MemOperand operand = FieldMemOperand(scratch, offset);
+ __ StoreRepresentation(value, operand, representation, r0);
+ record_dest = scratch;
+ record_value = value;
+ record_scratch = object;
}
+#if V8_TARGET_ARCH_PPC64
+ }
+#endif
+
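+  // A single RecordWriteField now covers every store variant; the
+  // branches above selected the object, value and scratch registers.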
+ if (hinstr->NeedsWriteBarrier()) {
+ __ RecordWriteField(record_dest, offset, record_value, record_scratch,
+ GetLinkRegisterState(), kSaveFPRegs,
+ EMIT_REMEMBERED_SET, hinstr->SmiCheckForWriteBarrier(),
+ hinstr->PointersToHereCheckForValue());
}
}
DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
__ mov(StoreDescriptor::NameRegister(), Operand(instr->name()));
- Handle<Code> ic = StoreIC::initialize_stub(isolate(), instr->strict_mode());
+ Handle<Code> ic = StoreIC::initialize_stub(isolate(), instr->language_mode());
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
}
if (instr->NeedsCanonicalization()) {
- // Force a canonical NaN.
+ // Turn potential sNaN value into qNaN.
__ CanonicalizeNaN(double_scratch, value);
__ stfd(double_scratch, MemOperand(elements, base_offset));
} else {
DCHECK(hinstr->store_mode() == STORE_TO_INITIALIZED_ENTRY);
DCHECK(hinstr->elements_kind() == FAST_SMI_ELEMENTS);
// Store int value directly to upper half of the smi.
- STATIC_ASSERT(kSmiTag == 0);
- STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 32);
-#if V8_TARGET_LITTLE_ENDIAN
- offset += kPointerSize / 2;
-#endif
+ offset = SmiWordOffset(offset);
}
#endif
DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
Handle<Code> ic =
- CodeFactory::KeyedStoreIC(isolate(), instr->strict_mode()).code();
+ CodeFactory::KeyedStoreIC(isolate(), instr->language_mode()).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
Register input = ToRegister(instr->value());
Register result = ToRegister(instr->result());
if (instr->needs_check()) {
- STATIC_ASSERT(kHeapObjectTag == 1);
    // If the input is a HeapObject, the value of scratch won't be zero.
__ andi(scratch, input, Operand(kHeapObjectTag));
__ SmiUntag(result, input);
__ bind(&check_false);
__ LoadRoot(ip, Heap::kFalseValueRootIndex);
__ cmp(input_reg, ip);
- DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumberUndefinedTrueFalse,
- cr7);
+ DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumberUndefinedBoolean);
__ li(input_reg, Operand::Zero());
} else {
- DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber, cr7);
+ DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber);
__ lfd(double_scratch2,
FieldMemOperand(input_reg, HeapNumber::kValueOffset));
}
__ TryDoubleToInt32Exact(input_reg, double_scratch2, scratch1,
double_scratch);
- DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN, cr7);
+ DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN);
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
__ cmpi(input_reg, Operand::Zero());
FieldMemOperand(scratch2, HeapNumber::kValueOffset +
Register::kExponentOffset));
__ cmpwi(scratch1, Operand::Zero());
- DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero, cr7);
+ DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
}
}
__ bind(&done);
__ bind(deferred->exit());
if (instr->hydrogen()->MustPrefillWithFiller()) {
- STATIC_ASSERT(kHeapObjectTag == 1);
if (instr->size()->IsConstantOperand()) {
int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
__ LoadIntLiteral(scratch, size - kHeapObjectTag);
// space for nested functions that don't need literals cloning.
bool pretenure = instr->hydrogen()->pretenure();
if (!pretenure && instr->hydrogen()->has_no_literals()) {
- FastNewClosureStub stub(isolate(), instr->hydrogen()->strict_mode(),
+ FastNewClosureStub stub(isolate(), instr->hydrogen()->language_mode(),
instr->hydrogen()->kind());
__ mov(r5, Operand(instr->hydrogen()->shared_info()));
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
#undef DECLARE_DO
private:
- StrictMode strict_mode() const { return info()->strict_mode(); }
+ LanguageMode language_mode() const { return info()->language_mode(); }
Scope* scope() const { return scope_; }
void CallRuntimeFromDeferred(Runtime::FunctionId id, int argc,
LInstruction* instr, LOperand* context);
- enum R4State { R4_UNINITIALIZED, R4_CONTAINS_TARGET };
-
// Generate a direct call to a known function. Expects the function
// to be in r4.
void CallKnownFunction(Handle<JSFunction> function,
int formal_parameter_count, int arity,
- LInstruction* instr, R4State r4_state);
+ LInstruction* instr);
void RecordSafepointWithLazyDeopt(LInstruction* instr,
SafepointMode safepoint_mode);
void RegisterEnvironmentForDeoptimization(LEnvironment* environment,
Safepoint::DeoptMode mode);
void DeoptimizeIf(Condition condition, LInstruction* instr,
- const char* detail, Deoptimizer::BailoutType bailout_type,
- CRegister cr = cr7);
+ Deoptimizer::DeoptReason deopt_reason,
+ Deoptimizer::BailoutType bailout_type, CRegister cr = cr7);
void DeoptimizeIf(Condition condition, LInstruction* instr,
- const char* detail, CRegister cr = cr7);
+ Deoptimizer::DeoptReason deopt_reason, CRegister cr = cr7);
void AddToTranslation(LEnvironment* environment, Translation* translation,
LOperand* op, bool is_tagged, bool is_uint32,
}
+void LCallFunction::PrintDataTo(StringStream* stream) {
+ context()->PrintTo(stream);
+ stream->Add(" ");
+ function()->PrintTo(stream);
+ if (hydrogen()->HasVectorAndSlot()) {
+ stream->Add(" (type-feedback-vector ");
+ temp_vector()->PrintTo(stream);
+ stream->Add(" ");
+ temp_slot()->PrintTo(stream);
+ stream->Add(")");
+ }
+}
+
+
void LCallJSFunction::PrintDataTo(StringStream* stream) {
stream->Add("= ");
function()->PrintTo(stream);
UseFixed(instr->receiver(), LoadDescriptor::ReceiverRegister());
LOperand* name_register =
UseFixed(instr->name(), LoadDescriptor::NameRegister());
+ LOperand* slot = NULL;
+ LOperand* vector = NULL;
+ if (FLAG_vector_ics) {
+ slot = UseFixed(instr->slot(), VectorLoadICDescriptor::SlotRegister());
+ vector =
+ UseFixed(instr->vector(), VectorLoadICDescriptor::VectorRegister());
+ }
+
// Not marked as call. It can't deoptimize, and it never returns.
return new (zone()) LTailCallThroughMegamorphicCache(
- context, receiver_register, name_register);
+ context, receiver_register, name_register, slot, vector);
}
LInstruction* LChunkBuilder::DoCallFunction(HCallFunction* instr) {
LOperand* context = UseFixed(instr->context(), cp);
LOperand* function = UseFixed(instr->function(), r4);
- LCallFunction* call = new (zone()) LCallFunction(context, function);
+ LOperand* slot = NULL;
+ LOperand* vector = NULL;
+ if (instr->HasVectorAndSlot()) {
+ slot = FixedTemp(r6);
+ vector = FixedTemp(r5);
+ }
+
+ LCallFunction* call =
+ new (zone()) LCallFunction(context, function, slot, vector);
return MarkAsCall(DefineFixed(call, r3), instr);
}
DCHECK(instr->right()->representation().Equals(instr->representation()));
LOperand* dividend = UseRegister(instr->left());
LOperand* divisor = UseRegister(instr->right());
- LFlooringDivI* div = new (zone()) LFlooringDivI(dividend, divisor);
- return AssignEnvironment(DefineAsRegister(div));
+ LInstruction* result =
+ DefineAsRegister(new (zone()) LFlooringDivI(dividend, divisor));
+ if (instr->CheckFlag(HValue::kCanBeDivByZero) ||
+ instr->CheckFlag(HValue::kBailoutOnMinusZero) ||
+ (instr->CheckFlag(HValue::kCanOverflow) &&
+ !instr->CheckFlag(HValue::kAllUsesTruncatingToInt32))) {
+ result = AssignEnvironment(result);
+ }
+ return result;
}
bool can_overflow = instr->CheckFlag(HValue::kCanOverflow);
bool bailout_on_minus_zero = instr->CheckFlag(HValue::kBailoutOnMinusZero);
+ int32_t constant_value = 0;
if (right->IsConstant()) {
HConstant* constant = HConstant::cast(right);
- int32_t constant_value = constant->Integer32Value();
+ constant_value = constant->Integer32Value();
// Constants -1, 0 and 1 can be optimized if the result can overflow.
// For other constants, it can be optimized only without overflow.
if (!can_overflow || ((constant_value >= -1) && (constant_value <= 1))) {
right_op = UseRegister(right);
}
LMulI* mul = new (zone()) LMulI(left_op, right_op);
- if (can_overflow || bailout_on_minus_zero) {
+ if (right_op->IsConstantOperand()
+ ? ((can_overflow && constant_value == -1) ||
+ (bailout_on_minus_zero && constant_value <= 0))
+ : (can_overflow || bailout_on_minus_zero)) {
AssignEnvironment(mul);
}
return DefineAsRegister(mul);
} else if (instr->representation().IsDouble()) {
- if (instr->HasOneUse() &&
- (instr->uses().value()->IsAdd() || instr->uses().value()->IsSub())) {
- HBinaryOperation* use = HBinaryOperation::cast(instr->uses().value());
-
- if (use->IsAdd() && instr == use->left()) {
- // This mul is the lhs of an add. The add and mul will be folded into a
- // multiply-add in DoAdd.
- return NULL;
- }
- if (instr == use->right() && use->IsAdd() &&
- !(use->left()->IsMul() && use->left()->HasOneUse())) {
- // This mul is the rhs of an add, where the lhs is not another mul.
- // The add and mul will be folded into a multiply-add in DoAdd.
- return NULL;
- }
- if (instr == use->left() && use->IsSub()) {
- // This mul is the lhs of a sub. The mul and sub will be folded into a
- // multiply-sub in DoSub.
- return NULL;
- }
- }
-
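+    // Note: multiply-add matching is intentionally no longer attempted; a
+    // fused fmadd rounds only once and can differ from the separately
+    // rounded multiply and add.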
return DoArithmeticD(Token::MUL, instr);
} else {
return DoArithmeticT(Token::MUL, instr);
}
return result;
} else if (instr->representation().IsDouble()) {
- if (instr->left()->IsMul() && instr->left()->HasOneUse()) {
- return DoMultiplySub(instr->right(), HMul::cast(instr->left()));
- }
-
return DoArithmeticD(Token::SUB, instr);
} else {
return DoArithmeticT(Token::SUB, instr);
LInstruction* result = DefineAsRegister(add);
return result;
} else if (instr->representation().IsDouble()) {
- if (instr->left()->IsMul() && instr->left()->HasOneUse()) {
- return DoMultiplyAdd(HMul::cast(instr->left()), instr->right());
- }
-
- if (instr->right()->IsMul() && instr->right()->HasOneUse()) {
- DCHECK(!instr->left()->IsMul() || !instr->left()->HasOneUse());
- return DoMultiplyAdd(HMul::cast(instr->right()), instr->left());
- }
-
return DoArithmeticD(Token::ADD, instr);
} else {
return DoArithmeticT(Token::ADD, instr);
LOperand* global_object =
UseFixed(instr->global_object(), LoadDescriptor::ReceiverRegister());
LOperand* vector = NULL;
- if (FLAG_vector_ics) {
+ if (instr->HasVectorAndSlot()) {
vector = FixedTemp(VectorLoadICDescriptor::VectorRegister());
}
LLoadGlobalGeneric* result =
LOperand* object =
UseFixed(instr->object(), LoadDescriptor::ReceiverRegister());
LOperand* vector = NULL;
- if (FLAG_vector_ics) {
+ if (instr->HasVectorAndSlot()) {
vector = FixedTemp(VectorLoadICDescriptor::VectorRegister());
}
UseFixed(instr->object(), LoadDescriptor::ReceiverRegister());
LOperand* key = UseFixed(instr->key(), LoadDescriptor::NameRegister());
LOperand* vector = NULL;
- if (FLAG_vector_ics) {
+ if (instr->HasVectorAndSlot()) {
vector = FixedTemp(VectorLoadICDescriptor::VectorRegister());
}
STATIC_ASSERT(R == 0 || R == 1);
bool HasResult() const FINAL { return R != 0 && result() != NULL; }
void set_result(LOperand* operand) { results_[0] = operand; }
- LOperand* result() const { return results_[0]; }
+ LOperand* result() const OVERRIDE { return results_[0]; }
protected:
EmbeddedContainer<LOperand*, R> results_;
class LTailCallThroughMegamorphicCache FINAL
- : public LTemplateInstruction<0, 3, 0> {
+ : public LTemplateInstruction<0, 5, 0> {
public:
- explicit LTailCallThroughMegamorphicCache(LOperand* context,
- LOperand* receiver,
- LOperand* name) {
+ LTailCallThroughMegamorphicCache(LOperand* context, LOperand* receiver,
+ LOperand* name, LOperand* slot,
+ LOperand* vector) {
inputs_[0] = context;
inputs_[1] = receiver;
inputs_[2] = name;
+ inputs_[3] = slot;
+ inputs_[4] = vector;
}
LOperand* context() { return inputs_[0]; }
LOperand* receiver() { return inputs_[1]; }
LOperand* name() { return inputs_[2]; }
+ LOperand* slot() { return inputs_[3]; }
+ LOperand* vector() { return inputs_[4]; }
DECLARE_CONCRETE_INSTRUCTION(TailCallThroughMegamorphicCache,
"tail-call-through-megamorphic-cache")
DECLARE_HYDROGEN_ACCESSOR(TailCallThroughMegamorphicCache)
};
+
class LUnknownOSRValue FINAL : public LTemplateInstruction<1, 0, 0> {
public:
bool HasInterestingComment(LCodeGen* gen) const OVERRIDE { return false; }
DECLARE_HYDROGEN_ACCESSOR(Constant)
double value() const { return hydrogen()->DoubleValue(); }
+ uint64_t bits() const { return hydrogen()->DoubleValueAsBits(); }
};
LOperand* function() { return inputs_[0]; }
LOperand* code_object() { return inputs_[1]; }
- virtual void PrintDataTo(StringStream* stream);
+ void PrintDataTo(StringStream* stream) OVERRIDE;
DECLARE_CONCRETE_INSTRUCTION(StoreCodeEntry, "store-code-entry")
DECLARE_HYDROGEN_ACCESSOR(StoreCodeEntry)
const CallInterfaceDescriptor descriptor() { return descriptor_; }
+ DECLARE_HYDROGEN_ACCESSOR(CallWithDescriptor)
+
private:
DECLARE_CONCRETE_INSTRUCTION(CallWithDescriptor, "call-with-descriptor")
- DECLARE_HYDROGEN_ACCESSOR(CallWithDescriptor)
void PrintDataTo(StringStream* stream) OVERRIDE;
};
-class LCallFunction FINAL : public LTemplateInstruction<1, 2, 0> {
+class LCallFunction FINAL : public LTemplateInstruction<1, 2, 2> {
public:
- LCallFunction(LOperand* context, LOperand* function) {
+ LCallFunction(LOperand* context, LOperand* function, LOperand* slot,
+ LOperand* vector) {
inputs_[0] = context;
inputs_[1] = function;
+ temps_[0] = slot;
+ temps_[1] = vector;
}
LOperand* context() { return inputs_[0]; }
LOperand* function() { return inputs_[1]; }
+ LOperand* temp_slot() { return temps_[0]; }
+ LOperand* temp_vector() { return temps_[1]; }
DECLARE_CONCRETE_INSTRUCTION(CallFunction, "call-function")
DECLARE_HYDROGEN_ACCESSOR(CallFunction)
int arity() const { return hydrogen()->argument_count() - 1; }
+ void PrintDataTo(StringStream* stream) OVERRIDE;
};
void PrintDataTo(StringStream* stream) OVERRIDE;
Handle<Object> name() const { return hydrogen()->name(); }
- StrictMode strict_mode() { return hydrogen()->strict_mode(); }
+ LanguageMode language_mode() { return hydrogen()->language_mode(); }
};
void PrintDataTo(StringStream* stream) OVERRIDE;
- StrictMode strict_mode() { return hydrogen()->strict_mode(); }
+ LanguageMode language_mode() { return hydrogen()->language_mode(); }
};
void MacroAssembler::CanonicalizeNaN(const DoubleRegister dst,
const DoubleRegister src) {
- Label done;
-
- // Test for NaN
- fcmpu(src, src);
-
- if (dst.is(src)) {
- bordered(&done);
- } else {
- Label is_nan;
- bunordered(&is_nan);
- fmr(dst, src);
- b(&done);
- bind(&is_nan);
- }
-
- // Replace with canonical NaN.
- double nan_value = FixedDoubleArray::canonical_not_the_hole_nan_as_double();
- LoadDoubleLiteral(dst, nan_value, r0);
-
- bind(&done);
+  // Turn potential sNaN into qNaN.
+  // Use fsub rather than fadd: x - (+0.0) leaves the sign of -0 intact,
+  // while (-0.0) + (+0.0) would round to +0.0.
+  fsub(dst, src, kDoubleRegZero);
}
void MacroAssembler::LeaveExitFrame(bool save_doubles, Register argument_count,
- bool restore_context) {
+ bool restore_context,
+ bool argument_count_is_length) {
#if V8_OOL_CONSTANT_POOL
ConstantPoolUnavailableScope constant_pool_unavailable(this);
#endif
LeaveFrame(StackFrame::EXIT);
if (argument_count.is_valid()) {
- ShiftLeftImm(argument_count, argument_count, Operand(kPointerSizeLog2));
+ if (!argument_count_is_length) {
+ ShiftLeftImm(argument_count, argument_count, Operand(kPointerSizeLog2));
+ }
add(sp, sp, argument_count);
}
}
ExternalReference::new_space_allocation_top_address(isolate());
// Make sure the object has no tag before resetting top.
- mov(r0, Operand(~kHeapObjectTagMask));
- and_(object, object, r0);
-// was.. and_(object, object, Operand(~kHeapObjectTagMask));
+ ClearRightImm(object, object, Operand(kHeapObjectTagSize));
#ifdef DEBUG
// Check that the object un-allocated is below the current top.
mov(scratch, Operand(new_space_allocation_top));
DONT_DO_SMI_CHECK);
lfd(double_scratch, FieldMemOperand(value_reg, HeapNumber::kValueOffset));
- // Force a canonical NaN.
+ // Double value, turn potential sNaN into qNaN.
CanonicalizeNaN(double_scratch);
b(&store);
DCHECK(!overflow_dst.is(left));
DCHECK(!overflow_dst.is(right));
+ bool left_is_right = left.is(right);
+ RCBit xorRC = left_is_right ? SetRC : LeaveRC;
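+  // When left == right the same-sign precondition holds trivially, so a
+  // single XOR against dst already yields the overflow sign bit; the
+  // final AND is skipped and the record bit moves onto that XOR.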
+
// C = A+B; C overflows if A/B have same sign and C has diff sign than A
if (dst.is(left)) {
mr(scratch, left); // Preserve left.
add(dst, left, right); // Left is overwritten.
- xor_(scratch, dst, scratch); // Original left.
- xor_(overflow_dst, dst, right);
+ xor_(overflow_dst, dst, scratch, xorRC); // Original left.
+ if (!left_is_right) xor_(scratch, dst, right);
} else if (dst.is(right)) {
mr(scratch, right); // Preserve right.
add(dst, left, right); // Right is overwritten.
- xor_(scratch, dst, scratch); // Original right.
- xor_(overflow_dst, dst, left);
+ xor_(overflow_dst, dst, left, xorRC);
+ if (!left_is_right) xor_(scratch, dst, scratch); // Original right.
} else {
add(dst, left, right);
- xor_(overflow_dst, dst, left);
- xor_(scratch, dst, right);
+ xor_(overflow_dst, dst, left, xorRC);
+ if (!left_is_right) xor_(scratch, dst, right);
}
- and_(overflow_dst, scratch, overflow_dst, SetRC);
+ if (!left_is_right) and_(overflow_dst, scratch, overflow_dst, SetRC);
}
}
-void MacroAssembler::DispatchMap(Register obj, Register scratch,
- Handle<Map> map, Handle<Code> success,
- SmiCheckType smi_check_type) {
+void MacroAssembler::DispatchWeakMap(Register obj, Register scratch1,
+ Register scratch2, Handle<WeakCell> cell,
+ Handle<Code> success,
+ SmiCheckType smi_check_type) {
Label fail;
if (smi_check_type == DO_SMI_CHECK) {
JumpIfSmi(obj, &fail);
}
- LoadP(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
- mov(r0, Operand(map));
- cmp(scratch, r0);
- bne(&fail);
- Jump(success, RelocInfo::CODE_TARGET, al);
+ LoadP(scratch1, FieldMemOperand(obj, HeapObject::kMapOffset));
+ CmpWeakValue(scratch1, cell, scratch2);
+ Jump(success, RelocInfo::CODE_TARGET, eq);
bind(&fail);
}
+void MacroAssembler::CmpWeakValue(Register value, Handle<WeakCell> cell,
+ Register scratch, CRegister cr) {
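+  // A cleared weak cell holds the Smi zero, which never compares equal to
+  // a heap object pointer.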
+ mov(scratch, Operand(cell));
+ LoadP(scratch, FieldMemOperand(scratch, WeakCell::kValueOffset));
+ cmp(value, scratch, cr);
+}
+
+
+void MacroAssembler::GetWeakValue(Register value, Handle<WeakCell> cell) {
+ mov(value, Operand(cell));
+ LoadP(value, FieldMemOperand(value, WeakCell::kValueOffset));
+}
+
+
+void MacroAssembler::LoadWeakValue(Register value, Handle<WeakCell> cell,
+ Label* miss) {
+ GetWeakValue(value, cell);
+ JumpIfSmi(value, miss);
+}
+
+
void MacroAssembler::TryGetFunctionPrototype(Register function, Register result,
Register scratch, Label* miss,
bool miss_on_bound_function) {
}
-static int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
- return ref0.address() - ref1.address();
-}
-
-
-void MacroAssembler::CallApiFunctionAndReturn(
- Register function_address, ExternalReference thunk_ref, int stack_space,
- MemOperand return_value_operand, MemOperand* context_restore_operand) {
- ExternalReference next_address =
- ExternalReference::handle_scope_next_address(isolate());
- const int kNextOffset = 0;
- const int kLimitOffset = AddressOffset(
- ExternalReference::handle_scope_limit_address(isolate()), next_address);
- const int kLevelOffset = AddressOffset(
- ExternalReference::handle_scope_level_address(isolate()), next_address);
-
- DCHECK(function_address.is(r4) || function_address.is(r5));
- Register scratch = r6;
-
- Label profiler_disabled;
- Label end_profiler_check;
- mov(scratch, Operand(ExternalReference::is_profiling_address(isolate())));
- lbz(scratch, MemOperand(scratch, 0));
- cmpi(scratch, Operand::Zero());
- beq(&profiler_disabled);
-
- // Additional parameter is the address of the actual callback.
- mov(scratch, Operand(thunk_ref));
- jmp(&end_profiler_check);
-
- bind(&profiler_disabled);
- mr(scratch, function_address);
- bind(&end_profiler_check);
-
- // Allocate HandleScope in callee-save registers.
- // r17 - next_address
- // r14 - next_address->kNextOffset
- // r15 - next_address->kLimitOffset
- // r16 - next_address->kLevelOffset
- mov(r17, Operand(next_address));
- LoadP(r14, MemOperand(r17, kNextOffset));
- LoadP(r15, MemOperand(r17, kLimitOffset));
- lwz(r16, MemOperand(r17, kLevelOffset));
- addi(r16, r16, Operand(1));
- stw(r16, MemOperand(r17, kLevelOffset));
-
- if (FLAG_log_timer_events) {
- FrameScope frame(this, StackFrame::MANUAL);
- PushSafepointRegisters();
- PrepareCallCFunction(1, r3);
- mov(r3, Operand(ExternalReference::isolate_address(isolate())));
- CallCFunction(ExternalReference::log_enter_external_function(isolate()), 1);
- PopSafepointRegisters();
- }
-
- // Native call returns to the DirectCEntry stub which redirects to the
- // return address pushed on stack (could have moved after GC).
- // DirectCEntry stub itself is generated early and never moves.
- DirectCEntryStub stub(isolate());
- stub.GenerateCall(this, scratch);
-
- if (FLAG_log_timer_events) {
- FrameScope frame(this, StackFrame::MANUAL);
- PushSafepointRegisters();
- PrepareCallCFunction(1, r3);
- mov(r3, Operand(ExternalReference::isolate_address(isolate())));
- CallCFunction(ExternalReference::log_leave_external_function(isolate()), 1);
- PopSafepointRegisters();
- }
-
- Label promote_scheduled_exception;
- Label exception_handled;
- Label delete_allocated_handles;
- Label leave_exit_frame;
- Label return_value_loaded;
-
- // load value from ReturnValue
- LoadP(r3, return_value_operand);
- bind(&return_value_loaded);
- // No more valid handles (the result handle was the last one). Restore
- // previous handle scope.
- StoreP(r14, MemOperand(r17, kNextOffset));
- if (emit_debug_code()) {
- lwz(r4, MemOperand(r17, kLevelOffset));
- cmp(r4, r16);
- Check(eq, kUnexpectedLevelAfterReturnFromApiCall);
- }
- subi(r16, r16, Operand(1));
- stw(r16, MemOperand(r17, kLevelOffset));
- LoadP(r0, MemOperand(r17, kLimitOffset));
- cmp(r15, r0);
- bne(&delete_allocated_handles);
-
- // Check if the function scheduled an exception.
- bind(&leave_exit_frame);
- LoadRoot(r14, Heap::kTheHoleValueRootIndex);
- mov(r15, Operand(ExternalReference::scheduled_exception_address(isolate())));
- LoadP(r15, MemOperand(r15));
- cmp(r14, r15);
- bne(&promote_scheduled_exception);
- bind(&exception_handled);
-
- bool restore_context = context_restore_operand != NULL;
- if (restore_context) {
- LoadP(cp, *context_restore_operand);
- }
- // LeaveExitFrame expects unwind space to be in a register.
- mov(r14, Operand(stack_space));
- LeaveExitFrame(false, r14, !restore_context);
- blr();
-
- bind(&promote_scheduled_exception);
- {
- FrameScope frame(this, StackFrame::INTERNAL);
- CallExternalReference(
- ExternalReference(Runtime::kPromoteScheduledException, isolate()), 0);
- }
- jmp(&exception_handled);
-
- // HandleScope limit has changed. Delete allocated extensions.
- bind(&delete_allocated_handles);
- StoreP(r15, MemOperand(r17, kLimitOffset));
- mr(r14, r3);
- PrepareCallCFunction(1, r15);
- mov(r3, Operand(ExternalReference::isolate_address(isolate())));
- CallCFunction(ExternalReference::delete_handle_scope_extensions(isolate()),
- 1);
- mr(r3, r14);
- b(&leave_exit_frame);
-}
-
-
bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
return has_frame_ || !stub->SometimesSetsUpAFrame();
}
result, double_scratch);
#if V8_TARGET_ARCH_PPC64
- TestIfInt32(result, scratch, r0);
+ TestIfInt32(result, r0);
#else
TestIfInt32(scratch, result, r0);
#endif
// Test for overflow
#if V8_TARGET_ARCH_PPC64
- TestIfInt32(result, scratch, r0);
+ TestIfInt32(result, r0);
#else
TestIfInt32(scratch, result, r0);
#endif
DoubleRegister double_input,
Label* done) {
DoubleRegister double_scratch = kScratchDoubleReg;
+#if !V8_TARGET_ARCH_PPC64
Register scratch = ip;
+#endif
ConvertDoubleToInt64(double_input,
#if !V8_TARGET_ARCH_PPC64
// Test for overflow
#if V8_TARGET_ARCH_PPC64
- TestIfInt32(result, scratch, r0);
+ TestIfInt32(result, r0);
#else
TestIfInt32(scratch, result, r0);
#endif
LoadP(scratch,
MemOperand(scratch, Context::SlotOffset(Context::JS_ARRAY_MAPS_INDEX)));
size_t offset = expected_kind * kPointerSize + FixedArrayBase::kHeaderSize;
- LoadP(scratch, FieldMemOperand(scratch, offset));
- cmp(map_in_out, scratch);
+ LoadP(ip, FieldMemOperand(scratch, offset));
+ cmp(map_in_out, ip);
bne(no_map_match);
// Use the transitioned cached map.
void MacroAssembler::JumpIfNotBothSmi(Register reg1, Register reg2,
Label* on_not_both_smi) {
STATIC_ASSERT(kSmiTag == 0);
- DCHECK_EQ(1, static_cast<int>(kSmiTagMask));
orx(r0, reg1, reg2, LeaveRC);
JumpIfNotSmi(r0, on_not_both_smi);
}
void MacroAssembler::UntagAndJumpIfSmi(Register dst, Register src,
Label* smi_case) {
STATIC_ASSERT(kSmiTag == 0);
- STATIC_ASSERT(kSmiTagSize == 1);
- TestBit(src, 0, r0);
+ TestBitRange(src, kSmiTagSize - 1, 0, r0);
SmiUntag(dst, src);
beq(smi_case, cr0);
}
void MacroAssembler::UntagAndJumpIfNotSmi(Register dst, Register src,
Label* non_smi_case) {
STATIC_ASSERT(kSmiTag == 0);
- STATIC_ASSERT(kSmiTagSize == 1);
- TestBit(src, 0, r0);
+ TestBitRange(src, kSmiTagSize - 1, 0, r0);
SmiUntag(dst, src);
bne(non_smi_case, cr0);
}
}
-void MacroAssembler::CheckMapDeprecated(Handle<Map> map, Register scratch,
- Label* if_deprecated) {
- if (map->CanBeDeprecated()) {
- mov(scratch, Operand(map));
- lwz(scratch, FieldMemOperand(scratch, Map::kBitField3Offset));
- ExtractBitMask(scratch, scratch, Map::Deprecated::kMask, SetRC);
- bne(if_deprecated, cr0);
- }
-}
-
-
void MacroAssembler::JumpIfBlack(Register object, Register scratch0,
Register scratch1, Label* on_black) {
HasColor(object, scratch0, scratch1, on_black, 1, 0); // kBlackBitPattern.
// if input_value > 255, output_value is 255
// otherwise output_value is the input_value
void MacroAssembler::ClampUint8(Register output_reg, Register input_reg) {
- Label done, negative_label, overflow_label;
int satval = (1 << 8) - 1;
- cmpi(input_reg, Operand::Zero());
- blt(&negative_label);
+ if (CpuFeatures::IsSupported(ISELECT)) {
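+    // Clamp without branches using two isels; r0 in the first source slot
+    // of isel reads as the constant zero.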
+    // Set to 0 if negative.
+ cmpi(input_reg, Operand::Zero());
+ isel(lt, output_reg, r0, input_reg);
- cmpi(input_reg, Operand(satval));
- bgt(&overflow_label);
- if (!output_reg.is(input_reg)) {
- mr(output_reg, input_reg);
- }
- b(&done);
-
- bind(&negative_label);
- li(output_reg, Operand::Zero()); // set to 0 if negative
- b(&done);
+    // Set to satval if > satval.
+ li(r0, Operand(satval));
+ cmpi(output_reg, Operand(satval));
+ isel(lt, output_reg, output_reg, r0);
+ } else {
+ Label done, negative_label, overflow_label;
+ cmpi(input_reg, Operand::Zero());
+ blt(&negative_label);
+
+ cmpi(input_reg, Operand(satval));
+ bgt(&overflow_label);
+ if (!output_reg.is(input_reg)) {
+ mr(output_reg, input_reg);
+ }
+ b(&done);
+ bind(&negative_label);
+ li(output_reg, Operand::Zero()); // set to 0 if negative
+ b(&done);
- bind(&overflow_label); // set to satval if > satval
- li(output_reg, Operand(satval));
+ bind(&overflow_label); // set to satval if > satval
+ li(output_reg, Operand(satval));
- bind(&done);
+ bind(&done);
+ }
}
}
+void MacroAssembler::LoadAccessor(Register dst, Register holder,
+ int accessor_index,
+ AccessorComponent accessor) {
+ LoadP(dst, FieldMemOperand(holder, HeapObject::kMapOffset));
+ LoadInstanceDescriptors(dst, dst);
+ LoadP(dst,
+ FieldMemOperand(dst, DescriptorArray::GetValueOffset(accessor_index)));
+ const int getterOffset = AccessorPair::kGetterOffset;
+ const int setterOffset = AccessorPair::kSetterOffset;
+ int offset = ((accessor == ACCESSOR_GETTER) ? getterOffset : setterOffset);
+ LoadP(dst, FieldMemOperand(dst, offset));
+}
+
+
void MacroAssembler::CheckEnumCache(Register null_value, Label* call_runtime) {
Register empty_fixed_array_value = r9;
LoadRoot(empty_fixed_array_value, Heap::kEmptyFixedArrayRootIndex);
Register scratch) {
int offset = mem.offset();
- if (!scratch.is(no_reg) && !is_int16(offset)) {
+ if (!is_int16(offset)) {
/* cannot use d-form */
- LoadIntLiteral(scratch, offset);
+ DCHECK(!scratch.is(no_reg));
+ mov(scratch, Operand(offset));
#if V8_TARGET_ARCH_PPC64
ldx(dst, MemOperand(mem.ra(), scratch));
#else
Register scratch) {
int offset = mem.offset();
- if (!scratch.is(no_reg) && !is_int16(offset)) {
+ if (!is_int16(offset)) {
/* cannot use d-form */
- LoadIntLiteral(scratch, offset);
+ DCHECK(!scratch.is(no_reg));
+ mov(scratch, Operand(offset));
#if V8_TARGET_ARCH_PPC64
stdx(src, MemOperand(mem.ra(), scratch));
#else
Register scratch) {
int offset = mem.offset();
- if (!scratch.is(no_reg) && !is_int16(offset)) {
- /* cannot use d-form */
- LoadIntLiteral(scratch, offset);
-#if V8_TARGET_ARCH_PPC64
- // lwax(dst, MemOperand(mem.ra(), scratch));
- DCHECK(0); // lwax not yet implemented
-#else
- lwzx(dst, MemOperand(mem.ra(), scratch));
-#endif
+ if (!is_int16(offset)) {
+ DCHECK(!scratch.is(no_reg));
+ mov(scratch, Operand(offset));
+ lwax(dst, MemOperand(mem.ra(), scratch));
} else {
#if V8_TARGET_ARCH_PPC64
int misaligned = (offset & 3);
}
+void MacroAssembler::LoadHalfWordArith(Register dst, const MemOperand& mem,
+ Register scratch) {
+ int offset = mem.offset();
+
+ if (!is_int16(offset)) {
+ DCHECK(!scratch.is(no_reg));
+ mov(scratch, Operand(offset));
+ lhax(dst, MemOperand(mem.ra(), scratch));
+ } else {
+ lha(dst, mem);
+ }
+}
+
+
// Variable length depending on whether offset fits into immediate field
// MemOperand currently only supports d-form
void MacroAssembler::LoadHalfWord(Register dst, const MemOperand& mem,
} else if (r.IsUInteger8()) {
LoadByte(dst, mem, scratch);
} else if (r.IsInteger16()) {
- LoadHalfWord(dst, mem, scratch);
- extsh(dst, dst);
+ LoadHalfWordArith(dst, mem, scratch);
} else if (r.IsUInteger16()) {
LoadHalfWord(dst, mem, scratch);
#if V8_TARGET_ARCH_PPC64
} else if (r.IsInteger32()) {
- LoadWord(dst, mem, scratch);
+ LoadWordArith(dst, mem, scratch);
#endif
} else {
LoadP(dst, mem, scratch);
}
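+
+// LoadDouble/StoreDouble fall back to the indexed forms (lfdx/stfdx) when
+// the offset does not fit the signed 16-bit d-form displacement.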
+void MacroAssembler::LoadDouble(DoubleRegister dst, const MemOperand& mem,
+ Register scratch) {
+ Register base = mem.ra();
+ int offset = mem.offset();
+
+ if (!is_int16(offset)) {
+ mov(scratch, Operand(offset));
+ lfdx(dst, MemOperand(base, scratch));
+ } else {
+ lfd(dst, mem);
+ }
+}
+
+
+void MacroAssembler::StoreDouble(DoubleRegister src, const MemOperand& mem,
+ Register scratch) {
+ Register base = mem.ra();
+ int offset = mem.offset();
+
+ if (!is_int16(offset)) {
+ mov(scratch, Operand(offset));
+ stfdx(src, MemOperand(base, scratch));
+ } else {
+ stfd(src, mem);
+ }
+}
+
+
void MacroAssembler::TestJSArrayForAllocationMemento(Register receiver_reg,
Register scratch_reg,
Label* no_memento_found) {
void CheckPageFlag(Register object, Register scratch, int mask, Condition cc,
Label* condition_met);
- void CheckMapDeprecated(Handle<Map> map, Register scratch,
- Label* if_deprecated);
-
// Check if object is in new space. Jumps if the object is not in new space.
// The register scratch can be object itself, but scratch will be clobbered.
void JumpIfNotInNewSpace(Register object, Register scratch, Label* branch) {
// Expect the number of values, pushed prior to the exit frame, to
// remove in a register (or no_reg, if there is nothing to remove).
void LeaveExitFrame(bool save_doubles, Register argument_count,
- bool restore_context);
+ bool restore_context,
+ bool argument_count_is_length = false);
// Get the actual activation frame alignment for target environment.
static int ActivationFrameAlignment();
void LoadDoubleLiteral(DoubleRegister result, double value, Register scratch);
void LoadWord(Register dst, const MemOperand& mem, Register scratch);
-
void LoadWordArith(Register dst, const MemOperand& mem,
Register scratch = no_reg);
-
void StoreWord(Register src, const MemOperand& mem, Register scratch);
void LoadHalfWord(Register dst, const MemOperand& mem, Register scratch);
-
+ void LoadHalfWordArith(Register dst, const MemOperand& mem,
+ Register scratch = no_reg);
void StoreHalfWord(Register src, const MemOperand& mem, Register scratch);
void LoadByte(Register dst, const MemOperand& mem, Register scratch);
-
void StoreByte(Register src, const MemOperand& mem, Register scratch);
void LoadRepresentation(Register dst, const MemOperand& mem, Representation r,
Register scratch = no_reg);
-
void StoreRepresentation(Register src, const MemOperand& mem,
Representation r, Register scratch = no_reg);
+ void LoadDouble(DoubleRegister dst, const MemOperand& mem, Register scratch);
+ void StoreDouble(DoubleRegister src, const MemOperand& mem, Register scratch);
+
// Move values between integer and floating point registers.
void MovIntToDouble(DoubleRegister dst, Register src, Register scratch);
void MovUnsignedIntToDouble(DoubleRegister dst, Register src,
Label* fail, SmiCheckType smi_check_type);
- // Check if the map of an object is equal to a specified map and branch to a
- // specified target if equal. Skip the smi check if not required (object is
- // known to be a heap object)
- void DispatchMap(Register obj, Register scratch, Handle<Map> map,
- Handle<Code> success, SmiCheckType smi_check_type);
+ // Check if the map of an object is equal to a specified weak map and branch
+ // to a specified target if equal. Skip the smi check if not required
+ // (object is known to be a heap object)
+ void DispatchWeakMap(Register obj, Register scratch1, Register scratch2,
+ Handle<WeakCell> cell, Handle<Code> success,
+ SmiCheckType smi_check_type);
+
+ // Compare the given value and the value of weak cell.
+ void CmpWeakValue(Register value, Handle<WeakCell> cell, Register scratch,
+ CRegister cr = cr7);
+ void GetWeakValue(Register value, Handle<WeakCell> cell);
+
+ // Load the value of the weak cell in the value register. Branch to the given
+ // miss label if the weak cell was cleared.
+ void LoadWeakValue(Register value, Handle<WeakCell> cell, Label* miss);
// Compare the object in a register to a value from the root list.
// Uses the ip register as scratch.
LoadP(type, FieldMemOperand(obj, HeapObject::kMapOffset));
lbz(type, FieldMemOperand(type, Map::kInstanceTypeOffset));
andi(r0, type, Operand(kIsNotStringMask));
- DCHECK_EQ(0, kStringTag);
+ DCHECK_EQ(0u, kStringTag);
return eq;
}
void MovFromFloatParameter(DoubleRegister dst);
void MovFromFloatResult(DoubleRegister dst);
- // Calls an API function. Allocates HandleScope, extracts returned value
- // from handle and propagates exceptions. Restores context. stack_space
- // - space to be unwound on exit (includes the call JS arguments space and
- // the additional space allocated for the fast call).
- void CallApiFunctionAndReturn(Register function_address,
- ExternalReference thunk_ref, int stack_space,
- MemOperand return_value_operand,
- MemOperand* context_restore_operand);
-
// Jump to a runtime routine.
void JumpToExternalReference(const ExternalReference& builtin);
// ---------------------------------------------------------------------------
// Smi utilities
- // Shift left by 1
+ // Shift left by kSmiShift
void SmiTag(Register reg, RCBit rc = LeaveRC) { SmiTag(reg, reg, rc); }
void SmiTag(Register dst, Register src, RCBit rc = LeaveRC) {
ShiftLeftImm(dst, src, Operand(kSmiShift), rc);
inline void JumpIfNotSmiCandidate(Register value, Register scratch,
Label* not_smi_label) {
    // High bits must be identical to fit into a Smi.
+ STATIC_ASSERT(kSmiShift == 1);
addis(scratch, value, Operand(0x40000000u >> 16));
cmpi(scratch, Operand::Zero());
blt(not_smi_label);
void UntagAndJumpIfNotSmi(Register dst, Register src, Label* non_smi_case);
inline void TestIfSmi(Register value, Register scratch) {
- TestBit(value, 0, scratch); // tst(value, Operand(kSmiTagMask));
+ TestBitRange(value, kSmiTagSize - 1, 0, scratch);
}
inline void TestIfPositiveSmi(Register value, Register scratch) {
- STATIC_ASSERT((kSmiTagMask | kSmiSignMask) ==
- (intptr_t)(1UL << (kBitsPerPointer - 1) | 1));
#if V8_TARGET_ARCH_PPC64
- rldicl(scratch, value, 1, kBitsPerPointer - 2, SetRC);
+ rldicl(scratch, value, 1, kBitsPerPointer - (1 + kSmiTagSize), SetRC);
#else
- rlwinm(scratch, value, 1, kBitsPerPointer - 2, kBitsPerPointer - 1, SetRC);
+ rlwinm(scratch, value, 1, kBitsPerPointer - (1 + kSmiTagSize),
+ kBitsPerPointer - 1, SetRC);
#endif
}
#if V8_TARGET_ARCH_PPC64
- inline void TestIfInt32(Register value, Register scratch1, Register scratch2,
+ inline void TestIfInt32(Register value, Register scratch,
CRegister cr = cr7) {
    // High bits must be identical to fit into a 32-bit integer.
- srawi(scratch1, value, 31);
- sradi(scratch2, value, 32);
- cmp(scratch1, scratch2, cr);
+ extsw(scratch, value);
+ cmp(scratch, value, cr);
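+    // The value fits in an int32 exactly when sign-extending its low
+    // 32 bits reproduces it.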
}
#else
inline void TestIfInt32(Register hi_word, Register lo_word, Register scratch,
}
#endif
+#if V8_TARGET_ARCH_PPC64
+  // Ensure it is permissible to read/write int value directly from
+ // upper half of the smi.
+ STATIC_ASSERT(kSmiTag == 0);
+ STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 32);
+#endif
+#if V8_TARGET_ARCH_PPC64 && V8_TARGET_LITTLE_ENDIAN
+#define SmiWordOffset(offset) (offset + kPointerSize / 2)
+#else
+#define SmiWordOffset(offset) offset
+#endif
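+// For example, on 64-bit little-endian a smi slot at byte offset N keeps
+// its int32 payload at N + 4; on big-endian targets the payload word is
+// at N itself.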
+
// Abort execution if argument is not a string, enabled via --debug-code.
void AssertString(Register object);
void LoadInstanceDescriptors(Register map, Register descriptors);
void EnumLength(Register dst, Register map);
void NumberOfOwnDescriptors(Register dst, Register map);
+ void LoadAccessor(Register dst, Register holder, int accessor_index,
+ AccessorComponent accessor);
template <typename Field>
- void DecodeField(Register dst, Register src) {
- ExtractBitRange(dst, src, Field::kShift + Field::kSize - 1, Field::kShift);
+ void DecodeField(Register dst, Register src, RCBit rc = LeaveRC) {
+ ExtractBitRange(dst, src, Field::kShift + Field::kSize - 1, Field::kShift,
+ rc);
}
template <typename Field>
- void DecodeField(Register reg) {
- DecodeField<Field>(reg, reg);
+ void DecodeField(Register reg, RCBit rc = LeaveRC) {
+ DecodeField<Field>(reg, reg, rc);
}
template <typename Field>
#define __ ACCESS_MASM(masm_)
-RegExpMacroAssemblerPPC::RegExpMacroAssemblerPPC(Mode mode,
- int registers_to_save,
- Zone* zone)
- : NativeRegExpMacroAssembler(zone),
- masm_(new MacroAssembler(zone->isolate(), NULL, kRegExpCodeSize)),
+RegExpMacroAssemblerPPC::RegExpMacroAssemblerPPC(Isolate* isolate, Zone* zone,
+ Mode mode,
+ int registers_to_save)
+ : NativeRegExpMacroAssembler(isolate, zone),
+ masm_(new MacroAssembler(isolate, NULL, kRegExpCodeSize)),
mode_(mode),
num_registers_(registers_to_save),
num_saved_registers_(registers_to_save),
#ifndef V8_INTERPRETED_REGEXP
class RegExpMacroAssemblerPPC : public NativeRegExpMacroAssembler {
public:
- RegExpMacroAssemblerPPC(Mode mode, int registers_to_save, Zone* zone);
+ RegExpMacroAssemblerPPC(Isolate* isolate, Zone* zone, Mode mode,
+ int registers_to_save);
virtual ~RegExpMacroAssemblerPPC();
virtual int stack_limit_slack();
virtual void AdvanceCurrentPosition(int by);
~PPCDebugger();
void Stop(Instruction* instr);
- void Info(Instruction* instr);
void Debug();
private:
#endif
-void PPCDebugger::Info(Instruction* instr) {
- // Retrieve the encoded address immediately following the Info breakpoint.
- char* msg =
- *reinterpret_cast<char**>(sim_->get_pc() + Instruction::kInstrSize);
- PrintF("Simulator info %s\n", msg);
- sim_->set_pc(sim_->get_pc() + Instruction::kInstrSize + kPointerSize);
-}
-
-
intptr_t PPCDebugger::GetRegisterValue(int regnum) {
return sim_->get_register(regnum);
}
// The return value is in d1.
-void Simulator::SetFpResult(const double& result) { fp_registers_[1] = result; }
+void Simulator::SetFpResult(const double& result) {
+ set_d_register_from_double(1, result);
+}
void Simulator::TrashCallerSaveRegisters() {
}
-#if !V8_TARGET_ARCH_PPC64
-// Calls into the V8 runtime are based on this very simple interface.
-// Note: To be able to return two values from some calls the code in runtime.cc
-// uses the ObjectPair which is essentially two 32-bit values stuffed into a
-// 64-bit value. With the code below we assume that all runtime calls return
-// 64 bits of result. If they don't, the r4 result register contains a bogus
-// value, which is fine because it is caller-saved.
-typedef int64_t (*SimulatorRuntimeCall)(intptr_t arg0, intptr_t arg1,
- intptr_t arg2, intptr_t arg3,
- intptr_t arg4, intptr_t arg5);
-#else
-// For 64-bit, we need to be more explicit.
-typedef intptr_t (*SimulatorRuntimeCall)(intptr_t arg0, intptr_t arg1,
- intptr_t arg2, intptr_t arg3,
- intptr_t arg4, intptr_t arg5);
+#if V8_TARGET_ARCH_PPC64
struct ObjectPair {
intptr_t x;
intptr_t y;
};
-typedef struct ObjectPair (*SimulatorRuntimeObjectPairCall)(
- intptr_t arg0, intptr_t arg1, intptr_t arg2, intptr_t arg3, intptr_t arg4,
- intptr_t arg5);
+
+static void decodeObjectPair(ObjectPair* pair, intptr_t* x, intptr_t* y) {
+ *x = pair->x;
+ *y = pair->y;
+}
+#else
+typedef uint64_t ObjectPair;
+
+
+static void decodeObjectPair(ObjectPair* pair, intptr_t* x, intptr_t* y) {
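+  // On 32-bit targets the pair comes back packed into a single 64-bit
+  // value; unpack the halves according to the target's byte order.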
+#if V8_TARGET_BIG_ENDIAN
+ *x = static_cast<int32_t>(*pair >> 32);
+ *y = static_cast<int32_t>(*pair);
+#else
+ *x = static_cast<int32_t>(*pair);
+ *y = static_cast<int32_t>(*pair >> 32);
+#endif
+}
#endif
+// Calls into the V8 runtime are based on this very simple interface.
+// Note: To be able to return two values from some calls the code in
+// runtime.cc uses the ObjectPair which is essentially two pointer
+// values stuffed into a structure. With the code below we assume that
+// all runtime calls return this pair. If they don't, the r4 result
+// register contains a bogus value, which is fine because it is
+// caller-saved.
+typedef ObjectPair (*SimulatorRuntimeCall)(intptr_t arg0, intptr_t arg1,
+ intptr_t arg2, intptr_t arg3,
+ intptr_t arg4, intptr_t arg5);
+
// These prototypes handle the four types of FP calls.
typedef int (*SimulatorRuntimeCompareCall)(double darg0, double darg1);
typedef double (*SimulatorRuntimeFPFPCall)(double darg0, double darg1);
Redirection* redirection = Redirection::FromSwiInstruction(instr);
const int kArgCount = 6;
int arg0_regnum = 3;
-#if V8_TARGET_ARCH_PPC64 && !ABI_RETURNS_OBJECT_PAIRS_IN_REGS
+#if !ABI_RETURNS_OBJECT_PAIRS_IN_REGS
intptr_t result_buffer = 0;
if (redirection->type() == ExternalReference::BUILTIN_OBJECTPAIR_CALL) {
result_buffer = get_register(r3);
PrintF("\n");
}
CHECK(stack_aligned);
-#if !V8_TARGET_ARCH_PPC64
DCHECK(redirection->type() == ExternalReference::BUILTIN_CALL);
SimulatorRuntimeCall target =
reinterpret_cast<SimulatorRuntimeCall>(external);
- int64_t result = target(arg[0], arg[1], arg[2], arg[3], arg[4], arg[5]);
- int32_t lo_res = static_cast<int32_t>(result);
- int32_t hi_res = static_cast<int32_t>(result >> 32);
-#if V8_TARGET_BIG_ENDIAN
- if (::v8::internal::FLAG_trace_sim) {
- PrintF("Returned %08x\n", hi_res);
- }
- set_register(r3, hi_res);
- set_register(r4, lo_res);
-#else
+ ObjectPair result =
+ target(arg[0], arg[1], arg[2], arg[3], arg[4], arg[5]);
+ intptr_t x;
+ intptr_t y;
+ decodeObjectPair(&result, &x, &y);
if (::v8::internal::FLAG_trace_sim) {
- PrintF("Returned %08x\n", lo_res);
- }
- set_register(r3, lo_res);
- set_register(r4, hi_res);
-#endif
-#else
- if (redirection->type() == ExternalReference::BUILTIN_CALL) {
- SimulatorRuntimeCall target =
- reinterpret_cast<SimulatorRuntimeCall>(external);
- intptr_t result =
- target(arg[0], arg[1], arg[2], arg[3], arg[4], arg[5]);
- if (::v8::internal::FLAG_trace_sim) {
- PrintF("Returned %08" V8PRIxPTR "\n", result);
- }
- set_register(r3, result);
- } else {
- DCHECK(redirection->type() ==
- ExternalReference::BUILTIN_OBJECTPAIR_CALL);
- SimulatorRuntimeObjectPairCall target =
- reinterpret_cast<SimulatorRuntimeObjectPairCall>(external);
- struct ObjectPair result =
- target(arg[0], arg[1], arg[2], arg[3], arg[4], arg[5]);
- if (::v8::internal::FLAG_trace_sim) {
- PrintF("Returned %08" V8PRIxPTR ", %08" V8PRIxPTR "\n", result.x,
- result.y);
- }
-#if ABI_RETURNS_OBJECT_PAIRS_IN_REGS
- set_register(r3, result.x);
- set_register(r4, result.y);
-#else
- memcpy(reinterpret_cast<void*>(result_buffer), &result,
- sizeof(struct ObjectPair));
-#endif
+ PrintF("Returned {%08" V8PRIxPTR ", %08" V8PRIxPTR "}\n", x, y);
}
-#endif
+ set_register(r3, x);
+ set_register(r4, y);
}
set_pc(saved_lr);
break;
dbg.Debug();
break;
}
- case kInfo: {
- PPCDebugger dbg(this);
- dbg.Info(instr);
- break;
- }
// stop uses all codes greater than 1 << 23.
default: {
if (svc >= (1 << 23)) {
int rb = instr->RBValue();
intptr_t ra_val = ra == 0 ? 0 : get_register(ra);
intptr_t rb_val = get_register(rb);
- double* dptr = reinterpret_cast<double*>(ReadDW(ra_val + rb_val));
- set_d_register_from_double(frt, *dptr);
+ int64_t* dptr = reinterpret_cast<int64_t*>(ReadDW(ra_val + rb_val));
+ set_d_register(frt, *dptr);
if (opcode == LFDUX) {
DCHECK(ra != 0);
set_register(ra, ra_val + rb_val);
int rb = instr->RBValue();
intptr_t ra_val = ra == 0 ? 0 : get_register(ra);
intptr_t rb_val = get_register(rb);
- double frs_val = get_double_from_d_register(frs);
- int64_t* p = reinterpret_cast<int64_t*>(&frs_val);
- WriteDW(ra_val + rb_val, *p);
+ int64_t frs_val = get_d_register(frs);
+ WriteDW(ra_val + rb_val, frs_val);
if (opcode == STFDUX) {
DCHECK(ra != 0);
set_register(ra, ra_val + rb_val);
if (instr->Bit(0)) { // RC bit set
SetCR0(static_cast<intptr_t>(alu_out));
}
- // todo - handle OE bit
+ break;
+ }
+ case MULHWUX: {
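+ // Multiply high word unsigned: rt gets the high 32 bits of ra * rb.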
+ int rt = instr->RTValue();
+ int ra = instr->RAValue();
+ int rb = instr->RBValue();
+ uint32_t ra_val = (get_register(ra) & 0xFFFFFFFF);
+ uint32_t rb_val = (get_register(rb) & 0xFFFFFFFF);
+ uint64_t alu_out = static_cast<uint64_t>(ra_val) * rb_val;
+ alu_out >>= 32;
+ set_register(rt, alu_out);
+ if (instr->Bit(0)) { // RC bit set
+ SetCR0(static_cast<intptr_t>(alu_out));
+ }
break;
}
case NEGX: {
DCHECK(!instr->Bit(0));
int frt = instr->RTValue();
int ra = instr->RAValue();
- double frt_val = get_double_from_d_register(frt);
- int64_t* p = reinterpret_cast<int64_t*>(&frt_val);
- set_register(ra, *p);
+ int64_t frt_val = get_d_register(frt);
+ set_register(ra, frt_val);
break;
}
case MFVSRWZ: {
DCHECK(!instr->Bit(0));
int frt = instr->RTValue();
int ra = instr->RAValue();
- double frt_val = get_double_from_d_register(frt);
- int64_t* p = reinterpret_cast<int64_t*>(&frt_val);
- set_register(ra, static_cast<uint32_t>(*p));
+ int64_t frt_val = get_d_register(frt);
+ set_register(ra, static_cast<uint32_t>(frt_val));
break;
}
case MTVSRD: {
int frt = instr->RTValue();
int ra = instr->RAValue();
int64_t ra_val = get_register(ra);
- double* p = reinterpret_cast<double*>(&ra_val);
- set_d_register_from_double(frt, *p);
+ set_d_register(frt, ra_val);
break;
}
case MTVSRWA: {
int frt = instr->RTValue();
int ra = instr->RAValue();
int64_t ra_val = static_cast<int32_t>(get_register(ra));
- double* p = reinterpret_cast<double*>(&ra_val);
- set_d_register_from_double(frt, *p);
+ set_d_register(frt, ra_val);
break;
}
case MTVSRWZ: {
int frt = instr->RTValue();
int ra = instr->RAValue();
uint64_t ra_val = static_cast<uint32_t>(get_register(ra));
- double* p = reinterpret_cast<double*>(&ra_val);
- set_d_register_from_double(frt, *p);
+ set_d_register(frt, ra_val);
break;
}
#endif
}
-void Simulator::ExecuteExt2_9bit_part2(Instruction* instr) {
+bool Simulator::ExecuteExt2_9bit_part2(Instruction* instr) {
+ bool found = true;
int opcode = instr->Bits(9, 1) << 1;
switch (opcode) {
case CNTLZWX: {
}
break;
}
+ case DIVWU: {
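+ // Divide word unsigned; with OE set, XER[SO,OV] reflect divide-by-zero.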
+ int rt = instr->RTValue();
+ int ra = instr->RAValue();
+ int rb = instr->RBValue();
+ uint32_t ra_val = get_register(ra);
+ uint32_t rb_val = get_register(rb);
+ bool overflow = (rb_val == 0);
+ // result is undefined if divisor is zero
+ uint32_t alu_out = (overflow) ? -1 : ra_val / rb_val;
+ set_register(rt, alu_out);
+ if (instr->Bit(10)) { // OE bit set
+ if (overflow) {
+ special_reg_xer_ |= 0xC0000000; // set SO,OV
+ } else {
+ special_reg_xer_ &= ~0x40000000; // clear OV
+ }
+ }
+ if (instr->Bit(0)) { // RC bit set
+ bool setSO = (special_reg_xer_ & 0x80000000);
+ SetCR0(alu_out, setSO);
+ }
+ break;
+ }
#if V8_TARGET_ARCH_PPC64
case DIVD: {
int rt = instr->RTValue();
// todo - handle OE bit
break;
}
+ case DIVDU: {
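+ // Divide doubleword unsigned (64-bit operands).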
+ int rt = instr->RTValue();
+ int ra = instr->RAValue();
+ int rb = instr->RBValue();
+ uint64_t ra_val = get_register(ra);
+ uint64_t rb_val = get_register(rb);
+ // result is undefined if divisor is zero
+ uint64_t alu_out = (rb_val == 0) ? -1 : ra_val / rb_val;
+ set_register(rt, alu_out);
+ if (instr->Bit(0)) { // RC bit set
+ SetCR0(alu_out);
+ }
+ // todo - handle OE bit
+ break;
+ }
#endif
case ADDX: {
int rt = instr->RTValue();
}
break;
}
+ case ORC: {
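+ // OR with complement: ra <- rs | ~rb.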
+ int rs = instr->RSValue();
+ int ra = instr->RAValue();
+ int rb = instr->RBValue();
+ intptr_t rs_val = get_register(rs);
+ intptr_t rb_val = get_register(rb);
+ intptr_t alu_out = rs_val | ~rb_val;
+ set_register(ra, alu_out);
+ if (instr->Bit(0)) { // RC bit set
+ SetCR0(alu_out);
+ }
+ break;
+ }
case MFSPR: {
int rt = instr->RTValue();
int spr = instr->Bits(20, 11);
break;
}
#if V8_TARGET_ARCH_PPC64
+ case LWAX: {
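+ // Load word algebraic indexed: loads and sign-extends a 32-bit value.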
+ int rt = instr->RTValue();
+ int ra = instr->RAValue();
+ int rb = instr->RBValue();
+ intptr_t ra_val = ra == 0 ? 0 : get_register(ra);
+ intptr_t rb_val = get_register(rb);
+ set_register(rt, ReadW(ra_val + rb_val, instr));
+ break;
+ }
case LDX:
case LDUX: {
int rt = instr->RTValue();
}
break;
}
+ case LHAX:
+ case LHAUX: {
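+ // Load halfword algebraic indexed (sign-extending); the U form
+ // also updates ra with the effective address.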
+ int rt = instr->RTValue();
+ int ra = instr->RAValue();
+ int rb = instr->RBValue();
+ intptr_t ra_val = ra == 0 ? 0 : get_register(ra);
+ intptr_t rb_val = get_register(rb);
+ set_register(rt, ReadH(ra_val + rb_val, instr));
+ if (opcode == LHAUX) {
+ DCHECK(ra != 0 && ra != rt);
+ set_register(ra, ra_val + rb_val);
+ }
+ break;
+ }
case DCBF: {
// todo - simulate dcbf
break;
}
default: {
+ found = false;
+ break;
+ }
+ }
+
+ return found;
+}
+
+
+void Simulator::ExecuteExt2_5bit(Instruction* instr) {
+ int opcode = instr->Bits(5, 1) << 1;
+ switch (opcode) {
+ case ISEL: {
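+ // Integer select: rt <- (CR condition bit set) ? ra : rb;
+ // ra reads as zero when ra is r0.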
+ int rt = instr->RTValue();
+ int ra = instr->RAValue();
+ int rb = instr->RBValue();
+ int condition_bit = instr->RCValue();
+ int condition_mask = 0x80000000 >> condition_bit;
+ intptr_t ra_val = (ra == 0) ? 0 : get_register(ra);
+ intptr_t rb_val = get_register(rb);
+ intptr_t value = (condition_reg_ & condition_mask) ? ra_val : rb_val;
+ set_register(rt, value);
+ break;
+ }
+ default: {
PrintF("Unimplemented: %08x\n", instr->InstructionBits());
UNIMPLEMENTED(); // Not used by V8.
}
if (ExecuteExt2_10bit(instr)) return;
// Now look at the lesser encodings
if (ExecuteExt2_9bit_part1(instr)) return;
- ExecuteExt2_9bit_part2(instr);
+ if (ExecuteExt2_9bit_part2(instr)) return;
+ ExecuteExt2_5bit(instr);
}
condition_reg_ = (condition_reg_ & ~condition_mask) | condition;
return;
}
+ case FRIN: {
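+ // frin: round to nearest, ties away from zero (std::round semantics).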
+ int frt = instr->RTValue();
+ int frb = instr->RBValue();
+ double frb_val = get_double_from_d_register(frb);
+ double frt_val = std::round(frb_val);
+ set_d_register_from_double(frt, frt_val);
+ if (instr->Bit(0)) { // RC bit set
+ // UNIMPLEMENTED();
+ }
+ return;
+ }
+ case FRIZ: {
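+ // friz: round toward zero (truncate).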
+ int frt = instr->RTValue();
+ int frb = instr->RBValue();
+ double frb_val = get_double_from_d_register(frb);
+ double frt_val = std::trunc(frb_val);
+ set_d_register_from_double(frt, frt_val);
+ if (instr->Bit(0)) { // RC bit set
+ // UNIMPLEMENTED();
+ }
+ return;
+ }
+ case FRIP: {
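+ // frip: round toward +infinity.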
+ int frt = instr->RTValue();
+ int frb = instr->RBValue();
+ double frb_val = get_double_from_d_register(frb);
+ double frt_val = std::ceil(frb_val);
+ set_d_register_from_double(frt, frt_val);
+ if (instr->Bit(0)) { // RC bit set
+ // UNIMPLEMENTED();
+ }
+ return;
+ }
+ case FRIM: {
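+ // frim: round toward -infinity.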
+ int frt = instr->RTValue();
+ int frb = instr->RBValue();
+ double frb_val = get_double_from_d_register(frb);
+ double frt_val = std::floor(frb_val);
+ set_d_register_from_double(frt, frt_val);
+ if (instr->Bit(0)) { // RC bit set
+ // UNIMPLEMENTED();
+ }
+ return;
+ }
case FRSP: {
int frt = instr->RTValue();
int frb = instr->RBValue();
+ // frsp rounds the 8-byte double-precision value to
+ // single precision
double frb_val = get_double_from_d_register(frb);
- // frsp round 8-byte double-precision value to 8-byte
- // single-precision value, ignore the round here
- set_d_register_from_double(frt, frb_val);
+ double frt_val = static_cast<float>(frb_val);
+ set_d_register_from_double(frt, frt_val);
if (instr->Bit(0)) { // RC bit set
// UNIMPLEMENTED();
}
case FMR: {
int frt = instr->RTValue();
int frb = instr->RBValue();
- double frb_val = get_double_from_d_register(frb);
- double frt_val = frb_val;
- set_d_register_from_double(frt, frt_val);
+ int64_t frb_val = get_d_register(frb);
+ set_d_register(frt, frb_val);
return;
}
case MTFSFI: {
}
case MTFSF: {
int frb = instr->RBValue();
- double frb_dval = get_double_from_d_register(frb);
- int64_t* p = reinterpret_cast<int64_t*>(&frb_dval);
- int32_t frb_ival = static_cast<int32_t>((*p) & 0xffffffff);
+ int64_t frb_dval = get_d_register(frb);
+ int32_t frb_ival = static_cast<int32_t>(frb_dval & 0xffffffff);
int l = instr->Bits(25, 25);
if (l == 1) {
fp_condition_reg_ = frb_ival;
case MFFS: {
int frt = instr->RTValue();
int64_t lval = static_cast<int64_t>(fp_condition_reg_);
- double* p = reinterpret_cast<double*>(&lval);
- set_d_register_from_double(frt, *p);
+ set_d_register(frt, lval);
return;
}
case FABS: {
set_d_register_from_double(frt, frt_val);
return;
}
- case FRIM: {
- int frt = instr->RTValue();
- int frb = instr->RBValue();
- double frb_val = get_double_from_d_register(frb);
- int64_t floor_val = (int64_t)frb_val;
- if (floor_val > frb_val) floor_val--;
- double frt_val = static_cast<double>(floor_val);
- set_d_register_from_double(frt, frt_val);
- return;
- }
}
UNIMPLEMENTED(); // Not used by V8.
}
uintptr_t result = ReadHU(ra_val + offset, instr) & 0xffff;
set_register(rt, result);
if (opcode == LHZU) {
- DCHECK(ra != 0);
set_register(ra, ra_val + offset);
}
break;
case LHA:
case LHAU: {
- UNIMPLEMENTED();
+ int ra = instr->RAValue();
+ int rt = instr->RTValue();
+ intptr_t ra_val = ra == 0 ? 0 : get_register(ra);
+ int offset = SIGN_EXT_IMM16(instr->Bits(15, 0));
+ intptr_t result = ReadH(ra_val + offset, instr);
+ set_register(rt, result);
+ if (opcode == LHAU) {
+ set_register(ra, ra_val + offset);
+ }
break;
}
int ra = instr->RAValue();
int32_t offset = SIGN_EXT_IMM16(instr->Bits(15, 0));
intptr_t ra_val = ra == 0 ? 0 : get_register(ra);
- double* dptr = reinterpret_cast<double*>(ReadDW(ra_val + offset));
- set_d_register_from_double(frt, *dptr);
+ int64_t* dptr = reinterpret_cast<int64_t*>(ReadDW(ra_val + offset));
+ set_d_register(frt, *dptr);
if (opcode == LFDU) {
DCHECK(ra != 0);
set_register(ra, ra_val + offset);
int ra = instr->RAValue();
int32_t offset = SIGN_EXT_IMM16(instr->Bits(15, 0));
intptr_t ra_val = ra == 0 ? 0 : get_register(ra);
- double frs_val = get_double_from_d_register(frs);
- int64_t* p = reinterpret_cast<int64_t*>(&frs_val);
- WriteDW(ra_val + offset, *p);
+ int64_t frs_val = get_d_register(frs);
+ WriteDW(ra_val + offset, frs_val);
if (opcode == STFDU) {
DCHECK(ra != 0);
set_register(ra, ra_val + offset);
}
#endif
- case FAKE_OPCODE: {
- if (instr->Bits(MARKER_SUBOPCODE_BIT, MARKER_SUBOPCODE_BIT) == 1) {
- int marker_code = instr->Bits(STUB_MARKER_HIGH_BIT, 0);
- DCHECK(marker_code < F_NEXT_AVAILABLE_STUB_MARKER);
- PrintF("Hit stub-marker: %d (EMIT_STUB_MARKER)\n", marker_code);
- } else {
- int fake_opcode = instr->Bits(FAKE_OPCODE_HIGH_BIT, 0);
- if (fake_opcode == fBKPT) {
- PPCDebugger dbg(this);
- PrintF("Simulator hit BKPT.\n");
- dbg.Debug();
- } else {
- DCHECK(fake_opcode < fLastFaker);
- PrintF("Hit ARM opcode: %d(FAKE_OPCODE defined in constant-ppc.h)\n",
- fake_opcode);
- UNIMPLEMENTED();
- }
- }
- break;
- }
-
default: {
UNIMPLEMENTED();
break;
double get_double_from_register_pair(int reg);
void set_d_register_from_double(int dreg, const double dbl) {
DCHECK(dreg >= 0 && dreg < kNumFPRs);
- fp_registers_[dreg] = dbl;
+ *bit_cast<double*>(&fp_registers_[dreg]) = dbl;
+ }
+ double get_double_from_d_register(int dreg) {
+ DCHECK(dreg >= 0 && dreg < kNumFPRs);
+ return *bit_cast<double*>(&fp_registers_[dreg]);
+ }
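+ // Raw (bit-exact) 64-bit access to the FP register contents, used for
+ // moves that must not go through a double conversion.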
+ void set_d_register(int dreg, int64_t value) {
+ DCHECK(dreg >= 0 && dreg < kNumFPRs);
+ fp_registers_[dreg] = value;
+ }
+ int64_t get_d_register(int dreg) {
+ DCHECK(dreg >= 0 && dreg < kNumFPRs);
+ return fp_registers_[dreg];
}
- double get_double_from_d_register(int dreg) { return fp_registers_[dreg]; }
// Special case of set_register and get_register to access the raw PC value.
void set_pc(intptr_t value);
void ExecuteExt1(Instruction* instr);
bool ExecuteExt2_10bit(Instruction* instr);
bool ExecuteExt2_9bit_part1(Instruction* instr);
- void ExecuteExt2_9bit_part2(Instruction* instr);
+ bool ExecuteExt2_9bit_part2(Instruction* instr);
+ void ExecuteExt2_5bit(Instruction* instr);
void ExecuteExt2(Instruction* instr);
void ExecuteExt4(Instruction* instr);
#if V8_TARGET_ARCH_PPC64
intptr_t special_reg_ctr_;
int32_t special_reg_xer_;
- double fp_registers_[kNumFPRs];
+ int64_t fp_registers_[kNumFPRs];
// Simulator support.
char* stack_;
--- /dev/null
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "test/unittests/compiler/instruction-selector-unittest.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {} // namespace compiler
+} // namespace internal
+} // namespace v8