// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/compiler/code-generator.h"

#include "src/compiler/code-generator-impl.h"
#include "src/compiler/gap-resolver.h"
#include "src/compiler/node-matchers.h"
#include "src/ppc/macro-assembler-ppc.h"
#include "src/scopes.h"

namespace v8 {
namespace internal {
namespace compiler {

#define __ masm()->


#define kScratchReg r11


// Adds PPC-specific methods to convert InstructionOperands.
class PPCOperandConverter FINAL : public InstructionOperandConverter {
 public:
  PPCOperandConverter(CodeGenerator* gen, Instruction* instr)
      : InstructionOperandConverter(gen, instr) {}

  RCBit OutputRCBit() const {
    switch (instr_->flags_mode()) {
      case kFlags_branch:
      case kFlags_set:
        return SetRC;
      case kFlags_none:
        return LeaveRC;
    }
    UNREACHABLE();
    return LeaveRC;
  }
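
  // Unsigned flag conditions must be tested with the logical compare
  // instructions (cmplw/cmpld and their immediate forms) rather than the
  // arithmetic ones, so the compare macros below consult this predicate.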
  bool CompareLogical() const {
    switch (instr_->flags_condition()) {
      case kUnsignedLessThan:
      case kUnsignedGreaterThanOrEqual:
      case kUnsignedLessThanOrEqual:
      case kUnsignedGreaterThan:
        return true;
      default:
        return false;
    }
  }

  Operand InputImmediate(int index) {
    Constant constant = ToConstant(instr_->InputAt(index));
    switch (constant.type()) {
      case Constant::kInt32:
        return Operand(constant.ToInt32());
      case Constant::kFloat32:
        return Operand(
            isolate()->factory()->NewNumber(constant.ToFloat32(), TENURED));
      case Constant::kFloat64:
        return Operand(
            isolate()->factory()->NewNumber(constant.ToFloat64(), TENURED));
      case Constant::kInt64:
#if V8_TARGET_ARCH_PPC64
        return Operand(constant.ToInt64());
#endif
      case Constant::kExternalReference:
      case Constant::kHeapObject:
      case Constant::kRpoNumber:
        break;
    }
    UNREACHABLE();
    return Operand::Zero();
  }

  MemOperand MemoryOperand(AddressingMode* mode, int* first_index) {
    const int index = *first_index;
    *mode = AddressingModeField::decode(instr_->opcode());
    switch (*mode) {
      case kMode_None:
        break;
      case kMode_MRI:
        *first_index += 2;
        return MemOperand(InputRegister(index + 0), InputInt32(index + 1));
      case kMode_MRR:
        *first_index += 2;
        return MemOperand(InputRegister(index + 0), InputRegister(index + 1));
    }
    UNREACHABLE();
    return MemOperand(r0);
  }

  MemOperand MemoryOperand(AddressingMode* mode, int first_index = 0) {
    return MemoryOperand(mode, &first_index);
  }

  MemOperand ToMemOperand(InstructionOperand* op) const {
    DCHECK(op != NULL);
    DCHECK(!op->IsRegister());
    DCHECK(!op->IsDoubleRegister());
    DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
    // The linkage computes where all spill slots are located.
    FrameOffset offset = linkage()->GetFrameOffset(op->index(), frame(), 0);
    return MemOperand(offset.from_stack_pointer() ? sp : fp, offset.offset());
  }
};


static inline bool HasRegisterInput(Instruction* instr, int index) {
  return instr->InputAt(index)->IsRegister();
}


namespace {
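
// Out-of-line stubs used by the checked load/store macros below: when the
// index is out of bounds, checked loads fall through to one of these to
// materialize a quiet NaN (float loads) or zero (integer loads) instead of
// touching memory.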
class OutOfLineLoadNAN32 FINAL : public OutOfLineCode {
 public:
  OutOfLineLoadNAN32(CodeGenerator* gen, DoubleRegister result)
      : OutOfLineCode(gen), result_(result) {}

  void Generate() FINAL {
    __ LoadDoubleLiteral(result_, std::numeric_limits<float>::quiet_NaN(),
                         kScratchReg);
  }

 private:
  DoubleRegister const result_;
};


class OutOfLineLoadNAN64 FINAL : public OutOfLineCode {
 public:
  OutOfLineLoadNAN64(CodeGenerator* gen, DoubleRegister result)
      : OutOfLineCode(gen), result_(result) {}

  void Generate() FINAL {
    __ LoadDoubleLiteral(result_, std::numeric_limits<double>::quiet_NaN(),
                         kScratchReg);
  }

 private:
  DoubleRegister const result_;
};


class OutOfLineLoadZero FINAL : public OutOfLineCode {
 public:
  OutOfLineLoadZero(CodeGenerator* gen, Register result)
      : OutOfLineCode(gen), result_(result) {}

  void Generate() FINAL { __ li(result_, Operand::Zero()); }

 private:
  Register const result_;
};


Condition FlagsConditionToCondition(FlagsCondition condition) {
  switch (condition) {
    case kEqual:
      return eq;
    case kNotEqual:
      return ne;
    case kSignedLessThan:
    case kUnsignedLessThan:
      return lt;
    case kSignedGreaterThanOrEqual:
    case kUnsignedGreaterThanOrEqual:
      return ge;
    case kSignedLessThanOrEqual:
    case kUnsignedLessThanOrEqual:
      return le;
    case kSignedGreaterThan:
    case kUnsignedGreaterThan:
      return gt;
    case kOverflow:
#if V8_TARGET_ARCH_PPC64
      return ne;
#else
      return lt;
#endif
    case kNotOverflow:
#if V8_TARGET_ARCH_PPC64
      return eq;
#else
      return ge;
#endif
    case kUnorderedEqual:
    case kUnorderedNotEqual:
      break;
  }
  UNREACHABLE();
  return kNoCondition;
}

}  // namespace
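

// The ASSEMBLE_* macros below expand into the instruction sequences for the
// corresponding opcode cases in AssembleArchInstruction.  i.OutputRCBit()
// selects between the plain and the record ("dot") form of an instruction,
// i.e. whether cr0 is updated with a comparison of the result against zero.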
#define ASSEMBLE_FLOAT_UNOP_RC(asm_instr)                            \
  do {                                                               \
    __ asm_instr(i.OutputDoubleRegister(), i.InputDoubleRegister(0), \
                 i.OutputRCBit());                                   \
  } while (0)


#define ASSEMBLE_FLOAT_BINOP_RC(asm_instr)                           \
  do {                                                               \
    __ asm_instr(i.OutputDoubleRegister(), i.InputDoubleRegister(0), \
                 i.InputDoubleRegister(1), i.OutputRCBit());         \
  } while (0)


#define ASSEMBLE_BINOP(asm_instr_reg, asm_instr_imm)           \
  do {                                                         \
    if (HasRegisterInput(instr, 1)) {                          \
      __ asm_instr_reg(i.OutputRegister(), i.InputRegister(0), \
                       i.InputRegister(1));                    \
    } else {                                                   \
      __ asm_instr_imm(i.OutputRegister(), i.InputRegister(0), \
                       i.InputImmediate(1));                   \
    }                                                          \
  } while (0)


#define ASSEMBLE_BINOP_RC(asm_instr_reg, asm_instr_imm)        \
  do {                                                         \
    if (HasRegisterInput(instr, 1)) {                          \
      __ asm_instr_reg(i.OutputRegister(), i.InputRegister(0), \
                       i.InputRegister(1), i.OutputRCBit());   \
    } else {                                                   \
      __ asm_instr_imm(i.OutputRegister(), i.InputRegister(0), \
                       i.InputImmediate(1), i.OutputRCBit());  \
    }                                                          \
  } while (0)


#define ASSEMBLE_BINOP_INT_RC(asm_instr_reg, asm_instr_imm)    \
  do {                                                         \
    if (HasRegisterInput(instr, 1)) {                          \
      __ asm_instr_reg(i.OutputRegister(), i.InputRegister(0), \
                       i.InputRegister(1), i.OutputRCBit());   \
    } else {                                                   \
      __ asm_instr_imm(i.OutputRegister(), i.InputRegister(0), \
                       i.InputInt32(1), i.OutputRCBit());      \
    }                                                          \
  } while (0)
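

// Overflow handling differs by word size: on PPC64, 32-bit arithmetic is
// done in 64-bit registers, so overflow is detected by checking whether the
// result still fits in 32 bits (TestIfInt32 leaves eq in cr0 iff it does).
// On 32-bit targets the macro-assembler helpers compute an overflow
// indicator (negative iff overflow) into kScratchReg, updating cr0 in the
// process.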
#if V8_TARGET_ARCH_PPC64
#define ASSEMBLE_ADD_WITH_OVERFLOW()             \
  do {                                           \
    ASSEMBLE_BINOP(add, addi);                   \
    __ TestIfInt32(i.OutputRegister(), r0, cr0); \
  } while (0)
#else
#define ASSEMBLE_ADD_WITH_OVERFLOW()                                    \
  do {                                                                  \
    if (HasRegisterInput(instr, 1)) {                                   \
      __ AddAndCheckForOverflow(i.OutputRegister(), i.InputRegister(0), \
                                i.InputRegister(1), kScratchReg, r0);   \
    } else {                                                            \
      __ AddAndCheckForOverflow(i.OutputRegister(), i.InputRegister(0), \
                                i.InputInt32(1), kScratchReg, r0);      \
    }                                                                   \
  } while (0)
#endif


#if V8_TARGET_ARCH_PPC64
#define ASSEMBLE_SUB_WITH_OVERFLOW()             \
  do {                                           \
    ASSEMBLE_BINOP(sub, subi);                   \
    __ TestIfInt32(i.OutputRegister(), r0, cr0); \
  } while (0)
#else
#define ASSEMBLE_SUB_WITH_OVERFLOW()                                    \
  do {                                                                  \
    if (HasRegisterInput(instr, 1)) {                                   \
      __ SubAndCheckForOverflow(i.OutputRegister(), i.InputRegister(0), \
                                i.InputRegister(1), kScratchReg, r0);   \
    } else {                                                            \
      __ AddAndCheckForOverflow(i.OutputRegister(), i.InputRegister(0), \
                                -i.InputInt32(1), kScratchReg, r0);     \
    }                                                                   \
  } while (0)
#endif


#define ASSEMBLE_COMPARE(cmp_instr, cmpl_instr)                        \
  do {                                                                 \
    const CRegister cr = cr0;                                          \
    if (HasRegisterInput(instr, 1)) {                                  \
      if (i.CompareLogical()) {                                        \
        __ cmpl_instr(i.InputRegister(0), i.InputRegister(1), cr);     \
      } else {                                                         \
        __ cmp_instr(i.InputRegister(0), i.InputRegister(1), cr);      \
      }                                                                \
    } else {                                                           \
      if (i.CompareLogical()) {                                        \
        __ cmpl_instr##i(i.InputRegister(0), i.InputImmediate(1), cr); \
      } else {                                                         \
        __ cmp_instr##i(i.InputRegister(0), i.InputImmediate(1), cr);  \
      }                                                                \
    }                                                                  \
    DCHECK_EQ(SetRC, i.OutputRCBit());                                 \
  } while (0)


#define ASSEMBLE_FLOAT_COMPARE(cmp_instr)                                 \
  do {                                                                    \
    const CRegister cr = cr0;                                             \
    __ cmp_instr(i.InputDoubleRegister(0), i.InputDoubleRegister(1), cr); \
    DCHECK_EQ(SetRC, i.OutputRCBit());                                    \
  } while (0)
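

// There is no integer remainder instruction available here, so modulo is
// computed as dividend - (dividend / divisor) * divisor.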
#define ASSEMBLE_MODULO(div_instr, mul_instr)                        \
  do {                                                               \
    const Register scratch = kScratchReg;                            \
    __ div_instr(scratch, i.InputRegister(0), i.InputRegister(1));   \
    __ mul_instr(scratch, scratch, i.InputRegister(1));              \
    __ sub(i.OutputRegister(), i.InputRegister(0), scratch, LeaveOE, \
           LeaveRC);                                                 \
    DCHECK_EQ(LeaveRC, i.OutputRCBit());                             \
  } while (0)


#define ASSEMBLE_FLOAT_MODULO()                                               \
  do {                                                                        \
    FrameScope scope(masm(), StackFrame::MANUAL);                             \
    __ PrepareCallCFunction(0, 2, kScratchReg);                               \
    __ MovToFloatParameters(i.InputDoubleRegister(0),                         \
                            i.InputDoubleRegister(1));                        \
    __ CallCFunction(ExternalReference::mod_two_doubles_operation(isolate()), \
                     0, 2);                                                   \
    __ MovFromFloatResult(i.OutputDoubleRegister());                          \
    DCHECK_EQ(LeaveRC, i.OutputRCBit());                                      \
  } while (0)
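

// In the memory-access macros, kMode_MRI (register + immediate offset) maps
// to the D-form instruction and kMode_MRR (register + register) to the
// X-form (indexed) variant.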
#define ASSEMBLE_LOAD_FLOAT(asm_instr, asm_instrx)    \
  do {                                                \
    DoubleRegister result = i.OutputDoubleRegister(); \
    AddressingMode mode = kMode_None;                 \
    MemOperand operand = i.MemoryOperand(&mode);      \
    if (mode == kMode_MRI) {                          \
      __ asm_instr(result, operand);                  \
    } else {                                          \
      __ asm_instrx(result, operand);                 \
    }                                                 \
    DCHECK_EQ(LeaveRC, i.OutputRCBit());              \
  } while (0)


#define ASSEMBLE_LOAD_INTEGER(asm_instr, asm_instrx) \
  do {                                               \
    Register result = i.OutputRegister();            \
    AddressingMode mode = kMode_None;                \
    MemOperand operand = i.MemoryOperand(&mode);     \
    if (mode == kMode_MRI) {                         \
      __ asm_instr(result, operand);                 \
    } else {                                         \
      __ asm_instrx(result, operand);                \
    }                                                \
    DCHECK_EQ(LeaveRC, i.OutputRCBit());             \
  } while (0)


#define ASSEMBLE_STORE_FLOAT(asm_instr, asm_instrx)      \
  do {                                                   \
    int index = 0;                                       \
    AddressingMode mode = kMode_None;                    \
    MemOperand operand = i.MemoryOperand(&mode, &index); \
    DoubleRegister value = i.InputDoubleRegister(index); \
    if (mode == kMode_MRI) {                             \
      __ asm_instr(value, operand);                      \
    } else {                                             \
      __ asm_instrx(value, operand);                     \
    }                                                    \
    DCHECK_EQ(LeaveRC, i.OutputRCBit());                 \
  } while (0)


#define ASSEMBLE_STORE_INTEGER(asm_instr, asm_instrx)    \
  do {                                                   \
    int index = 0;                                       \
    AddressingMode mode = kMode_None;                    \
    MemOperand operand = i.MemoryOperand(&mode, &index); \
    Register value = i.InputRegister(index);             \
    if (mode == kMode_MRI) {                             \
      __ asm_instr(value, operand);                      \
    } else {                                             \
      __ asm_instrx(value, operand);                     \
    }                                                    \
    DCHECK_EQ(LeaveRC, i.OutputRCBit());                 \
  } while (0)
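

// Checked memory accesses compare the (sign-extended) offset register
// against the length operand with an unsigned compare and, when out of
// bounds, branch to out-of-line code (loads) or past the access (stores).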
// TODO(mbrandy): fix paths that produce garbage in offset's upper 32-bits.
#define ASSEMBLE_CHECKED_LOAD_FLOAT(asm_instr, asm_instrx, width)  \
  do {                                                             \
    DoubleRegister result = i.OutputDoubleRegister();              \
    AddressingMode mode = kMode_None;                              \
    MemOperand operand = i.MemoryOperand(&mode, 0);                \
    DCHECK_EQ(kMode_MRR, mode);                                    \
    Register offset = operand.rb();                                \
    __ extsw(offset, offset);                                      \
    if (HasRegisterInput(instr, 2)) {                              \
      __ cmplw(offset, i.InputRegister(2));                        \
    } else {                                                       \
      __ cmplwi(offset, i.InputImmediate(2));                      \
    }                                                              \
    auto ool = new (zone()) OutOfLineLoadNAN##width(this, result); \
    __ bge(ool->entry());                                          \
    if (mode == kMode_MRI) {                                       \
      __ asm_instr(result, operand);                               \
    } else {                                                       \
      __ asm_instrx(result, operand);                              \
    }                                                              \
    __ bind(ool->exit());                                          \
    DCHECK_EQ(LeaveRC, i.OutputRCBit());                           \
  } while (0)


// TODO(mbrandy): fix paths that produce garbage in offset's upper 32-bits.
#define ASSEMBLE_CHECKED_LOAD_INTEGER(asm_instr, asm_instrx) \
  do {                                                       \
    Register result = i.OutputRegister();                    \
    AddressingMode mode = kMode_None;                        \
    MemOperand operand = i.MemoryOperand(&mode, 0);          \
    DCHECK_EQ(kMode_MRR, mode);                              \
    Register offset = operand.rb();                          \
    __ extsw(offset, offset);                                \
    if (HasRegisterInput(instr, 2)) {                        \
      __ cmplw(offset, i.InputRegister(2));                  \
    } else {                                                 \
      __ cmplwi(offset, i.InputImmediate(2));                \
    }                                                        \
    auto ool = new (zone()) OutOfLineLoadZero(this, result); \
    __ bge(ool->entry());                                    \
    if (mode == kMode_MRI) {                                 \
      __ asm_instr(result, operand);                         \
    } else {                                                 \
      __ asm_instrx(result, operand);                        \
    }                                                        \
    __ bind(ool->exit());                                    \
    DCHECK_EQ(LeaveRC, i.OutputRCBit());                     \
  } while (0)


// TODO(mbrandy): fix paths that produce garbage in offset's upper 32-bits.
#define ASSEMBLE_CHECKED_STORE_FLOAT(asm_instr, asm_instrx) \
  do {                                                      \
    Label done;                                             \
    AddressingMode mode = kMode_None;                       \
    MemOperand operand = i.MemoryOperand(&mode, 0);         \
    DCHECK_EQ(kMode_MRR, mode);                             \
    Register offset = operand.rb();                         \
    __ extsw(offset, offset);                               \
    if (HasRegisterInput(instr, 2)) {                       \
      __ cmplw(offset, i.InputRegister(2));                 \
    } else {                                                \
      __ cmplwi(offset, i.InputImmediate(2));               \
    }                                                       \
    __ bge(&done);                                          \
    DoubleRegister value = i.InputDoubleRegister(3);        \
    if (mode == kMode_MRI) {                                \
      __ asm_instr(value, operand);                         \
    } else {                                                \
      __ asm_instrx(value, operand);                        \
    }                                                       \
    __ bind(&done);                                         \
    DCHECK_EQ(LeaveRC, i.OutputRCBit());                    \
  } while (0)


// TODO(mbrandy): fix paths that produce garbage in offset's upper 32-bits.
#define ASSEMBLE_CHECKED_STORE_INTEGER(asm_instr, asm_instrx) \
  do {                                                        \
    Label done;                                               \
    AddressingMode mode = kMode_None;                         \
    MemOperand operand = i.MemoryOperand(&mode, 0);           \
    DCHECK_EQ(kMode_MRR, mode);                               \
    Register offset = operand.rb();                           \
    __ extsw(offset, offset);                                 \
    if (HasRegisterInput(instr, 2)) {                         \
      __ cmplw(offset, i.InputRegister(2));                   \
    } else {                                                  \
      __ cmplwi(offset, i.InputImmediate(2));                 \
    }                                                         \
    __ bge(&done);                                            \
    Register value = i.InputRegister(3);                      \
    if (mode == kMode_MRI) {                                  \
      __ asm_instr(value, operand);                           \
    } else {                                                  \
      __ asm_instrx(value, operand);                          \
    }                                                         \
    __ bind(&done);                                           \
    DCHECK_EQ(LeaveRC, i.OutputRCBit());                      \
  } while (0)
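

// Stores of tagged pointers into a heap object must inform the garbage
// collector via RecordWrite so incremental marking and the store buffer see
// the new reference.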
#define ASSEMBLE_STORE_WRITE_BARRIER()                                         \
  do {                                                                         \
    Register object = i.InputRegister(0);                                      \
    Register index = i.InputRegister(1);                                       \
    Register value = i.InputRegister(2);                                       \
    __ add(index, object, index);                                              \
    __ StoreP(value, MemOperand(index));                                       \
    SaveFPRegsMode mode =                                                      \
        frame()->DidAllocateDoubleRegisters() ? kSaveFPRegs : kDontSaveFPRegs; \
    LinkRegisterStatus lr_status = kLRHasNotBeenSaved;                         \
    __ RecordWrite(object, index, value, lr_status, mode);                     \
    DCHECK_EQ(LeaveRC, i.OutputRCBit());                                       \
  } while (0)


// Assembles an instruction after register allocation, producing machine code.
void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
  PPCOperandConverter i(this, instr);
  ArchOpcode opcode = ArchOpcodeField::decode(instr->opcode());

  switch (opcode) {
    case kArchCallCodeObject: {
      EnsureSpaceForLazyDeopt();
      if (HasRegisterInput(instr, 0)) {
        __ addi(ip, i.InputRegister(0),
                Operand(Code::kHeaderSize - kHeapObjectTag));
        __ Call(ip);
      } else {
        __ Call(Handle<Code>::cast(i.InputHeapObject(0)),
                RelocInfo::CODE_TARGET);
      }
      AddSafepointAndDeopt(instr);
      DCHECK_EQ(LeaveRC, i.OutputRCBit());
      break;
    }
    case kArchCallJSFunction: {
      EnsureSpaceForLazyDeopt();
      Register func = i.InputRegister(0);
      if (FLAG_debug_code) {
        // Check the function's context matches the context argument.
        __ LoadP(kScratchReg,
                 FieldMemOperand(func, JSFunction::kContextOffset));
        __ cmp(cp, kScratchReg);
        __ Assert(eq, kWrongFunctionContext);
      }
      __ LoadP(ip, FieldMemOperand(func, JSFunction::kCodeEntryOffset));
      __ Call(ip);
      AddSafepointAndDeopt(instr);
      DCHECK_EQ(LeaveRC, i.OutputRCBit());
      break;
    }
    case kArchJmp:
      AssembleArchJump(i.InputRpo(0));
      DCHECK_EQ(LeaveRC, i.OutputRCBit());
      break;
    case kArchNop:
      // Don't emit code for nops.
      DCHECK_EQ(LeaveRC, i.OutputRCBit());
      break;
    case kArchRet:
      AssembleReturn();
      DCHECK_EQ(LeaveRC, i.OutputRCBit());
      break;
    case kArchStackPointer:
      __ mr(i.OutputRegister(), sp);
      DCHECK_EQ(LeaveRC, i.OutputRCBit());
      break;
    case kArchTruncateDoubleToI:
      // TODO(mbrandy): move slow call to stub out of line.
      __ TruncateDoubleToI(i.OutputRegister(), i.InputDoubleRegister(0));
      DCHECK_EQ(LeaveRC, i.OutputRCBit());
      break;
    case kPPC_And32:
    case kPPC_And64:
      if (HasRegisterInput(instr, 1)) {
        __ and_(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
                i.OutputRCBit());
      } else {
        __ andi(i.OutputRegister(), i.InputRegister(0), i.InputImmediate(1));
      }
      break;
    case kPPC_AndComplement32:
    case kPPC_AndComplement64:
      __ andc(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
              i.OutputRCBit());
      break;
    case kPPC_Or32:
    case kPPC_Or64:
      if (HasRegisterInput(instr, 1)) {
        __ orx(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
               i.OutputRCBit());
      } else {
        __ ori(i.OutputRegister(), i.InputRegister(0), i.InputImmediate(1));
        DCHECK_EQ(LeaveRC, i.OutputRCBit());
      }
      break;
    case kPPC_OrComplement32:
    case kPPC_OrComplement64:
      __ orc(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
             i.OutputRCBit());
      break;
    case kPPC_Xor32:
    case kPPC_Xor64:
      if (HasRegisterInput(instr, 1)) {
        __ xor_(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
                i.OutputRCBit());
      } else {
        __ xori(i.OutputRegister(), i.InputRegister(0), i.InputImmediate(1));
        DCHECK_EQ(LeaveRC, i.OutputRCBit());
      }
      break;
    case kPPC_ShiftLeft32:
      ASSEMBLE_BINOP_RC(slw, slwi);
      break;
#if V8_TARGET_ARCH_PPC64
    case kPPC_ShiftLeft64:
      ASSEMBLE_BINOP_RC(sld, sldi);
      break;
#endif
    case kPPC_ShiftRight32:
      ASSEMBLE_BINOP_RC(srw, srwi);
      break;
#if V8_TARGET_ARCH_PPC64
    case kPPC_ShiftRight64:
      ASSEMBLE_BINOP_RC(srd, srdi);
      break;
#endif
    case kPPC_ShiftRightAlg32:
      ASSEMBLE_BINOP_INT_RC(sraw, srawi);
      break;
#if V8_TARGET_ARCH_PPC64
    case kPPC_ShiftRightAlg64:
      ASSEMBLE_BINOP_INT_RC(srad, sradi);
      break;
#endif
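    // PPC has no rotate-right instruction: a right rotate by sh is emitted
    // as a left rotate by (width - sh); subfic computes that amount when
    // the rotate count is in a register.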
    case kPPC_RotRight32:
      if (HasRegisterInput(instr, 1)) {
        __ subfic(kScratchReg, i.InputRegister(1), Operand(32));
        __ rotlw(i.OutputRegister(), i.InputRegister(0), kScratchReg,
                 i.OutputRCBit());
      } else {
        int sh = i.InputInt32(1);
        __ rotrwi(i.OutputRegister(), i.InputRegister(0), sh, i.OutputRCBit());
      }
      break;
#if V8_TARGET_ARCH_PPC64
    case kPPC_RotRight64:
      if (HasRegisterInput(instr, 1)) {
        __ subfic(kScratchReg, i.InputRegister(1), Operand(64));
        __ rotld(i.OutputRegister(), i.InputRegister(0), kScratchReg,
                 i.OutputRCBit());
      } else {
        int sh = i.InputInt32(1);
        __ rotrdi(i.OutputRegister(), i.InputRegister(0), sh, i.OutputRCBit());
      }
      break;
#endif
    case kPPC_Not32:
    case kPPC_Not64:
      __ notx(i.OutputRegister(), i.InputRegister(0), i.OutputRCBit());
      break;
    case kPPC_RotLeftAndMask32:
      __ rlwinm(i.OutputRegister(), i.InputRegister(0), i.InputInt32(1),
                31 - i.InputInt32(2), 31 - i.InputInt32(3), i.OutputRCBit());
      break;
#if V8_TARGET_ARCH_PPC64
    case kPPC_RotLeftAndClear64:
      __ rldic(i.OutputRegister(), i.InputRegister(0), i.InputInt32(1),
               63 - i.InputInt32(2), i.OutputRCBit());
      break;
    case kPPC_RotLeftAndClearLeft64:
      __ rldicl(i.OutputRegister(), i.InputRegister(0), i.InputInt32(1),
                63 - i.InputInt32(2), i.OutputRCBit());
      break;
    case kPPC_RotLeftAndClearRight64:
      __ rldicr(i.OutputRegister(), i.InputRegister(0), i.InputInt32(1),
                63 - i.InputInt32(2), i.OutputRCBit());
      break;
#endif
    case kPPC_Add32:
    case kPPC_Add64:
      if (HasRegisterInput(instr, 1)) {
        __ add(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
               LeaveOE, i.OutputRCBit());
      } else {
        __ addi(i.OutputRegister(), i.InputRegister(0), i.InputImmediate(1));
        DCHECK_EQ(LeaveRC, i.OutputRCBit());
      }
      break;
    case kPPC_AddWithOverflow32:
      ASSEMBLE_ADD_WITH_OVERFLOW();
      break;
    case kPPC_AddFloat64:
      ASSEMBLE_FLOAT_BINOP_RC(fadd);
      break;
    case kPPC_Sub32:
    case kPPC_Sub64:
      if (HasRegisterInput(instr, 1)) {
        __ sub(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
               LeaveOE, i.OutputRCBit());
      } else {
        __ subi(i.OutputRegister(), i.InputRegister(0), i.InputImmediate(1));
        DCHECK_EQ(LeaveRC, i.OutputRCBit());
      }
      break;
    case kPPC_SubWithOverflow32:
      ASSEMBLE_SUB_WITH_OVERFLOW();
      break;
    case kPPC_SubFloat64:
      ASSEMBLE_FLOAT_BINOP_RC(fsub);
      break;
    case kPPC_Mul32:
      __ mullw(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
               LeaveOE, i.OutputRCBit());
      break;
#if V8_TARGET_ARCH_PPC64
    case kPPC_Mul64:
      __ mulld(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
               LeaveOE, i.OutputRCBit());
      break;
#endif
    case kPPC_MulHigh32:
      __ mulhw(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
               i.OutputRCBit());
      break;
    case kPPC_MulHighU32:
      __ mulhwu(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
                i.OutputRCBit());
      break;
    case kPPC_MulFloat64:
      ASSEMBLE_FLOAT_BINOP_RC(fmul);
      break;
    case kPPC_Div32:
      __ divw(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
      DCHECK_EQ(LeaveRC, i.OutputRCBit());
      break;
#if V8_TARGET_ARCH_PPC64
    case kPPC_Div64:
      __ divd(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
      DCHECK_EQ(LeaveRC, i.OutputRCBit());
      break;
#endif
    case kPPC_DivU32:
      __ divwu(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
      DCHECK_EQ(LeaveRC, i.OutputRCBit());
      break;
#if V8_TARGET_ARCH_PPC64
    case kPPC_DivU64:
      __ divdu(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
      DCHECK_EQ(LeaveRC, i.OutputRCBit());
      break;
#endif
    case kPPC_DivFloat64:
      ASSEMBLE_FLOAT_BINOP_RC(fdiv);
      break;
    case kPPC_Mod32:
      ASSEMBLE_MODULO(divw, mullw);
      break;
#if V8_TARGET_ARCH_PPC64
    case kPPC_Mod64:
      ASSEMBLE_MODULO(divd, mulld);
      break;
#endif
    case kPPC_ModU32:
      ASSEMBLE_MODULO(divwu, mullw);
      break;
#if V8_TARGET_ARCH_PPC64
    case kPPC_ModU64:
      ASSEMBLE_MODULO(divdu, mulld);
      break;
#endif
    case kPPC_ModFloat64:
      // TODO(bmeurer): We should really get rid of this special instruction,
      // and generate a CallAddress instruction instead.
      ASSEMBLE_FLOAT_MODULO();
      break;
    case kPPC_Neg32:
    case kPPC_Neg64:
      __ neg(i.OutputRegister(), i.InputRegister(0), LeaveOE, i.OutputRCBit());
      break;
    case kPPC_SqrtFloat64:
      ASSEMBLE_FLOAT_UNOP_RC(fsqrt);
      break;
    case kPPC_FloorFloat64:
      ASSEMBLE_FLOAT_UNOP_RC(frim);
      break;
    case kPPC_CeilFloat64:
      ASSEMBLE_FLOAT_UNOP_RC(frip);
      break;
    case kPPC_TruncateFloat64:
      ASSEMBLE_FLOAT_UNOP_RC(friz);
      break;
    case kPPC_RoundFloat64:
      ASSEMBLE_FLOAT_UNOP_RC(frin);
      break;
    case kPPC_NegFloat64:
      ASSEMBLE_FLOAT_UNOP_RC(fneg);
      break;
    case kPPC_Cmp32:
      ASSEMBLE_COMPARE(cmpw, cmplw);
      break;
#if V8_TARGET_ARCH_PPC64
    case kPPC_Cmp64:
      ASSEMBLE_COMPARE(cmp, cmpl);
      break;
#endif
    case kPPC_CmpFloat64:
      ASSEMBLE_FLOAT_COMPARE(fcmpu);
      break;
    case kPPC_Tst32:
      if (HasRegisterInput(instr, 1)) {
        __ and_(r0, i.InputRegister(0), i.InputRegister(1), i.OutputRCBit());
      } else {
        __ andi(r0, i.InputRegister(0), i.InputImmediate(1));
      }
#if V8_TARGET_ARCH_PPC64
      __ extsw(r0, r0, i.OutputRCBit());
#endif
      DCHECK_EQ(SetRC, i.OutputRCBit());
      break;
#if V8_TARGET_ARCH_PPC64
    case kPPC_Tst64:
      if (HasRegisterInput(instr, 1)) {
        __ and_(r0, i.InputRegister(0), i.InputRegister(1), i.OutputRCBit());
      } else {
        __ andi(r0, i.InputRegister(0), i.InputImmediate(1));
      }
      DCHECK_EQ(SetRC, i.OutputRCBit());
      break;
#endif
    case kPPC_Push:
      __ Push(i.InputRegister(0));
      DCHECK_EQ(LeaveRC, i.OutputRCBit());
      break;
    case kPPC_ExtendSignWord8:
      __ extsb(i.OutputRegister(), i.InputRegister(0));
      DCHECK_EQ(LeaveRC, i.OutputRCBit());
      break;
    case kPPC_ExtendSignWord16:
      __ extsh(i.OutputRegister(), i.InputRegister(0));
      DCHECK_EQ(LeaveRC, i.OutputRCBit());
      break;
#if V8_TARGET_ARCH_PPC64
    case kPPC_ExtendSignWord32:
      __ extsw(i.OutputRegister(), i.InputRegister(0));
      DCHECK_EQ(LeaveRC, i.OutputRCBit());
      break;
    case kPPC_Uint32ToUint64:
      // Zero extend.
      __ clrldi(i.OutputRegister(), i.InputRegister(0), Operand(32));
      DCHECK_EQ(LeaveRC, i.OutputRCBit());
      break;
    case kPPC_Int64ToInt32:
      // TODO(mbrandy): sign extend?
      __ Move(i.OutputRegister(), i.InputRegister(0));
      DCHECK_EQ(LeaveRC, i.OutputRCBit());
      break;
#endif
    case kPPC_Int32ToFloat64:
      __ ConvertIntToDouble(i.InputRegister(0), i.OutputDoubleRegister());
      DCHECK_EQ(LeaveRC, i.OutputRCBit());
      break;
    case kPPC_Uint32ToFloat64:
      __ ConvertUnsignedIntToDouble(i.InputRegister(0),
                                    i.OutputDoubleRegister());
      DCHECK_EQ(LeaveRC, i.OutputRCBit());
      break;
    case kPPC_Float64ToInt32:
    case kPPC_Float64ToUint32:
      __ ConvertDoubleToInt64(i.InputDoubleRegister(0),
#if !V8_TARGET_ARCH_PPC64
                              kScratchReg,
#endif
                              i.OutputRegister(), kScratchDoubleReg);
      DCHECK_EQ(LeaveRC, i.OutputRCBit());
      break;
    case kPPC_Float64ToFloat32:
      ASSEMBLE_FLOAT_UNOP_RC(frsp);
      break;
    case kPPC_Float32ToFloat64:
      // Nothing to do.
      __ Move(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
      DCHECK_EQ(LeaveRC, i.OutputRCBit());
      break;
    case kPPC_LoadWordU8:
      ASSEMBLE_LOAD_INTEGER(lbz, lbzx);
      break;
    case kPPC_LoadWordS8:
      ASSEMBLE_LOAD_INTEGER(lbz, lbzx);
      __ extsb(i.OutputRegister(), i.OutputRegister());
      break;
    case kPPC_LoadWordU16:
      ASSEMBLE_LOAD_INTEGER(lhz, lhzx);
      break;
    case kPPC_LoadWordS16:
      ASSEMBLE_LOAD_INTEGER(lha, lhax);
      break;
    case kPPC_LoadWordS32:
      ASSEMBLE_LOAD_INTEGER(lwa, lwax);
      break;
#if V8_TARGET_ARCH_PPC64
    case kPPC_LoadWord64:
      ASSEMBLE_LOAD_INTEGER(ld, ldx);
      break;
#endif
    case kPPC_LoadFloat32:
      ASSEMBLE_LOAD_FLOAT(lfs, lfsx);
      break;
    case kPPC_LoadFloat64:
      ASSEMBLE_LOAD_FLOAT(lfd, lfdx);
      break;
    case kPPC_StoreWord8:
      ASSEMBLE_STORE_INTEGER(stb, stbx);
      break;
    case kPPC_StoreWord16:
      ASSEMBLE_STORE_INTEGER(sth, sthx);
      break;
    case kPPC_StoreWord32:
      ASSEMBLE_STORE_INTEGER(stw, stwx);
      break;
#if V8_TARGET_ARCH_PPC64
    case kPPC_StoreWord64:
      ASSEMBLE_STORE_INTEGER(std, stdx);
      break;
#endif
    case kPPC_StoreFloat32:
      ASSEMBLE_STORE_FLOAT(stfs, stfsx);
      break;
    case kPPC_StoreFloat64:
      ASSEMBLE_STORE_FLOAT(stfd, stfdx);
      break;
    case kPPC_StoreWriteBarrier:
      ASSEMBLE_STORE_WRITE_BARRIER();
      break;
    case kCheckedLoadInt8:
      ASSEMBLE_CHECKED_LOAD_INTEGER(lbz, lbzx);
      __ extsb(i.OutputRegister(), i.OutputRegister());
      break;
    case kCheckedLoadUint8:
      ASSEMBLE_CHECKED_LOAD_INTEGER(lbz, lbzx);
      break;
    case kCheckedLoadInt16:
      ASSEMBLE_CHECKED_LOAD_INTEGER(lha, lhax);
      break;
    case kCheckedLoadUint16:
      ASSEMBLE_CHECKED_LOAD_INTEGER(lhz, lhzx);
      break;
    case kCheckedLoadWord32:
      ASSEMBLE_CHECKED_LOAD_INTEGER(lwa, lwax);
      break;
    case kCheckedLoadFloat32:
      ASSEMBLE_CHECKED_LOAD_FLOAT(lfs, lfsx, 32);
      break;
    case kCheckedLoadFloat64:
      ASSEMBLE_CHECKED_LOAD_FLOAT(lfd, lfdx, 64);
      break;
    case kCheckedStoreWord8:
      ASSEMBLE_CHECKED_STORE_INTEGER(stb, stbx);
      break;
    case kCheckedStoreWord16:
      ASSEMBLE_CHECKED_STORE_INTEGER(sth, sthx);
      break;
    case kCheckedStoreWord32:
      ASSEMBLE_CHECKED_STORE_INTEGER(stw, stwx);
      break;
    case kCheckedStoreFloat32:
      ASSEMBLE_CHECKED_STORE_FLOAT(stfs, stfsx);
      break;
    case kCheckedStoreFloat64:
      ASSEMBLE_CHECKED_STORE_FLOAT(stfd, stfdx);
      break;
    default:
      UNREACHABLE();
      break;
  }
}


// Assembles branches after an instruction.
void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
  PPCOperandConverter i(this, instr);
  Label* tlabel = branch->true_label;
  Label* flabel = branch->false_label;
  ArchOpcode op = instr->arch_opcode();
  FlagsCondition condition = branch->condition;
  CRegister cr = cr0;

  // Overflow checked for add/sub only.
  DCHECK((condition != kOverflow && condition != kNotOverflow) ||
         (op == kPPC_AddWithOverflow32 || op == kPPC_SubWithOverflow32));

  Condition cond = FlagsConditionToCondition(condition);
  if (op == kPPC_CmpFloat64) {
    // Check for unordered if necessary.
    if (cond == le) {
      __ bunordered(flabel, cr);
      // Unnecessary for eq/lt since only FU bit will be set.
    } else if (cond == gt) {
      __ bunordered(tlabel, cr);
      // Unnecessary for ne/ge since only FU bit will be set.
    }
  }
  __ b(cond, tlabel, cr);
  if (!branch->fallthru) __ b(flabel);  // no fallthru to flabel.
}


void CodeGenerator::AssembleArchJump(BasicBlock::RpoNumber target) {
  if (!IsNextInAssemblyOrder(target)) __ b(GetLabel(target));
}
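

// Boolean materialization uses isel (integer select), which copies one of
// two source registers based on a condition-register bit and thus avoids a
// branch.  Unordered float comparisons need special handling because fcmpu
// sets only the FU bit in that case.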
// Assembles boolean materializations after an instruction.
void CodeGenerator::AssembleArchBoolean(Instruction* instr,
                                        FlagsCondition condition) {
  PPCOperandConverter i(this, instr);
  Label done;
  ArchOpcode op = instr->arch_opcode();
  bool check_unordered = (op == kPPC_CmpFloat64);
  CRegister cr = cr0;

  // Overflow checked for add/sub only.
  DCHECK((condition != kOverflow && condition != kNotOverflow) ||
         (op == kPPC_AddWithOverflow32 || op == kPPC_SubWithOverflow32));

  // Materialize a full 32-bit 1 or 0 value. The result register is always the
  // last output of the instruction.
  DCHECK_NE(0u, instr->OutputCount());
  Register reg = i.OutputRegister(instr->OutputCount() - 1);

  Condition cond = FlagsConditionToCondition(condition);
  switch (cond) {
    case eq:
    case lt:
      __ li(reg, Operand::Zero());
      __ li(kScratchReg, Operand(1));
      __ isel(cond, reg, kScratchReg, reg, cr);
      break;
    case ne:
    case ge:
      __ li(reg, Operand(1));
      __ isel(NegateCondition(cond), reg, r0, reg, cr);
      break;
    case gt:
      if (check_unordered) {
        __ li(reg, Operand(1));
        __ li(kScratchReg, Operand::Zero());
        __ bunordered(&done, cr);
        __ isel(cond, reg, reg, kScratchReg, cr);
      } else {
        __ li(reg, Operand::Zero());
        __ li(kScratchReg, Operand(1));
        __ isel(cond, reg, kScratchReg, reg, cr);
      }
      break;
    case le:
      if (check_unordered) {
        __ li(reg, Operand::Zero());
        __ li(kScratchReg, Operand(1));
        __ bunordered(&done, cr);
        __ isel(NegateCondition(cond), reg, r0, kScratchReg, cr);
      } else {
        __ li(reg, Operand(1));
        __ isel(NegateCondition(cond), reg, r0, reg, cr);
      }
      break;
    default:
      UNREACHABLE();
      break;
  }
  __ bind(&done);
}


void CodeGenerator::AssembleDeoptimizerCall(int deoptimization_id) {
  Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
      isolate(), deoptimization_id, Deoptimizer::LAZY);
  __ Call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
}
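

// For C entry frames (kCallAddress), the prologue saves lr and fp,
// establishes the frame pointer, and pushes any callee-saved registers the
// call descriptor requires; JS and stub frames use the standard frame
// builders instead.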
void CodeGenerator::AssemblePrologue() {
  CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
  if (descriptor->kind() == CallDescriptor::kCallAddress) {
#if ABI_USES_FUNCTION_DESCRIPTORS
    __ function_descriptor();
#endif
    int register_save_area_size = 0;
    RegList frame_saves = fp.bit();
    __ mflr(r0);
#if V8_OOL_CONSTANT_POOL
    __ Push(r0, fp, kConstantPoolRegister);
    // Adjust FP to point to saved FP.
    __ subi(fp, sp, Operand(StandardFrameConstants::kConstantPoolOffset));
    register_save_area_size += kPointerSize;
    frame_saves |= kConstantPoolRegister.bit();
#else
    __ Push(r0, fp);
    __ mr(fp, sp);
#endif
    // Save callee-saved registers.
    const RegList saves = descriptor->CalleeSavedRegisters() & ~frame_saves;
    for (int i = Register::kNumRegisters - 1; i >= 0; i--) {
      if (!((1 << i) & saves)) continue;
      register_save_area_size += kPointerSize;
    }
    frame()->SetRegisterSaveAreaSize(register_save_area_size);
    __ MultiPush(saves);
  } else if (descriptor->IsJSFunctionCall()) {
    CompilationInfo* info = this->info();
    __ Prologue(info->IsCodePreAgingActive());
    frame()->SetRegisterSaveAreaSize(
        StandardFrameConstants::kFixedFrameSizeFromFp);
  } else {
    __ StubPrologue();
    frame()->SetRegisterSaveAreaSize(
        StandardFrameConstants::kFixedFrameSizeFromFp);
  }

  int stack_slots = frame()->GetSpillSlotCount();
  if (info()->is_osr()) {
    // TurboFan OSR-compiled functions cannot be entered directly.
    __ Abort(kShouldNotDirectlyEnterOsrFunction);

    // Unoptimized code jumps directly to this entrypoint while the unoptimized
    // frame is still on the stack. Optimized code uses OSR values directly from
    // the unoptimized frame. Thus, all that needs to be done is to allocate the
    // remaining stack slots.
    if (FLAG_code_comments) __ RecordComment("-- OSR entrypoint --");
    osr_pc_offset_ = __ pc_offset();
    DCHECK(stack_slots >= frame()->GetOsrStackSlotCount());
    stack_slots -= frame()->GetOsrStackSlotCount();
  }

  if (stack_slots > 0) {
    __ Add(sp, sp, -stack_slots * kPointerSize, r0);
  }
}


void CodeGenerator::AssembleReturn() {
  CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
  if (descriptor->kind() == CallDescriptor::kCallAddress) {
    if (frame()->GetRegisterSaveAreaSize() > 0) {
      // Remove this frame's spill slots first.
      int stack_slots = frame()->GetSpillSlotCount();
      if (stack_slots > 0) {
        __ Add(sp, sp, stack_slots * kPointerSize, r0);
      }
      // Restore registers.
      RegList frame_saves = fp.bit();
#if V8_OOL_CONSTANT_POOL
      frame_saves |= kConstantPoolRegister.bit();
#endif
      const RegList saves = descriptor->CalleeSavedRegisters() & ~frame_saves;
      __ MultiPop(saves);
    }
    __ LeaveFrame(StackFrame::MANUAL);
    __ Ret();
  } else {
    int pop_count = descriptor->IsJSFunctionCall()
                        ? static_cast<int>(descriptor->JSParameterCount())
                        : 0;
    __ LeaveFrame(StackFrame::MANUAL, pop_count * kPointerSize);
    __ Ret();
  }
}


void CodeGenerator::AssembleMove(InstructionOperand* source,
                                 InstructionOperand* destination) {
  PPCOperandConverter g(this, NULL);
  // Dispatch on the source and destination operand kinds. Not all
  // combinations are possible.
  if (source->IsRegister()) {
    DCHECK(destination->IsRegister() || destination->IsStackSlot());
    Register src = g.ToRegister(source);
    if (destination->IsRegister()) {
      __ Move(g.ToRegister(destination), src);
    } else {
      __ StoreP(src, g.ToMemOperand(destination), r0);
    }
  } else if (source->IsStackSlot()) {
    DCHECK(destination->IsRegister() || destination->IsStackSlot());
    MemOperand src = g.ToMemOperand(source);
    if (destination->IsRegister()) {
      __ LoadP(g.ToRegister(destination), src, r0);
    } else {
      Register temp = kScratchReg;
      __ LoadP(temp, src, r0);
      __ StoreP(temp, g.ToMemOperand(destination), r0);
    }
  } else if (source->IsConstant()) {
    Constant src = g.ToConstant(source);
    if (destination->IsRegister() || destination->IsStackSlot()) {
      Register dst =
          destination->IsRegister() ? g.ToRegister(destination) : kScratchReg;
      switch (src.type()) {
        case Constant::kInt32:
          __ mov(dst, Operand(src.ToInt32()));
          break;
        case Constant::kInt64:
          __ mov(dst, Operand(src.ToInt64()));
          break;
        case Constant::kFloat32:
          __ Move(dst,
                  isolate()->factory()->NewNumber(src.ToFloat32(), TENURED));
          break;
        case Constant::kFloat64:
          __ Move(dst,
                  isolate()->factory()->NewNumber(src.ToFloat64(), TENURED));
          break;
        case Constant::kExternalReference:
          __ mov(dst, Operand(src.ToExternalReference()));
          break;
        case Constant::kHeapObject:
          __ Move(dst, src.ToHeapObject());
          break;
        case Constant::kRpoNumber:
          UNREACHABLE();  // TODO(dcarney): loading RPO constants on PPC.
          break;
      }
      if (destination->IsStackSlot()) {
        __ StoreP(dst, g.ToMemOperand(destination), r0);
      }
    } else {
      DoubleRegister dst = destination->IsDoubleRegister()
                               ? g.ToDoubleRegister(destination)
                               : kScratchDoubleReg;
      double value = (src.type() == Constant::kFloat32) ? src.ToFloat32()
                                                        : src.ToFloat64();
      __ LoadDoubleLiteral(dst, value, kScratchReg);
      if (destination->IsDoubleStackSlot()) {
        __ StoreDouble(dst, g.ToMemOperand(destination), r0);
      }
    }
  } else if (source->IsDoubleRegister()) {
    DoubleRegister src = g.ToDoubleRegister(source);
    if (destination->IsDoubleRegister()) {
      DoubleRegister dst = g.ToDoubleRegister(destination);
      __ Move(dst, src);
    } else {
      DCHECK(destination->IsDoubleStackSlot());
      __ StoreDouble(src, g.ToMemOperand(destination), r0);
    }
  } else if (source->IsDoubleStackSlot()) {
    DCHECK(destination->IsDoubleRegister() || destination->IsDoubleStackSlot());
    MemOperand src = g.ToMemOperand(source);
    if (destination->IsDoubleRegister()) {
      __ LoadDouble(g.ToDoubleRegister(destination), src, r0);
    } else {
      DoubleRegister temp = kScratchDoubleReg;
      __ LoadDouble(temp, src, r0);
      __ StoreDouble(temp, g.ToMemOperand(destination), r0);
    }
  } else {
    UNREACHABLE();
  }
}
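

// Swaps are emitted by the gap resolver for parallel moves that form a
// cycle; kScratchReg and kScratchDoubleReg serve as the temporaries.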
void CodeGenerator::AssembleSwap(InstructionOperand* source,
                                 InstructionOperand* destination) {
  PPCOperandConverter g(this, NULL);
  // Dispatch on the source and destination operand kinds. Not all
  // combinations are possible.
  if (source->IsRegister()) {
    // Register-register.
    Register temp = kScratchReg;
    Register src = g.ToRegister(source);
    if (destination->IsRegister()) {
      Register dst = g.ToRegister(destination);
      __ mr(temp, src);
      __ mr(src, dst);
      __ mr(dst, temp);
    } else {
      DCHECK(destination->IsStackSlot());
      MemOperand dst = g.ToMemOperand(destination);
      __ mr(temp, src);
      __ LoadP(src, dst);
      __ StoreP(temp, dst);
    }
#if V8_TARGET_ARCH_PPC64
  } else if (source->IsStackSlot() || source->IsDoubleStackSlot()) {
#else
  } else if (source->IsStackSlot()) {
    DCHECK(destination->IsStackSlot());
#endif
    Register temp_0 = kScratchReg;
    Register temp_1 = r0;
    MemOperand src = g.ToMemOperand(source);
    MemOperand dst = g.ToMemOperand(destination);
    __ LoadP(temp_0, src);
    __ LoadP(temp_1, dst);
    __ StoreP(temp_0, dst);
    __ StoreP(temp_1, src);
  } else if (source->IsDoubleRegister()) {
    DoubleRegister temp = kScratchDoubleReg;
    DoubleRegister src = g.ToDoubleRegister(source);
    if (destination->IsDoubleRegister()) {
      DoubleRegister dst = g.ToDoubleRegister(destination);
      __ fmr(temp, src);
      __ fmr(src, dst);
      __ fmr(dst, temp);
    } else {
      DCHECK(destination->IsDoubleStackSlot());
      MemOperand dst = g.ToMemOperand(destination);
      __ fmr(temp, src);
      __ lfd(src, dst);
      __ stfd(temp, dst);
    }
#if !V8_TARGET_ARCH_PPC64
  } else if (source->IsDoubleStackSlot()) {
    DCHECK(destination->IsDoubleStackSlot());
    DoubleRegister temp_0 = kScratchDoubleReg;
    DoubleRegister temp_1 = d0;
    MemOperand src = g.ToMemOperand(source);
    MemOperand dst = g.ToMemOperand(destination);
    __ lfd(temp_0, src);
    __ lfd(temp_1, dst);
    __ stfd(temp_0, dst);
    __ stfd(temp_1, src);
#endif
  } else {
    // No other combinations are possible.
    UNREACHABLE();
  }
}


void CodeGenerator::AddNopForSmiCodeInlining() {
  // We do not insert nops for inlined Smi code.
}
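

// Lazy deoptimization patches a call sequence over the code following a call
// site, so there must be enough instructions between consecutive lazy-deopt
// points for the patcher to overwrite.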
void CodeGenerator::EnsureSpaceForLazyDeopt() {
  int space_needed = Deoptimizer::patch_size();
  if (!info()->IsStub()) {
    // Ensure that we have enough space after the previous lazy-bailout
    // instruction for patching the code here.
    int current_pc = masm()->pc_offset();
    if (current_pc < last_lazy_deopt_pc_ + space_needed) {
      int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
      DCHECK_EQ(0, padding_size % v8::internal::Assembler::kInstrSize);
      while (padding_size > 0) {
        __ nop();
        padding_size -= v8::internal::Assembler::kInstrSize;
      }
    }
  }
  MarkLazyDeoptSite();
}

#undef __

}  // namespace compiler
}  // namespace internal
}  // namespace v8