// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.

/*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX                                                                           XX
XX                        Amd64/x86 Code Generator                           XX
XX                                                                           XX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
*/

#ifndef LEGACY_BACKEND // This file is ONLY used for the RyuJIT backend that uses the linear scan register allocator.

#include "gcinfoencoder.h"

/*****************************************************************************
 *
 * Generate code that will set the given register to the integer constant.
 */
void CodeGen::genSetRegToIcon(regNumber reg, ssize_t val, var_types type, insFlags flags)
{
    // Reg cannot be a FP reg
    assert(!genIsValidFloatReg(reg));

    // The only TYP_REF constant that can come down this path is a managed 'null' since it is not
    // relocatable. Other ref type constants (e.g. string objects) go through a different code path.
    noway_assert(type != TYP_REF || val == 0);

    if (val == 0)
    {
        instGen_Set_Reg_To_Zero(emitActualTypeSize(type), reg, flags);
    }
    else
    {
        // TODO-XArch-CQ: needs all the optimized cases
        getEmitter()->emitIns_R_I(INS_mov, emitActualTypeSize(type), reg, val);
    }
}
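// For illustration only (the register and value below are made-up examples):
//      genSetRegToIcon(REG_ECX, 0, TYP_INT)   emits   xor ecx, ecx
//      genSetRegToIcon(REG_ECX, 42, TYP_INT)  emits   mov ecx, 42
// The xor form is both smaller and breaks any dependency on the register's old value.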
/*****************************************************************************
 *
 *  Generate code to check that the GS cookie wasn't thrashed by a buffer
 *  overrun. If pushReg is true, preserve all registers around code sequence.
 *  Otherwise ECX could be modified.
 *
 *  Implementation Note: pushReg = true, in case of tail calls.
 */
void CodeGen::genEmitGSCookieCheck(bool pushReg)
{
    noway_assert(compiler->gsGlobalSecurityCookieAddr || compiler->gsGlobalSecurityCookieVal);

    // Make sure that EAX is reported as live GC-ref so that any GC that kicks in while
    // executing the GS cookie check will not collect the object pointed to by EAX.
    //
    // For Amd64 System V, a two-register-returned struct could be returned in RAX and RDX.
    // In such a case make sure that the correct GC-ness of RDX is reported as well, so
    // that a GC object pointed to by RDX will not be collected.
    if (!pushReg)
    {
        // Handle multi-reg return type values
        if (compiler->compMethodReturnsMultiRegRetType())
        {
            ReturnTypeDesc retTypeDesc;
            if (varTypeIsLong(compiler->info.compRetNativeType))
            {
                retTypeDesc.InitializeLongReturnType(compiler);
            }
            else // we must have a struct return type
            {
                retTypeDesc.InitializeStructReturnType(compiler, compiler->info.compMethodInfo->args.retTypeClass);
            }

            unsigned regCount = retTypeDesc.GetReturnRegCount();

            // Only the x86 and x64 Unix ABIs allow multi-reg returns, and the
            // number of result regs should equal MAX_RET_REG_COUNT.
            assert(regCount == MAX_RET_REG_COUNT);

            for (unsigned i = 0; i < regCount; ++i)
            {
                gcInfo.gcMarkRegPtrVal(retTypeDesc.GetABIReturnReg(i), retTypeDesc.GetReturnRegType(i));
            }
        }
        else if (compiler->compMethodReturnsRetBufAddr())
        {
            // This is for returning in an implicit RetBuf.
            // If the address of the buffer is returned in REG_INTRET, mark the content of INTRET as ByRef.

            // In case the return is in an implicit RetBuf, the native return type should be a struct
            assert(varTypeIsStruct(compiler->info.compRetNativeType));

            gcInfo.gcMarkRegPtrVal(REG_INTRET, TYP_BYREF);
        }
        // ... all other cases.
        else
        {
#ifdef _TARGET_AMD64_
            // For x64, structs that are not returned in registers are always
            // returned in implicit RetBuf. If we reached here, we should not have
            // a RetBuf and the return type should not be a struct.
            assert(compiler->info.compRetBuffArg == BAD_VAR_NUM);
            assert(!varTypeIsStruct(compiler->info.compRetNativeType));
#endif // _TARGET_AMD64_

            // For x86 Windows we can't make such assertions since we generate code for returning of
            // the RetBuf in REG_INTRET only when the ProfilerHook is enabled. Otherwise
            // compRetNativeType could be TYP_STRUCT.
            gcInfo.gcMarkRegPtrVal(REG_INTRET, compiler->info.compRetNativeType);
        }
    }

    regNumber regGSCheck;
    regMaskTP regMaskGSCheck = RBM_NONE;

    if (!pushReg)
    {
        // Non-tail call: we can use any callee trash register that is not
        // a return register or that contains the 'this' pointer (which we must
        // keep alive), since we are generating the GS cookie check after a
        // GT_RETURN block.
        // Note: On Amd64 System V RDX is an arg register - REG_ARG_2 - as well
        // as a return register for two-register-returned structs.
        if (compiler->lvaKeepAliveAndReportThis() && compiler->lvaTable[compiler->info.compThisArg].lvRegister &&
            (compiler->lvaTable[compiler->info.compThisArg].lvRegNum == REG_ARG_0))
        {
            regGSCheck = REG_ARG_1;
        }
        else
        {
            regGSCheck = REG_ARG_0;
        }
    }
    else
    {
#ifdef _TARGET_X86_
        // It doesn't matter which register we pick, since we're going to save and restore it
        // around the check.
        // TODO-CQ: Can we optimize the choice of register to avoid doing the push/pop sometimes?
        regGSCheck     = REG_EAX;
        regMaskGSCheck = RBM_EAX;
#else  // !_TARGET_X86_
        // Tail calls from methods that need a GS check: We need to preserve registers while
        // emitting the GS cookie check for a tail prefixed call or a jmp. To emit the GS
        // cookie check, we might need a register. This won't be an issue for jmp calls for
        // the reason mentioned below (see the comment starting with "Jmp Calls:").
        //
        // The following are the possible solutions in case of tail prefixed calls:
        // 1) Use R11 - ignore the tail prefix on calls that need to pass a param in R11 when
        //    present in methods that require a GS cookie check. The rest of the tail calls,
        //    which do not require R11, will be honored.
        // 2) Internal register - the GT_CALL node reserves an internal register and emits the
        //    GS cookie check as part of tail call codegen. GenExitCode() needs to special case
        //    fast tail calls implemented as epilog+jmp, or such tail calls should always get
        //    dispatched via a helper.
        // 3) Materialize the GS cookie check as a separate node hanging off the GT_CALL node in
        //    the right execution order during rationalization.
        //
        // There are two calls that use R11: VSD and calli pinvokes with a cookie param. The tail
        // prefix on pinvokes is ignored. That is, options 2 and 3 will allow tail prefixed
        // VSD calls from methods that need a GS check.
        //
        // Tail prefixed calls: Right now for Jit64 compat, a method requiring a GS cookie check
        // ignores the tail prefix. In the future, if we intend to support tail calls from such a
        // method, consider one of the options mentioned above. For now we add an assert that we
        // don't expect to see a tail call in a method that requires a GS check.
        noway_assert(!compiler->compTailCallUsed);

        // Jmp calls: a jmp call specifies the method handle, which the JIT uses to query the VM
        // for the method's entry point address; hence it can be neither a VSD call nor a PInvoke
        // calli with a cookie parameter. Therefore, in case of jmp calls it is safe to use R11.
        regGSCheck = REG_R11;
#endif // !_TARGET_X86_
    }

    regMaskTP byrefPushedRegs = RBM_NONE;
    regMaskTP norefPushedRegs = RBM_NONE;
    regMaskTP pushedRegs      = RBM_NONE;

    if (compiler->gsGlobalSecurityCookieAddr == nullptr)
    {
#if defined(_TARGET_AMD64_)
        // If the GS cookie value fits within 32-bits we can use 'cmp mem64, imm32'.
        // Otherwise, load the value into a reg and use 'cmp mem64, reg64'.
        if ((int)compiler->gsGlobalSecurityCookieVal != (ssize_t)compiler->gsGlobalSecurityCookieVal)
        {
            genSetRegToIcon(regGSCheck, compiler->gsGlobalSecurityCookieVal, TYP_I_IMPL);
            getEmitter()->emitIns_S_R(INS_cmp, EA_PTRSIZE, regGSCheck, compiler->lvaGSSecurityCookie, 0);
        }
        else
#endif // defined(_TARGET_AMD64_)
        {
            assert((int)compiler->gsGlobalSecurityCookieVal == (ssize_t)compiler->gsGlobalSecurityCookieVal);
            getEmitter()->emitIns_S_I(INS_cmp, EA_PTRSIZE, compiler->lvaGSSecurityCookie, 0,
                                      (int)compiler->gsGlobalSecurityCookieVal);
        }
    }
    else
    {
        // Ngen case - the GS cookie value needs to be accessed through an indirection.

        pushedRegs = genPushRegs(regMaskGSCheck, &byrefPushedRegs, &norefPushedRegs);

        instGen_Set_Reg_To_Imm(EA_HANDLE_CNS_RELOC, regGSCheck, (ssize_t)compiler->gsGlobalSecurityCookieAddr);
        getEmitter()->emitIns_R_AR(ins_Load(TYP_I_IMPL), EA_PTRSIZE, regGSCheck, regGSCheck, 0);
        getEmitter()->emitIns_S_R(INS_cmp, EA_PTRSIZE, regGSCheck, compiler->lvaGSSecurityCookie, 0);
    }

    BasicBlock*  gsCheckBlk = genCreateTempLabel();
    emitJumpKind jmpEqual   = genJumpKindForOper(GT_EQ, CK_SIGNED);
    inst_JMP(jmpEqual, gsCheckBlk);
    genEmitHelperCall(CORINFO_HELP_FAIL_FAST, 0, EA_UNKNOWN);
    genDefineTempLabel(gsCheckBlk);

    genPopRegs(pushedRegs, byrefPushedRegs, norefPushedRegs);
}
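// A sketch of the sequence emitted above for the non-AOT case with a cookie value that
// fits in 32 bits (the frame offset and cookie value are illustrative, not fixed):
//      cmp   qword ptr [rbp-08H], 0x3F8E2D1C   ; compare the stack cookie slot with the expected value
//      je    SHORT skip
//      call  CORINFO_HELP_FAIL_FAST            ; the cookie was overwritten: fail fast
//   skip: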
BasicBlock* CodeGen::genCallFinally(BasicBlock* block)
{
#if FEATURE_EH_FUNCLETS
    // Generate a call to the finally, like this:
    //      mov         rcx,qword ptr [rbp + 20H]       // Load rcx with PSPSym
    //      call        finally-funclet
    //      jmp         finally-return                  // Only for non-retless finally calls
    // The jmp can be a NOP if we're going to the next block.
    // If we're generating code for the main function (not a funclet), and there is no localloc,
    // then RSP at this point is the same value as that stored in the PSPSym. So just copy RSP
    // instead of loading the PSPSym in this case, or if PSPSym is not used (CoreRT ABI).

    if ((compiler->lvaPSPSym == BAD_VAR_NUM) ||
        (!compiler->compLocallocUsed && (compiler->funCurrentFunc()->funKind == FUNC_ROOT)))
    {
#ifndef UNIX_X86_ABI
        inst_RV_RV(INS_mov, REG_ARG_0, REG_SPBASE, TYP_I_IMPL);
#endif // !UNIX_X86_ABI
    }
    else
    {
        getEmitter()->emitIns_R_S(ins_Load(TYP_I_IMPL), EA_PTRSIZE, REG_ARG_0, compiler->lvaPSPSym, 0);
    }
    getEmitter()->emitIns_J(INS_call, block->bbJumpDest);

    if (block->bbFlags & BBF_RETLESS_CALL)
    {
        // We have a retless call, and the last instruction generated was a call.
        // If the next block is in a different EH region (or is the end of the code
        // block), then we need to generate a breakpoint here (since it will never
        // get executed) to get proper unwind behavior.

        if ((block->bbNext == nullptr) || !BasicBlock::sameEHRegion(block, block->bbNext))
        {
            instGen(INS_BREAKPOINT); // This should never get executed
        }
    }
    else
    {
// TODO-Linux-x86: Do we need to handle the GC information for this NOP or JMP specially, as is done for other
// architectures?
#ifndef JIT32_GCENCODER
        // Because of the way the flowgraph is connected, the liveness info for this one instruction
        // after the call is not (can not be) correct in cases where a variable has a last use in the
        // handler. So turn off GC reporting for this single instruction.
        getEmitter()->emitDisableGC();
#endif // JIT32_GCENCODER

        // Now go to where the finally funclet needs to return to.
        if (block->bbNext->bbJumpDest == block->bbNext->bbNext)
        {
            // Fall-through.
            // TODO-XArch-CQ: Can we get rid of this instruction, and just have the call return directly
            // to the next instruction? This would depend on stack walking from within the finally
            // handler working without this instruction being in this special EH region.
            instGen(INS_nop);
        }
        else
        {
            inst_JMP(EJ_jmp, block->bbNext->bbJumpDest);
        }

#ifndef JIT32_GCENCODER
        getEmitter()->emitEnableGC();
#endif // JIT32_GCENCODER
    }

#else // !FEATURE_EH_FUNCLETS

    // If we are about to invoke a finally locally from a try block, we have to set the ShadowSP slot
    // corresponding to the finally's nesting level. When invoked in response to an exception, the
    // EE does this.
    //
    // We have a BBJ_CALLFINALLY followed by a BBJ_ALWAYS.
    //
    // We will emit :
    //      mov [ebp - (n + 1)], 0
    //      mov [ebp -  n     ], 0xFC
    //      push &step
    //      jmp  finallyBlock
    //
    // step:
    //      mov [ebp -  n     ], 0
    //      jmp continueBlock
    //
    // continueBlock:
    //      ...

    noway_assert(isFramePointerUsed());

    // Get the nesting level which contains the finally
    unsigned finallyNesting = 0;
    compiler->fgGetNestingLevel(block, &finallyNesting);

    // The last slot is reserved for ICodeManager::FixContext(ppEndRegion)
    unsigned filterEndOffsetSlotOffs;
    filterEndOffsetSlotOffs = (unsigned)(compiler->lvaLclSize(compiler->lvaShadowSPslotsVar) - TARGET_POINTER_SIZE);

    unsigned curNestingSlotOffs;
    curNestingSlotOffs = (unsigned)(filterEndOffsetSlotOffs - ((finallyNesting + 1) * TARGET_POINTER_SIZE));

    // Zero out the slot for the next nesting level
    instGen_Store_Imm_Into_Lcl(TYP_I_IMPL, EA_PTRSIZE, 0, compiler->lvaShadowSPslotsVar,
                               curNestingSlotOffs - TARGET_POINTER_SIZE);
    instGen_Store_Imm_Into_Lcl(TYP_I_IMPL, EA_PTRSIZE, LCL_FINALLY_MARK, compiler->lvaShadowSPslotsVar,
                               curNestingSlotOffs);
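    // Illustrative ShadowSP slot layout after the two stores above, assuming
    // finallyNesting == 0 (the offsets are examples, computed per the code above):
    //      [filterEndOffsetSlotOffs]          reserved for ICodeManager::FixContext(ppEndRegion)
    //      [curNestingSlotOffs]               0xFC (LCL_FINALLY_MARK) - this finally is active
    //      [curNestingSlotOffs - ptr size]    0 - the next (inner) nesting level is cleared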
    // Now push the address where the finally funclet should return to directly.
    if (!(block->bbFlags & BBF_RETLESS_CALL))
    {
        assert(block->isBBCallAlwaysPair());
        getEmitter()->emitIns_J(INS_push_hide, block->bbNext->bbJumpDest);
    }
    else
    {
        // EE expects a DWORD, so we give it 0
        inst_IV(INS_push_hide, 0);
    }

    // Jump to the finally BB
    inst_JMP(EJ_jmp, block->bbJumpDest);

#endif // !FEATURE_EH_FUNCLETS

    // The BBJ_ALWAYS is used because the BBJ_CALLFINALLY can't point to the
    // jump target using bbJumpDest - that is already used to point
    // to the finally block. So just skip past the BBJ_ALWAYS unless the
    // block is RETLESS.
    if (!(block->bbFlags & BBF_RETLESS_CALL))
    {
        assert(block->isBBCallAlwaysPair());
        block = block->bbNext;
    }
    return block;
}
#if FEATURE_EH_FUNCLETS
void CodeGen::genEHCatchRet(BasicBlock* block)
{
    // Set RAX to the address the VM should return to after the catch.
    // Generate a RIP-relative
    //         lea reg, [rip + disp32] ; the RIP is implicit
    // which will be position-independent.
    getEmitter()->emitIns_R_L(INS_lea, EA_PTR_DSP_RELOC, block->bbJumpDest, REG_INTRET);
}

#else // !FEATURE_EH_FUNCLETS

void CodeGen::genEHFinallyOrFilterRet(BasicBlock* block)
{
    // The last statement of the block must be a GT_RETFILT, which has already been generated.
    assert(block->lastNode() != nullptr);
    assert(block->lastNode()->OperGet() == GT_RETFILT);

    if (block->bbJumpKind == BBJ_EHFINALLYRET)
    {
        assert(block->lastNode()->gtOp.gtOp1 == nullptr); // op1 == nullptr means endfinally

        // Return using a pop-jmp sequence. As the "try" block calls
        // the finally with a jmp, this leaves the x86 call-ret stack
        // balanced in the normal flow path.

        noway_assert(isFramePointerRequired());
        inst_RV(INS_pop_hide, REG_EAX, TYP_I_IMPL);
        inst_RV(INS_i_jmp, REG_EAX, TYP_I_IMPL);
    }
    else
    {
        assert(block->bbJumpKind == BBJ_EHFILTERRET);

        // The return value has already been computed.
        instGen_Return(0);
    }
}

#endif // !FEATURE_EH_FUNCLETS
// Move an immediate value into an integer register

void CodeGen::instGen_Set_Reg_To_Imm(emitAttr size, regNumber reg, ssize_t imm, insFlags flags)
{
    // reg cannot be a FP register
    assert(!genIsValidFloatReg(reg));

    if (!compiler->opts.compReloc)
    {
        size = EA_SIZE(size); // Strip any Reloc flags from size if we aren't doing relocs
    }

    if ((imm == 0) && !EA_IS_RELOC(size))
    {
        instGen_Set_Reg_To_Zero(size, reg, flags);
    }
    else
    {
        if (genDataIndirAddrCanBeEncodedAsPCRelOffset(imm))
        {
            getEmitter()->emitIns_R_AI(INS_lea, EA_PTR_DSP_RELOC, reg, imm);
        }
        else
        {
            getEmitter()->emitIns_R_I(INS_mov, size, reg, imm);
        }
    }
    regTracker.rsTrackRegIntCns(reg, imm);
}
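// For illustration only (the register choice is an example): when 'imm' is a relocatable
// data address that can be encoded as a PC-relative offset, the code above emits
//      lea rcx, [rip+disp32]
// which stays position-independent, while a plain 64-bit immediate falls back to the
// longer "mov rcx, imm64" encoding.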
/***********************************************************************************
 *
 * Generate code to set a register 'targetReg' of type 'targetType' to the constant
 * specified by the constant (GT_CNS_INT or GT_CNS_DBL) in 'tree'. This does not call
 * genProduceReg() on the target register.
 */
void CodeGen::genSetRegToConst(regNumber targetReg, var_types targetType, GenTreePtr tree)
{
    switch (tree->gtOper)
    {
        case GT_CNS_INT:
        {
            // relocatable values tend to come down as a CNS_INT of native int type
            // so the line between these two opcodes is kind of blurry
            GenTreeIntConCommon* con    = tree->AsIntConCommon();
            ssize_t              cnsVal = con->IconValue();

            if (con->ImmedValNeedsReloc(compiler))
            {
                instGen_Set_Reg_To_Imm(EA_HANDLE_CNS_RELOC, targetReg, cnsVal);
                regTracker.rsTrackRegTrash(targetReg);
            }
            else
            {
                genSetRegToIcon(targetReg, cnsVal, targetType);
            }
        }
        break;

        case GT_CNS_DBL:
        {
            double constValue = tree->gtDblCon.gtDconVal;

            // Make sure we use "xorpd reg, reg" only for positive zero (0.0) and not for negative zero (-0.0)
            if (*(__int64*)&constValue == 0)
            {
                // A faster/smaller way to generate 0
                instruction ins = genGetInsForOper(GT_XOR, targetType);
                inst_RV_RV(ins, targetReg, targetReg, targetType);
            }
            else
            {
                GenTreePtr cns;
                if (targetType == TYP_FLOAT)
                {
                    float f = forceCastToFloat(constValue);
                    cns     = genMakeConst(&f, targetType, tree, false);
                }
                else
                {
                    cns = genMakeConst(&constValue, targetType, tree, true);
                }

                inst_RV_TT(ins_Load(targetType), targetReg, cns);
            }
        }
        break;

        default:
            unreached();
    }
}
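// A note on the +0.0 fast path above (register choice illustrative): the generated
// "xorps/xorpd xmm0, xmm0" produces positive zero in a single dependency-breaking
// instruction. It cannot be used for -0.0, whose sign bit is set, so -0.0 is loaded
// from a memory constant like any other non-zero double.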
//------------------------------------------------------------------------
// genCodeForNegNot: Produce code for a GT_NEG/GT_NOT node.
//
// Arguments:
//    tree - the node
//
void CodeGen::genCodeForNegNot(GenTree* tree)
{
    assert(tree->OperIs(GT_NEG, GT_NOT));

    regNumber targetReg  = tree->gtRegNum;
    var_types targetType = tree->TypeGet();

    if (varTypeIsFloating(targetType))
    {
        assert(tree->gtOper == GT_NEG);
        genSSE2BitwiseOp(tree);
    }
    else
    {
        GenTreePtr operand = tree->gtGetOp1();
        assert(operand->isUsedFromReg());
        regNumber operandReg = genConsumeReg(operand);

        if (operandReg != targetReg)
        {
            inst_RV_RV(INS_mov, targetReg, operandReg, targetType);
        }

        instruction ins = genGetInsForOper(tree->OperGet(), targetType);
        inst_RV(ins, targetReg, targetType);
    }

    genProduceReg(tree);
}
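// For illustration only: for a TYP_INT node whose target register is EAX, GT_NEG
// emits "neg eax" (two's complement) and GT_NOT emits "not eax" (one's complement);
// the preceding mov appears only when the operand register differs from the target.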
// Generate code to get the high N bits of an N*N=2N bit multiplication result
void CodeGen::genCodeForMulHi(GenTreeOp* treeNode)
{
    if (treeNode->OperGet() == GT_MULHI)
    {
        assert(!(treeNode->gtFlags & GTF_UNSIGNED));
    }
    assert(!treeNode->gtOverflowEx());

    regNumber targetReg  = treeNode->gtRegNum;
    var_types targetType = treeNode->TypeGet();
    emitter*  emit       = getEmitter();
    emitAttr  size       = emitTypeSize(treeNode);
    GenTree*  op1        = treeNode->gtOp.gtOp1;
    GenTree*  op2        = treeNode->gtOp.gtOp2;

    // to get the high bits of the multiply, we are constrained to using the
    // 1-op form: RDX:RAX = RAX * rm
    // The 3-op form (Rx=Ry*Rz) does not support it.

    genConsumeOperands(treeNode->AsOp());

    GenTree* regOp = op1;
    GenTree* rmOp  = op2;

    // Set rmOp to the memory operand (if any)
    if (op1->isUsedFromMemory() || (op2->isUsedFromReg() && (op2->gtRegNum == REG_RAX)))
    {
        regOp = op2;
        rmOp  = op1;
    }
    assert(regOp->isUsedFromReg());

    // Setup targetReg when neither of the source operands was a matching register
    if (regOp->gtRegNum != REG_RAX)
    {
        inst_RV_RV(ins_Copy(targetType), REG_RAX, regOp->gtRegNum, targetType);
    }

    instruction ins;
    if ((treeNode->gtFlags & GTF_UNSIGNED) == 0)
    {
        ins = INS_imulEAX;
    }
    else
    {
        ins = INS_mulEAX;
    }
    emit->emitInsBinary(ins, size, treeNode, rmOp);

    // Move the result to the desired register, if necessary
    if (treeNode->OperGet() == GT_MULHI && targetReg != REG_RDX)
    {
        inst_RV_RV(INS_mov, targetReg, REG_RDX, targetType);
    }

    genProduceReg(treeNode);
}
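// An illustrative expansion (registers are examples only) for a signed GT_MULHI with
// operands in RAX and RCX and the result wanted in RBX:
//      imul rcx            ; RDX:RAX = RAX * RCX
//      mov  rbx, rdx       ; the high half of the product is left in RDX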
#ifdef _TARGET_X86_

//------------------------------------------------------------------------
// genCodeForLongUMod: Generate code for a tree of the form
//                     `(umod (gt_long x y) (const int))`
//
// Arguments:
//    node - the node for which to generate code
//
void CodeGen::genCodeForLongUMod(GenTreeOp* node)
{
    assert(node != nullptr);
    assert(node->OperGet() == GT_UMOD);
    assert(node->TypeGet() == TYP_INT);

    GenTreeOp* const dividend = node->gtOp1->AsOp();
    assert(dividend->OperGet() == GT_LONG);
    assert(varTypeIsLong(dividend));

    genConsumeOperands(node);

    GenTree* const dividendLo = dividend->gtOp1;
    GenTree* const dividendHi = dividend->gtOp2;
    assert(dividendLo->isUsedFromReg());
    assert(dividendHi->isUsedFromReg());

    GenTree* const divisor = node->gtOp2;
    assert(divisor->gtSkipReloadOrCopy()->OperGet() == GT_CNS_INT);
    assert(divisor->gtSkipReloadOrCopy()->isUsedFromReg());
    assert(divisor->gtSkipReloadOrCopy()->AsIntCon()->gtIconVal >= 2);
    assert(divisor->gtSkipReloadOrCopy()->AsIntCon()->gtIconVal <= 0x3fffffff);

    // dividendLo must be in EAX; dividendHi must be in EDX
    genCopyRegIfNeeded(dividendLo, REG_EAX);
    genCopyRegIfNeeded(dividendHi, REG_EDX);

    // At this point, EDX:EAX contains the 64bit dividend and op2->gtRegNum
    // contains the 32bit divisor. We want to generate the following code:
    //
    //   cmp edx, divisor->gtRegNum
    //   jb noOverflow
    //
    //   mov temp, eax
    //   mov eax, edx
    //   xor edx, edx
    //   div divisor->gtRegNum
    //   mov eax, temp
    //
    // noOverflow:
    //   div divisor->gtRegNum
    //
    // This works because (a * 2^32 + b) % c = ((a % c) * 2^32 + b) % c.

    BasicBlock* const noOverflow = genCreateTempLabel();

    //   cmp edx, divisor->gtRegNum
    //   jb noOverflow
    inst_RV_RV(INS_cmp, REG_EDX, divisor->gtRegNum);
    inst_JMP(EJ_jb, noOverflow);

    //   mov temp, eax
    //   mov eax, edx
    //   xor edx, edx
    //   div divisor->gtRegNum
    //   mov eax, temp
    const regNumber tempReg = node->GetSingleTempReg();
    inst_RV_RV(INS_mov, tempReg, REG_EAX, TYP_INT);
    inst_RV_RV(INS_mov, REG_EAX, REG_EDX, TYP_INT);
    instGen_Set_Reg_To_Zero(EA_PTRSIZE, REG_EDX);
    inst_RV(INS_div, divisor->gtRegNum, TYP_INT);
    inst_RV_RV(INS_mov, REG_EAX, tempReg, TYP_INT);

    // noOverflow:
    //   div divisor->gtRegNum
    genDefineTempLabel(noOverflow);
    inst_RV(INS_div, divisor->gtRegNum, TYP_INT);

    const regNumber targetReg = node->gtRegNum;
    if (targetReg != REG_EDX)
    {
        inst_RV_RV(INS_mov, targetReg, REG_RDX, TYP_INT);
    }
    genProduceReg(node);
}
#endif // _TARGET_X86_
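// A small numeric check of the identity above (illustrative): with c = 7, note that
// 2^32 % 7 == 4, so for the dividend 10 * 2^32 + 3:
//      (10 * 2^32 + 3) % 7        == (10 * 4 + 3) % 7 == 1
//      ((10 % 7) * 2^32 + 3) % 7  == (3 * 4 + 3) % 7  == 1
// Reducing the high half first (the first div) therefore never changes the final
// remainder (the second div), and it guarantees the second quotient fits in 32 bits.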
//------------------------------------------------------------------------
// genCodeForDivMod: Generate code for a DIV or MOD operation.
//
// Arguments:
//    treeNode - the node to generate the code for
//
void CodeGen::genCodeForDivMod(GenTreeOp* treeNode)
{
    assert(treeNode->OperIs(GT_DIV, GT_UDIV, GT_MOD, GT_UMOD));

    // We shouldn't be seeing GT_MOD on float/double args as it should get morphed into a
    // helper call by the front-end. Similarly we shouldn't be seeing GT_UDIV and GT_UMOD
    // on float/double args.
    assert(treeNode->OperIs(GT_DIV) || !varTypeIsFloating(treeNode));

    GenTree* dividend = treeNode->gtOp1;

#ifdef _TARGET_X86_
    if (varTypeIsLong(dividend->TypeGet()))
    {
        genCodeForLongUMod(treeNode);
        return;
    }
#endif // _TARGET_X86_

    GenTree*   divisor    = treeNode->gtOp2;
    genTreeOps oper       = treeNode->OperGet();
    emitAttr   size       = emitTypeSize(treeNode);
    regNumber  targetReg  = treeNode->gtRegNum;
    var_types  targetType = treeNode->TypeGet();
    emitter*   emit       = getEmitter();

    // dividend is in a register.
    assert(dividend->isUsedFromReg());

    genConsumeOperands(treeNode->AsOp());
    if (varTypeIsFloating(targetType))
    {
        // Check that divisor is a valid operand.
        // Note that a reg optional operand is treated as a memory op
        // if no register is allocated to it.
        assert(divisor->isUsedFromReg() || divisor->isMemoryOp() || divisor->IsCnsFltOrDbl() ||
               divisor->IsRegOptional());

        // Floating point div/rem operation
        assert(oper == GT_DIV || oper == GT_MOD);

        if (dividend->gtRegNum == targetReg)
        {
            emit->emitInsBinary(genGetInsForOper(treeNode->gtOper, targetType), size, treeNode, divisor);
        }
        else if (divisor->isUsedFromReg() && divisor->gtRegNum == targetReg)
        {
            // It is not possible to generate 2-operand divss or divsd where reg2 = reg1 / reg2
            // because divss/divsd reg1, reg2 will over-write reg1. Therefore, in case of AMD64
            // LSRA has to make sure that such a register assignment is not generated for floating
            // point div/rem operations.
            noway_assert(
                !"GT_DIV/GT_MOD (float): case of reg2 = reg1 / reg2, LSRA should never generate such a reg assignment");
        }
        else
        {
            inst_RV_RV(ins_Copy(targetType), targetReg, dividend->gtRegNum, targetType);
            emit->emitInsBinary(genGetInsForOper(treeNode->gtOper, targetType), size, treeNode, divisor);
        }
    }
    else
    {
        // dividend must be in RAX
        genCopyRegIfNeeded(dividend, REG_RAX);

        // zero or sign extend rax to rdx
        if (oper == GT_UMOD || oper == GT_UDIV)
        {
            instGen_Set_Reg_To_Zero(EA_PTRSIZE, REG_EDX);
        }
        else
        {
            emit->emitIns(INS_cdq, size);
            // the cdq instruction writes RDX, so clear the gcInfo for RDX
            gcInfo.gcMarkRegSetNpt(RBM_RDX);
        }

        // Perform the 'targetType' (64-bit or 32-bit) divide instruction
        instruction ins;
        if (oper == GT_UMOD || oper == GT_UDIV)
        {
            ins = INS_div;
        }
        else
        {
            ins = INS_idiv;
        }

        emit->emitInsBinary(ins, size, treeNode, divisor);

        // DIV/IDIV instructions always store the quotient in RAX and the remainder in RDX.
        // Move the result to the desired register, if necessary
        if (oper == GT_DIV || oper == GT_UDIV)
        {
            if (targetReg != REG_RAX)
            {
                inst_RV_RV(INS_mov, targetReg, REG_RAX, targetType);
            }
        }
        else
        {
            assert((oper == GT_MOD) || (oper == GT_UMOD));
            if (targetReg != REG_RDX)
            {
                inst_RV_RV(INS_mov, targetReg, REG_RDX, targetType);
            }
        }
    }
    genProduceReg(treeNode);
}
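// Illustrative expansions of the integer path above (EAX/EDX are fixed by the ISA):
//      signed:     mov eax, dividend ; cdq           ; idiv divisor   -> quotient in EAX, remainder in EDX
//      unsigned:   mov eax, dividend ; xor edx, edx  ; div  divisor   -> quotient in EAX, remainder in EDX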
//------------------------------------------------------------------------
// genCodeForBinary: Generate code for many binary arithmetic operators.
// The caller is expected to have called genConsumeOperands() before calling this method.
//
// Arguments:
//    treeNode - The binary operation for which we are generating code.
//
// Return Value:
//    None.
//
// Notes:
//    Mul and div variants have special constraints on x64 so are not handled here.
//    See the assert below for the operators that are handled.
//
void CodeGen::genCodeForBinary(GenTree* treeNode)
{
    const genTreeOps oper       = treeNode->OperGet();
    regNumber        targetReg  = treeNode->gtRegNum;
    var_types        targetType = treeNode->TypeGet();
    emitter*         emit       = getEmitter();

#if defined(_TARGET_64BIT_)
    assert(oper == GT_OR || oper == GT_XOR || oper == GT_AND || oper == GT_ADD || oper == GT_SUB);
#else  // !defined(_TARGET_64BIT_)
    assert(oper == GT_OR || oper == GT_XOR || oper == GT_AND || oper == GT_ADD_LO || oper == GT_ADD_HI ||
           oper == GT_SUB_LO || oper == GT_SUB_HI || oper == GT_MUL_LONG || oper == GT_DIV_HI || oper == GT_MOD_HI ||
           oper == GT_ADD || oper == GT_SUB);
#endif // !defined(_TARGET_64BIT_)

    GenTreePtr op1 = treeNode->gtGetOp1();
    GenTreePtr op2 = treeNode->gtGetOp2();

    // Commutative operations can mark op1 as contained or reg-optional to generate "op reg, memop/immed"
    if (!op1->isUsedFromReg())
    {
        assert(treeNode->OperIsCommutative());
        assert(op1->isMemoryOp() || op1->IsCnsNonZeroFltOrDbl() || op1->IsIntCnsFitsInI32() || op1->IsRegOptional());

        op1 = treeNode->gtGetOp2();
        op2 = treeNode->gtGetOp1();
    }

    instruction ins = genGetInsForOper(treeNode->OperGet(), targetType);

    // The arithmetic node must be sitting in a register (since it's not contained)
    noway_assert(targetReg != REG_NA);

    regNumber op1reg = op1->isUsedFromReg() ? op1->gtRegNum : REG_NA;
    regNumber op2reg = op2->isUsedFromReg() ? op2->gtRegNum : REG_NA;

    GenTreePtr dst;
    GenTreePtr src;

    // This is the case of reg1 = reg1 op reg2
    // We're ready to emit the instruction without any moves
    if (op1reg == targetReg)
    {
        dst = op1;
        src = op2;
    }
    // We have reg1 = reg2 op reg1
    // In order for this operation to be correct
    // we need that op is a commutative operation so
    // we can convert it into reg1 = reg1 op reg2 and emit
    // the same code as above
    else if (op2reg == targetReg)
    {
        noway_assert(GenTree::OperIsCommutative(oper));
        dst = op2;
        src = op1;
    }
    // now we know there are 3 different operands so attempt to use LEA
    else if (oper == GT_ADD && !varTypeIsFloating(treeNode) && !treeNode->gtOverflowEx() // LEA does not set flags
             && (op2->isContainedIntOrIImmed() || op2->isUsedFromReg()) && !treeNode->gtSetFlags())
    {
        if (op2->isContainedIntOrIImmed())
        {
            emit->emitIns_R_AR(INS_lea, emitTypeSize(treeNode), targetReg, op1reg,
                               (int)op2->AsIntConCommon()->IconValue());
        }
        else
        {
            assert(op2reg != REG_NA);
            emit->emitIns_R_ARX(INS_lea, emitTypeSize(treeNode), targetReg, op1reg, op2reg, 1, 0);
        }
        genProduceReg(treeNode);
        return;
    }
    // dest, op1 and op2 registers are different:
    // reg3 = reg1 op reg2
    // We can implement this by issuing a mov:
    // reg3 = reg1
    // reg3 = reg3 op reg2
    else
    {
        inst_RV_RV(ins_Copy(targetType), targetReg, op1reg, targetType);
        regTracker.rsTrackRegCopy(targetReg, op1reg);
        gcInfo.gcMarkRegPtrVal(targetReg, targetType);
        dst = treeNode;
        src = op2;
    }

    // try to use an inc or dec
    if (oper == GT_ADD && !varTypeIsFloating(treeNode) && src->isContainedIntOrIImmed() && !treeNode->gtOverflowEx())
    {
        if (src->IsIntegralConst(1))
        {
            emit->emitIns_R(INS_inc, emitTypeSize(treeNode), targetReg);
            genProduceReg(treeNode);
            return;
        }
        else if (src->IsIntegralConst(-1))
        {
            emit->emitIns_R(INS_dec, emitTypeSize(treeNode), targetReg);
            genProduceReg(treeNode);
            return;
        }
    }
    regNumber r = emit->emitInsBinary(ins, emitTypeSize(treeNode), dst, src);
    noway_assert(r == targetReg);

    if (treeNode->gtOverflowEx())
    {
#if !defined(_TARGET_64BIT_)
        assert(oper == GT_ADD || oper == GT_SUB || oper == GT_ADD_HI || oper == GT_SUB_HI);
#else
        assert(oper == GT_ADD || oper == GT_SUB);
#endif
        genCheckOverflow(treeNode);
    }
    genProduceReg(treeNode);
}
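// Two of the special cases above, for illustration (registers are examples only):
//      lea r8d, [rcx+rdx]   ; the three-operand GT_ADD case: writes neither source and
//                           ; leaves EFLAGS untouched, hence the !gtSetFlags() restriction
//      inc eax              ; GT_ADD of constant 1 when no overflow check is needed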
//------------------------------------------------------------------------
// genCodeForMul: Generate code for a MUL operation.
//
// Arguments:
//    treeNode - the node to generate the code for
//
void CodeGen::genCodeForMul(GenTreeOp* treeNode)
{
    assert(treeNode->OperIs(GT_MUL));

    regNumber targetReg  = treeNode->gtRegNum;
    var_types targetType = treeNode->TypeGet();
    emitter*  emit       = getEmitter();

    instruction ins;
    emitAttr    size                  = emitTypeSize(treeNode);
    bool        isUnsignedMultiply    = ((treeNode->gtFlags & GTF_UNSIGNED) != 0);
    bool        requiresOverflowCheck = treeNode->gtOverflowEx();

    GenTree* op1 = treeNode->gtGetOp1();
    GenTree* op2 = treeNode->gtGetOp2();

    // there are 3 forms of x64 multiply:
    // 1-op form with 128-bit result: RDX:RAX = RAX * rm
    // 2-op form: reg *= rm
    // 3-op form: reg = rm * imm

    genConsumeOperands(treeNode->AsOp());

    // This matches the 'mul' lowering in Lowering::SetMulOpCounts()
    //
    // immOp :: Only one operand can be an immediate
    // rmOp  :: Only one operand can be a memory op.
    // regOp :: A register op (especially the operand that matches 'targetReg')
    //          (can be nullptr when we have both a memory op and an immediate op)

    GenTree* immOp = nullptr;
    GenTree* rmOp  = op1;
    GenTree* regOp;

    if (op2->isContainedIntOrIImmed())
    {
        immOp = op2;
    }
    else if (op1->isContainedIntOrIImmed())
    {
        immOp = op1;
        rmOp  = op2;
    }

    if (immOp != nullptr)
    {
        // This must be a non-floating point operation.
        assert(!varTypeIsFloating(treeNode));

        // CQ: When possible use LEA for mul by imm 3, 5 or 9
        ssize_t imm = immOp->AsIntConCommon()->IconValue();

        if (!requiresOverflowCheck && rmOp->isUsedFromReg() && ((imm == 3) || (imm == 5) || (imm == 9)))
        {
            // We will use the LEA instruction to perform this multiply
            // Note that an LEA with base=x, index=x and scale=(imm-1) computes x*imm when imm=3,5 or 9.
            unsigned int scale = (unsigned int)(imm - 1);
            getEmitter()->emitIns_R_ARX(INS_lea, size, targetReg, rmOp->gtRegNum, rmOp->gtRegNum, scale, 0);
        }
        else
        {
            // use the 3-op form with immediate
            ins = getEmitter()->inst3opImulForReg(targetReg);
            emit->emitInsBinary(ins, size, rmOp, immOp);
        }
    }
    else // we have no contained immediate operand
    {
        regOp = op1;
        rmOp  = op2;

        regNumber mulTargetReg = targetReg;
        if (isUnsignedMultiply && requiresOverflowCheck)
        {
            ins          = INS_mulEAX;
            mulTargetReg = REG_RAX;
        }
        else
        {
            ins = genGetInsForOper(GT_MUL, targetType);
        }

        // Set rmOp to the memory operand (if any)
        // or set regOp to the op2 when it has the matching target register for our multiply op
        //
        if (op1->isUsedFromMemory() || (op2->isUsedFromReg() && (op2->gtRegNum == mulTargetReg)))
        {
            regOp = op2;
            rmOp  = op1;
        }
        assert(regOp->isUsedFromReg());

        // Setup targetReg when neither of the source operands was a matching register
        if (regOp->gtRegNum != mulTargetReg)
        {
            inst_RV_RV(ins_Copy(targetType), mulTargetReg, regOp->gtRegNum, targetType);
        }

        emit->emitInsBinary(ins, size, treeNode, rmOp);

        // Move the result to the desired register, if necessary
        if ((ins == INS_mulEAX) && (targetReg != REG_RAX))
        {
            inst_RV_RV(INS_mov, targetReg, REG_RAX, targetType);
        }
    }

    if (requiresOverflowCheck)
    {
        // Overflow checking is only used for non-floating point types
        noway_assert(!varTypeIsFloating(treeNode));

        genCheckOverflow(treeNode);
    }

    genProduceReg(treeNode);
}
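// The LEA multiply trick above, for illustration (rcx is an example register):
//      lea rax, [rcx+rcx*2]   ; rax = rcx * 3
//      lea rax, [rcx+rcx*4]   ; rax = rcx * 5
//      lea rax, [rcx+rcx*8]   ; rax = rcx * 9
// That is, base=x, index=x, scale=(imm-1) computes x*imm without disturbing EFLAGS.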
//------------------------------------------------------------------------
// isStructReturn: Returns whether the 'treeNode' is returning a struct.
//
// Arguments:
//    treeNode - The tree node to evaluate whether it is a struct return.
//
// Return Value:
//    For AMD64 *nix: returns true if the 'treeNode' is a GT_RETURN node of type struct.
//                    Otherwise returns false.
//    For other platforms: always returns false.
//
bool CodeGen::isStructReturn(GenTreePtr treeNode)
{
    // This method could be called for 'treeNode' of GT_RET_FILT or GT_RETURN.
    // For the GT_RET_FILT, the return is always
    // a bool or a void, for the end of a finally block.
    noway_assert(treeNode->OperGet() == GT_RETURN || treeNode->OperGet() == GT_RETFILT);
    if (treeNode->OperGet() != GT_RETURN)
    {
        return false;
    }

#ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
    return varTypeIsStruct(treeNode);
#else  // !FEATURE_UNIX_AMD64_STRUCT_PASSING
    assert(!varTypeIsStruct(treeNode));
    return false;
#endif // FEATURE_UNIX_AMD64_STRUCT_PASSING
}
//------------------------------------------------------------------------
// genStructReturn: Generates code for returning a struct.
//
// Arguments:
//    treeNode - The GT_RETURN tree node.
//
// Return Value:
//    None
//
// Assumption:
//    op1 of GT_RETURN node is either GT_LCL_VAR or multi-reg GT_CALL
//
void CodeGen::genStructReturn(GenTreePtr treeNode)
{
    assert(treeNode->OperGet() == GT_RETURN);
    GenTreePtr op1 = treeNode->gtGetOp1();

#ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
    if (op1->OperGet() == GT_LCL_VAR)
    {
        GenTreeLclVarCommon* lclVar = op1->AsLclVarCommon();
        LclVarDsc*           varDsc = &(compiler->lvaTable[lclVar->gtLclNum]);
        assert(varDsc->lvIsMultiRegRet);

        ReturnTypeDesc retTypeDesc;
        retTypeDesc.InitializeStructReturnType(compiler, varDsc->lvVerTypeInfo.GetClassHandle());
        unsigned regCount = retTypeDesc.GetReturnRegCount();
        assert(regCount == MAX_RET_REG_COUNT);

        if (varTypeIsEnregisterableStruct(op1))
        {
            // Right now the only enregistrable structs supported are SIMD vector types.
            assert(varTypeIsSIMD(op1));
            assert(op1->isUsedFromReg());

            // This is a case where the operand is in a single reg and needs to be
            // returned in multiple ABI return registers.
            regNumber opReg = genConsumeReg(op1);
            regNumber reg0  = retTypeDesc.GetABIReturnReg(0);
            regNumber reg1  = retTypeDesc.GetABIReturnReg(1);

            if (opReg != reg0 && opReg != reg1)
            {
                // Operand reg is different from return regs.
                // Copy opReg to reg0 and let it be handled by one of the
                // two cases below.
                inst_RV_RV(ins_Copy(TYP_DOUBLE), reg0, opReg, TYP_DOUBLE);
                opReg = reg0;
            }

            if (opReg == reg0)
            {
                assert(opReg != reg1);

                // reg0 already has the required 8 bytes in bit positions [63:0].
                // reg1 = opReg.
                // Then swap the upper and lower 8 bytes of reg1 so that the desired
                // 8 bytes end up in bit positions [63:0].
                inst_RV_RV(ins_Copy(TYP_DOUBLE), reg1, opReg, TYP_DOUBLE);
            }
            else
            {
                assert(opReg == reg1);

                // reg0 = opReg.
                // Then swap the upper and lower 8 bytes of reg1 so that the desired
                // 8 bytes end up in bit positions [63:0].
                inst_RV_RV(ins_Copy(TYP_DOUBLE), reg0, opReg, TYP_DOUBLE);
            }
            inst_RV_RV_IV(INS_shufpd, EA_16BYTE, reg1, reg1, 0x01);
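            // In shufpd's two-bit immediate, bit 0 selects which qword of the destination
            // goes to bits [63:0] and bit 1 selects which qword of the source goes to bits
            // [127:64]. With both operands being reg1 and an immediate of 0x01, this swaps
            // the two 8-byte halves of reg1 in place.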
        }
        else
        {
            assert(op1->isUsedFromMemory());

            // Copy var on stack into ABI return registers
            int offset = 0;
            for (unsigned i = 0; i < regCount; ++i)
            {
                var_types type = retTypeDesc.GetReturnRegType(i);
                regNumber reg  = retTypeDesc.GetABIReturnReg(i);
                getEmitter()->emitIns_R_S(ins_Load(type), emitTypeSize(type), reg, lclVar->gtLclNum, offset);
                offset += genTypeSize(type);
            }
        }
    }
    else
    {
        assert(op1->IsMultiRegCall() || op1->IsCopyOrReloadOfMultiRegCall());

        genConsumeRegs(op1);

        GenTree*        actualOp1   = op1->gtSkipReloadOrCopy();
        GenTreeCall*    call        = actualOp1->AsCall();
        ReturnTypeDesc* retTypeDesc = call->GetReturnTypeDesc();
        unsigned        regCount    = retTypeDesc->GetReturnRegCount();
        assert(regCount == MAX_RET_REG_COUNT);

        // Handle circular dependency between call allocated regs and ABI return regs.
        //
        // It is possible under LSRA stress that the originally allocated regs of the call node,
        // say rax and rdx, are spilled and reloaded to rdx and rax respectively. But
        // GT_RETURN needs to move values as follows: rdx->rax, rax->rdx. A similar
        // kind of circular dependency could arise between the xmm0 and xmm1 return regs.
        // Codegen is expected to handle such circular dependencies.
        //
        var_types regType0      = retTypeDesc->GetReturnRegType(0);
        regNumber returnReg0    = retTypeDesc->GetABIReturnReg(0);
        regNumber allocatedReg0 = call->GetRegNumByIdx(0);

        var_types regType1      = retTypeDesc->GetReturnRegType(1);
        regNumber returnReg1    = retTypeDesc->GetABIReturnReg(1);
        regNumber allocatedReg1 = call->GetRegNumByIdx(1);

        if (op1->IsCopyOrReload())
        {
            // GT_COPY/GT_RELOAD will have valid reg for those positions
            // that need to be copied or reloaded.
            regNumber reloadReg = op1->AsCopyOrReload()->GetRegNumByIdx(0);
            if (reloadReg != REG_NA)
            {
                allocatedReg0 = reloadReg;
            }

            reloadReg = op1->AsCopyOrReload()->GetRegNumByIdx(1);
            if (reloadReg != REG_NA)
            {
                allocatedReg1 = reloadReg;
            }
        }

        if (allocatedReg0 == returnReg1 && allocatedReg1 == returnReg0)
        {
            // Circular dependency - swap allocatedReg0 and allocatedReg1
            if (varTypeIsFloating(regType0))
            {
                assert(varTypeIsFloating(regType1));

                // The fastest way to swap two XMM regs is using PXOR
                inst_RV_RV(INS_pxor, allocatedReg0, allocatedReg1, TYP_DOUBLE);
                inst_RV_RV(INS_pxor, allocatedReg1, allocatedReg0, TYP_DOUBLE);
                inst_RV_RV(INS_pxor, allocatedReg0, allocatedReg1, TYP_DOUBLE);
            }
            else
            {
                assert(varTypeIsIntegral(regType0));
                assert(varTypeIsIntegral(regType1));
                inst_RV_RV(INS_xchg, allocatedReg1, allocatedReg0, TYP_I_IMPL);
            }
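            // The three PXORs above are the classic xor-swap (a ^= b; b ^= a; a ^= b)
            // applied to the full 128-bit registers, so no scratch register is needed;
            // for the integer pair a single xchg does the same job in one instruction.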
        }
        else if (allocatedReg1 == returnReg0)
        {
            // Change the order of moves to correctly handle the dependency.
            if (allocatedReg1 != returnReg1)
            {
                inst_RV_RV(ins_Copy(regType1), returnReg1, allocatedReg1, regType1);
            }

            if (allocatedReg0 != returnReg0)
            {
                inst_RV_RV(ins_Copy(regType0), returnReg0, allocatedReg0, regType0);
            }
        }
        else
        {
            // No circular dependency case.
            if (allocatedReg0 != returnReg0)
            {
                inst_RV_RV(ins_Copy(regType0), returnReg0, allocatedReg0, regType0);
            }

            if (allocatedReg1 != returnReg1)
            {
                inst_RV_RV(ins_Copy(regType1), returnReg1, allocatedReg1, regType1);
            }
        }
    }
#else // !FEATURE_UNIX_AMD64_STRUCT_PASSING
    unreached();
#endif // FEATURE_UNIX_AMD64_STRUCT_PASSING
}
//------------------------------------------------------------------------
// genReturn: Generates code for a return statement.
//            In case of struct return, delegates to the genStructReturn method.
//
// Arguments:
//    treeNode - The GT_RETURN or GT_RETFILT tree node.
//
// Return Value:
//    None
//
void CodeGen::genReturn(GenTreePtr treeNode)
{
    assert(treeNode->OperGet() == GT_RETURN || treeNode->OperGet() == GT_RETFILT);
    GenTreePtr op1        = treeNode->gtGetOp1();
    var_types  targetType = treeNode->TypeGet();

    // A void GT_RETFILT is the end of a finally. For non-void filter returns we need to load the result in the return
    // register, if it's not already there. The processing is the same as GT_RETURN. For filters, the IL spec says the
    // result is type int32. Further, the only legal values are 0 or 1; the use of other values is "undefined".
    assert(!treeNode->OperIs(GT_RETFILT) || (targetType == TYP_VOID) || (targetType == TYP_INT));

#ifdef DEBUG
    if (targetType == TYP_VOID)
    {
        assert(op1 == nullptr);
    }
#endif // DEBUG

#ifdef _TARGET_X86_
    if (treeNode->TypeGet() == TYP_LONG)
    {
        assert(op1 != nullptr);
        noway_assert(op1->OperGet() == GT_LONG);
        GenTree* loRetVal = op1->gtGetOp1();
        GenTree* hiRetVal = op1->gtGetOp2();
        noway_assert((loRetVal->gtRegNum != REG_NA) && (hiRetVal->gtRegNum != REG_NA));

        genConsumeReg(loRetVal);
        genConsumeReg(hiRetVal);
        if (loRetVal->gtRegNum != REG_LNGRET_LO)
        {
            inst_RV_RV(ins_Copy(targetType), REG_LNGRET_LO, loRetVal->gtRegNum, TYP_INT);
        }
        if (hiRetVal->gtRegNum != REG_LNGRET_HI)
        {
            inst_RV_RV(ins_Copy(targetType), REG_LNGRET_HI, hiRetVal->gtRegNum, TYP_INT);
        }
    }
    else
#endif // _TARGET_X86_
        if (isStructReturn(treeNode))
    {
        genStructReturn(treeNode);
    }
    else if (targetType != TYP_VOID)
    {
        assert(op1 != nullptr);
        noway_assert(op1->gtRegNum != REG_NA);

        // !! NOTE !! genConsumeReg will clear op1 as GC ref after it has
        // consumed a reg for the operand. This is because the variable
        // is dead after return. But we are issuing more instructions
        // like "profiler leave callback" after this consumption. So
        // if you are issuing more instructions after this point,
        // remember to keep the variable live up until the new method
        // exit point where it is actually dead.
        genConsumeReg(op1);

        regNumber retReg = varTypeIsFloating(treeNode) ? REG_FLOATRET : REG_INTRET;
#ifdef _TARGET_X86_
        if (varTypeIsFloating(treeNode))
        {
            // Spill the return value register from an XMM register to the stack, then load it on the x87 stack.
            // If it already has a home location, use that. Otherwise, we need a temp.
            if (genIsRegCandidateLocal(op1) && compiler->lvaTable[op1->gtLclVarCommon.gtLclNum].lvOnFrame)
            {
                // Store local variable to its home location, if necessary.
                if ((op1->gtFlags & GTF_REG_VAL) != 0)
                {
                    op1->gtFlags &= ~GTF_REG_VAL;
                    inst_TT_RV(ins_Store(op1->gtType,
                                         compiler->isSIMDTypeLocalAligned(op1->gtLclVarCommon.gtLclNum)),
                               op1, op1->gtRegNum);
                }
                // Now, load it to the fp stack.
                getEmitter()->emitIns_S(INS_fld, emitTypeSize(op1), op1->AsLclVarCommon()->gtLclNum, 0);
            }
            else
            {
                // Spill the value, which should be in a register, then load it to the fp stack.
                // TODO-X86-CQ: Deal with things that are already in memory (don't call genConsumeReg yet).
                op1->gtFlags |= GTF_SPILL;
                regSet.rsSpillTree(op1->gtRegNum, op1);
                op1->gtFlags |= GTF_SPILLED;
                op1->gtFlags &= ~GTF_SPILL;

                TempDsc* t = regSet.rsUnspillInPlace(op1, op1->gtRegNum);
                inst_FS_ST(INS_fld, emitActualTypeSize(op1->gtType), t, 0);
                op1->gtFlags &= ~GTF_SPILLED;
                compiler->tmpRlsTemp(t);
            }
        }
        else
#endif // _TARGET_X86_
        {
            if (op1->gtRegNum != retReg)
            {
                inst_RV_RV(ins_Copy(targetType), retReg, op1->gtRegNum, targetType);
            }
        }
    }

#ifdef PROFILING_SUPPORTED
    // !! Note !!
    // TODO-AMD64-Unix: If the profiler hook is implemented on *nix, make sure that for 2-register-returned
    //                  structs RAX and RDX are kept alive. Make the necessary changes in lowerxarch.cpp
    //                  in the handling of the GT_RETURN statement.
    //                  Such structs containing GC pointers need to be handled by calling gcInfo.gcMarkRegSetNpt
    //                  for the return registers containing GC refs.

    // There will be a single return block while generating profiler ELT callbacks.
    //
    // Reason for not materializing the Leave callback as a GT_PROF_HOOK node after GT_RETURN:
    // The flowgraph and other places assert that the last node of a block marked as
    // BBJ_RETURN is either a GT_RETURN or GT_JMP or a tail call. It would be nice to
    // maintain such an invariant irrespective of whether a profiler hook is needed or not.
    // Also, there is not much to be gained by materializing it as an explicit node.
    if (compiler->compCurBB == compiler->genReturnBB)
    {
        // !! NOTE !!
        // Since we are invalidating the assumption that we would slip into the epilog
        // right after the "return", we need to preserve the return reg's GC state
        // across the call until actual method return.
        if (varTypeIsGC(compiler->info.compRetType))
        {
            gcInfo.gcMarkRegPtrVal(REG_INTRET, compiler->info.compRetType);
        }

        genProfilingLeaveCallback();

        if (varTypeIsGC(compiler->info.compRetType))
        {
            gcInfo.gcMarkRegSetNpt(REG_INTRET);
        }
    }
#endif // PROFILING_SUPPORTED
}
//------------------------------------------------------------------------
// genCodeForCompare: Produce code for a GT_EQ/GT_NE/GT_LT/GT_LE/GT_GE/GT_GT/GT_TEST_EQ/GT_TEST_NE/GT_CMP node.
//
// Arguments:
//    tree - the node
//
void CodeGen::genCodeForCompare(GenTreeOp* tree)
{
    assert(tree->OperIs(GT_EQ, GT_NE, GT_LT, GT_LE, GT_GE, GT_GT, GT_TEST_EQ, GT_TEST_NE, GT_CMP));

    // TODO-XArch-CQ: Check if we can use the currently set flags.
    // TODO-XArch-CQ: Check for the case where we can simply transfer the carry bit to a register
    //                (signed < or >= where targetReg != REG_NA)

    GenTreePtr op1     = tree->gtOp1;
    var_types  op1Type = op1->TypeGet();

    if (varTypeIsFloating(op1Type))
    {
        genCompareFloat(tree);
    }
    else
    {
        genCompareInt(tree);
    }
}
//------------------------------------------------------------------------
// genCodeForJumpTrue: Generates code for a jmpTrue statement.
//
// Arguments:
//    tree - The GT_JTRUE tree node.
//
// Return Value:
//    None
//
void CodeGen::genCodeForJumpTrue(GenTreePtr tree)
{
    GenTree* cmp = tree->gtOp.gtOp1;

    assert(cmp->OperIsCompare());
    assert(compiler->compCurBB->bbJumpKind == BBJ_COND);

#if !defined(_TARGET_64BIT_)
    // Long-typed compares should have been handled by Lowering::LowerCompare.
    assert(!varTypeIsLong(cmp->gtGetOp1()));
#endif // !defined(_TARGET_64BIT_)

    // Get the "kind" and type of the comparison. Note that whether it is an unsigned cmp
    // is governed by a flag NOT by the inherent type of the node
    // TODO-XArch-CQ: Check if we can use the currently set flags.
    emitJumpKind jumpKind[2];
    bool         branchToTrueLabel[2];
    genJumpKindsForTree(cmp, jumpKind, branchToTrueLabel);

    BasicBlock* skipLabel = nullptr;
    if (jumpKind[0] != EJ_NONE)
    {
        BasicBlock* jmpTarget;
        if (branchToTrueLabel[0])
        {
            jmpTarget = compiler->compCurBB->bbJumpDest;
        }
        else
        {
            // This case arises only for ordered GT_EQ right now
            assert((cmp->gtOper == GT_EQ) && ((cmp->gtFlags & GTF_RELOP_NAN_UN) == 0));
            skipLabel = genCreateTempLabel();
            jmpTarget = skipLabel;
        }

        inst_JMP(jumpKind[0], jmpTarget);
    }

    if (jumpKind[1] != EJ_NONE)
    {
        // the second conditional branch always has to be to the true label
        assert(branchToTrueLabel[1]);
        inst_JMP(jumpKind[1], compiler->compCurBB->bbJumpDest);
    }

    if (skipLabel != nullptr)
    {
        genDefineTempLabel(skipLabel);
    }
}
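// An illustrative two-branch expansion for an ordered floating-point "==": after a
// ucomiss/ucomisd the unordered (NaN) case sets PF, so it must be routed past the
// equality branch via the skip label:
//      jp   SHORT skip     ; unordered -> not equal
//      je   bbJumpDest     ; ordered and ZF set -> equal
//   skip: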
//------------------------------------------------------------------------
// genCodeForJcc: Produce code for a GT_JCC node.
//
// Arguments:
//    tree - the GT_JCC node
//
void CodeGen::genCodeForJcc(GenTreeCC* tree)
{
    assert(compiler->compCurBB->bbJumpKind == BBJ_COND);

    CompareKind  compareKind = ((tree->gtFlags & GTF_UNSIGNED) != 0) ? CK_UNSIGNED : CK_SIGNED;
    emitJumpKind jumpKind    = genJumpKindForOper(tree->gtCondition, compareKind);

    inst_JMP(jumpKind, compiler->compCurBB->bbJumpDest);
}
//------------------------------------------------------------------------
// genCodeForSetcc: Generates a setcc instruction for a GT_SETCC node.
//
// Arguments:
//    setcc - the GT_SETCC node
//
// Assumptions:
//    The condition represents an integer comparison. This code doesn't
//    have the necessary logic to deal with floating point comparisons,
//    in fact it doesn't even know if the comparison is integer or floating
//    point because SETCC nodes do not have any operands.
//
void CodeGen::genCodeForSetcc(GenTreeCC* setcc)
{
    regNumber    dstReg      = setcc->gtRegNum;
    CompareKind  compareKind = setcc->IsUnsigned() ? CK_UNSIGNED : CK_SIGNED;
    emitJumpKind jumpKind    = genJumpKindForOper(setcc->gtCondition, compareKind);

    assert(genIsValidIntReg(dstReg) && isByteReg(dstReg));
    // Make sure nobody is setting GTF_RELOP_NAN_UN on this node as it is ignored.
    assert((setcc->gtFlags & GTF_RELOP_NAN_UN) == 0);

    inst_SET(jumpKind, dstReg);
    inst_RV_RV(ins_Move_Extend(TYP_UBYTE, true), dstReg, dstReg, TYP_UBYTE, emitTypeSize(TYP_UBYTE));
    genProduceReg(setcc);
}
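// For illustration only (EAX is an example byte-addressable register): a signed "<"
// SETCC with its result in EAX expands to
//      setl  al            ; AL = (SF != OF) ? 1 : 0
//      movzx eax, al       ; zero-extend the byte to the full register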
//------------------------------------------------------------------------
// genCodeForReturnTrap: Produce code for a GT_RETURNTRAP node.
//
// Arguments:
//    tree - the GT_RETURNTRAP node
//
void CodeGen::genCodeForReturnTrap(GenTreeOp* tree)
{
    assert(tree->OperGet() == GT_RETURNTRAP);

    // this is nothing but a conditional call to CORINFO_HELP_STOP_FOR_GC
    // based on the contents of 'data'

    GenTree* data = tree->gtOp1;
    genConsumeRegs(data);
    GenTreeIntCon cns = intForm(TYP_INT, 0);
    getEmitter()->emitInsBinary(INS_cmp, emitTypeSize(TYP_INT), data, &cns);

    BasicBlock* skipLabel = genCreateTempLabel();

    emitJumpKind jmpEqual = genJumpKindForOper(GT_EQ, CK_SIGNED);
    inst_JMP(jmpEqual, skipLabel);

    // emit the call to the EE-helper that stops for GC (or other reasons)
    regNumber tmpReg = tree->GetSingleTempReg();
    assert(genIsValidIntReg(tmpReg));

    genEmitHelperCall(CORINFO_HELP_STOP_FOR_GC, 0, EA_UNKNOWN, tmpReg);
    genDefineTempLabel(skipLabel);
}
/*****************************************************************************
 *
 * Generate code for a single node in the tree.
 * Preconditions: All operands have been evaluated
 *
 */
void CodeGen::genCodeForTreeNode(GenTreePtr treeNode)
{
    regNumber targetReg;
#if !defined(_TARGET_64BIT_)
    if (treeNode->TypeGet() == TYP_LONG)
    {
        // All long enregistered nodes will have been decomposed into their
        // constituent lo and hi nodes.
        targetReg = REG_NA;
    }
    else
#endif // !defined(_TARGET_64BIT_)
    {
        targetReg = treeNode->gtRegNum;
    }

    var_types targetType = treeNode->TypeGet();
    emitter*  emit       = getEmitter();

#ifdef DEBUG
    // Validate that all the operands for the current node are consumed in order.
    // This is important because LSRA ensures that any necessary copies will be
    // handled correctly.
    lastConsumedNode = nullptr;
    if (compiler->verbose)
    {
        unsigned seqNum = treeNode->gtSeqNum; // Useful for setting a conditional break in Visual Studio
        compiler->gtDispLIRNode(treeNode, "Generating: ");
    }
#endif // DEBUG

    // Is this a node whose value is already in a register? LSRA denotes this by
    // setting the GTF_REUSE_REG_VAL flag.
    if (treeNode->IsReuseRegVal())
    {
        // For now, this is only used for constant nodes.
        assert((treeNode->OperIsConst()));
        JITDUMP("  TreeNode is marked ReuseReg\n");
        return;
    }

    // contained nodes are part of their parents for codegen purposes
    // ex : immediates, most LEAs
    if (treeNode->isContained())
    {
        return;
    }
    switch (treeNode->gtOper)
    {
#ifndef JIT32_GCENCODER
        case GT_START_NONGC:
            getEmitter()->emitDisableGC();
            break;
#endif // !defined(JIT32_GCENCODER)

        case GT_PROF_HOOK:
#ifdef PROFILING_SUPPORTED
            // We should be seeing this only if profiler hook is needed
            noway_assert(compiler->compIsProfilerHookNeeded());

            // Right now this node is used only for tail calls. In future if
            // we intend to use it for Enter or Leave hooks, add a data member
            // to this node indicating the kind of profiler hook. For example,
            // helper number can be used.
            genProfilingLeaveCallback(CORINFO_HELP_PROF_FCN_TAILCALL);
#endif // PROFILING_SUPPORTED
            break;

        case GT_LCLHEAP:
            genLclHeap(treeNode);
            break;

        case GT_CNS_INT:
#ifdef _TARGET_X86_
            assert(!treeNode->IsIconHandle(GTF_ICON_TLS_HDL));
#endif // _TARGET_X86_
            __fallthrough;
        case GT_CNS_DBL:
            genSetRegToConst(targetReg, targetType, treeNode);
            genProduceReg(treeNode);
            break;

        case GT_NOT:
        case GT_NEG:
            genCodeForNegNot(treeNode);
            break;

        case GT_MOD:
        case GT_UMOD:
        case GT_DIV:
        case GT_UDIV:
            genCodeForDivMod(treeNode->AsOp());
            break;

        case GT_OR:
        case GT_XOR:
        case GT_AND:
            assert(varTypeIsIntegralOrI(treeNode));
            __fallthrough;

#if !defined(_TARGET_64BIT_)
        case GT_ADD_LO:
        case GT_ADD_HI:
        case GT_SUB_LO:
        case GT_SUB_HI:
#endif // !defined(_TARGET_64BIT_)
        case GT_ADD:
        case GT_SUB:
            genConsumeOperands(treeNode->AsOp());
            genCodeForBinary(treeNode);
            break;

        case GT_MUL:
            genCodeForMul(treeNode->AsOp());
            break;

        case GT_LSH:
        case GT_RSH:
        case GT_RSZ:
        case GT_ROL:
        case GT_ROR:
            genCodeForShift(treeNode);
            break;

#if !defined(_TARGET_64BIT_)
        case GT_LSH_HI:
        case GT_RSH_LO:
            genCodeForShiftLong(treeNode);
            break;
#endif // !defined(_TARGET_64BIT_)

        case GT_CAST:
            genCodeForCast(treeNode->AsOp());
            break;

        case GT_LCL_FLD_ADDR:
        case GT_LCL_VAR_ADDR:
            genCodeForLclAddr(treeNode);
            break;

        case GT_LCL_FLD:
            genCodeForLclFld(treeNode->AsLclFld());
            break;

        case GT_LCL_VAR:
            genCodeForLclVar(treeNode->AsLclVar());
            break;

        case GT_STORE_LCL_FLD:
            genCodeForStoreLclFld(treeNode->AsLclFld());
            break;

        case GT_STORE_LCL_VAR:
            genCodeForStoreLclVar(treeNode->AsLclVar());
            break;

        case GT_RETFILT:
        case GT_RETURN:
            genReturn(treeNode);
            break;

        case GT_LEA:
            // If we are here, it is the case where there is an LEA that cannot be folded into a parent instruction.
            genLeaInstruction(treeNode->AsAddrMode());
            break;

        case GT_IND:
            genCodeForIndir(treeNode->AsIndir());
            break;

        case GT_MULHI:
#ifdef _TARGET_X86_
        case GT_MUL_LONG:
#endif
            genCodeForMulHi(treeNode->AsOp());
            break;

        case GT_INTRINSIC:
            genIntrinsic(treeNode);
            break;

#ifdef FEATURE_SIMD
        case GT_SIMD:
            genSIMDIntrinsic(treeNode->AsSIMD());
            break;
#endif // FEATURE_SIMD

        case GT_CKFINITE:
            genCkfinite(treeNode);
            break;

        case GT_EQ:
        case GT_NE:
        case GT_LT:
        case GT_LE:
        case GT_GE:
        case GT_GT:
        case GT_TEST_EQ:
        case GT_TEST_NE:
        case GT_CMP:
            genCodeForCompare(treeNode->AsOp());
            break;

        case GT_JTRUE:
            genCodeForJumpTrue(treeNode);
            break;

        case GT_JCC:
            genCodeForJcc(treeNode->AsCC());
            break;

        case GT_SETCC:
            genCodeForSetcc(treeNode->AsCC());
            break;

        case GT_RETURNTRAP:
            genCodeForReturnTrap(treeNode->AsOp());
            break;

        case GT_STOREIND:
            genCodeForStoreInd(treeNode->AsStoreInd());
            break;

        case GT_COPY:
            // This is handled at the time we call genConsumeReg() on the GT_COPY
            break;

        case GT_SWAP:
            genCodeForSwap(treeNode->AsOp());
            break;

        case GT_PUTARG_STK:
            genPutArgStk(treeNode->AsPutArgStk());
            break;

        case GT_PUTARG_REG:
            genPutArgReg(treeNode->AsOp());
            break;

        case GT_CALL:
            genCallInstruction(treeNode->AsCall());
            break;

        case GT_JMP:
            genJmpMethod(treeNode);
            break;

        case GT_LOCKADD:
        case GT_XCHG:
        case GT_XADD:
            genLockedInstructions(treeNode->AsOp());
            break;

        case GT_MEMORYBARRIER:
            instGen_MemoryBarrier();
            break;

        case GT_CMPXCHG:
            genCodeForCmpXchg(treeNode->AsCmpXchg());
            break;

        case GT_RELOAD:
            // do nothing - reload is just a marker.
            // The parent node will call genConsumeReg on this which will trigger the unspill of this node's child
            // into the register specified in this node.
            break;

        case GT_NOP:
            break;

        case GT_NO_OP:
            getEmitter()->emitIns_Nop(1);
            break;

        case GT_ARR_BOUNDS_CHECK:
#ifdef FEATURE_SIMD
        case GT_SIMD_CHK:
#endif // FEATURE_SIMD
            genRangeCheck(treeNode);
            break;

        case GT_PHYSREG:
            genCodeForPhysReg(treeNode->AsPhysReg());
            break;

        case GT_NULLCHECK:
            genCodeForNullCheck(treeNode->AsOp());
            break;

        case GT_CATCH_ARG:

            noway_assert(handlerGetsXcptnObj(compiler->compCurBB->bbCatchTyp));

            /* Catch arguments get passed in a register. genCodeForBBlist()
               would have marked it as holding a GC object, but not used. */

            noway_assert(gcInfo.gcRegGCrefSetCur & RBM_EXCEPTION_OBJECT);
            genConsumeReg(treeNode);
            break;

#if !FEATURE_EH_FUNCLETS
        case GT_END_LFIN:

            // Have to clear the ShadowSP of the nesting level which encloses the finally. Generates:
            //     mov dword ptr [ebp-0xC], 0  // for some slot of the ShadowSP local var

            unsigned finallyNesting;
            finallyNesting = treeNode->gtVal.gtVal1;
            noway_assert(treeNode->gtVal.gtVal1 < compiler->compHndBBtabCount);
            noway_assert(finallyNesting < compiler->compHndBBtabCount);

            // The last slot is reserved for ICodeManager::FixContext(ppEndRegion)
            unsigned filterEndOffsetSlotOffs;
            PREFIX_ASSUME(compiler->lvaLclSize(compiler->lvaShadowSPslotsVar) >
                          TARGET_POINTER_SIZE); // below doesn't underflow.
            filterEndOffsetSlotOffs =
                (unsigned)(compiler->lvaLclSize(compiler->lvaShadowSPslotsVar) - TARGET_POINTER_SIZE);

            unsigned curNestingSlotOffs;
            curNestingSlotOffs = filterEndOffsetSlotOffs - ((finallyNesting + 1) * TARGET_POINTER_SIZE);
            instGen_Store_Imm_Into_Lcl(TYP_I_IMPL, EA_PTRSIZE, 0, compiler->lvaShadowSPslotsVar, curNestingSlotOffs);
            break;
#endif // !FEATURE_EH_FUNCLETS

        case GT_PINVOKE_PROLOG:
            noway_assert(((gcInfo.gcRegGCrefSetCur | gcInfo.gcRegByrefSetCur) & ~fullIntArgRegMask()) == 0);

            // the runtime side requires the codegen here to be consistent
            emit->emitDisableRandomNops();
            break;

        case GT_LABEL:
            genPendingCallLabel       = genCreateTempLabel();
            treeNode->gtLabel.gtLabBB = genPendingCallLabel;
            emit->emitIns_R_L(INS_lea, EA_PTR_DSP_RELOC, genPendingCallLabel, treeNode->gtRegNum);
            break;

        case GT_STORE_OBJ:
        case GT_STORE_DYN_BLK:
        case GT_STORE_BLK:
            genCodeForStoreBlk(treeNode->AsBlk());
            break;

        case GT_JMPTABLE:
            genJumpTable(treeNode);
            break;

        case GT_SWITCH_TABLE:
            genTableBasedSwitch(treeNode);
            break;

        case GT_ARR_INDEX:
            genCodeForArrIndex(treeNode->AsArrIndex());
            break;

        case GT_ARR_OFFSET:
            genCodeForArrOffset(treeNode->AsArrOffs());
            break;

        case GT_CLS_VAR_ADDR:
            emit->emitIns_R_C(INS_lea, EA_PTRSIZE, targetReg, treeNode->gtClsVar.gtClsVarHnd, 0);
            genProduceReg(treeNode);
            break;

#if !defined(_TARGET_64BIT_)
        case GT_LONG:
            assert(treeNode->isUsedFromReg());
            genConsumeRegs(treeNode);
            break;
#endif // !defined(_TARGET_64BIT_)

        case GT_IL_OFFSET:
            // Do nothing; these nodes are simply markers for debug info.
            break;

        default:
        {
#ifdef DEBUG
            char message[256];
            _snprintf_s(message, _countof(message), _TRUNCATE, "NYI: Unimplemented node type %s\n",
                        GenTree::NodeName(treeNode->OperGet()));
            NYIRAW(message);
#endif
            assert(!"Unknown node in codegen");
        }
        break;
    }
}
1983 //----------------------------------------------------------------------------------
1984 // genMultiRegCallStoreToLocal: store multi-reg return value of a call node to a local
1987 // treeNode - Gentree of GT_STORE_LCL_VAR
1993 // The child of store is a multi-reg call node.
1994 // genProduceReg() on treeNode is made by caller of this routine.
1996 void CodeGen::genMultiRegCallStoreToLocal(GenTreePtr treeNode)
1998 assert(treeNode->OperGet() == GT_STORE_LCL_VAR);
2000 #ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
2001 // Structs of size >=9 and <=16 are returned in two return registers on x64 Unix.
2002 assert(varTypeIsStruct(treeNode));
2004 // Assumption: current x64 Unix implementation requires that a multi-reg struct
2005 // var in 'var = call' is flagged as lvIsMultiRegRet to prevent it from
2006 // being struct promoted.
2007 unsigned lclNum = treeNode->AsLclVarCommon()->gtLclNum;
2008 LclVarDsc* varDsc = &(compiler->lvaTable[lclNum]);
2009 noway_assert(varDsc->lvIsMultiRegRet);
2011 GenTree* op1 = treeNode->gtGetOp1();
2012 GenTree* actualOp1 = op1->gtSkipReloadOrCopy();
2013 GenTreeCall* call = actualOp1->AsCall();
2014 assert(call->HasMultiRegRetVal());
2016 genConsumeRegs(op1);
2018 ReturnTypeDesc* retTypeDesc = call->GetReturnTypeDesc();
2019 assert(retTypeDesc->GetReturnRegCount() == MAX_RET_REG_COUNT);
2020 unsigned regCount = retTypeDesc->GetReturnRegCount();
2022 if (treeNode->gtRegNum != REG_NA)
2024 // Right now the only enregistrable structs supported are SIMD types.
2025 assert(varTypeIsSIMD(treeNode));
2026 assert(varTypeIsFloating(retTypeDesc->GetReturnRegType(0)));
2027 assert(varTypeIsFloating(retTypeDesc->GetReturnRegType(1)));
2029 // This is a case where the two 8-byte halves of the operand are in
2030 // two different xmm registers and need to be assembled into a single
2031 // xmm register.
2032 regNumber targetReg = treeNode->gtRegNum;
2033 regNumber reg0 = call->GetRegNumByIdx(0);
2034 regNumber reg1 = call->GetRegNumByIdx(1);
2036 if (op1->IsCopyOrReload())
2038 // GT_COPY/GT_RELOAD will have valid reg for those positions
2039 // that need to be copied or reloaded.
2040 regNumber reloadReg = op1->AsCopyOrReload()->GetRegNumByIdx(0);
2041 if (reloadReg != REG_NA)
2046 reloadReg = op1->AsCopyOrReload()->GetRegNumByIdx(1);
2047 if (reloadReg != REG_NA)
2053 if (targetReg != reg0 && targetReg != reg1)
2055 // Copy reg0 into targetReg and let it be handled by one
2056 // of the cases below.
2057 inst_RV_RV(ins_Copy(TYP_DOUBLE), targetReg, reg0, TYP_DOUBLE);
2061 if (targetReg == reg0)
2063 // targetReg[63:0] = targetReg[63:0]
2064 // targetReg[127:64] = reg1[127:64]
2065 inst_RV_RV_IV(INS_shufpd, EA_16BYTE, targetReg, reg1, 0x00);
2069 assert(targetReg == reg1);
2071 // We need two shuffles to achieve this:
2072 // First:
2073 // targetReg[63:0] = targetReg[63:0]
2074 // targetReg[127:64] = reg0[63:0]
2076 // Second:
2077 // targetReg[63:0] = targetReg[127:64]
2078 // targetReg[127:64] = targetReg[63:0]
2080 // Essentially copy low 8-bytes from reg0 to high 8-bytes of targetReg
2081 // and next swap low and high 8-bytes of targetReg to have them
2082 // rearranged in the right order.
2083 inst_RV_RV_IV(INS_shufpd, EA_16BYTE, targetReg, reg0, 0x00);
2084 inst_RV_RV_IV(INS_shufpd, EA_16BYTE, targetReg, targetReg, 0x01);
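// A sketch of the two-shuffle sequence above, assuming targetReg == xmm1 and
// reg0 == xmm0 (register names are illustrative only):
//     shufpd xmm1, xmm0, 0x00   ; xmm1 = { xmm1[63:0], xmm0[63:0] }
//     shufpd xmm1, xmm1, 0x01   ; swap the low and high 8 bytes of xmm1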
2091 for (unsigned i = 0; i < regCount; ++i)
2093 var_types type = retTypeDesc->GetReturnRegType(i);
2094 regNumber reg = call->GetRegNumByIdx(i);
2095 if (op1->IsCopyOrReload())
2097 // GT_COPY/GT_RELOAD will have valid reg for those positions
2098 // that need to be copied or reloaded.
2099 regNumber reloadReg = op1->AsCopyOrReload()->GetRegNumByIdx(i);
2100 if (reloadReg != REG_NA)
2106 assert(reg != REG_NA);
2107 getEmitter()->emitIns_S_R(ins_Store(type), emitTypeSize(type), reg, lclNum, offset);
2108 offset += genTypeSize(type);
2111 varDsc->lvRegNum = REG_STK;
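// For example (a sketch; the actual registers come from the ReturnTypeDesc),
// a 16-byte struct returned in RAX/RDX is spilled to its stack home V01 as:
//     mov qword ptr [V01], rax
//     mov qword ptr [V01+8], rdx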
2113 #elif defined(_TARGET_X86_)
2114 // Longs are returned in two return registers on x86.
2115 assert(varTypeIsLong(treeNode));
2117 // Assumption: current x86 implementation requires that a multi-reg long
2118 // var in 'var = call' is flagged as lvIsMultiRegRet to prevent it from
2119 // being struct promoted.
2120 unsigned lclNum = treeNode->AsLclVarCommon()->gtLclNum;
2121 LclVarDsc* varDsc = &(compiler->lvaTable[lclNum]);
2122 noway_assert(varDsc->lvIsMultiRegRet);
2124 GenTree* op1 = treeNode->gtGetOp1();
2125 GenTree* actualOp1 = op1->gtSkipReloadOrCopy();
2126 GenTreeCall* call = actualOp1->AsCall();
2127 assert(call->HasMultiRegRetVal());
2129 genConsumeRegs(op1);
2131 ReturnTypeDesc* retTypeDesc = call->GetReturnTypeDesc();
2132 unsigned regCount = retTypeDesc->GetReturnRegCount();
2133 assert(regCount == MAX_RET_REG_COUNT);
2137 for (unsigned i = 0; i < regCount; ++i)
2139 var_types type = retTypeDesc->GetReturnRegType(i);
2140 regNumber reg = call->GetRegNumByIdx(i);
2141 if (op1->IsCopyOrReload())
2143 // GT_COPY/GT_RELOAD will have valid reg for those positions
2144 // that need to be copied or reloaded.
2145 regNumber reloadReg = op1->AsCopyOrReload()->GetRegNumByIdx(i);
2146 if (reloadReg != REG_NA)
2152 assert(reg != REG_NA);
2153 getEmitter()->emitIns_S_R(ins_Store(type), emitTypeSize(type), reg, lclNum, offset);
2154 offset += genTypeSize(type);
2157 varDsc->lvRegNum = REG_STK;
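// For example (a sketch), a long returned in EAX/EDX is stored to its stack
// home V01 as:
//     mov dword ptr [V01], eax
//     mov dword ptr [V01+4], edx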
2158 #else // !FEATURE_UNIX_AMD64_STRUCT_PASSING && !_TARGET_X86_
2159 assert(!"Unreached");
2160 #endif // !FEATURE_UNIX_AMD64_STRUCT_PASSING && !_TARGET_X86_
2163 //------------------------------------------------------------------------
2164 // genLclHeap: Generate code for localloc.
2167 // tree - the localloc tree to generate.
2170 // Note that for x86, we don't track ESP movements while generating the localloc code.
2171 // The ESP tracking is used to report stack pointer-relative GC info, which is not
2172 // interesting while doing the localloc construction. Also, for functions with localloc,
2173 // we have EBP frames, and EBP-relative locals, and ESP-relative accesses only for function
2174 // call arguments. We store the ESP after the localloc is complete in the LocAllocSP
2175 // variable. This variable is implicitly reported to the VM in the GC info (its position
2176 // is defined by convention relative to other items), and is used by the GC to find the
2177 // "base" stack pointer in functions with localloc.
2179 void CodeGen::genLclHeap(GenTreePtr tree)
2181 assert(tree->OperGet() == GT_LCLHEAP);
2182 assert(compiler->compLocallocUsed);
2184 GenTreePtr size = tree->gtOp.gtOp1;
2185 noway_assert((genActualType(size->gtType) == TYP_INT) || (genActualType(size->gtType) == TYP_I_IMPL));
2187 regNumber targetReg = tree->gtRegNum;
2188 regNumber regCnt = REG_NA;
2189 var_types type = genActualType(size->gtType);
2190 emitAttr easz = emitTypeSize(type);
2191 BasicBlock* endLabel = nullptr;
2195 if (compiler->opts.compStackCheckOnRet)
2197 noway_assert(compiler->lvaReturnEspCheck != 0xCCCCCCCC &&
2198 compiler->lvaTable[compiler->lvaReturnEspCheck].lvDoNotEnregister &&
2199 compiler->lvaTable[compiler->lvaReturnEspCheck].lvOnFrame);
2200 getEmitter()->emitIns_S_R(INS_cmp, EA_PTRSIZE, REG_SPBASE, compiler->lvaReturnEspCheck, 0);
2202 BasicBlock* esp_check = genCreateTempLabel();
2203 emitJumpKind jmpEqual = genJumpKindForOper(GT_EQ, CK_SIGNED);
2204 inst_JMP(jmpEqual, esp_check);
2205 getEmitter()->emitIns(INS_BREAKPOINT);
2206 genDefineTempLabel(esp_check);
2210 noway_assert(isFramePointerUsed()); // localloc requires Frame Pointer to be established since SP changes
2211 noway_assert(genStackLevel == 0); // Can't have anything on the stack
2213 unsigned stackAdjustment = 0;
2214 BasicBlock* loop = nullptr;
2216 // Compute the amount of memory to allocate, rounding up so it is properly STACK_ALIGN'ed.
2217 size_t amount = 0;
2218 if (size->IsCnsIntOrI())
2220 // If size is a constant, then it must be contained.
2221 assert(size->isContained());
2223 // If amount is zero then return null in targetReg
2224 amount = size->gtIntCon.gtIconVal;
2225 if (amount == 0)
2227 instGen_Set_Reg_To_Zero(EA_PTRSIZE, targetReg);
2228 goto BAILOUT;
2231 // 'amount' is the total number of bytes to localloc to properly STACK_ALIGN
2232 amount = AlignUp(amount, STACK_ALIGN);
2236 // The localloc requested memory size is non-constant.
2238 // Put the size value in targetReg. If it is zero, bail out by returning null in targetReg.
2239 genConsumeRegAndCopy(size, targetReg);
2240 endLabel = genCreateTempLabel();
2241 getEmitter()->emitIns_R_R(INS_test, easz, targetReg, targetReg);
2242 inst_JMP(EJ_je, endLabel);
2244 // Compute the size of the block to allocate and perform alignment.
2245 // If compInitMem=true, we can reuse targetReg as regcnt,
2246 // since we don't need any internal registers.
2247 if (compiler->info.compInitMem)
2249 assert(tree->AvailableTempRegCount() == 0);
2250 regCnt = targetReg;
2252 else
2254 regCnt = tree->ExtractTempReg();
2255 if (regCnt != targetReg)
2257 // Above, we put the size in targetReg. Now, copy it to our new temp register if necessary.
2258 inst_RV_RV(INS_mov, regCnt, targetReg, size->TypeGet());
2262 // Round up the number of bytes to allocate to a STACK_ALIGN boundary. This is done
2263 // by code like:
2264 //      add reg, 15
2265 //      and reg, -16
2266 // However, in the initialized memory case, we need the count of STACK_ALIGN-sized
2267 // elements, not a byte count, after the alignment. So instead of the "and", which
2268 // becomes unnecessary, generate a shift, e.g.:
2269 //      add reg, 15
2270 //      shr reg, 4
2272 inst_RV_IV(INS_add, regCnt, STACK_ALIGN - 1, emitActualTypeSize(type));
2274 if (compiler->info.compInitMem)
2276 // Convert the count from a count of bytes to a loop count. We will loop once per
2277 // stack alignment size, so each loop will zero 4 bytes on x86 and 16 bytes on x64.
2278 // Note that we zero a single reg-size word per iteration on x86, and 2 reg-size
2279 // words per iteration on x64. We will shift off all the stack alignment bits
2280 // added above, so there is no need for an 'and' instruction.
2282 // --- shr regCnt, 2 (or 4) ---
2283 inst_RV_SH(INS_SHIFT_RIGHT_LOGICAL, EA_PTRSIZE, regCnt, STACK_ALIGN_SHIFT_ALL);
2287 // Otherwise, mask off the low bits to align the byte count.
2288 inst_RV_IV(INS_AND, regCnt, ~(STACK_ALIGN - 1), emitActualTypeSize(type));
2292 #if FEATURE_FIXED_OUT_ARGS
2293 // If we have an outgoing arg area then we must adjust the SP by popping off the
2294 // outgoing arg area. We will restore it right before we return from this method.
2296 // Localloc returns stack space that is aligned to STACK_ALIGN bytes. The following
2297 // are the cases that need to be handled:
2298 // i) Method has out-going arg area.
2299 // It is guaranteed that size of out-going arg area is STACK_ALIGN'ed (see fgMorphArgs).
2300 // Therefore, we will pop off the out-going arg area from RSP before allocating the localloc space.
2301 // ii) Method has no out-going arg area.
2302 // Nothing to pop off from the stack.
2303 if (compiler->lvaOutgoingArgSpaceSize > 0)
2305 assert((compiler->lvaOutgoingArgSpaceSize % STACK_ALIGN) == 0); // This must be true for the stack to remain
2306 // aligned
2307 inst_RV_IV(INS_add, REG_SPBASE, compiler->lvaOutgoingArgSpaceSize, EA_PTRSIZE);
2308 stackAdjustment += compiler->lvaOutgoingArgSpaceSize;
2312 if (size->IsCnsIntOrI())
2314 // We should reach here only for non-zero, constant size allocations.
2316 assert((amount % STACK_ALIGN) == 0);
2317 assert((amount % REGSIZE_BYTES) == 0);
2319 // For small allocations we will generate up to six 'push 0' instructions inline
2320 size_t cntRegSizedWords = amount / REGSIZE_BYTES;
2321 if (cntRegSizedWords <= 6)
2323 for (; cntRegSizedWords != 0; cntRegSizedWords--)
2325 inst_IV(INS_push_hide, 0); // push_hide means don't track the stack
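// For example (a sketch), a 24-byte localloc on x64 becomes three
// pointer-sized pushes:
//     push 0
//     push 0
//     push 0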
2330 bool doNoInitLessThanOnePageAlloc =
2331 !compiler->info.compInitMem && (amount < compiler->eeGetPageSize()); // must be < not <=
2334 bool needRegCntRegister = true;
2335 #else // !_TARGET_X86_
2336 bool needRegCntRegister = !doNoInitLessThanOnePageAlloc;
2337 #endif // !_TARGET_X86_
2339 if (needRegCntRegister)
2341 // If compInitMem=true, we can reuse targetReg as regcnt.
2342 // Since size is a constant, regCnt is not yet initialized.
2343 assert(regCnt == REG_NA);
2344 if (compiler->info.compInitMem)
2346 assert(tree->AvailableTempRegCount() == 0);
2347 regCnt = targetReg;
2349 else
2351 regCnt = tree->ExtractTempReg();
2355 if (doNoInitLessThanOnePageAlloc)
2357 // Since the size is less than a page, simply adjust ESP.
2358 // ESP might already be in the guard page, so we must touch it BEFORE
2359 // the alloc, not after.
2360 CLANG_FORMAT_COMMENT_ANCHOR;
2363 // For x86, we don't want to use "sub ESP" because we don't want the emitter to track the adjustment
2364 // to ESP. So do the work in the count register.
2365 // TODO-CQ: manipulate ESP directly, to share code, reduce #ifdefs, and improve CQ. This would require
2366 // creating a way to temporarily turn off the emitter's tracking of ESP, maybe marking instrDescs as "don't
2367 // track".
2368 inst_RV_RV(INS_mov, regCnt, REG_SPBASE, TYP_I_IMPL);
2369 getEmitter()->emitIns_AR_R(INS_TEST, EA_4BYTE, REG_SPBASE, REG_SPBASE, 0);
2370 inst_RV_IV(INS_sub, regCnt, amount, EA_PTRSIZE);
2371 inst_RV_RV(INS_mov, REG_SPBASE, regCnt, TYP_I_IMPL);
2372 #else // !_TARGET_X86_
2373 getEmitter()->emitIns_AR_R(INS_TEST, EA_4BYTE, REG_SPBASE, REG_SPBASE, 0);
2374 inst_RV_IV(INS_sub, REG_SPBASE, amount, EA_PTRSIZE);
2375 #endif // !_TARGET_X86_
2380 // else, "mov regCnt, amount"
2382 if (compiler->info.compInitMem)
2384 // When initializing memory, we want 'amount' to be the loop count.
2385 assert((amount % STACK_ALIGN) == 0);
2386 amount /= STACK_ALIGN;
2389 genSetRegToIcon(regCnt, amount, ((int)amount == amount) ? TYP_INT : TYP_LONG);
2392 loop = genCreateTempLabel();
2393 if (compiler->info.compInitMem)
2395 // At this point 'regCnt' is set to the number of loop iterations for this loop, if each
2396 // iteration zeros (and subtracts from the stack pointer) STACK_ALIGN bytes.
2397 // Since we have to zero out the allocated memory AND ensure that RSP is always valid
2398 // by tickling the pages, we will just push 0's on the stack.
2400 assert(genIsValidIntReg(regCnt));
2403 genDefineTempLabel(loop);
2405 static_assert_no_msg((STACK_ALIGN % REGSIZE_BYTES) == 0);
2406 unsigned const count = (STACK_ALIGN / REGSIZE_BYTES);
2408 for (unsigned i = 0; i < count; i++)
2410 inst_IV(INS_push_hide, 0); // --- push REG_SIZE bytes of 0
2412 // Note that the stack must always be aligned to STACK_ALIGN bytes
2414 // Decrement the loop counter and loop if not done.
2415 inst_RV(INS_dec, regCnt, TYP_I_IMPL);
2416 inst_JMP(EJ_jne, loop);
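// A sketch of the resulting zero-init loop on x64, assuming regCnt is RCX
// (two reg-sized pushes per STACK_ALIGN-sized iteration):
//  loop:
//     push 0
//     push 0
//     dec  rcx
//     jne  loop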
2420 // At this point 'regCnt' is set to the total number of bytes to localloc.
2422 // We don't need to zero out the allocated memory. However, we do have
2423 // to tickle the pages to ensure that ESP is always valid and is
2424 // in sync with the "stack guard page". Note that in the worst
2425 // case ESP is on the last byte of the guard page. Thus you must
2426 // touch ESP+0 first, not ESP+0x1000.
2428 // Another subtlety is that you don't want ESP to be exactly on the
2429 // boundary of the guard page because PUSH is predecrement, thus
2430 // call setup would not touch the guard page but just beyond it
2432 // Note that we go through a few hoops so that ESP never points to
2433 // illegal pages at any time during the tickling process
2434 // Generate code along these lines:
2435 //     neg REGCNT
2436 //     add REGCNT, ESP      // reg now holds ultimate ESP
2437 //     jb loop              // result is smaller than original ESP (no wrap around)
2438 //     xor REGCNT, REGCNT   // Overflow, pick lowest possible number
2439 //  loop:
2440 //     test ESP, [ESP+0]    // tickle the page
2441 //     mov REGTMP, ESP
2442 //     sub REGTMP, PAGE_SIZE
2443 //     mov ESP, REGTMP
2444 //     cmp ESP, REGCNT
2445 //     jae loop
2446 //     mov ESP, REGCNT      // the final ESP
2449 inst_RV(INS_NEG, regCnt, TYP_I_IMPL);
2450 inst_RV_RV(INS_add, regCnt, REG_SPBASE, TYP_I_IMPL);
2451 inst_JMP(EJ_jb, loop);
2453 instGen_Set_Reg_To_Zero(EA_PTRSIZE, regCnt);
2455 genDefineTempLabel(loop);
2457 // Tickle the page at the current ESP, then decrement ESP;
2458 // the tickle has to be done BEFORE the update of ESP since
2459 // ESP might already be on the guard page. It is OK to leave
2460 // the final value of ESP on the guard page.
2461 getEmitter()->emitIns_AR_R(INS_TEST, EA_4BYTE, REG_SPBASE, REG_SPBASE, 0);
2463 // This is a harmless trick to avoid the emitter trying to track the
2464 // decrement of the ESP - we do the subtraction in another reg instead
2465 // of adjusting ESP directly.
2466 regNumber regTmp = tree->GetSingleTempReg();
2468 inst_RV_RV(INS_mov, regTmp, REG_SPBASE, TYP_I_IMPL);
2469 inst_RV_IV(INS_sub, regTmp, compiler->eeGetPageSize(), EA_PTRSIZE);
2470 inst_RV_RV(INS_mov, REG_SPBASE, regTmp, TYP_I_IMPL);
2472 inst_RV_RV(INS_cmp, REG_SPBASE, regCnt, TYP_I_IMPL);
2473 inst_JMP(EJ_jae, loop);
2475 // Move the final value to ESP
2476 inst_RV_RV(INS_mov, REG_SPBASE, regCnt);
2480 // Re-adjust SP to allocate out-going arg area
2481 if (stackAdjustment > 0)
2483 assert((stackAdjustment % STACK_ALIGN) == 0); // This must be true for the stack to remain aligned
2484 inst_RV_IV(INS_sub, REG_SPBASE, stackAdjustment, EA_PTRSIZE);
2487 // Return the stackalloc'ed address in result register.
2488 // TargetReg = RSP + stackAdjustment.
2489 getEmitter()->emitIns_R_AR(INS_lea, EA_PTRSIZE, targetReg, REG_SPBASE, stackAdjustment);
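// e.g., with a hypothetical 0x20-byte outgoing arg area (a sketch):
//     lea rax, [rsp + 0x20]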
2491 if (endLabel != nullptr)
2493 genDefineTempLabel(endLabel);
2495 BAILOUT:
2498 // Write the lvaLocAllocSPvar stack frame slot
2499 if (compiler->lvaLocAllocSPvar != BAD_VAR_NUM)
2501 getEmitter()->emitIns_S_R(ins_Store(TYP_I_IMPL), EA_PTRSIZE, REG_SPBASE, compiler->lvaLocAllocSPvar, 0);
2505 if (compiler->opts.compNeedStackProbes)
2507 genGenerateStackProbe();
2513 if (compiler->opts.compStackCheckOnRet)
2515 noway_assert(compiler->lvaReturnEspCheck != 0xCCCCCCCC &&
2516 compiler->lvaTable[compiler->lvaReturnEspCheck].lvDoNotEnregister &&
2517 compiler->lvaTable[compiler->lvaReturnEspCheck].lvOnFrame);
2518 getEmitter()->emitIns_S_R(ins_Store(TYP_I_IMPL), EA_PTRSIZE, REG_SPBASE, compiler->lvaReturnEspCheck, 0);
2522 genProduceReg(tree);
2525 void CodeGen::genCodeForStoreBlk(GenTreeBlk* storeBlkNode)
2527 assert(storeBlkNode->OperIs(GT_STORE_OBJ, GT_STORE_DYN_BLK, GT_STORE_BLK));
2529 if (storeBlkNode->OperIs(GT_STORE_OBJ) && storeBlkNode->OperIsCopyBlkOp() && !storeBlkNode->gtBlkOpGcUnsafe)
2531 assert(storeBlkNode->AsObj()->gtGcPtrCount != 0);
2532 genCodeForCpObj(storeBlkNode->AsObj());
2536 #ifdef JIT32_GCENCODER
2537 assert(!storeBlkNode->gtBlkOpGcUnsafe);
2538 #else
2539 if (storeBlkNode->gtBlkOpGcUnsafe)
2541 getEmitter()->emitDisableGC();
2543 #endif // JIT32_GCENCODER
2545 bool isCopyBlk = storeBlkNode->OperIsCopyBlkOp();
2547 switch (storeBlkNode->gtBlkOpKind)
2549 #ifdef _TARGET_AMD64_
2550 case GenTreeBlk::BlkOpKindHelper:
2551 if (isCopyBlk)
2553 genCodeForCpBlk(storeBlkNode);
2555 else
2557 genCodeForInitBlk(storeBlkNode);
2559 break;
2560 #endif // _TARGET_AMD64_
2561 case GenTreeBlk::BlkOpKindRepInstr:
2562 if (isCopyBlk)
2564 genCodeForCpBlkRepMovs(storeBlkNode);
2566 else
2568 genCodeForInitBlkRepStos(storeBlkNode);
2570 break;
2571 case GenTreeBlk::BlkOpKindUnroll:
2572 if (isCopyBlk)
2574 genCodeForCpBlkUnroll(storeBlkNode);
2576 else
2578 genCodeForInitBlkUnroll(storeBlkNode);
2580 break;
2582 default:
2583 unreached();
2585 #ifndef JIT32_GCENCODER
2586 if (storeBlkNode->gtBlkOpGcUnsafe)
2588 getEmitter()->emitEnableGC();
2590 #endif // !defined(JIT32_GCENCODER)
2594 //------------------------------------------------------------------------
2595 // genCodeForInitBlkRepStos: Generate code for InitBlk using rep stos.
2598 // initBlkNode - The Block store for which we are generating code.
2600 // Preconditions:
2601 //    On x64:
2602 //    The size of the buffers must be a constant and also less than INITBLK_STOS_LIMIT bytes.
2603 //    Any value larger than that, we'll use the helper even if both the fill byte and the
2604 //    size are integer constants.
2605 //    On x86:
2606 //    The size must either be a non-constant or less than INITBLK_STOS_LIMIT bytes.
2608 void CodeGen::genCodeForInitBlkRepStos(GenTreeBlk* initBlkNode)
2610 // Make sure we got the arguments of the initblk/initobj operation in the right registers.
2611 unsigned size = initBlkNode->Size();
2612 GenTreePtr dstAddr = initBlkNode->Addr();
2613 GenTreePtr initVal = initBlkNode->Data();
2614 if (initVal->OperIsInitVal())
2616 initVal = initVal->gtGetOp1();
2620 assert(dstAddr->isUsedFromReg());
2621 assert(initVal->isUsedFromReg());
2622 #ifdef _TARGET_AMD64_
2623 assert(size != 0);
2624 #endif
2625 if (initVal->IsCnsIntOrI())
2627 #ifdef _TARGET_AMD64_
2628 assert(size > CPBLK_UNROLL_LIMIT && size < CPBLK_MOVS_LIMIT);
2629 #else // !_TARGET_AMD64_
2630 // Note that a size of zero means a non-constant size.
2631 assert((size == 0) || (size > CPBLK_UNROLL_LIMIT));
2632 #endif // !_TARGET_AMD64_
2637 genConsumeBlockOp(initBlkNode, REG_RDI, REG_RAX, REG_RCX);
2638 instGen(INS_r_stosb);
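// i.e., with RDI = dstAddr, RAX = fill value, and RCX = size (as set up by
// genConsumeBlockOp above), the entire initialization is:
//     rep stosb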
2641 // Generate code for InitBlk by performing a loop unroll
2642 // Preconditions:
2643 // a) Both the size and fill byte value are integer constants.
2644 // b) The size of the struct to initialize is smaller than INITBLK_UNROLL_LIMIT bytes.
2646 void CodeGen::genCodeForInitBlkUnroll(GenTreeBlk* initBlkNode)
2648 // Make sure we got the arguments of the initblk/initobj operation in the right registers
2649 unsigned size = initBlkNode->Size();
2650 GenTreePtr dstAddr = initBlkNode->Addr();
2651 GenTreePtr initVal = initBlkNode->Data();
2652 if (initVal->OperIsInitVal())
2654 initVal = initVal->gtGetOp1();
2657 assert(dstAddr->isUsedFromReg());
2658 assert(initVal->isUsedFromReg() || (initVal->IsIntegralConst(0) && ((size & 0xf) == 0)));
2660 assert(size <= INITBLK_UNROLL_LIMIT);
2661 assert(initVal->gtSkipReloadOrCopy()->IsCnsIntOrI());
2663 emitter* emit = getEmitter();
2665 genConsumeOperands(initBlkNode);
2667 // If the initVal was moved, or spilled and reloaded to a different register,
2668 // get the original initVal from below the GT_RELOAD, but only after capturing the valReg,
2669 // which needs to be the new register.
2670 regNumber valReg = initVal->gtRegNum;
2671 initVal = initVal->gtSkipReloadOrCopy();
2673 unsigned offset = 0;
2675 // Perform an unroll using SSE2 loads and stores.
2676 if (size >= XMM_REGSIZE_BYTES)
2678 regNumber tmpReg = initBlkNode->GetSingleTempReg();
2679 assert(genIsValidFloatReg(tmpReg));
2681 if (initVal->gtIntCon.gtIconVal != 0)
2683 emit->emitIns_R_R(INS_mov_i2xmm, EA_PTRSIZE, tmpReg, valReg);
2684 emit->emitIns_R_R(INS_punpckldq, EA_8BYTE, tmpReg, tmpReg);
2685 #ifdef _TARGET_X86_
2686 // For x86, we need one more to convert it from 8 bytes to 16 bytes.
2687 emit->emitIns_R_R(INS_punpckldq, EA_8BYTE, tmpReg, tmpReg);
2688 #endif // _TARGET_X86_
2692 emit->emitIns_R_R(INS_xorpd, EA_8BYTE, tmpReg, tmpReg);
2695 // Determine how many 16 byte slots we're going to fill using SSE movs.
2696 size_t slots = size / XMM_REGSIZE_BYTES;
2700 emit->emitIns_AR_R(INS_movdqu, EA_8BYTE, tmpReg, dstAddr->gtRegNum, offset);
2701 offset += XMM_REGSIZE_BYTES;
2705 // Fill the remainder (or a < 16 byte sized struct)
2706 if ((size & 8) != 0)
2708 #ifdef _TARGET_X86_
2709 // TODO-X86-CQ: [1091735] Revisit block ops codegen. One example: use movq for 8 byte movs.
2710 emit->emitIns_AR_R(INS_mov, EA_4BYTE, valReg, dstAddr->gtRegNum, offset);
2711 offset += 4;
2712 emit->emitIns_AR_R(INS_mov, EA_4BYTE, valReg, dstAddr->gtRegNum, offset);
2713 offset += 4;
2714 #else // !_TARGET_X86_
2716 emit->emitIns_AR_R(INS_mov, EA_8BYTE, valReg, dstAddr->gtRegNum, offset);
2717 offset += 8;
2719 #endif // !_TARGET_X86_
2721 if ((size & 4) != 0)
2723 emit->emitIns_AR_R(INS_mov, EA_4BYTE, valReg, dstAddr->gtRegNum, offset);
2726 if ((size & 2) != 0)
2728 emit->emitIns_AR_R(INS_mov, EA_2BYTE, valReg, dstAddr->gtRegNum, offset);
2731 if ((size & 1) != 0)
2733 emit->emitIns_AR_R(INS_mov, EA_1BYTE, valReg, dstAddr->gtRegNum, offset);
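// A sketch of the unroll for a hypothetical size == 24 with a zero initVal in
// a register, on x64 (assuming dstAddr in RDI, valReg == RAX, tmpReg == XMM0):
//     xorpd  xmm0, xmm0
//     movdqu xmmword ptr [rdi], xmm0   ; one 16-byte slot
//     mov    qword ptr [rdi+16], rax   ; the 8-byte remainder (RAX holds 0)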
2737 // Generates code for InitBlk by calling the VM memset helper function.
2738 // Preconditions:
2739 // a) The size argument of the InitBlk is not an integer constant.
2740 // b) The size argument of the InitBlk is >= INITBLK_STOS_LIMIT bytes.
2741 void CodeGen::genCodeForInitBlk(GenTreeBlk* initBlkNode)
2743 #ifdef _TARGET_AMD64_
2744 // Make sure we got the arguments of the initblk operation in the right registers
2745 unsigned blockSize = initBlkNode->Size();
2746 GenTreePtr dstAddr = initBlkNode->Addr();
2747 GenTreePtr initVal = initBlkNode->Data();
2748 if (initVal->OperIsInitVal())
2750 initVal = initVal->gtGetOp1();
2753 assert(dstAddr->isUsedFromReg());
2754 assert(initVal->isUsedFromReg());
2758 assert(blockSize >= CPBLK_MOVS_LIMIT);
2761 genConsumeBlockOp(initBlkNode, REG_ARG_0, REG_ARG_1, REG_ARG_2);
2763 genEmitHelperCall(CORINFO_HELP_MEMSET, 0, EA_UNKNOWN);
2764 #else // !_TARGET_AMD64_
2765 NYI_X86("Helper call for InitBlk");
2766 #endif // !_TARGET_AMD64_
2769 // Generate code for a load from some address + offset
2770 // baseNode: tree node which can be either a local address or an arbitrary node
2771 // offset: distance from the baseNode from which to load
2772 void CodeGen::genCodeForLoadOffset(instruction ins, emitAttr size, regNumber dst, GenTree* baseNode, unsigned offset)
2774 emitter* emit = getEmitter();
2776 if (baseNode->OperIsLocalAddr())
2778 if (baseNode->gtOper == GT_LCL_FLD_ADDR)
2780 offset += baseNode->gtLclFld.gtLclOffs;
2782 emit->emitIns_R_S(ins, size, dst, baseNode->gtLclVarCommon.gtLclNum, offset);
2786 emit->emitIns_R_AR(ins, size, dst, baseNode->gtRegNum, offset);
2790 //------------------------------------------------------------------------
2791 // genCodeForStoreOffset: Generate code to store a reg to [base + offset].
2794 // ins - the instruction to generate.
2795 // size - the size that needs to be stored.
2796 // src - the register which needs to be stored.
2797 // baseNode - the base, relative to which to store the src register.
2798 // offset - the offset that is added to the baseNode to calculate the address to store into.
2800 void CodeGen::genCodeForStoreOffset(instruction ins, emitAttr size, regNumber src, GenTree* baseNode, unsigned offset)
2802 emitter* emit = getEmitter();
2804 if (baseNode->OperIsLocalAddr())
2806 if (baseNode->gtOper == GT_LCL_FLD_ADDR)
2808 offset += baseNode->gtLclFld.gtLclOffs;
2811 emit->emitIns_S_R(ins, size, src, baseNode->AsLclVarCommon()->GetLclNum(), offset);
2815 emit->emitIns_AR_R(ins, size, src, baseNode->gtRegNum, offset);
2819 // Generates CpBlk code by performing a loop unroll
2821 // The size argument of the CpBlk node is a constant and <= 64 bytes.
2822 // This may seem small but covers >95% of the cases in several framework assemblies.
2824 void CodeGen::genCodeForCpBlkUnroll(GenTreeBlk* cpBlkNode)
2826 // Make sure we got the arguments of the cpblk operation in the right registers
2827 unsigned size = cpBlkNode->Size();
2828 GenTreePtr dstAddr = cpBlkNode->Addr();
2829 GenTreePtr source = cpBlkNode->Data();
2830 GenTreePtr srcAddr = nullptr;
2831 assert(size <= CPBLK_UNROLL_LIMIT);
2833 emitter* emit = getEmitter();
2835 if (dstAddr->isUsedFromReg())
2837 genConsumeReg(dstAddr);
2840 if (source->gtOper == GT_IND)
2842 srcAddr = source->gtGetOp1();
2843 if (srcAddr->isUsedFromReg())
2845 genConsumeReg(srcAddr);
2850 noway_assert(source->IsLocal());
2851 // TODO-Cleanup: Consider making the addrForm() method in Rationalize public, e.g. in GenTree.
2852 // OR: transform source to GT_IND(GT_LCL_VAR_ADDR)
2853 if (source->OperGet() == GT_LCL_VAR)
2855 source->SetOper(GT_LCL_VAR_ADDR);
2859 assert(source->OperGet() == GT_LCL_FLD);
2860 source->SetOper(GT_LCL_FLD_ADDR);
2865 unsigned offset = 0;
2867 // If the size of this struct is larger than 16 bytes
2868 // let's use SSE2 to be able to do 16-byte-at-a-time
2869 // loads and stores.
2871 if (size >= XMM_REGSIZE_BYTES)
2873 regNumber xmmReg = cpBlkNode->GetSingleTempReg(RBM_ALLFLOAT);
2874 assert(genIsValidFloatReg(xmmReg));
2875 size_t slots = size / XMM_REGSIZE_BYTES;
2877 // TODO: In the below code the load and store instructions are for 16 bytes, but the
2878 // type is EA_8BYTE. The movdqa/u are 16 byte instructions, so it works, but
2879 // this probably needs to be changed.
2883 genCodeForLoadOffset(INS_movdqu, EA_8BYTE, xmmReg, srcAddr, offset);
2885 genCodeForStoreOffset(INS_movdqu, EA_8BYTE, xmmReg, dstAddr, offset);
2886 offset += XMM_REGSIZE_BYTES;
2890 // Fill the remainder (15 bytes or less) if there's one.
2891 if ((size & 0xf) != 0)
2893 // Grab the integer temp register to emit the remaining loads and stores.
2894 regNumber tmpReg = cpBlkNode->GetSingleTempReg(RBM_ALLINT);
2896 if ((size & 8) != 0)
2898 #ifdef _TARGET_X86_
2899 // TODO-X86-CQ: [1091735] Revisit block ops codegen. One example: use movq for 8 byte movs.
2900 for (unsigned savedOffs = offset; offset < savedOffs + 8; offset += 4)
2902 genCodeForLoadOffset(INS_mov, EA_4BYTE, tmpReg, srcAddr, offset);
2903 genCodeForStoreOffset(INS_mov, EA_4BYTE, tmpReg, dstAddr, offset);
2905 #else // !_TARGET_X86_
2906 genCodeForLoadOffset(INS_mov, EA_8BYTE, tmpReg, srcAddr, offset);
2907 genCodeForStoreOffset(INS_mov, EA_8BYTE, tmpReg, dstAddr, offset);
2908 offset += 8;
2909 #endif // !_TARGET_X86_
2911 if ((size & 4) != 0)
2913 genCodeForLoadOffset(INS_mov, EA_4BYTE, tmpReg, srcAddr, offset);
2914 genCodeForStoreOffset(INS_mov, EA_4BYTE, tmpReg, dstAddr, offset);
2917 if ((size & 2) != 0)
2919 genCodeForLoadOffset(INS_mov, EA_2BYTE, tmpReg, srcAddr, offset);
2920 genCodeForStoreOffset(INS_mov, EA_2BYTE, tmpReg, dstAddr, offset);
2923 if ((size & 1) != 0)
2925 genCodeForLoadOffset(INS_mov, EA_1BYTE, tmpReg, srcAddr, offset);
2926 genCodeForStoreOffset(INS_mov, EA_1BYTE, tmpReg, dstAddr, offset);
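// A sketch of the unroll for a hypothetical 20-byte copy on x64 (assuming
// srcAddr in RSI, dstAddr in RDI, xmmReg == XMM0, tmpReg == RAX):
//     movdqu xmm0, xmmword ptr [rsi]
//     movdqu xmmword ptr [rdi], xmm0
//     mov    eax, dword ptr [rsi+16]
//     mov    dword ptr [rdi+16], eax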
2931 // Generate code for CpBlk by using rep movs
2933 // The size argument of the CpBlk is a constant and is between
2934 // CPBLK_UNROLL_LIMIT and CPBLK_MOVS_LIMIT bytes.
2935 void CodeGen::genCodeForCpBlkRepMovs(GenTreeBlk* cpBlkNode)
2937 // Make sure we got the arguments of the cpblk operation in the right registers
2938 unsigned size = cpBlkNode->Size();
2939 GenTreePtr dstAddr = cpBlkNode->Addr();
2940 GenTreePtr source = cpBlkNode->Data();
2941 GenTreePtr srcAddr = nullptr;
2944 assert(dstAddr->isUsedFromReg());
2945 assert(source->isContained());
2947 #ifdef _TARGET_X86_
2948 if (size == 0)
2950 noway_assert(cpBlkNode->OperGet() == GT_STORE_DYN_BLK);
2952 else
2953 #endif
2955 #ifdef _TARGET_AMD64_
2956 assert(size > CPBLK_UNROLL_LIMIT && size < CPBLK_MOVS_LIMIT);
2957 #else
2958 assert(size > CPBLK_UNROLL_LIMIT);
2959 #endif
2963 genConsumeBlockOp(cpBlkNode, REG_RDI, REG_RSI, REG_RCX);
2964 instGen(INS_r_movsb);
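// i.e., with RDI = dst, RSI = src, and RCX = byte count, the whole copy is:
//     rep movsb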
2967 #ifdef FEATURE_PUT_STRUCT_ARG_STK
2968 //------------------------------------------------------------------------
2969 // CodeGen::genMove8IfNeeded: Conditionally move 8 bytes of a struct to the argument area
2972 //    size       - The size of bytes remaining to be moved
2973 //    longTmpReg - The tmp register to be used for the long value
2974 //    srcAddr    - The address of the source struct
2975 //    offset     - The current offset being copied
2978 // Returns the number of bytes moved (8 or 0).
2981 // This is used in the PutArgStkKindUnroll case, to move any bytes that are
2982 // not an even multiple of 16.
2983 // On x86, longTmpReg must be an xmm reg; on x64 it must be an integer register.
2984 // This is checked by genStoreRegToStackArg.
2986 unsigned CodeGen::genMove8IfNeeded(unsigned size, regNumber longTmpReg, GenTree* srcAddr, unsigned offset)
2988 #ifdef _TARGET_X86_
2989 instruction longMovIns = INS_movq;
2990 #else // !_TARGET_X86_
2991 instruction longMovIns = INS_mov;
2992 #endif // !_TARGET_X86_
2993 if ((size & 8) != 0)
2995 genCodeForLoadOffset(longMovIns, EA_8BYTE, longTmpReg, srcAddr, offset);
2996 genStoreRegToStackArg(TYP_LONG, longTmpReg, offset);
3002 //------------------------------------------------------------------------
3003 // CodeGen::genMove4IfNeeded: Conditionally move 4 bytes of a struct to the argument area
3006 //    size      - The size of bytes remaining to be moved
3007 //    intTmpReg - The tmp register to be used for the 4-byte value
3008 //    srcAddr   - The address of the source struct
3009 //    offset    - The current offset being copied
3012 // Returns the number of bytes moved (4 or 0).
3015 // This is used in the PutArgStkKindUnroll case, to move any bytes that are
3016 // not an even multiple of 16.
3017 // intTmpReg must be an integer register.
3018 // This is checked by genStoreRegToStackArg.
3020 unsigned CodeGen::genMove4IfNeeded(unsigned size, regNumber intTmpReg, GenTree* srcAddr, unsigned offset)
3022 if ((size & 4) != 0)
3024 genCodeForLoadOffset(INS_mov, EA_4BYTE, intTmpReg, srcAddr, offset);
3025 genStoreRegToStackArg(TYP_INT, intTmpReg, offset);
3031 //------------------------------------------------------------------------
3032 // CodeGen::genMove2IfNeeded: Conditionally move 2 bytes of a struct to the argument area
3035 //    size      - The size of bytes remaining to be moved
3036 //    intTmpReg - The tmp register to be used for the 2-byte value
3037 //    srcAddr   - The address of the source struct
3038 //    offset    - The current offset being copied
3041 // Returns the number of bytes moved (2 or 0).
3044 // This is used in the PutArgStkKindUnroll case, to move any bytes that are
3045 // not an even multiple of 16.
3046 // intTmpReg must be an integer register.
3047 // This is checked by genStoreRegToStackArg.
3049 unsigned CodeGen::genMove2IfNeeded(unsigned size, regNumber intTmpReg, GenTree* srcAddr, unsigned offset)
3051 if ((size & 2) != 0)
3053 genCodeForLoadOffset(INS_mov, EA_2BYTE, intTmpReg, srcAddr, offset);
3054 genStoreRegToStackArg(TYP_SHORT, intTmpReg, offset);
3060 //------------------------------------------------------------------------
3061 // CodeGen::genMove1IfNeeded: Conditionally move 1 byte of a struct to the argument area
3064 //    size      - The size of bytes remaining to be moved
3065 //    intTmpReg - The tmp register to be used for the 1-byte value
3066 //    srcAddr   - The address of the source struct
3067 //    offset    - The current offset being copied
3070 // Returns the number of bytes moved (1 or 0).
3073 // This is used in the PutArgStkKindUnroll case, to move any bytes that are
3074 // not an even multiple of 16.
3075 // intTmpReg must be an integer register.
3076 // This is checked by genStoreRegToStackArg.
3078 unsigned CodeGen::genMove1IfNeeded(unsigned size, regNumber intTmpReg, GenTree* srcAddr, unsigned offset)
3080 if ((size & 1) != 0)
3082 genCodeForLoadOffset(INS_mov, EA_1BYTE, intTmpReg, srcAddr, offset);
3083 genStoreRegToStackArg(TYP_BYTE, intTmpReg, offset);
3089 //---------------------------------------------------------------------------------------------------------------//
3090 // genStructPutArgUnroll: Generates code for passing a struct arg on stack by value using loop unrolling.
3093 //    putArgNode - the PutArgStk tree.
3096 // m_stkArgVarNum must be set to the base var number, relative to which the by-val struct will be copied to the
3097 // stack.
3099 // TODO-Amd64-Unix: Try to share code with copyblk.
3100 // Need refactoring of copyblk before it could be used for putarg_stk.
3101 // The difference for now is that a putarg_stk contains its children, while cpyblk does not.
3102 // This creates differences in code. After some significant refactoring it could be reused.
3104 void CodeGen::genStructPutArgUnroll(GenTreePutArgStk* putArgNode)
3106 // We will never call this method for SIMD types, which are stored directly
3107 // in genPutStructArgStk().
3108 noway_assert(putArgNode->TypeGet() == TYP_STRUCT);
3110 // Make sure we got the arguments of the cpblk operation in the right registers
3111 GenTreePtr dstAddr = putArgNode;
3112 GenTreePtr src = putArgNode->gtOp.gtOp1;
3114 unsigned size = putArgNode->getArgSize();
3115 assert(size <= CPBLK_UNROLL_LIMIT);
3117 emitter* emit = getEmitter();
3118 unsigned putArgOffset = putArgNode->getArgOffset();
3120 assert(src->isContained());
3122 assert(src->gtOper == GT_OBJ);
3124 if (src->gtOp.gtOp1->isUsedFromReg())
3126 genConsumeReg(src->gtOp.gtOp1);
3129 unsigned offset = 0;
3131 regNumber xmmTmpReg = REG_NA;
3132 regNumber intTmpReg = REG_NA;
3133 regNumber longTmpReg = REG_NA;
3134 #ifdef _TARGET_X86_
3135 // On x86 we use an XMM register for both 16 and 8-byte chunks, but if it's
3136 // less than 16 bytes, we will just be using pushes
3137 if (size >= 8)
3139 xmmTmpReg = putArgNode->GetSingleTempReg(RBM_ALLFLOAT);
3140 longTmpReg = xmmTmpReg;
3142 if ((size & 0x7) != 0)
3144 intTmpReg = putArgNode->GetSingleTempReg(RBM_ALLINT);
3146 #else // !_TARGET_X86_
3147 // On x64 we use an XMM register only for 16-byte chunks.
3148 if (size >= XMM_REGSIZE_BYTES)
3150 xmmTmpReg = putArgNode->GetSingleTempReg(RBM_ALLFLOAT);
3152 if ((size & 0xf) != 0)
3154 intTmpReg = putArgNode->GetSingleTempReg(RBM_ALLINT);
3155 longTmpReg = intTmpReg;
3157 #endif // !_TARGET_X86_
3159 // If the size of this struct is larger than 16 bytes
3160 // let's use SSE2 to be able to do 16-byte-at-a-time
3161 // loads and stores.
3162 if (size >= XMM_REGSIZE_BYTES)
3164 #ifdef _TARGET_X86_
3165 assert(!m_pushStkArg);
3166 #endif // _TARGET_X86_
3167 size_t slots = size / XMM_REGSIZE_BYTES;
3169 assert(putArgNode->gtGetOp1()->isContained());
3170 assert(putArgNode->gtGetOp1()->gtOp.gtOper == GT_OBJ);
3172 // TODO: In the below code the load and store instructions are for 16 bytes, but the
3173 // type is EA_8BYTE. The movdqa/u are 16 byte instructions, so it works, but
3174 // this probably needs to be changed.
3178 genCodeForLoadOffset(INS_movdqu, EA_8BYTE, xmmTmpReg, src->gtGetOp1(), offset);
3181 genStoreRegToStackArg(TYP_STRUCT, xmmTmpReg, offset);
3183 offset += XMM_REGSIZE_BYTES;
3187 // Fill the remainder (15 bytes or less) if there's one.
3188 if ((size & 0xf) != 0)
3190 #ifdef _TARGET_X86_
3191 if (m_pushStkArg)
3193 // This case is currently supported only for the case where the total size is
3194 // less than XMM_REGSIZE_BYTES. We need to push the remaining chunks in reverse
3195 // order. However, morph has ensured that we have a struct that is an even
3196 // multiple of TARGET_POINTER_SIZE, so we don't need to worry about alignment.
3197 assert(((size & 0xc) == size) && (offset == 0));
3198 // If we have a 4 byte chunk, load it from either offset 0 or 8, depending on
3199 // whether we've got an 8 byte chunk, and then push it on the stack.
3200 unsigned pushedBytes = genMove4IfNeeded(size, intTmpReg, src->gtOp.gtOp1, size & 0x8);
3201 // Now if we have an 8 byte chunk, load it from offset 0 (it's the first chunk)
3202 // and push it on the stack.
3203 pushedBytes += genMove8IfNeeded(size, longTmpReg, src->gtOp.gtOp1, 0);
3205 else
3206 #endif // _TARGET_X86_
3208 offset += genMove8IfNeeded(size, longTmpReg, src->gtOp.gtOp1, offset);
3209 offset += genMove4IfNeeded(size, intTmpReg, src->gtOp.gtOp1, offset);
3210 offset += genMove2IfNeeded(size, intTmpReg, src->gtOp.gtOp1, offset);
3211 offset += genMove1IfNeeded(size, intTmpReg, src->gtOp.gtOp1, offset);
3212 assert(offset == size);
3217 //------------------------------------------------------------------------
3218 // genStructPutArgRepMovs: Generates code for passing a struct arg by value on stack using Rep Movs.
3221 //    putArgNode - the PutArgStk tree.
3224 // The size argument of the PutArgStk (for structs) is a constant and is between
3225 // CPBLK_UNROLL_LIMIT and CPBLK_MOVS_LIMIT bytes.
3226 // m_stkArgVarNum must be set to the base var number, relative to which the by-val struct bits will go.
3228 void CodeGen::genStructPutArgRepMovs(GenTreePutArgStk* putArgNode)
3230 assert(putArgNode->TypeGet() == TYP_STRUCT);
3231 assert(putArgNode->getArgSize() > CPBLK_UNROLL_LIMIT);
3233 // Make sure we got the arguments of the cpblk operation in the right registers
3234 GenTreePtr dstAddr = putArgNode;
3235 GenTreePtr srcAddr = putArgNode->gtGetOp1();
3238 assert(putArgNode->gtRsvdRegs == (RBM_RDI | RBM_RCX | RBM_RSI));
3239 assert(srcAddr->isContained());
3241 genConsumePutStructArgStk(putArgNode, REG_RDI, REG_RSI, REG_RCX);
3242 instGen(INS_r_movsb);
3245 //------------------------------------------------------------------------
3246 // If any Vector3 args are on stack and they are not pass-by-ref, the upper 32 bits
3247 // must be cleared to zeroes. The native compiler doesn't clear the upper bits
3248 // and there is no way to know if the caller is native or not. So, the upper
3249 // 32 bits of a Vector3 argument on the stack are always cleared to zero.
3250 #if defined(FEATURE_UNIX_AMD64_STRUCT_PASSING) && defined(FEATURE_SIMD)
3251 void CodeGen::genClearStackVec3ArgUpperBits()
3256 printf("*************** In genClearStackVec3ArgUpperBits()\n");
3260 assert(compiler->compGeneratingProlog);
3264 for (unsigned varNum = 0; varNum < compiler->info.compArgsCount; varNum++)
3266 LclVarDsc* varDsc = &(compiler->lvaTable[varNum]);
3267 assert(varDsc->lvIsParam);
3269 // Does the var have a SIMD12 type?
3270 if (varDsc->lvType != TYP_SIMD12)
3275 if (!varDsc->lvIsRegArg)
3277 // Clear the upper 32 bits by mov dword ptr [V_ARG_BASE+0xC], 0
3278 getEmitter()->emitIns_S_I(ins_Store(TYP_INT), EA_4BYTE, varNum, genTypeSize(TYP_FLOAT) * 3, 0);
3282 // Assume that for x64 linux, an argument is fully in registers
3283 // or fully on stack.
3284 regNumber argReg = varDsc->GetOtherArgReg();
3286 // Clear the upper 32 bits by two shift instructions.
3287 // argReg = argReg << 96
3288 getEmitter()->emitIns_R_I(INS_pslldq, emitActualTypeSize(TYP_SIMD12), argReg, 12);
3289 // argReg = argReg >> 96
3290 getEmitter()->emitIns_R_I(INS_psrldq, emitActualTypeSize(TYP_SIMD12), argReg, 12);
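// e.g., if the second eightbyte is passed in xmm1 with Z in its low 4 bytes
// (a sketch):
//     pslldq xmm1, 12   ; [garbage | Z] -> [Z | 0...]
//     psrldq xmm1, 12   ; -> [0... | Z], upper 96 bits now zero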
3294 #endif // defined(FEATURE_UNIX_AMD64_STRUCT_PASSING) && defined(FEATURE_SIMD)
3295 #endif // FEATURE_PUT_STRUCT_ARG_STK
3297 // Generate code for CpObj nodes which copy structs that have interleaved
3298 // GC pointers.
3299 // This will generate a sequence of movsp instructions for the cases of non-gc members
3300 // (note that movsp is an alias for movsd on x86 and movsq on x64)
3301 // and calls to the CORINFO_HELP_ASSIGN_BYREF helper otherwise.
3302 void CodeGen::genCodeForCpObj(GenTreeObj* cpObjNode)
3304 // Make sure we got the arguments of the cpobj operation in the right registers
3305 GenTreePtr dstAddr = cpObjNode->Addr();
3306 GenTreePtr source = cpObjNode->Data();
3307 GenTreePtr srcAddr = nullptr;
3308 var_types srcAddrType = TYP_BYREF;
3309 bool sourceIsLocal = false;
3311 assert(source->isContained());
3312 if (source->gtOper == GT_IND)
3314 srcAddr = source->gtGetOp1();
3315 assert(srcAddr->isUsedFromReg());
3319 noway_assert(source->IsLocal());
3320 sourceIsLocal = true;
3323 bool dstOnStack = dstAddr->OperIsLocalAddr();
3327 assert(dstAddr->isUsedFromReg());
3329 // If the GenTree node has data about GC pointers, this means we're dealing
3330 // with CpObj, so this requires special logic.
3331 assert(cpObjNode->gtGcPtrCount > 0);
3333 // MovSp (alias for movsq on x64 and movsd on x86) instruction is used for copying non-gcref fields
3334 // and it needs src = RSI and dst = RDI.
3335 // Either these registers must not contain lclVars, or they must be dying or marked for spill.
3336 // This is because these registers are incremented as we go through the struct.
3339 GenTree* actualSrcAddr = srcAddr->gtSkipReloadOrCopy();
3340 GenTree* actualDstAddr = dstAddr->gtSkipReloadOrCopy();
3341 unsigned srcLclVarNum = BAD_VAR_NUM;
3342 unsigned dstLclVarNum = BAD_VAR_NUM;
3343 bool isSrcAddrLiveOut = false;
3344 bool isDstAddrLiveOut = false;
3345 if (genIsRegCandidateLocal(actualSrcAddr))
3347 srcLclVarNum = actualSrcAddr->AsLclVarCommon()->gtLclNum;
3348 isSrcAddrLiveOut = ((actualSrcAddr->gtFlags & (GTF_VAR_DEATH | GTF_SPILL)) == 0);
3350 if (genIsRegCandidateLocal(actualDstAddr))
3352 dstLclVarNum = actualDstAddr->AsLclVarCommon()->gtLclNum;
3353 isDstAddrLiveOut = ((actualDstAddr->gtFlags & (GTF_VAR_DEATH | GTF_SPILL)) == 0);
3355 assert((actualSrcAddr->gtRegNum != REG_RSI) || !isSrcAddrLiveOut ||
3356 ((srcLclVarNum == dstLclVarNum) && !isDstAddrLiveOut));
3357 assert((actualDstAddr->gtRegNum != REG_RDI) || !isDstAddrLiveOut ||
3358 ((srcLclVarNum == dstLclVarNum) && !isSrcAddrLiveOut));
3359 srcAddrType = srcAddr->TypeGet();
3363 // Consume the operands and get them into the right registers.
3364 // They may now contain gc pointers (depending on their type; gcMarkRegPtrVal will "do the right thing").
3365 genConsumeBlockOp(cpObjNode, REG_RDI, REG_RSI, REG_NA);
3366 gcInfo.gcMarkRegPtrVal(REG_RSI, srcAddrType);
3367 gcInfo.gcMarkRegPtrVal(REG_RDI, dstAddr->TypeGet());
3369 unsigned slots = cpObjNode->gtSlots;
3371 // If we can prove it's on the stack we don't need to use the write barrier.
3372 if (dstOnStack)
3374 if (slots >= CPOBJ_NONGC_SLOTS_LIMIT)
3376 // If the destination of the CpObj is on the stack, make sure we allocated
3377 // RCX to emit the movsp (alias for movsd or movsq for 32 and 64 bits respectively).
3378 assert((cpObjNode->gtRsvdRegs & RBM_RCX) != 0);
3380 getEmitter()->emitIns_R_I(INS_mov, EA_4BYTE, REG_RCX, slots);
3381 instGen(INS_r_movsp);
3385 // For small structs, it's better to emit a sequence of movsp than to
3386 // emit a rep movsp instruction.
3387 while (slots > 0)
3389 instGen(INS_movsp);
3390 slots--;
3394 else
3396 BYTE* gcPtrs = cpObjNode->gtGcPtrs;
3397 unsigned gcPtrCount = cpObjNode->gtGcPtrCount;
3399 unsigned i = 0;
3400 while (i < slots)
3405 // Let's see if we can use rep movsp instead of a sequence of movsp instructions
3406 // to save cycles and code size.
3408 unsigned nonGcSlotCount = 0;
3410 do
3412 nonGcSlotCount++;
3413 i++;
3414 } while (i < slots && gcPtrs[i] == TYPE_GC_NONE);
3416 // If we have a very small contiguous non-gc region, it's better just to
3417 // emit a sequence of movsp instructions
3418 if (nonGcSlotCount < CPOBJ_NONGC_SLOTS_LIMIT)
3420 while (nonGcSlotCount > 0)
3422 instGen(INS_movsp);
3423 nonGcSlotCount--;
3428 // Otherwise, we can save code-size and improve CQ by emitting
3429 // rep movsp (alias for movsd/movsq for x86/x64)
3430 assert((cpObjNode->gtRsvdRegs & RBM_RCX) != 0);
3432 getEmitter()->emitIns_R_I(INS_mov, EA_4BYTE, REG_RCX, nonGcSlotCount);
3433 instGen(INS_r_movsp);
3438 // We have a GC pointer, call the byref write barrier helper.
3439 genEmitHelperCall(CORINFO_HELP_ASSIGN_BYREF, 0, EA_PTRSIZE);
3445 assert(gcPtrCount == 0);
3448 // Clear the gcInfo for RSI and RDI.
3449 // While we normally update GC info prior to the last instruction that uses them,
3450 // these actually live into the helper call.
3451 gcInfo.gcMarkRegSetNpt(RBM_RSI);
3452 gcInfo.gcMarkRegSetNpt(RBM_RDI);
3455 // Generate code for a CpBlk node by the means of the VM memcpy helper call
3457 // a) The size argument of the CpBlk is not an integer constant
3458 // b) The size argument is a constant but is larger than CPBLK_MOVS_LIMIT bytes.
3459 void CodeGen::genCodeForCpBlk(GenTreeBlk* cpBlkNode)
3461 #ifdef _TARGET_AMD64_
3462 // Make sure we got the arguments of the cpblk operation in the right registers
3463 unsigned blockSize = cpBlkNode->Size();
3464 GenTreePtr dstAddr = cpBlkNode->Addr();
3465 GenTreePtr source = cpBlkNode->Data();
3466 GenTreePtr srcAddr = nullptr;
3468 // Size goes in arg2
3469 if (blockSize != 0)
3471 assert(blockSize >= CPBLK_MOVS_LIMIT);
3472 assert((cpBlkNode->gtRsvdRegs & RBM_ARG_2) != 0);
3473 genSetRegToIcon(REG_ARG_2, blockSize);
3475 else
3476 noway_assert(cpBlkNode->gtOper == GT_STORE_DYN_BLK);
3479 // Source address goes in arg1
3480 if (source->gtOper == GT_IND)
3482 srcAddr = source->gtGetOp1();
3483 assert(srcAddr->isUsedFromReg());
3485 else
3487 noway_assert(source->IsLocal());
3488 assert((cpBlkNode->gtRsvdRegs & RBM_ARG_1) != 0);
3489 inst_RV_TT(INS_lea, REG_ARG_1, source, 0, EA_BYREF);
3492 genConsumeBlockOp(cpBlkNode, REG_ARG_0, REG_ARG_1, REG_ARG_2);
3494 genEmitHelperCall(CORINFO_HELP_MEMCPY, 0, EA_UNKNOWN);
3495 #else // !_TARGET_AMD64_
3496 noway_assert(false && "Helper call for CpBlk is not needed.");
3497 #endif // !_TARGET_AMD64_
3500 // generate code to do a switch statement based on a table of ip-relative offsets
3501 void CodeGen::genTableBasedSwitch(GenTree* treeNode)
3503 genConsumeOperands(treeNode->AsOp());
3504 regNumber idxReg = treeNode->gtOp.gtOp1->gtRegNum;
3505 regNumber baseReg = treeNode->gtOp.gtOp2->gtRegNum;
3507 regNumber tmpReg = treeNode->GetSingleTempReg();
3509 // load the ip-relative offset (which is relative to start of fgFirstBB)
3510 getEmitter()->emitIns_R_ARX(INS_mov, EA_4BYTE, baseReg, baseReg, idxReg, 4, 0);
3512 // add it to the absolute address of fgFirstBB
3513 compiler->fgFirstBB->bbFlags |= BBF_JMP_TARGET;
3514 getEmitter()->emitIns_R_L(INS_lea, EA_PTR_DSP_RELOC, compiler->fgFirstBB, tmpReg);
3515 getEmitter()->emitIns_R_R(INS_add, EA_PTRSIZE, baseReg, tmpReg);
3517 getEmitter()->emitIns_R(INS_i_jmp, emitTypeSize(TYP_I_IMPL), baseReg);
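// A sketch of the emitted sequence, assuming idxReg = RAX, baseReg = RDX and
// tmpReg = RCX (register assignments are illustrative):
//     mov edx, dword ptr [rdx + rax*4]   ; load the ip-relative offset
//     lea rcx, [<fgFirstBB>]             ; absolute address of the method start
//     add rdx, rcx
//     jmp rdx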
3520 // emits the table and an instruction to get the address of the first element
3521 void CodeGen::genJumpTable(GenTree* treeNode)
3523 noway_assert(compiler->compCurBB->bbJumpKind == BBJ_SWITCH);
3524 assert(treeNode->OperGet() == GT_JMPTABLE);
3526 unsigned jumpCount = compiler->compCurBB->bbJumpSwt->bbsCount;
3527 BasicBlock** jumpTable = compiler->compCurBB->bbJumpSwt->bbsDstTab;
3528 unsigned jmpTabOffs;
3529 unsigned jmpTabBase;
3531 jmpTabBase = getEmitter()->emitBBTableDataGenBeg(jumpCount, true);
3535 JITDUMP("\n J_M%03u_DS%02u LABEL DWORD\n", Compiler::s_compMethodsCount, jmpTabBase);
3537 for (unsigned i = 0; i < jumpCount; i++)
3539 BasicBlock* target = *jumpTable++;
3540 noway_assert(target->bbFlags & BBF_JMP_TARGET);
3542 JITDUMP(" DD L_M%03u_BB%02u\n", Compiler::s_compMethodsCount, target->bbNum);
3544 getEmitter()->emitDataGenData(i, target);
3547 getEmitter()->emitDataGenEnd();
3549 // Access to inline data is 'abstracted' by a special type of static member
3550 // (produced by eeFindJitDataOffs) which the emitter recognizes as being a reference
3551 // to constant data, not a real static field.
3552 getEmitter()->emitIns_R_C(INS_lea, emitTypeSize(TYP_I_IMPL), treeNode->gtRegNum,
3553 compiler->eeFindJitDataOffs(jmpTabBase), 0);
3554 genProduceReg(treeNode);
3557 // generate code for the locked operations:
3558 // GT_LOCKADD, GT_XCHG, GT_XADD
3559 void CodeGen::genLockedInstructions(GenTreeOp* treeNode)
3561 GenTree* data = treeNode->gtOp.gtOp2;
3562 GenTree* addr = treeNode->gtOp.gtOp1;
3563 regNumber targetReg = treeNode->gtRegNum;
3564 regNumber dataReg = data->gtRegNum;
3565 regNumber addrReg = addr->gtRegNum;
3566 var_types type = genActualType(data->TypeGet());
3569 // The register allocator should have extended the lifetime of the address
3570 // so that it is not used as the target.
3571 noway_assert(addrReg != targetReg);
3573 // If data is a lclVar that's not a last use, we'd better have allocated a register
3574 // for the result (except in the case of GT_LOCKADD which does not produce a register result).
3575 assert(targetReg != REG_NA || treeNode->OperGet() == GT_LOCKADD || !genIsRegCandidateLocal(data) ||
3576 (data->gtFlags & GTF_VAR_DEATH) != 0);
3578 genConsumeOperands(treeNode);
3579 if (targetReg != REG_NA && dataReg != REG_NA && dataReg != targetReg)
3581 inst_RV_RV(ins_Copy(type), targetReg, dataReg);
3582 data->gtRegNum = targetReg;
3584 // TODO-XArch-Cleanup: Consider whether it is worth it, for debugging purposes, to restore the
3585 // original gtRegNum on data, after calling emitInsBinary below.
3587 switch (treeNode->OperGet())
3594 // lock is implied by xchg
3605 // all of these nodes implicitly do an indirection on op1
3606 // so create a temporary node to feed into the pattern matching
3607 GenTreeIndir i = indirForm(type, addr);
3608 getEmitter()->emitInsBinary(ins, emitTypeSize(type), &i, data);
3610 if (treeNode->gtRegNum != REG_NA)
3612 genProduceReg(treeNode);
3616 //------------------------------------------------------------------------
3617 // genCodeForCmpXchg: Produce code for a GT_CMPXCHG node.
3620 // tree - the GT_CMPXCHG node
3622 void CodeGen::genCodeForCmpXchg(GenTreeCmpXchg* tree)
3624 assert(tree->OperIs(GT_CMPXCHG));
3626 var_types targetType = tree->TypeGet();
3627 regNumber targetReg = tree->gtRegNum;
3629 GenTreePtr location = tree->gtOpLocation; // arg1
3630 GenTreePtr value = tree->gtOpValue; // arg2
3631 GenTreePtr comparand = tree->gtOpComparand; // arg3
3633 assert(location->gtRegNum != REG_NA && location->gtRegNum != REG_RAX);
3634 assert(value->gtRegNum != REG_NA && value->gtRegNum != REG_RAX);
3636 genConsumeReg(location);
3637 genConsumeReg(value);
3638 genConsumeReg(comparand);
3640 // comparand goes to RAX;
3641 // Note that we must issue this move after the genConsumeRegs(), in case any of the above
3642 // have a GT_COPY from RAX.
3643 if (comparand->gtRegNum != REG_RAX)
3645 inst_RV_RV(ins_Copy(comparand->TypeGet()), REG_RAX, comparand->gtRegNum, comparand->TypeGet());
3651 getEmitter()->emitIns_AR_R(INS_cmpxchg, emitTypeSize(targetType), value->gtRegNum, location->gtRegNum, 0);
3654 if (targetReg != REG_RAX)
3656 inst_RV_RV(ins_Copy(targetType), targetReg, REG_RAX, targetType);
3659 genProduceReg(tree);
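// A sketch of the core of the sequence above (location in RCX, value in RDX):
//     mov  rax, <comparand>
//     lock cmpxchg [rcx], rdx   ; if RAX == [rcx] then [rcx] = rdx, else RAX = [rcx]
// The old value is then copied from RAX to targetReg if they differ.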
3662 // generate code for BoundsCheck nodes
3663 void CodeGen::genRangeCheck(GenTreePtr oper)
3665 #ifdef FEATURE_SIMD
3666 noway_assert(oper->OperGet() == GT_ARR_BOUNDS_CHECK || oper->OperGet() == GT_SIMD_CHK);
3667 #else // !FEATURE_SIMD
3668 noway_assert(oper->OperGet() == GT_ARR_BOUNDS_CHECK);
3669 #endif // !FEATURE_SIMD
3671 GenTreeBoundsChk* bndsChk = oper->AsBoundsChk();
3673 GenTreePtr arrIndex = bndsChk->gtIndex;
3674 GenTreePtr arrLen = bndsChk->gtArrLen;
3675 GenTreePtr arrRef = nullptr;
3678 GenTree * src1, *src2;
3679 emitJumpKind jmpKind;
3681 genConsumeRegs(arrIndex);
3682 genConsumeRegs(arrLen);
3684 if (arrIndex->isContainedIntOrIImmed())
3686 // arrIndex is a contained constant. In this case
3687 // we will generate one of the following
3688 // cmp [mem], immed (if arrLen is a memory op)
3689 // cmp reg, immed (if arrLen is in a reg)
3691 // That is arrLen cannot be a contained immed.
3692 assert(!arrLen->isContainedIntOrIImmed());
3700 // arrIndex could either be a contained memory op or a reg
3701 // In this case we will generate one of the following
3702 // cmp [mem], immed (if arrLen is a constant)
3703 // cmp [mem], reg (if arrLen is in a reg)
3704 // cmp reg, immed (if arrIndex is in a reg)
3705 // cmp reg1, reg2 (if arrIndex is in reg1)
3706 // cmp reg, [mem] (if arrLen is a memory op)
3708 // That is only one of arrIndex or arrLen can be a memory op.
3709 assert(!arrIndex->isUsedFromMemory() || !arrLen->isUsedFromMemory());
3716 var_types bndsChkType = src2->TypeGet();
3718 // Bounds checks can only be 32 or 64 bit sized comparisons.
3719 assert(bndsChkType == TYP_INT || bndsChkType == TYP_LONG);
3721 // The type of the bounds check should always be wide enough to compare against the index.
3722 assert(emitTypeSize(bndsChkType) >= emitTypeSize(src1->TypeGet()));
3725 getEmitter()->emitInsBinary(INS_cmp, emitTypeSize(bndsChkType), src1, src2);
3726 genJumpToThrowHlpBlk(jmpKind, bndsChk->gtThrowKind, bndsChk->gtIndRngFailBB);
3729 //---------------------------------------------------------------------
3730 // genCodeForPhysReg - generate code for a GT_PHYSREG node
3733 // tree - the GT_PHYSREG node
3738 void CodeGen::genCodeForPhysReg(GenTreePhysReg* tree)
3740 assert(tree->OperIs(GT_PHYSREG));
3742 var_types targetType = tree->TypeGet();
3743 regNumber targetReg = tree->gtRegNum;
3745 if (targetReg != tree->gtSrcReg)
3747 inst_RV_RV(ins_Copy(targetType), targetReg, tree->gtSrcReg, targetType);
3748 genTransferRegGCState(targetReg, tree->gtSrcReg);
3751 genProduceReg(tree);
3754 //---------------------------------------------------------------------
3755 // genCodeForNullCheck - generate code for a GT_NULLCHECK node
3758 // tree - the GT_NULLCHECK node
3763 void CodeGen::genCodeForNullCheck(GenTreeOp* tree)
3765 assert(tree->OperIs(GT_NULLCHECK));
3767 assert(tree->gtOp1->isUsedFromReg());
3768 regNumber reg = genConsumeReg(tree->gtOp1);
3769 getEmitter()->emitIns_AR_R(INS_cmp, EA_4BYTE, reg, reg, 0);
3772 //------------------------------------------------------------------------
3773 // genOffsetOfMDArrayLowerBound: Returns the offset from the Array object to the
3774 // lower bound for the given dimension.
3777 // elemType - the element type of the array
3778 // rank - the rank of the array
3779 // dimension - the dimension for which the lower bound offset will be returned.
3784 unsigned CodeGen::genOffsetOfMDArrayLowerBound(var_types elemType, unsigned rank, unsigned dimension)
3786 // Note that the lower bound and length fields of the Array object are always TYP_INT, even on 64-bit targets.
3787 return compiler->eeGetArrayDataOffset(elemType) + genTypeSize(TYP_INT) * (dimension + rank);
3790 //------------------------------------------------------------------------
3791 // genOffsetOfMDArrayDimensionSize: Returns the offset from the Array object to the
3792 // size for the given dimension.
3795 // elemType - the element type of the array
3796 // rank - the rank of the array
3797 // dimension - the dimension for which the length offset will be returned.
3802 unsigned CodeGen::genOffsetOfMDArrayDimensionSize(var_types elemType, unsigned rank, unsigned dimension)
3804 // Note that the lower bound and length fields of the Array object are always TYP_INT, even on 64-bit targets.
3805 return compiler->eeGetArrayDataOffset(elemType) + genTypeSize(TYP_INT) * dimension;
3808 //------------------------------------------------------------------------
3809 // genCodeForArrIndex: Generates code to bounds check the index for one dimension of an array reference,
3810 // producing the effective index by subtracting the lower bound.
3813 // arrIndex - the node for which we're generating code
3819 void CodeGen::genCodeForArrIndex(GenTreeArrIndex* arrIndex)
3821 GenTreePtr arrObj = arrIndex->ArrObj();
3822 GenTreePtr indexNode = arrIndex->IndexExpr();
3824 regNumber arrReg = genConsumeReg(arrObj);
3825 regNumber indexReg = genConsumeReg(indexNode);
3826 regNumber tgtReg = arrIndex->gtRegNum;
3828 unsigned dim = arrIndex->gtCurrDim;
3829 unsigned rank = arrIndex->gtArrRank;
3830 var_types elemType = arrIndex->gtArrElemType;
3832 noway_assert(tgtReg != REG_NA);
3834 // Subtract the lower bound for this dimension.
3835 // TODO-XArch-CQ: make this contained if it's an immediate that fits.
3836 if (tgtReg != indexReg)
3838 inst_RV_RV(INS_mov, tgtReg, indexReg, indexNode->TypeGet());
3840 getEmitter()->emitIns_R_AR(INS_sub, emitActualTypeSize(TYP_INT), tgtReg, arrReg,
3841 genOffsetOfMDArrayLowerBound(elemType, rank, dim));
3842 getEmitter()->emitIns_R_AR(INS_cmp, emitActualTypeSize(TYP_INT), tgtReg, arrReg,
3843 genOffsetOfMDArrayDimensionSize(elemType, rank, dim));
3844 genJumpToThrowHlpBlk(EJ_jae, SCK_RNGCHK_FAIL);
3846 genProduceReg(arrIndex);
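// A sketch of the sequence emitted above (tgtReg ends up holding the
// effective, zero-based index):
//     mov tgtReg, indexReg
//     sub tgtReg, dword ptr [arrReg + <lower bound offset>]
//     cmp tgtReg, dword ptr [arrReg + <dimension size offset>]
//     jae <range check fail block>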
3849 //------------------------------------------------------------------------
3850 // genCodeForArrOffset: Generates code to compute the flattened array offset for
3851 // one dimension of an array reference:
3852 // result = (prevDimOffset * dimSize) + effectiveIndex
3853 // where dimSize is obtained from the arrObj operand
3856 // arrOffset - the node for which we're generating code
3862 // dimSize and effectiveIndex are always non-negative, the former by design,
3863 // and the latter because it has been normalized to be zero-based.
3865 void CodeGen::genCodeForArrOffset(GenTreeArrOffs* arrOffset)
3867 GenTreePtr offsetNode = arrOffset->gtOffset;
3868 GenTreePtr indexNode = arrOffset->gtIndex;
3869 GenTreePtr arrObj = arrOffset->gtArrObj;
3871 regNumber tgtReg = arrOffset->gtRegNum;
3872 assert(tgtReg != REG_NA);
3874 unsigned dim = arrOffset->gtCurrDim;
3875 unsigned rank = arrOffset->gtArrRank;
3876 var_types elemType = arrOffset->gtArrElemType;
3878 // First, consume the operands in the correct order.
3879 regNumber offsetReg = REG_NA;
3880 regNumber tmpReg = REG_NA;
3881 if (!offsetNode->IsIntegralConst(0))
3883 offsetReg = genConsumeReg(offsetNode);
3885 // We will use a temp register for the offset*scale+effectiveIndex computation.
3886 tmpReg = arrOffset->GetSingleTempReg();
3890 assert(offsetNode->isContained());
3892 regNumber indexReg = genConsumeReg(indexNode);
3893 // Although arrReg may not be used in the constant-index case, if we have generated
3894 // the value into a register, we must consume it, otherwise we will fail to end the
3895 // live range of the gc ptr.
3896 // TODO-CQ: Currently arrObj will always have a register allocated to it.
3897 // We could avoid allocating a register for it, which would be of value if the arrObj
3898 // is an on-stack lclVar.
3899 regNumber arrReg = REG_NA;
3900 if (arrObj->gtHasReg())
3902 arrReg = genConsumeReg(arrObj);
3905 if (!offsetNode->IsIntegralConst(0))
3907 assert(tmpReg != REG_NA);
3908 assert(arrReg != REG_NA);
3910 // Evaluate tgtReg = offsetReg*dim_size + indexReg.
3911 // tmpReg is used to load dim_size and the result of the multiplication.
3912 // Note that dim_size will never be negative.
3914 getEmitter()->emitIns_R_AR(INS_mov, emitActualTypeSize(TYP_INT), tmpReg, arrReg,
3915 genOffsetOfMDArrayDimensionSize(elemType, rank, dim));
3916 inst_RV_RV(INS_imul, tmpReg, offsetReg);
3918 if (tmpReg == tgtReg)
3920 inst_RV_RV(INS_add, tmpReg, indexReg);
3924 if (indexReg != tgtReg)
3926 inst_RV_RV(INS_mov, tgtReg, indexReg, TYP_I_IMPL);
3928 inst_RV_RV(INS_add, tgtReg, tmpReg);
3933 if (indexReg != tgtReg)
3935 inst_RV_RV(INS_mov, tgtReg, indexReg, TYP_INT);
3938 genProduceReg(arrOffset);
3941 // make a temporary indir we can feed to pattern matching routines
3942 // in cases where we don't want to instantiate all the indir nodes that would otherwise be created
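// For example (an illustrative caller, not taken from this file):
//
//     GenTreeIndir tmpIndir = indirForm(TYP_INT, baseNode);
//     // ... hand &tmpIndir to an emitter routine that pattern-matches indirections ...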
3944 GenTreeIndir CodeGen::indirForm(var_types type, GenTree* base)
3946 GenTreeIndir i(GT_IND, type, base, nullptr);
3947 i.gtRegNum = REG_NA;
3948 // gtNext has to be non-null (because contained nodes can't be the last in block)
3949 // but we don't want it to be a valid pointer
3950 i.gtNext = (GenTree*)(-1);
3951 return i;
3954 // make a temporary int constant we can feed to pattern matching routines
3955 // in cases where we don't want to instantiate an int constant node
3957 GenTreeIntCon CodeGen::intForm(var_types type, ssize_t value)
3959 GenTreeIntCon i(type, value);
3960 i.gtRegNum = REG_NA;
3961 // gtNext has to be non-null (because contained nodes can't be the last in block)
3962 // but we don't want it to be a valid pointer
3963 i.gtNext = (GenTree*)(-1);
3964 return i;
3967 instruction CodeGen::genGetInsForOper(genTreeOps oper, var_types type)
3971 // Operations on SIMD vectors shouldn't come down this path
3972 assert(!varTypeIsSIMD(type));
3973 if (varTypeIsFloating(type))
3975 return ins_MathOp(oper, type);
4019 #if !defined(_TARGET_64BIT_)
4038 #endif // !defined(_TARGET_64BIT_)
4046 //------------------------------------------------------------------------
4047 // genCodeForShift: Generates the code sequence for a GenTree node that
4048 // represents a bit shift or rotate operation (<<, >>, >>>, rol, ror).
4051 //    tree - the bit shift node (that specifies the type of bit shift to perform).
4054 //    a) All GenTrees are register allocated.
4055 //    b) The shift-by-amount in tree->gtOp.gtOp2 is either a contained constant or
4056 //       it's a register-allocated expression. If it is in a register that is
4057 //       not RCX, it will be moved to RCX (so RCX better not be in use!).
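//
// Notes:
//    Illustrative emitted sequences, using shl for a GT_LSH (register names are placeholders):
//
//        ; shift-by amount is a contained constant:
//        mov  dstReg, srcReg        ; if not already there
//        shl  dstReg, 5
//
//        ; shift-by amount is in a register:
//        mov  ecx, shiftAmountReg   ; if not already there
//        shl  dstReg, cl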
4059 void CodeGen::genCodeForShift(GenTreePtr tree)
4061 // Only the non-RMW case here.
4062 assert(tree->OperIsShiftOrRotate());
4063 assert(tree->gtOp.gtOp1->isUsedFromReg());
4064 assert(tree->gtRegNum != REG_NA);
4066 genConsumeOperands(tree->AsOp());
4068 var_types targetType = tree->TypeGet();
4069 instruction ins = genGetInsForOper(tree->OperGet(), targetType);
4071 GenTreePtr operand = tree->gtGetOp1();
4072 regNumber operandReg = operand->gtRegNum;
4074 GenTreePtr shiftBy = tree->gtGetOp2();
4076 if (shiftBy->isContainedIntOrIImmed())
4078 // First, move the operand to the destination register and
4079 // later on perform the shift in-place.
4080 // (LSRA will try to avoid this situation through preferencing.)
4081 if (tree->gtRegNum != operandReg)
4083 inst_RV_RV(INS_mov, tree->gtRegNum, operandReg, targetType);
4086 int shiftByValue = (int)shiftBy->AsIntConCommon()->IconValue();
4087 inst_RV_SH(ins, emitTypeSize(tree), tree->gtRegNum, shiftByValue);
4091 // The number of bits to shift must be in ECX, since we constrained this node to
4092 // sit in ECX. If the shift amount isn't there already, LSRA expects the code generator
4093 // to move it, since ECX is a fixed single-register requirement.
4094 genCopyRegIfNeeded(shiftBy, REG_RCX);
4096 // The operand to be shifted must not be in ECX
4097 noway_assert(operandReg != REG_RCX);
4099 if (tree->gtRegNum != operandReg)
4101 inst_RV_RV(INS_mov, tree->gtRegNum, operandReg, targetType);
4103 inst_RV_CL(ins, tree->gtRegNum, targetType);
4106 genProduceReg(tree);
4110 //------------------------------------------------------------------------
4111 // genCodeForShiftLong: Generates the code sequence for a GenTree node that
4112 // represents a three operand bit shift or rotate operation (<<Hi, >>Lo).
4115 //    tree - the bit shift node (that specifies the type of bit shift to perform).
4118 //    a) All GenTrees are register allocated.
4119 //    b) The shift-by-amount in tree->gtOp.gtOp2 is a contained constant
4121 // TODO-X86-CQ: This only handles the case where the operand being shifted is in a register. We don't
4122 // need sourceHi to be always in reg in case of GT_LSH_HI (because it could be moved from memory to
4123 // targetReg if sourceHi is a memory operand). Similarly for GT_RSH_LO, sourceLo could be marked as
4124 // contained memory-op. Even if not a memory-op, we could mark it as reg-optional.
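//
//    Illustrative (x86 only): assuming genGetInsForOper maps GT_LSH_HI/GT_RSH_LO to shld/shrd,
//    with the count a contained constant, this emits something like:
//
//        shld targetReg, regLo, count     ; GT_LSH_HI (targetReg holds the hi half)
//        shrd targetReg, regHi, count     ; GT_RSH_LO (targetReg holds the lo half)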
4126 void CodeGen::genCodeForShiftLong(GenTreePtr tree)
4128 // Only the non-RMW case here.
4129 genTreeOps oper = tree->OperGet();
4130 assert(oper == GT_LSH_HI || oper == GT_RSH_LO);
4132 GenTree* operand = tree->gtOp.gtOp1;
4133 assert(operand->OperGet() == GT_LONG);
4134 assert(operand->gtOp.gtOp1->isUsedFromReg());
4135 assert(operand->gtOp.gtOp2->isUsedFromReg());
4137 GenTree* operandLo = operand->gtGetOp1();
4138 GenTree* operandHi = operand->gtGetOp2();
4140 regNumber regLo = operandLo->gtRegNum;
4141 regNumber regHi = operandHi->gtRegNum;
4143 genConsumeOperands(tree->AsOp());
4145 var_types targetType = tree->TypeGet();
4146 instruction ins = genGetInsForOper(oper, targetType);
4148 GenTreePtr shiftBy = tree->gtGetOp2();
4150 assert(shiftBy->isContainedIntOrIImmed());
4152 unsigned int count = shiftBy->AsIntConCommon()->IconValue();
4154 regNumber regResult = (oper == GT_LSH_HI) ? regHi : regLo;
4156 if (regResult != tree->gtRegNum)
4158 inst_RV_RV(INS_mov, tree->gtRegNum, regResult, targetType);
4161 if (oper == GT_LSH_HI)
4163 inst_RV_RV_IV(ins, emitTypeSize(targetType), tree->gtRegNum, regLo, count);
4167 assert(oper == GT_RSH_LO);
4168 inst_RV_RV_IV(ins, emitTypeSize(targetType), tree->gtRegNum, regHi, count);
4171 genProduceReg(tree);
4175 //------------------------------------------------------------------------
4176 // genCodeForShiftRMW: Generates the code sequence for a GT_STOREIND GenTree node that
4177 // represents a RMW bit shift or rotate operation (<<, >>, >>>, rol, ror), for example:
4178 // GT_STOREIND( AddressTree, GT_SHL( Ind ( AddressTree ), Operand ) )
4181 //    storeInd - the GT_STOREIND node.
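//
// Notes:
//    Illustrative emitted forms, using shl as the example oper:
//
//        shl  dword ptr [addrMode], 1     ; shift-by-1 embeds the count in the opcode
//        shl  dword ptr [addrMode], 5     ; other contained constant amounts
//        shl  dword ptr [addrMode], cl    ; variable amount, moved into ECX first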
4183 void CodeGen::genCodeForShiftRMW(GenTreeStoreInd* storeInd)
4185 GenTree* data = storeInd->Data();
4186 GenTree* addr = storeInd->Addr();
4188 assert(data->OperIsShiftOrRotate());
4190 // This function only handles the RMW case.
4191 assert(data->gtOp.gtOp1->isUsedFromMemory());
4192 assert(data->gtOp.gtOp1->isIndir());
4193 assert(Lowering::IndirsAreEquivalent(data->gtOp.gtOp1, storeInd));
4194 assert(data->gtRegNum == REG_NA);
4196 var_types targetType = data->TypeGet();
4197 genTreeOps oper = data->OperGet();
4198 instruction ins = genGetInsForOper(oper, targetType);
4199 emitAttr attr = EA_ATTR(genTypeSize(targetType));
4201 GenTree* shiftBy = data->gtOp.gtOp2;
4202 if (shiftBy->isContainedIntOrIImmed())
4204 int shiftByValue = (int)shiftBy->AsIntConCommon()->IconValue();
4205 ins = genMapShiftInsToShiftByConstantIns(ins, shiftByValue);
4206 if (shiftByValue == 1)
4208 // There is no source in this case, as the shift by count is embedded in the instruction opcode itself.
4209 getEmitter()->emitInsRMW(ins, attr, storeInd);
4213 getEmitter()->emitInsRMW(ins, attr, storeInd, shiftBy);
4218 // The number of bits to shift must be in ECX, since we constrained this node to
4219 // sit in ECX. If the shift amount isn't there already, LSRA expects the code generator
4220 // to move it, since ECX is a fixed single-register requirement.
4222 genCopyRegIfNeeded(shiftBy, REG_RCX);
4224 // The shiftBy operand is implicit, so call the unary version of emitInsRMW.
4225 getEmitter()->emitInsRMW(ins, attr, storeInd);
4229 //------------------------------------------------------------------------
4230 // genCodeForLclAddr: Generates the code for GT_LCL_FLD_ADDR/GT_LCL_VAR_ADDR.
4235 void CodeGen::genCodeForLclAddr(GenTree* tree)
4237 assert(tree->OperIs(GT_LCL_FLD_ADDR, GT_LCL_VAR_ADDR));
4239 var_types targetType = tree->TypeGet();
4240 regNumber targetReg = tree->gtRegNum;
4242 // Address of a local var. This by itself should never be allocated a register.
4243 // If it is worth storing the address in a register then it should be cse'ed into
4244 // a temp and that would be allocated a register.
4245 noway_assert(targetType == TYP_BYREF);
4246 noway_assert(!tree->InReg());
4248 inst_RV_TT(INS_lea, targetReg, tree, 0, EA_BYREF);
4249 genProduceReg(tree);
4252 //------------------------------------------------------------------------
4253 // genCodeForLclFld: Produce code for a GT_LCL_FLD node.
4256 //    tree - the GT_LCL_FLD node
4258 void CodeGen::genCodeForLclFld(GenTreeLclFld* tree)
4260 assert(tree->OperIs(GT_LCL_FLD));
4262 var_types targetType = tree->TypeGet();
4263 regNumber targetReg = tree->gtRegNum;
4265 noway_assert(targetReg != REG_NA);
4268 // Loading of TYP_SIMD12 (i.e. Vector3) field
4269 if (targetType == TYP_SIMD12)
4271 genLoadLclTypeSIMD12(tree);
4276 noway_assert(targetType != TYP_STRUCT);
4278 emitAttr size = emitTypeSize(targetType);
4279 unsigned offs = tree->gtLclOffs;
4280 unsigned varNum = tree->gtLclNum;
4281 assert(varNum < compiler->lvaCount);
4283 getEmitter()->emitIns_R_S(ins_Move_Extend(targetType, tree->InReg()), size, targetReg, varNum, offs);
4285 genProduceReg(tree);
4288 //------------------------------------------------------------------------
4289 // genCodeForLclVar: Produce code for a GT_LCL_VAR node.
4292 //    tree - the GT_LCL_VAR node
4294 void CodeGen::genCodeForLclVar(GenTreeLclVar* tree)
4296 assert(tree->OperIs(GT_LCL_VAR));
4298 // lcl_vars are not defs
4299 assert((tree->gtFlags & GTF_VAR_DEF) == 0);
4301 bool isRegCandidate = compiler->lvaTable[tree->gtLclNum].lvIsRegCandidate();
4303 if (isRegCandidate && !(tree->gtFlags & GTF_VAR_DEATH))
4305 assert(tree->InReg() || (tree->gtFlags & GTF_SPILLED));
4308 // If this is a register candidate that has been spilled, genConsumeReg() will
4309 // reload it at the point of use. Otherwise, if it's not in a register, we load it here.
4311 if (!tree->InReg() && !(tree->gtFlags & GTF_SPILLED))
4313 assert(!isRegCandidate);
4315 #if defined(FEATURE_SIMD) && defined(_TARGET_X86_)
4316 // Loading of TYP_SIMD12 (i.e. Vector3) variable
4317 if (tree->TypeGet() == TYP_SIMD12)
4319 genLoadLclTypeSIMD12(tree);
4322 #endif // defined(FEATURE_SIMD) && defined(_TARGET_X86_)
4324 getEmitter()->emitIns_R_S(ins_Load(tree->TypeGet(), compiler->isSIMDTypeLocalAligned(tree->gtLclNum)),
4325 emitTypeSize(tree), tree->gtRegNum, tree->gtLclNum, 0);
4326 genProduceReg(tree);
4330 //------------------------------------------------------------------------
4331 // genCodeForStoreLclFld: Produce code for a GT_STORE_LCL_FLD node.
4334 //    tree - the GT_STORE_LCL_FLD node
4336 void CodeGen::genCodeForStoreLclFld(GenTreeLclFld* tree)
4338 assert(tree->OperIs(GT_STORE_LCL_FLD));
4340 var_types targetType = tree->TypeGet();
4341 noway_assert(targetType != TYP_STRUCT);
4342 noway_assert(!tree->InReg());
4343 assert(!varTypeIsFloating(targetType) || (targetType == tree->gtOp1->TypeGet()));
4346 // storing of TYP_SIMD12 (i.e. Vector3) field
4347 if (tree->TypeGet() == TYP_SIMD12)
4349 genStoreLclTypeSIMD12(tree);
4352 #endif // FEATURE_SIMD
4354 GenTreePtr op1 = tree->gtGetOp1();
4355 genConsumeRegs(op1);
4356 getEmitter()->emitInsBinary(ins_Store(targetType), emitTypeSize(tree), tree, op1);
4358 genUpdateLife(tree);
4361 //------------------------------------------------------------------------
4362 // genCodeForStoreLclVar: Produce code for a GT_STORE_LCL_VAR node.
4365 //    tree - the GT_STORE_LCL_VAR node
4367 void CodeGen::genCodeForStoreLclVar(GenTreeLclVar* tree)
4369 assert(tree->OperIs(GT_STORE_LCL_VAR));
4371 var_types targetType = tree->TypeGet();
4372 regNumber targetReg = tree->gtRegNum;
4373 emitter* emit = getEmitter();
4375 GenTreePtr op1 = tree->gtGetOp1();
4377 // The "var = call" case, where the call returns a multi-reg return value,
4378 // is handled separately.
4379 if (op1->gtSkipReloadOrCopy()->IsMultiRegCall())
4381 genMultiRegCallStoreToLocal(tree);
4385 noway_assert(targetType != TYP_STRUCT);
4386 assert(!varTypeIsFloating(targetType) || (targetType == op1->TypeGet()));
4388 unsigned lclNum = tree->gtLclNum;
4389 LclVarDsc* varDsc = &(compiler->lvaTable[lclNum]);
4391 // Ensure that lclVar nodes are typed correctly.
4392 assert(!varDsc->lvNormalizeOnStore() || (targetType == genActualType(varDsc->TypeGet())));
4394 #if !defined(_TARGET_64BIT_)
4395 if (targetType == TYP_LONG)
4397 genStoreLongLclVar(tree);
4400 #endif // !defined(_TARGET_64BIT_)
4403 // storing of TYP_SIMD12 (i.e. Vector3) field
4404 if (targetType == TYP_SIMD12)
4406 genStoreLclTypeSIMD12(tree);
4410 if (varTypeIsSIMD(targetType) && (targetReg != REG_NA) && op1->IsCnsIntOrI())
4412 // This is only possible for a zero-init.
4413 noway_assert(op1->IsIntegralConst(0));
4414 genSIMDZero(targetType, varDsc->lvBaseType, targetReg);
4415 genProduceReg(tree);
4418 #endif // FEATURE_SIMD
4420 genConsumeRegs(op1);
4422 if (targetReg == REG_NA)
4425 emit->emitInsMov(ins_Store(targetType, compiler->isSIMDTypeLocalAligned(lclNum)), emitTypeSize(targetType),
4426 tree);
4427 varDsc->lvRegNum = REG_STK;
4431 // Look for the case where we have a constant zero which we've marked for reuse,
4432 // but which isn't actually in the register we want. In that case, it's better to create
4433 // zero in the target register, because an xor is smaller than a copy. Note that we could
4434 // potentially handle this in the register allocator, but we can't always catch it there
4435 // because the target may not have a register allocated for it yet.
4436 if (op1->isUsedFromReg() && (op1->gtRegNum != targetReg) && (op1->IsIntegralConst(0) || op1->IsFPZero()))
4438 op1->gtRegNum = REG_NA;
4439 op1->ResetReuseRegVal();
4442 if (!op1->isUsedFromReg())
4444 // Currently, we assume that the non-reg source of a GT_STORE_LCL_VAR writing to a register
4445 // must be a constant. However, in the future we might want to support an operand used from
4446 // memory. This is a bit tricky because we have to decide it can be used from memory before
4447 // register allocation, and this would be a case where, once that's done, we need to mark
4448 // that node as always requiring a register - which we always assume now anyway, but once
4449 // we "optimize" that we'll have to take cases like this into account.
4451 assert((op1->gtRegNum == REG_NA) && op1->OperIsConst());
4452 genSetRegToConst(targetReg, targetType, op1);
4454 else if (op1->gtRegNum != targetReg)
4456 assert(op1->gtRegNum != REG_NA);
4457 emit->emitInsBinary(ins_Move_Extend(targetType, true), emitTypeSize(tree), tree, op1);
4462 if (targetReg != REG_NA)
4464 genProduceReg(tree);
4468 //------------------------------------------------------------------------
4469 // genCodeForIndir: Produce code for a GT_IND node.
4472 //    tree - the GT_IND node
4474 void CodeGen::genCodeForIndir(GenTreeIndir* tree)
4476 assert(tree->OperIs(GT_IND));
4479 // Handling of Vector3 type values loaded through indirection.
4480 if (tree->TypeGet() == TYP_SIMD12)
4482 genLoadIndTypeSIMD12(tree);
4485 #endif // FEATURE_SIMD
4487 var_types targetType = tree->TypeGet();
4488 emitter* emit = getEmitter();
4490 GenTree* addr = tree->Addr();
4491 if (addr->IsCnsIntOrI() && addr->IsIconHandle(GTF_ICON_TLS_HDL))
4493 noway_assert(EA_ATTR(genTypeSize(targetType)) == EA_PTRSIZE);
4494 emit->emitIns_R_C(ins_Load(TYP_I_IMPL), EA_PTRSIZE, tree->gtRegNum, FLD_GLOBAL_FS,
4495 (int)addr->gtIntCon.gtIconVal);
4499 genConsumeAddress(addr);
4500 emit->emitInsMov(ins_Load(targetType), emitTypeSize(tree), tree);
4503 genProduceReg(tree);
4506 void CodeGen::genRegCopy(GenTree* treeNode)
4508 assert(treeNode->OperGet() == GT_COPY);
4509 GenTree* op1 = treeNode->gtOp.gtOp1;
4511 if (op1->IsMultiRegCall())
4515 GenTreeCopyOrReload* copyTree = treeNode->AsCopyOrReload();
4516 GenTreeCall* call = op1->AsCall();
4517 ReturnTypeDesc* retTypeDesc = call->GetReturnTypeDesc();
4518 unsigned regCount = retTypeDesc->GetReturnRegCount();
4520 for (unsigned i = 0; i < regCount; ++i)
4522 var_types type = retTypeDesc->GetReturnRegType(i);
4523 regNumber fromReg = call->GetRegNumByIdx(i);
4524 regNumber toReg = copyTree->GetRegNumByIdx(i);
4526 // A multi-reg GT_COPY node will have a valid reg only for those positions
4527 // whose corresponding result reg of the call node needs to be copied.
4529 if (toReg != REG_NA)
4531 assert(toReg != fromReg);
4532 inst_RV_RV(ins_Copy(type), toReg, fromReg, type);
4538 var_types targetType = treeNode->TypeGet();
4539 regNumber targetReg = treeNode->gtRegNum;
4540 assert(targetReg != REG_NA);
4542 // Check whether this node and the node from which we're copying the value have
4543 // different register types. This can happen if (currently iff) we have a SIMD
4544 // vector type that fits in an integer register, in which case it is passed as
4545 // an argument, or returned from a call, in an integer register and must be
4546 // copied if it's in an xmm register.
4548 bool srcFltReg = (varTypeIsFloating(op1) || varTypeIsSIMD(op1));
4549 bool tgtFltReg = (varTypeIsFloating(treeNode) || varTypeIsSIMD(treeNode));
4550 if (srcFltReg != tgtFltReg)
4557 ins = ins_CopyIntToFloat(op1->TypeGet(), treeNode->TypeGet());
4559 intReg = op1->gtRegNum;
4563 ins = ins_CopyFloatToInt(op1->TypeGet(), treeNode->TypeGet());
4565 fpReg = op1->gtRegNum;
4567 inst_RV_RV(ins, fpReg, intReg, targetType);
4571 inst_RV_RV(ins_Copy(targetType), targetReg, genConsumeReg(op1), targetType);
4576 // The lclVar will never be a def.
4577 // If it is a last use, the lclVar will be killed by genConsumeReg(), as usual, and genProduceReg will
4578 // appropriately set the gcInfo for the copied value.
4579 // If not, there are two cases we need to handle:
4580 // - If this is a TEMPORARY copy (indicated by the GTF_VAR_DEATH flag) the variable
4581 // will remain live in its original register.
4582 // genProduceReg() will appropriately set the gcInfo for the copied value,
4583 // and genConsumeReg will reset it.
4584 // - Otherwise, we need to update register info for the lclVar.
4586 GenTreeLclVarCommon* lcl = op1->AsLclVarCommon();
4587 assert((lcl->gtFlags & GTF_VAR_DEF) == 0);
4589 if ((lcl->gtFlags & GTF_VAR_DEATH) == 0 && (treeNode->gtFlags & GTF_VAR_DEATH) == 0)
4591 LclVarDsc* varDsc = &compiler->lvaTable[lcl->gtLclNum];
4593 // If we didn't just spill it (in genConsumeReg, above), then update the register info
4594 if (varDsc->lvRegNum != REG_STK)
4596 // The old location is dying
4597 genUpdateRegLife(varDsc, /*isBorn*/ false, /*isDying*/ true DEBUGARG(op1));
4599 gcInfo.gcMarkRegSetNpt(genRegMask(op1->gtRegNum));
4601 genUpdateVarReg(varDsc, treeNode);
4603 // The new location is going live
4604 genUpdateRegLife(varDsc, /*isBorn*/ true, /*isDying*/ false DEBUGARG(treeNode));
4610 genProduceReg(treeNode);
4613 //------------------------------------------------------------------------
4614 // genCodeForStoreInd: Produce code for a GT_STOREIND node.
4617 //    tree - the GT_STOREIND node
4619 void CodeGen::genCodeForStoreInd(GenTreeStoreInd* tree)
4621 assert(tree->OperIs(GT_STOREIND));
4624 // Storing Vector3 of size 12 bytes through indirection
4625 if (tree->TypeGet() == TYP_SIMD12)
4627 genStoreIndTypeSIMD12(tree);
4630 #endif // FEATURE_SIMD
4632 GenTree* data = tree->Data();
4633 GenTree* addr = tree->Addr();
4634 var_types targetType = tree->TypeGet();
4636 assert(!varTypeIsFloating(targetType) || (targetType == data->TypeGet()));
4638 GCInfo::WriteBarrierForm writeBarrierForm = gcInfo.gcIsWriteBarrierCandidate(tree, data);
4639 if (writeBarrierForm != GCInfo::WBF_NoBarrier)
4641 // data and addr must be in registers.
4642 // Consume both registers so that any copies of interfering registers are taken care of.
4643 genConsumeOperands(tree);
4645 if (genEmitOptimizedGCWriteBarrier(writeBarrierForm, addr, data))
4650 // At this point, we should not have any interference.
4651 // That is, 'data' must not be in REG_ARG_0, as that is where 'addr' must go.
4652 noway_assert(data->gtRegNum != REG_ARG_0);
4654 // addr goes in REG_ARG_0
4655 genCopyRegIfNeeded(addr, REG_ARG_0);
4657 // data goes in REG_ARG_1
4658 genCopyRegIfNeeded(data, REG_ARG_1);
4660 genGCWriteBarrier(tree, writeBarrierForm);
4664 bool dataIsUnary = false;
4665 bool isRMWMemoryOp = tree->IsRMWMemoryOp();
4666 GenTree* rmwSrc = nullptr;
4668 // We must consume the operands in the proper execution order, so that liveness is
4669 // updated appropriately.
4670 genConsumeAddress(addr);
4672 // If tree represents a RMW memory op then its data is a non-leaf node marked as contained,
4673 // and the non-indir operand of data is the source of the RMW memory op.
4676 assert(data->isContained() && !data->OperIsLeaf());
4678 GenTreePtr rmwDst = nullptr;
4680 dataIsUnary = (GenTree::OperIsUnary(data->OperGet()) != 0);
4683 if (tree->IsRMWDstOp1())
4685 rmwDst = data->gtGetOp1();
4686 rmwSrc = data->gtGetOp2();
4690 assert(tree->IsRMWDstOp2());
4691 rmwDst = data->gtGetOp2();
4692 rmwSrc = data->gtGetOp1();
4695 genConsumeRegs(rmwSrc);
4699 // *(p) = oper *(p): Here addr = p, rmwSrc = rmwDst = *(p) i.e. GT_IND(p)
4700 // For unary RMW ops, the src and dst of the RMW memory op are the same. Lower
4701 // clears operand counts on rmwSrc and we don't need to perform a
4702 // genConsumeReg() on it.
4703 assert(tree->IsRMWDstOp1());
4704 rmwSrc = data->gtGetOp1();
4705 rmwDst = data->gtGetOp1();
4706 assert(rmwSrc->isUsedFromMemory());
4709 assert(rmwSrc != nullptr);
4710 assert(rmwDst != nullptr);
4711 assert(Lowering::IndirsAreEquivalent(rmwDst, tree));
4715 genConsumeRegs(data);
4722 // generate code for unary RMW memory ops like neg/not
4723 getEmitter()->emitInsRMW(genGetInsForOper(data->OperGet(), data->TypeGet()), emitTypeSize(tree), tree);
4727 if (data->OperIsShiftOrRotate())
4729 // Generate code for shift RMW memory ops.
4730 // The data address needs to be op1 (it must be [addr] = [addr] <shift> <amount>, not [addr] =
4731 // <amount> <shift> [addr]).
4732 assert(tree->IsRMWDstOp1());
4733 assert(rmwSrc == data->gtGetOp2());
4734 genCodeForShiftRMW(tree);
4736 else if (data->OperGet() == GT_ADD && (rmwSrc->IsIntegralConst(1) || rmwSrc->IsIntegralConst(-1)))
4738 // Generate "inc/dec [mem]" instead of "add/sub [mem], 1".
4741 // 1) Global morph transforms GT_SUB(x, +/-1) into GT_ADD(x, -/+1).
4742 // 2) TODO-AMD64: Debugger routine NativeWalker::Decode() runs into
4743 // an assert while decoding ModR/M byte of "inc dword ptr [rax]".
4744 // It is not clear whether Decode() can handle all possible
4745 // addr modes with inc/dec. For this reason, inc/dec [mem]
4746 // is not generated while generating debuggable code. Update
4747 // the above if condition once Decode() routine is fixed.
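// For example (illustrative): for *(p) = *(p) + 1 this emits "inc dword ptr [rax]"
// rather than "add dword ptr [rax], 1".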
4748 assert(rmwSrc->isContainedIntOrIImmed());
4749 instruction ins = rmwSrc->IsIntegralConst(1) ? INS_inc : INS_dec;
4750 getEmitter()->emitInsRMW(ins, emitTypeSize(tree), tree);
4754 // generate code for remaining binary RMW memory ops like add/sub/and/or/xor
4755 getEmitter()->emitInsRMW(genGetInsForOper(data->OperGet(), data->TypeGet()), emitTypeSize(tree),
4756 tree, rmwSrc);
4762 getEmitter()->emitInsMov(ins_Store(data->TypeGet()), emitTypeSize(tree), tree);
4767 //------------------------------------------------------------------------
4768 // genCodeForSwap: Produce code for a GT_SWAP node.
4771 //    tree - the GT_SWAP node
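//
// Notes:
//    Illustrative: both locals stay enregistered; we emit a single
//
//        xchg reg1, reg2
//
//    and then swap the lclVar register assignments and GC-ness bookkeeping to match.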
4773 void CodeGen::genCodeForSwap(GenTreeOp* tree)
4775 assert(tree->OperIs(GT_SWAP));
4777 // Swap is only supported for lclVar operands that are enregistered
4778 // We do not consume or produce any registers. Both operands remain enregistered.
4779 // However, the gc-ness may change.
4780 assert(genIsRegCandidateLocal(tree->gtOp1) && genIsRegCandidateLocal(tree->gtOp2));
4782 GenTreeLclVarCommon* lcl1 = tree->gtOp1->AsLclVarCommon();
4783 LclVarDsc* varDsc1 = &(compiler->lvaTable[lcl1->gtLclNum]);
4784 var_types type1 = varDsc1->TypeGet();
4785 GenTreeLclVarCommon* lcl2 = tree->gtOp2->AsLclVarCommon();
4786 LclVarDsc* varDsc2 = &(compiler->lvaTable[lcl2->gtLclNum]);
4787 var_types type2 = varDsc2->TypeGet();
4789 // We must have both int or both fp regs
4790 assert(!varTypeIsFloating(type1) || varTypeIsFloating(type2));
4792 // FP swap is not yet implemented (and should have NYI'd in LSRA)
4793 assert(!varTypeIsFloating(type1));
4795 regNumber oldOp1Reg = lcl1->gtRegNum;
4796 regMaskTP oldOp1RegMask = genRegMask(oldOp1Reg);
4797 regNumber oldOp2Reg = lcl2->gtRegNum;
4798 regMaskTP oldOp2RegMask = genRegMask(oldOp2Reg);
4800 // We don't call genUpdateVarReg because we don't have a tree node with the new register.
4801 varDsc1->lvRegNum = oldOp2Reg;
4802 varDsc2->lvRegNum = oldOp1Reg;
4805 emitAttr size = EA_PTRSIZE;
4806 if (varTypeGCtype(type1) != varTypeGCtype(type2))
4808 // If the type specified to the emitter is a GC type, it will swap the GC-ness of the registers.
4809 // Otherwise it will leave them alone, which is correct if they have the same GC-ness.
4812 inst_RV_RV(INS_xchg, oldOp1Reg, oldOp2Reg, TYP_I_IMPL, size);
4814 // Update the gcInfo.
4815 // Manually remove these regs from the gc sets (mostly to avoid confusing duplicative dump output)
4816 gcInfo.gcRegByrefSetCur &= ~(oldOp1RegMask | oldOp2RegMask);
4817 gcInfo.gcRegGCrefSetCur &= ~(oldOp1RegMask | oldOp2RegMask);
4819 // gcMarkRegPtrVal will do the appropriate thing for non-gc types.
4820 // It will also dump the updates.
4821 gcInfo.gcMarkRegPtrVal(oldOp2Reg, type1);
4822 gcInfo.gcMarkRegPtrVal(oldOp1Reg, type2);
4825 //------------------------------------------------------------------------
4826 // genEmitOptimizedGCWriteBarrier: Generate write barrier store using the optimized
4827 // helper functions.
4830 //    writeBarrierForm - the write barrier form to use
4831 //    addr             - the address at which to do the store
4832 //    data             - the data to store
4835 //    true if an optimized write barrier form was used, false if not. If this
4836 //    function returns false, the caller must emit a "standard" write barrier.
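//
// Notes:
//    An illustrative x86 sequence (a sketch, not the exact emitted bytes; the helper
//    variant is selected by the register that holds 'data'):
//
//        mov  REG_WRITE_BARRIER, addrReg        ; if addr isn't already there
//        call CORINFO_HELP_ASSIGN_REF_EAX       ; e.g. when 'data' lives in EAX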
4838 bool CodeGen::genEmitOptimizedGCWriteBarrier(GCInfo::WriteBarrierForm writeBarrierForm, GenTree* addr, GenTree* data)
4840 assert(writeBarrierForm != GCInfo::WBF_NoBarrier);
4842 #if defined(_TARGET_X86_) && NOGC_WRITE_BARRIERS
4843 bool useOptimizedWriteBarriers = true;
4846 useOptimizedWriteBarriers =
4847 (writeBarrierForm != GCInfo::WBF_NoBarrier_CheckNotHeapInDebug); // This one is always a call to a C++ method.
4850 if (!useOptimizedWriteBarriers)
4855 const static int regToHelper[2][8] = {
4856 // If the target is known to be in managed memory
4858 CORINFO_HELP_ASSIGN_REF_EAX, CORINFO_HELP_ASSIGN_REF_ECX, -1, CORINFO_HELP_ASSIGN_REF_EBX, -1,
4859 CORINFO_HELP_ASSIGN_REF_EBP, CORINFO_HELP_ASSIGN_REF_ESI, CORINFO_HELP_ASSIGN_REF_EDI,
4862 // Don't know if the target is in managed memory
4864 CORINFO_HELP_CHECKED_ASSIGN_REF_EAX, CORINFO_HELP_CHECKED_ASSIGN_REF_ECX, -1,
4865 CORINFO_HELP_CHECKED_ASSIGN_REF_EBX, -1, CORINFO_HELP_CHECKED_ASSIGN_REF_EBP,
4866 CORINFO_HELP_CHECKED_ASSIGN_REF_ESI, CORINFO_HELP_CHECKED_ASSIGN_REF_EDI,
4870 noway_assert(regToHelper[0][REG_EAX] == CORINFO_HELP_ASSIGN_REF_EAX);
4871 noway_assert(regToHelper[0][REG_ECX] == CORINFO_HELP_ASSIGN_REF_ECX);
4872 noway_assert(regToHelper[0][REG_EBX] == CORINFO_HELP_ASSIGN_REF_EBX);
4873 noway_assert(regToHelper[0][REG_ESP] == -1);
4874 noway_assert(regToHelper[0][REG_EBP] == CORINFO_HELP_ASSIGN_REF_EBP);
4875 noway_assert(regToHelper[0][REG_ESI] == CORINFO_HELP_ASSIGN_REF_ESI);
4876 noway_assert(regToHelper[0][REG_EDI] == CORINFO_HELP_ASSIGN_REF_EDI);
4878 noway_assert(regToHelper[1][REG_EAX] == CORINFO_HELP_CHECKED_ASSIGN_REF_EAX);
4879 noway_assert(regToHelper[1][REG_ECX] == CORINFO_HELP_CHECKED_ASSIGN_REF_ECX);
4880 noway_assert(regToHelper[1][REG_EBX] == CORINFO_HELP_CHECKED_ASSIGN_REF_EBX);
4881 noway_assert(regToHelper[1][REG_ESP] == -1);
4882 noway_assert(regToHelper[1][REG_EBP] == CORINFO_HELP_CHECKED_ASSIGN_REF_EBP);
4883 noway_assert(regToHelper[1][REG_ESI] == CORINFO_HELP_CHECKED_ASSIGN_REF_ESI);
4884 noway_assert(regToHelper[1][REG_EDI] == CORINFO_HELP_CHECKED_ASSIGN_REF_EDI);
4886 regNumber reg = data->gtRegNum;
4887 noway_assert((reg != REG_ESP) && (reg != REG_WRITE_BARRIER));
4889 // Generate the following code:
4891 // call write_barrier_helper_reg
4893 // addr goes in REG_WRITE_BARRIER
4894 genCopyRegIfNeeded(addr, REG_WRITE_BARRIER);
4896 unsigned tgtAnywhere = 0;
4897 if (writeBarrierForm != GCInfo::WBF_BarrierUnchecked)
4902 // We might want to call a modified version of genGCWriteBarrier() to get the benefit of
4903 // the FEATURE_COUNT_GC_WRITE_BARRIERS code there, but that code doesn't look like it works
4904 // with rationalized RyuJIT IR. So, for now, just emit the helper call directly here.
4906 genEmitHelperCall(regToHelper[tgtAnywhere][reg],
4908 EA_PTRSIZE); // retSize
4911 #else // !defined(_TARGET_X86_) || !NOGC_WRITE_BARRIERS
4913 #endif // !defined(_TARGET_X86_) || !NOGC_WRITE_BARRIERS
4916 // Produce code for a GT_CALL node
4917 void CodeGen::genCallInstruction(GenTreeCall* call)
4919 genAlignStackBeforeCall(call);
4921 gtCallTypes callType = (gtCallTypes)call->gtCallType;
4923 IL_OFFSETX ilOffset = BAD_IL_OFFSET;
4925 // all virtuals should have been expanded into a control expression
4926 assert(!call->IsVirtual() || call->gtControlExpr || call->gtCallAddr);
4928 // Insert a GS check if necessary
4929 if (call->IsTailCallViaHelper())
4931 if (compiler->getNeedsGSSecurityCookie())
4933 #if FEATURE_FIXED_OUT_ARGS
4934 // If either of the conditions below is true, we will need a temporary register in order to perform the GS
4935 // cookie check. When FEATURE_FIXED_OUT_ARGS is disabled, we save and restore the temporary register using
4936 // push/pop. When FEATURE_FIXED_OUT_ARGS is enabled, however, we need an alternative solution. For now,
4937 // though, the tail prefix is ignored on all platforms that use fixed out args, so we should never hit this
4938 // case.
4939 assert(compiler->gsGlobalSecurityCookieAddr == nullptr);
4940 assert((int)compiler->gsGlobalSecurityCookieVal == (ssize_t)compiler->gsGlobalSecurityCookieVal);
4942 genEmitGSCookieCheck(true);
4946 // Consume all the arg regs
4947 for (GenTreePtr list = call->gtCallLateArgs; list; list = list->MoveNext())
4949 assert(list->OperIsList());
4951 GenTreePtr argNode = list->Current();
4953 fgArgTabEntryPtr curArgTabEntry = compiler->gtArgEntryByNode(call, argNode->gtSkipReloadOrCopy());
4954 assert(curArgTabEntry);
4956 if (curArgTabEntry->regNum == REG_STK)
4961 #ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
4962 // Deal with multi register passed struct args.
4963 if (argNode->OperGet() == GT_FIELD_LIST)
4965 GenTreeFieldList* fieldListPtr = argNode->AsFieldList();
4966 unsigned iterationNum = 0;
4967 for (; fieldListPtr != nullptr; fieldListPtr = fieldListPtr->Rest(), iterationNum++)
4969 GenTreePtr putArgRegNode = fieldListPtr->gtOp.gtOp1;
4970 assert(putArgRegNode->gtOper == GT_PUTARG_REG);
4971 regNumber argReg = REG_NA;
4973 if (iterationNum == 0)
4975 argReg = curArgTabEntry->regNum;
4979 assert(iterationNum == 1);
4980 argReg = curArgTabEntry->otherRegNum;
4983 genConsumeReg(putArgRegNode);
4985 // Validate the putArgRegNode has the right type.
4986 assert(putArgRegNode->TypeGet() ==
4987 compiler->GetTypeFromClassificationAndSizes(curArgTabEntry->structDesc
4988 .eightByteClassifications[iterationNum],
4989 curArgTabEntry->structDesc
4990 .eightByteSizes[iterationNum]));
4991 if (putArgRegNode->gtRegNum != argReg)
4993 inst_RV_RV(ins_Move_Extend(putArgRegNode->TypeGet(), putArgRegNode->InReg()), argReg,
4994 putArgRegNode->gtRegNum);
4999 #endif // FEATURE_UNIX_AMD64_STRUCT_PASSING
5001 regNumber argReg = curArgTabEntry->regNum;
5002 genConsumeReg(argNode);
5003 if (argNode->gtRegNum != argReg)
5005 inst_RV_RV(ins_Move_Extend(argNode->TypeGet(), argNode->InReg()), argReg, argNode->gtRegNum);
5010 // In the case of a varargs call,
5011 // the ABI dictates that if we have floating point args,
5012 // we must pass the enregistered arguments in both the
5013 // integer and floating point registers, so let's do that.
5014 if (call->IsVarargs() && varTypeIsFloating(argNode))
5016 regNumber targetReg = compiler->getCallArgIntRegister(argNode->gtRegNum);
5017 instruction ins = ins_CopyFloatToInt(argNode->TypeGet(), TYP_LONG);
5018 inst_RV_RV(ins, argNode->gtRegNum, targetReg);
5020 #endif // FEATURE_VARARG
5023 #if defined(_TARGET_X86_) || defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
5024 // The call will pop its arguments.
5025 // For each putarg_stk:
5026 ssize_t stackArgBytes = 0;
5027 GenTreePtr args = call->gtCallArgs;
5030 GenTreePtr arg = args->gtOp.gtOp1;
5031 if (arg->OperGet() != GT_ARGPLACE && !(arg->gtFlags & GTF_LATE_ARG))
5033 #if defined(_TARGET_X86_)
5034 if ((arg->OperGet() == GT_PUTARG_STK) && (arg->gtGetOp1()->OperGet() == GT_FIELD_LIST))
5036 fgArgTabEntryPtr curArgTabEntry = compiler->gtArgEntryByNode(call, arg);
5037 assert(curArgTabEntry);
5038 stackArgBytes += curArgTabEntry->numSlots * TARGET_POINTER_SIZE;
5041 #endif // defined(_TARGET_X86_)
5043 #ifdef FEATURE_PUT_STRUCT_ARG_STK
5044 if (genActualType(arg->TypeGet()) == TYP_STRUCT)
5046 assert(arg->OperGet() == GT_PUTARG_STK);
5048 GenTreeObj* obj = arg->gtGetOp1()->AsObj();
5049 unsigned argBytes = (unsigned)roundUp(obj->gtBlkSize, TARGET_POINTER_SIZE);
5051 fgArgTabEntryPtr curArgTabEntry = compiler->gtArgEntryByNode(call, arg);
5052 assert((curArgTabEntry->numSlots * TARGET_POINTER_SIZE) == argBytes);
5054 stackArgBytes += argBytes;
5057 #endif // FEATURE_PUT_STRUCT_ARG_STK
5060 stackArgBytes += genTypeSize(genActualType(arg->TypeGet()));
5063 args = args->gtOp.gtOp2;
5065 #endif // defined(_TARGET_X86_) || defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
5067 // Insert a null check on "this" pointer if asked.
5068 if (call->NeedsNullCheck())
5070 const regNumber regThis = genGetThisArgReg(call);
5071 getEmitter()->emitIns_AR_R(INS_cmp, EA_4BYTE, regThis, regThis, 0);
5074 // Either gtControlExpr != null or gtCallAddr != null or it is a direct non-virtual call to a user or helper method.
5075 CORINFO_METHOD_HANDLE methHnd;
5076 GenTree* target = call->gtControlExpr;
5077 if (callType == CT_INDIRECT)
5079 assert(target == nullptr);
5080 target = call->gtCallAddr;
5085 methHnd = call->gtCallMethHnd;
5088 CORINFO_SIG_INFO* sigInfo = nullptr;
5090 // Pass the call signature information down into the emitter so the emitter can associate
5091 // native call sites with the signatures they were generated from.
5092 if (callType != CT_HELPER)
5094 sigInfo = call->callSig;
5098 // If fast tail call, then we are done. In this case we set up the args (both reg args
5099 // and stack args in the incoming arg area) and the call target in rax. The epilog
5100 // sequence will then generate "jmp rax".
5101 if (call->IsFastTailCall())
5103 // Don't support fast tail calling JIT helpers
5104 assert(callType != CT_HELPER);
5106 // Fast tail calls materialize call target either in gtControlExpr or in gtCallAddr.
5107 assert(target != nullptr);
5109 genConsumeReg(target);
5110 genCopyRegIfNeeded(target, REG_RAX);
5114 // For a pinvoke to unmanaged code we emit a label to clear
5115 // the GC pointer state before the callsite.
5116 // We can't utilize the typical lazy killing of GC pointers
5117 // at (or inside) the callsite.
5118 if (call->IsUnmanaged())
5120 genDefineTempLabel(genCreateTempLabel());
5123 // Determine return value size(s).
5124 ReturnTypeDesc* retTypeDesc = call->GetReturnTypeDesc();
5125 emitAttr retSize = EA_PTRSIZE;
5126 emitAttr secondRetSize = EA_UNKNOWN;
5128 if (call->HasMultiRegRetVal())
5130 retSize = emitTypeSize(retTypeDesc->GetReturnRegType(0));
5131 secondRetSize = emitTypeSize(retTypeDesc->GetReturnRegType(1));
5135 assert(!varTypeIsStruct(call));
5137 if (call->gtType == TYP_REF || call->gtType == TYP_ARRAY)
5141 else if (call->gtType == TYP_BYREF)
5147 bool fPossibleSyncHelperCall = false;
5148 CorInfoHelpFunc helperNum = CORINFO_HELP_UNDEF;
5150 // We need to propagate the IL offset information to the call instruction, so we can emit
5151 // an IL to native mapping record for the call, to support managed return value debugging.
5152 // We don't want tail call helper calls that were converted from normal calls to get a record,
5153 // so we skip this hash table lookup logic in that case.
5154 if (compiler->opts.compDbgInfo && compiler->genCallSite2ILOffsetMap != nullptr && !call->IsTailCall())
5156 (void)compiler->genCallSite2ILOffsetMap->Lookup(call, &ilOffset);
5159 #if defined(_TARGET_X86_)
5160 bool fCallerPop = call->CallerPop();
5163 if (!call->IsUnmanaged())
5165 CorInfoCallConv callConv = CORINFO_CALLCONV_DEFAULT;
5167 if ((callType != CT_HELPER) && call->callSig)
5169 callConv = call->callSig->callConv;
5172 fCallerPop |= IsCallerPop(callConv);
5174 #endif // UNIX_X86_ABI
5176 // If the callee pops the arguments, we pass a positive value as the argSize, and the emitter will
5177 // adjust its stack level accordingly.
5178 // If the caller needs to explicitly pop its arguments, we must pass a negative value, and then do the
5179 // pop when we're done.
5180 ssize_t argSizeForEmitter = stackArgBytes;
5183 argSizeForEmitter = -stackArgBytes;
5185 #endif // defined(_TARGET_X86_)
5187 #ifdef FEATURE_AVX_SUPPORT
5188 // When it's a PInvoke call and the call type is a USER function, we issue VZEROUPPER here
5189 // if the function contains 256-bit AVX instructions; this avoids the AVX-256 to legacy SSE
5190 // transition penalty, assuming the user function contains legacy SSE instructions.
5191 // To limit the code size impact, we only issue VZEROUPPER before the PInvoke call, not after
5192 // it, because the penalty for a legacy SSE to AVX transition only happens when there was a
5193 // preceding 256-bit AVX to legacy SSE transition.
5194 if (call->IsPInvoke() && (call->gtCallType == CT_USER_FUNC) && getEmitter()->Contains256bitAVX())
5196 assert(compiler->getSIMDInstructionSet() == InstructionSet_AVX);
5197 instGen(INS_vzeroupper);
5201 if (target != nullptr)
5204 if (call->IsVirtualStub() && (call->gtCallType == CT_INDIRECT))
5206 // On x86, we need to generate a very specific pattern for indirect VSD calls:
5208 //    3-byte nop
5209 //    call dword ptr [eax]
5211 // Where EAX is also used as an argument to the stub dispatch helper. Make
5212 // sure that the call target address is computed into EAX in this case.
5214 assert(REG_VIRTUAL_STUB_PARAM == REG_VIRTUAL_STUB_TARGET);
5216 assert(target->isContainedIndir());
5217 assert(target->OperGet() == GT_IND);
5219 GenTree* addr = target->AsIndir()->Addr();
5220 assert(addr->isUsedFromReg());
5222 genConsumeReg(addr);
5223 genCopyRegIfNeeded(addr, REG_VIRTUAL_STUB_TARGET);
5225 getEmitter()->emitIns_Nop(3);
5228 getEmitter()->emitIns_Call(emitter::EmitCallType(emitter::EC_INDIR_ARD),
5230 INDEBUG_LDISASM_COMMA(sigInfo)
5234 MULTIREG_HAS_SECOND_GC_RET_ONLY_ARG(secondRetSize),
5235 gcInfo.gcVarPtrSetCur,
5236 gcInfo.gcRegGCrefSetCur,
5237 gcInfo.gcRegByrefSetCur,
5238 ilOffset, REG_VIRTUAL_STUB_TARGET, REG_NA, 1, 0);
5243 if (target->isContainedIndir())
5245 if (target->AsIndir()->HasBase() && target->AsIndir()->Base()->isContainedIntOrIImmed())
5247 // Note that if gtControlExpr is an indir of an absolute address, we mark it as
5248 // contained only if it can be encoded as PC-relative offset.
5249 assert(target->AsIndir()->Base()->AsIntConCommon()->FitsInAddrBase(compiler));
5252 genEmitCall(emitter::EC_FUNC_TOKEN_INDIR,
5254 INDEBUG_LDISASM_COMMA(sigInfo)
5255 (void*) target->AsIndir()->Base()->AsIntConCommon()->IconValue()
5256 X86_ARG(argSizeForEmitter),
5258 MULTIREG_HAS_SECOND_GC_RET_ONLY_ARG(secondRetSize),
5265 genEmitCall(emitter::EC_INDIR_ARD,
5267 INDEBUG_LDISASM_COMMA(sigInfo)
5269 X86_ARG(argSizeForEmitter),
5271 MULTIREG_HAS_SECOND_GC_RET_ONLY_ARG(secondRetSize),
5278 // We have already generated code for gtControlExpr evaluating it into a register.
5279 // We just need to emit "call reg" in this case.
5280 assert(genIsValidIntReg(target->gtRegNum));
5283 genEmitCall(emitter::EC_INDIR_R,
5285 INDEBUG_LDISASM_COMMA(sigInfo)
5287 X86_ARG(argSizeForEmitter),
5289 MULTIREG_HAS_SECOND_GC_RET_ONLY_ARG(secondRetSize),
5291 genConsumeReg(target));
5295 #ifdef FEATURE_READYTORUN_COMPILER
5296 else if (call->gtEntryPoint.addr != nullptr)
5299 genEmitCall((call->gtEntryPoint.accessType == IAT_VALUE) ? emitter::EC_FUNC_TOKEN
5300 : emitter::EC_FUNC_TOKEN_INDIR,
5302 INDEBUG_LDISASM_COMMA(sigInfo)
5303 (void*) call->gtEntryPoint.addr
5304 X86_ARG(argSizeForEmitter),
5306 MULTIREG_HAS_SECOND_GC_RET_ONLY_ARG(secondRetSize),
5313 // Generate a direct call to a non-virtual user defined or helper method
5314 assert(callType == CT_HELPER || callType == CT_USER_FUNC);
5316 void* addr = nullptr;
5317 if (callType == CT_HELPER)
5319 // Direct call to a helper method.
5320 helperNum = compiler->eeGetHelperNum(methHnd);
5321 noway_assert(helperNum != CORINFO_HELP_UNDEF);
5323 void* pAddr = nullptr;
5324 addr = compiler->compGetHelperFtn(helperNum, (void**)&pAddr);
5326 if (addr == nullptr)
5331 // tracking of region protected by the monitor in synchronized methods
5332 if (compiler->info.compFlags & CORINFO_FLG_SYNCH)
5334 fPossibleSyncHelperCall = true;
5339 // Direct call to a non-virtual user function.
5340 addr = call->gtDirectCallAddress;
5343 // Non-virtual direct calls to known addresses
5346 genEmitCall(emitter::EC_FUNC_TOKEN,
5348 INDEBUG_LDISASM_COMMA(sigInfo)
5350 X86_ARG(argSizeForEmitter),
5352 MULTIREG_HAS_SECOND_GC_RET_ONLY_ARG(secondRetSize),
5357 // if it was a pinvoke we may have needed to get the address of a label
5358 if (genPendingCallLabel)
5360 assert(call->IsUnmanaged());
5361 genDefineTempLabel(genPendingCallLabel);
5362 genPendingCallLabel = nullptr;
5366 // All Callee arg registers are trashed and no longer contain any GC pointers.
5367 // TODO-XArch-Bug?: As a matter of fact shouldn't we be killing all of callee trashed regs here?
5368 // For now we will assert that other than arg regs gc ref/byref set doesn't contain any other
5369 // registers from RBM_CALLEE_TRASH.
5370 assert((gcInfo.gcRegGCrefSetCur & (RBM_CALLEE_TRASH & ~RBM_ARG_REGS)) == 0);
5371 assert((gcInfo.gcRegByrefSetCur & (RBM_CALLEE_TRASH & ~RBM_ARG_REGS)) == 0);
5372 gcInfo.gcRegGCrefSetCur &= ~RBM_ARG_REGS;
5373 gcInfo.gcRegByrefSetCur &= ~RBM_ARG_REGS;
5375 var_types returnType = call->TypeGet();
5376 if (returnType != TYP_VOID)
5379 if (varTypeIsFloating(returnType))
5381 // Spill the value from the fp stack.
5382 // Then, load it into the target register.
5383 call->gtFlags |= GTF_SPILL;
5384 regSet.rsSpillFPStack(call);
5385 call->gtFlags |= GTF_SPILLED;
5386 call->gtFlags &= ~GTF_SPILL;
5389 #endif // _TARGET_X86_
5391 regNumber returnReg;
5393 if (call->HasMultiRegRetVal())
5395 assert(retTypeDesc != nullptr);
5396 unsigned regCount = retTypeDesc->GetReturnRegCount();
5398 // If regs allocated to call node are different from ABI return
5399 // regs in which the call has returned its result, move the result
5400 // to regs allocated to call node.
5401 for (unsigned i = 0; i < regCount; ++i)
5403 var_types regType = retTypeDesc->GetReturnRegType(i);
5404 returnReg = retTypeDesc->GetABIReturnReg(i);
5405 regNumber allocatedReg = call->GetRegNumByIdx(i);
5406 if (returnReg != allocatedReg)
5408 inst_RV_RV(ins_Copy(regType), allocatedReg, returnReg, regType);
5413 // A Vector3 return value is stored in xmm0 and xmm1.
5414 // RyuJIT assumes that the upper unused bits of xmm1 are cleared but
5415 // the native compiler doesn't guarantee it.
5416 if (returnType == TYP_SIMD12)
5418 returnReg = retTypeDesc->GetABIReturnReg(1);
5419 // Clear the upper 96 bits (keeping only the low 32 bits) with two byte-shift instructions.
5420 // retReg = retReg << 96
5421 // retReg = retReg >> 96
5422 getEmitter()->emitIns_R_I(INS_pslldq, emitActualTypeSize(TYP_SIMD12), returnReg, 12);
5423 getEmitter()->emitIns_R_I(INS_psrldq, emitActualTypeSize(TYP_SIMD12), returnReg, 12);
5425 #endif // FEATURE_SIMD
5430 if (call->IsHelperCall(compiler, CORINFO_HELP_INIT_PINVOKE_FRAME))
5432 // The x86 CORINFO_HELP_INIT_PINVOKE_FRAME helper uses a custom calling convention that returns with
5433 // TCB in REG_PINVOKE_TCB. AMD64/ARM64 use the standard calling convention. fgMorphCall() sets the
5434 // correct argument registers.
5435 returnReg = REG_PINVOKE_TCB;
5438 #endif // _TARGET_X86_
5439 if (varTypeIsFloating(returnType))
5441 returnReg = REG_FLOATRET;
5445 returnReg = REG_INTRET;
5448 if (call->gtRegNum != returnReg)
5450 inst_RV_RV(ins_Copy(returnType), call->gtRegNum, returnReg, returnType);
5454 genProduceReg(call);
5458 // If there is nothing next, that means the result is thrown away, so this value is not live.
5459 // However, for minopts or debuggable code, we keep it live to support managed return value debugging.
5460 if ((call->gtNext == nullptr) && !compiler->opts.MinOpts() && !compiler->opts.compDbgCode)
5462 gcInfo.gcMarkRegSetNpt(RBM_INTRET);
5465 #if !FEATURE_EH_FUNCLETS
5466 //-------------------------------------------------------------------------
5467 // Create a label for tracking of region protected by the monitor in synchronized methods.
5468 // This needs to be here, rather than above where fPossibleSyncHelperCall is set,
5469 // so the GC state vars have been updated before creating the label.
5471 if (fPossibleSyncHelperCall)
5475 case CORINFO_HELP_MON_ENTER:
5476 case CORINFO_HELP_MON_ENTER_STATIC:
5477 noway_assert(compiler->syncStartEmitCookie == NULL);
5478 compiler->syncStartEmitCookie =
5479 getEmitter()->emitAddLabel(gcInfo.gcVarPtrSetCur, gcInfo.gcRegGCrefSetCur, gcInfo.gcRegByrefSetCur);
5480 noway_assert(compiler->syncStartEmitCookie != NULL);
5482 case CORINFO_HELP_MON_EXIT:
5483 case CORINFO_HELP_MON_EXIT_STATIC:
5484 noway_assert(compiler->syncEndEmitCookie == NULL);
5485 compiler->syncEndEmitCookie =
5486 getEmitter()->emitAddLabel(gcInfo.gcVarPtrSetCur, gcInfo.gcRegGCrefSetCur, gcInfo.gcRegByrefSetCur);
5487 noway_assert(compiler->syncEndEmitCookie != NULL);
5493 #endif // !FEATURE_EH_FUNCLETS
5495 unsigned stackAdjustBias = 0;
5497 #if defined(_TARGET_X86_)
5498 // Is the caller supposed to pop the arguments?
5499 if (fCallerPop && (stackArgBytes != 0))
5501 stackAdjustBias = stackArgBytes;
5504 SubtractStackLevel(stackArgBytes);
5505 #endif // _TARGET_X86_
5507 genRemoveAlignmentAfterCall(call, stackAdjustBias);
5510 // Produce code for a GT_JMP node.
5511 // The arguments of the caller need to be transferred to the callee before exiting the caller.
5512 // The actual jump to the callee is generated as part of the caller epilog sequence.
5513 // Therefore the codegen of GT_JMP is to ensure that the callee arguments are correctly set up.
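// An illustrative fragment of what this produces (register/offset names are placeholders):
//
//     mov  [frame+argSlotOffset], someReg   ; first loop: spill args not in their home location
//     mov  argReg, [frame+argSlotOffset]    ; second loop: reload incoming arg registers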
5514 void CodeGen::genJmpMethod(GenTreePtr jmp)
5516 assert(jmp->OperGet() == GT_JMP);
5517 assert(compiler->compJmpOpUsed);
5519 // If no arguments, nothing to do
5520 if (compiler->info.compArgsCount == 0)
5525 // Make sure register arguments are in their initial registers
5526 // and stack arguments are put back as well.
5530 // First move any enregistered stack arguments back to the stack.
5531 // At the same time any reg arg not in its correct register is moved back to its stack location.
5533 // We are not strictly required to spill reg args that are not in the desired reg for a jmp call
5534 // But that would require us to deal with circularity while moving values around. Spilling
5535 // to stack makes the implementation simple, which is not a bad trade off given Jmp calls
5536 // are not frequent.
5537 for (varNum = 0; (varNum < compiler->info.compArgsCount); varNum++)
5539 varDsc = compiler->lvaTable + varNum;
5541 if (varDsc->lvPromoted)
5543 noway_assert(varDsc->lvFieldCnt == 1); // We only handle one field here
5545 unsigned fieldVarNum = varDsc->lvFieldLclStart;
5546 varDsc = compiler->lvaTable + fieldVarNum;
5548 noway_assert(varDsc->lvIsParam);
5550 if (varDsc->lvIsRegArg && (varDsc->lvRegNum != REG_STK))
5552 // Skip reg args which are already in the right register for the jmp call.
5553 // If not, we will spill such args to their stack locations.
5555 // If we need to generate a tail call profiler hook, then spill all
5556 // arg regs to free them up for the callback.
5557 if (!compiler->compIsProfilerHookNeeded() && (varDsc->lvRegNum == varDsc->lvArgReg))
5562 else if (varDsc->lvRegNum == REG_STK)
5564 // Skip args which are currently living on the stack.
5568 // If we came here it means either a reg argument not in the right register or
5569 // a stack argument currently living in a register. In either case the following
5570 // assert should hold.
5571 assert(varDsc->lvRegNum != REG_STK);
5573 assert(!varDsc->lvIsStructField || (compiler->lvaTable[varDsc->lvParentLcl].lvFieldCnt == 1));
5574 var_types storeType = genActualType(varDsc->lvaArgType()); // We own the memory and can use the full move.
5575 getEmitter()->emitIns_S_R(ins_Store(storeType), emitTypeSize(storeType), varDsc->lvRegNum, varNum, 0);
5577 // Update lvRegNum life and GC info to indicate lvRegNum is dead and varDsc stack slot is going live.
5578 // Note that we cannot modify varDsc->lvRegNum here because another basic block may not be expecting it.
5579 // Therefore manually update life of varDsc->lvRegNum.
5580 regMaskTP tempMask = varDsc->lvRegMask();
5581 regSet.RemoveMaskVars(tempMask);
5582 gcInfo.gcMarkRegSetNpt(tempMask);
5583 if (compiler->lvaIsGCTracked(varDsc))
5586 if (!VarSetOps::IsMember(compiler, gcInfo.gcVarPtrSetCur, varDsc->lvVarIndex))
5588 JITDUMP("\t\t\t\t\t\t\tVar V%02u becoming live\n", varNum);
5592 JITDUMP("\t\t\t\t\t\t\tVar V%02u continuing live\n", varNum);
5596 VarSetOps::AddElemD(compiler, gcInfo.gcVarPtrSetCur, varDsc->lvVarIndex);
5600 #ifdef PROFILING_SUPPORTED
5601 // At this point all arg regs are free.
5602 // Emit tail call profiler callback.
5603 genProfilingLeaveCallback(CORINFO_HELP_PROF_FCN_TAILCALL);
5606 // Next, move any register arguments that are not currently in their registers back into them.
5607 regMaskTP fixedIntArgMask = RBM_NONE; // tracks the int arg regs occupying fixed args in case of a vararg method.
5608 unsigned firstArgVarNum = BAD_VAR_NUM; // varNum of the first argument in case of a vararg method.
5609 for (varNum = 0; (varNum < compiler->info.compArgsCount); varNum++)
5611 varDsc = compiler->lvaTable + varNum;
5612 if (varDsc->lvPromoted)
5614 noway_assert(varDsc->lvFieldCnt == 1); // We only handle one field here
5616 unsigned fieldVarNum = varDsc->lvFieldLclStart;
5617 varDsc = compiler->lvaTable + fieldVarNum;
5619 noway_assert(varDsc->lvIsParam);
5621 // Skip if arg not passed in a register.
5622 if (!varDsc->lvIsRegArg)
5627 #if defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
5628 if (varTypeIsStruct(varDsc))
5630 CORINFO_CLASS_HANDLE typeHnd = varDsc->lvVerTypeInfo.GetClassHandle();
5631 assert(typeHnd != nullptr);
5633 SYSTEMV_AMD64_CORINFO_STRUCT_REG_PASSING_DESCRIPTOR structDesc;
5634 compiler->eeGetSystemVAmd64PassStructInRegisterDescriptor(typeHnd, &structDesc);
5635 assert(structDesc.passedInRegisters);
5637 unsigned __int8 offset0 = 0;
5638 unsigned __int8 offset1 = 0;
5639 var_types type0 = TYP_UNKNOWN;
5640 var_types type1 = TYP_UNKNOWN;
5642 // Get the eightbyte data
5643 compiler->GetStructTypeOffset(structDesc, &type0, &type1, &offset0, &offset1);
5645 // Move the values into the right registers.
5648 // Update varDsc->lvArgReg and lvOtherArgReg life and GC Info to indicate varDsc stack slot is dead and
5649 // argReg is going live. Note that we cannot modify varDsc->lvRegNum and lvOtherArgReg here because another
5650 // basic block may not be expecting it. Therefore manually update life of argReg. Note that GT_JMP marks
5651 // the end of the basic block and after which reg life and gc info will be recomputed for the new block in
5652 // genCodeForBBList().
5653 if (type0 != TYP_UNKNOWN)
5655 getEmitter()->emitIns_R_S(ins_Load(type0), emitTypeSize(type0), varDsc->lvArgReg, varNum, offset0);
5656 regSet.rsMaskVars |= genRegMask(varDsc->lvArgReg);
5657 gcInfo.gcMarkRegPtrVal(varDsc->lvArgReg, type0);
5660 if (type1 != TYP_UNKNOWN)
5662 getEmitter()->emitIns_R_S(ins_Load(type1), emitTypeSize(type1), varDsc->lvOtherArgReg, varNum, offset1);
5663 regSet.rsMaskVars |= genRegMask(varDsc->lvOtherArgReg);
5664 gcInfo.gcMarkRegPtrVal(varDsc->lvOtherArgReg, type1);
5667 if (varDsc->lvTracked)
5669 VarSetOps::RemoveElemD(compiler, gcInfo.gcVarPtrSetCur, varDsc->lvVarIndex);
5673 #endif // defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
5675 // Register argument
5676 noway_assert(isRegParamType(genActualType(varDsc->TypeGet())));
5678 // Is the register argument already in the right register?
5679 // If not, load it from its stack location.
5680 var_types loadType = varDsc->lvaArgType();
5681 regNumber argReg = varDsc->lvArgReg; // incoming arg register
5683 if (varDsc->lvRegNum != argReg)
5685 assert(genIsValidReg(argReg));
5686 getEmitter()->emitIns_R_S(ins_Load(loadType), emitTypeSize(loadType), argReg, varNum, 0);
5688 // Update argReg life and GC Info to indicate varDsc stack slot is dead and argReg is going live.
5689 // Note that we cannot modify varDsc->lvRegNum here because another basic block may not be expecting it.
5690 // Therefore manually update life of argReg. Note that GT_JMP marks the end of the basic block
5691 // and after which reg life and gc info will be recomputed for the new block in genCodeForBBList().
5692 regSet.AddMaskVars(genRegMask(argReg));
5693 gcInfo.gcMarkRegPtrVal(argReg, loadType);
5694 if (compiler->lvaIsGCTracked(varDsc))
5697 if (VarSetOps::IsMember(compiler, gcInfo.gcVarPtrSetCur, varDsc->lvVarIndex))
5699 JITDUMP("\t\t\t\t\t\t\tVar V%02u becoming dead\n", varNum);
5703 JITDUMP("\t\t\t\t\t\t\tVar V%02u continuing dead\n", varNum);
5707 VarSetOps::RemoveElemD(compiler, gcInfo.gcVarPtrSetCur, varDsc->lvVarIndex);
5712 #if FEATURE_VARARG && defined(_TARGET_AMD64_)
5713 // In case of a jmp call to a vararg method also pass the float/double arg in the corresponding int arg
5714 // register. This is due to the AMD64 ABI which requires floating point values passed to varargs functions to
5715 // be passed in both integer and floating point registers. It doesn't apply to x86, which passes floating point
5716 // values on the stack.
5717 if (compiler->info.compIsVarArgs)
5719 regNumber intArgReg;
5720 var_types loadType = varDsc->lvaArgType();
5721 regNumber argReg = varDsc->lvArgReg; // incoming arg register
5723 if (varTypeIsFloating(loadType))
5725 intArgReg = compiler->getCallArgIntRegister(argReg);
5726 instruction ins = ins_CopyFloatToInt(loadType, TYP_LONG);
5727 inst_RV_RV(ins, argReg, intArgReg, loadType);
5734 fixedIntArgMask |= genRegMask(intArgReg);
5736 if (intArgReg == REG_ARG_0)
5738 assert(firstArgVarNum == BAD_VAR_NUM);
5739 firstArgVarNum = varNum;
5742 #endif // FEATURE_VARARG
5745 #if FEATURE_VARARG && defined(_TARGET_AMD64_)
5746 // Jmp call to a vararg method - if the method has fewer than 4 fixed arguments,
5747 // load the remaining arg registers (both int and float) from the corresponding
5748 // shadow stack slots. This is because we don't know the number and types
5749 // of the non-fixed params passed by the caller; therefore we have to assume the worst case,
5750 // with the caller passing float/double args in both the int and float arg regs.
5752 // This doesn't apply to x86, which doesn't pass floating point values in floating
5753 // point registers.
5755 // The caller could have passed gc-ref/byref type var args. Since these are var args
5756 // the callee has no way of knowing their gc-ness. Therefore, mark the region that loads
5757 // the remaining arg registers from shadow stack slots as non-gc interruptible.
5758 if (fixedIntArgMask != RBM_NONE)
5760 assert(compiler->info.compIsVarArgs);
5761 assert(firstArgVarNum != BAD_VAR_NUM);
5763 regMaskTP remainingIntArgMask = RBM_ARG_REGS & ~fixedIntArgMask;
5764 if (remainingIntArgMask != RBM_NONE)
5766 instruction insCopyIntToFloat = ins_CopyIntToFloat(TYP_LONG, TYP_DOUBLE);
5767 getEmitter()->emitDisableGC();
5768 for (int argNum = 0, argOffset = 0; argNum < MAX_REG_ARG; ++argNum)
5770 regNumber argReg = intArgRegs[argNum];
5771 regMaskTP argRegMask = genRegMask(argReg);
5773 if ((remainingIntArgMask & argRegMask) != 0)
5775 remainingIntArgMask &= ~argRegMask;
5776 getEmitter()->emitIns_R_S(INS_mov, EA_8BYTE, argReg, firstArgVarNum, argOffset);
5778 // also load it in corresponding float arg reg
5779 regNumber floatReg = compiler->getCallArgFloatRegister(argReg);
5780 inst_RV_RV(insCopyIntToFloat, floatReg, argReg);
5783 argOffset += REGSIZE_BYTES;
5785 getEmitter()->emitEnableGC();
5788 #endif // FEATURE_VARARG
// Produce code for a GT_LEA subnode.
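// For example (illustrative), a GT_LEA with base RBX, index RCX, scale 4 and
// offset 16 produces:
//     lea rax, [rbx+4*rcx+16]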
5792 void CodeGen::genLeaInstruction(GenTreeAddrMode* lea)
5794 emitAttr size = emitTypeSize(lea);
5795 genConsumeOperands(lea);
5797 if (lea->Base() && lea->Index())
5799 regNumber baseReg = lea->Base()->gtRegNum;
5800 regNumber indexReg = lea->Index()->gtRegNum;
5801 getEmitter()->emitIns_R_ARX(INS_lea, size, lea->gtRegNum, baseReg, indexReg, lea->gtScale, lea->gtOffset);
5803 else if (lea->Base())
5805 getEmitter()->emitIns_R_AR(INS_lea, size, lea->gtRegNum, lea->Base()->gtRegNum, lea->gtOffset);
5807 else if (lea->Index())
getEmitter()->emitIns_R_ARX(INS_lea, size, lea->gtRegNum, REG_NA, lea->Index()->gtRegNum, lea->gtScale,
                            lea->gtOffset);
5816 //-------------------------------------------------------------------------------------------
5817 // genJumpKindsForTree: Determine the number and kinds of conditional branches
5818 // necessary to implement the given GT_CMP node
// cmpTree - (input) The GenTree Relop node that was used to set the Condition codes
5823 // jmpKind[2] - (output) One or two conditional branch instructions
5824 // jmpToTrueLabel[2] - (output) When true we branch to the true case
5825 // When false we create a second label and branch to the false case
// Only an ordered GT_EQ of floating point compares can have a false-label branch.
5829 // Sets the proper values into the array elements of jmpKind[] and jmpToTrueLabel[]
5832 // At least one conditional branch instruction will be returned.
5833 // Typically only one conditional branch is needed
5834 // and the second jmpKind[] value is set to EJ_NONE
5837 // jmpToTrueLabel[i]= true implies branch when the compare operation is true.
5838 // jmpToTrueLabel[i]= false implies branch when the compare operation is false.
5839 //-------------------------------------------------------------------------------------------
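// For example (illustrative), an ordered GT_EQ of doubles yields
// jmpKind[] = { EJ_jpe, EJ_je } with jmpToTrueLabel[] = { false, true }:
// branch to the false label on PF=1 (unordered), else to the true label on ZF=1.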
5842 void CodeGen::genJumpKindsForTree(GenTreePtr cmpTree, emitJumpKind jmpKind[2], bool jmpToTrueLabel[2])
5844 // Except for BEQ (= ordered GT_EQ) both jumps are to the true label.
5845 jmpToTrueLabel[0] = true;
5846 jmpToTrueLabel[1] = true;
5848 // For integer comparisons just use genJumpKindForOper
5849 if (!varTypeIsFloating(cmpTree->gtOp.gtOp1->gtEffectiveVal()))
5851 CompareKind compareKind = ((cmpTree->gtFlags & GTF_UNSIGNED) != 0) ? CK_UNSIGNED : CK_SIGNED;
5852 jmpKind[0] = genJumpKindForOper(cmpTree->gtOper, compareKind);
5853 jmpKind[1] = EJ_NONE;
5857 assert(cmpTree->OperIsCompare());
5859 // For details on how we arrived at this mapping, see the comment block in genCodeForTreeNode()
// while generating code for compare operators (e.g. GT_EQ etc).
5861 if ((cmpTree->gtFlags & GTF_RELOP_NAN_UN) != 0)
// Must branch if we have a NaN, unordered
5864 switch (cmpTree->gtOper)
5869 jmpKind[1] = EJ_NONE;
5874 jmpKind[0] = EJ_jbe;
5875 jmpKind[1] = EJ_NONE;
5879 jmpKind[0] = EJ_jpe;
5880 jmpKind[1] = EJ_jne;
5885 jmpKind[1] = EJ_NONE;
5892 else // ((cmpTree->gtFlags & GTF_RELOP_NAN_UN) == 0)
// Do not branch if we have a NaN, unordered
5895 switch (cmpTree->gtOper)
5900 jmpKind[1] = EJ_NONE;
5905 jmpKind[0] = EJ_jae;
5906 jmpKind[1] = EJ_NONE;
5910 jmpKind[0] = EJ_jne;
5911 jmpKind[1] = EJ_NONE;
jmpKind[0]        = EJ_jpe;
jmpKind[1]        = EJ_je;
jmpToTrueLabel[0] = false;
5927 //------------------------------------------------------------------------
5928 // genCompareFloat: Generate code for comparing two floating point values
5931 // treeNode - the compare tree
// The SSE2 instruction ucomis[s|d] performs an unordered comparison and
// updates the rFLAGS register as follows.
//        Result of compare       ZF  PF  CF
//        -----------------       ----------
//        Unordered                1   1   1   <-- this result implies one of the operands of the compare is a NaN.
//        Greater Than             0   0   0
//        Less Than                0   0   1
//        Equal                    1   0   0
5945 // From the above table the following equalities follow. As per ECMA spec *.UN opcodes perform
5946 // unordered comparison of floating point values. That is *.UN comparisons result in true when
// one of the operands is a NaN, whereas ordered comparisons result in false.
5949 // Opcode Amd64 equivalent Comment
5950 // ------ ----------------- --------
5951 // BLT.UN(a,b) ucomis[s|d] a, b Jb branches if CF=1, which means either a<b or unordered from the above
5954 // BLT(a,b) ucomis[s|d] b, a Ja branches if CF=0 and ZF=0, which means b>a that in turn implies a<b
5957 // BGT.UN(a,b) ucomis[s|d] b, a branch if b<a or unordered ==> branch if a>b or unordered
5960 // BGT(a, b) ucomis[s|d] a, b branch if a>b
5963 // BLE.UN(a,b) ucomis[s|d] a, b jbe branches if CF=1 or ZF=1, which implies a<=b or unordered
// BLE(a,b)       ucomis[s|d] b, a     jae branches if CF=0, which means b>=a, i.e. a<=b
5969 // BGE.UN(a,b) ucomis[s|d] b, a branch if b<=a or unordered ==> branch if a>=b or unordered
5972 // BGE(a,b) ucomis[s|d] a, b branch if a>=b
5975 // BEQ.UN(a,b) ucomis[s|d] a, b branch if a==b or unordered. There is no BEQ.UN opcode in ECMA spec.
//                je                   This case is given for completeness, in case the JIT generates such
5977 // a gentree internally.
5979 // BEQ(a,b) ucomis[s|d] a, b From the above table, PF=0 and ZF=1 corresponds to a==b.
5984 // BNE(a,b) ucomis[s|d] a, b branch if a!=b. There is no BNE opcode in ECMA spec. This case is
//                jne                  given for completeness, in case the JIT generates such a gentree
5988 // BNE.UN(a,b) ucomis[s|d] a, b From the above table, PF=1 or ZF=0 implies unordered or a!=b
// As we can see from the above equalities, the operands of a compare operator need to be
// reversed in the case of BLT/CLT, BGT.UN/CGT.UN, BLE/CLE, BGE.UN/CGE.UN.
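// For example (illustrative), BLT(a, b) with a in xmm0 and b in xmm1 emits:
//     ucomisd xmm1, xmm0   ; operands reversed
//     ja      trueLabel    ; CF=0 and ZF=0 => b > a => a < b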
5994 void CodeGen::genCompareFloat(GenTreePtr treeNode)
5996 assert(treeNode->OperIsCompare());
5998 GenTreeOp* tree = treeNode->AsOp();
5999 GenTreePtr op1 = tree->gtOp1;
6000 GenTreePtr op2 = tree->gtOp2;
6001 var_types op1Type = op1->TypeGet();
6002 var_types op2Type = op2->TypeGet();
6004 genConsumeOperands(tree);
6006 assert(varTypeIsFloating(op1Type));
6007 assert(op1Type == op2Type);
6009 regNumber targetReg = treeNode->gtRegNum;
6014 if ((tree->gtFlags & GTF_RELOP_NAN_UN) != 0)
6016 // Unordered comparison case
6017 reverseOps = (tree->gtOper == GT_GT || tree->gtOper == GT_GE);
6021 reverseOps = (tree->gtOper == GT_LT || tree->gtOper == GT_LE);
6026 GenTreePtr tmp = op1;
6031 ins = ins_FloatCompare(op1Type);
6032 cmpAttr = emitTypeSize(op1Type);
6034 getEmitter()->emitInsBinary(ins, cmpAttr, op1, op2);
6036 // Are we evaluating this into a register?
6037 if (targetReg != REG_NA)
6039 genSetRegToCond(targetReg, tree);
6040 genProduceReg(tree);
6044 //------------------------------------------------------------------------
6045 // genCompareInt: Generate code for comparing ints or, on amd64, longs.
6048 // treeNode - the compare tree
6052 void CodeGen::genCompareInt(GenTreePtr treeNode)
6054 assert(treeNode->OperIsCompare() || treeNode->OperIs(GT_CMP));
6056 GenTreeOp* tree = treeNode->AsOp();
6057 GenTreePtr op1 = tree->gtOp1;
6058 GenTreePtr op2 = tree->gtOp2;
6059 var_types op1Type = op1->TypeGet();
6060 var_types op2Type = op2->TypeGet();
6061 regNumber targetReg = tree->gtRegNum;
6063 // Case of op1 == 0 or op1 != 0:
6064 // Optimize generation of 'test' instruction if op1 sets flags.
6066 // Note that if LSRA has inserted any GT_RELOAD/GT_COPY before
6067 // op1, it will not modify the flags set by codegen of op1.
// Similarly, op1 could be reg-optional at its use and have been
// spilled after producing its result in a register; the spill
// code likewise will not modify the flags set by op1.
6071 GenTree* realOp1 = op1->gtSkipReloadOrCopy();
6072 if (realOp1->gtSetFlags())
6074 // op1 must set ZF and SF flags
6075 assert(realOp1->gtSetZSFlags());
6077 // Must be (in)equality against zero.
6078 assert(tree->OperIs(GT_EQ, GT_NE));
6079 assert(op2->IsIntegralConst(0));
6080 assert(op2->isContained());
6082 // Just consume the operands
6083 genConsumeOperands(tree);
// No need to generate a test instruction since op1 already set the flags.
6088 // Are we evaluating this into a register?
6089 if (targetReg != REG_NA)
6091 genSetRegToCond(targetReg, tree);
6092 genProduceReg(tree);
6099 // If we have GT_JTRUE(GT_EQ/NE(GT_SIMD((in)Equality, v1, v2), true/false)),
// then we don't need to generate code for GT_EQ/GT_NE, since the SIMD (in)Equality intrinsic
// will set or clear the Zero flag.
6102 if ((targetReg == REG_NA) && tree->OperIs(GT_EQ, GT_NE))
6104 // Is it a SIMD (in)Equality that doesn't need to materialize result into a register?
6105 if ((op1->gtRegNum == REG_NA) && op1->IsSIMDEqualityOrInequality())
6107 // Must be comparing against true or false.
6108 assert(op2->IsIntegralConst(0) || op2->IsIntegralConst(1));
6109 assert(op2->isContainedIntOrIImmed());
6111 // In this case SIMD (in)Equality will set or clear
6112 // Zero flag, based on which GT_JTRUE would generate
6113 // the right conditional jump.
6117 #endif // FEATURE_SIMD
6119 genConsumeOperands(tree);
6121 // TODO-CQ: We should be able to support swapping op1 and op2 to generate cmp reg, imm.
6122 // https://github.com/dotnet/coreclr/issues/7270
assert(!op1->isContainedIntOrIImmed()); // We no longer support swapping op1 and op2 to generate cmp reg, imm
6124 assert(!varTypeIsFloating(op2Type));
6127 var_types type = TYP_UNKNOWN;
6129 if (tree->OperIs(GT_TEST_EQ, GT_TEST_NE))
// Unlike many xarch instructions TEST doesn't have a form with a 16/32/64 bit first operand and
// an 8 bit immediate second operand. But if the immediate value fits in 8 bits then we can simply
// emit an 8 bit TEST instruction, unless we're targeting x86 and the first operand is a non-byteable
// register.
// Note that lowering does something similar but its main purpose is to allow memory operands to be
// contained, so it doesn't handle other kinds of operands. It could do more but on x86 that results
// in additional register constraints and that may be worse than wasting 3 bytes on an immediate.
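// For example (illustrative), "test al, 0x20" encodes the mask in a single
// immediate byte, whereas "test eax, 0x20" spends 4 bytes on the same
// immediate, wasting 3 bytes.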
6142 (!op1->isUsedFromReg() || isByteReg(op1->gtRegNum)) &&
6144 (op2->IsCnsIntOrI() && genTypeCanRepresentValue(TYP_UBYTE, op2->AsIntCon()->IconValue())))
6149 else if (op1->isUsedFromReg() && op2->IsIntegralConst(0))
6151 // We're comparing a register to 0 so we can generate "test reg1, reg1"
6152 // instead of the longer "cmp reg1, 0"
6161 if (type == TYP_UNKNOWN)
6163 if (op1Type == op2Type)
6167 else if (genTypeSize(op1Type) == genTypeSize(op2Type))
6169 // If the types are different but have the same size then we'll use TYP_INT or TYP_LONG.
6170 // This primarily deals with small type mixes (e.g. byte/ubyte) that need to be widened
6171 // and compared as int. We should not get long type mixes here but handle that as well
6173 type = genTypeSize(op1Type) == 8 ? TYP_LONG : TYP_INT;
// If the types are different simply use TYP_INT. This deals with small type/int type
6178 // mixes (e.g. byte/short ubyte/int) that need to be widened and compared as int.
6179 // Lowering is expected to handle any mixes that involve long types (e.g. int/long).
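// For example (illustrative), comparing a TYP_BYTE value against a TYP_UBYTE
// value lands in the equal-size case above and picks TYP_INT: both operands
// were already widened, so a 4 byte cmp is correct and avoids partial-register
// comparisons.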
6183 // The common type cannot be smaller than any of the operand types, we're probably mixing int/long
6184 assert(genTypeSize(type) >= max(genTypeSize(op1Type), genTypeSize(op2Type)));
6185 // Small unsigned int types (TYP_BOOL can use anything) should use unsigned comparisons
6186 assert(!(varTypeIsSmallInt(type) && varTypeIsUnsigned(type)) || ((tree->gtFlags & GTF_UNSIGNED) != 0));
6187 // If op1 is smaller then it cannot be in memory, we're probably missing a cast
6188 assert((genTypeSize(op1Type) >= genTypeSize(type)) || !op1->isUsedFromMemory());
6189 // If op2 is smaller then it cannot be in memory, we're probably missing a cast
6190 assert((genTypeSize(op2Type) >= genTypeSize(type)) || !op2->isUsedFromMemory());
6191 // If op2 is a constant then it should fit in the common type
6192 assert(!op2->IsCnsIntOrI() || genTypeCanRepresentValue(type, op2->AsIntCon()->IconValue()));
6195 // The type cannot be larger than the machine word size
6196 assert(genTypeSize(type) <= genTypeSize(TYP_I_IMPL));
6197 // TYP_UINT and TYP_ULONG should not appear here, only small types can be unsigned
6198 assert(!varTypeIsUnsigned(type) || varTypeIsSmall(type));
6200 getEmitter()->emitInsBinary(ins, emitTypeSize(type), op1, op2);
6202 // Are we evaluating this into a register?
6203 if (targetReg != REG_NA)
6205 genSetRegToCond(targetReg, tree);
6206 genProduceReg(tree);
6210 //-------------------------------------------------------------------------------------------
6211 // genSetRegToCond: Set a register 'dstReg' to the appropriate one or zero value
6212 // corresponding to a binary Relational operator result.
6215 // dstReg - The target register to set to 1 or 0
6216 // tree - The GenTree Relop node that was used to set the Condition codes
6218 // Return Value: none
// A full 64-bit value of either 1 or 0 is set up in 'dstReg'
6222 //-------------------------------------------------------------------------------------------
6224 void CodeGen::genSetRegToCond(regNumber dstReg, GenTreePtr tree)
6226 noway_assert((genRegMask(dstReg) & RBM_BYTE_REGS) != 0);
6228 emitJumpKind jumpKind[2];
6229 bool branchToTrueLabel[2];
6230 genJumpKindsForTree(tree, jumpKind, branchToTrueLabel);
6232 if (jumpKind[1] == EJ_NONE)
6234 // Set (lower byte of) reg according to the flags
6235 inst_SET(jumpKind[0], dstReg);
// jmpKind[1] != EJ_NONE implies BEQ and BNE.UN of floating point values.
6241 // These are represented by two conditions.
6242 if (tree->gtOper == GT_EQ)
6244 // This must be an ordered comparison.
6245 assert((tree->gtFlags & GTF_RELOP_NAN_UN) == 0);
6249 // This must be BNE.UN
6250 assert((tree->gtOper == GT_NE) && ((tree->gtFlags & GTF_RELOP_NAN_UN) != 0));
// Here is the sample code generated in each case:
// BEQ ==  cmp, jpe <false label>, je <true label>
// That is, to materialize the comparison, reg needs to be set if PF=0 and ZF=1:
//      setnp reg  // if (PF==0) reg = 1 else reg = 0
//      jpe L1     // Jmp if PF==1
//      sete reg
//  L1:
//
// BNE.UN == cmp, jpe <true label>, jne <true label>
// That is, to materialize the comparison, reg needs to be set if either PF=1 or ZF=0:
//      setp reg
//      jpe L1
//      setne reg
//  L1:
// Reverse the jmpKind condition before setting dstReg if it branches to the false label.
6270 inst_SET(branchToTrueLabel[0] ? jumpKind[0] : emitter::emitReverseJumpKind(jumpKind[0]), dstReg);
6272 BasicBlock* label = genCreateTempLabel();
6273 inst_JMP(jumpKind[0], label);
6275 // second branch is always to true label
6276 assert(branchToTrueLabel[1]);
6277 inst_SET(jumpKind[1], dstReg);
6278 genDefineTempLabel(label);
6281 var_types treeType = tree->TypeGet();
6282 if (treeType == TYP_INT || treeType == TYP_LONG)
6284 // Set the higher bytes to 0
6285 inst_RV_RV(ins_Move_Extend(TYP_UBYTE, true), dstReg, dstReg, TYP_UBYTE, emitTypeSize(TYP_UBYTE));
6289 noway_assert(treeType == TYP_BYTE);
6293 #if !defined(_TARGET_64BIT_)
6294 //------------------------------------------------------------------------
6295 // genLongToIntCast: Generate code for long to int casts on x86.
6298 // cast - The GT_CAST node
6304 // The cast node and its sources (via GT_LONG) must have been assigned registers.
6305 // The destination cannot be a floating point type or a small integer type.
6307 void CodeGen::genLongToIntCast(GenTree* cast)
6309 assert(cast->OperGet() == GT_CAST);
6311 GenTree* src = cast->gtGetOp1();
6312 noway_assert(src->OperGet() == GT_LONG);
6314 genConsumeRegs(src);
6316 var_types srcType = ((cast->gtFlags & GTF_UNSIGNED) != 0) ? TYP_ULONG : TYP_LONG;
6317 var_types dstType = cast->CastToType();
6318 regNumber loSrcReg = src->gtGetOp1()->gtRegNum;
6319 regNumber hiSrcReg = src->gtGetOp2()->gtRegNum;
6320 regNumber dstReg = cast->gtRegNum;
6322 assert((dstType == TYP_INT) || (dstType == TYP_UINT));
6323 assert(genIsValidIntReg(loSrcReg));
6324 assert(genIsValidIntReg(hiSrcReg));
6325 assert(genIsValidIntReg(dstReg));
6327 if (cast->gtOverflow())
6330 // Generate an overflow check for [u]long to [u]int casts:
6332 // long -> int - check if the upper 33 bits are all 0 or all 1
6334 // ulong -> int - check if the upper 33 bits are all 0
6336 // long -> uint - check if the upper 32 bits are all 0
6337 // ulong -> uint - check if the upper 32 bits are all 0
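// For example (illustrative), a checked TYP_LONG -> TYP_INT cast of a value in
// edx:eax succeeds for 0x00000000_7FFFFFFF and 0xFFFFFFFF_80000000, but throws
// for 0x00000001_00000000, whose upper 33 bits are neither all 0 nor all 1.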
6340 if ((srcType == TYP_LONG) && (dstType == TYP_INT))
6342 BasicBlock* allOne = genCreateTempLabel();
6343 BasicBlock* success = genCreateTempLabel();
6345 inst_RV_RV(INS_test, loSrcReg, loSrcReg, TYP_INT, EA_4BYTE);
6346 inst_JMP(EJ_js, allOne);
6348 inst_RV_RV(INS_test, hiSrcReg, hiSrcReg, TYP_INT, EA_4BYTE);
6349 genJumpToThrowHlpBlk(EJ_jne, SCK_OVERFLOW);
6350 inst_JMP(EJ_jmp, success);
6352 genDefineTempLabel(allOne);
6353 inst_RV_IV(INS_cmp, hiSrcReg, -1, EA_4BYTE);
6354 genJumpToThrowHlpBlk(EJ_jne, SCK_OVERFLOW);
6356 genDefineTempLabel(success);
6360 if ((srcType == TYP_ULONG) && (dstType == TYP_INT))
6362 inst_RV_RV(INS_test, loSrcReg, loSrcReg, TYP_INT, EA_4BYTE);
6363 genJumpToThrowHlpBlk(EJ_js, SCK_OVERFLOW);
6366 inst_RV_RV(INS_test, hiSrcReg, hiSrcReg, TYP_INT, EA_4BYTE);
6367 genJumpToThrowHlpBlk(EJ_jne, SCK_OVERFLOW);
6371 if (dstReg != loSrcReg)
6373 inst_RV_RV(INS_mov, dstReg, loSrcReg, TYP_INT, EA_4BYTE);
6376 genProduceReg(cast);
6380 //------------------------------------------------------------------------
6381 // genIntToIntCast: Generate code for an integer cast
6382 // This method handles integer overflow checking casts
6383 // as well as ordinary integer casts.
6386 // treeNode - The GT_CAST node
6392 // The treeNode is not a contained node and must have an assigned register.
6393 // For a signed convert from byte, the source must be in a byte-addressable register.
6394 // Neither the source nor target type can be a floating point type.
6396 // TODO-XArch-CQ: Allow castOp to be a contained node without an assigned register.
6397 // TODO: refactor to use getCastDescription
6399 void CodeGen::genIntToIntCast(GenTreePtr treeNode)
6401 assert(treeNode->OperGet() == GT_CAST);
6403 GenTreePtr castOp = treeNode->gtCast.CastOp();
6404 var_types srcType = genActualType(castOp->TypeGet());
6405 noway_assert(genTypeSize(srcType) >= 4);
6408 if (varTypeIsLong(srcType))
6410 genLongToIntCast(treeNode);
6413 #endif // _TARGET_X86_
6415 regNumber targetReg = treeNode->gtRegNum;
6416 regNumber sourceReg = castOp->gtRegNum;
6417 var_types dstType = treeNode->CastToType();
6418 bool isUnsignedDst = varTypeIsUnsigned(dstType);
6419 bool isUnsignedSrc = varTypeIsUnsigned(srcType);
6421 // if necessary, force the srcType to unsigned when the GT_UNSIGNED flag is set
6422 if (!isUnsignedSrc && (treeNode->gtFlags & GTF_UNSIGNED) != 0)
6424 srcType = genUnsignedType(srcType);
6425 isUnsignedSrc = true;
6428 bool requiresOverflowCheck = false;
6430 assert(genIsValidIntReg(targetReg));
6431 assert(genIsValidIntReg(sourceReg));
6433 instruction ins = INS_invalid;
6434 emitAttr srcSize = EA_ATTR(genTypeSize(srcType));
6435 emitAttr dstSize = EA_ATTR(genTypeSize(dstType));
6437 if (srcSize < dstSize)
6440 // Is this an Overflow checking cast?
6441 // We only need to handle one case, as the other casts can never overflow.
6442 // cast from TYP_INT to TYP_ULONG
6444 if (treeNode->gtOverflow() && (srcType == TYP_INT) && (dstType == TYP_ULONG))
6446 requiresOverflowCheck = true;
6451 noway_assert(srcSize < EA_PTRSIZE);
6453 ins = ins_Move_Extend(srcType, castOp->InReg());
Special case: ins_Move_Extend assumes the destination type is no bigger
than TYP_INT. movsx and movzx can already extend all the way to
64-bit, and a regular 32-bit mov clears the high 32 bits (like the non-existent movzxd),
but for a sign extension from TYP_INT to TYP_LONG, we need to use the movsxd opcode.
6461 if (!isUnsignedSrc && !isUnsignedDst)
6464 NYI_X86("Cast to 64 bit for x86/RyuJIT");
6465 #else // !_TARGET_X86_
6467 #endif // !_TARGET_X86_
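// For example (illustrative), the widening moves selected here look like:
//     movsxd rax, ecx    ; TYP_INT  -> TYP_LONG  (sign extend)
//     mov    eax, ecx    ; TYP_UINT -> TYP_ULONG (32-bit mov zeros the high bits)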
6473 // Narrowing cast, or sign-changing cast
6474 noway_assert(srcSize >= dstSize);
6476 // Is this an Overflow checking cast?
6477 if (treeNode->gtOverflow())
6479 requiresOverflowCheck = true;
6484 ins = ins_Move_Extend(dstType, castOp->InReg());
6488 noway_assert(ins != INS_invalid);
6490 genConsumeReg(castOp);
6492 if (requiresOverflowCheck)
6494 ssize_t typeMin = 0;
6495 ssize_t typeMax = 0;
6496 ssize_t typeMask = 0;
6497 bool needScratchReg = false;
6498 bool signCheckOnly = false;
6500 /* Do we need to compare the value, or just check masks */
6505 typeMask = ssize_t((int)0xFFFFFF80);
6506 typeMin = SCHAR_MIN;
6507 typeMax = SCHAR_MAX;
6511 typeMask = ssize_t((int)0xFFFFFF00L);
6515 typeMask = ssize_t((int)0xFFFF8000);
6521 typeMask = ssize_t((int)0xFFFF0000L);
6525 if (srcType == TYP_UINT)
6527 signCheckOnly = true;
6531 typeMask = ssize_t((int)0x80000000);
6538 if (srcType == TYP_INT)
6540 signCheckOnly = true;
6544 needScratchReg = true;
6549 noway_assert(srcType == TYP_ULONG);
6550 signCheckOnly = true;
6554 noway_assert((srcType == TYP_LONG) || (srcType == TYP_INT));
6555 signCheckOnly = true;
6559 NO_WAY("Unknown type");
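// (Illustrative reading of the cases above: each typeMask covers the bits that
// must all be zero for an unsigned fit, e.g. 0xFFFFFF00 for a cast to TYP_UBYTE,
// while typeMin/typeMax bound the signed ranges, e.g. SCHAR_MIN..SCHAR_MAX for
// TYP_BYTE.)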
6565 // We only need to check for a negative value in sourceReg
6566 inst_RV_IV(INS_cmp, sourceReg, 0, srcSize);
6567 genJumpToThrowHlpBlk(EJ_jl, SCK_OVERFLOW);
6571 // When we are converting from unsigned or to unsigned, we
6572 // will only have to check for any bits set using 'typeMask'
6573 if (isUnsignedSrc || isUnsignedDst)
6577 regNumber tmpReg = treeNode->GetSingleTempReg();
6578 inst_RV_RV(INS_mov, tmpReg, sourceReg, TYP_LONG); // Move the 64-bit value to a writeable temp reg
6579 inst_RV_SH(INS_SHIFT_RIGHT_LOGICAL, srcSize, tmpReg, 32); // Shift right by 32 bits
genJumpToThrowHlpBlk(EJ_jne, SCK_OVERFLOW); // Throw if the shifted-out upper 32 bits are non-zero
6584 noway_assert(typeMask != 0);
6585 inst_RV_IV(INS_TEST, sourceReg, typeMask, srcSize);
6586 genJumpToThrowHlpBlk(EJ_jne, SCK_OVERFLOW);
6591 // For a narrowing signed cast
6593 // We must check the value is in a signed range.
6595 // Compare with the MAX
6597 noway_assert((typeMin != 0) && (typeMax != 0));
6599 inst_RV_IV(INS_cmp, sourceReg, typeMax, srcSize);
6600 genJumpToThrowHlpBlk(EJ_jg, SCK_OVERFLOW);
6602 // Compare with the MIN
6604 inst_RV_IV(INS_cmp, sourceReg, typeMin, srcSize);
6605 genJumpToThrowHlpBlk(EJ_jl, SCK_OVERFLOW);
6609 if (targetReg != sourceReg
6610 #ifdef _TARGET_AMD64_
6611 // On amd64, we can hit this path for a same-register
6612 // 4-byte to 8-byte widening conversion, and need to
6613 // emit the instruction to set the high bits correctly.
6614 || (dstSize == EA_8BYTE && srcSize == EA_4BYTE)
6615 #endif // _TARGET_AMD64_
6617 inst_RV_RV(ins, targetReg, sourceReg, srcType, srcSize);
6619 else // non-overflow checking cast
6621 // We may have code transformations that result in casts where srcType is the same as dstType.
6622 // e.g. Bug 824281, in which a comma is split by the rationalizer, leaving an assignment of a
6623 // long constant to a long lclVar.
6624 if (srcType == dstType)
6628 /* Is the value sitting in a non-byte-addressable register? */
6629 else if (castOp->InReg() && (dstSize == EA_1BYTE) && !isByteReg(sourceReg))
6633 // for unsigned values we can AND, so it need not be a byte register
6638 // Move the value into a byte register
6639 noway_assert(!"Signed byte convert from non-byte-addressable register");
/* Generate "mov targetReg, castOp->gtReg" */
6643 if (targetReg != sourceReg)
6645 inst_RV_RV(INS_mov, targetReg, sourceReg, srcType, srcSize);
6651 noway_assert(isUnsignedDst);
/* Generate "and reg, MASK" */
unsigned fillPattern;
if (dstSize == EA_1BYTE)
    fillPattern = 0xff;
6659 else if (dstSize == EA_2BYTE)
6661 fillPattern = 0xffff;
6665 fillPattern = 0xffffffff;
6668 inst_RV_IV(INS_AND, targetReg, fillPattern, EA_4BYTE);
6670 #ifdef _TARGET_AMD64_
6671 else if (ins == INS_movsxd)
6673 inst_RV_RV(ins, targetReg, sourceReg, srcType, srcSize);
6675 #endif // _TARGET_AMD64_
6676 else if (ins == INS_mov)
6678 if (targetReg != sourceReg
6679 #ifdef _TARGET_AMD64_
6680 // On amd64, 'mov' is the opcode used to zero-extend from
6681 // 4 bytes to 8 bytes.
6682 || (dstSize == EA_8BYTE && srcSize == EA_4BYTE)
6683 #endif // _TARGET_AMD64_
6686 inst_RV_RV(ins, targetReg, sourceReg, srcType, srcSize);
6691 noway_assert(ins == INS_movsx || ins == INS_movzx);
6692 noway_assert(srcSize >= dstSize);
/* Generate "mov targetReg, castOp->gtReg" */
6695 inst_RV_RV(ins, targetReg, sourceReg, srcType, dstSize);
6699 genProduceReg(treeNode);
6702 //------------------------------------------------------------------------
6703 // genFloatToFloatCast: Generate code for a cast between float and double
6706 // treeNode - The GT_CAST node
6712 // Cast is a non-overflow conversion.
6713 // The treeNode must have an assigned register.
6714 // The cast is between float and double or vice versa.
6716 void CodeGen::genFloatToFloatCast(GenTreePtr treeNode)
6718 // float <--> double conversions are always non-overflow ones
6719 assert(treeNode->OperGet() == GT_CAST);
6720 assert(!treeNode->gtOverflow());
6722 regNumber targetReg = treeNode->gtRegNum;
6723 assert(genIsValidFloatReg(targetReg));
6725 GenTreePtr op1 = treeNode->gtOp.gtOp1;
6727 // If not contained, must be a valid float reg.
6728 if (op1->isUsedFromReg())
6730 assert(genIsValidFloatReg(op1->gtRegNum));
6734 var_types dstType = treeNode->CastToType();
6735 var_types srcType = op1->TypeGet();
6736 assert(varTypeIsFloating(srcType) && varTypeIsFloating(dstType));
6738 genConsumeOperands(treeNode->AsOp());
6739 if (srcType == dstType && (op1->isUsedFromReg() && (targetReg == op1->gtRegNum)))
// The source and destination types are the same and the value already resides in the target register;
// we just need to consume and produce the reg in this case.
6747 instruction ins = ins_FloatConv(dstType, srcType);
6748 getEmitter()->emitInsBinary(ins, emitTypeSize(dstType), treeNode, op1);
6751 genProduceReg(treeNode);
6754 //------------------------------------------------------------------------
6755 // genIntToFloatCast: Generate code to cast an int/long to float/double
6758 // treeNode - The GT_CAST node
6764 // Cast is a non-overflow conversion.
6765 // The treeNode must have an assigned register.
6766 // SrcType= int32/uint32/int64/uint64 and DstType=float/double.
6768 void CodeGen::genIntToFloatCast(GenTreePtr treeNode)
6770 // int type --> float/double conversions are always non-overflow ones
6771 assert(treeNode->OperGet() == GT_CAST);
6772 assert(!treeNode->gtOverflow());
6774 regNumber targetReg = treeNode->gtRegNum;
6775 assert(genIsValidFloatReg(targetReg));
6777 GenTreePtr op1 = treeNode->gtOp.gtOp1;
6779 if (op1->isUsedFromReg())
6781 assert(genIsValidIntReg(op1->gtRegNum));
6785 var_types dstType = treeNode->CastToType();
6786 var_types srcType = op1->TypeGet();
6787 assert(!varTypeIsFloating(srcType) && varTypeIsFloating(dstType));
6789 #if !defined(_TARGET_64BIT_)
6790 // We expect morph to replace long to float/double casts with helper calls
6791 noway_assert(!varTypeIsLong(srcType));
6792 #endif // !defined(_TARGET_64BIT_)
// Since the xarch emitter doesn't handle reporting gc-info correctly while casting away gc-ness, we
// ensure that the srcType of a cast is a non gc-type. Codegen should never see BYREF as a source type except
// for GT_LCL_VAR_ADDR and GT_LCL_FLD_ADDR, which represent stack addresses and can be considered
// as TYP_I_IMPL. In all other cases where the src operand is a gc-type and not known to be on the stack,
// the front-end (see fgMorphCast()) ensures this by assigning the gc-type local to a non gc-type
// temp and using the temp as the operand of the cast operation.
6800 if (srcType == TYP_BYREF)
6802 noway_assert(op1->OperGet() == GT_LCL_VAR_ADDR || op1->OperGet() == GT_LCL_FLD_ADDR);
6803 srcType = TYP_I_IMPL;
6806 // force the srcType to unsigned if GT_UNSIGNED flag is set
6807 if (treeNode->gtFlags & GTF_UNSIGNED)
6809 srcType = genUnsignedType(srcType);
6812 noway_assert(!varTypeIsGC(srcType));
// We should never see a srcType whose size is neither sizeof(int) nor sizeof(long).
6815 // For conversions from byte/sbyte/int16/uint16 to float/double, we would expect
6816 // either the front-end or lowering phase to have generated two levels of cast.
6817 // The first one is for widening smaller int type to int32 and the second one is
6818 // to the float/double.
6819 emitAttr srcSize = EA_ATTR(genTypeSize(srcType));
6820 noway_assert((srcSize == EA_ATTR(genTypeSize(TYP_INT))) || (srcSize == EA_ATTR(genTypeSize(TYP_LONG))));
6822 // Also we don't expect to see uint32 -> float/double and uint64 -> float conversions
// here since they should have been lowered appropriately.
6824 noway_assert(srcType != TYP_UINT);
6825 noway_assert((srcType != TYP_ULONG) || (dstType != TYP_FLOAT));
// To convert an int to a float/double, the cvtsi2ss/sd SSE2 instruction is used,
// which does a partial write to the lower 4/8 bytes of the xmm register, keeping the
// upper bytes unmodified. If "cvtsi2ss/sd xmmReg, r32/r64" occurs inside a loop,
// the partial write could introduce a false dependency and could cause a stall
// if there are further uses of xmmReg. We have such a case occurring with a
// customer-reported version of the SpectralNorm benchmark, resulting in a 2x perf
// regression. To avoid the false dependency, we emit "xorps xmmReg, xmmReg" before
// the cvtsi2ss/sd instruction.
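// For example (illustrative), converting an int in ecx to a double in xmm0 emits:
//     xorps    xmm0, xmm0      ; break the false dependency on xmm0
//     cvtsi2sd xmm0, ecx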
6836 genConsumeOperands(treeNode->AsOp());
6837 getEmitter()->emitIns_R_R(INS_xorps, EA_4BYTE, treeNode->gtRegNum, treeNode->gtRegNum);
6839 // Note that here we need to specify srcType that will determine
6840 // the size of source reg/mem operand and rex.w prefix.
6841 instruction ins = ins_FloatConv(dstType, TYP_INT);
6842 getEmitter()->emitInsBinary(ins, emitTypeSize(srcType), treeNode, op1);
6844 // Handle the case of srcType = TYP_ULONG. SSE2 conversion instruction
6845 // will interpret ULONG value as LONG. Hence we need to adjust the
6846 // result if sign-bit of srcType is set.
6847 if (srcType == TYP_ULONG)
// The instruction sequence below is less accurate than what clang
// and gcc generate. However, we keep the current sequence for backward compatibility.
// If we change the instructions below, FloatingPointUtils::convertUInt64ToDouble
// should also be updated for a consistent conversion result.
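// (Illustrative background: cvtsi2sd interprets the ULONG bit pattern as a signed
// LONG, so when the sign bit is set the result is exactly 2^64 too small.
// 0x43f0000000000000 is the IEEE-754 double encoding of 2^64, so a single addsd
// corrects the result.)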
6853 assert(dstType == TYP_DOUBLE);
6854 assert(op1->isUsedFromReg());
6856 // Set the flags without modifying op1.
6857 // test op1Reg, op1Reg
6858 inst_RV_RV(INS_test, op1->gtRegNum, op1->gtRegNum, srcType);
6860 // No need to adjust result if op1 >= 0 i.e. positive
6862 BasicBlock* label = genCreateTempLabel();
6863 inst_JMP(EJ_jge, label);
6865 // Adjust the result
6866 // result = result + 0x43f00000 00000000
6867 // addsd resultReg, 0x43f00000 00000000
6868 GenTreePtr* cns = &u8ToDblBitmask;
6869 if (*cns == nullptr)
6872 static_assert_no_msg(sizeof(double) == sizeof(__int64));
6873 *((__int64*)&d) = 0x43f0000000000000LL;
6875 *cns = genMakeConst(&d, dstType, treeNode, true);
6877 inst_RV_TT(INS_addsd, treeNode->gtRegNum, *cns);
6879 genDefineTempLabel(label);
6882 genProduceReg(treeNode);
6885 //------------------------------------------------------------------------
6886 // genFloatToIntCast: Generate code to cast float/double to int/long
6889 // treeNode - The GT_CAST node
6895 // Cast is a non-overflow conversion.
6896 // The treeNode must have an assigned register.
6897 // SrcType=float/double and DstType= int32/uint32/int64/uint64
6899 // TODO-XArch-CQ: (Low-pri) - generate in-line code when DstType = uint64
6901 void CodeGen::genFloatToIntCast(GenTreePtr treeNode)
6903 // we don't expect to see overflow detecting float/double --> int type conversions here
// as they should have been converted into helper calls by the front-end.
6905 assert(treeNode->OperGet() == GT_CAST);
6906 assert(!treeNode->gtOverflow());
6908 regNumber targetReg = treeNode->gtRegNum;
6909 assert(genIsValidIntReg(targetReg));
6911 GenTreePtr op1 = treeNode->gtOp.gtOp1;
6913 if (op1->isUsedFromReg())
6915 assert(genIsValidFloatReg(op1->gtRegNum));
6919 var_types dstType = treeNode->CastToType();
6920 var_types srcType = op1->TypeGet();
6921 assert(varTypeIsFloating(srcType) && !varTypeIsFloating(dstType));
6923 // We should never be seeing dstType whose size is neither sizeof(TYP_INT) nor sizeof(TYP_LONG).
6924 // For conversions to byte/sbyte/int16/uint16 from float/double, we would expect the
6925 // front-end or lowering phase to have generated two levels of cast. The first one is
6926 // for float or double to int32/uint32 and the second one for narrowing int32/uint32 to
6927 // the required smaller int type.
6928 emitAttr dstSize = EA_ATTR(genTypeSize(dstType));
6929 noway_assert((dstSize == EA_ATTR(genTypeSize(TYP_INT))) || (dstSize == EA_ATTR(genTypeSize(TYP_LONG))));
6931 // We shouldn't be seeing uint64 here as it should have been converted
6932 // into a helper call by either front-end or lowering phase.
6933 noway_assert(!varTypeIsUnsigned(dstType) || (dstSize != EA_ATTR(genTypeSize(TYP_LONG))));
// If the dstType is TYP_UINT, we have 32 bits to encode the
// float number, and any of the 33rd or higher bits can act as the sign bit.
// To achieve this we pretend we are converting the value to a long.
6938 if (varTypeIsUnsigned(dstType) && (dstSize == EA_ATTR(genTypeSize(TYP_INT))))
6943 // Note that we need to specify dstType here so that it will determine
6944 // the size of destination integer register and also the rex.w prefix.
6945 genConsumeOperands(treeNode->AsOp());
6946 instruction ins = ins_FloatConv(TYP_INT, srcType);
6947 getEmitter()->emitInsBinary(ins, emitTypeSize(dstType), treeNode, op1);
6948 genProduceReg(treeNode);
6951 //------------------------------------------------------------------------
6952 // genCkfinite: Generate code for ckfinite opcode.
6955 // treeNode - The GT_CKFINITE node
6961 // GT_CKFINITE node has reserved an internal register.
6963 // TODO-XArch-CQ - mark the operand as contained if known to be in
6964 // memory (e.g. field or an array element).
6966 void CodeGen::genCkfinite(GenTreePtr treeNode)
6968 assert(treeNode->OperGet() == GT_CKFINITE);
6970 GenTreePtr op1 = treeNode->gtOp.gtOp1;
6971 var_types targetType = treeNode->TypeGet();
6972 int expMask = (targetType == TYP_FLOAT) ? 0x7F800000 : 0x7FF00000; // Bit mask to extract exponent.
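// (Illustrative: in IEEE-754 an all-ones exponent encodes infinity or NaN, so
// comparing the masked exponent against expMask detects non-finite values.
// 0x7F800000 covers float's 8 exponent bits; 0x7FF00000 covers the 11 exponent
// bits in the high 32 bits of a double.)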
6973 regNumber targetReg = treeNode->gtRegNum;
6975 // Extract exponent into a register.
6976 regNumber tmpReg = treeNode->GetSingleTempReg();
6980 #ifdef _TARGET_64BIT_
6982 // Copy the floating-point value to an integer register. If we copied a float to a long, then
6983 // right-shift the value so the high 32 bits of the floating-point value sit in the low 32
6984 // bits of the integer register.
6985 instruction ins = ins_CopyFloatToInt(targetType, (targetType == TYP_FLOAT) ? TYP_INT : TYP_LONG);
6986 inst_RV_RV(ins, op1->gtRegNum, tmpReg, targetType);
6987 if (targetType == TYP_DOUBLE)
6989 // right shift by 32 bits to get to exponent.
6990 inst_RV_SH(INS_shr, EA_8BYTE, tmpReg, 32);
6993 // Mask exponent with all 1's and check if the exponent is all 1's
6994 inst_RV_IV(INS_and, tmpReg, expMask, EA_4BYTE);
6995 inst_RV_IV(INS_cmp, tmpReg, expMask, EA_4BYTE);
6997 // If exponent is all 1's, throw ArithmeticException
6998 genJumpToThrowHlpBlk(EJ_je, SCK_ARITH_EXCPN);
// If it is a finite value, copy it to targetReg.
7001 if (targetReg != op1->gtRegNum)
7003 inst_RV_RV(ins_Copy(targetType), targetReg, op1->gtRegNum, targetType);
7006 #else // !_TARGET_64BIT_
7008 // If the target type is TYP_DOUBLE, we want to extract the high 32 bits into the register.
7009 // There is no easy way to do this. To not require an extra register, we'll use shuffles
// to move the high 32 bits into the low 32 bits, then shuffle it back, since we
7011 // need to produce the value into the target register.
7013 // For TYP_DOUBLE, we'll generate (for targetReg != op1->gtRegNum):
7014 // movaps targetReg, op1->gtRegNum
7015 // shufps targetReg, targetReg, 0xB1 // WZYX => ZWXY
7016 // mov_xmm2i tmpReg, targetReg // tmpReg <= Y
7017 // and tmpReg, <mask>
7018 // cmp tmpReg, <mask>
7020 // movaps targetReg, op1->gtRegNum // copy the value again, instead of un-shuffling it
7022 // For TYP_DOUBLE with (targetReg == op1->gtRegNum):
7023 // shufps targetReg, targetReg, 0xB1 // WZYX => ZWXY
7024 // mov_xmm2i tmpReg, targetReg // tmpReg <= Y
7025 // and tmpReg, <mask>
7026 // cmp tmpReg, <mask>
7028 // shufps targetReg, targetReg, 0xB1 // ZWXY => WZYX
7030 // For TYP_FLOAT, it's the same as _TARGET_64BIT_:
7031 // mov_xmm2i tmpReg, targetReg // tmpReg <= low 32 bits
7032 // and tmpReg, <mask>
7033 // cmp tmpReg, <mask>
7035 // movaps targetReg, op1->gtRegNum // only if targetReg != op1->gtRegNum
7037 regNumber copyToTmpSrcReg; // The register we'll copy to the integer temp.
7039 if (targetType == TYP_DOUBLE)
7041 if (targetReg != op1->gtRegNum)
7043 inst_RV_RV(ins_Copy(targetType), targetReg, op1->gtRegNum, targetType);
7045 inst_RV_RV_IV(INS_shufps, EA_16BYTE, targetReg, targetReg, 0xb1);
7046 copyToTmpSrcReg = targetReg;
7050 copyToTmpSrcReg = op1->gtRegNum;
7053 // Copy only the low 32 bits. This will be the high order 32 bits of the floating-point
7054 // value, no matter the floating-point type.
7055 inst_RV_RV(ins_CopyFloatToInt(TYP_FLOAT, TYP_INT), copyToTmpSrcReg, tmpReg, TYP_FLOAT);
7057 // Mask exponent with all 1's and check if the exponent is all 1's
7058 inst_RV_IV(INS_and, tmpReg, expMask, EA_4BYTE);
7059 inst_RV_IV(INS_cmp, tmpReg, expMask, EA_4BYTE);
7061 // If exponent is all 1's, throw ArithmeticException
7062 genJumpToThrowHlpBlk(EJ_je, SCK_ARITH_EXCPN);
7064 if (targetReg != op1->gtRegNum)
7066 // In both the TYP_FLOAT and TYP_DOUBLE case, the op1 register is untouched,
7067 // so copy it to the targetReg. This is faster and smaller for TYP_DOUBLE
7068 // than re-shuffling the targetReg.
7069 inst_RV_RV(ins_Copy(targetType), targetReg, op1->gtRegNum, targetType);
7071 else if (targetType == TYP_DOUBLE)
7073 // We need to re-shuffle the targetReg to get the correct result.
7074 inst_RV_RV_IV(INS_shufps, EA_16BYTE, targetReg, targetReg, 0xb1);
7077 #endif // !_TARGET_64BIT_
7079 genProduceReg(treeNode);
7082 #ifdef _TARGET_AMD64_
7083 int CodeGenInterface::genSPtoFPdelta()
7087 #ifdef UNIX_AMD64_ABI
7089 // We require frame chaining on Unix to support native tool unwinding (such as
7090 // unwinding by the native debugger). We have a CLR-only extension to the
7091 // unwind codes (UWOP_SET_FPREG_LARGE) to support SP->FP offsets larger than 240.
7092 // If Unix ever supports EnC, the RSP == RBP assumption will have to be reevaluated.
7093 delta = genTotalFrameSize();
7095 #else // !UNIX_AMD64_ABI
7097 // As per Amd64 ABI, RBP offset from initial RSP can be between 0 and 240 if
// RBP needs to be reported in unwind codes. This case would arise for methods
// with localloc.
if (compiler->compLocallocUsed)
7102 // We cannot base delta computation on compLclFrameSize since it changes from
7103 // tentative to final frame layout and hence there is a possibility of
7104 // under-estimating offset of vars from FP, which in turn results in under-
7105 // estimating instruction size.
7107 // To be predictive and so as never to under-estimate offset of vars from FP
7108 // we will always position FP at min(240, outgoing arg area size).
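// For example (illustrative), with localloc and a 0x100 byte outgoing arg area,
// FP is positioned 240 bytes above SP (the unwind-code limit); with a 0x40 byte
// outgoing arg area it is positioned just 0x40 bytes above SP.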
7109 delta = Min(240, (int)compiler->lvaOutgoingArgSpaceSize);
7111 else if (compiler->opts.compDbgEnC)
// The VM's assumption for EnC methods is that rsp and rbp are equal.
7118 delta = genTotalFrameSize();
7121 #endif // !UNIX_AMD64_ABI
7126 //---------------------------------------------------------------------
7127 // genTotalFrameSize - return the total size of the stack frame, including local size,
// callee-saved register size, etc. For AMD64, this does not include the caller-pushed
// return address.
7135 int CodeGenInterface::genTotalFrameSize()
7137 assert(!IsUninitialized(compiler->compCalleeRegsPushed));
7139 int totalFrameSize = compiler->compCalleeRegsPushed * REGSIZE_BYTES + compiler->compLclFrameSize;
7141 assert(totalFrameSize >= 0);
7142 return totalFrameSize;
7145 //---------------------------------------------------------------------
7146 // genCallerSPtoFPdelta - return the offset from Caller-SP to the frame pointer.
7147 // This number is going to be negative, since the Caller-SP is at a higher
7148 // address than the frame pointer.
7150 // There must be a frame pointer to call this function!
7152 // We can't compute this directly from the Caller-SP, since the frame pointer
7153 // is based on a maximum delta from Initial-SP, so first we find SP, then
7154 // compute the FP offset.
7156 int CodeGenInterface::genCallerSPtoFPdelta()
7158 assert(isFramePointerUsed());
7159 int callerSPtoFPdelta;
7161 callerSPtoFPdelta = genCallerSPtoInitialSPdelta() + genSPtoFPdelta();
7163 assert(callerSPtoFPdelta <= 0);
7164 return callerSPtoFPdelta;
7167 //---------------------------------------------------------------------
7168 // genCallerSPtoInitialSPdelta - return the offset from Caller-SP to Initial SP.
7170 // This number will be negative.
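// For example (illustrative), with two callee-saved pushes (16 bytes), a 0x28
// byte local frame, and a frame pointer, the delta is
// -(0x38 + 8 + 8) = -0x48: total frame size, return address, then the rbp push.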
7172 int CodeGenInterface::genCallerSPtoInitialSPdelta()
7174 int callerSPtoSPdelta = 0;
7176 callerSPtoSPdelta -= genTotalFrameSize();
7177 callerSPtoSPdelta -= REGSIZE_BYTES; // caller-pushed return address
7179 // compCalleeRegsPushed does not account for the frame pointer
7180 // TODO-Cleanup: shouldn't this be part of genTotalFrameSize?
7181 if (isFramePointerUsed())
7183 callerSPtoSPdelta -= REGSIZE_BYTES;
7186 assert(callerSPtoSPdelta <= 0);
7187 return callerSPtoSPdelta;
7189 #endif // _TARGET_AMD64_
7191 //-----------------------------------------------------------------------------------------
7192 // genSSE2BitwiseOp - generate SSE2 code for the given oper as "Operand BitWiseOp BitMask"
7195 // treeNode - tree node
7201 // i) tree oper is one of GT_NEG or GT_INTRINSIC Abs()
7202 // ii) tree type is floating point type.
7203 // iii) caller of this routine needs to call genProduceReg()
7204 void CodeGen::genSSE2BitwiseOp(GenTreePtr treeNode)
7206 regNumber targetReg = treeNode->gtRegNum;
7207 var_types targetType = treeNode->TypeGet();
7208 assert(varTypeIsFloating(targetType));
7212 GenTreePtr* bitMask = nullptr;
7213 instruction ins = INS_invalid;
7214 void* cnsAddr = nullptr;
7215 bool dblAlign = false;
7217 switch (treeNode->OperGet())
7220 // Neg(x) = flip the sign bit.
7221 // Neg(f) = f ^ 0x80000000
7222 // Neg(d) = d ^ 0x8000000000000000
7223 ins = genGetInsForOper(GT_XOR, targetType);
7224 if (targetType == TYP_FLOAT)
7226 bitMask = &negBitmaskFlt;
7228 static_assert_no_msg(sizeof(float) == sizeof(int));
7229 *((int*)&f) = 0x80000000;
7234 bitMask = &negBitmaskDbl;
7236 static_assert_no_msg(sizeof(double) == sizeof(__int64));
7237 *((__int64*)&d) = 0x8000000000000000LL;
7244 assert(treeNode->gtIntrinsic.gtIntrinsicId == CORINFO_INTRINSIC_Abs);
7246 // Abs(x) = set sign-bit to zero
7247 // Abs(f) = f & 0x7fffffff
7248 // Abs(d) = d & 0x7fffffffffffffff
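// (Illustrative: the Abs masks differ from the GT_NEG masks only in the sign
// bit: 0x7fffffff == ~0x80000000, so the AND clears exactly the IEEE-754 sign
// bit that the XOR above flips.)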
7249 ins = genGetInsForOper(GT_AND, targetType);
7250 if (targetType == TYP_FLOAT)
7252 bitMask = &absBitmaskFlt;
7254 static_assert_no_msg(sizeof(float) == sizeof(int));
7255 *((int*)&f) = 0x7fffffff;
7260 bitMask = &absBitmaskDbl;
7262 static_assert_no_msg(sizeof(double) == sizeof(__int64));
7263 *((__int64*)&d) = 0x7fffffffffffffffLL;
7270 assert(!"genSSE2: unsupported oper");
7275 if (*bitMask == nullptr)
7277 assert(cnsAddr != nullptr);
7278 *bitMask = genMakeConst(cnsAddr, targetType, treeNode, dblAlign);
7281 // We need an additional register for bitmask.
7282 regNumber tmpReg = treeNode->GetSingleTempReg();
// Move the operand into targetReg only if the register reserved for
// internal purposes is not the same as targetReg.
7286 GenTreePtr op1 = treeNode->gtOp.gtOp1;
7287 assert(op1->isUsedFromReg());
7288 regNumber operandReg = genConsumeReg(op1);
7289 if (tmpReg != targetReg)
7291 if (operandReg != targetReg)
7293 inst_RV_RV(ins_Copy(targetType), targetReg, operandReg, targetType);
7296 operandReg = tmpReg;
7299 inst_RV_TT(ins_Load(targetType, false), tmpReg, *bitMask);
7300 assert(ins != INS_invalid);
7301 inst_RV_RV(ins, targetReg, operandReg, targetType);
7304 //---------------------------------------------------------------------
7305 // genIntrinsic - generate code for a given intrinsic
7308 // treeNode - the GT_INTRINSIC node
7313 void CodeGen::genIntrinsic(GenTreePtr treeNode)
7315 // Right now only Sqrt/Abs are treated as math intrinsics.
7316 switch (treeNode->gtIntrinsic.gtIntrinsicId)
7318 case CORINFO_INTRINSIC_Sqrt:
7320 // Both operand and its result must be of the same floating point type.
7321 GenTreePtr srcNode = treeNode->gtOp.gtOp1;
7322 assert(varTypeIsFloating(srcNode));
7323 assert(srcNode->TypeGet() == treeNode->TypeGet());
7325 genConsumeOperands(treeNode->AsOp());
7326 getEmitter()->emitInsBinary(ins_FloatSqrt(treeNode->TypeGet()), emitTypeSize(treeNode), treeNode, srcNode);
7330 case CORINFO_INTRINSIC_Abs:
7331 genSSE2BitwiseOp(treeNode);
7335 assert(!"genIntrinsic: Unsupported intrinsic");
7339 genProduceReg(treeNode);
//---------------------------------------------------------------------
7343 // getBaseVarForPutArgStk - returns the baseVarNum for passing a stack arg.
7346 // treeNode - the GT_PUTARG_STK node
7349 // The number of the base variable.
// For a fast tail call, the outgoing args are placed in the caller's incoming arg stack space.
// Otherwise, they go in the outgoing arg area on the current frame.
7355 // On Windows the caller always creates slots (homing space) in its frame for the
7356 // first 4 arguments of a callee (register passed args). So, the baseVarNum is always 0.
7357 // For System V systems there is no such calling convention requirement, and the code needs to find
7358 // the first stack passed argument from the caller. This is done by iterating over
// all the lvParam variables and finding the first with lvArgReg equal to REG_STK.
7361 unsigned CodeGen::getBaseVarForPutArgStk(GenTreePtr treeNode)
7363 assert(treeNode->OperGet() == GT_PUTARG_STK);
7365 unsigned baseVarNum;
// Do we set up the stk arg in the incoming or the out-going arg area?
// Fast tail calls implemented as epilog+jmp: the stk arg is set up in the incoming arg area.
// All other calls: the stk arg is set up in the out-going arg area.
7370 if (treeNode->AsPutArgStk()->putInIncomingArgArea())
7372 // See the note in the function header re: finding the first stack passed argument.
7373 baseVarNum = getFirstArgWithStackSlot();
7374 assert(baseVarNum != BAD_VAR_NUM);
7377 // This must be a fast tail call.
7378 assert(treeNode->AsPutArgStk()->gtCall->AsCall()->IsFastTailCall());
// Since it is a fast tail call, the existence of the first incoming arg is guaranteed
// because a fast tail call requires that the caller's incoming arg area be >= the
// outgoing arg area required for the tail call.
7383 LclVarDsc* varDsc = &(compiler->lvaTable[baseVarNum]);
7384 assert(varDsc != nullptr);
7386 #ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
7387 assert(!varDsc->lvIsRegArg && varDsc->lvArgReg == REG_STK);
7388 #else // !FEATURE_UNIX_AMD64_STRUCT_PASSING
7389 // On Windows this assert is always true. The first argument will always be in REG_ARG_0 or REG_FLTARG_0.
7390 assert(varDsc->lvIsRegArg && (varDsc->lvArgReg == REG_ARG_0 || varDsc->lvArgReg == REG_FLTARG_0));
7391 #endif // !FEATURE_UNIX_AMD64_STRUCT_PASSING
7396 #if FEATURE_FIXED_OUT_ARGS
7397 baseVarNum = compiler->lvaOutgoingArgSpaceVar;
7398 #else // !FEATURE_FIXED_OUT_ARGS
7399 assert(!"No BaseVarForPutArgStk on x86");
7400 baseVarNum = BAD_VAR_NUM;
7401 #endif // !FEATURE_FIXED_OUT_ARGS
7407 //---------------------------------------------------------------------
7408 // genAlignStackBeforeCall: Align the stack if necessary before a call.
7411 // putArgStk - the putArgStk node.
7413 void CodeGen::genAlignStackBeforeCall(GenTreePutArgStk* putArgStk)
7415 #if defined(UNIX_X86_ABI)
7417 genAlignStackBeforeCall(putArgStk->gtCall);
7419 #endif // UNIX_X86_ABI
7422 //---------------------------------------------------------------------
7423 // genAlignStackBeforeCall: Align the stack if necessary before a call.
7426 // call - the call node.
7428 void CodeGen::genAlignStackBeforeCall(GenTreeCall* call)
7430 #if defined(UNIX_X86_ABI)
7432 // Have we aligned the stack yet?
7433 if (!call->fgArgInfo->IsStkAlignmentDone())
7435 // We haven't done any stack alignment yet for this call. We might need to create
7436 // an alignment adjustment, even if this function itself doesn't have any stack args.
7437 // This can happen if this function call is part of a nested call sequence, and the outer
7438 // call has already pushed some arguments.
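// For example (illustrative), if an outer call has already pushed 8 bytes and
// this call will push 4 more, a 16 byte ABI alignment requires a 4 byte pad:
//     sub esp, 4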
7440 unsigned stkLevel = genStackLevel + call->fgArgInfo->GetStkSizeBytes();
7441 call->fgArgInfo->ComputeStackAlignment(stkLevel);
7443 unsigned padStkAlign = call->fgArgInfo->GetStkAlign();
7444 if (padStkAlign != 0)
7446 // Now generate the alignment
7447 inst_RV_IV(INS_sub, REG_SPBASE, padStkAlign, EA_PTRSIZE);
7448 AddStackLevel(padStkAlign);
7449 AddNestedAlignment(padStkAlign);
7452 call->fgArgInfo->SetStkAlignmentDone();
7455 #endif // UNIX_X86_ABI
7458 //---------------------------------------------------------------------
7459 // genRemoveAlignmentAfterCall: After a call, remove the alignment
7460 // added before the call, if any.
7463 // call - the call node.
7464 // bias - additional stack adjustment
// When bias > 0, the caller should adjust the stack level appropriately, as
// the bias is not considered when adjusting the stack level here.
7470 void CodeGen::genRemoveAlignmentAfterCall(GenTreeCall* call, unsigned bias)
7472 #if defined(_TARGET_X86_)
7473 #if defined(UNIX_X86_ABI)
7474 // Put back the stack pointer if there was any padding for stack alignment
7475 unsigned padStkAlign = call->fgArgInfo->GetStkAlign();
7476 unsigned padStkAdjust = padStkAlign + bias;
7478 if (padStkAdjust != 0)
7480 inst_RV_IV(INS_add, REG_SPBASE, padStkAdjust, EA_PTRSIZE);
7481 SubtractStackLevel(padStkAlign);
7482 SubtractNestedAlignment(padStkAlign);
7484 #else // UNIX_X86_ABI
#endif // !UNIX_X86_ABI
7490 #else // _TARGET_X86_
#endif // !_TARGET_X86_
7497 //---------------------------------------------------------------------
7498 // genAdjustStackForPutArgStk:
7499 // adjust the stack pointer for a putArgStk node if necessary.
7502 // putArgStk - the putArgStk node.
7504 // Returns: true if the stack pointer was adjusted; false otherwise.
7507 // Sets `m_pushStkArg` to true if the stack arg needs to be pushed,
7508 // false if the stack arg needs to be stored at the current stack
7509 // pointer address. This is exactly the opposite of the return value
7510 // of this function.
7512 bool CodeGen::genAdjustStackForPutArgStk(GenTreePutArgStk* putArgStk)
7515 if (varTypeIsSIMD(putArgStk))
7517 const unsigned argSize = genTypeSize(putArgStk);
7518 inst_RV_IV(INS_sub, REG_SPBASE, argSize, EA_PTRSIZE);
7519 AddStackLevel(argSize);
7520 m_pushStkArg = false;
7523 #endif // FEATURE_SIMD
7525 const unsigned argSize = putArgStk->getArgSize();
7527 // If the gtPutArgStkKind is one of the push types, we do not pre-adjust the stack.
7528 // This is set in Lowering, and is true if and only if:
7529 // - This argument contains any GC pointers OR
7530 // - It is a GT_FIELD_LIST OR
7531 // - It is less than 16 bytes in size.
7532 CLANG_FORMAT_COMMENT_ANCHOR;
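// For example (illustrative), a 12 byte struct with no GC refs is pushed
// (argSize < 16 => a push kind), while a 32 byte struct with no GC refs
// pre-adjusts SP once and is copied with rep movs or an unrolled sequence.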
7535 switch (putArgStk->gtPutArgStkKind)
7537 case GenTreePutArgStk::Kind::RepInstr:
7538 case GenTreePutArgStk::Kind::Unroll:
assert((putArgStk->gtNumberReferenceSlots == 0) && (putArgStk->gtGetOp1()->OperGet() != GT_FIELD_LIST) &&
       (argSize >= 16));
break;
7542 case GenTreePutArgStk::Kind::Push:
7543 case GenTreePutArgStk::Kind::PushAllSlots:
assert((putArgStk->gtNumberReferenceSlots != 0) || (putArgStk->gtGetOp1()->OperGet() == GT_FIELD_LIST) ||
       (argSize < 16));
break;
7547 case GenTreePutArgStk::Kind::Invalid:
7549 assert(!"Uninitialized GenTreePutArgStk::Kind");
7554 if (putArgStk->isPushKind())
7556 m_pushStkArg = true;
7561 m_pushStkArg = false;
7562 inst_RV_IV(INS_sub, REG_SPBASE, argSize, EA_PTRSIZE);
7563 AddStackLevel(argSize);
7568 //---------------------------------------------------------------------
7569 // genPutArgStkFieldList - generate code for passing a GT_FIELD_LIST arg on the stack.
7572 // treeNode - the GT_PUTARG_STK node whose op1 is a GT_FIELD_LIST
7577 void CodeGen::genPutArgStkFieldList(GenTreePutArgStk* putArgStk)
7579 GenTreeFieldList* const fieldList = putArgStk->gtOp1->AsFieldList();
7580 assert(fieldList != nullptr);
7582 // Set m_pushStkArg and pre-adjust the stack if necessary.
7583 const bool preAdjustedStack = genAdjustStackForPutArgStk(putArgStk);
7585 // For now, we only support the "push" case; we will push a full slot for the first field of each slot
7586 // within the struct.
7587 assert((putArgStk->isPushKind()) && !preAdjustedStack && m_pushStkArg);
7589 // If we have pre-adjusted the stack and are simply storing the fields in order, set the offset to 0.
7590 // (Note that this mode is not currently being used.)
7591 // If we are pushing the arguments (i.e. we have not pre-adjusted the stack), then we are pushing them
7592 // in reverse order, so we start with the current field offset at the size of the struct arg (which must be
7593 // a multiple of the target pointer size).
7594 unsigned currentOffset = (preAdjustedStack) ? 0 : putArgStk->getArgSize();
7595 unsigned prevFieldOffset = currentOffset;
7596 regNumber intTmpReg = REG_NA;
7597 regNumber simdTmpReg = REG_NA;
7598 if (putArgStk->AvailableTempRegCount() != 0)
7600 regMaskTP rsvdRegs = putArgStk->gtRsvdRegs;
7601 if ((rsvdRegs & RBM_ALLINT) != 0)
7603 intTmpReg = putArgStk->GetSingleTempReg(RBM_ALLINT);
7604 assert(genIsValidIntReg(intTmpReg));
7606 if ((rsvdRegs & RBM_ALLFLOAT) != 0)
7608 simdTmpReg = putArgStk->GetSingleTempReg(RBM_ALLFLOAT);
7609 assert(genIsValidFloatReg(simdTmpReg));
7611 assert(genCountBits(rsvdRegs) == (unsigned)((intTmpReg == REG_NA) ? 0 : 1) + ((simdTmpReg == REG_NA) ? 0 : 1));
7614 for (GenTreeFieldList* current = fieldList; current != nullptr; current = current->Rest())
7616 GenTree* const fieldNode = current->Current();
7617 const unsigned fieldOffset = current->gtFieldOffset;
7618 var_types fieldType = current->gtFieldType;
7620 // Long-typed nodes should have been handled by the decomposition pass, and lowering should have sorted the
7621 // field list in descending order by offset.
7622 assert(!varTypeIsLong(fieldType));
7623 assert(fieldOffset <= prevFieldOffset);
7625 // Consume the register, if any, for this field. Note that genConsumeRegs() will appropriately
7626 // update the liveness info for a lclVar that has been marked RegOptional, which hasn't been
7627 // assigned a register, and which is therefore contained.
7628 // Unlike genConsumeReg(), it handles the case where no registers are being consumed.
7629 genConsumeRegs(fieldNode);
7630 regNumber argReg = fieldNode->isUsedFromSpillTemp() ? REG_NA : fieldNode->gtRegNum;
7632 // If the field is slot-like, we can use a push instruction to store the entire register no matter the type.
7634 // The GC encoder requires that the stack remain 4-byte aligned at all times. Round the adjustment up
7635 // to the next multiple of 4. If we are going to generate a `push` instruction, the adjustment must
7636 // not require rounding.
7637 // NOTE: if the field is of GC type, we must use a push instruction, since the emitter is not otherwise
7638 // able to detect stores into the outgoing argument area of the stack on x86.
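// For example (illustrative), a 4 byte int field at offset 8 whose predecessor
// field is at offset 12 is slot-like: a single push covers the whole slot and
// no rounding of the adjustment is needed.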
7639 const bool fieldIsSlot = ((fieldOffset % 4) == 0) && ((prevFieldOffset - fieldOffset) >= 4);
7640 int adjustment = roundUp(currentOffset - fieldOffset, 4);
7641 if (fieldIsSlot && !varTypeIsSIMD(fieldType))
7643 fieldType = genActualType(fieldType);
7644 unsigned pushSize = genTypeSize(fieldType);
7645 assert((pushSize % 4) == 0);
7646 adjustment -= pushSize;
7647 while (adjustment != 0)
7649 inst_IV(INS_push, 0);
7650 currentOffset -= pushSize;
7651 AddStackLevel(pushSize);
7652 adjustment -= pushSize;
7654 m_pushStkArg = true;
7658 m_pushStkArg = false;
7660 // We always "push" floating point fields (i.e. they are full slot values that don't
7661 // require special handling).
7662 assert(varTypeIsIntegralOrI(fieldNode) || varTypeIsSIMD(fieldNode));
7664 // If we can't push this field, it needs to be in a register so that we can store
7665 // it to the stack location.
7666 if (adjustment != 0)
7668 // This moves the stack pointer to fieldOffset.
7669 // For this case, we must adjust the stack and generate stack-relative stores rather than pushes.
7670 // Adjust the stack pointer to the next slot boundary.
7671 inst_RV_IV(INS_sub, REG_SPBASE, adjustment, EA_PTRSIZE);
7672 currentOffset -= adjustment;
7673 AddStackLevel(adjustment);
7676 // Does it need to be in a byte register?
7677 // If so, we'll use intTmpReg, which must have been allocated as a byte register.
7678 // If it's already in a register, but not a byteable one, then move it.
7679 if (varTypeIsByte(fieldType) && ((argReg == REG_NA) || ((genRegMask(argReg) & RBM_BYTE_REGS) == 0)))
7681 assert(intTmpReg != REG_NA);
7682 noway_assert((genRegMask(intTmpReg) & RBM_BYTE_REGS) != 0);
7683 if (argReg != REG_NA)
7685 inst_RV_RV(INS_mov, intTmpReg, argReg, fieldType);
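// Hedged note on why the copy above may be needed: on x86 only EAX, EBX, ECX, and
// EDX have byte-addressable forms (AL/BL/CL/DL), so a TYP_BYTE field that landed
// in, say, ESI or EDI cannot feed a byte-sized store directly and is first copied
// into the byteable intTmpReg reserved for this purpose.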
7691 if (argReg == REG_NA)
7695 if (fieldNode->isUsedFromSpillTemp())
7697 assert(!varTypeIsSIMD(fieldType)); // Q: can we get here with SIMD?
7698 assert(fieldNode->IsRegOptional());
7699 TempDsc* tmp = getSpillTempDsc(fieldNode);
7700 getEmitter()->emitIns_S(INS_push, emitActualTypeSize(fieldNode->TypeGet()), tmp->tdTempNum(), 0);
7701 compiler->tmpRlsTemp(tmp);
7705 assert(varTypeIsIntegralOrI(fieldNode));
7706 switch (fieldNode->OperGet())
7709 inst_TT(INS_push, fieldNode, 0, 0, emitActualTypeSize(fieldNode->TypeGet()));
7712 if (fieldNode->IsIconHandle())
7714 inst_IV_handle(INS_push, fieldNode->gtIntCon.gtIconVal);
7718 inst_IV(INS_push, fieldNode->gtIntCon.gtIconVal);
7725 currentOffset -= TARGET_POINTER_SIZE;
7726 AddStackLevel(TARGET_POINTER_SIZE);
7730 // The stack has been adjusted and we will load the field to intTmpReg and then store it on the stack.
7731 assert(varTypeIsIntegralOrI(fieldNode));
7732 switch (fieldNode->OperGet())
7735 inst_RV_TT(INS_mov, intTmpReg, fieldNode);
7738 genSetRegToConst(intTmpReg, fieldNode->TypeGet(), fieldNode);
7743 genStoreRegToStackArg(fieldType, intTmpReg, fieldOffset - currentOffset);
7748 #if defined(FEATURE_SIMD)
7749 if (fieldType == TYP_SIMD12)
7751 assert(genIsValidFloatReg(simdTmpReg));
7752 genStoreSIMD12ToStack(argReg, simdTmpReg);
7755 #endif // defined(FEATURE_SIMD)
7757 genStoreRegToStackArg(fieldType, argReg, fieldOffset - currentOffset);
7761 // We always push a slot-rounded size
7762 currentOffset -= genTypeSize(fieldType);
7766 prevFieldOffset = fieldOffset;
7768 if (currentOffset != 0)
7770 // We don't expect padding at the beginning of a struct, but it could happen with explicit layout.
7771 inst_RV_IV(INS_sub, REG_SPBASE, currentOffset, EA_PTRSIZE);
7772 AddStackLevel(currentOffset);
7775 #endif // _TARGET_X86_
7777 //---------------------------------------------------------------------
7778 // genPutArgStk - generate code for passing an arg on the stack.
7781 // putArgStk - the GT_PUTARG_STK node
7782 // targetType - the type of the putArgStk node
7787 void CodeGen::genPutArgStk(GenTreePutArgStk* putArgStk)
7789 var_types targetType = putArgStk->TypeGet();
7793 genAlignStackBeforeCall(putArgStk);
7795 if (varTypeIsStruct(targetType))
7797 (void)genAdjustStackForPutArgStk(putArgStk);
7798 genPutStructArgStk(putArgStk);
7802 // The following logic is applicable for x86 arch.
7803 assert(!varTypeIsFloating(targetType) || (targetType == putArgStk->gtOp1->TypeGet()));
7805 GenTreePtr data = putArgStk->gtOp1;
7807 // On a 32-bit target, all of the long arguments are handled with GT_FIELD_LIST,
7808 // and the type of the putArgStk is TYP_VOID.
7809 assert(targetType != TYP_LONG);
7811 const unsigned argSize = putArgStk->getArgSize();
7812 assert((argSize % TARGET_POINTER_SIZE) == 0);
7814 if (data->isContainedIntOrIImmed())
7816 if (data->IsIconHandle())
7818 inst_IV_handle(INS_push, data->gtIntCon.gtIconVal);
7822 inst_IV(INS_push, data->gtIntCon.gtIconVal);
7824 AddStackLevel(argSize);
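// Illustrative outcome (hypothetical operand): a contained constant 42 becomes a
// single "push 42" here, while a handle constant goes through the handle-aware
// form above so that relocation information is recorded for the immediate.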
7826 else if (data->OperGet() == GT_FIELD_LIST)
7828 genPutArgStkFieldList(putArgStk);
7832 // We should not see any contained nodes that are not immediates.
7833 assert(data->isUsedFromReg());
7834 genConsumeReg(data);
7835 genPushReg(targetType, data->gtRegNum);
7837 #else // !_TARGET_X86_
7839 unsigned baseVarNum = getBaseVarForPutArgStk(putArgStk);
7841 #ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
7843 if (varTypeIsStruct(targetType))
7845 m_stkArgVarNum = baseVarNum;
7846 m_stkArgOffset = putArgStk->getArgOffset();
7847 genPutStructArgStk(putArgStk);
7848 m_stkArgVarNum = BAD_VAR_NUM;
7851 #endif // FEATURE_UNIX_AMD64_STRUCT_PASSING
7853 noway_assert(targetType != TYP_STRUCT);
7854 assert(!varTypeIsFloating(targetType) || (targetType == putArgStk->gtOp1->TypeGet()));
7856 // Get the argument's offset on the stack.
7857 // We cross-check that the offset hasn't changed between lowering and codegen, since
7858 // the arg slot number was stored in the GT_PUTARG_STK node during lowering.
7859 int argOffset = putArgStk->getArgOffset();
7862 fgArgTabEntryPtr curArgTabEntry = compiler->gtArgEntryByNode(putArgStk->gtCall, putArgStk);
7863 assert(curArgTabEntry);
7864 assert(argOffset == (int)curArgTabEntry->slotNum * TARGET_POINTER_SIZE);
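// Illustrative arithmetic: an argument that lowering placed in slot 3 must sit at
// byte offset 3 * TARGET_POINTER_SIZE (24 on a 64-bit target); any other value
// here means the offset drifted between lowering and codegen.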
7867 GenTreePtr data = putArgStk->gtOp1;
7869 if (data->isContainedIntOrIImmed())
7871 getEmitter()->emitIns_S_I(ins_Store(targetType), emitTypeSize(targetType), baseVarNum, argOffset,
7872 (int)data->AsIntConCommon()->IconValue());
7876 assert(data->isUsedFromReg());
7877 genConsumeReg(data);
7878 getEmitter()->emitIns_S_R(ins_Store(targetType), emitTypeSize(targetType), data->gtRegNum, baseVarNum,
7879 argOffset);
7882 #endif // !_TARGET_X86_
7885 //---------------------------------------------------------------------
7886 // genPutArgReg - generate code for a GT_PUTARG_REG node
7889 // tree - the GT_PUTARG_REG node
7894 void CodeGen::genPutArgReg(GenTreeOp* tree)
7896 assert(tree->OperIs(GT_PUTARG_REG));
7898 var_types targetType = tree->TypeGet();
7899 regNumber targetReg = tree->gtRegNum;
7901 #ifndef FEATURE_UNIX_AMD64_STRUCT_PASSING
7902 assert(targetType != TYP_STRUCT);
7903 #endif // !FEATURE_UNIX_AMD64_STRUCT_PASSING
7905 GenTree* op1 = tree->gtOp1;
7908 // If child node is not already in the register we need, move it
7909 if (targetReg != op1->gtRegNum)
7911 inst_RV_RV(ins_Copy(targetType), targetReg, op1->gtRegNum, targetType);
7914 genProduceReg(tree);
7918 // genPushReg: Push a register value onto the stack and adjust the stack level
7921 // type - the type of value to be stored
7922 // srcReg - the register containing the value
7925 // For TYP_LONG, the srcReg must be a floating point register.
7926 // Otherwise, the register type must be consistent with the given type.
7928 void CodeGen::genPushReg(var_types type, regNumber srcReg)
7930 unsigned size = genTypeSize(type);
7931 if (varTypeIsIntegralOrI(type) && type != TYP_LONG)
7933 assert(genIsValidIntReg(srcReg));
7934 inst_RV(INS_push, srcReg, type);
7939 emitAttr attr = emitTypeSize(type);
7940 if (type == TYP_LONG)
7942 // On x86, the only way we can push a TYP_LONG from a register is if it is in an xmm reg.
7943 // This is only used when we are pushing a struct from memory to memory, and basically is
7944 // handling an 8-byte "chunk", as opposed to strictly a long type.
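// Sketch of the sequence emitted below (illustrative Intel syntax, assuming
// ins_Store(TYP_LONG) selects an 8-byte SSE store such as movq):
//   sub  esp, 8
//   movq qword ptr [esp], xmm0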
7949 ins = ins_Store(type);
7951 assert(genIsValidFloatReg(srcReg));
7952 inst_RV_IV(INS_sub, REG_SPBASE, size, EA_PTRSIZE);
7953 getEmitter()->emitIns_AR_R(ins, attr, srcReg, REG_SPBASE, 0);
7955 AddStackLevel(size);
7957 #endif // _TARGET_X86_
7959 #if defined(FEATURE_PUT_STRUCT_ARG_STK)
7960 // genStoreRegToStackArg: Store a register value into the stack argument area
7963 // type - the type of value to be stored
7964 // srcReg - the register containing the value
7965 // offset - the offset from the base (see Assumptions below)
7968 // A type of TYP_STRUCT instructs this method to store a 16-byte chunk
7969 // at the given offset (i.e. not the full struct).
7972 // The caller must set the context appropriately before calling this method:
7973 // - On x64, m_stkArgVarNum must be set according to whether this is a regular or tail call.
7974 // - On x86, the caller must set m_pushStkArg if this method should push the argument.
7975 // Otherwise, the argument is stored at the given offset from sp.
7977 // TODO: In the below code the load and store instructions are for 16 bytes, but the
7978 // type is EA_8BYTE. The movdqa/u are 16 byte instructions, so it works, but
7979 // this probably needs to be changed.
7981 void CodeGen::genStoreRegToStackArg(var_types type, regNumber srcReg, int offset)
7983 assert(srcReg != REG_NA);
7988 if (type == TYP_STRUCT)
7991 // This should be changed!
7998 if (varTypeIsSIMD(type))
8000 assert(genIsValidFloatReg(srcReg));
8001 ins = ins_Store(type); // TODO-CQ: pass 'aligned' correctly
8004 #endif // FEATURE_SIMD
8006 if (type == TYP_LONG)
8008 assert(genIsValidFloatReg(srcReg));
8012 #endif // _TARGET_X86_
8014 assert((varTypeIsFloating(type) && genIsValidFloatReg(srcReg)) ||
8015 (varTypeIsIntegralOrI(type) && genIsValidIntReg(srcReg)));
8016 ins = ins_Store(type);
8018 attr = emitTypeSize(type);
8019 size = genTypeSize(type);
8025 genPushReg(type, srcReg);
8029 getEmitter()->emitIns_AR_R(ins, attr, srcReg, REG_SPBASE, offset);
8031 #else // !_TARGET_X86_
8032 assert(m_stkArgVarNum != BAD_VAR_NUM);
8033 getEmitter()->emitIns_S_R(ins, attr, srcReg, m_stkArgVarNum, m_stkArgOffset + offset);
8034 #endif // !_TARGET_X86_
8037 //---------------------------------------------------------------------
8038 // genPutStructArgStk - generate code for copying a struct arg on the stack by value.
8039 // If the struct contains references to heap objects, this method
8040 // also generates the GC info needed to track them.
8043 // putArgStk - the GT_PUTARG_STK node
8046 // In the case of fixed out args, the caller must have set m_stkArgVarNum to the variable number
8047 // corresponding to the argument area (where we will put the argument on the stack).
8048 // For tail calls this is the baseVarNum (i.e. 0).
8049 // For non-tail calls this is the outgoingArgSpace.
8050 void CodeGen::genPutStructArgStk(GenTreePutArgStk* putArgStk)
8052 var_types targetType = putArgStk->TypeGet();
8054 #if defined(_TARGET_X86_) && defined(FEATURE_SIMD)
8055 if (targetType == TYP_SIMD12)
8057 genPutArgStkSIMD12(putArgStk);
8060 #endif // defined(_TARGET_X86_) && defined(FEATURE_SIMD)
8062 if (varTypeIsSIMD(targetType))
8064 regNumber srcReg = genConsumeReg(putArgStk->gtGetOp1());
8065 assert((srcReg != REG_NA) && (genIsValidFloatReg(srcReg)));
8066 genStoreRegToStackArg(targetType, srcReg, 0);
8070 assert(targetType == TYP_STRUCT);
8072 if (putArgStk->gtNumberReferenceSlots == 0)
8074 switch (putArgStk->gtPutArgStkKind)
8076 case GenTreePutArgStk::Kind::RepInstr:
8077 genStructPutArgRepMovs(putArgStk);
8079 case GenTreePutArgStk::Kind::Unroll:
8080 genStructPutArgUnroll(putArgStk);
8082 case GenTreePutArgStk::Kind::Push:
8083 genStructPutArgUnroll(putArgStk);
8091 // No need to disable GC the way COPYOBJ does; here the refs are always copied with atomic operations.
8092 CLANG_FORMAT_COMMENT_ANCHOR;
8095 // On x86, any struct that contains GC references must be stored to the stack using `push` instructions so
8096 // that the emitter properly detects the need to update the method's GC information.
8098 // Strictly speaking, it is only necessary to use `push` to store the GC references themselves, so for structs
8099 // with large numbers of consecutive non-GC-ref-typed fields, we may be able to improve the code size in the future.
8101 assert(m_pushStkArg);
8103 GenTree* srcAddr = putArgStk->gtGetOp1()->gtGetOp1();
8104 BYTE* gcPtrs = putArgStk->gtGcPtrs;
8105 const unsigned numSlots = putArgStk->gtNumSlots;
8107 regNumber srcRegNum = srcAddr->gtRegNum;
8108 const bool srcAddrInReg = srcRegNum != REG_NA;
8110 unsigned srcLclNum = 0;
8111 unsigned srcLclOffset = 0;
8114 genConsumeReg(srcAddr);
8118 assert(srcAddr->OperIsLocalAddr());
8120 srcLclNum = srcAddr->AsLclVarCommon()->gtLclNum;
8121 if (srcAddr->OperGet() == GT_LCL_FLD_ADDR)
8123 srcLclOffset = srcAddr->AsLclFld()->gtLclOffs;
8127 for (int i = numSlots - 1; i >= 0; --i)
8130 if (gcPtrs[i] == TYPE_GC_NONE)
8132 slotAttr = EA_4BYTE;
8134 else if (gcPtrs[i] == TYPE_GC_REF)
8136 slotAttr = EA_GCREF;
8140 assert(gcPtrs[i] == TYPE_GC_BYREF);
8141 slotAttr = EA_BYREF;
8144 const unsigned offset = i * TARGET_POINTER_SIZE;
8147 getEmitter()->emitIns_AR_R(INS_push, slotAttr, REG_NA, srcRegNum, offset);
8151 getEmitter()->emitIns_S(INS_push, slotAttr, srcLclNum, srcLclOffset + offset);
8153 AddStackLevel(TARGET_POINTER_SIZE);
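// Worked example (hypothetical layout): for struct { object o; int i; } the gcPtrs
// array is { TYPE_GC_REF, TYPE_GC_NONE }, so the loop pushes slot 1 first with
// EA_4BYTE and then slot 0 with EA_GCREF; tagging that push with EA_GCREF is what
// lets the emitter record the new stack slot in the method's GC info.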
8155 #else // !defined(_TARGET_X86_)
8157 // Consume these registers.
8158 // They may now contain gc pointers (depending on their type; gcMarkRegPtrVal will "do the right thing").
8159 genConsumePutStructArgStk(putArgStk, REG_RDI, REG_RSI, REG_NA);
8161 const bool srcIsLocal = putArgStk->gtOp1->AsObj()->gtOp1->OperIsLocalAddr();
8162 const emitAttr srcAddrAttr = srcIsLocal ? EA_PTRSIZE : EA_BYREF;
8165 unsigned numGCSlotsCopied = 0;
8168 BYTE* gcPtrs = putArgStk->gtGcPtrs;
8169 const unsigned numSlots = putArgStk->gtNumSlots;
8170 for (unsigned i = 0; i < numSlots;)
8172 if (gcPtrs[i] == TYPE_GC_NONE)
8174 // Let's see if we can use rep movsp (alias for movsd or movsq for 32 and 64 bits respectively)
8175 // instead of a sequence of movsp instructions to save cycles and code size.
8176 unsigned adjacentNonGCSlotCount = 0;
8179 adjacentNonGCSlotCount++;
8181 } while ((i < numSlots) && (gcPtrs[i] == TYPE_GC_NONE));
8183 // If we have a very small contiguous non-ref region, it's better just to
8184 // emit a sequence of movsp instructions
8185 if (adjacentNonGCSlotCount < CPOBJ_NONGC_SLOTS_LIMIT)
8187 for (; adjacentNonGCSlotCount > 0; adjacentNonGCSlotCount--)
8194 getEmitter()->emitIns_R_I(INS_mov, EA_4BYTE, REG_RCX, adjacentNonGCSlotCount);
8195 instGen(INS_r_movsp);
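// Illustrative decision (hypothetical layout): for slots { ref, int, int, int, int, ref },
// the middle run of four non-GC slots is measured against CPOBJ_NONGC_SLOTS_LIMIT; a
// run below the limit becomes four discrete movsp instructions, while a longer run is
// copied with "mov ecx, N; rep movsp", trading a small setup cost for code size.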
8200 assert((gcPtrs[i] == TYPE_GC_REF) || (gcPtrs[i] == TYPE_GC_BYREF));
8202 // We have a GC (byref or ref) pointer
8203 // TODO-Amd64-Unix: Here a better solution (for code size and CQ) would be to use movsp instruction,
8204 // but the logic for emitting a GC info record is not available (it is internal for the emitter
8205 // only.) See emitGCVarLiveUpd function. If we could call it separately, we could do
8206 // instGen(INS_movsp); and emission of gc info.
8208 var_types memType = (gcPtrs[i] == TYPE_GC_REF) ? TYP_REF : TYP_BYREF;
8209 getEmitter()->emitIns_R_AR(ins_Load(memType), emitTypeSize(memType), REG_RCX, REG_RSI, 0);
8210 genStoreRegToStackArg(memType, REG_RCX, i * TARGET_POINTER_SIZE);
8219 // Source for the copy operation.
8220 // If a LocalAddr, use EA_PTRSIZE - copy from stack.
8221 // If not a LocalAddr, use EA_BYREF - the source location is not on the stack.
8222 getEmitter()->emitIns_R_I(INS_add, srcAddrAttr, REG_RSI, TARGET_POINTER_SIZE);
8224 // Always copying to the stack - outgoing arg area
8225 // (or the outgoing arg area of the caller for a tail call) - use EA_PTRSIZE.
8226 getEmitter()->emitIns_R_I(INS_add, EA_PTRSIZE, REG_RDI, TARGET_POINTER_SIZE);
8231 assert(numGCSlotsCopied == putArgStk->gtNumberReferenceSlots);
8232 #endif // _TARGET_X86_
8235 #endif // defined(FEATURE_PUT_STRUCT_ARG_STK)
8237 /*****************************************************************************
8239 * Create and record GC Info for the function.
8241 #ifndef JIT32_GCENCODER
8242 void
8243 #else // !JIT32_GCENCODER
8244 void*
8245 #endif // !JIT32_GCENCODER
8246 CodeGen::genCreateAndStoreGCInfo(unsigned codeSize, unsigned prologSize, unsigned epilogSize DEBUGARG(void* codePtr))
8248 #ifdef JIT32_GCENCODER
8249 return genCreateAndStoreGCInfoJIT32(codeSize, prologSize, epilogSize DEBUGARG(codePtr));
8250 #else // !JIT32_GCENCODER
8251 genCreateAndStoreGCInfoX64(codeSize, prologSize DEBUGARG(codePtr));
8252 #endif // !JIT32_GCENCODER
8255 #ifdef JIT32_GCENCODER
8256 void* CodeGen::genCreateAndStoreGCInfoJIT32(unsigned codeSize,
8257 unsigned prologSize,
8258 unsigned epilogSize DEBUGARG(void* codePtr))
8267 compiler->compInfoBlkSize =
8268 gcInfo.gcInfoBlockHdrSave(headerBuf, 0, codeSize, prologSize, epilogSize, &header, &s_cached);
8270 size_t argTabOffset = 0;
8271 size_t ptrMapSize = gcInfo.gcPtrTableSize(header, codeSize, &argTabOffset);
8275 if (genInterruptible)
8277 gcHeaderISize += compiler->compInfoBlkSize;
8278 gcPtrMapISize += ptrMapSize;
8282 gcHeaderNSize += compiler->compInfoBlkSize;
8283 gcPtrMapNSize += ptrMapSize;
8286 #endif // DISPLAY_SIZES
8288 compiler->compInfoBlkSize += ptrMapSize;
8290 /* Allocate the info block for the method */
8292 compiler->compInfoBlkAddr = (BYTE*)compiler->info.compCompHnd->allocGCInfo(compiler->compInfoBlkSize);
8294 #if 0 // VERBOSE_SIZES
8295 // TODO-X86-Cleanup: 'dataSize', below, is not defined
8297 // if (compiler->compInfoBlkSize > codeSize && compiler->compInfoBlkSize > 100)
8299 printf("[%7u VM, %7u+%7u/%7u x86 %03u/%03u%%] %s.%s\n",
8300 compiler->info.compILCodeSize,
8301 compiler->compInfoBlkSize,
8302 codeSize + dataSize,
8303 codeSize + dataSize - prologSize - epilogSize,
8304 100 * (codeSize + dataSize) / compiler->info.compILCodeSize,
8305 100 * (codeSize + dataSize + compiler->compInfoBlkSize) / compiler->info.compILCodeSize,
8306 compiler->info.compClassName,
8307 compiler->info.compMethodName);
8312 /* Fill in the info block and return it to the caller */
8314 void* infoPtr = compiler->compInfoBlkAddr;
8316 /* Create the method info block: header followed by GC tracking tables */
8318 compiler->compInfoBlkAddr +=
8319 gcInfo.gcInfoBlockHdrSave(compiler->compInfoBlkAddr, -1, codeSize, prologSize, epilogSize, &header, &s_cached);
8321 assert(compiler->compInfoBlkAddr == (BYTE*)infoPtr + headerSize);
8322 compiler->compInfoBlkAddr = gcInfo.gcPtrTableSave(compiler->compInfoBlkAddr, header, codeSize, &argTabOffset);
8323 assert(compiler->compInfoBlkAddr == (BYTE*)infoPtr + headerSize + ptrMapSize);
8329 BYTE* temp = (BYTE*)infoPtr;
8330 unsigned size = compiler->compInfoBlkAddr - temp;
8331 BYTE* ptab = temp + headerSize;
8333 noway_assert(size == headerSize + ptrMapSize);
8335 printf("Method info block - header [%u bytes]:", headerSize);
8337 for (unsigned i = 0; i < size; i++)
8341 printf("\nMethod info block - ptrtab [%u bytes]:", ptrMapSize);
8342 printf("\n %04X: %*c", i & ~0xF, 3 * (i & 0xF), ' ');
8347 printf("\n %04X: ", i);
8350 printf("%02X ", *temp++);
8360 if (compiler->opts.dspGCtbls)
8362 const BYTE* base = (BYTE*)infoPtr;
8364 unsigned methodSize;
8367 printf("GC Info for method %s\n", compiler->info.compFullName);
8368 printf("GC info size = %3u\n", compiler->compInfoBlkSize);
8370 size = gcInfo.gcInfoBlockHdrDump(base, &dumpHeader, &methodSize);
8371 // printf("size of header encoding is %3u\n", size);
8374 if (compiler->opts.dspGCtbls)
8377 size = gcInfo.gcDumpPtrTable(base, dumpHeader, methodSize);
8378 // printf("size of pointer table is %3u\n", size);
8380 noway_assert(compiler->compInfoBlkAddr == (base + size));
8385 if (jitOpts.testMask & 128)
8387 for (unsigned offs = 0; offs < codeSize; offs++)
8389 gcInfo.gcFindPtrsInFrame(infoPtr, codePtr, offs);
8393 #endif // DUMP_GC_TABLES
8395 /* Make sure we ended up generating the expected number of bytes */
8397 noway_assert(compiler->compInfoBlkAddr == (BYTE*)infoPtr + compiler->compInfoBlkSize);
8402 #else // !JIT32_GCENCODER
8403 void CodeGen::genCreateAndStoreGCInfoX64(unsigned codeSize, unsigned prologSize DEBUGARG(void* codePtr))
8405 IAllocator* allowZeroAlloc = new (compiler, CMK_GC) AllowZeroAllocator(compiler->getAllocatorGC());
8406 GcInfoEncoder* gcInfoEncoder = new (compiler, CMK_GC)
8407 GcInfoEncoder(compiler->info.compCompHnd, compiler->info.compMethodInfo, allowZeroAlloc, NOMEM);
8408 assert(gcInfoEncoder);
8410 // Follow the code pattern of the x86 gc info encoder (genCreateAndStoreGCInfoJIT32).
8411 gcInfo.gcInfoBlockHdrSave(gcInfoEncoder, codeSize, prologSize);
8413 // We keep the call count for the second call to gcMakeRegPtrTable() below.
8414 unsigned callCnt = 0;
8415 // First we figure out the encoder ID's for the stack slots and registers.
8416 gcInfo.gcMakeRegPtrTable(gcInfoEncoder, codeSize, prologSize, GCInfo::MAKE_REG_PTR_MODE_ASSIGN_SLOTS, &callCnt);
8417 // Now we've requested all the slots we'll need; "finalize" these (make more compact data structures for them).
8418 gcInfoEncoder->FinalizeSlotIds();
8419 // Now we can actually use those slot ID's to declare live ranges.
8420 gcInfo.gcMakeRegPtrTable(gcInfoEncoder, codeSize, prologSize, GCInfo::MAKE_REG_PTR_MODE_DO_WORK, &callCnt);
8422 if (compiler->opts.compDbgEnC)
8424 // What we have to preserve is called the "frame header" (see comments in VM\eetwain.cpp),
8425 // which is:
8426 //  - the return address
8427 //  - saved off RBP
8428 //  - the saved 'this' pointer and the lock-taken bool for synchronized methods
8430 // 4 slots for RBP + return address + RSI + RDI
8431 int preservedAreaSize = 4 * REGSIZE_BYTES;
8433 if (compiler->info.compFlags & CORINFO_FLG_SYNCH)
8435 if (!(compiler->info.compFlags & CORINFO_FLG_STATIC))
8437 preservedAreaSize += REGSIZE_BYTES;
8440 // bool in synchronized methods that tracks whether the lock has been taken (takes 4 bytes on stack)
8441 preservedAreaSize += 4;
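// Illustrative arithmetic (assuming REGSIZE_BYTES == 8): the baseline frame header is
// 4 * 8 == 32 bytes; a synchronized instance method adds 8 bytes for the saved 'this'
// pointer plus the 4-byte lock-taken bool above, for a total of 44 bytes.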
8444 // Used to signal both that the method is compiled for EnC, and also the size of the block at the top of the frame.
8446 gcInfoEncoder->SetSizeOfEditAndContinuePreservedArea(preservedAreaSize);
8449 if (compiler->opts.IsReversePInvoke())
8451 unsigned reversePInvokeFrameVarNumber = compiler->lvaReversePInvokeFrameVar;
8452 assert(reversePInvokeFrameVarNumber != BAD_VAR_NUM && reversePInvokeFrameVarNumber < compiler->lvaRefCount);
8453 LclVarDsc& reversePInvokeFrameVar = compiler->lvaTable[reversePInvokeFrameVarNumber];
8454 gcInfoEncoder->SetReversePInvokeFrameSlot(reversePInvokeFrameVar.lvStkOffs);
8457 gcInfoEncoder->Build();
8459 // The GC encoder automatically puts the GC info in the right spot using ICorJitInfo::allocGCInfo(size_t).
8460 // Let's save the values anyway for debugging purposes.
8461 compiler->compInfoBlkAddr = gcInfoEncoder->Emit();
8462 compiler->compInfoBlkSize = 0; // not exposed by the GCEncoder interface
8464 #endif // !JIT32_GCENCODER
8466 /*****************************************************************************
8467 * Emit a call to a helper function.
8471 void CodeGen::genEmitHelperCall(unsigned helper, int argSize, emitAttr retSize, regNumber callTargetReg)
8473 void* addr = nullptr;
8474 void* pAddr = nullptr;
8476 emitter::EmitCallType callType = emitter::EC_FUNC_TOKEN;
8477 addr = compiler->compGetHelperFtn((CorInfoHelpFunc)helper, &pAddr);
8478 regNumber callTarget = REG_NA;
8479 regMaskTP killMask = compiler->compHelperCallKillSet((CorInfoHelpFunc)helper);
8483 assert(pAddr != nullptr);
8485 // Absolute indirect call addr
8486 // Note: the order of checks matters; always check the pc-relative encoding first and the
8487 // zero-relative encoding next, because the former is one byte smaller than the latter.
8488 if (genCodeIndirAddrCanBeEncodedAsPCRelOffset((size_t)pAddr) ||
8489 genCodeIndirAddrCanBeEncodedAsZeroRelOffset((size_t)pAddr))
8491 // generate call whose target is specified by 32-bit offset relative to PC or zero.
8492 callType = emitter::EC_FUNC_TOKEN_INDIR;
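// Sketch of the two encodings being chosen between (illustrative Intel syntax):
//   call [rip + disp32]   ; pc-relative indirect, 6 bytes
//   call [disp32]         ; zero-relative (absolute 32-bit) indirect, 7 bytes on x64
// which is why the pc-relative form is tried first.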
8497 #ifdef _TARGET_AMD64_
8498 // If this indirect address cannot be encoded as 32-bit offset relative to PC or Zero,
8499 // load it into REG_HELPER_CALL_TARGET and use register indirect addressing mode to make the call.
8504 if (callTargetReg == REG_NA)
8506 // If a callTargetReg has not been explicitly provided, we will use REG_DEFAULT_HELPER_CALL_TARGET, but
8507 // this is only a valid assumption if the helper call is known to kill REG_DEFAULT_HELPER_CALL_TARGET.
8508 callTargetReg = REG_DEFAULT_HELPER_CALL_TARGET;
8509 regMaskTP callTargetMask = genRegMask(callTargetReg);
8510 noway_assert((callTargetMask & killMask) == callTargetMask);
8514 // The call target must not overwrite any live variable, even though it may not be in the
8515 // kill set for the call.
8516 regMaskTP callTargetMask = genRegMask(callTargetReg);
8517 noway_assert((callTargetMask & regSet.rsMaskVars) == RBM_NONE);
8521 callTarget = callTargetReg;
8522 CodeGen::genSetRegToIcon(callTarget, (ssize_t)pAddr, TYP_I_IMPL);
8523 callType = emitter::EC_INDIR_ARD;
8528 getEmitter()->emitIns_Call(callType,
8529 compiler->eeFindHelper(helper),
8530 INDEBUG_LDISASM_COMMA(nullptr) addr,
8533 MULTIREG_HAS_SECOND_GC_RET_ONLY_ARG(EA_UNKNOWN),
8534 gcInfo.gcVarPtrSetCur,
8535 gcInfo.gcRegGCrefSetCur,
8536 gcInfo.gcRegByrefSetCur,
8537 BAD_IL_OFFSET, // IL offset
8539 REG_NA, 0, 0, // xreg, xmul, disp
8541 emitter::emitNoGChelper(helper));
8544 regTracker.rsTrashRegSet(killMask);
8545 regTracker.rsTrashRegsForGCInterruptability();
8548 #if !defined(_TARGET_64BIT_)
8549 //-----------------------------------------------------------------------------
8551 // Code Generation for Long integers
8553 //-----------------------------------------------------------------------------
8555 //------------------------------------------------------------------------
8556 // genStoreLongLclVar: Generate code to store a non-enregistered long lclVar
8559 // treeNode - A TYP_LONG lclVar node.
8565 // 'treeNode' must be a TYP_LONG lclVar node for a lclVar that has NOT been promoted.
8566 // Its operand must be a GT_LONG node.
8568 void CodeGen::genStoreLongLclVar(GenTree* treeNode)
8570 emitter* emit = getEmitter();
8572 GenTreeLclVarCommon* lclNode = treeNode->AsLclVarCommon();
8573 unsigned lclNum = lclNode->gtLclNum;
8574 LclVarDsc* varDsc = &(compiler->lvaTable[lclNum]);
8575 assert(varDsc->TypeGet() == TYP_LONG);
8576 assert(!varDsc->lvPromoted);
8577 GenTreePtr op1 = treeNode->gtOp.gtOp1;
8578 noway_assert(op1->OperGet() == GT_LONG || op1->OperGet() == GT_MUL_LONG);
8579 genConsumeRegs(op1);
8581 if (op1->OperGet() == GT_LONG)
8583 // Definitions of register candidates will have been lowered to 2 int lclVars.
8584 assert(!treeNode->InReg());
8586 GenTreePtr loVal = op1->gtGetOp1();
8587 GenTreePtr hiVal = op1->gtGetOp2();
8589 // NYI: Contained immediates.
8590 NYI_IF((loVal->gtRegNum == REG_NA) || (hiVal->gtRegNum == REG_NA),
8591 "Store of long lclVar with contained immediate");
8593 emit->emitIns_S_R(ins_Store(TYP_INT), EA_4BYTE, loVal->gtRegNum, lclNum, 0);
8594 emit->emitIns_S_R(ins_Store(TYP_INT), EA_4BYTE, hiVal->gtRegNum, lclNum, genTypeSize(TYP_INT));
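// Illustrative result (hypothetical registers and frame offset): with loVal in EAX,
// hiVal in EDX, and the lclVar at [ebp-8], the two stores above amount to
//   mov dword ptr [ebp-8], eax   ; low 32 bits
//   mov dword ptr [ebp-4], edx   ; high 32 bits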
8596 else if (op1->OperGet() == GT_MUL_LONG)
8598 assert((op1->gtFlags & GTF_MUL_64RSLT) != 0);
8601 getEmitter()->emitIns_S_R(ins_Store(TYP_INT), emitTypeSize(TYP_INT), REG_LNGRET_LO, lclNum, 0);
8602 getEmitter()->emitIns_S_R(ins_Store(TYP_INT), emitTypeSize(TYP_INT), REG_LNGRET_HI, lclNum,
8603 genTypeSize(TYP_INT));
8606 #endif // !defined(_TARGET_64BIT_)
8608 /*****************************************************************************
8609 * Unit testing of the XArch emitter: generate a bunch of instructions into the prolog
8610 * (it's as good a place as any), then use COMPlus_JitLateDisasm=* to see if the late
8611 * disassembler thinks the instructions are the same as we do.
8614 // Uncomment "#define ALL_XARCH_EMITTER_UNIT_TESTS" to run all the unit tests here.
8615 // After adding a unit test, and verifying it works, put it under this #ifdef, so we don't see it run every time.
8616 //#define ALL_XARCH_EMITTER_UNIT_TESTS
8618 #if defined(DEBUG) && defined(LATE_DISASM) && defined(_TARGET_AMD64_)
8619 void CodeGen::genAmd64EmitterUnitTests()
8626 if (!compiler->opts.altJit)
8628 // No point doing this in a "real" JIT.
8632 // Mark the "fake" instructions in the output.
8633 printf("*************** In genAmd64EmitterUnitTests()\n");
8636 // We use genDefineTempLabel(genCreateTempLabel()) to create artificial
8637 // labels to help separate groups of tests.
8642 CLANG_FORMAT_COMMENT_ANCHOR;
8644 #ifdef ALL_XARCH_EMITTER_UNIT_TESTS
8645 #ifdef FEATURE_AVX_SUPPORT
8646 genDefineTempLabel(genCreateTempLabel());
8648 // vhaddpd ymm0,ymm1,ymm2
8649 getEmitter()->emitIns_R_R_R(INS_haddpd, EA_32BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
8650 // vaddss xmm0,xmm1,xmm2
8651 getEmitter()->emitIns_R_R_R(INS_addss, EA_4BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
8652 // vaddsd xmm0,xmm1,xmm2
8653 getEmitter()->emitIns_R_R_R(INS_addsd, EA_8BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
8654 // vaddps xmm0,xmm1,xmm2
8655 getEmitter()->emitIns_R_R_R(INS_addps, EA_16BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
8656 // vaddps ymm0,ymm1,ymm2
8657 getEmitter()->emitIns_R_R_R(INS_addps, EA_32BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
8658 // vaddpd xmm0,xmm1,xmm2
8659 getEmitter()->emitIns_R_R_R(INS_addpd, EA_16BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
8660 // vaddpd ymm0,ymm1,ymm2
8661 getEmitter()->emitIns_R_R_R(INS_addpd, EA_32BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
8662 // vsubss xmm0,xmm1,xmm2
8663 getEmitter()->emitIns_R_R_R(INS_subss, EA_4BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
8664 // vsubsd xmm0,xmm1,xmm2
8665 getEmitter()->emitIns_R_R_R(INS_subsd, EA_8BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
8666 // vsubps xmm0,xmm1,xmm2
8667 getEmitter()->emitIns_R_R_R(INS_subps, EA_16BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
8668 // vsubps ymm0,ymm1,ymm2
8669 getEmitter()->emitIns_R_R_R(INS_subps, EA_32BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
8670 // vsubpd xmm0,xmm1,xmm2
8671 getEmitter()->emitIns_R_R_R(INS_subpd, EA_16BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
8672 // vsubpd ymm0,ymm1,ymm2
8673 getEmitter()->emitIns_R_R_R(INS_subpd, EA_32BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
8674 // vmulss xmm0,xmm1,xmm2
8675 getEmitter()->emitIns_R_R_R(INS_mulss, EA_4BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
8676 // vmulsd xmm0,xmm1,xmm2
8677 getEmitter()->emitIns_R_R_R(INS_mulsd, EA_8BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
8678 // vmulps xmm0,xmm1,xmm2
8679 getEmitter()->emitIns_R_R_R(INS_mulps, EA_16BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
8680 // vmulpd xmm0,xmm1,xmm2
8681 getEmitter()->emitIns_R_R_R(INS_mulpd, EA_16BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
8682 // vmulps ymm0,ymm1,ymm2
8683 getEmitter()->emitIns_R_R_R(INS_mulps, EA_32BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
8684 // vmulpd ymm0,ymm1,ymm2
8685 getEmitter()->emitIns_R_R_R(INS_mulpd, EA_32BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
8686 // vandps xmm0,xmm1,xmm2
8687 getEmitter()->emitIns_R_R_R(INS_andps, EA_16BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
8688 // vandpd xmm0,xmm1,xmm2
8689 getEmitter()->emitIns_R_R_R(INS_andpd, EA_16BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
8690 // vandps ymm0,ymm1,ymm2
8691 getEmitter()->emitIns_R_R_R(INS_andps, EA_32BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
8692 // vandpd ymm0,ymm1,ymm2
8693 getEmitter()->emitIns_R_R_R(INS_andpd, EA_32BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
8694 // vorps xmm0,xmm1,xmm2
8695 getEmitter()->emitIns_R_R_R(INS_orps, EA_16BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
8696 // vorpd xmm0,xmm1,xmm2
8697 getEmitter()->emitIns_R_R_R(INS_orpd, EA_16BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
8698 // vorps ymm0,ymm1,ymm2
8699 getEmitter()->emitIns_R_R_R(INS_orps, EA_32BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
8700 // vorpd ymm0,ymm1,ymm2
8701 getEmitter()->emitIns_R_R_R(INS_orpd, EA_32BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
8702 // vdivss xmm0,xmm1,xmm2
8703 getEmitter()->emitIns_R_R_R(INS_divss, EA_4BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
8704 // vdivsd xmm0,xmm1,xmm2
8705 getEmitter()->emitIns_R_R_R(INS_divsd, EA_8BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
8706 // vdivss xmm0,xmm1,xmm2
8707 getEmitter()->emitIns_R_R_R(INS_divss, EA_4BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
8708 // vdivsd xmm0,xmm1,xmm2
8709 getEmitter()->emitIns_R_R_R(INS_divsd, EA_8BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
8711 // vcvtss2sd xmm0,xmm1,xmm2
8712 getEmitter()->emitIns_R_R_R(INS_cvtss2sd, EA_4BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
8713 // vcvtsd2ss xmm0,xmm1,xmm2
8714 getEmitter()->emitIns_R_R_R(INS_cvtsd2ss, EA_8BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
8715 #endif // FEATURE_AVX_SUPPORT
8716 #endif // ALL_XARCH_EMITTER_UNIT_TESTS
8717 printf("*************** End of genAmd64EmitterUnitTests()\n");
8720 #endif // defined(DEBUG) && defined(LATE_DISASM) && defined(_TARGET_AMD64_)
8722 #endif // _TARGET_AMD64_
8724 #endif // !LEGACY_BACKEND