// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.

/*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX                                                                           XX
XX                       Amd64/x86 Code Generator                            XX
XX                                                                           XX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
*/

#ifndef LEGACY_BACKEND // This file is ONLY used for the RyuJIT backend that uses the linear scan register allocator.

#include "gcinfoencoder.h"
/*****************************************************************************
 *
 * Generate code that will set the given register to the integer constant.
 */

void CodeGen::genSetRegToIcon(regNumber reg, ssize_t val, var_types type, insFlags flags)
{
    // Reg cannot be a FP reg
    assert(!genIsValidFloatReg(reg));

    // The only TYP_REF constant that can come down this path is a managed 'null' since it is not
    // relocatable. Other ref type constants (e.g. string objects) go through a different
    // code path.
    noway_assert(type != TYP_REF || val == 0);

    if (val == 0)
    {
        instGen_Set_Reg_To_Zero(emitActualTypeSize(type), reg, flags);
    }
    else
    {
        // TODO-XArch-CQ: needs all the optimized cases
        getEmitter()->emitIns_R_I(INS_mov, emitActualTypeSize(type), reg, val);
    }
}
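// A quick sketch of what genSetRegToIcon above produces (illustrative register
// and constant choices, not taken from any particular compilation):
//     genSetRegToIcon(REG_ECX, 0,  TYP_INT)  emits   xor ecx, ecx
//     genSetRegToIcon(REG_ECX, 42, TYP_INT)  emits   mov ecx, 42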
/*****************************************************************************
 *
 * Generate code to check that the GS cookie wasn't thrashed by a buffer
 * overrun. If pushReg is true, preserve all registers around code sequence.
 * Otherwise ECX could be modified.
 *
 * Implementation Note: pushReg = true, in case of tail calls.
 */
void CodeGen::genEmitGSCookieCheck(bool pushReg)
{
    noway_assert(compiler->gsGlobalSecurityCookieAddr || compiler->gsGlobalSecurityCookieVal);

    // Make sure that EAX is reported as live GC-ref so that any GC that kicks in while
    // executing GS cookie check will not collect the object pointed to by EAX.
    //
    // For Amd64 System V, a two-register-returned struct could be returned in RAX and RDX.
    // In such a case make sure that the correct GC-ness of RDX is reported as well, so
    // a GC object pointed to by RDX will not be collected.
    if (!pushReg)
    {
        // Handle multi-reg return type values
        if (compiler->compMethodReturnsMultiRegRetType())
        {
            ReturnTypeDesc retTypeDesc;
            if (varTypeIsLong(compiler->info.compRetNativeType))
            {
                retTypeDesc.InitializeLongReturnType(compiler);
            }
            else // we must have a struct return type
            {
                retTypeDesc.InitializeStructReturnType(compiler, compiler->info.compMethodInfo->args.retTypeClass);
            }

            unsigned regCount = retTypeDesc.GetReturnRegCount();

            // Only the x86 and x64 Unix ABIs allow multi-reg returns, and the
            // number of result regs should equal MAX_RET_REG_COUNT.
            assert(regCount == MAX_RET_REG_COUNT);

            for (unsigned i = 0; i < regCount; ++i)
            {
                gcInfo.gcMarkRegPtrVal(retTypeDesc.GetABIReturnReg(i), retTypeDesc.GetReturnRegType(i));
            }
        }
        else if (compiler->compMethodReturnsRetBufAddr())
        {
            // This is for returning in an implicit RetBuf.
            // If the address of the buffer is returned in REG_INTRET, mark the content of INTRET as ByRef.

            // In case the return is in an implicit RetBuf, the native return type should be a struct
            assert(varTypeIsStruct(compiler->info.compRetNativeType));

            gcInfo.gcMarkRegPtrVal(REG_INTRET, TYP_BYREF);
        }
        else
        {
            // ... all other cases.

#ifdef _TARGET_AMD64_
            // For x64, structs that are not returned in registers are always
            // returned in implicit RetBuf. If we reached here, we should not have
            // a RetBuf and the return type should not be a struct.
            assert(compiler->info.compRetBuffArg == BAD_VAR_NUM);
            assert(!varTypeIsStruct(compiler->info.compRetNativeType));
#endif // _TARGET_AMD64_

            // For x86 Windows we can't make such assertions since we generate code for returning of
            // the RetBuf in REG_INTRET only when the ProfilerHook is enabled. Otherwise
            // compRetNativeType could be TYP_STRUCT.
            gcInfo.gcMarkRegPtrVal(REG_INTRET, compiler->info.compRetNativeType);
        }
    }
    regNumber regGSCheck;
    regMaskTP regMaskGSCheck = RBM_NONE;

    if (!pushReg)
    {
        // Non-tail call: we can use any callee trash register that is not
        // a return register or contain 'this' pointer (keep alive this), since
        // we are generating GS cookie check after a GT_RETURN block.
        // Note: On Amd64 System V RDX is an arg register - REG_ARG_2 - as well
        // as return register for two-register-returned structs.
        if (compiler->lvaKeepAliveAndReportThis() && compiler->lvaTable[compiler->info.compThisArg].lvRegister &&
            (compiler->lvaTable[compiler->info.compThisArg].lvRegNum == REG_ARG_0))
        {
            regGSCheck = REG_ARG_1;
        }
        else
        {
            regGSCheck = REG_ARG_0;
        }
    }
    else
    {
#ifdef _TARGET_X86_
        // It doesn't matter which register we pick, since we're going to save and restore it
        // around the check.
        // TODO-CQ: Can we optimize the choice of register to avoid doing the push/pop sometimes?
        regGSCheck     = REG_EAX;
        regMaskGSCheck = RBM_EAX;
#else  // !_TARGET_X86_
        // Tail calls from methods that need GS check: We need to preserve registers while
        // emitting GS cookie check for a tail prefixed call or a jmp. To emit GS cookie
        // check, we might need a register. This won't be an issue for jmp calls for the
        // reason mentioned below (see comment starting with "Jmp Calls:").
        //
        // The following are the possible solutions in case of tail prefixed calls:
        // 1) Use R11 - ignore tail prefix on calls that need to pass a param in R11 when
        //    present in methods that require GS cookie check. Rest of the tail calls that
        //    do not require R11 will be honored.
        // 2) Internal register - GT_CALL node reserves an internal register and emits GS
        //    cookie check as part of tail call codegen. GenExitCode() needs to special case
        //    fast tail calls implemented as epilog+jmp, or such tail calls should always get
        //    dispatched via helper.
        // 3) Materialize GS cookie check as a separate node hanging off GT_CALL node in
        //    right execution order during rationalization.
        //
        // There are two calls that use R11: VSD and calli pinvokes with cookie param. Tail
        // prefix on pinvokes is ignored. That is, options 2 and 3 will allow tail prefixed
        // VSD calls from methods that need GS check.
        //
        // Tail prefixed calls: Right now for Jit64 compat, methods requiring GS cookie check
        // ignore the tail prefix. In future, if we intend to support tail calls from such a method,
        // consider one of the options mentioned above. For now adding an assert that we don't
        // expect to see a tail call in a method that requires GS check.
        noway_assert(!compiler->compTailCallUsed);

        // Jmp calls: specify the method handle with which the JIT queries the VM for its entry point
        // address; hence a jmp call can neither be a VSD call nor a PInvoke calli with a cookie
        // parameter. Therefore, in case of jmp calls it is safe to use R11.
        regGSCheck = REG_R11;
#endif // !_TARGET_X86_
    }
    regMaskTP byrefPushedRegs = RBM_NONE;
    regMaskTP norefPushedRegs = RBM_NONE;
    regMaskTP pushedRegs      = RBM_NONE;

    if (compiler->gsGlobalSecurityCookieAddr == nullptr)
    {
#if defined(_TARGET_AMD64_)
        // If GS cookie value fits within 32-bits we can use 'cmp mem64, imm32'.
        // Otherwise, load the value into a reg and use 'cmp mem64, reg64'.
        if ((int)compiler->gsGlobalSecurityCookieVal != (ssize_t)compiler->gsGlobalSecurityCookieVal)
        {
            genSetRegToIcon(regGSCheck, compiler->gsGlobalSecurityCookieVal, TYP_I_IMPL);
            getEmitter()->emitIns_S_R(INS_cmp, EA_PTRSIZE, regGSCheck, compiler->lvaGSSecurityCookie, 0);
        }
        else
#endif // defined(_TARGET_AMD64_)
        {
            assert((int)compiler->gsGlobalSecurityCookieVal == (ssize_t)compiler->gsGlobalSecurityCookieVal);
            getEmitter()->emitIns_S_I(INS_cmp, EA_PTRSIZE, compiler->lvaGSSecurityCookie, 0,
                                      (int)compiler->gsGlobalSecurityCookieVal);
        }
    }
    else
    {
        // Ngen case - GS cookie value needs to be accessed through an indirection.

        pushedRegs = genPushRegs(regMaskGSCheck, &byrefPushedRegs, &norefPushedRegs);

        instGen_Set_Reg_To_Imm(EA_HANDLE_CNS_RELOC, regGSCheck, (ssize_t)compiler->gsGlobalSecurityCookieAddr);
        getEmitter()->emitIns_R_AR(ins_Load(TYP_I_IMPL), EA_PTRSIZE, regGSCheck, regGSCheck, 0);
        getEmitter()->emitIns_S_R(INS_cmp, EA_PTRSIZE, regGSCheck, compiler->lvaGSSecurityCookie, 0);
    }

    BasicBlock*  gsCheckBlk = genCreateTempLabel();
    emitJumpKind jmpEqual   = genJumpKindForOper(GT_EQ, CK_SIGNED);
    inst_JMP(jmpEqual, gsCheckBlk);
    genEmitHelperCall(CORINFO_HELP_FAIL_FAST, 0, EA_UNKNOWN);
    genDefineTempLabel(gsCheckBlk);
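    // Taken together, the check above boils down to the following shape
    // (an illustrative sketch; the actual operands depend on the case taken):
    //     cmp   [frame cookie slot], <cookie imm32 or reg>
    //     je    gsCheckBlk
    //     call  CORINFO_HELP_FAIL_FAST
    // gsCheckBlk: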
    genPopRegs(pushedRegs, byrefPushedRegs, norefPushedRegs);
}
BasicBlock* CodeGen::genCallFinally(BasicBlock* block)
{
#if FEATURE_EH_FUNCLETS
    // Generate a call to the finally, like this:
    //      mov         rcx,qword ptr [rbp + 20H]       // Load rcx with PSPSym
    //      call        finally-funclet
    //      jmp         finally-return                  // Only for non-retless finally calls
    // The jmp can be a NOP if we're going to the next block.
    // If we're generating code for the main function (not a funclet), and there is no localloc,
    // then RSP at this point is the same value as that stored in the PSPSym. So just copy RSP
    // instead of loading the PSPSym in this case, or if PSPSym is not used (CoreRT ABI).

    if ((compiler->lvaPSPSym == BAD_VAR_NUM) ||
        (!compiler->compLocallocUsed && (compiler->funCurrentFunc()->funKind == FUNC_ROOT)))
    {
#ifndef UNIX_X86_ABI
        inst_RV_RV(INS_mov, REG_ARG_0, REG_SPBASE, TYP_I_IMPL);
#endif // !UNIX_X86_ABI
    }
    else
    {
        getEmitter()->emitIns_R_S(ins_Load(TYP_I_IMPL), EA_PTRSIZE, REG_ARG_0, compiler->lvaPSPSym, 0);
    }
    getEmitter()->emitIns_J(INS_call, block->bbJumpDest);
    if (block->bbFlags & BBF_RETLESS_CALL)
    {
        // We have a retless call, and the last instruction generated was a call.
        // If the next block is in a different EH region (or is the end of the code
        // block), then we need to generate a breakpoint here (since it will never
        // get executed) to get proper unwind behavior.

        if ((block->bbNext == nullptr) || !BasicBlock::sameEHRegion(block, block->bbNext))
        {
            instGen(INS_BREAKPOINT); // This should never get executed
        }
    }
    else
    {
        // TODO-Linux-x86: Do we need to handle the GC information for this NOP or JMP specially, as is done for other
        // architectures?
#ifndef JIT32_GCENCODER
        // Because of the way the flowgraph is connected, the liveness info for this one instruction
        // after the call is not (can not be) correct in cases where a variable has a last use in the
        // handler. So turn off GC reporting for this single instruction.
        getEmitter()->emitDisableGC();
#endif // JIT32_GCENCODER

        // Now go to where the finally funclet needs to return to.
        if (block->bbNext->bbJumpDest == block->bbNext->bbNext)
        {
            // Fall-through.
            // TODO-XArch-CQ: Can we get rid of this instruction, and just have the call return directly
            // to the next instruction? This would depend on stack walking from within the finally
            // handler working without this instruction being in this special EH region.
            instGen(INS_nop);
        }
        else
        {
            inst_JMP(EJ_jmp, block->bbNext->bbJumpDest);
        }

#ifndef JIT32_GCENCODER
        getEmitter()->emitEnableGC();
#endif // JIT32_GCENCODER
    }
#else // !FEATURE_EH_FUNCLETS

    // If we are about to invoke a finally locally from a try block, we have to set the ShadowSP slot
    // corresponding to the finally's nesting level. When invoked in response to an exception, the
    // EE does this.
    //
    // We have a BBJ_CALLFINALLY followed by a BBJ_ALWAYS.
    //
    // We will emit:
    //      mov [ebp - (n + 1)], 0
    //      mov [ebp -  n     ], 0xFC
    //      push &step
    //      jmp  finallyBlock
    noway_assert(isFramePointerUsed());

    // Get the nesting level which contains the finally
    unsigned finallyNesting = 0;
    compiler->fgGetNestingLevel(block, &finallyNesting);

    // The last slot is reserved for ICodeManager::FixContext(ppEndRegion)
    unsigned filterEndOffsetSlotOffs;
    filterEndOffsetSlotOffs = (unsigned)(compiler->lvaLclSize(compiler->lvaShadowSPslotsVar) - TARGET_POINTER_SIZE);

    unsigned curNestingSlotOffs;
    curNestingSlotOffs = (unsigned)(filterEndOffsetSlotOffs - ((finallyNesting + 1) * TARGET_POINTER_SIZE));

    // Zero out the slot for the next nesting level
    instGen_Store_Imm_Into_Lcl(TYP_I_IMPL, EA_PTRSIZE, 0, compiler->lvaShadowSPslotsVar,
                               curNestingSlotOffs - TARGET_POINTER_SIZE);
    instGen_Store_Imm_Into_Lcl(TYP_I_IMPL, EA_PTRSIZE, LCL_FINALLY_MARK, compiler->lvaShadowSPslotsVar,
                               curNestingSlotOffs);

    // Now push the address where the finally funclet should return to directly.
    if (!(block->bbFlags & BBF_RETLESS_CALL))
    {
        assert(block->isBBCallAlwaysPair());
        getEmitter()->emitIns_J(INS_push_hide, block->bbNext->bbJumpDest);
    }
    else
    {
        // EE expects a DWORD, so we give it 0
        inst_IV(INS_push_hide, 0);
    }

    // Jump to the finally BB
    inst_JMP(EJ_jmp, block->bbJumpDest);
#endif // !FEATURE_EH_FUNCLETS

    // The BBJ_ALWAYS is used because the BBJ_CALLFINALLY can't point to the
    // jump target using bbJumpDest - that is already used to point
    // to the finally block. So just skip past the BBJ_ALWAYS unless the
    // block is RETLESS.
    if (!(block->bbFlags & BBF_RETLESS_CALL))
    {
        assert(block->isBBCallAlwaysPair());
        block = block->bbNext;
    }
    return block;
}
#if FEATURE_EH_FUNCLETS
void CodeGen::genEHCatchRet(BasicBlock* block)
{
    // Set RAX to the address the VM should return to after the catch.
    // Generate a RIP-relative
    //         lea reg, [rip + disp32] ; the RIP is implicit
    // which will be position-independent.
    getEmitter()->emitIns_R_L(INS_lea, EA_PTR_DSP_RELOC, block->bbJumpDest, REG_INTRET);
}
#else // !FEATURE_EH_FUNCLETS

void CodeGen::genEHFinallyOrFilterRet(BasicBlock* block)
{
    // The last statement of the block must be a GT_RETFILT, which has already been generated.
    assert(block->lastNode() != nullptr);
    assert(block->lastNode()->OperGet() == GT_RETFILT);

    if (block->bbJumpKind == BBJ_EHFINALLYRET)
    {
        assert(block->lastNode()->gtOp.gtOp1 == nullptr); // op1 == nullptr means endfinally

        // Return using a pop-jmp sequence. As the "try" block calls
        // the finally with a jmp, this leaves the x86 call-ret stack
        // balanced in the normal flow path.

        noway_assert(isFramePointerRequired());
        inst_RV(INS_pop_hide, REG_EAX, TYP_I_IMPL);
        inst_RV(INS_i_jmp, REG_EAX, TYP_I_IMPL);
    }
    else
    {
        assert(block->bbJumpKind == BBJ_EHFILTERRET);

        // The return value has already been computed.
        instGen_Return(0);
    }
}

#endif // !FEATURE_EH_FUNCLETS
// Move an immediate value into an integer register

void CodeGen::instGen_Set_Reg_To_Imm(emitAttr size, regNumber reg, ssize_t imm, insFlags flags)
{
    // reg cannot be a FP register
    assert(!genIsValidFloatReg(reg));

    if (!compiler->opts.compReloc)
    {
        size = EA_SIZE(size); // Strip any Reloc flags from size if we aren't doing relocs
    }

    if ((imm == 0) && !EA_IS_RELOC(size))
    {
        instGen_Set_Reg_To_Zero(size, reg, flags);
    }
    else
    {
        if (genDataIndirAddrCanBeEncodedAsPCRelOffset(imm))
        {
            getEmitter()->emitIns_R_AI(INS_lea, EA_PTR_DSP_RELOC, reg, imm);
        }
        else
        {
            getEmitter()->emitIns_R_I(INS_mov, size, reg, imm);
        }
    }
    regTracker.rsTrackRegIntCns(reg, imm);
}
/***********************************************************************************
 *
 * Generate code to set a register 'targetReg' of type 'targetType' to the constant
 * specified by the constant (GT_CNS_INT or GT_CNS_DBL) in 'tree'. This does not call
 * genProduceReg() on the target register.
 */
void CodeGen::genSetRegToConst(regNumber targetReg, var_types targetType, GenTreePtr tree)
{
    switch (tree->gtOper)
    {
        case GT_CNS_INT:
        {
            // relocatable values tend to come down as a CNS_INT of native int type
            // so the line between these two opcodes is kind of blurry
            GenTreeIntConCommon* con    = tree->AsIntConCommon();
            ssize_t              cnsVal = con->IconValue();

            if (con->ImmedValNeedsReloc(compiler))
            {
                instGen_Set_Reg_To_Imm(EA_HANDLE_CNS_RELOC, targetReg, cnsVal);
                regTracker.rsTrackRegTrash(targetReg);
            }
            else
            {
                genSetRegToIcon(targetReg, cnsVal, targetType);
            }
        }
        break;
        case GT_CNS_DBL:
        {
            double constValue = tree->gtDblCon.gtDconVal;

            // Make sure we use "xorpd reg, reg" only for +ve zero constant (0.0) and not for -ve zero (-0.0)
            if (*(__int64*)&constValue == 0)
            {
                // A faster/smaller way to generate 0
                instruction ins = genGetInsForOper(GT_XOR, targetType);
                inst_RV_RV(ins, targetReg, targetReg, targetType);
            }
            else
            {
                GenTreePtr cns;
                if (targetType == TYP_FLOAT)
                {
                    float f = forceCastToFloat(constValue);
                    cns     = genMakeConst(&f, targetType, tree, false);
                }
                else
                {
                    cns = genMakeConst(&constValue, targetType, tree, true);
                }

                inst_RV_TT(ins_Load(targetType), targetReg, cns);
            }
        }
        break;

        default:
            unreached();
    }
}
//------------------------------------------------------------------------
// genCodeForNegNot: Produce code for a GT_NEG/GT_NOT node.
//
// Arguments:
//    tree - the node
//
void CodeGen::genCodeForNegNot(GenTree* tree)
{
    assert(tree->OperIs(GT_NEG, GT_NOT));

    regNumber targetReg  = tree->gtRegNum;
    var_types targetType = tree->TypeGet();

    if (varTypeIsFloating(targetType))
    {
        assert(tree->gtOper == GT_NEG);
        genSSE2BitwiseOp(tree);
    }
    else
    {
        GenTreePtr operand = tree->gtGetOp1();
        assert(operand->isUsedFromReg());
        regNumber operandReg = genConsumeReg(operand);

        if (operandReg != targetReg)
        {
            inst_RV_RV(INS_mov, targetReg, operandReg, targetType);
        }

        instruction ins = genGetInsForOper(tree->OperGet(), targetType);
        inst_RV(ins, targetReg, targetType);
    }

    genProduceReg(tree);
}
// Generate code to get the high N bits of a N*N=2N bit multiplication result
void CodeGen::genCodeForMulHi(GenTreeOp* treeNode)
{
    if (treeNode->OperGet() == GT_MULHI)
    {
        assert(!(treeNode->gtFlags & GTF_UNSIGNED));
    }
    assert(!treeNode->gtOverflowEx());

    regNumber targetReg  = treeNode->gtRegNum;
    var_types targetType = treeNode->TypeGet();
    emitter*  emit       = getEmitter();
    emitAttr  size       = emitTypeSize(treeNode);
    GenTree*  op1        = treeNode->gtOp.gtOp1;
    GenTree*  op2        = treeNode->gtOp.gtOp2;

    // to get the high bits of the multiply, we are constrained to using the
    // 1-op form:  RDX:RAX = RAX * rm
    // The 3-op form (Rx=Ry*Rz) does not support it.
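    // For example (an illustrative sketch, not from any particular compilation):
    // a signed GT_MULHI whose second operand lives in memory comes out as
    //     imul qword ptr [mem]    ; RDX:RAX = RAX * [mem]
    // after which the high half of the product is read from RDX.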
    genConsumeOperands(treeNode->AsOp());

    GenTree* regOp = op1;
    GenTree* rmOp  = op2;

    // Set rmOp to the memory operand (if any)
    if (op1->isUsedFromMemory() || (op2->isUsedFromReg() && (op2->gtRegNum == REG_RAX)))
    {
        regOp = op2;
        rmOp  = op1;
    }
    assert(regOp->isUsedFromReg());

    // Setup targetReg when neither of the source operands was a matching register
    if (regOp->gtRegNum != REG_RAX)
    {
        inst_RV_RV(ins_Copy(targetType), REG_RAX, regOp->gtRegNum, targetType);
    }

    instruction ins;
    if ((treeNode->gtFlags & GTF_UNSIGNED) == 0)
    {
        ins = INS_imulEAX;
    }
    else
    {
        ins = INS_mulEAX;
    }

    emit->emitInsBinary(ins, size, treeNode, rmOp);

    // Move the result to the desired register, if necessary
    if (treeNode->OperGet() == GT_MULHI && targetReg != REG_RDX)
    {
        inst_RV_RV(INS_mov, targetReg, REG_RDX, targetType);
    }

    genProduceReg(treeNode);
}
#ifdef _TARGET_X86_
//------------------------------------------------------------------------
// genCodeForLongUMod: Generate code for a tree of the form
//                     `(umod (gt_long x y) (const int))`
//
// Arguments:
//   node - the node for which to generate code
//
void CodeGen::genCodeForLongUMod(GenTreeOp* node)
{
    assert(node != nullptr);
    assert(node->OperGet() == GT_UMOD);
    assert(node->TypeGet() == TYP_INT);

    GenTreeOp* const dividend = node->gtOp1->AsOp();
    assert(dividend->OperGet() == GT_LONG);
    assert(varTypeIsLong(dividend));

    genConsumeOperands(node);

    GenTree* const dividendLo = dividend->gtOp1;
    GenTree* const dividendHi = dividend->gtOp2;
    assert(dividendLo->isUsedFromReg());
    assert(dividendHi->isUsedFromReg());

    GenTree* const divisor = node->gtOp2;
    assert(divisor->gtSkipReloadOrCopy()->OperGet() == GT_CNS_INT);
    assert(divisor->gtSkipReloadOrCopy()->isUsedFromReg());
    assert(divisor->gtSkipReloadOrCopy()->AsIntCon()->gtIconVal >= 2);
    assert(divisor->gtSkipReloadOrCopy()->AsIntCon()->gtIconVal <= 0x3fffffff);
    // dividendLo must be in RAX; dividendHi must be in RDX
    genCopyRegIfNeeded(dividendLo, REG_EAX);
    genCopyRegIfNeeded(dividendHi, REG_EDX);

    // At this point, EAX:EDX contains the 64bit dividend and op2->gtRegNum
    // contains the 32bit divisor. We want to generate the following code:
    //
    //   cmp edx, divisor->gtRegNum
    //   jb noOverflow
    //
    //   mov temp, eax
    //   mov eax, edx
    //   xor edx, edx
    //   div divisor->gtRegNum
    //   mov eax, temp
    //
    // noOverflow:
    //   div divisor->gtRegNum
    //
    // This works because (a * 2^32 + b) % c = ((a % c) * 2^32 + b) % c.
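    // A small worked instance of that identity (illustrative numbers only):
    // with a = 7, b = 5, c = 6 we have (7 * 2^32 + 5) % 6 = 30064771077 % 6 = 3,
    // and ((7 % 6) * 2^32 + 5) % 6 = 4294967301 % 6 = 3 as well, since replacing
    // a by a % c only subtracts multiples of c * 2^32. The first div leaves
    // a % c in edx, which is < c, so the second div cannot overflow.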
    BasicBlock* const noOverflow = genCreateTempLabel();

    //   cmp edx, divisor->gtRegNum
    //   jb noOverflow
    inst_RV_RV(INS_cmp, REG_EDX, divisor->gtRegNum);
    inst_JMP(EJ_jb, noOverflow);

    //   mov temp, eax
    //   mov eax, edx
    //   xor edx, edx
    //   div divisor->gtRegNum
    //   mov eax, temp
    const regNumber tempReg = node->GetSingleTempReg();
    inst_RV_RV(INS_mov, tempReg, REG_EAX, TYP_INT);
    inst_RV_RV(INS_mov, REG_EAX, REG_EDX, TYP_INT);
    instGen_Set_Reg_To_Zero(EA_PTRSIZE, REG_EDX);
    inst_RV(INS_div, divisor->gtRegNum, TYP_INT);
    inst_RV_RV(INS_mov, REG_EAX, tempReg, TYP_INT);

    // noOverflow:
    //   div divisor->gtRegNum
    genDefineTempLabel(noOverflow);
    inst_RV(INS_div, divisor->gtRegNum, TYP_INT);

    const regNumber targetReg = node->gtRegNum;
    if (targetReg != REG_EDX)
    {
        inst_RV_RV(INS_mov, targetReg, REG_RDX, TYP_INT);
    }

    genProduceReg(node);
}
#endif // _TARGET_X86_
//------------------------------------------------------------------------
// genCodeForDivMod: Generate code for a DIV or MOD operation.
//
// Arguments:
//    treeNode - the node to generate the code for
//
void CodeGen::genCodeForDivMod(GenTreeOp* treeNode)
{
    assert(treeNode->OperIs(GT_DIV, GT_UDIV, GT_MOD, GT_UMOD));

    // We shouldn't be seeing GT_MOD on float/double args as it should get morphed into a
    // helper call by the front-end. Similarly we shouldn't be seeing GT_UDIV and GT_UMOD
    // on float/double args.
    assert(treeNode->OperIs(GT_DIV) || !varTypeIsFloating(treeNode));

    GenTree* dividend = treeNode->gtOp1;

#ifdef _TARGET_X86_
    if (varTypeIsLong(dividend->TypeGet()))
    {
        genCodeForLongUMod(treeNode);
        return;
    }
#endif // _TARGET_X86_

    GenTree*   divisor    = treeNode->gtOp2;
    genTreeOps oper       = treeNode->OperGet();
    emitAttr   size       = emitTypeSize(treeNode);
    regNumber  targetReg  = treeNode->gtRegNum;
    var_types  targetType = treeNode->TypeGet();
    emitter*   emit       = getEmitter();

    // dividend is in a register.
    assert(dividend->isUsedFromReg());

    genConsumeOperands(treeNode->AsOp());
    if (varTypeIsFloating(targetType))
    {
        // Floating point div/rem operation
        assert(oper == GT_DIV || oper == GT_MOD);

        if (dividend->gtRegNum == targetReg)
        {
            emit->emitInsBinary(genGetInsForOper(treeNode->gtOper, targetType), size, treeNode, divisor);
        }
        else if (divisor->isUsedFromReg() && divisor->gtRegNum == targetReg)
        {
            // It is not possible to generate 2-operand divss or divsd where reg2 = reg1 / reg2
            // because divss/divsd reg1, reg2 will over-write reg1. Therefore, in case of AMD64
            // LSRA has to make sure that such a register assignment is not generated for floating
            // point div/rem operations.
            noway_assert(
                !"GT_DIV/GT_MOD (float): case of reg2 = reg1 / reg2, LSRA should never generate such a reg assignment");
        }
        else
        {
            inst_RV_RV(ins_Copy(targetType), targetReg, dividend->gtRegNum, targetType);
            emit->emitInsBinary(genGetInsForOper(treeNode->gtOper, targetType), size, treeNode, divisor);
        }
    }
    else
    {
        // dividend must be in RAX
        genCopyRegIfNeeded(dividend, REG_RAX);

        // zero or sign extend rax to rdx
        if (oper == GT_UMOD || oper == GT_UDIV)
        {
            instGen_Set_Reg_To_Zero(EA_PTRSIZE, REG_EDX);
        }
        else
        {
            emit->emitIns(INS_cdq, size);
            // the cdq instruction writes RDX, so clear the gcInfo for RDX
            gcInfo.gcMarkRegSetNpt(RBM_RDX);
        }

        // Perform the 'targetType' (64-bit or 32-bit) divide instruction
        instruction ins;
        if (oper == GT_UMOD || oper == GT_UDIV)
        {
            ins = INS_div;
        }
        else
        {
            ins = INS_idiv;
        }

        emit->emitInsBinary(ins, size, treeNode, divisor);
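        // For example (illustrative registers): a signed 32-bit x / y with x
        // already in EAX and y in ECX comes out as
        //     cdq         ; sign-extend EAX into EDX
        //     idiv ecx    ; quotient -> EAX, remainder -> EDX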
        // DIV/IDIV instructions always store the quotient in RAX and the remainder in RDX.
        // Move the result to the desired register, if necessary
        if (oper == GT_DIV || oper == GT_UDIV)
        {
            if (targetReg != REG_RAX)
            {
                inst_RV_RV(INS_mov, targetReg, REG_RAX, targetType);
            }
        }
        else
        {
            assert((oper == GT_MOD) || (oper == GT_UMOD));
            if (targetReg != REG_RDX)
            {
                inst_RV_RV(INS_mov, targetReg, REG_RDX, targetType);
            }
        }
    }
    genProduceReg(treeNode);
}
//------------------------------------------------------------------------
// genCodeForBinary: Generate code for many binary arithmetic operators
// This method is expected to have called genConsumeOperands() before calling it.
//
// Arguments:
//    treeNode - The binary operation for which we are generating code.
//
// Return Value:
//    None.
//
// Notes:
//    Mul and div variants have special constraints on x64 so are not handled here.
//    See the assert below for the operators that are handled.

void CodeGen::genCodeForBinary(GenTree* treeNode)
{
    const genTreeOps oper       = treeNode->OperGet();
    regNumber        targetReg  = treeNode->gtRegNum;
    var_types        targetType = treeNode->TypeGet();
    emitter*         emit       = getEmitter();

#if defined(_TARGET_64BIT_)
    assert(oper == GT_OR || oper == GT_XOR || oper == GT_AND || oper == GT_ADD || oper == GT_SUB);
#else  // !defined(_TARGET_64BIT_)
    assert(oper == GT_OR || oper == GT_XOR || oper == GT_AND || oper == GT_ADD_LO || oper == GT_ADD_HI ||
           oper == GT_SUB_LO || oper == GT_SUB_HI || oper == GT_MUL_LONG || oper == GT_DIV_HI || oper == GT_MOD_HI ||
           oper == GT_ADD || oper == GT_SUB);
#endif // !defined(_TARGET_64BIT_)
    GenTreePtr op1 = treeNode->gtGetOp1();
    GenTreePtr op2 = treeNode->gtGetOp2();

    // Commutative operations can mark op1 as contained or reg-optional to generate "op reg, memop/immed"
    if (!op1->isUsedFromReg())
    {
        assert(treeNode->OperIsCommutative());
        assert(op1->isMemoryOp() || op1->IsLocal() || op1->IsCnsNonZeroFltOrDbl() || op1->IsIntCnsFitsInI32() ||
               op1->IsRegOptional());

        op1 = treeNode->gtGetOp2();
        op2 = treeNode->gtGetOp1();
    }

    instruction ins = genGetInsForOper(treeNode->OperGet(), targetType);

    // The arithmetic node must be sitting in a register (since it's not contained)
    noway_assert(targetReg != REG_NA);

    regNumber op1reg = op1->isUsedFromReg() ? op1->gtRegNum : REG_NA;
    regNumber op2reg = op2->isUsedFromReg() ? op2->gtRegNum : REG_NA;
    GenTreePtr dst;
    GenTreePtr src;

    // This is the case of reg1 = reg1 op reg2
    // We're ready to emit the instruction without any moves
    if (op1reg == targetReg)
    {
        dst = op1;
        src = op2;
    }
    // We have reg1 = reg2 op reg1
    // In order for this operation to be correct
    // we need that op is a commutative operation so
    // we can convert it into reg1 = reg1 op reg2 and emit
    // the same code as above
    else if (op2reg == targetReg)
    {
        noway_assert(GenTree::OperIsCommutative(oper));
        dst = op2;
        src = op1;
    }
    // now we know there are 3 different operands so attempt to use LEA
    else if (oper == GT_ADD && !varTypeIsFloating(treeNode) && !treeNode->gtOverflowEx() // LEA does not set flags
             && (op2->isContainedIntOrIImmed() || op2->isUsedFromReg()) && !treeNode->gtSetFlags())
    {
        if (op2->isContainedIntOrIImmed())
        {
            emit->emitIns_R_AR(INS_lea, emitTypeSize(treeNode), targetReg, op1reg,
                               (int)op2->AsIntConCommon()->IconValue());
        }
        else
        {
            assert(op2reg != REG_NA);
            emit->emitIns_R_ARX(INS_lea, emitTypeSize(treeNode), targetReg, op1reg, op2reg, 1, 0);
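            // e.g. (illustrative): for reg3 = reg1 + reg2 the line above emits
            //     lea reg3, [reg1 + reg2*1]
            // computing the sum into a third register without touching EFLAGS.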
        }

        genProduceReg(treeNode);
        return;
    }
    // dest, op1 and op2 registers are different:
    // reg3 = reg1 op reg2
    // We can implement this by issuing a mov:
    // reg3 = reg1
    // reg3 = reg3 op reg2
    else
    {
        inst_RV_RV(ins_Copy(targetType), targetReg, op1reg, targetType);
        regTracker.rsTrackRegCopy(targetReg, op1reg);
        gcInfo.gcMarkRegPtrVal(targetReg, targetType);
        dst = treeNode;
        src = op2;
    }
    // try to use an inc or dec
    if (oper == GT_ADD && !varTypeIsFloating(treeNode) && src->isContainedIntOrIImmed() && !treeNode->gtOverflowEx())
    {
        if (src->IsIntegralConst(1))
        {
            emit->emitIns_R(INS_inc, emitTypeSize(treeNode), targetReg);
            genProduceReg(treeNode);
            return;
        }
        else if (src->IsIntegralConst(-1))
        {
            emit->emitIns_R(INS_dec, emitTypeSize(treeNode), targetReg);
            genProduceReg(treeNode);
            return;
        }
    }

    regNumber r = emit->emitInsBinary(ins, emitTypeSize(treeNode), dst, src);
    noway_assert(r == targetReg);

    if (treeNode->gtOverflowEx())
    {
#if !defined(_TARGET_64BIT_)
        assert(oper == GT_ADD || oper == GT_SUB || oper == GT_ADD_HI || oper == GT_SUB_HI);
#else
        assert(oper == GT_ADD || oper == GT_SUB);
#endif
        genCheckOverflow(treeNode);
    }
    genProduceReg(treeNode);
}
//------------------------------------------------------------------------
// genCodeForMul: Generate code for a MUL operation.
//
// Arguments:
//    treeNode - the node to generate the code for
//
void CodeGen::genCodeForMul(GenTreeOp* treeNode)
{
    assert(treeNode->OperIs(GT_MUL));

    regNumber targetReg  = treeNode->gtRegNum;
    var_types targetType = treeNode->TypeGet();
    emitter*  emit       = getEmitter();

    instruction ins;
    emitAttr    size                  = emitTypeSize(treeNode);
    bool        isUnsignedMultiply    = ((treeNode->gtFlags & GTF_UNSIGNED) != 0);
    bool        requiresOverflowCheck = treeNode->gtOverflowEx();

    GenTree* op1 = treeNode->gtGetOp1();
    GenTree* op2 = treeNode->gtGetOp2();

    // there are 3 forms of x64 multiply:
    // 1-op form with 128 result : RDX:RAX = RAX * rm
    // 2-op form : reg *= rm
    // 3-op form : reg = rm * imm

    genConsumeOperands(treeNode->AsOp());
    // This matches the 'mul' lowering in Lowering::SetMulOpCounts()
    //
    // immOp :: Only one operand can be an immediate
    // rmOp  :: Only one operand can be a memory op.
    // regOp :: A register op (especially the operand that matches 'targetReg')
    //          (can be nullptr when we have both a memory op and an immediate op)

    GenTree* immOp = nullptr;
    GenTree* rmOp  = op1;
    GenTree* regOp;

    if (op2->isContainedIntOrIImmed())
    {
        immOp = op2;
    }
    else if (op1->isContainedIntOrIImmed())
    {
        immOp = op1;
        rmOp  = op2;
    }

    if (immOp != nullptr)
    {
        // This must be a non-floating point operation.
        assert(!varTypeIsFloating(treeNode));

        // CQ: When possible use LEA for mul by imm 3, 5 or 9
        ssize_t imm = immOp->AsIntConCommon()->IconValue();

        if (!requiresOverflowCheck && rmOp->isUsedFromReg() && ((imm == 3) || (imm == 5) || (imm == 9)))
        {
            // We will use the LEA instruction to perform this multiply
            // Note that an LEA with base=x, index=x and scale=(imm-1) computes x*imm when imm=3,5 or 9.
            unsigned int scale = (unsigned int)(imm - 1);
            getEmitter()->emitIns_R_ARX(INS_lea, size, targetReg, rmOp->gtRegNum, rmOp->gtRegNum, scale, 0);
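            // e.g. (illustrative): for imm == 3 the line above emits
            //     lea targetReg, [rmOpReg + rmOpReg*2]
            // i.e. rmOp * 3 in a single instruction, with no flags clobbered.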
        }
        else
        {
            // use the 3-op form with immediate
            ins = getEmitter()->inst3opImulForReg(targetReg);
            emit->emitInsBinary(ins, size, rmOp, immOp);
        }
    }
    else // we have no contained immediate operand
    {
        regNumber mulTargetReg = targetReg;
        if (isUnsignedMultiply && requiresOverflowCheck)
        {
            ins          = INS_mulEAX;
            mulTargetReg = REG_RAX;
        }
        else
        {
            ins = genGetInsForOper(GT_MUL, targetType);
        }

        // Set rmOp to the memory operand (if any)
        // or set regOp to the op2 when it has the matching target register for our multiply op
        //
        if (op1->isUsedFromMemory() || (op2->isUsedFromReg() && (op2->gtRegNum == mulTargetReg)))
        {
            regOp = op2;
            rmOp  = op1;
        }
        else
        {
            regOp = op1;
            rmOp  = op2;
        }
        assert(regOp->isUsedFromReg());

        // Setup targetReg when neither of the source operands was a matching register
        if (regOp->gtRegNum != mulTargetReg)
        {
            inst_RV_RV(ins_Copy(targetType), mulTargetReg, regOp->gtRegNum, targetType);
        }

        emit->emitInsBinary(ins, size, treeNode, rmOp);

        // Move the result to the desired register, if necessary
        if ((ins == INS_mulEAX) && (targetReg != REG_RAX))
        {
            inst_RV_RV(INS_mov, targetReg, REG_RAX, targetType);
        }
    }

    if (requiresOverflowCheck)
    {
        // Overflow checking is only used for non-floating point types
        noway_assert(!varTypeIsFloating(treeNode));

        genCheckOverflow(treeNode);
    }

    genProduceReg(treeNode);
}
//------------------------------------------------------------------------
// isStructReturn: Returns whether the 'treeNode' is returning a struct.
//
// Arguments:
//    treeNode - The tree node to evaluate whether is a struct return.
//
// Return Value:
//    For AMD64 *nix: returns true if the 'treeNode' is a GT_RETURN node of type struct.
//                    Otherwise returns false.
//    For other platforms: always returns false.
//
bool CodeGen::isStructReturn(GenTreePtr treeNode)
{
    // This method could be called for 'treeNode' of GT_RET_FILT or GT_RETURN.
    // For the GT_RET_FILT, the return is always
    // a bool or a void, for the end of a finally block.
    noway_assert(treeNode->OperGet() == GT_RETURN || treeNode->OperGet() == GT_RETFILT);
    if (treeNode->OperGet() != GT_RETURN)
    {
        return false;
    }

#ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
    return varTypeIsStruct(treeNode);
#else  // !FEATURE_UNIX_AMD64_STRUCT_PASSING
    assert(!varTypeIsStruct(treeNode));
    return false;
#endif // FEATURE_UNIX_AMD64_STRUCT_PASSING
}
//------------------------------------------------------------------------
// genStructReturn: Generates code for returning a struct.
//
// Arguments:
//    treeNode - The GT_RETURN tree node.
//
// Return Value:
//    None
//
// Assumption:
//    op1 of GT_RETURN node is either GT_LCL_VAR or multi-reg GT_CALL
//
void CodeGen::genStructReturn(GenTreePtr treeNode)
{
    assert(treeNode->OperGet() == GT_RETURN);
    GenTreePtr op1 = treeNode->gtGetOp1();

#ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
    if (op1->OperGet() == GT_LCL_VAR)
    {
        GenTreeLclVarCommon* lclVar = op1->AsLclVarCommon();
        LclVarDsc*           varDsc = &(compiler->lvaTable[lclVar->gtLclNum]);
        assert(varDsc->lvIsMultiRegRet);

        ReturnTypeDesc retTypeDesc;
        retTypeDesc.InitializeStructReturnType(compiler, varDsc->lvVerTypeInfo.GetClassHandle());
        unsigned regCount = retTypeDesc.GetReturnRegCount();
        assert(regCount == MAX_RET_REG_COUNT);

        if (varTypeIsEnregisterableStruct(op1))
        {
            // Right now the only enregistrable structs supported are SIMD vector types.
            assert(varTypeIsSIMD(op1));
            assert(op1->isUsedFromReg());

            // This is a case of operand is in a single reg and needs to be
            // returned in multiple ABI return registers.
            regNumber opReg = genConsumeReg(op1);
            regNumber reg0  = retTypeDesc.GetABIReturnReg(0);
            regNumber reg1  = retTypeDesc.GetABIReturnReg(1);
            if (opReg != reg0 && opReg != reg1)
            {
                // Operand reg is different from return regs.
                // Copy opReg to reg0 and let it be handled by one of the
                // other two cases below.
                inst_RV_RV(ins_Copy(TYP_DOUBLE), reg0, opReg, TYP_DOUBLE);
                opReg = reg0;
            }

            if (opReg == reg0)
            {
                assert(opReg != reg1);

                // reg0 - already has required 8-byte in bit position [63:0].
                // reg1 = opReg.
                // swap upper and lower 8-bytes of reg1 so that desired 8-byte is in bit position [63:0].
                inst_RV_RV(ins_Copy(TYP_DOUBLE), reg1, opReg, TYP_DOUBLE);
            }
            else
            {
                assert(opReg == reg1);

                // reg0 = opReg.
                // swap upper and lower 8-bytes of reg1 so that desired 8-byte is in bit position [63:0].
                inst_RV_RV(ins_Copy(TYP_DOUBLE), reg0, opReg, TYP_DOUBLE);
            }
            inst_RV_RV_IV(INS_shufpd, EA_16BYTE, reg1, reg1, 0x01);
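            // The shufpd above swaps the two 8-byte halves of reg1; illustratively,
            // if reg1 held {hi:lo} it now holds {lo:hi}, leaving the second struct
            // field in bit position [63:0] as the ABI requires.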
        }
        else
        {
            assert(op1->isUsedFromMemory());

            // Copy var on stack into ABI return registers
            int offset = 0;
            for (unsigned i = 0; i < regCount; ++i)
            {
                var_types type = retTypeDesc.GetReturnRegType(i);
                regNumber reg  = retTypeDesc.GetABIReturnReg(i);
                getEmitter()->emitIns_R_S(ins_Load(type), emitTypeSize(type), reg, lclVar->gtLclNum, offset);
                offset += genTypeSize(type);
            }
        }
    }
    else
    {
        assert(op1->IsMultiRegCall() || op1->IsCopyOrReloadOfMultiRegCall());

        genConsumeRegs(op1);

        GenTree*        actualOp1   = op1->gtSkipReloadOrCopy();
        GenTreeCall*    call        = actualOp1->AsCall();
        ReturnTypeDesc* retTypeDesc = call->GetReturnTypeDesc();
        unsigned        regCount    = retTypeDesc->GetReturnRegCount();
        assert(regCount == MAX_RET_REG_COUNT);

        // Handle circular dependency between call allocated regs and ABI return regs.
        //
        // It is possible under LSRA stress that originally allocated regs of call node,
        // say rax and rdx, are spilled and reloaded to rdx and rax respectively. But
        // GT_RETURN needs to move values as follows: rdx->rax, rax->rdx. A similar kind
        // of circular dependency could arise between the xmm0 and xmm1 return regs.
        // Codegen is expected to handle such circular dependency.
        //
        var_types regType0      = retTypeDesc->GetReturnRegType(0);
        regNumber returnReg0    = retTypeDesc->GetABIReturnReg(0);
        regNumber allocatedReg0 = call->GetRegNumByIdx(0);

        var_types regType1      = retTypeDesc->GetReturnRegType(1);
        regNumber returnReg1    = retTypeDesc->GetABIReturnReg(1);
        regNumber allocatedReg1 = call->GetRegNumByIdx(1);
        if (op1->IsCopyOrReload())
        {
            // GT_COPY/GT_RELOAD will have valid reg for those positions
            // that need to be copied or reloaded.
            regNumber reloadReg = op1->AsCopyOrReload()->GetRegNumByIdx(0);
            if (reloadReg != REG_NA)
            {
                allocatedReg0 = reloadReg;
            }

            reloadReg = op1->AsCopyOrReload()->GetRegNumByIdx(1);
            if (reloadReg != REG_NA)
            {
                allocatedReg1 = reloadReg;
            }
        }

        if (allocatedReg0 == returnReg1 && allocatedReg1 == returnReg0)
        {
            // Circular dependency - swap allocatedReg0 and allocatedReg1
            if (varTypeIsFloating(regType0))
            {
                assert(varTypeIsFloating(regType1));

                // The fastest way to swap two XMM regs is using PXOR
                inst_RV_RV(INS_pxor, allocatedReg0, allocatedReg1, TYP_DOUBLE);
                inst_RV_RV(INS_pxor, allocatedReg1, allocatedReg0, TYP_DOUBLE);
                inst_RV_RV(INS_pxor, allocatedReg0, allocatedReg1, TYP_DOUBLE);
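                // This is the classic XOR-swap idiom: after a ^= b, b ^= a, a ^= b
                // the two registers hold each other's original values, with no
                // scratch register needed.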
            }
            else
            {
                assert(varTypeIsIntegral(regType0));
                assert(varTypeIsIntegral(regType1));
                inst_RV_RV(INS_xchg, allocatedReg1, allocatedReg0, TYP_I_IMPL);
            }
        }
        else if (allocatedReg1 == returnReg0)
        {
            // Change the order of moves to correctly handle dependency.
            if (allocatedReg1 != returnReg1)
            {
                inst_RV_RV(ins_Copy(regType1), returnReg1, allocatedReg1, regType1);
            }

            if (allocatedReg0 != returnReg0)
            {
                inst_RV_RV(ins_Copy(regType0), returnReg0, allocatedReg0, regType0);
            }
        }
        else
        {
            // No circular dependency case.
            if (allocatedReg0 != returnReg0)
            {
                inst_RV_RV(ins_Copy(regType0), returnReg0, allocatedReg0, regType0);
            }

            if (allocatedReg1 != returnReg1)
            {
                inst_RV_RV(ins_Copy(regType1), returnReg1, allocatedReg1, regType1);
            }
        }
    }
#else
    unreached();
#endif
}
//------------------------------------------------------------------------
// genReturn: Generates code for return statement.
//            In case of struct return, delegates to the genStructReturn method.
//
// Arguments:
//    treeNode - The GT_RETURN or GT_RETFILT tree node.
//
// Return Value:
//    None
//
void CodeGen::genReturn(GenTreePtr treeNode)
{
    assert(treeNode->OperGet() == GT_RETURN || treeNode->OperGet() == GT_RETFILT);
    GenTreePtr op1        = treeNode->gtGetOp1();
    var_types  targetType = treeNode->TypeGet();

    // A void GT_RETFILT is the end of a finally. For non-void filter returns we need to load the result in the return
    // register, if it's not already there. The processing is the same as GT_RETURN. For filters, the IL spec says the
    // result is type int32. Further, the only legal values are 0 or 1; the use of other values is "undefined".
    assert(!treeNode->OperIs(GT_RETFILT) || (targetType == TYP_VOID) || (targetType == TYP_INT));

#ifdef DEBUG
    if (targetType == TYP_VOID)
    {
        assert(op1 == nullptr);
    }
#endif // DEBUG

#if defined(_TARGET_X86_)
    if (treeNode->TypeGet() == TYP_LONG)
    {
        assert(op1 != nullptr);
        noway_assert(op1->OperGet() == GT_LONG);
        GenTree* loRetVal = op1->gtGetOp1();
        GenTree* hiRetVal = op1->gtGetOp2();
        noway_assert((loRetVal->gtRegNum != REG_NA) && (hiRetVal->gtRegNum != REG_NA));

        genConsumeReg(loRetVal);
        genConsumeReg(hiRetVal);
        if (loRetVal->gtRegNum != REG_LNGRET_LO)
        {
            inst_RV_RV(ins_Copy(targetType), REG_LNGRET_LO, loRetVal->gtRegNum, TYP_INT);
        }
        if (hiRetVal->gtRegNum != REG_LNGRET_HI)
        {
            inst_RV_RV(ins_Copy(targetType), REG_LNGRET_HI, hiRetVal->gtRegNum, TYP_INT);
        }
    }
    else
#endif // !defined(_TARGET_X86_)
    {
        if (isStructReturn(treeNode))
        {
            genStructReturn(treeNode);
        }
        else if (targetType != TYP_VOID)
        {
            assert(op1 != nullptr);
            noway_assert(op1->gtRegNum != REG_NA);

            // !! NOTE !! genConsumeReg will clear op1 as GC ref after it has
            // consumed a reg for the operand. This is because the variable
            // is dead after return. But we are issuing more instructions
            // like "profiler leave callback" after this consumption. So
            // if you are issuing more instructions after this point,
            // remember to keep the variable live up until the new method
            // exit point where it is actually dead.
            genConsumeReg(op1);
            regNumber retReg = varTypeIsFloating(treeNode) ? REG_FLOATRET : REG_INTRET;

#ifdef _TARGET_X86_
            if (varTypeIsFloating(treeNode))
            {
                // Spill the return value register from an XMM register to the stack, then load it on the x87 stack.
                // If it already has a home location, use that. Otherwise, we need a temp.
                if (genIsRegCandidateLocal(op1) && compiler->lvaTable[op1->gtLclVarCommon.gtLclNum].lvOnFrame)
                {
                    // Store local variable to its home location, if necessary.
                    if ((op1->gtFlags & GTF_REG_VAL) != 0)
                    {
                        op1->gtFlags &= ~GTF_REG_VAL;
                        inst_TT_RV(ins_Store(op1->gtType,
                                             compiler->isSIMDTypeLocalAligned(op1->gtLclVarCommon.gtLclNum)),
                                   op1, op1->gtRegNum);
                    }
                    // Now, load it to the fp stack.
                    getEmitter()->emitIns_S(INS_fld, emitTypeSize(op1), op1->AsLclVarCommon()->gtLclNum, 0);
                }
                else
                {
                    // Spill the value, which should be in a register, then load it to the fp stack.
                    // TODO-X86-CQ: Deal with things that are already in memory (don't call genConsumeReg yet).
                    op1->gtFlags |= GTF_SPILL;
                    regSet.rsSpillTree(op1->gtRegNum, op1);
                    op1->gtFlags |= GTF_SPILLED;
                    op1->gtFlags &= ~GTF_SPILL;

                    TempDsc* t = regSet.rsUnspillInPlace(op1, op1->gtRegNum);
                    inst_FS_ST(INS_fld, emitActualTypeSize(op1->gtType), t, 0);
                    op1->gtFlags &= ~GTF_SPILLED;
                    compiler->tmpRlsTemp(t);
                }
            }
            else
#endif // _TARGET_X86_
            {
                if (op1->gtRegNum != retReg)
                {
                    inst_RV_RV(ins_Copy(targetType), retReg, op1->gtRegNum, targetType);
                }
            }
        }
    }
#ifdef PROFILING_SUPPORTED
    // TODO-AMD64-Unix: If the profiler hook is implemented on *nix, make sure for 2 register returned structs
    //                  the RAX and RDX need to be kept alive. Make the necessary changes in lowerxarch.cpp
    //                  in the handling of the GT_RETURN statement.
    //                  Such structs containing GC pointers need to be handled by calling gcInfo.gcMarkRegSetNpt
    //                  for the return registers containing GC refs.

    // There will be a single return block while generating profiler ELT callbacks.
    //
    // Reason for not materializing Leave callback as a GT_PROF_HOOK node after GT_RETURN:
    // In flowgraph and other places assert that the last node of a block marked as
    // BBJ_RETURN is either a GT_RETURN or GT_JMP or a tail call. It would be nice to
    // maintain such an invariant irrespective of whether profiler hook is needed or not.
    // Also, there is not much to be gained by materializing it as an explicit node.
    if (compiler->compCurBB == compiler->genReturnBB)
    {
        // !! NOTE !!
        // Since we are invalidating the assumption that we would slip into the epilog
        // right after the "return", we need to preserve the return reg's GC state
        // across the call until actual method return.
        if (varTypeIsGC(compiler->info.compRetType))
        {
            gcInfo.gcMarkRegPtrVal(REG_INTRET, compiler->info.compRetType);
        }

        genProfilingLeaveCallback();

        if (varTypeIsGC(compiler->info.compRetType))
        {
            gcInfo.gcMarkRegSetNpt(REG_INTRET);
        }
    }
#endif // PROFILING_SUPPORTED
}
//------------------------------------------------------------------------
// genCodeForCompare: Produce code for a GT_EQ/GT_NE/GT_LT/GT_LE/GT_GE/GT_GT/GT_TEST_EQ/GT_TEST_NE/GT_CMP node.
//
// Arguments:
//    tree - the node
//
void CodeGen::genCodeForCompare(GenTreeOp* tree)
{
    assert(tree->OperIs(GT_EQ, GT_NE, GT_LT, GT_LE, GT_GE, GT_GT, GT_TEST_EQ, GT_TEST_NE, GT_CMP));

    // TODO-XArch-CQ: Check if we can use the currently set flags.
    // TODO-XArch-CQ: Check for the case where we can simply transfer the carry bit to a register
    //                (signed < or >= where targetReg != REG_NA)

    GenTreePtr op1     = tree->gtOp1;
    var_types  op1Type = op1->TypeGet();

    if (varTypeIsFloating(op1Type))
    {
        genCompareFloat(tree);
    }
    else
    {
        genCompareInt(tree);
    }
}
//------------------------------------------------------------------------
// genCodeForJumpTrue: Generates code for jmpTrue statement.
//
// Arguments:
//    tree - The GT_JTRUE tree node.
//
// Return Value:
//    None
//
void CodeGen::genCodeForJumpTrue(GenTreePtr tree)
{
    GenTree* cmp = tree->gtOp.gtOp1;

    assert(cmp->OperIsCompare());
    assert(compiler->compCurBB->bbJumpKind == BBJ_COND);

#if !defined(_TARGET_64BIT_)
    // Long-typed compares should have been handled by Lowering::LowerCompare.
    assert(!varTypeIsLong(cmp->gtGetOp1()));
#endif

    // Get the "kind" and type of the comparison. Note that whether it is an unsigned cmp
    // is governed by a flag NOT by the inherent type of the node
    // TODO-XArch-CQ: Check if we can use the currently set flags.
    emitJumpKind jumpKind[2];
    bool         branchToTrueLabel[2];
    genJumpKindsForTree(cmp, jumpKind, branchToTrueLabel);

    BasicBlock* skipLabel = nullptr;
    if (jumpKind[0] != EJ_NONE)
    {
        BasicBlock* jmpTarget;
        if (branchToTrueLabel[0])
        {
            jmpTarget = compiler->compCurBB->bbJumpDest;
        }
        else
        {
            // This case arises only for ordered GT_EQ right now
            assert((cmp->gtOper == GT_EQ) && ((cmp->gtFlags & GTF_RELOP_NAN_UN) == 0));
            skipLabel = genCreateTempLabel();
            jmpTarget = skipLabel;
        }

        inst_JMP(jumpKind[0], jmpTarget);
    }

    if (jumpKind[1] != EJ_NONE)
    {
        // the second conditional branch always has to be to the true label
        assert(branchToTrueLabel[1]);
        inst_JMP(jumpKind[1], compiler->compCurBB->bbJumpDest);
    }

    if (skipLabel != nullptr)
    {
        genDefineTempLabel(skipLabel);
    }
}
//------------------------------------------------------------------------
// genCodeForJcc: Produce code for a GT_JCC node.
//
// Arguments:
//    tree - the node
//
void CodeGen::genCodeForJcc(GenTreeCC* tree)
{
    assert(compiler->compCurBB->bbJumpKind == BBJ_COND);

    CompareKind  compareKind = ((tree->gtFlags & GTF_UNSIGNED) != 0) ? CK_UNSIGNED : CK_SIGNED;
    emitJumpKind jumpKind    = genJumpKindForOper(tree->gtCondition, compareKind);

    inst_JMP(jumpKind, compiler->compCurBB->bbJumpDest);
}
//------------------------------------------------------------------------
// genCodeForSetcc: Generates a setcc instruction for a GT_SETCC node.
//
// Arguments:
//    setcc - the GT_SETCC node
//
// Assumptions:
//    The condition represents an integer comparison. This code doesn't
//    have the necessary logic to deal with floating point comparisons,
//    in fact it doesn't even know if the comparison is integer or floating
//    point because SETCC nodes do not have any operands.
//
void CodeGen::genCodeForSetcc(GenTreeCC* setcc)
{
    regNumber    dstReg      = setcc->gtRegNum;
    CompareKind  compareKind = setcc->IsUnsigned() ? CK_UNSIGNED : CK_SIGNED;
    emitJumpKind jumpKind    = genJumpKindForOper(setcc->gtCondition, compareKind);

    assert(genIsValidIntReg(dstReg) && isByteReg(dstReg));
    // Make sure nobody is setting GTF_RELOP_NAN_UN on this node as it is ignored.
    assert((setcc->gtFlags & GTF_RELOP_NAN_UN) == 0);

    inst_SET(jumpKind, dstReg);
    inst_RV_RV(ins_Move_Extend(TYP_UBYTE, true), dstReg, dstReg, TYP_UBYTE, emitTypeSize(TYP_UBYTE));
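    // For example (illustrative): an unsigned 'below' condition materialized
    // into EDX comes out as
    //     setb  dl            ; dl = 1 if below, else 0
    //     movzx edx, dl       ; zero-extend the byte into the full register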
    genProduceReg(setcc);
}
//------------------------------------------------------------------------
// genCodeForReturnTrap: Produce code for a GT_RETURNTRAP node.
//
// Arguments:
//    tree - the GT_RETURNTRAP node
//
void CodeGen::genCodeForReturnTrap(GenTreeOp* tree)
{
    assert(tree->OperGet() == GT_RETURNTRAP);

    // this is nothing but a conditional call to CORINFO_HELP_STOP_FOR_GC
    // based on the contents of 'data'

    GenTree* data = tree->gtOp1;
    genConsumeRegs(data);
    GenTreeIntCon cns = intForm(TYP_INT, 0);
    getEmitter()->emitInsBinary(INS_cmp, emitTypeSize(TYP_INT), data, &cns);

    BasicBlock* skipLabel = genCreateTempLabel();

    emitJumpKind jmpEqual = genJumpKindForOper(GT_EQ, CK_SIGNED);
    inst_JMP(jmpEqual, skipLabel);
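    // The overall shape of the emitted code is (sketch):
    //     cmp  <data>, 0
    //     je   skipLabel
    //     call CORINFO_HELP_STOP_FOR_GC
    // skipLabel: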
    // emit the call to the EE-helper that stops for GC (or other reasons)
    regNumber tmpReg = tree->GetSingleTempReg();
    assert(genIsValidIntReg(tmpReg));

    genEmitHelperCall(CORINFO_HELP_STOP_FOR_GC, 0, EA_UNKNOWN, tmpReg);
    genDefineTempLabel(skipLabel);
}
/*****************************************************************************
 *
 * Generate code for a single node in the tree.
 * Preconditions: All operands have been evaluated
 *
 */
void CodeGen::genCodeForTreeNode(GenTreePtr treeNode)
{
    regNumber targetReg;
#if !defined(_TARGET_64BIT_)
    if (treeNode->TypeGet() == TYP_LONG)
    {
        // All long enregistered nodes will have been decomposed into their
        // constituent lo and hi nodes.
        targetReg = REG_NA;
    }
    else
#endif // !defined(_TARGET_64BIT_)
    {
        targetReg = treeNode->gtRegNum;
    }
    var_types targetType = treeNode->TypeGet();
    emitter*  emit       = getEmitter();

#ifdef DEBUG
    // Validate that all the operands for the current node are consumed in order.
    // This is important because LSRA ensures that any necessary copies will be
    // handled correctly.
    lastConsumedNode = nullptr;
    if (compiler->verbose)
    {
        unsigned seqNum = treeNode->gtSeqNum; // Useful for setting a conditional break in Visual Studio
        compiler->gtDispLIRNode(treeNode, "Generating: ");
    }
#endif // DEBUG

    // Is this a node whose value is already in a register? LSRA denotes this by
    // setting the GTF_REUSE_REG_VAL flag.
    if (treeNode->IsReuseRegVal())
    {
        // For now, this is only used for constant nodes.
        assert((treeNode->OperIsConst()));
        JITDUMP("  TreeNode is marked ReuseReg\n");
        return;
    }

    // contained nodes are part of their parents for codegen purposes
    // ex : immediates, most LEAs
    if (treeNode->isContained())
    {
        return;
    }

    switch (treeNode->gtOper)
    {
#ifndef JIT32_GCENCODER
        case GT_START_NONGC:
            getEmitter()->emitDisableGC();
            break;
#endif // !defined(JIT32_GCENCODER)

        case GT_PROF_HOOK:
#ifdef PROFILING_SUPPORTED
            // We should be seeing this only if profiler hook is needed
            noway_assert(compiler->compIsProfilerHookNeeded());

            // Right now this node is used only for tail calls. In future if
            // we intend to use it for Enter or Leave hooks, add a data member
            // to this node indicating the kind of profiler hook. For example,
            // helper number can be used.
            genProfilingLeaveCallback(CORINFO_HELP_PROF_FCN_TAILCALL);
#endif // PROFILING_SUPPORTED
            break;

        case GT_LCLHEAP:
            genLclHeap(treeNode);
            break;

        case GT_CNS_INT:
#ifdef _TARGET_X86_
            assert(!treeNode->IsIconHandle(GTF_ICON_TLS_HDL));
#endif // _TARGET_X86_
            __fallthrough;

        case GT_CNS_DBL:
            genSetRegToConst(targetReg, targetType, treeNode);
            genProduceReg(treeNode);
            break;

        case GT_NOT:
        case GT_NEG:
            genCodeForNegNot(treeNode);
            break;

        case GT_MOD:
        case GT_UMOD:
        case GT_DIV:
        case GT_UDIV:
            genCodeForDivMod(treeNode->AsOp());
            break;

        case GT_OR:
        case GT_XOR:
        case GT_AND:
            assert(varTypeIsIntegralOrI(treeNode));

            __fallthrough;

#if !defined(_TARGET_64BIT_)
        case GT_ADD_LO:
        case GT_ADD_HI:
        case GT_SUB_LO:
        case GT_SUB_HI:
#endif // !defined(_TARGET_64BIT_)

        case GT_ADD:
        case GT_SUB:
            genConsumeOperands(treeNode->AsOp());
            genCodeForBinary(treeNode);
            break;
        case GT_MUL:
            genCodeForMul(treeNode->AsOp());
            break;

        case GT_LSH:
        case GT_RSH:
        case GT_RSZ:
        case GT_ROL:
        case GT_ROR:
            genCodeForShift(treeNode);
            break;

#if !defined(_TARGET_64BIT_)
        case GT_LSH_HI:
        case GT_RSH_LO:
            genCodeForShiftLong(treeNode);
            break;
#endif // !defined(_TARGET_64BIT_)

        case GT_CAST:
            genCodeForCast(treeNode->AsOp());
            break;

        case GT_LCL_FLD_ADDR:
        case GT_LCL_VAR_ADDR:
            genCodeForLclAddr(treeNode);
            break;

        case GT_LCL_FLD:
            genCodeForLclFld(treeNode->AsLclFld());
            break;

        case GT_LCL_VAR:
            genCodeForLclVar(treeNode->AsLclVar());
            break;

        case GT_STORE_LCL_FLD:
            genCodeForStoreLclFld(treeNode->AsLclFld());
            break;

        case GT_STORE_LCL_VAR:
            genCodeForStoreLclVar(treeNode->AsLclVar());
            break;

        case GT_RETFILT:
        case GT_RETURN:
            genReturn(treeNode);
            break;

        case GT_LEA:
            // If we are here, it is the case where there is an LEA that cannot be folded into a parent instruction.
            genLeaInstruction(treeNode->AsAddrMode());
            break;

        case GT_IND:
            genCodeForIndir(treeNode->AsIndir());
            break;

        case GT_MULHI:
#ifdef _TARGET_X86_
        case GT_MUL_LONG:
#endif
            genCodeForMulHi(treeNode->AsOp());
            break;

        case GT_INTRINSIC:
            genIntrinsic(treeNode);
            break;

#ifdef FEATURE_SIMD
        case GT_SIMD:
            genSIMDIntrinsic(treeNode->AsSIMD());
            break;
#endif // FEATURE_SIMD

        case GT_CKFINITE:
            genCkfinite(treeNode);
            break;
        case GT_EQ:
        case GT_NE:
        case GT_LT:
        case GT_LE:
        case GT_GE:
        case GT_GT:
        case GT_TEST_EQ:
        case GT_TEST_NE:
        case GT_CMP:
            genCodeForCompare(treeNode->AsOp());
            break;

        case GT_JTRUE:
            genCodeForJumpTrue(treeNode);
            break;

        case GT_JCC:
            genCodeForJcc(treeNode->AsCC());
            break;

        case GT_SETCC:
            genCodeForSetcc(treeNode->AsCC());
            break;

        case GT_RETURNTRAP:
            genCodeForReturnTrap(treeNode->AsOp());
            break;

        case GT_STOREIND:
            genCodeForStoreInd(treeNode->AsStoreInd());
            break;

        case GT_COPY:
            // This is handled at the time we call genConsumeReg() on the GT_COPY
            break;

        case GT_SWAP:
            genCodeForSwap(treeNode->AsOp());
            break;

        case GT_PUTARG_STK:
            genPutArgStk(treeNode->AsPutArgStk());
            break;

        case GT_PUTARG_REG:
            genPutArgReg(treeNode->AsOp());
            break;

        case GT_CALL:
            genCallInstruction(treeNode->AsCall());
            break;

        case GT_JMP:
            genJmpMethod(treeNode);
            break;

        case GT_LOCKADD:
        case GT_XCHG:
        case GT_XADD:
            genLockedInstructions(treeNode->AsOp());
            break;

        case GT_MEMORYBARRIER:
            instGen_MemoryBarrier();
            break;

        case GT_CMPXCHG:
            genCodeForCmpXchg(treeNode->AsCmpXchg());
            break;

        case GT_RELOAD:
            // do nothing - reload is just a marker.
            // The parent node will call genConsumeReg on this which will trigger the unspill of this node's child
            // into the register specified in this node.
            break;

        case GT_NOP:
            break;

        case GT_NO_OP:
            getEmitter()->emitIns_Nop(1);
            break;

        case GT_ARR_BOUNDS_CHECK:
#ifdef FEATURE_SIMD
        case GT_SIMD_CHK:
#endif // FEATURE_SIMD
            genRangeCheck(treeNode);
            break;

        case GT_PHYSREG:
            genCodeForPhysReg(treeNode->AsPhysReg());
            break;

        case GT_NULLCHECK:
            genCodeForNullCheck(treeNode->AsOp());
            break;
        case GT_CATCH_ARG:

            noway_assert(handlerGetsXcptnObj(compiler->compCurBB->bbCatchTyp));

            /* Catch arguments get passed in a register. genCodeForBBlist()
               would have marked it as holding a GC object, but not used. */

            noway_assert(gcInfo.gcRegGCrefSetCur & RBM_EXCEPTION_OBJECT);
            genConsumeReg(treeNode);
            break;

#if !FEATURE_EH_FUNCLETS
        case GT_END_LFIN:

            // Have to clear the ShadowSP of the nesting level which encloses the finally. Generates:
            //     mov dword ptr [ebp-0xC], 0  // for some slot of the ShadowSP local var

            unsigned finallyNesting;
            finallyNesting = treeNode->gtVal.gtVal1;
            noway_assert(treeNode->gtVal.gtVal1 < compiler->compHndBBtabCount);
            noway_assert(finallyNesting < compiler->compHndBBtabCount);

            // The last slot is reserved for ICodeManager::FixContext(ppEndRegion)
            unsigned filterEndOffsetSlotOffs;
            PREFIX_ASSUME(compiler->lvaLclSize(compiler->lvaShadowSPslotsVar) >
                          TARGET_POINTER_SIZE); // below doesn't underflow.
            filterEndOffsetSlotOffs =
                (unsigned)(compiler->lvaLclSize(compiler->lvaShadowSPslotsVar) - TARGET_POINTER_SIZE);

            unsigned curNestingSlotOffs;
            curNestingSlotOffs = filterEndOffsetSlotOffs - ((finallyNesting + 1) * TARGET_POINTER_SIZE);
            instGen_Store_Imm_Into_Lcl(TYP_I_IMPL, EA_PTRSIZE, 0, compiler->lvaShadowSPslotsVar, curNestingSlotOffs);
            break;
#endif // !FEATURE_EH_FUNCLETS
        case GT_PINVOKE_PROLOG:
            noway_assert(((gcInfo.gcRegGCrefSetCur | gcInfo.gcRegByrefSetCur) & ~fullIntArgRegMask()) == 0);

            // the runtime side requires the codegen here to be consistent
            emit->emitDisableRandomNops();
            break;

        case GT_LABEL:
            genPendingCallLabel       = genCreateTempLabel();
            treeNode->gtLabel.gtLabBB = genPendingCallLabel;
            emit->emitIns_R_L(INS_lea, EA_PTR_DSP_RELOC, genPendingCallLabel, treeNode->gtRegNum);
            break;

        case GT_STORE_OBJ:
        case GT_STORE_DYN_BLK:
        case GT_STORE_BLK:
            genCodeForStoreBlk(treeNode->AsBlk());
            break;

        case GT_JMPTABLE:
            genJumpTable(treeNode);
            break;

        case GT_SWITCH_TABLE:
            genTableBasedSwitch(treeNode);
            break;

        case GT_ARR_INDEX:
            genCodeForArrIndex(treeNode->AsArrIndex());
            break;

        case GT_ARR_OFFSET:
            genCodeForArrOffset(treeNode->AsArrOffs());
            break;

        case GT_CLS_VAR_ADDR:
            emit->emitIns_R_C(INS_lea, EA_PTRSIZE, targetReg, treeNode->gtClsVar.gtClsVarHnd, 0);
            genProduceReg(treeNode);
            break;

#if !defined(_TARGET_64BIT_)
        case GT_LONG:
            assert(treeNode->isUsedFromReg());
            genConsumeRegs(treeNode);
            break;
#endif

        case GT_IL_OFFSET:
            // Do nothing; these nodes are simply markers for debug info.
            break;

        default:
        {
#ifdef DEBUG
            char message[256];
            _snprintf_s(message, _countof(message), _TRUNCATE, "NYI: Unimplemented node type %s\n",
                        GenTree::NodeName(treeNode->OperGet()));
            NYIRAW(message);
#else
            assert(!"Unknown node in codegen");
#endif
        }
        break;
    }
}
//----------------------------------------------------------------------------------
// genMultiRegCallStoreToLocal: store multi-reg return value of a call node to a local
//
// Arguments:
//    treeNode - Gentree of GT_STORE_LCL_VAR
//
// Return Value:
//    None
//
// Assumption:
//    The child of store is a multi-reg call node.
//    genProduceReg() on treeNode is made by caller of this routine.
//
void CodeGen::genMultiRegCallStoreToLocal(GenTreePtr treeNode)
{
    assert(treeNode->OperGet() == GT_STORE_LCL_VAR);

#ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
    // Structs of size >=9 and <=16 are returned in two return registers on x64 Unix.
    assert(varTypeIsStruct(treeNode));

    // Assumption: current x64 Unix implementation requires that a multi-reg struct
    // var in 'var = call' is flagged as lvIsMultiRegRet to prevent it from
    // being struct promoted.
    unsigned   lclNum = treeNode->AsLclVarCommon()->gtLclNum;
    LclVarDsc* varDsc = &(compiler->lvaTable[lclNum]);
    noway_assert(varDsc->lvIsMultiRegRet);

    GenTree*     op1       = treeNode->gtGetOp1();
    GenTree*     actualOp1 = op1->gtSkipReloadOrCopy();
    GenTreeCall* call      = actualOp1->AsCall();
    assert(call->HasMultiRegRetVal());

    genConsumeRegs(op1);

    ReturnTypeDesc* retTypeDesc = call->GetReturnTypeDesc();
    assert(retTypeDesc->GetReturnRegCount() == MAX_RET_REG_COUNT);
    unsigned regCount = retTypeDesc->GetReturnRegCount();
2017 if (treeNode->gtRegNum != REG_NA)
2019 // Right now the only enregistrable structs supported are SIMD types.
2020 assert(varTypeIsSIMD(treeNode));
2021 assert(varTypeIsFloating(retTypeDesc->GetReturnRegType(0)));
2022 assert(varTypeIsFloating(retTypeDesc->GetReturnRegType(1)));
2024 // This is a case of two 8-bytes that comprise the operand is in
2025 // two different xmm registers and needs to assembled into a single
2027 regNumber targetReg = treeNode->gtRegNum;
2028 regNumber reg0 = call->GetRegNumByIdx(0);
2029 regNumber reg1 = call->GetRegNumByIdx(1);
2031 if (op1->IsCopyOrReload())
2033 // GT_COPY/GT_RELOAD will have valid reg for those positions
2034 // that need to be copied or reloaded.
2035 regNumber reloadReg = op1->AsCopyOrReload()->GetRegNumByIdx(0);
if (reloadReg != REG_NA)
{
    reg0 = reloadReg;
}

reloadReg = op1->AsCopyOrReload()->GetRegNumByIdx(1);
if (reloadReg != REG_NA)
{
    reg1 = reloadReg;
}
2048 if (targetReg != reg0 && targetReg != reg1)
// Copy reg0 into targetReg and let it be handled by one
// of the cases below.
2052 inst_RV_RV(ins_Copy(TYP_DOUBLE), targetReg, reg0, TYP_DOUBLE);
2056 if (targetReg == reg0)
// targetReg[63:0] = targetReg[63:0]
// targetReg[127:64] = reg1[127:64]
2060 inst_RV_RV_IV(INS_shufpd, EA_16BYTE, targetReg, reg1, 0x00);
2064 assert(targetReg == reg1);
// We need two shuffles to achieve this:
// First:
//   targetReg[63:0] = targetReg[63:0]
//   targetReg[127:64] = reg0[63:0]
// Second:
//   targetReg[63:0] = targetReg[127:64]
//   targetReg[127:64] = targetReg[63:0]
2075 // Essentially copy low 8-bytes from reg0 to high 8-bytes of targetReg
2076 // and next swap low and high 8-bytes of targetReg to have them
2077 // rearranged in the right order.
2078 inst_RV_RV_IV(INS_shufpd, EA_16BYTE, targetReg, reg0, 0x00);
2079 inst_RV_RV_IV(INS_shufpd, EA_16BYTE, targetReg, targetReg, 0x01);
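// A sketch of the shufpd semantics assumed above (imm8 bit 0 selects the low half of the
// destination, bit 1 the half taken from the source):
//   shufpd dst, src, imm:  dst[63:0]   = imm[0] ? dst[127:64] : dst[63:0]
//                          dst[127:64] = imm[1] ? src[127:64] : src[63:0]
// So "shufpd targetReg, reg0, 0x00" places reg0's low 8 bytes above targetReg's low 8 bytes,
// and "shufpd targetReg, targetReg, 0x01" then swaps the two 8-byte halves.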
}
else
{
    // Stack store case: spill each return register to the local's home location.
    unsigned offset = 0;
    for (unsigned i = 0; i < regCount; ++i)
2088 var_types type = retTypeDesc->GetReturnRegType(i);
2089 regNumber reg = call->GetRegNumByIdx(i);
2090 if (op1->IsCopyOrReload())
2092 // GT_COPY/GT_RELOAD will have valid reg for those positions
2093 // that need to be copied or reloaded.
2094 regNumber reloadReg = op1->AsCopyOrReload()->GetRegNumByIdx(i);
if (reloadReg != REG_NA)
{
    reg = reloadReg;
}
2101 assert(reg != REG_NA);
2102 getEmitter()->emitIns_S_R(ins_Store(type), emitTypeSize(type), reg, lclNum, offset);
2103 offset += genTypeSize(type);
2106 varDsc->lvRegNum = REG_STK;
2108 #elif defined(_TARGET_X86_)
2109 // Longs are returned in two return registers on x86.
2110 assert(varTypeIsLong(treeNode));
2112 // Assumption: current x86 implementation requires that a multi-reg long
// var in 'var = call' is flagged as lvIsMultiRegRet to prevent it from
// being promoted.
2115 unsigned lclNum = treeNode->AsLclVarCommon()->gtLclNum;
2116 LclVarDsc* varDsc = &(compiler->lvaTable[lclNum]);
2117 noway_assert(varDsc->lvIsMultiRegRet);
2119 GenTree* op1 = treeNode->gtGetOp1();
2120 GenTree* actualOp1 = op1->gtSkipReloadOrCopy();
2121 GenTreeCall* call = actualOp1->AsCall();
2122 assert(call->HasMultiRegRetVal());
2124 genConsumeRegs(op1);
2126 ReturnTypeDesc* retTypeDesc = call->GetReturnTypeDesc();
2127 unsigned regCount = retTypeDesc->GetReturnRegCount();
2128 assert(regCount == MAX_RET_REG_COUNT);
unsigned offset = 0;
for (unsigned i = 0; i < regCount; ++i)
2134 var_types type = retTypeDesc->GetReturnRegType(i);
2135 regNumber reg = call->GetRegNumByIdx(i);
2136 if (op1->IsCopyOrReload())
2138 // GT_COPY/GT_RELOAD will have valid reg for those positions
2139 // that need to be copied or reloaded.
2140 regNumber reloadReg = op1->AsCopyOrReload()->GetRegNumByIdx(i);
if (reloadReg != REG_NA)
{
    reg = reloadReg;
}
2147 assert(reg != REG_NA);
2148 getEmitter()->emitIns_S_R(ins_Store(type), emitTypeSize(type), reg, lclNum, offset);
2149 offset += genTypeSize(type);
2152 varDsc->lvRegNum = REG_STK;
2153 #else // !FEATURE_UNIX_AMD64_STRUCT_PASSING && !_TARGET_X86_
2154 assert(!"Unreached");
2155 #endif // !FEATURE_UNIX_AMD64_STRUCT_PASSING && !_TARGET_X86_
2158 //------------------------------------------------------------------------
2159 // genLclHeap: Generate code for localloc.
//
// Arguments:
//    tree - the localloc tree to generate.
//
// Notes:
// Note that for x86, we don't track ESP movements while generating the localloc code.
2166 // The ESP tracking is used to report stack pointer-relative GC info, which is not
2167 // interesting while doing the localloc construction. Also, for functions with localloc,
2168 // we have EBP frames, and EBP-relative locals, and ESP-relative accesses only for function
2169 // call arguments. We store the ESP after the localloc is complete in the LocAllocSP
2170 // variable. This variable is implicitly reported to the VM in the GC info (its position
2171 // is defined by convention relative to other items), and is used by the GC to find the
2172 // "base" stack pointer in functions with localloc.
2174 void CodeGen::genLclHeap(GenTreePtr tree)
2176 assert(tree->OperGet() == GT_LCLHEAP);
2177 assert(compiler->compLocallocUsed);
2179 GenTreePtr size = tree->gtOp.gtOp1;
2180 noway_assert((genActualType(size->gtType) == TYP_INT) || (genActualType(size->gtType) == TYP_I_IMPL));
2182 regNumber targetReg = tree->gtRegNum;
2183 regNumber regCnt = REG_NA;
2184 var_types type = genActualType(size->gtType);
2185 emitAttr easz = emitTypeSize(type);
2186 BasicBlock* endLabel = nullptr;
2190 if (compiler->opts.compStackCheckOnRet)
2192 noway_assert(compiler->lvaReturnEspCheck != 0xCCCCCCCC &&
2193 compiler->lvaTable[compiler->lvaReturnEspCheck].lvDoNotEnregister &&
2194 compiler->lvaTable[compiler->lvaReturnEspCheck].lvOnFrame);
2195 getEmitter()->emitIns_S_R(INS_cmp, EA_PTRSIZE, REG_SPBASE, compiler->lvaReturnEspCheck, 0);
2197 BasicBlock* esp_check = genCreateTempLabel();
2198 emitJumpKind jmpEqual = genJumpKindForOper(GT_EQ, CK_SIGNED);
2199 inst_JMP(jmpEqual, esp_check);
2200 getEmitter()->emitIns(INS_BREAKPOINT);
2201 genDefineTempLabel(esp_check);
2205 noway_assert(isFramePointerUsed()); // localloc requires Frame Pointer to be established since SP changes
2206 noway_assert(genStackLevel == 0); // Can't have anything on the stack
2208 unsigned stackAdjustment = 0;
2209 BasicBlock* loop = nullptr;
// Compute the amount of memory to allocate, properly rounded up to STACK_ALIGN.
2213 if (size->IsCnsIntOrI())
2215 // If size is a constant, then it must be contained.
2216 assert(size->isContained());
2218 // If amount is zero then return null in targetReg
2219 amount = size->gtIntCon.gtIconVal;
if (amount == 0)
{
    instGen_Set_Reg_To_Zero(EA_PTRSIZE, targetReg);
    goto BAILOUT;
}
// 'amount' is the total number of bytes to localloc, rounded up to a STACK_ALIGN boundary
2227 amount = AlignUp(amount, STACK_ALIGN);
2231 // The localloc requested memory size is non-constant.
2233 // Put the size value in targetReg. If it is zero, bail out by returning null in targetReg.
2234 genConsumeRegAndCopy(size, targetReg);
2235 endLabel = genCreateTempLabel();
2236 getEmitter()->emitIns_R_R(INS_test, easz, targetReg, targetReg);
2237 inst_JMP(EJ_je, endLabel);
2239 // Compute the size of the block to allocate and perform alignment.
2240 // If compInitMem=true, we can reuse targetReg as regcnt,
2241 // since we don't need any internal registers.
2242 if (compiler->info.compInitMem)
assert(tree->AvailableTempRegCount() == 0);
regCnt = targetReg;
}
else
{
2249 regCnt = tree->ExtractTempReg();
2250 if (regCnt != targetReg)
2252 // Above, we put the size in targetReg. Now, copy it to our new temp register if necessary.
2253 inst_RV_RV(INS_mov, regCnt, targetReg, size->TypeGet());
// Round up the number of bytes to allocate to a STACK_ALIGN boundary. This is done
// by code like:
//      add reg, 15
//      and reg, -16
// However, in the initialized memory case, we need the count of STACK_ALIGN-sized
// elements, not a byte count, after the alignment. So instead of the "and", which
// becomes unnecessary, generate a shift, e.g.:
//      add reg, 15
//      shr reg, 4
2267 inst_RV_IV(INS_add, regCnt, STACK_ALIGN - 1, emitActualTypeSize(type));
2269 if (compiler->info.compInitMem)
2271 // Convert the count from a count of bytes to a loop count. We will loop once per
2272 // stack alignment size, so each loop will zero 4 bytes on x86 and 16 bytes on x64.
2273 // Note that we zero a single reg-size word per iteration on x86, and 2 reg-size
2274 // words per iteration on x64. We will shift off all the stack alignment bits
2275 // added above, so there is no need for an 'and' instruction.
2277 // --- shr regCnt, 2 (or 4) ---
2278 inst_RV_SH(INS_SHIFT_RIGHT_LOGICAL, EA_PTRSIZE, regCnt, STACK_ALIGN_SHIFT_ALL);
2282 // Otherwise, mask off the low bits to align the byte count.
2283 inst_RV_IV(INS_AND, regCnt, ~(STACK_ALIGN - 1), emitActualTypeSize(type));
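// A worked example of the two paths above (illustrative only): for a requested size of 100
// bytes with STACK_ALIGN == 16, "add regCnt, 15" gives 115; "and regCnt, -16" yields a byte
// count of 112, while "shr regCnt, 4" instead yields a loop count of 7 (7 * 16 == 112 bytes).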
2287 #if FEATURE_FIXED_OUT_ARGS
2288 // If we have an outgoing arg area then we must adjust the SP by popping off the
2289 // outgoing arg area. We will restore it right before we return from this method.
2291 // Localloc returns stack space that aligned to STACK_ALIGN bytes. The following
2292 // are the cases that need to be handled:
2293 // i) Method has out-going arg area.
2294 // It is guaranteed that size of out-going arg area is STACK_ALIGN'ed (see fgMorphArgs).
2295 // Therefore, we will pop off the out-going arg area from RSP before allocating the localloc space.
2296 // ii) Method has no out-going arg area.
2297 // Nothing to pop off from the stack.
2298 if (compiler->lvaOutgoingArgSpaceSize > 0)
2300 assert((compiler->lvaOutgoingArgSpaceSize % STACK_ALIGN) == 0); // This must be true for the stack to remain
2302 inst_RV_IV(INS_add, REG_SPBASE, compiler->lvaOutgoingArgSpaceSize, EA_PTRSIZE);
2303 stackAdjustment += compiler->lvaOutgoingArgSpaceSize;
2307 if (size->IsCnsIntOrI())
2309 // We should reach here only for non-zero, constant size allocations.
2311 assert((amount % STACK_ALIGN) == 0);
2312 assert((amount % REGSIZE_BYTES) == 0);
// For small allocations we will generate up to six "push 0" instructions inline.
2315 size_t cntRegSizedWords = amount / REGSIZE_BYTES;
2316 if (cntRegSizedWords <= 6)
2318 for (; cntRegSizedWords != 0; cntRegSizedWords--)
2320 inst_IV(INS_push_hide, 0); // push_hide means don't track the stack
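// For example (illustrative only): a constant localloc of 32 bytes on x64 is emitted here as
// four "push 0" instructions, which allocates, probes and zeroes the space without a loop.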
2325 bool doNoInitLessThanOnePageAlloc =
2326 !compiler->info.compInitMem && (amount < compiler->eeGetPageSize()); // must be < not <=
#ifdef _TARGET_X86_
bool needRegCntRegister = true;
2330 #else // !_TARGET_X86_
2331 bool needRegCntRegister = !doNoInitLessThanOnePageAlloc;
2332 #endif // !_TARGET_X86_
2334 if (needRegCntRegister)
2336 // If compInitMem=true, we can reuse targetReg as regcnt.
2337 // Since size is a constant, regCnt is not yet initialized.
2338 assert(regCnt == REG_NA);
2339 if (compiler->info.compInitMem)
assert(tree->AvailableTempRegCount() == 0);
regCnt = targetReg;
}
else
{
2346 regCnt = tree->ExtractTempReg();
2350 if (doNoInitLessThanOnePageAlloc)
2352 // Since the size is less than a page, simply adjust ESP.
2353 // ESP might already be in the guard page, so we must touch it BEFORE
2354 // the alloc, not after.
2355 CLANG_FORMAT_COMMENT_ANCHOR;
#ifdef _TARGET_X86_
// For x86, we don't want to use "sub ESP" because we don't want the emitter to track the adjustment
// to ESP. So do the work in the count register.
// TODO-CQ: manipulate ESP directly, to share code, reduce #ifdefs, and improve CQ. This would require
// creating a way to temporarily turn off the emitter's tracking of ESP, maybe marking instrDescs as
// "don't track".
2363 inst_RV_RV(INS_mov, regCnt, REG_SPBASE, TYP_I_IMPL);
2364 getEmitter()->emitIns_AR_R(INS_TEST, EA_4BYTE, REG_SPBASE, REG_SPBASE, 0);
2365 inst_RV_IV(INS_sub, regCnt, amount, EA_PTRSIZE);
2366 inst_RV_RV(INS_mov, REG_SPBASE, regCnt, TYP_I_IMPL);
2367 #else // !_TARGET_X86_
2368 getEmitter()->emitIns_AR_R(INS_TEST, EA_4BYTE, REG_SPBASE, REG_SPBASE, 0);
2369 inst_RV_IV(INS_sub, REG_SPBASE, amount, EA_PTRSIZE);
2370 #endif // !_TARGET_X86_
2375 // else, "mov regCnt, amount"
2377 if (compiler->info.compInitMem)
2379 // When initializing memory, we want 'amount' to be the loop count.
2380 assert((amount % STACK_ALIGN) == 0);
2381 amount /= STACK_ALIGN;
2384 genSetRegToIcon(regCnt, amount, ((int)amount == amount) ? TYP_INT : TYP_LONG);
2387 loop = genCreateTempLabel();
2388 if (compiler->info.compInitMem)
2390 // At this point 'regCnt' is set to the number of loop iterations for this loop, if each
2391 // iteration zeros (and subtracts from the stack pointer) STACK_ALIGN bytes.
2392 // Since we have to zero out the allocated memory AND ensure that RSP is always valid
2393 // by tickling the pages, we will just push 0's on the stack.
2395 assert(genIsValidIntReg(regCnt));
2398 genDefineTempLabel(loop);
2400 static_assert_no_msg((STACK_ALIGN % REGSIZE_BYTES) == 0);
2401 unsigned const count = (STACK_ALIGN / REGSIZE_BYTES);
2403 for (unsigned i = 0; i < count; i++)
2405 inst_IV(INS_push_hide, 0); // --- push REG_SIZE bytes of 0
2407 // Note that the stack must always be aligned to STACK_ALIGN bytes
2409 // Decrement the loop counter and loop if not done.
2410 inst_RV(INS_dec, regCnt, TYP_I_IMPL);
2411 inst_JMP(EJ_jne, loop);
2415 // At this point 'regCnt' is set to the total number of bytes to localloc.
2417 // We don't need to zero out the allocated memory. However, we do have
2418 // to tickle the pages to ensure that ESP is always valid and is
2419 // in sync with the "stack guard page". Note that in the worst
2420 // case ESP is on the last byte of the guard page. Thus you must
// touch ESP+0 first not ESP+0x1000.
2423 // Another subtlety is that you don't want ESP to be exactly on the
2424 // boundary of the guard page because PUSH is predecrement, thus
2425 // call setup would not touch the guard page but just beyond it
2427 // Note that we go through a few hoops so that ESP never points to
2428 // illegal pages at any time during the tickling process
//      neg   REGCNT
//      add   REGCNT, ESP        // reg now holds ultimate ESP
//      jb    loop               // result is smaller than original ESP (no wrap around)
//      xor   REGCNT, REGCNT     // overflow: pick lowest possible number
// loop:
//      test  ESP, [ESP+0]       // tickle the page
//      mov   REGTMP, ESP
//      sub   REGTMP, PAGE_SIZE
//      mov   ESP, REGTMP
//      cmp   ESP, REGCNT
//      jae   loop
//      mov   ESP, REGCNT        // move the final value to ESP
2444 inst_RV(INS_NEG, regCnt, TYP_I_IMPL);
2445 inst_RV_RV(INS_add, regCnt, REG_SPBASE, TYP_I_IMPL);
2446 inst_JMP(EJ_jb, loop);
2448 instGen_Set_Reg_To_Zero(EA_PTRSIZE, regCnt);
2450 genDefineTempLabel(loop);
2452 // Tickle the decremented value, and move back to ESP,
2453 // note that it has to be done BEFORE the update of ESP since
2454 // ESP might already be on the guard page. It is OK to leave
2455 // the final value of ESP on the guard page
2456 getEmitter()->emitIns_AR_R(INS_TEST, EA_4BYTE, REG_SPBASE, REG_SPBASE, 0);
2458 // This is a harmless trick to avoid the emitter trying to track the
2459 // decrement of the ESP - we do the subtraction in another reg instead
2460 // of adjusting ESP directly.
2461 regNumber regTmp = tree->GetSingleTempReg();
2463 inst_RV_RV(INS_mov, regTmp, REG_SPBASE, TYP_I_IMPL);
2464 inst_RV_IV(INS_sub, regTmp, compiler->eeGetPageSize(), EA_PTRSIZE);
2465 inst_RV_RV(INS_mov, REG_SPBASE, regTmp, TYP_I_IMPL);
2467 inst_RV_RV(INS_cmp, REG_SPBASE, regCnt, TYP_I_IMPL);
2468 inst_JMP(EJ_jae, loop);
2470 // Move the final value to ESP
2471 inst_RV_RV(INS_mov, REG_SPBASE, regCnt);
2475 // Re-adjust SP to allocate out-going arg area
2476 if (stackAdjustment > 0)
2478 assert((stackAdjustment % STACK_ALIGN) == 0); // This must be true for the stack to remain aligned
2479 inst_RV_IV(INS_sub, REG_SPBASE, stackAdjustment, EA_PTRSIZE);
2482 // Return the stackalloc'ed address in result register.
2483 // TargetReg = RSP + stackAdjustment.
2484 getEmitter()->emitIns_R_AR(INS_lea, EA_PTRSIZE, targetReg, REG_SPBASE, stackAdjustment);
2486 if (endLabel != nullptr)
2488 genDefineTempLabel(endLabel);
BAILOUT:

// Write the lvaLocAllocSPvar stack frame slot
2494 if (compiler->lvaLocAllocSPvar != BAD_VAR_NUM)
2496 getEmitter()->emitIns_S_R(ins_Store(TYP_I_IMPL), EA_PTRSIZE, REG_SPBASE, compiler->lvaLocAllocSPvar, 0);
2500 if (compiler->opts.compNeedStackProbes)
2502 genGenerateStackProbe();
2508 if (compiler->opts.compStackCheckOnRet)
2510 noway_assert(compiler->lvaReturnEspCheck != 0xCCCCCCCC &&
2511 compiler->lvaTable[compiler->lvaReturnEspCheck].lvDoNotEnregister &&
2512 compiler->lvaTable[compiler->lvaReturnEspCheck].lvOnFrame);
2513 getEmitter()->emitIns_S_R(ins_Store(TYP_I_IMPL), EA_PTRSIZE, REG_SPBASE, compiler->lvaReturnEspCheck, 0);
2517 genProduceReg(tree);
2520 void CodeGen::genCodeForStoreBlk(GenTreeBlk* storeBlkNode)
2522 assert(storeBlkNode->OperIs(GT_STORE_OBJ, GT_STORE_DYN_BLK, GT_STORE_BLK));
2524 if (storeBlkNode->OperIs(GT_STORE_OBJ) && storeBlkNode->OperIsCopyBlkOp() && !storeBlkNode->gtBlkOpGcUnsafe)
2526 assert(storeBlkNode->AsObj()->gtGcPtrCount != 0);
2527 genCodeForCpObj(storeBlkNode->AsObj());
2531 #ifdef JIT32_GCENCODER
2532 assert(!storeBlkNode->gtBlkOpGcUnsafe);
#else
if (storeBlkNode->gtBlkOpGcUnsafe)
2536 getEmitter()->emitDisableGC();
2538 #endif // JIT32_GCENCODER
2540 bool isCopyBlk = storeBlkNode->OperIsCopyBlkOp();
2542 switch (storeBlkNode->gtBlkOpKind)
2544 #ifdef _TARGET_AMD64_
2545 case GenTreeBlk::BlkOpKindHelper:
if (isCopyBlk)
{
    genCodeForCpBlk(storeBlkNode);
}
else
{
    genCodeForInitBlk(storeBlkNode);
}
break;
2555 #endif // _TARGET_AMD64_
2556 case GenTreeBlk::BlkOpKindRepInstr:
if (isCopyBlk)
{
    genCodeForCpBlkRepMovs(storeBlkNode);
}
else
{
    genCodeForInitBlkRepStos(storeBlkNode);
}
break;
2566 case GenTreeBlk::BlkOpKindUnroll:
if (isCopyBlk)
{
    genCodeForCpBlkUnroll(storeBlkNode);
}
else
{
    genCodeForInitBlkUnroll(storeBlkNode);
}
break;
2580 #ifndef JIT32_GCENCODER
2581 if (storeBlkNode->gtBlkOpGcUnsafe)
2583 getEmitter()->emitEnableGC();
2585 #endif // !defined(JIT32_GCENCODER)
2589 //------------------------------------------------------------------------
2590 // genCodeForInitBlkRepStos: Generate code for InitBlk using rep stos.
2593 // initBlkNode - The Block store for which we are generating code.
// Preconditions:
//   On x64:
//     The size of the buffers must be a constant and also less than INITBLK_STOS_LIMIT bytes.
//     Any value larger than that, we'll use the helper even if both the fill byte and the
//     size are integer constants.
//   On x86:
//     The size must either be a non-constant or less than INITBLK_STOS_LIMIT bytes.
2603 void CodeGen::genCodeForInitBlkRepStos(GenTreeBlk* initBlkNode)
2605 // Make sure we got the arguments of the initblk/initobj operation in the right registers.
2606 unsigned size = initBlkNode->Size();
2607 GenTreePtr dstAddr = initBlkNode->Addr();
2608 GenTreePtr initVal = initBlkNode->Data();
2609 if (initVal->OperIsInitVal())
2611 initVal = initVal->gtGetOp1();
2615 assert(dstAddr->isUsedFromReg());
2616 assert(initVal->isUsedFromReg());
#ifdef _TARGET_AMD64_
assert(size != 0);
#endif

if (initVal->IsCnsIntOrI())
{
#ifdef _TARGET_AMD64_
    assert(size > CPBLK_UNROLL_LIMIT && size < CPBLK_MOVS_LIMIT);
#else
    // Note that a size of zero means a non-constant size.
    assert((size == 0) || (size > CPBLK_UNROLL_LIMIT));
#endif
}
2632 genConsumeBlockOp(initBlkNode, REG_RDI, REG_RAX, REG_RCX);
2633 instGen(INS_r_stosb);
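// Note on the instruction chosen above: "rep stosb" stores AL into [RDI] RCX times, advancing
// RDI as it goes, which is why genConsumeBlockOp homes dstAddr, initVal and size into
// RDI/RAX/RCX respectively.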
2636 // Generate code for InitBlk by performing a loop unroll
2638 // a) Both the size and fill byte value are integer constants.
2639 // b) The size of the struct to initialize is smaller than INITBLK_UNROLL_LIMIT bytes.
2641 void CodeGen::genCodeForInitBlkUnroll(GenTreeBlk* initBlkNode)
2643 // Make sure we got the arguments of the initblk/initobj operation in the right registers
2644 unsigned size = initBlkNode->Size();
2645 GenTreePtr dstAddr = initBlkNode->Addr();
2646 GenTreePtr initVal = initBlkNode->Data();
2647 if (initVal->OperIsInitVal())
2649 initVal = initVal->gtGetOp1();
2652 assert(dstAddr->isUsedFromReg());
2653 assert(initVal->isUsedFromReg() || (initVal->IsIntegralConst(0) && ((size & 0xf) == 0)));
2655 assert(size <= INITBLK_UNROLL_LIMIT);
2656 assert(initVal->gtSkipReloadOrCopy()->IsCnsIntOrI());
2658 emitter* emit = getEmitter();
2660 genConsumeOperands(initBlkNode);
2662 // If the initVal was moved, or spilled and reloaded to a different register,
2663 // get the original initVal from below the GT_RELOAD, but only after capturing the valReg,
2664 // which needs to be the new register.
2665 regNumber valReg = initVal->gtRegNum;
2666 initVal = initVal->gtSkipReloadOrCopy();
2668 unsigned offset = 0;
2670 // Perform an unroll using SSE2 loads and stores.
2671 if (size >= XMM_REGSIZE_BYTES)
2673 regNumber tmpReg = initBlkNode->GetSingleTempReg();
2674 assert(genIsValidFloatReg(tmpReg));
2676 if (initVal->gtIntCon.gtIconVal != 0)
2678 emit->emitIns_R_R(INS_mov_i2xmm, EA_PTRSIZE, tmpReg, valReg);
2679 emit->emitIns_R_R(INS_punpckldq, EA_8BYTE, tmpReg, tmpReg);
#ifdef _TARGET_X86_
// For x86, we need one more to convert it from 8 bytes to 16 bytes.
2682 emit->emitIns_R_R(INS_punpckldq, EA_8BYTE, tmpReg, tmpReg);
2683 #endif // _TARGET_X86_
2687 emit->emitIns_R_R(INS_xorpd, EA_8BYTE, tmpReg, tmpReg);
2690 // Determine how many 16 byte slots we're going to fill using SSE movs.
2691 size_t slots = size / XMM_REGSIZE_BYTES;
while (slots-- > 0)
{
    emit->emitIns_AR_R(INS_movdqu, EA_8BYTE, tmpReg, dstAddr->gtRegNum, offset);
    offset += XMM_REGSIZE_BYTES;
}
2700 // Fill the remainder (or a < 16 byte sized struct)
2701 if ((size & 8) != 0)
#ifdef _TARGET_X86_
// TODO-X86-CQ: [1091735] Revisit block ops codegen. One example: use movq for 8 byte movs.
emit->emitIns_AR_R(INS_mov, EA_4BYTE, valReg, dstAddr->gtRegNum, offset);
offset += 4;
emit->emitIns_AR_R(INS_mov, EA_4BYTE, valReg, dstAddr->gtRegNum, offset);
offset += 4;
#else // !_TARGET_X86_
emit->emitIns_AR_R(INS_mov, EA_8BYTE, valReg, dstAddr->gtRegNum, offset);
offset += 8;
#endif // !_TARGET_X86_
2716 if ((size & 4) != 0)
2718 emit->emitIns_AR_R(INS_mov, EA_4BYTE, valReg, dstAddr->gtRegNum, offset);
2721 if ((size & 2) != 0)
2723 emit->emitIns_AR_R(INS_mov, EA_2BYTE, valReg, dstAddr->gtRegNum, offset);
2726 if ((size & 1) != 0)
2728 emit->emitIns_AR_R(INS_mov, EA_1BYTE, valReg, dstAddr->gtRegNum, offset);
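// Putting the tail handling above together, a hypothetical 23-byte InitBlk unroll would emit
// one 16-byte SSE store, then a 4-byte mov at offset 16, a 2-byte mov at offset 20 and a
// 1-byte mov at offset 22 (23 == 16 + 4 + 2 + 1; the "size & 8" path is skipped since that
// bit is clear).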
// Generates code for InitBlk by calling the VM memset helper function.
// Preconditions:
// a) The size argument of the InitBlk is not an integer constant.
// b) The size argument of the InitBlk is >= INITBLK_STOS_LIMIT bytes.
2736 void CodeGen::genCodeForInitBlk(GenTreeBlk* initBlkNode)
2738 #ifdef _TARGET_AMD64_
2739 // Make sure we got the arguments of the initblk operation in the right registers
2740 unsigned blockSize = initBlkNode->Size();
2741 GenTreePtr dstAddr = initBlkNode->Addr();
2742 GenTreePtr initVal = initBlkNode->Data();
2743 if (initVal->OperIsInitVal())
2745 initVal = initVal->gtGetOp1();
2748 assert(dstAddr->isUsedFromReg());
2749 assert(initVal->isUsedFromReg());
if (blockSize != 0)
{
    assert(blockSize >= CPBLK_MOVS_LIMIT);
}
2756 genConsumeBlockOp(initBlkNode, REG_ARG_0, REG_ARG_1, REG_ARG_2);
2758 genEmitHelperCall(CORINFO_HELP_MEMSET, 0, EA_UNKNOWN);
2759 #else // !_TARGET_AMD64_
2760 NYI_X86("Helper call for InitBlk");
2761 #endif // !_TARGET_AMD64_
2764 // Generate code for a load from some address + offset
2765 // baseNode: tree node which can be either a local address or arbitrary node
2766 // offset: distance from the baseNode from which to load
2767 void CodeGen::genCodeForLoadOffset(instruction ins, emitAttr size, regNumber dst, GenTree* baseNode, unsigned offset)
2769 emitter* emit = getEmitter();
2771 if (baseNode->OperIsLocalAddr())
2773 if (baseNode->gtOper == GT_LCL_FLD_ADDR)
2775 offset += baseNode->gtLclFld.gtLclOffs;
2777 emit->emitIns_R_S(ins, size, dst, baseNode->gtLclVarCommon.gtLclNum, offset);
2781 emit->emitIns_R_AR(ins, size, dst, baseNode->gtRegNum, offset);
2785 //------------------------------------------------------------------------
2786 // genCodeForStoreOffset: Generate code to store a reg to [base + offset].
2789 // ins - the instruction to generate.
2790 // size - the size that needs to be stored.
2791 // src - the register which needs to be stored.
2792 // baseNode - the base, relative to which to store the src register.
2793 // offset - the offset that is added to the baseNode to calculate the address to store into.
2795 void CodeGen::genCodeForStoreOffset(instruction ins, emitAttr size, regNumber src, GenTree* baseNode, unsigned offset)
2797 emitter* emit = getEmitter();
2799 if (baseNode->OperIsLocalAddr())
2801 if (baseNode->gtOper == GT_LCL_FLD_ADDR)
2803 offset += baseNode->gtLclFld.gtLclOffs;
2806 emit->emitIns_S_R(ins, size, src, baseNode->AsLclVarCommon()->GetLclNum(), offset);
2810 emit->emitIns_AR_R(ins, size, src, baseNode->gtRegNum, offset);
2814 // Generates CpBlk code by performing a loop unroll
2816 // The size argument of the CpBlk node is a constant and <= 64 bytes.
2817 // This may seem small but covers >95% of the cases in several framework assemblies.
2819 void CodeGen::genCodeForCpBlkUnroll(GenTreeBlk* cpBlkNode)
2821 // Make sure we got the arguments of the cpblk operation in the right registers
2822 unsigned size = cpBlkNode->Size();
2823 GenTreePtr dstAddr = cpBlkNode->Addr();
2824 GenTreePtr source = cpBlkNode->Data();
2825 GenTreePtr srcAddr = nullptr;
2826 assert(size <= CPBLK_UNROLL_LIMIT);
2828 emitter* emit = getEmitter();
2830 if (dstAddr->isUsedFromReg())
2832 genConsumeReg(dstAddr);
2835 if (source->gtOper == GT_IND)
2837 srcAddr = source->gtGetOp1();
2838 if (srcAddr->isUsedFromReg())
2840 genConsumeReg(srcAddr);
2845 noway_assert(source->IsLocal());
2846 // TODO-Cleanup: Consider making the addrForm() method in Rationalize public, e.g. in GenTree.
2847 // OR: transform source to GT_IND(GT_LCL_VAR_ADDR)
2848 if (source->OperGet() == GT_LCL_VAR)
2850 source->SetOper(GT_LCL_VAR_ADDR);
2854 assert(source->OperGet() == GT_LCL_FLD);
2855 source->SetOper(GT_LCL_FLD_ADDR);
2860 unsigned offset = 0;
2862 // If the size of this struct is larger than 16 bytes
2863 // let's use SSE2 to be able to do 16 byte at a time
2864 // loads and stores.
2866 if (size >= XMM_REGSIZE_BYTES)
2868 regNumber xmmReg = cpBlkNode->GetSingleTempReg(RBM_ALLFLOAT);
2869 assert(genIsValidFloatReg(xmmReg));
2870 size_t slots = size / XMM_REGSIZE_BYTES;
2872 // TODO: In the below code the load and store instructions are for 16 bytes, but the
2873 // type is EA_8BYTE. The movdqa/u are 16 byte instructions, so it works, but
2874 // this probably needs to be changed.
while (slots-- > 0)
{
    genCodeForLoadOffset(INS_movdqu, EA_8BYTE, xmmReg, srcAddr, offset);
    genCodeForStoreOffset(INS_movdqu, EA_8BYTE, xmmReg, dstAddr, offset);
    offset += XMM_REGSIZE_BYTES;
}
2885 // Fill the remainder (15 bytes or less) if there's one.
2886 if ((size & 0xf) != 0)
2888 // Grab the integer temp register to emit the remaining loads and stores.
2889 regNumber tmpReg = cpBlkNode->GetSingleTempReg(RBM_ALLINT);
2891 if ((size & 8) != 0)
2894 // TODO-X86-CQ: [1091735] Revisit block ops codegen. One example: use movq for 8 byte movs.
2895 for (unsigned savedOffs = offset; offset < savedOffs + 8; offset += 4)
2897 genCodeForLoadOffset(INS_mov, EA_4BYTE, tmpReg, srcAddr, offset);
2898 genCodeForStoreOffset(INS_mov, EA_4BYTE, tmpReg, dstAddr, offset);
2900 #else // !_TARGET_X86_
2901 genCodeForLoadOffset(INS_mov, EA_8BYTE, tmpReg, srcAddr, offset);
2902 genCodeForStoreOffset(INS_mov, EA_8BYTE, tmpReg, dstAddr, offset);
2904 #endif // !_TARGET_X86_
2906 if ((size & 4) != 0)
2908 genCodeForLoadOffset(INS_mov, EA_4BYTE, tmpReg, srcAddr, offset);
2909 genCodeForStoreOffset(INS_mov, EA_4BYTE, tmpReg, dstAddr, offset);
2912 if ((size & 2) != 0)
2914 genCodeForLoadOffset(INS_mov, EA_2BYTE, tmpReg, srcAddr, offset);
2915 genCodeForStoreOffset(INS_mov, EA_2BYTE, tmpReg, dstAddr, offset);
2918 if ((size & 1) != 0)
2920 genCodeForLoadOffset(INS_mov, EA_1BYTE, tmpReg, srcAddr, offset);
2921 genCodeForStoreOffset(INS_mov, EA_1BYTE, tmpReg, dstAddr, offset);
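// As an illustrative example of the unroll above: an 18-byte CpBlk emits one 16-byte movdqu
// load/store pair followed by a single 2-byte mov pair at offset 16 (18 == 16 + 2).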
2926 // Generate code for CpBlk by using rep movs
2928 // The size argument of the CpBlk is a constant and is between
2929 // CPBLK_UNROLL_LIMIT and CPBLK_MOVS_LIMIT bytes.
2930 void CodeGen::genCodeForCpBlkRepMovs(GenTreeBlk* cpBlkNode)
2932 // Make sure we got the arguments of the cpblk operation in the right registers
2933 unsigned size = cpBlkNode->Size();
2934 GenTreePtr dstAddr = cpBlkNode->Addr();
2935 GenTreePtr source = cpBlkNode->Data();
2936 GenTreePtr srcAddr = nullptr;
2939 assert(dstAddr->isUsedFromReg());
2940 assert(source->isContained());
#ifdef _TARGET_X86_
if (size == 0)
{
    noway_assert(cpBlkNode->OperGet() == GT_STORE_DYN_BLK);
}
else
#endif
{
#ifdef _TARGET_AMD64_
    assert(size > CPBLK_UNROLL_LIMIT && size < CPBLK_MOVS_LIMIT);
#else
    assert(size > CPBLK_UNROLL_LIMIT);
#endif
}
2958 genConsumeBlockOp(cpBlkNode, REG_RDI, REG_RSI, REG_RCX);
2959 instGen(INS_r_movsb);
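// Note on the assumed "rep movsb" semantics: it copies RCX bytes from [RSI] to [RDI],
// advancing both pointers, which matches the RDI/RSI/RCX homing done by genConsumeBlockOp
// just above.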
2962 #ifdef FEATURE_PUT_STRUCT_ARG_STK
2963 //------------------------------------------------------------------------
2964 // CodeGen::genMove8IfNeeded: Conditionally move 8 bytes of a struct to the argument area
//
// Arguments:
//    size       - The number of bytes remaining to be moved
//    longTmpReg - The tmp register to be used for the 8-byte chunk
//    srcAddr    - The address of the source struct
//    offset     - The current offset being copied
//
// Return Value:
//    Returns the number of bytes moved (8 or 0).
//
// Notes:
2976 // This is used in the PutArgStkKindUnroll case, to move any bytes that are
2977 // not an even multiple of 16.
2978 // On x86, longTmpReg must be an xmm reg; on x64 it must be an integer register.
2979 // This is checked by genStoreRegToStackArg.
2981 unsigned CodeGen::genMove8IfNeeded(unsigned size, regNumber longTmpReg, GenTree* srcAddr, unsigned offset)
2984 instruction longMovIns = INS_movq;
2985 #else // !_TARGET_X86_
2986 instruction longMovIns = INS_mov;
2987 #endif // !_TARGET_X86_
2988 if ((size & 8) != 0)
2990 genCodeForLoadOffset(longMovIns, EA_8BYTE, longTmpReg, srcAddr, offset);
2991 genStoreRegToStackArg(TYP_LONG, longTmpReg, offset);
2997 //------------------------------------------------------------------------
2998 // CodeGen::genMove4IfNeeded: Conditionally move 4 bytes of a struct to the argument area
//
// Arguments:
//    size      - The number of bytes remaining to be moved
//    intTmpReg - The tmp register to be used for the 4-byte chunk
//    srcAddr   - The address of the source struct
//    offset    - The current offset being copied
//
// Return Value:
//    Returns the number of bytes moved (4 or 0).
//
// Notes:
3010 // This is used in the PutArgStkKindUnroll case, to move any bytes that are
3011 // not an even multiple of 16.
3012 // intTmpReg must be an integer register.
3013 // This is checked by genStoreRegToStackArg.
3015 unsigned CodeGen::genMove4IfNeeded(unsigned size, regNumber intTmpReg, GenTree* srcAddr, unsigned offset)
3017 if ((size & 4) != 0)
3019 genCodeForLoadOffset(INS_mov, EA_4BYTE, intTmpReg, srcAddr, offset);
3020 genStoreRegToStackArg(TYP_INT, intTmpReg, offset);
3026 //------------------------------------------------------------------------
3027 // CodeGen::genMove2IfNeeded: Conditionally move 2 bytes of a struct to the argument area
//
// Arguments:
//    size      - The number of bytes remaining to be moved
//    intTmpReg - The tmp register to be used for the 2-byte chunk
//    srcAddr   - The address of the source struct
//    offset    - The current offset being copied
//
// Return Value:
//    Returns the number of bytes moved (2 or 0).
//
// Notes:
3039 // This is used in the PutArgStkKindUnroll case, to move any bytes that are
3040 // not an even multiple of 16.
3041 // intTmpReg must be an integer register.
3042 // This is checked by genStoreRegToStackArg.
3044 unsigned CodeGen::genMove2IfNeeded(unsigned size, regNumber intTmpReg, GenTree* srcAddr, unsigned offset)
3046 if ((size & 2) != 0)
3048 genCodeForLoadOffset(INS_mov, EA_2BYTE, intTmpReg, srcAddr, offset);
3049 genStoreRegToStackArg(TYP_SHORT, intTmpReg, offset);
3055 //------------------------------------------------------------------------
3056 // CodeGen::genMove1IfNeeded: Conditionally move 1 byte of a struct to the argument area
//
// Arguments:
//    size      - The number of bytes remaining to be moved
//    intTmpReg - The tmp register to be used for the 1-byte chunk
//    srcAddr   - The address of the source struct
//    offset    - The current offset being copied
//
// Return Value:
//    Returns the number of bytes moved (1 or 0).
//
// Notes:
3068 // This is used in the PutArgStkKindUnroll case, to move any bytes that are
3069 // not an even multiple of 16.
3070 // intTmpReg must be an integer register.
3071 // This is checked by genStoreRegToStackArg.
3073 unsigned CodeGen::genMove1IfNeeded(unsigned size, regNumber intTmpReg, GenTree* srcAddr, unsigned offset)
3075 if ((size & 1) != 0)
3077 genCodeForLoadOffset(INS_mov, EA_1BYTE, intTmpReg, srcAddr, offset);
3078 genStoreRegToStackArg(TYP_BYTE, intTmpReg, offset);
3084 //---------------------------------------------------------------------------------------------------------------//
3085 // genStructPutArgUnroll: Generates code for passing a struct arg on stack by value using loop unrolling.
3088 // putArgNode - the PutArgStk tree.
// m_stkArgVarNum must be set to the base var number, relative to which the by-val struct will be copied to the
// stack.
3094 // TODO-Amd64-Unix: Try to share code with copyblk.
3095 // Need refactoring of copyblk before it could be used for putarg_stk.
3096 // The difference for now is that a putarg_stk contains its children, while cpyblk does not.
3097 // This creates differences in code. After some significant refactoring it could be reused.
3099 void CodeGen::genStructPutArgUnroll(GenTreePutArgStk* putArgNode)
3101 // We will never call this method for SIMD types, which are stored directly
3102 // in genPutStructArgStk().
3103 noway_assert(putArgNode->TypeGet() == TYP_STRUCT);
3105 // Make sure we got the arguments of the cpblk operation in the right registers
3106 GenTreePtr dstAddr = putArgNode;
3107 GenTreePtr src = putArgNode->gtOp.gtOp1;
3109 unsigned size = putArgNode->getArgSize();
3110 assert(size <= CPBLK_UNROLL_LIMIT);
3112 emitter* emit = getEmitter();
3113 unsigned putArgOffset = putArgNode->getArgOffset();
3115 assert(src->isContained());
3117 assert(src->gtOper == GT_OBJ);
3119 if (src->gtOp.gtOp1->isUsedFromReg())
3121 genConsumeReg(src->gtOp.gtOp1);
3124 unsigned offset = 0;
3126 regNumber xmmTmpReg = REG_NA;
3127 regNumber intTmpReg = REG_NA;
3128 regNumber longTmpReg = REG_NA;
#ifdef _TARGET_X86_
// On x86 we use an XMM register for both 16 and 8-byte chunks, but if it's
// less than 16 bytes, we will just be using pushes
if (size >= 8)
{
    xmmTmpReg  = putArgNode->GetSingleTempReg(RBM_ALLFLOAT);
    longTmpReg = xmmTmpReg;
}
3137 if ((size & 0x7) != 0)
3139 intTmpReg = putArgNode->GetSingleTempReg(RBM_ALLINT);
3141 #else // !_TARGET_X86_
3142 // On x64 we use an XMM register only for 16-byte chunks.
3143 if (size >= XMM_REGSIZE_BYTES)
3145 xmmTmpReg = putArgNode->GetSingleTempReg(RBM_ALLFLOAT);
3147 if ((size & 0xf) != 0)
3149 intTmpReg = putArgNode->GetSingleTempReg(RBM_ALLINT);
3150 longTmpReg = intTmpReg;
3152 #endif // !_TARGET_X86_
3154 // If the size of this struct is larger than 16 bytes
3155 // let's use SSE2 to be able to do 16 byte at a time
3156 // loads and stores.
3157 if (size >= XMM_REGSIZE_BYTES)
#ifdef _TARGET_X86_
assert(!m_pushStkArg);
3161 #endif // _TARGET_X86_
3162 size_t slots = size / XMM_REGSIZE_BYTES;
3164 assert(putArgNode->gtGetOp1()->isContained());
3165 assert(putArgNode->gtGetOp1()->gtOp.gtOper == GT_OBJ);
3167 // TODO: In the below code the load and store instructions are for 16 bytes, but the
3168 // type is EA_8BYTE. The movdqa/u are 16 byte instructions, so it works, but
3169 // this probably needs to be changed.
for (size_t i = 0; i < slots; i++)
{
    genCodeForLoadOffset(INS_movdqu, EA_8BYTE, xmmTmpReg, src->gtGetOp1(), offset);
    genStoreRegToStackArg(TYP_STRUCT, xmmTmpReg, offset);
    offset += XMM_REGSIZE_BYTES;
}
3182 // Fill the remainder (15 bytes or less) if there's one.
3183 if ((size & 0xf) != 0)
#ifdef _TARGET_X86_
if (m_pushStkArg)
{
    // This case is currently supported only for the case where the total size is
    // less than XMM_REGSIZE_BYTES. We need to push the remaining chunks in reverse
    // order. However, morph has ensured that we have a struct that is an even
    // multiple of TARGET_POINTER_SIZE, so we don't need to worry about alignment.
3192 assert(((size & 0xc) == size) && (offset == 0));
3193 // If we have a 4 byte chunk, load it from either offset 0 or 8, depending on
3194 // whether we've got an 8 byte chunk, and then push it on the stack.
3195 unsigned pushedBytes = genMove4IfNeeded(size, intTmpReg, src->gtOp.gtOp1, size & 0x8);
3196 // Now if we have an 8 byte chunk, load it from offset 0 (it's the first chunk)
3197 // and push it on the stack.
3198 pushedBytes += genMove8IfNeeded(size, longTmpReg, src->gtOp.gtOp1, 0);
}
else
#endif // _TARGET_X86_
{
3203 offset += genMove8IfNeeded(size, longTmpReg, src->gtOp.gtOp1, offset);
3204 offset += genMove4IfNeeded(size, intTmpReg, src->gtOp.gtOp1, offset);
3205 offset += genMove2IfNeeded(size, intTmpReg, src->gtOp.gtOp1, offset);
3206 offset += genMove1IfNeeded(size, intTmpReg, src->gtOp.gtOp1, offset);
3207 assert(offset == size);
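// To illustrate the chain above with a hypothetical 15-byte struct: genMove8IfNeeded copies
// bytes [0,8) with one 8-byte move, then the 4/2/1 helpers copy bytes [8,12), [12,14) and
// [14,15), so offset ends at 15 == size, satisfying the assert.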
3212 //------------------------------------------------------------------------
3213 // genStructPutArgRepMovs: Generates code for passing a struct arg by value on stack using Rep Movs.
3216 // putArgNode - the PutArgStk tree.
3219 // The size argument of the PutArgStk (for structs) is a constant and is between
3220 // CPBLK_UNROLL_LIMIT and CPBLK_MOVS_LIMIT bytes.
3221 // m_stkArgVarNum must be set to the base var number, relative to which the by-val struct bits will go.
3223 void CodeGen::genStructPutArgRepMovs(GenTreePutArgStk* putArgNode)
3225 assert(putArgNode->TypeGet() == TYP_STRUCT);
3226 assert(putArgNode->getArgSize() > CPBLK_UNROLL_LIMIT);
3228 // Make sure we got the arguments of the cpblk operation in the right registers
3229 GenTreePtr dstAddr = putArgNode;
3230 GenTreePtr srcAddr = putArgNode->gtGetOp1();
3233 assert(putArgNode->gtRsvdRegs == (RBM_RDI | RBM_RCX | RBM_RSI));
3234 assert(srcAddr->isContained());
3236 genConsumePutStructArgStk(putArgNode, REG_RDI, REG_RSI, REG_RCX);
3237 instGen(INS_r_movsb);
3240 //------------------------------------------------------------------------
// If any Vector3 args are on the stack and they are not pass-by-ref, the upper 32 bits
// must be cleared to zeroes. The native compiler doesn't clear the upper bits
// and there is no way to know if the caller is native or not. So, the upper
// 32 bits of a Vector3 argument on the stack are always cleared to zero.
3245 #if defined(FEATURE_UNIX_AMD64_STRUCT_PASSING) && defined(FEATURE_SIMD)
3246 void CodeGen::genClearStackVec3ArgUpperBits()
#ifdef DEBUG
if (verbose)
{
    printf("*************** In genClearStackVec3ArgUpperBits()\n");
}
#endif
3255 assert(compiler->compGeneratingProlog);
3259 for (unsigned varNum = 0; varNum < compiler->info.compArgsCount; varNum++)
3261 LclVarDsc* varDsc = &(compiler->lvaTable[varNum]);
3262 assert(varDsc->lvIsParam);
// Does the var have a SIMD12 type?
3265 if (varDsc->lvType != TYP_SIMD12)
3270 if (!varDsc->lvIsRegArg)
3272 // Clear the upper 32 bits by mov dword ptr [V_ARG_BASE+0xC], 0
3273 getEmitter()->emitIns_S_I(ins_Store(TYP_INT), EA_4BYTE, varNum, genTypeSize(TYP_FLOAT) * 3, 0);
// Assume that for x64 linux, an argument is passed fully in registers
// or fully on the stack.
3279 regNumber argReg = varDsc->GetOtherArgReg();
3281 // Clear the upper 32 bits by two shift instructions.
3282 // argReg = argReg << 96
3283 getEmitter()->emitIns_R_I(INS_pslldq, emitActualTypeSize(TYP_SIMD12), argReg, 12);
3284 // argReg = argReg >> 96
3285 getEmitter()->emitIns_R_I(INS_psrldq, emitActualTypeSize(TYP_SIMD12), argReg, 12);
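// To make the shift trick concrete (illustrative reading of the ABI): the second eightbyte
// of a TYP_SIMD12 arg carries only the 4-byte 'z' element in its low 32 bits. Shifting the
// register left then right by 12 bytes (96 bits) keeps those low 4 bytes and forces the
// upper 12 bytes of argReg to zero.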
3289 #endif // defined(FEATURE_UNIX_AMD64_STRUCT_PASSING) && defined(FEATURE_SIMD)
3290 #endif // FEATURE_PUT_STRUCT_ARG_STK
// Generate code for CpObj nodes which copy structs that have interleaved
// GC pointers.
// This will generate a sequence of movsp instructions for the runs of non-gc members,
// and calls to the CORINFO_HELP_ASSIGN_BYREF helper otherwise.
// Note that movsp is an alias for movsd on x86 and movsq on x64.
3297 void CodeGen::genCodeForCpObj(GenTreeObj* cpObjNode)
3299 // Make sure we got the arguments of the cpobj operation in the right registers
3300 GenTreePtr dstAddr = cpObjNode->Addr();
3301 GenTreePtr source = cpObjNode->Data();
3302 GenTreePtr srcAddr = nullptr;
3303 var_types srcAddrType = TYP_BYREF;
3304 bool sourceIsLocal = false;
3306 assert(source->isContained());
3307 if (source->gtOper == GT_IND)
3309 srcAddr = source->gtGetOp1();
3310 assert(srcAddr->isUsedFromReg());
3314 noway_assert(source->IsLocal());
3315 sourceIsLocal = true;
3318 bool dstOnStack = dstAddr->OperIsLocalAddr();
3322 assert(dstAddr->isUsedFromReg());
3324 // If the GenTree node has data about GC pointers, this means we're dealing
3325 // with CpObj, so this requires special logic.
3326 assert(cpObjNode->gtGcPtrCount > 0);
3328 // MovSp (alias for movsq on x64 and movsd on x86) instruction is used for copying non-gcref fields
3329 // and it needs src = RSI and dst = RDI.
3330 // Either these registers must not contain lclVars, or they must be dying or marked for spill.
3331 // This is because these registers are incremented as we go through the struct.
3334 GenTree* actualSrcAddr = srcAddr->gtSkipReloadOrCopy();
3335 GenTree* actualDstAddr = dstAddr->gtSkipReloadOrCopy();
3336 unsigned srcLclVarNum = BAD_VAR_NUM;
3337 unsigned dstLclVarNum = BAD_VAR_NUM;
3338 bool isSrcAddrLiveOut = false;
3339 bool isDstAddrLiveOut = false;
3340 if (genIsRegCandidateLocal(actualSrcAddr))
3342 srcLclVarNum = actualSrcAddr->AsLclVarCommon()->gtLclNum;
3343 isSrcAddrLiveOut = ((actualSrcAddr->gtFlags & (GTF_VAR_DEATH | GTF_SPILL)) == 0);
3345 if (genIsRegCandidateLocal(actualDstAddr))
3347 dstLclVarNum = actualDstAddr->AsLclVarCommon()->gtLclNum;
3348 isDstAddrLiveOut = ((actualDstAddr->gtFlags & (GTF_VAR_DEATH | GTF_SPILL)) == 0);
3350 assert((actualSrcAddr->gtRegNum != REG_RSI) || !isSrcAddrLiveOut ||
3351 ((srcLclVarNum == dstLclVarNum) && !isDstAddrLiveOut));
3352 assert((actualDstAddr->gtRegNum != REG_RDI) || !isDstAddrLiveOut ||
3353 ((srcLclVarNum == dstLclVarNum) && !isSrcAddrLiveOut));
3354 srcAddrType = srcAddr->TypeGet();
3358 // Consume the operands and get them into the right registers.
3359 // They may now contain gc pointers (depending on their type; gcMarkRegPtrVal will "do the right thing").
3360 genConsumeBlockOp(cpObjNode, REG_RDI, REG_RSI, REG_NA);
3361 gcInfo.gcMarkRegPtrVal(REG_RSI, srcAddrType);
3362 gcInfo.gcMarkRegPtrVal(REG_RDI, dstAddr->TypeGet());
3364 unsigned slots = cpObjNode->gtSlots;
3366 // If we can prove it's on the stack we don't need to use the write barrier.
if (dstOnStack)
{
    if (slots >= CPOBJ_NONGC_SLOTS_LIMIT)
3371 // If the destination of the CpObj is on the stack, make sure we allocated
3372 // RCX to emit the movsp (alias for movsd or movsq for 32 and 64 bits respectively).
3373 assert((cpObjNode->gtRsvdRegs & RBM_RCX) != 0);
3375 getEmitter()->emitIns_R_I(INS_mov, EA_4BYTE, REG_RCX, slots);
3376 instGen(INS_r_movsp);
    else
    {
        // For small structs, it's better to emit a sequence of movsp than to
        // emit a rep movsp instruction.
        while (slots > 0)
        {
            instGen(INS_movsp);
            slots--;
        }
    }
}
else
{
    BYTE* gcPtrs = cpObjNode->gtGcPtrs;
3392 unsigned gcPtrCount = cpObjNode->gtGcPtrCount;
unsigned i = 0;
while (i < slots)
{
    switch (gcPtrs[i])
    {
        case TYPE_GC_NONE:
            // Let's see if we can use rep movsp instead of a sequence of movsp instructions
            // to save cycles and code size.
            {
                unsigned nonGcSlotCount = 0;

                do
                {
                    nonGcSlotCount++;
                    i++;
                } while (i < slots && gcPtrs[i] == TYPE_GC_NONE);
3411 // If we have a very small contiguous non-gc region, it's better just to
3412 // emit a sequence of movsp instructions
3413 if (nonGcSlotCount < CPOBJ_NONGC_SLOTS_LIMIT)
while (nonGcSlotCount > 0)
{
    instGen(INS_movsp);
    nonGcSlotCount--;
}
}
else
{
3423 // Otherwise, we can save code-size and improve CQ by emitting
3424 // rep movsp (alias for movsd/movsq for x86/x64)
3425 assert((cpObjNode->gtRsvdRegs & RBM_RCX) != 0);
3427 getEmitter()->emitIns_R_I(INS_mov, EA_4BYTE, REG_RCX, nonGcSlotCount);
3428 instGen(INS_r_movsp);
}
break;

default:
    // We have a GC pointer; call the byref write barrier helper.
    genEmitHelperCall(CORINFO_HELP_ASSIGN_BYREF, 0, EA_PTRSIZE);
    gcPtrCount--;
    i++;
3440 assert(gcPtrCount == 0);
3443 // Clear the gcInfo for RSI and RDI.
3444 // While we normally update GC info prior to the last instruction that uses them,
3445 // these actually live into the helper call.
3446 gcInfo.gcMarkRegSetNpt(RBM_RSI);
3447 gcInfo.gcMarkRegSetNpt(RBM_RDI);
// Generate code for a CpBlk node by means of the VM memcpy helper call.
// This is used when either:
3452 // a) The size argument of the CpBlk is not an integer constant
3453 // b) The size argument is a constant but is larger than CPBLK_MOVS_LIMIT bytes.
3454 void CodeGen::genCodeForCpBlk(GenTreeBlk* cpBlkNode)
3456 #ifdef _TARGET_AMD64_
3457 // Make sure we got the arguments of the cpblk operation in the right registers
3458 unsigned blockSize = cpBlkNode->Size();
3459 GenTreePtr dstAddr = cpBlkNode->Addr();
3460 GenTreePtr source = cpBlkNode->Data();
3461 GenTreePtr srcAddr = nullptr;
3463 // Size goes in arg2
if (blockSize != 0)
{
    assert(blockSize >= CPBLK_MOVS_LIMIT);
    assert((cpBlkNode->gtRsvdRegs & RBM_ARG_2) != 0);
}
else
{
    noway_assert(cpBlkNode->gtOper == GT_STORE_DYN_BLK);
}
3474 // Source address goes in arg1
3475 if (source->gtOper == GT_IND)
3477 srcAddr = source->gtGetOp1();
3478 assert(srcAddr->isUsedFromReg());
3482 noway_assert(source->IsLocal());
3483 assert((cpBlkNode->gtRsvdRegs & RBM_ARG_1) != 0);
3484 inst_RV_TT(INS_lea, REG_ARG_1, source, 0, EA_BYREF);
3487 genConsumeBlockOp(cpBlkNode, REG_ARG_0, REG_ARG_1, REG_ARG_2);
3489 genEmitHelperCall(CORINFO_HELP_MEMCPY, 0, EA_UNKNOWN);
3490 #else // !_TARGET_AMD64_
3491 noway_assert(false && "Helper call for CpBlk is not needed.");
3492 #endif // !_TARGET_AMD64_
// generate code to do a switch statement based on a table of ip-relative offsets
3496 void CodeGen::genTableBasedSwitch(GenTree* treeNode)
3498 genConsumeOperands(treeNode->AsOp());
3499 regNumber idxReg = treeNode->gtOp.gtOp1->gtRegNum;
3500 regNumber baseReg = treeNode->gtOp.gtOp2->gtRegNum;
3502 regNumber tmpReg = treeNode->GetSingleTempReg();
3504 // load the ip-relative offset (which is relative to start of fgFirstBB)
3505 getEmitter()->emitIns_R_ARX(INS_mov, EA_4BYTE, baseReg, baseReg, idxReg, 4, 0);
3507 // add it to the absolute address of fgFirstBB
3508 compiler->fgFirstBB->bbFlags |= BBF_JMP_TARGET;
3509 getEmitter()->emitIns_R_L(INS_lea, EA_PTR_DSP_RELOC, compiler->fgFirstBB, tmpReg);
3510 getEmitter()->emitIns_R_R(INS_add, EA_PTRSIZE, baseReg, tmpReg);
3512 getEmitter()->emitIns_R(INS_i_jmp, emitTypeSize(TYP_I_IMPL), baseReg);
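// The sequence emitted above amounts to (sketch, not literal disassembly):
//   mov  baseReg, [baseReg + idxReg*4]   ; load the ip-relative offset from the table
//   lea  tmpReg, [fgFirstBB]             ; absolute address the offsets are relative to
//   add  baseReg, tmpReg
//   jmp  baseReg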
3515 // emits the table and an instruction to get the address of the first element
3516 void CodeGen::genJumpTable(GenTree* treeNode)
3518 noway_assert(compiler->compCurBB->bbJumpKind == BBJ_SWITCH);
3519 assert(treeNode->OperGet() == GT_JMPTABLE);
3521 unsigned jumpCount = compiler->compCurBB->bbJumpSwt->bbsCount;
3522 BasicBlock** jumpTable = compiler->compCurBB->bbJumpSwt->bbsDstTab;
3523 unsigned jmpTabOffs;
3524 unsigned jmpTabBase;
3526 jmpTabBase = getEmitter()->emitBBTableDataGenBeg(jumpCount, true);
3530 JITDUMP("\n J_M%03u_DS%02u LABEL DWORD\n", Compiler::s_compMethodsCount, jmpTabBase);
3532 for (unsigned i = 0; i < jumpCount; i++)
3534 BasicBlock* target = *jumpTable++;
3535 noway_assert(target->bbFlags & BBF_JMP_TARGET);
3537 JITDUMP(" DD L_M%03u_BB%02u\n", Compiler::s_compMethodsCount, target->bbNum);
3539 getEmitter()->emitDataGenData(i, target);
3542 getEmitter()->emitDataGenEnd();
3544 // Access to inline data is 'abstracted' by a special type of static member
3545 // (produced by eeFindJitDataOffs) which the emitter recognizes as being a reference
3546 // to constant data, not a real static field.
3547 getEmitter()->emitIns_R_C(INS_lea, emitTypeSize(TYP_I_IMPL), treeNode->gtRegNum,
3548 compiler->eeFindJitDataOffs(jmpTabBase), 0);
3549 genProduceReg(treeNode);
3552 // generate code for the locked operations:
3553 // GT_LOCKADD, GT_XCHG, GT_XADD
3554 void CodeGen::genLockedInstructions(GenTreeOp* treeNode)
3556 GenTree* data = treeNode->gtOp.gtOp2;
3557 GenTree* addr = treeNode->gtOp.gtOp1;
3558 regNumber targetReg = treeNode->gtRegNum;
3559 regNumber dataReg = data->gtRegNum;
3560 regNumber addrReg = addr->gtRegNum;
var_types type = genActualType(data->TypeGet());
instruction ins;
3564 // The register allocator should have extended the lifetime of the address
3565 // so that it is not used as the target.
3566 noway_assert(addrReg != targetReg);
3568 // If data is a lclVar that's not a last use, we'd better have allocated a register
3569 // for the result (except in the case of GT_LOCKADD which does not produce a register result).
3570 assert(targetReg != REG_NA || treeNode->OperGet() == GT_LOCKADD || !genIsRegCandidateLocal(data) ||
3571 (data->gtFlags & GTF_VAR_DEATH) != 0);
3573 genConsumeOperands(treeNode);
3574 if (targetReg != REG_NA && dataReg != REG_NA && dataReg != targetReg)
3576 inst_RV_RV(ins_Copy(type), targetReg, dataReg);
3577 data->gtRegNum = targetReg;
3579 // TODO-XArch-Cleanup: Consider whether it is worth it, for debugging purposes, to restore the
3580 // original gtRegNum on data, after calling emitInsBinary below.
switch (treeNode->OperGet())
{
    case GT_LOCKADD:
        instGen(INS_lock);
        ins = INS_add;
        break;
    case GT_XCHG:
        // lock is implied by xchg
        ins = INS_xchg;
        break;
    case GT_XADD:
        instGen(INS_lock);
        ins = INS_xadd;
        break;
    default:
        unreached();
}
3600 // all of these nodes implicitly do an indirection on op1
3601 // so create a temporary node to feed into the pattern matching
3602 GenTreeIndir i = indirForm(type, addr);
3603 getEmitter()->emitInsBinary(ins, emitTypeSize(type), &i, data);
3605 if (treeNode->gtRegNum != REG_NA)
3607 genProduceReg(treeNode);
3611 //------------------------------------------------------------------------
// genCodeForCmpXchg: Produce code for a GT_CMPXCHG node.
3615 // tree - the GT_CMPXCHG node
3617 void CodeGen::genCodeForCmpXchg(GenTreeCmpXchg* tree)
3619 assert(tree->OperIs(GT_CMPXCHG));
3621 var_types targetType = tree->TypeGet();
3622 regNumber targetReg = tree->gtRegNum;
3624 GenTreePtr location = tree->gtOpLocation; // arg1
3625 GenTreePtr value = tree->gtOpValue; // arg2
3626 GenTreePtr comparand = tree->gtOpComparand; // arg3
3628 assert(location->gtRegNum != REG_NA && location->gtRegNum != REG_RAX);
3629 assert(value->gtRegNum != REG_NA && value->gtRegNum != REG_RAX);
3631 genConsumeReg(location);
3632 genConsumeReg(value);
3633 genConsumeReg(comparand);
3635 // comparand goes to RAX;
3636 // Note that we must issue this move after the genConsumeRegs(), in case any of the above
3637 // have a GT_COPY from RAX.
3638 if (comparand->gtRegNum != REG_RAX)
3640 inst_RV_RV(ins_Copy(comparand->TypeGet()), REG_RAX, comparand->gtRegNum, comparand->TypeGet());
instGen(INS_lock);
getEmitter()->emitIns_AR_R(INS_cmpxchg, emitTypeSize(targetType), value->gtRegNum, location->gtRegNum, 0);
3649 if (targetReg != REG_RAX)
3651 inst_RV_RV(ins_Copy(targetType), targetReg, REG_RAX, targetType);
3654 genProduceReg(tree);
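// Semantics assumed for the cmpxchg emitted above: it compares RAX with [location]; if they
// are equal, [location] is set to 'value', otherwise RAX is loaded from [location]. Either
// way RAX ends up holding the original memory value, which is why the result is copied out
// of RAX.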
3657 // generate code for BoundsCheck nodes
3658 void CodeGen::genRangeCheck(GenTreePtr oper)
#ifdef FEATURE_SIMD
noway_assert(oper->OperGet() == GT_ARR_BOUNDS_CHECK || oper->OperGet() == GT_SIMD_CHK);
3662 #else // !FEATURE_SIMD
3663 noway_assert(oper->OperGet() == GT_ARR_BOUNDS_CHECK);
3664 #endif // !FEATURE_SIMD
3666 GenTreeBoundsChk* bndsChk = oper->AsBoundsChk();
3668 GenTreePtr arrIndex = bndsChk->gtIndex;
3669 GenTreePtr arrLen = bndsChk->gtArrLen;
3670 GenTreePtr arrRef = nullptr;
3673 GenTree * src1, *src2;
3674 emitJumpKind jmpKind;
3676 genConsumeRegs(arrIndex);
3677 genConsumeRegs(arrLen);
3679 if (arrIndex->isContainedIntOrIImmed())
3681 // arrIndex is a contained constant. In this case
3682 // we will generate one of the following
3683 // cmp [mem], immed (if arrLen is a memory op)
3684 // cmp reg, immed (if arrLen is in a reg)
// That is, arrLen cannot be a contained immediate.
assert(!arrLen->isContainedIntOrIImmed());

src1    = arrLen;
src2    = arrIndex;
jmpKind = genJumpKindForOper(GT_LE, CK_UNSIGNED);
}
else
{
3695 // arrIndex could either be a contained memory op or a reg
3696 // In this case we will generate one of the following
3697 // cmp [mem], immed (if arrLen is a constant)
3698 // cmp [mem], reg (if arrLen is in a reg)
3699 // cmp reg, immed (if arrIndex is in a reg)
// cmp reg1, reg2 (if arrIndex is in reg1)
3701 // cmp reg, [mem] (if arrLen is a memory op)
// That is, only one of arrIndex or arrLen can be a memory op.
assert(!arrIndex->isUsedFromMemory() || !arrLen->isUsedFromMemory());

src1    = arrIndex;
src2    = arrLen;
jmpKind = genJumpKindForOper(GT_GE, CK_UNSIGNED);
}
3711 var_types bndsChkType = src2->TypeGet();
3713 // Bounds checks can only be 32 or 64 bit sized comparisons.
3714 assert(bndsChkType == TYP_INT || bndsChkType == TYP_LONG);
// The type of the bounds check should always be wide enough to compare against the index.
3717 assert(emitTypeSize(bndsChkType) >= emitTypeSize(src1->TypeGet()));
3720 getEmitter()->emitInsBinary(INS_cmp, emitTypeSize(bndsChkType), src1, src2);
3721 genJumpToThrowHlpBlk(jmpKind, bndsChk->gtThrowKind, bndsChk->gtIndRngFailBB);
3724 //---------------------------------------------------------------------
3725 // genCodeForPhysReg - generate code for a GT_PHYSREG node
3728 // tree - the GT_PHYSREG node
3733 void CodeGen::genCodeForPhysReg(GenTreePhysReg* tree)
3735 assert(tree->OperIs(GT_PHYSREG));
3737 var_types targetType = tree->TypeGet();
3738 regNumber targetReg = tree->gtRegNum;
3740 if (targetReg != tree->gtSrcReg)
3742 inst_RV_RV(ins_Copy(targetType), targetReg, tree->gtSrcReg, targetType);
3743 genTransferRegGCState(targetReg, tree->gtSrcReg);
3746 genProduceReg(tree);
3749 //---------------------------------------------------------------------
3750 // genCodeForNullCheck - generate code for a GT_NULLCHECK node
3753 // tree - the GT_NULLCHECK node
3758 void CodeGen::genCodeForNullCheck(GenTreeOp* tree)
3760 assert(tree->OperIs(GT_NULLCHECK));
3762 assert(tree->gtOp1->isUsedFromReg());
3763 regNumber reg = genConsumeReg(tree->gtOp1);
3764 getEmitter()->emitIns_AR_R(INS_cmp, EA_4BYTE, reg, reg, 0);
3767 //------------------------------------------------------------------------
3768 // genOffsetOfMDArrayLowerBound: Returns the offset from the Array object to the
3769 // lower bound for the given dimension.
3772 // elemType - the element type of the array
3773 // rank - the rank of the array
3774 // dimension - the dimension for which the lower bound offset will be returned.
3779 unsigned CodeGen::genOffsetOfMDArrayLowerBound(var_types elemType, unsigned rank, unsigned dimension)
3781 // Note that the lower bound and length fields of the Array object are always TYP_INT, even on 64-bit targets.
3782 return compiler->eeGetArrayDataOffset(elemType) + genTypeSize(TYP_INT) * (dimension + rank);
3785 //------------------------------------------------------------------------
// genOffsetOfMDArrayDimensionSize: Returns the offset from the Array object to the
// size for the given dimension.
3790 // elemType - the element type of the array
3791 // rank - the rank of the array
// dimension - the dimension for which the size offset will be returned.
3797 unsigned CodeGen::genOffsetOfMDArrayDimensionSize(var_types elemType, unsigned rank, unsigned dimension)
3799 // Note that the lower bound and length fields of the Array object are always TYP_INT, even on 64-bit targets.
3800 return compiler->eeGetArrayDataOffset(elemType) + genTypeSize(TYP_INT) * dimension;
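// For illustration (assuming the layout implied by the two offsets above): a rank-2 array
// stores, after eeGetArrayDataOffset, first the two dimension sizes and then the two lower
// bounds. So for dimension 1, the size lives at dataOffset + 4 and the lower bound at
// dataOffset + 4 * (1 + 2).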
3803 //------------------------------------------------------------------------
3804 // genCodeForArrIndex: Generates code to bounds check the index for one dimension of an array reference,
3805 // producing the effective index by subtracting the lower bound.
3808 // arrIndex - the node for which we're generating code
3814 void CodeGen::genCodeForArrIndex(GenTreeArrIndex* arrIndex)
3816 GenTreePtr arrObj = arrIndex->ArrObj();
3817 GenTreePtr indexNode = arrIndex->IndexExpr();
3819 regNumber arrReg = genConsumeReg(arrObj);
3820 regNumber indexReg = genConsumeReg(indexNode);
3821 regNumber tgtReg = arrIndex->gtRegNum;
3823 unsigned dim = arrIndex->gtCurrDim;
3824 unsigned rank = arrIndex->gtArrRank;
3825 var_types elemType = arrIndex->gtArrElemType;
3827 noway_assert(tgtReg != REG_NA);
3829 // Subtract the lower bound for this dimension.
3830 // TODO-XArch-CQ: make this contained if it's an immediate that fits.
3831 if (tgtReg != indexReg)
3833 inst_RV_RV(INS_mov, tgtReg, indexReg, indexNode->TypeGet());
3835 getEmitter()->emitIns_R_AR(INS_sub, emitActualTypeSize(TYP_INT), tgtReg, arrReg,
3836 genOffsetOfMDArrayLowerBound(elemType, rank, dim));
3837 getEmitter()->emitIns_R_AR(INS_cmp, emitActualTypeSize(TYP_INT), tgtReg, arrReg,
3838 genOffsetOfMDArrayDimensionSize(elemType, rank, dim));
3839 genJumpToThrowHlpBlk(EJ_jae, SCK_RNGCHK_FAIL);
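// In instruction form, the bounds-adjusted index computed above is (sketch):
//   mov  tgtReg, indexReg                      ; if not already there
//   sub  tgtReg, [arrReg + lowerBoundOffset]   ; effective index = index - lower bound
//   cmp  tgtReg, [arrReg + dimensionSizeOffset]
//   jae  throwRangeCheckFailed                 ; unsigned compare also catches index < lower bound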
3841 genProduceReg(arrIndex);
3844 //------------------------------------------------------------------------
3845 // genCodeForArrOffset: Generates code to compute the flattened array offset for
3846 // one dimension of an array reference:
3847 // result = (prevDimOffset * dimSize) + effectiveIndex
3848 // where dimSize is obtained from the arrObj operand
3851 // arrOffset - the node for which we're generating code
3857 // dimSize and effectiveIndex are always non-negative, the former by design,
3858 // and the latter because it has been normalized to be zero-based.
3860 void CodeGen::genCodeForArrOffset(GenTreeArrOffs* arrOffset)
3862 GenTreePtr offsetNode = arrOffset->gtOffset;
3863 GenTreePtr indexNode = arrOffset->gtIndex;
3864 GenTreePtr arrObj = arrOffset->gtArrObj;
3866 regNumber tgtReg = arrOffset->gtRegNum;
3867 assert(tgtReg != REG_NA);
3869 unsigned dim = arrOffset->gtCurrDim;
3870 unsigned rank = arrOffset->gtArrRank;
3871 var_types elemType = arrOffset->gtArrElemType;
3873 // First, consume the operands in the correct order.
3874 regNumber offsetReg = REG_NA;
3875 regNumber tmpReg = REG_NA;
3876 if (!offsetNode->IsIntegralConst(0))
3878 offsetReg = genConsumeReg(offsetNode);
3880 // We will use a temp register for the offset*scale+effectiveIndex computation.
3881 tmpReg = arrOffset->GetSingleTempReg();
3885 assert(offsetNode->isContained());
3887 regNumber indexReg = genConsumeReg(indexNode);
3888 // Although arrReg may not be used in the constant-index case, if we have generated
3889 // the value into a register, we must consume it, otherwise we will fail to end the
3890 // live range of the gc ptr.
3891 // TODO-CQ: Currently arrObj will always have a register allocated to it.
3892 // We could avoid allocating a register for it, which would be of value if the arrObj
3893 // is an on-stack lclVar.
3894 regNumber arrReg = REG_NA;
3895 if (arrObj->gtHasReg())
3897 arrReg = genConsumeReg(arrObj);
3900 if (!offsetNode->IsIntegralConst(0))
3902 assert(tmpReg != REG_NA);
3903 assert(arrReg != REG_NA);
3905 // Evaluate tgtReg = offsetReg*dim_size + indexReg.
3906 // tmpReg is used to load dim_size and the result of the multiplication.
3907 // Note that dim_size will never be negative.
3909 getEmitter()->emitIns_R_AR(INS_mov, emitActualTypeSize(TYP_INT), tmpReg, arrReg,
3910 genOffsetOfMDArrayDimensionSize(elemType, rank, dim));
3911 inst_RV_RV(INS_imul, tmpReg, offsetReg);
3913 if (tmpReg == tgtReg)
3915 inst_RV_RV(INS_add, tmpReg, indexReg);
3919 if (indexReg != tgtReg)
3921 inst_RV_RV(INS_mov, tgtReg, indexReg, TYP_I_IMPL);
3923 inst_RV_RV(INS_add, tgtReg, tmpReg);
3928 if (indexReg != tgtReg)
3930 inst_RV_RV(INS_mov, tgtReg, indexReg, TYP_INT);
3933 genProduceReg(arrOffset);
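// For illustration, the non-constant-offset path above (with tmpReg != tgtReg) is roughly:
//    mov  tmpReg, dword ptr [arrReg + dimensionSizeOffset]  // load dim_size
//    imul tmpReg, offsetReg                                 // tmpReg = prevDimOffset * dimSize
//    mov  tgtReg, indexReg                                  // (if tgtReg != indexReg)
//    add  tgtReg, tmpReg                                    // tgtReg = effectiveIndex + tmpReg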
3936 // make a temporary indir we can feed to pattern matching routines
3937 // in cases where we don't want to instantiate all the indirs that would otherwise be created
3939 GenTreeIndir CodeGen::indirForm(var_types type, GenTree* base)
3941 GenTreeIndir i(GT_IND, type, base, nullptr);
3942 i.gtRegNum = REG_NA;
3943 // gtNext has to be non-null (because contained nodes can't be the last node in a block)
3944 // but we don't want it to be a valid pointer
3945 i.gtNext = (GenTree*)(-1);
3949 // make a temporary int we can feed to pattern matching routines
3950 // in cases where we don't want to instantiate a GT_CNS_INT node
3952 GenTreeIntCon CodeGen::intForm(var_types type, ssize_t value)
3954 GenTreeIntCon i(type, value);
3955 i.gtRegNum = REG_NA;
3956 // gtNext has to be non-null (because contained nodes can't be the last node in a block)
3957 // but we don't want it to be a valid pointer
3958 i.gtNext = (GenTree*)(-1);
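// A hypothetical use of these stack-allocated temporaries (names here are illustrative,
// not from a particular call site):
//    GenTreeIntCon one = intForm(TYP_INT, 1);
//    getEmitter()->emitInsBinary(INS_add, EA_4BYTE, dstNode, &one);
// This avoids allocating a real GT_CNS_INT node just to satisfy the emitter's interface.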
3962 instruction CodeGen::genGetInsForOper(genTreeOps oper, var_types type)
3966 // Operations on SIMD vectors shouldn't come down this path
3967 assert(!varTypeIsSIMD(type));
3968 if (varTypeIsFloating(type))
3970 return ins_MathOp(oper, type);
4014 #if !defined(_TARGET_64BIT_)
4033 #endif // !defined(_TARGET_64BIT_)
4041 //------------------------------------------------------------------------
4042 // genCodeForShift: Generates the code sequence for a GenTree node that
4043 // represents a bit shift or rotate operation (<<, >>, >>>, rol, ror).
4046 // tree - the bit shift node (that specifies the type of bit shift to perform).
4049 // a) All GenTrees are register allocated.
4050 // b) The shift-by-amount in tree->gtOp.gtOp2 is either a contained constant or
4051 // it's a register-allocated expression. If it is in a register that is
4052 // not RCX, it will be moved to RCX (so RCX better not be in use!).
4054 void CodeGen::genCodeForShift(GenTreePtr tree)
4056 // Only the non-RMW case here.
4057 assert(tree->OperIsShiftOrRotate());
4058 assert(tree->gtOp.gtOp1->isUsedFromReg());
4059 assert(tree->gtRegNum != REG_NA);
4061 genConsumeOperands(tree->AsOp());
4063 var_types targetType = tree->TypeGet();
4064 instruction ins = genGetInsForOper(tree->OperGet(), targetType);
4066 GenTreePtr operand = tree->gtGetOp1();
4067 regNumber operandReg = operand->gtRegNum;
4069 GenTreePtr shiftBy = tree->gtGetOp2();
4071 if (shiftBy->isContainedIntOrIImmed())
4073 // First, move the operand to the destination register and
4074 // later on perform the shift in-place.
4075 // (LSRA will try to avoid this situation through preferencing.)
4076 if (tree->gtRegNum != operandReg)
4078 inst_RV_RV(INS_mov, tree->gtRegNum, operandReg, targetType);
4081 int shiftByValue = (int)shiftBy->AsIntConCommon()->IconValue();
4082 inst_RV_SH(ins, emitTypeSize(tree), tree->gtRegNum, shiftByValue);
4086 // We must have the number of bits to shift stored in ECX, since we constrained this node to
4087 // sit in ECX. If it isn't there already, LSRA expects the code generator to move it, since ECX
4088 // is a fixed single-register destination requirement.
4089 genCopyRegIfNeeded(shiftBy, REG_RCX);
4091 // The operand to be shifted must not be in ECX
4092 noway_assert(operandReg != REG_RCX);
4094 if (tree->gtRegNum != operandReg)
4096 inst_RV_RV(INS_mov, tree->gtRegNum, operandReg, targetType);
4098 inst_RV_CL(ins, tree->gtRegNum, targetType);
4101 genProduceReg(tree);
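// For illustration (registers are examples only): "x << 3" with x and the result in rax
// emits "shl rax, 3", while "x << y" with y in a register first moves y into RCX if
// needed and then emits "shl rax, cl".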
4105 //------------------------------------------------------------------------
4106 // genCodeForShiftLong: Generates the code sequence for a GenTree node that
4107 // represents a three operand bit shift or rotate operation (<<Hi, >>Lo).
4110 // tree - the bit shift node (that specifies the type of bit shift to perform).
4113 // a) All GenTrees are register allocated.
4114 // b) The shift-by-amount in tree->gtOp.gtOp2 is a contained constant
4116 // TODO-X86-CQ: This only handles the case where the operand being shifted is in a register. We don't
4117 // need sourceHi to be always in reg in case of GT_LSH_HI (because it could be moved from memory to
4118 // targetReg if sourceHi is a memory operand). Similarly for GT_RSH_LO, sourceLo could be marked as
4119 // contained memory-op. Even if not a memory-op, we could mark it as reg-optional.
4121 void CodeGen::genCodeForShiftLong(GenTreePtr tree)
4123 // Only the non-RMW case here.
4124 genTreeOps oper = tree->OperGet();
4125 assert(oper == GT_LSH_HI || oper == GT_RSH_LO);
4127 GenTree* operand = tree->gtOp.gtOp1;
4128 assert(operand->OperGet() == GT_LONG);
4129 assert(operand->gtOp.gtOp1->isUsedFromReg());
4130 assert(operand->gtOp.gtOp2->isUsedFromReg());
4132 GenTree* operandLo = operand->gtGetOp1();
4133 GenTree* operandHi = operand->gtGetOp2();
4135 regNumber regLo = operandLo->gtRegNum;
4136 regNumber regHi = operandHi->gtRegNum;
4138 genConsumeOperands(tree->AsOp());
4140 var_types targetType = tree->TypeGet();
4141 instruction ins = genGetInsForOper(oper, targetType);
4143 GenTreePtr shiftBy = tree->gtGetOp2();
4145 assert(shiftBy->isContainedIntOrIImmed());
4147 unsigned int count = shiftBy->AsIntConCommon()->IconValue();
4149 regNumber regResult = (oper == GT_LSH_HI) ? regHi : regLo;
4151 if (regResult != tree->gtRegNum)
4153 inst_RV_RV(INS_mov, tree->gtRegNum, regResult, targetType);
4156 if (oper == GT_LSH_HI)
4158 inst_RV_RV_IV(ins, emitTypeSize(targetType), tree->gtRegNum, regLo, count);
4162 assert(oper == GT_RSH_LO);
4163 inst_RV_RV_IV(ins, emitTypeSize(targetType), tree->gtRegNum, regHi, count);
4166 genProduceReg(tree);
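// For illustration: on x86 genGetInsForOper maps GT_LSH_HI to shld and GT_RSH_LO to shrd,
// so shifting the long pair "hi:lo" left by 5 produces roughly:
//    mov  tgtReg, regHi     // (if needed)
//    shld tgtReg, regLo, 5  // shift hi left, filling vacated bits from lo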
4170 //------------------------------------------------------------------------
4171 // genCodeForShiftRMW: Generates the code sequence for a GT_STOREIND GenTree node that
4172 // represents a RMW bit shift or rotate operation (<<, >>, >>>, rol, ror), for example:
4173 // GT_STOREIND( AddressTree, GT_SHL( Ind ( AddressTree ), Operand ) )
4176 // storeIndNode: the GT_STOREIND node.
4178 void CodeGen::genCodeForShiftRMW(GenTreeStoreInd* storeInd)
4180 GenTree* data = storeInd->Data();
4181 GenTree* addr = storeInd->Addr();
4183 assert(data->OperIsShiftOrRotate());
4185 // This function only handles the RMW case.
4186 assert(data->gtOp.gtOp1->isUsedFromMemory());
4187 assert(data->gtOp.gtOp1->isIndir());
4188 assert(Lowering::IndirsAreEquivalent(data->gtOp.gtOp1, storeInd));
4189 assert(data->gtRegNum == REG_NA);
4191 var_types targetType = data->TypeGet();
4192 genTreeOps oper = data->OperGet();
4193 instruction ins = genGetInsForOper(oper, targetType);
4194 emitAttr attr = EA_ATTR(genTypeSize(targetType));
4196 GenTree* shiftBy = data->gtOp.gtOp2;
4197 if (shiftBy->isContainedIntOrIImmed())
4199 int shiftByValue = (int)shiftBy->AsIntConCommon()->IconValue();
4200 ins = genMapShiftInsToShiftByConstantIns(ins, shiftByValue);
4201 if (shiftByValue == 1)
4203 // There is no source in this case, as the shift by count is embedded in the instruction opcode itself.
4204 getEmitter()->emitInsRMW(ins, attr, storeInd);
4208 getEmitter()->emitInsRMW(ins, attr, storeInd, shiftBy);
4213 // We must have the number of bits to shift stored in ECX, since we constrained this node to
4214 // sit in ECX. If it isn't there already, LSRA expects the code generator to move it, since ECX
4215 // is a fixed single-register destination requirement.
4216 regNumber shiftReg = shiftBy->gtRegNum;
4217 genCopyRegIfNeeded(shiftBy, REG_RCX);
4219 // The shiftBy operand is implicit, so call the unary version of emitInsRMW.
4220 getEmitter()->emitInsRMW(ins, attr, storeInd);
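// For illustration: "*p = *p << 1" (p in rax) emits "shl dword ptr [rax], 1" via the
// shift-by-1 opcode form selected above, while a variable shift amount is moved into
// RCX if needed and emits "shl dword ptr [rax], cl".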
4224 //------------------------------------------------------------------------
4225 // genCodeForLclAddr: Generates the code for GT_LCL_FLD_ADDR/GT_LCL_VAR_ADDR.
4230 void CodeGen::genCodeForLclAddr(GenTree* tree)
4232 assert(tree->OperIs(GT_LCL_FLD_ADDR, GT_LCL_VAR_ADDR));
4234 var_types targetType = tree->TypeGet();
4235 regNumber targetReg = tree->gtRegNum;
4237 // Address of a local var. This by itself should never be allocated a register.
4238 // If it is worth storing the address in a register then it should be cse'ed into
4239 // a temp and that would be allocated a register.
4240 noway_assert(targetType == TYP_BYREF);
4241 noway_assert(!tree->InReg());
4243 inst_RV_TT(INS_lea, targetReg, tree, 0, EA_BYREF);
4244 genProduceReg(tree);
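// For illustration (the frame offset is hypothetical): for a local slot at [rbp-0x10]
// this emits "lea targetReg, [rbp-0x10]".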
4247 //------------------------------------------------------------------------
4248 // genCodeForLclFld: Produce code for a GT_LCL_FLD node.
4251 // tree - the GT_LCL_FLD node
4253 void CodeGen::genCodeForLclFld(GenTreeLclFld* tree)
4255 assert(tree->OperIs(GT_LCL_FLD));
4257 var_types targetType = tree->TypeGet();
4258 regNumber targetReg = tree->gtRegNum;
4260 noway_assert(targetReg != REG_NA);
4263 // Loading of TYP_SIMD12 (i.e. Vector3) field
4264 if (targetType == TYP_SIMD12)
4266 genLoadLclTypeSIMD12(tree);
4271 noway_assert(targetType != TYP_STRUCT);
4273 emitAttr size = emitTypeSize(targetType);
4274 unsigned offs = tree->gtLclOffs;
4275 unsigned varNum = tree->gtLclNum;
4276 assert(varNum < compiler->lvaCount);
4278 getEmitter()->emitIns_R_S(ins_Move_Extend(targetType, tree->InReg()), size, targetReg, varNum, offs);
4280 genProduceReg(tree);
4283 //------------------------------------------------------------------------
4284 // genCodeForLclVar: Produce code for a GT_LCL_VAR node.
4287 // tree - the GT_LCL_VAR node
4289 void CodeGen::genCodeForLclVar(GenTreeLclVar* tree)
4291 assert(tree->OperIs(GT_LCL_VAR));
4293 // lcl_vars are not defs
4294 assert((tree->gtFlags & GTF_VAR_DEF) == 0);
4296 bool isRegCandidate = compiler->lvaTable[tree->gtLclNum].lvIsRegCandidate();
4298 if (isRegCandidate && !(tree->gtFlags & GTF_VAR_DEATH))
4300 assert(tree->InReg() || (tree->gtFlags & GTF_SPILLED));
4303 // If this is a register candidate that has been spilled, genConsumeReg() will
4304 // reload it at the point of use. Otherwise, if it's not in a register, we load it here.
4306 if (!tree->InReg() && !(tree->gtFlags & GTF_SPILLED))
4308 assert(!isRegCandidate);
4310 #if defined(FEATURE_SIMD) && defined(_TARGET_X86_)
4311 // Loading of TYP_SIMD12 (i.e. Vector3) variable
4312 if (tree->TypeGet() == TYP_SIMD12)
4314 genLoadLclTypeSIMD12(tree);
4317 #endif // defined(FEATURE_SIMD) && defined(_TARGET_X86_)
4319 getEmitter()->emitIns_R_S(ins_Load(tree->TypeGet(), compiler->isSIMDTypeLocalAligned(tree->gtLclNum)),
4320 emitTypeSize(tree), tree->gtRegNum, tree->gtLclNum, 0);
4321 genProduceReg(tree);
4325 //------------------------------------------------------------------------
4326 // genCodeForStoreLclFld: Produce code for a GT_STORE_LCL_FLD node.
4329 // tree - the GT_STORE_LCL_FLD node
4331 void CodeGen::genCodeForStoreLclFld(GenTreeLclFld* tree)
4333 assert(tree->OperIs(GT_STORE_LCL_FLD));
4335 var_types targetType = tree->TypeGet();
4336 noway_assert(targetType != TYP_STRUCT);
4337 noway_assert(!tree->InReg());
4338 assert(!varTypeIsFloating(targetType) || (targetType == tree->gtOp1->TypeGet()));
4341 // storing of TYP_SIMD12 (i.e. Vector3) field
4342 if (tree->TypeGet() == TYP_SIMD12)
4344 genStoreLclTypeSIMD12(tree);
4347 #endif // FEATURE_SIMD
4349 GenTreePtr op1 = tree->gtGetOp1();
4350 genConsumeRegs(op1);
4351 getEmitter()->emitInsBinary(ins_Store(targetType), emitTypeSize(tree), tree, op1);
4353 genUpdateLife(tree);
4356 //------------------------------------------------------------------------
4357 // genCodeForStoreLclVar: Produce code for a GT_STORE_LCL_VAR node.
4360 // tree - the GT_STORE_LCL_VAR node
4362 void CodeGen::genCodeForStoreLclVar(GenTreeLclVar* tree)
4364 assert(tree->OperIs(GT_STORE_LCL_VAR));
4366 var_types targetType = tree->TypeGet();
4367 regNumber targetReg = tree->gtRegNum;
4368 emitter* emit = getEmitter();
4370 GenTreePtr op1 = tree->gtGetOp1();
4372 // The case of var = call, where the call returns a multi-reg value,
4373 // is handled separately.
4374 if (op1->gtSkipReloadOrCopy()->IsMultiRegCall())
4376 genMultiRegCallStoreToLocal(tree);
4380 noway_assert(targetType != TYP_STRUCT);
4381 assert(!varTypeIsFloating(targetType) || (targetType == op1->TypeGet()));
4383 unsigned lclNum = tree->gtLclNum;
4384 LclVarDsc* varDsc = &(compiler->lvaTable[lclNum]);
4386 // Ensure that lclVar nodes are typed correctly.
4387 assert(!varDsc->lvNormalizeOnStore() || (targetType == genActualType(varDsc->TypeGet())));
4389 #if !defined(_TARGET_64BIT_)
4390 if (targetType == TYP_LONG)
4392 genStoreLongLclVar(tree);
4395 #endif // !defined(_TARGET_64BIT_)
4398 // storing of TYP_SIMD12 (i.e. Vector3) field
4399 if (targetType == TYP_SIMD12)
4401 genStoreLclTypeSIMD12(tree);
4405 if (varTypeIsSIMD(targetType) && (targetReg != REG_NA) && op1->IsCnsIntOrI())
4407 // This is only possible for a zero-init.
4408 noway_assert(op1->IsIntegralConst(0));
4409 genSIMDZero(targetType, varDsc->lvBaseType, targetReg);
4410 genProduceReg(tree);
4413 #endif // FEATURE_SIMD
4415 genConsumeRegs(op1);
4417 if (targetReg == REG_NA)
4420 emit->emitInsMov(ins_Store(targetType, compiler->isSIMDTypeLocalAligned(lclNum)), emitTypeSize(targetType),
4422 varDsc->lvRegNum = REG_STK;
4426 // Look for the case where we have a constant zero which we've marked for reuse,
4427 // but which isn't actually in the register we want. In that case, it's better to create
4428 // zero in the target register, because an xor is smaller than a copy. Note that we could
4429 // potentially handle this in the register allocator, but we can't always catch it there
4430 // because the target may not have a register allocated for it yet.
4431 if (op1->isUsedFromReg() && (op1->gtRegNum != targetReg) && (op1->IsIntegralConst(0) || op1->IsFPZero()))
4433 op1->gtRegNum = REG_NA;
4434 op1->ResetReuseRegVal();
4437 if (!op1->isUsedFromReg())
4439 // Currently, we assume that the non-reg source of a GT_STORE_LCL_VAR writing to a register
4440 // must be a constant. However, in the future we might want to support an operand used from
4441 // memory. This is a bit tricky because we have to decide whether it can be used from memory
4442 // before register allocation,
4443 // and this would be a case where, once that's done, we need to mark that node as always
4444 // requiring a register - which we always assume now anyway, but once we "optimize" that
4445 // we'll have to take cases like this into account.
4446 assert((op1->gtRegNum == REG_NA) && op1->OperIsConst());
4447 genSetRegToConst(targetReg, targetType, op1);
4449 else if (op1->gtRegNum != targetReg)
4451 assert(op1->gtRegNum != REG_NA);
4452 emit->emitInsBinary(ins_Move_Extend(targetType, true), emitTypeSize(tree), tree, op1);
4457 if (targetReg != REG_NA)
4459 genProduceReg(tree);
4463 //------------------------------------------------------------------------
4464 // genCodeForIndir: Produce code for a GT_IND node.
4467 // tree - the GT_IND node
4469 void CodeGen::genCodeForIndir(GenTreeIndir* tree)
4471 assert(tree->OperIs(GT_IND));
4474 // Handling of Vector3 type values loaded through indirection.
4475 if (tree->TypeGet() == TYP_SIMD12)
4477 genLoadIndTypeSIMD12(tree);
4480 #endif // FEATURE_SIMD
4482 var_types targetType = tree->TypeGet();
4483 emitter* emit = getEmitter();
4485 GenTree* addr = tree->Addr();
4486 if (addr->IsCnsIntOrI() && addr->IsIconHandle(GTF_ICON_TLS_HDL))
4488 noway_assert(EA_ATTR(genTypeSize(targetType)) == EA_PTRSIZE);
4489 emit->emitIns_R_C(ins_Load(TYP_I_IMPL), EA_PTRSIZE, tree->gtRegNum, FLD_GLOBAL_FS,
4490 (int)addr->gtIntCon.gtIconVal);
4494 genConsumeAddress(addr);
4495 emit->emitInsMov(ins_Load(targetType), emitTypeSize(tree), tree);
4498 genProduceReg(tree);
4501 void CodeGen::genRegCopy(GenTree* treeNode)
4503 assert(treeNode->OperGet() == GT_COPY);
4504 GenTree* op1 = treeNode->gtOp.gtOp1;
4506 if (op1->IsMultiRegCall())
4510 GenTreeCopyOrReload* copyTree = treeNode->AsCopyOrReload();
4511 GenTreeCall* call = op1->AsCall();
4512 ReturnTypeDesc* retTypeDesc = call->GetReturnTypeDesc();
4513 unsigned regCount = retTypeDesc->GetReturnRegCount();
4515 for (unsigned i = 0; i < regCount; ++i)
4517 var_types type = retTypeDesc->GetReturnRegType(i);
4518 regNumber fromReg = call->GetRegNumByIdx(i);
4519 regNumber toReg = copyTree->GetRegNumByIdx(i);
4521 // A multi-reg GT_COPY node will have a valid reg only for those
4522 // positions whose corresponding result reg of the call node needs to be copied.
4524 if (toReg != REG_NA)
4526 assert(toReg != fromReg);
4527 inst_RV_RV(ins_Copy(type), toReg, fromReg, type);
4533 var_types targetType = treeNode->TypeGet();
4534 regNumber targetReg = treeNode->gtRegNum;
4535 assert(targetReg != REG_NA);
4537 // Check whether this node and the node from which we're copying the value have
4538 // different register types. This can happen if (currently iff) we have a SIMD
4539 // vector type that fits in an integer register, in which case it is passed as
4540 // an argument, or returned from a call, in an integer register and must be
4541 // copied if it's in an xmm register.
4543 bool srcFltReg = (varTypeIsFloating(op1) || varTypeIsSIMD(op1));
4544 bool tgtFltReg = (varTypeIsFloating(treeNode) || varTypeIsSIMD(treeNode));
4545 if (srcFltReg != tgtFltReg)
4552 ins = ins_CopyIntToFloat(op1->TypeGet(), treeNode->TypeGet());
4554 intReg = op1->gtRegNum;
4558 ins = ins_CopyFloatToInt(op1->TypeGet(), treeNode->TypeGet());
4560 fpReg = op1->gtRegNum;
4562 inst_RV_RV(ins, fpReg, intReg, targetType);
4566 inst_RV_RV(ins_Copy(targetType), targetReg, genConsumeReg(op1), targetType);
4571 // The lclVar will never be a def.
4572 // If it is a last use, the lclVar will be killed by genConsumeReg(), as usual, and genProduceReg will
4573 // appropriately set the gcInfo for the copied value.
4574 // If not, there are two cases we need to handle:
4575 // - If this is a TEMPORARY copy (indicated by the GTF_VAR_DEATH flag) the variable
4576 // will remain live in its original register.
4577 // genProduceReg() will appropriately set the gcInfo for the copied value,
4578 // and genConsumeReg will reset it.
4579 // - Otherwise, we need to update register info for the lclVar.
4581 GenTreeLclVarCommon* lcl = op1->AsLclVarCommon();
4582 assert((lcl->gtFlags & GTF_VAR_DEF) == 0);
4584 if ((lcl->gtFlags & GTF_VAR_DEATH) == 0 && (treeNode->gtFlags & GTF_VAR_DEATH) == 0)
4586 LclVarDsc* varDsc = &compiler->lvaTable[lcl->gtLclNum];
4588 // If we didn't just spill it (in genConsumeReg, above), then update the register info
4589 if (varDsc->lvRegNum != REG_STK)
4591 // The old location is dying
4592 genUpdateRegLife(varDsc, /*isBorn*/ false, /*isDying*/ true DEBUGARG(op1));
4594 gcInfo.gcMarkRegSetNpt(genRegMask(op1->gtRegNum));
4596 genUpdateVarReg(varDsc, treeNode);
4598 // The new location is going live
4599 genUpdateRegLife(varDsc, /*isBorn*/ true, /*isDying*/ false DEBUGARG(treeNode));
4605 genProduceReg(treeNode);
4608 //------------------------------------------------------------------------
4609 // genCodeForStoreInd: Produce code for a GT_STOREIND node.
4612 // tree - the GT_STOREIND node
4614 void CodeGen::genCodeForStoreInd(GenTreeStoreInd* tree)
4616 assert(tree->OperIs(GT_STOREIND));
4619 // Storing Vector3 of size 12 bytes through indirection
4620 if (tree->TypeGet() == TYP_SIMD12)
4622 genStoreIndTypeSIMD12(tree);
4625 #endif // FEATURE_SIMD
4627 GenTree* data = tree->Data();
4628 GenTree* addr = tree->Addr();
4629 var_types targetType = tree->TypeGet();
4631 assert(!varTypeIsFloating(targetType) || (targetType == data->TypeGet()));
4633 GCInfo::WriteBarrierForm writeBarrierForm = gcInfo.gcIsWriteBarrierCandidate(tree, data);
4634 if (writeBarrierForm != GCInfo::WBF_NoBarrier)
4636 // data and addr must be in registers.
4637 // Consume both registers so that any copies of interfering registers are taken care of.
4638 genConsumeOperands(tree);
4640 if (genEmitOptimizedGCWriteBarrier(writeBarrierForm, addr, data))
4645 // At this point, we should not have any interference.
4646 // That is, 'data' must not be in REG_ARG_0, as that is where 'addr' must go.
4647 noway_assert(data->gtRegNum != REG_ARG_0);
4649 // addr goes in REG_ARG_0
4650 genCopyRegIfNeeded(addr, REG_ARG_0);
4652 // data goes in REG_ARG_1
4653 genCopyRegIfNeeded(data, REG_ARG_1);
4655 genGCWriteBarrier(tree, writeBarrierForm);
4659 bool dataIsUnary = false;
4660 bool isRMWMemoryOp = tree->IsRMWMemoryOp();
4661 GenTree* rmwSrc = nullptr;
4663 // We must consume the operands in the proper execution order, so that liveness is
4664 // updated appropriately.
4665 genConsumeAddress(addr);
4667 // If tree represents a RMW memory op, then its data is a non-leaf node marked as contained,
4668 // and the non-indir operand of data is the source of the RMW memory op.
4671 assert(data->isContained() && !data->OperIsLeaf());
4673 GenTreePtr rmwDst = nullptr;
4675 dataIsUnary = (GenTree::OperIsUnary(data->OperGet()) != 0);
4678 if (tree->IsRMWDstOp1())
4680 rmwDst = data->gtGetOp1();
4681 rmwSrc = data->gtGetOp2();
4685 assert(tree->IsRMWDstOp2());
4686 rmwDst = data->gtGetOp2();
4687 rmwSrc = data->gtGetOp1();
4690 genConsumeRegs(rmwSrc);
4694 // *(p) = oper *(p): here addr = p, and rmwSrc = rmwDst = *(p), i.e. GT_IND(p).
4695 // For unary RMW ops, the src and dst of the RMW memory op are the same. Lower
4696 // clears operand counts on rmwSrc and we don't need to perform a
4697 // genConsumeReg() on it.
4698 assert(tree->IsRMWDstOp1());
4699 rmwSrc = data->gtGetOp1();
4700 rmwDst = data->gtGetOp1();
4701 assert(rmwSrc->isUsedFromMemory());
4704 assert(rmwSrc != nullptr);
4705 assert(rmwDst != nullptr);
4706 assert(Lowering::IndirsAreEquivalent(rmwDst, tree));
4710 genConsumeRegs(data);
4717 // generate code for unary RMW memory ops like neg/not
4718 getEmitter()->emitInsRMW(genGetInsForOper(data->OperGet(), data->TypeGet()), emitTypeSize(tree), tree);
4722 if (data->OperIsShiftOrRotate())
4724 // Generate code for shift RMW memory ops.
4725 // The data address needs to be op1 (it must be [addr] = [addr] <shift> <amount>, not [addr] =
4726 // <amount> <shift> [addr]).
4727 assert(tree->IsRMWDstOp1());
4728 assert(rmwSrc == data->gtGetOp2());
4729 genCodeForShiftRMW(tree);
4731 else if (data->OperGet() == GT_ADD && (rmwSrc->IsIntegralConst(1) || rmwSrc->IsIntegralConst(-1)))
4733 // Generate "inc/dec [mem]" instead of "add/sub [mem], 1".
4736 // 1) Global morph transforms GT_SUB(x, +/-1) into GT_ADD(x, -/+1).
4737 // 2) TODO-AMD64: Debugger routine NativeWalker::Decode() runs into
4738 // an assert while decoding ModR/M byte of "inc dword ptr [rax]".
4739 // It is not clear whether Decode() can handle all possible
4740 // addr modes with inc/dec. For this reason, inc/dec [mem]
4741 // is not generated while generating debuggable code. Update
4742 // the above if condition once Decode() routine is fixed.
4743 assert(rmwSrc->isContainedIntOrIImmed());
4744 instruction ins = rmwSrc->IsIntegralConst(1) ? INS_inc : INS_dec;
4745 getEmitter()->emitInsRMW(ins, emitTypeSize(tree), tree);
4749 // generate code for remaining binary RMW memory ops like add/sub/and/or/xor
4750 getEmitter()->emitInsRMW(genGetInsForOper(data->OperGet(), data->TypeGet()), emitTypeSize(tree),
4757 getEmitter()->emitInsMov(ins_Store(data->TypeGet()), emitTypeSize(tree), tree);
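// For illustration of the RMW paths above: "*p = *p + 5" (p in rax) folds to
// "add dword ptr [rax], 5", and "*p = *p + 1" becomes "inc dword ptr [rax]"
// (except in debuggable code; see the NativeWalker note above).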
4762 //------------------------------------------------------------------------
4763 // genCodeForSwap: Produce code for a GT_SWAP node.
4766 // tree - the GT_SWAP node
4768 void CodeGen::genCodeForSwap(GenTreeOp* tree)
4770 assert(tree->OperIs(GT_SWAP));
4772 // Swap is only supported for lclVar operands that are enregistered
4773 // We do not consume or produce any registers. Both operands remain enregistered.
4774 // However, the gc-ness may change.
4775 assert(genIsRegCandidateLocal(tree->gtOp1) && genIsRegCandidateLocal(tree->gtOp2));
4777 GenTreeLclVarCommon* lcl1 = tree->gtOp1->AsLclVarCommon();
4778 LclVarDsc* varDsc1 = &(compiler->lvaTable[lcl1->gtLclNum]);
4779 var_types type1 = varDsc1->TypeGet();
4780 GenTreeLclVarCommon* lcl2 = tree->gtOp2->AsLclVarCommon();
4781 LclVarDsc* varDsc2 = &(compiler->lvaTable[lcl2->gtLclNum]);
4782 var_types type2 = varDsc2->TypeGet();
4784 // We must have both int or both fp regs
4785 assert(!varTypeIsFloating(type1) || varTypeIsFloating(type2));
4787 // FP swap is not yet implemented (and should have NYI'd in LSRA)
4788 assert(!varTypeIsFloating(type1));
4790 regNumber oldOp1Reg = lcl1->gtRegNum;
4791 regMaskTP oldOp1RegMask = genRegMask(oldOp1Reg);
4792 regNumber oldOp2Reg = lcl2->gtRegNum;
4793 regMaskTP oldOp2RegMask = genRegMask(oldOp2Reg);
4795 // We don't call genUpdateVarReg because we don't have a tree node with the new register.
4796 varDsc1->lvRegNum = oldOp2Reg;
4797 varDsc2->lvRegNum = oldOp1Reg;
4800 emitAttr size = EA_PTRSIZE;
4801 if (varTypeGCtype(type1) != varTypeGCtype(type2))
4803 // If the type specified to the emitter is a GC type, it will swap the GC-ness of the registers.
4804 // Otherwise it will leave them alone, which is correct if they have the same GC-ness.
4807 inst_RV_RV(INS_xchg, oldOp1Reg, oldOp2Reg, TYP_I_IMPL, size);
4809 // Update the gcInfo.
4810 // Manually remove these regs from the gc sets (mostly to avoid confusing duplicative dump output)
4811 gcInfo.gcRegByrefSetCur &= ~(oldOp1RegMask | oldOp2RegMask);
4812 gcInfo.gcRegGCrefSetCur &= ~(oldOp1RegMask | oldOp2RegMask);
4814 // gcMarkRegPtrVal will do the appropriate thing for non-gc types.
4815 // It will also dump the updates.
4816 gcInfo.gcMarkRegPtrVal(oldOp2Reg, type1);
4817 gcInfo.gcMarkRegPtrVal(oldOp1Reg, type2);
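// For illustration: swapping two enregistered int locals living in rax and rdx emits a
// single "xchg rax, rdx"; when their GC-ness differs, passing a GC type/size to the
// emitter (per the comment above) also swaps the registers' GC-ness.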
4820 //------------------------------------------------------------------------
4821 // genEmitOptimizedGCWriteBarrier: Generate write barrier store using the optimized
4822 // helper functions.
4825 // writeBarrierForm - the write barrier form to use
4826 // addr - the address at which to do the store
4827 // data - the data to store
4830 // true if an optimized write barrier form was used, false if not. If this
4831 // function returns false, the caller must emit a "standard" write barrier.
4833 bool CodeGen::genEmitOptimizedGCWriteBarrier(GCInfo::WriteBarrierForm writeBarrierForm, GenTree* addr, GenTree* data)
4835 assert(writeBarrierForm != GCInfo::WBF_NoBarrier);
4837 #if defined(_TARGET_X86_) && NOGC_WRITE_BARRIERS
4838 bool useOptimizedWriteBarriers = true;
4841 useOptimizedWriteBarriers =
4842 (writeBarrierForm != GCInfo::WBF_NoBarrier_CheckNotHeapInDebug); // This one is always a call to a C++ method.
4845 if (!useOptimizedWriteBarriers)
4850 const static int regToHelper[2][8] = {
4851 // If the target is known to be in managed memory
4853 CORINFO_HELP_ASSIGN_REF_EAX, CORINFO_HELP_ASSIGN_REF_ECX, -1, CORINFO_HELP_ASSIGN_REF_EBX, -1,
4854 CORINFO_HELP_ASSIGN_REF_EBP, CORINFO_HELP_ASSIGN_REF_ESI, CORINFO_HELP_ASSIGN_REF_EDI,
4857 // Don't know if the target is in managed memory
4859 CORINFO_HELP_CHECKED_ASSIGN_REF_EAX, CORINFO_HELP_CHECKED_ASSIGN_REF_ECX, -1,
4860 CORINFO_HELP_CHECKED_ASSIGN_REF_EBX, -1, CORINFO_HELP_CHECKED_ASSIGN_REF_EBP,
4861 CORINFO_HELP_CHECKED_ASSIGN_REF_ESI, CORINFO_HELP_CHECKED_ASSIGN_REF_EDI,
4865 noway_assert(regToHelper[0][REG_EAX] == CORINFO_HELP_ASSIGN_REF_EAX);
4866 noway_assert(regToHelper[0][REG_ECX] == CORINFO_HELP_ASSIGN_REF_ECX);
4867 noway_assert(regToHelper[0][REG_EBX] == CORINFO_HELP_ASSIGN_REF_EBX);
4868 noway_assert(regToHelper[0][REG_ESP] == -1);
4869 noway_assert(regToHelper[0][REG_EBP] == CORINFO_HELP_ASSIGN_REF_EBP);
4870 noway_assert(regToHelper[0][REG_ESI] == CORINFO_HELP_ASSIGN_REF_ESI);
4871 noway_assert(regToHelper[0][REG_EDI] == CORINFO_HELP_ASSIGN_REF_EDI);
4873 noway_assert(regToHelper[1][REG_EAX] == CORINFO_HELP_CHECKED_ASSIGN_REF_EAX);
4874 noway_assert(regToHelper[1][REG_ECX] == CORINFO_HELP_CHECKED_ASSIGN_REF_ECX);
4875 noway_assert(regToHelper[1][REG_EBX] == CORINFO_HELP_CHECKED_ASSIGN_REF_EBX);
4876 noway_assert(regToHelper[1][REG_ESP] == -1);
4877 noway_assert(regToHelper[1][REG_EBP] == CORINFO_HELP_CHECKED_ASSIGN_REF_EBP);
4878 noway_assert(regToHelper[1][REG_ESI] == CORINFO_HELP_CHECKED_ASSIGN_REF_ESI);
4879 noway_assert(regToHelper[1][REG_EDI] == CORINFO_HELP_CHECKED_ASSIGN_REF_EDI);
4881 regNumber reg = data->gtRegNum;
4882 noway_assert((reg != REG_ESP) && (reg != REG_WRITE_BARRIER));
4884 // Generate the following code:
4886 // call write_barrier_helper_reg
4888 // addr goes in REG_WRITE_BARRIER
4889 genCopyRegIfNeeded(addr, REG_WRITE_BARRIER);
4891 unsigned tgtAnywhere = 0;
4892 if (writeBarrierForm != GCInfo::WBF_BarrierUnchecked)
4897 // We might want to call a modified version of genGCWriteBarrier() to get the benefit of
4898 // the FEATURE_COUNT_GC_WRITE_BARRIERS code there, but that code doesn't look like it works
4899 // with rationalized RyuJIT IR. So, for now, just emit the helper call directly here.
4901 genEmitHelperCall(regToHelper[tgtAnywhere][reg],
4903 EA_PTRSIZE); // retSize
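// For illustration: storing a ref held in EBX through an address that may not point into
// the GC heap moves the address into REG_WRITE_BARRIER and emits
// "call CORINFO_HELP_CHECKED_ASSIGN_REF_EBX".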
4906 #else // !defined(_TARGET_X86_) || !NOGC_WRITE_BARRIERS
4908 #endif // !defined(_TARGET_X86_) || !NOGC_WRITE_BARRIERS
4911 // Produce code for a GT_CALL node
4912 void CodeGen::genCallInstruction(GenTreeCall* call)
4914 genAlignStackBeforeCall(call);
4916 gtCallTypes callType = (gtCallTypes)call->gtCallType;
4918 IL_OFFSETX ilOffset = BAD_IL_OFFSET;
4920 // all virtuals should have been expanded into a control expression
4921 assert(!call->IsVirtual() || call->gtControlExpr || call->gtCallAddr);
4923 // Insert a GS check if necessary
4924 if (call->IsTailCallViaHelper())
4926 if (compiler->getNeedsGSSecurityCookie())
4928 #if FEATURE_FIXED_OUT_ARGS
4929 // If either of the conditions below is true, we will need a temporary register in order to perform the GS
4930 // cookie check. When FEATURE_FIXED_OUT_ARGS is disabled, we save and restore the temporary register using
4931 // push/pop. When FEATURE_FIXED_OUT_ARGS is enabled, however, we need an alternative solution. For now,
4932 // though, the tail prefix is ignored on all platforms that use fixed out args, so we should never hit this case.
4934 assert(compiler->gsGlobalSecurityCookieAddr == nullptr);
4935 assert((int)compiler->gsGlobalSecurityCookieVal == (ssize_t)compiler->gsGlobalSecurityCookieVal);
4937 genEmitGSCookieCheck(true);
4941 // Consume all the arg regs
4942 for (GenTreePtr list = call->gtCallLateArgs; list; list = list->MoveNext())
4944 assert(list->OperIsList());
4946 GenTreePtr argNode = list->Current();
4948 fgArgTabEntryPtr curArgTabEntry = compiler->gtArgEntryByNode(call, argNode->gtSkipReloadOrCopy());
4949 assert(curArgTabEntry);
4951 if (curArgTabEntry->regNum == REG_STK)
4956 #ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
4957 // Deal with multi register passed struct args.
4958 if (argNode->OperGet() == GT_FIELD_LIST)
4960 GenTreeFieldList* fieldListPtr = argNode->AsFieldList();
4961 unsigned iterationNum = 0;
4962 for (; fieldListPtr != nullptr; fieldListPtr = fieldListPtr->Rest(), iterationNum++)
4964 GenTreePtr putArgRegNode = fieldListPtr->gtOp.gtOp1;
4965 assert(putArgRegNode->gtOper == GT_PUTARG_REG);
4966 regNumber argReg = REG_NA;
4968 if (iterationNum == 0)
4970 argReg = curArgTabEntry->regNum;
4974 assert(iterationNum == 1);
4975 argReg = curArgTabEntry->otherRegNum;
4978 genConsumeReg(putArgRegNode);
4980 // Validate the putArgRegNode has the right type.
4981 assert(putArgRegNode->TypeGet() ==
4982 compiler->GetTypeFromClassificationAndSizes(curArgTabEntry->structDesc
4983 .eightByteClassifications[iterationNum],
4984 curArgTabEntry->structDesc
4985 .eightByteSizes[iterationNum]));
4986 if (putArgRegNode->gtRegNum != argReg)
4988 inst_RV_RV(ins_Move_Extend(putArgRegNode->TypeGet(), putArgRegNode->InReg()), argReg,
4989 putArgRegNode->gtRegNum);
4994 #endif // FEATURE_UNIX_AMD64_STRUCT_PASSING
4996 regNumber argReg = curArgTabEntry->regNum;
4997 genConsumeReg(argNode);
4998 if (argNode->gtRegNum != argReg)
5000 inst_RV_RV(ins_Move_Extend(argNode->TypeGet(), argNode->InReg()), argReg, argNode->gtRegNum);
5005 // In the case of a varargs call,
5006 // the ABI dictates that if we have floating point args,
5007 // we must pass the enregistered arguments in both the
5008 // integer and floating point registers. So, let's do that.
5009 if (call->IsVarargs() && varTypeIsFloating(argNode))
5011 regNumber targetReg = compiler->getCallArgIntRegister(argNode->gtRegNum);
5012 instruction ins = ins_CopyFloatToInt(argNode->TypeGet(), TYP_LONG);
5013 inst_RV_RV(ins, argNode->gtRegNum, targetReg);
5015 #endif // FEATURE_VARARG
5018 #if defined(_TARGET_X86_) || defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
5019 // The call will pop its arguments.
5020 // for each putarg_stk:
5021 ssize_t stackArgBytes = 0;
5022 GenTreePtr args = call->gtCallArgs;
5025 GenTreePtr arg = args->gtOp.gtOp1;
5026 if (arg->OperGet() != GT_ARGPLACE && !(arg->gtFlags & GTF_LATE_ARG))
5028 #if defined(_TARGET_X86_)
5029 if ((arg->OperGet() == GT_PUTARG_STK) && (arg->gtGetOp1()->OperGet() == GT_FIELD_LIST))
5031 fgArgTabEntryPtr curArgTabEntry = compiler->gtArgEntryByNode(call, arg);
5032 assert(curArgTabEntry);
5033 stackArgBytes += curArgTabEntry->numSlots * TARGET_POINTER_SIZE;
5036 #endif // defined(_TARGET_X86_)
5038 #ifdef FEATURE_PUT_STRUCT_ARG_STK
5039 if (genActualType(arg->TypeGet()) == TYP_STRUCT)
5041 assert(arg->OperGet() == GT_PUTARG_STK);
5043 GenTreeObj* obj = arg->gtGetOp1()->AsObj();
5044 unsigned argBytes = (unsigned)roundUp(obj->gtBlkSize, TARGET_POINTER_SIZE);
5046 fgArgTabEntryPtr curArgTabEntry = compiler->gtArgEntryByNode(call, arg);
5047 assert((curArgTabEntry->numSlots * TARGET_POINTER_SIZE) == argBytes);
5049 stackArgBytes += argBytes;
5052 #endif // FEATURE_PUT_STRUCT_ARG_STK
5055 stackArgBytes += genTypeSize(genActualType(arg->TypeGet()));
5058 args = args->gtOp.gtOp2;
5060 #endif // defined(_TARGET_X86_) || defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
5062 // Insert a null check on "this" pointer if asked.
5063 if (call->NeedsNullCheck())
5065 const regNumber regThis = genGetThisArgReg(call);
5066 getEmitter()->emitIns_AR_R(INS_cmp, EA_4BYTE, regThis, regThis, 0);
5069 // Either gtControlExpr != null or gtCallAddr != null or it is a direct non-virtual call to a user or helper method.
5070 CORINFO_METHOD_HANDLE methHnd;
5071 GenTree* target = call->gtControlExpr;
5072 if (callType == CT_INDIRECT)
5074 assert(target == nullptr);
5075 target = call->gtCallAddr;
5080 methHnd = call->gtCallMethHnd;
5083 CORINFO_SIG_INFO* sigInfo = nullptr;
5085 // Pass the call signature information down into the emitter so the emitter can associate
5086 // native call sites with the signatures they were generated from.
5087 if (callType != CT_HELPER)
5089 sigInfo = call->callSig;
5093 // If fast tail call, then we are done. In this case we set up the args (both reg args
5094 // and stack args in incoming arg area) and call target in rax. Epilog sequence would
5095 // generate "jmp rax".
5096 if (call->IsFastTailCall())
5098 // Don't support fast tail calling JIT helpers
5099 assert(callType != CT_HELPER);
5101 // Fast tail calls materialize call target either in gtControlExpr or in gtCallAddr.
5102 assert(target != nullptr);
5104 genConsumeReg(target);
5105 genCopyRegIfNeeded(target, REG_RAX);
5109 // For a pinvoke to unmanaged code we emit a label to clear
5110 // the GC pointer state before the callsite.
5111 // We can't utilize the typical lazy killing of GC pointers
5112 // at (or inside) the callsite.
5113 if (call->IsUnmanaged())
5115 genDefineTempLabel(genCreateTempLabel());
5118 // Determine return value size(s).
5119 ReturnTypeDesc* retTypeDesc = call->GetReturnTypeDesc();
5120 emitAttr retSize = EA_PTRSIZE;
5121 emitAttr secondRetSize = EA_UNKNOWN;
5123 if (call->HasMultiRegRetVal())
5125 retSize = emitTypeSize(retTypeDesc->GetReturnRegType(0));
5126 secondRetSize = emitTypeSize(retTypeDesc->GetReturnRegType(1));
5130 assert(!varTypeIsStruct(call));
5132 if (call->gtType == TYP_REF || call->gtType == TYP_ARRAY)
5136 else if (call->gtType == TYP_BYREF)
5142 bool fPossibleSyncHelperCall = false;
5143 CorInfoHelpFunc helperNum = CORINFO_HELP_UNDEF;
5145 // We need to propagate the IL offset information to the call instruction, so we can emit
5146 // an IL to native mapping record for the call, to support managed return value debugging.
5147 // We don't want tail call helper calls that were converted from normal calls to get a record,
5148 // so we skip this hash table lookup logic in that case.
5149 if (compiler->opts.compDbgInfo && compiler->genCallSite2ILOffsetMap != nullptr && !call->IsTailCall())
5151 (void)compiler->genCallSite2ILOffsetMap->Lookup(call, &ilOffset);
5154 #if defined(_TARGET_X86_)
5155 bool fCallerPop = call->CallerPop();
5158 if (!call->IsUnmanaged())
5160 CorInfoCallConv callConv = CORINFO_CALLCONV_DEFAULT;
5162 if ((callType != CT_HELPER) && call->callSig)
5164 callConv = call->callSig->callConv;
5167 fCallerPop |= IsCallerPop(callConv);
5169 #endif // UNIX_X86_ABI
5171 // If the callee pops the arguments, we pass a positive value as the argSize, and the emitter will
5172 // adjust its stack level accordingly.
5173 // If the caller needs to explicitly pop its arguments, we must pass a negative value, and then do the
5174 // pop when we're done.
5175 ssize_t argSizeForEmitter = stackArgBytes;
5178 argSizeForEmitter = -stackArgBytes;
5180 #endif // defined(_TARGET_X86_)
5182 #ifdef FEATURE_AVX_SUPPORT
5183 // When it's a PInvoke call and the call type is USER function, we issue VZEROUPPER here
5184 // if the function contains 256bit AVX instructions; this is to avoid the AVX-256 to legacy SSE
5185 // transition penalty, assuming the user function contains legacy SSE instructions.
5186 // To limit the code size impact, we only issue VZEROUPPER before the PInvoke call, not after it,
5187 // because the legacy SSE to AVX transition penalty only arises when there is a preceding
5188 // AVX-256 to legacy SSE transition.
5189 if (call->IsPInvoke() && (call->gtCallType == CT_USER_FUNC) && getEmitter()->Contains256bitAVX())
5191 assert(compiler->getSIMDInstructionSet() == InstructionSet_AVX);
5192 instGen(INS_vzeroupper);
5196 if (target != nullptr)
5199 if (call->IsVirtualStub() && (call->gtCallType == CT_INDIRECT))
5201 // On x86, we need to generate a very specific pattern for indirect VSD calls:
5204 // call dword ptr [eax]
5206 // Where EAX is also used as an argument to the stub dispatch helper. Make
5207 // sure that the call target address is computed into EAX in this case.
5209 assert(REG_VIRTUAL_STUB_PARAM == REG_VIRTUAL_STUB_TARGET);
5211 assert(target->isContainedIndir());
5212 assert(target->OperGet() == GT_IND);
5214 GenTree* addr = target->AsIndir()->Addr();
5215 assert(addr->isUsedFromReg());
5217 genConsumeReg(addr);
5218 genCopyRegIfNeeded(addr, REG_VIRTUAL_STUB_TARGET);
5220 getEmitter()->emitIns_Nop(3);
5223 getEmitter()->emitIns_Call(emitter::EmitCallType(emitter::EC_INDIR_ARD),
5225 INDEBUG_LDISASM_COMMA(sigInfo)
5229 MULTIREG_HAS_SECOND_GC_RET_ONLY_ARG(secondRetSize),
5230 gcInfo.gcVarPtrSetCur,
5231 gcInfo.gcRegGCrefSetCur,
5232 gcInfo.gcRegByrefSetCur,
5233 ilOffset, REG_VIRTUAL_STUB_TARGET, REG_NA, 1, 0);
5238 if (target->isContainedIndir())
5240 if (target->AsIndir()->HasBase() && target->AsIndir()->Base()->isContainedIntOrIImmed())
5242 // Note that if gtControlExpr is an indir of an absolute address, we mark it as
5243 // contained only if it can be encoded as a PC-relative offset.
5244 assert(target->AsIndir()->Base()->AsIntConCommon()->FitsInAddrBase(compiler));
5247 genEmitCall(emitter::EC_FUNC_TOKEN_INDIR,
5249 INDEBUG_LDISASM_COMMA(sigInfo)
5250 (void*) target->AsIndir()->Base()->AsIntConCommon()->IconValue()
5251 X86_ARG(argSizeForEmitter),
5253 MULTIREG_HAS_SECOND_GC_RET_ONLY_ARG(secondRetSize),
5260 genEmitCall(emitter::EC_INDIR_ARD,
5262 INDEBUG_LDISASM_COMMA(sigInfo)
5264 X86_ARG(argSizeForEmitter),
5266 MULTIREG_HAS_SECOND_GC_RET_ONLY_ARG(secondRetSize),
5273 // We have already generated code for gtControlExpr evaluating it into a register.
5274 // We just need to emit "call reg" in this case.
5275 assert(genIsValidIntReg(target->gtRegNum));
5278 genEmitCall(emitter::EC_INDIR_R,
5280 INDEBUG_LDISASM_COMMA(sigInfo)
5282 X86_ARG(argSizeForEmitter),
5284 MULTIREG_HAS_SECOND_GC_RET_ONLY_ARG(secondRetSize),
5286 genConsumeReg(target));
5290 #ifdef FEATURE_READYTORUN_COMPILER
5291 else if (call->gtEntryPoint.addr != nullptr)
5294 genEmitCall((call->gtEntryPoint.accessType == IAT_VALUE) ? emitter::EC_FUNC_TOKEN
5295 : emitter::EC_FUNC_TOKEN_INDIR,
5297 INDEBUG_LDISASM_COMMA(sigInfo)
5298 (void*) call->gtEntryPoint.addr
5299 X86_ARG(argSizeForEmitter),
5301 MULTIREG_HAS_SECOND_GC_RET_ONLY_ARG(secondRetSize),
5308 // Generate a direct call to a non-virtual user defined or helper method
5309 assert(callType == CT_HELPER || callType == CT_USER_FUNC);
5311 void* addr = nullptr;
5312 if (callType == CT_HELPER)
5314 // Direct call to a helper method.
5315 helperNum = compiler->eeGetHelperNum(methHnd);
5316 noway_assert(helperNum != CORINFO_HELP_UNDEF);
5318 void* pAddr = nullptr;
5319 addr = compiler->compGetHelperFtn(helperNum, (void**)&pAddr);
5321 if (addr == nullptr)
5326 // tracking of region protected by the monitor in synchronized methods
5327 if (compiler->info.compFlags & CORINFO_FLG_SYNCH)
5329 fPossibleSyncHelperCall = true;
5334 // Direct call to a non-virtual user function.
5335 addr = call->gtDirectCallAddress;
5338 // Non-virtual direct calls to known addresses
5341 genEmitCall(emitter::EC_FUNC_TOKEN,
5343 INDEBUG_LDISASM_COMMA(sigInfo)
5345 X86_ARG(argSizeForEmitter),
5347 MULTIREG_HAS_SECOND_GC_RET_ONLY_ARG(secondRetSize),
5352 // if it was a pinvoke we may have needed to get the address of a label
5353 if (genPendingCallLabel)
5355 assert(call->IsUnmanaged());
5356 genDefineTempLabel(genPendingCallLabel);
5357 genPendingCallLabel = nullptr;
5361 // All Callee arg registers are trashed and no longer contain any GC pointers.
5362 // TODO-XArch-Bug?: As a matter of fact shouldn't we be killing all of callee trashed regs here?
5363 // For now we will assert that other than arg regs gc ref/byref set doesn't contain any other
5364 // registers from RBM_CALLEE_TRASH.
5365 assert((gcInfo.gcRegGCrefSetCur & (RBM_CALLEE_TRASH & ~RBM_ARG_REGS)) == 0);
5366 assert((gcInfo.gcRegByrefSetCur & (RBM_CALLEE_TRASH & ~RBM_ARG_REGS)) == 0);
5367 gcInfo.gcRegGCrefSetCur &= ~RBM_ARG_REGS;
5368 gcInfo.gcRegByrefSetCur &= ~RBM_ARG_REGS;
5370 var_types returnType = call->TypeGet();
5371 if (returnType != TYP_VOID)
5374 if (varTypeIsFloating(returnType))
5376 // Spill the value from the fp stack.
5377 // Then, load it into the target register.
5378 call->gtFlags |= GTF_SPILL;
5379 regSet.rsSpillFPStack(call);
5380 call->gtFlags |= GTF_SPILLED;
5381 call->gtFlags &= ~GTF_SPILL;
5384 #endif // _TARGET_X86_
5386 regNumber returnReg;
5388 if (call->HasMultiRegRetVal())
5390 assert(retTypeDesc != nullptr);
5391 unsigned regCount = retTypeDesc->GetReturnRegCount();
5393 // If regs allocated to call node are different from ABI return
5394 // regs in which the call has returned its result, move the result
5395 // to regs allocated to call node.
5396 for (unsigned i = 0; i < regCount; ++i)
5398 var_types regType = retTypeDesc->GetReturnRegType(i);
5399 returnReg = retTypeDesc->GetABIReturnReg(i);
5400 regNumber allocatedReg = call->GetRegNumByIdx(i);
5401 if (returnReg != allocatedReg)
5403 inst_RV_RV(ins_Copy(regType), allocatedReg, returnReg, regType);
5408 // A Vector3 return value is stored in xmm0 and xmm1.
5409 // RyuJIT assumes that the upper unused bits of xmm1 are cleared but
5410 // the native compiler doesn't guarantee it.
5411 if (returnType == TYP_SIMD12)
5413 returnReg = retTypeDesc->GetABIReturnReg(1);
5414 // Clear the upper 96 bits (everything above the low float) by two byte-shift instructions.
5415 // retReg = retReg << 96
5416 // retReg = retReg >> 96
5417 getEmitter()->emitIns_R_I(INS_pslldq, emitActualTypeSize(TYP_SIMD12), returnReg, 12);
5418 getEmitter()->emitIns_R_I(INS_psrldq, emitActualTypeSize(TYP_SIMD12), returnReg, 12);
5420 #endif // FEATURE_SIMD
5425 if (call->IsHelperCall(compiler, CORINFO_HELP_INIT_PINVOKE_FRAME))
5427 // The x86 CORINFO_HELP_INIT_PINVOKE_FRAME helper uses a custom calling convention that returns with
5428 // TCB in REG_PINVOKE_TCB. AMD64/ARM64 use the standard calling convention. fgMorphCall() sets the
5429 // correct argument registers.
5430 returnReg = REG_PINVOKE_TCB;
5433 #endif // _TARGET_X86_
5434 if (varTypeIsFloating(returnType))
5436 returnReg = REG_FLOATRET;
5440 returnReg = REG_INTRET;
5443 if (call->gtRegNum != returnReg)
5445 inst_RV_RV(ins_Copy(returnType), call->gtRegNum, returnReg, returnType);
5449 genProduceReg(call);
5453 // If there is nothing next, that means the result is thrown away, so this value is not live.
5454 // However, for minopts or debuggable code, we keep it live to support managed return value debugging.
5455 if ((call->gtNext == nullptr) && !compiler->opts.MinOpts() && !compiler->opts.compDbgCode)
5457 gcInfo.gcMarkRegSetNpt(RBM_INTRET);
5460 #if !FEATURE_EH_FUNCLETS
5461 //-------------------------------------------------------------------------
5462 // Create a label for tracking of region protected by the monitor in synchronized methods.
5463 // This needs to be here, rather than above where fPossibleSyncHelperCall is set,
5464 // so the GC state vars have been updated before creating the label.
5466 if (fPossibleSyncHelperCall)
5470 case CORINFO_HELP_MON_ENTER:
5471 case CORINFO_HELP_MON_ENTER_STATIC:
5472 noway_assert(compiler->syncStartEmitCookie == NULL);
5473 compiler->syncStartEmitCookie =
5474 getEmitter()->emitAddLabel(gcInfo.gcVarPtrSetCur, gcInfo.gcRegGCrefSetCur, gcInfo.gcRegByrefSetCur);
5475 noway_assert(compiler->syncStartEmitCookie != NULL);
5477 case CORINFO_HELP_MON_EXIT:
5478 case CORINFO_HELP_MON_EXIT_STATIC:
5479 noway_assert(compiler->syncEndEmitCookie == NULL);
5480 compiler->syncEndEmitCookie =
5481 getEmitter()->emitAddLabel(gcInfo.gcVarPtrSetCur, gcInfo.gcRegGCrefSetCur, gcInfo.gcRegByrefSetCur);
5482 noway_assert(compiler->syncEndEmitCookie != NULL);
5488 #endif // !FEATURE_EH_FUNCLETS
5490 unsigned stackAdjustBias = 0;
5492 #if defined(_TARGET_X86_)
5493 // Is the caller supposed to pop the arguments?
5494 if (fCallerPop && (stackArgBytes != 0))
5496 stackAdjustBias = stackArgBytes;
5499 SubtractStackLevel(stackArgBytes);
5500 #endif // _TARGET_X86_
5502 genRemoveAlignmentAfterCall(call, stackAdjustBias);
5505 // Produce code for a GT_JMP node.
5506 // The arguments of the caller need to be transferred to the callee before exiting the caller.
5507 // The actual jump to callee is generated as part of caller epilog sequence.
5508 // Therefore the codegen of GT_JMP is to ensure that the callee arguments are correctly set up.
5509 void CodeGen::genJmpMethod(GenTreePtr jmp)
5511 assert(jmp->OperGet() == GT_JMP);
5512 assert(compiler->compJmpOpUsed);
5514 // If no arguments, nothing to do
5515 if (compiler->info.compArgsCount == 0)
5520 // Make sure register arguments are in their initial registers
5521 // and stack arguments are put back as well.
5525 // First move any enregistered stack arguments back to the stack.
5526 // At the same time, any reg arg not in the correct reg is moved back to its stack location.
5528 // We are not strictly required to spill reg args that are not in the desired reg for a jmp call.
5529 // But that would require us to deal with circularity while moving values around. Spilling
5530 // to stack makes the implementation simple, which is not a bad trade off given Jmp calls
5531 // are not frequent.
5532 for (varNum = 0; (varNum < compiler->info.compArgsCount); varNum++)
5534 varDsc = compiler->lvaTable + varNum;
5536 if (varDsc->lvPromoted)
5538 noway_assert(varDsc->lvFieldCnt == 1); // We only handle one field here
5540 unsigned fieldVarNum = varDsc->lvFieldLclStart;
5541 varDsc = compiler->lvaTable + fieldVarNum;
5543 noway_assert(varDsc->lvIsParam);
5545 if (varDsc->lvIsRegArg && (varDsc->lvRegNum != REG_STK))
5547 // Skip reg args that are already in the right register for the jmp call.
5548 // If not, we will spill such args to their stack locations.
5550 // If we need to generate a tail call profiler hook, then spill all
5551 // arg regs to free them up for the callback.
5552 if (!compiler->compIsProfilerHookNeeded() && (varDsc->lvRegNum == varDsc->lvArgReg))
5557 else if (varDsc->lvRegNum == REG_STK)
5559 // Skip args that are currently living on the stack.
5563 // If we came here it means either a reg argument not in the right register or
5564 // a stack argument currently living in a register. In either case the following
5565 // assert should hold.
5566 assert(varDsc->lvRegNum != REG_STK);
5568 assert(!varDsc->lvIsStructField || (compiler->lvaTable[varDsc->lvParentLcl].lvFieldCnt == 1));
5569 var_types storeType = genActualType(varDsc->lvaArgType()); // We own the memory and can use the full move.
5570 getEmitter()->emitIns_S_R(ins_Store(storeType), emitTypeSize(storeType), varDsc->lvRegNum, varNum, 0);
5572 // Update lvRegNum life and GC info to indicate lvRegNum is dead and varDsc stack slot is going live.
5573 // Note that we cannot modify varDsc->lvRegNum here because another basic block may not be expecting it.
5574 // Therefore manually update life of varDsc->lvRegNum.
5575 regMaskTP tempMask = varDsc->lvRegMask();
5576 regSet.RemoveMaskVars(tempMask);
5577 gcInfo.gcMarkRegSetNpt(tempMask);
5578 if (compiler->lvaIsGCTracked(varDsc))
5581 if (!VarSetOps::IsMember(compiler, gcInfo.gcVarPtrSetCur, varDsc->lvVarIndex))
5583 JITDUMP("\t\t\t\t\t\t\tVar V%02u becoming live\n", varNum);
5587 JITDUMP("\t\t\t\t\t\t\tVar V%02u continuing live\n", varNum);
5591 VarSetOps::AddElemD(compiler, gcInfo.gcVarPtrSetCur, varDsc->lvVarIndex);
5595 #ifdef PROFILING_SUPPORTED
5596 // At this point all arg regs are free.
5597 // Emit tail call profiler callback.
5598 genProfilingLeaveCallback(CORINFO_HELP_PROF_FCN_TAILCALL);
5601 // Next, move any un-enregistered register arguments back to their registers.
5602 regMaskTP fixedIntArgMask = RBM_NONE; // tracks the int arg regs occupying fixed args in case of a vararg method.
5603 unsigned firstArgVarNum = BAD_VAR_NUM; // varNum of the first argument in case of a vararg method.
5604 for (varNum = 0; (varNum < compiler->info.compArgsCount); varNum++)
5606 varDsc = compiler->lvaTable + varNum;
5607 if (varDsc->lvPromoted)
5609 noway_assert(varDsc->lvFieldCnt == 1); // We only handle one field here
5611 unsigned fieldVarNum = varDsc->lvFieldLclStart;
5612 varDsc = compiler->lvaTable + fieldVarNum;
5614 noway_assert(varDsc->lvIsParam);
5616 // Skip if arg not passed in a register.
5617 if (!varDsc->lvIsRegArg)
5622 #if defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
5623 if (varTypeIsStruct(varDsc))
5625 CORINFO_CLASS_HANDLE typeHnd = varDsc->lvVerTypeInfo.GetClassHandle();
5626 assert(typeHnd != nullptr);
5628 SYSTEMV_AMD64_CORINFO_STRUCT_REG_PASSING_DESCRIPTOR structDesc;
5629 compiler->eeGetSystemVAmd64PassStructInRegisterDescriptor(typeHnd, &structDesc);
5630 assert(structDesc.passedInRegisters);
5632 unsigned __int8 offset0 = 0;
5633 unsigned __int8 offset1 = 0;
5634 var_types type0 = TYP_UNKNOWN;
5635 var_types type1 = TYP_UNKNOWN;
5637 // Get the eightbyte data
5638 compiler->GetStructTypeOffset(structDesc, &type0, &type1, &offset0, &offset1);
5640 // Move the values into the right registers.
5643 // Update varDsc->lvArgReg and lvOtherArgReg life and GC Info to indicate varDsc stack slot is dead and
5644 // argReg is going live. Note that we cannot modify varDsc->lvRegNum and lvOtherArgReg here because another
5645 // basic block may not be expecting it. Therefore manually update life of argReg. Note that GT_JMP marks
5646 // the end of the basic block and after which reg life and gc info will be recomputed for the new block in
5647 // genCodeForBBList().
5648 if (type0 != TYP_UNKNOWN)
5650 getEmitter()->emitIns_R_S(ins_Load(type0), emitTypeSize(type0), varDsc->lvArgReg, varNum, offset0);
5651 regSet.rsMaskVars |= genRegMask(varDsc->lvArgReg);
5652 gcInfo.gcMarkRegPtrVal(varDsc->lvArgReg, type0);
5655 if (type1 != TYP_UNKNOWN)
5657 getEmitter()->emitIns_R_S(ins_Load(type1), emitTypeSize(type1), varDsc->lvOtherArgReg, varNum, offset1);
5658 regSet.rsMaskVars |= genRegMask(varDsc->lvOtherArgReg);
5659 gcInfo.gcMarkRegPtrVal(varDsc->lvOtherArgReg, type1);
5662 if (varDsc->lvTracked)
5664 VarSetOps::RemoveElemD(compiler, gcInfo.gcVarPtrSetCur, varDsc->lvVarIndex);
5668 #endif // !defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
5670 // Register argument
5671 noway_assert(isRegParamType(genActualType(varDsc->TypeGet())));
5673 // Is register argument already in the right register?
5674 // If not load it from its stack location.
5675 var_types loadType = varDsc->lvaArgType();
5676 regNumber argReg = varDsc->lvArgReg; // incoming arg register
5678 if (varDsc->lvRegNum != argReg)
5680 assert(genIsValidReg(argReg));
5681 getEmitter()->emitIns_R_S(ins_Load(loadType), emitTypeSize(loadType), argReg, varNum, 0);
5683 // Update argReg life and GC Info to indicate varDsc stack slot is dead and argReg is going live.
5684 // Note that we cannot modify varDsc->lvRegNum here because another basic block may not be expecting it.
5685 // Therefore manually update life of argReg. Note that GT_JMP marks the end of the basic block
5686 // and after which reg life and gc info will be recomputed for the new block in genCodeForBBList().
5687 regSet.AddMaskVars(genRegMask(argReg));
5688 gcInfo.gcMarkRegPtrVal(argReg, loadType);
5689 if (compiler->lvaIsGCTracked(varDsc))
5692 if (VarSetOps::IsMember(compiler, gcInfo.gcVarPtrSetCur, varDsc->lvVarIndex))
5694 JITDUMP("\t\t\t\t\t\t\tVar V%02u becoming dead\n", varNum);
5698 JITDUMP("\t\t\t\t\t\t\tVar V%02u continuing dead\n", varNum);
5702 VarSetOps::RemoveElemD(compiler, gcInfo.gcVarPtrSetCur, varDsc->lvVarIndex);
5707 #if FEATURE_VARARG && defined(_TARGET_AMD64_)
5708 // In case of a jmp call to a vararg method also pass the float/double arg in the corresponding int arg
5709 // register. This is due to the AMD64 ABI which requires floating point values passed to varargs functions to
5710 // be passed in both integer and floating point registers. It doesn't apply to x86, which passes floating point
5711 // values on the stack.
5712 if (compiler->info.compIsVarArgs)
5714 regNumber intArgReg;
5715 var_types loadType = varDsc->lvaArgType();
5716 regNumber argReg = varDsc->lvArgReg; // incoming arg register
5718 if (varTypeIsFloating(loadType))
5720 intArgReg = compiler->getCallArgIntRegister(argReg);
5721 instruction ins = ins_CopyFloatToInt(loadType, TYP_LONG);
5722 inst_RV_RV(ins, argReg, intArgReg, loadType);
5729 fixedIntArgMask |= genRegMask(intArgReg);
5731 if (intArgReg == REG_ARG_0)
5733 assert(firstArgVarNum == BAD_VAR_NUM);
5734 firstArgVarNum = varNum;
5737 #endif // FEATURE_VARARG
5740 #if FEATURE_VARARG && defined(_TARGET_AMD64_)
5741 // Jmp call to a vararg method - if the method has fewer than 4 fixed arguments,
5742 // load the remaining arg registers (both int and float) from the corresponding
5743 // shadow stack slots. This is because we don't know the number and types of the
5744 // non-fixed params passed by the caller; therefore we have to assume the worst case,
5745 // i.e. that the caller passes float/double args in both int and float arg regs.
5747 // This doesn't apply to x86, which doesn't pass floating point values in floating
5750 // The caller could have passed gc-ref/byref type var args. Since these are var args,
5751 // the callee has no way of knowing their gc-ness. Therefore, mark the region that loads
5752 // remaining arg registers from shadow stack slots as non-gc interruptible.
5753 if (fixedIntArgMask != RBM_NONE)
5755 assert(compiler->info.compIsVarArgs);
5756 assert(firstArgVarNum != BAD_VAR_NUM);
5758 regMaskTP remainingIntArgMask = RBM_ARG_REGS & ~fixedIntArgMask;
5759 if (remainingIntArgMask != RBM_NONE)
5761 instruction insCopyIntToFloat = ins_CopyIntToFloat(TYP_LONG, TYP_DOUBLE);
5762 getEmitter()->emitDisableGC();
5763 for (int argNum = 0, argOffset = 0; argNum < MAX_REG_ARG; ++argNum)
5765 regNumber argReg = intArgRegs[argNum];
5766 regMaskTP argRegMask = genRegMask(argReg);
5768 if ((remainingIntArgMask & argRegMask) != 0)
5770 remainingIntArgMask &= ~argRegMask;
5771 getEmitter()->emitIns_R_S(INS_mov, EA_8BYTE, argReg, firstArgVarNum, argOffset);
5773 // also load it in corresponding float arg reg
5774 regNumber floatReg = compiler->getCallArgFloatRegister(argReg);
5775 inst_RV_RV(insCopyIntToFloat, floatReg, argReg);
5778 argOffset += REGSIZE_BYTES;
5780 getEmitter()->emitEnableGC();
5783 #endif // FEATURE_VARARG
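
    // Illustrative sketch of the non-gc-interruptible region above, assuming only RCX holds
    // a fixed arg so RDX, R8 and R9 must be reloaded from their shadow slots (the actual
    // addresses are frame-relative off firstArgVarNum; the offsets here are only for
    // illustration):
    //
    //     mov  rdx, [shadow slot +8]     ; reload the shadow stack slot
    //     movq xmm1, rdx                 ; duplicate into the matching float arg reg
    //     mov  r8,  [shadow slot +16]
    //     movq xmm2, r8
    //     mov  r9,  [shadow slot +24]
    //     movq xmm3, r9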
// produce code for a GT_LEA subnode
void CodeGen::genLeaInstruction(GenTreeAddrMode* lea)
{
    emitAttr size = emitTypeSize(lea);
    genConsumeOperands(lea);

    if (lea->Base() && lea->Index())
    {
        regNumber baseReg  = lea->Base()->gtRegNum;
        regNumber indexReg = lea->Index()->gtRegNum;
        getEmitter()->emitIns_R_ARX(INS_lea, size, lea->gtRegNum, baseReg, indexReg, lea->gtScale, lea->gtOffset);
    }
    else if (lea->Base())
    {
        getEmitter()->emitIns_R_AR(INS_lea, size, lea->gtRegNum, lea->Base()->gtRegNum, lea->gtOffset);
    }
    else if (lea->Index())
    {
        getEmitter()->emitIns_R_ARX(INS_lea, size, lea->gtRegNum, REG_NA, lea->Index()->gtRegNum, lea->gtScale,
                                    lea->gtOffset);
    }

    genProduceReg(lea);
}
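
// For reference, the three emitIns calls above correspond to the usual x86/x64 LEA
// addressing forms, e.g. (register and offset choices are only for illustration):
//
//     lea rax, [rbx + rsi*4 + 24]   ; base + scaled index + offset
//     lea rax, [rbx + 24]           ; base + offset
//     lea rax, [rsi*4 + 24]         ; scaled index + offset (REG_NA base)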
//-------------------------------------------------------------------------------------------
// genJumpKindsForTree:  Determine the number and kinds of conditional branches
//                       necessary to implement the given GT_CMP node
//
// Arguments:
//    cmpTree           - (input) The GenTree Relop node that was used to set the Condition codes
//    jmpKind[2]        - (output) One or two conditional branch instructions
//    jmpToTrueLabel[2] - (output) When true we branch to the true case
//                        When false we create a second label and branch to the false case
//                        Only GT_EQ for floating point compares can have a false value.
//
// Return Value:
//    Sets the proper values into the array elements of jmpKind[] and jmpToTrueLabel[]
//
// Assumptions:
//    At least one conditional branch instruction will be returned.
//    Typically only one conditional branch is needed
//    and the second jmpKind[] value is set to EJ_NONE
//
// Notes:
//    jmpToTrueLabel[i] = true  implies branch when the compare operation is true.
//    jmpToTrueLabel[i] = false implies branch when the compare operation is false.
//-------------------------------------------------------------------------------------------

void CodeGen::genJumpKindsForTree(GenTreePtr cmpTree, emitJumpKind jmpKind[2], bool jmpToTrueLabel[2])
{
    // Except for BEQ (= ordered GT_EQ) both jumps are to the true label.
    jmpToTrueLabel[0] = true;
    jmpToTrueLabel[1] = true;

    // For integer comparisons just use genJumpKindForOper
    if (!varTypeIsFloating(cmpTree->gtOp.gtOp1->gtEffectiveVal()))
    {
        CompareKind compareKind = ((cmpTree->gtFlags & GTF_UNSIGNED) != 0) ? CK_UNSIGNED : CK_SIGNED;
        jmpKind[0]              = genJumpKindForOper(cmpTree->gtOper, compareKind);
        jmpKind[1]              = EJ_NONE;
    }
    else // We have a floating point compare operation
    {
        assert(cmpTree->OperIsCompare());

        // For details on how we arrived at this mapping, see the comment block in genCodeForTreeNode()
        // while generating code for compare operators (e.g. GT_EQ etc).
        if ((cmpTree->gtFlags & GTF_RELOP_NAN_UN) != 0)
        {
            // Must branch if we have a NaN, unordered
            switch (cmpTree->gtOper)
            {
                case GT_LT:
                case GT_GT:
                    jmpKind[0] = EJ_jb;
                    jmpKind[1] = EJ_NONE;
                    break;
                case GT_LE:
                case GT_GE:
                    jmpKind[0] = EJ_jbe;
                    jmpKind[1] = EJ_NONE;
                    break;
                case GT_NE:
                    jmpKind[0] = EJ_jpe;
                    jmpKind[1] = EJ_jne;
                    break;
                case GT_EQ:
                    jmpKind[0] = EJ_je;
                    jmpKind[1] = EJ_NONE;
                    break;
                default:
                    unreached();
            }
        }
        else // ((cmpTree->gtFlags & GTF_RELOP_NAN_UN) == 0)
        {
            // Do not branch if we have a NaN, unordered
            switch (cmpTree->gtOper)
            {
                case GT_LT:
                case GT_GT:
                    jmpKind[0] = EJ_ja;
                    jmpKind[1] = EJ_NONE;
                    break;
                case GT_LE:
                case GT_GE:
                    jmpKind[0] = EJ_jae;
                    jmpKind[1] = EJ_NONE;
                    break;
                case GT_NE:
                    jmpKind[0] = EJ_jne;
                    jmpKind[1] = EJ_NONE;
                    break;
                case GT_EQ:
                    jmpKind[0]        = EJ_jpe;
                    jmpKind[1]        = EJ_je;
                    jmpToTrueLabel[0] = false;
                    break;
                default:
                    unreached();
            }
        }
    }
}
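
// Example of the two-branch case above: for an ordered GT_EQ of doubles feeding a GT_JTRUE,
// the caller receives jmpKind[] = { EJ_jpe, EJ_je } with jmpToTrueLabel[] = { false, true },
// which materializes as (sketch):
//
//     ucomisd xmm0, xmm1
//     jpe     L_false      ; PF=1 means unordered (NaN), so the equality is false
//     je      L_true
//   L_false: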
//------------------------------------------------------------------------
// genCompareFloat: Generate code for comparing two floating point values
//
// Arguments:
//    treeNode - the compare tree
//
// Return Value:
//    None.
// Comments:
// SSE2 instruction ucomis[s|d] performs an unordered comparison and
// updates the rFLAGS register as follows:
//        Result of compare         ZF  PF  CF
//        -----------------        ------------
//        Unordered                 1   1   1     <-- this result implies one of operands of compare is a NAN.
//        Greater                   0   0   0
//        Less Than                 0   0   1
//        Equal                     1   0   0
//
// From the above table the following equalities follow. As per ECMA spec *.UN opcodes perform
// unordered comparison of floating point values. That is, *.UN comparisons result in true when
// one of the operands is a NaN whereas ordered comparisons result in false.
//
//    Opcode          Amd64 equivalent         Comment
//    ------          -----------------        --------
//    BLT.UN(a,b)     ucomis[s|d] a, b         Jb branches if CF=1, which means either a<b or unordered from the above
//                    jb                       table
//
//    BLT(a,b)        ucomis[s|d] b, a         Ja branches if CF=0 and ZF=0, which means b>a that in turn implies a<b
//                    ja
//
//    BGT.UN(a,b)     ucomis[s|d] b, a         branch if b<a or unordered ==> branch if a>b or unordered
//                    jb
//
//    BGT(a, b)       ucomis[s|d] a, b         branch if a>b
//                    ja
//
//    BLE.UN(a,b)     ucomis[s|d] a, b         jbe branches if CF=1 or ZF=1, which implies a<=b or unordered
//                    jbe
//
//    BLE(a,b)        ucomis[s|d] b, a         jae branches if CF=0, which means b>=a or a<=b
//                    jae
//
//    BGE.UN(a,b)     ucomis[s|d] b, a         branch if b<=a or unordered ==> branch if a>=b or unordered
//                    jbe
//
//    BGE(a,b)        ucomis[s|d] a, b         branch if a>=b
//                    jae
//
//    BEQ.UN(a,b)     ucomis[s|d] a, b         branch if a==b or unordered. There is no BEQ.UN opcode in ECMA spec.
//                    je                       This case is given for completeness, in case the JIT generates such
//                                             a gentree internally.
//
//    BEQ(a,b)        ucomis[s|d] a, b         From the above table, PF=0 and ZF=1 corresponds to a==b.
//                    jpe L1
//                    je <true label>
//                L1:
//
//    BNE(a,b)        ucomis[s|d] a, b         branch if a!=b. There is no BNE opcode in ECMA spec. This case is
//                    jne                      given for completeness, in case the JIT generates such a gentree
//                                             internally.
//
//    BNE.UN(a,b)     ucomis[s|d] a, b         From the above table, PF=1 or ZF=0 implies unordered or a!=b
//                    jpe <true label>
//                    jne <true label>
//
// As we can see from the above equalities, the operands of a compare operator need to be
// reversed in case of BLT/CLT, BGT.UN/CGT.UN, BLE/CLE, BGE.UN/CGE.UN.
void CodeGen::genCompareFloat(GenTreePtr treeNode)
{
    assert(treeNode->OperIsCompare());

    GenTreeOp* tree    = treeNode->AsOp();
    GenTreePtr op1     = tree->gtOp1;
    GenTreePtr op2     = tree->gtOp2;
    var_types  op1Type = op1->TypeGet();
    var_types  op2Type = op2->TypeGet();

    genConsumeOperands(tree);

    assert(varTypeIsFloating(op1Type));
    assert(op1Type == op2Type);

    regNumber targetReg = treeNode->gtRegNum;

    bool reverseOps;
    if ((tree->gtFlags & GTF_RELOP_NAN_UN) != 0)
    {
        // Unordered comparison case
        reverseOps = (tree->gtOper == GT_GT || tree->gtOper == GT_GE);
    }
    else
    {
        reverseOps = (tree->gtOper == GT_LT || tree->gtOper == GT_LE);
    }

    if (reverseOps)
    {
        GenTreePtr tmp = op1;
        op1            = op2;
        op2            = tmp;
    }

    instruction ins     = ins_FloatCompare(op1Type);
    emitAttr    cmpAttr = emitTypeSize(op1Type);

    getEmitter()->emitInsBinary(ins, cmpAttr, op1, op2);

    // Are we evaluating this into a register?
    if (targetReg != REG_NA)
    {
        genSetRegToCond(targetReg, tree);
        genProduceReg(tree);
    }
}
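
// Worked example of the operand reversal above: materializing (a < b) for doubles, an
// ordered GT_LT, swaps the operands and tests "above" (register choices illustrative):
//
//     ucomisd xmm1, xmm0   ; compare b (xmm1) against a (xmm0)
//     seta    al           ; CF=0 and ZF=0  <=>  b > a  <=>  a < b
//     movzx   eax, al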
//------------------------------------------------------------------------
// genCompareInt: Generate code for comparing ints or, on amd64, longs.
//
// Arguments:
//    treeNode - the compare tree
//
// Return Value:
//    None.
//
void CodeGen::genCompareInt(GenTreePtr treeNode)
{
    assert(treeNode->OperIsCompare() || treeNode->OperIs(GT_CMP));

    GenTreeOp* tree      = treeNode->AsOp();
    GenTreePtr op1       = tree->gtOp1;
    GenTreePtr op2       = tree->gtOp2;
    var_types  op1Type   = op1->TypeGet();
    var_types  op2Type   = op2->TypeGet();
    regNumber  targetReg = tree->gtRegNum;

    // Case of op1 == 0 or op1 != 0:
    // Optimize generation of 'test' instruction if op1 sets flags.
    //
    // Note that if LSRA has inserted any GT_RELOAD/GT_COPY before
    // op1, it will not modify the flags set by codegen of op1.
    // Similarly op1 could also be reg-optional at its use and
    // it was spilled after producing its result in a register.
    // Spill code too will not modify the flags set by op1.
    GenTree* realOp1 = op1->gtSkipReloadOrCopy();
    if (realOp1->gtSetFlags())
    {
        // op1 must set ZF and SF flags
        assert(realOp1->gtSetZSFlags());

        // Must be (in)equality against zero.
        assert(tree->OperIs(GT_EQ, GT_NE));
        assert(op2->IsIntegralConst(0));
        assert(op2->isContained());

        // Just consume the operands
        genConsumeOperands(tree);

        // No need to generate a test instruction since
        // op1 already set the flags.

        // Are we evaluating this into a register?
        if (targetReg != REG_NA)
        {
            genSetRegToCond(targetReg, tree);
            genProduceReg(tree);
        }

        return;
    }

#ifdef FEATURE_SIMD
    // If we have GT_JTRUE(GT_EQ/NE(GT_SIMD((in)Equality, v1, v2), true/false)),
    // then we don't need to generate code for GT_EQ/GT_NE, since the SIMD (in)Equality intrinsic
    // would set or clear the Zero flag.
    if ((targetReg == REG_NA) && tree->OperIs(GT_EQ, GT_NE))
    {
        // Is it a SIMD (in)Equality that doesn't need to materialize result into a register?
        if ((op1->gtRegNum == REG_NA) && op1->IsSIMDEqualityOrInequality())
        {
            // Must be comparing against true or false.
            assert(op2->IsIntegralConst(0) || op2->IsIntegralConst(1));
            assert(op2->isContainedIntOrIImmed());

            // In this case SIMD (in)Equality will set or clear
            // the Zero flag, based on which GT_JTRUE would generate
            // the right conditional jump.
            return;
        }
    }
#endif // FEATURE_SIMD

    genConsumeOperands(tree);

    // TODO-CQ: We should be able to support swapping op1 and op2 to generate cmp reg, imm.
    // https://github.com/dotnet/coreclr/issues/7270
    assert(!op1->isContainedIntOrIImmed()); // We no longer support swapping op1 and op2 to generate cmp reg, imm
    assert(!varTypeIsFloating(op2Type));

    instruction ins;
    var_types   type = TYP_UNKNOWN;

    if (tree->OperIs(GT_TEST_EQ, GT_TEST_NE))
    {
        ins = INS_test;

        // Unlike many xarch instructions TEST doesn't have a form with a 16/32/64 bit first operand and
        // an 8 bit immediate second operand. But if the immediate value fits in 8 bits then we can simply
        // emit an 8 bit TEST instruction, unless we're targeting x86 and the first operand is a non-byteable
        // register.
        // Note that lowering does something similar but its main purpose is to allow memory operands to be
        // contained, so it doesn't handle other kinds of operands. It could do more but on x86 that results
        // in additional register constraints and that may be worse than wasting 3 bytes on an immediate.
        if (
#ifdef _TARGET_X86_
            (!op1->isUsedFromReg() || isByteReg(op1->gtRegNum)) &&
#endif
            (op2->IsCnsIntOrI() && genTypeCanRepresentValue(TYP_UBYTE, op2->AsIntCon()->IconValue())))
        {
            type = TYP_UBYTE;
        }
    }
    else if (op1->isUsedFromReg() && op2->IsIntegralConst(0))
    {
        // We're comparing a register to 0 so we can generate "test reg1, reg1"
        // instead of the longer "cmp reg1, 0"
        ins = INS_test;
        op2 = op1;
    }
    else
    {
        ins = INS_cmp;
    }

    if (type == TYP_UNKNOWN)
    {
        if (op1Type == op2Type)
        {
            type = op1Type;
        }
        else if (genTypeSize(op1Type) == genTypeSize(op2Type))
        {
            // If the types are different but have the same size then we'll use TYP_INT or TYP_LONG.
            // This primarily deals with small type mixes (e.g. byte/ubyte) that need to be widened
            // and compared as int. We should not get long type mixes here but handle that as well
            // just in case.
            type = genTypeSize(op1Type) == 8 ? TYP_LONG : TYP_INT;
        }
        else
        {
            // If the types are different simply use TYP_INT. This deals with small type/int type
            // mixes (e.g. byte/short ubyte/int) that need to be widened and compared as int.
            // Lowering is expected to handle any mixes that involve long types (e.g. int/long).
            type = TYP_INT;
        }

        // The common type cannot be smaller than any of the operand types, we're probably mixing int/long
        assert(genTypeSize(type) >= max(genTypeSize(op1Type), genTypeSize(op2Type)));
        // Small unsigned int types (TYP_BOOL can use anything) should use unsigned comparisons
        assert(!(varTypeIsSmallInt(type) && varTypeIsUnsigned(type)) || ((tree->gtFlags & GTF_UNSIGNED) != 0));
        // If op1 is smaller, then it cannot be in memory; we're probably missing a cast
        assert((genTypeSize(op1Type) >= genTypeSize(type)) || !op1->isUsedFromMemory());
        // If op2 is smaller, then it cannot be in memory; we're probably missing a cast
        assert((genTypeSize(op2Type) >= genTypeSize(type)) || !op2->isUsedFromMemory());
        // If op2 is a constant then it should fit in the common type
        assert(!op2->IsCnsIntOrI() || genTypeCanRepresentValue(type, op2->AsIntCon()->IconValue()));
    }

    // The type cannot be larger than the machine word size
    assert(genTypeSize(type) <= genTypeSize(TYP_I_IMPL));
    // TYP_UINT and TYP_ULONG should not appear here, only small types can be unsigned
    assert(!varTypeIsUnsigned(type) || varTypeIsSmall(type));

    getEmitter()->emitInsBinary(ins, emitTypeSize(type), op1, op2);

    // Are we evaluating this into a register?
    if (targetReg != REG_NA)
    {
        genSetRegToCond(targetReg, tree);
        genProduceReg(tree);
    }
}
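
// Sketch of the two common shapes chosen above (illustrative registers):
//
//     ; x == 0 with op2 a contained zero: "test reg, reg" is shorter than "cmp reg, 0"
//     test  eax, eax
//     sete  al
//     movzx eax, al
//
//     ; x == 42 with a TYP_INT common type
//     cmp   eax, 42
//     sete  al
//     movzx eax, al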
//-------------------------------------------------------------------------------------------
// genSetRegToCond:  Set a register 'dstReg' to the appropriate one or zero value
//                   corresponding to a binary Relational operator result.
//
// Arguments:
//    dstReg - The target register to set to 1 or 0
//    tree   - The GenTree Relop node that was used to set the Condition codes
//
// Return Value: none
//
// Notes:
//    A full 64-bit value of either 1 or 0 is setup in the 'dstReg'
//-------------------------------------------------------------------------------------------

void CodeGen::genSetRegToCond(regNumber dstReg, GenTreePtr tree)
{
    noway_assert((genRegMask(dstReg) & RBM_BYTE_REGS) != 0);

    emitJumpKind jumpKind[2];
    bool         branchToTrueLabel[2];
    genJumpKindsForTree(tree, jumpKind, branchToTrueLabel);

    if (jumpKind[1] == EJ_NONE)
    {
        // Set (lower byte of) reg according to the flags
        inst_SET(jumpKind[0], dstReg);
    }
    else
    {
        // jmpKind[1] != EJ_NONE implies BEQ and BNE.UN of floating point values.
        // These are represented by two conditions.
        if (tree->gtOper == GT_EQ)
        {
            // This must be an ordered comparison.
            assert((tree->gtFlags & GTF_RELOP_NAN_UN) == 0);
        }
        else
        {
            // This must be BNE.UN
            assert((tree->gtOper == GT_NE) && ((tree->gtFlags & GTF_RELOP_NAN_UN) != 0));
        }

        // Here is the sample code generated in each case:
        // BEQ ==  cmp, jpe <false label>, je <true label>
        // That is, to materialize the comparison reg needs to be set if PF=0 and ZF=1
        //      setnp reg  // if (PF==0) reg = 1 else reg = 0
        //      jpe L1     // Jmp if PF==1
        //      sete reg
        //  L1:
        //
        // BNE.UN == cmp, jpe <true label>, jne <true label>
        // That is, to materialize the comparison reg needs to be set if either PF=1 or ZF=0;
        //      setp reg
        //      jpe L1
        //      setne reg
        //  L1:

        // reverse the jmpkind condition before setting dstReg if it is to the false label.
        inst_SET(branchToTrueLabel[0] ? jumpKind[0] : emitter::emitReverseJumpKind(jumpKind[0]), dstReg);

        BasicBlock* label = genCreateTempLabel();
        inst_JMP(jumpKind[0], label);

        // second branch is always to the true label
        assert(branchToTrueLabel[1]);
        inst_SET(jumpKind[1], dstReg);
        genDefineTempLabel(label);
    }

    var_types treeType = tree->TypeGet();
    if (treeType == TYP_INT || treeType == TYP_LONG)
    {
        // Set the higher bytes to 0
        inst_RV_RV(ins_Move_Extend(TYP_UBYTE, true), dstReg, dstReg, TYP_UBYTE, emitTypeSize(TYP_UBYTE));
    }
    else
    {
        noway_assert(treeType == TYP_BYTE);
    }
}
#if !defined(_TARGET_64BIT_)
//------------------------------------------------------------------------
// genLongToIntCast: Generate code for long to int casts on x86.
//
// Arguments:
//    cast - The GT_CAST node
//
// Return Value:
//    None.
//
// Assumptions:
//    The cast node and its sources (via GT_LONG) must have been assigned registers.
//    The destination cannot be a floating point type or a small integer type.
//
void CodeGen::genLongToIntCast(GenTree* cast)
{
    assert(cast->OperGet() == GT_CAST);

    GenTree* src = cast->gtGetOp1();
    noway_assert(src->OperGet() == GT_LONG);

    genConsumeRegs(src);

    var_types srcType  = ((cast->gtFlags & GTF_UNSIGNED) != 0) ? TYP_ULONG : TYP_LONG;
    var_types dstType  = cast->CastToType();
    regNumber loSrcReg = src->gtGetOp1()->gtRegNum;
    regNumber hiSrcReg = src->gtGetOp2()->gtRegNum;
    regNumber dstReg   = cast->gtRegNum;

    assert((dstType == TYP_INT) || (dstType == TYP_UINT));
    assert(genIsValidIntReg(loSrcReg));
    assert(genIsValidIntReg(hiSrcReg));
    assert(genIsValidIntReg(dstReg));

    if (cast->gtOverflow())
    {
        //
        // Generate an overflow check for [u]long to [u]int casts:
        //
        // long  -> int  - check if the upper 33 bits are all 0 or all 1
        //
        // ulong -> int  - check if the upper 33 bits are all 0
        //
        // long  -> uint - check if the upper 32 bits are all 0
        // ulong -> uint - check if the upper 32 bits are all 0
        //

        if ((srcType == TYP_LONG) && (dstType == TYP_INT))
        {
            BasicBlock* allOne  = genCreateTempLabel();
            BasicBlock* success = genCreateTempLabel();

            inst_RV_RV(INS_test, loSrcReg, loSrcReg, TYP_INT, EA_4BYTE);
            inst_JMP(EJ_js, allOne);

            inst_RV_RV(INS_test, hiSrcReg, hiSrcReg, TYP_INT, EA_4BYTE);
            genJumpToThrowHlpBlk(EJ_jne, SCK_OVERFLOW);
            inst_JMP(EJ_jmp, success);

            genDefineTempLabel(allOne);
            inst_RV_IV(INS_cmp, hiSrcReg, -1, EA_4BYTE);
            genJumpToThrowHlpBlk(EJ_jne, SCK_OVERFLOW);

            genDefineTempLabel(success);
        }
        else
        {
            if ((srcType == TYP_ULONG) && (dstType == TYP_INT))
            {
                inst_RV_RV(INS_test, loSrcReg, loSrcReg, TYP_INT, EA_4BYTE);
                genJumpToThrowHlpBlk(EJ_js, SCK_OVERFLOW);
            }

            inst_RV_RV(INS_test, hiSrcReg, hiSrcReg, TYP_INT, EA_4BYTE);
            genJumpToThrowHlpBlk(EJ_jne, SCK_OVERFLOW);
        }
    }

    if (dstReg != loSrcReg)
    {
        inst_RV_RV(INS_mov, dstReg, loSrcReg, TYP_INT, EA_4BYTE);
    }

    genProduceReg(cast);
}
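
// Sketch of the overflow check emitted above for a long -> int cast, where the upper
// 33 bits must be all 0 or all 1 (lo/hi stand for the two halves of the source long):
//
//     test lo, lo
//     js   allOne        ; low word negative => upper bits must all be 1
//     test hi, hi
//     jne  <overflow>    ; non-negative low word => hi must be 0
//     jmp  success
//   allOne:
//     cmp  hi, -1
//     jne  <overflow>    ; negative low word => hi must be all 1
//   success: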
//------------------------------------------------------------------------
// genIntToIntCast: Generate code for an integer cast
//    This method handles integer overflow checking casts
//    as well as ordinary integer casts.
//
// Arguments:
//    treeNode - The GT_CAST node
//
// Return Value:
//    None.
//
// Assumptions:
//    The treeNode is not a contained node and must have an assigned register.
//    For a signed convert from byte, the source must be in a byte-addressable register.
//    Neither the source nor target type can be a floating point type.
//
// TODO-XArch-CQ: Allow castOp to be a contained node without an assigned register.
// TODO: refactor to use getCastDescription
//
void CodeGen::genIntToIntCast(GenTreePtr treeNode)
{
    assert(treeNode->OperGet() == GT_CAST);

    GenTreePtr castOp  = treeNode->gtCast.CastOp();
    var_types  srcType = genActualType(castOp->TypeGet());
    noway_assert(genTypeSize(srcType) >= 4);

#ifdef _TARGET_X86_
    if (varTypeIsLong(srcType))
    {
        genLongToIntCast(treeNode);
        return;
    }
#endif // _TARGET_X86_

    regNumber targetReg     = treeNode->gtRegNum;
    regNumber sourceReg     = castOp->gtRegNum;
    var_types dstType       = treeNode->CastToType();
    bool      isUnsignedDst = varTypeIsUnsigned(dstType);
    bool      isUnsignedSrc = varTypeIsUnsigned(srcType);

    // if necessary, force the srcType to unsigned when the GT_UNSIGNED flag is set
    if (!isUnsignedSrc && (treeNode->gtFlags & GTF_UNSIGNED) != 0)
    {
        srcType       = genUnsignedType(srcType);
        isUnsignedSrc = true;
    }

    bool requiresOverflowCheck = false;

    assert(genIsValidIntReg(targetReg));
    assert(genIsValidIntReg(sourceReg));

    instruction ins     = INS_invalid;
    emitAttr    srcSize = EA_ATTR(genTypeSize(srcType));
    emitAttr    dstSize = EA_ATTR(genTypeSize(dstType));

    if (srcSize < dstSize)
    {
        // Widening cast
        // Is this an Overflow checking cast?
        // We only need to handle one case, as the other casts can never overflow.
        //   cast from TYP_INT to TYP_ULONG
        //
        if (treeNode->gtOverflow() && (srcType == TYP_INT) && (dstType == TYP_ULONG))
        {
            requiresOverflowCheck = true;
            ins                   = INS_mov;
        }
        else
        {
            noway_assert(srcSize < EA_PTRSIZE);

            ins = ins_Move_Extend(srcType, castOp->InReg());

            /*
                Special case: ins_Move_Extend assumes the destination type is no bigger
                than TYP_INT. movsx and movzx can already extend all the way to
                64-bit, and a regular 32-bit mov clears the high 32 bits (like the non-existent movzxd),
                but for a sign extension from TYP_INT to TYP_LONG, we need to use the movsxd opcode.
            */
            if (!isUnsignedSrc && !isUnsignedDst)
            {
#ifdef _TARGET_X86_
                NYI_X86("Cast to 64 bit for x86/RyuJIT");
#else  // !_TARGET_X86_
                ins = INS_movsxd;
#endif // !_TARGET_X86_
            }
        }
    }
    else
    {
        // Narrowing cast, or sign-changing cast
        noway_assert(srcSize >= dstSize);

        // Is this an Overflow checking cast?
        if (treeNode->gtOverflow())
        {
            requiresOverflowCheck = true;
        }
        else
        {
            ins = ins_Move_Extend(dstType, castOp->InReg());
        }
    }

    noway_assert(ins != INS_invalid);

    genConsumeReg(castOp);

    if (requiresOverflowCheck)
    {
        ssize_t typeMin        = 0;
        ssize_t typeMax        = 0;
        ssize_t typeMask       = 0;
        bool    needScratchReg = false;
        bool    signCheckOnly  = false;

        /* Do we need to compare the value, or just check masks */

        switch (dstType)
        {
            case TYP_BYTE:
                typeMask = ssize_t((int)0xFFFFFF80);
                typeMin  = SCHAR_MIN;
                typeMax  = SCHAR_MAX;
                break;

            case TYP_UBYTE:
                typeMask = ssize_t((int)0xFFFFFF00L);
                break;

            case TYP_SHORT:
                typeMask = ssize_t((int)0xFFFF8000);
                typeMin  = SHRT_MIN;
                typeMax  = SHRT_MAX;
                break;

            case TYP_USHORT:
                typeMask = ssize_t((int)0xFFFF0000L);
                break;

            case TYP_INT:
                if (srcType == TYP_UINT)
                {
                    signCheckOnly = true;
                }
                else
                {
                    typeMask = ssize_t((int)0x80000000);
                    typeMin  = INT_MIN;
                    typeMax  = INT_MAX;
                }
                break;

            case TYP_UINT:
                if (srcType == TYP_INT)
                {
                    signCheckOnly = true;
                }
                else
                {
                    needScratchReg = true;
                }
                break;

            case TYP_LONG:
                noway_assert(srcType == TYP_ULONG);
                signCheckOnly = true;
                break;

            case TYP_ULONG:
                noway_assert((srcType == TYP_LONG) || (srcType == TYP_INT));
                signCheckOnly = true;
                break;

            default:
                NO_WAY("Unknown type");
                break;
        }

        if (signCheckOnly)
        {
            // We only need to check for a negative value in sourceReg
            inst_RV_IV(INS_cmp, sourceReg, 0, srcSize);
            genJumpToThrowHlpBlk(EJ_jl, SCK_OVERFLOW);
        }
        else
        {
            // When we are converting from unsigned or to unsigned, we
            // will only have to check for any bits set using 'typeMask'
            if (isUnsignedSrc || isUnsignedDst)
            {
                if (needScratchReg)
                {
                    regNumber tmpReg = treeNode->GetSingleTempReg();
                    inst_RV_RV(INS_mov, tmpReg, sourceReg, TYP_LONG); // Move the 64-bit value to a writeable temp reg
                    inst_RV_SH(INS_SHIFT_RIGHT_LOGICAL, srcSize, tmpReg, 32); // Shift right by 32 bits
                    genJumpToThrowHlpBlk(EJ_jne, SCK_OVERFLOW);               // Throw if result shift is non-zero
                }
                else
                {
                    noway_assert(typeMask != 0);
                    inst_RV_IV(INS_TEST, sourceReg, typeMask, srcSize);
                    genJumpToThrowHlpBlk(EJ_jne, SCK_OVERFLOW);
                }
            }
            else
            {
                // For a narrowing signed cast
                //
                // We must check the value is in a signed range.

                // Compare with the MAX

                noway_assert((typeMin != 0) && (typeMax != 0));

                inst_RV_IV(INS_cmp, sourceReg, typeMax, srcSize);
                genJumpToThrowHlpBlk(EJ_jg, SCK_OVERFLOW);

                // Compare with the MIN

                inst_RV_IV(INS_cmp, sourceReg, typeMin, srcSize);
                genJumpToThrowHlpBlk(EJ_jl, SCK_OVERFLOW);
            }
        }

        if (targetReg != sourceReg
#ifdef _TARGET_AMD64_
            // On amd64, we can hit this path for a same-register
            // 4-byte to 8-byte widening conversion, and need to
            // emit the instruction to set the high bits correctly.
            || (dstSize == EA_8BYTE && srcSize == EA_4BYTE)
#endif // _TARGET_AMD64_
                )
        {
            inst_RV_RV(ins, targetReg, sourceReg, srcType, srcSize);
        }
    }
    else // non-overflow checking cast
    {
        // We may have code transformations that result in casts where srcType is the same as dstType.
        // e.g. Bug 824281, in which a comma is split by the rationalizer, leaving an assignment of a
        // long constant to a long lclVar.
        if (srcType == dstType)
        {
            ins = INS_mov;
        }
        /* Is the value sitting in a non-byte-addressable register? */
        else if (castOp->InReg() && (dstSize == EA_1BYTE) && !isByteReg(sourceReg))
        {
            if (isUnsignedDst)
            {
                // for unsigned values we can AND, so it need not be a byte register
                ins = INS_AND;
            }
            else
            {
                // Move the value into a byte register
                noway_assert(!"Signed byte convert from non-byte-addressable register");
            }

            /* Generate "mov targetReg, castOp->gtReg" */
            if (targetReg != sourceReg)
            {
                inst_RV_RV(INS_mov, targetReg, sourceReg, srcType, srcSize);
            }
        }

        if (ins == INS_AND)
        {
            noway_assert(isUnsignedDst);

            /* Generate "and reg, MASK" */
            unsigned fillPattern;
            if (dstSize == EA_1BYTE)
            {
                fillPattern = 0xff;
            }
            else if (dstSize == EA_2BYTE)
            {
                fillPattern = 0xffff;
            }
            else
            {
                fillPattern = 0xffffffff;
            }

            inst_RV_IV(INS_AND, targetReg, fillPattern, EA_4BYTE);
        }
#ifdef _TARGET_AMD64_
        else if (ins == INS_movsxd)
        {
            inst_RV_RV(ins, targetReg, sourceReg, srcType, srcSize);
        }
#endif // _TARGET_AMD64_
        else if (ins == INS_mov)
        {
            if (targetReg != sourceReg
#ifdef _TARGET_AMD64_
                // On amd64, 'mov' is the opcode used to zero-extend from
                // 4 bytes to 8 bytes.
                || (dstSize == EA_8BYTE && srcSize == EA_4BYTE)
#endif // _TARGET_AMD64_
                    )
            {
                inst_RV_RV(ins, targetReg, sourceReg, srcType, srcSize);
            }
        }
        else
        {
            noway_assert(ins == INS_movsx || ins == INS_movzx);
            noway_assert(srcSize >= dstSize);

            /* Generate "mov targetReg, castOp->gtReg" */
            inst_RV_RV(ins, targetReg, sourceReg, srcType, dstSize);
        }
    }

    genProduceReg(treeNode);
}
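
// Examples of the instruction selection above (illustrative registers):
//
//     movsxd rax, ecx      ; signed widening TYP_INT -> TYP_LONG (the movsxd special case)
//     mov    eax, ecx      ; unsigned widening: a 32-bit mov already clears the high 32 bits
//     movsx  eax, cl       ; signed extension from a byte
//     movzx  eax, cx       ; unsigned extension from a ushort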
//------------------------------------------------------------------------
// genFloatToFloatCast: Generate code for a cast between float and double
//
// Arguments:
//    treeNode - The GT_CAST node
//
// Return Value:
//    None.
//
// Assumptions:
//    Cast is a non-overflow conversion.
//    The treeNode must have an assigned register.
//    The cast is between float and double or vice versa.
//
void CodeGen::genFloatToFloatCast(GenTreePtr treeNode)
{
    // float <--> double conversions are always non-overflow ones
    assert(treeNode->OperGet() == GT_CAST);
    assert(!treeNode->gtOverflow());

    regNumber targetReg = treeNode->gtRegNum;
    assert(genIsValidFloatReg(targetReg));

    GenTreePtr op1 = treeNode->gtOp.gtOp1;

    // If not contained, must be a valid float reg.
    if (op1->isUsedFromReg())
    {
        assert(genIsValidFloatReg(op1->gtRegNum));
    }

    var_types dstType = treeNode->CastToType();
    var_types srcType = op1->TypeGet();
    assert(varTypeIsFloating(srcType) && varTypeIsFloating(dstType));

    genConsumeOperands(treeNode->AsOp());
    if (srcType == dstType && (op1->isUsedFromReg() && (targetReg == op1->gtRegNum)))
    {
        // source and destination types are the same and also reside in the same register.
        // we just need to consume and produce the reg in this case.
        ;
    }
    else
    {
        instruction ins = ins_FloatConv(dstType, srcType);
        getEmitter()->emitInsBinary(ins, emitTypeSize(dstType), treeNode, op1);
    }

    genProduceReg(treeNode);
}
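
// The ins_FloatConv call above selects the SSE2 conversion form, e.g. (sketch):
//
//     cvtss2sd xmm0, xmm1  ; float  -> double
//     cvtsd2ss xmm0, xmm1  ; double -> float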
//------------------------------------------------------------------------
// genIntToFloatCast: Generate code to cast an int/long to float/double
//
// Arguments:
//    treeNode - The GT_CAST node
//
// Return Value:
//    None.
//
// Assumptions:
//    Cast is a non-overflow conversion.
//    The treeNode must have an assigned register.
//    SrcType = int32/uint32/int64/uint64 and DstType = float/double.
//
void CodeGen::genIntToFloatCast(GenTreePtr treeNode)
{
    // int type --> float/double conversions are always non-overflow ones
    assert(treeNode->OperGet() == GT_CAST);
    assert(!treeNode->gtOverflow());

    regNumber targetReg = treeNode->gtRegNum;
    assert(genIsValidFloatReg(targetReg));

    GenTreePtr op1 = treeNode->gtOp.gtOp1;

    // If not contained, must be a valid int reg.
    if (op1->isUsedFromReg())
    {
        assert(genIsValidIntReg(op1->gtRegNum));
    }

    var_types dstType = treeNode->CastToType();
    var_types srcType = op1->TypeGet();
    assert(!varTypeIsFloating(srcType) && varTypeIsFloating(dstType));

#if !defined(_TARGET_64BIT_)
    // We expect morph to replace long to float/double casts with helper calls
    noway_assert(!varTypeIsLong(srcType));
#endif // !defined(_TARGET_64BIT_)

    // Since the xarch emitter doesn't handle reporting gc-info correctly while casting away gc-ness, we
    // ensure the srcType of a cast is a non gc-type. Codegen should never see BYREF as a source type except
    // for GT_LCL_VAR_ADDR and GT_LCL_FLD_ADDR, which represent stack addresses and can be considered
    // as TYP_I_IMPL. In all other cases where the src operand is a gc-type and not known to be on the stack,
    // the front-end (see fgMorphCast()) ensures this by assigning the gc-type local to a non gc-type
    // temp and using the temp as the operand of the cast operation.
    if (srcType == TYP_BYREF)
    {
        noway_assert(op1->OperGet() == GT_LCL_VAR_ADDR || op1->OperGet() == GT_LCL_FLD_ADDR);
        srcType = TYP_I_IMPL;
    }

    // force the srcType to unsigned if GT_UNSIGNED flag is set
    if (treeNode->gtFlags & GTF_UNSIGNED)
    {
        srcType = genUnsignedType(srcType);
    }

    noway_assert(!varTypeIsGC(srcType));

    // We should never be seeing srcType whose size is not sizeof(int) nor sizeof(long).
    // For conversions from byte/sbyte/int16/uint16 to float/double, we would expect
    // either the front-end or the lowering phase to have generated two levels of cast.
    // The first one is for widening the smaller int type to int32 and the second one is
    // to the float/double.
    emitAttr srcSize = EA_ATTR(genTypeSize(srcType));
    noway_assert((srcSize == EA_ATTR(genTypeSize(TYP_INT))) || (srcSize == EA_ATTR(genTypeSize(TYP_LONG))));

    // Also we don't expect to see uint32 -> float/double and uint64 -> float conversions
    // here since they should have been lowered appropriately.
    noway_assert(srcType != TYP_UINT);
    noway_assert((srcType != TYP_ULONG) || (dstType != TYP_FLOAT));

    // To convert an int to a float/double, the cvtsi2ss/sd SSE2 instruction is used,
    // which does a partial write to the lower 4/8 bytes of the xmm register, keeping the
    // upper bytes unmodified. If "cvtsi2ss/sd xmmReg, r32/r64" occurs inside a loop,
    // the partial write could introduce a false dependency and could cause a stall
    // if there are further uses of xmmReg. We have such a case occurring with a
    // customer reported version of the SpectralNorm benchmark, resulting in a 2x perf
    // regression. To avoid the false dependency, we emit "xorps xmmReg, xmmReg" before
    // the cvtsi2ss/sd instruction.

    genConsumeOperands(treeNode->AsOp());
    getEmitter()->emitIns_R_R(INS_xorps, EA_4BYTE, treeNode->gtRegNum, treeNode->gtRegNum);

    // Note that here we need to specify srcType since it will determine
    // the size of the source reg/mem operand and the rex.w prefix.
    instruction ins = ins_FloatConv(dstType, TYP_INT);
    getEmitter()->emitInsBinary(ins, emitTypeSize(srcType), treeNode, op1);

    // Handle the case of srcType = TYP_ULONG. The SSE2 conversion instruction
    // will interpret the ULONG value as LONG. Hence we need to adjust the
    // result if the sign bit of srcType is set.
    if (srcType == TYP_ULONG)
    {
        // The instruction sequence below is less accurate than what clang
        // and gcc generate. However, we keep the current sequence for backward compatibility.
        // If we change the instructions below, FloatingPointUtils::convertUInt64ToDobule
        // should also be updated for a consistent conversion result.
        assert(dstType == TYP_DOUBLE);
        assert(op1->isUsedFromReg());

        // Set the flags without modifying op1.
        // test op1Reg, op1Reg
        inst_RV_RV(INS_test, op1->gtRegNum, op1->gtRegNum, srcType);

        // No need to adjust result if op1 >= 0 i.e. positive
        BasicBlock* label = genCreateTempLabel();
        inst_JMP(EJ_jge, label);

        // Adjust the result
        // result = result + 0x43f00000 00000000
        // addsd resultReg, 0x43f00000 00000000
        GenTreePtr* cns = &u8ToDblBitmask;
        if (*cns == nullptr)
        {
            double d;
            static_assert_no_msg(sizeof(double) == sizeof(__int64));
            *((__int64*)&d) = 0x43f0000000000000LL;

            *cns = genMakeConst(&d, dstType, treeNode, true);
        }
        inst_RV_TT(INS_addsd, treeNode->gtRegNum, *cns);

        genDefineTempLabel(label);
    }

    genProduceReg(treeNode);
}
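
// Sketch of the TYP_ULONG -> TYP_DOUBLE sequence described above, for a source in rax and a
// result in xmm0 (the 0x43f0000000000000 constant is 2^64 as a double):
//
//     xorps    xmm0, xmm0      ; break the false dependency
//     cvtsi2sd xmm0, rax       ; signed conversion; off by 2^64 when rax's sign bit is set
//     test     rax, rax
//     jge      L_done
//     addsd    xmm0, [2^64]    ; compensate for the sign bit
//   L_done: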
//------------------------------------------------------------------------
// genFloatToIntCast: Generate code to cast float/double to int/long
//
// Arguments:
//    treeNode - The GT_CAST node
//
// Return Value:
//    None.
//
// Assumptions:
//    Cast is a non-overflow conversion.
//    The treeNode must have an assigned register.
//    SrcType = float/double and DstType = int32/uint32/int64/uint64
//
// TODO-XArch-CQ: (Low-pri) - generate in-line code when DstType = uint64
//
void CodeGen::genFloatToIntCast(GenTreePtr treeNode)
{
    // we don't expect to see overflow detecting float/double --> int type conversions here
    // as they should have been converted into helper calls by the front-end.
    assert(treeNode->OperGet() == GT_CAST);
    assert(!treeNode->gtOverflow());

    regNumber targetReg = treeNode->gtRegNum;
    assert(genIsValidIntReg(targetReg));

    GenTreePtr op1 = treeNode->gtOp.gtOp1;

    // If not contained, must be a valid float reg.
    if (op1->isUsedFromReg())
    {
        assert(genIsValidFloatReg(op1->gtRegNum));
    }

    var_types dstType = treeNode->CastToType();
    var_types srcType = op1->TypeGet();
    assert(varTypeIsFloating(srcType) && !varTypeIsFloating(dstType));

    // We should never be seeing dstType whose size is neither sizeof(TYP_INT) nor sizeof(TYP_LONG).
    // For conversions to byte/sbyte/int16/uint16 from float/double, we would expect the
    // front-end or lowering phase to have generated two levels of cast. The first one is
    // for float or double to int32/uint32 and the second one for narrowing int32/uint32 to
    // the required smaller int type.
    emitAttr dstSize = EA_ATTR(genTypeSize(dstType));
    noway_assert((dstSize == EA_ATTR(genTypeSize(TYP_INT))) || (dstSize == EA_ATTR(genTypeSize(TYP_LONG))));

    // We shouldn't be seeing uint64 here as it should have been converted
    // into a helper call by either the front-end or the lowering phase.
    noway_assert(!varTypeIsUnsigned(dstType) || (dstSize != EA_ATTR(genTypeSize(TYP_LONG))));

    // If the dstType is TYP_UINT, we have 32 bits to encode the
    // float number, and any of the 33rd or above bits could be the sign bit.
    // To achieve this we pretend as if we are converting to a long.
    if (varTypeIsUnsigned(dstType) && (dstSize == EA_ATTR(genTypeSize(TYP_INT))))
    {
        dstType = TYP_LONG;
    }

    // Note that we need to specify dstType here so that it will determine
    // the size of the destination integer register and also the rex.w prefix.
    genConsumeOperands(treeNode->AsOp());
    instruction ins = ins_FloatConv(TYP_INT, srcType);
    getEmitter()->emitInsBinary(ins, emitTypeSize(dstType), treeNode, op1);
    genProduceReg(treeNode);
}
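
// Sketch of the truncating SSE2 conversions emitted above:
//
//     cvttsd2si eax, xmm0   ; double -> int32
//     cvttsd2si rax, xmm0   ; double -> int64; also used for double -> uint32, where the
//                           ; result is simply the low 32 bits of the 64-bit conversion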
//------------------------------------------------------------------------
// genCkfinite: Generate code for ckfinite opcode.
//
// Arguments:
//    treeNode - The GT_CKFINITE node
//
// Return Value:
//    None.
//
// Assumptions:
//    GT_CKFINITE node has reserved an internal register.
//
// TODO-XArch-CQ - mark the operand as contained if known to be in
// memory (e.g. field or an array element).
//
void CodeGen::genCkfinite(GenTreePtr treeNode)
{
    assert(treeNode->OperGet() == GT_CKFINITE);

    GenTreePtr op1        = treeNode->gtOp.gtOp1;
    var_types  targetType = treeNode->TypeGet();
    int        expMask    = (targetType == TYP_FLOAT) ? 0x7F800000 : 0x7FF00000; // Bit mask to extract exponent.
    regNumber  targetReg  = treeNode->gtRegNum;

    // Extract exponent into a register.
    regNumber tmpReg = treeNode->GetSingleTempReg();

    genConsumeReg(op1);

#ifdef _TARGET_64BIT_

    // Copy the floating-point value to an integer register. If we copied a float to a long, then
    // right-shift the value so the high 32 bits of the floating-point value sit in the low 32
    // bits of the integer register.
    instruction ins = ins_CopyFloatToInt(targetType, (targetType == TYP_FLOAT) ? TYP_INT : TYP_LONG);
    inst_RV_RV(ins, op1->gtRegNum, tmpReg, targetType);
    if (targetType == TYP_DOUBLE)
    {
        // right shift by 32 bits to get to exponent.
        inst_RV_SH(INS_shr, EA_8BYTE, tmpReg, 32);
    }

    // Mask exponent with all 1's and check if the exponent is all 1's
    inst_RV_IV(INS_and, tmpReg, expMask, EA_4BYTE);
    inst_RV_IV(INS_cmp, tmpReg, expMask, EA_4BYTE);

    // If exponent is all 1's, throw ArithmeticException
    genJumpToThrowHlpBlk(EJ_je, SCK_ARITH_EXCPN);

    // if it is a finite value copy it to targetReg
    if (targetReg != op1->gtRegNum)
    {
        inst_RV_RV(ins_Copy(targetType), targetReg, op1->gtRegNum, targetType);
    }

#else // !_TARGET_64BIT_

    // If the target type is TYP_DOUBLE, we want to extract the high 32 bits into the register.
    // There is no easy way to do this. To not require an extra register, we'll use shuffles
    // to move the high 32 bits into the low 32 bits, then shuffle it back, since we
    // need to produce the value into the target register.
    //
    // For TYP_DOUBLE, we'll generate (for targetReg != op1->gtRegNum):
    //    movaps targetReg, op1->gtRegNum
    //    shufps targetReg, targetReg, 0xB1    // WZYX => ZWXY
    //    mov_xmm2i tmpReg, targetReg          // tmpReg <= Y
    //    and tmpReg, <mask>
    //    cmp tmpReg, <mask>
    //    je <throw block>
    //    movaps targetReg, op1->gtRegNum      // copy the value again, instead of un-shuffling it
    //
    // For TYP_DOUBLE with (targetReg == op1->gtRegNum):
    //    shufps targetReg, targetReg, 0xB1    // WZYX => ZWXY
    //    mov_xmm2i tmpReg, targetReg          // tmpReg <= Y
    //    and tmpReg, <mask>
    //    cmp tmpReg, <mask>
    //    je <throw block>
    //    shufps targetReg, targetReg, 0xB1    // ZWXY => WZYX
    //
    // For TYP_FLOAT, it's the same as _TARGET_64BIT_:
    //    mov_xmm2i tmpReg, targetReg          // tmpReg <= low 32 bits
    //    and tmpReg, <mask>
    //    cmp tmpReg, <mask>
    //    je <throw block>
    //    movaps targetReg, op1->gtRegNum      // only if targetReg != op1->gtRegNum

    regNumber copyToTmpSrcReg; // The register we'll copy to the integer temp.

    if (targetType == TYP_DOUBLE)
    {
        if (targetReg != op1->gtRegNum)
        {
            inst_RV_RV(ins_Copy(targetType), targetReg, op1->gtRegNum, targetType);
        }
        inst_RV_RV_IV(INS_shufps, EA_16BYTE, targetReg, targetReg, 0xb1);
        copyToTmpSrcReg = targetReg;
    }
    else
    {
        copyToTmpSrcReg = op1->gtRegNum;
    }

    // Copy only the low 32 bits. This will be the high order 32 bits of the floating-point
    // value, no matter the floating-point type.
    inst_RV_RV(ins_CopyFloatToInt(TYP_FLOAT, TYP_INT), copyToTmpSrcReg, tmpReg, TYP_FLOAT);

    // Mask exponent with all 1's and check if the exponent is all 1's
    inst_RV_IV(INS_and, tmpReg, expMask, EA_4BYTE);
    inst_RV_IV(INS_cmp, tmpReg, expMask, EA_4BYTE);

    // If exponent is all 1's, throw ArithmeticException
    genJumpToThrowHlpBlk(EJ_je, SCK_ARITH_EXCPN);

    if (targetReg != op1->gtRegNum)
    {
        // In both the TYP_FLOAT and TYP_DOUBLE case, the op1 register is untouched,
        // so copy it to the targetReg. This is faster and smaller for TYP_DOUBLE
        // than re-shuffling the targetReg.
        inst_RV_RV(ins_Copy(targetType), targetReg, op1->gtRegNum, targetType);
    }
    else if (targetType == TYP_DOUBLE)
    {
        // We need to re-shuffle the targetReg to get the correct result.
        inst_RV_RV_IV(INS_shufps, EA_16BYTE, targetReg, targetReg, 0xb1);
    }

#endif // !_TARGET_64BIT_

    genProduceReg(treeNode);
}
#ifdef _TARGET_AMD64_
int CodeGenInterface::genSPtoFPdelta()
{
    int delta;

#ifdef UNIX_AMD64_ABI

    // We require frame chaining on Unix to support native tool unwinding (such as
    // unwinding by the native debugger). We have a CLR-only extension to the
    // unwind codes (UWOP_SET_FPREG_LARGE) to support SP->FP offsets larger than 240.
    // If Unix ever supports EnC, the RSP == RBP assumption will have to be reevaluated.
    delta = genTotalFrameSize();

#else // !UNIX_AMD64_ABI

    // As per the Amd64 ABI, the RBP offset from the initial RSP can be between 0 and 240 if
    // RBP needs to be reported in unwind codes. This case would arise for methods
    // with localloc.
    if (compiler->compLocallocUsed)
    {
        // We cannot base delta computation on compLclFrameSize since it changes from
        // tentative to final frame layout and hence there is a possibility of
        // under-estimating offset of vars from FP, which in turn results in under-
        // estimating instruction size.
        //
        // To be predictive and so as never to under-estimate offset of vars from FP
        // we will always position FP at min(240, outgoing arg area size).
        delta = Min(240, (int)compiler->lvaOutgoingArgSpaceSize);
    }
    else if (compiler->opts.compDbgEnC)
    {
        // vm assumption on EnC methods is that rsp and rbp are equal
        delta = 0;
    }
    else
    {
        delta = genTotalFrameSize();
    }

#endif // !UNIX_AMD64_ABI

    return delta;
}

//---------------------------------------------------------------------
// genTotalFrameSize - return the total size of the stack frame, including local size,
// callee-saved register size, etc. For AMD64, this does not include the caller-pushed
// return address.
//
// Return value:
//    Total frame size
//
int CodeGenInterface::genTotalFrameSize()
{
    assert(!IsUninitialized(compiler->compCalleeRegsPushed));

    int totalFrameSize = compiler->compCalleeRegsPushed * REGSIZE_BYTES + compiler->compLclFrameSize;

    assert(totalFrameSize >= 0);
    return totalFrameSize;
}

//---------------------------------------------------------------------
// genCallerSPtoFPdelta - return the offset from Caller-SP to the frame pointer.
// This number is going to be negative, since the Caller-SP is at a higher
// address than the frame pointer.
//
// There must be a frame pointer to call this function!
//
// We can't compute this directly from the Caller-SP, since the frame pointer
// is based on a maximum delta from Initial-SP, so first we find SP, then
// compute the FP offset.
//
int CodeGenInterface::genCallerSPtoFPdelta()
{
    assert(isFramePointerUsed());
    int callerSPtoFPdelta;

    callerSPtoFPdelta = genCallerSPtoInitialSPdelta() + genSPtoFPdelta();

    assert(callerSPtoFPdelta <= 0);
    return callerSPtoFPdelta;
}

//---------------------------------------------------------------------
// genCallerSPtoInitialSPdelta - return the offset from Caller-SP to Initial SP.
//
// This number will be negative.
//
int CodeGenInterface::genCallerSPtoInitialSPdelta()
{
    int callerSPtoSPdelta = 0;

    callerSPtoSPdelta -= genTotalFrameSize();
    callerSPtoSPdelta -= REGSIZE_BYTES; // caller-pushed return address

    // compCalleeRegsPushed does not account for the frame pointer
    // TODO-Cleanup: shouldn't this be part of genTotalFrameSize?
    if (isFramePointerUsed())
    {
        callerSPtoSPdelta -= REGSIZE_BYTES;
    }

    assert(callerSPtoSPdelta <= 0);
    return callerSPtoSPdelta;
}
#endif // _TARGET_AMD64_
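
// Worked example of the deltas above (illustrative numbers): on AMD64 with an RBP frame,
// 3 callee-saved register pushes (not counting RBP) and 0x40 bytes of locals:
//
//     genTotalFrameSize()           = 3 * REGSIZE_BYTES + 0x40 = 0x58
//     genCallerSPtoInitialSPdelta() = -(0x58 + 8 /* return address */ + 8 /* pushed RBP */) = -0x68
//     genCallerSPtoFPdelta()        = -0x68 + genSPtoFPdelta()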
//-----------------------------------------------------------------------------------------
// genSSE2BitwiseOp - generate SSE2 code for the given oper as "Operand BitWiseOp BitMask"
//
// Arguments:
//    treeNode - tree node
//
// Return value:
//    None
//
// Assumptions:
//     i) tree oper is one of GT_NEG or GT_INTRINSIC Abs()
//    ii) tree type is floating point type.
//   iii) caller of this routine needs to call genProduceReg()
void CodeGen::genSSE2BitwiseOp(GenTreePtr treeNode)
{
    regNumber targetReg  = treeNode->gtRegNum;
    var_types targetType = treeNode->TypeGet();
    assert(varTypeIsFloating(targetType));

    float       f        = 0.0;
    double      d        = 0.0;
    GenTreePtr* bitMask  = nullptr;
    instruction ins      = INS_invalid;
    void*       cnsAddr  = nullptr;
    bool        dblAlign = false;

    switch (treeNode->OperGet())
    {
        case GT_NEG:
            // Neg(x) = flip the sign bit.
            // Neg(f) = f ^ 0x80000000
            // Neg(d) = d ^ 0x8000000000000000
            ins = genGetInsForOper(GT_XOR, targetType);
            if (targetType == TYP_FLOAT)
            {
                bitMask = &negBitmaskFlt;

                static_assert_no_msg(sizeof(float) == sizeof(int));
                *((int*)&f) = 0x80000000;
                cnsAddr     = &f;
            }
            else
            {
                bitMask = &negBitmaskDbl;

                static_assert_no_msg(sizeof(double) == sizeof(__int64));
                *((__int64*)&d) = 0x8000000000000000LL;
                cnsAddr         = &d;
                dblAlign        = true;
            }
            break;

        case GT_INTRINSIC:
            assert(treeNode->gtIntrinsic.gtIntrinsicId == CORINFO_INTRINSIC_Abs);

            // Abs(x) = set sign-bit to zero
            // Abs(f) = f & 0x7fffffff
            // Abs(d) = d & 0x7fffffffffffffff
            ins = genGetInsForOper(GT_AND, targetType);
            if (targetType == TYP_FLOAT)
            {
                bitMask = &absBitmaskFlt;

                static_assert_no_msg(sizeof(float) == sizeof(int));
                *((int*)&f) = 0x7fffffff;
                cnsAddr     = &f;
            }
            else
            {
                bitMask = &absBitmaskDbl;

                static_assert_no_msg(sizeof(double) == sizeof(__int64));
                *((__int64*)&d) = 0x7fffffffffffffffLL;
                cnsAddr         = &d;
                dblAlign        = true;
            }
            break;

        default:
            assert(!"genSSE2: unsupported oper");
            break;
    }

    if (*bitMask == nullptr)
    {
        assert(cnsAddr != nullptr);
        *bitMask = genMakeConst(cnsAddr, targetType, treeNode, dblAlign);
    }

    // We need an additional register for the bitmask.
    regNumber tmpReg = treeNode->GetSingleTempReg();

    // Move operand into targetReg only if the reg reserved for
    // internal purpose is not the same as targetReg.
    GenTreePtr op1 = treeNode->gtOp.gtOp1;
    assert(op1->isUsedFromReg());
    regNumber operandReg = genConsumeReg(op1);
    if (tmpReg != targetReg)
    {
        if (operandReg != targetReg)
        {
            inst_RV_RV(ins_Copy(targetType), targetReg, operandReg, targetType);
        }

        operandReg = tmpReg;
    }

    inst_RV_TT(ins_Load(targetType, false), tmpReg, *bitMask);
    assert(ins != INS_invalid);
    inst_RV_RV(ins, targetReg, operandReg, targetType);
}
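
// Illustrative sketch of what the sequence above produces for Neg(double), with the value
// in xmm0 and the internal temp in xmm1 (exact opcode forms depend on genGetInsForOper and
// ins_Load; register choices are only for illustration):
//
//     movsd xmm1, [negBitmaskDbl]   ; load the 0x8000000000000000 bit mask
//     xorps xmm0, xmm1              ; flip the sign bit
//
// Abs() has the same shape with the 0x7fffffffffffffff mask and the AND form of the op.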
//---------------------------------------------------------------------
// genIntrinsic - generate code for a given intrinsic
//
// Arguments
//    treeNode - the GT_INTRINSIC node
//
// Return value:
//    None
//
void CodeGen::genIntrinsic(GenTreePtr treeNode)
{
    // Right now only Sqrt/Abs are treated as math intrinsics.
    switch (treeNode->gtIntrinsic.gtIntrinsicId)
    {
        case CORINFO_INTRINSIC_Sqrt:
        {
            // Both the operand and its result must be of the same floating point type.
            GenTreePtr srcNode = treeNode->gtOp.gtOp1;
            assert(varTypeIsFloating(srcNode));
            assert(srcNode->TypeGet() == treeNode->TypeGet());

            genConsumeOperands(treeNode->AsOp());
            getEmitter()->emitInsBinary(ins_FloatSqrt(treeNode->TypeGet()), emitTypeSize(treeNode), treeNode, srcNode);
            break;
        }
        case CORINFO_INTRINSIC_Abs:
            genSSE2BitwiseOp(treeNode);
            break;
        default:
            assert(!"genIntrinsic: Unsupported intrinsic");
            unreached();
    }

    genProduceReg(treeNode);
}
//-------------------------------------------------------------------------- //
// getBaseVarForPutArgStk - returns the baseVarNum for passing a stack arg.
//
// Arguments
//    treeNode - the GT_PUTARG_STK node
//
// Return value:
//    The number of the base variable.
//
// Note:
//    If tail call, the outgoing args are placed in the caller's incoming arg stack space.
//    Otherwise, they go in the outgoing arg area on the current frame.
//
//    On Windows the caller always creates slots (homing space) in its frame for the
//    first 4 arguments of a callee (register passed args). So, the baseVarNum is always 0.
//    For System V systems there is no such calling convention requirement, and the code
//    needs to find the first stack passed argument from the caller. This is done by iterating
//    over all the lvParam variables and finding the first with lvArgReg equal to REG_STK.
//
unsigned CodeGen::getBaseVarForPutArgStk(GenTreePtr treeNode)
{
    assert(treeNode->OperGet() == GT_PUTARG_STK);

    unsigned baseVarNum;

    // Whether to setup stk arg in incoming or out-going arg area?
    // Fast tail calls implemented as epilog+jmp = stk arg is setup in incoming arg area.
    // All other calls - stk arg is setup in out-going arg area.
    if (treeNode->AsPutArgStk()->putInIncomingArgArea())
    {
        // See the note in the function header re: finding the first stack passed argument.
        baseVarNum = getFirstArgWithStackSlot();
        assert(baseVarNum != BAD_VAR_NUM);

        // This must be a fast tail call.
        assert(treeNode->AsPutArgStk()->gtCall->AsCall()->IsFastTailCall());

        // Since it is a fast tail call, the existence of the first incoming arg is guaranteed
        // because the fast tail call requires that the in-coming arg area of the caller is
        // >= the out-going arg area required for the tail call.
        LclVarDsc* varDsc = &(compiler->lvaTable[baseVarNum]);
        assert(varDsc != nullptr);

#ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
        assert(!varDsc->lvIsRegArg && varDsc->lvArgReg == REG_STK);
#else  // !FEATURE_UNIX_AMD64_STRUCT_PASSING
        // On Windows this assert is always true. The first argument will always be in REG_ARG_0 or REG_FLTARG_0.
        assert(varDsc->lvIsRegArg && (varDsc->lvArgReg == REG_ARG_0 || varDsc->lvArgReg == REG_FLTARG_0));
#endif // !FEATURE_UNIX_AMD64_STRUCT_PASSING
    }
    else
    {
#if FEATURE_FIXED_OUT_ARGS
        baseVarNum = compiler->lvaOutgoingArgSpaceVar;
#else  // !FEATURE_FIXED_OUT_ARGS
        assert(!"No BaseVarForPutArgStk on x86");
        baseVarNum = BAD_VAR_NUM;
#endif // !FEATURE_FIXED_OUT_ARGS
    }

    return baseVarNum;
}
//---------------------------------------------------------------------
// genAlignStackBeforeCall: Align the stack if necessary before a call.
//
// Arguments:
//    putArgStk - the putArgStk node.
//
void CodeGen::genAlignStackBeforeCall(GenTreePutArgStk* putArgStk)
{
#if defined(UNIX_X86_ABI)

    genAlignStackBeforeCall(putArgStk->gtCall);

#endif // UNIX_X86_ABI
}

//---------------------------------------------------------------------
// genAlignStackBeforeCall: Align the stack if necessary before a call.
//
// Arguments:
//    call - the call node.
//
void CodeGen::genAlignStackBeforeCall(GenTreeCall* call)
{
#if defined(UNIX_X86_ABI)

    // Have we aligned the stack yet?
    if (!call->fgArgInfo->IsStkAlignmentDone())
    {
        // We haven't done any stack alignment yet for this call. We might need to create
        // an alignment adjustment, even if this function itself doesn't have any stack args.
        // This can happen if this function call is part of a nested call sequence, and the outer
        // call has already pushed some arguments.

        unsigned stkLevel = genStackLevel + call->fgArgInfo->GetStkSizeBytes();
        call->fgArgInfo->ComputeStackAlignment(stkLevel);

        unsigned padStkAlign = call->fgArgInfo->GetStkAlign();
        if (padStkAlign != 0)
        {
            // Now generate the alignment
            inst_RV_IV(INS_sub, REG_SPBASE, padStkAlign, EA_PTRSIZE);
            AddStackLevel(padStkAlign);
            AddNestedAlignment(padStkAlign);
        }

        call->fgArgInfo->SetStkAlignmentDone();
    }

#endif // UNIX_X86_ABI
}

//---------------------------------------------------------------------
// genRemoveAlignmentAfterCall: After a call, remove the alignment
// added before the call, if any.
//
// Arguments:
//    call - the call node.
//    bias - additional stack adjustment
//
// Note:
//    When bias > 0, the caller should adjust the stack level appropriately, as
//    bias is not considered when adjusting the stack level.
//
void CodeGen::genRemoveAlignmentAfterCall(GenTreeCall* call, unsigned bias)
{
#if defined(_TARGET_X86_)
#if defined(UNIX_X86_ABI)
    // Put back the stack pointer if there was any padding for stack alignment
    unsigned padStkAlign  = call->fgArgInfo->GetStkAlign();
    unsigned padStkAdjust = padStkAlign + bias;

    if (padStkAdjust != 0)
    {
        inst_RV_IV(INS_add, REG_SPBASE, padStkAdjust, EA_PTRSIZE);
        SubtractStackLevel(padStkAlign);
        SubtractNestedAlignment(padStkAlign);
    }
#else  // !UNIX_X86_ABI
    if (bias != 0)
    {
        genAdjustSP(bias);
    }
#endif // !UNIX_X86_ABI
#else  // !_TARGET_X86_
    assert(bias == 0);
#endif // !_TARGET_X86_
}
//---------------------------------------------------------------------
// genAdjustStackForPutArgStk:
//    adjust the stack pointer for a putArgStk node if necessary.
//
// Arguments:
//    putArgStk - the putArgStk node.
//
// Returns: true if the stack pointer was adjusted; false otherwise.
//
// Notes:
//    Sets `m_pushStkArg` to true if the stack arg needs to be pushed,
//    false if the stack arg needs to be stored at the current stack
//    pointer address. This is exactly the opposite of the return value
//    of this function.
//
#ifdef _TARGET_X86_
bool CodeGen::genAdjustStackForPutArgStk(GenTreePutArgStk* putArgStk)
{
#ifdef FEATURE_SIMD
    if (varTypeIsSIMD(putArgStk))
    {
        const unsigned argSize = genTypeSize(putArgStk);
        inst_RV_IV(INS_sub, REG_SPBASE, argSize, EA_PTRSIZE);
        AddStackLevel(argSize);
        m_pushStkArg = false;
        return true;
    }
#endif // FEATURE_SIMD

    const unsigned argSize = putArgStk->getArgSize();

    // If the gtPutArgStkKind is one of the push types, we do not pre-adjust the stack.
    // This is set in Lowering, and is true if and only if:
    // - This argument contains any GC pointers OR
    // - It is a GT_FIELD_LIST OR
    // - It is less than 16 bytes in size.
    CLANG_FORMAT_COMMENT_ANCHOR;

#ifdef DEBUG
    switch (putArgStk->gtPutArgStkKind)
    {
        case GenTreePutArgStk::Kind::RepInstr:
        case GenTreePutArgStk::Kind::Unroll:
            assert((putArgStk->gtNumberReferenceSlots == 0) && (putArgStk->gtGetOp1()->OperGet() != GT_FIELD_LIST) &&
                   (argSize >= 16));
            break;
        case GenTreePutArgStk::Kind::Push:
        case GenTreePutArgStk::Kind::PushAllSlots:
            assert((putArgStk->gtNumberReferenceSlots != 0) || (putArgStk->gtGetOp1()->OperGet() == GT_FIELD_LIST) ||
                   (argSize < 16));
            break;
        case GenTreePutArgStk::Kind::Invalid:
        default:
            assert(!"Uninitialized GenTreePutArgStk::Kind");
            break;
    }
#endif // DEBUG

    if (putArgStk->isPushKind())
    {
        m_pushStkArg = true;
        return false;
    }
    else
    {
        m_pushStkArg = false;
        inst_RV_IV(INS_sub, REG_SPBASE, argSize, EA_PTRSIZE);
        AddStackLevel(argSize);
        return true;
    }
}
7563 //---------------------------------------------------------------------
7564 // genPutArgStkFieldList - generate code for passing a GT_FIELD_LIST arg on the stack.
7567 // treeNode - the GT_PUTARG_STK node whose op1 is a GT_FIELD_LIST
7572 void CodeGen::genPutArgStkFieldList(GenTreePutArgStk* putArgStk)
7574 GenTreeFieldList* const fieldList = putArgStk->gtOp1->AsFieldList();
7575 assert(fieldList != nullptr);
7577 // Set m_pushStkArg and pre-adjust the stack if necessary.
7578 const bool preAdjustedStack = genAdjustStackForPutArgStk(putArgStk);
7580 // For now, we only support the "push" case; we will push a full slot for the first field of each slot
7581 // within the struct.
7582 assert((putArgStk->isPushKind()) && !preAdjustedStack && m_pushStkArg);
7584 // If we have pre-adjusted the stack and are simply storing the fields in order, set the offset to 0.
7585 // (Note that this mode is not currently being used.)
7586 // If we are pushing the arguments (i.e. we have not pre-adjusted the stack), then we are pushing them
7587 // in reverse order, so we start with the current field offset at the size of the struct arg (which must be
7588 // a multiple of the target pointer size).
7589 unsigned currentOffset = (preAdjustedStack) ? 0 : putArgStk->getArgSize();
7590 unsigned prevFieldOffset = currentOffset;
7591 regNumber intTmpReg = REG_NA;
7592 regNumber simdTmpReg = REG_NA;
7593 if (putArgStk->AvailableTempRegCount() != 0)
7595 regMaskTP rsvdRegs = putArgStk->gtRsvdRegs;
7596 if ((rsvdRegs & RBM_ALLINT) != 0)
7598 intTmpReg = putArgStk->GetSingleTempReg(RBM_ALLINT);
7599 assert(genIsValidIntReg(intTmpReg));
7601 if ((rsvdRegs & RBM_ALLFLOAT) != 0)
7603 simdTmpReg = putArgStk->GetSingleTempReg(RBM_ALLFLOAT);
7604 assert(genIsValidFloatReg(simdTmpReg));
7606 assert(genCountBits(rsvdRegs) == (unsigned)((intTmpReg == REG_NA) ? 0 : 1) + ((simdTmpReg == REG_NA) ? 0 : 1));
7609 for (GenTreeFieldList* current = fieldList; current != nullptr; current = current->Rest())
7611 GenTree* const fieldNode = current->Current();
7612 const unsigned fieldOffset = current->gtFieldOffset;
7613 var_types fieldType = current->gtFieldType;
7615 // Long-typed nodes should have been handled by the decomposition pass, and lowering should have sorted the
7616 // field list in descending order by offset.
7617 assert(!varTypeIsLong(fieldType));
7618 assert(fieldOffset <= prevFieldOffset);
7620 // Consume the register, if any, for this field. Note that genConsumeRegs() will appropriately
7621 // update the liveness info for a lclVar that has been marked RegOptional, which hasn't been
7622 // assigned a register, and which is therefore contained.
7623 // Unlike genConsumeReg(), it handles the case where no registers are being consumed.
7624 genConsumeRegs(fieldNode);
7625 regNumber argReg = fieldNode->isUsedFromSpillTemp() ? REG_NA : fieldNode->gtRegNum;
7627 // If the field is slot-like, we can use a push instruction to store the entire register no matter the type.
7629 // The GC encoder requires that the stack remain 4-byte aligned at all times. Round the adjustment up
7630 // to the next multiple of 4. If we are going to generate a `push` instruction, the adjustment must
7631 // not require rounding.
7632 // NOTE: if the field is of GC type, we must use a push instruction, since the emitter is not otherwise
7633 // able to detect stores into the outgoing argument area of the stack on x86.
7634 const bool fieldIsSlot = ((fieldOffset % 4) == 0) && ((prevFieldOffset - fieldOffset) >= 4);
7635 int adjustment = roundUp(currentOffset - fieldOffset, 4);
7636 if (fieldIsSlot && !varTypeIsSIMD(fieldType))
7638 fieldType = genActualType(fieldType);
7639 unsigned pushSize = genTypeSize(fieldType);
7640 assert((pushSize % 4) == 0);
7641 adjustment -= pushSize;
7642 while (adjustment != 0)
7644 inst_IV(INS_push, 0);
7645 currentOffset -= pushSize;
7646 AddStackLevel(pushSize);
7647 adjustment -= pushSize;
7649 m_pushStkArg = true;
7653 m_pushStkArg = false;
7655 // We always "push" floating point fields (i.e. they are full slot values that don't
7656 // require special handling).
7657 assert(varTypeIsIntegralOrI(fieldNode) || varTypeIsSIMD(fieldNode));
7659 // If we can't push this field, it needs to be in a register so that we can store
7660 // it to the stack location.
7661 if (adjustment != 0)
// We can't push this field, so adjust the stack pointer down to the next slot boundary (at or
// below fieldOffset) and generate stack-relative stores rather than pushes.
7666 inst_RV_IV(INS_sub, REG_SPBASE, adjustment, EA_PTRSIZE);
7667 currentOffset -= adjustment;
7668 AddStackLevel(adjustment);
7671 // Does it need to be in a byte register?
7672 // If so, we'll use intTmpReg, which must have been allocated as a byte register.
7673 // If it's already in a register, but not a byteable one, then move it.
7674 if (varTypeIsByte(fieldType) && ((argReg == REG_NA) || ((genRegMask(argReg) & RBM_BYTE_REGS) == 0)))
7676 assert(intTmpReg != REG_NA);
7677 noway_assert((genRegMask(intTmpReg) & RBM_BYTE_REGS) != 0);
7678 if (argReg != REG_NA)
7680 inst_RV_RV(INS_mov, intTmpReg, argReg, fieldType);
7686 if (argReg == REG_NA)
7690 if (fieldNode->isUsedFromSpillTemp())
7692 assert(!varTypeIsSIMD(fieldType)); // Q: can we get here with SIMD?
7693 assert(fieldNode->IsRegOptional());
7694 TempDsc* tmp = getSpillTempDsc(fieldNode);
7695 getEmitter()->emitIns_S(INS_push, emitActualTypeSize(fieldNode->TypeGet()), tmp->tdTempNum(), 0);
7696 compiler->tmpRlsTemp(tmp);
7700 assert(varTypeIsIntegralOrI(fieldNode));
7701 switch (fieldNode->OperGet())
7704 inst_TT(INS_push, fieldNode, 0, 0, emitActualTypeSize(fieldNode->TypeGet()));
7707 if (fieldNode->IsIconHandle())
7709 inst_IV_handle(INS_push, fieldNode->gtIntCon.gtIconVal);
7713 inst_IV(INS_push, fieldNode->gtIntCon.gtIconVal);
7720 currentOffset -= TARGET_POINTER_SIZE;
7721 AddStackLevel(TARGET_POINTER_SIZE);
// The stack has been adjusted; load the field into intTmpReg and then store it on the stack.
7726 assert(varTypeIsIntegralOrI(fieldNode));
7727 switch (fieldNode->OperGet())
7730 inst_RV_TT(INS_mov, intTmpReg, fieldNode);
7733 genSetRegToConst(intTmpReg, fieldNode->TypeGet(), fieldNode);
7738 genStoreRegToStackArg(fieldType, intTmpReg, fieldOffset - currentOffset);
7743 #if defined(FEATURE_SIMD)
7744 if (fieldType == TYP_SIMD12)
7746 assert(genIsValidFloatReg(simdTmpReg));
7747 genStoreSIMD12ToStack(argReg, simdTmpReg);
7750 #endif // defined(FEATURE_SIMD)
7752 genStoreRegToStackArg(fieldType, argReg, fieldOffset - currentOffset);
7756 // We always push a slot-rounded size
7757 currentOffset -= genTypeSize(fieldType);
7761 prevFieldOffset = fieldOffset;
7763 if (currentOffset != 0)
7765 // We don't expect padding at the beginning of a struct, but it could happen with explicit layout.
7766 inst_RV_IV(INS_sub, REG_SPBASE, currentOffset, EA_PTRSIZE);
7767 AddStackLevel(currentOffset);
7770 #endif // _TARGET_X86_
7772 //---------------------------------------------------------------------
7773 // genPutArgStk - generate code for passing an arg on the stack.
//    putArgStk - the GT_PUTARG_STK node; its type is the type of the outgoing stack argument
7782 void CodeGen::genPutArgStk(GenTreePutArgStk* putArgStk)
7784 var_types targetType = putArgStk->TypeGet();
7788 genAlignStackBeforeCall(putArgStk);
7790 if (varTypeIsStruct(targetType))
7792 (void)genAdjustStackForPutArgStk(putArgStk);
7793 genPutStructArgStk(putArgStk);
// The following logic applies only to the x86 architecture.
7798 assert(!varTypeIsFloating(targetType) || (targetType == putArgStk->gtOp1->TypeGet()));
7800 GenTreePtr data = putArgStk->gtOp1;
7802 // On a 32-bit target, all of the long arguments are handled with GT_FIELD_LIST,
7803 // and the type of the putArgStk is TYP_VOID.
7804 assert(targetType != TYP_LONG);
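// For example, a TYP_LONG arg is decomposed into two TYP_INT fields (at offsets 0 and 4) carried
// by a GT_FIELD_LIST, which is handled by genPutArgStkFieldList below.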
7806 const unsigned argSize = putArgStk->getArgSize();
7807 assert((argSize % TARGET_POINTER_SIZE) == 0);
7809 if (data->isContainedIntOrIImmed())
7811 if (data->IsIconHandle())
7813 inst_IV_handle(INS_push, data->gtIntCon.gtIconVal);
7817 inst_IV(INS_push, data->gtIntCon.gtIconVal);
7819 AddStackLevel(argSize);
7821 else if (data->OperGet() == GT_FIELD_LIST)
7823 genPutArgStkFieldList(putArgStk);
7827 // We should not see any contained nodes that are not immediates.
7828 assert(data->isUsedFromReg());
7829 genConsumeReg(data);
7830 genPushReg(targetType, data->gtRegNum);
7832 #else // !_TARGET_X86_
7834 unsigned baseVarNum = getBaseVarForPutArgStk(putArgStk);
7836 #ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
7838 if (varTypeIsStruct(targetType))
7840 m_stkArgVarNum = baseVarNum;
7841 m_stkArgOffset = putArgStk->getArgOffset();
7842 genPutStructArgStk(putArgStk);
7843 m_stkArgVarNum = BAD_VAR_NUM;
7846 #endif // FEATURE_UNIX_AMD64_STRUCT_PASSING
7848 noway_assert(targetType != TYP_STRUCT);
7849 assert(!varTypeIsFloating(targetType) || (targetType == putArgStk->gtOp1->TypeGet()));
7851 // Get argument offset on stack.
// Here we cross-check that the argument offset hasn't changed between lowering and codegen,
// since lowering stored the arg slot number in the GT_PUTARG_STK node.
7854 int argOffset = putArgStk->getArgOffset();
7857 fgArgTabEntryPtr curArgTabEntry = compiler->gtArgEntryByNode(putArgStk->gtCall, putArgStk);
7858 assert(curArgTabEntry);
7859 assert(argOffset == (int)curArgTabEntry->slotNum * TARGET_POINTER_SIZE);
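// For example, the arg in slot 2 must live at offset 2 * TARGET_POINTER_SIZE from the base of
// the outgoing argument area.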
7862 GenTreePtr data = putArgStk->gtOp1;
7864 if (data->isContainedIntOrIImmed())
7866 getEmitter()->emitIns_S_I(ins_Store(targetType), emitTypeSize(targetType), baseVarNum, argOffset,
7867 (int)data->AsIntConCommon()->IconValue());
7871 assert(data->isUsedFromReg());
7872 genConsumeReg(data);
7873 getEmitter()->emitIns_S_R(ins_Store(targetType), emitTypeSize(targetType), data->gtRegNum, baseVarNum,
7877 #endif // !_TARGET_X86_
7880 //---------------------------------------------------------------------
7881 // genPutArgReg - generate code for a GT_PUTARG_REG node
7884 // tree - the GT_PUTARG_REG node
7889 void CodeGen::genPutArgReg(GenTreeOp* tree)
7891 assert(tree->OperIs(GT_PUTARG_REG));
7893 var_types targetType = tree->TypeGet();
7894 regNumber targetReg = tree->gtRegNum;
7896 #ifndef FEATURE_UNIX_AMD64_STRUCT_PASSING
7897 assert(targetType != TYP_STRUCT);
7898 #endif // !FEATURE_UNIX_AMD64_STRUCT_PASSING
7900 GenTree* op1 = tree->gtOp1;
// If the child node is not already in the target register, move it there.
7904 if (targetReg != op1->gtRegNum)
7906 inst_RV_RV(ins_Copy(targetType), targetReg, op1->gtRegNum, targetType);
7909 genProduceReg(tree);
7913 // genPushReg: Push a register value onto the stack and adjust the stack level
7916 // type - the type of value to be stored
//    srcReg - the register containing the value
7920 // For TYP_LONG, the srcReg must be a floating point register.
7921 // Otherwise, the register type must be consistent with the given type.
7923 void CodeGen::genPushReg(var_types type, regNumber srcReg)
7925 unsigned size = genTypeSize(type);
7926 if (varTypeIsIntegralOrI(type) && type != TYP_LONG)
7928 assert(genIsValidIntReg(srcReg));
7929 inst_RV(INS_push, srcReg, type);
7934 emitAttr attr = emitTypeSize(type);
7935 if (type == TYP_LONG)
7937 // On x86, the only way we can push a TYP_LONG from a register is if it is in an xmm reg.
// This is only used when we are pushing a struct from memory to memory, and basically handles
// an 8-byte "chunk", as opposed to strictly a long type.
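// The emitted sequence here is roughly (a sketch, assuming the 8-byte xmm store selects movq):
//   sub esp, 8
//   movq qword ptr [esp], xmm<srcReg>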
7944 ins = ins_Store(type);
7946 assert(genIsValidFloatReg(srcReg));
7947 inst_RV_IV(INS_sub, REG_SPBASE, size, EA_PTRSIZE);
7948 getEmitter()->emitIns_AR_R(ins, attr, srcReg, REG_SPBASE, 0);
7950 AddStackLevel(size);
7952 #endif // _TARGET_X86_
7954 #if defined(FEATURE_PUT_STRUCT_ARG_STK)
7955 // genStoreRegToStackArg: Store a register value into the stack argument area
7958 // type - the type of value to be stored
//    srcReg - the register containing the value
7960 // offset - the offset from the base (see Assumptions below)
7963 // A type of TYP_STRUCT instructs this method to store a 16-byte chunk
7964 // at the given offset (i.e. not the full struct).
7967 // The caller must set the context appropriately before calling this method:
7968 // - On x64, m_stkArgVarNum must be set according to whether this is a regular or tail call.
7969 // - On x86, the caller must set m_pushStkArg if this method should push the argument.
7970 // Otherwise, the argument is stored at the given offset from sp.
// TODO: In the code below, the load and store instructions operate on 16 bytes, but the emitAttr
// passed is EA_8BYTE. The movdqa/movdqu instructions are 16-byte instructions, so this works,
// but it should probably be fixed.
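// For illustration (a sketch, assuming the context above has been set up): on x86 with
// m_pushStkArg set, an integer slot becomes "push reg"; on x64 the value is stored frame-relative
// into the outgoing argument area via m_stkArgVarNum, e.g. "mov qword ptr [outArgArea + offset], reg"
// (where "outArgArea" is a hypothetical name for the outgoing arg space local).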
7976 void CodeGen::genStoreRegToStackArg(var_types type, regNumber srcReg, int offset)
7978 assert(srcReg != REG_NA);
7983 if (type == TYP_STRUCT)
7986 // This should be changed!
7993 if (varTypeIsSIMD(type))
7995 assert(genIsValidFloatReg(srcReg));
7996 ins = ins_Store(type); // TODO-CQ: pass 'aligned' correctly
7999 #endif // FEATURE_SIMD
8001 if (type == TYP_LONG)
8003 assert(genIsValidFloatReg(srcReg));
8007 #endif // _TARGET_X86_
8009 assert((varTypeIsFloating(type) && genIsValidFloatReg(srcReg)) ||
8010 (varTypeIsIntegralOrI(type) && genIsValidIntReg(srcReg)));
8011 ins = ins_Store(type);
8013 attr = emitTypeSize(type);
8014 size = genTypeSize(type);
8020 genPushReg(type, srcReg);
8024 getEmitter()->emitIns_AR_R(ins, attr, srcReg, REG_SPBASE, offset);
8026 #else // !_TARGET_X86_
8027 assert(m_stkArgVarNum != BAD_VAR_NUM);
8028 getEmitter()->emitIns_S_R(ins, attr, srcReg, m_stkArgVarNum, m_stkArgOffset + offset);
8029 #endif // !_TARGET_X86_
8032 //---------------------------------------------------------------------
8033 // genPutStructArgStk - generate code for copying a struct arg on the stack by value.
// If the struct contains references to heap objects,
// the GC info for them is generated as well.
8038 // putArgStk - the GT_PUTARG_STK node
8041 // In the case of fixed out args, the caller must have set m_stkArgVarNum to the variable number
8042 // corresponding to the argument area (where we will put the argument on the stack).
8043 // For tail calls this is the baseVarNum = 0.
// For non-tail calls this is the outgoingArgSpace.
8045 void CodeGen::genPutStructArgStk(GenTreePutArgStk* putArgStk)
8047 var_types targetType = putArgStk->TypeGet();
8049 #if defined(_TARGET_X86_) && defined(FEATURE_SIMD)
8050 if (targetType == TYP_SIMD12)
8052 genPutArgStkSIMD12(putArgStk);
8055 #endif // defined(_TARGET_X86_) && defined(FEATURE_SIMD)
8057 if (varTypeIsSIMD(targetType))
8059 regNumber srcReg = genConsumeReg(putArgStk->gtGetOp1());
8060 assert((srcReg != REG_NA) && (genIsValidFloatReg(srcReg)));
8061 genStoreRegToStackArg(targetType, srcReg, 0);
8065 assert(targetType == TYP_STRUCT);
8067 if (putArgStk->gtNumberReferenceSlots == 0)
8069 switch (putArgStk->gtPutArgStkKind)
8071 case GenTreePutArgStk::Kind::RepInstr:
8072 genStructPutArgRepMovs(putArgStk);
8074 case GenTreePutArgStk::Kind::Unroll:
8075 genStructPutArgUnroll(putArgStk);
8077 case GenTreePutArgStk::Kind::Push:
8078 genStructPutArgUnroll(putArgStk);
// Unlike COPYOBJ, there is no need to suspend GC here: the refs are always copied with atomic operations.
8087 CLANG_FORMAT_COMMENT_ANCHOR;
// On x86, any struct that contains GC references must be stored to the stack using `push` instructions so
// that the emitter properly detects the need to update the method's GC information.
//
// Strictly speaking, it is only necessary to use `push` to store the GC references themselves, so for structs
// with large numbers of consecutive non-GC-ref-typed fields we may be able to improve the code size in the
// future.
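// For illustration (hypothetical layout): a 3-slot struct { int; object; int } copied this way
// emits, in reverse slot order:
//   push dword ptr [src+8]   ; EA_4BYTE
//   push dword ptr [src+4]   ; EA_GCREF (tells the emitter a GC ref was written)
//   push dword ptr [src]     ; EA_4BYTE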
8096 assert(m_pushStkArg);
8098 GenTree* srcAddr = putArgStk->gtGetOp1()->gtGetOp1();
8099 BYTE* gcPtrs = putArgStk->gtGcPtrs;
8100 const unsigned numSlots = putArgStk->gtNumSlots;
8102 regNumber srcRegNum = srcAddr->gtRegNum;
8103 const bool srcAddrInReg = srcRegNum != REG_NA;
8105 unsigned srcLclNum = 0;
8106 unsigned srcLclOffset = 0;
8109 genConsumeReg(srcAddr);
8113 assert(srcAddr->OperIsLocalAddr());
8115 srcLclNum = srcAddr->AsLclVarCommon()->gtLclNum;
8116 if (srcAddr->OperGet() == GT_LCL_FLD_ADDR)
8118 srcLclOffset = srcAddr->AsLclFld()->gtLclOffs;
8122 for (int i = numSlots - 1; i >= 0; --i)
8125 if (gcPtrs[i] == TYPE_GC_NONE)
8127 slotAttr = EA_4BYTE;
8129 else if (gcPtrs[i] == TYPE_GC_REF)
8131 slotAttr = EA_GCREF;
8135 assert(gcPtrs[i] == TYPE_GC_BYREF);
8136 slotAttr = EA_BYREF;
8139 const unsigned offset = i * TARGET_POINTER_SIZE;
8142 getEmitter()->emitIns_AR_R(INS_push, slotAttr, REG_NA, srcRegNum, offset);
8146 getEmitter()->emitIns_S(INS_push, slotAttr, srcLclNum, srcLclOffset + offset);
8148 AddStackLevel(TARGET_POINTER_SIZE);
8150 #else // !defined(_TARGET_X86_)
8152 // Consume these registers.
8153 // They may now contain gc pointers (depending on their type; gcMarkRegPtrVal will "do the right thing").
8154 genConsumePutStructArgStk(putArgStk, REG_RDI, REG_RSI, REG_NA);
8156 const bool srcIsLocal = putArgStk->gtOp1->AsObj()->gtOp1->OperIsLocalAddr();
8157 const emitAttr srcAddrAttr = srcIsLocal ? EA_PTRSIZE : EA_BYREF;
8160 unsigned numGCSlotsCopied = 0;
8163 BYTE* gcPtrs = putArgStk->gtGcPtrs;
8164 const unsigned numSlots = putArgStk->gtNumSlots;
8165 for (unsigned i = 0; i < numSlots;)
8167 if (gcPtrs[i] == TYPE_GC_NONE)
8169 // Let's see if we can use rep movsp (alias for movsd or movsq for 32 and 64 bits respectively)
8170 // instead of a sequence of movsp instructions to save cycles and code size.
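// For example (assuming CPOBJ_NONGC_SLOTS_LIMIT == 4): two adjacent non-GC slots emit two movsp
// instructions, while sixteen adjacent non-GC slots emit "mov ecx, 16; rep movsp".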
8171 unsigned adjacentNonGCSlotCount = 0;
8174 adjacentNonGCSlotCount++;
8176 } while ((i < numSlots) && (gcPtrs[i] == TYPE_GC_NONE));
8178 // If we have a very small contiguous non-ref region, it's better just to
8179 // emit a sequence of movsp instructions
8180 if (adjacentNonGCSlotCount < CPOBJ_NONGC_SLOTS_LIMIT)
8182 for (; adjacentNonGCSlotCount > 0; adjacentNonGCSlotCount--)
8189 getEmitter()->emitIns_R_I(INS_mov, EA_4BYTE, REG_RCX, adjacentNonGCSlotCount);
8190 instGen(INS_r_movsp);
8195 assert((gcPtrs[i] == TYPE_GC_REF) || (gcPtrs[i] == TYPE_GC_BYREF));
8197 // We have a GC (byref or ref) pointer
// TODO-Amd64-Unix: A better solution here (for code size and CQ) would be to use a movsp
// instruction, but the logic for emitting a GC info record is not available (it is internal to
// the emitter only); see the emitGCVarLiveUpd function. If we could call it separately, we could
// emit instGen(INS_movsp) together with the GC info.
8203 var_types memType = (gcPtrs[i] == TYPE_GC_REF) ? TYP_REF : TYP_BYREF;
8204 getEmitter()->emitIns_R_AR(ins_Load(memType), emitTypeSize(memType), REG_RCX, REG_RSI, 0);
8205 genStoreRegToStackArg(memType, REG_RCX, i * TARGET_POINTER_SIZE);
8214 // Source for the copy operation.
8215 // If a LocalAddr, use EA_PTRSIZE - copy from stack.
8216 // If not a LocalAddr, use EA_BYREF - the source location is not on the stack.
8217 getEmitter()->emitIns_R_I(INS_add, srcAddrAttr, REG_RSI, TARGET_POINTER_SIZE);
8219 // Always copying to the stack - outgoing arg area
8220 // (or the outgoing arg area of the caller for a tail call) - use EA_PTRSIZE.
8221 getEmitter()->emitIns_R_I(INS_add, EA_PTRSIZE, REG_RDI, TARGET_POINTER_SIZE);
8226 assert(numGCSlotsCopied == putArgStk->gtNumberReferenceSlots);
8227 #endif // _TARGET_X86_
8230 #endif // defined(FEATURE_PUT_STRUCT_ARG_STK)
8232 /*****************************************************************************
8234 * Create and record GC Info for the function.
#ifndef JIT32_GCENCODER
void
#else  // !JIT32_GCENCODER
void*
#endif // !JIT32_GCENCODER
8241 CodeGen::genCreateAndStoreGCInfo(unsigned codeSize, unsigned prologSize, unsigned epilogSize DEBUGARG(void* codePtr))
8243 #ifdef JIT32_GCENCODER
8244 return genCreateAndStoreGCInfoJIT32(codeSize, prologSize, epilogSize DEBUGARG(codePtr));
8245 #else // !JIT32_GCENCODER
8246 genCreateAndStoreGCInfoX64(codeSize, prologSize DEBUGARG(codePtr));
8247 #endif // !JIT32_GCENCODER
8250 #ifdef JIT32_GCENCODER
8251 void* CodeGen::genCreateAndStoreGCInfoJIT32(unsigned codeSize,
8252 unsigned prologSize,
8253 unsigned epilogSize DEBUGARG(void* codePtr))
8262 compiler->compInfoBlkSize =
8263 gcInfo.gcInfoBlockHdrSave(headerBuf, 0, codeSize, prologSize, epilogSize, &header, &s_cached);
8265 size_t argTabOffset = 0;
8266 size_t ptrMapSize = gcInfo.gcPtrTableSize(header, codeSize, &argTabOffset);
8270 if (genInterruptible)
8272 gcHeaderISize += compiler->compInfoBlkSize;
8273 gcPtrMapISize += ptrMapSize;
8277 gcHeaderNSize += compiler->compInfoBlkSize;
8278 gcPtrMapNSize += ptrMapSize;
8281 #endif // DISPLAY_SIZES
8283 compiler->compInfoBlkSize += ptrMapSize;
8285 /* Allocate the info block for the method */
8287 compiler->compInfoBlkAddr = (BYTE*)compiler->info.compCompHnd->allocGCInfo(compiler->compInfoBlkSize);
8289 #if 0 // VERBOSE_SIZES
8290 // TODO-X86-Cleanup: 'dataSize', below, is not defined
8292 // if (compiler->compInfoBlkSize > codeSize && compiler->compInfoBlkSize > 100)
8294 printf("[%7u VM, %7u+%7u/%7u x86 %03u/%03u%%] %s.%s\n",
8295 compiler->info.compILCodeSize,
8296 compiler->compInfoBlkSize,
8297 codeSize + dataSize,
8298 codeSize + dataSize - prologSize - epilogSize,
8299 100 * (codeSize + dataSize) / compiler->info.compILCodeSize,
8300 100 * (codeSize + dataSize + compiler->compInfoBlkSize) / compiler->info.compILCodeSize,
8301 compiler->info.compClassName,
8302 compiler->info.compMethodName);
8307 /* Fill in the info block and return it to the caller */
8309 void* infoPtr = compiler->compInfoBlkAddr;
8311 /* Create the method info block: header followed by GC tracking tables */
8313 compiler->compInfoBlkAddr +=
8314 gcInfo.gcInfoBlockHdrSave(compiler->compInfoBlkAddr, -1, codeSize, prologSize, epilogSize, &header, &s_cached);
8316 assert(compiler->compInfoBlkAddr == (BYTE*)infoPtr + headerSize);
8317 compiler->compInfoBlkAddr = gcInfo.gcPtrTableSave(compiler->compInfoBlkAddr, header, codeSize, &argTabOffset);
8318 assert(compiler->compInfoBlkAddr == (BYTE*)infoPtr + headerSize + ptrMapSize);
8324 BYTE* temp = (BYTE*)infoPtr;
8325 unsigned size = compiler->compInfoBlkAddr - temp;
8326 BYTE* ptab = temp + headerSize;
8328 noway_assert(size == headerSize + ptrMapSize);
8330 printf("Method info block - header [%u bytes]:", headerSize);
8332 for (unsigned i = 0; i < size; i++)
8336 printf("\nMethod info block - ptrtab [%u bytes]:", ptrMapSize);
8337 printf("\n %04X: %*c", i & ~0xF, 3 * (i & 0xF), ' ');
8342 printf("\n %04X: ", i);
8345 printf("%02X ", *temp++);
8355 if (compiler->opts.dspGCtbls)
8357 const BYTE* base = (BYTE*)infoPtr;
8359 unsigned methodSize;
8362 printf("GC Info for method %s\n", compiler->info.compFullName);
8363 printf("GC info size = %3u\n", compiler->compInfoBlkSize);
8365 size = gcInfo.gcInfoBlockHdrDump(base, &dumpHeader, &methodSize);
8366 // printf("size of header encoding is %3u\n", size);
8369 if (compiler->opts.dspGCtbls)
8372 size = gcInfo.gcDumpPtrTable(base, dumpHeader, methodSize);
8373 // printf("size of pointer table is %3u\n", size);
8375 noway_assert(compiler->compInfoBlkAddr == (base + size));
8380 if (jitOpts.testMask & 128)
8382 for (unsigned offs = 0; offs < codeSize; offs++)
8384 gcInfo.gcFindPtrsInFrame(infoPtr, codePtr, offs);
8388 #endif // DUMP_GC_TABLES
8390 /* Make sure we ended up generating the expected number of bytes */
8392 noway_assert(compiler->compInfoBlkAddr == (BYTE*)infoPtr + compiler->compInfoBlkSize);
8397 #else // !JIT32_GCENCODER
8398 void CodeGen::genCreateAndStoreGCInfoX64(unsigned codeSize, unsigned prologSize DEBUGARG(void* codePtr))
8400 IAllocator* allowZeroAlloc = new (compiler, CMK_GC) AllowZeroAllocator(compiler->getAllocatorGC());
8401 GcInfoEncoder* gcInfoEncoder = new (compiler, CMK_GC)
8402 GcInfoEncoder(compiler->info.compCompHnd, compiler->info.compMethodInfo, allowZeroAlloc, NOMEM);
8403 assert(gcInfoEncoder);
8405 // Follow the code pattern of the x86 gc info encoder (genCreateAndStoreGCInfoJIT32).
8406 gcInfo.gcInfoBlockHdrSave(gcInfoEncoder, codeSize, prologSize);
8408 // We keep the call count for the second call to gcMakeRegPtrTable() below.
8409 unsigned callCnt = 0;
// First we figure out the encoder IDs for the stack slots and registers.
8411 gcInfo.gcMakeRegPtrTable(gcInfoEncoder, codeSize, prologSize, GCInfo::MAKE_REG_PTR_MODE_ASSIGN_SLOTS, &callCnt);
8412 // Now we've requested all the slots we'll need; "finalize" these (make more compact data structures for them).
8413 gcInfoEncoder->FinalizeSlotIds();
// Now we can actually use those slot IDs to declare live ranges.
8415 gcInfo.gcMakeRegPtrTable(gcInfoEncoder, codeSize, prologSize, GCInfo::MAKE_REG_PTR_MODE_DO_WORK, &callCnt);
8417 if (compiler->opts.compDbgEnC)
// What we have to preserve is called the "frame header" (see comments in VM\eetwain.cpp): the
// return address, the saved RBP, and, for synchronized methods, the saved 'this' pointer and the
// lock-taken bool.
8425 // 4 slots for RBP + return address + RSI + RDI
8426 int preservedAreaSize = 4 * REGSIZE_BYTES;
8428 if (compiler->info.compFlags & CORINFO_FLG_SYNCH)
8430 if (!(compiler->info.compFlags & CORINFO_FLG_STATIC))
8432 preservedAreaSize += REGSIZE_BYTES;
8435 // bool in synchronized methods that tracks whether the lock has been taken (takes 4 bytes on stack)
8436 preservedAreaSize += 4;
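// For example (x64, REGSIZE_BYTES == 8), a synchronized instance method preserves
// 4 * 8 (RBP, return address, RSI, RDI) + 8 ('this') + 4 (lock-taken bool) = 44 bytes.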
// Used to signal both that the method is compiled for EnC, and also the size of the block at the top of
// the frame that must be preserved.
8441 gcInfoEncoder->SetSizeOfEditAndContinuePreservedArea(preservedAreaSize);
8444 if (compiler->opts.IsReversePInvoke())
8446 unsigned reversePInvokeFrameVarNumber = compiler->lvaReversePInvokeFrameVar;
8447 assert(reversePInvokeFrameVarNumber != BAD_VAR_NUM && reversePInvokeFrameVarNumber < compiler->lvaRefCount);
8448 LclVarDsc& reversePInvokeFrameVar = compiler->lvaTable[reversePInvokeFrameVarNumber];
8449 gcInfoEncoder->SetReversePInvokeFrameSlot(reversePInvokeFrameVar.lvStkOffs);
8452 gcInfoEncoder->Build();
// The GC encoder automatically puts the GC info in the right spot using ICorJitInfo::allocGCInfo(size_t).
// We save the values anyway for debugging purposes.
8456 compiler->compInfoBlkAddr = gcInfoEncoder->Emit();
8457 compiler->compInfoBlkSize = 0; // not exposed by the GCEncoder interface
8459 #endif // !JIT32_GCENCODER
8461 /*****************************************************************************
8462 * Emit a call to a helper function.
8466 void CodeGen::genEmitHelperCall(unsigned helper, int argSize, emitAttr retSize, regNumber callTargetReg)
8468 void* addr = nullptr;
8469 void* pAddr = nullptr;
8471 emitter::EmitCallType callType = emitter::EC_FUNC_TOKEN;
8472 addr = compiler->compGetHelperFtn((CorInfoHelpFunc)helper, &pAddr);
8473 regNumber callTarget = REG_NA;
8474 regMaskTP killMask = compiler->compHelperCallKillSet((CorInfoHelpFunc)helper);
8478 assert(pAddr != nullptr);
8480 // Absolute indirect call addr
// Note: the order of these checks matters. Always check for a pc-relative encoding first and a
// zero-relative encoding second, because the former encoding is one byte smaller than the latter.
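// For example (hypothetical addresses): if the helper's indirection cell is reachable via a 32-bit
// pc-relative or zero-based displacement, an indirect "call [disp32]" is emitted; on x64, a cell
// outside both ranges falls through to the register-indirect path below ("mov reg, addr; call [reg]").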
8483 if (genCodeIndirAddrCanBeEncodedAsPCRelOffset((size_t)pAddr) ||
8484 genCodeIndirAddrCanBeEncodedAsZeroRelOffset((size_t)pAddr))
8486 // generate call whose target is specified by 32-bit offset relative to PC or zero.
8487 callType = emitter::EC_FUNC_TOKEN_INDIR;
8492 #ifdef _TARGET_AMD64_
// If this indirect address cannot be encoded as a 32-bit offset relative to PC or zero,
// load it into REG_HELPER_CALL_TARGET and use register-indirect addressing mode to
// make the call.
8499 if (callTargetReg == REG_NA)
8501 // If a callTargetReg has not been explicitly provided, we will use REG_DEFAULT_HELPER_CALL_TARGET, but
8502 // this is only a valid assumption if the helper call is known to kill REG_DEFAULT_HELPER_CALL_TARGET.
8503 callTargetReg = REG_DEFAULT_HELPER_CALL_TARGET;
8504 regMaskTP callTargetMask = genRegMask(callTargetReg);
8505 noway_assert((callTargetMask & killMask) == callTargetMask);
// The call target must not overwrite any live variable, even though it may not be in the
// kill set for the call.
8511 regMaskTP callTargetMask = genRegMask(callTargetReg);
8512 noway_assert((callTargetMask & regSet.rsMaskVars) == RBM_NONE);
8516 callTarget = callTargetReg;
8517 CodeGen::genSetRegToIcon(callTarget, (ssize_t)pAddr, TYP_I_IMPL);
8518 callType = emitter::EC_INDIR_ARD;
8523 getEmitter()->emitIns_Call(callType,
8524 compiler->eeFindHelper(helper),
8525 INDEBUG_LDISASM_COMMA(nullptr) addr,
8528 MULTIREG_HAS_SECOND_GC_RET_ONLY_ARG(EA_UNKNOWN),
8529 gcInfo.gcVarPtrSetCur,
8530 gcInfo.gcRegGCrefSetCur,
8531 gcInfo.gcRegByrefSetCur,
8532 BAD_IL_OFFSET, // IL offset
8534 REG_NA, 0, 0, // xreg, xmul, disp
8536 emitter::emitNoGChelper(helper));
8539 regTracker.rsTrashRegSet(killMask);
8540 regTracker.rsTrashRegsForGCInterruptability();
8543 #if !defined(_TARGET_64BIT_)
8544 //-----------------------------------------------------------------------------
8546 // Code Generation for Long integers
8548 //-----------------------------------------------------------------------------
8550 //------------------------------------------------------------------------
8551 // genStoreLongLclVar: Generate code to store a non-enregistered long lclVar
8554 // treeNode - A TYP_LONG lclVar node.
8560 // 'treeNode' must be a TYP_LONG lclVar node for a lclVar that has NOT been promoted.
8561 // Its operand must be a GT_LONG node.
8563 void CodeGen::genStoreLongLclVar(GenTree* treeNode)
8565 emitter* emit = getEmitter();
8567 GenTreeLclVarCommon* lclNode = treeNode->AsLclVarCommon();
8568 unsigned lclNum = lclNode->gtLclNum;
8569 LclVarDsc* varDsc = &(compiler->lvaTable[lclNum]);
8570 assert(varDsc->TypeGet() == TYP_LONG);
8571 assert(!varDsc->lvPromoted);
8572 GenTreePtr op1 = treeNode->gtOp.gtOp1;
8573 noway_assert(op1->OperGet() == GT_LONG || op1->OperGet() == GT_MUL_LONG);
8574 genConsumeRegs(op1);
8576 if (op1->OperGet() == GT_LONG)
8578 // Definitions of register candidates will have been lowered to 2 int lclVars.
8579 assert(!treeNode->InReg());
8581 GenTreePtr loVal = op1->gtGetOp1();
8582 GenTreePtr hiVal = op1->gtGetOp2();
8584 // NYI: Contained immediates.
8585 NYI_IF((loVal->gtRegNum == REG_NA) || (hiVal->gtRegNum == REG_NA),
8586 "Store of long lclVar with contained immediate");
8588 emit->emitIns_S_R(ins_Store(TYP_INT), EA_4BYTE, loVal->gtRegNum, lclNum, 0);
8589 emit->emitIns_S_R(ins_Store(TYP_INT), EA_4BYTE, hiVal->gtRegNum, lclNum, genTypeSize(TYP_INT));
8591 else if (op1->OperGet() == GT_MUL_LONG)
8593 assert((op1->gtFlags & GTF_MUL_64RSLT) != 0);
8596 getEmitter()->emitIns_S_R(ins_Store(TYP_INT), emitTypeSize(TYP_INT), REG_LNGRET_LO, lclNum, 0);
8597 getEmitter()->emitIns_S_R(ins_Store(TYP_INT), emitTypeSize(TYP_INT), REG_LNGRET_HI, lclNum,
8598 genTypeSize(TYP_INT));
8601 #endif // !defined(_TARGET_64BIT_)
8603 /*****************************************************************************
8604 * Unit testing of the XArch emitter: generate a bunch of instructions into the prolog
8605 * (it's as good a place as any), then use COMPlus_JitLateDisasm=* to see if the late
* disassembler decodes the instructions the same way we do.
// Uncomment "#define ALL_XARCH_EMITTER_UNIT_TESTS" to run all the unit tests here.
8610 // After adding a unit test, and verifying it works, put it under this #ifdef, so we don't see it run every time.
8611 //#define ALL_XARCH_EMITTER_UNIT_TESTS
8613 #if defined(DEBUG) && defined(LATE_DISASM) && defined(_TARGET_AMD64_)
8614 void CodeGen::genAmd64EmitterUnitTests()
8621 if (!compiler->opts.altJit)
8623 // No point doing this in a "real" JIT.
8627 // Mark the "fake" instructions in the output.
8628 printf("*************** In genAmd64EmitterUnitTests()\n");
// Use lines like the following:
//     genDefineTempLabel(genCreateTempLabel());
// to create artificial labels to help separate groups of tests.
8637 CLANG_FORMAT_COMMENT_ANCHOR;
8639 #ifdef ALL_XARCH_EMITTER_UNIT_TESTS
8640 #ifdef FEATURE_AVX_SUPPORT
8641 genDefineTempLabel(genCreateTempLabel());
8643 // vhaddpd ymm0,ymm1,ymm2
8644 getEmitter()->emitIns_R_R_R(INS_haddpd, EA_32BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
8645 // vaddss xmm0,xmm1,xmm2
8646 getEmitter()->emitIns_R_R_R(INS_addss, EA_4BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
8647 // vaddsd xmm0,xmm1,xmm2
8648 getEmitter()->emitIns_R_R_R(INS_addsd, EA_8BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
8649 // vaddps xmm0,xmm1,xmm2
8650 getEmitter()->emitIns_R_R_R(INS_addps, EA_16BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
8651 // vaddps ymm0,ymm1,ymm2
8652 getEmitter()->emitIns_R_R_R(INS_addps, EA_32BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
8653 // vaddpd xmm0,xmm1,xmm2
8654 getEmitter()->emitIns_R_R_R(INS_addpd, EA_16BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
8655 // vaddpd ymm0,ymm1,ymm2
8656 getEmitter()->emitIns_R_R_R(INS_addpd, EA_32BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
8657 // vsubss xmm0,xmm1,xmm2
8658 getEmitter()->emitIns_R_R_R(INS_subss, EA_4BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
8659 // vsubsd xmm0,xmm1,xmm2
8660 getEmitter()->emitIns_R_R_R(INS_subsd, EA_8BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
// vsubps xmm0,xmm1,xmm2
8662 getEmitter()->emitIns_R_R_R(INS_subps, EA_16BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
8663 // vsubps ymm0,ymm1,ymm2
8664 getEmitter()->emitIns_R_R_R(INS_subps, EA_32BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
8665 // vsubpd xmm0,xmm1,xmm2
8666 getEmitter()->emitIns_R_R_R(INS_subpd, EA_16BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
8667 // vsubpd ymm0,ymm1,ymm2
8668 getEmitter()->emitIns_R_R_R(INS_subpd, EA_32BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
8669 // vmulss xmm0,xmm1,xmm2
8670 getEmitter()->emitIns_R_R_R(INS_mulss, EA_4BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
8671 // vmulsd xmm0,xmm1,xmm2
8672 getEmitter()->emitIns_R_R_R(INS_mulsd, EA_8BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
8673 // vmulps xmm0,xmm1,xmm2
8674 getEmitter()->emitIns_R_R_R(INS_mulps, EA_16BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
8675 // vmulpd xmm0,xmm1,xmm2
8676 getEmitter()->emitIns_R_R_R(INS_mulpd, EA_16BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
8677 // vmulps ymm0,ymm1,ymm2
8678 getEmitter()->emitIns_R_R_R(INS_mulps, EA_32BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
8679 // vmulpd ymm0,ymm1,ymm2
8680 getEmitter()->emitIns_R_R_R(INS_mulpd, EA_32BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
8681 // vandps xmm0,xmm1,xmm2
8682 getEmitter()->emitIns_R_R_R(INS_andps, EA_16BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
8683 // vandpd xmm0,xmm1,xmm2
8684 getEmitter()->emitIns_R_R_R(INS_andpd, EA_16BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
8685 // vandps ymm0,ymm1,ymm2
8686 getEmitter()->emitIns_R_R_R(INS_andps, EA_32BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
8687 // vandpd ymm0,ymm1,ymm2
8688 getEmitter()->emitIns_R_R_R(INS_andpd, EA_32BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
8689 // vorps xmm0,xmm1,xmm2
8690 getEmitter()->emitIns_R_R_R(INS_orps, EA_16BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
8691 // vorpd xmm0,xmm1,xmm2
8692 getEmitter()->emitIns_R_R_R(INS_orpd, EA_16BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
8693 // vorps ymm0,ymm1,ymm2
8694 getEmitter()->emitIns_R_R_R(INS_orps, EA_32BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
8695 // vorpd ymm0,ymm1,ymm2
8696 getEmitter()->emitIns_R_R_R(INS_orpd, EA_32BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
8697 // vdivss xmm0,xmm1,xmm2
8698 getEmitter()->emitIns_R_R_R(INS_divss, EA_4BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
8699 // vdivsd xmm0,xmm1,xmm2
8700 getEmitter()->emitIns_R_R_R(INS_divsd, EA_8BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
// vcvtss2sd xmm0,xmm1,xmm2
getEmitter()->emitIns_R_R_R(INS_cvtss2sd, EA_4BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
// vcvtsd2ss xmm0,xmm1,xmm2
getEmitter()->emitIns_R_R_R(INS_cvtsd2ss, EA_8BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
8710 #endif // FEATURE_AVX_SUPPORT
8711 #endif // ALL_XARCH_EMITTER_UNIT_TESTS
8712 printf("*************** End of genAmd64EmitterUnitTests()\n");
8715 #endif // defined(DEBUG) && defined(LATE_DISASM) && defined(_TARGET_AMD64_)
8717 #endif // _TARGET_AMD64_
8719 #endif // !LEGACY_BACKEND