// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.
/*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX                                                                           XX
XX                        Amd64/x86 Code Generator                           XX
XX                                                                           XX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
*/

#include "jitpch.h"
#ifdef _MSC_VER
#pragma hdrstop
#endif

#ifndef LEGACY_BACKEND // This file is ONLY used for the RyuJIT backend that uses the linear scan register allocator.

#ifdef _TARGET_XARCH_
#include "emit.h"
#include "codegen.h"
#include "lower.h"
#include "gcinfo.h"
#include "gcinfoencoder.h"
/*****************************************************************************
 *
 *  Generate code that will set the given register to the integer constant.
 */

void CodeGen::genSetRegToIcon(regNumber reg, ssize_t val, var_types type, insFlags flags)
{
    // Reg cannot be a FP reg
    assert(!genIsValidFloatReg(reg));

    // The only TYP_REF constant that can come down this path is a managed 'null' since it is not
    // relocatable. Other ref type constants (e.g. string objects) go through a different
    // code sequence.
    noway_assert(type != TYP_REF || val == 0);

    if (val == 0)
    {
        instGen_Set_Reg_To_Zero(emitActualTypeSize(type), reg, flags);
    }
    else
    {
        // TODO-XArch-CQ: needs all the optimized cases
        getEmitter()->emitIns_R_I(INS_mov, emitActualTypeSize(type), reg, val);
    }
}
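// Illustration (an assumed sketch, not from the original source; relies on the usual
// xarch zero idiom behind instGen_Set_Reg_To_Zero):
//     genSetRegToIcon(REG_RCX, 0,  TYP_INT);  // xor ecx, ecx  - smaller than mov ecx, 0
//     genSetRegToIcon(REG_RCX, 42, TYP_INT);  // mov ecx, 42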
/*****************************************************************************
 *
 *   Generate code to check that the GS cookie wasn't thrashed by a buffer
 *   overrun. If pushReg is true, preserve all registers around code sequence.
 *   Otherwise ECX could be modified.
 *
 *   Implementation Note: pushReg = true, in case of tail calls.
 */
void CodeGen::genEmitGSCookieCheck(bool pushReg)
{
    noway_assert(compiler->gsGlobalSecurityCookieAddr || compiler->gsGlobalSecurityCookieVal);

    // Make sure that EAX is reported as live GC-ref so that any GC that kicks in while
    // executing the GS cookie check will not collect the object pointed to by EAX.
    //
    // For Amd64 System V, a two-register-returned struct could be returned in RAX and RDX.
    // In such a case make sure that the correct GC-ness of RDX is reported as well, so
    // a GC object pointed to by RDX will not be collected.
    if (!pushReg)
    {
        // Handle multi-reg return type values
        if (compiler->compMethodReturnsMultiRegRetType())
        {
            ReturnTypeDesc retTypeDesc;
            if (varTypeIsLong(compiler->info.compRetNativeType))
            {
                retTypeDesc.InitializeLongReturnType(compiler);
            }
            else // we must have a struct return type
            {
                retTypeDesc.InitializeStructReturnType(compiler, compiler->info.compMethodInfo->args.retTypeClass);
            }

            unsigned regCount = retTypeDesc.GetReturnRegCount();

            // Only the x86 and x64 Unix ABIs allow multi-reg returns, and the
            // number of result regs should equal MAX_RET_REG_COUNT.
            assert(regCount == MAX_RET_REG_COUNT);

            for (unsigned i = 0; i < regCount; ++i)
            {
                gcInfo.gcMarkRegPtrVal(retTypeDesc.GetABIReturnReg(i), retTypeDesc.GetReturnRegType(i));
            }
        }
        else if (compiler->compMethodReturnsRetBufAddr())
        {
            // This is for returning in an implicit RetBuf.
            // If the address of the buffer is returned in REG_INTRET, mark the content of INTRET as ByRef.

            // In case the return is in an implicit RetBuf, the native return type should be a struct
            assert(varTypeIsStruct(compiler->info.compRetNativeType));

            gcInfo.gcMarkRegPtrVal(REG_INTRET, TYP_BYREF);
        }
        // ... all other cases.
        else
        {
#ifdef _TARGET_AMD64_
            // For x64, structs that are not returned in registers are always
            // returned in implicit RetBuf. If we reached here, we should not have
            // a RetBuf and the return type should not be a struct.
            assert(compiler->info.compRetBuffArg == BAD_VAR_NUM);
            assert(!varTypeIsStruct(compiler->info.compRetNativeType));
#endif // _TARGET_AMD64_

            // For x86 Windows we can't make such assertions since we generate code for returning of
            // the RetBuf in REG_INTRET only when the ProfilerHook is enabled. Otherwise
            // compRetNativeType could be TYP_STRUCT.
            gcInfo.gcMarkRegPtrVal(REG_INTRET, compiler->info.compRetNativeType);
        }
    }

    regNumber regGSCheck;
    regMaskTP regMaskGSCheck = RBM_NONE;

    if (!pushReg)
    {
        // Non-tail call: we can use any callee trash register that is not
        // a return register and does not contain the 'this' pointer (keep alive this), since
        // we are generating the GS cookie check after a GT_RETURN block.
        // Note: On Amd64 System V RDX is an arg register - REG_ARG_2 - as well
        // as a return register for two-register-returned structs.
        if (compiler->lvaKeepAliveAndReportThis() && compiler->lvaTable[compiler->info.compThisArg].lvRegister &&
            (compiler->lvaTable[compiler->info.compThisArg].lvRegNum == REG_ARG_0))
        {
            regGSCheck = REG_ARG_1;
        }
        else
        {
            regGSCheck = REG_ARG_0;
        }
    }
    else
    {
#ifdef _TARGET_X86_
        // It doesn't matter which register we pick, since we're going to save and restore it
        // around the check.
        // TODO-CQ: Can we optimize the choice of register to avoid doing the push/pop sometimes?
        regGSCheck     = REG_EAX;
        regMaskGSCheck = RBM_EAX;
#else  // !_TARGET_X86_
        // Tail calls from methods that need GS check: We need to preserve registers while
        // emitting the GS cookie check for a tail prefixed call or a jmp. To emit the GS cookie
        // check, we might need a register. This won't be an issue for jmp calls for the
        // reason mentioned below (see comment starting with "Jmp Calls:").
        //
        // The following are the possible solutions in case of tail prefixed calls:
        // 1) Use R11 - ignore tail prefix on calls that need to pass a param in R11 when
        //    present in methods that require GS cookie check. Rest of the tail calls that
        //    do not require R11 will be honored.
        // 2) Internal register - GT_CALL node reserves an internal register and emits GS
        //    cookie check as part of tail call codegen. GenExitCode() needs to special case
        //    fast tail calls implemented as epilog+jmp or such tail calls should always get
        //    dispatched via helper.
        // 3) Materialize GS cookie check as a separate node hanging off GT_CALL node in
        //    right execution order during rationalization.
        //
        // There are two calls that use R11: VSD and calli pinvokes with cookie param. Tail
        // prefix on pinvokes is ignored. That is, options 2 and 3 will allow tail prefixed
        // VSD calls from methods that need GS check.
        //
        // Tail prefixed calls: Right now for Jit64 compat, a method requiring GS cookie check
        // ignores the tail prefix. In future, if we intend to support tail calls from such a method,
        // consider one of the options mentioned above. For now adding an assert that we don't
        // expect to see a tail call in a method that requires GS check.
        noway_assert(!compiler->compTailCallUsed);

        // Jmp calls: specify the method handle using which the JIT queries the VM for its entry point
        // address, and hence it can neither be a VSD call nor a PInvoke calli with cookie
        // parameter. Therefore, in case of jmp calls it is safe to use R11.
        regGSCheck = REG_R11;
#endif // !_TARGET_X86_
    }

    regMaskTP byrefPushedRegs = RBM_NONE;
    regMaskTP norefPushedRegs = RBM_NONE;
    regMaskTP pushedRegs      = RBM_NONE;

    if (compiler->gsGlobalSecurityCookieAddr == nullptr)
    {
#if defined(_TARGET_AMD64_)
        // If the GS cookie value fits within 32-bits we can use 'cmp mem64, imm32'.
        // Otherwise, load the value into a reg and use 'cmp mem64, reg64'.
        if ((int)compiler->gsGlobalSecurityCookieVal != (ssize_t)compiler->gsGlobalSecurityCookieVal)
        {
            genSetRegToIcon(regGSCheck, compiler->gsGlobalSecurityCookieVal, TYP_I_IMPL);
            getEmitter()->emitIns_S_R(INS_cmp, EA_PTRSIZE, regGSCheck, compiler->lvaGSSecurityCookie, 0);
        }
        else
#endif // defined(_TARGET_AMD64_)
        {
            assert((int)compiler->gsGlobalSecurityCookieVal == (ssize_t)compiler->gsGlobalSecurityCookieVal);
            getEmitter()->emitIns_S_I(INS_cmp, EA_PTRSIZE, compiler->lvaGSSecurityCookie, 0,
                                      (int)compiler->gsGlobalSecurityCookieVal);
        }
    }
    else
    {
        // Ngen case - the GS cookie value needs to be accessed through an indirection.

        pushedRegs = genPushRegs(regMaskGSCheck, &byrefPushedRegs, &norefPushedRegs);

        instGen_Set_Reg_To_Imm(EA_HANDLE_CNS_RELOC, regGSCheck, (ssize_t)compiler->gsGlobalSecurityCookieAddr);
        getEmitter()->emitIns_R_AR(ins_Load(TYP_I_IMPL), EA_PTRSIZE, regGSCheck, regGSCheck, 0);
        getEmitter()->emitIns_S_R(INS_cmp, EA_PTRSIZE, regGSCheck, compiler->lvaGSSecurityCookie, 0);
    }

    BasicBlock*  gsCheckBlk = genCreateTempLabel();
    emitJumpKind jmpEqual   = genJumpKindForOper(GT_EQ, CK_SIGNED);
    inst_JMP(jmpEqual, gsCheckBlk);
    genEmitHelperCall(CORINFO_HELP_FAIL_FAST, 0, EA_UNKNOWN);
    genDefineTempLabel(gsCheckBlk);

    genPopRegs(pushedRegs, byrefPushedRegs, norefPushedRegs);
}
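// Illustration (an assumed sketch of the sequence built above, for a
// 32-bit-representable cookie in a frame-relative slot):
//     cmp   qword ptr [cookie], imm32     ; lvaGSSecurityCookie vs. expected value
//     je    gsCheckBlk
//     call  CORINFO_HELP_FAIL_FAST        ; cookie was thrashed by a buffer overrun
//   gsCheckBlk: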
BasicBlock* CodeGen::genCallFinally(BasicBlock* block)
{
#if FEATURE_EH_FUNCLETS
    // Generate a call to the finally, like this:
    //      mov         rcx,qword ptr [rbp + 20H]       // Load rcx with PSPSym
    //      call        finally-funclet
    //      jmp         finally-return                  // Only for non-retless finally calls
    // The jmp can be a NOP if we're going to the next block.
    // If we're generating code for the main function (not a funclet), and there is no localloc,
    // then RSP at this point is the same value as that stored in the PSPSym. So just copy RSP
    // instead of loading the PSPSym in this case, or if PSPSym is not used (CoreRT ABI).

    if ((compiler->lvaPSPSym == BAD_VAR_NUM) ||
        (!compiler->compLocallocUsed && (compiler->funCurrentFunc()->funKind == FUNC_ROOT)))
    {
#ifndef UNIX_X86_ABI
        inst_RV_RV(INS_mov, REG_ARG_0, REG_SPBASE, TYP_I_IMPL);
#endif // !UNIX_X86_ABI
    }
    else
    {
        getEmitter()->emitIns_R_S(ins_Load(TYP_I_IMPL), EA_PTRSIZE, REG_ARG_0, compiler->lvaPSPSym, 0);
    }
    getEmitter()->emitIns_J(INS_call, block->bbJumpDest);

    if (block->bbFlags & BBF_RETLESS_CALL)
    {
        // We have a retless call, and the last instruction generated was a call.
        // If the next block is in a different EH region (or is the end of the code
        // block), then we need to generate a breakpoint here (since it will never
        // get executed) to get proper unwind behavior.

        if ((block->bbNext == nullptr) || !BasicBlock::sameEHRegion(block, block->bbNext))
        {
            instGen(INS_BREAKPOINT); // This should never get executed
        }
    }
    else
    {
        // TODO-Linux-x86: Do we need to handle the GC information for this NOP or JMP specially, as is done for other
        // architectures?
#ifndef JIT32_GCENCODER
        // Because of the way the flowgraph is connected, the liveness info for this one instruction
        // after the call is not (can not be) correct in cases where a variable has a last use in the
        // handler. So turn off GC reporting for this single instruction.
        getEmitter()->emitDisableGC();
#endif // JIT32_GCENCODER

        // Now go to where the finally funclet needs to return to.
        if (block->bbNext->bbJumpDest == block->bbNext->bbNext)
        {
            // Fall-through.
            // TODO-XArch-CQ: Can we get rid of this instruction, and just have the call return directly
            // to the next instruction? This would depend on stack walking from within the finally
            // handler working without this instruction being in this special EH region.
            instGen(INS_nop);
        }
        else
        {
            inst_JMP(EJ_jmp, block->bbNext->bbJumpDest);
        }

#ifndef JIT32_GCENCODER
        getEmitter()->emitEnableGC();
#endif // JIT32_GCENCODER
    }

#else // !FEATURE_EH_FUNCLETS

    // If we are about to invoke a finally locally from a try block, we have to set the ShadowSP slot
    // corresponding to the finally's nesting level. When invoked in response to an exception, the
    // EE does this.
    //
    // We have a BBJ_CALLFINALLY followed by a BBJ_ALWAYS.
    //
    // We will emit :
    //      mov [ebp - (n + 1)], 0
    //      mov [ebp -  n     ], 0xFC
    //      push &step
    //      jmp  finallyBlock
    //
    // finallyBlock:
    //      ...
    //      jmp step
    //
    // step:
    //      mov [ebp -  n     ], 0
    //      jmp rest of method

    noway_assert(isFramePointerUsed());

    // Get the nesting level which contains the finally
    unsigned finallyNesting = 0;
    compiler->fgGetNestingLevel(block, &finallyNesting);

    // The last slot is reserved for ICodeManager::FixContext(ppEndRegion)
    unsigned filterEndOffsetSlotOffs;
    filterEndOffsetSlotOffs = (unsigned)(compiler->lvaLclSize(compiler->lvaShadowSPslotsVar) - TARGET_POINTER_SIZE);

    unsigned curNestingSlotOffs;
    curNestingSlotOffs = (unsigned)(filterEndOffsetSlotOffs - ((finallyNesting + 1) * TARGET_POINTER_SIZE));

    // Zero out the slot for the next nesting level
    instGen_Store_Imm_Into_Lcl(TYP_I_IMPL, EA_PTRSIZE, 0, compiler->lvaShadowSPslotsVar,
                               curNestingSlotOffs - TARGET_POINTER_SIZE);
    instGen_Store_Imm_Into_Lcl(TYP_I_IMPL, EA_PTRSIZE, LCL_FINALLY_MARK, compiler->lvaShadowSPslotsVar,
                               curNestingSlotOffs);

    // Now push the address where the finally funclet should return to directly.
    if (!(block->bbFlags & BBF_RETLESS_CALL))
    {
        assert(block->isBBCallAlwaysPair());
        getEmitter()->emitIns_J(INS_push_hide, block->bbNext->bbJumpDest);
    }
    else
    {
        // EE expects a DWORD, so we give it 0
        inst_IV(INS_push_hide, 0);
    }

    // Jump to the finally BB
    inst_JMP(EJ_jmp, block->bbJumpDest);

#endif // !FEATURE_EH_FUNCLETS

    // The BBJ_ALWAYS is used because the BBJ_CALLFINALLY can't point to the
    // jump target using bbJumpDest - that is already used to point
    // to the finally block. So just skip past the BBJ_ALWAYS unless the
    // block is RETLESS.
    if (!(block->bbFlags & BBF_RETLESS_CALL))
    {
        assert(block->isBBCallAlwaysPair());
        block = block->bbNext;
    }
    return block;
}
#if FEATURE_EH_FUNCLETS
void CodeGen::genEHCatchRet(BasicBlock* block)
{
    // Set RAX to the address the VM should return to after the catch.
    // Generate a RIP-relative
    //         lea reg, [rip + disp32] ; the RIP is implicit
    // which will be position-independent.
    getEmitter()->emitIns_R_L(INS_lea, EA_PTR_DSP_RELOC, block->bbJumpDest, REG_INTRET);
}

#else // !FEATURE_EH_FUNCLETS

void CodeGen::genEHFinallyOrFilterRet(BasicBlock* block)
{
    // The last statement of the block must be a GT_RETFILT, which has already been generated.
    assert(block->lastNode() != nullptr);
    assert(block->lastNode()->OperGet() == GT_RETFILT);

    if (block->bbJumpKind == BBJ_EHFINALLYRET)
    {
        assert(block->lastNode()->gtOp.gtOp1 == nullptr); // op1 == nullptr means endfinally

        // Return using a pop-jmp sequence. As the "try" block calls
        // the finally with a jmp, this leaves the x86 call-ret stack
        // balanced in the normal flow path.

        noway_assert(isFramePointerRequired());
        inst_RV(INS_pop_hide, REG_EAX, TYP_I_IMPL);
        inst_RV(INS_i_jmp, REG_EAX, TYP_I_IMPL);
    }
    else
    {
        assert(block->bbJumpKind == BBJ_EHFILTERRET);

        // The return value has already been computed.
        instGen_Return(0);
    }
}

#endif // !FEATURE_EH_FUNCLETS
// Move an immediate value into an integer register

void CodeGen::instGen_Set_Reg_To_Imm(emitAttr size, regNumber reg, ssize_t imm, insFlags flags)
{
    // reg cannot be a FP register
    assert(!genIsValidFloatReg(reg));

    if (!compiler->opts.compReloc)
    {
        size = EA_SIZE(size); // Strip any Reloc flags from size if we aren't doing relocs
    }

    if ((imm == 0) && !EA_IS_RELOC(size))
    {
        instGen_Set_Reg_To_Zero(size, reg, flags);
    }
    else
    {
        if (genDataIndirAddrCanBeEncodedAsPCRelOffset(imm))
        {
            getEmitter()->emitIns_R_AI(INS_lea, EA_PTR_DSP_RELOC, reg, imm);
        }
        else
        {
            getEmitter()->emitIns_R_I(INS_mov, size, reg, imm);
        }
    }
    regTracker.rsTrackRegIntCns(reg, imm);
}
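// Illustration (an assumed sketch: the immediate is an address within +/- 2GB of RIP):
// the PC-relative path above emits the 7-byte, relocation-friendly
//     lea reg, [rip + disp32]
// instead of the 10-byte "mov reg, imm64".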
/***********************************************************************************
 *
 * Generate code to set a register 'targetReg' of type 'targetType' to the constant
 * specified by the constant (GT_CNS_INT or GT_CNS_DBL) in 'tree'. This does not call
 * genProduceReg() on the target register.
 */
void CodeGen::genSetRegToConst(regNumber targetReg, var_types targetType, GenTreePtr tree)
{
    switch (tree->gtOper)
    {
        case GT_CNS_INT:
        {
            // relocatable values tend to come down as a CNS_INT of native int type
            // so the line between these two opcodes is kind of blurry
            GenTreeIntConCommon* con    = tree->AsIntConCommon();
            ssize_t              cnsVal = con->IconValue();

            if (con->ImmedValNeedsReloc(compiler))
            {
                instGen_Set_Reg_To_Imm(EA_HANDLE_CNS_RELOC, targetReg, cnsVal);
                regTracker.rsTrackRegTrash(targetReg);
            }
            else
            {
                genSetRegToIcon(targetReg, cnsVal, targetType);
            }
        }
        break;

        case GT_CNS_DBL:
        {
            double constValue = tree->gtDblCon.gtDconVal;

            // Make sure we use "xorpd reg, reg" only for +ve zero constant (0.0) and not for -ve zero (-0.0)
            if (*(__int64*)&constValue == 0)
            {
                // A faster/smaller way to generate 0
                instruction ins = genGetInsForOper(GT_XOR, targetType);
                inst_RV_RV(ins, targetReg, targetReg, targetType);
            }
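            // (Note: -0.0 has only the sign bit set - the 64-bit pattern
            // 0x8000000000000000 - so it fails the == 0 test above and falls through
            // to the memory load below, which preserves its sign.)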
            else
            {
                GenTreePtr cns;
                if (targetType == TYP_FLOAT)
                {
                    float f = forceCastToFloat(constValue);
                    cns     = genMakeConst(&f, targetType, tree, false);
                }
                else
                {
                    cns = genMakeConst(&constValue, targetType, tree, true);
                }

                inst_RV_TT(ins_Load(targetType), targetReg, cns);
            }
        }
        break;

        default:
            unreached();
    }
}
// Generate code to get the high N bits of a N*N=2N bit multiplication result
void CodeGen::genCodeForMulHi(GenTreeOp* treeNode)
{
    if (treeNode->OperGet() == GT_MULHI)
    {
        assert(!(treeNode->gtFlags & GTF_UNSIGNED));
    }
    assert(!treeNode->gtOverflowEx());

    regNumber targetReg  = treeNode->gtRegNum;
    var_types targetType = treeNode->TypeGet();
    emitter*  emit       = getEmitter();
    emitAttr  size       = emitTypeSize(treeNode);
    GenTree*  op1        = treeNode->gtOp.gtOp1;
    GenTree*  op2        = treeNode->gtOp.gtOp2;

    // to get the high bits of the multiply, we are constrained to using the
    // 1-op form: RDX:RAX = RAX * rm
    // The 3-op form (Rx=Ry*Rz) does not support it.
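    //
    // Illustration (an assumed sketch for signed 64-bit GT_MULHI; the moves are
    // elided when the operands already sit in the right registers):
    //     mov  rax, op1
    //     imul rm          ; RDX:RAX = RAX * rm
    //     mov  target, rdx ; high 64 bits of the product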
    genConsumeOperands(treeNode->AsOp());

    GenTree* regOp = op1;
    GenTree* rmOp  = op2;

    // Set rmOp to the memory operand (if any)
    if (op1->isUsedFromMemory() || (op2->isUsedFromReg() && (op2->gtRegNum == REG_RAX)))
    {
        regOp = op2;
        rmOp  = op1;
    }
    assert(regOp->isUsedFromReg());

    // Setup targetReg when neither of the source operands was a matching register
    if (regOp->gtRegNum != REG_RAX)
    {
        inst_RV_RV(ins_Copy(targetType), REG_RAX, regOp->gtRegNum, targetType);
    }

    instruction ins;
    if ((treeNode->gtFlags & GTF_UNSIGNED) == 0)
    {
        ins = INS_imulEAX;
    }
    else
    {
        ins = INS_mulEAX;
    }
    emit->emitInsBinary(ins, size, treeNode, rmOp);

    // Move the result to the desired register, if necessary
    if (treeNode->OperGet() == GT_MULHI && targetReg != REG_RDX)
    {
        inst_RV_RV(INS_mov, targetReg, REG_RDX, targetType);
    }
}
#ifdef _TARGET_X86_

//------------------------------------------------------------------------
// genCodeForLongUMod: Generate code for a tree of the form
//                     `(umod (gt_long x y) (const int))`
//
// Arguments:
//   node - the node for which to generate code
//
void CodeGen::genCodeForLongUMod(GenTreeOp* node)
{
    assert(node != nullptr);
    assert(node->OperGet() == GT_UMOD);
    assert(node->TypeGet() == TYP_INT);

    GenTreeOp* const dividend = node->gtOp1->AsOp();
    assert(dividend->OperGet() == GT_LONG);
    assert(varTypeIsLong(dividend));

    genConsumeOperands(node);

    GenTree* const dividendLo = dividend->gtOp1;
    GenTree* const dividendHi = dividend->gtOp2;
    assert(dividendLo->isUsedFromReg());
    assert(dividendHi->isUsedFromReg());

    GenTree* const divisor = node->gtOp2;
    assert(divisor->gtSkipReloadOrCopy()->OperGet() == GT_CNS_INT);
    assert(divisor->gtSkipReloadOrCopy()->isUsedFromReg());
    assert(divisor->gtSkipReloadOrCopy()->AsIntCon()->gtIconVal >= 2);
    assert(divisor->gtSkipReloadOrCopy()->AsIntCon()->gtIconVal <= 0x3fffffff);

    // dividendLo must be in RAX; dividendHi must be in RDX
    genCopyRegIfNeeded(dividendLo, REG_EAX);
    genCopyRegIfNeeded(dividendHi, REG_EDX);

    // At this point, EAX:EDX contains the 64-bit dividend and op2->gtRegNum
    // contains the 32-bit divisor. We want to generate the following code:
    //
    //   cmp edx, divisor->gtRegNum
    //   jb noOverflow
    //
    //   mov temp, eax
    //   mov eax, edx
    //   xor edx, edx
    //   div divisor->gtRegNum
    //   mov eax, temp
    //
    // noOverflow:
    //   div divisor->gtRegNum
    //
    // This works because (a * 2^32 + b) % c = ((a % c) * 2^32 + b) % c.
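    //
    // Worked example (made-up numbers), c = 7, a = 9, b = 4:
    //   (9 * 2^32 + 4) % 7 = ((9 % 7) * 2^32 + 4) % 7 = (2 * 2^32 + 4) % 7.
    // Once the high word has been reduced mod c it is < c, so the dividend of the
    // second div is < c * 2^32 and its quotient fits in 32 bits - no divide overflow.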
    BasicBlock* const noOverflow = genCreateTempLabel();

    //   cmp edx, divisor->gtRegNum
    //   jb noOverflow
    inst_RV_RV(INS_cmp, REG_EDX, divisor->gtRegNum);
    inst_JMP(EJ_jb, noOverflow);

    //   mov temp, eax
    //   mov eax, edx
    //   xor edx, edx
    //   div divisor->gtRegNum
    //   mov eax, temp
    const regNumber tempReg = node->GetSingleTempReg();
    inst_RV_RV(INS_mov, tempReg, REG_EAX, TYP_INT);
    inst_RV_RV(INS_mov, REG_EAX, REG_EDX, TYP_INT);
    instGen_Set_Reg_To_Zero(EA_PTRSIZE, REG_EDX);
    inst_RV(INS_div, divisor->gtRegNum, TYP_INT);
    inst_RV_RV(INS_mov, REG_EAX, tempReg, TYP_INT);

    // noOverflow:
    //   div divisor->gtRegNum
    genDefineTempLabel(noOverflow);
    inst_RV(INS_div, divisor->gtRegNum, TYP_INT);

    const regNumber targetReg = node->gtRegNum;
    if (targetReg != REG_EDX)
    {
        inst_RV_RV(INS_mov, targetReg, REG_RDX, TYP_INT);
    }
    genProduceReg(node);
}
#endif // _TARGET_X86_
//------------------------------------------------------------------------
// genCodeForDivMod: Generate code for a DIV or MOD operation.
//
// Arguments:
//    treeNode - the node to generate the code for
//
void CodeGen::genCodeForDivMod(GenTreeOp* treeNode)
{
    GenTree* dividend = treeNode->gtOp1;
#ifdef _TARGET_X86_
    if (varTypeIsLong(dividend->TypeGet()))
    {
        genCodeForLongUMod(treeNode);
        return;
    }
#endif // _TARGET_X86_

    GenTree*   divisor    = treeNode->gtOp2;
    genTreeOps oper       = treeNode->OperGet();
    emitAttr   size       = emitTypeSize(treeNode);
    regNumber  targetReg  = treeNode->gtRegNum;
    var_types  targetType = treeNode->TypeGet();
    emitter*   emit       = getEmitter();

    // dividend is in a register.
    assert(dividend->isUsedFromReg());

    genConsumeOperands(treeNode->AsOp());
    if (varTypeIsFloating(targetType))
    {
        // Check that divisor is a valid operand.
        // Note that a reg optional operand is treated as a memory op
        // if no register is allocated to it.
        assert(divisor->isUsedFromReg() || divisor->isMemoryOp() || divisor->IsCnsFltOrDbl() ||
               divisor->IsRegOptional());

        // Floating point div/rem operation
        assert(oper == GT_DIV || oper == GT_MOD);

        if (dividend->gtRegNum == targetReg)
        {
            emit->emitInsBinary(genGetInsForOper(treeNode->gtOper, targetType), size, treeNode, divisor);
        }
        else if (divisor->isUsedFromReg() && divisor->gtRegNum == targetReg)
        {
            // It is not possible to generate 2-operand divss or divsd where reg2 = reg1 / reg2
            // because divss/divsd reg1, reg2 will over-write reg1. Therefore, in case of AMD64
            // LSRA has to make sure that such a register assignment is not generated for floating
            // point div/rem operations.
            noway_assert(
                !"GT_DIV/GT_MOD (float): case of reg2 = reg1 / reg2, LSRA should never generate such a reg assignment");
        }
        else
        {
            inst_RV_RV(ins_Copy(targetType), targetReg, dividend->gtRegNum, targetType);
            emit->emitInsBinary(genGetInsForOper(treeNode->gtOper, targetType), size, treeNode, divisor);
        }
    }
    else
    {
        // dividend must be in RAX
        genCopyRegIfNeeded(dividend, REG_RAX);

        // zero or sign extend rax to rdx
        if (oper == GT_UMOD || oper == GT_UDIV)
        {
            instGen_Set_Reg_To_Zero(EA_PTRSIZE, REG_EDX);
        }
        else
        {
            emit->emitIns(INS_cdq, size);
            // the cdq instruction writes RDX, so clear the gcInfo for RDX
            gcInfo.gcMarkRegSetNpt(RBM_RDX);
        }

        // Perform the 'targetType' (64-bit or 32-bit) divide instruction
        instruction ins;
        if (oper == GT_UMOD || oper == GT_UDIV)
        {
            ins = INS_div;
        }
        else
        {
            ins = INS_idiv;
        }

        emit->emitInsBinary(ins, size, treeNode, divisor);

        // DIV/IDIV instructions always store the quotient in RAX and the remainder in RDX.
        // Move the result to the desired register, if necessary
        if (oper == GT_DIV || oper == GT_UDIV)
        {
            if (targetReg != REG_RAX)
            {
                inst_RV_RV(INS_mov, targetReg, REG_RAX, targetType);
            }
        }
        else
        {
            assert((oper == GT_MOD) || (oper == GT_UMOD));
            if (targetReg != REG_RDX)
            {
                inst_RV_RV(INS_mov, targetReg, REG_RDX, targetType);
            }
        }
    }
    genProduceReg(treeNode);
}
//------------------------------------------------------------------------
// genCodeForBinary: Generate code for many binary arithmetic operators
// This method is expected to have called genConsumeOperands() before calling it.
//
// Arguments:
//    treeNode - The binary operation for which we are generating code.
//
// Return Value:
//    None.
//
// Notes:
//    Mul and div variants have special constraints on x64 so are not handled here.
//    See the assert below for the operators that are handled.

void CodeGen::genCodeForBinary(GenTree* treeNode)
{
    const genTreeOps oper       = treeNode->OperGet();
    regNumber        targetReg  = treeNode->gtRegNum;
    var_types        targetType = treeNode->TypeGet();
    emitter*         emit       = getEmitter();

#if defined(_TARGET_64BIT_)
    assert(oper == GT_OR || oper == GT_XOR || oper == GT_AND || oper == GT_ADD || oper == GT_SUB);
#else  // !defined(_TARGET_64BIT_)
    assert(oper == GT_OR || oper == GT_XOR || oper == GT_AND || oper == GT_ADD_LO || oper == GT_ADD_HI ||
           oper == GT_SUB_LO || oper == GT_SUB_HI || oper == GT_MUL_LONG || oper == GT_DIV_HI || oper == GT_MOD_HI ||
           oper == GT_ADD || oper == GT_SUB);
#endif // !defined(_TARGET_64BIT_)

    GenTreePtr op1 = treeNode->gtGetOp1();
    GenTreePtr op2 = treeNode->gtGetOp2();

    // Commutative operations can mark op1 as contained or reg-optional to generate "op reg, memop/immed"
    if (!op1->isUsedFromReg())
    {
        assert(treeNode->OperIsCommutative());
        assert(op1->isMemoryOp() || op1->IsCnsNonZeroFltOrDbl() || op1->IsIntCnsFitsInI32() || op1->IsRegOptional());

        op1 = treeNode->gtGetOp2();
        op2 = treeNode->gtGetOp1();
    }

    instruction ins = genGetInsForOper(treeNode->OperGet(), targetType);

    // The arithmetic node must be sitting in a register (since it's not contained)
    noway_assert(targetReg != REG_NA);

    regNumber op1reg = op1->isUsedFromReg() ? op1->gtRegNum : REG_NA;
    regNumber op2reg = op2->isUsedFromReg() ? op2->gtRegNum : REG_NA;

    GenTreePtr dst;
    GenTreePtr src;

    // This is the case of reg1 = reg1 op reg2
    // We're ready to emit the instruction without any moves
    if (op1reg == targetReg)
    {
        dst = op1;
        src = op2;
    }
    // We have reg1 = reg2 op reg1
    // In order for this operation to be correct
    // we need that op is a commutative operation so
    // we can convert it into reg1 = reg1 op reg2 and emit
    // the same code as above
    else if (op2reg == targetReg)
    {
        noway_assert(GenTree::OperIsCommutative(oper));
        dst = op2;
        src = op1;
    }
    // now we know there are 3 different operands so attempt to use LEA
    else if (oper == GT_ADD && !varTypeIsFloating(treeNode) && !treeNode->gtOverflowEx() // LEA does not set flags
             && (op2->isContainedIntOrIImmed() || op2->isUsedFromReg()) && !treeNode->gtSetFlags())
    {
        if (op2->isContainedIntOrIImmed())
        {
            emit->emitIns_R_AR(INS_lea, emitTypeSize(treeNode), targetReg, op1reg,
                               (int)op2->AsIntConCommon()->IconValue());
        }
        else
        {
            assert(op2reg != REG_NA);
            emit->emitIns_R_ARX(INS_lea, emitTypeSize(treeNode), targetReg, op1reg, op2reg, 1, 0);
        }
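        // Illustration (an assumed sketch for integer GT_ADD with three distinct
        // operands): this path turns
        //     reg3 = reg1 + reg2   into   lea reg3, [reg1 + reg2]
        //     reg3 = reg1 + imm    into   lea reg3, [reg1 + imm]
        // avoiding a mov+add pair; it is only legal here because nothing reads the flags.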
        genProduceReg(treeNode);
        return;
    }
    // dest, op1 and op2 registers are different:
    // reg3 = reg1 op reg2
    // We can implement this by issuing a mov:
    // reg3 = reg1
    // reg3 = reg3 op reg2
    else
    {
        inst_RV_RV(ins_Copy(targetType), targetReg, op1reg, targetType);
        regTracker.rsTrackRegCopy(targetReg, op1reg);
        gcInfo.gcMarkRegPtrVal(targetReg, targetType);
        dst = treeNode;
        src = op2;
    }
    // try to use an inc or dec
    if (oper == GT_ADD && !varTypeIsFloating(treeNode) && src->isContainedIntOrIImmed() && !treeNode->gtOverflowEx())
    {
        if (src->IsIntegralConst(1))
        {
            emit->emitIns_R(INS_inc, emitTypeSize(treeNode), targetReg);
            genProduceReg(treeNode);
            return;
        }
        else if (src->IsIntegralConst(-1))
        {
            emit->emitIns_R(INS_dec, emitTypeSize(treeNode), targetReg);
            genProduceReg(treeNode);
            return;
        }
    }
    regNumber r = emit->emitInsBinary(ins, emitTypeSize(treeNode), dst, src);
    noway_assert(r == targetReg);

    if (treeNode->gtOverflowEx())
    {
#if !defined(_TARGET_64BIT_)
        assert(oper == GT_ADD || oper == GT_SUB || oper == GT_ADD_HI || oper == GT_SUB_HI);
#else
        assert(oper == GT_ADD || oper == GT_SUB);
#endif
        genCheckOverflow(treeNode);
    }
    genProduceReg(treeNode);
}
//------------------------------------------------------------------------
// isStructReturn: Returns whether the 'treeNode' is returning a struct.
//
// Arguments:
//    treeNode - The tree node to evaluate whether it is a struct return.
//
// Return Value:
//    For AMD64 *nix: returns true if the 'treeNode' is a GT_RETURN node, of type struct.
//                    Otherwise returns false.
//    For other platforms always returns false.
//
bool CodeGen::isStructReturn(GenTreePtr treeNode)
{
    // This method could be called for 'treeNode' of GT_RET_FILT or GT_RETURN.
    // For the GT_RET_FILT, the return is always
    // a bool or a void, for the end of a finally block.
    noway_assert(treeNode->OperGet() == GT_RETURN || treeNode->OperGet() == GT_RETFILT);
    if (treeNode->OperGet() != GT_RETURN)
    {
        return false;
    }

#ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
    return varTypeIsStruct(treeNode);
#else  // !FEATURE_UNIX_AMD64_STRUCT_PASSING
    assert(!varTypeIsStruct(treeNode));
    return false;
#endif // FEATURE_UNIX_AMD64_STRUCT_PASSING
}
//------------------------------------------------------------------------
// genStructReturn: Generates code for returning a struct.
//
// Arguments:
//    treeNode - The GT_RETURN tree node.
//
// Return Value:
//    None
//
// Assumption:
//    op1 of GT_RETURN node is either GT_LCL_VAR or multi-reg GT_CALL
void CodeGen::genStructReturn(GenTreePtr treeNode)
{
    assert(treeNode->OperGet() == GT_RETURN);
    GenTreePtr op1 = treeNode->gtGetOp1();

#ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
    if (op1->OperGet() == GT_LCL_VAR)
    {
        GenTreeLclVarCommon* lclVar = op1->AsLclVarCommon();
        LclVarDsc*           varDsc = &(compiler->lvaTable[lclVar->gtLclNum]);
        assert(varDsc->lvIsMultiRegRet);

        ReturnTypeDesc retTypeDesc;
        retTypeDesc.InitializeStructReturnType(compiler, varDsc->lvVerTypeInfo.GetClassHandle());
        unsigned regCount = retTypeDesc.GetReturnRegCount();
        assert(regCount == MAX_RET_REG_COUNT);

        if (varTypeIsEnregisterableStruct(op1))
        {
            // Right now the only enregisterable structs supported are SIMD vector types.
            assert(varTypeIsSIMD(op1));
            assert(op1->isUsedFromReg());

            // This is a case of operand is in a single reg and needs to be
            // returned in multiple ABI return registers.
            regNumber opReg = genConsumeReg(op1);
            regNumber reg0  = retTypeDesc.GetABIReturnReg(0);
            regNumber reg1  = retTypeDesc.GetABIReturnReg(1);

            if (opReg != reg0 && opReg != reg1)
            {
                // Operand reg is different from return regs.
                // Copy opReg to reg0 and let it be handled by one of the
                // two cases below.
                inst_RV_RV(ins_Copy(TYP_DOUBLE), reg0, opReg, TYP_DOUBLE);
                opReg = reg0;
            }

            if (opReg == reg0)
            {
                assert(opReg != reg1);

                // reg0 - already has required 8-byte in bit position [63:0].
                // reg1 = opReg.
                // swap upper and lower 8-bytes of reg1 so that desired 8-byte is in bit position [63:0].
                inst_RV_RV(ins_Copy(TYP_DOUBLE), reg1, opReg, TYP_DOUBLE);
            }
            else
            {
                assert(opReg == reg1);

                // reg0 = opReg.
                // swap upper and lower 8-bytes of reg1 so that desired 8-byte is in bit position [63:0].
                inst_RV_RV(ins_Copy(TYP_DOUBLE), reg0, opReg, TYP_DOUBLE);
            }
            inst_RV_RV_IV(INS_shufpd, EA_16BYTE, reg1, reg1, 0x01);
        }
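        // (Note: "shufpd xmm, xmm, 0x01" swaps the two 8-byte lanes of the register,
        // so the upper half of the 16-byte value lands in reg1[63:0], where the ABI
        // expects the second eightbyte.)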
        else
        {
            assert(op1->isUsedFromMemory());

            // Copy var on stack into ABI return registers
            int offset = 0;
            for (unsigned i = 0; i < regCount; ++i)
            {
                var_types type = retTypeDesc.GetReturnRegType(i);
                regNumber reg  = retTypeDesc.GetABIReturnReg(i);
                getEmitter()->emitIns_R_S(ins_Load(type), emitTypeSize(type), reg, lclVar->gtLclNum, offset);
                offset += genTypeSize(type);
            }
        }
    }
    else // op1 must be a multi-reg GT_CALL
    {
        assert(op1->IsMultiRegCall() || op1->IsCopyOrReloadOfMultiRegCall());

        genConsumeRegs(op1);

        GenTree*        actualOp1   = op1->gtSkipReloadOrCopy();
        GenTreeCall*    call        = actualOp1->AsCall();
        ReturnTypeDesc* retTypeDesc = call->GetReturnTypeDesc();
        unsigned        regCount    = retTypeDesc->GetReturnRegCount();
        assert(regCount == MAX_RET_REG_COUNT);

        // Handle circular dependency between call allocated regs and ABI return regs.
        //
        // It is possible under LSRA stress that originally allocated regs of call node,
        // say rax and rdx, are spilled and reloaded to rdx and rax respectively. But
        // GT_RETURN needs to move values as follows: rdx->rax, rax->rdx. A similar
        // kind of circular dependency could arise between xmm0 and xmm1 return regs.
        // Codegen is expected to handle such circular dependency.
        var_types regType0      = retTypeDesc->GetReturnRegType(0);
        regNumber returnReg0    = retTypeDesc->GetABIReturnReg(0);
        regNumber allocatedReg0 = call->GetRegNumByIdx(0);

        var_types regType1      = retTypeDesc->GetReturnRegType(1);
        regNumber returnReg1    = retTypeDesc->GetABIReturnReg(1);
        regNumber allocatedReg1 = call->GetRegNumByIdx(1);

        if (op1->IsCopyOrReload())
        {
            // GT_COPY/GT_RELOAD will have valid reg for those positions
            // that need to be copied or reloaded.
            regNumber reloadReg = op1->AsCopyOrReload()->GetRegNumByIdx(0);
            if (reloadReg != REG_NA)
            {
                allocatedReg0 = reloadReg;
            }

            reloadReg = op1->AsCopyOrReload()->GetRegNumByIdx(1);
            if (reloadReg != REG_NA)
            {
                allocatedReg1 = reloadReg;
            }
        }

        if (allocatedReg0 == returnReg1 && allocatedReg1 == returnReg0)
        {
            // Circular dependency - swap allocatedReg0 and allocatedReg1
            if (varTypeIsFloating(regType0))
            {
                assert(varTypeIsFloating(regType1));

                // The fastest way to swap two XMM regs is using PXOR
                inst_RV_RV(INS_pxor, allocatedReg0, allocatedReg1, TYP_DOUBLE);
                inst_RV_RV(INS_pxor, allocatedReg1, allocatedReg0, TYP_DOUBLE);
                inst_RV_RV(INS_pxor, allocatedReg0, allocatedReg1, TYP_DOUBLE);
            }
            else
            {
                assert(varTypeIsIntegral(regType0));
                assert(varTypeIsIntegral(regType1));
                inst_RV_RV(INS_xchg, allocatedReg1, allocatedReg0, TYP_I_IMPL);
            }
        }
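        // (Note: the three PXORs above are the classic xor swap - a ^= b; b ^= a;
        // a ^= b - which exchanges two XMM registers without a scratch register; for
        // the integer case a single xchg does the same job.)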
        else if (allocatedReg1 == returnReg0)
        {
            // Change the order of moves to correctly handle dependency.
            if (allocatedReg1 != returnReg1)
            {
                inst_RV_RV(ins_Copy(regType1), returnReg1, allocatedReg1, regType1);
            }

            if (allocatedReg0 != returnReg0)
            {
                inst_RV_RV(ins_Copy(regType0), returnReg0, allocatedReg0, regType0);
            }
        }
        else
        {
            // No circular dependency case.
            if (allocatedReg0 != returnReg0)
            {
                inst_RV_RV(ins_Copy(regType0), returnReg0, allocatedReg0, regType0);
            }

            if (allocatedReg1 != returnReg1)
            {
                inst_RV_RV(ins_Copy(regType1), returnReg1, allocatedReg1, regType1);
            }
        }
    }
#else  // !FEATURE_UNIX_AMD64_STRUCT_PASSING
    unreached();
#endif // FEATURE_UNIX_AMD64_STRUCT_PASSING
}
//------------------------------------------------------------------------
// genReturn: Generates code for return statement.
//            In case of struct return, delegates to the genStructReturn method.
//
// Arguments:
//    treeNode - The GT_RETURN or GT_RETFILT tree node.
//
// Return Value:
//    None
//
void CodeGen::genReturn(GenTreePtr treeNode)
{
    assert(treeNode->OperGet() == GT_RETURN || treeNode->OperGet() == GT_RETFILT);
    GenTreePtr op1        = treeNode->gtGetOp1();
    var_types  targetType = treeNode->TypeGet();

#ifdef DEBUG
    if (targetType == TYP_VOID)
    {
        assert(op1 == nullptr);
    }
#endif

#if !defined(_TARGET_64BIT_)
    if (treeNode->TypeGet() == TYP_LONG)
    {
        assert(op1 != nullptr);
        noway_assert(op1->OperGet() == GT_LONG);
        GenTree* loRetVal = op1->gtGetOp1();
        GenTree* hiRetVal = op1->gtGetOp2();
        noway_assert((loRetVal->gtRegNum != REG_NA) && (hiRetVal->gtRegNum != REG_NA));

        genConsumeReg(loRetVal);
        genConsumeReg(hiRetVal);
        if (loRetVal->gtRegNum != REG_LNGRET_LO)
        {
            inst_RV_RV(ins_Copy(targetType), REG_LNGRET_LO, loRetVal->gtRegNum, TYP_INT);
        }
        if (hiRetVal->gtRegNum != REG_LNGRET_HI)
        {
            inst_RV_RV(ins_Copy(targetType), REG_LNGRET_HI, hiRetVal->gtRegNum, TYP_INT);
        }
    }
    else
#endif // !defined(_TARGET_64BIT_)
    {
        if (isStructReturn(treeNode))
        {
            genStructReturn(treeNode);
        }
        else if (targetType != TYP_VOID)
        {
            assert(op1 != nullptr);
            noway_assert(op1->gtRegNum != REG_NA);

            // !! NOTE !! genConsumeReg will clear op1 as GC ref after it has
            // consumed a reg for the operand. This is because the variable
            // is dead after return. But we are issuing more instructions
            // like "profiler leave callback" after this consumption. So
            // if you are issuing more instructions after this point,
            // remember to keep the variable live up until the new method
            // exit point where it is actually dead.
            genConsumeReg(op1);

            regNumber retReg = varTypeIsFloating(treeNode) ? REG_FLOATRET : REG_INTRET;
#ifdef _TARGET_X86_
            if (varTypeIsFloating(treeNode))
            {
                // Spill the return value register from an XMM register to the stack, then load it on the x87 stack.
                // If it already has a home location, use that. Otherwise, we need a temp.
                if (genIsRegCandidateLocal(op1) && compiler->lvaTable[op1->gtLclVarCommon.gtLclNum].lvOnFrame)
                {
                    // Store local variable to its home location, if necessary.
                    if ((op1->gtFlags & GTF_REG_VAL) != 0)
                    {
                        op1->gtFlags &= ~GTF_REG_VAL;
                        inst_TT_RV(ins_Store(op1->gtType,
                                             compiler->isSIMDTypeLocalAligned(op1->gtLclVarCommon.gtLclNum)),
                                   op1, op1->gtRegNum);
                    }
                    // Now, load it to the fp stack.
                    getEmitter()->emitIns_S(INS_fld, emitTypeSize(op1), op1->AsLclVarCommon()->gtLclNum, 0);
                }
                else
                {
                    // Spill the value, which should be in a register, then load it to the fp stack.
                    // TODO-X86-CQ: Deal with things that are already in memory (don't call genConsumeReg yet).
                    op1->gtFlags |= GTF_SPILL;
                    regSet.rsSpillTree(op1->gtRegNum, op1);
                    op1->gtFlags |= GTF_SPILLED;
                    op1->gtFlags &= ~GTF_SPILL;

                    TempDsc* t = regSet.rsUnspillInPlace(op1, op1->gtRegNum);
                    inst_FS_ST(INS_fld, emitActualTypeSize(op1->gtType), t, 0);
                    op1->gtFlags &= ~GTF_SPILLED;
                    compiler->tmpRlsTemp(t);
                }
            }
            else
#endif // _TARGET_X86_
            {
                if (op1->gtRegNum != retReg)
                {
                    inst_RV_RV(ins_Copy(targetType), retReg, op1->gtRegNum, targetType);
                }
            }
        }
    }

#ifdef PROFILING_SUPPORTED
    // TODO-AMD64-Unix: If the profiler hook is implemented on *nix, make sure for 2 register returned structs
    //                  the RAX and RDX need to be kept alive. Make the necessary changes in lowerxarch.cpp
    //                  in the handling of the GT_RETURN statement.
    //                  Such structs containing GC pointers need to be handled by calling gcInfo.gcMarkRegSetNpt
    //                  for the return registers containing GC refs.

    // There will be a single return block while generating profiler ELT callbacks.
    //
    // Reason for not materializing Leave callback as a GT_PROF_HOOK node after GT_RETURN:
    // In flowgraph and other places assert that the last node of a block marked as
    // BBJ_RETURN is either a GT_RETURN or GT_JMP or a tail call. It would be nice to
    // maintain such an invariant irrespective of whether profiler hook needed or not.
    // Also, there is not much to be gained by materializing it as an explicit node.
    if (compiler->compCurBB == compiler->genReturnBB)
    {
        // !! NOTE !!
        // Since we are invalidating the assumption that we would slip into the epilog
        // right after the "return", we need to preserve the return reg's GC state
        // across the call until actual method return.
        if (varTypeIsGC(compiler->info.compRetType))
        {
            gcInfo.gcMarkRegPtrVal(REG_INTRET, compiler->info.compRetType);
        }

        genProfilingLeaveCallback();

        if (varTypeIsGC(compiler->info.compRetType))
        {
            gcInfo.gcMarkRegSetNpt(genRegMask(REG_INTRET));
        }
    }
#endif // PROFILING_SUPPORTED
}
/*****************************************************************************
 *
 * Generate code for a single node in the tree.
 * Preconditions: All operands have been evaluated
 *
 */
void CodeGen::genCodeForTreeNode(GenTreePtr treeNode)
{
    regNumber targetReg;
#if !defined(_TARGET_64BIT_)
    if (treeNode->TypeGet() == TYP_LONG)
    {
        // All long enregistered nodes will have been decomposed into their
        // constituent lo and hi nodes.
        targetReg = REG_NA;
    }
    else
#endif // !defined(_TARGET_64BIT_)
    {
        targetReg = treeNode->gtRegNum;
    }

    var_types targetType = treeNode->TypeGet();
    emitter*  emit       = getEmitter();

#ifdef DEBUG
    // Validate that all the operands for the current node are consumed in order.
    // This is important because LSRA ensures that any necessary copies will be
    // handled correctly.
    lastConsumedNode = nullptr;
    if (compiler->verbose)
    {
        unsigned seqNum = treeNode->gtSeqNum; // Useful for setting a conditional break in Visual Studio
        compiler->gtDispLIRNode(treeNode, "Generating: ");
    }
#endif // DEBUG

    // Is this a node whose value is already in a register? LSRA denotes this by
    // setting the GTF_REUSE_REG_VAL flag.
    if (treeNode->IsReuseRegVal())
    {
        // For now, this is only used for constant nodes.
        assert((treeNode->OperIsConst()));
        JITDUMP("  TreeNode is marked ReuseReg\n");
        return;
    }

    // contained nodes are part of their parents for codegen purposes
    // ex : immediates, most LEAs
    if (treeNode->isContained())
    {
        return;
    }

    switch (treeNode->gtOper)
    {
#ifndef JIT32_GCENCODER
        case GT_START_NONGC:
            getEmitter()->emitDisableGC();
            break;
#endif // !defined(JIT32_GCENCODER)

        case GT_PROF_HOOK:
#ifdef PROFILING_SUPPORTED
            // We should be seeing this only if profiler hook is needed
            noway_assert(compiler->compIsProfilerHookNeeded());

            // Right now this node is used only for tail calls. In future if
            // we intend to use it for Enter or Leave hooks, add a data member
            // to this node indicating the kind of profiler hook. For example,
            // helper number can be used.
            genProfilingLeaveCallback(CORINFO_HELP_PROF_FCN_TAILCALL);
#endif // PROFILING_SUPPORTED
            break;

        case GT_LCLHEAP:
            genLclHeap(treeNode);
            break;

        case GT_CNS_INT:
#ifdef _TARGET_X86_
            assert(!treeNode->IsIconHandle(GTF_ICON_TLS_HDL));
#endif // _TARGET_X86_
            __fallthrough;

        case GT_CNS_DBL:
            genSetRegToConst(targetReg, targetType, treeNode);
            genProduceReg(treeNode);
            break;

        case GT_NEG:
        case GT_NOT:
            if (varTypeIsFloating(targetType))
            {
                assert(treeNode->gtOper == GT_NEG);
                genSSE2BitwiseOp(treeNode);
            }
            else
            {
                GenTreePtr operand = treeNode->gtGetOp1();
                assert(operand->isUsedFromReg());
                regNumber operandReg = genConsumeReg(operand);

                if (operandReg != targetReg)
                {
                    inst_RV_RV(INS_mov, targetReg, operandReg, targetType);
                }

                instruction ins = genGetInsForOper(treeNode->OperGet(), targetType);
                inst_RV(ins, targetReg, targetType);
            }
            genProduceReg(treeNode);
            break;
        case GT_OR:
        case GT_XOR:
        case GT_AND:
            assert(varTypeIsIntegralOrI(treeNode));
            __fallthrough;

#if !defined(_TARGET_64BIT_)
        case GT_ADD_LO:
        case GT_ADD_HI:
        case GT_SUB_LO:
        case GT_SUB_HI:
#endif // !defined(_TARGET_64BIT_)

        case GT_ADD:
        case GT_SUB:
            genConsumeOperands(treeNode->AsOp());
            genCodeForBinary(treeNode);
            break;

        case GT_LSH:
        case GT_RSH:
        case GT_RSZ:
        case GT_ROL:
        case GT_ROR:
            genCodeForShift(treeNode);
            // genCodeForShift() calls genProduceReg()
            break;
#if !defined(_TARGET_64BIT_)

        case GT_LSH_HI:
        case GT_RSH_LO:
            // TODO-X86-CQ: This only handles the case where the operand being shifted is in a register. We don't
            // need sourceHi to be always in reg in case of GT_LSH_HI (because it could be moved from memory to
            // targetReg if sourceHi is a memory operand). Similarly for GT_RSH_LO, sourceLo could be marked as
            // contained memory-op. Even if not a memory-op, we could mark it as reg-optional.
            genCodeForShiftLong(treeNode);
            break;
#endif // !defined(_TARGET_64BIT_)
        case GT_CAST:
            if (varTypeIsFloating(targetType) && varTypeIsFloating(treeNode->gtOp.gtOp1))
            {
                // Casts float/double <--> double/float
                genFloatToFloatCast(treeNode);
            }
            else if (varTypeIsFloating(treeNode->gtOp.gtOp1))
            {
                // Casts float/double --> int32/int64
                genFloatToIntCast(treeNode);
            }
            else if (varTypeIsFloating(targetType))
            {
                // Casts int32/uint32/int64/uint64 --> float/double
                genIntToFloatCast(treeNode);
            }
            else
            {
                // Casts int <--> int
                genIntToIntCast(treeNode);
            }
            // The per-case functions call genProduceReg()
            break;
        case GT_LCL_VAR:
        {
            // lcl_vars are not defs
            assert((treeNode->gtFlags & GTF_VAR_DEF) == 0);

            GenTreeLclVarCommon* lcl            = treeNode->AsLclVarCommon();
            bool                 isRegCandidate = compiler->lvaTable[lcl->gtLclNum].lvIsRegCandidate();

            if (isRegCandidate && !(treeNode->gtFlags & GTF_VAR_DEATH))
            {
                assert(treeNode->InReg() || (treeNode->gtFlags & GTF_SPILLED));
            }

            // If this is a register candidate that has been spilled, genConsumeReg() will
            // reload it at the point of use. Otherwise, if it's not in a register, we load it here.

            if (!treeNode->InReg() && !(treeNode->gtFlags & GTF_SPILLED))
            {
                assert(!isRegCandidate);

#if defined(FEATURE_SIMD) && defined(_TARGET_X86_)
                // Loading of TYP_SIMD12 (i.e. Vector3) variable
                if (treeNode->TypeGet() == TYP_SIMD12)
                {
                    genLoadLclTypeSIMD12(treeNode);
                    break;
                }
#endif // defined(FEATURE_SIMD) && defined(_TARGET_X86_)

                emit->emitIns_R_S(ins_Load(treeNode->TypeGet(), compiler->isSIMDTypeLocalAligned(lcl->gtLclNum)),
                                  emitTypeSize(treeNode), treeNode->gtRegNum, lcl->gtLclNum, 0);
                genProduceReg(treeNode);
            }
        }
        break;
        case GT_LCL_FLD_ADDR:
        case GT_LCL_VAR_ADDR:
            // Address of a local var. This by itself should never be allocated a register.
            // If it is worth storing the address in a register then it should be cse'ed into
            // a temp and that would be allocated a register.
            noway_assert(targetType == TYP_BYREF);
            noway_assert(!treeNode->InReg());

            inst_RV_TT(INS_lea, targetReg, treeNode, 0, EA_BYREF);
            genProduceReg(treeNode);
            break;
        case GT_LCL_FLD:
        {
            noway_assert(targetType != TYP_STRUCT);
            noway_assert(treeNode->gtRegNum != REG_NA);

#ifdef FEATURE_SIMD
            // Loading of TYP_SIMD12 (i.e. Vector3) field
            if (treeNode->TypeGet() == TYP_SIMD12)
            {
                genLoadLclTypeSIMD12(treeNode);
                break;
            }
#endif // FEATURE_SIMD

            emitAttr size   = emitTypeSize(targetType);
            unsigned offs   = treeNode->gtLclFld.gtLclOffs;
            unsigned varNum = treeNode->gtLclVarCommon.gtLclNum;
            assert(varNum < compiler->lvaCount);

            emit->emitIns_R_S(ins_Move_Extend(targetType, treeNode->InReg()), size, targetReg, varNum, offs);
        }
            genProduceReg(treeNode);
            break;
        case GT_STORE_LCL_FLD:
        {
            noway_assert(targetType != TYP_STRUCT);
            noway_assert(!treeNode->InReg());
            assert(!varTypeIsFloating(targetType) || (targetType == treeNode->gtGetOp1()->TypeGet()));

#ifdef FEATURE_SIMD
            // storing of TYP_SIMD12 (i.e. Vector3) field
            if (treeNode->TypeGet() == TYP_SIMD12)
            {
                genStoreLclTypeSIMD12(treeNode);
                break;
            }
#endif // FEATURE_SIMD

            GenTreePtr op1 = treeNode->gtGetOp1();
            genConsumeRegs(op1);
            emit->emitInsBinary(ins_Store(targetType), emitTypeSize(treeNode), treeNode, op1);
        }
        break;
        case GT_STORE_LCL_VAR:
        {
            GenTreePtr op1 = treeNode->gtGetOp1();

            // var = call, where call returns a multi-reg return value
            // case is handled separately.
            if (op1->gtSkipReloadOrCopy()->IsMultiRegCall())
            {
                genMultiRegCallStoreToLocal(treeNode);
            }
            else
            {
                noway_assert(targetType != TYP_STRUCT);
                assert(!varTypeIsFloating(targetType) || (targetType == treeNode->gtGetOp1()->TypeGet()));

                unsigned   lclNum = treeNode->AsLclVarCommon()->gtLclNum;
                LclVarDsc* varDsc = &(compiler->lvaTable[lclNum]);

                // Ensure that lclVar nodes are typed correctly.
                assert(!varDsc->lvNormalizeOnStore() || treeNode->TypeGet() == genActualType(varDsc->TypeGet()));

#if !defined(_TARGET_64BIT_)
                if (treeNode->TypeGet() == TYP_LONG)
                {
                    genStoreLongLclVar(treeNode);
                    break;
                }
#endif // !defined(_TARGET_64BIT_)

#ifdef FEATURE_SIMD
                // storing of TYP_SIMD12 (i.e. Vector3) field
                if (treeNode->TypeGet() == TYP_SIMD12)
                {
                    genStoreLclTypeSIMD12(treeNode);
                    break;
                }

                if (varTypeIsSIMD(targetType) && (targetReg != REG_NA) && op1->IsCnsIntOrI())
                {
                    // This is only possible for a zero-init.
                    noway_assert(op1->IsIntegralConst(0));
                    genSIMDZero(targetType, varDsc->lvBaseType, targetReg);
                    genProduceReg(treeNode);
                    break;
                }
#endif // FEATURE_SIMD

                genConsumeRegs(op1);

                if (treeNode->gtRegNum == REG_NA)
                {
                    // stack store
                    emit->emitInsMov(ins_Store(targetType, compiler->isSIMDTypeLocalAligned(lclNum)),
                                     emitTypeSize(targetType), treeNode);
                    varDsc->lvRegNum = REG_STK;
                }
                else
                {
                    // Look for the case where we have a constant zero which we've marked for reuse,
                    // but which isn't actually in the register we want. In that case, it's better to create
                    // zero in the target register, because an xor is smaller than a copy. Note that we could
                    // potentially handle this in the register allocator, but we can't always catch it there
                    // because the target may not have a register allocated for it yet.
                    if (op1->isUsedFromReg() && (op1->gtRegNum != treeNode->gtRegNum) &&
                        (op1->IsIntegralConst(0) || op1->IsFPZero()))
                    {
                        op1->gtRegNum = REG_NA;
                        op1->ResetReuseRegVal();
                    }

                    if (!op1->isUsedFromReg())
                    {
                        // Currently, we assume that the non-reg source of a GT_STORE_LCL_VAR writing to a register
                        // must be a constant. However, in the future we might want to support an operand used from
                        // memory. This is a bit tricky because we have to decide it can be used from memory before
                        // register allocation,
                        // and this would be a case where, once that's done, we need to mark that node as always
                        // requiring a register - which we always assume now anyway, but once we "optimize" that
                        // we'll have to take cases like this into account.
                        assert((op1->gtRegNum == REG_NA) && op1->OperIsConst());
                        genSetRegToConst(treeNode->gtRegNum, targetType, op1);
                    }
                    else if (op1->gtRegNum != treeNode->gtRegNum)
                    {
                        assert(op1->gtRegNum != REG_NA);
                        emit->emitInsBinary(ins_Move_Extend(targetType, true), emitTypeSize(treeNode), treeNode, op1);
                    }
                }
            }

            if (treeNode->gtRegNum != REG_NA)
            {
                genProduceReg(treeNode);
            }
        }
        break;
        case GT_RETFILT:
            // A void GT_RETFILT is the end of a finally. For non-void filter returns we need to load the result in
            // the return register, if it's not already there. The processing is the same as GT_RETURN.
            if (targetType != TYP_VOID)
            {
                // For filters, the IL spec says the result is type int32. Further, the only specified legal values
                // are 0 or 1, with the use of other values "undefined".
                assert(targetType == TYP_INT);
            }

            __fallthrough;

        case GT_RETURN:
            genReturn(treeNode);
            break;
        case GT_LEA:
        {
            // if we are here, it is the case where there is an LEA that cannot
            // be folded into a parent instruction
            GenTreeAddrMode* lea = treeNode->AsAddrMode();
            genLeaInstruction(lea);
        }
        // genLeaInstruction calls genProduceReg()
        break;
        case GT_IND:
        {
#ifdef FEATURE_SIMD
            // Handling of Vector3 type values loaded through indirection.
            if (treeNode->TypeGet() == TYP_SIMD12)
            {
                genLoadIndTypeSIMD12(treeNode);
                break;
            }
#endif // FEATURE_SIMD

            GenTree* addr = treeNode->AsIndir()->Addr();
            if (addr->IsCnsIntOrI() && addr->IsIconHandle(GTF_ICON_TLS_HDL))
            {
                noway_assert(EA_ATTR(genTypeSize(treeNode->gtType)) == EA_PTRSIZE);
                emit->emitIns_R_C(ins_Load(TYP_I_IMPL), EA_PTRSIZE, treeNode->gtRegNum, FLD_GLOBAL_FS,
                                  (int)addr->gtIntCon.gtIconVal);
            }
            else
            {
                genConsumeAddress(addr);
                emit->emitInsMov(ins_Load(treeNode->TypeGet()), emitTypeSize(treeNode), treeNode);
            }

            genProduceReg(treeNode);
        }
        break;
        case GT_MULHI:
#ifdef _TARGET_X86_
        case GT_MUL_LONG:
#endif
            genCodeForMulHi(treeNode->AsOp());
            genProduceReg(treeNode);
            break;
        case GT_MUL:
        {
            instruction ins;
            emitAttr    size                  = emitTypeSize(treeNode);
            bool        isUnsignedMultiply    = ((treeNode->gtFlags & GTF_UNSIGNED) != 0);
            bool        requiresOverflowCheck = treeNode->gtOverflowEx();

            GenTree* op1 = treeNode->gtGetOp1();
            GenTree* op2 = treeNode->gtGetOp2();

            // there are 3 forms of x64 multiply:
            // 1-op form with 128 result:   RDX:RAX = RAX * rm
            // 2-op form:                   reg *= rm
            // 3-op form:                   reg = rm * imm

            genConsumeOperands(treeNode->AsOp());

            // This matches the 'mul' lowering in Lowering::SetMulOpCounts()
            //
            // immOp :: Only one operand can be an immediate
            // rmOp  :: Only one operand can be a memory op.
            // regOp :: A register op (especially the operand that matches 'targetReg')
            //          (can be nullptr when we have both a memory op and an immediate op)

            GenTree* immOp = nullptr;
            GenTree* rmOp  = op1;
            GenTree* regOp;

            if (op2->isContainedIntOrIImmed())
            {
                immOp = op2;
            }
            else if (op1->isContainedIntOrIImmed())
            {
                immOp = op1;
                rmOp  = op2;
            }

            if (immOp != nullptr)
            {
                // This must be a non-floating point operation.
                assert(!varTypeIsFloating(treeNode));

                // CQ: When possible use LEA for mul by imm 3, 5 or 9
                ssize_t imm = immOp->AsIntConCommon()->IconValue();

                if (!requiresOverflowCheck && rmOp->isUsedFromReg() && ((imm == 3) || (imm == 5) || (imm == 9)))
                {
                    // We will use the LEA instruction to perform this multiply
                    // Note that an LEA with base=x, index=x and scale=(imm-1) computes x*imm when imm=3,5 or 9.
                    unsigned int scale = (unsigned int)(imm - 1);
                    getEmitter()->emitIns_R_ARX(INS_lea, size, targetReg, rmOp->gtRegNum, rmOp->gtRegNum, scale, 0);
                }
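                // Illustration: for imm == 5 this emits "lea target, [rm + rm*4]",
                // i.e. rm * 5 in a single instruction that, unlike imul, leaves the
                // flags untouched (which is fine since no overflow check is required).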
                else
                {
                    // use the 3-op form with immediate
                    ins = getEmitter()->inst3opImulForReg(targetReg);
                    emit->emitInsBinary(ins, size, rmOp, immOp);
                }
            }
            else // we have no contained immediate operand
            {
                regOp = op1;
                rmOp  = op2;

                regNumber mulTargetReg = targetReg;
                if (isUnsignedMultiply && requiresOverflowCheck)
                {
                    ins          = INS_mulEAX;
                    mulTargetReg = REG_RAX;
                }
                else
                {
                    ins = genGetInsForOper(GT_MUL, targetType);
                }

                // Set rmOp to the memory operand (if any)
                // or set regOp to the op2 when it has the matching target register for our multiply op
                //
                if (op1->isUsedFromMemory() || (op2->isUsedFromReg() && (op2->gtRegNum == mulTargetReg)))
                {
                    regOp = op2;
                    rmOp  = op1;
                }
                assert(regOp->isUsedFromReg());

                // Setup targetReg when neither of the source operands was a matching register
                if (regOp->gtRegNum != mulTargetReg)
                {
                    inst_RV_RV(ins_Copy(targetType), mulTargetReg, regOp->gtRegNum, targetType);
                }

                emit->emitInsBinary(ins, size, treeNode, rmOp);

                // Move the result to the desired register, if necessary
                if ((ins == INS_mulEAX) && (targetReg != REG_RAX))
                {
                    inst_RV_RV(INS_mov, targetReg, REG_RAX, targetType);
                }
            }

            if (requiresOverflowCheck)
            {
                // Overflow checking is only used for non-floating point types
                noway_assert(!varTypeIsFloating(treeNode));

                genCheckOverflow(treeNode);
            }
        }
        genProduceReg(treeNode);
        break;
        case GT_MOD:
        case GT_UDIV:
        case GT_UMOD:
            // We shouldn't be seeing GT_MOD on float/double args as it should get morphed into a
            // helper call by the front-end. Similarly we shouldn't be seeing GT_UDIV and GT_UMOD
            // on float/double args.
            noway_assert(!varTypeIsFloating(treeNode));
            __fallthrough;

        case GT_DIV:
            genCodeForDivMod(treeNode->AsOp());
            break;

        case GT_INTRINSIC:
            genIntrinsic(treeNode);
            break;

#ifdef FEATURE_SIMD
        case GT_SIMD:
            genSIMDIntrinsic(treeNode->AsSIMD());
            break;
#endif // FEATURE_SIMD

        case GT_CKFINITE:
            genCkfinite(treeNode);
            break;
        case GT_EQ:
        case GT_NE:
        case GT_LT:
        case GT_LE:
        case GT_GE:
        case GT_GT:
        {
            // TODO-XArch-CQ: Check if we can use the currently set flags.
            // TODO-XArch-CQ: Check for the case where we can simply transfer the carry bit to a register
            //         (signed < or >= where targetReg != REG_NA)

            GenTreePtr op1     = treeNode->gtGetOp1();
            var_types  op1Type = op1->TypeGet();

            if (varTypeIsFloating(op1Type))
            {
                genCompareFloat(treeNode);
            }
#if !defined(_TARGET_64BIT_)
            // X86 Long comparison
            else if (varTypeIsLong(op1Type))
            {
#ifdef DEBUG
                // The result of an unlowered long compare on a 32-bit target must either be
                // a) materialized into a register, or
                // b) unused.
                //
                // A long compare that has a result that is used but not materialized into a register should
                // have been handled by Lowering::LowerCompare.

                LIR::Use use;
                assert((treeNode->gtRegNum != REG_NA) || !LIR::AsRange(compiler->compCurBB).TryGetUse(treeNode, &use));
#endif
                genCompareLong(treeNode);
            }
#endif // !defined(_TARGET_64BIT_)
            else
            {
                genCompareInt(treeNode);
            }
        }
        break;
        case GT_JTRUE:
        {
            GenTree* cmp = treeNode->gtOp.gtOp1;

            assert(cmp->OperIsCompare());
            assert(compiler->compCurBB->bbJumpKind == BBJ_COND);

#if !defined(_TARGET_64BIT_)
            // Long-typed compares should have been handled by Lowering::LowerCompare.
            assert(!varTypeIsLong(cmp->gtGetOp1()));
#endif

            // Get the "kind" and type of the comparison. Note that whether it is an unsigned cmp
            // is governed by a flag NOT by the inherent type of the node
            // TODO-XArch-CQ: Check if we can use the currently set flags.
            emitJumpKind jumpKind[2];
            bool         branchToTrueLabel[2];
            genJumpKindsForTree(cmp, jumpKind, branchToTrueLabel);

            BasicBlock* skipLabel = nullptr;
            if (jumpKind[0] != EJ_NONE)
            {
                BasicBlock* jmpTarget;
                if (branchToTrueLabel[0])
                {
                    jmpTarget = compiler->compCurBB->bbJumpDest;
                }
                else
                {
                    // This case arises only for ordered GT_EQ right now
                    assert((cmp->gtOper == GT_EQ) && ((cmp->gtFlags & GTF_RELOP_NAN_UN) == 0));
                    skipLabel = genCreateTempLabel();
                    jmpTarget = skipLabel;
                }

                inst_JMP(jumpKind[0], jmpTarget);
            }

            if (jumpKind[1] != EJ_NONE)
            {
                // the second conditional branch always has to be to the true label
                assert(branchToTrueLabel[1]);
                inst_JMP(jumpKind[1], compiler->compCurBB->bbJumpDest);
            }

            if (skipLabel != nullptr)
            {
                genDefineTempLabel(skipLabel);
            }
        }
        break;
        case GT_JCC:
        {
            GenTreeJumpCC* jcc = treeNode->AsJumpCC();

            assert(compiler->compCurBB->bbJumpKind == BBJ_COND);

            CompareKind  compareKind = ((jcc->gtFlags & GTF_UNSIGNED) != 0) ? CK_UNSIGNED : CK_SIGNED;
            emitJumpKind jumpKind    = genJumpKindForOper(jcc->gtCondition, compareKind);

            inst_JMP(jumpKind, compiler->compCurBB->bbJumpDest);
        }
        break;
        case GT_RETURNTRAP:
        {
            // this is nothing but a conditional call to CORINFO_HELP_STOP_FOR_GC
            // based on the contents of 'data'

            GenTree* data = treeNode->gtOp.gtOp1;
            genConsumeRegs(data);
            GenTreeIntCon cns = intForm(TYP_INT, 0);
            emit->emitInsBinary(INS_cmp, emitTypeSize(TYP_INT), data, &cns);

            BasicBlock* skipLabel = genCreateTempLabel();

            emitJumpKind jmpEqual = genJumpKindForOper(GT_EQ, CK_SIGNED);
            inst_JMP(jmpEqual, skipLabel);

            // emit the call to the EE-helper that stops for GC (or other reasons)
            regNumber tmpReg = treeNode->GetSingleTempReg();
            assert(genIsValidIntReg(tmpReg));

            genEmitHelperCall(CORINFO_HELP_STOP_FOR_GC, 0, EA_UNKNOWN, tmpReg);
            genDefineTempLabel(skipLabel);
        }
        break;
1936 genStoreInd(treeNode);
1940 // This is handled at the time we call genConsumeReg() on the GT_COPY
1945 // Swap is only supported for lclVar operands that are enregistered
1946 // We do not consume or produce any registers. Both operands remain enregistered.
1947 // However, the gc-ness may change.
1948 assert(genIsRegCandidateLocal(treeNode->gtOp.gtOp1) && genIsRegCandidateLocal(treeNode->gtOp.gtOp2));
1950 GenTreeLclVarCommon* lcl1 = treeNode->gtOp.gtOp1->AsLclVarCommon();
1951 LclVarDsc* varDsc1 = &(compiler->lvaTable[lcl1->gtLclNum]);
1952 var_types type1 = varDsc1->TypeGet();
1953 GenTreeLclVarCommon* lcl2 = treeNode->gtOp.gtOp2->AsLclVarCommon();
1954 LclVarDsc* varDsc2 = &(compiler->lvaTable[lcl2->gtLclNum]);
1955 var_types type2 = varDsc2->TypeGet();
1957 // We must have both int regs or both fp regs.
1958 assert(!varTypeIsFloating(type1) || varTypeIsFloating(type2));
1960 // FP swap is not yet implemented (and should have NYI'd in LSRA)
1961 assert(!varTypeIsFloating(type1));
1963 regNumber oldOp1Reg = lcl1->gtRegNum;
1964 regMaskTP oldOp1RegMask = genRegMask(oldOp1Reg);
1965 regNumber oldOp2Reg = lcl2->gtRegNum;
1966 regMaskTP oldOp2RegMask = genRegMask(oldOp2Reg);
1968 // We don't call genUpdateVarReg because we don't have a tree node with the new register.
1969 varDsc1->lvRegNum = oldOp2Reg;
1970 varDsc2->lvRegNum = oldOp1Reg;
1973 emitAttr size = EA_PTRSIZE;
1974 if (varTypeGCtype(type1) != varTypeGCtype(type2))
1976 // If the type specified to the emitter is a GC type, it will swap the GC-ness of the registers.
1977 // Otherwise it will leave them alone, which is correct if they have the same GC-ness.
1980 inst_RV_RV(INS_xchg, oldOp1Reg, oldOp2Reg, TYP_I_IMPL, size);
1982 // Update the gcInfo.
1983 // Manually remove these regs from the gc sets (mostly to avoid confusing duplicative dump output).
1984 gcInfo.gcRegByrefSetCur &= ~(oldOp1RegMask | oldOp2RegMask);
1985 gcInfo.gcRegGCrefSetCur &= ~(oldOp1RegMask | oldOp2RegMask);
1987 // gcMarkRegPtrVal will do the appropriate thing for non-gc types.
1988 // It will also dump the updates.
1989 gcInfo.gcMarkRegPtrVal(oldOp2Reg, type1);
1990 gcInfo.gcMarkRegPtrVal(oldOp1Reg, type2);
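// Worked example (hypothetical registers): if lcl1 is a TYP_REF in rcx and lcl2 a
// TYP_I_IMPL in rdx, the xchg above leaves the object reference in rdx; the two
// gcMarkRegPtrVal calls then mark rdx as GCref and rcx as non-GC, keeping the GC
// info consistent with the swapped register assignments.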
2001 genPutArgStk(treeNode->AsPutArgStk());
2006 #ifndef FEATURE_UNIX_AMD64_STRUCT_PASSING
2007 noway_assert(targetType != TYP_STRUCT);
2008 #endif // FEATURE_UNIX_AMD64_STRUCT_PASSING
2009 // commas show up here commonly, as part of a nullchk operation
2010 GenTree* op1 = treeNode->gtOp.gtOp1;
2011 // If child node is not already in the register we need, move it
2013 if (treeNode->gtRegNum != op1->gtRegNum)
2015 inst_RV_RV(ins_Copy(targetType), treeNode->gtRegNum, op1->gtRegNum, targetType);
2017 genProduceReg(treeNode);
2022 genCallInstruction(treeNode->AsCall());
2026 genJmpMethod(treeNode);
2032 genLockedInstructions(treeNode->AsOp());
2035 case GT_MEMORYBARRIER:
2036 instGen_MemoryBarrier();
2041 GenTreePtr location = treeNode->gtCmpXchg.gtOpLocation; // arg1
2042 GenTreePtr value = treeNode->gtCmpXchg.gtOpValue; // arg2
2043 GenTreePtr comparand = treeNode->gtCmpXchg.gtOpComparand; // arg3
2045 assert(location->gtRegNum != REG_NA && location->gtRegNum != REG_RAX);
2046 assert(value->gtRegNum != REG_NA && value->gtRegNum != REG_RAX);
2048 genConsumeReg(location);
2049 genConsumeReg(value);
2050 genConsumeReg(comparand);
2051 // comparand goes to RAX;
2052 // Note that we must issue this move after the genConsumeRegs(), in case any of the above
2053 // have a GT_COPY from RAX.
2054 if (comparand->gtRegNum != REG_RAX)
2056 inst_RV_RV(ins_Copy(comparand->TypeGet()), REG_RAX, comparand->gtRegNum, comparand->TypeGet());
2062 emit->emitIns_AR_R(INS_cmpxchg, emitTypeSize(targetType), value->gtRegNum, location->gtRegNum, 0);
2065 if (targetReg != REG_RAX)
2067 inst_RV_RV(ins_Copy(targetType), targetReg, REG_RAX, targetType);
2070 genProduceReg(treeNode);
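// The overall shape of the sequence is (an illustrative sketch; registers other
// than RAX are placeholders):
//      mov          rax, <comparandReg>     ; only if comparand is not in RAX
//      lock cmpxchg [<locationReg>], <valueReg>
//      mov          <targetReg>, rax        ; only if targetReg != RAX
// Either way, RAX (and thus targetReg) ends up holding the original memory value.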
2074 // do nothing - reload is just a marker.
2075 // The parent node will call genConsumeReg on this which will trigger the unspill of this node's child
2076 // into the register specified in this node.
2083 if (treeNode->gtFlags & GTF_NO_OP_NO)
2085 noway_assert(!"GTF_NO_OP_NO should not be set");
2089 getEmitter()->emitIns_Nop(1);
2093 case GT_ARR_BOUNDS_CHECK:
2096 #endif // FEATURE_SIMD
2097 genRangeCheck(treeNode);
2101 if (treeNode->gtRegNum != treeNode->AsPhysReg()->gtSrcReg)
2103 inst_RV_RV(INS_mov, treeNode->gtRegNum, treeNode->AsPhysReg()->gtSrcReg, targetType);
2105 genTransferRegGCState(treeNode->gtRegNum, treeNode->AsPhysReg()->gtSrcReg);
2107 genProduceReg(treeNode);
2115 assert(treeNode->gtOp.gtOp1->isUsedFromReg());
2116 regNumber reg = genConsumeReg(treeNode->gtOp.gtOp1);
2117 emit->emitIns_AR_R(INS_cmp, EA_4BYTE, reg, reg, 0);
2123 noway_assert(handlerGetsXcptnObj(compiler->compCurBB->bbCatchTyp));
2125 /* Catch arguments get passed in a register. genCodeForBBlist()
2126 would have marked it as holding a GC object, but not used. */
2128 noway_assert(gcInfo.gcRegGCrefSetCur & RBM_EXCEPTION_OBJECT);
2129 genConsumeReg(treeNode);
2132 #if !FEATURE_EH_FUNCLETS
2135 // Have to clear the ShadowSP of the nesting level which encloses the finally. Generates:
2136 // mov dword ptr [ebp-0xC], 0 // for some slot of the ShadowSP local var
2138 unsigned finallyNesting;
2139 finallyNesting = treeNode->gtVal.gtVal1;
2140 noway_assert(treeNode->gtVal.gtVal1 < compiler->compHndBBtabCount);
2141 noway_assert(finallyNesting < compiler->compHndBBtabCount);
2143 // The last slot is reserved for ICodeManager::FixContext(ppEndRegion)
2144 unsigned filterEndOffsetSlotOffs;
2145 PREFIX_ASSUME(compiler->lvaLclSize(compiler->lvaShadowSPslotsVar) >
2146 TARGET_POINTER_SIZE); // below doesn't underflow.
2147 filterEndOffsetSlotOffs =
2148 (unsigned)(compiler->lvaLclSize(compiler->lvaShadowSPslotsVar) - TARGET_POINTER_SIZE);
2150 unsigned curNestingSlotOffs;
2151 curNestingSlotOffs = filterEndOffsetSlotOffs - ((finallyNesting + 1) * TARGET_POINTER_SIZE);
2152 instGen_Store_Imm_Into_Lcl(TYP_I_IMPL, EA_PTRSIZE, 0, compiler->lvaShadowSPslotsVar, curNestingSlotOffs);
2154 #endif // !FEATURE_EH_FUNCLETS
2156 case GT_PINVOKE_PROLOG:
2157 noway_assert(((gcInfo.gcRegGCrefSetCur | gcInfo.gcRegByrefSetCur) & ~fullIntArgRegMask()) == 0);
2159 // the runtime side requires the codegen here to be consistent
2160 emit->emitDisableRandomNops();
2164 genPendingCallLabel = genCreateTempLabel();
2165 treeNode->gtLabel.gtLabBB = genPendingCallLabel;
2166 emit->emitIns_R_L(INS_lea, EA_PTR_DSP_RELOC, genPendingCallLabel, treeNode->gtRegNum);
2170 if (treeNode->OperIsCopyBlkOp() && !treeNode->AsBlk()->gtBlkOpGcUnsafe)
2172 assert(treeNode->AsObj()->gtGcPtrCount != 0);
2173 genCodeForCpObj(treeNode->AsObj());
2178 case GT_STORE_DYN_BLK:
2180 genCodeForStoreBlk(treeNode->AsBlk());
2184 genJumpTable(treeNode);
2187 case GT_SWITCH_TABLE:
2188 genTableBasedSwitch(treeNode);
2192 genCodeForArrIndex(treeNode->AsArrIndex());
2196 genCodeForArrOffset(treeNode->AsArrOffs());
2199 case GT_CLS_VAR_ADDR:
2200 getEmitter()->emitIns_R_C(INS_lea, EA_PTRSIZE, targetReg, treeNode->gtClsVar.gtClsVarHnd, 0);
2201 genProduceReg(treeNode);
2204 #if !defined(_TARGET_64BIT_)
2206 assert(treeNode->isUsedFromReg());
2207 genConsumeRegs(treeNode);
2212 // Do nothing; these nodes are simply markers for debug info.
2219 _snprintf_s(message, _countof(message), _TRUNCATE, "Unimplemented node type %s\n",
2220 GenTree::NodeName(treeNode->OperGet()));
2222 assert(!"Unknown node in codegen");
2228 //----------------------------------------------------------------------------------
2229 // genMultiRegCallStoreToLocal: store multi-reg return value of a call node to a local
2232 // treeNode - Gentree of GT_STORE_LCL_VAR
2238 // The child of store is a multi-reg call node.
2239 // genProduceReg() on treeNode is made by caller of this routine.
2241 void CodeGen::genMultiRegCallStoreToLocal(GenTreePtr treeNode)
2243 assert(treeNode->OperGet() == GT_STORE_LCL_VAR);
2245 #ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
2246 // Structs of size >=9 and <=16 are returned in two return registers on x64 Unix.
2247 assert(varTypeIsStruct(treeNode));
2249 // Assumption: current x64 Unix implementation requires that a multi-reg struct
2250 // var in 'var = call' is flagged as lvIsMultiRegRet to prevent it from
2251 // being struct promoted.
2252 unsigned lclNum = treeNode->AsLclVarCommon()->gtLclNum;
2253 LclVarDsc* varDsc = &(compiler->lvaTable[lclNum]);
2254 noway_assert(varDsc->lvIsMultiRegRet);
2256 GenTree* op1 = treeNode->gtGetOp1();
2257 GenTree* actualOp1 = op1->gtSkipReloadOrCopy();
2258 GenTreeCall* call = actualOp1->AsCall();
2259 assert(call->HasMultiRegRetVal());
2261 genConsumeRegs(op1);
2263 ReturnTypeDesc* retTypeDesc = call->GetReturnTypeDesc();
2264 assert(retTypeDesc->GetReturnRegCount() == MAX_RET_REG_COUNT);
2265 unsigned regCount = retTypeDesc->GetReturnRegCount();
2267 if (treeNode->gtRegNum != REG_NA)
2269 // Right now the only enregistrable structs supported are SIMD types.
2270 assert(varTypeIsSIMD(treeNode));
2271 assert(varTypeIsFloating(retTypeDesc->GetReturnRegType(0)));
2272 assert(varTypeIsFloating(retTypeDesc->GetReturnRegType(1)));
2274 // This is the case where the two 8-byte halves that comprise the operand are in
2275 // two different xmm registers and need to be assembled into a single
2276 // xmm register.
2277 regNumber targetReg = treeNode->gtRegNum;
2278 regNumber reg0 = call->GetRegNumByIdx(0);
2279 regNumber reg1 = call->GetRegNumByIdx(1);
2281 if (op1->IsCopyOrReload())
2283 // GT_COPY/GT_RELOAD will have valid reg for those positions
2284 // that need to be copied or reloaded.
2285 regNumber reloadReg = op1->AsCopyOrReload()->GetRegNumByIdx(0);
2286 if (reloadReg != REG_NA)
2291 reloadReg = op1->AsCopyOrReload()->GetRegNumByIdx(1);
2292 if (reloadReg != REG_NA)
2298 if (targetReg != reg0 && targetReg != reg1)
2300 // Copy reg0 into targetReg and let it be handled by one
2301 // of the cases below.
2302 inst_RV_RV(ins_Copy(TYP_DOUBLE), targetReg, reg0, TYP_DOUBLE);
2306 if (targetReg == reg0)
2308 // targetReg[63:0] = targetReg[63:0]
2309 // targetReg[127:64] = reg1[63:0]
2310 inst_RV_RV_IV(INS_shufpd, EA_16BYTE, targetReg, reg1, 0x00);
2314 assert(targetReg == reg1);
2316 // We need two shuffles to achieve this
2318 // targetReg[63:0] = targetReg[63:0]
2319 // targetReg[127:64] = reg0[63:0]
2322 // targetReg[63:0] = targetReg[127:64]
2323 // targetReg[127:64] = targetReg[63:0]
2325 // Essentially copy low 8-bytes from reg0 to high 8-bytes of targetReg
2326 // and next swap low and high 8-bytes of targetReg to have them
2327 // rearranged in the right order.
2328 inst_RV_RV_IV(INS_shufpd, EA_16BYTE, targetReg, reg0, 0x00);
2329 inst_RV_RV_IV(INS_shufpd, EA_16BYTE, targetReg, targetReg, 0x01);
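// Recap of the shufpd semantics relied on above (illustrative): for
// "shufpd dst, src, imm8":
//      dst[63:0]   = imm8.bit0 ? dst[127:64] : dst[63:0]
//      dst[127:64] = imm8.bit1 ? src[127:64] : src[63:0]
// so imm8 0x00 merges src's low 8 bytes into dst's high half, and imm8 0x01 with
// dst == src swaps dst's two halves.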
2336 for (unsigned i = 0; i < regCount; ++i)
2338 var_types type = retTypeDesc->GetReturnRegType(i);
2339 regNumber reg = call->GetRegNumByIdx(i);
2340 if (op1->IsCopyOrReload())
2342 // GT_COPY/GT_RELOAD will have valid reg for those positions
2343 // that need to be copied or reloaded.
2344 regNumber reloadReg = op1->AsCopyOrReload()->GetRegNumByIdx(i);
2345 if (reloadReg != REG_NA)
2351 assert(reg != REG_NA);
2352 getEmitter()->emitIns_S_R(ins_Store(type), emitTypeSize(type), reg, lclNum, offset);
2353 offset += genTypeSize(type);
2356 varDsc->lvRegNum = REG_STK;
2358 #elif defined(_TARGET_X86_)
2359 // Longs are returned in two return registers on x86.
2360 assert(varTypeIsLong(treeNode));
2362 // Assumption: current x86 implementation requires that a multi-reg long
2363 // var in 'var = call' is flagged as lvIsMultiRegRet to prevent it from
2364 // being promoted.
2365 unsigned lclNum = treeNode->AsLclVarCommon()->gtLclNum;
2366 LclVarDsc* varDsc = &(compiler->lvaTable[lclNum]);
2367 noway_assert(varDsc->lvIsMultiRegRet);
2369 GenTree* op1 = treeNode->gtGetOp1();
2370 GenTree* actualOp1 = op1->gtSkipReloadOrCopy();
2371 GenTreeCall* call = actualOp1->AsCall();
2372 assert(call->HasMultiRegRetVal());
2374 genConsumeRegs(op1);
2376 ReturnTypeDesc* retTypeDesc = call->GetReturnTypeDesc();
2377 unsigned regCount = retTypeDesc->GetReturnRegCount();
2378 assert(regCount == MAX_RET_REG_COUNT);
2382 for (unsigned i = 0; i < regCount; ++i)
2384 var_types type = retTypeDesc->GetReturnRegType(i);
2385 regNumber reg = call->GetRegNumByIdx(i);
2386 if (op1->IsCopyOrReload())
2388 // GT_COPY/GT_RELOAD will have valid reg for those positions
2389 // that need to be copied or reloaded.
2390 regNumber reloadReg = op1->AsCopyOrReload()->GetRegNumByIdx(i);
2391 if (reloadReg != REG_NA)
2397 assert(reg != REG_NA);
2398 getEmitter()->emitIns_S_R(ins_Store(type), emitTypeSize(type), reg, lclNum, offset);
2399 offset += genTypeSize(type);
2402 varDsc->lvRegNum = REG_STK;
2403 #else // !FEATURE_UNIX_AMD64_STRUCT_PASSING && !_TARGET_X86_
2404 assert(!"Unreached");
2405 #endif // !FEATURE_UNIX_AMD64_STRUCT_PASSING && !_TARGET_X86_
2408 //------------------------------------------------------------------------
2409 // genLclHeap: Generate code for localloc.
2412 // tree - the localloc tree to generate.
2415 // Note that for x86, we don't track ESP movements while generating the localloc code.
2416 // The ESP tracking is used to report stack pointer-relative GC info, which is not
2417 // interesting while doing the localloc construction. Also, for functions with localloc,
2418 // we have EBP frames, and EBP-relative locals, and ESP-relative accesses only for function
2419 // call arguments. We store the ESP after the localloc is complete in the LocAllocSP
2420 // variable. This variable is implicitly reported to the VM in the GC info (its position
2421 // is defined by convention relative to other items), and is used by the GC to find the
2422 // "base" stack pointer in functions with localloc.
2424 void CodeGen::genLclHeap(GenTreePtr tree)
2426 assert(tree->OperGet() == GT_LCLHEAP);
2427 assert(compiler->compLocallocUsed);
2429 GenTreePtr size = tree->gtOp.gtOp1;
2430 noway_assert((genActualType(size->gtType) == TYP_INT) || (genActualType(size->gtType) == TYP_I_IMPL));
2432 regNumber targetReg = tree->gtRegNum;
2433 regNumber regCnt = REG_NA;
2434 var_types type = genActualType(size->gtType);
2435 emitAttr easz = emitTypeSize(type);
2436 BasicBlock* endLabel = nullptr;
2440 if (compiler->opts.compStackCheckOnRet)
2442 noway_assert(compiler->lvaReturnEspCheck != 0xCCCCCCCC &&
2443 compiler->lvaTable[compiler->lvaReturnEspCheck].lvDoNotEnregister &&
2444 compiler->lvaTable[compiler->lvaReturnEspCheck].lvOnFrame);
2445 getEmitter()->emitIns_S_R(INS_cmp, EA_PTRSIZE, REG_SPBASE, compiler->lvaReturnEspCheck, 0);
2447 BasicBlock* esp_check = genCreateTempLabel();
2448 emitJumpKind jmpEqual = genJumpKindForOper(GT_EQ, CK_SIGNED);
2449 inst_JMP(jmpEqual, esp_check);
2450 getEmitter()->emitIns(INS_BREAKPOINT);
2451 genDefineTempLabel(esp_check);
2455 noway_assert(isFramePointerUsed()); // localloc requires Frame Pointer to be established since SP changes
2456 noway_assert(genStackLevel == 0); // Can't have anything on the stack
2458 unsigned stackAdjustment = 0;
2459 BasicBlock* loop = nullptr;
2461 // Compute the amount of memory to allocate so that it is properly STACK_ALIGN'ed.
2463 if (size->IsCnsIntOrI())
2465 // If size is a constant, then it must be contained.
2466 assert(size->isContained());
2468 // If amount is zero then return null in targetReg
2469 amount = size->gtIntCon.gtIconVal;
2472 instGen_Set_Reg_To_Zero(EA_PTRSIZE, targetReg);
2476 // 'amount' is the total number of bytes to localloc, rounded up to a STACK_ALIGN boundary.
2477 amount = AlignUp(amount, STACK_ALIGN);
2481 // The localloc requested memory size is non-constant.
2483 // Put the size value in targetReg. If it is zero, bail out by returning null in targetReg.
2484 genConsumeRegAndCopy(size, targetReg);
2485 endLabel = genCreateTempLabel();
2486 getEmitter()->emitIns_R_R(INS_test, easz, targetReg, targetReg);
2487 inst_JMP(EJ_je, endLabel);
2489 // Compute the size of the block to allocate and perform alignment.
2490 // If compInitMem=true, we can reuse targetReg as regcnt,
2491 // since we don't need any internal registers.
2492 if (compiler->info.compInitMem)
2494 assert(tree->AvailableTempRegCount() == 0);
2499 regCnt = tree->ExtractTempReg();
2500 if (regCnt != targetReg)
2502 // Above, we put the size in targetReg. Now, copy it to our new temp register if necessary.
2503 inst_RV_RV(INS_mov, regCnt, targetReg, size->TypeGet());
2507 // Round up the number of bytes to allocate to a STACK_ALIGN boundary. This is done
2508 // by adding STACK_ALIGN - 1 and then masking off the low bits with an "and".
2511 // However, in the initialized memory case, we need the count of STACK_ALIGN-sized
2512 // elements, not a byte count, after the alignment. So instead of the "and", which
2513 // becomes unnecessary, generate a shift, e.g.:
2517 inst_RV_IV(INS_add, regCnt, STACK_ALIGN - 1, emitActualTypeSize(type));
2519 if (compiler->info.compInitMem)
2521 // Convert the count from a count of bytes to a loop count. We will loop once per
2522 // stack alignment size, so each loop will zero 4 bytes on x86 and 16 bytes on x64.
2523 // Note that we zero a single reg-size word per iteration on x86, and 2 reg-size
2524 // words per iteration on x64. We will shift off all the stack alignment bits
2525 // added above, so there is no need for an 'and' instruction.
2527 // --- shr regCnt, 2 (or 4) ---
2528 inst_RV_SH(INS_SHIFT_RIGHT_LOGICAL, EA_PTRSIZE, regCnt, STACK_ALIGN_SHIFT_ALL);
2532 // Otherwise, mask off the low bits to align the byte count.
2533 inst_RV_IV(INS_AND, regCnt, ~(STACK_ALIGN - 1), emitActualTypeSize(type));
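// The net effect of the two paths above, sketched for x64 where STACK_ALIGN is 16:
//      add regCnt, 15
//      shr regCnt, 4      ; compInitMem: regCnt = count of 16-byte chunks
// -or-
//      add regCnt, 15
//      and regCnt, -16    ; !compInitMem: regCnt = STACK_ALIGN'ed byte count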
2537 #if FEATURE_FIXED_OUT_ARGS
2538 // If we have an outgoing arg area then we must adjust the SP by popping off the
2539 // outgoing arg area. We will restore it right before we return from this method.
2541 // Localloc returns stack space that aligned to STACK_ALIGN bytes. The following
2542 // are the cases that need to be handled:
2543 // i) Method has out-going arg area.
2544 // It is guaranteed that size of out-going arg area is STACK_ALIGN'ed (see fgMorphArgs).
2545 // Therefore, we will pop off the out-going arg area from RSP before allocating the localloc space.
2546 // ii) Method has no out-going arg area.
2547 // Nothing to pop off from the stack.
2548 if (compiler->lvaOutgoingArgSpaceSize > 0)
2550 assert((compiler->lvaOutgoingArgSpaceSize % STACK_ALIGN) == 0); // This must be true for the stack to remain
2551 // aligned.
2552 inst_RV_IV(INS_add, REG_SPBASE, compiler->lvaOutgoingArgSpaceSize, EA_PTRSIZE);
2553 stackAdjustment += compiler->lvaOutgoingArgSpaceSize;
2557 if (size->IsCnsIntOrI())
2559 // We should reach here only for non-zero, constant size allocations.
2561 assert((amount % STACK_ALIGN) == 0);
2562 assert((amount % REGSIZE_BYTES) == 0);
2564 // For small allocations we will generate up to six "push 0" instructions inline.
2565 size_t cntRegSizedWords = amount / REGSIZE_BYTES;
2566 if (cntRegSizedWords <= 6)
2568 for (; cntRegSizedWords != 0; cntRegSizedWords--)
2570 inst_IV(INS_push_hide, 0); // push_hide means don't track the stack
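// For example, a constant 16-byte localloc emits four "push 0" instructions on x86
// (REGSIZE_BYTES == 4) and two on x64 (REGSIZE_BYTES == 8).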
2575 bool doNoInitLessThanOnePageAlloc =
2576 !compiler->info.compInitMem && (amount < compiler->eeGetPageSize()); // must be < not <=
2579 bool needRegCntRegister = true;
2580 #else // !_TARGET_X86_
2581 bool needRegCntRegister = !doNoInitLessThanOnePageAlloc;
2582 #endif // !_TARGET_X86_
2584 if (needRegCntRegister)
2586 // If compInitMem=true, we can reuse targetReg as regcnt.
2587 // Since size is a constant, regCnt is not yet initialized.
2588 assert(regCnt == REG_NA);
2589 if (compiler->info.compInitMem)
2591 assert(tree->AvailableTempRegCount() == 0);
2596 regCnt = tree->ExtractTempReg();
2600 if (doNoInitLessThanOnePageAlloc)
2602 // Since the size is less than a page, simply adjust ESP.
2603 // ESP might already be in the guard page, so we must touch it BEFORE
2604 // the alloc, not after.
2605 CLANG_FORMAT_COMMENT_ANCHOR;
2608 // For x86, we don't want to use "sub ESP" because we don't want the emitter to track the adjustment
2609 // to ESP. So do the work in the count register.
2610 // TODO-CQ: manipulate ESP directly, to share code, reduce #ifdefs, and improve CQ. This would require
2611 // creating a way to temporarily turn off the emitter's tracking of ESP, maybe marking instrDescs as "don't
2612 // track".
2613 inst_RV_RV(INS_mov, regCnt, REG_SPBASE, TYP_I_IMPL);
2614 getEmitter()->emitIns_AR_R(INS_TEST, EA_4BYTE, REG_SPBASE, REG_SPBASE, 0);
2615 inst_RV_IV(INS_sub, regCnt, amount, EA_PTRSIZE);
2616 inst_RV_RV(INS_mov, REG_SPBASE, regCnt, TYP_I_IMPL);
2617 #else // !_TARGET_X86_
2618 getEmitter()->emitIns_AR_R(INS_TEST, EA_4BYTE, REG_SPBASE, REG_SPBASE, 0);
2619 inst_RV_IV(INS_sub, REG_SPBASE, amount, EA_PTRSIZE);
2620 #endif // !_TARGET_X86_
2625 // else, "mov regCnt, amount"
2627 if (compiler->info.compInitMem)
2629 // When initializing memory, we want 'amount' to be the loop count.
2630 assert((amount % STACK_ALIGN) == 0);
2631 amount /= STACK_ALIGN;
2634 genSetRegToIcon(regCnt, amount, ((int)amount == amount) ? TYP_INT : TYP_LONG);
2637 loop = genCreateTempLabel();
2638 if (compiler->info.compInitMem)
2640 // At this point 'regCnt' is set to the number of loop iterations for this loop, if each
2641 // iteration zeros (and subtracts from the stack pointer) STACK_ALIGN bytes.
2642 // Since we have to zero out the allocated memory AND ensure that RSP is always valid
2643 // by tickling the pages, we will just push 0's on the stack.
2645 assert(genIsValidIntReg(regCnt));
2648 genDefineTempLabel(loop);
2650 static_assert_no_msg((STACK_ALIGN % REGSIZE_BYTES) == 0);
2651 unsigned const count = (STACK_ALIGN / REGSIZE_BYTES);
2653 for (unsigned i = 0; i < count; i++)
2655 inst_IV(INS_push_hide, 0); // --- push REG_SIZE bytes of 0
2657 // Note that the stack must always be aligned to STACK_ALIGN bytes
2659 // Decrement the loop counter and loop if not done.
2660 inst_RV(INS_dec, regCnt, TYP_I_IMPL);
2661 inst_JMP(EJ_jne, loop);
2665 // At this point 'regCnt' is set to the total number of bytes to localloc.
2667 // We don't need to zero out the allocated memory. However, we do have
2668 // to tickle the pages to ensure that ESP is always valid and is
2669 // in sync with the "stack guard page". Note that in the worst
2670 // case ESP is on the last byte of the guard page. Thus you must
2671 // touch ESP+0 first, not ESP-0x1000.
2673 // Another subtlety is that you don't want ESP to be exactly on the
2674 // boundary of the guard page because PUSH is predecrement, thus
2675 // call setup would not touch the guard page but just beyond it
2677 // Note that we go through a few hoops so that ESP never points to
2678 // illegal pages at any time during the tickling process
2681 // add REGCNT, ESP // reg now holds ultimate ESP
2682 // jb loop // result is smaller than original ESP (no wrap around)
2683 // xor REGCNT, REGCNT // Overflow, pick lowest possible number
2685 // test ESP, [ESP+0] // tickle the page
2687 // sub REGTMP, PAGE_SIZE
2694 inst_RV(INS_NEG, regCnt, TYP_I_IMPL);
2695 inst_RV_RV(INS_add, regCnt, REG_SPBASE, TYP_I_IMPL);
2696 inst_JMP(EJ_jb, loop);
2698 instGen_Set_Reg_To_Zero(EA_PTRSIZE, regCnt);
2700 genDefineTempLabel(loop);
2702 // Tickle the page at the current ESP, then move the decremented value back
2703 // into ESP. Note that the tickle has to be done BEFORE the update of ESP,
2704 // since ESP might already be on the guard page. It is OK to leave
2705 // the final value of ESP on the guard page.
2706 getEmitter()->emitIns_AR_R(INS_TEST, EA_4BYTE, REG_SPBASE, REG_SPBASE, 0);
2708 // This is a harmless trick to avoid the emitter trying to track the
2709 // decrement of the ESP - we do the subtraction in another reg instead
2710 // of adjusting ESP directly.
2711 regNumber regTmp = tree->GetSingleTempReg();
2713 inst_RV_RV(INS_mov, regTmp, REG_SPBASE, TYP_I_IMPL);
2714 inst_RV_IV(INS_sub, regTmp, compiler->eeGetPageSize(), EA_PTRSIZE);
2715 inst_RV_RV(INS_mov, REG_SPBASE, regTmp, TYP_I_IMPL);
2717 inst_RV_RV(INS_cmp, REG_SPBASE, regCnt, TYP_I_IMPL);
2718 inst_JMP(EJ_jae, loop);
2720 // Move the final value to ESP
2721 inst_RV_RV(INS_mov, REG_SPBASE, regCnt);
2725 // Re-adjust SP to allocate out-going arg area
2726 if (stackAdjustment > 0)
2728 assert((stackAdjustment % STACK_ALIGN) == 0); // This must be true for the stack to remain aligned
2729 inst_RV_IV(INS_sub, REG_SPBASE, stackAdjustment, EA_PTRSIZE);
2732 // Return the stackalloc'ed address in result register.
2733 // TargetReg = RSP + stackAdjustment.
2734 getEmitter()->emitIns_R_AR(INS_lea, EA_PTRSIZE, targetReg, REG_SPBASE, stackAdjustment);
2736 if (endLabel != nullptr)
2738 genDefineTempLabel(endLabel);
2743 // Write the lvaLocAllocSPvar stack frame slot
2744 if (compiler->lvaLocAllocSPvar != BAD_VAR_NUM)
2746 getEmitter()->emitIns_S_R(ins_Store(TYP_I_IMPL), EA_PTRSIZE, REG_SPBASE, compiler->lvaLocAllocSPvar, 0);
2750 if (compiler->opts.compNeedStackProbes)
2752 genGenerateStackProbe();
2758 if (compiler->opts.compStackCheckOnRet)
2760 noway_assert(compiler->lvaReturnEspCheck != 0xCCCCCCCC &&
2761 compiler->lvaTable[compiler->lvaReturnEspCheck].lvDoNotEnregister &&
2762 compiler->lvaTable[compiler->lvaReturnEspCheck].lvOnFrame);
2763 getEmitter()->emitIns_S_R(ins_Store(TYP_I_IMPL), EA_PTRSIZE, REG_SPBASE, compiler->lvaReturnEspCheck, 0);
2767 genProduceReg(tree);
2770 void CodeGen::genCodeForStoreBlk(GenTreeBlk* storeBlkNode)
2772 #ifdef JIT32_GCENCODER
2773 assert(!storeBlkNode->gtBlkOpGcUnsafe);
2775 if (storeBlkNode->gtBlkOpGcUnsafe)
2777 getEmitter()->emitDisableGC();
2779 #endif // JIT32_GCENCODER
2781 bool isCopyBlk = storeBlkNode->OperIsCopyBlkOp();
2783 switch (storeBlkNode->gtBlkOpKind)
2785 #ifdef _TARGET_AMD64_
2786 case GenTreeBlk::BlkOpKindHelper:
2789 genCodeForCpBlk(storeBlkNode);
2793 genCodeForInitBlk(storeBlkNode);
2796 #endif // _TARGET_AMD64_
2797 case GenTreeBlk::BlkOpKindRepInstr:
2800 genCodeForCpBlkRepMovs(storeBlkNode);
2804 genCodeForInitBlkRepStos(storeBlkNode);
2807 case GenTreeBlk::BlkOpKindUnroll:
2810 genCodeForCpBlkUnroll(storeBlkNode);
2814 genCodeForInitBlkUnroll(storeBlkNode);
2821 #ifndef JIT32_GCENCODER
2822 if (storeBlkNode->gtBlkOpGcUnsafe)
2824 getEmitter()->emitEnableGC();
2826 #endif // !defined(JIT32_GCENCODER)
2830 //------------------------------------------------------------------------
2831 // genCodeForInitBlkRepStos: Generate code for InitBlk using rep stos.
2834 // initBlkNode - The Block store for which we are generating code.
2838 // On x64, the size of the buffer must be a constant and also less than INITBLK_STOS_LIMIT bytes.
2839 // Any value larger than that, we'll use the helper even if both the fill byte and the
2840 // size are integer constants.
2842 // On x86, the size must either be a non-constant or less than INITBLK_STOS_LIMIT bytes.
2844 void CodeGen::genCodeForInitBlkRepStos(GenTreeBlk* initBlkNode)
2846 // Make sure we got the arguments of the initblk/initobj operation in the right registers.
2847 unsigned size = initBlkNode->Size();
2848 GenTreePtr dstAddr = initBlkNode->Addr();
2849 GenTreePtr initVal = initBlkNode->Data();
2850 if (initVal->OperIsInitVal())
2852 initVal = initVal->gtGetOp1();
2856 assert(dstAddr->isUsedFromReg());
2857 assert(initVal->isUsedFromReg());
2858 #ifdef _TARGET_AMD64_
2861 if (initVal->IsCnsIntOrI())
2863 #ifdef _TARGET_AMD64_
2864 assert(size > CPBLK_UNROLL_LIMIT && size < CPBLK_MOVS_LIMIT);
2866 // Note that a size of zero means a non-constant size.
2867 assert((size == 0) || (size > CPBLK_UNROLL_LIMIT));
2873 genConsumeBlockOp(initBlkNode, REG_RDI, REG_RAX, REG_RCX);
2874 instGen(INS_r_stosb);
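// The net sequence is essentially (a sketch; the operands are placed in the fixed
// registers by genConsumeBlockOp above):
//      mov rdi, <dstAddr>   ; destination
//      mov rax, <initVal>   ; fill byte
//      mov rcx, <size>      ; byte count
//      rep stosb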
2877 // Generate code for InitBlk by performing a loop unroll.
2878 // Preconditions:
2879 // a) Both the size and fill byte value are integer constants.
2880 // b) The size of the struct to initialize is smaller than INITBLK_UNROLL_LIMIT bytes.
2882 void CodeGen::genCodeForInitBlkUnroll(GenTreeBlk* initBlkNode)
2884 // Make sure we got the arguments of the initblk/initobj operation in the right registers
2885 unsigned size = initBlkNode->Size();
2886 GenTreePtr dstAddr = initBlkNode->Addr();
2887 GenTreePtr initVal = initBlkNode->Data();
2888 if (initVal->OperIsInitVal())
2890 initVal = initVal->gtGetOp1();
2893 assert(dstAddr->isUsedFromReg());
2894 assert(initVal->isUsedFromReg() || (initVal->IsIntegralConst(0) && ((size & 0xf) == 0)));
2896 assert(size <= INITBLK_UNROLL_LIMIT);
2897 assert(initVal->gtSkipReloadOrCopy()->IsCnsIntOrI());
2899 emitter* emit = getEmitter();
2901 genConsumeOperands(initBlkNode);
2903 // If the initVal was moved, or spilled and reloaded to a different register,
2904 // get the original initVal from below the GT_RELOAD, but only after capturing the valReg,
2905 // which needs to be the new register.
2906 regNumber valReg = initVal->gtRegNum;
2907 initVal = initVal->gtSkipReloadOrCopy();
2909 unsigned offset = 0;
2911 // Perform an unroll using SSE2 loads and stores.
2912 if (size >= XMM_REGSIZE_BYTES)
2914 regNumber tmpReg = initBlkNode->GetSingleTempReg();
2915 assert(genIsValidFloatReg(tmpReg));
2917 if (initVal->gtIntCon.gtIconVal != 0)
2919 emit->emitIns_R_R(INS_mov_i2xmm, EA_PTRSIZE, tmpReg, valReg);
2920 emit->emitIns_R_R(INS_punpckldq, EA_8BYTE, tmpReg, tmpReg);
2922 // For x86, we need one more to convert it from 8 bytes to 16 bytes.
2923 emit->emitIns_R_R(INS_punpckldq, EA_8BYTE, tmpReg, tmpReg);
2924 #endif // _TARGET_X86_
2928 emit->emitIns_R_R(INS_xorpd, EA_8BYTE, tmpReg, tmpReg);
2931 // Determine how many 16 byte slots we're going to fill using SSE movs.
2932 size_t slots = size / XMM_REGSIZE_BYTES;
2936 emit->emitIns_AR_R(INS_movdqu, EA_8BYTE, tmpReg, dstAddr->gtRegNum, offset);
2937 offset += XMM_REGSIZE_BYTES;
2941 // Fill the remainder (or a < 16 byte sized struct)
2942 if ((size & 8) != 0)
2945 // TODO-X86-CQ: [1091735] Revisit block ops codegen. One example: use movq for 8 byte movs.
2946 emit->emitIns_AR_R(INS_mov, EA_4BYTE, valReg, dstAddr->gtRegNum, offset);
2947 offset += 4;
2948 emit->emitIns_AR_R(INS_mov, EA_4BYTE, valReg, dstAddr->gtRegNum, offset);
2949 offset += 4;
2950 #else // !_TARGET_X86_
2952 emit->emitIns_AR_R(INS_mov, EA_8BYTE, valReg, dstAddr->gtRegNum, offset);
2953 offset += 8;
2955 #endif // !_TARGET_X86_
2957 if ((size & 4) != 0)
2959 emit->emitIns_AR_R(INS_mov, EA_4BYTE, valReg, dstAddr->gtRegNum, offset);
2960 offset += 4;
2962 if ((size & 2) != 0)
2964 emit->emitIns_AR_R(INS_mov, EA_2BYTE, valReg, dstAddr->gtRegNum, offset);
2965 offset += 2;
2967 if ((size & 1) != 0)
2969 emit->emitIns_AR_R(INS_mov, EA_1BYTE, valReg, dstAddr->gtRegNum, offset);
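// Worked example (illustrative): size == 22 with a non-zero fill value emits
//      movdqu [dst], xmmTmp               ; bytes 0-15
//      mov    [dst+16], <4-byte valReg>   ; bytes 16-19
//      mov    [dst+20], <2-byte valReg>   ; bytes 20-21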
2973 // Generates code for InitBlk by calling the VM memset helper function.
2974 // Preconditions:
2975 // a) The size argument of the InitBlk is not an integer constant, or
2976 // b) The size argument of the InitBlk is >= INITBLK_STOS_LIMIT bytes.
2977 void CodeGen::genCodeForInitBlk(GenTreeBlk* initBlkNode)
2979 #ifdef _TARGET_AMD64_
2980 // Make sure we got the arguments of the initblk operation in the right registers
2981 unsigned blockSize = initBlkNode->Size();
2982 GenTreePtr dstAddr = initBlkNode->Addr();
2983 GenTreePtr initVal = initBlkNode->Data();
2984 if (initVal->OperIsInitVal())
2986 initVal = initVal->gtGetOp1();
2989 assert(dstAddr->isUsedFromReg());
2990 assert(initVal->isUsedFromReg());
2994 assert(blockSize >= CPBLK_MOVS_LIMIT);
2997 genConsumeBlockOp(initBlkNode, REG_ARG_0, REG_ARG_1, REG_ARG_2);
2999 genEmitHelperCall(CORINFO_HELP_MEMSET, 0, EA_UNKNOWN);
3000 #else // !_TARGET_AMD64_
3001 NYI_X86("Helper call for InitBlk");
3002 #endif // !_TARGET_AMD64_
3005 // Generate code for a load from some address + offset
3006 // baseNode: tree node which can be either a local address or arbitrary node
3007 // offset: distance from the baseNode from which to load
3008 void CodeGen::genCodeForLoadOffset(instruction ins, emitAttr size, regNumber dst, GenTree* baseNode, unsigned offset)
3010 emitter* emit = getEmitter();
3012 if (baseNode->OperIsLocalAddr())
3014 if (baseNode->gtOper == GT_LCL_FLD_ADDR)
3016 offset += baseNode->gtLclFld.gtLclOffs;
3018 emit->emitIns_R_S(ins, size, dst, baseNode->gtLclVarCommon.gtLclNum, offset);
3022 emit->emitIns_R_AR(ins, size, dst, baseNode->gtRegNum, offset);
3026 //------------------------------------------------------------------------
3027 // genCodeForStoreOffset: Generate code to store a reg to [base + offset].
3030 // ins - the instruction to generate.
3031 // size - the size that needs to be stored.
3032 // src - the register which needs to be stored.
3033 // baseNode - the base, relative to which to store the src register.
3034 // offset - the offset that is added to the baseNode to calculate the address to store into.
3036 void CodeGen::genCodeForStoreOffset(instruction ins, emitAttr size, regNumber src, GenTree* baseNode, unsigned offset)
3038 emitter* emit = getEmitter();
3040 if (baseNode->OperIsLocalAddr())
3042 if (baseNode->gtOper == GT_LCL_FLD_ADDR)
3044 offset += baseNode->gtLclFld.gtLclOffs;
3047 emit->emitIns_S_R(ins, size, src, baseNode->AsLclVarCommon()->GetLclNum(), offset);
3051 emit->emitIns_AR_R(ins, size, src, baseNode->gtRegNum, offset);
3055 // Generates CpBlk code by performing a loop unroll.
3056 // Preconditions:
3057 // The size argument of the CpBlk node is a constant and <= 64 bytes.
3058 // This may seem small but covers >95% of the cases in several framework assemblies.
3060 void CodeGen::genCodeForCpBlkUnroll(GenTreeBlk* cpBlkNode)
3062 // Make sure we got the arguments of the cpblk operation in the right registers
3063 unsigned size = cpBlkNode->Size();
3064 GenTreePtr dstAddr = cpBlkNode->Addr();
3065 GenTreePtr source = cpBlkNode->Data();
3066 GenTreePtr srcAddr = nullptr;
3067 assert(size <= CPBLK_UNROLL_LIMIT);
3069 emitter* emit = getEmitter();
3071 if (source->gtOper == GT_IND)
3073 srcAddr = source->gtGetOp1();
3074 if (srcAddr->isUsedFromReg())
3076 genConsumeReg(srcAddr);
3081 noway_assert(source->IsLocal());
3082 // TODO-Cleanup: Consider making the addrForm() method in Rationalize public, e.g. in GenTree.
3083 // OR: transform source to GT_IND(GT_LCL_VAR_ADDR)
3084 if (source->OperGet() == GT_LCL_VAR)
3086 source->SetOper(GT_LCL_VAR_ADDR);
3090 assert(source->OperGet() == GT_LCL_FLD);
3091 source->SetOper(GT_LCL_FLD_ADDR);
3096 if (dstAddr->isUsedFromReg())
3098 genConsumeReg(dstAddr);
3101 unsigned offset = 0;
3103 // If the size of this struct is larger than 16 bytes
3104 // let's use SSE2 to be able to do 16 byte at a time
3105 // loads and stores.
3107 if (size >= XMM_REGSIZE_BYTES)
3109 regNumber xmmReg = cpBlkNode->GetSingleTempReg(RBM_ALLFLOAT);
3110 assert(genIsValidFloatReg(xmmReg));
3111 size_t slots = size / XMM_REGSIZE_BYTES;
3113 // TODO: In the below code the load and store instructions are for 16 bytes, but the
3114 // type is EA_8BYTE. The movdqa/u are 16 byte instructions, so it works, but
3115 // this probably needs to be changed.
3119 genCodeForLoadOffset(INS_movdqu, EA_8BYTE, xmmReg, srcAddr, offset);
3121 genCodeForStoreOffset(INS_movdqu, EA_8BYTE, xmmReg, dstAddr, offset);
3122 offset += XMM_REGSIZE_BYTES;
3126 // Fill the remainder (15 bytes or less) if there's one.
3127 if ((size & 0xf) != 0)
3129 // Grab the integer temp register to emit the remaining loads and stores.
3130 regNumber tmpReg = cpBlkNode->GetSingleTempReg(RBM_ALLINT);
3132 if ((size & 8) != 0)
3135 // TODO-X86-CQ: [1091735] Revisit block ops codegen. One example: use movq for 8 byte movs.
3136 for (unsigned savedOffs = offset; offset < savedOffs + 8; offset += 4)
3138 genCodeForLoadOffset(INS_mov, EA_4BYTE, tmpReg, srcAddr, offset);
3139 genCodeForStoreOffset(INS_mov, EA_4BYTE, tmpReg, dstAddr, offset);
3141 #else // !_TARGET_X86_
3142 genCodeForLoadOffset(INS_mov, EA_8BYTE, tmpReg, srcAddr, offset);
3143 genCodeForStoreOffset(INS_mov, EA_8BYTE, tmpReg, dstAddr, offset);
3145 #endif // !_TARGET_X86_
3147 if ((size & 4) != 0)
3149 genCodeForLoadOffset(INS_mov, EA_4BYTE, tmpReg, srcAddr, offset);
3150 genCodeForStoreOffset(INS_mov, EA_4BYTE, tmpReg, dstAddr, offset);
3153 if ((size & 2) != 0)
3155 genCodeForLoadOffset(INS_mov, EA_2BYTE, tmpReg, srcAddr, offset);
3156 genCodeForStoreOffset(INS_mov, EA_2BYTE, tmpReg, dstAddr, offset);
3159 if ((size & 1) != 0)
3161 genCodeForLoadOffset(INS_mov, EA_1BYTE, tmpReg, srcAddr, offset);
3162 genCodeForStoreOffset(INS_mov, EA_1BYTE, tmpReg, dstAddr, offset);
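// Worked example (illustrative): size == 24 on x64 emits one 16-byte xmm
// load/store pair followed by one 8-byte integer load/store pair:
//      movdqu xmm, [src]      /  movdqu [dst], xmm
//      mov    tmp, [src+16]   /  mov    [dst+16], tmp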
3167 // Generate code for CpBlk by using rep movs.
3168 // Preconditions:
3169 // The size argument of the CpBlk is a constant and is between
3170 // CPBLK_UNROLL_LIMIT and CPBLK_MOVS_LIMIT bytes.
3171 void CodeGen::genCodeForCpBlkRepMovs(GenTreeBlk* cpBlkNode)
3173 // Make sure we got the arguments of the cpblk operation in the right registers
3174 unsigned size = cpBlkNode->Size();
3175 GenTreePtr dstAddr = cpBlkNode->Addr();
3176 GenTreePtr source = cpBlkNode->Data();
3177 GenTreePtr srcAddr = nullptr;
3180 assert(dstAddr->isUsedFromReg());
3181 assert(source->isContained());
3186 noway_assert(cpBlkNode->OperGet() == GT_STORE_DYN_BLK);
3192 assert(size > CPBLK_UNROLL_LIMIT && size < CPBLK_MOVS_LIMIT);
3194 assert(size > CPBLK_UNROLL_LIMIT);
3199 genConsumeBlockOp(cpBlkNode, REG_RDI, REG_RSI, REG_RCX);
3200 instGen(INS_r_movsb);
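// As with the rep stos path above, the net shape is (a sketch):
//      mov rdi, <dstAddr>
//      mov rsi, <srcAddr>
//      mov rcx, <size>
//      rep movsb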
3203 #ifdef FEATURE_PUT_STRUCT_ARG_STK
3204 //------------------------------------------------------------------------
3205 // CodeGen::genMove8IfNeeded: Conditionally move 8 bytes of a struct to the argument area
3208 // size - The number of bytes remaining to be moved
3209 // longTmpReg - The tmp register to be used for the long value
3210 // srcAddr - The address of the source struct
3211 // offset - The current offset being copied
3214 // Returns the number of bytes moved (8 or 0).
3217 // This is used in the PutArgStkKindUnroll case, to move any bytes that are
3218 // not an even multiple of 16.
3219 // On x86, longTmpReg must be an xmm reg; on x64 it must be an integer register.
3220 // This is checked by genStoreRegToStackArg.
3222 unsigned CodeGen::genMove8IfNeeded(unsigned size, regNumber longTmpReg, GenTree* srcAddr, unsigned offset)
3225 instruction longMovIns = INS_movq;
3226 #else // !_TARGET_X86_
3227 instruction longMovIns = INS_mov;
3228 #endif // !_TARGET_X86_
3229 if ((size & 8) != 0)
3231 genCodeForLoadOffset(longMovIns, EA_8BYTE, longTmpReg, srcAddr, offset);
3232 genStoreRegToStackArg(TYP_LONG, longTmpReg, offset);
3238 //------------------------------------------------------------------------
3239 // CodeGen::genMove4IfNeeded: Conditionally move 4 bytes of a struct to the argument area
3242 // size - The number of bytes remaining to be moved
3243 // intTmpReg - The tmp register to be used for the 4-byte value
3244 // srcAddr - The address of the source struct
3245 // offset - The current offset being copied
3248 // Returns the number of bytes moved (4 or 0).
3251 // This is used in the PutArgStkKindUnroll case, to move any bytes that are
3252 // not an even multiple of 16.
3253 // intTmpReg must be an integer register.
3254 // This is checked by genStoreRegToStackArg.
3256 unsigned CodeGen::genMove4IfNeeded(unsigned size, regNumber intTmpReg, GenTree* srcAddr, unsigned offset)
3258 if ((size & 4) != 0)
3260 genCodeForLoadOffset(INS_mov, EA_4BYTE, intTmpReg, srcAddr, offset);
3261 genStoreRegToStackArg(TYP_INT, intTmpReg, offset);
3267 //------------------------------------------------------------------------
3268 // CodeGen::genMove2IfNeeded: Conditionally move 2 bytes of a struct to the argument area
3271 // size - The number of bytes remaining to be moved
3272 // intTmpReg - The tmp register to be used for the 2-byte value
3273 // srcAddr - The address of the source struct
3274 // offset - The current offset being copied
3277 // Returns the number of bytes moved (2 or 0).
3280 // This is used in the PutArgStkKindUnroll case, to move any bytes that are
3281 // not an even multiple of 16.
3282 // intTmpReg must be an integer register.
3283 // This is checked by genStoreRegToStackArg.
3285 unsigned CodeGen::genMove2IfNeeded(unsigned size, regNumber intTmpReg, GenTree* srcAddr, unsigned offset)
3287 if ((size & 2) != 0)
3289 genCodeForLoadOffset(INS_mov, EA_2BYTE, intTmpReg, srcAddr, offset);
3290 genStoreRegToStackArg(TYP_SHORT, intTmpReg, offset);
3296 //------------------------------------------------------------------------
3297 // CodeGen::genMove1IfNeeded: Conditionally move 1 byte of a struct to the argument area
3300 // size - The number of bytes remaining to be moved
3301 // intTmpReg - The tmp register to be used for the 1-byte value
3302 // srcAddr - The address of the source struct
3303 // offset - The current offset being copied
3306 // Returns the number of bytes moved (1 or 0).
3309 // This is used in the PutArgStkKindUnroll case, to move any bytes that are
3310 // not an even multiple of 16.
3311 // intTmpReg must be an integer register.
3312 // This is checked by genStoreRegToStackArg.
3314 unsigned CodeGen::genMove1IfNeeded(unsigned size, regNumber intTmpReg, GenTree* srcAddr, unsigned offset)
3317 if ((size & 1) != 0)
3319 genCodeForLoadOffset(INS_mov, EA_1BYTE, intTmpReg, srcAddr, offset);
3320 genStoreRegToStackArg(TYP_BYTE, intTmpReg, offset);
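// Taken together, genMove8/4/2/1IfNeeded cover any remainder of 1-15 bytes; e.g. a
// 13-byte remainder is moved as 8 + 4 + 1 bytes by three of these helpers.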
3326 //---------------------------------------------------------------------------------------------------------------//
3327 // genStructPutArgUnroll: Generates code for passing a struct arg on stack by value using loop unrolling.
3330 // putArgNode - the PutArgStk tree.
3333 // m_stkArgVarNum must be set to the base var number, relative to which the by-val struct will be copied to the
3334 // stack.
3336 // TODO-Amd64-Unix: Try to share code with copyblk.
3337 // Need refactoring of copyblk before it could be used for putarg_stk.
3338 // The difference for now is that a putarg_stk contains its children, while cpyblk does not.
3339 // This creates differences in code. After some significant refactoring it could be reused.
3341 void CodeGen::genStructPutArgUnroll(GenTreePutArgStk* putArgNode)
3343 // We will never call this method for SIMD types, which are stored directly
3344 // in genPutStructArgStk().
3345 noway_assert(putArgNode->TypeGet() == TYP_STRUCT);
3347 // Make sure we got the arguments of the cpblk operation in the right registers
3348 GenTreePtr dstAddr = putArgNode;
3349 GenTreePtr src = putArgNode->gtOp.gtOp1;
3351 unsigned size = putArgNode->getArgSize();
3352 assert(size <= CPBLK_UNROLL_LIMIT);
3354 emitter* emit = getEmitter();
3355 unsigned putArgOffset = putArgNode->getArgOffset();
3357 assert(src->isContained());
3359 assert(src->gtOper == GT_OBJ);
3361 if (src->gtOp.gtOp1->isUsedFromReg())
3363 genConsumeReg(src->gtOp.gtOp1);
3366 unsigned offset = 0;
3368 regNumber xmmTmpReg = REG_NA;
3369 regNumber intTmpReg = REG_NA;
3370 regNumber longTmpReg = REG_NA;
3372 // On x86 we use an XMM register for both 16 and 8-byte chunks, but if it's
3373 // less than 16 bytes, we will just be using pushes
3376 xmmTmpReg = putArgNode->GetSingleTempReg(RBM_ALLFLOAT);
3377 longTmpReg = xmmTmpReg;
3379 if ((size & 0x7) != 0)
3381 intTmpReg = putArgNode->GetSingleTempReg(RBM_ALLINT);
3383 #else // !_TARGET_X86_
3384 // On x64 we use an XMM register only for 16-byte chunks.
3385 if (size >= XMM_REGSIZE_BYTES)
3387 xmmTmpReg = putArgNode->GetSingleTempReg(RBM_ALLFLOAT);
3389 if ((size & 0xf) != 0)
3391 intTmpReg = putArgNode->GetSingleTempReg(RBM_ALLINT);
3392 longTmpReg = intTmpReg;
3394 #endif // !_TARGET_X86_
3396 // If the size of this struct is larger than 16 bytes
3397 // let's use SSE2 to be able to do 16 byte at a time
3398 // loads and stores.
3399 if (size >= XMM_REGSIZE_BYTES)
3402 assert(!m_pushStkArg);
3403 #endif // _TARGET_X86_
3404 size_t slots = size / XMM_REGSIZE_BYTES;
3406 assert(putArgNode->gtGetOp1()->isContained());
3407 assert(putArgNode->gtGetOp1()->gtOp.gtOper == GT_OBJ);
3409 // TODO: In the below code the load and store instructions are for 16 bytes, but the
3410 // type is EA_8BYTE. The movdqa/u are 16 byte instructions, so it works, but
3411 // this probably needs to be changed.
3415 genCodeForLoadOffset(INS_movdqu, EA_8BYTE, xmmTmpReg, src->gtGetOp1(), offset);
3418 genStoreRegToStackArg(TYP_STRUCT, xmmTmpReg, offset);
3420 offset += XMM_REGSIZE_BYTES;
3424 // Fill the remainder (15 bytes or less) if there's one.
3425 if ((size & 0xf) != 0)
3430 // This case is currently supported only for the case where the total size is
3431 // less than XMM_REGSIZE_BYTES. We need to push the remaining chunks in reverse
3432 // order. However, morph has ensured that we have a struct that is an even
3433 // multiple of TARGET_POINTER_SIZE, so we don't need to worry about alignment.
3434 assert(((size & 0xc) == size) && (offset == 0));
3435 // If we have a 4 byte chunk, load it from either offset 0 or 8, depending on
3436 // whether we've got an 8 byte chunk, and then push it on the stack.
3437 unsigned pushedBytes = genMove4IfNeeded(size, intTmpReg, src->gtOp.gtOp1, size & 0x8);
3438 // Now if we have an 8 byte chunk, load it from offset 0 (it's the first chunk)
3439 // and push it on the stack.
3440 pushedBytes += genMove8IfNeeded(size, longTmpReg, src->gtOp.gtOp1, 0);
3443 #endif // _TARGET_X86_
3445 offset += genMove8IfNeeded(size, longTmpReg, src->gtOp.gtOp1, offset);
3446 offset += genMove4IfNeeded(size, intTmpReg, src->gtOp.gtOp1, offset);
3447 offset += genMove2IfNeeded(size, intTmpReg, src->gtOp.gtOp1, offset);
3448 offset += genMove1IfNeeded(size, intTmpReg, src->gtOp.gtOp1, offset);
3449 assert(offset == size);
3454 //------------------------------------------------------------------------
3455 // genStructPutArgRepMovs: Generates code for passing a struct arg by value on stack using Rep Movs.
3458 // putArgNode - the PutArgStk tree.
3460 // Preconditions:
3461 // The size argument of the PutArgStk (for structs) is a constant and is between
3462 // CPBLK_UNROLL_LIMIT and CPBLK_MOVS_LIMIT bytes.
3463 // m_stkArgVarNum must be set to the base var number, relative to which the by-val struct bits will go.
3465 void CodeGen::genStructPutArgRepMovs(GenTreePutArgStk* putArgNode)
3467 assert(putArgNode->TypeGet() == TYP_STRUCT);
3468 assert(putArgNode->getArgSize() > CPBLK_UNROLL_LIMIT);
3470 // Make sure we got the arguments of the cpblk operation in the right registers
3471 GenTreePtr dstAddr = putArgNode;
3472 GenTreePtr srcAddr = putArgNode->gtGetOp1();
3475 assert(putArgNode->gtRsvdRegs == (RBM_RDI | RBM_RCX | RBM_RSI));
3476 assert(srcAddr->isContained());
3478 genConsumePutStructArgStk(putArgNode, REG_RDI, REG_RSI, REG_RCX);
3479 instGen(INS_r_movsb);
3482 //------------------------------------------------------------------------
3483 // If any Vector3 args are on the stack and they are not pass-by-ref, the upper 32 bits
3484 // must be cleared to zeroes. The native compiler doesn't clear the upper bits
3485 // and there is no way to know if the caller is native or not. So, the upper
3486 // 32 bits of a Vector3 argument on the stack are always cleared to zero.
3487 #if defined(FEATURE_UNIX_AMD64_STRUCT_PASSING) && defined(FEATURE_SIMD)
3488 void CodeGen::genClearStackVec3ArgUpperBits()
3493 printf("*************** In genClearStackVec3ArgUpperBits()\n");
3497 assert(compiler->compGeneratingProlog);
3501 for (unsigned varNum = 0; varNum < compiler->info.compArgsCount; varNum++)
3503 LclVarDsc* varDsc = &(compiler->lvaTable[varNum]);
3504 assert(varDsc->lvIsParam);
3506 // Does the var have a SIMD12 type?
3507 if (varDsc->lvType != TYP_SIMD12)
3512 if (!varDsc->lvIsRegArg)
3514 // Clear the upper 32 bits by mov dword ptr [V_ARG_BASE+0xC], 0
3515 getEmitter()->emitIns_S_I(ins_Store(TYP_INT), EA_4BYTE, varNum, genTypeSize(TYP_FLOAT) * 3, 0);
3519 // Assume that for x64 linux, an argument is fully in registers
3520 // or fully on stack.
3521 regNumber argReg = varDsc->GetOtherArgReg();
3523 // Clear the upper 32 bits by two shift instructions.
3524 // argReg = argReg << 96
3525 getEmitter()->emitIns_R_I(INS_pslldq, emitActualTypeSize(TYP_SIMD12), argReg, 12);
3526 // argReg = argReg >> 96
3527 getEmitter()->emitIns_R_I(INS_psrldq, emitActualTypeSize(TYP_SIMD12), argReg, 12);
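// Worked through (assuming the SysV layout, where the "other" arg register holds
// only the 4-byte Z component in its low dword): shifting left by 12 bytes and
// then right by 12 bytes zeroes bits [127:32] while preserving bits [31:0].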
3531 #endif // defined(FEATURE_UNIX_AMD64_STRUCT_PASSING) && defined(FEATURE_SIMD)
3532 #endif // FEATURE_PUT_STRUCT_ARG_STK
3534 // Generate code for CpObj nodes which copy structs that have interleaved
3535 // GC pointers.
3536 // This will generate a sequence of movsp instructions for the runs of non-gc members,
3537 // and calls to the CORINFO_HELP_ASSIGN_BYREF helper otherwise.
3538 // Note that movsp is an alias for movsd on x86 and movsq on x64.
3539 void CodeGen::genCodeForCpObj(GenTreeObj* cpObjNode)
3541 // Make sure we got the arguments of the cpobj operation in the right registers
3542 GenTreePtr dstAddr = cpObjNode->Addr();
3543 GenTreePtr source = cpObjNode->Data();
3544 GenTreePtr srcAddr = nullptr;
3545 var_types srcAddrType = TYP_BYREF;
3546 bool sourceIsLocal = false;
3548 assert(source->isContained());
3549 if (source->gtOper == GT_IND)
3551 srcAddr = source->gtGetOp1();
3552 assert(srcAddr->isUsedFromReg());
3556 noway_assert(source->IsLocal());
3557 sourceIsLocal = true;
3560 bool dstOnStack = dstAddr->OperIsLocalAddr();
3564 assert(dstAddr->isUsedFromReg());
3566 // If the GenTree node has data about GC pointers, this means we're dealing
3567 // with CpObj, so this requires special logic.
3568 assert(cpObjNode->gtGcPtrCount > 0);
3570 // MovSp (alias for movsq on x64 and movsd on x86) instruction is used for copying non-gcref fields
3571 // and it needs src = RSI and dst = RDI.
3572 // Either these registers must not contain lclVars, or they must be dying or marked for spill.
3573 // This is because these registers are incremented as we go through the struct.
3576 GenTree* actualSrcAddr = srcAddr->gtSkipReloadOrCopy();
3577 GenTree* actualDstAddr = dstAddr->gtSkipReloadOrCopy();
3578 unsigned srcLclVarNum = BAD_VAR_NUM;
3579 unsigned dstLclVarNum = BAD_VAR_NUM;
3580 bool isSrcAddrLiveOut = false;
3581 bool isDstAddrLiveOut = false;
3582 if (genIsRegCandidateLocal(actualSrcAddr))
3584 srcLclVarNum = actualSrcAddr->AsLclVarCommon()->gtLclNum;
3585 isSrcAddrLiveOut = ((actualSrcAddr->gtFlags & (GTF_VAR_DEATH | GTF_SPILL)) == 0);
3587 if (genIsRegCandidateLocal(actualDstAddr))
3589 dstLclVarNum = actualDstAddr->AsLclVarCommon()->gtLclNum;
3590 isDstAddrLiveOut = ((actualDstAddr->gtFlags & (GTF_VAR_DEATH | GTF_SPILL)) == 0);
3592 assert((actualSrcAddr->gtRegNum != REG_RSI) || !isSrcAddrLiveOut ||
3593 ((srcLclVarNum == dstLclVarNum) && !isDstAddrLiveOut));
3594 assert((actualDstAddr->gtRegNum != REG_RDI) || !isDstAddrLiveOut ||
3595 ((srcLclVarNum == dstLclVarNum) && !isSrcAddrLiveOut));
3596 srcAddrType = srcAddr->TypeGet();
3600 // Consume the operands and get them into the right registers.
3601 // They may now contain gc pointers (depending on their type; gcMarkRegPtrVal will "do the right thing").
3602 genConsumeBlockOp(cpObjNode, REG_RDI, REG_RSI, REG_NA);
3603 gcInfo.gcMarkRegPtrVal(REG_RSI, srcAddrType);
3604 gcInfo.gcMarkRegPtrVal(REG_RDI, dstAddr->TypeGet());
3606 unsigned slots = cpObjNode->gtSlots;
3608 // If we can prove it's on the stack we don't need to use the write barrier.
3611 if (slots >= CPOBJ_NONGC_SLOTS_LIMIT)
3613 // If the destination of the CpObj is on the stack, make sure we allocated
3614 // RCX to emit the movsp (alias for movsd or movsq for 32 and 64 bits respectively).
3615 assert((cpObjNode->gtRsvdRegs & RBM_RCX) != 0);
3617 getEmitter()->emitIns_R_I(INS_mov, EA_4BYTE, REG_RCX, slots);
3618 instGen(INS_r_movsp);
3622 // For small structs, it's better to emit a sequence of movsp than to
3623 // emit a rep movsp instruction.
3633 BYTE* gcPtrs = cpObjNode->gtGcPtrs;
3634 unsigned gcPtrCount = cpObjNode->gtGcPtrCount;
3642 // Let's see if we can use rep movsp instead of a sequence of movsp instructions
3643 // to save cycles and code size.
3645 unsigned nonGcSlotCount = 0;
3651 } while (i < slots && gcPtrs[i] == TYPE_GC_NONE);
3653 // If we have a very small contiguous non-gc region, it's better just to
3654 // emit a sequence of movsp instructions
3655 if (nonGcSlotCount < CPOBJ_NONGC_SLOTS_LIMIT)
3657 while (nonGcSlotCount > 0)
3665 // Otherwise, we can save code-size and improve CQ by emitting
3666 // rep movsp (alias for movsd/movsq for x86/x64)
3667 assert((cpObjNode->gtRsvdRegs & RBM_RCX) != 0);
3669 getEmitter()->emitIns_R_I(INS_mov, EA_4BYTE, REG_RCX, nonGcSlotCount);
3670 instGen(INS_r_movsp);
3675 // We have a GC pointer, call the write barrier helper.
3676 genEmitHelperCall(CORINFO_HELP_ASSIGN_BYREF, 0, EA_PTRSIZE);
3682 assert(gcPtrCount == 0);
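// Illustrative shape of the copy for a slot layout of { nongc, nongc, gcref, nongc }
// (RSI/RDI are advanced implicitly by each movsp and by the helper call):
//      movsp                             ; two leading non-gc slots
//      movsp
//      call CORINFO_HELP_ASSIGN_BYREF    ; the gc slot
//      movsp                             ; trailing non-gc slot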
3685 // Clear the gcInfo for RSI and RDI.
3686 // While we normally update GC info prior to the last instruction that uses them,
3687 // these actually live into the helper call.
3688 gcInfo.gcMarkRegSetNpt(RBM_RSI);
3689 gcInfo.gcMarkRegSetNpt(RBM_RDI);
3692 // Generate code for a CpBlk node by means of the VM memcpy helper call.
3693 // Preconditions:
3694 // a) The size argument of the CpBlk is not an integer constant, or
3695 // b) The size argument is a constant but is larger than CPBLK_MOVS_LIMIT bytes.
3696 void CodeGen::genCodeForCpBlk(GenTreeBlk* cpBlkNode)
3698 #ifdef _TARGET_AMD64_
3699 // Make sure we got the arguments of the cpblk operation in the right registers
3700 unsigned blockSize = cpBlkNode->Size();
3701 GenTreePtr dstAddr = cpBlkNode->Addr();
3702 GenTreePtr source = cpBlkNode->Data();
3703 GenTreePtr srcAddr = nullptr;
3705 // Size goes in arg2
3708 assert(blockSize >= CPBLK_MOVS_LIMIT);
3709 assert((cpBlkNode->gtRsvdRegs & RBM_ARG_2) != 0);
3713 noway_assert(cpBlkNode->gtOper == GT_STORE_DYN_BLK);
3716 // Source address goes in arg1
3717 if (source->gtOper == GT_IND)
3719 srcAddr = source->gtGetOp1();
3720 assert(srcAddr->isUsedFromReg());
3724 noway_assert(source->IsLocal());
3725 assert((cpBlkNode->gtRsvdRegs & RBM_ARG_1) != 0);
3726 inst_RV_TT(INS_lea, REG_ARG_1, source, 0, EA_BYREF);
3729 genConsumeBlockOp(cpBlkNode, REG_ARG_0, REG_ARG_1, REG_ARG_2);
3731 genEmitHelperCall(CORINFO_HELP_MEMCPY, 0, EA_UNKNOWN);
3732 #else // !_TARGET_AMD64_
3733 noway_assert(false && "Helper call for CpBlk is not needed.");
3734 #endif // !_TARGET_AMD64_
3737 // Generate code for a switch statement based on a table of ip-relative offsets.
3738 void CodeGen::genTableBasedSwitch(GenTree* treeNode)
3740 genConsumeOperands(treeNode->AsOp());
3741 regNumber idxReg = treeNode->gtOp.gtOp1->gtRegNum;
3742 regNumber baseReg = treeNode->gtOp.gtOp2->gtRegNum;
3744 regNumber tmpReg = treeNode->GetSingleTempReg();
3746 // load the ip-relative offset (which is relative to start of fgFirstBB)
3747 getEmitter()->emitIns_R_ARX(INS_mov, EA_4BYTE, baseReg, baseReg, idxReg, 4, 0);
3749 // add it to the absolute address of fgFirstBB
3750 compiler->fgFirstBB->bbFlags |= BBF_JMP_TARGET;
3751 getEmitter()->emitIns_R_L(INS_lea, EA_PTR_DSP_RELOC, compiler->fgFirstBB, tmpReg);
3752 getEmitter()->emitIns_R_R(INS_add, EA_PTRSIZE, baseReg, tmpReg);
3754 getEmitter()->emitIns_R(INS_i_jmp, emitTypeSize(TYP_I_IMPL), baseReg);
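// Illustrative emitted sequence (registers come from the node's allocation):
//      mov  <base(32)>, dword ptr [<base> + 4*<idx>]  ; load the ip-relative offset
//      lea  <tmp>, [fgFirstBB]
//      add  <base>, <tmp>                             ; form the absolute target
//      jmp  <base>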
3757 // emits the table and an instruction to get the address of the first element
3758 void CodeGen::genJumpTable(GenTree* treeNode)
3760 noway_assert(compiler->compCurBB->bbJumpKind == BBJ_SWITCH);
3761 assert(treeNode->OperGet() == GT_JMPTABLE);
3763 unsigned jumpCount = compiler->compCurBB->bbJumpSwt->bbsCount;
3764 BasicBlock** jumpTable = compiler->compCurBB->bbJumpSwt->bbsDstTab;
3765 unsigned jmpTabOffs;
3766 unsigned jmpTabBase;
3768 jmpTabBase = getEmitter()->emitBBTableDataGenBeg(jumpCount, true);
3772 JITDUMP("\n J_M%03u_DS%02u LABEL DWORD\n", Compiler::s_compMethodsCount, jmpTabBase);
3774 for (unsigned i = 0; i < jumpCount; i++)
3776 BasicBlock* target = *jumpTable++;
3777 noway_assert(target->bbFlags & BBF_JMP_TARGET);
3779 JITDUMP(" DD L_M%03u_BB%02u\n", Compiler::s_compMethodsCount, target->bbNum);
3781 getEmitter()->emitDataGenData(i, target);
3784 getEmitter()->emitDataGenEnd();
3786 // Access to inline data is 'abstracted' by a special type of static member
3787 // (produced by eeFindJitDataOffs) which the emitter recognizes as being a reference
3788 // to constant data, not a real static field.
3789 getEmitter()->emitIns_R_C(INS_lea, emitTypeSize(TYP_I_IMPL), treeNode->gtRegNum,
3790 compiler->eeFindJitDataOffs(jmpTabBase), 0);
3791 genProduceReg(treeNode);
3794 // generate code for the locked operations:
3795 // GT_LOCKADD, GT_XCHG, GT_XADD
3796 void CodeGen::genLockedInstructions(GenTreeOp* treeNode)
3798 GenTree* data = treeNode->gtOp.gtOp2;
3799 GenTree* addr = treeNode->gtOp.gtOp1;
3800 regNumber targetReg = treeNode->gtRegNum;
3801 regNumber dataReg = data->gtRegNum;
3802 regNumber addrReg = addr->gtRegNum;
3803 var_types type = genActualType(data->TypeGet());
3806 // The register allocator should have extended the lifetime of the address
3807 // so that it is not used as the target.
3808 noway_assert(addrReg != targetReg);
3810 // If data is a lclVar that's not a last use, we'd better have allocated a register
3811 // for the result (except in the case of GT_LOCKADD which does not produce a register result).
3812 assert(targetReg != REG_NA || treeNode->OperGet() == GT_LOCKADD || !genIsRegCandidateLocal(data) ||
3813 (data->gtFlags & GTF_VAR_DEATH) != 0);
3815 genConsumeOperands(treeNode);
3816 if (targetReg != REG_NA && dataReg != REG_NA && dataReg != targetReg)
3818 inst_RV_RV(ins_Copy(type), targetReg, dataReg);
3819 data->gtRegNum = targetReg;
3821 // TODO-XArch-Cleanup: Consider whether it is worth it, for debugging purposes, to restore the
3822 // original gtRegNum on data, after calling emitInsBinary below.
3824 switch (treeNode->OperGet())
3831 // lock is implied by xchg
3842 // all of these nodes implicitly do an indirection on op1
3843 // so create a temporary node to feed into the pattern matching
3844 GenTreeIndir i = indirForm(type, addr);
3845 getEmitter()->emitInsBinary(ins, emitTypeSize(type), &i, data);
3847 if (treeNode->gtRegNum != REG_NA)
3849 genProduceReg(treeNode);
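// E.g. a GT_XADD whose result is used becomes (a sketch; the lock prefix is emitted
// separately for the interlocked forms, and is implied for xchg):
//      mov       targetReg, dataReg      ; only if dataReg != targetReg
//      lock xadd [addrReg], targetReg    ; targetReg receives the original value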
// generate code for BoundsCheck nodes
void CodeGen::genRangeCheck(GenTreePtr oper)
{
#ifdef FEATURE_SIMD
    noway_assert(oper->OperGet() == GT_ARR_BOUNDS_CHECK || oper->OperGet() == GT_SIMD_CHK);
#else  // !FEATURE_SIMD
    noway_assert(oper->OperGet() == GT_ARR_BOUNDS_CHECK);
#endif // !FEATURE_SIMD

    GenTreeBoundsChk* bndsChk = oper->AsBoundsChk();

    GenTreePtr arrIndex = bndsChk->gtIndex;
    GenTreePtr arrLen   = bndsChk->gtArrLen;
    GenTreePtr arrRef   = nullptr;

    GenTree *    src1, *src2;
    emitJumpKind jmpKind;

    genConsumeRegs(arrIndex);
    genConsumeRegs(arrLen);

    if (arrIndex->isContainedIntOrIImmed())
    {
        // arrIndex is a contained constant. In this case
        // we will generate one of the following
        //      cmp [mem], immed    (if arrLen is a memory op)
        //      cmp reg, immed      (if arrLen is in a reg)
        //
        // That is, arrLen cannot be a contained immediate.
        assert(!arrLen->isContainedIntOrIImmed());

        src1    = arrLen;
        src2    = arrIndex;
        jmpKind = EJ_jbe;
    }
    else
    {
        // arrIndex could either be a contained memory op or a reg.
        // In this case we will generate one of the following
        //      cmp [mem], immed    (if arrLen is a constant)
        //      cmp [mem], reg      (if arrLen is in a reg)
        //      cmp reg, immed      (if arrIndex is in a reg)
        //      cmp reg1, reg2      (if arrIndex is in reg1)
        //      cmp reg, [mem]      (if arrLen is a memory op)
        //
        // That is, only one of arrIndex or arrLen can be a memory op.
        assert(!arrIndex->isUsedFromMemory() || !arrLen->isUsedFromMemory());

        src1    = arrIndex;
        src2    = arrLen;
        jmpKind = EJ_jae;
    }

    var_types bndsChkType = src2->TypeGet();

    // Bounds checks can only be 32 or 64 bit sized comparisons.
    assert(bndsChkType == TYP_INT || bndsChkType == TYP_LONG);

    // The type of the bounds check should always be wide enough to compare against the index.
    assert(emitTypeSize(bndsChkType) >= emitTypeSize(src1->TypeGet()));

    getEmitter()->emitInsBinary(INS_cmp, emitTypeSize(bndsChkType), src1, src2);
    genJumpToThrowHlpBlk(jmpKind, bndsChk->gtThrowKind, bndsChk->gtIndRngFailBB);
}
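// For illustration: with the index in a register and the array length in memory, the
// sequence above boils down to (register/offset names are placeholders):
//
//      cmp  indexReg, dword ptr [arrReg + lenOffset]   ; unsigned compare
//      jae  ThrowRangeCheckFailed                      ; index >= length, or index < 0
//
// A single unsigned branch covers both the negative-index and the too-large-index cases,
// since a negative index compares as a very large unsigned value.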
//------------------------------------------------------------------------
// genOffsetOfMDArrayLowerBound: Returns the offset from the Array object to the
//   lower bound for the given dimension.
//
// Arguments:
//    elemType  - the element type of the array
//    rank      - the rank of the array
//    dimension - the dimension for which the lower bound offset will be returned.
//
// Return Value:
//    The offset.
//
unsigned CodeGen::genOffsetOfMDArrayLowerBound(var_types elemType, unsigned rank, unsigned dimension)
{
    // Note that the lower bound and length fields of the Array object are always TYP_INT, even on 64-bit targets.
    return compiler->eeGetArrayDataOffset(elemType) + genTypeSize(TYP_INT) * (dimension + rank);
}
//------------------------------------------------------------------------
// genOffsetOfMDArrayDimensionSize: Returns the offset from the Array object to the
//   size (length) for the given dimension.
//
// Arguments:
//    elemType  - the element type of the array
//    rank      - the rank of the array
//    dimension - the dimension for which the size offset will be returned.
//
// Return Value:
//    The offset.
//
unsigned CodeGen::genOffsetOfMDArrayDimensionSize(var_types elemType, unsigned rank, unsigned dimension)
{
    // Note that the lower bound and length fields of the Array object are always TYP_INT, even on 64-bit targets.
    return compiler->eeGetArrayDataOffset(elemType) + genTypeSize(TYP_INT) * dimension;
}
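// Worked example (illustrative; the exact data offset comes from eeGetArrayDataOffset):
// per the two formulas above, a rank-2 array lays out the dimension sizes first,
// followed by the lower bounds, each as a 32-bit int:
//
//      size[0]       at dataOffset + 4*0        (genOffsetOfMDArrayDimensionSize, dimension = 0)
//      size[1]       at dataOffset + 4*1
//      lowerBound[0] at dataOffset + 4*(0 + 2)  (genOffsetOfMDArrayLowerBound, rank = 2)
//      lowerBound[1] at dataOffset + 4*(1 + 2)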
//------------------------------------------------------------------------
// genCodeForArrIndex: Generates code to bounds check the index for one dimension of an array reference,
//                     producing the effective index by subtracting the lower bound.
//
// Arguments:
//    arrIndex - the node for which we're generating code
//
// Return Value:
//    None.
//
void CodeGen::genCodeForArrIndex(GenTreeArrIndex* arrIndex)
{
    GenTreePtr arrObj    = arrIndex->ArrObj();
    GenTreePtr indexNode = arrIndex->IndexExpr();

    regNumber arrReg   = genConsumeReg(arrObj);
    regNumber indexReg = genConsumeReg(indexNode);
    regNumber tgtReg   = arrIndex->gtRegNum;

    unsigned  dim      = arrIndex->gtCurrDim;
    unsigned  rank     = arrIndex->gtArrRank;
    var_types elemType = arrIndex->gtArrElemType;

    noway_assert(tgtReg != REG_NA);

    // Subtract the lower bound for this dimension.
    // TODO-XArch-CQ: make this contained if it's an immediate that fits.
    if (tgtReg != indexReg)
    {
        inst_RV_RV(INS_mov, tgtReg, indexReg, indexNode->TypeGet());
    }
    getEmitter()->emitIns_R_AR(INS_sub, emitActualTypeSize(TYP_INT), tgtReg, arrReg,
                               genOffsetOfMDArrayLowerBound(elemType, rank, dim));
    getEmitter()->emitIns_R_AR(INS_cmp, emitActualTypeSize(TYP_INT), tgtReg, arrReg,
                               genOffsetOfMDArrayDimensionSize(elemType, rank, dim));
    genJumpToThrowHlpBlk(EJ_jae, SCK_RNGCHK_FAIL);

    genProduceReg(arrIndex);
}
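// Sketch of the emitted sequence (placeholder register names):
//
//      mov  tgtReg, indexReg                       ; if not already in place
//      sub  tgtReg, dword ptr [arrReg + lbOffset]  ; effective index = index - lowerBound
//      cmp  tgtReg, dword ptr [arrReg + szOffset]  ; compare against this dimension's size
//      jae  ThrowRangeCheckFailed                  ; unsigned, so also catches index < lowerBound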
//------------------------------------------------------------------------
// genCodeForArrOffset: Generates code to compute the flattened array offset for
//                      one dimension of an array reference:
//                          result = (prevDimOffset * dimSize) + effectiveIndex
//                      where dimSize is obtained from the arrObj operand
//
// Arguments:
//    arrOffset - the node for which we're generating code
//
// Return Value:
//    None.
//
// Notes:
//    dimSize and effectiveIndex are always non-negative, the former by design,
//    and the latter because it has been normalized to be zero-based.
//
void CodeGen::genCodeForArrOffset(GenTreeArrOffs* arrOffset)
{
    GenTreePtr offsetNode = arrOffset->gtOffset;
    GenTreePtr indexNode  = arrOffset->gtIndex;
    GenTreePtr arrObj     = arrOffset->gtArrObj;

    regNumber tgtReg = arrOffset->gtRegNum;
    assert(tgtReg != REG_NA);

    unsigned  dim      = arrOffset->gtCurrDim;
    unsigned  rank     = arrOffset->gtArrRank;
    var_types elemType = arrOffset->gtArrElemType;

    // First, consume the operands in the correct order.
    regNumber offsetReg = REG_NA;
    regNumber tmpReg    = REG_NA;
    if (!offsetNode->IsIntegralConst(0))
    {
        offsetReg = genConsumeReg(offsetNode);

        // We will use a temp register for the offset*scale+effectiveIndex computation.
        tmpReg = arrOffset->GetSingleTempReg();
    }
    else
    {
        assert(offsetNode->isContained());
    }

    regNumber indexReg = genConsumeReg(indexNode);

    // Although arrReg may not be used in the constant-index case, if we have generated
    // the value into a register, we must consume it, otherwise we will fail to end the
    // live range of the gc ptr.
    // TODO-CQ: Currently arrObj will always have a register allocated to it.
    // We could avoid allocating a register for it, which would be of value if the arrObj
    // is an on-stack lclVar.
    regNumber arrReg = REG_NA;
    if (arrObj->gtHasReg())
    {
        arrReg = genConsumeReg(arrObj);
    }

    if (!offsetNode->IsIntegralConst(0))
    {
        assert(tmpReg != REG_NA);
        assert(arrReg != REG_NA);

        // Evaluate tgtReg = offsetReg*dim_size + indexReg.
        // tmpReg is used to load dim_size and the result of the multiplication.
        // Note that dim_size will never be negative.

        getEmitter()->emitIns_R_AR(INS_mov, emitActualTypeSize(TYP_INT), tmpReg, arrReg,
                                   genOffsetOfMDArrayDimensionSize(elemType, rank, dim));
        inst_RV_RV(INS_imul, tmpReg, offsetReg);

        if (tmpReg == tgtReg)
        {
            inst_RV_RV(INS_add, tmpReg, indexReg);
        }
        else
        {
            if (indexReg != tgtReg)
            {
                inst_RV_RV(INS_mov, tgtReg, indexReg, TYP_I_IMPL);
            }
            inst_RV_RV(INS_add, tgtReg, tmpReg);
        }
    }
    else
    {
        if (indexReg != tgtReg)
        {
            inst_RV_RV(INS_mov, tgtReg, indexReg, TYP_INT);
        }
    }
    genProduceReg(arrOffset);
}
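// For the non-constant-offset case, the sequence above is roughly (placeholders):
//
//      mov  tmpReg, dword ptr [arrReg + szOffset]  ; dimSize for this dimension
//      imul tmpReg, offsetReg                      ; prevDimOffset * dimSize
//      mov  tgtReg, indexReg                       ; if needed
//      add  tgtReg, tmpReg                         ; + effectiveIndex
//
// When the incoming offset is the constant 0 (the first dimension), only the
// 'mov tgtReg, indexReg' (if needed) survives.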
// make a temporary indir we can feed to pattern matching routines
// in cases where we don't want to instantiate all the indirs that happen
//
GenTreeIndir CodeGen::indirForm(var_types type, GenTree* base)
{
    GenTreeIndir i(GT_IND, type, base, nullptr);
    i.gtRegNum = REG_NA;
    // has to be nonnull (because contained nodes can't be the last in block)
    // but don't want it to be a valid pointer
    i.gtNext = (GenTree*)(-1);
    return i;
}

// make a temporary int constant we can feed to pattern matching routines
// in cases where we don't want to instantiate a real node
//
GenTreeIntCon CodeGen::intForm(var_types type, ssize_t value)
{
    GenTreeIntCon i(type, value);
    i.gtRegNum = REG_NA;
    // has to be nonnull (because contained nodes can't be the last in block)
    // but don't want it to be a valid pointer
    i.gtNext = (GenTree*)(-1);
    return i;
}
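// Typical usage (as in genLockedInstructions above): build a throwaway stack-allocated
// indir over an address that is already in a register, and hand it to the emitter's
// pattern-matching entry points without allocating a real GT_IND node:
//
//      GenTreeIndir i = indirForm(type, addr);
//      getEmitter()->emitInsBinary(ins, emitTypeSize(type), &i, data);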
instruction CodeGen::genGetInsForOper(genTreeOps oper, var_types type)
{
    // Operations on SIMD vectors shouldn't come down this path
    assert(!varTypeIsSIMD(type));
    if (varTypeIsFloating(type))
    {
        return ins_MathOp(oper, type);
    }

#if !defined(_TARGET_64BIT_)
#endif // !defined(_TARGET_64BIT_)
//------------------------------------------------------------------------
// genCodeForShift: Generates the code sequence for a GenTree node that
// represents a bit shift or rotate operation (<<, >>, >>>, rol, ror).
//
// Arguments:
//    tree - the bit shift node (that specifies the type of bit shift to perform).
//
// Assumptions:
//    a) All GenTrees are register allocated.
//    b) The shift-by-amount in tree->gtOp.gtOp2 is either a contained constant or
//       it's a register-allocated expression. If it is in a register that is
//       not RCX, it will be moved to RCX (so RCX better not be in use!).
//
void CodeGen::genCodeForShift(GenTreePtr tree)
{
    // Only the non-RMW case here.
    assert(tree->OperIsShiftOrRotate());
    assert(tree->gtOp.gtOp1->isUsedFromReg());
    assert(tree->gtRegNum != REG_NA);

    genConsumeOperands(tree->AsOp());

    var_types   targetType = tree->TypeGet();
    instruction ins        = genGetInsForOper(tree->OperGet(), targetType);

    GenTreePtr operand    = tree->gtGetOp1();
    regNumber  operandReg = operand->gtRegNum;

    GenTreePtr shiftBy = tree->gtGetOp2();

    if (shiftBy->isContainedIntOrIImmed())
    {
        // First, move the operand to the destination register and
        // later on perform the shift in-place.
        // (LSRA will try to avoid this situation through preferencing.)
        if (tree->gtRegNum != operandReg)
        {
            inst_RV_RV(INS_mov, tree->gtRegNum, operandReg, targetType);
        }

        int shiftByValue = (int)shiftBy->AsIntConCommon()->IconValue();
        inst_RV_SH(ins, emitTypeSize(tree), tree->gtRegNum, shiftByValue);
    }
    else
    {
        // We must have the number of bits to shift stored in ECX, since we constrained this node to
        // sit in ECX. In case this didn't happen, LSRA expects the code generator to move it since it's a single
        // register destination requirement.
        genCopyRegIfNeeded(shiftBy, REG_RCX);

        // The operand to be shifted must not be in ECX
        noway_assert(operandReg != REG_RCX);

        if (tree->gtRegNum != operandReg)
        {
            inst_RV_RV(INS_mov, tree->gtRegNum, operandReg, targetType);
        }
        inst_RV_CL(ins, tree->gtRegNum, targetType);
    }

    genProduceReg(tree);
}
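// The two shapes emitted above, sketched for a 32-bit left shift (placeholders):
//
//   constant shift amount:          variable shift amount:
//      mov  dstReg, srcReg             mov  ecx, shiftByReg   ; if not already in ECX
//      shl  dstReg, imm8               mov  dstReg, srcReg
//                                      shl  dstReg, cl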
//------------------------------------------------------------------------
// genCodeForShiftLong: Generates the code sequence for a GenTree node that
// represents a three operand bit shift or rotate operation (<<Hi, >>Lo).
//
// Arguments:
//    tree - the bit shift node (that specifies the type of bit shift to perform).
//
// Assumptions:
//    a) All GenTrees are register allocated.
//    b) The shift-by-amount in tree->gtOp.gtOp2 is a contained constant
//
void CodeGen::genCodeForShiftLong(GenTreePtr tree)
{
    // Only the non-RMW case here.
    genTreeOps oper = tree->OperGet();
    assert(oper == GT_LSH_HI || oper == GT_RSH_LO);

    GenTree* operand = tree->gtOp.gtOp1;
    assert(operand->OperGet() == GT_LONG);
    assert(operand->gtOp.gtOp1->isUsedFromReg());
    assert(operand->gtOp.gtOp2->isUsedFromReg());

    GenTree* operandLo = operand->gtGetOp1();
    GenTree* operandHi = operand->gtGetOp2();

    regNumber regLo = operandLo->gtRegNum;
    regNumber regHi = operandHi->gtRegNum;

    genConsumeOperands(tree->AsOp());

    var_types   targetType = tree->TypeGet();
    instruction ins        = genGetInsForOper(oper, targetType);

    GenTreePtr shiftBy = tree->gtGetOp2();

    assert(shiftBy->isContainedIntOrIImmed());

    unsigned int count = shiftBy->AsIntConCommon()->IconValue();

    regNumber regResult = (oper == GT_LSH_HI) ? regHi : regLo;

    if (regResult != tree->gtRegNum)
    {
        inst_RV_RV(INS_mov, tree->gtRegNum, regResult, targetType);
    }

    if (oper == GT_LSH_HI)
    {
        inst_RV_RV_IV(ins, emitTypeSize(targetType), tree->gtRegNum, regLo, count);
    }
    else
    {
        assert(oper == GT_RSH_LO);
        inst_RV_RV_IV(ins, emitTypeSize(targetType), tree->gtRegNum, regHi, count);
    }

    genProduceReg(tree);
}
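// Assuming genGetInsForOper maps GT_LSH_HI / GT_RSH_LO to the x86 double-precision
// shift instructions (shld/shrd), the result for a constant count is roughly:
//
//      GT_LSH_HI:  shld regHi, regLo, count   ; high result pulls bits in from the low half
//      GT_RSH_LO:  shrd regLo, regHi, count   ; low result pulls bits in from the high half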
//------------------------------------------------------------------------
// genCodeForShiftRMW: Generates the code sequence for a GT_STOREIND GenTree node that
// represents a RMW bit shift or rotate operation (<<, >>, >>>, rol, ror), for example:
//      GT_STOREIND( AddressTree, GT_SHL( Ind ( AddressTree ), Operand ) )
//
// Arguments:
//    storeInd - the GT_STOREIND node.
//
void CodeGen::genCodeForShiftRMW(GenTreeStoreInd* storeInd)
{
    GenTree* data = storeInd->Data();
    GenTree* addr = storeInd->Addr();

    assert(data->OperIsShiftOrRotate());

    // This function only handles the RMW case.
    assert(data->gtOp.gtOp1->isUsedFromMemory());
    assert(data->gtOp.gtOp1->isIndir());
    assert(Lowering::IndirsAreEquivalent(data->gtOp.gtOp1, storeInd));
    assert(data->gtRegNum == REG_NA);

    var_types   targetType = data->TypeGet();
    genTreeOps  oper       = data->OperGet();
    instruction ins        = genGetInsForOper(oper, targetType);
    emitAttr    attr       = EA_ATTR(genTypeSize(targetType));

    GenTree* shiftBy = data->gtOp.gtOp2;
    if (shiftBy->isContainedIntOrIImmed())
    {
        int shiftByValue = (int)shiftBy->AsIntConCommon()->IconValue();
        ins              = genMapShiftInsToShiftByConstantIns(ins, shiftByValue);
        if (shiftByValue == 1)
        {
            // There is no source in this case, as the shift by count is embedded in the instruction opcode itself.
            getEmitter()->emitInsRMW(ins, attr, storeInd);
        }
        else
        {
            getEmitter()->emitInsRMW(ins, attr, storeInd, shiftBy);
        }
    }
    else
    {
        // We must have the number of bits to shift stored in ECX, since we constrained this node to
        // sit in ECX. In case this didn't happen, LSRA expects the code generator to move it since it's a single
        // register destination requirement.
        regNumber shiftReg = shiftBy->gtRegNum;
        genCopyRegIfNeeded(shiftBy, REG_RCX);

        // The shiftBy operand is implicit, so call the unary version of emitInsRMW.
        getEmitter()->emitInsRMW(ins, attr, storeInd);
    }
}
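// Shapes emitted for the RMW shift (illustrative, 32-bit operand):
//
//      shift by 1:         shl dword ptr [addr], 1     ; dedicated one-bit encoding
//      shift by constant:  shl dword ptr [addr], imm8
//      shift by register:  shl dword ptr [addr], cl    ; count must live in ECX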
void CodeGen::genRegCopy(GenTree* treeNode)
{
    assert(treeNode->OperGet() == GT_COPY);
    GenTree* op1 = treeNode->gtOp.gtOp1;

    if (op1->IsMultiRegCall())
    {
        GenTreeCopyOrReload* copyTree    = treeNode->AsCopyOrReload();
        GenTreeCall*         call        = op1->AsCall();
        ReturnTypeDesc*      retTypeDesc = call->GetReturnTypeDesc();
        unsigned             regCount    = retTypeDesc->GetReturnRegCount();

        for (unsigned i = 0; i < regCount; ++i)
        {
            var_types type    = retTypeDesc->GetReturnRegType(i);
            regNumber fromReg = call->GetRegNumByIdx(i);
            regNumber toReg   = copyTree->GetRegNumByIdx(i);

            // A multi-reg GT_COPY node will have a valid reg only for those
            // positions for which the corresponding result reg of the call node needs
            // to be copied.
            if (toReg != REG_NA)
            {
                assert(toReg != fromReg);
                inst_RV_RV(ins_Copy(type), toReg, fromReg, type);
            }
        }
    }
    else
    {
        var_types targetType = treeNode->TypeGet();
        regNumber targetReg  = treeNode->gtRegNum;
        assert(targetReg != REG_NA);

        // Check whether this node and the node from which we're copying the value have
        // different register types. This can happen if (currently iff) we have a SIMD
        // vector type that fits in an integer register, in which case it is passed as
        // an argument, or returned from a call, in an integer register and must be
        // copied if it's in an xmm register.

        bool srcFltReg = (varTypeIsFloating(op1) || varTypeIsSIMD(op1));
        bool tgtFltReg = (varTypeIsFloating(treeNode) || varTypeIsSIMD(treeNode));
        if (srcFltReg != tgtFltReg)
        {
            instruction ins;
            regNumber   fpReg;
            regNumber   intReg;
            if (tgtFltReg)
            {
                ins    = ins_CopyIntToFloat(op1->TypeGet(), treeNode->TypeGet());
                fpReg  = targetReg;
                intReg = op1->gtRegNum;
            }
            else
            {
                ins    = ins_CopyFloatToInt(op1->TypeGet(), treeNode->TypeGet());
                intReg = targetReg;
                fpReg  = op1->gtRegNum;
            }
            inst_RV_RV(ins, fpReg, intReg, targetType);
        }
        else
        {
            inst_RV_RV(ins_Copy(targetType), targetReg, genConsumeReg(op1), targetType);
        }

        if (op1->IsLocal())
        {
            // The lclVar will never be a def.
            // If it is a last use, the lclVar will be killed by genConsumeReg(), as usual, and genProduceReg will
            // appropriately set the gcInfo for the copied value.
            // If not, there are two cases we need to handle:
            // - If this is a TEMPORARY copy (indicated by the GTF_VAR_DEATH flag) the variable
            //   will remain live in its original register.
            //   genProduceReg() will appropriately set the gcInfo for the copied value,
            //   and genConsumeReg will reset it.
            // - Otherwise, we need to update register info for the lclVar.

            GenTreeLclVarCommon* lcl = op1->AsLclVarCommon();
            assert((lcl->gtFlags & GTF_VAR_DEF) == 0);

            if ((lcl->gtFlags & GTF_VAR_DEATH) == 0 && (treeNode->gtFlags & GTF_VAR_DEATH) == 0)
            {
                LclVarDsc* varDsc = &compiler->lvaTable[lcl->gtLclNum];

                // If we didn't just spill it (in genConsumeReg, above), then update the register info
                if (varDsc->lvRegNum != REG_STK)
                {
                    // The old location is dying
                    genUpdateRegLife(varDsc, /*isBorn*/ false, /*isDying*/ true DEBUGARG(op1));

                    gcInfo.gcMarkRegSetNpt(genRegMask(op1->gtRegNum));

                    genUpdateVarReg(varDsc, treeNode);

                    // The new location is going live
                    genUpdateRegLife(varDsc, /*isBorn*/ true, /*isDying*/ false DEBUGARG(treeNode));
                }
            }
        }
    }

    genProduceReg(treeNode);
}
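// A minimal sketch of the cross-register-file case above, assuming that
// ins_CopyFloatToInt / ins_CopyIntToFloat resolve to the movd instruction family,
// as they do for scalar types on xarch (register choices are illustrative):
//
//      movd eax, xmm0    ; float/int bits: xmm register -> integer register
//      movd xmm0, rax    ; long bits: integer register -> xmm register (8-byte form)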
//------------------------------------------------------------------------
// genStoreInd: Generate code for a GT_STOREIND node.
//
// Arguments:
//    node - The GT_STOREIND node for which to generate code.
//
// Return Value:
//    None.
//
void CodeGen::genStoreInd(GenTreePtr node)
{
    assert(node->OperGet() == GT_STOREIND);

#ifdef FEATURE_SIMD
    // Storing Vector3 of size 12 bytes through indirection
    if (node->TypeGet() == TYP_SIMD12)
    {
        genStoreIndTypeSIMD12(node);
        return;
    }
#endif // FEATURE_SIMD

    GenTreeStoreInd* storeInd   = node->AsStoreInd();
    GenTree*         data       = storeInd->Data();
    GenTree*         addr       = storeInd->Addr();
    var_types        targetType = storeInd->TypeGet();

    assert(!varTypeIsFloating(targetType) || (targetType == data->TypeGet()));

    GCInfo::WriteBarrierForm writeBarrierForm = gcInfo.gcIsWriteBarrierCandidate(storeInd, data);
    if (writeBarrierForm != GCInfo::WBF_NoBarrier)
    {
        // data and addr must be in registers.
        // Consume both registers so that any copies of interfering registers are taken care of.
        genConsumeOperands(storeInd->AsOp());

        if (genEmitOptimizedGCWriteBarrier(writeBarrierForm, addr, data))
        {
            return;
        }

        // At this point, we should not have any interference.
        // That is, 'data' must not be in REG_ARG_0, as that is where 'addr' must go.
        noway_assert(data->gtRegNum != REG_ARG_0);

        // addr goes in REG_ARG_0
        genCopyRegIfNeeded(addr, REG_ARG_0);

        // data goes in REG_ARG_1
        genCopyRegIfNeeded(data, REG_ARG_1);

        genGCWriteBarrier(storeInd, writeBarrierForm);
    }
    else
    {
        bool     reverseOps    = ((storeInd->gtFlags & GTF_REVERSE_OPS) != 0);
        bool     dataIsUnary   = false;
        bool     isRMWMemoryOp = storeInd->IsRMWMemoryOp();
        GenTree* rmwSrc        = nullptr;

        // We must consume the operands in the proper execution order, so that liveness is
        // updated appropriately.
        if (!reverseOps)
        {
            genConsumeAddress(addr);
        }

        // If storeInd represents a RMW memory op then its data is a non-leaf node marked as contained
        // and the non-indir operand of data is the source of the RMW memory op.
        if (isRMWMemoryOp)
        {
            assert(data->isContained() && !data->OperIsLeaf());

            GenTreePtr rmwDst = nullptr;

            dataIsUnary = (GenTree::OperIsUnary(data->OperGet()) != 0);
            if (!dataIsUnary)
            {
                if (storeInd->IsRMWDstOp1())
                {
                    rmwDst = data->gtGetOp1();
                    rmwSrc = data->gtGetOp2();
                }
                else
                {
                    assert(storeInd->IsRMWDstOp2());
                    rmwDst = data->gtGetOp2();
                    rmwSrc = data->gtGetOp1();
                }

                genConsumeRegs(rmwSrc);
            }
            else
            {
                // *(p) = oper *(p): Here addr = p, rmwSrc = rmwDst = *(p) i.e. GT_IND(p)
                // For unary RMW ops, src and dst of the RMW memory op are the same. Lower
                // clears operand counts on rmwSrc and we don't need to perform a
                // genConsumeReg() on it.
                assert(storeInd->IsRMWDstOp1());
                rmwSrc = data->gtGetOp1();
                rmwDst = data->gtGetOp1();
                assert(rmwSrc->isUsedFromMemory());
            }

            assert(rmwSrc != nullptr);
            assert(rmwDst != nullptr);
            assert(Lowering::IndirsAreEquivalent(rmwDst, storeInd));
        }
        else
        {
            genConsumeRegs(data);
        }

        if (reverseOps)
        {
            genConsumeAddress(addr);
        }

        if (isRMWMemoryOp)
        {
            if (dataIsUnary)
            {
                // generate code for unary RMW memory ops like neg/not
                getEmitter()->emitInsRMW(genGetInsForOper(data->OperGet(), data->TypeGet()), emitTypeSize(storeInd),
                                         storeInd);
            }
            else
            {
                if (data->OperIsShiftOrRotate())
                {
                    // Generate code for shift RMW memory ops.
                    // The data address needs to be op1 (it must be [addr] = [addr] <shift> <amount>, not [addr] =
                    // <amount> <shift> [addr]).
                    assert(storeInd->IsRMWDstOp1());
                    assert(rmwSrc == data->gtGetOp2());
                    genCodeForShiftRMW(storeInd);
                }
                else if (data->OperGet() == GT_ADD && (rmwSrc->IsIntegralConst(1) || rmwSrc->IsIntegralConst(-1)))
                {
                    // Generate "inc/dec [mem]" instead of "add/sub [mem], 1".
                    //
                    // Notes:
                    //  1) Global morph transforms GT_SUB(x, +/-1) into GT_ADD(x, -/+1).
                    //  2) TODO-AMD64: Debugger routine NativeWalker::Decode() runs into
                    //     an assert while decoding ModR/M byte of "inc dword ptr [rax]".
                    //     It is not clear whether Decode() can handle all possible
                    //     addr modes with inc/dec. For this reason, inc/dec [mem]
                    //     is not generated while generating debuggable code. Update
                    //     the above if condition once Decode() routine is fixed.
                    assert(rmwSrc->isContainedIntOrIImmed());
                    instruction ins = rmwSrc->IsIntegralConst(1) ? INS_inc : INS_dec;
                    getEmitter()->emitInsRMW(ins, emitTypeSize(storeInd), storeInd);
                }
                else
                {
                    // generate code for remaining binary RMW memory ops like add/sub/and/or/xor
                    getEmitter()->emitInsRMW(genGetInsForOper(data->OperGet(), data->TypeGet()),
                                             emitTypeSize(storeInd), storeInd, rmwSrc);
                }
            }
        }
        else
        {
            getEmitter()->emitInsMov(ins_Store(data->TypeGet()), emitTypeSize(storeInd), storeInd);
        }
    }
}
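// Illustrative shapes for the non-write-barrier store paths (32-bit examples):
//
//      plain store:        mov  dword ptr [addr], dataReg      ; or an immediate
//      unary RMW:          neg  dword ptr [addr]
//      binary RMW:         add  dword ptr [addr], rmwSrc       ; reg or immediate
//      add/sub by 1:       inc  dword ptr [addr]  /  dec dword ptr [addr]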
//------------------------------------------------------------------------
// genEmitOptimizedGCWriteBarrier: Generate write barrier store using the optimized
// helper functions.
//
// Arguments:
//    writeBarrierForm - the write barrier form to use
//    addr - the address at which to do the store
//    data - the data to store
//
// Return Value:
//    true if an optimized write barrier form was used, false if not. If this
//    function returns false, the caller must emit a "standard" write barrier.
//
bool CodeGen::genEmitOptimizedGCWriteBarrier(GCInfo::WriteBarrierForm writeBarrierForm, GenTree* addr, GenTree* data)
{
    assert(writeBarrierForm != GCInfo::WBF_NoBarrier);

#if defined(_TARGET_X86_) && NOGC_WRITE_BARRIERS
    bool useOptimizedWriteBarriers = true;

#ifdef DEBUG
    useOptimizedWriteBarriers =
        (writeBarrierForm != GCInfo::WBF_NoBarrier_CheckNotHeapInDebug); // This one is always a call to a C++ method.
#endif

    if (!useOptimizedWriteBarriers)
    {
        return false;
    }

    const static int regToHelper[2][8] = {
        // If the target is known to be in managed memory
        {
            CORINFO_HELP_ASSIGN_REF_EAX, CORINFO_HELP_ASSIGN_REF_ECX, -1, CORINFO_HELP_ASSIGN_REF_EBX, -1,
            CORINFO_HELP_ASSIGN_REF_EBP, CORINFO_HELP_ASSIGN_REF_ESI, CORINFO_HELP_ASSIGN_REF_EDI,
        },

        // Don't know if the target is in managed memory
        {
            CORINFO_HELP_CHECKED_ASSIGN_REF_EAX, CORINFO_HELP_CHECKED_ASSIGN_REF_ECX, -1,
            CORINFO_HELP_CHECKED_ASSIGN_REF_EBX, -1, CORINFO_HELP_CHECKED_ASSIGN_REF_EBP,
            CORINFO_HELP_CHECKED_ASSIGN_REF_ESI, CORINFO_HELP_CHECKED_ASSIGN_REF_EDI,
        },
    };

    noway_assert(regToHelper[0][REG_EAX] == CORINFO_HELP_ASSIGN_REF_EAX);
    noway_assert(regToHelper[0][REG_ECX] == CORINFO_HELP_ASSIGN_REF_ECX);
    noway_assert(regToHelper[0][REG_EBX] == CORINFO_HELP_ASSIGN_REF_EBX);
    noway_assert(regToHelper[0][REG_ESP] == -1);
    noway_assert(regToHelper[0][REG_EBP] == CORINFO_HELP_ASSIGN_REF_EBP);
    noway_assert(regToHelper[0][REG_ESI] == CORINFO_HELP_ASSIGN_REF_ESI);
    noway_assert(regToHelper[0][REG_EDI] == CORINFO_HELP_ASSIGN_REF_EDI);

    noway_assert(regToHelper[1][REG_EAX] == CORINFO_HELP_CHECKED_ASSIGN_REF_EAX);
    noway_assert(regToHelper[1][REG_ECX] == CORINFO_HELP_CHECKED_ASSIGN_REF_ECX);
    noway_assert(regToHelper[1][REG_EBX] == CORINFO_HELP_CHECKED_ASSIGN_REF_EBX);
    noway_assert(regToHelper[1][REG_ESP] == -1);
    noway_assert(regToHelper[1][REG_EBP] == CORINFO_HELP_CHECKED_ASSIGN_REF_EBP);
    noway_assert(regToHelper[1][REG_ESI] == CORINFO_HELP_CHECKED_ASSIGN_REF_ESI);
    noway_assert(regToHelper[1][REG_EDI] == CORINFO_HELP_CHECKED_ASSIGN_REF_EDI);

    regNumber reg = data->gtRegNum;
    noway_assert((reg != REG_ESP) && (reg != REG_WRITE_BARRIER));

    // Generate the following code:
    //            lea     edx, addr
    //            call    write_barrier_helper_reg

    // addr goes in REG_ARG_0
    genCopyRegIfNeeded(addr, REG_WRITE_BARRIER);

    unsigned tgtAnywhere = 0;
    if (writeBarrierForm != GCInfo::WBF_BarrierUnchecked)
    {
        tgtAnywhere = 1;
    }

    // We might want to call a modified version of genGCWriteBarrier() to get the benefit of
    // the FEATURE_COUNT_GC_WRITE_BARRIERS code there, but that code doesn't look like it works
    // with rationalized RyuJIT IR. So, for now, just emit the helper call directly here.

    genEmitHelperCall(regToHelper[tgtAnywhere][reg],
                      0,           // argSize
                      EA_PTRSIZE); // retSize

    return true;
#else  // !defined(_TARGET_X86_) || !NOGC_WRITE_BARRIERS
    return false;
#endif // !defined(_TARGET_X86_) || !NOGC_WRITE_BARRIERS
}
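// Sketch of the emitted barrier for data in eax with the target not known to be on
// the heap (x86, NOGC_WRITE_BARRIERS; assumes REG_WRITE_BARRIER is edx as on x86):
//
//      mov  edx, addrReg                         ; addr goes in REG_WRITE_BARRIER
//      call CORINFO_HELP_CHECKED_ASSIGN_REF_EAX  ; helper variant encodes data's register
//
// The helper performs the store plus the card-table update, so no separate mov is emitted.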
// Produce code for a GT_CALL node
void CodeGen::genCallInstruction(GenTreeCall* call)
{
    genAlignStackBeforeCall(call);

    gtCallTypes callType = (gtCallTypes)call->gtCallType;

    IL_OFFSETX ilOffset = BAD_IL_OFFSET;

    // all virtuals should have been expanded into a control expression
    assert(!call->IsVirtual() || call->gtControlExpr || call->gtCallAddr);

    // Insert a GS check if necessary
    if (call->IsTailCallViaHelper())
    {
        if (compiler->getNeedsGSSecurityCookie())
        {
#if FEATURE_FIXED_OUT_ARGS
            // If either of the conditions below is true, we will need a temporary register in order to perform the GS
            // cookie check. When FEATURE_FIXED_OUT_ARGS is disabled, we save and restore the temporary register using
            // push/pop. When FEATURE_FIXED_OUT_ARGS is enabled, however, we need an alternative solution. For now,
            // though, the tail prefix is ignored on all platforms that use fixed out args, so we should never hit this
            // case.
            assert(compiler->gsGlobalSecurityCookieAddr == nullptr);
            assert((int)compiler->gsGlobalSecurityCookieVal == (ssize_t)compiler->gsGlobalSecurityCookieVal);
#endif
            genEmitGSCookieCheck(true);
        }
    }

    // Consume all the arg regs
    for (GenTreePtr list = call->gtCallLateArgs; list; list = list->MoveNext())
    {
        assert(list->OperIsList());

        GenTreePtr argNode = list->Current();

        fgArgTabEntryPtr curArgTabEntry = compiler->gtArgEntryByNode(call, argNode->gtSkipReloadOrCopy());
        assert(curArgTabEntry);

        if (curArgTabEntry->regNum == REG_STK)
        {
            continue;
        }

#ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
        // Deal with multi register passed struct args.
        if (argNode->OperGet() == GT_FIELD_LIST)
        {
            GenTreeFieldList* fieldListPtr = argNode->AsFieldList();
            unsigned          iterationNum = 0;
            for (; fieldListPtr != nullptr; fieldListPtr = fieldListPtr->Rest(), iterationNum++)
            {
                GenTreePtr putArgRegNode = fieldListPtr->gtOp.gtOp1;
                assert(putArgRegNode->gtOper == GT_PUTARG_REG);
                regNumber argReg = REG_NA;

                if (iterationNum == 0)
                {
                    argReg = curArgTabEntry->regNum;
                }
                else
                {
                    assert(iterationNum == 1);
                    argReg = curArgTabEntry->otherRegNum;
                }

                genConsumeReg(putArgRegNode);

                // Validate the putArgRegNode has the right type.
                assert(putArgRegNode->TypeGet() ==
                       compiler->GetTypeFromClassificationAndSizes(
                           curArgTabEntry->structDesc.eightByteClassifications[iterationNum],
                           curArgTabEntry->structDesc.eightByteSizes[iterationNum]));
                if (putArgRegNode->gtRegNum != argReg)
                {
                    inst_RV_RV(ins_Move_Extend(putArgRegNode->TypeGet(), putArgRegNode->InReg()), argReg,
                               putArgRegNode->gtRegNum);
                }
            }
        }
        else
#endif // FEATURE_UNIX_AMD64_STRUCT_PASSING
        {
            regNumber argReg = curArgTabEntry->regNum;
            genConsumeReg(argNode);
            if (argNode->gtRegNum != argReg)
            {
                inst_RV_RV(ins_Move_Extend(argNode->TypeGet(), argNode->InReg()), argReg, argNode->gtRegNum);
            }
        }

#if FEATURE_VARARG
        // In the case of a varargs call,
        // the ABI dictates that if we have floating point args,
        // we must pass the enregistered arguments in both the
        // integer and floating point registers, so let's do that.
        if (call->IsVarargs() && varTypeIsFloating(argNode))
        {
            regNumber   targetReg = compiler->getCallArgIntRegister(argNode->gtRegNum);
            instruction ins       = ins_CopyFloatToInt(argNode->TypeGet(), TYP_LONG);
            inst_RV_RV(ins, argNode->gtRegNum, targetReg);
        }
#endif // FEATURE_VARARG
    }

#if defined(_TARGET_X86_) || defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
    // The call will pop its arguments.
    // for each putarg_stk:
    ssize_t    stackArgBytes = 0;
    GenTreePtr args          = call->gtCallArgs;
    while (args)
    {
        GenTreePtr arg = args->gtOp.gtOp1;
        if (arg->OperGet() != GT_ARGPLACE && !(arg->gtFlags & GTF_LATE_ARG))
        {
#if defined(_TARGET_X86_)
            if ((arg->OperGet() == GT_PUTARG_STK) && (arg->gtGetOp1()->OperGet() == GT_FIELD_LIST))
            {
                fgArgTabEntryPtr curArgTabEntry = compiler->gtArgEntryByNode(call, arg);
                assert(curArgTabEntry);
                stackArgBytes += curArgTabEntry->numSlots * TARGET_POINTER_SIZE;
            }
            else
#endif // defined(_TARGET_X86_)

#ifdef FEATURE_PUT_STRUCT_ARG_STK
                if (genActualType(arg->TypeGet()) == TYP_STRUCT)
            {
                assert(arg->OperGet() == GT_PUTARG_STK);

                GenTreeObj* obj      = arg->gtGetOp1()->AsObj();
                unsigned    argBytes = (unsigned)roundUp(obj->gtBlkSize, TARGET_POINTER_SIZE);

                fgArgTabEntryPtr curArgTabEntry = compiler->gtArgEntryByNode(call, arg);
                assert((curArgTabEntry->numSlots * TARGET_POINTER_SIZE) == argBytes);

                stackArgBytes += argBytes;
            }
            else
#endif // FEATURE_PUT_STRUCT_ARG_STK
            {
                stackArgBytes += genTypeSize(genActualType(arg->TypeGet()));
            }
        }
        args = args->gtOp.gtOp2;
    }
#endif // defined(_TARGET_X86_) || defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
    // Insert a null check on "this" pointer if asked.
    if (call->NeedsNullCheck())
    {
        const regNumber regThis = genGetThisArgReg(call);
        getEmitter()->emitIns_AR_R(INS_cmp, EA_4BYTE, regThis, regThis, 0);
    }

    // Either gtControlExpr != null or gtCallAddr != null or it is a direct non-virtual call to a user or helper
    // method.
    CORINFO_METHOD_HANDLE methHnd;
    GenTree*              target = call->gtControlExpr;
    if (callType == CT_INDIRECT)
    {
        assert(target == nullptr);
        target  = call->gtCallAddr;
        methHnd = nullptr;
    }
    else
    {
        methHnd = call->gtCallMethHnd;
    }

    CORINFO_SIG_INFO* sigInfo = nullptr;
#ifdef DEBUG
    // Pass the call signature information down into the emitter so the emitter can associate
    // native call sites with the signatures they were generated from.
    if (callType != CT_HELPER)
    {
        sigInfo = call->callSig;
    }
#endif // DEBUG

    // If fast tail call, then we are done. In this case we setup the args (both reg args
    // and stack args in incoming arg area) and call target in rax. Epilog sequence would
    // generate "jmp rax".
    if (call->IsFastTailCall())
    {
        // Don't support fast tail calling JIT helpers
        assert(callType != CT_HELPER);

        // Fast tail calls materialize call target either in gtControlExpr or in gtCallAddr.
        assert(target != nullptr);

        genConsumeReg(target);
        genCopyRegIfNeeded(target, REG_RAX);
        return;
    }

    // For a PInvoke to unmanaged code we emit a label to clear
    // the GC pointer state before the callsite.
    // We can't utilize the typical lazy killing of GC pointers
    // at (or inside) the callsite.
    if (call->IsUnmanaged())
    {
        genDefineTempLabel(genCreateTempLabel());
    }

    // Determine return value size(s).
    ReturnTypeDesc* retTypeDesc   = call->GetReturnTypeDesc();
    emitAttr        retSize       = EA_PTRSIZE;
    emitAttr        secondRetSize = EA_UNKNOWN;

    if (call->HasMultiRegRetVal())
    {
        retSize       = emitTypeSize(retTypeDesc->GetReturnRegType(0));
        secondRetSize = emitTypeSize(retTypeDesc->GetReturnRegType(1));
    }
    else
    {
        assert(!varTypeIsStruct(call));

        if (call->gtType == TYP_REF || call->gtType == TYP_ARRAY)
        {
            retSize = EA_GCREF;
        }
        else if (call->gtType == TYP_BYREF)
        {
            retSize = EA_BYREF;
        }
    }

    bool            fPossibleSyncHelperCall = false;
    CorInfoHelpFunc helperNum               = CORINFO_HELP_UNDEF;

    // We need to propagate the IL offset information to the call instruction, so we can emit
    // an IL to native mapping record for the call, to support managed return value debugging.
    // We don't want tail call helper calls that were converted from normal calls to get a record,
    // so we skip this hash table lookup logic in that case.
    if (compiler->opts.compDbgInfo && compiler->genCallSite2ILOffsetMap != nullptr && !call->IsTailCall())
    {
        (void)compiler->genCallSite2ILOffsetMap->Lookup(call, &ilOffset);
    }
#if defined(_TARGET_X86_)
    bool fCallerPop = call->CallerPop();

#ifdef UNIX_X86_ABI
    if (!call->IsUnmanaged())
    {
        CorInfoCallConv callConv = CORINFO_CALLCONV_DEFAULT;

        if ((callType != CT_HELPER) && call->callSig)
        {
            callConv = call->callSig->callConv;
        }

        fCallerPop |= IsCallerPop(callConv);
    }
#endif // UNIX_X86_ABI

    // If the callee pops the arguments, we pass a positive value as the argSize, and the emitter will
    // adjust its stack level accordingly.
    // If the caller needs to explicitly pop its arguments, we must pass a negative value, and then do the
    // pop when we're done.
    ssize_t argSizeForEmitter = stackArgBytes;
    if (fCallerPop)
    {
        argSizeForEmitter = -stackArgBytes;
    }
#endif // defined(_TARGET_X86_)

#ifdef FEATURE_AVX_SUPPORT
    // When it's a PInvoke call and the call type is USER function, we issue VZEROUPPER here
    // if the function contains 256-bit AVX instructions; this is to avoid the AVX-256 to legacy SSE
    // transition penalty, assuming the user function contains legacy SSE instructions.
    // To limit the code size impact, we only issue VZEROUPPER before the PInvoke call, not after it,
    // because the transition penalty from legacy SSE to AVX only happens when there was a preceding
    // 256-bit AVX to legacy SSE transition.
    if (call->IsPInvoke() && (call->gtCallType == CT_USER_FUNC) && getEmitter()->Contains256bitAVX())
    {
        assert(compiler->getSIMDInstructionSet() == InstructionSet_AVX);
        instGen(INS_vzeroupper);
    }
#endif
    if (target != nullptr)
    {
#ifdef _TARGET_X86_
        if (call->IsVirtualStub() && (call->gtCallType == CT_INDIRECT))
        {
            // On x86, we need to generate a very specific pattern for indirect VSD calls:
            //
            //    3-byte nop
            //    call dword ptr [eax]
            //
            // Where EAX is also used as an argument to the stub dispatch helper. Make
            // sure that the call target address is computed into EAX in this case.

            assert(REG_VIRTUAL_STUB_PARAM == REG_VIRTUAL_STUB_TARGET);

            assert(target->isContainedIndir());
            assert(target->OperGet() == GT_IND);

            GenTree* addr = target->AsIndir()->Addr();
            assert(addr->isUsedFromReg());

            genConsumeReg(addr);
            genCopyRegIfNeeded(addr, REG_VIRTUAL_STUB_TARGET);

            getEmitter()->emitIns_Nop(3);

            getEmitter()->emitIns_Call(emitter::EmitCallType(emitter::EC_INDIR_ARD),
                                       methHnd,
                                       INDEBUG_LDISASM_COMMA(sigInfo)
                                       nullptr,
                                       argSizeForEmitter,
                                       retSize
                                       MULTIREG_HAS_SECOND_GC_RET_ONLY_ARG(secondRetSize),
                                       gcInfo.gcVarPtrSetCur,
                                       gcInfo.gcRegGCrefSetCur,
                                       gcInfo.gcRegByrefSetCur,
                                       ilOffset, REG_VIRTUAL_STUB_TARGET, REG_NA, 1, 0);
        }
        else
#endif
        if (target->isContainedIndir())
        {
            if (target->AsIndir()->HasBase() && target->AsIndir()->Base()->isContainedIntOrIImmed())
            {
                // Note that if gtControlExpr is an indir of an absolute address, we mark it as
                // contained only if it can be encoded as PC-relative offset.
                assert(target->AsIndir()->Base()->AsIntConCommon()->FitsInAddrBase(compiler));

                genEmitCall(emitter::EC_FUNC_TOKEN_INDIR,
                            methHnd,
                            INDEBUG_LDISASM_COMMA(sigInfo)
                            (void*) target->AsIndir()->Base()->AsIntConCommon()->IconValue()
                            X86_ARG(argSizeForEmitter),
                            retSize
                            MULTIREG_HAS_SECOND_GC_RET_ONLY_ARG(secondRetSize),
                            ilOffset);
            }
            else
            {
                genEmitCall(emitter::EC_INDIR_ARD,
                            methHnd,
                            INDEBUG_LDISASM_COMMA(sigInfo)
                            target->AsIndir()
                            X86_ARG(argSizeForEmitter),
                            retSize
                            MULTIREG_HAS_SECOND_GC_RET_ONLY_ARG(secondRetSize),
                            ilOffset);
            }
        }
        else
        {
            // We have already generated code for gtControlExpr evaluating it into a register.
            // We just need to emit "call reg" in this case.
            assert(genIsValidIntReg(target->gtRegNum));

            genEmitCall(emitter::EC_INDIR_R,
                        methHnd,
                        INDEBUG_LDISASM_COMMA(sigInfo)
                        nullptr // addr
                        X86_ARG(argSizeForEmitter),
                        retSize
                        MULTIREG_HAS_SECOND_GC_RET_ONLY_ARG(secondRetSize),
                        ilOffset,
                        genConsumeReg(target));
        }
    }
#ifdef FEATURE_READYTORUN_COMPILER
    else if (call->gtEntryPoint.addr != nullptr)
    {
        genEmitCall((call->gtEntryPoint.accessType == IAT_VALUE) ? emitter::EC_FUNC_TOKEN
                                                                 : emitter::EC_FUNC_TOKEN_INDIR,
                    methHnd,
                    INDEBUG_LDISASM_COMMA(sigInfo)
                    (void*) call->gtEntryPoint.addr
                    X86_ARG(argSizeForEmitter),
                    retSize
                    MULTIREG_HAS_SECOND_GC_RET_ONLY_ARG(secondRetSize),
                    ilOffset);
    }
#endif
    else
    {
        // Generate a direct call to a non-virtual user defined or helper method
        assert(callType == CT_HELPER || callType == CT_USER_FUNC);

        void* addr = nullptr;
        if (callType == CT_HELPER)
        {
            // Direct call to a helper method.
            helperNum = compiler->eeGetHelperNum(methHnd);
            noway_assert(helperNum != CORINFO_HELP_UNDEF);

            void* pAddr = nullptr;
            addr        = compiler->compGetHelperFtn(helperNum, (void**)&pAddr);

            if (addr == nullptr)
            {
                addr = pAddr;
            }

            // tracking of region protected by the monitor in synchronized methods
            if (compiler->info.compFlags & CORINFO_FLG_SYNCH)
            {
                fPossibleSyncHelperCall = true;
            }
        }
        else
        {
            // Direct call to a non-virtual user function.
            addr = call->gtDirectCallAddress;
        }

        // Non-virtual direct calls to known addresses
        genEmitCall(emitter::EC_FUNC_TOKEN,
                    methHnd,
                    INDEBUG_LDISASM_COMMA(sigInfo)
                    addr
                    X86_ARG(argSizeForEmitter),
                    retSize
                    MULTIREG_HAS_SECOND_GC_RET_ONLY_ARG(secondRetSize),
                    ilOffset);
    }

    // if it was a PInvoke we may have needed to get the address of a label
    if (genPendingCallLabel)
    {
        assert(call->IsUnmanaged());
        genDefineTempLabel(genPendingCallLabel);
        genPendingCallLabel = nullptr;
    }
    // All Callee arg registers are trashed and no longer contain any GC pointers.
    // TODO-XArch-Bug?: As a matter of fact shouldn't we be killing all of callee trashed regs here?
    // For now we will assert that other than arg regs gc ref/byref set doesn't contain any other
    // registers from RBM_CALLEE_TRASH.
    assert((gcInfo.gcRegGCrefSetCur & (RBM_CALLEE_TRASH & ~RBM_ARG_REGS)) == 0);
    assert((gcInfo.gcRegByrefSetCur & (RBM_CALLEE_TRASH & ~RBM_ARG_REGS)) == 0);
    gcInfo.gcRegGCrefSetCur &= ~RBM_ARG_REGS;
    gcInfo.gcRegByrefSetCur &= ~RBM_ARG_REGS;

    var_types returnType = call->TypeGet();
    if (returnType != TYP_VOID)
    {
#ifdef _TARGET_X86_
        if (varTypeIsFloating(returnType))
        {
            // Spill the value from the fp stack.
            // Then, load it into the target register.
            call->gtFlags |= GTF_SPILL;
            regSet.rsSpillFPStack(call);
            call->gtFlags |= GTF_SPILLED;
            call->gtFlags &= ~GTF_SPILL;
        }
        else
#endif // _TARGET_X86_
        {
            regNumber returnReg;

            if (call->HasMultiRegRetVal())
            {
                assert(retTypeDesc != nullptr);
                unsigned regCount = retTypeDesc->GetReturnRegCount();

                // If regs allocated to call node are different from ABI return
                // regs in which the call has returned its result, move the result
                // to regs allocated to call node.
                for (unsigned i = 0; i < regCount; ++i)
                {
                    var_types regType      = retTypeDesc->GetReturnRegType(i);
                    returnReg              = retTypeDesc->GetABIReturnReg(i);
                    regNumber allocatedReg = call->GetRegNumByIdx(i);
                    if (returnReg != allocatedReg)
                    {
                        inst_RV_RV(ins_Copy(regType), allocatedReg, returnReg, regType);
                    }
                }

#ifdef FEATURE_SIMD
                // A Vector3 return value is stored in xmm0 and xmm1.
                // RyuJIT assumes that the upper unused bits of xmm1 are cleared but
                // the native compiler doesn't guarantee it.
                if (returnType == TYP_SIMD12)
                {
                    returnReg = retTypeDesc->GetABIReturnReg(1);
                    // Clear the upper 32 bits by two shift instructions.
                    // retReg = retReg << 96
                    // retReg = retReg >> 96
                    getEmitter()->emitIns_R_I(INS_pslldq, emitActualTypeSize(TYP_SIMD12), returnReg, 12);
                    getEmitter()->emitIns_R_I(INS_psrldq, emitActualTypeSize(TYP_SIMD12), returnReg, 12);
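                    // Worked example (illustrative): if xmm1 holds bytes [z0..z3 | g4..g15]
                    // (the 4 meaningful bytes followed by 12 bytes of garbage), then
                    // "pslldq xmm1, 12" yields [0..0 | z0..z3], and "psrldq xmm1, 12"
                    // brings it back to [z0..z3 | 0..0], guaranteeing the upper 96 bits are zero.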
                }
#endif // FEATURE_SIMD
            }
            else
            {
#ifdef _TARGET_X86_
                if (call->IsHelperCall(compiler, CORINFO_HELP_INIT_PINVOKE_FRAME))
                {
                    // The x86 CORINFO_HELP_INIT_PINVOKE_FRAME helper uses a custom calling convention that returns with
                    // TCB in REG_PINVOKE_TCB. AMD64/ARM64 use the standard calling convention. fgMorphCall() sets the
                    // correct argument registers.
                    returnReg = REG_PINVOKE_TCB;
                }
                else
#endif // _TARGET_X86_
                if (varTypeIsFloating(returnType))
                {
                    returnReg = REG_FLOATRET;
                }
                else
                {
                    returnReg = REG_INTRET;
                }

                if (call->gtRegNum != returnReg)
                {
                    inst_RV_RV(ins_Copy(returnType), call->gtRegNum, returnReg, returnType);
                }
            }

            genProduceReg(call);
        }
    }
    // If there is nothing next, that means the result is thrown away, so this value is not live.
    // However, for minopts or debuggable code, we keep it live to support managed return value debugging.
    if ((call->gtNext == nullptr) && !compiler->opts.MinOpts() && !compiler->opts.compDbgCode)
    {
        gcInfo.gcMarkRegSetNpt(RBM_INTRET);
    }

#if !FEATURE_EH_FUNCLETS
    //-------------------------------------------------------------------------
    // Create a label for tracking of region protected by the monitor in synchronized methods.
    // This needs to be here, rather than above where fPossibleSyncHelperCall is set,
    // so the GC state vars have been updated before creating the label.

    if (fPossibleSyncHelperCall)
    {
        switch (helperNum)
        {
            case CORINFO_HELP_MON_ENTER:
            case CORINFO_HELP_MON_ENTER_STATIC:
                noway_assert(compiler->syncStartEmitCookie == NULL);
                compiler->syncStartEmitCookie =
                    getEmitter()->emitAddLabel(gcInfo.gcVarPtrSetCur, gcInfo.gcRegGCrefSetCur, gcInfo.gcRegByrefSetCur);
                noway_assert(compiler->syncStartEmitCookie != NULL);
                break;
            case CORINFO_HELP_MON_EXIT:
            case CORINFO_HELP_MON_EXIT_STATIC:
                noway_assert(compiler->syncEndEmitCookie == NULL);
                compiler->syncEndEmitCookie =
                    getEmitter()->emitAddLabel(gcInfo.gcVarPtrSetCur, gcInfo.gcRegGCrefSetCur, gcInfo.gcRegByrefSetCur);
                noway_assert(compiler->syncEndEmitCookie != NULL);
                break;
            default:
                break;
        }
    }
#endif // !FEATURE_EH_FUNCLETS

    unsigned stackAdjustBias = 0;

#if defined(_TARGET_X86_)
    // Is the caller supposed to pop the arguments?
    if (fCallerPop && (stackArgBytes != 0))
    {
        stackAdjustBias = stackArgBytes;
    }

    SubtractStackLevel(stackArgBytes);
#endif // _TARGET_X86_

    genRemoveAlignmentAfterCall(call, stackAdjustBias);
}
// Produce code for a GT_JMP node.
// The arguments of the caller need to be transferred to the callee before exiting caller.
// The actual jump to callee is generated as part of caller epilog sequence.
// Therefore the codegen of GT_JMP is to ensure that the callee arguments are correctly setup.
void CodeGen::genJmpMethod(GenTreePtr jmp)
{
    assert(jmp->OperGet() == GT_JMP);
    assert(compiler->compJmpOpUsed);

    // If no arguments, nothing to do
    if (compiler->info.compArgsCount == 0)
    {
        return;
    }

    // Make sure register arguments are in their initial registers
    // and stack arguments are put back as well.
    unsigned   varNum;
    LclVarDsc* varDsc;

    // First move any en-registered stack arguments back to the stack.
    // At the same time any reg arg not in correct reg is moved back to its stack location.
    //
    // We are not strictly required to spill reg args that are not in the desired reg for a jmp call
    // But that would require us to deal with circularity while moving values around. Spilling
    // to stack makes the implementation simple, which is not a bad trade off given Jmp calls
    // are not frequent.
    for (varNum = 0; (varNum < compiler->info.compArgsCount); varNum++)
    {
        varDsc = compiler->lvaTable + varNum;

        if (varDsc->lvPromoted)
        {
            noway_assert(varDsc->lvFieldCnt == 1); // We only handle one field here

            unsigned fieldVarNum = varDsc->lvFieldLclStart;
            varDsc               = compiler->lvaTable + fieldVarNum;
        }
        noway_assert(varDsc->lvIsParam);

        if (varDsc->lvIsRegArg && (varDsc->lvRegNum != REG_STK))
        {
            // Skip reg args which are already in their right register for the jmp call.
            // If not, we will spill such args to their stack locations.
            //
            // If we need to generate a tail call profiler hook, then spill all
            // arg regs to free them up for the callback.
            if (!compiler->compIsProfilerHookNeeded() && (varDsc->lvRegNum == varDsc->lvArgReg))
            {
                continue;
            }
        }
        else if (varDsc->lvRegNum == REG_STK)
        {
            // Skip args which are currently living on the stack.
            continue;
        }

        // If we came here it means either a reg argument not in the right register or
        // a stack argument currently living in a register. In either case the following
        // assert should hold.
        assert(varDsc->lvRegNum != REG_STK);

        var_types loadType = varDsc->lvaArgType();
        getEmitter()->emitIns_S_R(ins_Store(loadType), emitTypeSize(loadType), varDsc->lvRegNum, varNum, 0);

        // Update lvRegNum life and GC info to indicate lvRegNum is dead and varDsc stack slot is going live.
        // Note that we cannot modify varDsc->lvRegNum here because another basic block may not be expecting it.
        // Therefore manually update life of varDsc->lvRegNum.
        regMaskTP tempMask = varDsc->lvRegMask();
        regSet.RemoveMaskVars(tempMask);
        gcInfo.gcMarkRegSetNpt(tempMask);
        if (compiler->lvaIsGCTracked(varDsc))
        {
#ifdef DEBUG
            if (!VarSetOps::IsMember(compiler, gcInfo.gcVarPtrSetCur, varDsc->lvVarIndex))
            {
                JITDUMP("\t\t\t\t\t\t\tVar V%02u becoming live\n", varNum);
            }
            else
            {
                JITDUMP("\t\t\t\t\t\t\tVar V%02u continuing live\n", varNum);
            }
#endif // DEBUG

            VarSetOps::AddElemD(compiler, gcInfo.gcVarPtrSetCur, varDsc->lvVarIndex);
        }
    }

#ifdef PROFILING_SUPPORTED
    // At this point all arg regs are free.
    // Emit tail call profiler callback.
    genProfilingLeaveCallback(CORINFO_HELP_PROF_FCN_TAILCALL);
#endif
    // Next, move any register arguments that are currently living on the stack back to their registers.
    regMaskTP fixedIntArgMask = RBM_NONE;    // tracks the int arg regs occupying fixed args in case of a vararg method.
    unsigned  firstArgVarNum  = BAD_VAR_NUM; // varNum of the first argument in case of a vararg method.
    for (varNum = 0; (varNum < compiler->info.compArgsCount); varNum++)
    {
        varDsc = compiler->lvaTable + varNum;
        if (varDsc->lvPromoted)
        {
            noway_assert(varDsc->lvFieldCnt == 1); // We only handle one field here

            unsigned fieldVarNum = varDsc->lvFieldLclStart;
            varDsc               = compiler->lvaTable + fieldVarNum;
        }
        noway_assert(varDsc->lvIsParam);

        // Skip if arg not passed in a register.
        if (!varDsc->lvIsRegArg)
        {
            continue;
        }

#if defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
        if (varTypeIsStruct(varDsc))
        {
            CORINFO_CLASS_HANDLE typeHnd = varDsc->lvVerTypeInfo.GetClassHandle();
            assert(typeHnd != nullptr);

            SYSTEMV_AMD64_CORINFO_STRUCT_REG_PASSING_DESCRIPTOR structDesc;
            compiler->eeGetSystemVAmd64PassStructInRegisterDescriptor(typeHnd, &structDesc);
            assert(structDesc.passedInRegisters);

            unsigned __int8 offset0 = 0;
            unsigned __int8 offset1 = 0;
            var_types       type0   = TYP_UNKNOWN;
            var_types       type1   = TYP_UNKNOWN;

            // Get the eightbyte data
            compiler->GetStructTypeOffset(structDesc, &type0, &type1, &offset0, &offset1);

            // Move the values into the right registers.
            //
            // Update varDsc->lvArgReg and lvOtherArgReg life and GC Info to indicate varDsc stack slot is dead and
            // argReg is going live. Note that we cannot modify varDsc->lvRegNum and lvOtherArgReg here because another
            // basic block may not be expecting it. Therefore manually update life of argReg. Note that GT_JMP marks
            // the end of the basic block and after which reg life and gc info will be recomputed for the new block in
            // genCodeForBBList().
            if (type0 != TYP_UNKNOWN)
            {
                getEmitter()->emitIns_R_S(ins_Load(type0), emitTypeSize(type0), varDsc->lvArgReg, varNum, offset0);
                regSet.rsMaskVars |= genRegMask(varDsc->lvArgReg);
                gcInfo.gcMarkRegPtrVal(varDsc->lvArgReg, type0);
            }

            if (type1 != TYP_UNKNOWN)
            {
                getEmitter()->emitIns_R_S(ins_Load(type1), emitTypeSize(type1), varDsc->lvOtherArgReg, varNum, offset1);
                regSet.rsMaskVars |= genRegMask(varDsc->lvOtherArgReg);
                gcInfo.gcMarkRegPtrVal(varDsc->lvOtherArgReg, type1);
            }

            if (varDsc->lvTracked)
            {
                VarSetOps::RemoveElemD(compiler, gcInfo.gcVarPtrSetCur, varDsc->lvVarIndex);
            }
        }
        else
#endif // !defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
        {
            // Register argument
            noway_assert(isRegParamType(genActualType(varDsc->TypeGet())));

            // Is register argument already in the right register?
            // If not load it from its stack location.
            var_types loadType = varDsc->lvaArgType();
            regNumber argReg   = varDsc->lvArgReg; // incoming arg register

            if (varDsc->lvRegNum != argReg)
            {
                assert(genIsValidReg(argReg));
                getEmitter()->emitIns_R_S(ins_Load(loadType), emitTypeSize(loadType), argReg, varNum, 0);

                // Update argReg life and GC Info to indicate varDsc stack slot is dead and argReg is going live.
                // Note that we cannot modify varDsc->lvRegNum here because another basic block may not be expecting it.
                // Therefore manually update life of argReg. Note that GT_JMP marks the end of the basic block
                // and after which reg life and gc info will be recomputed for the new block in genCodeForBBList().
                regSet.AddMaskVars(genRegMask(argReg));
                gcInfo.gcMarkRegPtrVal(argReg, loadType);
                if (compiler->lvaIsGCTracked(varDsc))
                {
#ifdef DEBUG
                    if (VarSetOps::IsMember(compiler, gcInfo.gcVarPtrSetCur, varDsc->lvVarIndex))
                    {
                        JITDUMP("\t\t\t\t\t\t\tVar V%02u becoming dead\n", varNum);
                    }
                    else
                    {
                        JITDUMP("\t\t\t\t\t\t\tVar V%02u continuing dead\n", varNum);
                    }
#endif // DEBUG

                    VarSetOps::RemoveElemD(compiler, gcInfo.gcVarPtrSetCur, varDsc->lvVarIndex);
                }
            }
        }
#if FEATURE_VARARG && defined(_TARGET_AMD64_)
        // In case of a jmp call to a vararg method also pass the float/double arg in the corresponding int arg
        // register. This is due to the AMD64 ABI which requires floating point values passed to varargs functions to
        // be passed in both integer and floating point registers. It doesn't apply to x86, which passes floating point
        // values on the stack.
        if (compiler->info.compIsVarArgs)
        {
            regNumber intArgReg;
            var_types loadType = varDsc->lvaArgType();
            regNumber argReg   = varDsc->lvArgReg; // incoming arg register

            if (varTypeIsFloating(loadType))
            {
                intArgReg       = compiler->getCallArgIntRegister(argReg);
                instruction ins = ins_CopyFloatToInt(loadType, TYP_LONG);
                inst_RV_RV(ins, argReg, intArgReg, loadType);
            }
            else
            {
                intArgReg = argReg;
            }

            fixedIntArgMask |= genRegMask(intArgReg);

            if (intArgReg == REG_ARG_0)
            {
                assert(firstArgVarNum == BAD_VAR_NUM);
                firstArgVarNum = varNum;
            }
        }
#endif // FEATURE_VARARG
    }

#if FEATURE_VARARG && defined(_TARGET_AMD64_)
    // Jmp call to a vararg method - if the method has fewer than 4 fixed arguments,
    // load the remaining arg registers (both int and float) from the corresponding
    // shadow stack slots. This is because we don't know the number and type
    // of non-fixed params passed by the caller, therefore we have to assume the worst case
    // of caller passing float/double args both in int and float arg regs.
    //
    // This doesn't apply to x86, which doesn't pass floating point values in floating
    // point registers.
    //
    // The caller could have passed gc-ref/byref type var args. Since these are var args
    // the callee has no way of knowing their gc-ness. Therefore, mark the region that loads
    // remaining arg registers from shadow stack slots as non-gc interruptible.
    if (fixedIntArgMask != RBM_NONE)
    {
        assert(compiler->info.compIsVarArgs);
        assert(firstArgVarNum != BAD_VAR_NUM);

        regMaskTP remainingIntArgMask = RBM_ARG_REGS & ~fixedIntArgMask;
        if (remainingIntArgMask != RBM_NONE)
        {
            instruction insCopyIntToFloat = ins_CopyIntToFloat(TYP_LONG, TYP_DOUBLE);
            getEmitter()->emitDisableGC();
            for (int argNum = 0, argOffset = 0; argNum < MAX_REG_ARG; ++argNum)
            {
                regNumber argReg     = intArgRegs[argNum];
                regMaskTP argRegMask = genRegMask(argReg);

                if ((remainingIntArgMask & argRegMask) != 0)
                {
                    remainingIntArgMask &= ~argRegMask;
                    getEmitter()->emitIns_R_S(INS_mov, EA_8BYTE, argReg, firstArgVarNum, argOffset);

                    // also load it in corresponding float arg reg
                    regNumber floatReg = compiler->getCallArgFloatRegister(argReg);
                    inst_RV_RV(insCopyIntToFloat, floatReg, argReg);
                }

                argOffset += REGSIZE_BYTES;
            }
            getEmitter()->emitEnableGC();
        }
    }
#endif // FEATURE_VARARG
}
// produce code for a GT_LEA subnode
void CodeGen::genLeaInstruction(GenTreeAddrMode* lea)
{
    emitAttr size = emitTypeSize(lea);
    genConsumeOperands(lea);

    if (lea->Base() && lea->Index())
    {
        regNumber baseReg  = lea->Base()->gtRegNum;
        regNumber indexReg = lea->Index()->gtRegNum;
        getEmitter()->emitIns_R_ARX(INS_lea, size, lea->gtRegNum, baseReg, indexReg, lea->gtScale, lea->gtOffset);
    }
    else if (lea->Base())
    {
        getEmitter()->emitIns_R_AR(INS_lea, size, lea->gtRegNum, lea->Base()->gtRegNum, lea->gtOffset);
    }
    else if (lea->Index())
    {
        getEmitter()->emitIns_R_ARX(INS_lea, size, lea->gtRegNum, REG_NA, lea->Index()->gtRegNum, lea->gtScale,
                                    lea->gtOffset);
    }

    genProduceReg(lea);
}
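// The three address shapes above map directly onto x86/x64 addressing modes, e.g.:
//
//      base + index:  lea dstReg, [baseReg + indexReg*scale + offset]
//      base only:     lea dstReg, [baseReg + offset]
//      index only:    lea dstReg, [indexReg*scale + offset]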
//-------------------------------------------------------------------------------------------
// genJumpKindsForTree:  Determine the number and kinds of conditional branches
//                       necessary to implement the given GT_CMP node
//
// Arguments:
//    cmpTree           - (input) The GenTree Relop node that was used to set the Condition codes
//    jmpKind[2]        - (output) One or two conditional branch instructions
//    jmpToTrueLabel[2] - (output) When true we branch to the true case
//                        When false we create a second label and branch to the false case
//                        Only GT_EQ for a floating point compares can have a false value.
//
// Return Value:
//    Sets the proper values into the array elements of jmpKind[] and jmpToTrueLabel[]
//
// Assumptions:
//    At least one conditional branch instruction will be returned.
//    Typically only one conditional branch is needed
//     and the second jmpKind[] value is set to EJ_NONE
//
// Notes:
//    jmpToTrueLabel[i] = true  implies branch when the compare operation is true.
//    jmpToTrueLabel[i] = false implies branch when the compare operation is false.
//-------------------------------------------------------------------------------------------

void CodeGen::genJumpKindsForTree(GenTreePtr cmpTree, emitJumpKind jmpKind[2], bool jmpToTrueLabel[2])
{
    // Except for BEQ (= ordered GT_EQ) both jumps are to the true label.
    jmpToTrueLabel[0] = true;
    jmpToTrueLabel[1] = true;

    // For integer comparisons just use genJumpKindForOper
    if (!varTypeIsFloating(cmpTree->gtOp.gtOp1->gtEffectiveVal()))
    {
        CompareKind compareKind = ((cmpTree->gtFlags & GTF_UNSIGNED) != 0) ? CK_UNSIGNED : CK_SIGNED;
        jmpKind[0]              = genJumpKindForOper(cmpTree->gtOper, compareKind);
        jmpKind[1]              = EJ_NONE;
    }
    else
    {
        assert(cmpTree->OperIsCompare());

        // For details on how we arrived at this mapping, see the comment block in genCodeForTreeNode()
        // while generating code for compare operators (e.g. GT_EQ etc).
        if ((cmpTree->gtFlags & GTF_RELOP_NAN_UN) != 0)
        {
            // Must branch if we have a NaN, unordered
            switch (cmpTree->gtOper)
            {
                case GT_LT:
                case GT_GT:
                    jmpKind[0] = EJ_jb;
                    jmpKind[1] = EJ_NONE;
                    break;

                case GT_LE:
                case GT_GE:
                    jmpKind[0] = EJ_jbe;
                    jmpKind[1] = EJ_NONE;
                    break;

                case GT_NE:
                    jmpKind[0] = EJ_jpe;
                    jmpKind[1] = EJ_jne;
                    break;

                case GT_EQ:
                    jmpKind[0] = EJ_je;
                    jmpKind[1] = EJ_NONE;
                    break;

                default:
                    unreached();
            }
        }
        else // ((cmpTree->gtFlags & GTF_RELOP_NAN_UN) == 0)
        {
            // Do not branch if we have a NaN, unordered
            switch (cmpTree->gtOper)
            {
                case GT_LT:
                case GT_GT:
                    jmpKind[0] = EJ_ja;
                    jmpKind[1] = EJ_NONE;
                    break;

                case GT_LE:
                case GT_GE:
                    jmpKind[0] = EJ_jae;
                    jmpKind[1] = EJ_NONE;
                    break;

                case GT_NE:
                    jmpKind[0] = EJ_jne;
                    jmpKind[1] = EJ_NONE;
                    break;

                case GT_EQ:
                    jmpKind[0]        = EJ_jpe;
                    jmpKind[1]        = EJ_je;
                    jmpToTrueLabel[0] = false;
                    break;

                default:
                    unreached();
            }
        }
    }
}
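// Example of the two-branch shape for an ordered GT_EQ (the only case that needs
// jmpToTrueLabel[0] == false), assuming the operands were compared with ucomiss/ucomisd:
//
//      jpe  L_false     ; PF set => unordered (NaN) => compare is false
//      je   L_true      ; ZF set, not unordered => operands are equal
//   L_false:
//      ...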
5748 #if !defined(_TARGET_64BIT_)
5749 //------------------------------------------------------------------------
5750 // genJumpKindsForTreeLongHi: Generate the jump types for compare
5751 // operators of the high parts of a compare with long type operands
5752 // on x86 for the case where rel-op result needs to be materialized into a
5756 // cmpTree - The GT_CMP node
5757 // jmpKind - Return array of jump kinds
5758 // jmpToTrueLabel - Return array of if the jump is going to true label
5763 void CodeGen::genJumpKindsForTreeLongHi(GenTreePtr cmpTree, emitJumpKind jmpKind[2])
5765 assert(cmpTree->OperIsCompare());
5766 CompareKind compareKind = ((cmpTree->gtFlags & GTF_UNSIGNED) != 0) ? CK_UNSIGNED : CK_SIGNED;
5768 switch (cmpTree->gtOper)
5772 if (compareKind == CK_SIGNED)
5786 if (compareKind == CK_SIGNED)
5799 // GT_EQ will not jump to the true label if the hi parts are equal
5800 jmpKind[0] = EJ_NONE;
5801 jmpKind[1] = EJ_jne;
5805 // GT_NE will always jump to the true label if the high parts are not equal
5806 jmpKind[0] = EJ_jne;
5807 jmpKind[1] = EJ_NONE;
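// Illustrative example (editor's sketch; the relational case bodies are elided
// above): for a signed GT_LT, the hi-part compare would branch to the true label
// on jl, to the false label on jg, and fall through to the lo-part compare when
// the hi parts are equal, roughly:
//     cmp hiOp1, hiOp2
//     jl  <true label>
//     jg  <false label>
//     ... compare the lo parts unsigned ...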
5815 //------------------------------------------------------------------------
5816 // genCompareLong: Generate code for comparing two longs on x86 when the result of the compare
5817 // is manifested in a register.
5820 // treeNode - the compare tree
5825 // For long compares, we need to compare the high parts of operands first, then the low parts.
5826 // If the high compare is false, we do not need to compare the low parts. For less than and
5827 // greater than, if the high compare is true, we can assume the entire compare is true. For
5828 // compares that are realized in a register, we will generate:
5830 // Opcode x86 equivalent Comment
5831 // ------ -------------- -------
5832 // GT_EQ cmp hiOp1,hiOp2 If any part is not equal, the entire compare
5833 // jne label is false.
5837 // GT_NE cmp hiOp1,hiOp2 If any part is not equal, the entire compare
5838 // jne label is true.
5842 // GT_LT; unsigned cmp hiOp1,hiOp2 If hiOp1 is not equal to hiOp2, the flags are set
5843 // jne label correctly and we do not need to check lo. Otherwise,
5844 // cmp loOp1,loOp2 we need to compare the lo halves
5847 // GT_LE; unsigned cmp hiOp1,hiOp2 If hiOp1 is not equal to hiOp2, the flags are set
5848 // jne label correctly and we do not need to check lo. Otherwise,
5849 // cmp loOp1,loOp2 we need to compare the lo halves
5852 // GT_GT; unsigned cmp hiOp1,hiOp2 If hiOp1 is not equal to hiOp2, the flags are set
5853 // jne label correctly and we do not need to check lo. Otherwise,
5854 // cmp loOp1,loOp2 we need to compare the lo halves
5857 // GT_GE; unsigned cmp hiOp1,hiOp2 If hiOp1 is not equal to hiOp2, the flags are set
5858 // jne label correctly and we do not need to check lo. Otherwise,
5859 // cmp loOp1,loOp2 we need to compare the lo halves
5862 // For signed long comparisons, we need additional labels, as we need to use signed conditions on the
5863 // "set" instruction:
5865 // GT_LT; signed cmp hiOp1,hiOp2 If hiOp1 is not equal to hiOp2, the flags are set
5866 // jne labelHi correctly and we do not need to check lo. Otherwise,
5867 // cmp loOp1,loOp2 we need to compare the lo halves
5868 // setb Unsigned set for lo compare
5870 // labelHi: setl Signed set for high compare
5873 // GT_LE; signed cmp hiOp1,hiOp2 If hiOp1 is not equal to hiOp2, the flags are set
5874 // jne labelHi correctly and we do not need to check lo. Otherwise,
5875 // cmp loOp1,loOp2 we need to compare the lo halves
5876 // setbe Unsigned set for lo compare
5878 // labelHi: setle Signed set for hi compare
5881 // GT_GT; signed cmp hiOp1,hiOp2 If hiOp1 is not equal to hiOp2, the flags are set
5882 // jne labelHi correctly and we do not need to check lo. Otherwise,
5883 // cmp loOp1,loOp2 we need to compare the lo halves
5884 // seta Unsigned set for lo compare
5886 // labelHi: setg Signed set for high compare
5889 // GT_GE; signed cmp hiOp1,hiOp2 If hiOp1 is not equal to hiOp2, the flags are set
5890 // jne labelHi correctly and we do not need to check lo. Otherwise,
5891 // cmp loOp1,loOp2 we need to compare the lo halves
5892 // setae Unsigned set for lo compare
5894 // labelHi: setge Signed set for hi compare
5897 // TODO-X86-CQ: Check if hi or lo parts of op2 are 0 and change the compare to a test.
5898 void CodeGen::genCompareLong(GenTreePtr treeNode)
5900 assert(treeNode->OperIsCompare());
5902 GenTreeOp* tree = treeNode->AsOp();
5903 GenTreePtr op1 = tree->gtOp1;
5904 GenTreePtr op2 = tree->gtOp2;
5906 assert(varTypeIsLong(op1->TypeGet()));
5907 assert(varTypeIsLong(op2->TypeGet()));
5909 regNumber targetReg = treeNode->gtRegNum;
5911 genConsumeOperands(tree);
5913 GenTreePtr loOp1 = op1->gtGetOp1();
5914 GenTreePtr hiOp1 = op1->gtGetOp2();
5915 GenTreePtr loOp2 = op2->gtGetOp1();
5916 GenTreePtr hiOp2 = op2->gtGetOp2();
5918 // Create compare for the high parts
5919 instruction ins = INS_cmp;
5920 var_types cmpType = TYP_INT;
5921 emitAttr cmpAttr = emitTypeSize(cmpType);
5923 // Emit the compare instruction
5924 getEmitter()->emitInsBinary(ins, cmpAttr, hiOp1, hiOp2);
5926 // If the result is not being materialized in a register, we're done.
5927 if (targetReg == REG_NA)
5932 // Generate the first jump for the high compare
5933 CompareKind compareKind = ((tree->gtFlags & GTF_UNSIGNED) != 0) ? CK_UNSIGNED : CK_SIGNED;
5935 BasicBlock* labelHi = genCreateTempLabel();
5936 BasicBlock* labelFinal = genCreateTempLabel();
5938 if (compareKind == CK_SIGNED && (tree->gtOper != GT_NE && tree->gtOper != GT_EQ))
5940 // If we are doing a signed comparison, we need to do a signed set if the high compare is true,
5941 // but an unsigned set if we fall through to the low compare. If we have a GT_NE or GT_EQ, we do not
5942 // need to worry about the sign of the comparison, so we can use the simplified case.
5944 // We only have to check for equality for the hi comparison. If they are not equal, then the set will
5945 // do the right thing. If they are equal, we have to check the lo halves.
5946 inst_JMP(EJ_jne, labelHi);
5948 // Emit the comparison. Perform the set for the lo. Jump to labelFinal
5949 getEmitter()->emitInsBinary(ins, cmpAttr, loOp1, loOp2);
5951 // The low set must be unsigned
5952 emitJumpKind jumpKindLo = genJumpKindForOper(tree->gtOper, CK_UNSIGNED);
5954 inst_SET(jumpKindLo, targetReg);
5955 inst_JMP(EJ_jmp, labelFinal);
5957 // Define the label for hi jump target here. If we have jumped here, we want to set
5958 // the target register based on the jump kind of the actual compare type.
5960 genDefineTempLabel(labelHi);
5961 inst_SET(genJumpKindForOper(tree->gtOper, compareKind), targetReg);
5963 genDefineTempLabel(labelFinal);
5964 // Set the higher bytes to 0
5965 inst_RV_RV(ins_Move_Extend(TYP_UBYTE, true), targetReg, targetReg, TYP_UBYTE, emitTypeSize(TYP_UBYTE));
5966 genProduceReg(tree);
5970 // If the compare is unsigned, or if the sign doesn't change the set instruction, we can use
5971 // the same set logic for both the hi and lo compare, so we don't need to jump to a high label;
5972 // we can just jump to the set that the lo compare will use.
5974 // We only have to check for equality for the hi comparison. If they are not equal, then the set will
5975 // do the right thing. If they are equal, we have to check the lo halves.
5976 inst_JMP(EJ_jne, labelFinal);
5978 // Emit the comparison
5979 getEmitter()->emitInsBinary(ins, cmpAttr, loOp1, loOp2);
5981 // Define the label for hi jump target here. If we have jumped here, we want to set
5982 // the target register based on the jump kind of the lower half (the actual compare
5983 // type). If we have fallen through, then we are doing a normal int compare for the lo halves.
5986 genDefineTempLabel(labelFinal);
5988 // The low set must be unsigned
5989 emitJumpKind jumpKindLo = genJumpKindForOper(tree->gtOper, CK_UNSIGNED);
5991 inst_SET(jumpKindLo, targetReg);
5992 // Set the higher bytes to 0
5993 inst_RV_RV(ins_Move_Extend(TYP_UBYTE, true), targetReg, targetReg, TYP_UBYTE, emitTypeSize(TYP_UBYTE));
5994 genProduceReg(tree);
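// Worked example (editor's sketch): materializing a signed 'a < b' for two longs
// follows the "GT_LT; signed" pattern from the header comment above:
//     cmp   hiOp1, hiOp2
//     jne   labelHi
//     cmp   loOp1, loOp2
//     setb  al              // unsigned set for the lo halves
//     jmp   labelFinal
// labelHi:
//     setl  al              // signed set for the hi halves
// labelFinal:
//     movzx eax, al         // clear the upper bytes of the result register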
5997 #endif //! defined(_TARGET_64BIT_)
5999 //------------------------------------------------------------------------
6000 // genCompareFloat: Generate code for comparing two floating point values
6003 // treeNode - the compare tree
6008 // SSE2 instruction ucomis[s|d] performs an unordered comparison and
6009 // updates the rFLAGS register as follows.
6010 //        Result of compare       ZF  PF  CF
6011 //        -----------------       ------------
6012 //        Unordered               1   1   1   <-- this result implies one of operands of compare is a NaN.
6013 //        Greater than            0   0   0
6014 //        Less than               0   0   1
6015 //        Equal                   1   0   0
6017 // From the above table the following equalities follow. As per the ECMA spec, *.UN opcodes perform an
6018 // unordered comparison of floating point values. That is, *.UN comparisons result in true when
6019 // one of the operands is a NaN, whereas ordered comparisons result in false.
6021 // Opcode Amd64 equivalent Comment
6022 // ------ ----------------- --------
6023 // BLT.UN(a,b) ucomis[s|d] a, b jb branches if CF=1, which means either a<b or unordered from the above
6026 // BLT(a,b) ucomis[s|d] b, a ja branches if CF=0 and ZF=0, which means b>a, which in turn implies a<b
6029 // BGT.UN(a,b) ucomis[s|d] b, a branch if b<a or unordered ==> branch if a>b or unordered
6032 // BGT(a, b) ucomis[s|d] a, b branch if a>b
6035 // BLE.UN(a,b) ucomis[s|d] a, b jbe branches if CF=1 or ZF=1, which implies a<=b or unordered
6038 // BLE(a,b) ucomis[s|d] b, a jae branches if CF=0, which means b>=a, i.e. a<=b
6041 // BGE.UN(a,b) ucomis[s|d] b, a branch if b<=a or unordered ==> branch if a>=b or unordered
6044 // BGE(a,b) ucomis[s|d] a, b branch if a>=b
6047 // BEQ.UN(a,b) ucomis[s|d] a, b branch if a==b or unordered. There is no BEQ.UN opcode in ECMA spec.
6048 // je This case is given for completeness, in case the JIT generates such
6049 // a gentree internally.
6051 // BEQ(a,b) ucomis[s|d] a, b From the above table, PF=0 and ZF=1 corresponds to a==b.
6056 // BNE(a,b) ucomis[s|d] a, b branch if a!=b. There is no BNE opcode in the ECMA spec. This case is
6057 // jne given for completeness, in case the JIT generates such a gentree internally.
6060 // BNE.UN(a,b) ucomis[s|d] a, b From the above table, PF=1 or ZF=0 implies unordered or a!=b
6064 // As we can see from the above equalities, the operands of a compare operator need to be
6065 // reversed in the case of BLT/CLT, BGT.UN/CGT.UN, BLE/CLE, BGE.UN/CGE.UN.
6066 void CodeGen::genCompareFloat(GenTreePtr treeNode)
6068 assert(treeNode->OperIsCompare());
6070 GenTreeOp* tree = treeNode->AsOp();
6071 GenTreePtr op1 = tree->gtOp1;
6072 GenTreePtr op2 = tree->gtOp2;
6073 var_types op1Type = op1->TypeGet();
6074 var_types op2Type = op2->TypeGet();
6076 genConsumeOperands(tree);
6078 assert(varTypeIsFloating(op1Type));
6079 assert(op1Type == op2Type);
6081 regNumber targetReg = treeNode->gtRegNum;
6086 if ((tree->gtFlags & GTF_RELOP_NAN_UN) != 0)
6088 // Unordered comparison case
6089 reverseOps = (tree->gtOper == GT_GT || tree->gtOper == GT_GE);
6093 reverseOps = (tree->gtOper == GT_LT || tree->gtOper == GT_LE);
6096 if (reverseOps)
6098 GenTreePtr tmp = op1;
6099 op1 = op2;
6100 op2 = tmp;
6103 ins = ins_FloatCompare(op1Type);
6104 cmpAttr = emitTypeSize(op1Type);
6106 getEmitter()->emitInsBinary(ins, cmpAttr, op1, op2);
6108 // Are we evaluating this into a register?
6109 if (targetReg != REG_NA)
6111 genSetRegToCond(targetReg, tree);
6112 genProduceReg(tree);
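// Worked example (editor's sketch): an ordered GT_LT of two floats takes the
// reverseOps path above, so 'a < b' materialized into a register is roughly:
//     ucomiss xmm(b), xmm(a)
//     seta    al            // CF=0 and ZF=0 => b > a => a < b; a NaN sets CF, so seta yields 0
//     movzx   eax, al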
6116 //------------------------------------------------------------------------
6117 // genCompareInt: Generate code for comparing ints or, on amd64, longs.
6120 // treeNode - the compare tree
6124 void CodeGen::genCompareInt(GenTreePtr treeNode)
6126 assert(treeNode->OperIsCompare());
6128 GenTreeOp* tree = treeNode->AsOp();
6129 GenTreePtr op1 = tree->gtOp1;
6130 GenTreePtr op2 = tree->gtOp2;
6131 var_types op1Type = op1->TypeGet();
6132 var_types op2Type = op2->TypeGet();
6133 regNumber targetReg = tree->gtRegNum;
6135 // Case of op1 == 0 or op1 != 0:
6136 // Optimize generation of 'test' instruction if op1 sets flags.
6138 // Note that if LSRA has inserted any GT_RELOAD/GT_COPY before
6139 // op1, it will not modify the flags set by codegen of op1.
6140 // Similarly op1 could also be reg-optional at its use and
6141 // it was spilled after producing its result in a register.
6142 // Spill code too will not modify the flags set by op1.
6143 GenTree* realOp1 = op1->gtSkipReloadOrCopy();
6144 if (realOp1->gtSetFlags())
6146 // op1 must set ZF and SF flags
6147 assert(realOp1->gtSetZSFlags());
6149 // Must be (in)equality against zero.
6150 assert(tree->OperIs(GT_EQ, GT_NE));
6151 assert(op2->IsIntegralConst(0));
6152 assert(op2->isContained());
6154 // Just consume the operands
6155 genConsumeOperands(tree);
6157 // No need to generate test instruction since
6158 // op1 already set the flags.
6160 // Are we evaluating this into a register?
6161 if (targetReg != REG_NA)
6163 genSetRegToCond(targetReg, tree);
6164 genProduceReg(tree);
6171 // If we have GT_JTRUE(GT_EQ/NE(GT_SIMD((in)Equality, v1, v2), true/false)),
6172 // then we don't need to generate code for GT_EQ/GT_NE, since SIMD (in)Equality intrinsic
6173 // would set or clear Zero flag.
6174 if ((targetReg == REG_NA) && tree->OperIs(GT_EQ, GT_NE))
6176 // Is it a SIMD (in)Equality that doesn't need to materialize result into a register?
6177 if ((op1->gtRegNum == REG_NA) && op1->IsSIMDEqualityOrInequality())
6179 // Must be comparing against true or false.
6180 assert(op2->IsIntegralConst(0) || op2->IsIntegralConst(1));
6181 assert(op2->isContainedIntOrIImmed());
6183 // In this case SIMD (in)Equality will set or clear
6184 // Zero flag, based on which GT_JTRUE would generate
6185 // the right conditional jump.
6189 #endif // FEATURE_SIMD
6191 genConsumeOperands(tree);
6193 // TODO-CQ: We should be able to support swapping op1 and op2 to generate cmp reg, imm.
6194 // https://github.com/dotnet/coreclr/issues/7270
6195 assert(!op1->isContainedIntOrIImmed()); // We no longer support swapping op1 and op2 to generate cmp reg, imm
6196 assert(!varTypeIsFloating(op2Type));
6200 if (tree->OperIs(GT_TEST_EQ, GT_TEST_NE))
6204 else if (op1->isUsedFromReg() && op2->IsIntegralConst(0))
6206 // We're comparing a register to 0 so we can generate "test reg1, reg1"
6207 // instead of the longer "cmp reg1, 0"
6218 if (op1Type == op2Type)
6222 else if (genTypeSize(op1Type) == genTypeSize(op2Type))
6224 // If the types are different but have the same size then we'll use TYP_INT or TYP_LONG.
6225 // This primarily deals with small type mixes (e.g. byte/ubyte) that need to be widened
6226 // and compared as int. We should not get long type mixes here but handle that as well, just in case.
6228 type = genTypeSize(op1Type) == 8 ? TYP_LONG : TYP_INT;
6232 // If the types are different, simply use TYP_INT. This deals with small type/int type
6233 // mixes (e.g. byte/short ubyte/int) that need to be widened and compared as int.
6234 // Lowering is expected to handle any mixes that involve long types (e.g. int/long).
6238 // The common type cannot be larger than the machine word size
6239 assert(genTypeSize(type) <= genTypeSize(TYP_I_IMPL));
6240 // The common type cannot be smaller than any of the operand types, we're probably mixing int/long
6241 assert(genTypeSize(type) >= max(genTypeSize(op1Type), genTypeSize(op2Type)));
6242 // TYP_UINT and TYP_ULONG should not appear here, only small types can be unsigned
6243 assert(!varTypeIsUnsigned(type) || varTypeIsSmall(type));
6244 // Small unsigned int types (TYP_BOOL can use anything) should use unsigned comparisons
6245 assert(!(varTypeIsSmallInt(type) && varTypeIsUnsigned(type)) || ((tree->gtFlags & GTF_UNSIGNED) != 0));
6246 // If op1 is smaller, then it cannot be in memory; we're probably missing a cast
6247 assert((genTypeSize(op1Type) >= genTypeSize(type)) || !op1->isUsedFromMemory());
6248 // If op2 is smaller, then it cannot be in memory; we're probably missing a cast
6249 assert((genTypeSize(op2Type) >= genTypeSize(type)) || !op2->isUsedFromMemory());
6250 // If op2 is a constant then it should fit in the common type
6251 assert(!op2->IsCnsIntOrI() || genTypeCanRepresentValue(type, op2->AsIntCon()->IconValue()));
6253 getEmitter()->emitInsBinary(ins, emitTypeSize(type), op1, op2);
6255 // Are we evaluating this into a register?
6256 if (targetReg != REG_NA)
6258 genSetRegToCond(targetReg, tree);
6259 genProduceReg(tree);
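// Worked example (editor's sketch): for 'x == 0' with x in a register, the
// paths above reduce to:
//     test eax, eax         // shorter than 'cmp eax, 0'
//     sete al
//     movzx eax, al
// and when op1 itself has already set the flags, even the test is elided.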
6263 //-------------------------------------------------------------------------------------------
6264 // genSetRegToCond: Set a register 'dstReg' to the appropriate one or zero value
6265 // corresponding to a binary Relational operator result.
6268 // dstReg - The target register to set to 1 or 0
6269 // tree - The GenTree Relop node that was used to set the Condition codes
6271 // Return Value: none
6274 // A full 64-bit value of either 1 or 0 is setup in the 'dstReg'
6275 //-------------------------------------------------------------------------------------------
6277 void CodeGen::genSetRegToCond(regNumber dstReg, GenTreePtr tree)
6279 noway_assert((genRegMask(dstReg) & RBM_BYTE_REGS) != 0);
6281 emitJumpKind jumpKind[2];
6282 bool branchToTrueLabel[2];
6283 genJumpKindsForTree(tree, jumpKind, branchToTrueLabel);
6285 if (jumpKind[1] == EJ_NONE)
6287 // Set (lower byte of) reg according to the flags
6288 inst_SET(jumpKind[0], dstReg);
6293 // jmpKind[1] != EJ_NONE implies BEQ and BNE.UN of floating point values.
6294 // These are represented by two conditions.
6295 if (tree->gtOper == GT_EQ)
6297 // This must be an ordered comparison.
6298 assert((tree->gtFlags & GTF_RELOP_NAN_UN) == 0);
6302 // This must be BNE.UN
6303 assert((tree->gtOper == GT_NE) && ((tree->gtFlags & GTF_RELOP_NAN_UN) != 0));
6307 // Here is the sample code generated in each case:
6308 // BEQ == cmp, jpe <false label>, je <true label>
6309 // That is, to materialize the comparison, reg needs to be set if PF=0 and ZF=1
6310 //     setnp reg  // if (PF==0) reg = 1 else reg = 0
6311 //     jpe L1     // Jmp if PF==1
6312 //     sete reg
6313 //  L1:
6315 // BNE.UN == cmp, jpe <true label>, jne <true label>
6316 // That is, to materialize the comparison, reg needs to be set if either PF=1 or ZF=0;
6317 //     setp reg
6318 //     jpe L1
6319 //     setne reg
6320 //  L1:
6322 // reverse the jmpkind condition before setting dstReg if it is to false label.
6323 inst_SET(branchToTrueLabel[0] ? jumpKind[0] : emitter::emitReverseJumpKind(jumpKind[0]), dstReg);
6325 BasicBlock* label = genCreateTempLabel();
6326 inst_JMP(jumpKind[0], label);
6328 // second branch is always to true label
6329 assert(branchToTrueLabel[1]);
6330 inst_SET(jumpKind[1], dstReg);
6331 genDefineTempLabel(label);
6334 var_types treeType = tree->TypeGet();
6335 if (treeType == TYP_INT || treeType == TYP_LONG)
6337 // Set the higher bytes to 0
6338 inst_RV_RV(ins_Move_Extend(TYP_UBYTE, true), dstReg, dstReg, TYP_UBYTE, emitTypeSize(TYP_UBYTE));
6342 noway_assert(treeType == TYP_BYTE);
6346 #if !defined(_TARGET_64BIT_)
6347 //------------------------------------------------------------------------
6348 // genLongToIntCast: Generate code for long to int casts on x86.
6351 // cast - The GT_CAST node
6357 // The cast node and its sources (via GT_LONG) must have been assigned registers.
6358 // The destination cannot be a floating point type or a small integer type.
6360 void CodeGen::genLongToIntCast(GenTree* cast)
6362 assert(cast->OperGet() == GT_CAST);
6364 GenTree* src = cast->gtGetOp1();
6365 noway_assert(src->OperGet() == GT_LONG);
6367 genConsumeRegs(src);
6369 var_types srcType = ((cast->gtFlags & GTF_UNSIGNED) != 0) ? TYP_ULONG : TYP_LONG;
6370 var_types dstType = cast->CastToType();
6371 regNumber loSrcReg = src->gtGetOp1()->gtRegNum;
6372 regNumber hiSrcReg = src->gtGetOp2()->gtRegNum;
6373 regNumber dstReg = cast->gtRegNum;
6375 assert((dstType == TYP_INT) || (dstType == TYP_UINT));
6376 assert(genIsValidIntReg(loSrcReg));
6377 assert(genIsValidIntReg(hiSrcReg));
6378 assert(genIsValidIntReg(dstReg));
6380 if (cast->gtOverflow())
6383 // Generate an overflow check for [u]long to [u]int casts:
6385 // long -> int - check if the upper 33 bits are all 0 or all 1
6387 // ulong -> int - check if the upper 33 bits are all 0
6389 // long -> uint - check if the upper 32 bits are all 0
6390 // ulong -> uint - check if the upper 32 bits are all 0
6393 if ((srcType == TYP_LONG) && (dstType == TYP_INT))
6395 BasicBlock* allOne = genCreateTempLabel();
6396 BasicBlock* success = genCreateTempLabel();
6398 inst_RV_RV(INS_test, loSrcReg, loSrcReg, TYP_INT, EA_4BYTE);
6399 inst_JMP(EJ_js, allOne);
6401 inst_RV_RV(INS_test, hiSrcReg, hiSrcReg, TYP_INT, EA_4BYTE);
6402 genJumpToThrowHlpBlk(EJ_jne, SCK_OVERFLOW);
6403 inst_JMP(EJ_jmp, success);
6405 genDefineTempLabel(allOne);
6406 inst_RV_IV(INS_cmp, hiSrcReg, -1, EA_4BYTE);
6407 genJumpToThrowHlpBlk(EJ_jne, SCK_OVERFLOW);
6409 genDefineTempLabel(success);
6413 if ((srcType == TYP_ULONG) && (dstType == TYP_INT))
6415 inst_RV_RV(INS_test, loSrcReg, loSrcReg, TYP_INT, EA_4BYTE);
6416 genJumpToThrowHlpBlk(EJ_js, SCK_OVERFLOW);
6419 inst_RV_RV(INS_test, hiSrcReg, hiSrcReg, TYP_INT, EA_4BYTE);
6420 genJumpToThrowHlpBlk(EJ_jne, SCK_OVERFLOW);
6424 if (dstReg != loSrcReg)
6426 inst_RV_RV(INS_mov, dstReg, loSrcReg, TYP_INT, EA_4BYTE);
6429 genProduceReg(cast);
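// Worked example (editor's sketch): a checked '(int)x' with the long x in
// edx:eax takes the TYP_LONG -> TYP_INT path above:
//     test eax, eax         // is the lo half negative?
//     js   allOne
//     test edx, edx         // non-negative lo => hi must be all zeros
//     jne  <throw SCK_OVERFLOW>
//     jmp  success
// allOne:
//     cmp  edx, -1          // negative lo => hi must be all ones
//     jne  <throw SCK_OVERFLOW>
// success:
//     mov  dstReg, eax      // only if dstReg != loSrcReg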
6433 //------------------------------------------------------------------------
6434 // genIntToIntCast: Generate code for an integer cast
6435 // This method handles integer overflow checking casts
6436 // as well as ordinary integer casts.
6439 // treeNode - The GT_CAST node
6445 // The treeNode is not a contained node and must have an assigned register.
6446 // For a signed convert from byte, the source must be in a byte-addressable register.
6447 // Neither the source nor target type can be a floating point type.
6449 // TODO-XArch-CQ: Allow castOp to be a contained node without an assigned register.
6450 // TODO: refactor to use getCastDescription
6452 void CodeGen::genIntToIntCast(GenTreePtr treeNode)
6454 assert(treeNode->OperGet() == GT_CAST);
6456 GenTreePtr castOp = treeNode->gtCast.CastOp();
6457 var_types srcType = genActualType(castOp->TypeGet());
6458 noway_assert(genTypeSize(srcType) >= 4);
6461 if (varTypeIsLong(srcType))
6463 genLongToIntCast(treeNode);
6466 #endif // _TARGET_X86_
6468 regNumber targetReg = treeNode->gtRegNum;
6469 regNumber sourceReg = castOp->gtRegNum;
6470 var_types dstType = treeNode->CastToType();
6471 bool isUnsignedDst = varTypeIsUnsigned(dstType);
6472 bool isUnsignedSrc = varTypeIsUnsigned(srcType);
6474 // if necessary, force the srcType to unsigned when the GT_UNSIGNED flag is set
6475 if (!isUnsignedSrc && (treeNode->gtFlags & GTF_UNSIGNED) != 0)
6477 srcType = genUnsignedType(srcType);
6478 isUnsignedSrc = true;
6481 bool requiresOverflowCheck = false;
6483 assert(genIsValidIntReg(targetReg));
6484 assert(genIsValidIntReg(sourceReg));
6486 instruction ins = INS_invalid;
6487 emitAttr srcSize = EA_ATTR(genTypeSize(srcType));
6488 emitAttr dstSize = EA_ATTR(genTypeSize(dstType));
6490 if (srcSize < dstSize)
6493 // Is this an Overflow checking cast?
6494 // We only need to handle one case, as the other casts can never overflow.
6495 // cast from TYP_INT to TYP_ULONG
6497 if (treeNode->gtOverflow() && (srcType == TYP_INT) && (dstType == TYP_ULONG))
6499 requiresOverflowCheck = true;
6504 noway_assert(srcSize < EA_PTRSIZE);
6506 ins = ins_Move_Extend(srcType, castOp->InReg());
6509 Special case: ins_Move_Extend assumes the destination type is no bigger
6510 than TYP_INT. movsx and movzx can already extend all the way to
6511 64-bit, and a regular 32-bit mov clears the high 32 bits (like the non-existent movzxd),
6512 but for a sign extension from TYP_INT to TYP_LONG, we need to use the movsxd opcode.
6514 if (!isUnsignedSrc && !isUnsignedDst)
6517 NYI_X86("Cast to 64 bit for x86/RyuJIT");
6518 #else // !_TARGET_X86_
6519 ins = INS_movsxd;
6520 #endif // !_TARGET_X86_
6526 // Narrowing cast, or sign-changing cast
6527 noway_assert(srcSize >= dstSize);
6529 // Is this an Overflow checking cast?
6530 if (treeNode->gtOverflow())
6532 requiresOverflowCheck = true;
6537 ins = ins_Move_Extend(dstType, castOp->InReg());
6541 noway_assert(ins != INS_invalid);
6543 genConsumeReg(castOp);
6545 if (requiresOverflowCheck)
6547 ssize_t typeMin = 0;
6548 ssize_t typeMax = 0;
6549 ssize_t typeMask = 0;
6550 bool needScratchReg = false;
6551 bool signCheckOnly = false;
6553 /* Do we need to compare the value, or just check masks */
6558 typeMask = ssize_t((int)0xFFFFFF80);
6559 typeMin = SCHAR_MIN;
6560 typeMax = SCHAR_MAX;
6564 typeMask = ssize_t((int)0xFFFFFF00L);
6568 typeMask = ssize_t((int)0xFFFF8000);
6574 typeMask = ssize_t((int)0xFFFF0000L);
6578 if (srcType == TYP_UINT)
6580 signCheckOnly = true;
6584 typeMask = 0xFFFFFFFF80000000LL;
6591 if (srcType == TYP_INT)
6593 signCheckOnly = true;
6597 needScratchReg = true;
6602 noway_assert(srcType == TYP_ULONG);
6603 signCheckOnly = true;
6607 noway_assert((srcType == TYP_LONG) || (srcType == TYP_INT));
6608 signCheckOnly = true;
6612 NO_WAY("Unknown type");
6618 // We only need to check for a negative value in sourceReg
6619 inst_RV_IV(INS_cmp, sourceReg, 0, srcSize);
6620 genJumpToThrowHlpBlk(EJ_jl, SCK_OVERFLOW);
6624 // When we are converting from unsigned or to unsigned, we
6625 // will only have to check for any bits set using 'typeMask'
6626 if (isUnsignedSrc || isUnsignedDst)
6630 regNumber tmpReg = treeNode->GetSingleTempReg();
6631 inst_RV_RV(INS_mov, tmpReg, sourceReg, TYP_LONG); // Move the 64-bit value to a writeable temp reg
6632 inst_RV_SH(INS_SHIFT_RIGHT_LOGICAL, srcSize, tmpReg, 32); // Shift right by 32 bits
6633 genJumpToThrowHlpBlk(EJ_jne, SCK_OVERFLOW); // Throw if the shifted value is non-zero
6637 noway_assert(typeMask != 0);
6638 inst_RV_IV(INS_TEST, sourceReg, typeMask, srcSize);
6639 genJumpToThrowHlpBlk(EJ_jne, SCK_OVERFLOW);
6644 // For a narrowing signed cast
6646 // We must check the value is in a signed range.
6648 // Compare with the MAX
6650 noway_assert((typeMin != 0) && (typeMax != 0));
6652 inst_RV_IV(INS_cmp, sourceReg, typeMax, srcSize);
6653 genJumpToThrowHlpBlk(EJ_jg, SCK_OVERFLOW);
6655 // Compare with the MIN
6657 inst_RV_IV(INS_cmp, sourceReg, typeMin, srcSize);
6658 genJumpToThrowHlpBlk(EJ_jl, SCK_OVERFLOW);
6662 if (targetReg != sourceReg
6663 #ifdef _TARGET_AMD64_
6664 // On amd64, we can hit this path for a same-register
6665 // 4-byte to 8-byte widening conversion, and need to
6666 // emit the instruction to set the high bits correctly.
6667 || (dstSize == EA_8BYTE && srcSize == EA_4BYTE)
6668 #endif // _TARGET_AMD64_
6670 inst_RV_RV(ins, targetReg, sourceReg, srcType, srcSize);
6672 else // non-overflow checking cast
6674 // We may have code transformations that result in casts where srcType is the same as dstType.
6675 // e.g. Bug 824281, in which a comma is split by the rationalizer, leaving an assignment of a
6676 // long constant to a long lclVar.
6677 if (srcType == dstType)
6681 /* Is the value sitting in a non-byte-addressable register? */
6682 else if (castOp->InReg() && (dstSize == EA_1BYTE) && !isByteReg(sourceReg))
6686 // for unsigned values we can AND, so it need not be a byte register
6691 // Move the value into a byte register
6692 noway_assert(!"Signed byte convert from non-byte-addressable register");
6695 /* Generate "mov targetReg, castOp->gtReg" */
6696 if (targetReg != sourceReg)
6698 inst_RV_RV(INS_mov, targetReg, sourceReg, srcType, srcSize);
6704 noway_assert(isUnsignedDst);
6706 /* Generate "and reg, MASK" */
6707 unsigned fillPattern;
6708 if (dstSize == EA_1BYTE)
6710 fillPattern = 0xff;
6712 else if (dstSize == EA_2BYTE)
6714 fillPattern = 0xffff;
6718 fillPattern = 0xffffffff;
6721 inst_RV_IV(INS_AND, targetReg, fillPattern, EA_4BYTE);
6723 #ifdef _TARGET_AMD64_
6724 else if (ins == INS_movsxd)
6726 inst_RV_RV(ins, targetReg, sourceReg, srcType, srcSize);
6728 #endif // _TARGET_AMD64_
6729 else if (ins == INS_mov)
6731 if (targetReg != sourceReg
6732 #ifdef _TARGET_AMD64_
6733 // On amd64, 'mov' is the opcode used to zero-extend from
6734 // 4 bytes to 8 bytes.
6735 || (dstSize == EA_8BYTE && srcSize == EA_4BYTE)
6736 #endif // _TARGET_AMD64_
6739 inst_RV_RV(ins, targetReg, sourceReg, srcType, srcSize);
6744 noway_assert(ins == INS_movsx || ins == INS_movzx);
6745 noway_assert(srcSize >= dstSize);
6747 /* Generate "mov targetReg, castOp->gtReg" */
6748 inst_RV_RV(ins, targetReg, sourceReg, srcType, dstSize);
6752 genProduceReg(treeNode);
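// Illustrative examples (editor's sketch) for the non-overflow widening paths
// above on amd64:
//     movsxd rax, ecx       // signed TYP_INT -> TYP_LONG
//     mov    eax, ecx       // unsigned widening: a 32-bit mov implicitly zeroes bits 63:32
//     movsx  eax, cl        // signed TYP_BYTE -> TYP_INT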
6755 //------------------------------------------------------------------------
6756 // genFloatToFloatCast: Generate code for a cast between float and double
6759 // treeNode - The GT_CAST node
6765 // Cast is a non-overflow conversion.
6766 // The treeNode must have an assigned register.
6767 // The cast is between float and double or vice versa.
6769 void CodeGen::genFloatToFloatCast(GenTreePtr treeNode)
6771 // float <--> double conversions are always non-overflow ones
6772 assert(treeNode->OperGet() == GT_CAST);
6773 assert(!treeNode->gtOverflow());
6775 regNumber targetReg = treeNode->gtRegNum;
6776 assert(genIsValidFloatReg(targetReg));
6778 GenTreePtr op1 = treeNode->gtOp.gtOp1;
6780 // If not contained, must be a valid float reg.
6781 if (op1->isUsedFromReg())
6783 assert(genIsValidFloatReg(op1->gtRegNum));
6787 var_types dstType = treeNode->CastToType();
6788 var_types srcType = op1->TypeGet();
6789 assert(varTypeIsFloating(srcType) && varTypeIsFloating(dstType));
6791 genConsumeOperands(treeNode->AsOp());
6792 if (srcType == dstType && (op1->isUsedFromReg() && (targetReg == op1->gtRegNum)))
6794 // The source and destination types are the same and also reside in the same register;
6795 // we just need to consume and produce the reg in this case.
6800 instruction ins = ins_FloatConv(dstType, srcType);
6801 getEmitter()->emitInsBinary(ins, emitTypeSize(dstType), treeNode, op1);
6804 genProduceReg(treeNode);
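// Illustrative example (editor's sketch): ins_FloatConv above selects the SSE2
// conversion instruction, so the cast is emitted roughly as:
//     cvtss2sd xmm0, xmm1   // float -> double
//     cvtsd2ss xmm0, xmm1   // double -> float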
6807 //------------------------------------------------------------------------
6808 // genIntToFloatCast: Generate code to cast an int/long to float/double
6811 // treeNode - The GT_CAST node
6817 // Cast is a non-overflow conversion.
6818 // The treeNode must have an assigned register.
6819 // SrcType= int32/uint32/int64/uint64 and DstType=float/double.
6821 void CodeGen::genIntToFloatCast(GenTreePtr treeNode)
6823 // int type --> float/double conversions are always non-overflow ones
6824 assert(treeNode->OperGet() == GT_CAST);
6825 assert(!treeNode->gtOverflow());
6827 regNumber targetReg = treeNode->gtRegNum;
6828 assert(genIsValidFloatReg(targetReg));
6830 GenTreePtr op1 = treeNode->gtOp.gtOp1;
6832 if (op1->isUsedFromReg())
6834 assert(genIsValidIntReg(op1->gtRegNum));
6838 var_types dstType = treeNode->CastToType();
6839 var_types srcType = op1->TypeGet();
6840 assert(!varTypeIsFloating(srcType) && varTypeIsFloating(dstType));
6842 #if !defined(_TARGET_64BIT_)
6843 // We expect morph to replace long to float/double casts with helper calls
6844 noway_assert(!varTypeIsLong(srcType));
6845 #endif // !defined(_TARGET_64BIT_)
6847 // Since the xarch emitter doesn't handle reporting gc-info correctly while casting away gc-ness, we
6848 // ensure that the srcType of a cast is a non gc-type. Codegen should never see BYREF as source type except
6849 // for GT_LCL_VAR_ADDR and GT_LCL_FLD_ADDR that represent stack addresses and can be considered
6850 // as TYP_I_IMPL. In all other cases where src operand is a gc-type and not known to be on stack,
6851 // Front-end (see fgMorphCast()) ensures this by assigning gc-type local to a non gc-type
6852 // temp and using temp as operand of cast operation.
6853 if (srcType == TYP_BYREF)
6855 noway_assert(op1->OperGet() == GT_LCL_VAR_ADDR || op1->OperGet() == GT_LCL_FLD_ADDR);
6856 srcType = TYP_I_IMPL;
6859 // force the srcType to unsigned if GT_UNSIGNED flag is set
6860 if (treeNode->gtFlags & GTF_UNSIGNED)
6862 srcType = genUnsignedType(srcType);
6865 noway_assert(!varTypeIsGC(srcType));
6867 // We should never be seeing srcType whose size is not sizeof(int) nor sizeof(long).
6868 // For conversions from byte/sbyte/int16/uint16 to float/double, we would expect
6869 // either the front-end or lowering phase to have generated two levels of cast.
6870 // The first one is for widening smaller int type to int32 and the second one is
6871 // to the float/double.
6872 emitAttr srcSize = EA_ATTR(genTypeSize(srcType));
6873 noway_assert((srcSize == EA_ATTR(genTypeSize(TYP_INT))) || (srcSize == EA_ATTR(genTypeSize(TYP_LONG))));
6875 // Also we don't expect to see uint32 -> float/double and uint64 -> float conversions
6876 // here since they should have been lowered appropriately.
6877 noway_assert(srcType != TYP_UINT);
6878 noway_assert((srcType != TYP_ULONG) || (dstType != TYP_FLOAT));
6880 // To convert int to a float/double, cvtsi2ss/sd SSE2 instruction is used
6881 // which does a partial write to lower 4/8 bytes of xmm register keeping the other
6882 // upper bytes unmodified. If "cvtsi2ss/sd xmmReg, r32/r64" occurs inside a loop,
6883 // the partial write could introduce a false dependency and could cause a stall
6884 // if there are further uses of xmmReg. We have such a case occurring with a
6885 // customer-reported version of the SpectralNorm benchmark, resulting in a 2x perf
6886 // regression. To avoid the false dependency, we emit "xorps xmmReg, xmmReg" before
6887 // cvtsi2ss/sd instruction.
6889 genConsumeOperands(treeNode->AsOp());
6890 getEmitter()->emitIns_R_R(INS_xorps, EA_4BYTE, treeNode->gtRegNum, treeNode->gtRegNum);
6892 // Note that here we need to specify srcType that will determine
6893 // the size of source reg/mem operand and rex.w prefix.
6894 instruction ins = ins_FloatConv(dstType, TYP_INT);
6895 getEmitter()->emitInsBinary(ins, emitTypeSize(srcType), treeNode, op1);
6897 // Handle the case of srcType = TYP_ULONG. SSE2 conversion instruction
6898 // will interpret ULONG value as LONG. Hence we need to adjust the
6899 // result if sign-bit of srcType is set.
6900 if (srcType == TYP_ULONG)
6902 // The instruction sequence below is less accurate than what clang
6903 // and gcc generate. However, we keep the current sequence for backward compatibility.
6904 // If we change the instructions below, FloatingPointUtils::convertUInt64ToDouble
6905 // should also be updated for a consistent conversion result.
6906 assert(dstType == TYP_DOUBLE);
6907 assert(op1->isUsedFromReg());
6909 // Set the flags without modifying op1.
6910 // test op1Reg, op1Reg
6911 inst_RV_RV(INS_test, op1->gtRegNum, op1->gtRegNum, srcType);
6913 // No need to adjust result if op1 >= 0 i.e. positive
6915 BasicBlock* label = genCreateTempLabel();
6916 inst_JMP(EJ_jge, label);
6918 // Adjust the result
6919 // result = result + 0x43f00000 00000000
6920 // addsd resultReg, 0x43f00000 00000000
6921 GenTreePtr* cns = &u8ToDblBitmask;
6922 if (*cns == nullptr)
6924 double d;
6925 static_assert_no_msg(sizeof(double) == sizeof(__int64));
6926 *((__int64*)&d) = 0x43f0000000000000LL;
6928 *cns = genMakeConst(&d, dstType, treeNode, true);
6930 inst_RV_TT(INS_addsd, treeNode->gtRegNum, *cns);
6932 genDefineTempLabel(label);
6935 genProduceReg(treeNode);
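// Worked example (editor's sketch): a TYP_ULONG -> TYP_DOUBLE cast emits roughly:
//     xorps    xmm0, xmm0              // break the false dependency
//     cvtsi2sd xmm0, rcx               // signed conversion of the raw bits
//     test     rcx, rcx
//     jge      done                    // no fixup needed when the sign bit is clear
//     addsd    xmm0, [u8ToDblBitmask]  // add 2^64 (0x43f0000000000000) to compensate
// done: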
6938 //------------------------------------------------------------------------
6939 // genFloatToIntCast: Generate code to cast float/double to int/long
6942 // treeNode - The GT_CAST node
6948 // Cast is a non-overflow conversion.
6949 // The treeNode must have an assigned register.
6950 // SrcType=float/double and DstType= int32/uint32/int64/uint64
6952 // TODO-XArch-CQ: (Low-pri) - generate in-line code when DstType = uint64
6954 void CodeGen::genFloatToIntCast(GenTreePtr treeNode)
6956 // We don't expect to see overflow-detecting float/double --> int type conversions here
6957 // as they should have been converted into helper calls by the front-end.
6958 assert(treeNode->OperGet() == GT_CAST);
6959 assert(!treeNode->gtOverflow());
6961 regNumber targetReg = treeNode->gtRegNum;
6962 assert(genIsValidIntReg(targetReg));
6964 GenTreePtr op1 = treeNode->gtOp.gtOp1;
6966 if (op1->isUsedFromReg())
6968 assert(genIsValidFloatReg(op1->gtRegNum));
6972 var_types dstType = treeNode->CastToType();
6973 var_types srcType = op1->TypeGet();
6974 assert(varTypeIsFloating(srcType) && !varTypeIsFloating(dstType));
6976 // We should never be seeing dstType whose size is neither sizeof(TYP_INT) nor sizeof(TYP_LONG).
6977 // For conversions to byte/sbyte/int16/uint16 from float/double, we would expect the
6978 // front-end or lowering phase to have generated two levels of cast. The first one is
6979 // for float or double to int32/uint32 and the second one for narrowing int32/uint32 to
6980 // the required smaller int type.
6981 emitAttr dstSize = EA_ATTR(genTypeSize(dstType));
6982 noway_assert((dstSize == EA_ATTR(genTypeSize(TYP_INT))) || (dstSize == EA_ATTR(genTypeSize(TYP_LONG))));
6984 // We shouldn't be seeing uint64 here as it should have been converted
6985 // into a helper call by either front-end or lowering phase.
6986 noway_assert(!varTypeIsUnsigned(dstType) || (dstSize != EA_ATTR(genTypeSize(TYP_LONG))));
6988 // If the dstType is TYP_UINT, we have 32 bits to encode the
6989 // float number, and any of the 33rd or higher bits can be the sign bit.
6990 // To achieve this we pretend as if we are converting it to a long.
6991 if (varTypeIsUnsigned(dstType) && (dstSize == EA_ATTR(genTypeSize(TYP_INT))))
6993 dstType = TYP_LONG;
6996 // Note that we need to specify dstType here so that it will determine
6997 // the size of destination integer register and also the rex.w prefix.
6998 genConsumeOperands(treeNode->AsOp());
6999 instruction ins = ins_FloatConv(TYP_INT, srcType);
7000 getEmitter()->emitInsBinary(ins, emitTypeSize(dstType), treeNode, op1);
7001 genProduceReg(treeNode);
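// Illustrative example (editor's sketch): '(int)d' for a double d emits a
// truncating conversion,
//     cvttsd2si eax, xmm0
// while a TYP_UINT destination widens the target register (per the TYP_LONG
// pretense above) so the result has room for the sign bit:
//     cvttsd2si rax, xmm0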
7004 //------------------------------------------------------------------------
7005 // genCkfinite: Generate code for ckfinite opcode.
7008 // treeNode - The GT_CKFINITE node
7014 // GT_CKFINITE node has reserved an internal register.
7016 // TODO-XArch-CQ - mark the operand as contained if known to be in
7017 // memory (e.g. field or an array element).
7019 void CodeGen::genCkfinite(GenTreePtr treeNode)
7021 assert(treeNode->OperGet() == GT_CKFINITE);
7023 GenTreePtr op1 = treeNode->gtOp.gtOp1;
7024 var_types targetType = treeNode->TypeGet();
7025 int expMask = (targetType == TYP_FLOAT) ? 0x7F800000 : 0x7FF00000; // Bit mask to extract exponent.
7026 regNumber targetReg = treeNode->gtRegNum;
7028 // Extract exponent into a register.
7029 regNumber tmpReg = treeNode->GetSingleTempReg();
7033 #ifdef _TARGET_64BIT_
7035 // Copy the floating-point value to an integer register. If we copied a float to a long, then
7036 // right-shift the value so the high 32 bits of the floating-point value sit in the low 32
7037 // bits of the integer register.
7038 instruction ins = ins_CopyFloatToInt(targetType, (targetType == TYP_FLOAT) ? TYP_INT : TYP_LONG);
7039 inst_RV_RV(ins, op1->gtRegNum, tmpReg, targetType);
7040 if (targetType == TYP_DOUBLE)
7042 // right shift by 32 bits to get to exponent.
7043 inst_RV_SH(INS_shr, EA_8BYTE, tmpReg, 32);
7046 // Mask exponent with all 1's and check if the exponent is all 1's
7047 inst_RV_IV(INS_and, tmpReg, expMask, EA_4BYTE);
7048 inst_RV_IV(INS_cmp, tmpReg, expMask, EA_4BYTE);
7050 // If exponent is all 1's, throw ArithmeticException
7051 genJumpToThrowHlpBlk(EJ_je, SCK_ARITH_EXCPN);
7053 // if it is a finite value copy it to targetReg
7054 if (targetReg != op1->gtRegNum)
7056 inst_RV_RV(ins_Copy(targetType), targetReg, op1->gtRegNum, targetType);
7059 #else // !_TARGET_64BIT_
7061 // If the target type is TYP_DOUBLE, we want to extract the high 32 bits into the register.
7062 // There is no easy way to do this. To not require an extra register, we'll use shuffles
7063 // to move the high 32 bits into the low 32 bits, then shuffle it back, since we
7064 // need to produce the value into the target register.
7066 // For TYP_DOUBLE, we'll generate (for targetReg != op1->gtRegNum):
7067 // movaps targetReg, op1->gtRegNum
7068 // shufps targetReg, targetReg, 0xB1 // WZYX => ZWXY
7069 // mov_xmm2i tmpReg, targetReg // tmpReg <= Y
7070 // and tmpReg, <mask>
7071 // cmp tmpReg, <mask>
7073 // movaps targetReg, op1->gtRegNum // copy the value again, instead of un-shuffling it
7075 // For TYP_DOUBLE with (targetReg == op1->gtRegNum):
7076 // shufps targetReg, targetReg, 0xB1 // WZYX => ZWXY
7077 // mov_xmm2i tmpReg, targetReg // tmpReg <= Y
7078 // and tmpReg, <mask>
7079 // cmp tmpReg, <mask>
7081 // shufps targetReg, targetReg, 0xB1 // ZWXY => WZYX
7083 // For TYP_FLOAT, it's the same as _TARGET_64BIT_:
7084 // mov_xmm2i tmpReg, targetReg // tmpReg <= low 32 bits
7085 // and tmpReg, <mask>
7086 // cmp tmpReg, <mask>
7088 // movaps targetReg, op1->gtRegNum // only if targetReg != op1->gtRegNum
7090 regNumber copyToTmpSrcReg; // The register we'll copy to the integer temp.
7092 if (targetType == TYP_DOUBLE)
7094 if (targetReg != op1->gtRegNum)
7096 inst_RV_RV(ins_Copy(targetType), targetReg, op1->gtRegNum, targetType);
7098 inst_RV_RV_IV(INS_shufps, EA_16BYTE, targetReg, targetReg, 0xb1);
7099 copyToTmpSrcReg = targetReg;
7103 copyToTmpSrcReg = op1->gtRegNum;
7106 // Copy only the low 32 bits. This will be the high order 32 bits of the floating-point
7107 // value, no matter the floating-point type.
7108 inst_RV_RV(ins_CopyFloatToInt(TYP_FLOAT, TYP_INT), copyToTmpSrcReg, tmpReg, TYP_FLOAT);
7110 // Mask exponent with all 1's and check if the exponent is all 1's
7111 inst_RV_IV(INS_and, tmpReg, expMask, EA_4BYTE);
7112 inst_RV_IV(INS_cmp, tmpReg, expMask, EA_4BYTE);
7114 // If exponent is all 1's, throw ArithmeticException
7115 genJumpToThrowHlpBlk(EJ_je, SCK_ARITH_EXCPN);
7117 if (targetReg != op1->gtRegNum)
7119 // In both the TYP_FLOAT and TYP_DOUBLE case, the op1 register is untouched,
7120 // so copy it to the targetReg. This is faster and smaller for TYP_DOUBLE
7121 // than re-shuffling the targetReg.
7122 inst_RV_RV(ins_Copy(targetType), targetReg, op1->gtRegNum, targetType);
7124 else if (targetType == TYP_DOUBLE)
7126 // We need to re-shuffle the targetReg to get the correct result.
7127 inst_RV_RV_IV(INS_shufps, EA_16BYTE, targetReg, targetReg, 0xb1);
7130 #endif // !_TARGET_64BIT_
7132 genProduceReg(treeNode);
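// Worked example (editor's note): for TYP_DOUBLE the exponent occupies bits
// 52..62, i.e. bits 20..30 of the high 32-bit word, hence expMask == 0x7FF00000;
// for TYP_FLOAT it is bits 23..30, hence 0x7F800000. A NaN or infinity has all
// exponent bits set, which is exactly what the and/cmp/je sequence above detects.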
7135 #ifdef _TARGET_AMD64_
7136 int CodeGenInterface::genSPtoFPdelta()
7140 #ifdef UNIX_AMD64_ABI
7142 // We require frame chaining on Unix to support native tool unwinding (such as
7143 // unwinding by the native debugger). We have a CLR-only extension to the
7144 // unwind codes (UWOP_SET_FPREG_LARGE) to support SP->FP offsets larger than 240.
7145 // If Unix ever supports EnC, the RSP == RBP assumption will have to be reevaluated.
7146 delta = genTotalFrameSize();
7148 #else // !UNIX_AMD64_ABI
7150 // As per Amd64 ABI, RBP offset from initial RSP can be between 0 and 240 if
7151 // RBP needs to be reported in unwind codes. This case would arise for methods with localloc.
7153 if (compiler->compLocallocUsed)
7155 // We cannot base delta computation on compLclFrameSize since it changes from
7156 // tentative to final frame layout and hence there is a possibility of
7157 // under-estimating offset of vars from FP, which in turn results in under-
7158 // estimating instruction size.
7160 // To be predictive and so as never to under-estimate offset of vars from FP
7161 // we will always position FP at min(240, outgoing arg area size).
7162 delta = Min(240, (int)compiler->lvaOutgoingArgSpaceSize);
7164 else if (compiler->opts.compDbgEnC)
7166 // The VM's assumption for EnC methods is that rsp and rbp are equal
7167 delta = 0;
7169 else
7171 delta = genTotalFrameSize();
7174 #endif // !UNIX_AMD64_ABI
7179 //---------------------------------------------------------------------
7180 // genTotalFrameSize - return the total size of the stack frame, including local size,
7181 // callee-saved register size, etc. For AMD64, this does not include the caller-pushed return address.
7188 int CodeGenInterface::genTotalFrameSize()
7190 assert(!IsUninitialized(compiler->compCalleeRegsPushed));
7192 int totalFrameSize = compiler->compCalleeRegsPushed * REGSIZE_BYTES + compiler->compLclFrameSize;
7194 assert(totalFrameSize >= 0);
7195 return totalFrameSize;
7198 //---------------------------------------------------------------------
7199 // genCallerSPtoFPdelta - return the offset from Caller-SP to the frame pointer.
7200 // This number is going to be negative, since the Caller-SP is at a higher
7201 // address than the frame pointer.
7203 // There must be a frame pointer to call this function!
7205 // We can't compute this directly from the Caller-SP, since the frame pointer
7206 // is based on a maximum delta from Initial-SP, so first we find SP, then
7207 // compute the FP offset.
7209 int CodeGenInterface::genCallerSPtoFPdelta()
7211 assert(isFramePointerUsed());
7212 int callerSPtoFPdelta;
7214 callerSPtoFPdelta = genCallerSPtoInitialSPdelta() + genSPtoFPdelta();
7216 assert(callerSPtoFPdelta <= 0);
7217 return callerSPtoFPdelta;
7220 //---------------------------------------------------------------------
7221 // genCallerSPtoInitialSPdelta - return the offset from Caller-SP to Initial SP.
7223 // This number will be negative.
7225 int CodeGenInterface::genCallerSPtoInitialSPdelta()
7227 int callerSPtoSPdelta = 0;
7229 callerSPtoSPdelta -= genTotalFrameSize();
7230 callerSPtoSPdelta -= REGSIZE_BYTES; // caller-pushed return address
7232 // compCalleeRegsPushed does not account for the frame pointer
7233 // TODO-Cleanup: shouldn't this be part of genTotalFrameSize?
7234 if (isFramePointerUsed())
7236 callerSPtoSPdelta -= REGSIZE_BYTES;
7239 assert(callerSPtoSPdelta <= 0);
7240 return callerSPtoSPdelta;
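// Worked example (editor's sketch, hypothetical frame): with 3 callee-saved
// registers pushed and 0x28 bytes of locals, genTotalFrameSize() = 3*8 + 0x28 = 0x40,
// so with a frame pointer in use:
//     callerSPtoSPdelta = -0x40 - 8 /* return address */ - 8 /* pushed rbp */ = -0x50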
7242 #endif // _TARGET_AMD64_
7244 //-----------------------------------------------------------------------------------------
7245 // genSSE2BitwiseOp - generate SSE2 code for the given oper as "Operand BitWiseOp BitMask"
7248 // treeNode - tree node
7254 // i) tree oper is one of GT_NEG or GT_INTRINSIC Abs()
7255 // ii) tree type is floating point type.
7256 // iii) caller of this routine needs to call genProduceReg()
7257 void CodeGen::genSSE2BitwiseOp(GenTreePtr treeNode)
7259 regNumber targetReg = treeNode->gtRegNum;
7260 var_types targetType = treeNode->TypeGet();
7261 assert(varTypeIsFloating(targetType));
7263 float f;
7264 double d;
7265 GenTreePtr* bitMask = nullptr;
7266 instruction ins = INS_invalid;
7267 void* cnsAddr = nullptr;
7268 bool dblAlign = false;
7270 switch (treeNode->OperGet())
7273 // Neg(x) = flip the sign bit.
7274 // Neg(f) = f ^ 0x80000000
7275 // Neg(d) = d ^ 0x8000000000000000
7276 ins = genGetInsForOper(GT_XOR, targetType);
7277 if (targetType == TYP_FLOAT)
7279 bitMask = &negBitmaskFlt;
7281 static_assert_no_msg(sizeof(float) == sizeof(int));
7282 *((int*)&f) = 0x80000000;
7287 bitMask = &negBitmaskDbl;
7289 static_assert_no_msg(sizeof(double) == sizeof(__int64));
7290 *((__int64*)&d) = 0x8000000000000000LL;
7297 assert(treeNode->gtIntrinsic.gtIntrinsicId == CORINFO_INTRINSIC_Abs);
7299 // Abs(x) = set sign-bit to zero
7300 // Abs(f) = f & 0x7fffffff
7301 // Abs(d) = d & 0x7fffffffffffffff
7302 ins = genGetInsForOper(GT_AND, targetType);
7303 if (targetType == TYP_FLOAT)
7305 bitMask = &absBitmaskFlt;
7307 static_assert_no_msg(sizeof(float) == sizeof(int));
7308 *((int*)&f) = 0x7fffffff;
7313 bitMask = &absBitmaskDbl;
7315 static_assert_no_msg(sizeof(double) == sizeof(__int64));
7316 *((__int64*)&d) = 0x7fffffffffffffffLL;
7323 assert(!"genSSE2: unsupported oper");
7328 if (*bitMask == nullptr)
7330 assert(cnsAddr != nullptr);
7331 *bitMask = genMakeConst(cnsAddr, targetType, treeNode, dblAlign);
7334 // We need an additional register for bitmask.
7335 regNumber tmpReg = treeNode->GetSingleTempReg();
7337 // Move the operand into targetReg only if the reg reserved for
7338 // internal purposes is not the same as targetReg.
7339 GenTreePtr op1 = treeNode->gtOp.gtOp1;
7340 assert(op1->isUsedFromReg());
7341 regNumber operandReg = genConsumeReg(op1);
7342 if (tmpReg != targetReg)
7344 if (operandReg != targetReg)
7346 inst_RV_RV(ins_Copy(targetType), targetReg, operandReg, targetType);
7349 operandReg = tmpReg;
7352 inst_RV_TT(ins_Load(targetType, false), tmpReg, *bitMask);
7353 assert(ins != INS_invalid);
7354 inst_RV_RV(ins, targetReg, operandReg, targetType);
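// Illustrative example (editor's sketch): Neg of a double becomes an XOR with
// the sign-bit mask materialized above, roughly:
//     movsd xmm1, [negBitmaskDbl]    // 0x8000000000000000
//     xorps xmm0, xmm1               // or xorpd, depending on instruction selection
// and Abs is the same shape with an AND against the 0x7fffffffffffffff mask.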
7357 //---------------------------------------------------------------------
7358 // genIntrinsic - generate code for a given intrinsic
7361 // treeNode - the GT_INTRINSIC node
7366 void CodeGen::genIntrinsic(GenTreePtr treeNode)
7368 // Right now only Sqrt/Abs are treated as math intrinsics.
7369 switch (treeNode->gtIntrinsic.gtIntrinsicId)
7371 case CORINFO_INTRINSIC_Sqrt:
7373 // Both operand and its result must be of the same floating point type.
7374 GenTreePtr srcNode = treeNode->gtOp.gtOp1;
7375 assert(varTypeIsFloating(srcNode));
7376 assert(srcNode->TypeGet() == treeNode->TypeGet());
7378 genConsumeOperands(treeNode->AsOp());
7379 getEmitter()->emitInsBinary(ins_FloatSqrt(treeNode->TypeGet()), emitTypeSize(treeNode), treeNode, srcNode);
7383 case CORINFO_INTRINSIC_Abs:
7384 genSSE2BitwiseOp(treeNode);
7388 assert(!"genIntrinsic: Unsupported intrinsic");
7392 genProduceReg(treeNode);
7395 //-------------------------------------------------------------------------- //
7396 // getBaseVarForPutArgStk - returns the baseVarNum for passing a stack arg.
7399 // treeNode - the GT_PUTARG_STK node
7402 // The number of the base variable.
7405 // For a tail call, the outgoing args are placed in the caller's incoming arg stack space.
7406 // Otherwise, they go in the outgoing arg area on the current frame.
7408 // On Windows the caller always creates slots (homing space) in its frame for the
7409 // first 4 arguments of a callee (register passed args). So, the baseVarNum is always 0.
7410 // For System V systems there is no such calling convention requirement, and the code needs to find
7411 // the first stack passed argument from the caller. This is done by iterating over
7412 // all the lvParam variables and finding the first with lvArgReg equal to REG_STK.
7414 unsigned CodeGen::getBaseVarForPutArgStk(GenTreePtr treeNode)
7416 assert(treeNode->OperGet() == GT_PUTARG_STK);
7418 unsigned baseVarNum;
7420 // Do we set up the stk arg in the incoming or the outgoing arg area?
7421 // Fast tail calls implemented as epilog+jmp: the stk arg is set up in the incoming arg area.
7422 // All other calls: the stk arg is set up in the outgoing arg area.
7423 if (treeNode->AsPutArgStk()->putInIncomingArgArea())
7425 // See the note in the function header re: finding the first stack passed argument.
7426 baseVarNum = getFirstArgWithStackSlot();
7427 assert(baseVarNum != BAD_VAR_NUM);
7430 // This must be a fast tail call.
7431 assert(treeNode->AsPutArgStk()->gtCall->AsCall()->IsFastTailCall());
7433 // Since it is a fast tail call, the existence of the first incoming arg is guaranteed
7434 // because a fast tail call requires that the caller's incoming arg area be >= the
7435 // outgoing arg area required for the tail call.
7436 LclVarDsc* varDsc = &(compiler->lvaTable[baseVarNum]);
7437 assert(varDsc != nullptr);
7439 #ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
7440 assert(!varDsc->lvIsRegArg && varDsc->lvArgReg == REG_STK);
7441 #else // !FEATURE_UNIX_AMD64_STRUCT_PASSING
7442 // On Windows this assert is always true. The first argument will always be in REG_ARG_0 or REG_FLTARG_0.
7443 assert(varDsc->lvIsRegArg && (varDsc->lvArgReg == REG_ARG_0 || varDsc->lvArgReg == REG_FLTARG_0));
7444 #endif // !FEATURE_UNIX_AMD64_STRUCT_PASSING
7449 #if FEATURE_FIXED_OUT_ARGS
7450 baseVarNum = compiler->lvaOutgoingArgSpaceVar;
7451 #else // !FEATURE_FIXED_OUT_ARGS
7452 assert(!"No BaseVarForPutArgStk on x86");
7453 baseVarNum = BAD_VAR_NUM;
7454 #endif // !FEATURE_FIXED_OUT_ARGS
7460 //---------------------------------------------------------------------
7461 // genAlignStackBeforeCall: Align the stack if necessary before a call.
7464 // putArgStk - the putArgStk node.
7466 void CodeGen::genAlignStackBeforeCall(GenTreePutArgStk* putArgStk)
7468 #if defined(UNIX_X86_ABI)
7470 genAlignStackBeforeCall(putArgStk->gtCall);
7472 #endif // UNIX_X86_ABI
7475 //---------------------------------------------------------------------
7476 // genAlignStackBeforeCall: Align the stack if necessary before a call.
7479 // call - the call node.
7481 void CodeGen::genAlignStackBeforeCall(GenTreeCall* call)
7483 #if defined(UNIX_X86_ABI)
7485 // Have we aligned the stack yet?
7486 if (!call->fgArgInfo->IsStkAlignmentDone())
7488 // We haven't done any stack alignment yet for this call. We might need to create
7489 // an alignment adjustment, even if this function itself doesn't have any stack args.
7490 // This can happen if this function call is part of a nested call sequence, and the outer
7491 // call has already pushed some arguments.
7493 unsigned stkLevel = genStackLevel + call->fgArgInfo->GetStkSizeBytes();
7494 call->fgArgInfo->ComputeStackAlignment(stkLevel);
7496 unsigned padStkAlign = call->fgArgInfo->GetStkAlign();
7497 if (padStkAlign != 0)
7499 // Now generate the alignment
7500 inst_RV_IV(INS_sub, REG_SPBASE, padStkAlign, EA_PTRSIZE);
7501 AddStackLevel(padStkAlign);
7502 AddNestedAlignment(padStkAlign);
7505 call->fgArgInfo->SetStkAlignmentDone();
7508 #endif // UNIX_X86_ABI
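// Worked example (editor's note, UNIX_X86_ABI): if an enclosing call sequence has
// already pushed 12 bytes and this call requires 16-byte alignment, the computed
// pad would be 4 bytes, emitted as 'sub esp, 4' before this call's args are pushed.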
7511 //---------------------------------------------------------------------
7512 // genRemoveAlignmentAfterCall: After a call, remove the alignment
7513 // added before the call, if any.
7516 // call - the call node.
7517 // bias - additional stack adjustment
7520 // When bias > 0, the caller should adjust the stack level appropriately, as
7521 // bias is not considered when adjusting the stack level.
7523 void CodeGen::genRemoveAlignmentAfterCall(GenTreeCall* call, unsigned bias)
7525 #if defined(_TARGET_X86_)
7526 #if defined(UNIX_X86_ABI)
7527 // Put back the stack pointer if there was any padding for stack alignment
7528 unsigned padStkAlign = call->fgArgInfo->GetStkAlign();
7529 unsigned padStkAdjust = padStkAlign + bias;
7531 if (padStkAdjust != 0)
7533 inst_RV_IV(INS_add, REG_SPBASE, padStkAdjust, EA_PTRSIZE);
7534 SubtractStackLevel(padStkAlign);
7535 SubtractNestedAlignment(padStkAlign);
7537 #else // UNIX_X86_ABI
7542 #endif // !UNIX_X86_ABI
7543 #else // _TARGET_X86_
7545 #endif // !_TARGET_X86_
7550 //---------------------------------------------------------------------
7551 // genAdjustStackForPutArgStk:
7552 // adjust the stack pointer for a putArgStk node if necessary.
7555 // putArgStk - the putArgStk node.
7557 // Returns: true if the stack pointer was adjusted; false otherwise.
7560 // Sets `m_pushStkArg` to true if the stack arg needs to be pushed,
7561 // false if the stack arg needs to be stored at the current stack
7562 // pointer address. This is exactly the opposite of the return value
7563 // of this function.
7565 bool CodeGen::genAdjustStackForPutArgStk(GenTreePutArgStk* putArgStk)
7568 if (varTypeIsSIMD(putArgStk))
7570 const unsigned argSize = genTypeSize(putArgStk);
7571 inst_RV_IV(INS_sub, REG_SPBASE, argSize, EA_PTRSIZE);
7572 AddStackLevel(argSize);
7573 m_pushStkArg = false;
7576 #endif // FEATURE_SIMD
7578 const unsigned argSize = putArgStk->getArgSize();
7580 // If the gtPutArgStkKind is one of the push types, we do not pre-adjust the stack.
7581 // This is set in Lowering, and is true if and only if:
7582 // - This argument contains any GC pointers OR
7583 // - It is a GT_FIELD_LIST OR
7584 // - It is less than 16 bytes in size.
7585 CLANG_FORMAT_COMMENT_ANCHOR;
7588 switch (putArgStk->gtPutArgStkKind)
7590 case GenTreePutArgStk::Kind::RepInstr:
7591 case GenTreePutArgStk::Kind::Unroll:
7592 assert((putArgStk->gtNumberReferenceSlots == 0) && (putArgStk->gtGetOp1()->OperGet() != GT_FIELD_LIST) &&
7595 case GenTreePutArgStk::Kind::Push:
7596 case GenTreePutArgStk::Kind::PushAllSlots:
7597 assert((putArgStk->gtNumberReferenceSlots != 0) || (putArgStk->gtGetOp1()->OperGet() == GT_FIELD_LIST) ||
7600 case GenTreePutArgStk::Kind::Invalid:
7602 assert(!"Uninitialized GenTreePutArgStk::Kind");
7607 if (putArgStk->isPushKind())
7609 m_pushStkArg = true;
7614 m_pushStkArg = false;
7615 inst_RV_IV(INS_sub, REG_SPBASE, argSize, EA_PTRSIZE);
7616 AddStackLevel(argSize);
7621 //---------------------------------------------------------------------
7622 // genPutArgStkFieldList - generate code for passing a GT_FIELD_LIST arg on the stack.
7625 // treeNode - the GT_PUTARG_STK node whose op1 is a GT_FIELD_LIST
7630 void CodeGen::genPutArgStkFieldList(GenTreePutArgStk* putArgStk)
7632 GenTreeFieldList* const fieldList = putArgStk->gtOp1->AsFieldList();
7633 assert(fieldList != nullptr);
7635 // Set m_pushStkArg and pre-adjust the stack if necessary.
7636 const bool preAdjustedStack = genAdjustStackForPutArgStk(putArgStk);
7638 // For now, we only support the "push" case; we will push a full slot for the first field of each slot
7639 // within the struct.
7640 assert((putArgStk->isPushKind()) && !preAdjustedStack && m_pushStkArg);
7642 // If we have pre-adjusted the stack and are simply storing the fields in order, set the offset to 0.
7643 // (Note that this mode is not currently being used.)
7644 // If we are pushing the arguments (i.e. we have not pre-adjusted the stack), then we are pushing them
7645 // in reverse order, so we start with the current field offset at the size of the struct arg (which must be
7646 // a multiple of the target pointer size).
7647 unsigned currentOffset = (preAdjustedStack) ? 0 : putArgStk->getArgSize();
7648 unsigned prevFieldOffset = currentOffset;
7649 regNumber intTmpReg = REG_NA;
7650 regNumber simdTmpReg = REG_NA;
7651 if (putArgStk->AvailableTempRegCount() != 0)
7653 regMaskTP rsvdRegs = putArgStk->gtRsvdRegs;
7654 if ((rsvdRegs & RBM_ALLINT) != 0)
7656 intTmpReg = putArgStk->GetSingleTempReg(RBM_ALLINT);
7657 assert(genIsValidIntReg(intTmpReg));
7659 if ((rsvdRegs & RBM_ALLFLOAT) != 0)
7661 simdTmpReg = putArgStk->GetSingleTempReg(RBM_ALLFLOAT);
7662 assert(genIsValidFloatReg(simdTmpReg));
7664 assert(genCountBits(rsvdRegs) == (unsigned)((intTmpReg == REG_NA) ? 0 : 1) + ((simdTmpReg == REG_NA) ? 0 : 1));
7667 for (GenTreeFieldList* current = fieldList; current != nullptr; current = current->Rest())
7669 GenTree* const fieldNode = current->Current();
7670 const unsigned fieldOffset = current->gtFieldOffset;
7671 var_types fieldType = current->gtFieldType;
7673 // Long-typed nodes should have been handled by the decomposition pass, and lowering should have sorted the
7674 // field list in descending order by offset.
7675 assert(!varTypeIsLong(fieldType));
7676 assert(fieldOffset <= prevFieldOffset);
7678 // Consume the register, if any, for this field. Note that genConsumeRegs() will appropriately
7679 // update the liveness info for a lclVar that has been marked RegOptional, which hasn't been
7680 // assigned a register, and which is therefore contained.
7681 // Unlike genConsumeReg(), it handles the case where no registers are being consumed.
7682 genConsumeRegs(fieldNode);
7683 regNumber argReg = fieldNode->isUsedFromSpillTemp() ? REG_NA : fieldNode->gtRegNum;
7685 // If the field is slot-like, we can use a push instruction to store the entire register no matter the type.
7687 // The GC encoder requires that the stack remain 4-byte aligned at all times. Round the adjustment up
7688 // to the next multiple of 4. If we are going to generate a `push` instruction, the adjustment must
7689 // not require rounding.
7690 // NOTE: if the field is of GC type, we must use a push instruction, since the emitter is not otherwise
7691 // able to detect stores into the outgoing argument area of the stack on x86.
7692 const bool fieldIsSlot = ((fieldOffset % 4) == 0) && ((prevFieldOffset - fieldOffset) >= 4);
7693 int adjustment = roundUp(currentOffset - fieldOffset, 4);
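// Worked example (hypothetical layout): for the first field, an int at offset 8 in a 12-byte
// struct, prevFieldOffset starts at 12, so fieldIsSlot is true and
// adjustment == roundUp(12 - 8, 4) == 4, which the field's own 4-byte push fully covers;
// no zero-padding pushes are needed. A 2-byte field at offset 2 is not slot-like
// (2 % 4 != 0), so the stack-adjust-and-store path below must be used instead.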
7694 if (fieldIsSlot && !varTypeIsSIMD(fieldType))
7696 fieldType = genActualType(fieldType);
7697 unsigned pushSize = genTypeSize(fieldType);
7698 assert((pushSize % 4) == 0);
7699 adjustment -= pushSize;
7700 while (adjustment != 0)
7702 inst_IV(INS_push, 0);
7703 currentOffset -= pushSize;
7704 AddStackLevel(pushSize);
7705 adjustment -= pushSize;
7707 m_pushStkArg = true;
7708 }
7709 else
7710 {
7711 m_pushStkArg = false;
7713 // We always "push" floating point fields (i.e. they are full slot values that don't
7714 // require special handling).
7715 assert(varTypeIsIntegralOrI(fieldNode) || varTypeIsSIMD(fieldNode));
7717 // If we can't push this field, it needs to be in a register so that we can store
7718 // it to the stack location.
7719 if (adjustment != 0)
7721 // This moves the stack pointer to fieldOffset.
7722 // For this case, we must adjust the stack and generate stack-relative stores rather than pushes.
7723 // Adjust the stack pointer to the next slot boundary.
7724 inst_RV_IV(INS_sub, REG_SPBASE, adjustment, EA_PTRSIZE);
7725 currentOffset -= adjustment;
7726 AddStackLevel(adjustment);
7729 // Does it need to be in a byte register?
7730 // If so, we'll use intTmpReg, which must have been allocated as a byte register.
7731 // If it's already in a register, but not a byteable one, then move it.
7732 if (varTypeIsByte(fieldType) && ((argReg == REG_NA) || ((genRegMask(argReg) & RBM_BYTE_REGS) == 0)))
7734 assert(intTmpReg != REG_NA);
7735 noway_assert((genRegMask(intTmpReg) & RBM_BYTE_REGS) != 0);
7736 if (argReg != REG_NA)
7738 inst_RV_RV(INS_mov, intTmpReg, argReg, fieldType);
7739 argReg = intTmpReg;
7740 }
7741 }
7744 if (argReg == REG_NA)
7745 {
7746 if (m_pushStkArg)
7747 {
7748 if (fieldNode->isUsedFromSpillTemp())
7750 assert(!varTypeIsSIMD(fieldType)); // Q: can we get here with SIMD?
7751 assert(fieldNode->IsRegOptional());
7752 TempDsc* tmp = getSpillTempDsc(fieldNode);
7753 getEmitter()->emitIns_S(INS_push, emitActualTypeSize(fieldNode->TypeGet()), tmp->tdTempNum(), 0);
7754 compiler->tmpRlsTemp(tmp);
7755 }
7756 else
7757 {
7758 assert(varTypeIsIntegralOrI(fieldNode));
7759 switch (fieldNode->OperGet())
7761 case GT_LCL_VAR:
7762 inst_TT(INS_push, fieldNode, 0, 0, emitActualTypeSize(fieldNode->TypeGet()));
7763 break;
7764 case GT_CNS_INT:
7765 if (fieldNode->IsIconHandle())
7767 inst_IV_handle(INS_push, fieldNode->gtIntCon.gtIconVal);
7769 else
7771 inst_IV(INS_push, fieldNode->gtIntCon.gtIconVal);
7773 break;
7774 default:
7775 unreached();
7778 currentOffset -= TARGET_POINTER_SIZE;
7779 AddStackLevel(TARGET_POINTER_SIZE);
7780 }
7781 else
7782 {
7783 // The stack has been adjusted and we will load the field to intTmpReg and then store it on the stack.
7784 assert(varTypeIsIntegralOrI(fieldNode));
7785 switch (fieldNode->OperGet())
7787 case GT_LCL_VAR:
7788 inst_RV_TT(INS_mov, intTmpReg, fieldNode);
7789 break;
7790 case GT_CNS_INT:
7791 genSetRegToConst(intTmpReg, fieldNode->TypeGet(), fieldNode);
7792 break;
7793 default:
7794 unreached();
7796 genStoreRegToStackArg(fieldType, intTmpReg, fieldOffset - currentOffset);
7797 }
7798 }
7799 else
7800 {
7801 #if defined(FEATURE_SIMD)
7802 if (fieldType == TYP_SIMD12)
7804 assert(genIsValidFloatReg(simdTmpReg));
7805 genStoreSIMD12ToStack(argReg, simdTmpReg);
7806 }
7807 else
7808 #endif // defined(FEATURE_SIMD)
7810 genStoreRegToStackArg(fieldType, argReg, fieldOffset - currentOffset);
7812 if (m_pushStkArg)
7814 // We always push a slot-rounded size
7815 currentOffset -= genTypeSize(fieldType);
7819 prevFieldOffset = fieldOffset;
7821 if (currentOffset != 0)
7823 // We don't expect padding at the beginning of a struct, but it could happen with explicit layout.
7824 inst_RV_IV(INS_sub, REG_SPBASE, currentOffset, EA_PTRSIZE);
7825 AddStackLevel(currentOffset);
7828 #endif // _TARGET_X86_
7830 //---------------------------------------------------------------------
7831 // genPutArgStk - generate code for passing an arg on the stack.
7833 // Arguments:
7834 // treeNode - the GT_PUTARG_STK node
7835 // targetType - the type of the treeNode
7840 void CodeGen::genPutArgStk(GenTreePutArgStk* putArgStk)
7842 var_types targetType = putArgStk->TypeGet();
7844 #ifdef _TARGET_X86_
7846 genAlignStackBeforeCall(putArgStk);
7848 if (varTypeIsStruct(targetType))
7850 (void)genAdjustStackForPutArgStk(putArgStk);
7851 genPutStructArgStk(putArgStk);
7852 return;
7853 }
7855 // The following logic is applicable only to the x86 architecture.
7856 assert(!varTypeIsFloating(targetType) || (targetType == putArgStk->gtOp1->TypeGet()));
7858 GenTreePtr data = putArgStk->gtOp1;
7860 // On a 32-bit target, all of the long arguments are handled with GT_FIELD_LIST,
7861 // and the type of the putArgStk is TYP_VOID.
7862 assert(targetType != TYP_LONG);
7864 const unsigned argSize = putArgStk->getArgSize();
7865 assert((argSize % TARGET_POINTER_SIZE) == 0);
7867 if (data->isContainedIntOrIImmed())
7869 if (data->IsIconHandle())
7871 inst_IV_handle(INS_push, data->gtIntCon.gtIconVal);
7875 inst_IV(INS_push, data->gtIntCon.gtIconVal);
7877 AddStackLevel(argSize);
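// Illustrative only: a contained int constant argument of, say, 42 becomes a single
//     push 42
// and the stack level is bumped by the full pointer-size-rounded argSize.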
7879 else if (data->OperGet() == GT_FIELD_LIST)
7881 genPutArgStkFieldList(putArgStk);
7885 // We should not see any contained nodes that are not immediates.
7886 assert(data->isUsedFromReg());
7887 genConsumeReg(data);
7888 genPushReg(targetType, data->gtRegNum);
7890 #else // !_TARGET_X86_
7892 unsigned baseVarNum = getBaseVarForPutArgStk(putArgStk);
7894 #ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
7896 if (varTypeIsStruct(targetType))
7898 m_stkArgVarNum = baseVarNum;
7899 m_stkArgOffset = putArgStk->getArgOffset();
7900 genPutStructArgStk(putArgStk);
7901 m_stkArgVarNum = BAD_VAR_NUM;
7902 return;
7903 }
7904 #endif // FEATURE_UNIX_AMD64_STRUCT_PASSING
7906 noway_assert(targetType != TYP_STRUCT);
7907 assert(!varTypeIsFloating(targetType) || (targetType == putArgStk->gtOp1->TypeGet()));
7909 // Get argument offset on stack.
7910 // Here we cross check that argument offset hasn't changed from lowering to codegen since
7911 // we are storing arg slot number in GT_PUTARG_STK node in lowering phase.
7912 int argOffset = putArgStk->getArgOffset();
7914 #ifdef DEBUG
7915 fgArgTabEntryPtr curArgTabEntry = compiler->gtArgEntryByNode(putArgStk->gtCall, putArgStk);
7916 assert(curArgTabEntry);
7917 assert(argOffset == (int)curArgTabEntry->slotNum * TARGET_POINTER_SIZE);
7918 #endif // DEBUG
7920 GenTreePtr data = putArgStk->gtOp1;
7922 if (data->isContainedIntOrIImmed())
7924 getEmitter()->emitIns_S_I(ins_Store(targetType), emitTypeSize(targetType), baseVarNum, argOffset,
7925 (int)data->AsIntConCommon()->IconValue());
7929 assert(data->isUsedFromReg());
7930 genConsumeReg(data);
7931 getEmitter()->emitIns_S_R(ins_Store(targetType), emitTypeSize(targetType), data->gtRegNum, baseVarNum,
7932 argOffset);
7935 #endif // !_TARGET_X86_
7939 // genPushReg: Push a register value onto the stack and adjust the stack level
7941 // Arguments:
7942 // type - the type of value to be stored
7943 // reg - the register containing the value
7945 // Notes:
7946 // For TYP_LONG, the srcReg must be a floating point register.
7947 // Otherwise, the register type must be consistent with the given type.
7949 void CodeGen::genPushReg(var_types type, regNumber srcReg)
7951 unsigned size = genTypeSize(type);
7952 if (varTypeIsIntegralOrI(type) && type != TYP_LONG)
7954 assert(genIsValidIntReg(srcReg));
7955 inst_RV(INS_push, srcReg, type);
7956 }
7957 else
7958 {
7959 instruction ins;
7960 emitAttr attr = emitTypeSize(type);
7961 if (type == TYP_LONG)
7963 // On x86, the only way we can push a TYP_LONG from a register is if it is in an xmm reg.
7964 // This is only used when we are pushing a struct from memory to memory, and basically is
7965 // handling an 8-byte "chunk", as opposed to strictly a long type.
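// Illustrative only (hypothetical register assignment): with the 8-byte chunk in xmm0,
// the code below amounts to
//     sub  esp, 8
//     movq qword ptr [esp], xmm0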
7966 ins = INS_movq;
7967 }
7968 else
7969 {
7970 ins = ins_Store(type);
7972 assert(genIsValidFloatReg(srcReg));
7973 inst_RV_IV(INS_sub, REG_SPBASE, size, EA_PTRSIZE);
7974 getEmitter()->emitIns_AR_R(ins, attr, srcReg, REG_SPBASE, 0);
7976 AddStackLevel(size);
7978 #endif // _TARGET_X86_
7980 #if defined(FEATURE_PUT_STRUCT_ARG_STK)
7981 // genStoreRegToStackArg: Store a register value into the stack argument area
7983 // Arguments:
7984 // type - the type of value to be stored
7985 // reg - the register containing the value
7986 // offset - the offset from the base (see Assumptions below)
7988 // Notes:
7989 // A type of TYP_STRUCT instructs this method to store a 16-byte chunk
7990 // at the given offset (i.e. not the full struct).
7992 // Assumptions:
7993 // The caller must set the context appropriately before calling this method:
7994 // - On x64, m_stkArgVarNum must be set according to whether this is a regular or tail call.
7995 // - On x86, the caller must set m_pushStkArg if this method should push the argument.
7996 // Otherwise, the argument is stored at the given offset from sp.
7998 // TODO: In the below code the load and store instructions are for 16 bytes, but the
7999 // type is EA_8BYTE. The movdqa/u are 16 byte instructions, so it works, but
8000 // this probably needs to be changed.
8002 void CodeGen::genStoreRegToStackArg(var_types type, regNumber srcReg, int offset)
8004 assert(srcReg != REG_NA);
8005 instruction ins;
8006 emitAttr attr;
8007 unsigned size;
8009 if (type == TYP_STRUCT)
8010 {
8011 ins = INS_movdqu;
8012 // This should be changed!
8013 attr = EA_8BYTE;
8014 size = 16;
8015 }
8016 else
8017 {
8018 #ifdef FEATURE_SIMD
8019 if (varTypeIsSIMD(type))
8021 assert(genIsValidFloatReg(srcReg));
8022 ins = ins_Store(type); // TODO-CQ: pass 'aligned' correctly
8023 }
8024 else
8025 #endif // FEATURE_SIMD
8026 #ifdef _TARGET_X86_
8027 if (type == TYP_LONG)
8028 {
8029 assert(genIsValidFloatReg(srcReg));
8030 ins = INS_movq;
8031 }
8032 else
8033 #endif // _TARGET_X86_
8035 assert((varTypeIsFloating(type) && genIsValidFloatReg(srcReg)) ||
8036 (varTypeIsIntegralOrI(type) && genIsValidIntReg(srcReg)));
8037 ins = ins_Store(type);
8039 attr = emitTypeSize(type);
8040 size = genTypeSize(type);
8043 #ifdef _TARGET_X86_
8044 if (m_pushStkArg)
8046 genPushReg(type, srcReg);
8048 else
8050 getEmitter()->emitIns_AR_R(ins, attr, srcReg, REG_SPBASE, offset);
8052 #else // !_TARGET_X86_
8053 assert(m_stkArgVarNum != BAD_VAR_NUM);
8054 getEmitter()->emitIns_S_R(ins, attr, srcReg, m_stkArgVarNum, m_stkArgOffset + offset);
8055 #endif // !_TARGET_X86_
8058 //---------------------------------------------------------------------
8059 // genPutStructArgStk - generate code for copying a struct arg on the stack by value.
8060 // In case there are references to heap objects in the struct,
8061 // it generates the gcinfo as well.
8064 // putArgStk - the GT_PUTARG_STK node
8067 // In the case of fixed out args, the caller must have set m_stkArgVarNum to the variable number
8068 // corresponding to the argument area (where we will put the argument on the stack).
8069 // For tail calls this is the baseVarNum, which is 0.
8070 // For non-tail calls this is the outgoingArgSpace variable.
8071 void CodeGen::genPutStructArgStk(GenTreePutArgStk* putArgStk)
8073 var_types targetType = putArgStk->TypeGet();
8075 #if defined(_TARGET_X86_) && defined(FEATURE_SIMD)
8076 if (targetType == TYP_SIMD12)
8078 genPutArgStkSIMD12(putArgStk);
8079 return;
8080 }
8081 #endif // defined(_TARGET_X86_) && defined(FEATURE_SIMD)
8083 if (varTypeIsSIMD(targetType))
8085 regNumber srcReg = genConsumeReg(putArgStk->gtGetOp1());
8086 assert((srcReg != REG_NA) && (genIsValidFloatReg(srcReg)));
8087 genStoreRegToStackArg(targetType, srcReg, 0);
8088 return;
8089 }
8091 assert(targetType == TYP_STRUCT);
8093 if (putArgStk->gtNumberReferenceSlots == 0)
8095 switch (putArgStk->gtPutArgStkKind)
8097 case GenTreePutArgStk::Kind::RepInstr:
8098 genStructPutArgRepMovs(putArgStk);
8099 break;
8100 case GenTreePutArgStk::Kind::Unroll:
8101 genStructPutArgUnroll(putArgStk);
8102 break;
8103 case GenTreePutArgStk::Kind::Push:
8104 genStructPutArgUnroll(putArgStk);
8105 break;
8106 default:
8107 unreached();
8108 }
8109 }
8110 else
8111 {
8112 // No need to disable GC the way COPYOBJ does: here the refs are always copied with atomic, pointer-sized operations.
8113 CLANG_FORMAT_COMMENT_ANCHOR;
8115 #ifdef _TARGET_X86_
8116 // On x86, any struct that contains GC references must be stored to the stack using `push` instructions so
8117 // that the emitter properly detects the need to update the method's GC information.
8119 // Strictly speaking, it is only necessary to use `push` to store the GC references themselves, so for structs
8120 // with large numbers of consecutive non-GC-ref-typed fields, we may be able to improve the code size in the
8121 // future.
8122 assert(m_pushStkArg);
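// Illustrative only (hypothetical layout and register): for a 12-byte struct whose middle slot
// holds an object reference, the loop below walks the slots in reverse and emits
//     push dword ptr [eax+8]    ; EA_4BYTE
//     push dword ptr [eax+4]    ; EA_GCREF - tells the emitter a GC ref now lives on the stack
//     push dword ptr [eax]      ; EA_4BYTE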
8124 GenTree* srcAddr = putArgStk->gtGetOp1()->gtGetOp1();
8125 BYTE* gcPtrs = putArgStk->gtGcPtrs;
8126 const unsigned numSlots = putArgStk->gtNumSlots;
8128 regNumber srcRegNum = srcAddr->gtRegNum;
8129 const bool srcAddrInReg = srcRegNum != REG_NA;
8131 unsigned srcLclNum = 0;
8132 unsigned srcLclOffset = 0;
8133 if (srcAddrInReg)
8135 genConsumeReg(srcAddr);
8137 else
8139 assert(srcAddr->OperIsLocalAddr());
8141 srcLclNum = srcAddr->AsLclVarCommon()->gtLclNum;
8142 if (srcAddr->OperGet() == GT_LCL_FLD_ADDR)
8144 srcLclOffset = srcAddr->AsLclFld()->gtLclOffs;
8148 for (int i = numSlots - 1; i >= 0; --i)
8150 emitAttr slotAttr;
8151 if (gcPtrs[i] == TYPE_GC_NONE)
8153 slotAttr = EA_4BYTE;
8155 else if (gcPtrs[i] == TYPE_GC_REF)
8157 slotAttr = EA_GCREF;
8161 assert(gcPtrs[i] == TYPE_GC_BYREF);
8162 slotAttr = EA_BYREF;
8165 const unsigned offset = i * TARGET_POINTER_SIZE;
8166 if (srcAddrInReg)
8168 getEmitter()->emitIns_AR_R(INS_push, slotAttr, REG_NA, srcRegNum, offset);
8170 else
8172 getEmitter()->emitIns_S(INS_push, slotAttr, srcLclNum, srcLclOffset + offset);
8174 AddStackLevel(TARGET_POINTER_SIZE);
8176 #else // !defined(_TARGET_X86_)
8178 // Consume these registers.
8179 // They may now contain gc pointers (depending on their type; gcMarkRegPtrVal will "do the right thing").
8180 genConsumePutStructArgStk(putArgStk, REG_RDI, REG_RSI, REG_NA);
8182 const bool srcIsLocal = putArgStk->gtOp1->AsObj()->gtOp1->OperIsLocalAddr();
8183 const emitAttr srcAddrAttr = srcIsLocal ? EA_PTRSIZE : EA_BYREF;
8185 #ifdef DEBUG
8186 unsigned numGCSlotsCopied = 0;
8187 #endif // DEBUG
8189 BYTE* gcPtrs = putArgStk->gtGcPtrs;
8190 const unsigned numSlots = putArgStk->gtNumSlots;
8191 for (unsigned i = 0; i < numSlots;)
8193 if (gcPtrs[i] == TYPE_GC_NONE)
8195 // Let's see if we can use rep movsp (alias for movsd or movsq for 32 and 64 bits respectively)
8196 // instead of a sequence of movsp instructions to save cycles and code size.
8197 unsigned adjacentNonGCSlotCount = 0;
8198 do
8199 {
8200 adjacentNonGCSlotCount++;
8201 i++;
8202 } while ((i < numSlots) && (gcPtrs[i] == TYPE_GC_NONE));
8204 // If we have a very small contiguous non-ref region, it's better just to
8205 // emit a sequence of movsp instructions
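// Illustrative only (x64): a short run of two non-GC slots would emit
//     movsq
//     movsq
// whereas a run of at least CPOBJ_NONGC_SLOTS_LIMIT slots would instead emit
//     mov ecx, <slot count>
//     rep movsq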
8206 if (adjacentNonGCSlotCount < CPOBJ_NONGC_SLOTS_LIMIT)
8208 for (; adjacentNonGCSlotCount > 0; adjacentNonGCSlotCount--)
8210 instGen(INS_movsp);
8213 else
8215 getEmitter()->emitIns_R_I(INS_mov, EA_4BYTE, REG_RCX, adjacentNonGCSlotCount);
8216 instGen(INS_r_movsp);
8221 assert((gcPtrs[i] == TYPE_GC_REF) || (gcPtrs[i] == TYPE_GC_BYREF));
8223 // We have a GC (byref or ref) pointer
8224 // TODO-Amd64-Unix: Here a better solution (for code size and CQ) would be to use movsp instruction,
8225 // but the logic for emitting a GC info record is not available (it is internal for the emitter
8226 // only.) See emitGCVarLiveUpd function. If we could call it separately, we could do
8227 // instGen(INS_movsp); and emission of gc info.
8229 var_types memType = (gcPtrs[i] == TYPE_GC_REF) ? TYP_REF : TYP_BYREF;
8230 getEmitter()->emitIns_R_AR(ins_Load(memType), emitTypeSize(memType), REG_RCX, REG_RSI, 0);
8231 genStoreRegToStackArg(memType, REG_RCX, i * TARGET_POINTER_SIZE);
8233 #ifdef DEBUG
8234 numGCSlotsCopied++;
8235 #endif // DEBUG
8237 i++;
8238 if (i < numSlots)
8240 // Source for the copy operation.
8241 // If a LocalAddr, use EA_PTRSIZE - copy from stack.
8242 // If not a LocalAddr, use EA_BYREF - the source location is not on the stack.
8243 getEmitter()->emitIns_R_I(INS_add, srcAddrAttr, REG_RSI, TARGET_POINTER_SIZE);
8245 // Always copying to the stack - outgoing arg area
8246 // (or the outgoing arg area of the caller for a tail call) - use EA_PTRSIZE.
8247 getEmitter()->emitIns_R_I(INS_add, EA_PTRSIZE, REG_RDI, TARGET_POINTER_SIZE);
8252 assert(numGCSlotsCopied == putArgStk->gtNumberReferenceSlots);
8253 #endif // !defined(_TARGET_X86_)
8256 #endif // defined(FEATURE_PUT_STRUCT_ARG_STK)
8258 /*****************************************************************************
8260 * Create and record GC Info for the function.
8262 #ifndef JIT32_GCENCODER
8263 void
8264 #else // !JIT32_GCENCODER
8265 void*
8266 #endif // !JIT32_GCENCODER
8267 CodeGen::genCreateAndStoreGCInfo(unsigned codeSize, unsigned prologSize, unsigned epilogSize DEBUGARG(void* codePtr))
8269 #ifdef JIT32_GCENCODER
8270 return genCreateAndStoreGCInfoJIT32(codeSize, prologSize, epilogSize DEBUGARG(codePtr));
8271 #else // !JIT32_GCENCODER
8272 genCreateAndStoreGCInfoX64(codeSize, prologSize DEBUGARG(codePtr));
8273 #endif // !JIT32_GCENCODER
8276 #ifdef JIT32_GCENCODER
8277 void* CodeGen::genCreateAndStoreGCInfoJIT32(unsigned codeSize,
8278 unsigned prologSize,
8279 unsigned epilogSize DEBUGARG(void* codePtr))
8281 BYTE headerBuf[64];
8282 InfoHdr header;
8284 int s_cached;
8285 #ifdef DEBUG
8286 size_t headerSize =
8287 #endif
8288 compiler->compInfoBlkSize =
8289 gcInfo.gcInfoBlockHdrSave(headerBuf, 0, codeSize, prologSize, epilogSize, &header, &s_cached);
8291 size_t argTabOffset = 0;
8292 size_t ptrMapSize = gcInfo.gcPtrTableSize(header, codeSize, &argTabOffset);
8294 #if DISPLAY_SIZES
8296 if (genInterruptible)
8298 gcHeaderISize += compiler->compInfoBlkSize;
8299 gcPtrMapISize += ptrMapSize;
8301 else
8303 gcHeaderNSize += compiler->compInfoBlkSize;
8304 gcPtrMapNSize += ptrMapSize;
8307 #endif // DISPLAY_SIZES
8309 compiler->compInfoBlkSize += ptrMapSize;
8311 /* Allocate the info block for the method */
8313 compiler->compInfoBlkAddr = (BYTE*)compiler->info.compCompHnd->allocGCInfo(compiler->compInfoBlkSize);
8315 #if 0 // VERBOSE_SIZES
8316 // TODO-X86-Cleanup: 'dataSize', below, is not defined
8318 // if (compiler->compInfoBlkSize > codeSize && compiler->compInfoBlkSize > 100)
8320 printf("[%7u VM, %7u+%7u/%7u x86 %03u/%03u%%] %s.%s\n",
8321 compiler->info.compILCodeSize,
8322 compiler->compInfoBlkSize,
8323 codeSize + dataSize,
8324 codeSize + dataSize - prologSize - epilogSize,
8325 100 * (codeSize + dataSize) / compiler->info.compILCodeSize,
8326 100 * (codeSize + dataSize + compiler->compInfoBlkSize) / compiler->info.compILCodeSize,
8327 compiler->info.compClassName,
8328 compiler->info.compMethodName);
8329 }
8330 #endif
8333 /* Fill in the info block and return it to the caller */
8335 void* infoPtr = compiler->compInfoBlkAddr;
8337 /* Create the method info block: header followed by GC tracking tables */
8339 compiler->compInfoBlkAddr +=
8340 gcInfo.gcInfoBlockHdrSave(compiler->compInfoBlkAddr, -1, codeSize, prologSize, epilogSize, &header, &s_cached);
8342 assert(compiler->compInfoBlkAddr == (BYTE*)infoPtr + headerSize);
8343 compiler->compInfoBlkAddr = gcInfo.gcPtrTableSave(compiler->compInfoBlkAddr, header, codeSize, &argTabOffset);
8344 assert(compiler->compInfoBlkAddr == (BYTE*)infoPtr + headerSize + ptrMapSize);
8346 #ifdef DUMP_GC_TABLES
8347 if (compiler->opts.dspGCtbls)
8350 BYTE* temp = (BYTE*)infoPtr;
8351 unsigned size = compiler->compInfoBlkAddr - temp;
8352 BYTE* ptab = temp + headerSize;
8354 noway_assert(size == headerSize + ptrMapSize);
8356 printf("Method info block - header [%u bytes]:", headerSize);
8358 for (unsigned i = 0; i < size; i++)
8360 if (temp == ptab)
8362 printf("\nMethod info block - ptrtab [%u bytes]:", ptrMapSize);
8363 printf("\n %04X: %*c", i & ~0xF, 3 * (i & 0xF), ' ');
8365 else
8367 if (!(i % 16))
8368 printf("\n %04X: ", i);
8371 printf("%02X ", *temp++);
8381 if (compiler->opts.dspGCtbls)
8383 const BYTE* base = (BYTE*)infoPtr;
8384 size_t size;
8385 unsigned methodSize;
8386 InfoHdr dumpHeader;
8388 printf("GC Info for method %s\n", compiler->info.compFullName);
8389 printf("GC info size = %3u\n", compiler->compInfoBlkSize);
8391 size = gcInfo.gcInfoBlockHdrDump(base, &dumpHeader, &methodSize);
8392 // printf("size of header encoding is %3u\n", size);
8395 if (compiler->opts.dspGCtbls)
8396 {
8397 base += size;
8398 size = gcInfo.gcDumpPtrTable(base, dumpHeader, methodSize);
8399 // printf("size of pointer table is %3u\n", size);
8401 noway_assert(compiler->compInfoBlkAddr == (base + size));
8406 if (jitOpts.testMask & 128)
8408 for (unsigned offs = 0; offs < codeSize; offs++)
8410 gcInfo.gcFindPtrsInFrame(infoPtr, codePtr, offs);
8414 #endif // DUMP_GC_TABLES
8416 /* Make sure we ended up generating the expected number of bytes */
8418 noway_assert(compiler->compInfoBlkAddr == (BYTE*)infoPtr + compiler->compInfoBlkSize);
8420 return infoPtr;
8422 }
8423 #else // !JIT32_GCENCODER
8424 void CodeGen::genCreateAndStoreGCInfoX64(unsigned codeSize, unsigned prologSize DEBUGARG(void* codePtr))
8426 IAllocator* allowZeroAlloc = new (compiler, CMK_GC) AllowZeroAllocator(compiler->getAllocatorGC());
8427 GcInfoEncoder* gcInfoEncoder = new (compiler, CMK_GC)
8428 GcInfoEncoder(compiler->info.compCompHnd, compiler->info.compMethodInfo, allowZeroAlloc, NOMEM);
8429 assert(gcInfoEncoder);
8431 // Follow the code pattern of the x86 gc info encoder (genCreateAndStoreGCInfoJIT32).
8432 gcInfo.gcInfoBlockHdrSave(gcInfoEncoder, codeSize, prologSize);
8434 // We keep the call count for the second call to gcMakeRegPtrTable() below.
8435 unsigned callCnt = 0;
8436 // First we figure out the encoder ID's for the stack slots and registers.
8437 gcInfo.gcMakeRegPtrTable(gcInfoEncoder, codeSize, prologSize, GCInfo::MAKE_REG_PTR_MODE_ASSIGN_SLOTS, &callCnt);
8438 // Now we've requested all the slots we'll need; "finalize" these (make more compact data structures for them).
8439 gcInfoEncoder->FinalizeSlotIds();
8440 // Now we can actually use those slot ID's to declare live ranges.
8441 gcInfo.gcMakeRegPtrTable(gcInfoEncoder, codeSize, prologSize, GCInfo::MAKE_REG_PTR_MODE_DO_WORK, &callCnt);
8443 if (compiler->opts.compDbgEnC)
8445 // what we have to preserve is called the "frame header" (see comments in VM\eetwain.cpp)
8446 // which is:
8447 // -return address
8448 // -saved off RBP
8449 // -saved 'this' pointer and bool for synchronized methods
8451 // 4 slots for RBP + return address + RSI + RDI
8452 int preservedAreaSize = 4 * REGSIZE_BYTES;
8454 if (compiler->info.compFlags & CORINFO_FLG_SYNCH)
8456 if (!(compiler->info.compFlags & CORINFO_FLG_STATIC))
8458 preservedAreaSize += REGSIZE_BYTES;
8461 // bool in synchronized methods that tracks whether the lock has been taken (takes 4 bytes on stack)
8462 preservedAreaSize += 4;
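// Illustrative arithmetic: for a synchronized x64 instance method this works out to
// 4 * 8 (RBP, return address, RSI, RDI) + 8 (saved 'this') + 4 (lock-taken bool) = 44 bytes.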
8465 // Used to signal both that the method is compiled for EnC, and also the size of the block at the top of the
8466 // frame.
8467 gcInfoEncoder->SetSizeOfEditAndContinuePreservedArea(preservedAreaSize);
8470 if (compiler->opts.IsReversePInvoke())
8472 unsigned reversePInvokeFrameVarNumber = compiler->lvaReversePInvokeFrameVar;
8473 assert(reversePInvokeFrameVarNumber != BAD_VAR_NUM && reversePInvokeFrameVarNumber < compiler->lvaRefCount);
8474 LclVarDsc& reversePInvokeFrameVar = compiler->lvaTable[reversePInvokeFrameVarNumber];
8475 gcInfoEncoder->SetReversePInvokeFrameSlot(reversePInvokeFrameVar.lvStkOffs);
8478 gcInfoEncoder->Build();
8480 // GC Encoder automatically puts the GC info in the right spot using ICorJitInfo::allocGCInfo(size_t)
8481 // let's save the values anyway for debugging purposes
8482 compiler->compInfoBlkAddr = gcInfoEncoder->Emit();
8483 compiler->compInfoBlkSize = 0; // not exposed by the GCEncoder interface
8485 #endif // !JIT32_GCENCODER
8487 /*****************************************************************************
8488 * Emit a call to a helper function.
8492 void CodeGen::genEmitHelperCall(unsigned helper, int argSize, emitAttr retSize, regNumber callTargetReg)
8494 void* addr = nullptr;
8495 void* pAddr = nullptr;
8497 emitter::EmitCallType callType = emitter::EC_FUNC_TOKEN;
8498 addr = compiler->compGetHelperFtn((CorInfoHelpFunc)helper, &pAddr);
8499 regNumber callTarget = REG_NA;
8500 regMaskTP killMask = compiler->compHelperCallKillSet((CorInfoHelpFunc)helper);
8502 if (!addr)
8503 {
8504 assert(pAddr != nullptr);
8506 // Absolute indirect call addr
8507 // Note: Order of checks is important. First always check for pc-relative and next
8508 // zero-relative, because the former encoding is 1 byte smaller than the latter.
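// Illustrative encoding sizes: the pc-relative form is "call qword ptr [rip+disp32]"
// (FF 15 <disp32>, 6 bytes), while the zero-relative form needs a SIB byte,
// "call qword ptr [disp32]" (FF 14 25 <disp32>, 7 bytes).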
8509 if (genCodeIndirAddrCanBeEncodedAsPCRelOffset((size_t)pAddr) ||
8510 genCodeIndirAddrCanBeEncodedAsZeroRelOffset((size_t)pAddr))
8512 // generate call whose target is specified by 32-bit offset relative to PC or zero.
8513 callType = emitter::EC_FUNC_TOKEN_INDIR;
8514 addr = pAddr;
8515 }
8516 else
8517 {
8518 #ifdef _TARGET_AMD64_
8519 // If this indirect address cannot be encoded as 32-bit offset relative to PC or Zero,
8520 // load it into REG_HELPER_CALL_TARGET and use register indirect addressing mode to
8521 // make the call.
8522 // mov reg, addr
8523 // call [reg]
8525 if (callTargetReg == REG_NA)
8527 // If a callTargetReg has not been explicitly provided, we will use REG_DEFAULT_HELPER_CALL_TARGET, but
8528 // this is only a valid assumption if the helper call is known to kill REG_DEFAULT_HELPER_CALL_TARGET.
8529 callTargetReg = REG_DEFAULT_HELPER_CALL_TARGET;
8530 regMaskTP callTargetMask = genRegMask(callTargetReg);
8531 noway_assert((callTargetMask & killMask) == callTargetMask);
8532 }
8533 else
8534 {
8535 // The call target must not overwrite any live variable, though it may not be in the
8536 // kill set for the call.
8537 regMaskTP callTargetMask = genRegMask(callTargetReg);
8538 noway_assert((callTargetMask & regSet.rsMaskVars) == RBM_NONE);
8539 }
8540 #endif // _TARGET_AMD64_
8542 callTarget = callTargetReg;
8543 CodeGen::genSetRegToIcon(callTarget, (ssize_t)pAddr, TYP_I_IMPL);
8544 callType = emitter::EC_INDIR_ARD;
8549 getEmitter()->emitIns_Call(callType,
8550 compiler->eeFindHelper(helper),
8551 INDEBUG_LDISASM_COMMA(nullptr) addr,
8552 argSize,
8553 retSize
8554 MULTIREG_HAS_SECOND_GC_RET_ONLY_ARG(EA_UNKNOWN),
8555 gcInfo.gcVarPtrSetCur,
8556 gcInfo.gcRegGCrefSetCur,
8557 gcInfo.gcRegByrefSetCur,
8558 BAD_IL_OFFSET, // IL offset
8559 callTarget, // ireg
8560 REG_NA, 0, 0, // xreg, xmul, disp
8561 false, // isJump
8562 emitter::emitNoGChelper(helper));
8565 regTracker.rsTrashRegSet(killMask);
8566 regTracker.rsTrashRegsForGCInterruptability();
8569 #if !defined(_TARGET_64BIT_)
8570 //-----------------------------------------------------------------------------
8572 // Code Generation for Long integers
8574 //-----------------------------------------------------------------------------
8576 //------------------------------------------------------------------------
8577 // genStoreLongLclVar: Generate code to store a non-enregistered long lclVar
8579 // Arguments:
8580 // treeNode - A TYP_LONG lclVar node.
8582 // Return Value:
8583 // None.
8585 // Assumptions:
8586 // 'treeNode' must be a TYP_LONG lclVar node for a lclVar that has NOT been promoted.
8587 // Its operand must be a GT_LONG node.
8589 void CodeGen::genStoreLongLclVar(GenTree* treeNode)
8591 emitter* emit = getEmitter();
8593 GenTreeLclVarCommon* lclNode = treeNode->AsLclVarCommon();
8594 unsigned lclNum = lclNode->gtLclNum;
8595 LclVarDsc* varDsc = &(compiler->lvaTable[lclNum]);
8596 assert(varDsc->TypeGet() == TYP_LONG);
8597 assert(!varDsc->lvPromoted);
8598 GenTreePtr op1 = treeNode->gtOp.gtOp1;
8599 noway_assert(op1->OperGet() == GT_LONG || op1->OperGet() == GT_MUL_LONG);
8600 genConsumeRegs(op1);
8602 if (op1->OperGet() == GT_LONG)
8604 // Definitions of register candidates will have been lowered to 2 int lclVars.
8605 assert(!treeNode->InReg());
8607 GenTreePtr loVal = op1->gtGetOp1();
8608 GenTreePtr hiVal = op1->gtGetOp2();
8610 // NYI: Contained immediates.
8611 NYI_IF((loVal->gtRegNum == REG_NA) || (hiVal->gtRegNum == REG_NA),
8612 "Store of long lclVar with contained immediate");
8614 emit->emitIns_S_R(ins_Store(TYP_INT), EA_4BYTE, loVal->gtRegNum, lclNum, 0);
8615 emit->emitIns_S_R(ins_Store(TYP_INT), EA_4BYTE, hiVal->gtRegNum, lclNum, genTypeSize(TYP_INT));
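// Illustrative only (hypothetical frame offsets and registers): for a stack-allocated long
// at [ebp-08h] with the lo half in eax and the hi half in edx, the two stores above emit
//     mov dword ptr [ebp-08h], eax
//     mov dword ptr [ebp-04h], edx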
8617 else if (op1->OperGet() == GT_MUL_LONG)
8619 assert((op1->gtFlags & GTF_MUL_64RSLT) != 0);
8622 getEmitter()->emitIns_S_R(ins_Store(TYP_INT), emitTypeSize(TYP_INT), REG_LNGRET_LO, lclNum, 0);
8623 getEmitter()->emitIns_S_R(ins_Store(TYP_INT), emitTypeSize(TYP_INT), REG_LNGRET_HI, lclNum,
8624 genTypeSize(TYP_INT));
8627 #endif // !defined(_TARGET_64BIT_)
8629 /*****************************************************************************
8630 * Unit testing of the XArch emitter: generate a bunch of instructions into the prolog
8631 * (it's as good a place as any), then use COMPlus_JitLateDisasm=* to see if the late
8632 * disassembler thinks the instructions are the same as we do.
8635 // Uncomment "#define ALL_XARCH_EMITTER_UNIT_TESTS" to run all the unit tests here.
8636 // After adding a unit test, and verifying it works, put it under this #ifdef, so we don't see it run every time.
8637 //#define ALL_XARCH_EMITTER_UNIT_TESTS
8639 #if defined(DEBUG) && defined(LATE_DISASM) && defined(_TARGET_AMD64_)
8640 void CodeGen::genAmd64EmitterUnitTests()
8647 if (!compiler->opts.altJit)
8649 // No point doing this in a "real" JIT.
8650 return;
8651 }
8653 // Mark the "fake" instructions in the output.
8654 printf("*************** In genAmd64EmitterUnitTests()\n");
8656 // We use this:
8657 // genDefineTempLabel(genCreateTempLabel());
8658 // to create artificial labels to help separate groups of tests.
8663 CLANG_FORMAT_COMMENT_ANCHOR;
8665 #ifdef ALL_XARCH_EMITTER_UNIT_TESTS
8666 #ifdef FEATURE_AVX_SUPPORT
8667 genDefineTempLabel(genCreateTempLabel());
8669 // vhaddpd ymm0,ymm1,ymm2
8670 getEmitter()->emitIns_R_R_R(INS_haddpd, EA_32BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
8671 // vaddss xmm0,xmm1,xmm2
8672 getEmitter()->emitIns_R_R_R(INS_addss, EA_4BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
8673 // vaddsd xmm0,xmm1,xmm2
8674 getEmitter()->emitIns_R_R_R(INS_addsd, EA_8BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
8675 // vaddps xmm0,xmm1,xmm2
8676 getEmitter()->emitIns_R_R_R(INS_addps, EA_16BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
8677 // vaddps ymm0,ymm1,ymm2
8678 getEmitter()->emitIns_R_R_R(INS_addps, EA_32BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
8679 // vaddpd xmm0,xmm1,xmm2
8680 getEmitter()->emitIns_R_R_R(INS_addpd, EA_16BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
8681 // vaddpd ymm0,ymm1,ymm2
8682 getEmitter()->emitIns_R_R_R(INS_addpd, EA_32BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
8683 // vsubss xmm0,xmm1,xmm2
8684 getEmitter()->emitIns_R_R_R(INS_subss, EA_4BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
8685 // vsubsd xmm0,xmm1,xmm2
8686 getEmitter()->emitIns_R_R_R(INS_subsd, EA_8BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
8687 // vsubps xmm0,xmm1,xmm2
8688 getEmitter()->emitIns_R_R_R(INS_subps, EA_16BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
8689 // vsubps ymm0,ymm1,ymm2
8690 getEmitter()->emitIns_R_R_R(INS_subps, EA_32BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
8691 // vsubpd xmm0,xmm1,xmm2
8692 getEmitter()->emitIns_R_R_R(INS_subpd, EA_16BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
8693 // vsubpd ymm0,ymm1,ymm2
8694 getEmitter()->emitIns_R_R_R(INS_subpd, EA_32BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
8695 // vmulss xmm0,xmm1,xmm2
8696 getEmitter()->emitIns_R_R_R(INS_mulss, EA_4BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
8697 // vmulsd xmm0,xmm1,xmm2
8698 getEmitter()->emitIns_R_R_R(INS_mulsd, EA_8BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
8699 // vmulps xmm0,xmm1,xmm2
8700 getEmitter()->emitIns_R_R_R(INS_mulps, EA_16BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
8701 // vmulpd xmm0,xmm1,xmm2
8702 getEmitter()->emitIns_R_R_R(INS_mulpd, EA_16BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
8703 // vmulps ymm0,ymm1,ymm2
8704 getEmitter()->emitIns_R_R_R(INS_mulps, EA_32BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
8705 // vmulpd ymm0,ymm1,ymm2
8706 getEmitter()->emitIns_R_R_R(INS_mulpd, EA_32BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
8707 // vandps xmm0,xmm1,xmm2
8708 getEmitter()->emitIns_R_R_R(INS_andps, EA_16BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
8709 // vandpd xmm0,xmm1,xmm2
8710 getEmitter()->emitIns_R_R_R(INS_andpd, EA_16BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
8711 // vandps ymm0,ymm1,ymm2
8712 getEmitter()->emitIns_R_R_R(INS_andps, EA_32BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
8713 // vandpd ymm0,ymm1,ymm2
8714 getEmitter()->emitIns_R_R_R(INS_andpd, EA_32BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
8715 // vorps xmm0,xmm1,xmm2
8716 getEmitter()->emitIns_R_R_R(INS_orps, EA_16BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
8717 // vorpd xmm0,xmm1,xmm2
8718 getEmitter()->emitIns_R_R_R(INS_orpd, EA_16BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
8719 // vorps ymm0,ymm1,ymm2
8720 getEmitter()->emitIns_R_R_R(INS_orps, EA_32BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
8721 // vorpd ymm0,ymm1,ymm2
8722 getEmitter()->emitIns_R_R_R(INS_orpd, EA_32BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
8723 // vdivss xmm0,xmm1,xmm2
8724 getEmitter()->emitIns_R_R_R(INS_divss, EA_4BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
8725 // vdivsd xmm0,xmm1,xmm2
8726 getEmitter()->emitIns_R_R_R(INS_divsd, EA_8BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
8727 // vdivss xmm0,xmm1,xmm2
8728 getEmitter()->emitIns_R_R_R(INS_divss, EA_4BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
8729 // vdivsd xmm0,xmm1,xmm2
8730 getEmitter()->emitIns_R_R_R(INS_divsd, EA_8BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
8732 // vcvtss2sd xmm0,xmm1,xmm2
8733 getEmitter()->emitIns_R_R_R(INS_cvtss2sd, EA_4BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
8734 // vcvtsd2ss xmm0,xmm1,xmm2
8735 getEmitter()->emitIns_R_R_R(INS_cvtsd2ss, EA_8BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
8736 #endif // FEATURE_AVX_SUPPORT
8737 #endif // ALL_XARCH_EMITTER_UNIT_TESTS
8738 printf("*************** End of genAmd64EmitterUnitTests()\n");
8741 #endif // defined(DEBUG) && defined(LATE_DISASM) && defined(_TARGET_AMD64_)
8743 #endif // _TARGET_AMD64_
8745 #endif // !LEGACY_BACKEND