1 // Licensed to the .NET Foundation under one or more agreements.
2 // The .NET Foundation licenses this file to you under the MIT license.
3 // See the LICENSE file in the project root for more information.
5 /*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
6 XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
10 XX The interface to generate a machine-instruction. XX
12 XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
13 XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
25 /*****************************************************************************/
28 /*****************************************************************************
30 * Returns the string representation of the given CPU instruction.
// Returns the string mnemonic for the given CPU instruction.
// The name table is built by expanding the per-target INSTn macros; only
// the 'nm' (name) argument of each macro is kept — every other macro
// argument is discarded for this table.
33 const char* CodeGen::genInsName(instruction ins)
37     const char * const insNames[] =
39 #if defined(_TARGET_XARCH_)
40 #define INST0(id, nm, um, mr, flags) nm,
41 #define INST1(id, nm, um, mr, flags) nm,
42 #define INST2(id, nm, um, mr, mi, flags) nm,
43 #define INST3(id, nm, um, mr, mi, rm, flags) nm,
44 #define INST4(id, nm, um, mr, mi, rm, a4, flags) nm,
45 #define INST5(id, nm, um, mr, mi, rm, a4, rr, flags) nm,
48 #elif defined(_TARGET_ARM_)
49 #define INST1(id, nm, fp, ldst, fmt, e1 ) nm,
50 #define INST2(id, nm, fp, ldst, fmt, e1, e2 ) nm,
51 #define INST3(id, nm, fp, ldst, fmt, e1, e2, e3 ) nm,
52 #define INST4(id, nm, fp, ldst, fmt, e1, e2, e3, e4 ) nm,
53 #define INST5(id, nm, fp, ldst, fmt, e1, e2, e3, e4, e5 ) nm,
54 #define INST6(id, nm, fp, ldst, fmt, e1, e2, e3, e4, e5, e6 ) nm,
55 #define INST8(id, nm, fp, ldst, fmt, e1, e2, e3, e4, e5, e6, e7, e8 ) nm,
56 #define INST9(id, nm, fp, ldst, fmt, e1, e2, e3, e4, e5, e6, e7, e8, e9 ) nm,
59 #elif defined(_TARGET_ARM64_)
60 #define INST1(id, nm, fp, ldst, fmt, e1 ) nm,
61 #define INST2(id, nm, fp, ldst, fmt, e1, e2 ) nm,
62 #define INST3(id, nm, fp, ldst, fmt, e1, e2, e3 ) nm,
63 #define INST4(id, nm, fp, ldst, fmt, e1, e2, e3, e4 ) nm,
64 #define INST5(id, nm, fp, ldst, fmt, e1, e2, e3, e4, e5 ) nm,
65 #define INST6(id, nm, fp, ldst, fmt, e1, e2, e3, e4, e5, e6 ) nm,
// Note: arm64 defines no INST8 variant, unlike the arm32 list above.
66 #define INST9(id, nm, fp, ldst, fmt, e1, e2, e3, e4, e5, e6, e7, e8, e9 ) nm,
70 #error "Unknown _TARGET_"
// Guard against out-of-range instruction values and holes in the table.
75     assert((unsigned)ins < _countof(insNames));
76     assert(insNames[ins] != nullptr);
// Debug-dump helper: prints the instruction mnemonic (plus caller-supplied
// formatted text) when instruction display is enabled via opts.dspCode.
// NOTE(review): the handling of 'noNL' and the varargs 'fmt' is not visible
// in this view; only the mnemonic printing is shown below.
81 void __cdecl CodeGen::instDisp(instruction ins, bool noNL, const char* fmt, ...)
83 if (compiler->opts.dspCode)
85 /* Display the instruction offset within the emit block */
87 // printf("[%08X:%04X]", getEmitter().emitCodeCurBlock(), getEmitter().emitCodeOffsInBlock());
89 /* Display the FP stack depth (before the instruction is executed) */
91 // printf("[FP=%02u] ", genGetFPstkLevel());
93 /* Display the instruction mnemonic */
96 printf("  %-8s", genInsName(ins));
113 /*****************************************************************************/
115 /*****************************************************************************/
// One-time setup hook for the instruction-generation phase (body not
// visible in this view).
117 void CodeGen::instInit()
121 /*****************************************************************************
123 * Return the size string (e.g. "word ptr") appropriate for the given size.
// Returns the operand-size prefix string (e.g. "word ptr") appropriate for
// the given emitAttr. The string entries of the table are elided from this
// view; the branches below distinguish plain power-of-two sizes, GC-ref,
// byref, and displacement-reloc attributes.
128 const char* CodeGen::genSizeStr(emitAttr attr)
132 const char * const sizes[] =
151 nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr,
152 nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr,
// Strip attribute flags; only the raw size participates in the lookup.
157 unsigned size = EA_SIZE(attr);
// Sizes must be 0 or a power of two no larger than 32 bytes.
159 assert(size == 0 || size == 1 || size == 2 || size == 4 || size == 8 || size == 16 || size == 32);
161 if (EA_ATTR(size) == attr)
165 else if (attr == EA_GCREF)
169 else if (attr == EA_BYREF)
173 else if (EA_IS_DSP_RELOC(attr))
179 assert(!"Unexpected");
186 /*****************************************************************************
188 * Generate an instruction.
191 void CodeGen::instGen(instruction ins)
194 getEmitter()->emitIns(ins);
196 #ifdef _TARGET_XARCH_
197 // A workaround necessitated by limitations of emitter
198 // if we are scheduled to insert a nop here, we have to delay it
199 // hopefully we have not missed any other prefix instructions or places
200 // they could be inserted
201 if (ins == INS_lock && getEmitter()->emitNextNop == 0)
203 getEmitter()->emitNextNop = 1;
208 /*****************************************************************************
210 * Returns non-zero if the given CPU instruction is a floating-point ins.
214 bool CodeGenInterface::instIsFP(instruction ins)
216 assert((unsigned)ins < _countof(instInfo));
218 #ifdef _TARGET_XARCH_
219 return (instInfo[ins] & INS_FLAGS_x87Instr) != 0;
221 return (instInfo[ins] & INST_FP) != 0;
225 #ifdef _TARGET_XARCH_
226 /*****************************************************************************
228 * Generate a multi-byte NOP instruction.
// Generate a multi-byte NOP instruction of 'size' bytes (xarch only).
231 void CodeGen::instNop(unsigned size)
234 getEmitter()->emitIns_Nop(size);
238 /*****************************************************************************
240 * Generate a jump instruction.
// Generate a jump of kind 'jmp' to 'tgtBlock'.
243 void CodeGen::inst_JMP(emitJumpKind jmp, BasicBlock* tgtBlock)
245 #if !FEATURE_FIXED_OUT_ARGS
246 // On the x86 we are pushing (and changing the stack level), but on x64 and other archs we have
247 // a fixed outgoing args area that we store into and we never change the stack level when calling methods.
249 // Thus only on x86 do we need to assert that the stack level at the target block matches the current stack level.
251 CLANG_FORMAT_COMMENT_ANCHOR;
// NOTE(review): the two asserts below are alternative forms of the same
// stack-level check; the preprocessor directives selecting between them are
// not visible in this view — confirm before editing.
254 // bbTgtStkDepth is a (pure) argument count (stack alignment padding should be excluded).
255 assert((tgtBlock->bbTgtStkDepth * sizeof(int) == (genStackLevel - curNestedAlignment)) || isFramePointerUsed());
257 assert((tgtBlock->bbTgtStkDepth * sizeof(int) == genStackLevel) || isFramePointerUsed());
259 #endif // !FEATURE_FIXED_OUT_ARGS
261 getEmitter()->emitIns_J(emitter::emitJumpKindToIns(jmp), tgtBlock);
264 /*****************************************************************************
266 * Generate a set instruction.
// Materialize a condition into a register: "setcc reg" on xarch,
// "cset reg, cond" on arm64. The condition-mapping switches are elided
// from this view.
269 void CodeGen::inst_SET(emitJumpKind condition, regNumber reg)
271 #ifdef _TARGET_XARCH_
274 /* Convert the condition to an instruction opcode */
325 NO_WAY("unexpected condition type");
// x86 setcc can only target byte-addressable registers.
329 assert(genRegMask(reg) & RBM_BYTE_REGS);
331 // These instructions only write the low byte of 'reg'
332 getEmitter()->emitIns_R(ins, EA_1BYTE, reg);
333 #elif defined(_TARGET_ARM64_)
335 /* Convert the condition to an insCond value */
385 NO_WAY("unexpected condition type");
// arm64 cset writes the full 8-byte register.
388 getEmitter()->emitIns_R_COND(INS_cset, EA_8BYTE, reg, cond);
394 /*****************************************************************************
396 * Generate a "op reg" instruction.
399 void CodeGen::inst_RV(instruction ins, regNumber reg, var_types type, emitAttr size)
401 if (size == EA_UNKNOWN)
403 size = emitActualTypeSize(type);
406 getEmitter()->emitIns_R(ins, size, reg);
409 /*****************************************************************************
411 * Generate a "op reg1, reg2" instruction.
// Generate "op reg1, reg2". When 'size' is EA_UNKNOWN it is inferred from
// 'type'. NOTE(review): the two emitIns_R_R calls below are alternative
// target-specific paths (one forwards 'flags', the other does not); the
// selecting preprocessor directives and the remaining parameter
// declarations are not visible in this view.
414 void CodeGen::inst_RV_RV(instruction ins,
419 insFlags flags /* = INS_FLAGS_DONT_CARE */)
421 if (size == EA_UNKNOWN)
423 size = emitActualTypeSize(type);
427 getEmitter()->emitIns_R_R(ins, size, reg1, reg2, flags);
429 getEmitter()->emitIns_R_R(ins, size, reg1, reg2);
433 /*****************************************************************************
435 * Generate a "op reg1, reg2, reg3" instruction.
// Generate "op reg1, reg2, reg3". The flags-forwarding emit call is the
// non-xarch path; xarch drops 'flags'; any other target hits NYI. The
// remaining parameter declarations are not visible in this view.
438 void CodeGen::inst_RV_RV_RV(instruction ins,
443 insFlags flags /* = INS_FLAGS_DONT_CARE */)
446 getEmitter()->emitIns_R_R_R(ins, size, reg1, reg2, reg3, flags);
447 #elif defined(_TARGET_XARCH_)
448 getEmitter()->emitIns_R_R_R(ins, size, reg1, reg2, reg3);
450 NYI("inst_RV_RV_RV");
453 /*****************************************************************************
455 * Generate a "op icon" instruction.
458 void CodeGen::inst_IV(instruction ins, int val)
460 getEmitter()->emitIns_I(ins, EA_PTRSIZE, val);
463 /*****************************************************************************
465 * Generate a "op icon" instruction where icon is a handle of type specified
469 void CodeGen::inst_IV_handle(instruction ins, int val)
471 getEmitter()->emitIns_I(ins, EA_HANDLE_CNS_RELOC, val);
474 /*****************************************************************************
476 * Display a stack frame reference.
// Record the IL offset of the local-variable reference in 'tree' with the
// emitter so the upcoming stack-frame reference can be displayed/annotated.
479 void CodeGen::inst_set_SV_var(GenTree* tree)
// Only local-variable style nodes carry the fields read below.
482 assert(tree && (tree->gtOper == GT_LCL_VAR || tree->gtOper == GT_LCL_VAR_ADDR || tree->gtOper == GT_STORE_LCL_VAR));
483 assert(tree->gtLclVarCommon.gtLclNum < compiler->lvaCount);
485 getEmitter()->emitVarRefOffs = tree->gtLclVar.gtLclILoffs;
490 /*****************************************************************************
492 * Generate a "op reg, icon" instruction.
// Generate "op reg, icon". Each target legalizes the immediate its own way:
// arm checks encodability (falling back to a mov-immediate sequence for
// INS_mov), arm64 asserts on the cases it cannot yet handle, and amd64
// narrows or rejects 8-byte immediates before emitting.
495 void CodeGen::inst_RV_IV(
496 instruction ins, regNumber reg, target_ssize_t val, emitAttr size, insFlags flags /* = INS_FLAGS_DONT_CARE */)
// 8-byte immediates only exist on 64-bit targets.
498 #if !defined(_TARGET_64BIT_)
499 assert(size != EA_8BYTE);
503 if (arm_Valid_Imm_For_Instr(ins, val, flags))
505 getEmitter()->emitIns_R_I(ins, size, reg, val, flags);
507 else if (ins == INS_mov)
509 instGen_Set_Reg_To_Imm(size, reg, val);
513 // TODO-Cleanup: Add a comment about why this is unreached() for RyuJIT backend.
516 #elif defined(_TARGET_ARM64_)
517 // TODO-Arm64-Bug: handle large constants!
518 // Probably need something like the ARM case above: if (arm_Valid_Imm_For_Instr(ins, val)) ...
519 assert(ins != INS_cmp);
520 assert(ins != INS_tst);
521 assert(ins != INS_mov);
// Note: arm64 emits the three-operand form "ins reg, reg, val".
522 getEmitter()->emitIns_R_R_I(ins, size, reg, reg, val);
523 #else // !_TARGET_ARM_
524 #ifdef _TARGET_AMD64_
525 // Instead of an 8-byte immediate load, a 4-byte immediate will do fine
526 // as the high 4 bytes will be zero anyway.
527 if (size == EA_8BYTE && ins == INS_mov && ((val & 0xFFFFFFFF00000000LL) == 0))
530 getEmitter()->emitIns_R_I(ins, size, reg, val);
// Non-mov 8-byte ops cannot encode immediates that don't fit in 32 bits.
532 else if (EA_SIZE(size) == EA_8BYTE && ins != INS_mov && (((int)val != val) || EA_IS_CNS_RELOC(size)))
534 assert(!"Invalid immediate for inst_RV_IV");
537 #endif // _TARGET_AMD64_
539 getEmitter()->emitIns_R_I(ins, size, reg, val);
541 #endif // !_TARGET_ARM_
544 /*****************************************************************************
546 * Generate an instruction that has one operand given by a tree (which has
547 * been made addressable).
// Generate an instruction whose single operand is the addressable 'tree':
// locals use the emitter's S (stack) forms, class statics the C forms,
// constants re-dispatch to inst_IV/inst_IV_handle, and address-mode
// subtrees recurse through gtOp2. 'shfv' is passed as an extra immediate on
// one path (the selecting condition is not visible in this view).
550 void CodeGen::inst_TT(instruction ins, GenTree* tree, unsigned offs, int shfv, emitAttr size)
552 bool sizeInferred = false;
// When no size is supplied, infer it from the tree's type.
554 if (size == EA_UNKNOWN)
559 size = EA_ATTR(genTypeSize(tree->TypeGet()));
563 size = emitTypeSize(tree->TypeGet());
569 /* Is this a spilled value? */
571 if (tree->gtFlags & GTF_SPILLED)
573 assert(!"ISSUE: If this can happen, we need to generate 'ins [ebp+spill]'");
576 switch (tree->gtOper)
// Record the IL offset for debug display of the frame reference.
582 inst_set_SV_var(tree)
587 offs += tree->gtLclFld.gtLclOffs;
// NOTE(review): 'varNum' is declared in lines not visible here.
591 varNum = tree->gtLclVarCommon.gtLclNum;
592 assert(varNum < compiler->lvaCount);
596 getEmitter()->emitIns_S_I(ins, size, varNum, offs, shfv);
600 getEmitter()->emitIns_S(ins, size, varNum, offs);
606 // Make sure FP instruction size matches the operand size
607 // (We optimized constant doubles to floats when we can, just want to
608 // make sure that we don't mistakenly use 8 bytes when the
610 assert(!isFloatRegType(tree->gtType) || genTypeSize(tree->gtType) == EA_SIZE_IN_BYTES(size));
614 getEmitter()->emitIns_C_I(ins, size, tree->gtClsVar.gtClsVarHnd, offs, shfv);
618 getEmitter()->emitIns_C(ins, size, tree->gtClsVar.gtClsVarHnd, offs);
626 assert(!"inst_TT not supported for GT_IND, GT_NULLCHECK or GT_ARR_ELEM");
632 // We will get here for GT_MKREFANY from CodeGen::genPushArgList
635 if (tree->IsIconHandle())
636 inst_IV_handle(ins, tree->gtIntCon.gtIconVal);
638 inst_IV(ins, tree->gtIntCon.gtIconVal);
643 // tree->gtOp.gtOp1 - already processed by genCreateAddrMode()
644 tree = tree->gtOp.gtOp2;
648 assert(!"invalid address");
652 /*****************************************************************************
654 * Generate an instruction that has one operand given by a tree (which has
655 * been made addressable) and another that is a register.
// Generate "ins [tree], reg": one operand is the addressable 'tree', the
// other a register. On load/store architectures a read-modify-write
// against a local is expanded into load / op / store through a temp
// register.
658 void CodeGen::inst_TT_RV(instruction ins, GenTree* tree, regNumber reg, unsigned offs, emitAttr size, insFlags flags)
660 assert(reg != REG_STK);
664 /* Is this a spilled value? */
666 if (tree->gtFlags & GTF_SPILLED)
668 assert(!"ISSUE: If this can happen, we need to generate 'ins [ebp+spill]'");
// When no size is supplied, infer it from the tree's type.
671 if (size == EA_UNKNOWN)
675 size = EA_ATTR(genTypeSize(tree->TypeGet()));
679 size = emitTypeSize(tree->TypeGet());
683 switch (tree->gtOper)
689 inst_set_SV_var(tree)
693 case GT_STORE_LCL_FLD:
694 offs += tree->gtLclFld.gtLclOffs;
// NOTE(review): 'varNum' is declared in lines not visible here.
699 varNum = tree->gtLclVarCommon.gtLclNum;
700 assert(varNum < compiler->lvaCount);
702 #if CPU_LOAD_STORE_ARCH
// Non-store ops on a load/store arch need the load/op/store expansion.
703 if (!getEmitter()->emitInsIsStore(ins))
705 // TODO-LdStArch-Bug: Should regTmp be a dst on the node or an internal reg?
706 // Either way, it is not currently being handled by Lowering.
707 regNumber regTmp = tree->gtRegNum;
708 assert(regTmp != REG_NA);
709 getEmitter()->emitIns_R_S(ins_Load(tree->TypeGet()), size, regTmp, varNum, offs);
710 getEmitter()->emitIns_R_R(ins, size, regTmp, reg, flags);
711 getEmitter()->emitIns_S_R(ins_Store(tree->TypeGet()), size, regTmp, varNum, offs);
713 regSet.verifyRegUsed(regTmp);
718 // ins is a Store instruction
720 getEmitter()->emitIns_S_R(ins, size, reg, varNum, offs);
722 // If we need to set the flags then add an extra movs reg,reg instruction
723 if (flags == INS_FLAGS_SET)
724 getEmitter()->emitIns_R_R(INS_mov, size, reg, reg, INS_FLAGS_SET);
730 // Make sure FP instruction size matches the operand size
731 // (We optimized constant doubles to floats when we can, just want to
732 // make sure that we don't mistakenly use 8 bytes when the
734 assert(!isFloatRegType(tree->gtType) || genTypeSize(tree->gtType) == EA_SIZE_IN_BYTES(size));
736 #if CPU_LOAD_STORE_ARCH
737 if (!getEmitter()->emitInsIsStore(ins))
739 NYI("Store of GT_CLS_VAR not supported for ARM");
742 #endif // CPU_LOAD_STORE_ARCH
744 getEmitter()->emitIns_C_R(ins, size, tree->gtClsVar.gtClsVarHnd, reg, offs);
752 assert(!"inst_TT_RV not supported for GT_IND, GT_NULLCHECK or GT_ARR_ELEM");
757 // tree->gtOp.gtOp1 - already processed by genCreateAddrMode()
758 tree = tree->gtOp.gtOp2;
762 assert(!"invalid address");
766 /*****************************************************************************
768 * Generate an instruction that has one operand given by a register and the
769 * other one by a tree (which has been made addressable).
// Generate "ins reg, [tree]": one operand is a register, the other the
// addressable 'tree'. Locals use the emitter's S forms (with a temp-reg
// expansion on arm), class statics the R_C form, constants re-dispatch to
// inst_RV_IV, and address-mode subtrees recurse through gtOp2.
772 void CodeGen::inst_RV_TT(instruction ins,
777 insFlags flags /* = INS_FLAGS_DONT_CARE */)
779 assert(reg != REG_STK);
// When no size is supplied, infer it from the tree's type.
781 if (size == EA_UNKNOWN)
785 size = emitTypeSize(tree->TypeGet());
789 size = EA_ATTR(genTypeSize(tree->TypeGet()));
793 #ifdef _TARGET_XARCH_
795 // If it is a GC type and the result is not, then either
797 // 2) optOptimizeBools() optimized if (ref != 0 && ref != 0) to if (ref & ref)
798 // 3) optOptimizeBools() optimized if (ref == 0 || ref == 0) to if (ref | ref)
799 // 4) byref - byref = int
800 if (tree->gtType == TYP_REF && !EA_IS_GCREF(size))
802 assert((EA_IS_BYREF(size) && ins == INS_add) || (ins == INS_lea || ins == INS_and || ins == INS_or));
804 if (tree->gtType == TYP_BYREF && !EA_IS_BYREF(size))
806 assert(ins == INS_lea || ins == INS_and || ins == INS_or || ins == INS_sub);
811 #if CPU_LOAD_STORE_ARCH
// NOTE(review): both operands of this || test the SAME macro; one of them
// is presumably meant to be a different target (looks like a copy/paste
// slip) — confirm against the other CPU_LOAD_STORE_ARCH paths before use.
814 #if defined(_TARGET_ARM64_) || defined(_TARGET_ARM64_)
815 ins = ins_Move_Extend(tree->TypeGet(), false);
817 NYI("CodeGen::inst_RV_TT with INS_mov");
820 #endif // CPU_LOAD_STORE_ARCH
824 /* Is this a spilled value? */
826 if (tree->gtFlags & GTF_SPILLED)
828 assert(!"ISSUE: If this can happen, we need to generate 'ins [ebp+spill]'");
831 switch (tree->gtOper)
836 case GT_LCL_VAR_ADDR:
838 inst_set_SV_var(tree)
841 case GT_LCL_FLD_ADDR:
843 offs += tree->gtLclFld.gtLclOffs;
// NOTE(review): 'varNum' is declared in lines not visible here.
847 varNum = tree->gtLclVarCommon.gtLclNum;
848 assert(varNum < compiler->lvaCount);
854 ins = ins_Load(tree->TypeGet());
// Direct R_S form cannot also set flags on this path.
864 assert(flags != INS_FLAGS_SET);
865 getEmitter()->emitIns_R_S(ins, size, reg, varNum, offs);
// arm fallback: load into a temp register, then apply the op reg,regTmp.
870 regTmp = tree->gtRegNum;
872 getEmitter()->emitIns_R_S(ins_Load(tree->TypeGet()), size, regTmp, varNum, offs);
873 getEmitter()->emitIns_R_R(ins, size, reg, regTmp, flags);
875 regSet.verifyRegUsed(regTmp);
878 #else // !_TARGET_ARM_
879 getEmitter()->emitIns_R_S(ins, size, reg, varNum, offs);
881 #endif // !_TARGET_ARM_
884 // Make sure FP instruction size matches the operand size
885 // (We optimized constant doubles to floats when we can, just want to
886 // make sure that we don't mistakenly use 8 bytes when the
888 assert(!isFloatRegType(tree->gtType) || genTypeSize(tree->gtType) == EA_SIZE_IN_BYTES(size));
890 #if CPU_LOAD_STORE_ARCH
891 assert(!"GT_CLS_VAR not supported in ARM backend");
892 #else // CPU_LOAD_STORE_ARCH
893 getEmitter()->emitIns_R_C(ins, size, reg, tree->gtClsVar.gtClsVarHnd, offs);
894 #endif // CPU_LOAD_STORE_ARCH
902 assert(!"inst_RV_TT not supported for GT_IND, GT_NULLCHECK, GT_ARR_ELEM or GT_LEA");
910 // TODO-CrossBitness: we wouldn't need the cast below if GenTreeIntCon::gtIconVal had target_ssize_t type.
911 inst_RV_IV(ins, reg, (target_ssize_t)tree->gtIntCon.gtIconVal, emitActualTypeSize(tree->TypeGet()), flags);
916 assert(size == EA_4BYTE || size == EA_8BYTE);
918 #ifdef _TARGET_AMD64_
920 #endif // _TARGET_AMD64_
// Long constants: pick the low or high half depending on 'offs' (the
// selecting condition is not visible in this view).
922 target_ssize_t constVal;
926 constVal = (target_ssize_t)(tree->gtLngCon.gtLconVal);
931 constVal = (target_ssize_t)(tree->gtLngCon.gtLconVal >> 32);
935 inst_RV_IV(ins, reg, constVal, size, flags);
939 tree = tree->gtOp.gtOp2;
943 assert(!"invalid address");
947 /*****************************************************************************
949 * Generate a "shift reg, icon" instruction.
// Generate a "shift reg, icon" instruction. On xarch the shift opcode is
// first mapped to its shift-by-constant form; the choice between the
// implicit-1 form (emitIns_R) and the immediate form (emitIns_R_I) is made
// by a condition not visible in this view.
952 void CodeGen::inst_RV_SH(
953 instruction ins, emitAttr size, regNumber reg, unsigned val, insFlags flags /* = INS_FLAGS_DONT_CARE */)
955 #if defined(_TARGET_ARM_)
960 getEmitter()->emitIns_R_I(ins, size, reg, val, flags);
962 #elif defined(_TARGET_XARCH_)
964 #ifdef _TARGET_AMD64_
965 // X64 JB BE insures only encodable values make it here.
966 // x86 can encode 8 bits, though it masks down to 5 or 6
967 // depending on 32-bit or 64-bit registers are used.
968 // Here we will allow anything that is encodable.
972 ins = genMapShiftInsToShiftByConstantIns(ins, val);
976 getEmitter()->emitIns_R(ins, size, reg);
980 getEmitter()->emitIns_R_I(ins, size, reg, val);
984 NYI("inst_RV_SH - unknown target");
988 /*****************************************************************************
990 * Generate a "shift [r/m], icon" instruction.
// Generate a "shift [r/m], icon" instruction, delegating operand handling
// to inst_TT. On xarch the opcode is first mapped to its shift-by-constant
// form; the branch structure around the two inst_TT calls (including the
// zero-shift case) is partially elided from this view.
993 void CodeGen::inst_TT_SH(instruction ins, GenTree* tree, unsigned val, unsigned offs)
995 #ifdef _TARGET_XARCH_
998 // Shift by 0 - why are you wasting our precious time????
1002 ins = genMapShiftInsToShiftByConstantIns(ins, val);
1005 inst_TT(ins, tree, offs, 0, emitTypeSize(tree->TypeGet()));
1009 inst_TT(ins, tree, offs, val, emitTypeSize(tree->TypeGet()));
1011 #endif // _TARGET_XARCH_
1014 inst_TT(ins, tree, offs, val, emitTypeSize(tree->TypeGet()));
1018 /*****************************************************************************
1020 * Generate a "shift [addr], cl" instruction.
1023 void CodeGen::inst_TT_CL(instruction ins, GenTree* tree, unsigned offs)
1025 inst_TT(ins, tree, offs, 0, emitTypeSize(tree->TypeGet()));
1028 /*****************************************************************************
1030 * Generate an instruction of the form "op reg1, reg2, icon".
1033 #if defined(_TARGET_XARCH_)
1034 void CodeGen::inst_RV_RV_IV(instruction ins, emitAttr size, regNumber reg1, regNumber reg2, unsigned ival)
1036 assert(ins == INS_shld || ins == INS_shrd || ins == INS_shufps || ins == INS_shufpd || ins == INS_pshufd ||
1037 ins == INS_cmpps || ins == INS_cmppd || ins == INS_dppd || ins == INS_dpps || ins == INS_insertps ||
1038 ins == INS_roundps || ins == INS_roundss || ins == INS_roundpd || ins == INS_roundsd);
1040 getEmitter()->emitIns_R_R_I(ins, size, reg1, reg2, ival);
1044 /*****************************************************************************
1046 * Generate an instruction with two registers, the second one being a byte
1047 * or word register (i.e. this is something like "movzx eax, cl").
1050 void CodeGen::inst_RV_RR(instruction ins, emitAttr size, regNumber reg1, regNumber reg2)
1052 assert(size == EA_1BYTE || size == EA_2BYTE);
1053 #ifdef _TARGET_XARCH_
1054 assert(ins == INS_movsx || ins == INS_movzx);
1055 assert(size != EA_1BYTE || (genRegMask(reg2) & RBM_BYTE_REGS));
1058 getEmitter()->emitIns_R_R(ins, size, reg1, reg2);
1061 /*****************************************************************************
1063 * The following should all end up inline in compiler.hpp at some point.
1066 void CodeGen::inst_ST_RV(instruction ins, TempDsc* tmp, unsigned ofs, regNumber reg, var_types type)
1068 getEmitter()->emitIns_S_R(ins, emitActualTypeSize(type), reg, tmp->tdTempNum(), ofs);
1071 void CodeGen::inst_ST_IV(instruction ins, TempDsc* tmp, unsigned ofs, int val, var_types type)
1073 getEmitter()->emitIns_S_I(ins, emitActualTypeSize(type), tmp->tdTempNum(), ofs, val);
1076 #if FEATURE_FIXED_OUT_ARGS
1077 /*****************************************************************************
1079 * Generate an instruction that references the outgoing argument space
1080 * like "str r3, [sp+0x04]"
1083 void CodeGen::inst_SA_RV(instruction ins, unsigned ofs, regNumber reg, var_types type)
1085 assert(ofs < compiler->lvaOutgoingArgSpaceSize);
1087 getEmitter()->emitIns_S_R(ins, emitActualTypeSize(type), reg, compiler->lvaOutgoingArgSpaceVar, ofs);
1090 void CodeGen::inst_SA_IV(instruction ins, unsigned ofs, int val, var_types type)
1092 assert(ofs < compiler->lvaOutgoingArgSpaceSize);
1094 getEmitter()->emitIns_S_I(ins, emitActualTypeSize(type), compiler->lvaOutgoingArgSpaceVar, ofs, val);
1096 #endif // FEATURE_FIXED_OUT_ARGS
1098 /*****************************************************************************
1100 * Generate an instruction with one register and one operand that is byte
1101 * or short (e.g. something like "movzx eax, byte ptr [edx]").
1104 void CodeGen::inst_RV_ST(instruction ins, emitAttr size, regNumber reg, GenTree* tree)
1106 assert(size == EA_1BYTE || size == EA_2BYTE);
1108 inst_RV_TT(ins, reg, tree, 0, size);
// Generate "ins reg, [spill temp + ofs]". When 'size' is EA_UNKNOWN it is
// inferred from 'type'. On arm, only certain instructions are valid against
// a stack operand (the instruction dispatch is elided from this view); load
// opcodes should be obtained via ins_Load(type) instead.
1111 void CodeGen::inst_RV_ST(instruction ins, regNumber reg, TempDsc* tmp, unsigned ofs, var_types type, emitAttr size)
1113 if (size == EA_UNKNOWN)
1115 size = emitActualTypeSize(type);
1122 assert(!"Please call ins_Load(type) to get the load instruction");
1133 getEmitter()->emitIns_R_S(ins, size, reg, tmp->tdTempNum(), ofs);
1137 assert(!"Default inst_RV_ST case not supported for Arm");
1140 #else // !_TARGET_ARM_
1141 getEmitter()->emitIns_R_S(ins, size, reg, tmp->tdTempNum(), ofs);
1142 #endif // !_TARGET_ARM_
1145 void CodeGen::inst_mov_RV_ST(regNumber reg, GenTree* tree)
1147 /* Figure out the size of the value being loaded */
1149 emitAttr size = EA_ATTR(genTypeSize(tree->gtType));
1150 instruction loadIns = ins_Move_Extend(tree->TypeGet(), false);
1152 if (size < EA_4BYTE)
1154 /* Generate the "movsx/movzx" opcode */
1156 inst_RV_ST(loadIns, size, reg, tree);
1160 /* Compute op1 into the target register */
1162 inst_RV_TT(loadIns, reg, tree);
1165 #ifdef _TARGET_XARCH_
1166 void CodeGen::inst_FS_ST(instruction ins, emitAttr size, TempDsc* tmp, unsigned ofs)
1168 getEmitter()->emitIns_S(ins, size, tmp->tdTempNum(), ofs);
// arm: returns true if 'imm' is encodable as the immediate operand of
// 'ins' (with the given flags setting). Load/store displacements are
// checked separately; the per-instruction-group switch labels that select
// between the checks below are elided from this view.
1173 bool CodeGenInterface::validImmForInstr(instruction ins, target_ssize_t imm, insFlags flags)
1175 if (getEmitter()->emitInsIsLoadOrStore(ins) && !instIsFP(ins))
1177 return validDispForLdSt(imm, TYP_INT);
1180 bool result = false;
// Accept imm or its negation (add/sub style interchange).
1185 if (validImmForAlu(imm) || validImmForAlu(-imm))
// Accept imm or its bitwise complement (and/bic style interchange).
1194 if (validImmForAlu(imm) || validImmForAlu(~imm))
1199 if (validImmForMov(imm))
1205 if ((unsigned_abs(imm) <= 0x00000fff) && (flags != INS_FLAGS_SET)) // 12-bit immediate
1211 if (validImmForAdd(imm, flags))
1221 if (validImmForAlu(imm))
// Shift amounts must be in [1..32].
1229 if (imm > 0 && imm <= 32)
// Word-aligned 10-bit displacement.
1235 if ((imm & 0x3FC) == imm)
1244 bool CodeGen::arm_Valid_Imm_For_Instr(instruction ins, target_ssize_t imm, insFlags flags)
1246 return validImmForInstr(ins, imm, flags);
// arm: returns true if 'disp' is encodable as a load/store displacement for
// the given type. Floating-point accesses require a word-aligned 10-bit
// displacement; integer accesses allow -0xff..0xfff. The return statements
// are elided from this view.
1249 bool CodeGenInterface::validDispForLdSt(target_ssize_t disp, var_types type)
1251 if (varTypeIsFloating(type))
1253 if ((disp & 0x3FC) == disp)
1260 if ((disp >= -0x00ff) && (disp <= 0x0fff))
1266 bool CodeGen::arm_Valid_Disp_For_LdSt(target_ssize_t disp, var_types type)
1268 return validDispForLdSt(disp, type);
1271 bool CodeGenInterface::validImmForAlu(target_ssize_t imm)
1273 return emitter::emitIns_valid_imm_for_alu(imm);
1275 bool CodeGen::arm_Valid_Imm_For_Alu(target_ssize_t imm)
1277 return validImmForAlu(imm);
1280 bool CodeGenInterface::validImmForMov(target_ssize_t imm)
1282 return emitter::emitIns_valid_imm_for_mov(imm);
1284 bool CodeGen::arm_Valid_Imm_For_Mov(target_ssize_t imm)
1286 return validImmForMov(imm);
1289 bool CodeGen::arm_Valid_Imm_For_Small_Mov(regNumber reg, target_ssize_t imm, insFlags flags)
1291 return emitter::emitIns_valid_imm_for_small_mov(reg, imm, flags);
1294 bool CodeGenInterface::validImmForAdd(target_ssize_t imm, insFlags flags)
1296 return emitter::emitIns_valid_imm_for_add(imm, flags);
1298 bool CodeGen::arm_Valid_Imm_For_Add(target_ssize_t imm, insFlags flags)
1300 return emitter::emitIns_valid_imm_for_add(imm, flags);
1303 // Check "add Rd,SP,i10"
1304 bool CodeGen::arm_Valid_Imm_For_Add_SP(target_ssize_t imm)
1306 return emitter::emitIns_valid_imm_for_add_sp(imm);
// arm: returns true if 'addr' can be reached with a direct "BL"
// instruction — either because we are an altjit prejitting for a
// mismatched VM, or because the VM reports the target reachable via a
// Thumb branch24 relocation. The expression below is the tail of a return
// statement whose beginning is elided from this view.
1309 bool CodeGenInterface::validImmForBL(ssize_t addr)
1312 // If we are running the altjit for NGEN, then assume we can use the "BL" instruction.
1313 // This matches the usual behavior for NGEN, since we normally do generate "BL".
1314 (!compiler->info.compMatchedVM && compiler->opts.jitFlags->IsSet(JitFlags::JIT_FLAG_PREJIT)) ||
1315 (compiler->eeGetRelocTypeHint((void*)addr) == IMAGE_REL_BASED_THUMB_BRANCH24);
1317 bool CodeGen::arm_Valid_Imm_For_BL(ssize_t addr)
1319 return validImmForBL(addr);
1322 // Returns true if this instruction writes to a destination register
//
// Arguments:
//    ins - the instruction to examine
// (The per-instruction dispatch implementing the check is elided from this
// view.)
1324 bool CodeGen::ins_Writes_Dest(instruction ins)
1339 #endif // _TARGET_ARM_
// arm64 variant: per the comment below, every call target is treated as
// reachable (the return statement itself is elided from this view).
1341 #if defined(_TARGET_ARM64_)
1342 bool CodeGenInterface::validImmForBL(ssize_t addr)
1344 // On arm64, we always assume a call target is in range and generate a 28-bit relative
1345 // 'bl' instruction. If this isn't sufficient range, the VM will generate a jump stub when
1346 // we call recordRelocation(). See the IMAGE_REL_ARM64_BRANCH26 case in jitinterface.cpp
1347 // (for JIT) or zapinfo.cpp (for NGEN). If we cannot allocate a jump stub, it is fatal.
1350 #endif // _TARGET_ARM64_
1352 /*****************************************************************************
1354 * Get the machine dependent instruction for performing sign/zero extension.
1357 * srcType - source type
1358 * srcInReg - whether source is in a register
// Get the machine-dependent instruction for a sign/zero-extending move of
// 'srcType'. 'srcInReg' selects the reg-to-reg form where it matters.
// Several branch bodies and preprocessor selectors are elided from this
// view; the final assert guarantees an instruction was chosen.
1360 instruction CodeGen::ins_Move_Extend(var_types srcType, bool srcInReg)
1362 instruction ins = INS_invalid;
1364 if (varTypeIsSIMD(srcType))
1366 #if defined(_TARGET_XARCH_)
1367 // SSE2/AVX requires destination to be a reg always.
1368 // If src is in reg means, it is a reg-reg move.
1370 // SSE2 Note: always prefer movaps/movups over movapd/movupd since the
1371 // former doesn't require 66h prefix and one byte smaller than the
1374 // TODO-CQ: based on whether src type is aligned use movaps instead
1376 return (srcInReg) ? INS_movaps : INS_movups;
1377 #elif defined(_TARGET_ARM64_)
1378 return (srcInReg) ? INS_mov : ins_Load(srcType);
1379 #else // !defined(_TARGET_ARM64_) && !defined(_TARGET_XARCH_)
1380 assert(!"unhandled SIMD type");
1381 #endif // !defined(_TARGET_ARM64_) && !defined(_TARGET_XARCH_)
1384 #if defined(_TARGET_XARCH_)
1385 if (varTypeIsFloating(srcType))
1387 if (srcType == TYP_DOUBLE)
1389 return (srcInReg) ? INS_movaps : INS_movsdsse2;
1391 else if (srcType == TYP_FLOAT)
1393 return (srcInReg) ? INS_movaps : INS_movss;
1397 assert(!"unhandled floating type");
1400 #elif defined(_TARGET_ARM_)
1401 if (varTypeIsFloating(srcType))
1404 assert(!varTypeIsFloating(srcType));
// Integer paths below: pick a widening move based on size/signedness.
1407 #if defined(_TARGET_XARCH_)
1408 if (!varTypeIsSmall(srcType))
1412 else if (varTypeIsUnsigned(srcType))
1420 #elif defined(_TARGET_ARM_)
1422 // Register to Register zero/sign extend operation
1426 if (!varTypeIsSmall(srcType))
1430 else if (varTypeIsUnsigned(srcType))
1432 if (varTypeIsByte(srcType))
1439 if (varTypeIsByte(srcType))
// Not in a register: fall back to a plain (extending) load.
1447 ins = ins_Load(srcType);
1449 #elif defined(_TARGET_ARM64_)
1451 // Register to Register zero/sign extend operation
1455 if (varTypeIsUnsigned(srcType))
1457 if (varTypeIsByte(srcType))
1461 else if (varTypeIsShort(srcType))
1467 // A mov Rd, Rm instruction performs the zero extend
1468 // for the upper 32 bits when the size is EA_4BYTE
1475 if (varTypeIsByte(srcType))
1479 else if (varTypeIsShort(srcType))
1485 if (srcType == TYP_INT)
1498 ins = ins_Load(srcType);
1501 NYI("ins_Move_Extend");
1503 assert(ins != INS_invalid);
1507 /*****************************************************************************
1509 * Get the machine dependent instruction for performing a load for srcType
1512 * srcType - source type
1513 * aligned - whether source is properly aligned if srcType is a SIMD type
// Get the machine-dependent instruction for loading a value of 'srcType'.
// 'aligned' selects the aligned SIMD move where the target distinguishes
// them. Several branch bodies are elided from this view; the final assert
// guarantees an instruction was chosen.
1515 instruction CodeGenInterface::ins_Load(var_types srcType, bool aligned /*=false*/)
1517 instruction ins = INS_invalid;
1519 if (varTypeIsSIMD(srcType))
1521 #if defined(_TARGET_XARCH_)
1523 if (srcType == TYP_SIMD8)
1525 return INS_movsdsse2;
1528 #endif // FEATURE_SIMD
// VEX-encoded targets can use movapd/movupd without the size penalty.
1529 if (compiler->canUseVexEncoding())
1531 return (aligned) ? INS_movapd : INS_movupd;
1535 // SSE2 Note: always prefer movaps/movups over movapd/movupd since the
1536 // former doesn't require 66h prefix and one byte smaller than the
1538 return (aligned) ? INS_movaps : INS_movups;
1540 #elif defined(_TARGET_ARM64_)
1543 assert(!"ins_Load with SIMD type");
1547 if (varTypeIsFloating(srcType))
1549 #if defined(_TARGET_XARCH_)
1550 if (srcType == TYP_DOUBLE)
1552 return INS_movsdsse2;
1554 else if (srcType == TYP_FLOAT)
1560 assert(!"unhandled floating type");
1562 #elif defined(_TARGET_ARM64_)
1564 #elif defined(_TARGET_ARM_)
1567 assert(!varTypeIsFloating(srcType));
// Integer loads: pick the (sign/zero extending) load by size/signedness.
1571 #if defined(_TARGET_XARCH_)
1572 if (!varTypeIsSmall(srcType))
1576 else if (varTypeIsUnsigned(srcType))
1585 #elif defined(_TARGET_ARMARCH_)
1586 if (!varTypeIsSmall(srcType))
1588 #if defined(_TARGET_ARM64_)
1589 if (!varTypeIsI(srcType) && !varTypeIsUnsigned(srcType))
1594 #endif // defined(_TARGET_ARM64_)
1599 else if (varTypeIsByte(srcType))
1601 if (varTypeIsUnsigned(srcType))
1606 else if (varTypeIsShort(srcType))
1608 if (varTypeIsUnsigned(srcType))
1617 assert(ins != INS_invalid);
1621 /*****************************************************************************
1623 * Get the machine dependent instruction for performing a reg-reg copy for dstType
1626 * dstType - destination type
// Get the machine-dependent instruction for a reg-reg copy of 'dstType'.
// The return statements of most branches are elided from this view.
1628 instruction CodeGen::ins_Copy(var_types dstType)
1630 #if defined(_TARGET_XARCH_)
1631 if (varTypeIsSIMD(dstType))
1635 else if (varTypeIsFloating(dstType))
1637 // Both float and double copy can use movaps
1644 #elif defined(_TARGET_ARM64_)
1645 if (varTypeIsFloating(dstType))
1653 #elif defined(_TARGET_ARM_)
1654 assert(!varTypeIsSIMD(dstType));
1655 if (varTypeIsFloating(dstType))
1663 #elif defined(_TARGET_X86_)
// x86 (32-bit) path handles neither SIMD nor floating copies here.
1664 assert(!varTypeIsSIMD(dstType));
1665 assert(!varTypeIsFloating(dstType));
1668 #error "Unknown _TARGET_"
1672 /*****************************************************************************
1674 * Get the machine dependent instruction for performing a store for dstType
1677 * dstType - destination type
1678 * aligned - whether destination is properly aligned if dstType is a SIMD type
// Get the machine-dependent instruction for storing a value of 'dstType'.
// 'aligned' selects the aligned SIMD move where the target distinguishes
// them. Several branch bodies are elided from this view; the final assert
// guarantees an instruction was chosen.
1680 instruction CodeGenInterface::ins_Store(var_types dstType, bool aligned /*=false*/)
1682 instruction ins = INS_invalid;
1684 #if defined(_TARGET_XARCH_)
1685 if (varTypeIsSIMD(dstType))
1688 if (dstType == TYP_SIMD8)
1690 return INS_movsdsse2;
1693 #endif // FEATURE_SIMD
// VEX-encoded targets can use movapd/movupd without the size penalty.
1694 if (compiler->canUseVexEncoding())
1696 return (aligned) ? INS_movapd : INS_movupd;
1700 // SSE2 Note: always prefer movaps/movups over movapd/movupd since the
1701 // former doesn't require 66h prefix and one byte smaller than the
1703 return (aligned) ? INS_movaps : INS_movups;
1706 else if (varTypeIsFloating(dstType))
1708 if (dstType == TYP_DOUBLE)
1710 return INS_movsdsse2;
1712 else if (dstType == TYP_FLOAT)
1718 assert(!"unhandled floating type");
1721 #elif defined(_TARGET_ARM64_)
1722 if (varTypeIsSIMD(dstType) || varTypeIsFloating(dstType))
1724 // All sizes of SIMD and FP instructions use INS_str
1727 #elif defined(_TARGET_ARM_)
1728 assert(!varTypeIsSIMD(dstType));
1729 if (varTypeIsFloating(dstType))
1734 assert(!varTypeIsSIMD(dstType));
1735 assert(!varTypeIsFloating(dstType));
// Integer stores: pick the store width by size on armarch.
1738 #if defined(_TARGET_XARCH_)
1740 #elif defined(_TARGET_ARMARCH_)
1741 if (!varTypeIsSmall(dstType))
1743 else if (varTypeIsByte(dstType))
1745 else if (varTypeIsShort(dstType))
1751 assert(ins != INS_invalid);
1755 #if defined(_TARGET_XARCH_)
// xarch: true iff 'ins' is the plain register move instruction.
1757 bool CodeGen::isMoveIns(instruction ins)
1759 return (ins == INS_mov);
// xarch: legacy float-load selector; deprecated in favor of ins_Load()/ins_Store().
1762 instruction CodeGenInterface::ins_FloatLoad(var_types type)
1764 // Do Not use this routine in RyuJIT backend. Instead use ins_Load()/ins_Store()
1768 // everything is just an addressing mode variation on x64
// xarch: legacy float-store selector; deprecated in favor of ins_Store().
1769 instruction CodeGen::ins_FloatStore(var_types type)
1771 // Do Not use this routine in RyuJIT backend. Instead use ins_Store()
// xarch: legacy float-copy selector; deprecated in favor of ins_Load().
1775 instruction CodeGen::ins_FloatCopy(var_types type)
1777 // Do Not use this routine in RyuJIT backend. Instead use ins_Load().
// xarch: unordered scalar FP compare — ucomiss for float, ucomisd for double.
1781 instruction CodeGen::ins_FloatCompare(var_types type)
1783 return (type == TYP_FLOAT) ? INS_ucomiss : INS_ucomisd;
// xarch: instruction for moving an integer register value into an XMM register.
// One instruction covers both dword and qword moves; operand size is handled
// by the emitter.
1786 instruction CodeGen::ins_CopyIntToFloat(var_types srcType, var_types dstType)
1788 // On SSE2/AVX - the same instruction is used for moving double/quad word to XMM/YMM register.
1789 assert((srcType == TYP_INT) || (srcType == TYP_UINT) || (srcType == TYP_LONG) || (srcType == TYP_ULONG));
1791 #if !defined(_TARGET_64BIT_)
1792 // No 64-bit registers on x86.
1793 assert((srcType != TYP_LONG) && (srcType != TYP_ULONG));
1794 #endif // !defined(_TARGET_64BIT_)
1796 return INS_mov_i2xmm;
// xarch: instruction for moving an XMM register value into an integer register.
// Mirror of ins_CopyIntToFloat.
1799 instruction CodeGen::ins_CopyFloatToInt(var_types srcType, var_types dstType)
1801 // On SSE2/AVX - the same instruction is used for moving double/quad word of XMM/YMM to an integer register.
1802 assert((dstType == TYP_INT) || (dstType == TYP_UINT) || (dstType == TYP_LONG) || (dstType == TYP_ULONG));
1804 #if !defined(_TARGET_64BIT_)
1805 // No 64-bit registers on x86.
1806 assert((dstType != TYP_LONG) && (dstType != TYP_ULONG));
1807 #endif // !defined(_TARGET_64BIT_)
1809 return INS_mov_xmm2i;
// xarch: scalar FP arithmetic instruction for 'oper' (add/sub/mul/div),
// double vs. float variant chosen by 'type'.
// NOTE(review): the switch/case lines are elided in this listing; only the
// per-operator returns are visible. Code left byte-identical.
1812 instruction CodeGen::ins_MathOp(genTreeOps oper, var_types type)
1817 return type == TYP_DOUBLE ? INS_addsd : INS_addss;
1819 return type == TYP_DOUBLE ? INS_subsd : INS_subss;
1821 return type == TYP_DOUBLE ? INS_mulsd : INS_mulss;
1823 return type == TYP_DOUBLE ? INS_divsd : INS_divss;
// xarch: scalar square-root instruction for 'type'; asserts on non-FP types.
// NOTE(review): the assignments to 'ins' for the double/float branches are
// elided in this listing. Code left byte-identical.
1829 instruction CodeGen::ins_FloatSqrt(var_types type)
1831 instruction ins = INS_invalid;
1833 if (type == TYP_DOUBLE)
1837 else if (type == TYP_FLOAT)
1843 assert(!"ins_FloatSqrt: Unsupported type");
// xarch: conversion instruction between integer and floating-point types
// ('from' -> 'to'). Truncating forms (cvtt*) are used for FP -> int.
// NOTE(review): the switch scaffolding over 'to'/'from' is elided in this
// listing; only the return statements are visible. Code left byte-identical.
1850 // Conversions to or from floating point values
1851 instruction CodeGen::ins_FloatConv(var_types to, var_types from)
1853 // AVX: For now we support only conversion from Int/Long -> float
1857 // int/long -> float/double use the same instruction but type size would be different.
1863 return INS_cvtsi2ss;
1865 return INS_cvtsi2sd;
1875 return INS_cvttss2si;
1877 return INS_cvttss2si;
// float -> float is just a move/extend.
1879 return ins_Move_Extend(TYP_FLOAT, false);
1881 return INS_cvtss2sd;
1891 return INS_cvttsd2si;
1893 return INS_cvttsd2si;
1895 return INS_cvtsd2ss;
// double -> double is just a move/extend.
1897 return ins_Move_Extend(TYP_DOUBLE, false);
1908 #elif defined(_TARGET_ARM_)
// ARM: both the VFP move and the integer move count as move instructions.
1910 bool CodeGen::isMoveIns(instruction ins)
1912 return (ins == INS_vmov) || (ins == INS_mov);
// ARM: floating-point load; only float/double are valid (return elided in listing).
1915 instruction CodeGenInterface::ins_FloatLoad(var_types type)
1917 assert(type == TYP_DOUBLE || type == TYP_FLOAT);
// ARM: floating-point store; only float/double are valid (return elided in listing).
1920 instruction CodeGen::ins_FloatStore(var_types type)
1922 assert(type == TYP_DOUBLE || type == TYP_FLOAT);
// ARM: floating-point register copy; only float/double are valid (return elided in listing).
1925 instruction CodeGen::ins_FloatCopy(var_types type)
1927 assert(type == TYP_DOUBLE || type == TYP_FLOAT);
// ARM: move integer register(s) into a VFP register. 64-bit sources need the
// register-pair form (vmov to a D register); 32-bit sources use the single form.
1931 instruction CodeGen::ins_CopyIntToFloat(var_types srcType, var_types dstType)
1933 assert((dstType == TYP_FLOAT) || (dstType == TYP_DOUBLE));
1934 assert((srcType == TYP_INT) || (srcType == TYP_UINT) || (srcType == TYP_LONG) || (srcType == TYP_ULONG));
1936 if ((srcType == TYP_LONG) || (srcType == TYP_ULONG))
1938 return INS_vmov_i2d;
1942 return INS_vmov_i2f;
// ARM: move a VFP register into integer register(s). Mirror of ins_CopyIntToFloat:
// 64-bit destinations use the D-register pair form.
1946 instruction CodeGen::ins_CopyFloatToInt(var_types srcType, var_types dstType)
1948 assert((srcType == TYP_FLOAT) || (srcType == TYP_DOUBLE));
1949 assert((dstType == TYP_INT) || (dstType == TYP_UINT) || (dstType == TYP_LONG) || (dstType == TYP_ULONG));
1951 if ((dstType == TYP_LONG) || (dstType == TYP_ULONG))
1953 return INS_vmov_d2i;
1957 return INS_vmov_f2i;
// ARM: FP compare selector — intentionally unimplemented on this target.
1961 instruction CodeGen::ins_FloatCompare(var_types type)
1963 // Not used and not implemented
// ARM: FP square-root selector — intentionally unimplemented on this target.
1967 instruction CodeGen::ins_FloatSqrt(var_types type)
1969 // Not used and not implemented
// ARM: FP arithmetic instruction for 'oper' (body elided in this listing —
// presumably a switch returning the vadd/vsub/vmul/vdiv variants; verify
// against the full source).
1973 instruction CodeGen::ins_MathOp(genTreeOps oper, var_types type)
// ARM: conversion instruction between integer and floating-point types
// ('from' -> 'to') using the VFP vcvt family. Long <-> FP conversions are
// not yet implemented (NYI).
// NOTE(review): switch scaffolding elided in this listing; only the returns
// and NYIs are visible. Code left byte-identical.
1992 instruction CodeGen::ins_FloatConv(var_types to, var_types from)
// signed int -> float/double
2000 return INS_vcvt_i2f;
2002 return INS_vcvt_i2d;
// unsigned int -> float/double
2011 return INS_vcvt_u2f;
2013 return INS_vcvt_u2d;
2022 NYI("long to float");
2024 NYI("long to double");
// float -> int/uint/double
2033 return INS_vcvt_f2i;
2035 return INS_vcvt_f2u;
2037 NYI("float to long");
2039 return INS_vcvt_f2d;
// double -> int/uint/float
2050 return INS_vcvt_d2i;
2052 return INS_vcvt_d2u;
2054 NYI("double to long");
2056 return INS_vcvt_d2f;
2068 #endif // #elif defined(_TARGET_ARM_)
2070 /*****************************************************************************
2072 * Machine independent way to return
// Machine-independent return sequence. On xarch emits ret (with a stack-arg
// pop immediate when stkArgSize != 0); on ARM the return is folded into the
// epilog's pop-multiple; on ARM64 this routine must not be used.
2074 void CodeGen::instGen_Return(unsigned stkArgSize)
2076 #if defined(_TARGET_XARCH_)
// Plain 'ret' when there are no caller-popped stack args (branch body elided).
2077 if (stkArgSize == 0)
2083 inst_IV(INS_ret, stkArgSize);
2085 #elif defined(_TARGET_ARM_)
2087 // The return on ARM is folded into the pop multiple instruction
2088 // and as we do not know the exact set of registers that we will
2089 // need to restore (pop) when we first call instGen_Return we will
2090 // instead just not emit anything for this method on the ARM
2091 // The return will be part of the pop multiple and that will be
2092 // part of the epilog that is generated by genFnEpilog()
2093 #elif defined(_TARGET_ARM64_)
2094 // This function shouldn't be used on ARM64.
2097 NYI("instGen_Return");
2101 /*****************************************************************************
2103 * Emit a MemoryBarrier instruction
2105 * Note: all MemoryBarriers instructions can be removed by
2106 * SET COMPlus_JitNoMemoryBarriers=1
// Emits a full memory barrier (or the requested 'barrierType' on ARM64).
// Suppressed entirely when COMPlus_JitNoMemoryBarriers=1.
// xarch uses a locked-or-to-stack idiom; ARM/ARM64 use dmb.
2108 #ifdef _TARGET_ARM64_
2109 void CodeGen::instGen_MemoryBarrier(insBarrier barrierType)
2111 void CodeGen::instGen_MemoryBarrier()
// Debug/testing escape hatch: skip barrier emission.
2115 if (JitConfig.JitNoMemoryBarriers() == 1)
2121 #if defined(_TARGET_XARCH_)
// 'lock or [esp], 0' acts as a full fence on x86/x64 (lock prefix elided in
// this listing — presumably emitted via an instruction option; verify).
2123 getEmitter()->emitIns_I_AR(INS_or, EA_4BYTE, 0, REG_SPBASE, 0);
2124 #elif defined(_TARGET_ARM_)
// dmb with option 0xF = full-system barrier (SY).
2125 getEmitter()->emitIns_I(INS_dmb, EA_4BYTE, 0xf);
2126 #elif defined(_TARGET_ARM64_)
2127 getEmitter()->emitIns_BARR(INS_dmb, barrierType);
2129 #error "Unknown _TARGET_"
2133 /*****************************************************************************
2135 * Machine independent way to move a Zero value into a register
// Machine-independent "reg = 0": xor reg,reg on xarch (smaller encoding,
// breaks dependencies), mov reg,#0 on ARM/ARM64.
2137 void CodeGen::instGen_Set_Reg_To_Zero(emitAttr size, regNumber reg, insFlags flags)
2139 #if defined(_TARGET_XARCH_)
2140 getEmitter()->emitIns_R_R(INS_xor, size, reg, reg);
2141 #elif defined(_TARGET_ARMARCH_)
2142 getEmitter()->emitIns_R_I(INS_mov, size, reg, 0 ARM_ARG(flags));
2144 #error "Unknown _TARGET_"
// Record that 'reg' now holds a live value.
2146 regSet.verifyRegUsed(reg);
2149 /*****************************************************************************
2151 * Machine independent way to set the flags based on
2152 * comparing a register with zero
// Sets condition flags from comparing 'reg' against zero:
// test reg,reg on xarch, cmp reg,#0 on ARM/ARM64.
2154 void CodeGen::instGen_Compare_Reg_To_Zero(emitAttr size, regNumber reg)
2156 #if defined(_TARGET_XARCH_)
2157 getEmitter()->emitIns_R_R(INS_test, size, reg, reg);
2158 #elif defined(_TARGET_ARMARCH_)
2159 getEmitter()->emitIns_R_I(INS_cmp, size, reg, 0);
2161 #error "Unknown _TARGET_"
2165 /*****************************************************************************
2167 * Machine independent way to set the flags based upon
2168 * comparing a register with another register
// Sets condition flags from comparing two registers; cmp exists on all
// supported targets so a single emit covers xarch and ARMARCH.
2170 void CodeGen::instGen_Compare_Reg_To_Reg(emitAttr size, regNumber reg1, regNumber reg2)
2172 #if defined(_TARGET_XARCH_) || defined(_TARGET_ARMARCH_)
2173 getEmitter()->emitIns_R_R(INS_cmp, size, reg1, reg2);
2175 #error "Unknown _TARGET_"
2179 /*****************************************************************************
2181 * Machine independent way to set the flags based upon
2182 * comparing a register with an immediate
// Sets condition flags from comparing 'reg' against immediate 'imm'.
// Zero immediates delegate to the cheaper reg-vs-zero form. Immediates the
// encoding cannot hold assert (callers are expected to pre-validate).
2184 void CodeGen::instGen_Compare_Reg_To_Imm(emitAttr size, regNumber reg, target_ssize_t imm)
2188 instGen_Compare_Reg_To_Zero(size, reg);
2192 #if defined(_TARGET_XARCH_)
2193 #if defined(_TARGET_AMD64_)
// 64-bit cmp only takes a sign-extended 32-bit immediate; relocs also invalid.
2194 if ((EA_SIZE(size) == EA_8BYTE) && (((int)imm != (ssize_t)imm) || EA_IS_CNS_RELOC(size)))
2196 assert(!"Invalid immediate for instGen_Compare_Reg_To_Imm")2196 assert(!"Invalid immediate for instGen_Compare_Reg_To_Imm");
2199 #endif // _TARGET_AMD64_
2201 getEmitter()->emitIns_R_I(INS_cmp, size, reg, imm);
2203 #elif defined(_TARGET_ARM_)
// ARM ALU immediates are restricted; cmn (via -imm) widens what is encodable.
2204 if (arm_Valid_Imm_For_Alu(imm) || arm_Valid_Imm_For_Alu(-imm))
2206 getEmitter()->emitIns_R_I(INS_cmp, size, reg, imm);
2208 else // We need a scratch register
2210 assert(!"Invalid immediate for instGen_Compare_Reg_To_Imm");
2212 #elif defined(_TARGET_ARM64_)
2213 if (true) // TODO-ARM64-NYI: arm_Valid_Imm_For_Alu(imm) || arm_Valid_Imm_For_Alu(-imm))
2215 getEmitter()->emitIns_R_I(INS_cmp, size, reg, imm);
2217 else // We need a scratch register
2219 assert(!"Invalid immediate for instGen_Compare_Reg_To_Imm");
2222 #error "Unknown _TARGET_"
2227 /*****************************************************************************
2229 * Machine independent way to move a stack based local variable into a register
// Loads stack-based local 'varNum' (+'offs') of 'srcType' into 'dstReg',
// using the type-appropriate load instruction from ins_Load().
2231 void CodeGen::instGen_Load_Reg_From_Lcl(var_types srcType, regNumber dstReg, int varNum, int offs)
2233 emitAttr size = emitTypeSize(srcType);
2235 getEmitter()->emitIns_R_S(ins_Load(srcType), size, dstReg, varNum, offs);
2238 /*****************************************************************************
2240 * Machine independent way to move a register into a stack based local variable
// Stores 'srcReg' into stack-based local 'varNum' (+'offs') of 'dstType',
// using the type-appropriate store instruction from ins_Store().
2242 void CodeGen::instGen_Store_Reg_Into_Lcl(var_types dstType, regNumber srcReg, int varNum, int offs)
2244 emitAttr size = emitTypeSize(dstType);
2246 getEmitter()->emitIns_S_R(ins_Store(dstType), size, srcReg, varNum, offs);
2249 /*****************************************************************************
2251 * Machine independent way to move an immediate into a stack based local variable
// Stores immediate 'imm' into stack-based local 'varNum' (+'offs').
// xarch can store an immediate directly; ARM/ARM64 must first materialize it
// in 'regToUse', so that register is required there.
2253 void CodeGen::instGen_Store_Imm_Into_Lcl(
2254 var_types dstType, emitAttr sizeAttr, ssize_t imm, int varNum, int offs, regNumber regToUse)
2256 #ifdef _TARGET_XARCH_
2257 #ifdef _TARGET_AMD64_
// 64-bit store-immediate only takes a sign-extended 32-bit value; relocs invalid.
2258 if ((EA_SIZE(sizeAttr) == EA_8BYTE) && (((int)imm != (ssize_t)imm) || EA_IS_CNS_RELOC(sizeAttr)))
2260 assert(!"Invalid immediate for instGen_Store_Imm_Into_Lcl");
2263 #endif // _TARGET_AMD64_
2265 getEmitter()->emitIns_S_I(ins_Store(dstType), sizeAttr, varNum, offs, (int)imm);
2267 #elif defined(_TARGET_ARMARCH_)
2268 // Load imm into a register
2269 regNumber immReg = regToUse;
2270 assert(regToUse != REG_NA);
2271 instGen_Set_Reg_To_Imm(sizeAttr, immReg, (ssize_t)imm);
2272 instGen_Store_Reg_Into_Lcl(dstType, immReg, varNum, offs);
// Relocatable immediates keep the register live (body elided in listing).
2273 if (EA_IS_RELOC(sizeAttr))
2275 regSet.verifyRegUsed(immReg);
2278 #error "Unknown _TARGET_"
2282 /*****************************************************************************/
2283 /*****************************************************************************/
2284 /*****************************************************************************/