*/
instruction CodeGen::ins_Move_Extend(var_types srcType, bool srcInReg)
{
- NYI_LOONGARCH64("ins_Move_Extend");
- NYI_RISCV64("ins_Move_Extend");
-
- instruction ins = INS_invalid;
-
- if (varTypeIsSIMD(srcType))
+ if (varTypeUsesIntReg(srcType))
{
-#if defined(TARGET_XARCH)
- // SSE2/AVX requires destination to be a reg always.
- // If src is in reg means, it is a reg-reg move.
- //
- // SSE2 Note: always prefer movaps/movups over movapd/movupd since the
- // former doesn't require 66h prefix and one byte smaller than the
- // latter.
- //
- // TODO-CQ: based on whether src type is aligned use movaps instead
-
- return (srcInReg) ? INS_movaps : INS_movups;
-#elif defined(TARGET_ARM64)
- return (srcInReg) ? INS_mov : ins_Load(srcType);
-#else // !defined(TARGET_ARM64) && !defined(TARGET_XARCH)
- assert(!"unhandled SIMD type");
-#endif // !defined(TARGET_ARM64) && !defined(TARGET_XARCH)
- }
-
-#if defined(TARGET_XARCH)
- if (varTypeIsFloating(srcType))
- {
- if (srcType == TYP_DOUBLE)
- {
- return (srcInReg) ? INS_movaps : INS_movsd_simd;
- }
- else if (srcType == TYP_FLOAT)
- {
- return (srcInReg) ? INS_movaps : INS_movss;
- }
- else
- {
- assert(!"unhandled floating type");
- }
- }
-#elif defined(TARGET_ARM)
- if (varTypeIsFloating(srcType))
- return INS_vmov;
-#else
- if (varTypeIsFloating(srcType))
- return INS_mov;
-#endif
+ instruction ins = INS_invalid;
#if defined(TARGET_XARCH)
- if (!varTypeIsSmall(srcType))
- {
- ins = INS_mov;
- }
- else if (varTypeIsUnsigned(srcType))
- {
- ins = INS_movzx;
- }
- else
- {
- ins = INS_movsx;
- }
-#elif defined(TARGET_ARM)
- //
- // Register to Register zero/sign extend operation
- //
- if (srcInReg)
- {
if (!varTypeIsSmall(srcType))
{
ins = INS_mov;
}
else if (varTypeIsUnsigned(srcType))
{
- if (varTypeIsByte(srcType))
- ins = INS_uxtb;
- else
- ins = INS_uxth;
+ ins = INS_movzx;
}
else
{
- if (varTypeIsByte(srcType))
- ins = INS_sxtb;
- else
- ins = INS_sxth;
+ ins = INS_movsx;
}
- }
- else
- {
- ins = ins_Load(srcType);
- }
-#elif defined(TARGET_ARM64)
- //
- // Register to Register zero/sign extend operation
- //
- if (srcInReg)
- {
- if (varTypeIsUnsigned(srcType))
+#elif defined(TARGET_ARM)
+ //
+ // Register to Register zero/sign extend operation
+ //
+ if (srcInReg)
{
- if (varTypeIsByte(srcType))
+ if (!varTypeIsSmall(srcType))
{
- ins = INS_uxtb;
+ ins = INS_mov;
}
- else if (varTypeIsShort(srcType))
+ else if (varTypeIsUnsigned(srcType))
{
- ins = INS_uxth;
+ if (varTypeIsByte(srcType))
+ ins = INS_uxtb;
+ else
+ ins = INS_uxth;
}
else
{
- // A mov Rd, Rm instruction performs the zero extend
- // for the upper 32 bits when the size is EA_4BYTE
-
- ins = INS_mov;
+ if (varTypeIsByte(srcType))
+ ins = INS_sxtb;
+ else
+ ins = INS_sxth;
}
}
else
{
- if (varTypeIsByte(srcType))
- {
- ins = INS_sxtb;
- }
- else if (varTypeIsShort(srcType))
+ ins = ins_Load(srcType);
+ }
+#elif defined(TARGET_ARM64)
+ //
+ // Register to Register zero/sign extend operation
+ //
+ if (srcInReg)
+ {
+ if (varTypeIsUnsigned(srcType))
{
- ins = INS_sxth;
+ if (varTypeIsByte(srcType))
+ {
+ ins = INS_uxtb;
+ }
+ else if (varTypeIsShort(srcType))
+ {
+ ins = INS_uxth;
+ }
+ else
+ {
+ // A mov Rd, Rm instruction performs the zero extend
+ // for the upper 32 bits when the size is EA_4BYTE
+
+ ins = INS_mov;
+ }
}
else
{
- if (srcType == TYP_INT)
+ if (varTypeIsByte(srcType))
{
- ins = INS_sxtw;
+ ins = INS_sxtb;
+ }
+ else if (varTypeIsShort(srcType))
+ {
+ ins = INS_sxth;
}
else
{
- ins = INS_mov;
+ if (srcType == TYP_INT)
+ {
+ ins = INS_sxtw;
+ }
+ else
+ {
+ ins = INS_mov;
+ }
}
}
}
+ else
+ {
+ ins = ins_Load(srcType);
+ }
+#else
+ NYI("ins_Move_Extend");
+#endif
+
+ assert(ins != INS_invalid);
+ return ins;
+ }
+
+#if defined(TARGET_XARCH) && defined(FEATURE_SIMD)
+ if (varTypeUsesMaskReg(srcType))
+ {
+ return INS_kmovq_msk;
+ }
+#endif // TARGET_XARCH && FEATURE_SIMD
+
+ assert(varTypeUsesFloatReg(srcType));
+
+#if defined(TARGET_XARCH)
+ // SSE2/AVX requires destination to be a reg always.
+ // If src is in reg means, it is a reg-reg move.
+ //
+ // SSE2 Note: always prefer movaps/movups over movapd/movupd since the
+ // former doesn't require 66h prefix and one byte smaller than the
+ // latter.
+ //
+ // TODO-CQ: based on whether src type is aligned use movaps instead
+
+ if (srcInReg)
+ {
+ return INS_movaps;
+ }
+
+ unsigned srcSize = genTypeSize(srcType);
+
+ if (srcSize == 4)
+ {
+ return INS_movss;
+ }
+ else if (srcSize == 8)
+ {
+ return INS_movsd_simd;
}
else
{
- ins = ins_Load(srcType);
+ assert((srcSize == 12) || (srcSize == 16) || (srcSize == 32) || (srcSize == 64));
+ return INS_movups;
}
+#elif defined(TARGET_ARM64)
+ return (srcInReg) ? INS_mov : ins_Load(srcType);
+#elif defined(TARGET_ARM)
+ assert(!varTypeIsSIMD(srcType));
+ return INS_vmov;
#else
NYI("ins_Move_Extend");
#endif
- assert(ins != INS_invalid);
- return ins;
}
/*****************************************************************************
*/
instruction CodeGenInterface::ins_Load(var_types srcType, bool aligned /*=false*/)
{
- instruction ins = INS_invalid;
-
- if (varTypeIsSIMD(srcType))
+ if (varTypeUsesIntReg(srcType))
{
+ instruction ins = INS_invalid;
+
#if defined(TARGET_XARCH)
-#ifdef FEATURE_SIMD
- if (srcType == TYP_SIMD8)
+ if (!varTypeIsSmall(srcType))
{
- return INS_movsd_simd;
+ ins = INS_mov;
+ }
+ else if (varTypeIsUnsigned(srcType))
+ {
+ ins = INS_movzx;
}
else
-#endif // FEATURE_SIMD
{
- // SSE2 Note: always prefer movaps/movups over movapd/movupd since the
- // former doesn't require 66h prefix and one byte smaller than the
- // latter.
- return (aligned) ? INS_movaps : INS_movups;
+ ins = INS_movsx;
}
-#elif defined(TARGET_ARM64)
- return INS_ldr;
-#else
- assert(!"ins_Load with SIMD type");
-#endif
- }
-
- if (varTypeIsFloating(srcType))
- {
-#if defined(TARGET_XARCH)
- if (srcType == TYP_DOUBLE)
+#elif defined(TARGET_ARMARCH)
+ if (!varTypeIsSmall(srcType))
{
- return INS_movsd_simd;
+ ins = INS_ldr;
}
- else if (srcType == TYP_FLOAT)
+ else if (varTypeIsByte(srcType))
{
- return INS_movss;
+ if (varTypeIsUnsigned(srcType))
+ ins = INS_ldrb;
+ else
+ ins = INS_ldrsb;
}
- else
+ else if (varTypeIsShort(srcType))
{
- assert(!"unhandled floating type");
+ if (varTypeIsUnsigned(srcType))
+ ins = INS_ldrh;
+ else
+ ins = INS_ldrsh;
}
-#elif defined(TARGET_ARM64)
- return INS_ldr;
-#elif defined(TARGET_ARM)
- return INS_vldr;
#elif defined(TARGET_LOONGARCH64)
- if (srcType == TYP_DOUBLE)
+ if (varTypeIsByte(srcType))
{
- return INS_fld_d;
+ if (varTypeIsUnsigned(srcType))
+ ins = INS_ld_bu;
+ else
+ ins = INS_ld_b;
+ }
+ else if (varTypeIsShort(srcType))
+ {
+ if (varTypeIsUnsigned(srcType))
+ ins = INS_ld_hu;
+ else
+ ins = INS_ld_h;
}
- else if (srcType == TYP_FLOAT)
+ else if (TYP_INT == srcType)
{
- return INS_fld_s;
+ ins = INS_ld_w;
}
else
{
- assert(!"unhandled floating type");
+ ins = INS_ld_d; // default ld_d.
}
#elif defined(TARGET_RISCV64)
- if (srcType == TYP_DOUBLE)
+ if (varTypeIsByte(srcType))
+ {
+ if (varTypeIsUnsigned(srcType))
+ ins = INS_lbu;
+ else
+ ins = INS_lb;
+ }
+ else if (varTypeIsShort(srcType))
{
- return INS_fld;
+ if (varTypeIsUnsigned(srcType))
+ ins = INS_lhu;
+ else
+ ins = INS_lh;
}
- else if (srcType == TYP_FLOAT)
+ else if (TYP_INT == srcType)
{
- return INS_flw;
+ ins = INS_lw;
}
else
{
- assert(!"unhandled floating type");
+ ins = INS_ld; // default ld.
}
#else
- assert(!varTypeIsFloating(srcType));
+ NYI("ins_Load");
#endif
- }
-#if defined(TARGET_XARCH)
- if (!varTypeIsSmall(srcType))
- {
- ins = INS_mov;
+ assert(ins != INS_invalid);
+ return ins;
}
- else if (varTypeIsUnsigned(srcType))
- {
- ins = INS_movzx;
- }
- else
+
+#if defined(TARGET_XARCH) && defined(FEATURE_SIMD)
+ if (varTypeUsesMaskReg(srcType))
{
- ins = INS_movsx;
+ return INS_kmovq_msk;
}
+#endif // TARGET_XARCH && FEATURE_SIMD
-#elif defined(TARGET_ARMARCH)
- if (!varTypeIsSmall(srcType))
+ assert(varTypeUsesFloatReg(srcType));
+
+#if defined(TARGET_XARCH)
+ unsigned srcSize = genTypeSize(srcType);
+
+ if (srcSize == 4)
{
- ins = INS_ldr;
+ return INS_movss;
}
- else if (varTypeIsByte(srcType))
+ else if (srcSize == 8)
{
- if (varTypeIsUnsigned(srcType))
- ins = INS_ldrb;
- else
- ins = INS_ldrsb;
+ return INS_movsd_simd;
}
- else if (varTypeIsShort(srcType))
+ else
{
- if (varTypeIsUnsigned(srcType))
- ins = INS_ldrh;
- else
- ins = INS_ldrsh;
+ assert((srcSize == 12) || (srcSize == 16) || (srcSize == 32) || (srcSize == 64));
+
+ // SSE2 Note: always prefer movaps/movups over movapd/movupd since the
+ // former doesn't require 66h prefix and one byte smaller than the
+ // latter.
+
+ return (aligned) ? INS_movaps : INS_movups;
}
+#elif defined(TARGET_ARM64)
+ return INS_ldr;
+#elif defined(TARGET_ARM)
+ assert(!varTypeIsSIMD(srcType));
+ return INS_vldr;
#elif defined(TARGET_LOONGARCH64)
- if (varTypeIsByte(srcType))
- {
- if (varTypeIsUnsigned(srcType))
- ins = INS_ld_bu;
- else
- ins = INS_ld_b;
- }
- else if (varTypeIsShort(srcType))
- {
- if (varTypeIsUnsigned(srcType))
- ins = INS_ld_hu;
- else
- ins = INS_ld_h;
- }
- else if (TYP_INT == srcType)
+ assert(!varTypeIsSIMD(srcType));
+
+ if (srcType == TYP_DOUBLE)
{
- ins = INS_ld_w;
+ return INS_fld_d;
}
else
{
- ins = INS_ld_d; // default ld_d.
+ assert(srcType == TYP_FLOAT);
+ return INS_fld_s;
}
#elif defined(TARGET_RISCV64)
- if (varTypeIsByte(srcType))
- {
- if (varTypeIsUnsigned(srcType))
- ins = INS_lbu;
- else
- ins = INS_lb;
- }
- else if (varTypeIsShort(srcType))
- {
- if (varTypeIsUnsigned(srcType))
- ins = INS_lhu;
- else
- ins = INS_lh;
- }
- else if (TYP_INT == srcType)
+ assert(!varTypeIsSIMD(srcType));
+
+ if (srcType == TYP_DOUBLE)
{
- ins = INS_lw;
+ return INS_fld;
}
else
{
- ins = INS_ld; // default ld.
+ assert(srcType == TYP_FLOAT);
+ return INS_flw;
}
#else
NYI("ins_Load");
#endif
-
- assert(ins != INS_invalid);
- return ins;
}
/*****************************************************************************
instruction CodeGen::ins_Copy(var_types dstType)
{
assert(emitTypeActSz[dstType] != 0);
-#if defined(TARGET_XARCH)
- if (varTypeIsSIMD(dstType))
- {
- return INS_movaps;
- }
- else if (varTypeIsFloating(dstType))
+
+ if (varTypeUsesIntReg(dstType))
{
- // Both float and double copy can use movaps
- return INS_movaps;
+#if defined(TARGET_XARCH) || defined(TARGET_ARMARCH) || defined(TARGET_LOONGARCH64) || defined(TARGET_RISCV64)
+ return INS_mov;
+#else
+ NYI("ins_Copy");
+#endif
}
- else
+
+#if defined(TARGET_XARCH) && defined(FEATURE_SIMD)
+ if (varTypeUsesMaskReg(dstType))
{
- return INS_mov;
+ return INS_kmovq_msk;
}
+#endif // TARGET_XARCH && FEATURE_SIMD
+
+ assert(varTypeUsesFloatReg(dstType));
+
+#if defined(TARGET_XARCH)
+ return INS_movaps;
#elif defined(TARGET_ARM64)
- if (varTypeIsFloating(dstType))
+ if (varTypeIsSIMD(dstType))
{
- return INS_fmov;
+ return INS_mov;
}
else
{
- return INS_mov;
+ assert(varTypeIsFloating(dstType));
+ return INS_fmov;
}
#elif defined(TARGET_ARM)
assert(!varTypeIsSIMD(dstType));
- if (varTypeIsFloating(dstType))
- {
- return INS_vmov;
- }
- else
- {
- return INS_mov;
- }
+ return INS_vmov;
#elif defined(TARGET_LOONGARCH64)
- if (varTypeIsFloating(dstType))
+ assert(!varTypeIsSIMD(dstType));
+
+ if (dstType == TYP_DOUBLE)
{
- return dstType == TYP_FLOAT ? INS_fmov_s : INS_fmov_d;
+ return INS_fmov_d;
}
else
{
- return INS_mov;
+ assert(dstType == TYP_FLOAT);
+ return INS_fmov_s;
}
#elif defined(TARGET_RISCV64)
- if (varTypeIsFloating(dstType))
+ assert(!varTypeIsSIMD(dstType));
+
+ if (dstType == TYP_DOUBLE)
{
- return dstType == TYP_FLOAT ? INS_fsgnj_s : INS_fsgnj_d;
+ return INS_fsgnj_d;
}
else
{
- return INS_mov;
+ assert(dstType == TYP_FLOAT);
+ return INS_fsgnj_s;
}
-#else // TARGET_*
-#error "Unknown TARGET"
+#else
+ NYI("ins_Copy");
#endif
}
//
instruction CodeGen::ins_Copy(regNumber srcReg, var_types dstType)
{
- bool dstIsFloatReg = isFloatRegType(dstType);
- bool srcIsFloatReg = genIsValidFloatReg(srcReg);
- if (srcIsFloatReg == dstIsFloatReg)
+ assert(srcReg != REG_NA);
+
+ if (varTypeUsesIntReg(dstType))
{
- return ins_Copy(dstType);
- }
+ if (genIsValidIntOrFakeReg(srcReg))
+ {
+ // int to int
+ return ins_Copy(dstType);
+ }
+
+#if defined(TARGET_XARCH) && defined(FEATURE_SIMD)
+ if (genIsValidMaskReg(srcReg))
+ {
+ // mask to int
+ return INS_kmovq_gpr;
+ }
+#endif // TARGET_XARCH && FEATURE_SIMD
+
+ // float to int
+ assert(genIsValidFloatReg(srcReg));
+
#if defined(TARGET_XARCH)
- return INS_movd;
+ return INS_movd;
#elif defined(TARGET_ARM64)
- if (dstIsFloatReg)
- {
- return INS_fmov;
- }
- else
- {
return INS_mov;
- }
#elif defined(TARGET_ARM)
- // No SIMD support yet.
- assert(!varTypeIsSIMD(dstType));
- if (dstIsFloatReg)
- {
- // Can't have LONG in a register.
- assert(dstType == TYP_FLOAT);
- return INS_vmov_i2f;
- }
- else
- {
// Can't have LONG in a register.
assert(dstType == TYP_INT);
+
+ assert(!varTypeIsSIMD(dstType));
return INS_vmov_f2i;
+#elif defined(TARGET_LOONGARCH64)
+ assert(!varTypeIsSIMD(dstType));
+ return EA_SIZE(emitActualTypeSize(dstType)) == EA_4BYTE ? INS_movfr2gr_s : INS_movfr2gr_d;
+#elif defined(TARGET_RISCV64)
+ assert(!varTypeIsSIMD(dstType));
+ return EA_SIZE(emitActualTypeSize(dstType)) == EA_4BYTE ? INS_fcvt_w_d : INS_fcvt_l_d;
+#else
+ NYI("ins_Copy");
+#endif
}
+
+#if defined(TARGET_XARCH) && defined(FEATURE_SIMD)
+ if (varTypeUsesMaskReg(dstType))
+ {
+ if (genIsValidMaskReg(srcReg))
+ {
+ // mask to mask
+ return ins_Copy(dstType);
+ }
+
+        // int to mask
+ assert(genIsValidIntOrFakeReg(srcReg));
+ return INS_kmovq_gpr;
+ }
+#endif // TARGET_XARCH && FEATURE_SIMD
+
+ assert(varTypeUsesFloatReg(dstType));
+
+ if (genIsValidFloatReg(srcReg))
+ {
+ // float to float
+ return ins_Copy(dstType);
+ }
+
+ // int to float
+ assert(genIsValidIntOrFakeReg(srcReg));
+
+#if defined(TARGET_XARCH)
+ return INS_movd;
+#elif defined(TARGET_ARM64)
+ return INS_fmov;
+#elif defined(TARGET_ARM)
+ // Can't have LONG in a register.
+ assert(dstType == TYP_FLOAT);
+
+ assert(!varTypeIsSIMD(dstType));
+ return INS_vmov_i2f;
#elif defined(TARGET_LOONGARCH64)
- // TODO-LoongArch64-CQ: supporting SIMD.
assert(!varTypeIsSIMD(dstType));
- if (dstIsFloatReg)
+
+ if (dstType == TYP_DOUBLE)
{
- assert(!genIsValidFloatReg(srcReg));
- return dstType == TYP_FLOAT ? INS_movgr2fr_w : INS_movgr2fr_d;
+ return INS_movgr2fr_d;
}
else
{
- assert(genIsValidFloatReg(srcReg));
- return EA_SIZE(emitActualTypeSize(dstType)) == EA_4BYTE ? INS_movfr2gr_s : INS_movfr2gr_d;
+ assert(dstType == TYP_FLOAT);
+ return INS_movgr2fr_w;
}
#elif defined(TARGET_RISCV64)
- // TODO-RISCV64-CQ: supporting SIMD.
assert(!varTypeIsSIMD(dstType));
- if (dstIsFloatReg)
+
+ if (dstType == TYP_DOUBLE)
{
- assert(!genIsValidFloatReg(srcReg));
- return dstType == TYP_FLOAT ? INS_fcvt_s_l : INS_fcvt_d_l;
+ return INS_fcvt_d_l;
}
else
{
- assert(genIsValidFloatReg(srcReg));
- return EA_SIZE(emitActualTypeSize(dstType)) == EA_4BYTE ? INS_fcvt_w_d : INS_fcvt_l_d;
+ assert(dstType == TYP_FLOAT);
+ return INS_fcvt_s_l;
}
- return INS_invalid;
-#else // TARGET*
-#error "Unknown TARGET"
+#else
+ NYI("ins_Copy");
#endif
}
*/
instruction CodeGenInterface::ins_Store(var_types dstType, bool aligned /*=false*/)
{
- instruction ins = INS_invalid;
+ if (varTypeUsesIntReg(dstType))
+ {
+ instruction ins = INS_invalid;
#if defined(TARGET_XARCH)
- if (varTypeIsSIMD(dstType))
- {
-#ifdef FEATURE_SIMD
- if (dstType == TYP_SIMD8)
- {
- return INS_movsd_simd;
- }
+ ins = INS_mov;
+#elif defined(TARGET_ARMARCH)
+ if (!varTypeIsSmall(dstType))
+ ins = INS_str;
+ else if (varTypeIsByte(dstType))
+ ins = INS_strb;
+ else if (varTypeIsShort(dstType))
+ ins = INS_strh;
+#elif defined(TARGET_LOONGARCH64)
+ if (varTypeIsByte(dstType))
+ ins = aligned ? INS_stx_b : INS_st_b;
+ else if (varTypeIsShort(dstType))
+ ins = aligned ? INS_stx_h : INS_st_h;
+ else if (TYP_INT == dstType)
+ ins = aligned ? INS_stx_w : INS_st_w;
else
-#endif // FEATURE_SIMD
- {
- // SSE2 Note: always prefer movaps/movups over movapd/movupd since the
- // former doesn't require 66h prefix and one byte smaller than the
- // latter.
- return (aligned) ? INS_movaps : INS_movups;
- }
+ ins = aligned ? INS_stx_d : INS_st_d;
+#elif defined(TARGET_RISCV64)
+ if (varTypeIsByte(dstType))
+ ins = INS_sb;
+ else if (varTypeIsShort(dstType))
+ ins = INS_sh;
+ else if (TYP_INT == dstType)
+ ins = INS_sw;
+ else
+ ins = INS_sd;
+#else
+ NYI("ins_Store");
+#endif
+ assert(ins != INS_invalid);
+ return ins;
}
- else if (varTypeIsFloating(dstType))
+
+#if defined(TARGET_XARCH) && defined(FEATURE_SIMD)
+ if (varTypeUsesMaskReg(dstType))
{
- if (dstType == TYP_DOUBLE)
- {
- return INS_movsd_simd;
- }
- else if (dstType == TYP_FLOAT)
- {
- return INS_movss;
- }
- else
- {
- assert(!"unhandled floating type");
- }
+ return INS_kmovq_msk;
}
-#elif defined(TARGET_ARM64)
- if (varTypeIsSIMD(dstType) || varTypeIsFloating(dstType))
+#endif // TARGET_XARCH && FEATURE_SIMD
+
+ assert(varTypeUsesFloatReg(dstType));
+
+#if defined(TARGET_XARCH)
+ unsigned dstSize = genTypeSize(dstType);
+
+ if (dstSize == 4)
{
- // All sizes of SIMD and FP instructions use INS_str
- return INS_str;
+ return INS_movss;
}
-#elif defined(TARGET_ARM)
- assert(!varTypeIsSIMD(dstType));
- if (varTypeIsFloating(dstType))
+ else if (dstSize == 8)
{
- return INS_vstr;
+ return INS_movsd_simd;
}
+ else
+ {
+ assert((dstSize == 12) || (dstSize == 16) || (dstSize == 32) || (dstSize == 64));
+
+ // SSE2 Note: always prefer movaps/movups over movapd/movupd since the
+ // former doesn't require 66h prefix and one byte smaller than the
+ // latter.
+
+ return (aligned) ? INS_movaps : INS_movups;
+ }
+#elif defined(TARGET_ARM64)
+ return INS_str;
+#elif defined(TARGET_ARM)
+ assert(!varTypeIsSIMD(dstType));
+ return INS_vstr;
#elif defined(TARGET_LOONGARCH64)
assert(!varTypeIsSIMD(dstType));
- if (varTypeIsFloating(dstType))
+
+ if (dstType == TYP_DOUBLE)
{
- if (dstType == TYP_DOUBLE)
- {
- return aligned ? INS_fstx_d : INS_fst_d;
- }
- else if (dstType == TYP_FLOAT)
- {
- return aligned ? INS_fstx_s : INS_fst_s;
- }
+ return aligned ? INS_fstx_d : INS_fst_d;
}
-#elif defined(TARGET_RISCV64)
- assert(!varTypeIsSIMD(dstType));
- if (varTypeIsFloating(dstType))
+ else
{
- if (dstType == TYP_DOUBLE)
- {
- return INS_fsd;
- }
- else if (dstType == TYP_FLOAT)
- {
- return INS_fsw;
- }
+ assert(dstType == TYP_FLOAT);
+ return aligned ? INS_fstx_s : INS_fst_s;
}
-#else
+#elif defined(TARGET_RISCV64)
assert(!varTypeIsSIMD(dstType));
- assert(!varTypeIsFloating(dstType));
-#endif
-#if defined(TARGET_XARCH)
- ins = INS_mov;
-#elif defined(TARGET_ARMARCH)
- if (!varTypeIsSmall(dstType))
- ins = INS_str;
- else if (varTypeIsByte(dstType))
- ins = INS_strb;
- else if (varTypeIsShort(dstType))
- ins = INS_strh;
-#elif defined(TARGET_LOONGARCH64)
- if (varTypeIsByte(dstType))
- ins = aligned ? INS_stx_b : INS_st_b;
- else if (varTypeIsShort(dstType))
- ins = aligned ? INS_stx_h : INS_st_h;
- else if (TYP_INT == dstType)
- ins = aligned ? INS_stx_w : INS_st_w;
- else
- ins = aligned ? INS_stx_d : INS_st_d;
-#elif defined(TARGET_RISCV64)
- if (varTypeIsByte(dstType))
- ins = INS_sb;
- else if (varTypeIsShort(dstType))
- ins = INS_sh;
- else if (TYP_INT == dstType)
- ins = INS_sw;
+ if (dstType == TYP_DOUBLE)
+ {
+ return INS_fsd;
+ }
else
- ins = INS_sd;
+ {
+ assert(dstType == TYP_FLOAT);
+ return INS_fsw;
+ }
#else
NYI("ins_Store");
#endif
-
- assert(ins != INS_invalid);
- return ins;
}
//------------------------------------------------------------------------
{
assert(srcReg != REG_NA);
- bool dstIsFloatType = isFloatRegType(dstType);
- bool srcIsFloatReg = genIsValidFloatReg(srcReg);
- if (srcIsFloatReg == dstIsFloatType)
+ if (varTypeUsesIntReg(dstType))
+ {
+ if (genIsValidIntOrFakeReg(srcReg))
+ {
+ // int to int
+ return ins_Store(dstType, aligned);
+ }
+
+#if defined(TARGET_XARCH) && defined(FEATURE_SIMD)
+ if (genIsValidMaskReg(srcReg))
+ {
+ // mask to int, treat as mask so it works on 32-bit
+ return ins_Store(TYP_MASK, aligned);
+ }
+#endif // TARGET_XARCH && FEATURE_SIMD
+
+ // float to int, treat as float to float
+ assert(genIsValidFloatReg(srcReg));
+
+ unsigned dstSize = genTypeSize(dstType);
+
+ if (dstSize == 4)
+ {
+ dstType = TYP_FLOAT;
+ }
+ else
+ {
+#if defined(TARGET_64BIT)
+ assert(dstSize == 8);
+ dstType = TYP_DOUBLE;
+#else
+ unreached();
+#endif
+ }
+
+ return ins_Store(dstType, aligned);
+ }
+
+#if defined(TARGET_XARCH) && defined(FEATURE_SIMD)
+ if (varTypeUsesMaskReg(dstType))
{
+ if (genIsValidMaskReg(srcReg))
+ {
+ // mask to mask
+ return ins_Store(dstType, aligned);
+ }
+
+        // int to mask, keep as mask so it works on 32-bit
+ assert(genIsValidIntOrFakeReg(srcReg));
return ins_Store(dstType, aligned);
}
- else
+#endif // TARGET_XARCH && FEATURE_SIMD
+
+ assert(varTypeUsesFloatReg(dstType));
+
+ if (genIsValidIntOrFakeReg(srcReg))
{
- // We know that we are writing to memory, so make the destination type same
- // as the source type.
- var_types dstTypeForStore = TYP_UNDEF;
- unsigned dstSize = genTypeSize(dstType);
- switch (dstSize)
+ // int to float, treat as int to int
+
+ unsigned dstSize = genTypeSize(dstType);
+
+ if (dstSize == 4)
+ {
+ dstType = TYP_INT;
+ }
+ else
{
- case 4:
- dstTypeForStore = srcIsFloatReg ? TYP_FLOAT : TYP_INT;
- break;
#if defined(TARGET_64BIT)
- case 8:
- dstTypeForStore = srcIsFloatReg ? TYP_DOUBLE : TYP_LONG;
- break;
-#endif // TARGET_64BIT
- default:
- assert(!"unexpected write to the stack.");
- break;
+ assert(dstSize == 8);
+ dstType = TYP_LONG;
+#else
+ unreached();
+#endif
}
- return ins_Store(dstTypeForStore, aligned);
}
+ else
+ {
+ // float to float
+ assert(genIsValidFloatReg(srcReg));
+ }
+
+ return ins_Store(dstType, aligned);
}
#if defined(TARGET_XARCH)
*/
void CodeGen::instGen_Set_Reg_To_Zero(emitAttr size, regNumber reg, insFlags flags)
{
+ assert(genIsValidIntOrFakeReg(reg));
+
#if defined(TARGET_XARCH)
GetEmitter()->emitIns_R_R(INS_xor, size, reg, reg);
#elif defined(TARGET_ARM)
#else
#error "Unknown TARGET"
#endif
+
regSet.verifyRegUsed(reg);
}