if (immFitsInIns)
{
// generate a single instruction that encodes the immediate directly
- getEmitter()->emitIns_R_R_I(ins, attr, reg1, reg2, imm);
+ GetEmitter()->emitIns_R_R_I(ins, attr, reg1, reg2, imm);
}
else
{
instGen_Set_Reg_To_Imm(attr, tmpReg, imm);
// generate the instruction using a three register encoding with the immediate in tmpReg
- getEmitter()->emitIns_R_R_R(ins, attr, reg1, reg2, tmpReg);
+ GetEmitter()->emitIns_R_R_R(ins, attr, reg1, reg2, tmpReg);
}
return immFitsInIns;
}
const int val32 = (int)imm;
if (arm_Valid_Imm_For_Mov(val32))
{
- getEmitter()->emitIns_R_I(INS_mov, size, reg, val32, flags);
+ GetEmitter()->emitIns_R_I(INS_mov, size, reg, val32, flags);
}
else // We have to use a movw/movt pair of instructions
{
assert(arm_Valid_Imm_For_Mov(imm_lo16));
assert(imm_hi16 != 0);
- getEmitter()->emitIns_R_I(INS_movw, size, reg, imm_lo16);
+ GetEmitter()->emitIns_R_I(INS_movw, size, reg, imm_lo16);
// If we've got a low register, the high word is all bits set,
// and the high bit of the low word is set, we can sign extend
// halfword and save two bytes of encoding. This can happen for
// small magnitude negative numbers 'n' for -32768 <= n <= -1.
- if (getEmitter()->isLowRegister(reg) && (imm_hi16 == 0xffff) && ((imm_lo16 & 0x8000) == 0x8000))
+ if (GetEmitter()->isLowRegister(reg) && (imm_hi16 == 0xffff) && ((imm_lo16 & 0x8000) == 0x8000))
{
- getEmitter()->emitIns_R_R(INS_sxth, EA_4BYTE, reg, reg);
+ GetEmitter()->emitIns_R_R(INS_sxth, EA_4BYTE, reg, reg);
}
else
{
- getEmitter()->emitIns_R_I(INS_movt, size, reg, imm_hi16);
+ GetEmitter()->emitIns_R_I(INS_movt, size, reg, imm_hi16);
}
if (flags == INS_FLAGS_SET)
- getEmitter()->emitIns_R_R(INS_mov, size, reg, reg, INS_FLAGS_SET);
+ GetEmitter()->emitIns_R_R(INS_mov, size, reg, reg, INS_FLAGS_SET);
}
}
float f = forceCastToFloat(constValue);
genSetRegToIcon(tmpReg, *((int*)(&f)));
- getEmitter()->emitIns_R_R(INS_vmov_i2f, EA_4BYTE, targetReg, tmpReg);
+ GetEmitter()->emitIns_R_R(INS_vmov_i2f, EA_4BYTE, targetReg, tmpReg);
}
else
{
genSetRegToIcon(tmpReg1, cv[0]);
genSetRegToIcon(tmpReg2, cv[1]);
- getEmitter()->emitIns_R_R_R(INS_vmov_i2d, EA_8BYTE, targetReg, tmpReg1, tmpReg2);
+ GetEmitter()->emitIns_R_R_R(INS_vmov_i2d, EA_8BYTE, targetReg, tmpReg1, tmpReg2);
}
}
break;
const genTreeOps oper = treeNode->OperGet();
regNumber targetReg = treeNode->gtRegNum;
var_types targetType = treeNode->TypeGet();
- emitter* emit = getEmitter();
+ emitter* emit = GetEmitter();
assert(oper == GT_ADD || oper == GT_SUB || oper == GT_MUL || oper == GT_ADD_LO || oper == GT_ADD_HI ||
oper == GT_SUB_LO || oper == GT_SUB_HI || oper == GT_OR || oper == GT_XOR || oper == GT_AND);
// If 0 bail out by returning null in regCnt
genConsumeRegAndCopy(size, regCnt);
endLabel = genCreateTempLabel();
- getEmitter()->emitIns_R_R(INS_TEST, easz, regCnt, regCnt);
+ GetEmitter()->emitIns_R_R(INS_TEST, easz, regCnt, regCnt);
inst_JMP(EJ_eq, endLabel);
}
// Since the size is less than a page, simply adjust the SP value.
// The SP might already be in the guard page, must touch it BEFORE
// the alloc, not after.
- getEmitter()->emitIns_R_R_I(INS_ldr, EA_4BYTE, regCnt, REG_SP, 0);
+ GetEmitter()->emitIns_R_R_I(INS_ldr, EA_4BYTE, regCnt, REG_SP, 0);
inst_RV_IV(INS_sub, REG_SP, amount, EA_PTRSIZE);
lastTouchDelta = amount;
// If not done, loop
// Note that regCnt is the number of bytes to stack allocate.
assert(genIsValidIntReg(regCnt));
- getEmitter()->emitIns_R_I(INS_sub, EA_PTRSIZE, regCnt, STACK_ALIGN, INS_FLAGS_SET);
+ GetEmitter()->emitIns_R_I(INS_sub, EA_PTRSIZE, regCnt, STACK_ALIGN, INS_FLAGS_SET);
inst_JMP(EJ_ne, loop);
lastTouchDelta = 0;
BasicBlock* done = genCreateTempLabel();
// subs regCnt, SP, regCnt // regCnt now holds ultimate SP
- getEmitter()->emitIns_R_R_R(INS_sub, EA_PTRSIZE, regCnt, REG_SPBASE, regCnt, INS_FLAGS_SET);
+ GetEmitter()->emitIns_R_R_R(INS_sub, EA_PTRSIZE, regCnt, REG_SPBASE, regCnt, INS_FLAGS_SET);
inst_JMP(EJ_vc, loop); // branch if the V flag is not set
genDefineTempLabel(loop);
// tickle the page - Read from the updated SP - this triggers a page fault when on the guard page
- getEmitter()->emitIns_R_R_I(INS_ldr, EA_4BYTE, regTmp, REG_SPBASE, 0);
+ GetEmitter()->emitIns_R_R_I(INS_ldr, EA_4BYTE, regTmp, REG_SPBASE, 0);
// decrement SP by eeGetPageSize()
- getEmitter()->emitIns_R_R_I(INS_sub, EA_PTRSIZE, regTmp, REG_SPBASE, compiler->eeGetPageSize());
+ GetEmitter()->emitIns_R_R_I(INS_sub, EA_PTRSIZE, regTmp, REG_SPBASE, compiler->eeGetPageSize());
- getEmitter()->emitIns_R_R(INS_cmp, EA_PTRSIZE, regTmp, regCnt);
+ GetEmitter()->emitIns_R_R(INS_cmp, EA_PTRSIZE, regTmp, regCnt);
inst_JMP(EJ_lo, done);
// Update SP to be at the next page of stack that we will tickle
- getEmitter()->emitIns_R_R(INS_mov, EA_PTRSIZE, REG_SPBASE, regTmp);
+ GetEmitter()->emitIns_R_R(INS_mov, EA_PTRSIZE, REG_SPBASE, regTmp);
// Jump to loop and tickle new stack address
inst_JMP(EJ_jmp, loop);
genDefineTempLabel(done);
// Now just move the final value to SP
- getEmitter()->emitIns_R_R(INS_mov, EA_PTRSIZE, REG_SPBASE, regCnt);
+ GetEmitter()->emitIns_R_R(INS_mov, EA_PTRSIZE, REG_SPBASE, regCnt);
// lastTouchDelta is dynamic, and can be up to a page. So if we have outgoing arg space,
// we're going to assume the worst and probe.
regNumber idxReg = treeNode->gtOp.gtOp1->gtRegNum;
regNumber baseReg = treeNode->gtOp.gtOp2->gtRegNum;
- getEmitter()->emitIns_R_ARX(INS_ldr, EA_4BYTE, REG_PC, baseReg, idxReg, TARGET_POINTER_SIZE, 0);
+ GetEmitter()->emitIns_R_ARX(INS_ldr, EA_4BYTE, REG_PC, baseReg, idxReg, TARGET_POINTER_SIZE, 0);
}
//------------------------------------------------------------------------
BasicBlock** jumpTable = compiler->compCurBB->bbJumpSwt->bbsDstTab;
unsigned jmpTabBase;
- jmpTabBase = getEmitter()->emitBBTableDataGenBeg(jumpCount, false);
+ jmpTabBase = GetEmitter()->emitBBTableDataGenBeg(jumpCount, false);
JITDUMP("\n J_M%03u_DS%02u LABEL DWORD\n", Compiler::s_compMethodsCount, jmpTabBase);
JITDUMP(" DD L_M%03u_" FMT_BB "\n", Compiler::s_compMethodsCount, target->bbNum);
- getEmitter()->emitDataGenData(i, target);
+ GetEmitter()->emitDataGenData(i, target);
}
- getEmitter()->emitDataGenEnd();
+ GetEmitter()->emitDataGenEnd();
genMov32RelocatableDataLabel(jmpTabBase, treeNode->gtRegNum);
if (ins == INS_vneg)
{
- getEmitter()->emitIns_R_R(ins, emitTypeSize(tree), targetReg, operandReg);
+ GetEmitter()->emitIns_R_R(ins, emitTypeSize(tree), targetReg, operandReg);
}
else
{
- getEmitter()->emitIns_R_R_I(ins, emitTypeSize(tree), targetReg, operandReg, 0, INS_FLAGS_SET);
+ GetEmitter()->emitIns_R_R_I(ins, emitTypeSize(tree), targetReg, operandReg, 0, INS_FLAGS_SET);
}
genProduceReg(tree);
instGen_MemoryBarrier();
}
- emitter* emit = getEmitter();
+ emitter* emit = GetEmitter();
ClassLayout* layout = cpObjNode->GetLayout();
unsigned slots = layout->GetSlotCount();
if (oper == GT_LSH_HI)
{
inst_RV_SH(ins, EA_4BYTE, tree->gtRegNum, count);
- getEmitter()->emitIns_R_R_R_I(INS_OR, EA_4BYTE, tree->gtRegNum, tree->gtRegNum, regLo, 32 - count,
+ GetEmitter()->emitIns_R_R_R_I(INS_OR, EA_4BYTE, tree->gtRegNum, tree->gtRegNum, regLo, 32 - count,
INS_FLAGS_DONT_CARE, INS_OPTS_LSR);
}
else
{
assert(oper == GT_RSH_LO);
inst_RV_SH(INS_SHIFT_RIGHT_LOGICAL, EA_4BYTE, tree->gtRegNum, count);
- getEmitter()->emitIns_R_R_R_I(INS_OR, EA_4BYTE, tree->gtRegNum, tree->gtRegNum, regHi, 32 - count,
+ GetEmitter()->emitIns_R_R_R_I(INS_OR, EA_4BYTE, tree->gtRegNum, tree->gtRegNum, regHi, 32 - count,
INS_FLAGS_DONT_CARE, INS_OPTS_LSL);
}
if (!isRegCandidate && !(tree->gtFlags & GTF_SPILLED))
{
- getEmitter()->emitIns_R_S(ins_Load(tree->TypeGet()), emitTypeSize(tree), tree->gtRegNum, tree->gtLclNum, 0);
+ GetEmitter()->emitIns_R_S(ins_Load(tree->TypeGet()), emitTypeSize(tree), tree->gtRegNum, tree->gtLclNum, 0);
genProduceReg(tree);
}
}
{
var_types targetType = tree->TypeGet();
regNumber targetReg = tree->gtRegNum;
- emitter* emit = getEmitter();
+ emitter* emit = GetEmitter();
noway_assert(targetType != TYP_STRUCT);
{
var_types targetType = tree->TypeGet();
regNumber targetReg = tree->gtRegNum;
- emitter* emit = getEmitter();
+ emitter* emit = GetEmitter();
unsigned varNum = tree->gtLclNum;
assert(varNum < compiler->lvaCount);
var_types targetType = tree->TypeGet();
regNumber targetReg = tree->gtRegNum;
- emitter* emit = getEmitter();
+ emitter* emit = GetEmitter();
genConsumeOperands(tree);
{
assert(treeNode->OperGet() == GT_CKFINITE);
- emitter* emit = getEmitter();
+ emitter* emit = GetEmitter();
var_types targetType = treeNode->TypeGet();
regNumber intReg = treeNode->GetSingleTempReg();
regNumber fpReg = genConsumeReg(treeNode->gtOp.gtOp1);
assert(!varTypeIsLong(op2Type));
regNumber targetReg = tree->gtRegNum;
- emitter* emit = getEmitter();
+ emitter* emit = GetEmitter();
genConsumeIfReg(op1);
genConsumeIfReg(op2);
genConsumeIfReg(data);
GenTreeIntCon cns = intForm(TYP_INT, 0);
cns.SetContained();
- getEmitter()->emitInsBinary(INS_cmp, emitTypeSize(TYP_INT), data, &cns);
+ GetEmitter()->emitInsBinary(INS_cmp, emitTypeSize(TYP_INT), data, &cns);
BasicBlock* skipLabel = genCreateTempLabel();
instGen_MemoryBarrier();
}
- getEmitter()->emitInsLoadStoreOp(ins_Store(type), emitActualTypeSize(type), data->gtRegNum, tree);
+ GetEmitter()->emitInsLoadStoreOp(ins_Store(type), emitActualTypeSize(type), data->gtRegNum, tree);
}
}
genConsumeOperands(treeNode->AsOp());
assert(insVcvt != INS_invalid);
- getEmitter()->emitIns_R_R(INS_vmov_i2f, srcSize, treeNode->gtRegNum, op1->gtRegNum);
- getEmitter()->emitIns_R_R(insVcvt, srcSize, treeNode->gtRegNum, treeNode->gtRegNum);
+ GetEmitter()->emitIns_R_R(INS_vmov_i2f, srcSize, treeNode->gtRegNum, op1->gtRegNum);
+ GetEmitter()->emitIns_R_R(insVcvt, srcSize, treeNode->gtRegNum, treeNode->gtRegNum);
genProduceReg(treeNode);
}
regNumber tmpReg = treeNode->GetSingleTempReg();
assert(insVcvt != INS_invalid);
- getEmitter()->emitIns_R_R(insVcvt, dstSize, tmpReg, op1->gtRegNum);
- getEmitter()->emitIns_R_R(INS_vmov_f2i, dstSize, treeNode->gtRegNum, tmpReg);
+ GetEmitter()->emitIns_R_R(insVcvt, dstSize, tmpReg, op1->gtRegNum);
+ GetEmitter()->emitIns_R_R(INS_vmov_f2i, dstSize, treeNode->gtRegNum, tmpReg);
genProduceReg(treeNode);
}
}
else
{
- getEmitter()->emitIns_R_AI(INS_ldr, EA_PTR_DSP_RELOC, callTargetReg, (ssize_t)pAddr);
+ GetEmitter()->emitIns_R_AI(INS_ldr, EA_PTR_DSP_RELOC, callTargetReg, (ssize_t)pAddr);
regSet.verifyRegUsed(callTargetReg);
}
- getEmitter()->emitIns_Call(emitter::EC_INDIR_R, compiler->eeFindHelper(helper),
+ GetEmitter()->emitIns_Call(emitter::EC_INDIR_R, compiler->eeFindHelper(helper),
INDEBUG_LDISASM_COMMA(nullptr) NULL, // addr
argSize, retSize, gcInfo.gcVarPtrSetCur, gcInfo.gcRegGCrefSetCur,
gcInfo.gcRegByrefSetCur,
}
else
{
- getEmitter()->emitIns_Call(emitter::EC_FUNC_TOKEN, compiler->eeFindHelper(helper),
+ GetEmitter()->emitIns_Call(emitter::EC_FUNC_TOKEN, compiler->eeFindHelper(helper),
INDEBUG_LDISASM_COMMA(nullptr) addr, argSize, retSize, gcInfo.gcVarPtrSetCur,
gcInfo.gcRegGCrefSetCur, gcInfo.gcRegByrefSetCur, BAD_IL_OFFSET, REG_NA, REG_NA, 0,
0, /* ilOffset, ireg, xreg, xmul, disp */
GenTree* src1 = node->gtOp1;
GenTree* src2 = node->gtOp2;
instruction ins = node->IsUnsigned() ? INS_umull : INS_smull;
- getEmitter()->emitIns_R_R_R_R(ins, EA_4BYTE, node->gtRegNum, node->gtOtherReg, src1->gtRegNum, src2->gtRegNum);
+ GetEmitter()->emitIns_R_R_R_R(ins, EA_4BYTE, node->gtRegNum, node->gtOtherReg, src1->gtRegNum, src2->gtRegNum);
genProduceReg(node);
}
if (compiler->compProfilerMethHndIndirected)
{
- getEmitter()->emitIns_R_AI(INS_ldr, EA_PTR_DSP_RELOC, argReg, (ssize_t)compiler->compProfilerMethHnd);
+ GetEmitter()->emitIns_R_AI(INS_ldr, EA_PTR_DSP_RELOC, argReg, (ssize_t)compiler->compProfilerMethHnd);
regSet.verifyRegUsed(argReg);
}
else
{
// Has a return value and r0 is in use. For emitting Leave profiler callout we would need r0 for passing
// profiler handle. Therefore, r0 is moved to REG_PROFILER_RETURN_SCRATCH as per contract.
- getEmitter()->emitIns_R_R(INS_mov, attr, REG_PROFILER_RET_SCRATCH, REG_R0);
+ GetEmitter()->emitIns_R_R(INS_mov, attr, REG_PROFILER_RET_SCRATCH, REG_R0);
genTransferRegGCState(REG_PROFILER_RET_SCRATCH, REG_R0);
regSet.verifyRegUsed(REG_PROFILER_RET_SCRATCH);
}
if (compiler->compProfilerMethHndIndirected)
{
- getEmitter()->emitIns_R_AI(INS_ldr, EA_PTR_DSP_RELOC, REG_R0, (ssize_t)compiler->compProfilerMethHnd);
+ GetEmitter()->emitIns_R_AI(INS_ldr, EA_PTR_DSP_RELOC, REG_R0, (ssize_t)compiler->compProfilerMethHnd);
}
else
{
// Restore state that existed before profiler callback
if (r0InUse)
{
- getEmitter()->emitIns_R_R(INS_mov, attr, REG_R0, REG_PROFILER_RET_SCRATCH);
+ GetEmitter()->emitIns_R_R(INS_mov, attr, REG_R0, REG_PROFILER_RET_SCRATCH);
genTransferRegGCState(REG_R0, REG_PROFILER_RET_SCRATCH);
gcInfo.gcMarkRegSetNpt(RBM_PROFILER_RET_SCRATCH);
}
if (immFitsInIns)
{
// generate a single instruction that encodes the immediate directly
- getEmitter()->emitIns_R_R_I(ins, attr, reg1, reg2, imm);
+ GetEmitter()->emitIns_R_R_I(ins, attr, reg1, reg2, imm);
}
else
{
}
// generate the instruction using a three register encoding with the immediate in tmpReg
- getEmitter()->emitIns_R_R_R(ins, attr, reg1, reg2, tmpReg);
+ GetEmitter()->emitIns_R_R_R(ins, attr, reg1, reg2, tmpReg);
}
return immFitsInIns;
}
// We can use pre-indexed addressing.
// stp REG, REG + 1, [SP, #spDelta]!
// 64-bit STP offset range: -512 to 504, multiple of 8.
- getEmitter()->emitIns_R_R_R_I(INS_stp, EA_PTRSIZE, reg1, reg2, REG_SPBASE, spDelta, INS_OPTS_PRE_INDEX);
+ GetEmitter()->emitIns_R_R_R_I(INS_stp, EA_PTRSIZE, reg1, reg2, REG_SPBASE, spDelta, INS_OPTS_PRE_INDEX);
compiler->unwindSaveRegPairPreindexed(reg1, reg2, spDelta);
needToSaveRegs = false;
// stp REG, REG + 1, [SP, #offset]
// 64-bit STP offset range: -512 to 504, multiple of 8.
assert(spOffset <= 504);
- getEmitter()->emitIns_R_R_R_I(INS_stp, EA_PTRSIZE, reg1, reg2, REG_SPBASE, spOffset);
+ GetEmitter()->emitIns_R_R_R_I(INS_stp, EA_PTRSIZE, reg1, reg2, REG_SPBASE, spOffset);
if (useSaveNextPair)
{
{
        // We can use pre-indexed addressing.
// str REG, [SP, #spDelta]!
- getEmitter()->emitIns_R_R_I(INS_str, EA_PTRSIZE, reg1, REG_SPBASE, spDelta, INS_OPTS_PRE_INDEX);
+ GetEmitter()->emitIns_R_R_I(INS_str, EA_PTRSIZE, reg1, REG_SPBASE, spDelta, INS_OPTS_PRE_INDEX);
compiler->unwindSaveRegPreindexed(reg1, spDelta);
needToSaveRegs = false;
{
// str REG, [SP, #offset]
// 64-bit STR offset range: 0 to 32760, multiple of 8.
- getEmitter()->emitIns_R_R_I(INS_str, EA_PTRSIZE, reg1, REG_SPBASE, spOffset);
+ GetEmitter()->emitIns_R_R_I(INS_str, EA_PTRSIZE, reg1, REG_SPBASE, spOffset);
compiler->unwindSaveReg(reg1, spOffset);
}
}
{
// Fold the SP change into this instruction.
// ldp reg1, reg2, [SP], #spDelta
- getEmitter()->emitIns_R_R_R_I(INS_ldp, EA_PTRSIZE, reg1, reg2, REG_SPBASE, spDelta, INS_OPTS_POST_INDEX);
+ GetEmitter()->emitIns_R_R_R_I(INS_ldp, EA_PTRSIZE, reg1, reg2, REG_SPBASE, spDelta, INS_OPTS_POST_INDEX);
compiler->unwindSaveRegPairPreindexed(reg1, reg2, -spDelta);
}
else // (spOffset != 0) || (spDelta > 504)
// Can't fold in the SP change; need to use a separate ADD instruction.
// ldp reg1, reg2, [SP, #offset]
- getEmitter()->emitIns_R_R_R_I(INS_ldp, EA_PTRSIZE, reg1, reg2, REG_SPBASE, spOffset);
+ GetEmitter()->emitIns_R_R_R_I(INS_ldp, EA_PTRSIZE, reg1, reg2, REG_SPBASE, spOffset);
compiler->unwindSaveRegPair(reg1, reg2, spOffset);
// generate add SP,SP,imm
}
else
{
- getEmitter()->emitIns_R_R_R_I(INS_ldp, EA_PTRSIZE, reg1, reg2, REG_SPBASE, spOffset);
+ GetEmitter()->emitIns_R_R_R_I(INS_ldp, EA_PTRSIZE, reg1, reg2, REG_SPBASE, spOffset);
if (useSaveNextPair)
{
{
        // We can use post-indexed addressing.
// ldr REG, [SP], #spDelta
- getEmitter()->emitIns_R_R_I(INS_ldr, EA_PTRSIZE, reg1, REG_SPBASE, spDelta, INS_OPTS_POST_INDEX);
+ GetEmitter()->emitIns_R_R_I(INS_ldr, EA_PTRSIZE, reg1, REG_SPBASE, spDelta, INS_OPTS_POST_INDEX);
compiler->unwindSaveRegPreindexed(reg1, -spDelta);
}
else // (spOffset != 0) || (spDelta > 255)
{
// ldr reg1, [SP, #offset]
- getEmitter()->emitIns_R_R_I(INS_ldr, EA_PTRSIZE, reg1, REG_SPBASE, spOffset);
+ GetEmitter()->emitIns_R_R_I(INS_ldr, EA_PTRSIZE, reg1, REG_SPBASE, spOffset);
compiler->unwindSaveReg(reg1, spOffset);
// generate add SP,SP,imm
else
{
// ldr reg1, [SP, #offset]
- getEmitter()->emitIns_R_R_I(INS_ldr, EA_PTRSIZE, reg1, REG_SPBASE, spOffset);
+ GetEmitter()->emitIns_R_R_I(INS_ldr, EA_PTRSIZE, reg1, REG_SPBASE, spOffset);
compiler->unwindSaveReg(reg1, spOffset);
}
}
if (genFuncletInfo.fiFrameType == 1)
{
- getEmitter()->emitIns_R_R_R_I(INS_stp, EA_PTRSIZE, REG_FP, REG_LR, REG_SPBASE, genFuncletInfo.fiSpDelta1,
+ GetEmitter()->emitIns_R_R_R_I(INS_stp, EA_PTRSIZE, REG_FP, REG_LR, REG_SPBASE, genFuncletInfo.fiSpDelta1,
INS_OPTS_PRE_INDEX);
compiler->unwindSaveRegPairPreindexed(REG_FP, REG_LR, genFuncletInfo.fiSpDelta1);
assert(genFuncletInfo.fiSpDelta2 == 0);
- getEmitter()->emitIns_R_R_R_I(INS_stp, EA_PTRSIZE, REG_FP, REG_LR, REG_SPBASE,
+ GetEmitter()->emitIns_R_R_R_I(INS_stp, EA_PTRSIZE, REG_FP, REG_LR, REG_SPBASE,
genFuncletInfo.fiSP_to_FPLR_save_delta);
compiler->unwindSaveRegPair(REG_FP, REG_LR, genFuncletInfo.fiSP_to_FPLR_save_delta);
}
else if (genFuncletInfo.fiFrameType == 3)
{
- getEmitter()->emitIns_R_R_R_I(INS_stp, EA_PTRSIZE, REG_FP, REG_LR, REG_SPBASE, genFuncletInfo.fiSpDelta1,
+ GetEmitter()->emitIns_R_R_R_I(INS_stp, EA_PTRSIZE, REG_FP, REG_LR, REG_SPBASE, genFuncletInfo.fiSpDelta1,
INS_OPTS_PRE_INDEX);
compiler->unwindSaveRegPairPreindexed(REG_FP, REG_LR, genFuncletInfo.fiSpDelta1);
if (genFuncletInfo.fiFrameType == 1)
{
- getEmitter()->emitIns_R_R_R_I(INS_ldp, EA_PTRSIZE, REG_FP, REG_LR, REG_SPBASE, -genFuncletInfo.fiSpDelta1,
+ GetEmitter()->emitIns_R_R_R_I(INS_ldp, EA_PTRSIZE, REG_FP, REG_LR, REG_SPBASE, -genFuncletInfo.fiSpDelta1,
INS_OPTS_POST_INDEX);
compiler->unwindSaveRegPairPreindexed(REG_FP, REG_LR, genFuncletInfo.fiSpDelta1);
}
else if (genFuncletInfo.fiFrameType == 2)
{
- getEmitter()->emitIns_R_R_R_I(INS_ldp, EA_PTRSIZE, REG_FP, REG_LR, REG_SPBASE,
+ GetEmitter()->emitIns_R_R_R_I(INS_ldp, EA_PTRSIZE, REG_FP, REG_LR, REG_SPBASE,
genFuncletInfo.fiSP_to_FPLR_save_delta);
compiler->unwindSaveRegPair(REG_FP, REG_LR, genFuncletInfo.fiSP_to_FPLR_save_delta);
}
else if (genFuncletInfo.fiFrameType == 3)
{
- getEmitter()->emitIns_R_R_R_I(INS_ldp, EA_PTRSIZE, REG_FP, REG_LR, REG_SPBASE, -genFuncletInfo.fiSpDelta1,
+ GetEmitter()->emitIns_R_R_R_I(INS_ldp, EA_PTRSIZE, REG_FP, REG_LR, REG_SPBASE, -genFuncletInfo.fiSpDelta1,
INS_OPTS_POST_INDEX);
compiler->unwindSaveRegPairPreindexed(REG_FP, REG_LR, genFuncletInfo.fiSpDelta1);
}
if (compiler->lvaPSPSym != BAD_VAR_NUM)
{
- getEmitter()->emitIns_R_S(INS_ldr, EA_PTRSIZE, REG_R0, compiler->lvaPSPSym, 0);
+ GetEmitter()->emitIns_R_S(INS_ldr, EA_PTRSIZE, REG_R0, compiler->lvaPSPSym, 0);
}
else
{
- getEmitter()->emitIns_R_R(INS_mov, EA_PTRSIZE, REG_R0, REG_SPBASE);
+ GetEmitter()->emitIns_R_R(INS_mov, EA_PTRSIZE, REG_R0, REG_SPBASE);
}
- getEmitter()->emitIns_J(INS_bl_local, block->bbJumpDest);
+ GetEmitter()->emitIns_J(INS_bl_local, block->bbJumpDest);
if (block->bbFlags & BBF_RETLESS_CALL)
{
// Because of the way the flowgraph is connected, the liveness info for this one instruction
// after the call is not (can not be) correct in cases where a variable has a last use in the
// handler. So turn off GC reporting for this single instruction.
- getEmitter()->emitDisableGC();
+ GetEmitter()->emitDisableGC();
// Now go to where the finally funclet needs to return to.
if (block->bbNext->bbJumpDest == block->bbNext->bbNext)
inst_JMP(EJ_jmp, block->bbNext->bbJumpDest);
}
- getEmitter()->emitEnableGC();
+ GetEmitter()->emitEnableGC();
}
// The BBJ_ALWAYS is used because the BBJ_CALLFINALLY can't point to the
{
// For long address (default): `adrp + add` will be emitted.
// For short address (proven later): `adr` will be emitted.
- getEmitter()->emitIns_R_L(INS_adr, EA_PTRSIZE, block->bbJumpDest, REG_INTRET);
+ GetEmitter()->emitIns_R_L(INS_adr, EA_PTRSIZE, block->bbJumpDest, REG_INTRET);
}
// move an immediate value into an integer register
if (EA_IS_RELOC(size))
{
// This emits a pair of adrp/add (two instructions) with fix-ups.
- getEmitter()->emitIns_R_AI(INS_adrp, size, reg, imm);
+ GetEmitter()->emitIns_R_AI(INS_adrp, size, reg, imm);
}
else if (imm == 0)
{
{
if (emitter::emitIns_valid_imm_for_mov(imm, size))
{
- getEmitter()->emitIns_R_I(INS_mov, size, reg, imm);
+ GetEmitter()->emitIns_R_I(INS_mov, size, reg, imm);
}
else
{
imm16 = ~imm16;
}
- getEmitter()->emitIns_R_I_I(ins, size, reg, imm16, i, INS_OPTS_LSL);
+ GetEmitter()->emitIns_R_I_I(ins, size, reg, imm16, i, INS_OPTS_LSL);
// Once the initial movz/movn is emitted the remaining instructions will all use movk
ins = INS_movk;
// The caller may have requested that the flags be set on this mov (rarely/never)
if (flags == INS_FLAGS_SET)
{
- getEmitter()->emitIns_R_I(INS_tst, size, reg, 0);
+ GetEmitter()->emitIns_R_I(INS_tst, size, reg, 0);
}
}
case GT_CNS_DBL:
{
- emitter* emit = getEmitter();
+ emitter* emit = GetEmitter();
emitAttr size = emitActualTypeSize(tree);
double constValue = tree->AsDblCon()->gtDconVal;
regNumber targetReg = treeNode->gtRegNum;
var_types targetType = treeNode->TypeGet();
- emitter* emit = getEmitter();
+ emitter* emit = GetEmitter();
emitAttr attr = emitActualTypeSize(treeNode);
unsigned isUnsigned = (treeNode->gtFlags & GTF_UNSIGNED);
const genTreeOps oper = treeNode->OperGet();
regNumber targetReg = treeNode->gtRegNum;
var_types targetType = treeNode->TypeGet();
- emitter* emit = getEmitter();
+ emitter* emit = GetEmitter();
assert(oper == GT_ADD || oper == GT_SUB || oper == GT_MUL || oper == GT_DIV || oper == GT_UDIV || oper == GT_AND ||
oper == GT_OR || oper == GT_XOR);
void CodeGen::genCodeForLclVar(GenTreeLclVar* tree)
{
var_types targetType = tree->TypeGet();
- emitter* emit = getEmitter();
+ emitter* emit = GetEmitter();
unsigned varNum = tree->gtLclNum;
assert(varNum < compiler->lvaCount);
{
var_types targetType = tree->TypeGet();
regNumber targetReg = tree->gtRegNum;
- emitter* emit = getEmitter();
+ emitter* emit = GetEmitter();
noway_assert(targetType != TYP_STRUCT);
#ifdef FEATURE_SIMD
{
var_types targetType = tree->TypeGet();
regNumber targetReg = tree->gtRegNum;
- emitter* emit = getEmitter();
+ emitter* emit = GetEmitter();
unsigned varNum = tree->gtLclNum;
assert(varNum < compiler->lvaCount);
if (varTypeIsSIMD(targetType))
{
assert(targetReg != REG_NA);
- getEmitter()->emitIns_R_I(INS_movi, EA_16BYTE, targetReg, 0x00, INS_OPTS_16B);
+ GetEmitter()->emitIns_R_I(INS_movi, EA_16BYTE, targetReg, 0x00, INS_OPTS_16B);
genProduceReg(tree);
return;
}
if (movRequired)
{
emitAttr attr = emitActualTypeSize(targetType);
- getEmitter()->emitIns_R_R(INS_mov, attr, retReg, op1->gtRegNum);
+ GetEmitter()->emitIns_R_R(INS_mov, attr, retReg, op1->gtRegNum);
}
}
// If 0 bail out by returning null in targetReg
genConsumeRegAndCopy(size, targetReg);
endLabel = genCreateTempLabel();
- getEmitter()->emitIns_R_R(INS_tst, easz, targetReg, targetReg);
+ GetEmitter()->emitIns_R_R(INS_tst, easz, targetReg, targetReg);
inst_JMP(EJ_eq, endLabel);
// Compute the size of the block to allocate and perform alignment.
{
// We can use pre-indexed addressing.
// stp ZR, ZR, [SP, #-16]! // STACK_ALIGN is 16
- getEmitter()->emitIns_R_R_R_I(INS_stp, EA_PTRSIZE, REG_ZR, REG_ZR, REG_SPBASE, -16, INS_OPTS_PRE_INDEX);
+ GetEmitter()->emitIns_R_R_R_I(INS_stp, EA_PTRSIZE, REG_ZR, REG_ZR, REG_SPBASE, -16, INS_OPTS_PRE_INDEX);
stpCount -= 1;
}
// the alloc, not after.
// ldr wz, [SP, #0]
- getEmitter()->emitIns_R_R_I(INS_ldr, EA_4BYTE, REG_ZR, REG_SP, 0);
+ GetEmitter()->emitIns_R_R_I(INS_ldr, EA_4BYTE, REG_ZR, REG_SP, 0);
inst_RV_IV(INS_sub, REG_SP, amount, EA_PTRSIZE);
// We can use pre-indexed addressing.
// stp ZR, ZR, [SP, #-16]!
- getEmitter()->emitIns_R_R_R_I(INS_stp, EA_PTRSIZE, REG_ZR, REG_ZR, REG_SPBASE, -16, INS_OPTS_PRE_INDEX);
+ GetEmitter()->emitIns_R_R_R_I(INS_stp, EA_PTRSIZE, REG_ZR, REG_ZR, REG_SPBASE, -16, INS_OPTS_PRE_INDEX);
// If not done, loop
// Note that regCnt is the number of bytes to stack allocate.
BasicBlock* done = genCreateTempLabel();
// subs regCnt, SP, regCnt // regCnt now holds ultimate SP
- getEmitter()->emitIns_R_R_R(INS_subs, EA_PTRSIZE, regCnt, REG_SPBASE, regCnt);
+ GetEmitter()->emitIns_R_R_R(INS_subs, EA_PTRSIZE, regCnt, REG_SPBASE, regCnt);
inst_JMP(EJ_vc, loop); // branch if the V flag is not set
genDefineTempLabel(loop);
// tickle the page - Read from the updated SP - this triggers a page fault when on the guard page
- getEmitter()->emitIns_R_R_I(INS_ldr, EA_4BYTE, REG_ZR, REG_SPBASE, 0);
+ GetEmitter()->emitIns_R_R_I(INS_ldr, EA_4BYTE, REG_ZR, REG_SPBASE, 0);
// decrement SP by eeGetPageSize()
- getEmitter()->emitIns_R_R_I(INS_sub, EA_PTRSIZE, regTmp, REG_SPBASE, compiler->eeGetPageSize());
+ GetEmitter()->emitIns_R_R_I(INS_sub, EA_PTRSIZE, regTmp, REG_SPBASE, compiler->eeGetPageSize());
- getEmitter()->emitIns_R_R(INS_cmp, EA_PTRSIZE, regTmp, regCnt);
+ GetEmitter()->emitIns_R_R(INS_cmp, EA_PTRSIZE, regTmp, regCnt);
inst_JMP(EJ_lo, done);
// Update SP to be at the next page of stack that we will tickle
- getEmitter()->emitIns_R_R(INS_mov, EA_PTRSIZE, REG_SPBASE, regTmp);
+ GetEmitter()->emitIns_R_R(INS_mov, EA_PTRSIZE, REG_SPBASE, regTmp);
// Jump to loop and tickle new stack address
inst_JMP(EJ_jmp, loop);
genDefineTempLabel(done);
// Now just move the final value to SP
- getEmitter()->emitIns_R_R(INS_mov, EA_PTRSIZE, REG_SPBASE, regCnt);
+ GetEmitter()->emitIns_R_R(INS_mov, EA_PTRSIZE, REG_SPBASE, regCnt);
// lastTouchDelta is dynamic, and can be up to a page. So if we have outgoing arg space,
// we're going to assume the worst and probe.
// The src must be a register.
regNumber operandReg = genConsumeReg(operand);
- getEmitter()->emitIns_R_R(ins, emitActualTypeSize(tree), targetReg, operandReg);
+ GetEmitter()->emitIns_R_R(ins, emitActualTypeSize(tree), targetReg, operandReg);
genProduceReg(tree);
}
assert(tree->OperIs(GT_DIV, GT_UDIV));
var_types targetType = tree->TypeGet();
- emitter* emit = getEmitter();
+ emitter* emit = GetEmitter();
genConsumeOperands(tree);
assert(size != 0);
assert(size <= INITBLK_UNROLL_LIMIT);
- emitter* emit = getEmitter();
+ emitter* emit = GetEmitter();
genConsumeOperands(initBlkNode);
// offset: distance from the base from which to load
void CodeGen::genCodeForLoadPairOffset(regNumber dst, regNumber dst2, GenTree* base, unsigned offset)
{
- emitter* emit = getEmitter();
+ emitter* emit = GetEmitter();
if (base->OperIsLocalAddr())
{
// offset: distance from the base from which to load
void CodeGen::genCodeForStorePairOffset(regNumber src, regNumber src2, GenTree* base, unsigned offset)
{
- emitter* emit = getEmitter();
+ emitter* emit = GetEmitter();
if (base->OperIsLocalAddr())
{
instGen_MemoryBarrier();
}
- emitter* emit = getEmitter();
+ emitter* emit = GetEmitter();
// If we can prove it's on the stack we don't need to use the write barrier.
if (dstOnStack)
regNumber tmpReg = treeNode->GetSingleTempReg();
// load the ip-relative offset (which is relative to start of fgFirstBB)
- getEmitter()->emitIns_R_R_R(INS_ldr, EA_4BYTE, baseReg, baseReg, idxReg, INS_OPTS_LSL);
+ GetEmitter()->emitIns_R_R_R(INS_ldr, EA_4BYTE, baseReg, baseReg, idxReg, INS_OPTS_LSL);
// add it to the absolute address of fgFirstBB
compiler->fgFirstBB->bbFlags |= BBF_JMP_TARGET;
- getEmitter()->emitIns_R_L(INS_adr, EA_PTRSIZE, compiler->fgFirstBB, tmpReg);
- getEmitter()->emitIns_R_R_R(INS_add, EA_PTRSIZE, baseReg, baseReg, tmpReg);
+ GetEmitter()->emitIns_R_L(INS_adr, EA_PTRSIZE, compiler->fgFirstBB, tmpReg);
+ GetEmitter()->emitIns_R_R_R(INS_add, EA_PTRSIZE, baseReg, baseReg, tmpReg);
// br baseReg
- getEmitter()->emitIns_R(INS_br, emitActualTypeSize(TYP_I_IMPL), baseReg);
+ GetEmitter()->emitIns_R(INS_br, emitActualTypeSize(TYP_I_IMPL), baseReg);
}
// emits the table and an instruction to get the address of the first element
unsigned jmpTabOffs;
unsigned jmpTabBase;
- jmpTabBase = getEmitter()->emitBBTableDataGenBeg(jumpCount, true);
+ jmpTabBase = GetEmitter()->emitBBTableDataGenBeg(jumpCount, true);
jmpTabOffs = 0;
JITDUMP(" DD L_M%03u_" FMT_BB "\n", Compiler::s_compMethodsCount, target->bbNum);
- getEmitter()->emitDataGenData(i, target);
+ GetEmitter()->emitDataGenData(i, target);
};
- getEmitter()->emitDataGenEnd();
+ GetEmitter()->emitDataGenEnd();
// Access to inline data is 'abstracted' by a special type of static member
// (produced by eeFindJitDataOffs) which the emitter recognizes as being a reference
// to constant data, not a real static field.
- getEmitter()->emitIns_R_C(INS_adr, emitActualTypeSize(TYP_I_IMPL), treeNode->gtRegNum, REG_NA,
+ GetEmitter()->emitIns_R_C(INS_adr, emitActualTypeSize(TYP_I_IMPL), treeNode->gtRegNum, REG_NA,
compiler->eeFindJitDataOffs(jmpTabBase), 0);
genProduceReg(treeNode);
}
switch (treeNode->gtOper)
{
case GT_XCHG:
- getEmitter()->emitIns_R_R_R(INS_swpal, dataSize, dataReg, targetReg, addrReg);
+ GetEmitter()->emitIns_R_R_R(INS_swpal, dataSize, dataReg, targetReg, addrReg);
break;
case GT_XADD:
if ((targetReg == REG_NA) || (targetReg == REG_ZR))
{
- getEmitter()->emitIns_R_R(INS_staddl, dataSize, dataReg, addrReg);
+ GetEmitter()->emitIns_R_R(INS_staddl, dataSize, dataReg, addrReg);
}
else
{
- getEmitter()->emitIns_R_R_R(INS_ldaddal, dataSize, dataReg, targetReg, addrReg);
+ GetEmitter()->emitIns_R_R_R(INS_ldaddal, dataSize, dataReg, targetReg, addrReg);
}
break;
default:
genDefineTempLabel(labelRetry);
    // The following instruction includes an acquire half barrier
- getEmitter()->emitIns_R_R(INS_ldaxr, dataSize, loadReg, addrReg);
+ GetEmitter()->emitIns_R_R(INS_ldaxr, dataSize, loadReg, addrReg);
switch (treeNode->OperGet())
{
}
else
{
- getEmitter()->emitIns_R_R_R(INS_add, dataSize, storeDataReg, loadReg, dataReg);
+ GetEmitter()->emitIns_R_R_R(INS_add, dataSize, storeDataReg, loadReg, dataReg);
}
break;
case GT_XCHG:
}
// The following instruction includes a release half barrier
- getEmitter()->emitIns_R_R_R(INS_stlxr, dataSize, exResultReg, storeDataReg, addrReg);
+ GetEmitter()->emitIns_R_R_R(INS_stlxr, dataSize, exResultReg, storeDataReg, addrReg);
- getEmitter()->emitIns_J_R(INS_cbnz, EA_4BYTE, labelRetry, exResultReg);
+ GetEmitter()->emitIns_J_R(INS_cbnz, EA_4BYTE, labelRetry, exResultReg);
instGen_MemoryBarrier(INS_BARRIER_ISH);
// casal use the comparand as the target reg
if (targetReg != comparandReg)
{
- getEmitter()->emitIns_R_R(INS_mov, dataSize, targetReg, comparandReg);
+ GetEmitter()->emitIns_R_R(INS_mov, dataSize, targetReg, comparandReg);
// Catch case we destroyed data or address before use
noway_assert(addrReg != targetReg);
noway_assert(dataReg != targetReg);
}
- getEmitter()->emitIns_R_R_R(INS_casal, dataSize, targetReg, dataReg, addrReg);
+ GetEmitter()->emitIns_R_R_R(INS_casal, dataSize, targetReg, dataReg, addrReg);
instGen_MemoryBarrier(INS_BARRIER_ISH);
}
genDefineTempLabel(labelRetry);
    // The following instruction includes an acquire half barrier
- getEmitter()->emitIns_R_R(INS_ldaxr, emitTypeSize(treeNode), targetReg, addrReg);
+ GetEmitter()->emitIns_R_R(INS_ldaxr, emitTypeSize(treeNode), targetReg, addrReg);
if (comparand->isContainedIntOrIImmed())
{
if (comparand->IsIntegralConst(0))
{
- getEmitter()->emitIns_J_R(INS_cbnz, emitActualTypeSize(treeNode), labelCompareFail, targetReg);
+ GetEmitter()->emitIns_J_R(INS_cbnz, emitActualTypeSize(treeNode), labelCompareFail, targetReg);
}
else
{
- getEmitter()->emitIns_R_I(INS_cmp, emitActualTypeSize(treeNode), targetReg,
+ GetEmitter()->emitIns_R_I(INS_cmp, emitActualTypeSize(treeNode), targetReg,
comparand->AsIntConCommon()->IconValue());
- getEmitter()->emitIns_J(INS_bne, labelCompareFail);
+ GetEmitter()->emitIns_J(INS_bne, labelCompareFail);
}
}
else
{
- getEmitter()->emitIns_R_R(INS_cmp, emitActualTypeSize(treeNode), targetReg, comparandReg);
- getEmitter()->emitIns_J(INS_bne, labelCompareFail);
+ GetEmitter()->emitIns_R_R(INS_cmp, emitActualTypeSize(treeNode), targetReg, comparandReg);
+ GetEmitter()->emitIns_J(INS_bne, labelCompareFail);
}
// The following instruction includes a release half barrier
- getEmitter()->emitIns_R_R_R(INS_stlxr, emitTypeSize(treeNode), exResultReg, dataReg, addrReg);
+ GetEmitter()->emitIns_R_R_R(INS_stlxr, emitTypeSize(treeNode), exResultReg, dataReg, addrReg);
- getEmitter()->emitIns_J_R(INS_cbnz, EA_4BYTE, labelRetry, exResultReg);
+ GetEmitter()->emitIns_J_R(INS_cbnz, EA_4BYTE, labelRetry, exResultReg);
genDefineTempLabel(labelCompareFail);
GenTree* data = tree->gtOp1;
genConsumeRegs(data);
- getEmitter()->emitIns_R_I(INS_cmp, EA_4BYTE, data->gtRegNum, 0);
+ GetEmitter()->emitIns_R_I(INS_cmp, EA_4BYTE, data->gtRegNum, 0);
BasicBlock* skipLabel = genCreateTempLabel();
}
}
- getEmitter()->emitInsLoadStoreOp(ins, emitActualTypeSize(type), dataReg, tree);
+ GetEmitter()->emitInsLoadStoreOp(ins, emitActualTypeSize(type), dataReg, tree);
}
}
genConsumeOperands(treeNode->AsOp());
- getEmitter()->emitIns_R_R(ins, emitActualTypeSize(dstType), treeNode->gtRegNum, op1->gtRegNum, cvtOption);
+ GetEmitter()->emitIns_R_R(ins, emitActualTypeSize(dstType), treeNode->gtRegNum, op1->gtRegNum, cvtOption);
genProduceReg(treeNode);
}
genConsumeOperands(treeNode->AsOp());
- getEmitter()->emitIns_R_R(ins, dstSize, treeNode->gtRegNum, op1->gtRegNum, cvtOption);
+ GetEmitter()->emitIns_R_R(ins, dstSize, treeNode->gtRegNum, op1->gtRegNum, cvtOption);
genProduceReg(treeNode);
}
int expMask = (targetType == TYP_FLOAT) ? 0x7F8 : 0x7FF; // Bit mask to extract exponent.
int shiftAmount = targetType == TYP_FLOAT ? 20 : 52;
- emitter* emit = getEmitter();
+ emitter* emit = GetEmitter();
// Extract exponent into a register.
regNumber intReg = treeNode->GetSingleTempReg();
void CodeGen::genCodeForCompare(GenTreeOp* tree)
{
regNumber targetReg = tree->gtRegNum;
- emitter* emit = getEmitter();
+ emitter* emit = GetEmitter();
GenTree* op1 = tree->gtOp1;
GenTree* op2 = tree->gtOp2;
instruction ins = (tree->gtFlags & GTF_JCMP_EQ) ? INS_tbz : INS_tbnz;
int imm = genLog2((size_t)compareImm);
- getEmitter()->emitIns_J_R_I(ins, attr, compiler->compCurBB->bbJumpDest, reg, imm);
+ GetEmitter()->emitIns_J_R_I(ins, attr, compiler->compCurBB->bbJumpDest, reg, imm);
}
else
{
instruction ins = (tree->gtFlags & GTF_JCMP_EQ) ? INS_cbz : INS_cbnz;
- getEmitter()->emitIns_J_R(ins, attr, compiler->compCurBB->bbJumpDest, reg);
+ GetEmitter()->emitIns_J_R(ins, attr, compiler->compCurBB->bbJumpDest, reg);
}
}
callTarget = callTargetReg;
// adrp + add with relocations will be emitted
- getEmitter()->emitIns_R_AI(INS_adrp, EA_PTR_DSP_RELOC, callTarget, (ssize_t)pAddr);
- getEmitter()->emitIns_R_R(INS_ldr, EA_PTRSIZE, callTarget, callTarget);
+ GetEmitter()->emitIns_R_AI(INS_adrp, EA_PTR_DSP_RELOC, callTarget, (ssize_t)pAddr);
+ GetEmitter()->emitIns_R_R(INS_ldr, EA_PTRSIZE, callTarget, callTarget);
callType = emitter::EC_INDIR_R;
}
- getEmitter()->emitIns_Call(callType, compiler->eeFindHelper(helper), INDEBUG_LDISASM_COMMA(nullptr) addr, argSize,
+ GetEmitter()->emitIns_Call(callType, compiler->eeFindHelper(helper), INDEBUG_LDISASM_COMMA(nullptr) addr, argSize,
retSize, EA_UNKNOWN, gcInfo.gcVarPtrSetCur, gcInfo.gcRegGCrefSetCur,
gcInfo.gcRegByrefSetCur, BAD_IL_OFFSET, /* IL offset */
callTarget, /* ireg */
if (genIsValidIntReg(op1Reg))
{
- getEmitter()->emitIns_R_R(INS_dup, attr, targetReg, op1Reg, opt);
+ GetEmitter()->emitIns_R_R(INS_dup, attr, targetReg, op1Reg, opt);
}
else
{
- getEmitter()->emitIns_R_R_I(INS_dup, attr, targetReg, op1Reg, 0, opt);
+ GetEmitter()->emitIns_R_R_I(INS_dup, attr, targetReg, op1Reg, 0, opt);
}
genProduceReg(simdNode);
if (initCount * baseTypeSize < EA_16BYTE)
{
- getEmitter()->emitIns_R_I(INS_movi, EA_16BYTE, vectorReg, 0x00, INS_OPTS_16B);
+ GetEmitter()->emitIns_R_I(INS_movi, EA_16BYTE, vectorReg, 0x00, INS_OPTS_16B);
}
if (varTypeIsIntegral(baseType))
{
for (unsigned i = 0; i < initCount; i++)
{
- getEmitter()->emitIns_R_R_I(INS_ins, baseTypeSize, vectorReg, operandRegs[i], i);
+ GetEmitter()->emitIns_R_R_I(INS_ins, baseTypeSize, vectorReg, operandRegs[i], i);
}
}
else
{
for (unsigned i = 0; i < initCount; i++)
{
- getEmitter()->emitIns_R_R_I_I(INS_ins, baseTypeSize, vectorReg, operandRegs[i], i, 0);
+ GetEmitter()->emitIns_R_R_I_I(INS_ins, baseTypeSize, vectorReg, operandRegs[i], i, 0);
}
}
// Load the initialized value.
if (targetReg != vectorReg)
{
- getEmitter()->emitIns_R_R(INS_mov, EA_16BYTE, targetReg, vectorReg);
+ GetEmitter()->emitIns_R_R(INS_mov, EA_16BYTE, targetReg, vectorReg);
}
genProduceReg(simdNode);
emitAttr attr = (simdNode->gtSIMDSize > 8) ? EA_16BYTE : EA_8BYTE;
insOpts opt = (ins == INS_mov) ? INS_OPTS_NONE : genGetSimdInsOpt(attr, baseType);
- getEmitter()->emitIns_R_R(ins, attr, targetReg, op1Reg, opt);
+ GetEmitter()->emitIns_R_R(ins, attr, targetReg, op1Reg, opt);
genProduceReg(simdNode);
}
if (varTypeIsFloating(baseType))
{
- getEmitter()->emitIns_R_R(ins, EA_8BYTE, targetReg, op1Reg);
+ GetEmitter()->emitIns_R_R(ins, EA_8BYTE, targetReg, op1Reg);
}
else
{
emitAttr attr = (simdNode->gtSIMDIntrinsicID == SIMDIntrinsicWidenHi) ? EA_16BYTE : EA_8BYTE;
insOpts opt = genGetSimdInsOpt(attr, baseType);
- getEmitter()->emitIns_R_R(ins, attr, targetReg, op1Reg, opt);
+ GetEmitter()->emitIns_R_R(ins, attr, targetReg, op1Reg, opt);
}
genProduceReg(simdNode);
if (ins == INS_fcvtn)
{
- getEmitter()->emitIns_R_R(INS_fcvtn, EA_8BYTE, targetReg, op1Reg);
- getEmitter()->emitIns_R_R(INS_fcvtn2, EA_8BYTE, targetReg, op2Reg);
+ GetEmitter()->emitIns_R_R(INS_fcvtn, EA_8BYTE, targetReg, op1Reg);
+ GetEmitter()->emitIns_R_R(INS_fcvtn2, EA_8BYTE, targetReg, op2Reg);
}
else
{
assert(!"Unsupported narrowing element type");
unreached();
}
- getEmitter()->emitIns_R_R(INS_xtn, EA_8BYTE, targetReg, op1Reg, opt);
- getEmitter()->emitIns_R_R(INS_xtn2, EA_16BYTE, targetReg, op2Reg, opt2);
+ GetEmitter()->emitIns_R_R(INS_xtn, EA_8BYTE, targetReg, op1Reg, opt);
+ GetEmitter()->emitIns_R_R(INS_xtn2, EA_16BYTE, targetReg, op2Reg, opt2);
}
genProduceReg(simdNode);
emitAttr attr = (simdNode->gtSIMDSize > 8) ? EA_16BYTE : EA_8BYTE;
insOpts opt = genGetSimdInsOpt(attr, baseType);
- getEmitter()->emitIns_R_R_R(ins, attr, targetReg, op1Reg, op2Reg, opt);
+ GetEmitter()->emitIns_R_R_R(ins, attr, targetReg, op1Reg, op2Reg, opt);
genProduceReg(simdNode);
}
regNumber tmpFloatReg = simdNode->GetSingleTempReg(RBM_ALLFLOAT);
- getEmitter()->emitIns_R_R_R(ins, attr, tmpFloatReg, op1Reg, op2Reg, opt);
+ GetEmitter()->emitIns_R_R_R(ins, attr, tmpFloatReg, op1Reg, op2Reg, opt);
if ((simdNode->gtFlags & GTF_SIMD12_OP) != 0)
{
// For 12Byte vectors we must set upper bits to get correct comparison
// We do not assume upper bits are zero.
instGen_Set_Reg_To_Imm(EA_4BYTE, targetReg, -1);
- getEmitter()->emitIns_R_R_I(INS_ins, EA_4BYTE, tmpFloatReg, targetReg, 3);
+ GetEmitter()->emitIns_R_R_I(INS_ins, EA_4BYTE, tmpFloatReg, targetReg, 3);
}
- getEmitter()->emitIns_R_R(INS_uminv, attr, tmpFloatReg, tmpFloatReg,
+ GetEmitter()->emitIns_R_R(INS_uminv, attr, tmpFloatReg, tmpFloatReg,
(simdNode->gtSIMDSize > 8) ? INS_OPTS_16B : INS_OPTS_8B);
- getEmitter()->emitIns_R_R_I(INS_mov, EA_1BYTE, targetReg, tmpFloatReg, 0);
+ GetEmitter()->emitIns_R_R_I(INS_mov, EA_1BYTE, targetReg, tmpFloatReg, 0);
if (simdNode->gtSIMDIntrinsicID == SIMDIntrinsicOpInEquality)
{
- getEmitter()->emitIns_R_R_I(INS_eor, EA_4BYTE, targetReg, targetReg, 0x1);
+ GetEmitter()->emitIns_R_R_I(INS_eor, EA_4BYTE, targetReg, targetReg, 0x1);
}
- getEmitter()->emitIns_R_R_I(INS_and, EA_4BYTE, targetReg, targetReg, 0x1);
+ GetEmitter()->emitIns_R_R_I(INS_and, EA_4BYTE, targetReg, targetReg, 0x1);
genProduceReg(simdNode);
}
insOpts opt = genGetSimdInsOpt(attr, baseType);
// Vector multiply
- getEmitter()->emitIns_R_R_R(ins, attr, tmpReg, op1Reg, op2Reg, opt);
+ GetEmitter()->emitIns_R_R_R(ins, attr, tmpReg, op1Reg, op2Reg, opt);
if ((simdNode->gtFlags & GTF_SIMD12_OP) != 0)
{
// For 12Byte vectors we must zero upper bits to get correct dot product
// We do not assume upper bits are zero.
- getEmitter()->emitIns_R_R_I(INS_ins, EA_4BYTE, tmpReg, REG_ZR, 3);
+ GetEmitter()->emitIns_R_R_I(INS_ins, EA_4BYTE, tmpReg, REG_ZR, 3);
}
// Vector add horizontal
{
if (opt == INS_OPTS_4S)
{
- getEmitter()->emitIns_R_R_R(INS_faddp, attr, tmpReg, tmpReg, tmpReg, INS_OPTS_4S);
+ GetEmitter()->emitIns_R_R_R(INS_faddp, attr, tmpReg, tmpReg, tmpReg, INS_OPTS_4S);
}
- getEmitter()->emitIns_R_R(INS_faddp, EA_4BYTE, targetReg, tmpReg);
+ GetEmitter()->emitIns_R_R(INS_faddp, EA_4BYTE, targetReg, tmpReg);
}
else
{
- getEmitter()->emitIns_R_R(INS_faddp, EA_8BYTE, targetReg, tmpReg);
+ GetEmitter()->emitIns_R_R(INS_faddp, EA_8BYTE, targetReg, tmpReg);
}
}
else
{
ins = varTypeIsUnsigned(baseType) ? INS_uaddlv : INS_saddlv;
- getEmitter()->emitIns_R_R(ins, attr, tmpReg, tmpReg, opt);
+ GetEmitter()->emitIns_R_R(ins, attr, tmpReg, tmpReg, opt);
// Mov to integer register
if (varTypeIsUnsigned(baseType) || (genTypeSize(baseType) < 4))
{
- getEmitter()->emitIns_R_R_I(INS_mov, emitTypeSize(baseType), targetReg, tmpReg, 0);
+ GetEmitter()->emitIns_R_R_I(INS_mov, emitTypeSize(baseType), targetReg, tmpReg, 0);
}
else
{
- getEmitter()->emitIns_R_R_I(INS_smov, emitActualTypeSize(baseType), targetReg, tmpReg, 0);
+ GetEmitter()->emitIns_R_R_I(INS_smov, emitActualTypeSize(baseType), targetReg, tmpReg, 0);
}
}
// We only need to generate code for the get if the index is valid
// If the index is invalid, previously generated for the range check will throw
- if (getEmitter()->isValidVectorIndex(emitTypeSize(simdType), baseTypeSize, index))
+ if (GetEmitter()->isValidVectorIndex(emitTypeSize(simdType), baseTypeSize, index))
{
if (op1->isContained())
{
{
unsigned varNum = op1->gtLclVarCommon.gtLclNum;
- getEmitter()->emitIns_R_S(ins, emitActualTypeSize(baseType), targetReg, varNum, offset);
+ GetEmitter()->emitIns_R_S(ins, emitActualTypeSize(baseType), targetReg, varNum, offset);
}
else
{
regNumber baseReg = addr->gtRegNum;
// ldr targetReg, [baseReg, #offset]
- getEmitter()->emitIns_R_R_I(ins, emitActualTypeSize(baseType), targetReg, baseReg, offset);
+ GetEmitter()->emitIns_R_R_I(ins, emitActualTypeSize(baseType), targetReg, baseReg, offset);
}
}
else
ins = INS_smov;
}
}
- getEmitter()->emitIns_R_R_I(ins, baseTypeSize, targetReg, srcReg, index);
+ GetEmitter()->emitIns_R_R_I(ins, baseTypeSize, targetReg, srcReg, index);
}
}
}
baseReg = simdNode->ExtractTempReg();
// Load the address of varNum
- getEmitter()->emitIns_R_S(INS_lea, EA_PTRSIZE, baseReg, varNum, 0);
+ GetEmitter()->emitIns_R_S(INS_lea, EA_PTRSIZE, baseReg, varNum, 0);
}
else
{
baseReg = simdNode->ExtractTempReg();
// Load the address of simdInitTempVarNum
- getEmitter()->emitIns_R_S(INS_lea, EA_PTRSIZE, baseReg, simdInitTempVarNum, 0);
+ GetEmitter()->emitIns_R_S(INS_lea, EA_PTRSIZE, baseReg, simdInitTempVarNum, 0);
// Store the vector to simdInitTempVarNum
- getEmitter()->emitIns_R_R(INS_str, emitTypeSize(simdType), srcReg, baseReg);
+ GetEmitter()->emitIns_R_R(INS_str, emitTypeSize(simdType), srcReg, baseReg);
}
assert(genIsValidIntReg(indexReg));
assert(baseReg != indexReg);
// Load item at baseReg[index]
- getEmitter()->emitIns_R_R_R_Ext(ins_Load(baseType), baseTypeSize, targetReg, baseReg, indexReg, INS_OPTS_LSL,
+ GetEmitter()->emitIns_R_R_R_Ext(ins_Load(baseType), baseTypeSize, targetReg, baseReg, indexReg, INS_OPTS_LSL,
baseTypeScale);
}
emitAttr attr = emitTypeSize(baseType);
// Insert mov if register assignment requires it
- getEmitter()->emitIns_R_R(INS_mov, EA_16BYTE, targetReg, op1Reg);
+ GetEmitter()->emitIns_R_R(INS_mov, EA_16BYTE, targetReg, op1Reg);
if (genIsValidIntReg(op2Reg))
{
- getEmitter()->emitIns_R_R_I(INS_ins, attr, targetReg, op2Reg, index);
+ GetEmitter()->emitIns_R_R_I(INS_ins, attr, targetReg, op2Reg, index);
}
else
{
- getEmitter()->emitIns_R_R_I_I(INS_ins, attr, targetReg, op2Reg, index, 0);
+ GetEmitter()->emitIns_R_R_I_I(INS_ins, attr, targetReg, op2Reg, index, 0);
}
genProduceReg(simdNode);
regNumber op1Reg = genConsumeReg(op1);
assert(op1Reg != REG_NA);
assert(targetReg != REG_NA);
- getEmitter()->emitIns_R_R_I_I(INS_mov, EA_8BYTE, targetReg, op1Reg, 0, 1);
+ GetEmitter()->emitIns_R_R_I_I(INS_mov, EA_8BYTE, targetReg, op1Reg, 0, 1);
if ((simdNode->gtFlags & GTF_SPILL) != 0)
{
int offset = 8;
emitAttr attr = emitTypeSize(TYP_SIMD8);
- getEmitter()->emitIns_S_R(INS_str, attr, targetReg, varNum, offset);
+ GetEmitter()->emitIns_S_R(INS_str, attr, targetReg, varNum, offset);
}
else
{
int offset = 8;
emitAttr attr = emitTypeSize(TYP_SIMD8);
- getEmitter()->emitIns_R_S(INS_ldr, attr, srcReg, varNum, offset);
+ GetEmitter()->emitIns_R_S(INS_ldr, attr, srcReg, varNum, offset);
}
- getEmitter()->emitIns_R_R_I_I(INS_mov, EA_8BYTE, lclVarReg, srcReg, 1, 0);
+ GetEmitter()->emitIns_R_R_I_I(INS_mov, EA_8BYTE, lclVarReg, srcReg, 1, 0);
}
//-----------------------------------------------------------------------------
assert(tmpReg != addr->gtRegNum);
// 8-byte write
- getEmitter()->emitIns_R_R(INS_str, EA_8BYTE, data->gtRegNum, addr->gtRegNum);
+ GetEmitter()->emitIns_R_R(INS_str, EA_8BYTE, data->gtRegNum, addr->gtRegNum);
// Extract upper 4-bytes from data
- getEmitter()->emitIns_R_R_I(INS_mov, EA_4BYTE, tmpReg, data->gtRegNum, 2);
+ GetEmitter()->emitIns_R_R_I(INS_mov, EA_4BYTE, tmpReg, data->gtRegNum, 2);
// 4-byte write
- getEmitter()->emitIns_R_R_I(INS_str, EA_4BYTE, tmpReg, addr->gtRegNum, 8);
+ GetEmitter()->emitIns_R_R_I(INS_str, EA_4BYTE, tmpReg, addr->gtRegNum, 8);
}
//-----------------------------------------------------------------------------
regNumber tmpReg = treeNode->GetSingleTempReg();
// 8-byte read
- getEmitter()->emitIns_R_R(INS_ldr, EA_8BYTE, targetReg, addr->gtRegNum);
+ GetEmitter()->emitIns_R_R(INS_ldr, EA_8BYTE, targetReg, addr->gtRegNum);
// 4-byte read
- getEmitter()->emitIns_R_R_I(INS_ldr, EA_4BYTE, tmpReg, addr->gtRegNum, 8);
+ GetEmitter()->emitIns_R_R_I(INS_ldr, EA_4BYTE, tmpReg, addr->gtRegNum, 8);
// Insert upper 4-bytes into data
- getEmitter()->emitIns_R_R_I(INS_mov, EA_4BYTE, targetReg, tmpReg, 2);
+ GetEmitter()->emitIns_R_R_I(INS_mov, EA_4BYTE, targetReg, tmpReg, 2);
genProduceReg(treeNode);
}
regNumber tmpReg = treeNode->GetSingleTempReg();
// store lower 8 bytes
- getEmitter()->emitIns_S_R(INS_str, EA_8BYTE, operandReg, varNum, offs);
+ GetEmitter()->emitIns_S_R(INS_str, EA_8BYTE, operandReg, varNum, offs);
// Extract upper 4-bytes from data
- getEmitter()->emitIns_R_R_I(INS_mov, EA_4BYTE, tmpReg, operandReg, 2);
+ GetEmitter()->emitIns_R_R_I(INS_mov, EA_4BYTE, tmpReg, operandReg, 2);
// 4-byte write
- getEmitter()->emitIns_S_R(INS_str, EA_4BYTE, tmpReg, varNum, offs + 8);
+ GetEmitter()->emitIns_S_R(INS_str, EA_4BYTE, tmpReg, varNum, offs + 8);
}
#endif // FEATURE_SIMD
instruction ins = getOpForHWIntrinsic(node, node->TypeGet());
- getEmitter()->emitIns_R_R(ins, attr, targetReg, op1Reg);
+ GetEmitter()->emitIns_R_R(ins, attr, targetReg, op1Reg);
genProduceReg(node);
}
emitAttr attr = (node->gtSIMDSize > 8) ? EA_16BYTE : EA_8BYTE;
insOpts opt = genGetSimdInsOpt(attr, baseType);
- getEmitter()->emitIns_R_R_R(ins, attr, targetReg, op1Reg, op2Reg, opt);
+ GetEmitter()->emitIns_R_R_R(ins, attr, targetReg, op1Reg, op2Reg, opt);
genProduceReg(node);
}
switchTableBeg->bbFlags |= BBF_JMP_TARGET;
// tmpReg = switchTableBeg
- getEmitter()->emitIns_R_L(INS_adr, EA_PTRSIZE, switchTableBeg, tmpReg);
+ GetEmitter()->emitIns_R_L(INS_adr, EA_PTRSIZE, switchTableBeg, tmpReg);
// tmpReg = switchTableBeg + swReg * 8
- getEmitter()->emitIns_R_R_R_I(INS_add, EA_PTRSIZE, tmpReg, tmpReg, swReg, 3, INS_OPTS_LSL);
+ GetEmitter()->emitIns_R_R_R_I(INS_add, EA_PTRSIZE, tmpReg, tmpReg, swReg, 3, INS_OPTS_LSL);
// br tmpReg
- getEmitter()->emitIns_R(INS_br, EA_PTRSIZE, tmpReg);
+ GetEmitter()->emitIns_R(INS_br, EA_PTRSIZE, tmpReg);
genDefineTempLabel(switchTableBeg);
for (int i = 0; i < swMax; ++i)
{
- unsigned prevInsCount = getEmitter()->emitInsCount;
+ unsigned prevInsCount = GetEmitter()->emitInsCount;
emitSwCase(i);
- assert(getEmitter()->emitInsCount == prevInsCount + 1);
+ assert(GetEmitter()->emitInsCount == prevInsCount + 1);
inst_JMP(EJ_jmp, switchTableEnd);
- assert(getEmitter()->emitInsCount == prevInsCount + 2);
+ assert(GetEmitter()->emitInsCount == prevInsCount + 2);
}
genDefineTempLabel(switchTableEnd);
}
if (varTypeIsFloating(targetType))
{
assert(genIsValidFloatReg(targetReg));
- getEmitter()->emitIns_R_R_I_I(INS_mov, baseTypeSize, targetReg, op1Reg, 0, element);
+ GetEmitter()->emitIns_R_R_I_I(INS_mov, baseTypeSize, targetReg, op1Reg, 0, element);
}
else if (varTypeIsUnsigned(targetType) || (baseTypeSize == EA_8BYTE))
{
assert(genIsValidIntReg(targetReg));
- getEmitter()->emitIns_R_R_I(INS_umov, baseTypeSize, targetReg, op1Reg, element);
+ GetEmitter()->emitIns_R_R_I(INS_umov, baseTypeSize, targetReg, op1Reg, element);
}
else
{
assert(genIsValidIntReg(targetReg));
- getEmitter()->emitIns_R_R_I(INS_smov, baseTypeSize, targetReg, op1Reg, element);
+ GetEmitter()->emitIns_R_R_I(INS_smov, baseTypeSize, targetReg, op1Reg, element);
}
};
if (targetReg != op1Reg)
{
- getEmitter()->emitIns_R_R(INS_mov, baseTypeSize, targetReg, op1Reg);
+ GetEmitter()->emitIns_R_R(INS_mov, baseTypeSize, targetReg, op1Reg);
}
if (op3->isContained())
int srcLane = (int)op3->gtGetOp2()->AsIntConCommon()->IconValue();
// Emit mov targetReg[element], op3Reg[srcLane]
- getEmitter()->emitIns_R_R_I_I(INS_mov, baseTypeSize, targetReg, op3Reg, element, srcLane);
+ GetEmitter()->emitIns_R_R_I_I(INS_mov, baseTypeSize, targetReg, op3Reg, element, srcLane);
}
else
{
if (varTypeIsFloating(baseType))
{
assert(genIsValidFloatReg(op3Reg));
- getEmitter()->emitIns_R_R_I_I(INS_mov, baseTypeSize, targetReg, op3Reg, element, 0);
+ GetEmitter()->emitIns_R_R_I_I(INS_mov, baseTypeSize, targetReg, op3Reg, element, 0);
}
else
{
assert(genIsValidIntReg(op3Reg));
- getEmitter()->emitIns_R_R_I(INS_mov, baseTypeSize, targetReg, op3Reg, element);
+ GetEmitter()->emitIns_R_R_I(INS_mov, baseTypeSize, targetReg, op3Reg, element);
}
};
{
// op3 is target use bit insert if true
// op3 = op3 ^ (op1 & (op2 ^ op3))
- getEmitter()->emitIns_R_R_R(INS_bit, attr, op3Reg, op2Reg, op1Reg);
+ GetEmitter()->emitIns_R_R_R(INS_bit, attr, op3Reg, op2Reg, op1Reg);
}
else if (targetReg == op2Reg)
{
// op2 is target use bit insert if false
// op2 = op2 ^ (~op1 & (op2 ^ op3))
- getEmitter()->emitIns_R_R_R(INS_bif, attr, op2Reg, op3Reg, op1Reg);
+ GetEmitter()->emitIns_R_R_R(INS_bif, attr, op2Reg, op3Reg, op1Reg);
}
else
{
if (targetReg != op1Reg)
{
// target is not one of the sources, copy op1 to use bit select form
- getEmitter()->emitIns_R_R(INS_mov, attr, targetReg, op1Reg);
+ GetEmitter()->emitIns_R_R(INS_mov, attr, targetReg, op1Reg);
}
// use bit select
// targetReg = op3 ^ (targetReg & (op2 ^ op3))
- getEmitter()->emitIns_R_R_R(INS_bsl, attr, targetReg, op2Reg, op3Reg);
+ GetEmitter()->emitIns_R_R_R(INS_bsl, attr, targetReg, op2Reg, op3Reg);
}
genProduceReg(node);
if (genIsValidIntReg(op1Reg))
{
- getEmitter()->emitIns_R_R(ins, attr, targetReg, op1Reg, opt);
+ GetEmitter()->emitIns_R_R(ins, attr, targetReg, op1Reg, opt);
}
else
{
- getEmitter()->emitIns_R_R_I(ins, attr, targetReg, op1Reg, 0, opt);
+ GetEmitter()->emitIns_R_R_I(ins, attr, targetReg, op1Reg, 0, opt);
}
genProduceReg(node);
emitAttr attr = (node->gtSIMDSize > 8) ? EA_16BYTE : EA_8BYTE;
insOpts opt = genGetSimdInsOpt(attr, baseType);
- getEmitter()->emitIns_R_R(ins, attr, targetReg, op1Reg, opt);
+ GetEmitter()->emitIns_R_R(ins, attr, targetReg, op1Reg, opt);
genProduceReg(node);
}
if (targetReg != op1Reg)
{
- getEmitter()->emitIns_R_R(INS_mov, attr, targetReg, op1Reg);
+ GetEmitter()->emitIns_R_R(INS_mov, attr, targetReg, op1Reg);
}
- getEmitter()->emitIns_R_R(ins, attr, targetReg, op2Reg, opt);
+ GetEmitter()->emitIns_R_R(ins, attr, targetReg, op2Reg, opt);
genProduceReg(node);
}
if (targetReg != op1Reg)
{
- getEmitter()->emitIns_R_R(INS_mov, attr, targetReg, op1Reg);
+ GetEmitter()->emitIns_R_R(INS_mov, attr, targetReg, op1Reg);
}
- getEmitter()->emitIns_R_R_R(ins, attr, targetReg, op2Reg, op3Reg);
+ GetEmitter()->emitIns_R_R_R(ins, attr, targetReg, op2Reg, op3Reg);
genProduceReg(node);
}
regNumber elementReg = op2->gtRegNum;
regNumber tmpReg = node->GetSingleTempReg(RBM_ALLFLOAT);
- getEmitter()->emitIns_R_R(INS_fmov, EA_4BYTE, tmpReg, elementReg);
+ GetEmitter()->emitIns_R_R(INS_fmov, EA_4BYTE, tmpReg, elementReg);
if (targetReg != op1Reg)
{
- getEmitter()->emitIns_R_R(INS_mov, attr, targetReg, op1Reg);
+ GetEmitter()->emitIns_R_R(INS_mov, attr, targetReg, op1Reg);
}
- getEmitter()->emitIns_R_R_R(ins, attr, targetReg, tmpReg, op3Reg);
+ GetEmitter()->emitIns_R_R_R(ins, attr, targetReg, tmpReg, op3Reg);
genProduceReg(node);
}
regNumber elementReg = op1->gtRegNum;
regNumber tmpReg = node->GetSingleTempReg(RBM_ALLFLOAT);
- getEmitter()->emitIns_R_R(INS_fmov, EA_4BYTE, tmpReg, elementReg);
- getEmitter()->emitIns_R_R(ins, EA_4BYTE, tmpReg, tmpReg);
- getEmitter()->emitIns_R_R(INS_fmov, attr, targetReg, tmpReg);
+ GetEmitter()->emitIns_R_R(INS_fmov, EA_4BYTE, tmpReg, elementReg);
+ GetEmitter()->emitIns_R_R(ins, EA_4BYTE, tmpReg, tmpReg);
+ GetEmitter()->emitIns_R_R(INS_fmov, attr, targetReg, tmpReg);
genProduceReg(node);
}
{
instGen_Set_Reg_To_Imm(EA_PTR_DSP_RELOC, REG_PROFILER_ENTER_ARG_FUNC_ID,
(ssize_t)compiler->compProfilerMethHnd);
- getEmitter()->emitIns_R_R(INS_ldr, EA_PTRSIZE, REG_PROFILER_ENTER_ARG_FUNC_ID, REG_PROFILER_ENTER_ARG_FUNC_ID);
+ GetEmitter()->emitIns_R_R(INS_ldr, EA_PTRSIZE, REG_PROFILER_ENTER_ARG_FUNC_ID, REG_PROFILER_ENTER_ARG_FUNC_ID);
}
else
{
{
instGen_Set_Reg_To_Imm(EA_PTR_DSP_RELOC, REG_PROFILER_LEAVE_ARG_FUNC_ID,
(ssize_t)compiler->compProfilerMethHnd);
- getEmitter()->emitIns_R_R(INS_ldr, EA_PTRSIZE, REG_PROFILER_LEAVE_ARG_FUNC_ID, REG_PROFILER_LEAVE_ARG_FUNC_ID);
+ GetEmitter()->emitIns_R_R(INS_ldr, EA_PTRSIZE, REG_PROFILER_LEAVE_ARG_FUNC_ID, REG_PROFILER_LEAVE_ARG_FUNC_ID);
}
else
{
// Mark the "fake" instructions in the output.
printf("*************** In genArm64EmitterUnitTests()\n");
- emitter* theEmitter = getEmitter();
+ emitter* theEmitter = GetEmitter();
#ifdef ALL_ARM64_EMITTER_UNIT_TESTS
// We use this:
//
void CodeGen::genStackPointerConstantAdjustmentWithProbe(ssize_t spDelta, regNumber regTmp)
{
- getEmitter()->emitIns_R_R_I(INS_ldr, EA_4BYTE, regTmp, REG_SP, 0);
+ GetEmitter()->emitIns_R_R_I(INS_ldr, EA_4BYTE, regTmp, REG_SP, 0);
genStackPointerConstantAdjustment(spDelta);
}
// happen on x86, for example, when we copy an argument to the stack using a "SUB ESP; REP MOV"
// strategy.
- getEmitter()->emitIns_R_R_I(INS_ldr, EA_4BYTE, regTmp, REG_SP, 0);
+ GetEmitter()->emitIns_R_R_I(INS_ldr, EA_4BYTE, regTmp, REG_SP, 0);
lastTouchDelta = 0;
}
{
regNumber targetReg = treeNode->gtRegNum;
var_types targetType = treeNode->TypeGet();
- emitter* emit = getEmitter();
+ emitter* emit = GetEmitter();
#ifdef DEBUG
// Validate that all the operands for the current node are consumed in order.
switch (treeNode->gtOper)
{
case GT_START_NONGC:
- getEmitter()->emitDisableGC();
+ GetEmitter()->emitDisableGC();
break;
case GT_START_PREEMPTGC:
noway_assert(compiler->gsGlobalSecurityCookieVal != 0);
// initReg = #GlobalSecurityCookieVal; [frame.GSSecurityCookie] = initReg
genSetRegToIcon(initReg, compiler->gsGlobalSecurityCookieVal, TYP_I_IMPL);
- getEmitter()->emitIns_S_R(INS_str, EA_PTRSIZE, initReg, compiler->lvaGSSecurityCookie, 0);
+ GetEmitter()->emitIns_S_R(INS_str, EA_PTRSIZE, initReg, compiler->lvaGSSecurityCookie, 0);
}
else
{
instGen_Set_Reg_To_Imm(EA_PTR_DSP_RELOC, initReg, (ssize_t)compiler->gsGlobalSecurityCookieAddr);
- getEmitter()->emitIns_R_R_I(INS_ldr, EA_PTRSIZE, initReg, initReg, 0);
+ GetEmitter()->emitIns_R_R_I(INS_ldr, EA_PTRSIZE, initReg, initReg, 0);
regSet.verifyRegUsed(initReg);
- getEmitter()->emitIns_S_R(INS_str, EA_PTRSIZE, initReg, compiler->lvaGSSecurityCookie, 0);
+ GetEmitter()->emitIns_S_R(INS_str, EA_PTRSIZE, initReg, compiler->lvaGSSecurityCookie, 0);
}
*pInitRegZeroed = false;
{
case CORINFO_INTRINSIC_Abs:
genConsumeOperands(treeNode->AsOp());
- getEmitter()->emitInsBinary(INS_ABS, emitActualTypeSize(treeNode), treeNode, srcNode);
+ GetEmitter()->emitInsBinary(INS_ABS, emitActualTypeSize(treeNode), treeNode, srcNode);
break;
#ifdef _TARGET_ARM64_
case CORINFO_INTRINSIC_Ceiling:
genConsumeOperands(treeNode->AsOp());
- getEmitter()->emitInsBinary(INS_frintp, emitActualTypeSize(treeNode), treeNode, srcNode);
+ GetEmitter()->emitInsBinary(INS_frintp, emitActualTypeSize(treeNode), treeNode, srcNode);
break;
case CORINFO_INTRINSIC_Floor:
genConsumeOperands(treeNode->AsOp());
- getEmitter()->emitInsBinary(INS_frintm, emitActualTypeSize(treeNode), treeNode, srcNode);
+ GetEmitter()->emitInsBinary(INS_frintm, emitActualTypeSize(treeNode), treeNode, srcNode);
break;
case CORINFO_INTRINSIC_Round:
genConsumeOperands(treeNode->AsOp());
- getEmitter()->emitInsBinary(INS_frintn, emitActualTypeSize(treeNode), treeNode, srcNode);
+ GetEmitter()->emitInsBinary(INS_frintn, emitActualTypeSize(treeNode), treeNode, srcNode);
break;
#endif // _TARGET_ARM64_
case CORINFO_INTRINSIC_Sqrt:
genConsumeOperands(treeNode->AsOp());
- getEmitter()->emitInsBinary(INS_SQRT, emitActualTypeSize(treeNode), treeNode, srcNode);
+ GetEmitter()->emitInsBinary(INS_SQRT, emitActualTypeSize(treeNode), treeNode, srcNode);
break;
default:
assert(treeNode->OperIs(GT_PUTARG_STK));
GenTree* source = treeNode->gtOp1;
var_types targetType = genActualType(source->TypeGet());
- emitter* emit = getEmitter();
+ emitter* emit = GetEmitter();
// This is the varNum for our store operations,
// typically this is the varNum for the Outgoing arg space
assert(treeNode->OperIs(GT_PUTARG_SPLIT));
GenTree* source = treeNode->gtOp1;
- emitter* emit = getEmitter();
+ emitter* emit = GetEmitter();
unsigned varNumOut = compiler->lvaOutgoingArgSpaceVar;
unsigned argOffsetMax = compiler->lvaOutgoingArgSpaceSize;
unsigned argOffsetOut = treeNode->gtSlotNum * TARGET_POINTER_SIZE;
// iterations
// For the case where reg == dst, if we iterate so that we write dst[0] last, we eliminate the need for
// a temporary
- getEmitter()->emitIns_R_R_I_I(INS_mov, emitTypeSize(type), dst, reg, i, 0);
+ GetEmitter()->emitIns_R_R_I_I(INS_mov, emitTypeSize(type), dst, reg, i, 0);
}
else
{
// Use a vector mov from general purpose register instruction
// mov dst[i], reg
// This effectively moves from `reg` to `dst[i]`
- getEmitter()->emitIns_R_R_I(INS_mov, emitTypeSize(type), dst, reg, i);
+ GetEmitter()->emitIns_R_R_I(INS_mov, emitTypeSize(type), dst, reg, i);
}
}
}
assert(reg != REG_NA);
- getEmitter()->emitIns_S_R(ins_Store(type), emitTypeSize(type), reg, lclNum, offset);
+ GetEmitter()->emitIns_S_R(ins_Store(type), emitTypeSize(type), reg, lclNum, offset);
offset += genTypeSize(type);
}
assert(emitTypeSize(bndsChkType) >= emitActualTypeSize(src1->TypeGet()));
#endif // DEBUG
- getEmitter()->emitInsBinary(INS_cmp, emitActualTypeSize(bndsChkType), src1, src2);
+ GetEmitter()->emitInsBinary(INS_cmp, emitActualTypeSize(bndsChkType), src1, src2);
genJumpToThrowHlpBlk(jmpKind, bndsChk->gtThrowKind, bndsChk->gtIndRngFailBB);
}
regNumber targetReg = tree->GetSingleTempReg();
#endif
- getEmitter()->emitIns_R_R_I(INS_ldr, EA_4BYTE, targetReg, addrReg, 0);
+ GetEmitter()->emitIns_R_R_I(INS_ldr, EA_4BYTE, targetReg, addrReg, 0);
}
//------------------------------------------------------------------------
//
void CodeGen::genCodeForArrIndex(GenTreeArrIndex* arrIndex)
{
- emitter* emit = getEmitter();
+ emitter* emit = GetEmitter();
GenTree* arrObj = arrIndex->ArrObj();
GenTree* indexNode = arrIndex->IndexExpr();
regNumber arrReg = genConsumeReg(arrObj);
if (!offsetNode->IsIntegralConst(0))
{
- emitter* emit = getEmitter();
+ emitter* emit = GetEmitter();
regNumber offsetReg = genConsumeReg(offsetNode);
regNumber indexReg = genConsumeReg(indexNode);
regNumber arrReg = genConsumeReg(arrOffset->gtArrObj);
GenTree* shiftBy = tree->gtGetOp2();
if (!shiftBy->IsCnsIntOrI())
{
- getEmitter()->emitIns_R_R_R(ins, size, tree->gtRegNum, operand->gtRegNum, shiftBy->gtRegNum);
+ GetEmitter()->emitIns_R_R_R(ins, size, tree->gtRegNum, operand->gtRegNum, shiftBy->gtRegNum);
}
else
{
unsigned immWidth = emitter::getBitWidth(size); // For ARM64, immWidth will be set to 32 or 64
unsigned shiftByImm = (unsigned)shiftBy->gtIntCon.gtIconVal & (immWidth - 1);
- getEmitter()->emitIns_R_R_I(ins, size, tree->gtRegNum, operand->gtRegNum, shiftByImm);
+ GetEmitter()->emitIns_R_R_I(ins, size, tree->gtRegNum, operand->gtRegNum, shiftByImm);
}
genProduceReg(tree);
var_types targetType = tree->TypeGet();
regNumber targetReg = tree->gtRegNum;
- emitter* emit = getEmitter();
+ emitter* emit = GetEmitter();
NYI_IF(targetType == TYP_STRUCT, "GT_LCL_FLD: struct load local field not supported");
assert(targetReg != REG_NA);
// Generate the bounds check if necessary.
if ((node->gtFlags & GTF_INX_RNGCHK) != 0)
{
- getEmitter()->emitIns_R_R_I(INS_ldr, EA_4BYTE, tmpReg, base->gtRegNum, node->gtLenOffset);
- getEmitter()->emitIns_R_R(INS_cmp, emitActualTypeSize(index->TypeGet()), index->gtRegNum, tmpReg);
+ GetEmitter()->emitIns_R_R_I(INS_ldr, EA_4BYTE, tmpReg, base->gtRegNum, node->gtLenOffset);
+ GetEmitter()->emitIns_R_R(INS_cmp, emitActualTypeSize(index->TypeGet()), index->gtRegNum, tmpReg);
genJumpToThrowHlpBlk(EJ_hs, SCK_RNGCHK_FAIL, node->gtIndRngFailBB);
}
CodeGen::genSetRegToIcon(tmpReg, (ssize_t)node->gtElemSize, TYP_INT);
// dest = index * tmpReg + base
- getEmitter()->emitIns_R_R_R_R(INS_MULADD, emitActualTypeSize(node), node->gtRegNum, index->gtRegNum, tmpReg,
+ GetEmitter()->emitIns_R_R_R_R(INS_MULADD, emitActualTypeSize(node), node->gtRegNum, index->gtRegNum, tmpReg,
base->gtRegNum);
}
// dest = dest + elemOffs
- getEmitter()->emitIns_R_R_I(INS_add, emitActualTypeSize(node), node->gtRegNum, node->gtRegNum, node->gtElemOffset);
+ GetEmitter()->emitIns_R_R_I(INS_add, emitActualTypeSize(node), node->gtRegNum, node->gtRegNum, node->gtElemOffset);
gcInfo.gcMarkRegSetNpt(base->gtGetRegMask());
}
}
- getEmitter()->emitInsLoadStoreOp(ins, emitActualTypeSize(type), targetReg, tree);
+ GetEmitter()->emitInsLoadStoreOp(ins, emitActualTypeSize(type), targetReg, tree);
if (emitBarrier)
{
assert((size != 0) && (size <= CPBLK_UNROLL_LIMIT));
- emitter* emit = getEmitter();
+ emitter* emit = GetEmitter();
if (dstAddr->isUsedFromReg())
{
// offset: distance from the base from which to load
void CodeGen::genCodeForLoadOffset(instruction ins, emitAttr size, regNumber dst, GenTree* base, unsigned offset)
{
- emitter* emit = getEmitter();
+ emitter* emit = GetEmitter();
if (base->OperIsLocalAddr())
{
// offset: distance from the base from which to load
void CodeGen::genCodeForStoreOffset(instruction ins, emitAttr size, regNumber src, GenTree* base, unsigned offset)
{
- emitter* emit = getEmitter();
+ emitter* emit = GetEmitter();
if (base->OperIsLocalAddr())
{
#if defined(_TARGET_ARM_)
const regNumber tmpReg = call->ExtractTempReg();
- getEmitter()->emitIns_R_R_I(INS_ldr, EA_4BYTE, tmpReg, regThis, 0);
+ GetEmitter()->emitIns_R_R_I(INS_ldr, EA_4BYTE, tmpReg, regThis, 0);
#elif defined(_TARGET_ARM64_)
- getEmitter()->emitIns_R_R_I(INS_ldr, EA_4BYTE, REG_ZR, regThis, 0);
+ GetEmitter()->emitIns_R_R_I(INS_ldr, EA_4BYTE, REG_ZR, regThis, 0);
#endif // _TARGET_*
}
if (varDsc->TypeGet() == TYP_LONG)
{
// long - at least the low half must be enregistered
- getEmitter()->emitIns_S_R(INS_str, EA_4BYTE, varDsc->lvRegNum, varNum, 0);
+ GetEmitter()->emitIns_S_R(INS_str, EA_4BYTE, varDsc->lvRegNum, varNum, 0);
// Is the upper half also enregistered?
if (varDsc->lvOtherReg != REG_STK)
{
- getEmitter()->emitIns_S_R(INS_str, EA_4BYTE, varDsc->lvOtherReg, varNum, sizeof(int));
+ GetEmitter()->emitIns_S_R(INS_str, EA_4BYTE, varDsc->lvOtherReg, varNum, sizeof(int));
}
}
else
#endif // _TARGET_ARM_
{
- getEmitter()->emitIns_S_R(ins_Store(storeType), storeSize, varDsc->lvRegNum, varNum, 0);
+ GetEmitter()->emitIns_S_R(ins_Store(storeType), storeSize, varDsc->lvRegNum, varNum, 0);
}
// Update lvRegNum life and GC info to indicate lvRegNum is dead and varDsc stack slot is going live.
// Note that we cannot modify varDsc->lvRegNum here because another basic block may not be expecting it.
for (unsigned ofs = 0, cSlot = 0; cSlot < cSlots; cSlot++, ofs += (unsigned)loadSize)
{
- getEmitter()->emitIns_R_S(ins_Load(loadType), loadSize, fieldReg, varNum, ofs);
+ GetEmitter()->emitIns_R_S(ins_Load(loadType), loadSize, fieldReg, varNum, ofs);
assert(genIsValidFloatReg(fieldReg)); // No GC register tracking for floating point registers.
fieldReg = regNextOfType(fieldReg, loadType);
}
loadType = compiler->mangleVarArgsType(genActualType(varDsc->TypeGet()));
}
emitAttr loadSize = emitActualTypeSize(loadType);
- getEmitter()->emitIns_R_S(ins_Load(loadType), loadSize, argReg, varNum, 0);
+ GetEmitter()->emitIns_R_S(ins_Load(loadType), loadSize, argReg, varNum, 0);
// Update argReg life and GC Info to indicate varDsc stack slot is dead and argReg is going live.
// Note that we cannot modify varDsc->lvRegNum here because another basic block may not be expecting it.
loadType = varDsc->GetLayout()->GetGCPtrType(1);
loadSize = emitActualTypeSize(loadType);
- getEmitter()->emitIns_R_S(ins_Load(loadType), loadSize, argRegNext, varNum, TARGET_POINTER_SIZE);
+ GetEmitter()->emitIns_R_S(ins_Load(loadType), loadSize, argRegNext, varNum, TARGET_POINTER_SIZE);
regSet.AddMaskVars(genRegMask(argRegNext));
gcInfo.gcMarkRegPtrVal(argRegNext, loadType);
if (varDsc->lvRegNum != argReg)
{
- getEmitter()->emitIns_R_S(INS_ldr, EA_PTRSIZE, argReg, varNum, 0);
- getEmitter()->emitIns_R_S(INS_ldr, EA_PTRSIZE, argRegNext, varNum, REGSIZE_BYTES);
+ GetEmitter()->emitIns_R_S(INS_ldr, EA_PTRSIZE, argReg, varNum, 0);
+ GetEmitter()->emitIns_R_S(INS_ldr, EA_PTRSIZE, argRegNext, varNum, REGSIZE_BYTES);
}
if (compiler->info.compIsVarArgs)
{
if (varDsc->lvRegNum != argReg)
{
- getEmitter()->emitIns_R_S(ins_Load(loadType), loadSize, fieldReg, varNum, ofs);
+ GetEmitter()->emitIns_R_S(ins_Load(loadType), loadSize, fieldReg, varNum, ofs);
}
assert(genIsValidFloatReg(fieldReg)); // we don't use register tracking for FP
fieldReg = regNextOfType(fieldReg, loadType);
{
emitAttr loadSize = emitActualTypeSize(loadType);
- getEmitter()->emitIns_R_S(ins_Load(loadType), loadSize, slotReg, varNum, ofs);
+ GetEmitter()->emitIns_R_S(ins_Load(loadType), loadSize, slotReg, varNum, ofs);
}
regSet.AddMaskVars(genRegMask(slotReg));
if (varDsc->lvRegNum != argReg)
{
- getEmitter()->emitIns_R_S(ins_Load(loadType), emitTypeSize(loadType), argReg, varNum, 0);
+ GetEmitter()->emitIns_R_S(ins_Load(loadType), emitTypeSize(loadType), argReg, varNum, 0);
}
regSet.AddMaskVars(genRegMask(argReg));
regMaskTP remainingIntArgMask = RBM_ARG_REGS & ~fixedIntArgMask;
if (remainingIntArgMask != RBM_NONE)
{
- getEmitter()->emitDisableGC();
+ GetEmitter()->emitDisableGC();
for (int argNum = 0, argOffset = 0; argNum < MAX_REG_ARG; ++argNum)
{
regNumber argReg = intArgRegs[argNum];
if ((remainingIntArgMask & argRegMask) != 0)
{
remainingIntArgMask &= ~argRegMask;
- getEmitter()->emitIns_R_S(INS_ldr, EA_PTRSIZE, argReg, firstArgVarNum, argOffset);
+ GetEmitter()->emitIns_R_S(INS_ldr, EA_PTRSIZE, argReg, firstArgVarNum, argOffset);
}
argOffset += REGSIZE_BYTES;
}
- getEmitter()->emitEnableGC();
+ GetEmitter()->emitEnableGC();
}
}
}
switch (desc.CheckKind())
{
case GenIntCastDesc::CHECK_POSITIVE:
- getEmitter()->emitIns_R_I(INS_cmp, EA_ATTR(desc.CheckSrcSize()), reg, 0);
+ GetEmitter()->emitIns_R_I(INS_cmp, EA_ATTR(desc.CheckSrcSize()), reg, 0);
genJumpToThrowHlpBlk(EJ_lt, SCK_OVERFLOW);
break;
// We need to check if the value is not greater than 0xFFFFFFFF but this value
// cannot be encoded in the immediate operand of CMP. Use TST instead to check
// if the upper 32 bits are zero.
- getEmitter()->emitIns_R_I(INS_tst, EA_8BYTE, reg, 0xFFFFFFFF00000000LL);
+ GetEmitter()->emitIns_R_I(INS_tst, EA_8BYTE, reg, 0xFFFFFFFF00000000LL);
genJumpToThrowHlpBlk(EJ_ne, SCK_OVERFLOW);
break;
// We need to check if the value is not greater than 0x7FFFFFFF but this value
// cannot be encoded in the immediate operand of CMP. Use TST instead to check
// if the upper 33 bits are zero.
- getEmitter()->emitIns_R_I(INS_tst, EA_8BYTE, reg, 0xFFFFFFFF80000000LL);
+ GetEmitter()->emitIns_R_I(INS_tst, EA_8BYTE, reg, 0xFFFFFFFF80000000LL);
genJumpToThrowHlpBlk(EJ_ne, SCK_OVERFLOW);
break;
const regNumber tempReg = cast->GetSingleTempReg();
assert(tempReg != reg);
instGen_Set_Reg_To_Imm(EA_8BYTE, tempReg, INT32_MAX);
- getEmitter()->emitIns_R_R(INS_cmp, EA_8BYTE, reg, tempReg);
+ GetEmitter()->emitIns_R_R(INS_cmp, EA_8BYTE, reg, tempReg);
genJumpToThrowHlpBlk(EJ_gt, SCK_OVERFLOW);
instGen_Set_Reg_To_Imm(EA_8BYTE, tempReg, INT32_MIN);
- getEmitter()->emitIns_R_R(INS_cmp, EA_8BYTE, reg, tempReg);
+ GetEmitter()->emitIns_R_R(INS_cmp, EA_8BYTE, reg, tempReg);
genJumpToThrowHlpBlk(EJ_lt, SCK_OVERFLOW);
}
break;
if (castMaxValue > 255)
{
assert((castMaxValue == 32767) || (castMaxValue == 65535));
- getEmitter()->emitIns_R_I(INS_cmp, EA_SIZE(desc.CheckSrcSize()), reg, castMaxValue + 1);
+ GetEmitter()->emitIns_R_I(INS_cmp, EA_SIZE(desc.CheckSrcSize()), reg, castMaxValue + 1);
genJumpToThrowHlpBlk((castMinValue == 0) ? EJ_hs : EJ_ge, SCK_OVERFLOW);
}
else
{
- getEmitter()->emitIns_R_I(INS_cmp, EA_SIZE(desc.CheckSrcSize()), reg, castMaxValue);
+ GetEmitter()->emitIns_R_I(INS_cmp, EA_SIZE(desc.CheckSrcSize()), reg, castMaxValue);
genJumpToThrowHlpBlk((castMinValue == 0) ? EJ_hi : EJ_gt, SCK_OVERFLOW);
}
if (castMinValue != 0)
{
- getEmitter()->emitIns_R_I(INS_cmp, EA_SIZE(desc.CheckSrcSize()), reg, castMinValue);
+ GetEmitter()->emitIns_R_I(INS_cmp, EA_SIZE(desc.CheckSrcSize()), reg, castMinValue);
genJumpToThrowHlpBlk(EJ_lt, SCK_OVERFLOW);
}
}
break;
}
- getEmitter()->emitIns_R_R(ins, EA_ATTR(insSize), dstReg, srcReg);
+ GetEmitter()->emitIns_R_R(ins, EA_ATTR(insSize), dstReg, srcReg);
}
genProduceReg(cast);
instruction insVcvt = (srcType == TYP_FLOAT) ? INS_vcvt_f2d // convert Float to Double
: INS_vcvt_d2f; // convert Double to Float
- getEmitter()->emitIns_R_R(insVcvt, emitTypeSize(treeNode), treeNode->gtRegNum, op1->gtRegNum);
+ GetEmitter()->emitIns_R_R(insVcvt, emitTypeSize(treeNode), treeNode->gtRegNum, op1->gtRegNum);
}
else if (treeNode->gtRegNum != op1->gtRegNum)
{
- getEmitter()->emitIns_R_R(INS_vmov, emitTypeSize(treeNode), treeNode->gtRegNum, op1->gtRegNum);
+ GetEmitter()->emitIns_R_R(INS_vmov, emitTypeSize(treeNode), treeNode->gtRegNum, op1->gtRegNum);
}
#elif defined(_TARGET_ARM64_)
insOpts cvtOption = (srcType == TYP_FLOAT) ? INS_OPTS_S_TO_D // convert Single to Double
: INS_OPTS_D_TO_S; // convert Double to Single
- getEmitter()->emitIns_R_R(INS_fcvt, emitActualTypeSize(treeNode), treeNode->gtRegNum, op1->gtRegNum, cvtOption);
+ GetEmitter()->emitIns_R_R(INS_fcvt, emitActualTypeSize(treeNode), treeNode->gtRegNum, op1->gtRegNum, cvtOption);
}
else if (treeNode->gtRegNum != op1->gtRegNum)
{
// If double to double cast or float to float cast. Emit a move instruction.
- getEmitter()->emitIns_R_R(INS_mov, emitActualTypeSize(treeNode), treeNode->gtRegNum, op1->gtRegNum);
+ GetEmitter()->emitIns_R_R(INS_mov, emitActualTypeSize(treeNode), treeNode->gtRegNum, op1->gtRegNum);
}
#endif // _TARGET_*
BasicBlock* labelTrue = genCreateTempLabel();
inst_JCC(condition, labelTrue);
- getEmitter()->emitIns_R_I(INS_mov, emitActualTypeSize(type), dstReg, 0);
+ GetEmitter()->emitIns_R_I(INS_mov, emitActualTypeSize(type), dstReg, 0);
BasicBlock* labelNext = genCreateTempLabel();
- getEmitter()->emitIns_J(INS_b, labelNext);
+ GetEmitter()->emitIns_J(INS_b, labelNext);
genDefineTempLabel(labelTrue);
- getEmitter()->emitIns_R_I(INS_mov, emitActualTypeSize(type), dstReg, 1);
+ GetEmitter()->emitIns_R_I(INS_mov, emitActualTypeSize(type), dstReg, 1);
genDefineTempLabel(labelNext);
#endif
}
if (blkOp->gtBlkOpGcUnsafe)
{
- getEmitter()->emitDisableGC();
+ GetEmitter()->emitDisableGC();
}
bool isCopyBlk = blkOp->OperIsCopyBlkOp();
if (blkOp->gtBlkOpGcUnsafe)
{
- getEmitter()->emitEnableGC();
+ GetEmitter()->emitEnableGC();
}
}
//
void CodeGen::genScaledAdd(emitAttr attr, regNumber targetReg, regNumber baseReg, regNumber indexReg, int scale)
{
- emitter* emit = getEmitter();
+ emitter* emit = GetEmitter();
if (scale == 0)
{
// target = base + index
- getEmitter()->emitIns_R_R_R(INS_add, attr, targetReg, baseReg, indexReg);
+ GetEmitter()->emitIns_R_R_R(INS_add, attr, targetReg, baseReg, indexReg);
}
else
{
void CodeGen::genLeaInstruction(GenTreeAddrMode* lea)
{
genConsumeOperands(lea);
- emitter* emit = getEmitter();
+ emitter* emit = GetEmitter();
emitAttr size = emitTypeSize(lea);
int offset = lea->Offset();
{
var_types type = retTypeDesc.GetReturnRegType(i);
regNumber reg = retTypeDesc.GetABIReturnReg(i);
- getEmitter()->emitIns_R_S(ins_Load(type), emitTypeSize(type), reg, lclVar->gtLclNum, offset);
+ GetEmitter()->emitIns_R_S(ins_Load(type), emitTypeSize(type), reg, lclVar->gtLclNum, offset);
offset += genTypeSize(type);
}
}
// This effectively moves from `src[i]` to `reg[0]`, upper bits of reg remain unchanged
// For the case where src == reg, since we are only writing reg[0], as long as we iterate
// so that src[0] is consumed before writing reg[0], we do not need a temporary.
- getEmitter()->emitIns_R_R_I_I(INS_mov, emitTypeSize(type), reg, src, 0, i);
+ GetEmitter()->emitIns_R_R_I_I(INS_mov, emitTypeSize(type), reg, src, 0, i);
}
else
{
// Use a vector mov to general purpose register instruction
// mov reg, src[i]
// This effectively moves from `src[i]` to `reg`
- getEmitter()->emitIns_R_R_I(INS_mov, emitTypeSize(type), reg, src, i);
+ GetEmitter()->emitIns_R_R_I(INS_mov, emitTypeSize(type), reg, src, i);
}
}
#endif // _TARGET_ARM64_
// ldr rTemp, [SP + initReg] // load into initReg on arm32, wzr on ARM64
instGen_Set_Reg_To_Imm(EA_PTRSIZE, initReg, -(ssize_t)probeOffset);
- getEmitter()->emitIns_R_R_R(INS_ldr, EA_4BYTE, rTemp, REG_SPBASE, initReg);
+ GetEmitter()->emitIns_R_R_R(INS_ldr, EA_4BYTE, rTemp, REG_SPBASE, initReg);
regSet.verifyRegUsed(initReg);
*pInitRegZeroed = false; // The initReg does not contain zero
#else // !_TARGET_ARM64_
instGen_Set_Reg_To_Imm(EA_PTRSIZE, initReg, frameSize);
compiler->unwindPadding();
- getEmitter()->emitIns_R_R_R(INS_sub, EA_4BYTE, REG_SPBASE, REG_SPBASE, initReg);
+ GetEmitter()->emitIns_R_R_R(INS_sub, EA_4BYTE, REG_SPBASE, REG_SPBASE, initReg);
#endif // !_TARGET_ARM64_
}
else
// There's a "virtual" label here. But we can't create a label in the prolog, so we use the magic
// `emitIns_J` with a negative `instrCount` to branch back a specific number of instructions.
- getEmitter()->emitIns_R_R_R(INS_ldr, EA_4BYTE, rTemp, REG_SPBASE, rOffset);
+ GetEmitter()->emitIns_R_R_R(INS_ldr, EA_4BYTE, rTemp, REG_SPBASE, rOffset);
#if defined(_TARGET_ARM_)
regSet.verifyRegUsed(rTemp);
- getEmitter()->emitIns_R_I(INS_sub, EA_PTRSIZE, rOffset, pageSize);
+ GetEmitter()->emitIns_R_I(INS_sub, EA_PTRSIZE, rOffset, pageSize);
#elif defined(_TARGET_ARM64_)
- getEmitter()->emitIns_R_R_I(INS_sub, EA_PTRSIZE, rOffset, rOffset, pageSize);
+ GetEmitter()->emitIns_R_R_I(INS_sub, EA_PTRSIZE, rOffset, rOffset, pageSize);
#endif
- getEmitter()->emitIns_R_R(INS_cmp, EA_PTRSIZE, rLimit, rOffset); // If equal, we need to probe again
- getEmitter()->emitIns_J(INS_bls, NULL, -4);
+ GetEmitter()->emitIns_R_R(INS_cmp, EA_PTRSIZE, rLimit, rOffset); // If equal, we need to probe again
+ GetEmitter()->emitIns_J(INS_bls, NULL, -4);
*pInitRegZeroed = false; // The initReg does not contain zero
{
assert(lastTouchDelta + STACK_PROBE_BOUNDARY_THRESHOLD_BYTES < 2 * pageSize);
instGen_Set_Reg_To_Imm(EA_PTRSIZE, initReg, -(ssize_t)frameSize);
- getEmitter()->emitIns_R_R_R(INS_ldr, EA_4BYTE, REG_ZR, REG_SPBASE, initReg);
+ GetEmitter()->emitIns_R_R_R(INS_ldr, EA_4BYTE, REG_ZR, REG_SPBASE, initReg);
compiler->unwindPadding();
regSet.verifyRegUsed(initReg);
*/
// Spill the value held in 'reg' into the spill temp 'tmp': emits a single store
// of the given type to slot 0 of the temp's stack location.
void CodeGenInterface::spillReg(var_types type, TempDsc* tmp, regNumber reg)
{
- getEmitter()->emitIns_S_R(ins_Store(type), emitActualTypeSize(type), reg, tmp->tdTempNum(), 0);
+ GetEmitter()->emitIns_S_R(ins_Store(type), emitActualTypeSize(type), reg, tmp->tdTempNum(), 0);
}
/*****************************************************************************
*/
// Reload a previously spilled value from the spill temp 'tmp' back into 'reg':
// emits a single load of the given type from slot 0 of the temp's stack location.
// Mirror of spillReg.
void CodeGenInterface::reloadReg(var_types type, TempDsc* tmp, regNumber reg)
{
- getEmitter()->emitIns_R_S(ins_Load(type), emitActualTypeSize(type), reg, tmp->tdTempNum(), 0);
+ GetEmitter()->emitIns_R_S(ins_Load(type), emitActualTypeSize(type), reg, tmp->tdTempNum(), 0);
}
// inline
#endif
label->bbEmitCookie =
- getEmitter()->emitAddLabel(gcInfo.gcVarPtrSetCur, gcInfo.gcRegGCrefSetCur, gcInfo.gcRegByrefSetCur);
+ GetEmitter()->emitAddLabel(gcInfo.gcVarPtrSetCur, gcInfo.gcRegGCrefSetCur, gcInfo.gcRegByrefSetCur);
}
/*****************************************************************************
// at this point if a jump to this block is made in the middle of pushing arugments.
//
// Here we restore SP to prevent potential stack alignment issues.
- getEmitter()->emitIns_R_AR(INS_lea, EA_PTRSIZE, REG_SPBASE, REG_FPBASE, -genSPtoFPdelta());
+ GetEmitter()->emitIns_R_AR(INS_lea, EA_PTRSIZE, REG_SPBASE, REG_FPBASE, -genSPtoFPdelta());
}
#endif
if (genStackLevel != 0)
{
#ifdef _TARGET_X86_
- getEmitter()->emitMarkStackLvl(genStackLevel);
+ GetEmitter()->emitMarkStackLvl(genStackLevel);
inst_RV_IV(INS_add, REG_SPBASE, genStackLevel, EA_PTRSIZE);
SetStackLevel(0);
#else // _TARGET_X86_
{
// Ngen case - GS cookie constant needs to be accessed through an indirection.
instGen_Set_Reg_To_Imm(EA_HANDLE_CNS_RELOC, regGSConst, (ssize_t)compiler->gsGlobalSecurityCookieAddr);
- getEmitter()->emitIns_R_R_I(INS_ldr, EA_PTRSIZE, regGSConst, regGSConst, 0);
+ GetEmitter()->emitIns_R_R_I(INS_ldr, EA_PTRSIZE, regGSConst, regGSConst, 0);
}
// Load this method's GS value from the stack frame
- getEmitter()->emitIns_R_S(INS_ldr, EA_PTRSIZE, regGSValue, compiler->lvaGSSecurityCookie, 0);
+ GetEmitter()->emitIns_R_S(INS_ldr, EA_PTRSIZE, regGSValue, compiler->lvaGSSecurityCookie, 0);
// Compare with the GC cookie constant
- getEmitter()->emitIns_R_R(INS_cmp, EA_PTRSIZE, regGSConst, regGSValue);
+ GetEmitter()->emitIns_R_R(INS_cmp, EA_PTRSIZE, regGSConst, regGSValue);
BasicBlock* gsCheckBlk = genCreateTempLabel();
inst_JMP(EJ_eq, gsCheckBlk);
gcInfo.gcMarkRegPtrVal(varDsc->lvArgReg, varDsc->TypeGet());
}
- getEmitter()->emitThisGCrefRegs = getEmitter()->emitInitGCrefRegs = gcInfo.gcRegGCrefSetCur;
- getEmitter()->emitThisByrefRegs = getEmitter()->emitInitByrefRegs = gcInfo.gcRegByrefSetCur;
+ GetEmitter()->emitThisGCrefRegs = GetEmitter()->emitInitGCrefRegs = gcInfo.gcRegGCrefSetCur;
+ GetEmitter()->emitThisByrefRegs = GetEmitter()->emitInitByrefRegs = gcInfo.gcRegByrefSetCur;
}
}
// would be executed, which we would prefer not to do.
block->bbUnwindNopEmitCookie =
- getEmitter()->emitAddLabel(gcInfo.gcVarPtrSetCur, gcInfo.gcRegGCrefSetCur, gcInfo.gcRegByrefSetCur);
+ GetEmitter()->emitAddLabel(gcInfo.gcVarPtrSetCur, gcInfo.gcRegGCrefSetCur, gcInfo.gcRegByrefSetCur);
instGen(INS_nop);
}
genPrepForCompiler();
/* Prepare the emitter */
- getEmitter()->Init();
+ GetEmitter()->Init();
#ifdef DEBUG
VarSetOps::AssignNoCopy(compiler, genTempOldLife, VarSetOps::MakeEmpty(compiler));
#endif
unsigned maxTmpSize = regSet.tmpGetTotalSize(); // This is precise after LSRA has pre-allocated the temps.
- getEmitter()->emitBegFN(isFramePointerUsed()
+ GetEmitter()->emitBegFN(isFramePointerUsed()
#if defined(DEBUG)
,
(compiler->compCodeOpt() != Compiler::SMALL_CODE) &&
/* Bind jump distances */
- getEmitter()->emitJumpDistBind();
+ GetEmitter()->emitJumpDistBind();
/* The code is now complete and final; it should not change after this. */
(Note: an example of a case where we emit less code would be useful.)
*/
- getEmitter()->emitComputeCodeSizes();
+ GetEmitter()->emitComputeCodeSizes();
#ifdef DEBUG
#if DISPLAY_SIZES
- size_t dataSize = getEmitter()->emitDataSize();
+ size_t dataSize = GetEmitter()->emitDataSize();
#endif // DISPLAY_SIZES
compiler->EndPhase(PHASE_GENERATE_CODE);
- codeSize = getEmitter()->emitEndCodeGen(compiler, trackedStackPtrsContig, genInterruptible, genFullPtrRegMap,
+ codeSize = GetEmitter()->emitEndCodeGen(compiler, trackedStackPtrsContig, genInterruptible, genFullPtrRegMap,
(compiler->info.compRetType == TYP_REF), compiler->compHndBBtabCount,
&prologSize, &epilogSize, codePtr, &coldCodePtr, &consPtr);
if (verbose)
{
printf("*************** After end code gen, before unwindEmit()\n");
- getEmitter()->emitDispIGlist(true);
+ GetEmitter()->emitDispIGlist(true);
}
#endif
assert(maxNestedAlignment % sizeof(int) == 0);
maxAllowedStackDepth += maxNestedAlignment / sizeof(int);
#endif
- noway_assert(getEmitter()->emitMaxStackDepth <= maxAllowedStackDepth);
+ noway_assert(GetEmitter()->emitMaxStackDepth <= maxAllowedStackDepth);
}
#endif // EMIT_TRACK_STACK_DEPTH
}
if (compiler->opts.dmpHex)
{
- size_t consSize = getEmitter()->emitDataSize();
+ size_t consSize = GetEmitter()->emitDataSize();
fprintf(dmpf, "Generated code for %s:\n", compiler->info.compFullName);
fprintf(dmpf, "\n");
/* Tell the emitter that we're done with this function */
- getEmitter()->emitEndFN();
+ GetEmitter()->emitEndFN();
/* Shut down the spill logic */
// Since slot is typically 1, baseOffset is typically 0
int baseOffset = (regArgTab[argNum].slot - 1) * slotSize;
- getEmitter()->emitIns_S_R(ins_Store(storeType), size, srcRegNum, varNum, baseOffset);
+ GetEmitter()->emitIns_S_R(ins_Store(storeType), size, srcRegNum, varNum, baseOffset);
#ifndef UNIX_AMD64_ABI
// Check if we are writing past the end of the struct
noway_assert(varDscDest->lvArgReg == varDscSrc->lvRegNum);
- getEmitter()->emitIns_R_R(INS_xchg, size, varDscSrc->lvRegNum, varDscSrc->lvArgReg);
+ GetEmitter()->emitIns_R_R(INS_xchg, size, varDscSrc->lvRegNum, varDscSrc->lvArgReg);
regSet.verifyRegUsed(varDscSrc->lvRegNum);
regSet.verifyRegUsed(varDscSrc->lvArgReg);
regNumber begRegNum = genMapRegArgNumToRegNum(begReg, destMemType);
- getEmitter()->emitIns_R_R(insCopy, size, xtraReg, begRegNum);
+ GetEmitter()->emitIns_R_R(insCopy, size, xtraReg, begRegNum);
regSet.verifyRegUsed(xtraReg);
regNumber destRegNum = genMapRegArgNumToRegNum(destReg, destMemType);
regNumber srcRegNum = genMapRegArgNumToRegNum(srcReg, destMemType);
- getEmitter()->emitIns_R_R(insCopy, size, destRegNum, srcRegNum);
+ GetEmitter()->emitIns_R_R(insCopy, size, destRegNum, srcRegNum);
regSet.verifyRegUsed(destRegNum);
regNumber destRegNum = genMapRegArgNumToRegNum(destReg, destMemType);
- getEmitter()->emitIns_R_R(insCopy, size, destRegNum, xtraReg);
+ GetEmitter()->emitIns_R_R(insCopy, size, destRegNum, xtraReg);
regSet.verifyRegUsed(destRegNum);
#ifdef USING_SCOPE_INFO
}
#endif
- getEmitter()->emitIns_R_R(ins_Copy(destMemType), size, destRegNum, regNum);
+ GetEmitter()->emitIns_R_R(ins_Copy(destMemType), size, destRegNum, regNum);
#ifdef USING_SCOPE_INFO
psiMoveToReg(varNum);
#endif // USING_SCOPE_INFO
noway_assert(regArgTab[nextArgNum].varNum == varNum);
// Emit a shufpd with a 0 immediate, which preserves the 0th element of the dest reg
// and moves the 0th element of the src reg into the 1st element of the dest reg.
- getEmitter()->emitIns_R_R_I(INS_shufpd, emitActualTypeSize(varDsc->lvType), destRegNum, nextRegNum, 0);
+ GetEmitter()->emitIns_R_R_I(INS_shufpd, emitActualTypeSize(varDsc->lvType), destRegNum, nextRegNum, 0);
// Set destRegNum to regNum so that we skip the setting of the register below,
// but mark argNum as processed and clear regNum from the live mask.
destRegNum = regNum;
noway_assert(regArgTab[nextArgNum].varNum == varNum);
noway_assert(genIsValidIntReg(nextRegNum));
noway_assert(genIsValidFloatReg(destRegNum));
- getEmitter()->emitIns_R_R_I(INS_mov, EA_8BYTE, destRegNum, nextRegNum, 1);
+ GetEmitter()->emitIns_R_R_I(INS_mov, EA_8BYTE, destRegNum, nextRegNum, 1);
}
#endif // defined(_TARGET_ARM64_) && defined(FEATURE_SIMD)
regNumber regNum = varDsc->lvArgInitReg;
assert(regNum != REG_STK);
- getEmitter()->emitIns_R_S(ins_Load(type), emitTypeSize(type), regNum, varNum, 0);
+ GetEmitter()->emitIns_R_S(ins_Load(type), emitTypeSize(type), regNum, varNum, 0);
regSet.verifyRegUsed(regNum);
#ifdef USING_SCOPE_INFO
psiMoveToReg(varNum);
assert(totalFrameSize <= STACK_PROBE_BOUNDARY_THRESHOLD_BYTES);
- getEmitter()->emitIns_R_R_R_I(INS_stp, EA_PTRSIZE, REG_FP, REG_LR, REG_SPBASE, -totalFrameSize,
+ GetEmitter()->emitIns_R_R_R_I(INS_stp, EA_PTRSIZE, REG_FP, REG_LR, REG_SPBASE, -totalFrameSize,
INS_OPTS_PRE_INDEX);
compiler->unwindSaveRegPairPreindexed(REG_FP, REG_LR, -totalFrameSize);
assert(totalFrameSize - compiler->lvaOutgoingArgSpaceSize <= STACK_PROBE_BOUNDARY_THRESHOLD_BYTES);
- getEmitter()->emitIns_R_R_I(INS_sub, EA_PTRSIZE, REG_SPBASE, REG_SPBASE, totalFrameSize);
+ GetEmitter()->emitIns_R_R_I(INS_sub, EA_PTRSIZE, REG_SPBASE, REG_SPBASE, totalFrameSize);
compiler->unwindAllocStack(totalFrameSize);
assert(compiler->lvaOutgoingArgSpaceSize + 2 * REGSIZE_BYTES <= (unsigned)totalFrameSize);
- getEmitter()->emitIns_R_R_R_I(INS_stp, EA_PTRSIZE, REG_FP, REG_LR, REG_SPBASE,
+ GetEmitter()->emitIns_R_R_R_I(INS_stp, EA_PTRSIZE, REG_FP, REG_LR, REG_SPBASE,
compiler->lvaOutgoingArgSpaceSize);
compiler->unwindSaveRegPair(REG_FP, REG_LR, compiler->lvaOutgoingArgSpaceSize);
{
regNumber reg2 = REG_NEXT(reg1);
// stp REG, REG + 1, [SP, #offset]
- getEmitter()->emitIns_R_R_R_I(INS_stp, EA_PTRSIZE, reg1, reg2, REG_SPBASE, offset);
+ GetEmitter()->emitIns_R_R_R_I(INS_stp, EA_PTRSIZE, reg1, reg2, REG_SPBASE, offset);
compiler->unwindNop();
offset += 2 * REGSIZE_BYTES;
}
noway_assert(floatRegCanHoldType(lowReg, TYP_DOUBLE));
noway_assert((slots % 2) == 0);
- getEmitter()->emitIns_R_I(INS_vpush, EA_8BYTE, lowReg, slots / 2);
+ GetEmitter()->emitIns_R_I(INS_vpush, EA_8BYTE, lowReg, slots / 2);
}
void CodeGen::genPopFltRegs(regMaskTP regMask)
noway_assert(floatRegCanHoldType(lowReg, TYP_DOUBLE));
noway_assert((slots % 2) == 0);
- getEmitter()->emitIns_R_I(INS_vpop, EA_8BYTE, lowReg, slots / 2);
+ GetEmitter()->emitIns_R_I(INS_vpop, EA_8BYTE, lowReg, slots / 2);
}
/*-----------------------------------------------------------------------------
*pUnwindStarted = true;
}
- getEmitter()->emitIns_R_I(INS_add, EA_PTRSIZE, REG_SPBASE, frameSize, INS_FLAGS_DONT_CARE);
+ GetEmitter()->emitIns_R_I(INS_add, EA_PTRSIZE, REG_SPBASE, frameSize, INS_FLAGS_DONT_CARE);
}
else
{
*pUnwindStarted = true;
}
- getEmitter()->emitIns_R_R(INS_add, EA_PTRSIZE, REG_SPBASE, tmpReg, INS_FLAGS_DONT_CARE);
+ GetEmitter()->emitIns_R_R(INS_add, EA_PTRSIZE, REG_SPBASE, tmpReg, INS_FLAGS_DONT_CARE);
}
compiler->unwindAllocStack(frameSize);
*/
// Materialize the relocatable displacement of 'block' into 'reg' using a
// movw/movt pair (low/high halfword, each carrying a EA_4BYTE_DSP_RELOC reloc).
void CodeGen::genMov32RelocatableDisplacement(BasicBlock* block, regNumber reg)
{
- getEmitter()->emitIns_R_L(INS_movw, EA_4BYTE_DSP_RELOC, block, reg);
- getEmitter()->emitIns_R_L(INS_movt, EA_4BYTE_DSP_RELOC, block, reg);
+ GetEmitter()->emitIns_R_L(INS_movw, EA_4BYTE_DSP_RELOC, block, reg);
+ GetEmitter()->emitIns_R_L(INS_movt, EA_4BYTE_DSP_RELOC, block, reg);
// When relative code relocs are in use the value is PC-relative, so add PC
// to turn it into an absolute address.
if (compiler->opts.jitFlags->IsSet(JitFlags::JIT_FLAG_RELATIVE_CODE_RELOCS))
{
- getEmitter()->emitIns_R_R_R(INS_add, EA_4BYTE_DSP_RELOC, reg, reg, REG_PC);
+ GetEmitter()->emitIns_R_R_R(INS_add, EA_4BYTE_DSP_RELOC, reg, reg, REG_PC);
}
}
*/
// Materialize the relocatable address of data-section label 'value' into 'reg'
// using a movw/movt pair, each emitted with a EA_HANDLE_CNS_RELOC relocation.
void CodeGen::genMov32RelocatableDataLabel(unsigned value, regNumber reg)
{
- getEmitter()->emitIns_R_D(INS_movw, EA_HANDLE_CNS_RELOC, value, reg);
- getEmitter()->emitIns_R_D(INS_movt, EA_HANDLE_CNS_RELOC, value, reg);
+ GetEmitter()->emitIns_R_D(INS_movw, EA_HANDLE_CNS_RELOC, value, reg);
+ GetEmitter()->emitIns_R_D(INS_movt, EA_HANDLE_CNS_RELOC, value, reg);
// When relative code relocs are in use the value is PC-relative, so add PC
// to turn it into an absolute address.
if (compiler->opts.jitFlags->IsSet(JitFlags::JIT_FLAG_RELATIVE_CODE_RELOCS))
{
- getEmitter()->emitIns_R_R_R(INS_add, EA_HANDLE_CNS_RELOC, reg, reg, REG_PC);
+ GetEmitter()->emitIns_R_R_R(INS_add, EA_HANDLE_CNS_RELOC, reg, reg, REG_PC);
}
}
{
_ASSERTE(EA_IS_RELOC(size));
- getEmitter()->emitIns_MovRelocatableImmediate(INS_movw, size, reg, addr);
- getEmitter()->emitIns_MovRelocatableImmediate(INS_movt, size, reg, addr);
+ GetEmitter()->emitIns_MovRelocatableImmediate(INS_movw, size, reg, addr);
+ GetEmitter()->emitIns_MovRelocatableImmediate(INS_movt, size, reg, addr);
if (compiler->opts.jitFlags->IsSet(JitFlags::JIT_FLAG_RELATIVE_CODE_RELOCS))
{
- getEmitter()->emitIns_R_R_R(INS_add, size, reg, reg, REG_PC);
+ GetEmitter()->emitIns_R_R_R(INS_add, size, reg, reg, REG_PC);
}
}
dblInitReg = reg;
#elif defined(_TARGET_ARM64_)
// We will just zero out the entire vector register. This sets it to a double/float zero value
- getEmitter()->emitIns_R_I(INS_movi, EA_16BYTE, reg, 0x00, INS_OPTS_16B);
+ GetEmitter()->emitIns_R_I(INS_movi, EA_16BYTE, reg, 0x00, INS_OPTS_16B);
#else // _TARGET_*
#error Unsupported or unset target architecture
#endif
fltInitReg = reg;
#elif defined(_TARGET_ARM64_)
// We will just zero out the entire vector register. This sets it to a double/float zero value
- getEmitter()->emitIns_R_I(INS_movi, EA_16BYTE, reg, 0x00, INS_OPTS_16B);
+ GetEmitter()->emitIns_R_I(INS_movi, EA_16BYTE, reg, 0x00, INS_OPTS_16B);
#else // _TARGET_*
#error Unsupported or unset target architecture
#endif
// Restore sp from fp
// sub sp, fp, #outsz // Uses #outsz if FP/LR stored at bottom
int SPtoFPdelta = genSPtoFPdelta();
- getEmitter()->emitIns_R_R_I(INS_sub, EA_PTRSIZE, REG_SPBASE, REG_FPBASE, SPtoFPdelta);
+ GetEmitter()->emitIns_R_R_I(INS_sub, EA_PTRSIZE, REG_SPBASE, REG_FPBASE, SPtoFPdelta);
compiler->unwindSetFrameReg(REG_FPBASE, SPtoFPdelta);
}
// Restore sp from fp. No need to update sp after this since we've set up fp before adjusting sp
// in prolog.
// sub sp, fp, #alignmentAdjustment2
- getEmitter()->emitIns_R_R_I(INS_sub, EA_PTRSIZE, REG_SPBASE, REG_FPBASE, alignmentAdjustment2);
+ GetEmitter()->emitIns_R_R_I(INS_sub, EA_PTRSIZE, REG_SPBASE, REG_FPBASE, alignmentAdjustment2);
compiler->unwindSetFrameReg(REG_FPBASE, alignmentAdjustment2);
// Generate:
// sub sp, fp, #outsz
int SPtoFPdelta = genSPtoFPdelta();
assert(SPtoFPdelta == (int)compiler->lvaOutgoingArgSpaceSize);
- getEmitter()->emitIns_R_R_I(INS_sub, EA_PTRSIZE, REG_SPBASE, REG_FPBASE, SPtoFPdelta);
+ GetEmitter()->emitIns_R_R_I(INS_sub, EA_PTRSIZE, REG_SPBASE, REG_FPBASE, SPtoFPdelta);
compiler->unwindSetFrameReg(REG_FPBASE, SPtoFPdelta);
}
int offsetSpToSavedFp = calleeSaveSPDelta -
(compiler->info.compIsVarArgs ? MAX_REG_ARG * REGSIZE_BYTES : 0) -
2 * REGSIZE_BYTES; // -2 for FP, LR
- getEmitter()->emitIns_R_R_I(INS_sub, EA_PTRSIZE, REG_SPBASE, REG_FPBASE, offsetSpToSavedFp);
+ GetEmitter()->emitIns_R_R_I(INS_sub, EA_PTRSIZE, REG_SPBASE, REG_FPBASE, offsetSpToSavedFp);
compiler->unwindSetFrameReg(REG_FPBASE, offsetSpToSavedFp);
}
}
// Generate:
// ldp fp,lr,[sp],#framesz
- getEmitter()->emitIns_R_R_R_I(INS_ldp, EA_PTRSIZE, REG_FP, REG_LR, REG_SPBASE, totalFrameSize,
+ GetEmitter()->emitIns_R_R_R_I(INS_ldp, EA_PTRSIZE, REG_FP, REG_LR, REG_SPBASE, totalFrameSize,
INS_OPTS_POST_INDEX);
compiler->unwindSaveRegPairPreindexed(REG_FP, REG_LR, -totalFrameSize);
}
// ldr fp,lr,[sp,#outsz]
// add sp,sp,#framesz
- getEmitter()->emitIns_R_R_R_I(INS_ldp, EA_PTRSIZE, REG_FP, REG_LR, REG_SPBASE,
+ GetEmitter()->emitIns_R_R_R_I(INS_ldp, EA_PTRSIZE, REG_FP, REG_LR, REG_SPBASE,
compiler->lvaOutgoingArgSpaceSize);
compiler->unwindSaveRegPair(REG_FP, REG_LR, compiler->lvaOutgoingArgSpaceSize);
- getEmitter()->emitIns_R_R_I(INS_add, EA_PTRSIZE, REG_SPBASE, REG_SPBASE, totalFrameSize);
+ GetEmitter()->emitIns_R_R_I(INS_add, EA_PTRSIZE, REG_SPBASE, REG_SPBASE, totalFrameSize);
compiler->unwindAllocStack(totalFrameSize);
}
else if (frameType == 3)
if (emitter::emitIns_valid_imm_for_add(untrLclLo, EA_PTRSIZE))
#endif // !_TARGET_ARM_
{
- getEmitter()->emitIns_R_R_I(INS_add, EA_PTRSIZE, rAddr, genFramePointerReg(), untrLclLo);
+ GetEmitter()->emitIns_R_R_I(INS_add, EA_PTRSIZE, rAddr, genFramePointerReg(), untrLclLo);
}
else
{
// Load immediate into the InitReg register
instGen_Set_Reg_To_Imm(EA_PTRSIZE, initReg, (ssize_t)untrLclLo);
- getEmitter()->emitIns_R_R_R(INS_add, EA_PTRSIZE, rAddr, genFramePointerReg(), initReg);
+ GetEmitter()->emitIns_R_R_R(INS_add, EA_PTRSIZE, rAddr, genFramePointerReg(), initReg);
*pInitRegZeroed = false;
}
while (uCntBytes >= REGSIZE_BYTES * 2)
{
#ifdef _TARGET_ARM_
- getEmitter()->emitIns_R_I(INS_stm, EA_PTRSIZE, rAddr, stmImm);
+ GetEmitter()->emitIns_R_I(INS_stm, EA_PTRSIZE, rAddr, stmImm);
#else // !_TARGET_ARM_
- getEmitter()->emitIns_R_R_R_I(INS_stp, EA_PTRSIZE, REG_ZR, REG_ZR, rAddr, 2 * REGSIZE_BYTES,
+ GetEmitter()->emitIns_R_R_R_I(INS_stp, EA_PTRSIZE, REG_ZR, REG_ZR, rAddr, 2 * REGSIZE_BYTES,
INS_OPTS_POST_INDEX);
#endif // !_TARGET_ARM_
uCntBytes -= REGSIZE_BYTES * 2;
else // useLoop is true
{
#ifdef _TARGET_ARM_
- getEmitter()->emitIns_R_I(INS_stm, EA_PTRSIZE, rAddr, stmImm); // zero stack slots
- getEmitter()->emitIns_R_I(INS_sub, EA_PTRSIZE, rCnt, 1, INS_FLAGS_SET);
+ GetEmitter()->emitIns_R_I(INS_stm, EA_PTRSIZE, rAddr, stmImm); // zero stack slots
+ GetEmitter()->emitIns_R_I(INS_sub, EA_PTRSIZE, rCnt, 1, INS_FLAGS_SET);
#else // !_TARGET_ARM_
- getEmitter()->emitIns_R_R_R_I(INS_stp, EA_PTRSIZE, REG_ZR, REG_ZR, rAddr, 2 * REGSIZE_BYTES,
+ GetEmitter()->emitIns_R_R_R_I(INS_stp, EA_PTRSIZE, REG_ZR, REG_ZR, rAddr, 2 * REGSIZE_BYTES,
INS_OPTS_POST_INDEX); // zero stack slots
- getEmitter()->emitIns_R_R_I(INS_subs, EA_PTRSIZE, rCnt, rCnt, 1);
+ GetEmitter()->emitIns_R_R_I(INS_subs, EA_PTRSIZE, rCnt, rCnt, 1);
#endif // !_TARGET_ARM_
- getEmitter()->emitIns_J(INS_bhi, NULL, -3);
+ GetEmitter()->emitIns_J(INS_bhi, NULL, -3);
uCntBytes %= REGSIZE_BYTES * 2;
}
if (uCntBytes >= REGSIZE_BYTES) // check and zero the last register-sized stack slot (odd number)
{
#ifdef _TARGET_ARM_
- getEmitter()->emitIns_R_R_I(INS_str, EA_PTRSIZE, rZero1, rAddr, 0);
+ GetEmitter()->emitIns_R_R_I(INS_str, EA_PTRSIZE, rZero1, rAddr, 0);
#else // _TARGET_ARM_
if ((uCntBytes - REGSIZE_BYTES) == 0)
{
- getEmitter()->emitIns_R_R_I(INS_str, EA_PTRSIZE, REG_ZR, rAddr, 0);
+ GetEmitter()->emitIns_R_R_I(INS_str, EA_PTRSIZE, REG_ZR, rAddr, 0);
}
else
{
- getEmitter()->emitIns_R_R_I(INS_str, EA_PTRSIZE, REG_ZR, rAddr, REGSIZE_BYTES, INS_OPTS_POST_INDEX);
+ GetEmitter()->emitIns_R_R_I(INS_str, EA_PTRSIZE, REG_ZR, rAddr, REGSIZE_BYTES, INS_OPTS_POST_INDEX);
}
#endif // !_TARGET_ARM_
uCntBytes -= REGSIZE_BYTES;
if (uCntBytes > 0)
{
assert(uCntBytes == sizeof(int));
- getEmitter()->emitIns_R_R_I(INS_str, EA_4BYTE, REG_ZR, rAddr, 0);
+ GetEmitter()->emitIns_R_R_I(INS_str, EA_4BYTE, REG_ZR, rAddr, 0);
uCntBytes -= sizeof(int);
}
#endif // _TARGET_ARM64_
noway_assert((intRegState.rsCalleeRegArgMaskLiveIn & RBM_EAX) == 0);
- getEmitter()->emitIns_R_AR(INS_lea, EA_PTRSIZE, REG_EDI, genFramePointerReg(), untrLclLo);
+ GetEmitter()->emitIns_R_AR(INS_lea, EA_PTRSIZE, REG_EDI, genFramePointerReg(), untrLclLo);
regSet.verifyRegUsed(REG_EDI);
inst_RV_IV(INS_mov, REG_ECX, (untrLclHi - untrLclLo) / sizeof(int), EA_4BYTE);
{
if (layout->IsGCPtr(i))
{
- getEmitter()->emitIns_S_R(ins_Store(TYP_I_IMPL), EA_PTRSIZE,
+ GetEmitter()->emitIns_S_R(ins_Store(TYP_I_IMPL), EA_PTRSIZE,
genGetZeroReg(initReg, pInitRegZeroed), varNum, i * REGSIZE_BYTES);
}
}
unsigned i;
for (i = 0; i + REGSIZE_BYTES <= lclSize; i += REGSIZE_BYTES)
{
- getEmitter()->emitIns_S_R(ins_Store(TYP_I_IMPL), EA_PTRSIZE, zeroReg, varNum, i);
+ GetEmitter()->emitIns_S_R(ins_Store(TYP_I_IMPL), EA_PTRSIZE, zeroReg, varNum, i);
}
#ifdef _TARGET_64BIT_
assert(i == lclSize || (i + sizeof(int) == lclSize));
if (i != lclSize)
{
- getEmitter()->emitIns_S_R(ins_Store(TYP_INT), EA_4BYTE, zeroReg, varNum, i);
+ GetEmitter()->emitIns_S_R(ins_Store(TYP_INT), EA_4BYTE, zeroReg, varNum, i);
i += sizeof(int);
}
#endif // _TARGET_64BIT_
*pInitRegZeroed = false;
// mov reg, [compiler->info.compTypeCtxtArg]
- getEmitter()->emitIns_R_AR(ins_Load(TYP_I_IMPL), EA_PTRSIZE, reg, genFramePointerReg(), varDsc->lvStkOffs);
+ GetEmitter()->emitIns_R_AR(ins_Load(TYP_I_IMPL), EA_PTRSIZE, reg, genFramePointerReg(), varDsc->lvStkOffs);
regSet.verifyRegUsed(reg);
}
compiler->lvaCachedGenericContextArgOffset(), rsGetRsvdReg());
#elif defined(_TARGET_ARM_)
// ARM's emitIns_R_R_I automatically uses the reserved register if necessary.
- getEmitter()->emitIns_R_R_I(ins_Store(TYP_I_IMPL), EA_PTRSIZE, reg, genFramePointerReg(),
+ GetEmitter()->emitIns_R_R_I(ins_Store(TYP_I_IMPL), EA_PTRSIZE, reg, genFramePointerReg(),
compiler->lvaCachedGenericContextArgOffset());
#else // !ARM64 !ARM
// mov [ebp-lvaCachedGenericContextArgOffset()], reg
- getEmitter()->emitIns_AR_R(ins_Store(TYP_I_IMPL), EA_PTRSIZE, reg, genFramePointerReg(),
+ GetEmitter()->emitIns_AR_R(ins_Store(TYP_I_IMPL), EA_PTRSIZE, reg, genFramePointerReg(),
compiler->lvaCachedGenericContextArgOffset());
#endif // !ARM64 !ARM
}
#endif // FEATURE_EH_FUNCLETS
- unsigned size = getEmitter()->emitGetPrologOffsetEstimate();
+ unsigned size = GetEmitter()->emitGetPrologOffsetEstimate();
if (size < 5)
{
instNop(5 - size);
/* Nothing is live on entry to the prolog */
- getEmitter()->emitCreatePlaceholderIG(IGPT_PROLOG, block, VarSetOps::MakeEmpty(compiler), 0, 0, false);
+ GetEmitter()->emitCreatePlaceholderIG(IGPT_PROLOG, block, VarSetOps::MakeEmpty(compiler), 0, 0, false);
}
/*****************************************************************************
JITDUMP("Reserving epilog IG for block " FMT_BB "\n", block->bbNum);
assert(block != nullptr);
- const VARSET_TP& gcrefVarsArg(getEmitter()->emitThisGCrefVars);
+ const VARSET_TP& gcrefVarsArg(GetEmitter()->emitThisGCrefVars);
bool last = (block->bbNext == nullptr);
- getEmitter()->emitCreatePlaceholderIG(IGPT_EPILOG, block, gcrefVarsArg, gcrefRegsArg, byrefRegsArg, last);
+ GetEmitter()->emitCreatePlaceholderIG(IGPT_EPILOG, block, gcrefVarsArg, gcrefRegsArg, byrefRegsArg, last);
}
#if defined(FEATURE_EH_FUNCLETS)
JITDUMP("Reserving funclet prolog IG for block " FMT_BB "\n", block->bbNum);
- getEmitter()->emitCreatePlaceholderIG(IGPT_FUNCLET_PROLOG, block, gcInfo.gcVarPtrSetCur, gcInfo.gcRegGCrefSetCur,
+ GetEmitter()->emitCreatePlaceholderIG(IGPT_FUNCLET_PROLOG, block, gcInfo.gcVarPtrSetCur, gcInfo.gcRegGCrefSetCur,
gcInfo.gcRegByrefSetCur, false);
}
JITDUMP("Reserving funclet epilog IG for block " FMT_BB "\n", block->bbNum);
bool last = (block->bbNext == nullptr);
- getEmitter()->emitCreatePlaceholderIG(IGPT_FUNCLET_EPILOG, block, gcInfo.gcVarPtrSetCur, gcInfo.gcRegGCrefSetCur,
+ GetEmitter()->emitCreatePlaceholderIG(IGPT_FUNCLET_EPILOG, block, gcInfo.gcVarPtrSetCur, gcInfo.gcRegGCrefSetCur,
gcInfo.gcRegByrefSetCur, last);
}
here (where we have committed to the final numbers for the frame offsets)
This will ensure that the prolog size is always correct
*/
- getEmitter()->emitMaxTmpSize = regSet.tmpGetTotalSize();
+ GetEmitter()->emitMaxTmpSize = regSet.tmpGetTotalSize();
#ifdef DEBUG
if (compiler->opts.dspCode || compiler->opts.disAsm || compiler->opts.disAsm2 || verbose)
if (delta == 0)
{
- getEmitter()->emitIns_R_R(INS_mov, EA_PTRSIZE, REG_FPBASE, REG_SPBASE);
+ GetEmitter()->emitIns_R_R(INS_mov, EA_PTRSIZE, REG_FPBASE, REG_SPBASE);
#ifdef USING_SCOPE_INFO
psiMoveESPtoEBP();
#endif // USING_SCOPE_INFO
}
else
{
- getEmitter()->emitIns_R_AR(INS_lea, EA_PTRSIZE, REG_FPBASE, REG_SPBASE, delta);
+ GetEmitter()->emitIns_R_AR(INS_lea, EA_PTRSIZE, REG_FPBASE, REG_SPBASE, delta);
// We don't update prolog scope info (there is no function to handle lea), but that is currently dead code
// anyway.
}
#elif defined(_TARGET_ARM_)
assert(arm_Valid_Imm_For_Add_SP(delta));
- getEmitter()->emitIns_R_R_I(INS_add, EA_PTRSIZE, REG_FPBASE, REG_SPBASE, delta);
+ GetEmitter()->emitIns_R_R_I(INS_add, EA_PTRSIZE, REG_FPBASE, REG_SPBASE, delta);
if (reportUnwindData)
{
if (delta == 0)
{
- getEmitter()->emitIns_R_R(INS_mov, EA_PTRSIZE, REG_FPBASE, REG_SPBASE);
+ GetEmitter()->emitIns_R_R(INS_mov, EA_PTRSIZE, REG_FPBASE, REG_SPBASE);
}
else
{
- getEmitter()->emitIns_R_R_I(INS_add, EA_PTRSIZE, REG_FPBASE, REG_SPBASE, delta);
+ GetEmitter()->emitIns_R_R_I(INS_add, EA_PTRSIZE, REG_FPBASE, REG_SPBASE, delta);
}
if (reportUnwindData)
/* Ready to start on the prolog proper */
- getEmitter()->emitBegProlog();
+ GetEmitter()->emitBegProlog();
compiler->unwindBegProlog();
// Do this so we can put the prolog instruction group ahead of
// args will be contiguous.
if (compiler->info.compIsVarArgs)
{
- getEmitter()->spillIntArgRegsToShadowSlots();
+ GetEmitter()->spillIntArgRegsToShadowSlots();
}
#endif // _TARGET_AMD64_
#ifdef _TARGET_ARM_
if (compiler->compLocallocUsed)
{
- getEmitter()->emitIns_R_R(INS_mov, EA_4BYTE, REG_SAVED_LOCALLOC_SP, REG_SPBASE);
+ GetEmitter()->emitIns_R_R(INS_mov, EA_4BYTE, REG_SAVED_LOCALLOC_SP, REG_SPBASE);
regSet.verifyRegUsed(REG_SAVED_LOCALLOC_SP);
compiler->unwindSetFrameReg(REG_SAVED_LOCALLOC_SP, 0);
}
if (compiler->info.compPublishStubParam)
{
#if CPU_LOAD_STORE_ARCH
- getEmitter()->emitIns_S_R(ins_Store(TYP_I_IMPL), EA_PTRSIZE, REG_SECRET_STUB_PARAM,
+ GetEmitter()->emitIns_S_R(ins_Store(TYP_I_IMPL), EA_PTRSIZE, REG_SECRET_STUB_PARAM,
compiler->lvaStubArgumentVar, 0);
#else
// mov [lvaStubArgumentVar], EAX
- getEmitter()->emitIns_AR_R(ins_Store(TYP_I_IMPL), EA_PTRSIZE, REG_SECRET_STUB_PARAM, genFramePointerReg(),
+ GetEmitter()->emitIns_AR_R(ins_Store(TYP_I_IMPL), EA_PTRSIZE, REG_SECRET_STUB_PARAM, genFramePointerReg(),
compiler->lvaTable[compiler->lvaStubArgumentVar].lvStkOffs);
#endif
assert(intRegState.rsCalleeRegArgMaskLiveIn & RBM_SECRET_STUB_PARAM);
initRegZeroed = true;
}
- getEmitter()->emitIns_S_R(ins_Store(TYP_I_IMPL), EA_PTRSIZE, initReg, compiler->lvaShadowSPslotsVar,
+ GetEmitter()->emitIns_S_R(ins_Store(TYP_I_IMPL), EA_PTRSIZE, initReg, compiler->lvaShadowSPslotsVar,
firstSlotOffs);
}
// Initialize the LocalAllocSP slot if there is localloc in the function.
if (compiler->lvaLocAllocSPvar != BAD_VAR_NUM)
{
- getEmitter()->emitIns_S_R(ins_Store(TYP_I_IMPL), EA_PTRSIZE, REG_SPBASE, compiler->lvaLocAllocSPvar, 0);
+ GetEmitter()->emitIns_S_R(ins_Store(TYP_I_IMPL), EA_PTRSIZE, REG_SPBASE, compiler->lvaLocAllocSPvar, 0);
}
#endif // JIT32_GCENCODER
*
*/
genPrologPadForReJit();
- getEmitter()->emitMarkPrologEnd();
+ GetEmitter()->emitMarkPrologEnd();
}
#if defined(UNIX_AMD64_ABI) && defined(FEATURE_SIMD)
if (genInterruptible)
{
genPrologPadForReJit();
- getEmitter()->emitMarkPrologEnd();
+ GetEmitter()->emitMarkPrologEnd();
}
if (compiler->opts.compScopeInfo && (compiler->info.compVarScopesCount > 0))
{
if (hasGCRef)
{
- getEmitter()->emitSetFrameRangeGCRs(GCrefLo, GCrefHi);
+ GetEmitter()->emitSetFrameRangeGCRs(GCrefLo, GCrefHi);
}
else
{
noway_assert(compiler->info.compArgsCount > 0);
// MOV EAX, <VARARGS HANDLE>
- getEmitter()->emitIns_R_S(ins_Load(TYP_I_IMPL), EA_PTRSIZE, REG_EAX, compiler->info.compArgsCount - 1, 0);
+ GetEmitter()->emitIns_R_S(ins_Load(TYP_I_IMPL), EA_PTRSIZE, REG_EAX, compiler->info.compArgsCount - 1, 0);
regSet.verifyRegUsed(REG_EAX);
// MOV EAX, [EAX]
- getEmitter()->emitIns_R_AR(ins_Load(TYP_I_IMPL), EA_PTRSIZE, REG_EAX, REG_EAX, 0);
+ GetEmitter()->emitIns_R_AR(ins_Load(TYP_I_IMPL), EA_PTRSIZE, REG_EAX, REG_EAX, 0);
// EDX might actually be holding something here. So make sure to only use EAX for this code
// sequence.
noway_assert(lastArg->lvFramePointerBased);
// LEA EAX, &<VARARGS HANDLE> + EAX
- getEmitter()->emitIns_R_ARR(INS_lea, EA_PTRSIZE, REG_EAX, genFramePointerReg(), REG_EAX, offset);
+ GetEmitter()->emitIns_R_ARR(INS_lea, EA_PTRSIZE, REG_EAX, genFramePointerReg(), REG_EAX, offset);
if (varDsc->lvIsInReg())
{
if (varDsc->lvRegNum != REG_EAX)
{
- getEmitter()->emitIns_R_R(INS_mov, EA_PTRSIZE, varDsc->lvRegNum, REG_EAX);
+ GetEmitter()->emitIns_R_R(INS_mov, EA_PTRSIZE, varDsc->lvRegNum, REG_EAX);
regSet.verifyRegUsed(varDsc->lvRegNum);
}
}
else
{
- getEmitter()->emitIns_S_R(ins_Store(TYP_I_IMPL), EA_PTRSIZE, REG_EAX, argsStartVar, 0);
+ GetEmitter()->emitIns_S_R(ins_Store(TYP_I_IMPL), EA_PTRSIZE, REG_EAX, argsStartVar, 0);
}
}
noway_assert(compiler->lvaReturnSpCheck != 0xCCCCCCCC &&
compiler->lvaTable[compiler->lvaReturnSpCheck].lvDoNotEnregister &&
compiler->lvaTable[compiler->lvaReturnSpCheck].lvOnFrame);
- getEmitter()->emitIns_S_R(ins_Store(TYP_I_IMPL), EA_PTRSIZE, REG_SPBASE, compiler->lvaReturnSpCheck, 0);
+ GetEmitter()->emitIns_S_R(ins_Store(TYP_I_IMPL), EA_PTRSIZE, REG_SPBASE, compiler->lvaReturnSpCheck, 0);
}
#endif // defined(DEBUG) && defined(_TARGET_XARCH_)
- getEmitter()->emitEndProlog();
+ GetEmitter()->emitEndProlog();
compiler->unwindEndProlog();
- noway_assert(getEmitter()->emitMaxTmpSize == regSet.tmpGetTotalSize());
+ noway_assert(GetEmitter()->emitMaxTmpSize == regSet.tmpGetTotalSize());
}
#ifdef _PREFAST_
#pragma warning(pop)
ScopedSetVariable<bool> _setGeneratingEpilog(&compiler->compGeneratingEpilog, true);
- VarSetOps::Assign(compiler, gcInfo.gcVarPtrSetCur, getEmitter()->emitInitGCrefVars);
- gcInfo.gcRegGCrefSetCur = getEmitter()->emitInitGCrefRegs;
- gcInfo.gcRegByrefSetCur = getEmitter()->emitInitByrefRegs;
+ VarSetOps::Assign(compiler, gcInfo.gcVarPtrSetCur, GetEmitter()->emitInitGCrefVars);
+ gcInfo.gcRegGCrefSetCur = GetEmitter()->emitInitGCrefRegs;
+ gcInfo.gcRegByrefSetCur = GetEmitter()->emitInitByrefRegs;
#ifdef DEBUG
if (compiler->opts.dspCode)
dumpConvertedVarSet(compiler, gcInfo.gcVarPtrSetCur);
printf(", gcRegGCrefSetCur=");
printRegMaskInt(gcInfo.gcRegGCrefSetCur);
- getEmitter()->emitDispRegSet(gcInfo.gcRegGCrefSetCur);
+ GetEmitter()->emitDispRegSet(gcInfo.gcRegGCrefSetCur);
printf(", gcRegByrefSetCur=");
printRegMaskInt(gcInfo.gcRegByrefSetCur);
- getEmitter()->emitDispRegSet(gcInfo.gcRegByrefSetCur);
+ GetEmitter()->emitDispRegSet(gcInfo.gcRegByrefSetCur);
printf("\n");
}
#endif // DEBUG
regNumber vptrReg1 = REG_LR;
instGen_Set_Reg_To_Imm(EA_HANDLE_CNS_RELOC, indCallReg, (ssize_t)addrInfo.addr);
- getEmitter()->emitIns_R_R(INS_mov, EA_PTRSIZE, vptrReg1, indCallReg);
- getEmitter()->emitIns_R_R_I(INS_ldr, EA_PTRSIZE, indCallReg, indCallReg, 0);
- getEmitter()->emitIns_R_R(INS_add, EA_PTRSIZE, indCallReg, vptrReg1);
+ GetEmitter()->emitIns_R_R(INS_mov, EA_PTRSIZE, vptrReg1, indCallReg);
+ GetEmitter()->emitIns_R_R_I(INS_ldr, EA_PTRSIZE, indCallReg, indCallReg, 0);
+ GetEmitter()->emitIns_R_R(INS_add, EA_PTRSIZE, indCallReg, vptrReg1);
}
genPopCalleeSavedRegisters(jmpEpilog);
instGen_Set_Reg_To_Imm(EA_HANDLE_CNS_RELOC, indCallReg, (ssize_t)addrInfo.addr);
if (addrInfo.accessType == IAT_PVALUE)
{
- getEmitter()->emitIns_R_R_I(INS_ldr, EA_PTRSIZE, indCallReg, indCallReg, 0);
+ GetEmitter()->emitIns_R_R_I(INS_ldr, EA_PTRSIZE, indCallReg, indCallReg, 0);
regSet.verifyRegUsed(indCallReg);
}
break;
*/
// clang-format off
- getEmitter()->emitIns_Call(callType,
+ GetEmitter()->emitIns_Call(callType,
methHnd,
INDEBUG_LDISASM_COMMA(nullptr)
addr,
{
assert(call->gtCallMethHnd != nullptr);
// clang-format off
- getEmitter()->emitIns_Call(emitter::EC_FUNC_TOKEN,
+ GetEmitter()->emitIns_Call(emitter::EC_FUNC_TOKEN,
call->gtCallMethHnd,
INDEBUG_LDISASM_COMMA(nullptr)
call->gtDirectCallAddress,
{
// Target requires indirection to obtain. genCallInstruction will have materialized
// it into REG_FASTTAILCALL_TARGET already, so just branch to it.
- getEmitter()->emitIns_R(INS_br, emitTypeSize(TYP_I_IMPL), REG_FASTTAILCALL_TARGET);
+ GetEmitter()->emitIns_R(INS_br, emitTypeSize(TYP_I_IMPL), REG_FASTTAILCALL_TARGET);
}
}
#endif // FEATURE_FASTTAILCALL
ScopedSetVariable<bool> _setGeneratingEpilog(&compiler->compGeneratingEpilog, true);
- VarSetOps::Assign(compiler, gcInfo.gcVarPtrSetCur, getEmitter()->emitInitGCrefVars);
- gcInfo.gcRegGCrefSetCur = getEmitter()->emitInitGCrefRegs;
- gcInfo.gcRegByrefSetCur = getEmitter()->emitInitByrefRegs;
+ VarSetOps::Assign(compiler, gcInfo.gcVarPtrSetCur, GetEmitter()->emitInitGCrefVars);
+ gcInfo.gcRegGCrefSetCur = GetEmitter()->emitInitGCrefRegs;
+ gcInfo.gcRegByrefSetCur = GetEmitter()->emitInitByrefRegs;
noway_assert(!compiler->opts.MinOpts() || isFramePointerUsed()); // FPO not allowed with minOpts
dumpConvertedVarSet(compiler, gcInfo.gcVarPtrSetCur);
printf(", gcRegGCrefSetCur=");
printRegMaskInt(gcInfo.gcRegGCrefSetCur);
- getEmitter()->emitDispRegSet(gcInfo.gcRegGCrefSetCur);
+ GetEmitter()->emitDispRegSet(gcInfo.gcRegGCrefSetCur);
printf(", gcRegByrefSetCur=");
printRegMaskInt(gcInfo.gcRegByrefSetCur);
- getEmitter()->emitDispRegSet(gcInfo.gcRegByrefSetCur);
+ GetEmitter()->emitDispRegSet(gcInfo.gcRegByrefSetCur);
printf("\n");
}
#endif
// unwinder (and break binary compat with older versions of the runtime) by starting the epilog
// after any `vzeroupper` instruction has been emitted. If either of the above conditions changes,
// we will need to rethink this.
- getEmitter()->emitStartEpilog();
+ GetEmitter()->emitStartEpilog();
#endif
/* Compute the size in bytes we've pushed/popped */
noway_assert(offset < UCHAR_MAX); // the offset fits in a byte
#endif
- getEmitter()->emitIns_R_AR(INS_lea, EA_PTRSIZE, REG_SPBASE, REG_FPBASE, -offset);
+ GetEmitter()->emitIns_R_AR(INS_lea, EA_PTRSIZE, REG_SPBASE, REG_FPBASE, -offset);
}
}
inst_RV(INS_pop, REG_EBP, TYP_I_IMPL);
}
- getEmitter()->emitStartExitSeq(); // Mark the start of the "return" sequence
+ GetEmitter()->emitStartExitSeq(); // Mark the start of the "return" sequence
/* Check if this a special return block i.e.
* CEE_JMP instruction */
}
// clang-format off
- getEmitter()->emitIns_Call(callType,
+ GetEmitter()->emitIns_Call(callType,
methHnd,
INDEBUG_LDISASM_COMMA(nullptr)
addr,
{
assert(call->gtCallMethHnd != nullptr);
// clang-format off
- getEmitter()->emitIns_Call(
+ GetEmitter()->emitIns_Call(
emitter::EC_FUNC_TOKEN,
call->gtCallMethHnd,
INDEBUG_LDISASM_COMMA(nullptr)
// Target requires indirection to obtain. genCallInstruction will have materialized
// it into RAX already, so just jump to it. The stack walker requires that a register
// indirect tail call be rex.w prefixed.
- getEmitter()->emitIns_R(INS_rex_jmp, emitTypeSize(TYP_I_IMPL), REG_RAX);
+ GetEmitter()->emitIns_R(INS_rex_jmp, emitTypeSize(TYP_I_IMPL), REG_RAX);
}
#else
{
// This is the first block of a filter
- getEmitter()->emitIns_R_R_I(INS_ldr, EA_PTRSIZE, REG_R1, REG_R1, genFuncletInfo.fiPSP_slot_CallerSP_offset);
+ GetEmitter()->emitIns_R_R_I(INS_ldr, EA_PTRSIZE, REG_R1, REG_R1, genFuncletInfo.fiPSP_slot_CallerSP_offset);
regSet.verifyRegUsed(REG_R1);
- getEmitter()->emitIns_R_R_I(INS_str, EA_PTRSIZE, REG_R1, REG_SPBASE, genFuncletInfo.fiPSP_slot_SP_offset);
- getEmitter()->emitIns_R_R_I(INS_sub, EA_PTRSIZE, REG_FPBASE, REG_R1,
+ GetEmitter()->emitIns_R_R_I(INS_str, EA_PTRSIZE, REG_R1, REG_SPBASE, genFuncletInfo.fiPSP_slot_SP_offset);
+ GetEmitter()->emitIns_R_R_I(INS_sub, EA_PTRSIZE, REG_FPBASE, REG_R1,
genFuncletInfo.fiFunctionCallerSPtoFPdelta);
}
else
{
// This is a non-filter funclet
- getEmitter()->emitIns_R_R_I(INS_add, EA_PTRSIZE, REG_R3, REG_FPBASE,
+ GetEmitter()->emitIns_R_R_I(INS_add, EA_PTRSIZE, REG_R3, REG_FPBASE,
genFuncletInfo.fiFunctionCallerSPtoFPdelta);
regSet.verifyRegUsed(REG_R3);
- getEmitter()->emitIns_R_R_I(INS_str, EA_PTRSIZE, REG_R3, REG_SPBASE, genFuncletInfo.fiPSP_slot_SP_offset);
+ GetEmitter()->emitIns_R_R_I(INS_str, EA_PTRSIZE, REG_R3, REG_SPBASE, genFuncletInfo.fiPSP_slot_SP_offset);
}
}
return;
}
- getEmitter()->emitIns_R_AR(INS_mov, EA_PTRSIZE, REG_FPBASE, REG_ARG_0, genFuncletInfo.fiPSP_slot_InitialSP_offset);
+ GetEmitter()->emitIns_R_AR(INS_mov, EA_PTRSIZE, REG_FPBASE, REG_ARG_0, genFuncletInfo.fiPSP_slot_InitialSP_offset);
regSet.verifyRegUsed(REG_FPBASE);
- getEmitter()->emitIns_AR_R(INS_mov, EA_PTRSIZE, REG_FPBASE, REG_SPBASE, genFuncletInfo.fiPSP_slot_InitialSP_offset);
+ GetEmitter()->emitIns_AR_R(INS_mov, EA_PTRSIZE, REG_FPBASE, REG_SPBASE, genFuncletInfo.fiPSP_slot_InitialSP_offset);
if (genFuncletInfo.fiFunction_InitialSP_to_FP_delta != 0)
{
- getEmitter()->emitIns_R_AR(INS_lea, EA_PTRSIZE, REG_FPBASE, REG_FPBASE,
+ GetEmitter()->emitIns_R_AR(INS_lea, EA_PTRSIZE, REG_FPBASE, REG_FPBASE,
genFuncletInfo.fiFunction_InitialSP_to_FP_delta);
}
regNumber regTmp = initReg;
*pInitRegZeroed = false;
- getEmitter()->emitIns_R_R_I(INS_add, EA_PTRSIZE, regTmp, regBase, callerSPOffs);
- getEmitter()->emitIns_S_R(INS_str, EA_PTRSIZE, regTmp, compiler->lvaPSPSym, 0);
+ GetEmitter()->emitIns_R_R_I(INS_add, EA_PTRSIZE, regTmp, regBase, callerSPOffs);
+ GetEmitter()->emitIns_S_R(INS_str, EA_PTRSIZE, regTmp, compiler->lvaPSPSym, 0);
#elif defined(_TARGET_ARM64_)
regNumber regTmp = initReg;
*pInitRegZeroed = false;
- getEmitter()->emitIns_R_R_Imm(INS_add, EA_PTRSIZE, regTmp, REG_SPBASE, SPtoCallerSPdelta);
- getEmitter()->emitIns_S_R(INS_str, EA_PTRSIZE, regTmp, compiler->lvaPSPSym, 0);
+ GetEmitter()->emitIns_R_R_Imm(INS_add, EA_PTRSIZE, regTmp, REG_SPBASE, SPtoCallerSPdelta);
+ GetEmitter()->emitIns_S_R(INS_str, EA_PTRSIZE, regTmp, compiler->lvaPSPSym, 0);
#elif defined(_TARGET_AMD64_)
// We generate:
// mov [rbp-20h], rsp // store the Initial-SP (our current rsp) in the PSPsym
- getEmitter()->emitIns_S_R(ins_Store(TYP_I_IMPL), EA_PTRSIZE, REG_SPBASE, compiler->lvaPSPSym, 0);
+ GetEmitter()->emitIns_S_R(ins_Store(TYP_I_IMPL), EA_PTRSIZE, REG_SPBASE, compiler->lvaPSPSym, 0);
#else // _TARGET_*
if (verbose)
{
printf("*************** Before prolog / epilog generation\n");
- getEmitter()->emitDispIGlist(false);
+ GetEmitter()->emitDispIGlist(false);
}
#endif
// Tell the emitter we're done with main code generation, and are going to start prolog and epilog generation.
- getEmitter()->emitStartPrologEpilogGeneration();
+ GetEmitter()->emitStartPrologEpilogGeneration();
gcInfo.gcResetForBB();
genFnProlog();
// have the insGroup list, which serves well, so we don't need the extra allocations
// for a prolog/epilog list in the code generator.
- getEmitter()->emitGeneratePrologEpilog();
+ GetEmitter()->emitGeneratePrologEpilog();
// Tell the emitter we're done with all prolog and epilog generation.
- getEmitter()->emitFinishPrologEpilogGeneration();
+ GetEmitter()->emitFinishPrologEpilogGeneration();
#ifdef DEBUG
if (verbose)
{
printf("*************** After prolog / epilog generation\n");
- getEmitter()->emitDispIGlist(false);
+ GetEmitter()->emitDispIGlist(false);
}
#endif
}
if ((regBit & regMask) != 0)
{
// ABI requires us to preserve lower 128-bits of YMM register.
- getEmitter()->emitIns_AR_R(copyIns,
+ GetEmitter()->emitIns_AR_R(copyIns,
EA_8BYTE, // TODO-XArch-Cleanup: size specified here doesn't matter but should be
// EA_16BYTE
reg, REG_SPBASE, offset);
if ((regBit & regMask) != 0)
{
// ABI requires us to restore lower 128-bits of YMM register.
- getEmitter()->emitIns_R_AR(copyIns,
+ GetEmitter()->emitIns_R_AR(copyIns,
EA_8BYTE, // TODO-XArch-Cleanup: size specified here doesn't matter but should be
// EA_16BYTE
reg, regBase, offset);
bool emitVzeroUpper = false;
if (check256bitOnly)
{
- emitVzeroUpper = getEmitter()->Contains256bitAVX();
+ emitVzeroUpper = GetEmitter()->Contains256bitAVX();
}
else
{
- emitVzeroUpper = getEmitter()->ContainsAVX();
+ emitVzeroUpper = GetEmitter()->ContainsAVX();
}
if (emitVzeroUpper)
noway_assert(scopeP->scStartLoc.Valid());
noway_assert(scopeP->scEndLoc.Valid());
- UNATIVE_OFFSET startOffs = scopeP->scStartLoc.CodeOffset(getEmitter());
- UNATIVE_OFFSET endOffs = scopeP->scEndLoc.CodeOffset(getEmitter());
+ UNATIVE_OFFSET startOffs = scopeP->scStartLoc.CodeOffset(GetEmitter());
+ UNATIVE_OFFSET endOffs = scopeP->scEndLoc.CodeOffset(GetEmitter());
unsigned varNum = scopeP->scSlotNum;
noway_assert(startOffs <= endOffs);
// Find the start and end IP
- UNATIVE_OFFSET startOffs = scopeL->scStartLoc.CodeOffset(getEmitter());
- UNATIVE_OFFSET endOffs = scopeL->scEndLoc.CodeOffset(getEmitter());
+ UNATIVE_OFFSET startOffs = scopeL->scStartLoc.CodeOffset(GetEmitter());
+ UNATIVE_OFFSET endOffs = scopeL->scEndLoc.CodeOffset(GetEmitter());
noway_assert(scopeL->scStartLoc != scopeL->scEndLoc);
}
for (VariableLiveKeeper::VariableLiveRange& liveRange : *liveRanges)
{
- UNATIVE_OFFSET startOffs = liveRange.m_StartEmitLocation.CodeOffset(getEmitter());
- UNATIVE_OFFSET endOffs = liveRange.m_EndEmitLocation.CodeOffset(getEmitter());
+ UNATIVE_OFFSET startOffs = liveRange.m_StartEmitLocation.CodeOffset(GetEmitter());
+ UNATIVE_OFFSET endOffs = liveRange.m_EndEmitLocation.CodeOffset(GetEmitter());
if (varDsc->lvIsParam && (startOffs == endOffs))
{
printf(" ");
ipMapping->ipmdNativeLoc.Print();
// We can only call this after code generation. Is there any way to tell when it's legal to call?
- // printf(" [%x]", ipMapping->ipmdNativeLoc.CodeOffset(getEmitter()));
+ // printf(" [%x]", ipMapping->ipmdNativeLoc.CodeOffset(GetEmitter()));
if (ipMapping->ipmdIsLabel)
{
/* Create a mapping entry and append it to the list */
Compiler::IPmappingDsc* addMapping = compiler->getAllocator(CMK_DebugInfo).allocate<Compiler::IPmappingDsc>(1);
- addMapping->ipmdNativeLoc.CaptureLocation(getEmitter());
+ addMapping->ipmdNativeLoc.CaptureLocation(GetEmitter());
addMapping->ipmdILoffsx = offsx;
addMapping->ipmdIsLabel = isLabel;
addMapping->ipmdNext = nullptr;
/* Create a mapping entry and prepend it to the list */
Compiler::IPmappingDsc* addMapping = compiler->getAllocator(CMK_DebugInfo).allocate<Compiler::IPmappingDsc>(1);
- addMapping->ipmdNativeLoc.CaptureLocation(getEmitter());
+ addMapping->ipmdNativeLoc.CaptureLocation(GetEmitter());
addMapping->ipmdILoffsx = offsx;
addMapping->ipmdIsLabel = true;
addMapping->ipmdNext = nullptr;
/* offsx was the last reported offset. Make sure that we generated native code */
- if (compiler->genIPmappingLast->ipmdNativeLoc.IsCurrentLocation(getEmitter()))
+ if (compiler->genIPmappingLast->ipmdNativeLoc.IsCurrentLocation(GetEmitter()))
{
instGen(INS_nop);
}
continue;
}
- UNATIVE_OFFSET nextNativeOfs = tmpMapping->ipmdNativeLoc.CodeOffset(getEmitter());
+ UNATIVE_OFFSET nextNativeOfs = tmpMapping->ipmdNativeLoc.CodeOffset(GetEmitter());
if (nextNativeOfs != lastNativeOfs)
{
{
noway_assert(prevMapping != nullptr);
noway_assert(!prevMapping->ipmdNativeLoc.Valid() ||
- lastNativeOfs == prevMapping->ipmdNativeLoc.CodeOffset(getEmitter()));
+ lastNativeOfs == prevMapping->ipmdNativeLoc.CodeOffset(GetEmitter()));
/* The previous block had the same native offset. We have to
discard one of the mappings. Simply reinitialize ipmdNativeLoc
continue;
}
- UNATIVE_OFFSET nextNativeOfs = tmpMapping->ipmdNativeLoc.CodeOffset(getEmitter());
+ UNATIVE_OFFSET nextNativeOfs = tmpMapping->ipmdNativeLoc.CodeOffset(GetEmitter());
IL_OFFSETX srcIP = tmpMapping->ipmdILoffsx;
if (jitIsCallInstruction(srcIP))
{
if (targetType == TYP_FLOAT)
{
- getEmitter()->emitIns_R_R(INS_vmov_f2i, EA_4BYTE, REG_INTRET, op1->gtRegNum);
+ GetEmitter()->emitIns_R_R(INS_vmov_f2i, EA_4BYTE, REG_INTRET, op1->gtRegNum);
}
else
{
assert(targetType == TYP_DOUBLE);
- getEmitter()->emitIns_R_R_R(INS_vmov_d2i, EA_8BYTE, REG_INTRET, REG_NEXT(REG_INTRET),
+ GetEmitter()->emitIns_R_R_R(INS_vmov_d2i, EA_8BYTE, REG_INTRET, REG_NEXT(REG_INTRET),
op1->gtRegNum);
}
}
{
noway_assert(lvaStackPointerVar != 0xCCCCCCCC && compiler->lvaTable[lvaStackPointerVar].lvDoNotEnregister &&
compiler->lvaTable[lvaStackPointerVar].lvOnFrame);
- getEmitter()->emitIns_S_R(INS_cmp, EA_PTRSIZE, REG_SPBASE, lvaStackPointerVar, 0);
+ GetEmitter()->emitIns_S_R(INS_cmp, EA_PTRSIZE, REG_SPBASE, lvaStackPointerVar, 0);
BasicBlock* sp_check = genCreateTempLabel();
- getEmitter()->emitIns_J(INS_je, sp_check);
+ GetEmitter()->emitIns_J(INS_je, sp_check);
instGen(INS_BREAKPOINT);
genDefineTempLabel(sp_check);
}
VariableLiveDescriptor* varLiveDsc = &m_vlrLiveDsc[varNum];
// this variable live range is valid from this point
- varLiveDsc->startLiveRangeFromEmitter(varLocation, m_Compiler->getEmitter());
+ varLiveDsc->startLiveRangeFromEmitter(varLocation, m_Compiler->GetEmitter());
}
}
if (m_Compiler->opts.compDbgInfo && varNum < m_LiveDscCount && !m_LastBasicBlockHasBeenEmited)
{
// this variable live range is no longer valid from this point
- m_vlrLiveDsc[varNum].endLiveRangeAtEmitter(m_Compiler->getEmitter());
+ m_vlrLiveDsc[varNum].endLiveRangeAtEmitter(m_Compiler->GetEmitter());
}
}
// Report the home change for this variable
VariableLiveDescriptor* varLiveDsc = &m_vlrLiveDsc[varNum];
- varLiveDsc->updateLiveRangeAtEmitter(siVarLoc, m_Compiler->getEmitter());
+ varLiveDsc->updateLiveRangeAtEmitter(siVarLoc, m_Compiler->GetEmitter());
}
}
noway_assert(varNum < m_LiveArgsCount);
VariableLiveDescriptor* varLiveDsc = &m_vlrLiveDscForProlog[varNum];
- varLiveDsc->startLiveRangeFromEmitter(varLocation, m_Compiler->getEmitter());
+ varLiveDsc->startLiveRangeFromEmitter(varLocation, m_Compiler->GetEmitter());
}
//------------------------------------------------------------------------
if (varLiveDsc->hasVariableLiveRangeOpen())
{
- varLiveDsc->endLiveRangeAtEmitter(m_Compiler->getEmitter());
+ varLiveDsc->endLiveRangeAtEmitter(m_Compiler->GetEmitter());
}
}
}
{
hasDumpedHistory = true;
printf("IL Var Num %d:\n", m_Compiler->compMap2ILvarNum(varNum));
- varLiveDsc->dumpAllRegisterLiveRangesForBlock(m_Compiler->getEmitter(), m_Compiler->codeGen);
+ varLiveDsc->dumpAllRegisterLiveRangesForBlock(m_Compiler->GetEmitter(), m_Compiler->codeGen);
}
}
}
TempDsc* getSpillTempDsc(GenTree* tree);
public:
// Accessor for the emitter owned by this code generator (backing field
// m_cgEmitter). Renamed getEmitter() -> GetEmitter() as part of the
// file-wide identifier-casing cleanup applied throughout this change.
- emitter* getEmitter() const
+ emitter* GetEmitter() const
{
return m_cgEmitter;
}
#ifdef _TARGET_XARCH_
if (genAlignLoops && block->bbFlags & BBF_LOOP_HEAD)
{
- getEmitter()->emitLoopAlign();
+ GetEmitter()->emitLoopAlign();
}
#endif
{
// Mark a label and update the current set of live GC refs
- block->bbEmitCookie = getEmitter()->emitAddLabel(gcInfo.gcVarPtrSetCur, gcInfo.gcRegGCrefSetCur,
+ block->bbEmitCookie = GetEmitter()->emitAddLabel(gcInfo.gcVarPtrSetCur, gcInfo.gcRegGCrefSetCur,
gcInfo.gcRegByrefSetCur, FALSE);
}
{
// We require the block that starts the Cold section to have a label
noway_assert(block->bbEmitCookie);
- getEmitter()->emitSetFirstColdIGCookie(block->bbEmitCookie);
+ GetEmitter()->emitSetFirstColdIGCookie(block->bbEmitCookie);
}
/* Both stacks are always empty on entry to a basic block */
{
printf("Regset after " FMT_BB " gcr=", block->bbNum);
printRegMaskInt(gcInfo.gcRegGCrefSetCur & ~regSet.rsMaskVars);
- compiler->getEmitter()->emitDispRegSet(gcInfo.gcRegGCrefSetCur & ~regSet.rsMaskVars);
+ compiler->GetEmitter()->emitDispRegSet(gcInfo.gcRegGCrefSetCur & ~regSet.rsMaskVars);
printf(", byr=");
printRegMaskInt(gcInfo.gcRegByrefSetCur & ~regSet.rsMaskVars);
- compiler->getEmitter()->emitDispRegSet(gcInfo.gcRegByrefSetCur & ~regSet.rsMaskVars);
+ compiler->GetEmitter()->emitDispRegSet(gcInfo.gcRegByrefSetCur & ~regSet.rsMaskVars);
printf(", regVars=");
printRegMaskInt(regSet.rsMaskVars);
- compiler->getEmitter()->emitDispRegSet(regSet.rsMaskVars);
+ compiler->GetEmitter()->emitDispRegSet(regSet.rsMaskVars);
printf("\n");
}
// be slightly different from what the OS considers an epilog, and it is the OS-reported epilog that matters
// here.)
// We handle case #1 here, and case #2 in the emitter.
- if (getEmitter()->emitIsLastInsCall())
+ if (GetEmitter()->emitIsLastInsCall())
{
// Ok, the last instruction generated is a call instruction. Do any of the other conditions hold?
// Note: we may be generating a few too many NOPs for the case of call preceding an epilog. Technically,
}
instruction ins = ins_Load(targetType, compiler->isSIMDTypeLocalAligned(lcl->gtLclNum));
emitAttr attr = emitActualTypeSize(targetType);
- emitter* emit = getEmitter();
+ emitter* emit = GetEmitter();
// Load local variable from its home location.
inst_RV_TT(ins, dstReg, unspillTree, 0, attr);
}
TempDsc* t = regSet.rsUnspillInPlace(call, unspillTreeReg, i);
- getEmitter()->emitIns_R_S(ins_Load(dstType), emitActualTypeSize(dstType), dstReg, t->tdTempNum(),
+ GetEmitter()->emitIns_R_S(ins_Load(dstType), emitActualTypeSize(dstType), dstReg, t->tdTempNum(),
0);
regSet.tmpRlsTemp(t);
gcInfo.gcMarkRegPtrVal(dstReg, dstType);
regNumber dstReg = splitArg->GetRegNumByIdx(i);
TempDsc* t = regSet.rsUnspillInPlace(splitArg, dstReg, i);
- getEmitter()->emitIns_R_S(ins_Load(dstType), emitActualTypeSize(dstType), dstReg, t->tdTempNum(),
+ GetEmitter()->emitIns_R_S(ins_Load(dstType), emitActualTypeSize(dstType), dstReg, t->tdTempNum(),
0);
regSet.tmpRlsTemp(t);
gcInfo.gcMarkRegPtrVal(dstReg, dstType);
regNumber dstReg = multiReg->GetRegNumByIdx(i);
TempDsc* t = regSet.rsUnspillInPlace(multiReg, dstReg, i);
- getEmitter()->emitIns_R_S(ins_Load(dstType), emitActualTypeSize(dstType), dstReg, t->tdTempNum(),
+ GetEmitter()->emitIns_R_S(ins_Load(dstType), emitActualTypeSize(dstType), dstReg, t->tdTempNum(),
0);
regSet.tmpRlsTemp(t);
gcInfo.gcMarkRegPtrVal(dstReg, dstType);
else
{
TempDsc* t = regSet.rsUnspillInPlace(unspillTree, unspillTree->gtRegNum);
- getEmitter()->emitIns_R_S(ins_Load(unspillTree->gtType), emitActualTypeSize(unspillTree->TypeGet()), dstReg,
+ GetEmitter()->emitIns_R_S(ins_Load(unspillTree->gtType), emitActualTypeSize(unspillTree->TypeGet()), dstReg,
t->tdTempNum(), 0);
regSet.tmpRlsTemp(t);
// for tail calls) in RDI.
// Destination is always local (on the stack) - use EA_PTRSIZE.
assert(m_stkArgVarNum != BAD_VAR_NUM);
- getEmitter()->emitIns_R_S(INS_lea, EA_PTRSIZE, dstReg, m_stkArgVarNum, putArgNode->getArgOffset());
+ GetEmitter()->emitIns_R_S(INS_lea, EA_PTRSIZE, dstReg, m_stkArgVarNum, putArgNode->getArgOffset());
}
#endif // !_TARGET_X86_
{
offset = srcAddr->AsLclFld()->gtLclOffs;
}
- getEmitter()->emitIns_R_S(INS_lea, EA_PTRSIZE, srcReg, lclNode->gtLclNum, offset);
+ GetEmitter()->emitIns_R_S(INS_lea, EA_PTRSIZE, srcReg, lclNode->gtLclNum, offset);
}
else
{
assert(srcAddr->gtRegNum != REG_NA);
// Source is not known to be on the stack. Use EA_BYREF.
- getEmitter()->emitIns_R_R(INS_mov, EA_BYREF, srcReg, srcAddr->gtRegNum);
+ GetEmitter()->emitIns_R_R(INS_mov, EA_BYREF, srcReg, srcAddr->gtRegNum);
}
}
// Emit store instructions to store the registers produced by the GT_FIELD_LIST into the outgoing
// argument area.
unsigned thisFieldOffset = argOffset + fieldListPtr->gtFieldOffset;
- getEmitter()->emitIns_S_R(ins_Store(type), attr, reg, outArgVarNum, thisFieldOffset);
+ GetEmitter()->emitIns_S_R(ins_Store(type), attr, reg, outArgVarNum, thisFieldOffset);
// We can't write beyond the arg area unless this is a tail call, in which case we use
// the first stack arg as the base of the incoming arg area.
#if !defined(_TARGET_X86_)
int argSize = 0;
#endif // !defined(_TARGET_X86_)
- getEmitter()->emitIns_Call(emitter::EmitCallType(callType),
+ GetEmitter()->emitIns_Call(emitter::EmitCallType(callType),
methHnd,
INDEBUG_LDISASM_COMMA(sigInfo)
addr,
#endif // !defined(_TARGET_X86_)
genConsumeAddress(indir->Addr());
- getEmitter()->emitIns_Call(emitter::EmitCallType(callType),
+ GetEmitter()->emitIns_Call(emitter::EmitCallType(callType),
methHnd,
INDEBUG_LDISASM_COMMA(sigInfo)
nullptr,
//
void CodeGen::genStoreLongLclVar(GenTree* treeNode)
{
- emitter* emit = getEmitter();
+ emitter* emit = GetEmitter();
GenTreeLclVarCommon* lclNode = treeNode->AsLclVarCommon();
unsigned lclNum = lclNode->gtLclNum;
else
{
// TODO-XArch-CQ: needs all the optimized cases
- getEmitter()->emitIns_R_I(INS_mov, emitActualTypeSize(type), reg, val);
+ GetEmitter()->emitIns_R_I(INS_mov, emitActualTypeSize(type), reg, val);
}
}
{
// initReg = #GlobalSecurityCookieVal64; [frame.GSSecurityCookie] = initReg
genSetRegToIcon(initReg, compiler->gsGlobalSecurityCookieVal, TYP_I_IMPL);
- getEmitter()->emitIns_S_R(INS_mov, EA_PTRSIZE, initReg, compiler->lvaGSSecurityCookie, 0);
+ GetEmitter()->emitIns_S_R(INS_mov, EA_PTRSIZE, initReg, compiler->lvaGSSecurityCookie, 0);
*pInitRegZeroed = false;
}
else
#endif
{
// mov dword ptr [frame.GSSecurityCookie], #GlobalSecurityCookieVal
- getEmitter()->emitIns_S_I(INS_mov, EA_PTRSIZE, compiler->lvaGSSecurityCookie, 0,
+ GetEmitter()->emitIns_S_I(INS_mov, EA_PTRSIZE, compiler->lvaGSSecurityCookie, 0,
(int)compiler->gsGlobalSecurityCookieVal);
}
}
// On x64, if we're not moving into RAX, and the address isn't RIP relative, we can't encode it.
// mov eax, dword ptr [compiler->gsGlobalSecurityCookieAddr]
// mov dword ptr [frame.GSSecurityCookie], eax
- getEmitter()->emitIns_R_AI(INS_mov, EA_PTR_DSP_RELOC, REG_EAX, (ssize_t)compiler->gsGlobalSecurityCookieAddr);
+ GetEmitter()->emitIns_R_AI(INS_mov, EA_PTR_DSP_RELOC, REG_EAX, (ssize_t)compiler->gsGlobalSecurityCookieAddr);
regSet.verifyRegUsed(REG_EAX);
- getEmitter()->emitIns_S_R(INS_mov, EA_PTRSIZE, REG_EAX, compiler->lvaGSSecurityCookie, 0);
+ GetEmitter()->emitIns_S_R(INS_mov, EA_PTRSIZE, REG_EAX, compiler->lvaGSSecurityCookie, 0);
if (initReg == REG_EAX)
{
*pInitRegZeroed = false;
if ((int)compiler->gsGlobalSecurityCookieVal != (ssize_t)compiler->gsGlobalSecurityCookieVal)
{
genSetRegToIcon(regGSCheck, compiler->gsGlobalSecurityCookieVal, TYP_I_IMPL);
- getEmitter()->emitIns_S_R(INS_cmp, EA_PTRSIZE, regGSCheck, compiler->lvaGSSecurityCookie, 0);
+ GetEmitter()->emitIns_S_R(INS_cmp, EA_PTRSIZE, regGSCheck, compiler->lvaGSSecurityCookie, 0);
}
else
#endif // defined(_TARGET_AMD64_)
{
assert((int)compiler->gsGlobalSecurityCookieVal == (ssize_t)compiler->gsGlobalSecurityCookieVal);
- getEmitter()->emitIns_S_I(INS_cmp, EA_PTRSIZE, compiler->lvaGSSecurityCookie, 0,
+ GetEmitter()->emitIns_S_I(INS_cmp, EA_PTRSIZE, compiler->lvaGSSecurityCookie, 0,
(int)compiler->gsGlobalSecurityCookieVal);
}
}
pushedRegs = genPushRegs(regMaskGSCheck, &byrefPushedRegs, &norefPushedRegs);
instGen_Set_Reg_To_Imm(EA_HANDLE_CNS_RELOC, regGSCheck, (ssize_t)compiler->gsGlobalSecurityCookieAddr);
- getEmitter()->emitIns_R_AR(ins_Load(TYP_I_IMPL), EA_PTRSIZE, regGSCheck, regGSCheck, 0);
- getEmitter()->emitIns_S_R(INS_cmp, EA_PTRSIZE, regGSCheck, compiler->lvaGSSecurityCookie, 0);
+ GetEmitter()->emitIns_R_AR(ins_Load(TYP_I_IMPL), EA_PTRSIZE, regGSCheck, regGSCheck, 0);
+ GetEmitter()->emitIns_S_R(INS_cmp, EA_PTRSIZE, regGSCheck, compiler->lvaGSSecurityCookie, 0);
}
BasicBlock* gsCheckBlk = genCreateTempLabel();
}
else
{
- getEmitter()->emitIns_R_S(ins_Load(TYP_I_IMPL), EA_PTRSIZE, REG_ARG_0, compiler->lvaPSPSym, 0);
+ GetEmitter()->emitIns_R_S(ins_Load(TYP_I_IMPL), EA_PTRSIZE, REG_ARG_0, compiler->lvaPSPSym, 0);
}
- getEmitter()->emitIns_J(INS_call, block->bbJumpDest);
+ GetEmitter()->emitIns_J(INS_call, block->bbJumpDest);
if (block->bbFlags & BBF_RETLESS_CALL)
{
// Because of the way the flowgraph is connected, the liveness info for this one instruction
// after the call is not (can not be) correct in cases where a variable has a last use in the
// handler. So turn off GC reporting for this single instruction.
- getEmitter()->emitDisableGC();
+ GetEmitter()->emitDisableGC();
#endif // JIT32_GCENCODER
// Now go to where the finally funclet needs to return to.
}
#ifndef JIT32_GCENCODER
- getEmitter()->emitEnableGC();
+ GetEmitter()->emitEnableGC();
#endif // JIT32_GCENCODER
}
curNestingSlotOffs = (unsigned)(filterEndOffsetSlotOffs - ((finallyNesting + 1) * TARGET_POINTER_SIZE));
// Zero out the slot for the next nesting level
- getEmitter()->emitIns_S_I(INS_mov, EA_PTRSIZE, compiler->lvaShadowSPslotsVar,
+ GetEmitter()->emitIns_S_I(INS_mov, EA_PTRSIZE, compiler->lvaShadowSPslotsVar,
curNestingSlotOffs - TARGET_POINTER_SIZE, 0);
- getEmitter()->emitIns_S_I(INS_mov, EA_PTRSIZE, compiler->lvaShadowSPslotsVar, curNestingSlotOffs, LCL_FINALLY_MARK);
+ GetEmitter()->emitIns_S_I(INS_mov, EA_PTRSIZE, compiler->lvaShadowSPslotsVar, curNestingSlotOffs, LCL_FINALLY_MARK);
// Now push the address where the finally funclet should return to directly.
if (!(block->bbFlags & BBF_RETLESS_CALL))
{
assert(block->isBBCallAlwaysPair());
- getEmitter()->emitIns_J(INS_push_hide, block->bbNext->bbJumpDest);
+ GetEmitter()->emitIns_J(INS_push_hide, block->bbNext->bbJumpDest);
}
else
{
// Generate a RIP-relative
// lea reg, [rip + disp32] ; the RIP is implicit
// which will be position-independent.
- getEmitter()->emitIns_R_L(INS_lea, EA_PTR_DSP_RELOC, block->bbJumpDest, REG_INTRET);
+ GetEmitter()->emitIns_R_L(INS_lea, EA_PTR_DSP_RELOC, block->bbJumpDest, REG_INTRET);
}
#else // !FEATURE_EH_FUNCLETS
newSize = EA_SET_FLG(newSize, EA_BYREF_FLG);
}
- getEmitter()->emitIns_R_AI(INS_lea, newSize, reg, imm);
+ GetEmitter()->emitIns_R_AI(INS_lea, newSize, reg, imm);
}
else
{
- getEmitter()->emitIns_R_I(INS_mov, size, reg, imm);
+ GetEmitter()->emitIns_R_I(INS_mov, size, reg, imm);
}
}
regSet.verifyRegUsed(reg);
case GT_CNS_DBL:
{
- emitter* emit = getEmitter();
+ emitter* emit = GetEmitter();
emitAttr size = emitTypeSize(targetType);
double constValue = tree->gtDblCon.gtDconVal;
regNumber targetReg = treeNode->gtRegNum;
var_types targetType = treeNode->TypeGet();
- emitter* emit = getEmitter();
+ emitter* emit = GetEmitter();
emitAttr size = emitTypeSize(treeNode);
GenTree* op1 = treeNode->gtOp.gtOp1;
GenTree* op2 = treeNode->gtOp.gtOp2;
emitAttr size = emitTypeSize(treeNode);
regNumber targetReg = treeNode->gtRegNum;
var_types targetType = treeNode->TypeGet();
- emitter* emit = getEmitter();
+ emitter* emit = GetEmitter();
// Node's type must be int/native int, small integer types are not
// supported and floating point types are handled by genCodeForBinary.
const genTreeOps oper = treeNode->OperGet();
regNumber targetReg = treeNode->gtRegNum;
var_types targetType = treeNode->TypeGet();
- emitter* emit = getEmitter();
+ emitter* emit = GetEmitter();
GenTree* op1 = treeNode->gtGetOp1();
GenTree* op2 = treeNode->gtGetOp2();
regNumber targetReg = treeNode->gtRegNum;
var_types targetType = treeNode->TypeGet();
- emitter* emit = getEmitter();
+ emitter* emit = GetEmitter();
// Node's type must be int or long (only on x64), small integer types are not
// supported and floating point types are handled by genCodeForBinary.
// We will use the LEA instruction to perform this multiply
// Note that an LEA with base=x, index=x and scale=(imm-1) computes x*imm when imm=3,5 or 9.
unsigned int scale = (unsigned int)(imm - 1);
- getEmitter()->emitIns_R_ARX(INS_lea, size, targetReg, rmOp->gtRegNum, rmOp->gtRegNum, scale, 0);
+ GetEmitter()->emitIns_R_ARX(INS_lea, size, targetReg, rmOp->gtRegNum, rmOp->gtRegNum, scale, 0);
}
else if (!requiresOverflowCheck && rmOp->isUsedFromReg() && (imm == genFindLowestBit(imm)) && (imm != 0))
{
else
{
// use the 3-op form with immediate
- ins = getEmitter()->inst3opImulForReg(targetReg);
+ ins = GetEmitter()->inst3opImulForReg(targetReg);
emit->emitInsBinary(ins, size, rmOp, immOp);
}
}
{
var_types type = retTypeDesc.GetReturnRegType(i);
regNumber reg = retTypeDesc.GetABIReturnReg(i);
- getEmitter()->emitIns_R_S(ins_Load(type), emitTypeSize(type), reg, lclVar->gtLclNum, offset);
+ GetEmitter()->emitIns_R_S(ins_Load(type), emitTypeSize(type), reg, lclVar->gtLclNum, offset);
offset += genTypeSize(type);
}
}
op1->gtRegNum);
}
// Now, load it to the fp stack.
- getEmitter()->emitIns_S(INS_fld, emitTypeSize(op1), op1->AsLclVarCommon()->gtLclNum, 0);
+ GetEmitter()->emitIns_S(INS_fld, emitTypeSize(op1), op1->AsLclVarCommon()->gtLclNum, 0);
}
else
{
// Note that the emitter doesn't fully support INS_bt, it only supports the reg,reg
// form and encodes the registers in reverse order. To get the correct order we need
// to reverse the operands when calling emitIns_R_R.
- getEmitter()->emitIns_R_R(INS_bt, emitTypeSize(type), op2->gtRegNum, op1->gtRegNum);
+ GetEmitter()->emitIns_R_R(INS_bt, emitTypeSize(type), op2->gtRegNum, op1->gtRegNum);
}
// clang-format off
if (!varTypeIsByte(type))
{
- getEmitter()->emitIns_R_R(INS_movzx, EA_1BYTE, dstReg, dstReg);
+ GetEmitter()->emitIns_R_R(INS_movzx, EA_1BYTE, dstReg, dstReg);
}
}
genConsumeRegs(data);
GenTreeIntCon cns = intForm(TYP_INT, 0);
cns.SetContained();
- getEmitter()->emitInsBinary(INS_cmp, emitTypeSize(TYP_INT), data, &cns);
+ GetEmitter()->emitInsBinary(INS_cmp, emitTypeSize(TYP_INT), data, &cns);
BasicBlock* skipLabel = genCreateTempLabel();
targetReg = treeNode->gtRegNum;
}
var_types targetType = treeNode->TypeGet();
- emitter* emit = getEmitter();
+ emitter* emit = GetEmitter();
#ifdef DEBUG
// Validate that all the operands for the current node are consumed in order.
{
#ifndef JIT32_GCENCODER
case GT_START_NONGC:
- getEmitter()->emitDisableGC();
+ GetEmitter()->emitDisableGC();
break;
#endif // !defined(JIT32_GCENCODER)
break;
case GT_NO_OP:
- getEmitter()->emitIns_Nop(1);
+ GetEmitter()->emitIns_Nop(1);
break;
case GT_ARR_BOUNDS_CHECK:
unsigned curNestingSlotOffs;
curNestingSlotOffs = filterEndOffsetSlotOffs - ((finallyNesting + 1) * TARGET_POINTER_SIZE);
- getEmitter()->emitIns_S_I(INS_mov, EA_PTRSIZE, compiler->lvaShadowSPslotsVar, curNestingSlotOffs, 0);
+ GetEmitter()->emitIns_S_I(INS_mov, EA_PTRSIZE, compiler->lvaShadowSPslotsVar, curNestingSlotOffs, 0);
break;
#endif // !FEATURE_EH_FUNCLETS
}
assert(reg != REG_NA);
- getEmitter()->emitIns_S_R(ins_Store(type), emitTypeSize(type), reg, lclNum, offset);
+ GetEmitter()->emitIns_S_R(ins_Store(type), emitTypeSize(type), reg, lclNum, offset);
offset += genTypeSize(type);
}
}
assert(reg != REG_NA);
- getEmitter()->emitIns_S_R(ins_Store(type), emitTypeSize(type), reg, lclNum, offset);
+ GetEmitter()->emitIns_S_R(ins_Store(type), emitTypeSize(type), reg, lclNum, offset);
offset += genTypeSize(type);
}
// Frame size is (0x1000..0x3000)
- getEmitter()->emitIns_AR_R(INS_test, EA_PTRSIZE, REG_EAX, REG_SPBASE, -(int)pageSize);
+ GetEmitter()->emitIns_AR_R(INS_test, EA_PTRSIZE, REG_EAX, REG_SPBASE, -(int)pageSize);
lastTouchDelta -= pageSize;
if (frameSize >= 0x2000)
{
- getEmitter()->emitIns_AR_R(INS_test, EA_PTRSIZE, REG_EAX, REG_SPBASE, -2 * (int)pageSize);
+ GetEmitter()->emitIns_AR_R(INS_test, EA_PTRSIZE, REG_EAX, REG_SPBASE, -2 * (int)pageSize);
lastTouchDelta -= pageSize;
}
// cmp rbp, -frameSize 7
// jge loop 2
- getEmitter()->emitIns_R_ARR(INS_test, EA_PTRSIZE, initReg, REG_SPBASE, initReg, 0);
+ GetEmitter()->emitIns_R_ARR(INS_test, EA_PTRSIZE, initReg, REG_SPBASE, initReg, 0);
inst_RV_IV(INS_sub, initReg, pageSize, EA_PTRSIZE);
inst_RV_IV(INS_cmp, initReg, -((ssize_t)frameSize), EA_PTRSIZE);
int sPageSize = (int)pageSize;
- getEmitter()->emitIns_R_AR(INS_lea, EA_PTRSIZE, initReg, REG_SPBASE, -((ssize_t)frameSize)); // get frame border
+ GetEmitter()->emitIns_R_AR(INS_lea, EA_PTRSIZE, initReg, REG_SPBASE, -((ssize_t)frameSize)); // get frame border
- getEmitter()->emitIns_R_AR(INS_lea, EA_PTRSIZE, REG_SPBASE, REG_SPBASE, -sPageSize);
- getEmitter()->emitIns_R_AR(INS_test, EA_PTRSIZE, initReg, REG_SPBASE, 0);
+ GetEmitter()->emitIns_R_AR(INS_lea, EA_PTRSIZE, REG_SPBASE, REG_SPBASE, -sPageSize);
+ GetEmitter()->emitIns_R_AR(INS_test, EA_PTRSIZE, initReg, REG_SPBASE, 0);
inst_RV_RV(INS_cmp, REG_SPBASE, initReg);
int bytesForBackwardJump;
inst_IV(INS_jge, bytesForBackwardJump); // Branch backwards to start of loop
- getEmitter()->emitIns_R_AR(INS_lea, EA_PTRSIZE, REG_SPBASE, initReg, frameSize); // restore stack pointer
+ GetEmitter()->emitIns_R_AR(INS_lea, EA_PTRSIZE, REG_SPBASE, initReg, frameSize); // restore stack pointer
lastTouchDelta = 0; // The loop code above actually over-probes: it always probes beyond the final SP we need.
// happen on x86, for example, when we copy an argument to the stack using a "SUB ESP; REP MOV"
// strategy.
- getEmitter()->emitIns_AR_R(INS_test, EA_PTRSIZE, REG_EAX, REG_SPBASE, 0);
+ GetEmitter()->emitIns_AR_R(INS_test, EA_PTRSIZE, REG_EAX, REG_SPBASE, 0);
}
compiler->unwindAllocStack(frameSize);
//
void CodeGen::genStackPointerConstantAdjustmentWithProbe(ssize_t spDelta, regNumber regTmp)
{
- getEmitter()->emitIns_AR_R(INS_TEST, EA_4BYTE, REG_SPBASE, REG_SPBASE, 0);
+ GetEmitter()->emitIns_AR_R(INS_TEST, EA_4BYTE, REG_SPBASE, REG_SPBASE, 0);
genStackPointerConstantAdjustment(spDelta, regTmp);
}
// happen on x86, for example, when we copy an argument to the stack using a "SUB ESP; REP MOV"
// strategy.
- getEmitter()->emitIns_AR_R(INS_test, EA_PTRSIZE, REG_EAX, REG_SPBASE, 0);
+ GetEmitter()->emitIns_AR_R(INS_test, EA_PTRSIZE, REG_EAX, REG_SPBASE, 0);
lastTouchDelta = 0;
}
// Tickle the decremented value. Note that it must be done BEFORE the update of ESP since ESP might already
// be on the guard page. It is OK to leave the final value of ESP on the guard page.
- getEmitter()->emitIns_AR_R(INS_TEST, EA_4BYTE, REG_SPBASE, REG_SPBASE, 0);
+ GetEmitter()->emitIns_AR_R(INS_TEST, EA_4BYTE, REG_SPBASE, REG_SPBASE, 0);
// Subtract a page from ESP. This is a trick to avoid the emitter trying to track the
// decrement of the ESP - we do the subtraction in another reg instead of adjusting ESP directly.
// Put the size value in targetReg. If it is zero, bail out by returning null in targetReg.
genConsumeRegAndCopy(size, targetReg);
endLabel = genCreateTempLabel();
- getEmitter()->emitIns_R_R(INS_test, easz, targetReg, targetReg);
+ GetEmitter()->emitIns_R_R(INS_test, easz, targetReg, targetReg);
inst_JMP(EJ_je, endLabel);
// Compute the size of the block to allocate and perform alignment.
// Return the stackalloc'ed address in result register.
// TargetReg = RSP + stackAdjustment.
- getEmitter()->emitIns_R_AR(INS_lea, EA_PTRSIZE, targetReg, REG_SPBASE, stackAdjustment);
+ GetEmitter()->emitIns_R_AR(INS_lea, EA_PTRSIZE, targetReg, REG_SPBASE, stackAdjustment);
if (endLabel != nullptr)
{
#ifdef JIT32_GCENCODER
if (compiler->lvaLocAllocSPvar != BAD_VAR_NUM)
{
- getEmitter()->emitIns_S_R(ins_Store(TYP_I_IMPL), EA_PTRSIZE, REG_SPBASE, compiler->lvaLocAllocSPvar, 0);
+ GetEmitter()->emitIns_S_R(ins_Store(TYP_I_IMPL), EA_PTRSIZE, REG_SPBASE, compiler->lvaLocAllocSPvar, 0);
}
#endif // JIT32_GCENCODER
noway_assert(compiler->lvaReturnSpCheck != 0xCCCCCCCC &&
compiler->lvaTable[compiler->lvaReturnSpCheck].lvDoNotEnregister &&
compiler->lvaTable[compiler->lvaReturnSpCheck].lvOnFrame);
- getEmitter()->emitIns_S_R(ins_Store(TYP_I_IMPL), EA_PTRSIZE, REG_SPBASE, compiler->lvaReturnSpCheck, 0);
+ GetEmitter()->emitIns_S_R(ins_Store(TYP_I_IMPL), EA_PTRSIZE, REG_SPBASE, compiler->lvaReturnSpCheck, 0);
}
#endif
#else
if (storeBlkNode->gtBlkOpGcUnsafe)
{
- getEmitter()->emitDisableGC();
+ GetEmitter()->emitDisableGC();
}
#endif // JIT32_GCENCODER
#ifndef JIT32_GCENCODER
if (storeBlkNode->gtBlkOpGcUnsafe)
{
- getEmitter()->emitEnableGC();
+ GetEmitter()->emitEnableGC();
}
#endif // !defined(JIT32_GCENCODER)
}
assert(size <= INITBLK_UNROLL_LIMIT);
assert(initVal->gtSkipReloadOrCopy()->IsCnsIntOrI());
- emitter* emit = getEmitter();
+ emitter* emit = GetEmitter();
genConsumeOperands(initBlkNode);
// offset: distance from the baseNode from which to load
void CodeGen::genCodeForLoadOffset(instruction ins, emitAttr size, regNumber dst, GenTree* baseNode, unsigned offset)
{
- emitter* emit = getEmitter();
+ emitter* emit = GetEmitter();
if (baseNode->OperIsLocalAddr())
{
//
void CodeGen::genCodeForStoreOffset(instruction ins, emitAttr size, regNumber src, GenTree* baseNode, unsigned offset)
{
- emitter* emit = getEmitter();
+ emitter* emit = GetEmitter();
if (baseNode->OperIsLocalAddr())
{
GenTree* srcAddr = nullptr;
assert(size <= CPBLK_UNROLL_LIMIT);
- emitter* emit = getEmitter();
+ emitter* emit = GetEmitter();
if (dstAddr->isUsedFromReg())
{
unsigned size = putArgNode->getArgSize();
assert(size <= CPBLK_UNROLL_LIMIT);
- emitter* emit = getEmitter();
+ emitter* emit = GetEmitter();
unsigned putArgOffset = putArgNode->getArgOffset();
assert(src->isContained());
if (!varDsc->lvIsRegArg)
{
// Clear the upper 32 bits by mov dword ptr [V_ARG_BASE+0xC], 0
- getEmitter()->emitIns_S_I(ins_Store(TYP_INT), EA_4BYTE, varNum, genTypeSize(TYP_FLOAT) * 3, 0);
+ GetEmitter()->emitIns_S_I(ins_Store(TYP_INT), EA_4BYTE, varNum, genTypeSize(TYP_FLOAT) * 3, 0);
}
else
{
// Clear the upper 32 bits by two shift instructions.
// argReg = argReg << 96
- getEmitter()->emitIns_R_I(INS_pslldq, emitActualTypeSize(TYP_SIMD12), argReg, 12);
+ GetEmitter()->emitIns_R_I(INS_pslldq, emitActualTypeSize(TYP_SIMD12), argReg, 12);
// argReg = argReg >> 96
- getEmitter()->emitIns_R_I(INS_psrldq, emitActualTypeSize(TYP_SIMD12), argReg, 12);
+ GetEmitter()->emitIns_R_I(INS_psrldq, emitActualTypeSize(TYP_SIMD12), argReg, 12);
}
}
}
// RCX to emit the movsp (alias for movsd or movsq for 32 and 64 bits respectively).
assert((cpObjNode->gtRsvdRegs & RBM_RCX) != 0);
- getEmitter()->emitIns_R_I(INS_mov, EA_4BYTE, REG_RCX, slots);
+ GetEmitter()->emitIns_R_I(INS_mov, EA_4BYTE, REG_RCX, slots);
instGen(INS_r_movsp);
}
else
// rep movsp (alias for movsd/movsq for x86/x64)
assert((cpObjNode->gtRsvdRegs & RBM_RCX) != 0);
- getEmitter()->emitIns_R_I(INS_mov, EA_4BYTE, REG_RCX, nonGcSlotCount);
+ GetEmitter()->emitIns_R_I(INS_mov, EA_4BYTE, REG_RCX, nonGcSlotCount);
instGen(INS_r_movsp);
}
}
regNumber tmpReg = treeNode->GetSingleTempReg();
// load the ip-relative offset (which is relative to start of fgFirstBB)
- getEmitter()->emitIns_R_ARX(INS_mov, EA_4BYTE, baseReg, baseReg, idxReg, 4, 0);
+ GetEmitter()->emitIns_R_ARX(INS_mov, EA_4BYTE, baseReg, baseReg, idxReg, 4, 0);
// add it to the absolute address of fgFirstBB
compiler->fgFirstBB->bbFlags |= BBF_JMP_TARGET;
- getEmitter()->emitIns_R_L(INS_lea, EA_PTR_DSP_RELOC, compiler->fgFirstBB, tmpReg);
- getEmitter()->emitIns_R_R(INS_add, EA_PTRSIZE, baseReg, tmpReg);
+ GetEmitter()->emitIns_R_L(INS_lea, EA_PTR_DSP_RELOC, compiler->fgFirstBB, tmpReg);
+ GetEmitter()->emitIns_R_R(INS_add, EA_PTRSIZE, baseReg, tmpReg);
// jmp baseReg
- getEmitter()->emitIns_R(INS_i_jmp, emitTypeSize(TYP_I_IMPL), baseReg);
+ GetEmitter()->emitIns_R(INS_i_jmp, emitTypeSize(TYP_I_IMPL), baseReg);
}
// emits the table and an instruction to get the address of the first element
unsigned jmpTabOffs;
unsigned jmpTabBase;
- jmpTabBase = getEmitter()->emitBBTableDataGenBeg(jumpCount, true);
+ jmpTabBase = GetEmitter()->emitBBTableDataGenBeg(jumpCount, true);
jmpTabOffs = 0;
JITDUMP(" DD L_M%03u_" FMT_BB "\n", Compiler::s_compMethodsCount, target->bbNum);
- getEmitter()->emitDataGenData(i, target);
+ GetEmitter()->emitDataGenData(i, target);
};
- getEmitter()->emitDataGenEnd();
+ GetEmitter()->emitDataGenEnd();
// Access to inline data is 'abstracted' by a special type of static member
// (produced by eeFindJitDataOffs) which the emitter recognizes as being a reference
// to constant data, not a real static field.
- getEmitter()->emitIns_R_C(INS_lea, emitTypeSize(TYP_I_IMPL), treeNode->gtRegNum,
+ GetEmitter()->emitIns_R_C(INS_lea, emitTypeSize(TYP_I_IMPL), treeNode->gtRegNum,
compiler->eeFindJitDataOffs(jmpTabBase), 0);
genProduceReg(treeNode);
}
{
int imm = static_cast<int>(data->AsIntCon()->IconValue());
assert(imm == data->AsIntCon()->IconValue());
- getEmitter()->emitIns_I_AR(INS_add, size, imm, addr->gtRegNum, 0);
+ GetEmitter()->emitIns_I_AR(INS_add, size, imm, addr->gtRegNum, 0);
}
else
{
- getEmitter()->emitIns_AR_R(INS_add, size, data->gtRegNum, addr->gtRegNum, 0);
+ GetEmitter()->emitIns_AR_R(INS_add, size, data->gtRegNum, addr->gtRegNum, 0);
}
}
// to first move the data to the target register. Make sure we don't overwrite
// the address, the register allocator should have taken care of this.
assert(node->gtRegNum != addr->gtRegNum);
- getEmitter()->emitIns_R_R(INS_mov, size, node->gtRegNum, data->gtRegNum);
+ GetEmitter()->emitIns_R_R(INS_mov, size, node->gtRegNum, data->gtRegNum);
}
instruction ins = node->OperIs(GT_XADD) ? INS_xadd : INS_xchg;
instGen(INS_lock);
}
- getEmitter()->emitIns_AR_R(ins, size, node->gtRegNum, addr->gtRegNum, 0);
+ GetEmitter()->emitIns_AR_R(ins, size, node->gtRegNum, addr->gtRegNum, 0);
genProduceReg(node);
}
// location is Rm
instGen(INS_lock);
- getEmitter()->emitIns_AR_R(INS_cmpxchg, emitTypeSize(targetType), value->gtRegNum, location->gtRegNum, 0);
+ GetEmitter()->emitIns_AR_R(INS_cmpxchg, emitTypeSize(targetType), value->gtRegNum, location->gtRegNum, 0);
// Result is in RAX
if (targetReg != REG_RAX)
assert(emitTypeSize(bndsChkType) >= emitTypeSize(src1->TypeGet()));
#endif // DEBUG
- getEmitter()->emitInsBinary(INS_cmp, emitTypeSize(bndsChkType), src1, src2);
+ GetEmitter()->emitInsBinary(INS_cmp, emitTypeSize(bndsChkType), src1, src2);
genJumpToThrowHlpBlk(jmpKind, bndsChk->gtThrowKind, bndsChk->gtIndRngFailBB);
}
assert(tree->gtOp1->isUsedFromReg());
regNumber reg = genConsumeReg(tree->gtOp1);
- getEmitter()->emitIns_AR_R(INS_cmp, EA_4BYTE, reg, reg, 0);
+ GetEmitter()->emitIns_AR_R(INS_cmp, EA_4BYTE, reg, reg, 0);
}
//------------------------------------------------------------------------
{
inst_RV_RV(INS_mov, tgtReg, indexReg, indexNode->TypeGet());
}
- getEmitter()->emitIns_R_AR(INS_sub, emitActualTypeSize(TYP_INT), tgtReg, arrReg,
+ GetEmitter()->emitIns_R_AR(INS_sub, emitActualTypeSize(TYP_INT), tgtReg, arrReg,
genOffsetOfMDArrayLowerBound(elemType, rank, dim));
- getEmitter()->emitIns_R_AR(INS_cmp, emitActualTypeSize(TYP_INT), tgtReg, arrReg,
+ GetEmitter()->emitIns_R_AR(INS_cmp, emitActualTypeSize(TYP_INT), tgtReg, arrReg,
genOffsetOfMDArrayDimensionSize(elemType, rank, dim));
genJumpToThrowHlpBlk(EJ_jae, SCK_RNGCHK_FAIL);
// tmpReg is used to load dim_size and the result of the multiplication.
// Note that dim_size will never be negative.
- getEmitter()->emitIns_R_AR(INS_mov, emitActualTypeSize(TYP_INT), tmpReg, arrReg,
+ GetEmitter()->emitIns_R_AR(INS_mov, emitActualTypeSize(TYP_INT), tmpReg, arrReg,
genOffsetOfMDArrayDimensionSize(elemType, rank, dim));
inst_RV_RV(INS_imul, tmpReg, offsetReg);
if (shiftByValue == 1)
{
// There is no source in this case, as the shift by count is embedded in the instruction opcode itself.
- getEmitter()->emitInsRMW(ins, attr, storeInd);
+ GetEmitter()->emitInsRMW(ins, attr, storeInd);
}
else
{
- getEmitter()->emitInsRMW(ins, attr, storeInd, shiftBy);
+ GetEmitter()->emitInsRMW(ins, attr, storeInd, shiftBy);
}
}
else
genCopyRegIfNeeded(shiftBy, REG_RCX);
// The shiftBy operand is implicit, so call the unary version of emitInsRMW.
- getEmitter()->emitInsRMW(ins, attr, storeInd);
+ GetEmitter()->emitInsRMW(ins, attr, storeInd);
}
}
unsigned varNum = tree->gtLclNum;
assert(varNum < compiler->lvaCount);
- getEmitter()->emitIns_R_S(ins_Load(targetType), size, targetReg, varNum, offs);
+ GetEmitter()->emitIns_R_S(ins_Load(targetType), size, targetReg, varNum, offs);
genProduceReg(tree);
}
}
#endif // defined(FEATURE_SIMD) && defined(_TARGET_X86_)
- getEmitter()->emitIns_R_S(ins_Load(tree->TypeGet(), compiler->isSIMDTypeLocalAligned(tree->gtLclNum)),
+ GetEmitter()->emitIns_R_S(ins_Load(tree->TypeGet(), compiler->isSIMDTypeLocalAligned(tree->gtLclNum)),
emitTypeSize(tree), tree->gtRegNum, tree->gtLclNum, 0);
genProduceReg(tree);
}
GenTree* op1 = tree->gtGetOp1();
genConsumeRegs(op1);
- getEmitter()->emitInsBinary(ins_Store(targetType), emitTypeSize(tree), tree, op1);
+ GetEmitter()->emitInsBinary(ins_Store(targetType), emitTypeSize(tree), tree, op1);
// Updating variable liveness after instruction was emitted
genUpdateLife(tree);
var_types targetType = tree->TypeGet();
regNumber targetReg = tree->gtRegNum;
- emitter* emit = getEmitter();
+ emitter* emit = GetEmitter();
GenTree* op1 = tree->gtGetOp1();
// is a native int on a 64-bit platform, we will need to widen the array length and then compare.
if (index->TypeGet() == TYP_I_IMPL)
{
- getEmitter()->emitIns_R_AR(INS_mov, EA_4BYTE, tmpReg, baseReg, static_cast<int>(node->gtLenOffset));
- getEmitter()->emitIns_R_R(INS_cmp, EA_8BYTE, indexReg, tmpReg);
+ GetEmitter()->emitIns_R_AR(INS_mov, EA_4BYTE, tmpReg, baseReg, static_cast<int>(node->gtLenOffset));
+ GetEmitter()->emitIns_R_R(INS_cmp, EA_8BYTE, indexReg, tmpReg);
}
else
#endif // _TARGET_64BIT_
{
- getEmitter()->emitIns_R_AR(INS_cmp, EA_4BYTE, indexReg, baseReg, static_cast<int>(node->gtLenOffset));
+ GetEmitter()->emitIns_R_AR(INS_cmp, EA_4BYTE, indexReg, baseReg, static_cast<int>(node->gtLenOffset));
}
genJumpToThrowHlpBlk(EJ_jae, SCK_RNGCHK_FAIL, node->gtIndRngFailBB);
if (index->TypeGet() != TYP_I_IMPL)
{
// LEA needs 64-bit operands so we need to widen the index if it's TYP_INT.
- getEmitter()->emitIns_R_R(INS_mov, EA_4BYTE, tmpReg, indexReg);
+ GetEmitter()->emitIns_R_R(INS_mov, EA_4BYTE, tmpReg, indexReg);
indexReg = tmpReg;
}
#endif // _TARGET_64BIT_
tmpReg = node->GetSingleTempReg();
#endif // !_TARGET_64BIT_
- getEmitter()->emitIns_R_I(emitter::inst3opImulForReg(tmpReg), EA_PTRSIZE, indexReg,
+ GetEmitter()->emitIns_R_I(emitter::inst3opImulForReg(tmpReg), EA_PTRSIZE, indexReg,
static_cast<ssize_t>(scale));
scale = 1;
break;
}
- getEmitter()->emitIns_R_ARX(INS_lea, emitTypeSize(node->TypeGet()), dstReg, baseReg, tmpReg, scale,
+ GetEmitter()->emitIns_R_ARX(INS_lea, emitTypeSize(node->TypeGet()), dstReg, baseReg, tmpReg, scale,
static_cast<int>(node->gtElemOffset));
gcInfo.gcMarkRegSetNpt(base->gtGetRegMask());
#endif // FEATURE_SIMD
var_types targetType = tree->TypeGet();
- emitter* emit = getEmitter();
+ emitter* emit = GetEmitter();
GenTree* addr = tree->Addr();
if (addr->IsCnsIntOrI() && addr->IsIconHandle(GTF_ICON_TLS_HDL))
if (dataIsUnary)
{
// generate code for unary RMW memory ops like neg/not
- getEmitter()->emitInsRMW(genGetInsForOper(data->OperGet(), data->TypeGet()), emitTypeSize(tree), tree);
+ GetEmitter()->emitInsRMW(genGetInsForOper(data->OperGet(), data->TypeGet()), emitTypeSize(tree), tree);
}
else
{
// the above if condition once Decode() routine is fixed.
assert(rmwSrc->isContainedIntOrIImmed());
instruction ins = rmwSrc->IsIntegralConst(1) ? INS_inc : INS_dec;
- getEmitter()->emitInsRMW(ins, emitTypeSize(tree), tree);
+ GetEmitter()->emitInsRMW(ins, emitTypeSize(tree), tree);
}
else
{
// generate code for remaining binary RMW memory ops like add/sub/and/or/xor
- getEmitter()->emitInsRMW(genGetInsForOper(data->OperGet(), data->TypeGet()), emitTypeSize(tree),
+ GetEmitter()->emitInsRMW(genGetInsForOper(data->OperGet(), data->TypeGet()), emitTypeSize(tree),
tree, rmwSrc);
}
}
}
else
{
- getEmitter()->emitInsStoreInd(ins_Store(data->TypeGet()), emitTypeSize(tree), tree);
+ GetEmitter()->emitInsStoreInd(ins_Store(data->TypeGet()), emitTypeSize(tree), tree);
}
}
}
if (call->NeedsNullCheck())
{
const regNumber regThis = genGetThisArgReg(call);
- getEmitter()->emitIns_AR_R(INS_cmp, EA_4BYTE, regThis, regThis, 0);
+ GetEmitter()->emitIns_AR_R(INS_cmp, EA_4BYTE, regThis, regThis, 0);
}
// Either gtControlExpr != null or gtCallAddr != null or it is a direct non-virtual call to a user or helper method.
noway_assert(compiler->lvaCallSpCheck != 0xCCCCCCCC &&
compiler->lvaTable[compiler->lvaCallSpCheck].lvDoNotEnregister &&
compiler->lvaTable[compiler->lvaCallSpCheck].lvOnFrame);
- getEmitter()->emitIns_S_R(ins_Store(TYP_I_IMPL), EA_PTRSIZE, REG_SPBASE, compiler->lvaCallSpCheck, 0);
+ GetEmitter()->emitIns_S_R(ins_Store(TYP_I_IMPL), EA_PTRSIZE, REG_SPBASE, compiler->lvaCallSpCheck, 0);
}
#endif // defined(DEBUG) && defined(_TARGET_X86_)
// To limit code size increase impact: we only issue VZEROUPPER before PInvoke call, not issue
// VZEROUPPER after PInvoke call because transition penalty from legacy SSE to AVX only happens
// when there's preceding 256-bit AVX to legacy SSE transition penalty.
- if (call->IsPInvoke() && (call->gtCallType == CT_USER_FUNC) && getEmitter()->Contains256bitAVX())
+ if (call->IsPInvoke() && (call->gtCallType == CT_USER_FUNC) && GetEmitter()->Contains256bitAVX())
{
assert(compiler->canUseVexEncoding());
instGen(INS_vzeroupper);
genConsumeReg(addr);
genCopyRegIfNeeded(addr, REG_VIRTUAL_STUB_TARGET);
- getEmitter()->emitIns_Nop(3);
+ GetEmitter()->emitIns_Nop(3);
// clang-format off
- getEmitter()->emitIns_Call(emitter::EmitCallType(emitter::EC_INDIR_ARD),
+ GetEmitter()->emitIns_Call(emitter::EmitCallType(emitter::EC_INDIR_ARD),
methHnd,
INDEBUG_LDISASM_COMMA(sigInfo)
nullptr,
// Clear the upper 32 bits by two shift instructions.
// retReg = retReg << 96
// retReg = retReg >> 96
- getEmitter()->emitIns_R_I(INS_pslldq, emitActualTypeSize(TYP_SIMD12), returnReg, 12);
- getEmitter()->emitIns_R_I(INS_psrldq, emitActualTypeSize(TYP_SIMD12), returnReg, 12);
+ GetEmitter()->emitIns_R_I(INS_pslldq, emitActualTypeSize(TYP_SIMD12), returnReg, 12);
+ GetEmitter()->emitIns_R_I(INS_psrldq, emitActualTypeSize(TYP_SIMD12), returnReg, 12);
}
#endif // FEATURE_SIMD
}
// ECX is trashed, so can be used to compute the expected SP. We saved the value of SP
// after pushing all the stack arguments, but the caller popped the arguments, so we need
// to do some math to figure a good comparison.
- getEmitter()->emitIns_R_R(INS_mov, EA_4BYTE, REG_ARG_0, REG_SPBASE);
- getEmitter()->emitIns_R_I(INS_sub, EA_4BYTE, REG_ARG_0, stackArgBytes);
- getEmitter()->emitIns_S_R(INS_cmp, EA_4BYTE, REG_ARG_0, compiler->lvaCallSpCheck, 0);
+ GetEmitter()->emitIns_R_R(INS_mov, EA_4BYTE, REG_ARG_0, REG_SPBASE);
+ GetEmitter()->emitIns_R_I(INS_sub, EA_4BYTE, REG_ARG_0, stackArgBytes);
+ GetEmitter()->emitIns_S_R(INS_cmp, EA_4BYTE, REG_ARG_0, compiler->lvaCallSpCheck, 0);
}
else
{
- getEmitter()->emitIns_S_R(INS_cmp, EA_4BYTE, REG_SPBASE, compiler->lvaCallSpCheck, 0);
+ GetEmitter()->emitIns_S_R(INS_cmp, EA_4BYTE, REG_SPBASE, compiler->lvaCallSpCheck, 0);
}
BasicBlock* sp_check = genCreateTempLabel();
- getEmitter()->emitIns_J(INS_je, sp_check);
+ GetEmitter()->emitIns_J(INS_je, sp_check);
instGen(INS_BREAKPOINT);
genDefineTempLabel(sp_check);
}
case CORINFO_HELP_MON_ENTER_STATIC:
noway_assert(compiler->syncStartEmitCookie == NULL);
compiler->syncStartEmitCookie =
- getEmitter()->emitAddLabel(gcInfo.gcVarPtrSetCur, gcInfo.gcRegGCrefSetCur, gcInfo.gcRegByrefSetCur);
+ GetEmitter()->emitAddLabel(gcInfo.gcVarPtrSetCur, gcInfo.gcRegGCrefSetCur, gcInfo.gcRegByrefSetCur);
noway_assert(compiler->syncStartEmitCookie != NULL);
break;
case CORINFO_HELP_MON_EXIT:
case CORINFO_HELP_MON_EXIT_STATIC:
noway_assert(compiler->syncEndEmitCookie == NULL);
compiler->syncEndEmitCookie =
- getEmitter()->emitAddLabel(gcInfo.gcVarPtrSetCur, gcInfo.gcRegGCrefSetCur, gcInfo.gcRegByrefSetCur);
+ GetEmitter()->emitAddLabel(gcInfo.gcVarPtrSetCur, gcInfo.gcRegGCrefSetCur, gcInfo.gcRegByrefSetCur);
noway_assert(compiler->syncEndEmitCookie != NULL);
break;
default:
assert(!varDsc->lvIsStructField || (compiler->lvaTable[varDsc->lvParentLcl].lvFieldCnt == 1));
var_types storeType = genActualType(varDsc->lvaArgType()); // We own the memory and can use the full move.
- getEmitter()->emitIns_S_R(ins_Store(storeType), emitTypeSize(storeType), varDsc->lvRegNum, varNum, 0);
+ GetEmitter()->emitIns_S_R(ins_Store(storeType), emitTypeSize(storeType), varDsc->lvRegNum, varNum, 0);
// Update lvRegNum life and GC info to indicate lvRegNum is dead and varDsc stack slot is going live.
// Note that we cannot modify varDsc->lvRegNum here because another basic block may not be expecting it.
// genCodeForBBList().
if (type0 != TYP_UNKNOWN)
{
- getEmitter()->emitIns_R_S(ins_Load(type0), emitTypeSize(type0), varDsc->lvArgReg, varNum, offset0);
+ GetEmitter()->emitIns_R_S(ins_Load(type0), emitTypeSize(type0), varDsc->lvArgReg, varNum, offset0);
regSet.rsMaskVars |= genRegMask(varDsc->lvArgReg);
gcInfo.gcMarkRegPtrVal(varDsc->lvArgReg, type0);
}
if (type1 != TYP_UNKNOWN)
{
- getEmitter()->emitIns_R_S(ins_Load(type1), emitTypeSize(type1), varDsc->lvOtherArgReg, varNum, offset1);
+ GetEmitter()->emitIns_R_S(ins_Load(type1), emitTypeSize(type1), varDsc->lvOtherArgReg, varNum, offset1);
regSet.rsMaskVars |= genRegMask(varDsc->lvOtherArgReg);
gcInfo.gcMarkRegPtrVal(varDsc->lvOtherArgReg, type1);
}
if (varDsc->lvRegNum != argReg)
{
assert(genIsValidReg(argReg));
- getEmitter()->emitIns_R_S(ins_Load(loadType), emitTypeSize(loadType), argReg, varNum, 0);
+ GetEmitter()->emitIns_R_S(ins_Load(loadType), emitTypeSize(loadType), argReg, varNum, 0);
// Update argReg life and GC Info to indicate varDsc stack slot is dead and argReg is going live.
// Note that we cannot modify varDsc->lvRegNum here because another basic block may not be expecting it.
if (remainingIntArgMask != RBM_NONE)
{
instruction insCopyIntToFloat = ins_CopyIntToFloat(TYP_LONG, TYP_DOUBLE);
- getEmitter()->emitDisableGC();
+ GetEmitter()->emitDisableGC();
for (int argNum = 0, argOffset = 0; argNum < MAX_REG_ARG; ++argNum)
{
regNumber argReg = intArgRegs[argNum];
if ((remainingIntArgMask & argRegMask) != 0)
{
remainingIntArgMask &= ~argRegMask;
- getEmitter()->emitIns_R_S(INS_mov, EA_8BYTE, argReg, firstArgVarNum, argOffset);
+ GetEmitter()->emitIns_R_S(INS_mov, EA_8BYTE, argReg, firstArgVarNum, argOffset);
// also load it in corresponding float arg reg
regNumber floatReg = compiler->getCallArgFloatRegister(argReg);
argOffset += REGSIZE_BYTES;
}
- getEmitter()->emitEnableGC();
+ GetEmitter()->emitEnableGC();
}
}
#endif // FEATURE_VARARG
{
regNumber baseReg = lea->Base()->gtRegNum;
regNumber indexReg = lea->Index()->gtRegNum;
- getEmitter()->emitIns_R_ARX(INS_lea, size, lea->gtRegNum, baseReg, indexReg, lea->gtScale, lea->Offset());
+ GetEmitter()->emitIns_R_ARX(INS_lea, size, lea->gtRegNum, baseReg, indexReg, lea->gtScale, lea->Offset());
}
else if (lea->Base())
{
- getEmitter()->emitIns_R_AR(INS_lea, size, lea->gtRegNum, lea->Base()->gtRegNum, lea->Offset());
+ GetEmitter()->emitIns_R_AR(INS_lea, size, lea->gtRegNum, lea->Base()->gtRegNum, lea->Offset());
}
else if (lea->Index())
{
- getEmitter()->emitIns_R_ARX(INS_lea, size, lea->gtRegNum, REG_NA, lea->Index()->gtRegNum, lea->gtScale,
+ GetEmitter()->emitIns_R_ARX(INS_lea, size, lea->gtRegNum, REG_NA, lea->Index()->gtRegNum, lea->gtScale,
lea->Offset());
}
ins = ins_FloatCompare(op1Type);
cmpAttr = emitTypeSize(op1Type);
- getEmitter()->emitInsBinary(ins, cmpAttr, op1, op2);
+ GetEmitter()->emitInsBinary(ins, cmpAttr, op1, op2);
// Are we evaluating this into a register?
if (targetReg != REG_NA)
// TYP_UINT and TYP_ULONG should not appear here, only small types can be unsigned
assert(!varTypeIsUnsigned(type) || varTypeIsSmall(type));
- getEmitter()->emitInsBinary(ins, emitTypeSize(type), op1, op2);
+ GetEmitter()->emitInsBinary(ins, emitTypeSize(type), op1, op2);
// Are we evaluating this into a register?
if (targetReg != REG_NA)
switch (desc.CheckKind())
{
case GenIntCastDesc::CHECK_POSITIVE:
- getEmitter()->emitIns_R_R(INS_test, EA_SIZE(desc.CheckSrcSize()), reg, reg);
+ GetEmitter()->emitIns_R_R(INS_test, EA_SIZE(desc.CheckSrcSize()), reg, reg);
genJumpToThrowHlpBlk(EJ_jl, SCK_OVERFLOW);
break;
// upper 32 bits are zero. This requires a temporary register.
const regNumber tempReg = cast->GetSingleTempReg();
assert(tempReg != reg);
- getEmitter()->emitIns_R_R(INS_mov, EA_8BYTE, tempReg, reg);
- getEmitter()->emitIns_R_I(INS_shr_N, EA_8BYTE, tempReg, 32);
+ GetEmitter()->emitIns_R_R(INS_mov, EA_8BYTE, tempReg, reg);
+ GetEmitter()->emitIns_R_I(INS_shr_N, EA_8BYTE, tempReg, 32);
genJumpToThrowHlpBlk(EJ_jne, SCK_OVERFLOW);
}
break;
case GenIntCastDesc::CHECK_POSITIVE_INT_RANGE:
- getEmitter()->emitIns_R_I(INS_cmp, EA_8BYTE, reg, INT32_MAX);
+ GetEmitter()->emitIns_R_I(INS_cmp, EA_8BYTE, reg, INT32_MAX);
genJumpToThrowHlpBlk(EJ_ja, SCK_OVERFLOW);
break;
case GenIntCastDesc::CHECK_INT_RANGE:
- getEmitter()->emitIns_R_I(INS_cmp, EA_8BYTE, reg, INT32_MAX);
+ GetEmitter()->emitIns_R_I(INS_cmp, EA_8BYTE, reg, INT32_MAX);
genJumpToThrowHlpBlk(EJ_jg, SCK_OVERFLOW);
- getEmitter()->emitIns_R_I(INS_cmp, EA_8BYTE, reg, INT32_MIN);
+ GetEmitter()->emitIns_R_I(INS_cmp, EA_8BYTE, reg, INT32_MIN);
genJumpToThrowHlpBlk(EJ_jl, SCK_OVERFLOW);
break;
#endif
const int castMaxValue = desc.CheckSmallIntMax();
const int castMinValue = desc.CheckSmallIntMin();
- getEmitter()->emitIns_R_I(INS_cmp, EA_SIZE(desc.CheckSrcSize()), reg, castMaxValue);
+ GetEmitter()->emitIns_R_I(INS_cmp, EA_SIZE(desc.CheckSrcSize()), reg, castMaxValue);
genJumpToThrowHlpBlk((castMinValue == 0) ? EJ_ja : EJ_jg, SCK_OVERFLOW);
if (castMinValue != 0)
{
- getEmitter()->emitIns_R_I(INS_cmp, EA_SIZE(desc.CheckSrcSize()), reg, castMinValue);
+ GetEmitter()->emitIns_R_I(INS_cmp, EA_SIZE(desc.CheckSrcSize()), reg, castMinValue);
genJumpToThrowHlpBlk(EJ_jl, SCK_OVERFLOW);
}
}
const regNumber srcReg = cast->gtGetOp1()->gtRegNum;
const regNumber dstReg = cast->gtRegNum;
- emitter* emit = getEmitter();
+ emitter* emit = GetEmitter();
assert(genIsValidIntReg(srcReg));
assert(genIsValidIntReg(dstReg));
else
{
instruction ins = ins_FloatConv(dstType, srcType);
- getEmitter()->emitInsBinary(ins, emitTypeSize(dstType), treeNode, op1);
+ GetEmitter()->emitInsBinary(ins, emitTypeSize(dstType), treeNode, op1);
}
genProduceReg(treeNode);
// cvtsi2ss/sd instruction.
genConsumeOperands(treeNode->AsOp());
- getEmitter()->emitIns_R_R(INS_xorps, EA_4BYTE, treeNode->gtRegNum, treeNode->gtRegNum);
+ GetEmitter()->emitIns_R_R(INS_xorps, EA_4BYTE, treeNode->gtRegNum, treeNode->gtRegNum);
// Note that here we need to specify srcType that will determine
// the size of source reg/mem operand and rex.w prefix.
instruction ins = ins_FloatConv(dstType, TYP_INT);
- getEmitter()->emitInsBinary(ins, emitTypeSize(srcType), treeNode, op1);
+ GetEmitter()->emitInsBinary(ins, emitTypeSize(srcType), treeNode, op1);
// Handle the case of srcType = TYP_ULONG. SSE2 conversion instruction
// will interpret ULONG value as LONG. Hence we need to adjust the
static_assert_no_msg(sizeof(double) == sizeof(__int64));
*((__int64*)&d) = 0x43f0000000000000LL;
- *cns = getEmitter()->emitFltOrDblConst(d, EA_8BYTE);
+ *cns = GetEmitter()->emitFltOrDblConst(d, EA_8BYTE);
}
- getEmitter()->emitIns_R_C(INS_addsd, EA_8BYTE, treeNode->gtRegNum, *cns, 0);
+ GetEmitter()->emitIns_R_C(INS_addsd, EA_8BYTE, treeNode->gtRegNum, *cns, 0);
genDefineTempLabel(label);
}
// the size of destination integer register and also the rex.w prefix.
genConsumeOperands(treeNode->AsOp());
instruction ins = ins_FloatConv(TYP_INT, srcType);
- getEmitter()->emitInsBinary(ins, emitTypeSize(dstType), treeNode, op1);
+ GetEmitter()->emitInsBinary(ins, emitTypeSize(dstType), treeNode, op1);
genProduceReg(treeNode);
}
if (*bitMask == nullptr)
{
assert(cnsAddr != nullptr);
- *bitMask = getEmitter()->emitAnyConst(cnsAddr, genTypeSize(targetType), emitDataAlignment::Preferred);
+ *bitMask = GetEmitter()->emitAnyConst(cnsAddr, genTypeSize(targetType), emitDataAlignment::Preferred);
}
// We need an additional register for bitmask.
operandReg = tmpReg;
}
- getEmitter()->emitIns_R_C(ins_Load(targetType, false), emitTypeSize(targetType), tmpReg, *bitMask, 0);
+ GetEmitter()->emitIns_R_C(ins_Load(targetType, false), emitTypeSize(targetType), tmpReg, *bitMask, 0);
assert(ins != INS_invalid);
inst_RV_RV(ins, targetReg, operandReg, targetType);
}
if (srcNode->isContained() || srcNode->isUsedFromSpillTemp())
{
- emitter* emit = getEmitter();
+ emitter* emit = GetEmitter();
TempDsc* tmpDsc = nullptr;
unsigned varNum = BAD_VAR_NUM;
assert(srcNode->TypeGet() == treeNode->TypeGet());
genConsumeOperands(treeNode->AsOp());
- getEmitter()->emitInsBinary(ins_FloatSqrt(treeNode->TypeGet()), emitTypeSize(treeNode), treeNode, srcNode);
+ GetEmitter()->emitInsBinary(ins_FloatSqrt(treeNode->TypeGet()), emitTypeSize(treeNode), treeNode, srcNode);
break;
}
assert(!varTypeIsSIMD(fieldType)); // Q: can we get here with SIMD?
assert(fieldNode->IsRegOptional());
TempDsc* tmp = getSpillTempDsc(fieldNode);
- getEmitter()->emitIns_S(INS_push, emitActualTypeSize(fieldNode->TypeGet()), tmp->tdTempNum(), 0);
+ GetEmitter()->emitIns_S(INS_push, emitActualTypeSize(fieldNode->TypeGet()), tmp->tdTempNum(), 0);
regSet.tmpRlsTemp(tmp);
}
else
if (data->isContainedIntOrIImmed())
{
- getEmitter()->emitIns_S_I(ins_Store(targetType), emitTypeSize(targetType), baseVarNum, argOffset,
+ GetEmitter()->emitIns_S_I(ins_Store(targetType), emitTypeSize(targetType), baseVarNum, argOffset,
(int)data->AsIntConCommon()->IconValue());
}
else
{
assert(data->isUsedFromReg());
genConsumeReg(data);
- getEmitter()->emitIns_S_R(ins_Store(targetType), emitTypeSize(targetType), data->gtRegNum, baseVarNum,
+ GetEmitter()->emitIns_S_R(ins_Store(targetType), emitTypeSize(targetType), data->gtRegNum, baseVarNum,
argOffset);
}
}
}
assert(genIsValidFloatReg(srcReg));
inst_RV_IV(INS_sub, REG_SPBASE, size, EA_PTRSIZE);
- getEmitter()->emitIns_AR_R(ins, attr, srcReg, REG_SPBASE, 0);
+ GetEmitter()->emitIns_AR_R(ins, attr, srcReg, REG_SPBASE, 0);
}
AddStackLevel(size);
}
}
else
{
- getEmitter()->emitIns_AR_R(ins, attr, srcReg, REG_SPBASE, offset);
+ GetEmitter()->emitIns_AR_R(ins, attr, srcReg, REG_SPBASE, offset);
}
#else // !_TARGET_X86_
assert(m_stkArgVarNum != BAD_VAR_NUM);
- getEmitter()->emitIns_S_R(ins, attr, srcReg, m_stkArgVarNum, m_stkArgOffset + offset);
+ GetEmitter()->emitIns_S_R(ins, attr, srcReg, m_stkArgVarNum, m_stkArgOffset + offset);
#endif // !_TARGET_X86_
}
const unsigned offset = i * TARGET_POINTER_SIZE;
if (srcAddrInReg)
{
- getEmitter()->emitIns_AR_R(INS_push, slotAttr, REG_NA, srcRegNum, offset);
+ GetEmitter()->emitIns_AR_R(INS_push, slotAttr, REG_NA, srcRegNum, offset);
}
else
{
- getEmitter()->emitIns_S(INS_push, slotAttr, srcLclNum, srcLclOffset + offset);
+ GetEmitter()->emitIns_S(INS_push, slotAttr, srcLclNum, srcLclOffset + offset);
}
AddStackLevel(TARGET_POINTER_SIZE);
}
}
else
{
- getEmitter()->emitIns_R_I(INS_mov, EA_4BYTE, REG_RCX, adjacentNonGCSlotCount);
+ GetEmitter()->emitIns_R_I(INS_mov, EA_4BYTE, REG_RCX, adjacentNonGCSlotCount);
instGen(INS_r_movsp);
}
}
// instGen(INS_movsp); and emission of gc info.
var_types memType = layout->GetGCPtrType(i);
- getEmitter()->emitIns_R_AR(ins_Load(memType), emitTypeSize(memType), REG_RCX, REG_RSI, 0);
+ GetEmitter()->emitIns_R_AR(ins_Load(memType), emitTypeSize(memType), REG_RCX, REG_RSI, 0);
genStoreRegToStackArg(memType, REG_RCX, i * TARGET_POINTER_SIZE);
#ifdef DEBUG
numGCSlotsCopied++;
// Source for the copy operation.
// If a LocalAddr, use EA_PTRSIZE - copy from stack.
// If not a LocalAddr, use EA_BYREF - the source location is not on the stack.
- getEmitter()->emitIns_R_I(INS_add, srcAddrAttr, REG_RSI, TARGET_POINTER_SIZE);
+ GetEmitter()->emitIns_R_I(INS_add, srcAddrAttr, REG_RSI, TARGET_POINTER_SIZE);
// Always copying to the stack - outgoing arg area
// (or the outgoing arg area of the caller for a tail call) - use EA_PTRSIZE.
- getEmitter()->emitIns_R_I(INS_add, EA_PTRSIZE, REG_RDI, TARGET_POINTER_SIZE);
+ GetEmitter()->emitIns_R_I(INS_add, EA_PTRSIZE, REG_RDI, TARGET_POINTER_SIZE);
}
}
}
}
// clang-format off
- getEmitter()->emitIns_Call(callType,
+ GetEmitter()->emitIns_Call(callType,
compiler->eeFindHelper(helper),
INDEBUG_LDISASM_COMMA(nullptr) addr,
argSize,
genDefineTempLabel(genCreateTempLabel());
// vhaddpd ymm0,ymm1,ymm2
- getEmitter()->emitIns_R_R_R(INS_haddpd, EA_32BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
+ GetEmitter()->emitIns_R_R_R(INS_haddpd, EA_32BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
// vaddss xmm0,xmm1,xmm2
- getEmitter()->emitIns_R_R_R(INS_addss, EA_4BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
+ GetEmitter()->emitIns_R_R_R(INS_addss, EA_4BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
// vaddsd xmm0,xmm1,xmm2
- getEmitter()->emitIns_R_R_R(INS_addsd, EA_8BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
+ GetEmitter()->emitIns_R_R_R(INS_addsd, EA_8BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
// vaddps xmm0,xmm1,xmm2
- getEmitter()->emitIns_R_R_R(INS_addps, EA_16BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
+ GetEmitter()->emitIns_R_R_R(INS_addps, EA_16BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
// vaddps ymm0,ymm1,ymm2
- getEmitter()->emitIns_R_R_R(INS_addps, EA_32BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
+ GetEmitter()->emitIns_R_R_R(INS_addps, EA_32BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
// vaddpd xmm0,xmm1,xmm2
- getEmitter()->emitIns_R_R_R(INS_addpd, EA_16BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
+ GetEmitter()->emitIns_R_R_R(INS_addpd, EA_16BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
// vaddpd ymm0,ymm1,ymm2
- getEmitter()->emitIns_R_R_R(INS_addpd, EA_32BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
+ GetEmitter()->emitIns_R_R_R(INS_addpd, EA_32BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
// vsubss xmm0,xmm1,xmm2
- getEmitter()->emitIns_R_R_R(INS_subss, EA_4BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
+ GetEmitter()->emitIns_R_R_R(INS_subss, EA_4BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
// vsubsd xmm0,xmm1,xmm2
- getEmitter()->emitIns_R_R_R(INS_subsd, EA_8BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
+ GetEmitter()->emitIns_R_R_R(INS_subsd, EA_8BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
// vsubps ymm0,ymm1,ymm2
- getEmitter()->emitIns_R_R_R(INS_subps, EA_16BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
+ GetEmitter()->emitIns_R_R_R(INS_subps, EA_16BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
// vsubps ymm0,ymm1,ymm2
- getEmitter()->emitIns_R_R_R(INS_subps, EA_32BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
+ GetEmitter()->emitIns_R_R_R(INS_subps, EA_32BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
// vsubpd xmm0,xmm1,xmm2
- getEmitter()->emitIns_R_R_R(INS_subpd, EA_16BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
+ GetEmitter()->emitIns_R_R_R(INS_subpd, EA_16BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
// vsubpd ymm0,ymm1,ymm2
- getEmitter()->emitIns_R_R_R(INS_subpd, EA_32BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
+ GetEmitter()->emitIns_R_R_R(INS_subpd, EA_32BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
// vmulss xmm0,xmm1,xmm2
- getEmitter()->emitIns_R_R_R(INS_mulss, EA_4BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
+ GetEmitter()->emitIns_R_R_R(INS_mulss, EA_4BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
// vmulsd xmm0,xmm1,xmm2
- getEmitter()->emitIns_R_R_R(INS_mulsd, EA_8BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
+ GetEmitter()->emitIns_R_R_R(INS_mulsd, EA_8BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
// vmulps xmm0,xmm1,xmm2
- getEmitter()->emitIns_R_R_R(INS_mulps, EA_16BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
+ GetEmitter()->emitIns_R_R_R(INS_mulps, EA_16BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
// vmulpd xmm0,xmm1,xmm2
- getEmitter()->emitIns_R_R_R(INS_mulpd, EA_16BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
+ GetEmitter()->emitIns_R_R_R(INS_mulpd, EA_16BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
// vmulps ymm0,ymm1,ymm2
- getEmitter()->emitIns_R_R_R(INS_mulps, EA_32BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
+ GetEmitter()->emitIns_R_R_R(INS_mulps, EA_32BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
// vmulpd ymm0,ymm1,ymm2
- getEmitter()->emitIns_R_R_R(INS_mulpd, EA_32BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
+ GetEmitter()->emitIns_R_R_R(INS_mulpd, EA_32BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
// vandps xmm0,xmm1,xmm2
- getEmitter()->emitIns_R_R_R(INS_andps, EA_16BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
+ GetEmitter()->emitIns_R_R_R(INS_andps, EA_16BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
// vandpd xmm0,xmm1,xmm2
- getEmitter()->emitIns_R_R_R(INS_andpd, EA_16BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
+ GetEmitter()->emitIns_R_R_R(INS_andpd, EA_16BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
// vandps ymm0,ymm1,ymm2
- getEmitter()->emitIns_R_R_R(INS_andps, EA_32BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
+ GetEmitter()->emitIns_R_R_R(INS_andps, EA_32BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
// vandpd ymm0,ymm1,ymm2
- getEmitter()->emitIns_R_R_R(INS_andpd, EA_32BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
+ GetEmitter()->emitIns_R_R_R(INS_andpd, EA_32BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
// vorps xmm0,xmm1,xmm2
- getEmitter()->emitIns_R_R_R(INS_orps, EA_16BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
+ GetEmitter()->emitIns_R_R_R(INS_orps, EA_16BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
// vorpd xmm0,xmm1,xmm2
- getEmitter()->emitIns_R_R_R(INS_orpd, EA_16BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
+ GetEmitter()->emitIns_R_R_R(INS_orpd, EA_16BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
// vorps ymm0,ymm1,ymm2
- getEmitter()->emitIns_R_R_R(INS_orps, EA_32BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
+ GetEmitter()->emitIns_R_R_R(INS_orps, EA_32BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
// vorpd ymm0,ymm1,ymm2
- getEmitter()->emitIns_R_R_R(INS_orpd, EA_32BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
+ GetEmitter()->emitIns_R_R_R(INS_orpd, EA_32BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
// vdivss xmm0,xmm1,xmm2
- getEmitter()->emitIns_R_R_R(INS_divss, EA_4BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
+ GetEmitter()->emitIns_R_R_R(INS_divss, EA_4BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
// vdivsd xmm0,xmm1,xmm2
- getEmitter()->emitIns_R_R_R(INS_divsd, EA_8BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
+ GetEmitter()->emitIns_R_R_R(INS_divsd, EA_8BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
// vdivss xmm0,xmm1,xmm2
- getEmitter()->emitIns_R_R_R(INS_divss, EA_4BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
+ GetEmitter()->emitIns_R_R_R(INS_divss, EA_4BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
// vdivsd xmm0,xmm1,xmm2
- getEmitter()->emitIns_R_R_R(INS_divsd, EA_8BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
+ GetEmitter()->emitIns_R_R_R(INS_divsd, EA_8BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
// vdivss xmm0,xmm1,xmm2
- getEmitter()->emitIns_R_R_R(INS_cvtss2sd, EA_4BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
+ GetEmitter()->emitIns_R_R_R(INS_cvtss2sd, EA_4BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
// vdivsd xmm0,xmm1,xmm2
- getEmitter()->emitIns_R_R_R(INS_cvtsd2ss, EA_8BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
+ GetEmitter()->emitIns_R_R_R(INS_cvtsd2ss, EA_8BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
#endif // ALL_XARCH_EMITTER_UNIT_TESTS
printf("*************** End of genAmd64EmitterUnitTests()\n");
}
#if defined(UNIX_X86_ABI)
// Manually align the stack to be 16-byte aligned. This is similar to CodeGen::genAlignStackBeforeCall()
- getEmitter()->emitIns_R_I(INS_sub, EA_4BYTE, REG_SPBASE, 0xC);
+ GetEmitter()->emitIns_R_I(INS_sub, EA_4BYTE, REG_SPBASE, 0xC);
#endif // UNIX_X86_ABI
// Push the profilerHandle
if (compiler->compProfilerMethHndIndirected)
{
- getEmitter()->emitIns_AR_R(INS_push, EA_PTR_DSP_RELOC, REG_NA, REG_NA, (ssize_t)compiler->compProfilerMethHnd);
+ GetEmitter()->emitIns_AR_R(INS_push, EA_PTR_DSP_RELOC, REG_NA, REG_NA, (ssize_t)compiler->compProfilerMethHnd);
}
else
{
#if defined(UNIX_X86_ABI)
// Restoring alignment manually. This is similar to CodeGen::genRemoveAlignmentAfterCall
- getEmitter()->emitIns_R_I(INS_add, EA_4BYTE, REG_SPBASE, 0x10);
+ GetEmitter()->emitIns_R_I(INS_add, EA_4BYTE, REG_SPBASE, 0x10);
#endif // UNIX_X86_ABI
/* Restore the stack level */
#if defined(UNIX_X86_ABI)
// Manually align the stack to be 16-byte aligned. This is similar to CodeGen::genAlignStackBeforeCall()
- getEmitter()->emitIns_R_I(INS_sub, EA_4BYTE, REG_SPBASE, 0xC);
+ GetEmitter()->emitIns_R_I(INS_sub, EA_4BYTE, REG_SPBASE, 0xC);
AddStackLevel(0xC);
AddNestedAlignment(0xC);
#endif // UNIX_X86_ABI
if (compiler->compProfilerMethHndIndirected)
{
- getEmitter()->emitIns_AR_R(INS_push, EA_PTR_DSP_RELOC, REG_NA, REG_NA, (ssize_t)compiler->compProfilerMethHnd);
+ GetEmitter()->emitIns_AR_R(INS_push, EA_PTR_DSP_RELOC, REG_NA, REG_NA, (ssize_t)compiler->compProfilerMethHnd);
}
else
{
#if defined(UNIX_X86_ABI)
// Restoring alignment manually. This is similar to CodeGen::genRemoveAlignmentAfterCall
- getEmitter()->emitIns_R_I(INS_add, EA_4BYTE, REG_SPBASE, 0x10);
+ GetEmitter()->emitIns_R_I(INS_add, EA_4BYTE, REG_SPBASE, 0x10);
SubtractStackLevel(0x10);
SubtractNestedAlignment(0xC);
#endif // UNIX_X86_ABI
}
#endif // FEATURE_SIMD
- getEmitter()->emitIns_S_R(store_ins, emitTypeSize(storeType), argReg, varNum, 0);
+ GetEmitter()->emitIns_S_R(store_ins, emitTypeSize(storeType), argReg, varNum, 0);
}
}
{
// Profiler hooks enabled during Ngen time.
// Profiler handle needs to be accessed through an indirection of a pointer.
- getEmitter()->emitIns_R_AI(INS_mov, EA_PTR_DSP_RELOC, REG_ARG_0, (ssize_t)compiler->compProfilerMethHnd);
+ GetEmitter()->emitIns_R_AI(INS_mov, EA_PTR_DSP_RELOC, REG_ARG_0, (ssize_t)compiler->compProfilerMethHnd);
}
else
{
// of that offset to FramePointer to obtain caller's SP value.
assert(compiler->lvaOutgoingArgSpaceVar != BAD_VAR_NUM);
int callerSPOffset = compiler->lvaToCallerSPRelativeOffset(0, isFramePointerUsed());
- getEmitter()->emitIns_R_AR(INS_lea, EA_PTRSIZE, REG_ARG_1, genFramePointerReg(), -callerSPOffset);
+ GetEmitter()->emitIns_R_AR(INS_lea, EA_PTRSIZE, REG_ARG_1, genFramePointerReg(), -callerSPOffset);
// Can't have a call until we have enough padding for rejit
genPrologPadForReJit();
}
#endif // FEATURE_SIMD
- getEmitter()->emitIns_R_S(load_ins, emitTypeSize(loadType), argReg, varNum, 0);
+ GetEmitter()->emitIns_R_S(load_ins, emitTypeSize(loadType), argReg, varNum, 0);
#if FEATURE_VARARG
if (compiler->info.compIsVarArgs && varTypeIsFloating(loadType))
{
// Profiler hooks enabled during Ngen time.
// Profiler handle needs to be accessed through an indirection of a pointer.
- getEmitter()->emitIns_R_AI(INS_mov, EA_PTR_DSP_RELOC, REG_PROFILER_ENTER_ARG_0,
+ GetEmitter()->emitIns_R_AI(INS_mov, EA_PTR_DSP_RELOC, REG_PROFILER_ENTER_ARG_0,
(ssize_t)compiler->compProfilerMethHnd);
}
else
// of that offset to FramePointer to obtain caller's SP value.
assert(compiler->lvaOutgoingArgSpaceVar != BAD_VAR_NUM);
int callerSPOffset = compiler->lvaToCallerSPRelativeOffset(0, isFramePointerUsed());
- getEmitter()->emitIns_R_AR(INS_lea, EA_PTRSIZE, REG_PROFILER_ENTER_ARG_1, genFramePointerReg(), -callerSPOffset);
+ GetEmitter()->emitIns_R_AR(INS_lea, EA_PTRSIZE, REG_PROFILER_ENTER_ARG_1, genFramePointerReg(), -callerSPOffset);
// Can't have a call until we have enough padding for rejit
genPrologPadForReJit();
{
// Profiler hooks enabled during Ngen time.
// Profiler handle needs to be accessed through an indirection of an address.
- getEmitter()->emitIns_R_AI(INS_mov, EA_PTR_DSP_RELOC, REG_ARG_0, (ssize_t)compiler->compProfilerMethHnd);
+ GetEmitter()->emitIns_R_AI(INS_mov, EA_PTR_DSP_RELOC, REG_ARG_0, (ssize_t)compiler->compProfilerMethHnd);
}
else
{
// Caller's SP relative offset to FramePointer will be negative. We need to add absolute
// value of that offset to FramePointer to obtain caller's SP value.
int callerSPOffset = compiler->lvaToCallerSPRelativeOffset(0, isFramePointerUsed());
- getEmitter()->emitIns_R_AR(INS_lea, EA_PTRSIZE, REG_ARG_1, genFramePointerReg(), -callerSPOffset);
+ GetEmitter()->emitIns_R_AR(INS_lea, EA_PTRSIZE, REG_ARG_1, genFramePointerReg(), -callerSPOffset);
}
else
{
NYI_IF((varDsc == nullptr) || !varDsc->lvIsParam, "Profiler ELT callback for a method without any params");
// lea rdx, [FramePointer + Arg0's offset]
- getEmitter()->emitIns_R_S(INS_lea, EA_PTRSIZE, REG_ARG_1, 0, 0);
+ GetEmitter()->emitIns_R_S(INS_lea, EA_PTRSIZE, REG_ARG_1, 0, 0);
}
// We can use any callee trash register (other than RAX, RCX, RDX) for call target.
// RDI = ProfilerMethHnd
if (compiler->compProfilerMethHndIndirected)
{
- getEmitter()->emitIns_R_AI(INS_mov, EA_PTR_DSP_RELOC, REG_ARG_0, (ssize_t)compiler->compProfilerMethHnd);
+ GetEmitter()->emitIns_R_AI(INS_mov, EA_PTR_DSP_RELOC, REG_ARG_0, (ssize_t)compiler->compProfilerMethHnd);
}
else
{
if (compiler->lvaDoneFrameLayout == Compiler::FINAL_FRAME_LAYOUT)
{
int callerSPOffset = compiler->lvaToCallerSPRelativeOffset(0, isFramePointerUsed());
- getEmitter()->emitIns_R_AR(INS_lea, EA_PTRSIZE, REG_ARG_1, genFramePointerReg(), -callerSPOffset);
+ GetEmitter()->emitIns_R_AR(INS_lea, EA_PTRSIZE, REG_ARG_1, genFramePointerReg(), -callerSPOffset);
}
else
{
NYI_IF((varDsc == nullptr) || !varDsc->lvIsParam, "Profiler ELT callback for a method without any params");
// lea rdx, [FramePointer + Arg0's offset]
- getEmitter()->emitIns_R_S(INS_lea, EA_PTRSIZE, REG_ARG_1, 0, 0);
+ GetEmitter()->emitIns_R_S(INS_lea, EA_PTRSIZE, REG_ARG_1, 0, 0);
}
// We can use any callee trash register (other than RAX, RDI, RSI) for call target.
if (nextMappingDsc)
{
- UNATIVE_OFFSET offset = nextMappingDsc->ipmdNativeLoc.CodeOffset(genEmitter);
+ UNATIVE_OFFSET offset = nextMappingDsc->ipmdNativeLoc.CodeOffset(GetEmitter());
if (offset <= curIP)
{
{
if (canUseVexEncoding())
{
- codeGen->getEmitter()->SetUseVEXEncoding(true);
+ codeGen->GetEmitter()->SetUseVEXEncoding(true);
// Assume each JITted method does not contain AVX instruction at first
- codeGen->getEmitter()->SetContainsAVX(false);
- codeGen->getEmitter()->SetContains256bitAVX(false);
+ codeGen->GetEmitter()->SetContainsAVX(false);
+ codeGen->GetEmitter()->SetContains256bitAVX(false);
}
}
#endif // _TARGET_XARCH_
/* Tell the emitter that we're done with this function */
- genEmitter->emitEndCG();
+ GetEmitter()->emitEndCG();
DoneCleanUp:
compDone();
if (!compIsForInlining())
{
- codeGen->getEmitter()->emitBegCG(this, compHnd);
+ codeGen->GetEmitter()->emitBegCG(this, compHnd);
}
info.compIsStatic = (info.compFlags & CORINFO_FLG_STATIC) != 0;
// convenience and backward compatibility, but the properties can only be set by invoking
// the setter on CodeGenContext directly.
- __declspec(property(get = getEmitter)) emitter* genEmitter;
- emitter* getEmitter() const
+ emitter* GetEmitter() const
{
- return codeGen->getEmitter();
+ return codeGen->GetEmitter();
}
bool isFramePointerUsed() const
header->prologSize = static_cast<unsigned char>(prologSize);
assert(FitsIn<unsigned char>(epilogSize));
header->epilogSize = static_cast<unsigned char>(epilogSize);
- header->epilogCount = compiler->getEmitter()->emitGetEpilogCnt();
- if (header->epilogCount != compiler->getEmitter()->emitGetEpilogCnt())
+ header->epilogCount = compiler->GetEmitter()->emitGetEpilogCnt();
+ if (header->epilogCount != compiler->GetEmitter()->emitGetEpilogCnt())
IMPL_LIMITATION("emitGetEpilogCnt() does not fit in InfoHdr::epilogCount");
- header->epilogAtEnd = compiler->getEmitter()->emitHasEpilogEnd();
+ header->epilogAtEnd = compiler->GetEmitter()->emitHasEpilogEnd();
if (compiler->codeGen->regSet.rsRegsModified(RBM_EDI))
header->ediSaved = 1;
if (compiler->info.compFlags & CORINFO_FLG_SYNCH)
{
assert(compiler->syncStartEmitCookie != NULL);
- header->syncStartOffset = compiler->getEmitter()->emitCodeOffset(compiler->syncStartEmitCookie, 0);
+ header->syncStartOffset = compiler->GetEmitter()->emitCodeOffset(compiler->syncStartEmitCookie, 0);
assert(header->syncStartOffset != INVALID_SYNC_OFFSET);
assert(compiler->syncEndEmitCookie != NULL);
- header->syncEndOffset = compiler->getEmitter()->emitCodeOffset(compiler->syncEndEmitCookie, 0);
+ header->syncEndOffset = compiler->GetEmitter()->emitCodeOffset(compiler->syncEndEmitCookie, 0);
assert(header->syncEndOffset != INVALID_SYNC_OFFSET);
assert(header->syncStartOffset < header->syncEndOffset);
gcEpilogTable = mask ? dest : NULL;
gcEpilogPrevOffset = 0;
- size_t sz = compiler->getEmitter()->emitGenEpilogLst(gcRecordEpilog, this);
+ size_t sz = compiler->GetEmitter()->emitGenEpilogLst(gcRecordEpilog, this);
/* Add the size of the epilog table to the total size */
regPtrDsc* genRegPtrTemp;
regNumber thisRegNum = regNumber(0);
- PendingArgsStack pasStk(compiler->getEmitter()->emitMaxStackDepth, compiler);
+ PendingArgsStack pasStk(compiler->GetEmitter()->emitMaxStackDepth, compiler);
/* Walk the list of pointer register/argument entries */
// Currently just prologs and epilogs.
InterruptibleRangeReporter reporter(prologSize, gcInfoEncoderWithLog);
- compiler->getEmitter()->emitGenNoGCLst(reporter);
+ compiler->GetEmitter()->emitGenNoGCLst(reporter);
prologSize = reporter.prevStart;
// Report any remainder
else
{
printRegMaskInt(gcRegGCrefSetCur);
- compiler->getEmitter()->emitDispRegSet(gcRegGCrefSetCur);
+ compiler->GetEmitter()->emitDispRegSet(gcRegGCrefSetCur);
printf(" => ");
}
printRegMaskInt(gcRegGCrefSetNew);
- compiler->getEmitter()->emitDispRegSet(gcRegGCrefSetNew);
+ compiler->GetEmitter()->emitDispRegSet(gcRegGCrefSetNew);
printf("\n");
}
}
else
{
printRegMaskInt(gcRegByrefSetCur);
- compiler->getEmitter()->emitDispRegSet(gcRegByrefSetCur);
+ compiler->GetEmitter()->emitDispRegSet(gcRegByrefSetCur);
printf(" => ");
}
printRegMaskInt(gcRegByrefSetNew);
- compiler->getEmitter()->emitDispRegSet(gcRegByrefSetNew);
+ compiler->GetEmitter()->emitDispRegSet(gcRegByrefSetNew);
printf("\n");
}
}
int offs = varDsc->lvStkOffs;
printf("GCINFO: untrckd %s lcl at [%s", varTypeGCstring(varDsc->TypeGet()),
- compiler->genEmitter->emitGetFrameReg());
+ compiler->GetEmitter()->emitGetFrameReg());
if (offs < 0)
{
int offs = tempThis->tdTempOffs();
printf("GCINFO: untrck %s Temp at [%s", varTypeGCstring(varDsc->TypeGet()),
- compiler->genEmitter->emitGetFrameReg());
+ compiler->GetEmitter()->emitGetFrameReg());
if (offs < 0)
{
regNumber op1Reg = REG_NA;
regNumber op2Reg = REG_NA;
- emitter* emit = getEmitter();
+ emitter* emit = GetEmitter();
assert(numArgs >= 0);
instruction ins = HWIntrinsicInfo::lookupIns(intrinsicId, baseType);
regNumber targetReg = node->gtRegNum;
GenTree* op1 = node->gtGetOp1();
GenTree* op2 = node->gtGetOp2();
- emitter* emit = getEmitter();
+ emitter* emit = GetEmitter();
if (op2 != nullptr)
{
regNumber targetReg = node->gtRegNum;
GenTree* op1 = node->gtGetOp1();
emitAttr simdSize = EA_ATTR(node->gtSIMDSize);
- emitter* emit = getEmitter();
+ emitter* emit = GetEmitter();
// TODO-XArch-CQ: Commutative operations can have op1 be contained
// TODO-XArch-CQ: Non-VEX encoded instructions can have both ops contained
void CodeGen::genHWIntrinsic_R_R_RM(
GenTreeHWIntrinsic* node, instruction ins, emitAttr attr, regNumber targetReg, regNumber op1Reg, GenTree* op2)
{
- emitter* emit = getEmitter();
+ emitter* emit = GetEmitter();
// TODO-XArch-CQ: Commutative operations can have op1 be contained
// TODO-XArch-CQ: Non-VEX encoded instructions can have both ops contained
GenTree* op1 = node->gtGetOp1();
GenTree* op2 = node->gtGetOp2();
emitAttr simdSize = EA_ATTR(node->gtSIMDSize);
- emitter* emit = getEmitter();
+ emitter* emit = GetEmitter();
// TODO-XArch-CQ: Commutative operations can have op1 be contained
// TODO-XArch-CQ: Non-VEX encoded instructions can have both ops contained
GenTree* op2 = node->gtGetOp2();
GenTree* op3 = nullptr;
emitAttr simdSize = EA_ATTR(node->gtSIMDSize);
- emitter* emit = getEmitter();
+ emitter* emit = GetEmitter();
assert(op1->OperIsList());
assert(op2 == nullptr);
assert(op1Reg != REG_NA);
assert(op2Reg != REG_NA);
- emitter* emit = getEmitter();
+ emitter* emit = GetEmitter();
if (op3->isContained() || op3->isUsedFromSpillTemp())
{
// AVX2 Gather intrinsics use managed non-const fallback since they have discrete imm8 value range
// that does work with the current compiler generated jump-table fallback
assert(!HWIntrinsicInfo::isAVX2GatherIntrinsic(intrinsic));
- emitter* emit = getEmitter();
+ emitter* emit = GetEmitter();
const unsigned maxByte = (unsigned)HWIntrinsicInfo::lookupImmUpperBound(intrinsic) + 1;
assert(maxByte <= 256);
assert(node->gtGetOp2() == nullptr);
- emitter* emit = getEmitter();
+ emitter* emit = GetEmitter();
emitAttr attr = EA_ATTR(node->gtSIMDSize);
instruction ins = HWIntrinsicInfo::lookupIns(intrinsicId, baseType);
regNumber op2Reg = REG_NA;
regNumber op3Reg = REG_NA;
regNumber op4Reg = REG_NA;
- emitter* emit = getEmitter();
+ emitter* emit = GetEmitter();
genConsumeHWIntrinsicOperands(node);
var_types baseType = node->gtSIMDBaseType;
regNumber op1Reg = REG_NA;
regNumber op2Reg = REG_NA;
- emitter* emit = getEmitter();
+ emitter* emit = GetEmitter();
genConsumeHWIntrinsicOperands(node);
regNumber op2Reg = REG_NA;
regNumber op3Reg = REG_NA;
regNumber op4Reg = REG_NA;
- emitter* emit = getEmitter();
+ emitter* emit = GetEmitter();
genConsumeHWIntrinsicOperands(node);
GenTree* op2 = node->gtGetOp2();
var_types baseType = node->gtSIMDBaseType;
var_types targetType = node->TypeGet();
- emitter* emit = getEmitter();
+ emitter* emit = GetEmitter();
genConsumeHWIntrinsicOperands(node);
regNumber op1Reg = op1->gtRegNum;
regNumber op1Reg = REG_NA;
regNumber op2Reg = REG_NA;
regNumber targetReg = node->gtRegNum;
- emitter* emit = getEmitter();
+ emitter* emit = GetEmitter();
genConsumeHWIntrinsicOperands(node);
GenTree* op2 = node->gtGetOp2();
var_types targetType = node->TypeGet();
instruction ins = HWIntrinsicInfo::lookupIns(intrinsicId, targetType);
- emitter* emit = getEmitter();
+ emitter* emit = GetEmitter();
assert(targetReg != REG_NA);
assert(op1 != nullptr);
regNumber targetReg = node->gtRegNum;
if ((targetReg != sourceReg1) && (targetReg != sourceReg2))
{
- getEmitter()->emitIns_R_R(INS_xor, EA_4BYTE, targetReg, targetReg);
+ GetEmitter()->emitIns_R_R(INS_xor, EA_4BYTE, targetReg, targetReg);
}
genHWIntrinsic_R_RM(node, ins, emitTypeSize(node->TypeGet()));
}
{
/* Display the instruction offset within the emit block */
- // printf("[%08X:%04X]", getEmitter().emitCodeCurBlock(), getEmitter().emitCodeOffsInBlock());
+ // printf("[%08X:%04X]", GetEmitter().emitCodeCurBlock(), GetEmitter().emitCodeOffsInBlock());
/* Display the FP stack depth (before the instruction is executed) */
// instGen: Generate an instruction that takes no operands.
//
// Arguments:
//    ins - the instruction to emit
//
void CodeGen::instGen(instruction ins)
{
    GetEmitter()->emitIns(ins);

#ifdef _TARGET_XARCH_
    // A workaround necessitated by limitations of emitter
    // if we are scheduled to insert a nop here, we have to delay it
    // hopefully we have not missed any other prefix instructions or places
    // they could be inserted
    if (ins == INS_lock && GetEmitter()->emitNextNop == 0)
    {
        GetEmitter()->emitNextNop = 1;
    }
#endif
}
// instNop: Generate a multi-byte nop instruction of the given size.
//
// Arguments:
//    size - the number of bytes the nop should occupy (at most 15,
//           the maximum x86/x64 instruction length)
//
void CodeGen::instNop(unsigned size)
{
    assert(size <= 15);
    GetEmitter()->emitIns_Nop(size);
}
#endif
#endif
#endif // !FEATURE_FIXED_OUT_ARGS
- getEmitter()->emitIns_J(emitter::emitJumpKindToIns(jmp), tgtBlock);
+ GetEmitter()->emitIns_J(emitter::emitJumpKindToIns(jmp), tgtBlock);
}
/*****************************************************************************
assert(genRegMask(reg) & RBM_BYTE_REGS);
// These instructions only write the low byte of 'reg'
- getEmitter()->emitIns_R(ins, EA_1BYTE, reg);
+ GetEmitter()->emitIns_R(ins, EA_1BYTE, reg);
#elif defined(_TARGET_ARM64_)
insCond cond;
/* Convert the condition to an insCond value */
NO_WAY("unexpected condition type");
return;
}
- getEmitter()->emitIns_R_COND(INS_cset, EA_8BYTE, reg, cond);
+ GetEmitter()->emitIns_R_COND(INS_cset, EA_8BYTE, reg, cond);
#else
NYI("inst_SET");
#endif
size = emitActualTypeSize(type);
}
- getEmitter()->emitIns_R(ins, size, reg);
+ GetEmitter()->emitIns_R(ins, size, reg);
}
/*****************************************************************************
}
#ifdef _TARGET_ARM_
- getEmitter()->emitIns_R_R(ins, size, reg1, reg2, flags);
+ GetEmitter()->emitIns_R_R(ins, size, reg1, reg2, flags);
#else
- getEmitter()->emitIns_R_R(ins, size, reg1, reg2);
+ GetEmitter()->emitIns_R_R(ins, size, reg1, reg2);
#endif
}
insFlags flags /* = INS_FLAGS_DONT_CARE */)
{
#ifdef _TARGET_ARM_
- getEmitter()->emitIns_R_R_R(ins, size, reg1, reg2, reg3, flags);
+ GetEmitter()->emitIns_R_R_R(ins, size, reg1, reg2, reg3, flags);
#elif defined(_TARGET_XARCH_)
- getEmitter()->emitIns_R_R_R(ins, size, reg1, reg2, reg3);
+ GetEmitter()->emitIns_R_R_R(ins, size, reg1, reg2, reg3);
#else
NYI("inst_RV_RV_RV");
#endif
// inst_IV: Generate an instruction with a single pointer-sized immediate operand.
//
// Arguments:
//    ins - the instruction to emit
//    val - the immediate value
//
void CodeGen::inst_IV(instruction ins, int val)
{
    GetEmitter()->emitIns_I(ins, EA_PTRSIZE, val);
}
/*****************************************************************************
// inst_IV_handle: Generate an instruction with a single immediate operand,
// where the immediate is a handle constant that may need relocation
// (EA_HANDLE_CNS_RELOC attribute).
//
// Arguments:
//    ins - the instruction to emit
//    val - the handle immediate value
//
void CodeGen::inst_IV_handle(instruction ins, int val)
{
    GetEmitter()->emitIns_I(ins, EA_HANDLE_CNS_RELOC, val);
}
/*****************************************************************************
assert(tree && (tree->gtOper == GT_LCL_VAR || tree->gtOper == GT_LCL_VAR_ADDR || tree->gtOper == GT_STORE_LCL_VAR));
assert(tree->gtLclVarCommon.gtLclNum < compiler->lvaCount);
- getEmitter()->emitVarRefOffs = tree->gtLclVar.gtLclILoffs;
+ GetEmitter()->emitVarRefOffs = tree->gtLclVar.gtLclILoffs;
#endif // DEBUG
}
#ifdef _TARGET_ARM_
if (arm_Valid_Imm_For_Instr(ins, val, flags))
{
- getEmitter()->emitIns_R_I(ins, size, reg, val, flags);
+ GetEmitter()->emitIns_R_I(ins, size, reg, val, flags);
}
else if (ins == INS_mov)
{
assert(ins != INS_cmp);
assert(ins != INS_tst);
assert(ins != INS_mov);
- getEmitter()->emitIns_R_R_I(ins, size, reg, reg, val);
+ GetEmitter()->emitIns_R_R_I(ins, size, reg, reg, val);
#else // !_TARGET_ARM_
#ifdef _TARGET_AMD64_
// Instead of an 8-byte immediate load, a 4-byte immediate will do fine
if (size == EA_8BYTE && ins == INS_mov && ((val & 0xFFFFFFFF00000000LL) == 0))
{
size = EA_4BYTE;
- getEmitter()->emitIns_R_I(ins, size, reg, val);
+ GetEmitter()->emitIns_R_I(ins, size, reg, val);
}
else if (EA_SIZE(size) == EA_8BYTE && ins != INS_mov && (((int)val != val) || EA_IS_CNS_RELOC(size)))
{
else
#endif // _TARGET_AMD64_
{
- getEmitter()->emitIns_R_I(ins, size, reg, val);
+ GetEmitter()->emitIns_R_I(ins, size, reg, val);
}
#endif // !_TARGET_ARM_
}
if (shfv)
{
- getEmitter()->emitIns_S_I(ins, size, varNum, offs, shfv);
+ GetEmitter()->emitIns_S_I(ins, size, varNum, offs, shfv);
}
else
{
- getEmitter()->emitIns_S(ins, size, varNum, offs);
+ GetEmitter()->emitIns_S(ins, size, varNum, offs);
}
return;
if (shfv)
{
- getEmitter()->emitIns_C_I(ins, size, tree->gtClsVar.gtClsVarHnd, offs, shfv);
+ GetEmitter()->emitIns_C_I(ins, size, tree->gtClsVar.gtClsVarHnd, offs, shfv);
}
else
{
- getEmitter()->emitIns_C(ins, size, tree->gtClsVar.gtClsVarHnd, offs);
+ GetEmitter()->emitIns_C(ins, size, tree->gtClsVar.gtClsVarHnd, offs);
}
return;
assert(varNum < compiler->lvaCount);
#if CPU_LOAD_STORE_ARCH
- if (!getEmitter()->emitInsIsStore(ins))
+ if (!GetEmitter()->emitInsIsStore(ins))
{
// TODO-LdStArch-Bug: Should regTmp be a dst on the node or an internal reg?
// Either way, it is not currently being handled by Lowering.
regNumber regTmp = tree->gtRegNum;
assert(regTmp != REG_NA);
- getEmitter()->emitIns_R_S(ins_Load(tree->TypeGet()), size, regTmp, varNum, offs);
- getEmitter()->emitIns_R_R(ins, size, regTmp, reg, flags);
- getEmitter()->emitIns_S_R(ins_Store(tree->TypeGet()), size, regTmp, varNum, offs);
+ GetEmitter()->emitIns_R_S(ins_Load(tree->TypeGet()), size, regTmp, varNum, offs);
+ GetEmitter()->emitIns_R_R(ins, size, regTmp, reg, flags);
+ GetEmitter()->emitIns_S_R(ins_Store(tree->TypeGet()), size, regTmp, varNum, offs);
regSet.verifyRegUsed(regTmp);
}
{
// ins is a Store instruction
//
- getEmitter()->emitIns_S_R(ins, size, reg, varNum, offs);
+ GetEmitter()->emitIns_S_R(ins, size, reg, varNum, offs);
#ifdef _TARGET_ARM_
// If we need to set the flags then add an extra movs reg,reg instruction
if (flags == INS_FLAGS_SET)
- getEmitter()->emitIns_R_R(INS_mov, size, reg, reg, INS_FLAGS_SET);
+ GetEmitter()->emitIns_R_R(INS_mov, size, reg, reg, INS_FLAGS_SET);
#endif
}
return;
assert(!isFloatRegType(tree->gtType) || genTypeSize(tree->gtType) == EA_SIZE_IN_BYTES(size));
#if CPU_LOAD_STORE_ARCH
- if (!getEmitter()->emitInsIsStore(ins))
+ if (!GetEmitter()->emitInsIsStore(ins))
{
NYI("Store of GT_CLS_VAR not supported for ARM");
}
else
#endif // CPU_LOAD_STORE_ARCH
{
- getEmitter()->emitIns_C_R(ins, size, tree->gtClsVar.gtClsVarHnd, reg, offs);
+ GetEmitter()->emitIns_C_R(ins, size, tree->gtClsVar.gtClsVarHnd, reg, offs);
}
return;
case INS_ldrsb:
case INS_vldr:
assert(flags != INS_FLAGS_SET);
- getEmitter()->emitIns_R_S(ins, size, reg, varNum, offs);
+ GetEmitter()->emitIns_R_S(ins, size, reg, varNum, offs);
return;
default:
regNumber regTmp;
regTmp = tree->gtRegNum;
- getEmitter()->emitIns_R_S(ins_Load(tree->TypeGet()), size, regTmp, varNum, offs);
- getEmitter()->emitIns_R_R(ins, size, reg, regTmp, flags);
+ GetEmitter()->emitIns_R_S(ins_Load(tree->TypeGet()), size, regTmp, varNum, offs);
+ GetEmitter()->emitIns_R_R(ins, size, reg, regTmp, flags);
regSet.verifyRegUsed(regTmp);
return;
}
#else // !_TARGET_ARM_
- getEmitter()->emitIns_R_S(ins, size, reg, varNum, offs);
+ GetEmitter()->emitIns_R_S(ins, size, reg, varNum, offs);
return;
#endif // !_TARGET_ARM_
#if CPU_LOAD_STORE_ARCH
assert(!"GT_CLS_VAR not supported in ARM backend");
#else // CPU_LOAD_STORE_ARCH
- getEmitter()->emitIns_R_C(ins, size, reg, tree->gtClsVar.gtClsVarHnd, offs);
+ GetEmitter()->emitIns_R_C(ins, size, reg, tree->gtClsVar.gtClsVarHnd, offs);
#endif // CPU_LOAD_STORE_ARCH
return;
if (val >= 32)
val &= 0x1f;
- getEmitter()->emitIns_R_I(ins, size, reg, val, flags);
+ GetEmitter()->emitIns_R_I(ins, size, reg, val, flags);
#elif defined(_TARGET_XARCH_)
if (val == 1)
{
- getEmitter()->emitIns_R(ins, size, reg);
+ GetEmitter()->emitIns_R(ins, size, reg);
}
else
{
- getEmitter()->emitIns_R_I(ins, size, reg, val);
+ GetEmitter()->emitIns_R_I(ins, size, reg, val);
}
#else
ins == INS_cmpps || ins == INS_cmppd || ins == INS_dppd || ins == INS_dpps || ins == INS_insertps ||
ins == INS_roundps || ins == INS_roundss || ins == INS_roundpd || ins == INS_roundsd);
- getEmitter()->emitIns_R_R_I(ins, size, reg1, reg2, ival);
+ GetEmitter()->emitIns_R_R_I(ins, size, reg1, reg2, ival);
}
#ifdef FEATURE_HW_INTRINSICS
//
void CodeGen::inst_RV_TT_IV(instruction ins, emitAttr attr, regNumber reg1, GenTree* rmOp, int ival)
{
- noway_assert(getEmitter()->emitVerifyEncodable(ins, EA_SIZE(attr), reg1));
+ noway_assert(GetEmitter()->emitVerifyEncodable(ins, EA_SIZE(attr), reg1));
if (rmOp->isContained() || rmOp->isUsedFromSpillTemp())
{
case GT_CLS_VAR_ADDR:
{
- getEmitter()->emitIns_R_C_I(ins, attr, reg1, addr->gtClsVar.gtClsVarHnd, 0, ival);
+ GetEmitter()->emitIns_R_C_I(ins, attr, reg1, addr->gtClsVar.gtClsVarHnd, 0, ival);
return;
}
GenTreeIndir load = indirForm(rmOp->TypeGet(), addr);
memIndir = &load;
}
- getEmitter()->emitIns_R_A_I(ins, attr, reg1, memIndir, ival);
+ GetEmitter()->emitIns_R_A_I(ins, attr, reg1, memIndir, ival);
return;
}
}
assert((varNum != BAD_VAR_NUM) || (tmpDsc != nullptr));
assert(offset != (unsigned)-1);
- getEmitter()->emitIns_R_S_I(ins, attr, reg1, varNum, offset, ival);
+ GetEmitter()->emitIns_R_S_I(ins, attr, reg1, varNum, offset, ival);
}
else
{
regNumber rmOpReg = rmOp->gtRegNum;
- getEmitter()->emitIns_SIMD_R_R_I(ins, attr, reg1, rmOpReg, ival);
+ GetEmitter()->emitIns_SIMD_R_R_I(ins, attr, reg1, rmOpReg, ival);
}
}
#endif // FEATURE_HW_INTRINSICS
assert(size != EA_1BYTE || (genRegMask(reg2) & RBM_BYTE_REGS));
#endif
- getEmitter()->emitIns_R_R(ins, size, reg1, reg2);
+ GetEmitter()->emitIns_R_R(ins, size, reg1, reg2);
}
/*****************************************************************************
// inst_ST_RV: Generate a "[spill-temp + ofs] = reg" instruction.
//
// Arguments:
//    ins  - the (store-form) instruction to emit
//    tmp  - the spill temp being written
//    ofs  - byte offset within the spill temp
//    reg  - the source register
//    type - the type being stored; determines the emit attribute size
//
void CodeGen::inst_ST_RV(instruction ins, TempDsc* tmp, unsigned ofs, regNumber reg, var_types type)
{
    GetEmitter()->emitIns_S_R(ins, emitActualTypeSize(type), reg, tmp->tdTempNum(), ofs);
}
// inst_ST_IV: Generate a "[spill-temp + ofs] = icon" instruction.
//
// Arguments:
//    ins  - the (store-form) instruction to emit
//    tmp  - the spill temp being written
//    ofs  - byte offset within the spill temp
//    val  - the immediate value to store
//    type - the type being stored; determines the emit attribute size
//
void CodeGen::inst_ST_IV(instruction ins, TempDsc* tmp, unsigned ofs, int val, var_types type)
{
    GetEmitter()->emitIns_S_I(ins, emitActualTypeSize(type), tmp->tdTempNum(), ofs, val);
}
#if FEATURE_FIXED_OUT_ARGS
{
assert(ofs < compiler->lvaOutgoingArgSpaceSize);
- getEmitter()->emitIns_S_R(ins, emitActualTypeSize(type), reg, compiler->lvaOutgoingArgSpaceVar, ofs);
+ GetEmitter()->emitIns_S_R(ins, emitActualTypeSize(type), reg, compiler->lvaOutgoingArgSpaceVar, ofs);
}
// inst_SA_IV: Generate a "[outgoing-arg-area + ofs] = icon" instruction.
//
// Arguments:
//    ins  - the (store-form) instruction to emit
//    ofs  - byte offset within the outgoing argument space
//    val  - the immediate value to store
//    type - the type being stored; determines the emit attribute size
//
void CodeGen::inst_SA_IV(instruction ins, unsigned ofs, int val, var_types type)
{
    assert(ofs < compiler->lvaOutgoingArgSpaceSize);

    GetEmitter()->emitIns_S_I(ins, emitActualTypeSize(type), compiler->lvaOutgoingArgSpaceVar, ofs, val);
}
#endif // FEATURE_FIXED_OUT_ARGS
case INS_ldrsb:
case INS_lea:
case INS_vldr:
- getEmitter()->emitIns_R_S(ins, size, reg, tmp->tdTempNum(), ofs);
+ GetEmitter()->emitIns_R_S(ins, size, reg, tmp->tdTempNum(), ofs);
break;
default:
break;
}
#else // !_TARGET_ARM_
- getEmitter()->emitIns_R_S(ins, size, reg, tmp->tdTempNum(), ofs);
+ GetEmitter()->emitIns_R_S(ins, size, reg, tmp->tdTempNum(), ofs);
#endif // !_TARGET_ARM_
}
#ifdef _TARGET_XARCH_
// inst_FS_ST: Generate an instruction referencing a spill temp as its sole
// stack operand (x87 floating-point store form).
//
// Arguments:
//    ins  - the instruction to emit
//    size - the emit attribute size
//    tmp  - the spill temp being referenced
//    ofs  - byte offset within the spill temp
//
void CodeGen::inst_FS_ST(instruction ins, emitAttr size, TempDsc* tmp, unsigned ofs)
{
    GetEmitter()->emitIns_S(ins, size, tmp->tdTempNum(), ofs);
}
#endif
#ifdef _TARGET_ARM_
bool CodeGenInterface::validImmForInstr(instruction ins, target_ssize_t imm, insFlags flags)
{
- if (getEmitter()->emitInsIsLoadOrStore(ins) && !instIsFP(ins))
+ if (GetEmitter()->emitInsIsLoadOrStore(ins) && !instIsFP(ins))
{
return validDispForLdSt(imm, TYP_INT);
}
#if defined(_TARGET_XARCH_)
instGen(INS_lock);
- getEmitter()->emitIns_I_AR(INS_or, EA_4BYTE, 0, REG_SPBASE, 0);
+ GetEmitter()->emitIns_I_AR(INS_or, EA_4BYTE, 0, REG_SPBASE, 0);
#elif defined(_TARGET_ARM_)
- getEmitter()->emitIns_I(INS_dmb, EA_4BYTE, 0xf);
+ GetEmitter()->emitIns_I(INS_dmb, EA_4BYTE, 0xf);
#elif defined(_TARGET_ARM64_)
- getEmitter()->emitIns_BARR(INS_dmb, barrierType);
+ GetEmitter()->emitIns_BARR(INS_dmb, barrierType);
#else
#error "Unknown _TARGET_"
#endif
void CodeGen::instGen_Set_Reg_To_Zero(emitAttr size, regNumber reg, insFlags flags)
{
#if defined(_TARGET_XARCH_)
- getEmitter()->emitIns_R_R(INS_xor, size, reg, reg);
+ GetEmitter()->emitIns_R_R(INS_xor, size, reg, reg);
#elif defined(_TARGET_ARMARCH_)
- getEmitter()->emitIns_R_I(INS_mov, size, reg, 0 ARM_ARG(flags));
+ GetEmitter()->emitIns_R_I(INS_mov, size, reg, 0 ARM_ARG(flags));
#else
#error "Unknown _TARGET_"
#endif
void CodeGen::instGen_Compare_Reg_To_Zero(emitAttr size, regNumber reg)
{
#if defined(_TARGET_XARCH_)
- getEmitter()->emitIns_R_R(INS_test, size, reg, reg);
+ GetEmitter()->emitIns_R_R(INS_test, size, reg, reg);
#elif defined(_TARGET_ARMARCH_)
- getEmitter()->emitIns_R_I(INS_cmp, size, reg, 0);
+ GetEmitter()->emitIns_R_I(INS_cmp, size, reg, 0);
#else
#error "Unknown _TARGET_"
#endif
void CodeGen::instGen_Compare_Reg_To_Reg(emitAttr size, regNumber reg1, regNumber reg2)
{
#if defined(_TARGET_XARCH_) || defined(_TARGET_ARMARCH_)
- getEmitter()->emitIns_R_R(INS_cmp, size, reg1, reg2);
+ GetEmitter()->emitIns_R_R(INS_cmp, size, reg1, reg2);
#else
#error "Unknown _TARGET_"
#endif
else
#endif // _TARGET_AMD64_
{
- getEmitter()->emitIns_R_I(INS_cmp, size, reg, imm);
+ GetEmitter()->emitIns_R_I(INS_cmp, size, reg, imm);
}
#elif defined(_TARGET_ARM_)
if (arm_Valid_Imm_For_Alu(imm) || arm_Valid_Imm_For_Alu(-imm))
{
- getEmitter()->emitIns_R_I(INS_cmp, size, reg, imm);
+ GetEmitter()->emitIns_R_I(INS_cmp, size, reg, imm);
}
else // We need a scratch register
{
#elif defined(_TARGET_ARM64_)
if (true) // TODO-ARM64-NYI: arm_Valid_Imm_For_Alu(imm) || arm_Valid_Imm_For_Alu(-imm))
{
- getEmitter()->emitIns_R_I(INS_cmp, size, reg, imm);
+ GetEmitter()->emitIns_R_I(INS_cmp, size, reg, imm);
}
else // We need a scratch register
{
{
emitAttr size = emitTypeSize(srcType);
- getEmitter()->emitIns_R_S(ins_Load(srcType), size, dstReg, varNum, offs);
+ GetEmitter()->emitIns_R_S(ins_Load(srcType), size, dstReg, varNum, offs);
}
/*****************************************************************************
{
emitAttr size = emitTypeSize(dstType);
- getEmitter()->emitIns_S_R(ins_Store(dstType), size, srcReg, varNum, offs);
+ GetEmitter()->emitIns_S_R(ins_Store(dstType), size, srcReg, varNum, offs);
}
/*****************************************************************************/
// ehCodeOffset: Return the native code offset of the start of the given
// basic block, for use in reporting EH clause boundaries.
//
// Arguments:
//    block - the basic block whose emitted code offset is requested
//
// Return Value:
//    The native offset of the block's code, as recorded by the emitter
//    via the block's EH emit cookie.
//
UNATIVE_OFFSET Compiler::ehCodeOffset(BasicBlock* block)
{
    return GetEmitter()->emitCodeOffset(ehEmitCookie(block), 0);
}
/****************************************************************************/
if (opts.dspDiffable)
{
/* (( brace matching editor workaround to compensate for the following line */
- printf("EH#%u: try [%s..%s) handled by [%s..%s) ", num, genEmitter->emitOffsetToLabel(clause.TryOffset),
- genEmitter->emitOffsetToLabel(clause.TryLength), genEmitter->emitOffsetToLabel(clause.HandlerOffset),
- genEmitter->emitOffsetToLabel(clause.HandlerLength));
+ printf("EH#%u: try [%s..%s) handled by [%s..%s) ", num, GetEmitter()->emitOffsetToLabel(clause.TryOffset),
+ GetEmitter()->emitOffsetToLabel(clause.TryLength), GetEmitter()->emitOffsetToLabel(clause.HandlerOffset),
+ GetEmitter()->emitOffsetToLabel(clause.HandlerLength));
}
else
{
if (opts.dspDiffable)
{
/* ( brace matching editor workaround to compensate for the following line */
- printf("filter at [%s..%s)", genEmitter->emitOffsetToLabel(clause.ClassToken),
- genEmitter->emitOffsetToLabel(clause.HandlerOffset));
+ printf("filter at [%s..%s)", GetEmitter()->emitOffsetToLabel(clause.ClassToken),
+ GetEmitter()->emitOffsetToLabel(clause.HandlerOffset));
}
else
{
}
CORINFO_FIELD_HANDLE hnd =
- comp->getEmitter()->emitAnyConst(constArgValues, sizeof(constArgValues), emitDataAlignment::Required);
+ comp->GetEmitter()->emitAnyConst(constArgValues, sizeof(constArgValues), emitDataAlignment::Required);
GenTree* clsVarAddr = new (comp, GT_CLS_VAR_ADDR) GenTreeClsVar(GT_CLS_VAR_ADDR, TYP_I_IMPL, hnd, nullptr);
BlockRange().InsertBefore(simdNode, clsVarAddr);
simdNode->ChangeOper(GT_IND);
{
if (compiler->canUseVexEncoding())
{
- compiler->getEmitter()->SetContainsAVX(true);
+ compiler->GetEmitter()->SetContainsAVX(true);
if (sizeOfSIMDVector == 32)
{
- compiler->getEmitter()->SetContains256bitAVX(true);
+ compiler->GetEmitter()->SetContains256bitAVX(true);
}
}
}
else
{
printRegMaskInt(_rsMaskVars);
- m_rsCompiler->getEmitter()->emitDispRegSet(_rsMaskVars);
+ m_rsCompiler->GetEmitter()->emitDispRegSet(_rsMaskVars);
printf(" => ");
}
printRegMaskInt(newMaskVars);
- m_rsCompiler->getEmitter()->emitDispRegSet(newMaskVars);
+ m_rsCompiler->GetEmitter()->emitDispRegSet(newMaskVars);
printf("\n");
}
#endif // DEBUG
printf("\n");
#endif
- m_rsCompiler->codeGen->getEmitter()->emitIns_S(INS_fstp, emitActualTypeSize(treeType), temp->tdTempNum(), 0);
+ m_rsCompiler->codeGen->GetEmitter()->emitIns_S(INS_fstp, emitActualTypeSize(treeType), temp->tdTempNum(), 0);
/* Mark the tree node as having been spilled */
siScope* newScope = compiler->getAllocator(CMK_SiScope).allocate<siScope>(1);
- newScope->scStartLoc.CaptureLocation(getEmitter());
+ newScope->scStartLoc.CaptureLocation(GetEmitter());
assert(newScope->scStartLoc.Valid());
newScope->scEndLoc.Init();
return;
}
- scope->scEndLoc.CaptureLocation(getEmitter());
+ scope->scEndLoc.CaptureLocation(GetEmitter());
assert(scope->scEndLoc.Valid());
siRemoveFromOpenScopeList(scope);
void CodeGen::siEndScope(siScope* scope)
{
- scope->scEndLoc.CaptureLocation(getEmitter());
+ scope->scEndLoc.CaptureLocation(GetEmitter());
assert(scope->scEndLoc.Valid());
siRemoveFromOpenScopeList(scope);
{
psiScope* newScope = compiler->getAllocator(CMK_SiScope).allocate<psiScope>(1);
- newScope->scStartLoc.CaptureLocation(getEmitter());
+ newScope->scStartLoc.CaptureLocation(GetEmitter());
assert(newScope->scStartLoc.Valid());
newScope->scEndLoc.Init();
void CodeGen::psiEndPrologScope(psiScope* scope)
{
- scope->scEndLoc.CaptureLocation(getEmitter());
+ scope->scEndLoc.CaptureLocation(GetEmitter());
assert(scope->scEndLoc.Valid());
// Remove from open-scope list
if (srcReg != targetReg)
{
instruction ins = ins_Store(baseType);
- if (getEmitter()->IsDstSrcSrcAVXInstruction(ins))
+ if (GetEmitter()->IsDstSrcSrcAVXInstruction(ins))
{
// In general, when we use a three-operands move instruction, we want to merge the src with
// itself. This is an exception in that we actually want the "merge" behavior, so we must
// There is no guarantee that upper bits of op1Reg are zero.
// We achieve this by using left logical shift 12-bytes and right logical shift 12 bytes.
instruction ins = getOpForSIMDIntrinsic(SIMDIntrinsicShiftLeftInternal, TYP_SIMD16);
- getEmitter()->emitIns_R_I(ins, EA_16BYTE, srcReg, 12);
+ GetEmitter()->emitIns_R_I(ins, EA_16BYTE, srcReg, 12);
ins = getOpForSIMDIntrinsic(SIMDIntrinsicShiftRightInternal, TYP_SIMD16);
- getEmitter()->emitIns_R_I(ins, EA_16BYTE, srcReg, 12);
+ GetEmitter()->emitIns_R_I(ins, EA_16BYTE, srcReg, 12);
}
else
{
if (srcReg != targetReg)
{
instruction ins = ins_Copy(baseType);
- assert(!getEmitter()->IsDstSrcSrcAVXInstruction(ins));
+ assert(!GetEmitter()->IsDstSrcSrcAVXInstruction(ins));
inst_RV_RV(ins, targetReg, srcReg, baseType, emitTypeSize(baseType));
}
break;
inst_RV_RV(ins, tmpReg, op1hiReg, TYP_INT, emitTypeSize(TYP_INT));
ins = getOpForSIMDIntrinsic(SIMDIntrinsicShiftLeftInternal, TYP_SIMD16);
- getEmitter()->emitIns_R_I(ins, EA_16BYTE, tmpReg, 4); // shift left by 4 bytes
+ GetEmitter()->emitIns_R_I(ins, EA_16BYTE, tmpReg, 4); // shift left by 4 bytes
ins = getOpForSIMDIntrinsic(SIMDIntrinsicBitwiseOr, baseType);
inst_RV_RV(ins, targetReg, tmpReg, targetType, emitActualTypeSize(targetType));
else
{
ins = getOpForSIMDIntrinsic(SIMDIntrinsicShuffleSSE2, baseType);
- getEmitter()->emitIns_R_R_I(ins, emitActualTypeSize(targetType), targetReg, targetReg, 0);
+ GetEmitter()->emitIns_R_R_I(ins, emitActualTypeSize(targetType), targetReg, targetReg, 0);
}
}
}
ins = getOpForSIMDIntrinsic(SIMDIntrinsicInit, baseType);
if (op1->IsCnsFltOrDbl())
{
- getEmitter()->emitInsBinary(ins, emitTypeSize(targetType), simdNode, op1);
+ GetEmitter()->emitInsBinary(ins, emitTypeSize(targetType), simdNode, op1);
}
else if (op1->OperIsLocalAddr())
{
unsigned offset = (op1->OperGet() == GT_LCL_FLD_ADDR) ? op1->gtLclFld.gtLclOffs : 0;
- getEmitter()->emitIns_R_S(ins, emitTypeSize(targetType), targetReg, op1->gtLclVarCommon.gtLclNum,
+ GetEmitter()->emitIns_R_S(ins, emitTypeSize(targetType), targetReg, op1->gtLclVarCommon.gtLclNum,
offset);
}
else
}
ins = getOpForSIMDIntrinsic(simdNode->gtSIMDIntrinsicID, baseType);
- getEmitter()->emitIns_R_R(ins, emitActualTypeSize(targetType), targetReg, srcReg);
+ GetEmitter()->emitIns_R_R(ins, emitActualTypeSize(targetType), targetReg, srcReg);
}
else
{
ins = getOpForSIMDIntrinsic(SIMDIntrinsicShuffleSSE2, baseType);
assert((shuffleControl >= 0) && (shuffleControl <= 255));
- getEmitter()->emitIns_R_R_I(ins, emitActualTypeSize(targetType), targetReg, targetReg, (int8_t)shuffleControl);
+ GetEmitter()->emitIns_R_R_I(ins, emitActualTypeSize(targetType), targetReg, targetReg, (int8_t)shuffleControl);
}
genProduceReg(simdNode);
if (offset != 0)
{
assert((baseTypeSize >= 0) && (baseTypeSize <= 255));
- getEmitter()->emitIns_R_I(insLeftShift, EA_16BYTE, vectorReg, (int8_t)baseTypeSize);
+ GetEmitter()->emitIns_R_I(insLeftShift, EA_16BYTE, vectorReg, (int8_t)baseTypeSize);
}
genSIMDScalarMove(targetType, baseType, vectorReg, operandReg, SMT_PreserveUpper);
}
// prepare upper 16 bits
- getEmitter()->emitIns_R_I(INS_psrld, emitActualTypeSize(targetType), targetReg, 16);
+ GetEmitter()->emitIns_R_I(INS_psrld, emitActualTypeSize(targetType), targetReg, 16);
// prepare lower 16 bits
- getEmitter()->emitIns_R_I(INS_pslld, emitActualTypeSize(targetType), tmpReg2, 16);
- getEmitter()->emitIns_R_I(INS_psrld, emitActualTypeSize(targetType), tmpReg2, 16);
+ GetEmitter()->emitIns_R_I(INS_pslld, emitActualTypeSize(targetType), tmpReg2, 16);
+ GetEmitter()->emitIns_R_I(INS_psrld, emitActualTypeSize(targetType), tmpReg2, 16);
// prepare mask
#ifdef _TARGET_AMD64_
- getEmitter()->emitIns_R_I(INS_mov, EA_8BYTE, tmpIntReg, (ssize_t)0X5300000053000000);
+ GetEmitter()->emitIns_R_I(INS_mov, EA_8BYTE, tmpIntReg, (ssize_t)0X5300000053000000);
inst_RV_RV(INS_mov_i2xmm, tmpReg, tmpIntReg, TYP_ULONG);
#else
if (compiler->getSIMDSupportLevel() == SIMD_AVX2_Supported)
{
- getEmitter()->emitIns_R_I(INS_mov, EA_4BYTE, tmpIntReg, (ssize_t)0X53000000);
+ GetEmitter()->emitIns_R_I(INS_mov, EA_4BYTE, tmpIntReg, (ssize_t)0X53000000);
inst_RV_RV(INS_mov_i2xmm, tmpReg, tmpIntReg, TYP_UINT);
}
else
{
- getEmitter()->emitIns_R_I(INS_mov, EA_4BYTE, tmpIntReg, (ssize_t)0X00005300);
+ GetEmitter()->emitIns_R_I(INS_mov, EA_4BYTE, tmpIntReg, (ssize_t)0X00005300);
inst_RV_RV(INS_pxor, tmpReg, tmpReg, targetType, emitActualTypeSize(targetType));
- getEmitter()->emitIns_R_R_I(INS_pinsrw, emitTypeSize(TYP_INT), tmpReg, tmpIntReg, 1);
- getEmitter()->emitIns_R_R_I(INS_pinsrw, emitTypeSize(TYP_INT), tmpReg, tmpIntReg, 3);
+ GetEmitter()->emitIns_R_R_I(INS_pinsrw, emitTypeSize(TYP_INT), tmpReg, tmpIntReg, 1);
+ GetEmitter()->emitIns_R_R_I(INS_pinsrw, emitTypeSize(TYP_INT), tmpReg, tmpIntReg, 3);
}
#endif
if (compiler->getSIMDSupportLevel() == SIMD_AVX2_Supported)
}
// prepare upper 32 bits
- getEmitter()->emitIns_R_I(INS_psrlq, emitActualTypeSize(simdType), targetReg, 32);
+ GetEmitter()->emitIns_R_I(INS_psrlq, emitActualTypeSize(simdType), targetReg, 32);
// prepare lower 32 bits
- getEmitter()->emitIns_R_I(INS_psllq, emitActualTypeSize(simdType), tmpReg2, 32);
- getEmitter()->emitIns_R_I(INS_psrlq, emitActualTypeSize(simdType), tmpReg2, 32);
+ GetEmitter()->emitIns_R_I(INS_psllq, emitActualTypeSize(simdType), tmpReg2, 32);
+ GetEmitter()->emitIns_R_I(INS_psrlq, emitActualTypeSize(simdType), tmpReg2, 32);
// prepare mask for converting upper 32 bits
#ifdef _TARGET_AMD64_
- getEmitter()->emitIns_R_I(INS_mov, EA_8BYTE, tmpIntReg, (ssize_t)0X4530000000000000);
+ GetEmitter()->emitIns_R_I(INS_mov, EA_8BYTE, tmpIntReg, (ssize_t)0X4530000000000000);
inst_RV_RV(INS_mov_i2xmm, tmpReg, tmpIntReg, TYP_ULONG);
#else
- getEmitter()->emitIns_R_I(INS_mov, EA_4BYTE, tmpIntReg, (ssize_t)0X45300000);
+ GetEmitter()->emitIns_R_I(INS_mov, EA_4BYTE, tmpIntReg, (ssize_t)0X45300000);
inst_RV_RV(INS_mov_i2xmm, tmpReg, tmpIntReg, TYP_UINT);
- getEmitter()->emitIns_R_I(INS_pslldq, EA_16BYTE, tmpReg, 4);
+ GetEmitter()->emitIns_R_I(INS_pslldq, EA_16BYTE, tmpReg, 4);
#endif
if (level == SIMD_AVX2_Supported)
{
// prepare mask for converting lower 32 bits
#ifdef _TARGET_AMD64_
- getEmitter()->emitIns_R_I(INS_mov, EA_8BYTE, tmpIntReg, (ssize_t)0X4330000000000000);
+ GetEmitter()->emitIns_R_I(INS_mov, EA_8BYTE, tmpIntReg, (ssize_t)0X4330000000000000);
inst_RV_RV(INS_mov_i2xmm, tmpReg, tmpIntReg, TYP_ULONG);
#else
- getEmitter()->emitIns_R_I(INS_mov, EA_4BYTE, tmpIntReg, (ssize_t)0X43300000);
+ GetEmitter()->emitIns_R_I(INS_mov, EA_4BYTE, tmpIntReg, (ssize_t)0X43300000);
inst_RV_RV(INS_mov_i2xmm, tmpReg, tmpIntReg, TYP_UINT);
- getEmitter()->emitIns_R_I(INS_pslldq, EA_16BYTE, tmpReg, 4);
+ GetEmitter()->emitIns_R_I(INS_pslldq, EA_16BYTE, tmpReg, 4);
#endif
if (level == SIMD_AVX2_Supported)
{
if (level == SIMD_AVX2_Supported)
{
// Extract the high 16-bits
- getEmitter()->emitIns_R_R_I(INS_vextracti128, EA_32BYTE, tmpReg, op1Reg, 0x01);
+ GetEmitter()->emitIns_R_R_I(INS_vextracti128, EA_32BYTE, tmpReg, op1Reg, 0x01);
// Put v[3] (the high-order element) in tmpReg2 and convert it.
inst_RV_RV(ins_Copy(simdType), tmpReg2, tmpReg, simdType, emitActualTypeSize(simdType));
- getEmitter()->emitIns_R_I(rightShiftIns, emitActualTypeSize(simdType), tmpReg2, 8);
+ GetEmitter()->emitIns_R_I(rightShiftIns, emitActualTypeSize(simdType), tmpReg2, 8);
genSIMDLo64BitConvert(intrinsicID, simdType, baseType, tmpReg2, tmpIntReg, tmpReg2);
// Shift the resulting 64-bits left.
- getEmitter()->emitIns_R_I(leftShiftIns, emitActualTypeSize(simdType), tmpReg2, 8);
+ GetEmitter()->emitIns_R_I(leftShiftIns, emitActualTypeSize(simdType), tmpReg2, 8);
// Convert v[2], in the lo bits of tmpReg.
// For the convert to double, the convert preserves the upper bits in tmpReg2.
// Put v[1] in tmpReg.
inst_RV_RV(ins_Copy(simdType), tmpReg, op1Reg, simdType, emitActualTypeSize(simdType));
- getEmitter()->emitIns_R_I(rightShiftIns, emitActualTypeSize(simdType), tmpReg, 8);
+ GetEmitter()->emitIns_R_I(rightShiftIns, emitActualTypeSize(simdType), tmpReg, 8);
// At this point we have v[1] in the low-order 64-bits of tmpReg. Convert it.
genSIMDLo64BitConvert(intrinsicID, simdType, baseType, tmpReg, tmpIntReg, tmpReg);
// Shift the resulting 64-bits left.
- getEmitter()->emitIns_R_I(leftShiftIns, emitActualTypeSize(simdType), tmpReg, 8);
+ GetEmitter()->emitIns_R_I(leftShiftIns, emitActualTypeSize(simdType), tmpReg, 8);
// Convert the lo 64-bits into targetReg
genSIMDLo64BitConvert(intrinsicID, simdType, baseType, op1Reg, tmpIntReg, tmpReg);
if (level == SIMD_AVX2_Supported)
{
- getEmitter()->emitIns_R_R_I(INS_vinsertf128, EA_32BYTE, targetReg, tmpReg2, 0x01);
+ GetEmitter()->emitIns_R_R_I(INS_vinsertf128, EA_32BYTE, targetReg, tmpReg2, 0x01);
}
#else
// get the sign bit and put it in tmpReg3
inst_RV_RV(INS_movdqu, tmpReg3, op1Reg, baseType, emitActualTypeSize(simdType));
- getEmitter()->emitIns_R_I(INS_psrlq, emitActualTypeSize(simdType), tmpReg3, 63);
- getEmitter()->emitIns_R_I(INS_psllq, emitActualTypeSize(simdType), tmpReg3, 63);
+ GetEmitter()->emitIns_R_I(INS_psrlq, emitActualTypeSize(simdType), tmpReg3, 63);
+ GetEmitter()->emitIns_R_I(INS_psllq, emitActualTypeSize(simdType), tmpReg3, 63);
// get the absolute value of src and put it into tmpReg2 and targetReg
inst_RV_RV(INS_movdqu, tmpReg2, op1Reg, baseType, emitActualTypeSize(simdType));
- getEmitter()->emitIns_R_R_I(INS_pshufd, emitActualTypeSize(simdType), tmpReg, op1Reg, (int8_t)SHUFFLE_WWYY);
- getEmitter()->emitIns_R_I(INS_psrad, emitActualTypeSize(simdType), tmpReg, 32);
+ GetEmitter()->emitIns_R_R_I(INS_pshufd, emitActualTypeSize(simdType), tmpReg, op1Reg, (int8_t)SHUFFLE_WWYY);
+ GetEmitter()->emitIns_R_I(INS_psrad, emitActualTypeSize(simdType), tmpReg, 32);
inst_RV_RV(INS_pxor, tmpReg2, tmpReg, baseType, emitActualTypeSize(simdType));
inst_RV_RV(INS_psubq, tmpReg2, tmpReg, baseType, emitActualTypeSize(simdType));
inst_RV_RV(INS_movdqu, targetReg, tmpReg2, baseType, emitActualTypeSize(simdType));
// prepare upper 32 bits
- getEmitter()->emitIns_R_I(INS_psrlq, emitActualTypeSize(simdType), targetReg, 32);
+ GetEmitter()->emitIns_R_I(INS_psrlq, emitActualTypeSize(simdType), targetReg, 32);
// prepare lower 32 bits
- getEmitter()->emitIns_R_I(INS_psllq, emitActualTypeSize(simdType), tmpReg2, 32);
- getEmitter()->emitIns_R_I(INS_psrlq, emitActualTypeSize(simdType), tmpReg2, 32);
+ GetEmitter()->emitIns_R_I(INS_psllq, emitActualTypeSize(simdType), tmpReg2, 32);
+ GetEmitter()->emitIns_R_I(INS_psrlq, emitActualTypeSize(simdType), tmpReg2, 32);
// prepare mask for converting upper 32 bits
- getEmitter()->emitIns_R_I(INS_mov, EA_4BYTE, tmpIntReg, (ssize_t)0X45300000);
+ GetEmitter()->emitIns_R_I(INS_mov, EA_4BYTE, tmpIntReg, (ssize_t)0X45300000);
inst_RV_RV(INS_mov_i2xmm, tmpReg, tmpIntReg, TYP_UINT);
- getEmitter()->emitIns_R_I(INS_pslldq, EA_16BYTE, tmpReg, 4);
+ GetEmitter()->emitIns_R_I(INS_pslldq, EA_16BYTE, tmpReg, 4);
if (level == SIMD_AVX2_Supported)
{
inst_RV_RV(INS_subpd, targetReg, tmpReg, simdType, emitActualTypeSize(simdType));
// prepare mask for converting lower 32 bits
- getEmitter()->emitIns_R_I(INS_mov, EA_4BYTE, tmpIntReg, (ssize_t)0X43300000);
+ GetEmitter()->emitIns_R_I(INS_mov, EA_4BYTE, tmpIntReg, (ssize_t)0X43300000);
inst_RV_RV(INS_mov_i2xmm, tmpReg, tmpIntReg, TYP_UINT);
- getEmitter()->emitIns_R_I(INS_pslldq, EA_16BYTE, tmpReg, 4);
+ GetEmitter()->emitIns_R_I(INS_pslldq, EA_16BYTE, tmpReg, 4);
if (level == SIMD_AVX2_Supported)
{
if (level == SIMD_AVX2_Supported)
{
// Extract the high 16-bits
- getEmitter()->emitIns_R_R_I(INS_vextractf128, EA_32BYTE, tmpReg, op1Reg, 0x01);
+ GetEmitter()->emitIns_R_R_I(INS_vextractf128, EA_32BYTE, tmpReg, op1Reg, 0x01);
// Put v[3] (the high-order element) in tmpReg2 and convert it.
inst_RV_RV(ins_Copy(simdType), tmpReg2, tmpReg, simdType, emitActualTypeSize(simdType));
- getEmitter()->emitIns_R_I(rightShiftIns, emitActualTypeSize(simdType), tmpReg2, 8);
+ GetEmitter()->emitIns_R_I(rightShiftIns, emitActualTypeSize(simdType), tmpReg2, 8);
genSIMDLo64BitConvert(intrinsicID, simdType, baseType, tmpReg2, tmpIntReg, tmpReg2);
// Shift the resulting 64-bits left.
- getEmitter()->emitIns_R_I(leftShiftIns, emitActualTypeSize(simdType), tmpReg2, 8);
+ GetEmitter()->emitIns_R_I(leftShiftIns, emitActualTypeSize(simdType), tmpReg2, 8);
// Convert v[2], in the lo bits of tmpReg.
// For the convert to double, the convert preserves the upper bits in tmpReg2.
// Put v[1] in tmpReg.
inst_RV_RV(ins_Copy(simdType), tmpReg, op1Reg, simdType, emitActualTypeSize(simdType));
- getEmitter()->emitIns_R_I(rightShiftIns, emitActualTypeSize(simdType), tmpReg, 8);
+ GetEmitter()->emitIns_R_I(rightShiftIns, emitActualTypeSize(simdType), tmpReg, 8);
// At this point we have v[1] in the low-order 64-bits of tmpReg. Convert it.
genSIMDLo64BitConvert(intrinsicID, simdType, baseType, tmpReg, tmpIntReg, tmpReg);
// Shift the resulting 64-bits left.
- getEmitter()->emitIns_R_I(leftShiftIns, emitActualTypeSize(simdType), tmpReg, 8);
+ GetEmitter()->emitIns_R_I(leftShiftIns, emitActualTypeSize(simdType), tmpReg, 8);
// Convert the lo 64-bits into targetReg
genSIMDLo64BitConvert(intrinsicID, simdType, baseType, op1Reg, tmpIntReg, targetReg);
inst_RV_RV(INS_por, targetReg, tmpReg, simdType, emitActualTypeSize(simdType));
if (level == SIMD_AVX2_Supported)
{
- getEmitter()->emitIns_R_R_I(INS_vinserti128, EA_32BYTE, targetReg, tmpReg2, 0x01);
+ GetEmitter()->emitIns_R_R_I(INS_vinserti128, EA_32BYTE, targetReg, tmpReg2, 0x01);
}
}
genProduceReg(simdNode);
if (compiler->getSIMDSupportLevel() == SIMD_AVX2_Supported)
{
instruction extractIns = varTypeIsFloating(simdNode->gtSIMDBaseType) ? INS_vextractf128 : INS_vextracti128;
- getEmitter()->emitIns_R_R_I(extractIns, EA_32BYTE, tgtReg, srcReg, 0x01);
+ GetEmitter()->emitIns_R_R_I(extractIns, EA_32BYTE, tgtReg, srcReg, 0x01);
}
else
{
{
inst_RV_RV(ins_Copy(simdType), tgtReg, srcReg, simdType, emitSize);
}
- getEmitter()->emitIns_R_I(shiftIns, emitSize, tgtReg, 8);
+ GetEmitter()->emitIns_R_I(shiftIns, emitSize, tgtReg, 8);
}
}
ival = 0xe8;
}
assert((ival >= 0) && (ival <= 255));
- getEmitter()->emitIns_R_R_I(INS_vpermq, emitSize, targetReg, op1Reg, (int8_t)ival);
+ GetEmitter()->emitIns_R_R_I(INS_vpermq, emitSize, targetReg, op1Reg, (int8_t)ival);
}
else if (targetReg != op1Reg)
{
// Now insert the high-order result (in tmpReg) into the upper half of targetReg.
if (level == SIMD_AVX2_Supported)
{
- getEmitter()->emitIns_R_R_I(INS_vinsertf128, EA_32BYTE, targetReg, tmpReg, 0x01);
+ GetEmitter()->emitIns_R_R_I(INS_vinsertf128, EA_32BYTE, targetReg, tmpReg, 0x01);
}
else
{
// punpcklqdq tgtReg, tmpReg
regNumber tmpReg = simdNode->ExtractTempReg(RBM_ALLFLOAT);
regNumber tmpReg2 = simdNode->GetSingleTempReg(RBM_ALLFLOAT);
- getEmitter()->emitIns_R_R_I(INS_vextracti128, EA_32BYTE, tmpReg, op1Reg, 0x01);
- getEmitter()->emitIns_R_R_I(INS_vextracti128, EA_32BYTE, tmpReg2, op2Reg, 0x01);
- getEmitter()->emitIns_R_R_I(INS_vinserti128, EA_32BYTE, tmpReg, tmpReg2, 0x01);
+ GetEmitter()->emitIns_R_R_I(INS_vextracti128, EA_32BYTE, tmpReg, op1Reg, 0x01);
+ GetEmitter()->emitIns_R_R_I(INS_vextracti128, EA_32BYTE, tmpReg2, op2Reg, 0x01);
+ GetEmitter()->emitIns_R_R_I(INS_vinserti128, EA_32BYTE, tmpReg, tmpReg2, 0x01);
inst_RV_RV(ins_Copy(simdType), tmpReg2, op1Reg, simdType, emitSize);
- getEmitter()->emitIns_R_R_I(INS_vinserti128, EA_32BYTE, tmpReg2, op2Reg, 0x01);
- getEmitter()->emitIns_R_R_I(INS_pshufd, emitSize, tmpReg, tmpReg, (int8_t)SHUFFLE_XXZX);
- getEmitter()->emitIns_R_R_I(INS_pshufd, emitSize, targetReg, tmpReg2, (int8_t)SHUFFLE_XXZX);
+ GetEmitter()->emitIns_R_R_I(INS_vinserti128, EA_32BYTE, tmpReg2, op2Reg, 0x01);
+ GetEmitter()->emitIns_R_R_I(INS_pshufd, emitSize, tmpReg, tmpReg, (int8_t)SHUFFLE_XXZX);
+ GetEmitter()->emitIns_R_R_I(INS_pshufd, emitSize, targetReg, tmpReg2, (int8_t)SHUFFLE_XXZX);
inst_RV_RV_RV(INS_punpcklqdq, targetReg, targetReg, tmpReg, emitSize);
}
else
instruction shiftRightIns = getOpForSIMDIntrinsic(SIMDIntrinsicShiftRightInternal, TYP_SIMD16);
emitAttr emitSize = emitTypeSize(simdType);
- getEmitter()->emitIns_R_R_I(INS_pshufd, emitSize, targetReg, op1Reg, (int8_t)SHUFFLE_ZXXX);
- getEmitter()->emitIns_R_I(shiftRightIns, emitSize, targetReg, 8);
- getEmitter()->emitIns_R_R_I(INS_pshufd, emitSize, tmpReg, op2Reg, (int8_t)SHUFFLE_XXZX);
- getEmitter()->emitIns_R_I(shiftLeftIns, emitSize, tmpReg, 8);
+ GetEmitter()->emitIns_R_R_I(INS_pshufd, emitSize, targetReg, op1Reg, (int8_t)SHUFFLE_ZXXX);
+ GetEmitter()->emitIns_R_I(shiftRightIns, emitSize, targetReg, 8);
+ GetEmitter()->emitIns_R_R_I(INS_pshufd, emitSize, tmpReg, op2Reg, (int8_t)SHUFFLE_XXZX);
+ GetEmitter()->emitIns_R_I(shiftLeftIns, emitSize, tmpReg, 8);
inst_RV_RV(INS_por, targetReg, tmpReg, simdType);
}
}
// The AVX instructions generally operate on "lanes", so we have to permute the
// inputs so that the destination register has the low 128-bit halves of the two
// inputs, and 'tmpReg' has the high 128-bit halves of the two inputs.
- getEmitter()->emitIns_R_R_R_I(INS_vperm2i128, emitSize, tmpReg2, op1Reg, op2Reg, 0x20);
- getEmitter()->emitIns_R_R_R_I(INS_vperm2i128, emitSize, tmpReg, op1Reg, op2Reg, 0x31);
- getEmitter()->emitIns_R_I(shiftLeftIns, emitSize, tmpReg2, shiftCount);
- getEmitter()->emitIns_R_I(shiftRightIns, emitSize, tmpReg2, shiftCount);
- getEmitter()->emitIns_R_I(shiftLeftIns, emitSize, tmpReg, shiftCount);
- getEmitter()->emitIns_R_I(shiftRightIns, emitSize, tmpReg, shiftCount);
+ GetEmitter()->emitIns_R_R_R_I(INS_vperm2i128, emitSize, tmpReg2, op1Reg, op2Reg, 0x20);
+ GetEmitter()->emitIns_R_R_R_I(INS_vperm2i128, emitSize, tmpReg, op1Reg, op2Reg, 0x31);
+ GetEmitter()->emitIns_R_I(shiftLeftIns, emitSize, tmpReg2, shiftCount);
+ GetEmitter()->emitIns_R_I(shiftRightIns, emitSize, tmpReg2, shiftCount);
+ GetEmitter()->emitIns_R_I(shiftLeftIns, emitSize, tmpReg, shiftCount);
+ GetEmitter()->emitIns_R_I(shiftRightIns, emitSize, tmpReg, shiftCount);
inst_RV_RV_RV(ins, targetReg, tmpReg2, tmpReg, emitActualTypeSize(simdType));
}
else
tmpShiftRight = INS_psrad;
}
- getEmitter()->emitIns_R_I(shiftLeftIns, emitSize, targetReg, shiftCount);
- getEmitter()->emitIns_R_I(tmpShiftRight, emitSize, targetReg, shiftCount);
- getEmitter()->emitIns_R_I(shiftLeftIns, emitSize, tmpReg, shiftCount);
- getEmitter()->emitIns_R_I(tmpShiftRight, emitSize, tmpReg, shiftCount);
+ GetEmitter()->emitIns_R_I(shiftLeftIns, emitSize, targetReg, shiftCount);
+ GetEmitter()->emitIns_R_I(tmpShiftRight, emitSize, targetReg, shiftCount);
+ GetEmitter()->emitIns_R_I(shiftLeftIns, emitSize, tmpReg, shiftCount);
+ GetEmitter()->emitIns_R_I(tmpShiftRight, emitSize, tmpReg, shiftCount);
inst_RV_RV(ins, targetReg, tmpReg, simdType);
}
}
// Now we can generate the code.
// targetReg = op1 >> 4-bytes (op1 is already in targetReg)
- getEmitter()->emitIns_R_I(INS_psrldq, emitActualTypeSize(targetType), targetReg, 4);
+ GetEmitter()->emitIns_R_I(INS_psrldq, emitActualTypeSize(targetType), targetReg, 4);
// tmpReg = op2 >> 4-bytes (op2 is already in tmpReg)
- getEmitter()->emitIns_R_I(INS_psrldq, emitActualTypeSize(targetType), tmpReg, 4);
+ GetEmitter()->emitIns_R_I(INS_psrldq, emitActualTypeSize(targetType), tmpReg, 4);
// tmp = unsigned double word multiply of targetReg and tmpReg. Essentially
// tmpReg[63:0] = op1[1] * op2[1]
// Extract first and third double word results from tmpReg
// tmpReg = shuffle(0,0,2,0) of tmpReg
- getEmitter()->emitIns_R_R_I(INS_pshufd, emitActualTypeSize(targetType), tmpReg, tmpReg, (int8_t)SHUFFLE_XXZX);
+ GetEmitter()->emitIns_R_R_I(INS_pshufd, emitActualTypeSize(targetType), tmpReg, tmpReg, (int8_t)SHUFFLE_XXZX);
// targetReg[63:0] = op1[0] * op2[0]
// targetReg[127:64] = op1[2] * op2[2]
// Extract first and third double word results from targetReg
// targetReg = shuffle(0,0,2,0) of targetReg
- getEmitter()->emitIns_R_R_I(INS_pshufd, emitActualTypeSize(targetType), targetReg, targetReg,
+ GetEmitter()->emitIns_R_R_I(INS_pshufd, emitActualTypeSize(targetType), targetReg, targetReg,
(int8_t)SHUFFLE_XXZX);
// pack the results into a single vector
// Currently AVX doesn't support integer.
// if the ins is INS_cvtsi2ss or INS_cvtsi2sd, we won't use AVX.
if (op1Reg != targetReg && compiler->getSIMDSupportLevel() == SIMD_AVX2_Supported &&
- !(ins == INS_cvtsi2ss || ins == INS_cvtsi2sd) && getEmitter()->IsThreeOperandAVXInstruction(ins))
+ !(ins == INS_cvtsi2ss || ins == INS_cvtsi2sd) && GetEmitter()->IsThreeOperandAVXInstruction(ins))
{
inst_RV_RV_RV(ins, targetReg, op1Reg, op2Reg, emitActualTypeSize(targetType));
}
unsigned shiftCount = 16 - simdNode->gtSIMDSize;
assert((shiftCount > 0) && (shiftCount <= 16));
instruction ins = getOpForSIMDIntrinsic(SIMDIntrinsicShiftLeftInternal, TYP_SIMD16);
- getEmitter()->emitIns_R_I(ins, EA_16BYTE, targetReg, shiftCount);
+ GetEmitter()->emitIns_R_I(ins, EA_16BYTE, targetReg, shiftCount);
ins = getOpForSIMDIntrinsic(SIMDIntrinsicShiftRightInternal, TYP_SIMD16);
- getEmitter()->emitIns_R_I(ins, EA_16BYTE, targetReg, shiftCount);
+ GetEmitter()->emitIns_R_I(ins, EA_16BYTE, targetReg, shiftCount);
}
genProduceReg(simdNode);
if (varTypeIsFloating(baseType))
{
assert((ival >= 0) && (ival <= 255));
- getEmitter()->emitIns_R_R_I(ins, emitActualTypeSize(targetType), targetReg, otherReg, (int8_t)ival);
+ GetEmitter()->emitIns_R_R_I(ins, emitActualTypeSize(targetType), targetReg, otherReg, (int8_t)ival);
}
else
{
}
assert((ival >= 0) && (ival <= 255));
- getEmitter()->emitIns_R_R_I(ins, emitActualTypeSize(targetType), targetReg, op2Reg, (int8_t)ival);
+ GetEmitter()->emitIns_R_R_I(ins, emitActualTypeSize(targetType), targetReg, op2Reg, (int8_t)ival);
}
break;
if (varTypeIsFloating(baseType))
{
assert((ival >= 0) && (ival <= 255));
- getEmitter()->emitIns_R_R_I(ins, emitActualTypeSize(simdType), tmpReg1, otherReg, (int8_t)ival);
+ GetEmitter()->emitIns_R_R_I(ins, emitActualTypeSize(simdType), tmpReg1, otherReg, (int8_t)ival);
}
else
{
if ((simdNode->gtFlags & GTF_SIMD12_OP) != 0)
{
mask = 0x00000FFF;
- getEmitter()->emitIns_R_I(INS_and, EA_4BYTE, intReg, mask);
+ GetEmitter()->emitIns_R_I(INS_and, EA_4BYTE, intReg, mask);
}
else if (emitActualTypeSize(simdType) == 32)
{
{
mask = 0x0000FFFF;
}
- getEmitter()->emitIns_R_I(INS_cmp, EA_4BYTE, intReg, mask);
+ GetEmitter()->emitIns_R_I(INS_cmp, EA_4BYTE, intReg, mask);
}
}
break;
// If this is TYP_SIMD32, we need to combine the lower & upper results.
if (simdEvalType == TYP_SIMD32)
{
- getEmitter()->emitIns_R_R_I(INS_vextractf128, EA_32BYTE, tmpReg1, targetReg, 0x01);
+ GetEmitter()->emitIns_R_R_I(INS_vextractf128, EA_32BYTE, tmpReg1, targetReg, 0x01);
inst_RV_RV(INS_addps, targetReg, tmpReg1, targetType, emitTypeSize(targetType));
}
}
// targetReg = targetReg + tmpReg1
inst_RV_RV(INS_mulpd, targetReg, op2Reg, simdEvalType, emitActualTypeSize(simdType));
inst_RV_RV(INS_haddpd, targetReg, targetReg, simdEvalType, emitActualTypeSize(simdType));
- getEmitter()->emitIns_R_R_I(INS_vextractf128, EA_32BYTE, tmpReg1, targetReg, 0x01);
+ GetEmitter()->emitIns_R_R_I(INS_vextractf128, EA_32BYTE, tmpReg1, targetReg, 0x01);
inst_RV_RV(INS_addpd, targetReg, tmpReg1, targetType, emitTypeSize(targetType));
}
else
if (simdEvalType == TYP_SIMD32)
{
// tmpReg2[127..0] = Upper 128-bits of tmpReg1
- getEmitter()->emitIns_R_R_I(INS_vextractf128, EA_32BYTE, tmpReg2, tmpReg1, 0x01);
+ GetEmitter()->emitIns_R_R_I(INS_vextractf128, EA_32BYTE, tmpReg2, tmpReg1, 0x01);
// tmpReg1[127..0] = tmpReg1[127..0] + tmpReg2[127..0]
// This will compute
}
// Now, load the desired element.
- getEmitter()->emitIns_R_ARX(ins_Move_Extend(baseType, false), // Load
+ GetEmitter()->emitIns_R_ARX(ins_Move_Extend(baseType, false), // Load
emitTypeSize(baseType), // Of the vector baseType
targetReg, // To targetReg
baseReg, // Base Reg
regNumber indexReg = op2->gtRegNum;
// Store the vector to the temp location.
- getEmitter()->emitIns_S_R(ins_Store(simdType, compiler->isSIMDTypeLocalAligned(simdInitTempVarNum)),
+ GetEmitter()->emitIns_S_R(ins_Store(simdType, compiler->isSIMDTypeLocalAligned(simdInitTempVarNum)),
emitTypeSize(simdType), srcReg, simdInitTempVarNum, 0);
// Now, load the desired element.
- getEmitter()->emitIns_R_ARX(ins_Move_Extend(baseType, false), // Load
+ GetEmitter()->emitIns_R_ARX(ins_Move_Extend(baseType, false), // Load
emitTypeSize(baseType), // Of the vector baseType
targetReg, // To targetReg
(isEBPbased) ? REG_EBP : REG_ESP, // Stack-based
assert(tmpReg != REG_NA);
newSrcReg = tmpReg;
}
- getEmitter()->emitIns_R_R_I(INS_vextractf128, EA_32BYTE, newSrcReg, srcReg, 0x01);
+ GetEmitter()->emitIns_R_R_I(INS_vextractf128, EA_32BYTE, newSrcReg, srcReg, 0x01);
srcReg = newSrcReg;
}
{
instruction ins = getOpForSIMDIntrinsic(SIMDIntrinsicShiftRightInternal, TYP_SIMD16);
assert((byteShiftCnt > 0) && (byteShiftCnt < 32));
- getEmitter()->emitIns_R_I(ins, emitActualTypeSize(simdType), targetReg, byteShiftCnt);
+ GetEmitter()->emitIns_R_I(ins, emitActualTypeSize(simdType), targetReg, byteShiftCnt);
}
}
else
}
assert((index >= 0) && (index <= 8));
- getEmitter()->emitIns_R_R_I(INS_pextrw, emitTypeSize(TYP_INT), targetReg, srcReg, index);
+ GetEmitter()->emitIns_R_R_I(INS_pextrw, emitTypeSize(TYP_INT), targetReg, srcReg, index);
bool ZeroOrSignExtnReqd = true;
if (baseSize == 1)
assert((byteShiftCnt > 0) && (byteShiftCnt <= 32));
ins = getOpForSIMDIntrinsic(SIMDIntrinsicShiftRightInternal, TYP_SIMD16);
- getEmitter()->emitIns_R_I(ins, emitActualTypeSize(simdType), tmpReg, byteShiftCnt);
+ GetEmitter()->emitIns_R_I(ins, emitActualTypeSize(simdType), tmpReg, byteShiftCnt);
}
else
{
// First insert the lower 16-bits of tmpReg in targetReg at 2*index position
// since every float has two 16-bit words.
- getEmitter()->emitIns_R_R_I(INS_pinsrw, emitTypeSize(TYP_INT), targetReg, tmpReg, 2 * index);
+ GetEmitter()->emitIns_R_R_I(INS_pinsrw, emitTypeSize(TYP_INT), targetReg, tmpReg, 2 * index);
// Logical right shift tmpReg by 16-bits and insert in targetReg at 2*index + 1 position
inst_RV_SH(INS_SHIFT_RIGHT_LOGICAL, EA_4BYTE, tmpReg, 16);
- getEmitter()->emitIns_R_R_I(INS_pinsrw, emitTypeSize(TYP_INT), targetReg, tmpReg, 2 * index + 1);
+ GetEmitter()->emitIns_R_R_I(INS_pinsrw, emitTypeSize(TYP_INT), targetReg, tmpReg, 2 * index + 1);
}
else
{
instruction ins = getOpForSIMDIntrinsic(simdNode->gtSIMDIntrinsicID, baseType);
assert((shuffleControl >= 0) && (shuffleControl <= 255));
- getEmitter()->emitIns_R_R_I(ins, emitTypeSize(baseType), targetReg, targetReg, (int8_t)shuffleControl);
+ GetEmitter()->emitIns_R_R_I(ins, emitTypeSize(baseType), targetReg, targetReg, (int8_t)shuffleControl);
genProduceReg(simdNode);
}
genConsumeOperands(treeNode->AsOp());
// 8-byte write
- getEmitter()->emitIns_AR_R(ins_Store(TYP_DOUBLE), EA_8BYTE, data->gtRegNum, addr->gtRegNum, 0);
+ GetEmitter()->emitIns_AR_R(ins_Store(TYP_DOUBLE), EA_8BYTE, data->gtRegNum, addr->gtRegNum, 0);
// Extract upper 4-bytes from data
- getEmitter()->emitIns_R_R_I(INS_pshufd, emitActualTypeSize(TYP_SIMD16), tmpReg, data->gtRegNum, 0x02);
+ GetEmitter()->emitIns_R_R_I(INS_pshufd, emitActualTypeSize(TYP_SIMD16), tmpReg, data->gtRegNum, 0x02);
// 4-byte write
- getEmitter()->emitIns_AR_R(ins_Store(TYP_FLOAT), EA_4BYTE, tmpReg, addr->gtRegNum, 8);
+ GetEmitter()->emitIns_AR_R(ins_Store(TYP_FLOAT), EA_4BYTE, tmpReg, addr->gtRegNum, 8);
}
//-----------------------------------------------------------------------------
assert(tmpReg != targetReg);
// Load upper 4 bytes in tmpReg
- getEmitter()->emitIns_R_AR(ins_Load(TYP_FLOAT), EA_4BYTE, tmpReg, operandReg, 8);
+ GetEmitter()->emitIns_R_AR(ins_Load(TYP_FLOAT), EA_4BYTE, tmpReg, operandReg, 8);
// Load lower 8 bytes in targetReg
- getEmitter()->emitIns_R_AR(ins_Load(TYP_DOUBLE), EA_8BYTE, targetReg, operandReg, 0);
+ GetEmitter()->emitIns_R_AR(ins_Load(TYP_DOUBLE), EA_8BYTE, targetReg, operandReg, 0);
// combine upper 4 bytes and lower 8 bytes in targetReg
- getEmitter()->emitIns_R_R_I(INS_shufps, emitActualTypeSize(TYP_SIMD16), targetReg, tmpReg, (int8_t)SHUFFLE_YXYX);
+ GetEmitter()->emitIns_R_R_I(INS_shufps, emitActualTypeSize(TYP_SIMD16), targetReg, tmpReg, (int8_t)SHUFFLE_YXYX);
genProduceReg(treeNode);
}
regNumber tmpReg = treeNode->GetSingleTempReg();
// store lower 8 bytes
- getEmitter()->emitIns_S_R(ins_Store(TYP_DOUBLE), EA_8BYTE, operandReg, varNum, offs);
+ GetEmitter()->emitIns_S_R(ins_Store(TYP_DOUBLE), EA_8BYTE, operandReg, varNum, offs);
// Extract upper 4-bytes from operandReg
- getEmitter()->emitIns_R_R_I(INS_pshufd, emitActualTypeSize(TYP_SIMD16), tmpReg, operandReg, 0x02);
+ GetEmitter()->emitIns_R_R_I(INS_pshufd, emitActualTypeSize(TYP_SIMD16), tmpReg, operandReg, 0x02);
// Store upper 4 bytes
- getEmitter()->emitIns_S_R(ins_Store(TYP_FLOAT), EA_4BYTE, tmpReg, varNum, offs + 8);
+ GetEmitter()->emitIns_S_R(ins_Store(TYP_FLOAT), EA_4BYTE, tmpReg, varNum, offs + 8);
}
//-----------------------------------------------------------------------------
assert(tmpReg != targetReg);
// Read upper 4 bytes to tmpReg
- getEmitter()->emitIns_R_S(ins_Move_Extend(TYP_FLOAT, false), EA_4BYTE, tmpReg, varNum, offs + 8);
+ GetEmitter()->emitIns_R_S(ins_Move_Extend(TYP_FLOAT, false), EA_4BYTE, tmpReg, varNum, offs + 8);
// Read lower 8 bytes to targetReg
- getEmitter()->emitIns_R_S(ins_Move_Extend(TYP_DOUBLE, false), EA_8BYTE, targetReg, varNum, offs);
+ GetEmitter()->emitIns_R_S(ins_Move_Extend(TYP_DOUBLE, false), EA_8BYTE, targetReg, varNum, offs);
// combine upper 4 bytes and lower 8 bytes in targetReg
- getEmitter()->emitIns_R_R_I(INS_shufps, emitActualTypeSize(TYP_SIMD16), targetReg, tmpReg, (int8_t)SHUFFLE_YXYX);
+ GetEmitter()->emitIns_R_R_I(INS_shufps, emitActualTypeSize(TYP_SIMD16), targetReg, tmpReg, (int8_t)SHUFFLE_YXYX);
genProduceReg(treeNode);
}
assert(genIsValidFloatReg(tmpReg));
// 8-byte write
- getEmitter()->emitIns_AR_R(ins_Store(TYP_DOUBLE), EA_8BYTE, operandReg, REG_SPBASE, 0);
+ GetEmitter()->emitIns_AR_R(ins_Store(TYP_DOUBLE), EA_8BYTE, operandReg, REG_SPBASE, 0);
// Extract upper 4-bytes from data
- getEmitter()->emitIns_R_R_I(INS_pshufd, emitActualTypeSize(TYP_SIMD16), tmpReg, operandReg, 0x02);
+ GetEmitter()->emitIns_R_R_I(INS_pshufd, emitActualTypeSize(TYP_SIMD16), tmpReg, operandReg, 0x02);
// 4-byte write
- getEmitter()->emitIns_AR_R(ins_Store(TYP_FLOAT), EA_4BYTE, tmpReg, REG_SPBASE, 8);
+ GetEmitter()->emitIns_AR_R(ins_Store(TYP_FLOAT), EA_4BYTE, tmpReg, REG_SPBASE, 8);
}
//-----------------------------------------------------------------------------
assert(op1Reg != REG_NA);
if (targetReg != REG_NA)
{
- getEmitter()->emitIns_R_R_I(INS_vextractf128, EA_32BYTE, targetReg, op1Reg, 0x01);
+ GetEmitter()->emitIns_R_R_I(INS_vextractf128, EA_32BYTE, targetReg, op1Reg, 0x01);
genProduceReg(simdNode);
}
else
// We want to store this to the upper 16 bytes of this localVar's home.
int offs = 16;
- getEmitter()->emitIns_S_R_I(INS_vextractf128, EA_32BYTE, varNum, offs, op1Reg, 0x01);
+ GetEmitter()->emitIns_S_R_I(INS_vextractf128, EA_32BYTE, varNum, offs, op1Reg, 0x01);
}
}
assert(lclVarReg != REG_NA);
if (srcReg != REG_NA)
{
- getEmitter()->emitIns_R_R_R_I(INS_vinsertf128, EA_32BYTE, lclVarReg, lclVarReg, srcReg, 0x01);
+ GetEmitter()->emitIns_R_R_R_I(INS_vinsertf128, EA_32BYTE, lclVarReg, lclVarReg, srcReg, 0x01);
}
else
{
assert(varDsc->lvOnFrame);
// We will load this from the upper 16 bytes of this localVar's home.
int offs = 16;
- getEmitter()->emitIns_R_R_S_I(INS_vinsertf128, EA_32BYTE, lclVarReg, lclVarReg, varNum, offs, 0x01);
+ GetEmitter()->emitIns_R_R_S_I(INS_vinsertf128, EA_32BYTE, lclVarReg, lclVarReg, varNum, offs, 0x01);
}
}
}
else
{
- startOffset = func->startLoc->CodeOffset(genEmitter);
+ startOffset = func->startLoc->CodeOffset(GetEmitter());
}
if (func->endLoc == nullptr)
}
else
{
- endOffset = func->endLoc->CodeOffset(genEmitter);
+ endOffset = func->endLoc->CodeOffset(GetEmitter());
}
DWORD size = (DWORD)func->cfiCodes->size();
}
else
{
- startOffset = func->coldStartLoc->CodeOffset(genEmitter);
+ startOffset = func->coldStartLoc->CodeOffset(GetEmitter());
}
if (func->coldEndLoc == nullptr)
}
else
{
- endOffset = func->coldEndLoc->CodeOffset(genEmitter);
+ endOffset = func->coldEndLoc->CodeOffset(GetEmitter());
}
#ifdef DEBUG
UNATIVE_OFFSET offset;
if (func->funKind == FUNC_ROOT)
{
- offset = genEmitter->emitGetPrologOffsetEstimate();
+ offset = GetEmitter()->emitGetPrologOffsetEstimate();
}
else
{
#if defined(_TARGET_AMD64_) || (defined(_TARGET_UNIX_) && (defined(_TARGET_ARMARCH_) || defined(_TARGET_X86_)))
assert(func->startLoc != nullptr);
- offset = func->startLoc->GetFuncletPrologOffset(genEmitter);
+ offset = func->startLoc->GetFuncletPrologOffset(GetEmitter());
#else
offset = 0; // TODO ???
#endif
}
else
{
- startOffset = func->startLoc->CodeOffset(genEmitter);
+ startOffset = func->startLoc->CodeOffset(GetEmitter());
}
if (func->endLoc == nullptr)
}
else
{
- endOffset = func->endLoc->CodeOffset(genEmitter);
+ endOffset = func->endLoc->CodeOffset(GetEmitter());
}
#ifdef UNIX_AMD64_ABI
}
else
{
- startOffset = func->coldStartLoc->CodeOffset(genEmitter);
+ startOffset = func->coldStartLoc->CodeOffset(GetEmitter());
}
if (func->coldEndLoc == nullptr)
}
else
{
- endOffset = func->coldEndLoc->CodeOffset(genEmitter);
+ endOffset = func->coldEndLoc->CodeOffset(GetEmitter());
}
}
#endif // _TARGET_UNIX_
UnwindInfo* pu = &funCurrentFunc()->uwi;
- genEmitter->emitUnwindNopPadding(pu->GetCurrentEmitterLocation(), this);
+ GetEmitter()->emitUnwindNopPadding(pu->GetCurrentEmitterLocation(), this);
}
// Ask the VM to reserve space for the unwind information for the function and
{
noway_assert(epiEmitLocation == NULL); // This function is only called once per epilog
epiEmitLocation = new (uwiComp, CMK_UnwindInfo) emitLocation();
- epiEmitLocation->CaptureLocation(uwiComp->genEmitter);
+ epiEmitLocation->CaptureLocation(uwiComp->GetEmitter());
}
void UnwindEpilogInfo::FinalizeOffset()
{
- epiStartOffset = epiEmitLocation->CodeOffset(uwiComp->genEmitter);
+ epiStartOffset = epiEmitLocation->CodeOffset(uwiComp->GetEmitter());
}
#ifdef DEBUG
}
else
{
- ufiStartOffset = ufiEmitLoc->CodeOffset(uwiComp->genEmitter);
+ ufiStartOffset = ufiEmitLoc->CodeOffset(uwiComp->GetEmitter());
}
for (UnwindEpilogInfo* pEpi = ufiEpilogList; pEpi != NULL; pEpi = pEpi->epiNext)
UnwindEpilogInfo* pEpiPrev;
UnwindEpilogInfo* pEpi;
- UNATIVE_OFFSET splitOffset = emitLoc->CodeOffset(uwiComp->genEmitter);
+ UNATIVE_OFFSET splitOffset = emitLoc->CodeOffset(uwiComp->GetEmitter());
for (pEpiPrev = NULL, pEpi = pSplitFrom->ufiEpilogList; pEpi != NULL; pEpiPrev = pEpi, pEpi = pEpi->epiNext)
{
bool UnwindFragmentInfo::IsAtFragmentEnd(UnwindEpilogInfo* pEpi)
{
- return uwiComp->genEmitter->emitIsFuncEnd(pEpi->epiEmitLocation, (ufiNext == NULL) ? NULL : ufiNext->ufiEmitLoc);
+ return uwiComp->GetEmitter()->emitIsFuncEnd(pEpi->epiEmitLocation, (ufiNext == NULL) ? NULL : ufiNext->ufiEmitLoc);
}
// Merge the unwind codes as much as possible.
}
else
{
- startOffset = uwiFragmentLast->ufiEmitLoc->CodeOffset(uwiComp->genEmitter);
+ startOffset = uwiFragmentLast->ufiEmitLoc->CodeOffset(uwiComp->GetEmitter());
}
if (uwiEndLoc == NULL)
}
else
{
- endOffset = uwiEndLoc->CodeOffset(uwiComp->genEmitter);
+ endOffset = uwiEndLoc->CodeOffset(uwiComp->GetEmitter());
}
assert(endOffset > startOffset); // there better be at least 1 byte of code
#endif // DEBUG
// Call the emitter to do the split, and call us back for every split point it chooses.
- uwiComp->genEmitter->emitSplit(uwiFragmentLast->ufiEmitLoc, uwiEndLoc, maxFragmentSize, (void*)this,
+ uwiComp->GetEmitter()->emitSplit(uwiFragmentLast->ufiEmitLoc, uwiEndLoc, maxFragmentSize, (void*)this,
EmitSplitCallback);
#ifdef DEBUG
}
else
{
- endOffset = uwiEndLoc->CodeOffset(uwiComp->genEmitter);
+ endOffset = uwiEndLoc->CodeOffset(uwiComp->GetEmitter());
}
for (pFrag = &uwiFragmentFirst; pFrag != NULL; pFrag = pFrag->ufiNext)
unsigned UnwindInfo::GetInstructionSize()
{
assert(uwiInitialized == UWI_INITIALIZED_PATTERN);
- return uwiComp->genEmitter->emitGetInstructionSize(uwiCurLoc);
+ return uwiComp->GetEmitter()->emitGetInstructionSize(uwiCurLoc);
}
#endif // defined(_TARGET_ARM_)
{
assert(uwiInitialized == UWI_INITIALIZED_PATTERN);
assert(uwiCurLoc != NULL);
- uwiCurLoc->CaptureLocation(uwiComp->genEmitter);
+ uwiCurLoc->CaptureLocation(uwiComp->GetEmitter());
}
void UnwindInfo::AddFragment(emitLocation* emitLoc)
}
else
{
- startOffset = startLoc->CodeOffset(genEmitter);
+ startOffset = startLoc->CodeOffset(GetEmitter());
}
if (endLoc == nullptr)
}
else
{
- endOffset = endLoc->CodeOffset(genEmitter);
+ endOffset = endLoc->CodeOffset(GetEmitter());
}
}
else
}
else
{
- startOffset = coldStartLoc->CodeOffset(genEmitter);
+ startOffset = coldStartLoc->CodeOffset(GetEmitter());
}
if (coldEndLoc == nullptr)
}
else
{
- endOffset = coldEndLoc->CodeOffset(genEmitter);
+ endOffset = coldEndLoc->CodeOffset(GetEmitter());
}
}