case GT_STORE_LCL_FLD:
{
- NYI_IF(targetType == TYP_STRUCT, "GT_STORE_LCL_FLD: struct store local field not supported");
+ noway_assert(targetType != TYP_STRUCT);
+
+ // Record the offset of the field within the local variable.
+ unsigned offset = treeNode->gtLclFld.gtLclOffs;
+
+ // We must have a stack store with GT_STORE_LCL_FLD
noway_assert(!treeNode->InReg());
+ noway_assert(targetReg == REG_NA);
- GenTreePtr op1 = treeNode->gtOp.gtOp1->gtEffectiveVal();
- genConsumeIfReg(op1);
- emit->emitInsBinary(ins_Store(targetType), emitTypeSize(treeNode), treeNode, op1);
+ GenTreeLclVarCommon* varNode = treeNode->AsLclVarCommon();
+ unsigned varNum = varNode->gtLclNum;
+ assert(varNum < compiler->lvaCount);
+ LclVarDsc* varDsc = &(compiler->lvaTable[varNum]);
+
+ // Ensure that lclVar nodes are typed correctly.
+ assert(!varDsc->lvNormalizeOnStore() || targetType == genActualType(varDsc->TypeGet()));
+
+ GenTreePtr data = treeNode->gtOp.gtOp1->gtEffectiveVal();
+ instruction ins = ins_Store(targetType);
+ emitAttr attr = emitTypeSize(targetType);
+ if (data->isContainedIntOrIImmed())
+ {
+ assert(data->IsIntegralConst(0));
+ NYI_ARM("st.lclFld contained operand");
+ }
+ else
+ {
+ assert(!data->isContained());
+ genConsumeReg(data);
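+ // Store the value register into the field's stack location.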
+ emit->emitIns_S_R(ins, attr, data->gtRegNum, varNum, offset);
+ }
+
+ genUpdateLife(varNode);
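+ // The variable's home is now its stack slot.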
+ varDsc->lvRegNum = REG_STK;
}
break;
case GT_STORE_LCL_VAR:
{
- NYI_IF(targetType == TYP_STRUCT, "struct store local not supported");
+ GenTreeLclVarCommon* varNode = treeNode->AsLclVarCommon();
- GenTreePtr op1 = treeNode->gtOp.gtOp1->gtEffectiveVal();
- genConsumeIfReg(op1);
- if (treeNode->gtRegNum == REG_NA)
- {
- // stack store
- emit->emitInsMov(ins_Store(targetType), emitTypeSize(treeNode), treeNode);
- compiler->lvaTable[treeNode->AsLclVarCommon()->gtLclNum].lvRegNum = REG_STK;
- }
- else if (op1->isContained())
+ unsigned varNum = varNode->gtLclNum;
+ assert(varNum < compiler->lvaCount);
+ LclVarDsc* varDsc = &(compiler->lvaTable[varNum]);
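+ // A GT_STORE_LCL_VAR writes the whole local, so the store offset is always zero.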
+ unsigned offset = 0;
+
+ // Ensure that lclVar nodes are typed correctly.
+ assert(!varDsc->lvNormalizeOnStore() || targetType == genActualType(varDsc->TypeGet()));
+
+ GenTreePtr data = treeNode->gtOp.gtOp1->gtEffectiveVal();
+
+ // var = call, where the call returns a multi-reg return value;
+ // that case is handled separately.
+ if (data->gtSkipReloadOrCopy()->IsMultiRegCall())
{
- // Currently, we assume that the contained source of a GT_STORE_LCL_VAR writing to a register
- // must be a constant. However, in the future we might want to support a contained memory op.
- // This is a bit tricky because we have to decide it's contained before register allocation,
- // and this would be a case where, once that's done, we need to mark that node as always
- // requiring a register - which we always assume now anyway, but once we "optimize" that
- // we'll have to take cases like this into account.
- assert((op1->gtRegNum == REG_NA) && op1->OperIsConst());
- genSetRegToConst(treeNode->gtRegNum, targetType, op1);
+ NYI_ARM("st.lclVar multi-reg value");
}
- else if (op1->gtRegNum != treeNode->gtRegNum)
+ else
{
- assert(op1->gtRegNum != REG_NA);
- emit->emitInsBinary(ins_Move_Extend(targetType, true), emitTypeSize(treeNode), treeNode, op1);
+ genConsumeRegs(data);
+
+ regNumber dataReg = REG_NA;
+ if (data->isContainedIntOrIImmed())
+ {
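+ // As with GT_STORE_LCL_FLD above, contained constants are not yet handled for ARM.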
+ assert(data->IsIntegralConst(0));
+ NYI_ARM("st.lclVar contained operand");
+ }
+ else
+ {
+ assert(!data->isContained());
+ dataReg = data->gtRegNum;
+ }
+ assert(dataReg != REG_NA);
+
+ if (targetReg == REG_NA) // store into stack-based LclVar
+ {
+ inst_set_SV_var(varNode);
+
+ instruction ins = ins_Store(targetType);
+ emitAttr attr = emitTypeSize(targetType);
+
+ emit->emitIns_S_R(ins, attr, dataReg, varNum, offset);
+
+ genUpdateLife(varNode);
+
+ varDsc->lvRegNum = REG_STK;
+ }
+ else // store into register (i.e. move into register)
+ {
+ if (dataReg != targetReg)
+ {
+ // Assign into targetReg when dataReg (from op1) is not the same register
+ inst_RV_RV(ins_Copy(targetType), targetReg, dataReg, targetType);
+ }
+ genProduceReg(treeNode);
+ }
}
- if (treeNode->gtRegNum != REG_NA)
- genProduceReg(treeNode);
}
break;
case GT_IND:
genConsumeAddress(treeNode->AsIndir()->Addr());
- emit->emitInsMov(ins_Load(treeNode->TypeGet()), emitTypeSize(treeNode), treeNode);
+ emit->emitInsLoadStoreOp(ins_Load(targetType), emitTypeSize(treeNode), targetReg, treeNode->AsIndir());
genProduceReg(treeNode);
break;
genConsumeAddress(addr);
}
- emit->emitInsMov(ins_Store(data->TypeGet()), emitTypeSize(storeInd), storeInd);
+ emit->emitInsLoadStoreOp(ins_Store(targetType), emitTypeSize(storeInd), data->gtRegNum,
+ treeNode->AsIndir());
}
}
break;
#ifndef LEGACY_BACKEND
-// this is very similar to emitInsBinary and probably could be folded in to same
-// except the requirements on the incoming parameter are different,
-// ex: the memory op in storeind case must NOT be contained
-void emitter::emitInsMov(instruction ins, emitAttr attr, GenTree* node)
+void emitter::emitInsLoadStoreOp(instruction ins, emitAttr attr, regNumber dataReg, GenTreeIndir* indir)
{
- switch (node->OperGet())
+ GenTree* addr = indir->Addr();
+
+ if (addr->isContained())
{
- case GT_IND:
- case GT_STOREIND:
- {
- GenTreeIndir* indir = node->AsIndir();
- GenTree* addr = indir->Addr();
- GenTree* data = indir->gtOp.gtOp2;
+ assert(addr->OperGet() == GT_LCL_VAR_ADDR || addr->OperGet() == GT_LEA);
- regNumber reg = (node->OperGet() == GT_IND) ? node->gtRegNum : data->gtRegNum;
+ int offset = 0;
+ DWORD lsl = 0;
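+ // lsl holds log2(scale), used as the shift amount when scaling the index register.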
- if (addr->isContained())
+ if (addr->OperGet() == GT_LEA)
+ {
+ offset = (int)addr->AsAddrMode()->gtOffset;
+ if (addr->AsAddrMode()->gtScale > 0)
{
- assert(addr->OperGet() == GT_LCL_VAR_ADDR || addr->OperGet() == GT_LEA);
+ assert(isPow2(addr->AsAddrMode()->gtScale));
+ BitScanForward(&lsl, addr->AsAddrMode()->gtScale);
+ }
+ }
- int offset = 0;
- DWORD lsl = 0;
+ GenTree* memBase = indir->Base();
+
+ if (indir->HasIndex())
+ {
+ GenTree* index = indir->Index();
+
+ if (offset != 0)
+ {
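+ // Both an index and a nonzero offset: use the reserved internal
+ // register to build the address in two steps.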
+ regMaskTP tmpRegMask = indir->gtRsvdRegs;
+ regNumber tmpReg = genRegNumFromMask(tmpRegMask);
+ noway_assert(tmpReg != REG_NA);
- if (addr->OperGet() == GT_LEA)
+ if (emitIns_valid_imm_for_add(offset, INS_FLAGS_DONT_CARE))
{
- offset = (int)addr->AsAddrMode()->gtOffset;
- if (addr->AsAddrMode()->gtScale > 0)
+ if (lsl > 0)
{
- assert(isPow2(addr->AsAddrMode()->gtScale));
- BitScanForward(&lsl, addr->AsAddrMode()->gtScale);
+ // Generate code to set tmpReg = base + index*scale
+ emitIns_R_R_R_I(INS_add, EA_PTRSIZE, tmpReg, memBase->gtRegNum, index->gtRegNum, lsl,
+ INS_FLAGS_DONT_CARE, INS_OPTS_LSL);
+ }
+ else // no scale
+ {
+ // Generate code to set tmpReg = base + index
+ emitIns_R_R_R(INS_add, EA_PTRSIZE, tmpReg, memBase->gtRegNum, index->gtRegNum);
}
- }
- GenTree* memBase = indir->Base();
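+ // For a store, the add above must not have clobbered the data register.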
+ noway_assert(emitInsIsLoad(ins) || (tmpReg != dataReg));
- if (indir->HasIndex())
- {
- NYI_ARM("emitInsMov HasIndex");
+ // Then load/store dataReg from/to [tmpReg + offset]
+ emitIns_R_R_I(ins, attr, dataReg, tmpReg, offset);
}
- else
+ else // large offset
{
- // TODO check offset is valid for encoding
- emitIns_R_R_I(ins, attr, reg, memBase->gtRegNum, offset);
+ // First, set tmpReg to the large offset constant
+ codeGen->instGen_Set_Reg_To_Imm(EA_PTRSIZE, tmpReg, offset);
+ // Then add the base register
+ // tmpReg = tmpReg + base
+ emitIns_R_R_R(INS_add, EA_PTRSIZE, tmpReg, tmpReg, memBase->gtRegNum);
+
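+ // tmpReg was just written, so it must not alias the data register
+ // (for a store) or the index register that is still needed below.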
+ noway_assert(emitInsIsLoad(ins) || (tmpReg != dataReg));
+ noway_assert(tmpReg != index->gtRegNum);
+
+ // Then load/store dataReg from/to [tmpReg + index*scale]
+ emitIns_R_R_R_I(ins, attr, dataReg, tmpReg, index->gtRegNum, lsl, INS_FLAGS_DONT_CARE,
+ INS_OPTS_LSL);
}
}
- else
+ else // (offset == 0)
{
- if (addr->OperGet() == GT_CLS_VAR_ADDR)
+ if (lsl > 0)
{
- emitIns_C_R(ins, attr, addr->gtClsVar.gtClsVarHnd, data->gtRegNum, 0);
+ // Load/store dataReg from/to [memBase + index*scale]
+ emitIns_R_R_R_I(ins, attr, dataReg, memBase->gtRegNum, index->gtRegNum, lsl, INS_FLAGS_DONT_CARE,
+ INS_OPTS_LSL);
}
- else
+ else // no scale
{
- emitIns_R_R(ins, attr, reg, addr->gtRegNum);
+ // Load/store dataReg from/to [memBase + index]
+ emitIns_R_R_R(ins, attr, dataReg, memBase->gtRegNum, index->gtRegNum);
}
}
}
- break;
-
- case GT_STORE_LCL_VAR:
+ else
{
- GenTreeLclVarCommon* varNode = node->AsLclVarCommon();
-
- GenTree* data = node->gtOp.gtOp1->gtEffectiveVal();
- codeGen->inst_set_SV_var(varNode);
- assert(varNode->gtRegNum == REG_NA); // stack store
-
- if (data->isContainedIntOrIImmed())
- {
- emitIns_S_I(ins, attr, varNode->GetLclNum(), 0, (int)data->AsIntConCommon()->IconValue());
- codeGen->genUpdateLife(varNode);
- }
- else
- {
- assert(!data->isContained());
- emitIns_S_R(ins, attr, data->gtRegNum, varNode->GetLclNum(), 0);
- codeGen->genUpdateLife(varNode);
- }
+ // TODO: check that the offset is valid for the encoding
+ emitIns_R_R_I(ins, attr, dataReg, memBase->gtRegNum, offset);
}
- return;
-
- default:
- unreached();
+ }
+ else
+ {
+ emitIns_R_R(ins, attr, dataReg, addr->gtRegNum);
}
}
bool emitInsIsStore(instruction ins);
bool emitInsIsLoadOrStore(instruction ins);
+ // Generate code for a load or store operation, handling the case of a
+ // contained GT_LEA address of the form [base + index<<scale + offset].
+void emitInsLoadStoreOp(instruction ins, emitAttr attr, regNumber dataReg, GenTreeIndir* indir);
+
/*****************************************************************************
*
* Convert between an index scale in bytes to a smaller encoding used for
if (index != nullptr && !modifiedSources)
{
info->srcCount++;
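+ // Reserve an internal register; it may be needed to form the address
+ // when the indirection has both an index and a nonzero offset.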
+ info->internalIntCount++;
}
}