src = src->AsUnOp()->gtGetOp1();
}
+ unsigned size = node->GetLayout()->GetSize();
+
+ // An SSE mov that accesses data larger than 8 bytes may be implemented using
+ // multiple memory accesses. Hence, the JIT must not use such stores when
+ // INITBLK zeroes a struct that contains GC pointers and can be observed by
+ // other threads (i.e. when dstAddr is not an address of a local).
+ // For example, this can happen when initializing a struct field of an object.
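+ // Illustrative (hypothetical) C# trigger:
+ //   struct TwoRefs { object a; object b; }
+ //   class C { public TwoRefs f; }
+ //   c.f = default; // zeroes 16 bytes of GC refs inside a heap object;
+ //                  // a single 16-byte SSE store could be observed half-written.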
+ const bool canUse16BytesSimdMov = !node->IsOnHeapAndContainsReferences();
+
+#ifdef TARGET_AMD64
+ // On Amd64 the JIT does not use SIMD stores at all for such structs and
+ // instead always allocates a GP register for the src node.
+ const bool willUseSimdMov = canUse16BytesSimdMov && (size >= XMM_REGSIZE_BYTES);
+#else
+ // On X86 the JIT uses SIMD for any struct of 16 bytes or more, even one
+ // with GC pointers on the heap: in that case the loop below falls back to
+ // movq, a single 8-byte access that is still cheaper than two 4-byte movs
+ // from a GP register.
+ const bool willUseSimdMov = (size >= XMM_REGSIZE_BYTES);
+#endif
+
if (!src->isContained())
{
srcIntReg = genConsumeReg(src);
}
else
{
- // If src is contained then it must be 0 and the size must be a multiple
- // of XMM_REGSIZE_BYTES so initialization can use only SSE2 instructions.
+ // If src is contained it must be zero, and lowering contains it only when
+ // the whole block can be zeroed with SIMD stores alone (no GP remainder).
assert(src->IsIntegralConst(0));
- assert((node->GetLayout()->GetSize() % XMM_REGSIZE_BYTES) == 0);
+ assert(willUseSimdMov);
+#ifdef TARGET_AMD64
+ assert((size % XMM_REGSIZE_BYTES) == 0);
+#else
+ assert((size % 8) == 0);
+#endif
}
emitter* emit = GetEmitter();
- unsigned size = node->GetLayout()->GetSize();
assert(size <= INT32_MAX);
assert(dstOffset < (INT32_MAX - static_cast<int>(size)));
- // Fill as much as possible using SSE2 stores.
- if (size >= XMM_REGSIZE_BYTES)
+ // Fill as much of the block as possible using SIMD stores.
+ if (willUseSimdMov)
{
regNumber srcXmmReg = node->GetSingleTempReg(RBM_ALLFLOAT);
#endif
}
- instruction simdMov = simdUnalignedMovIns();
- for (unsigned regSize = XMM_REGSIZE_BYTES; size >= regSize; size -= regSize, dstOffset += regSize)
+ instruction simdMov = simdUnalignedMovIns();
+ unsigned regSize = XMM_REGSIZE_BYTES;
+ unsigned bytesWritten = 0;
+
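+ // For example, on X86 with size == 28 and no GC refs on the heap, the loop
+ // emits one 16-byte unaligned SIMD store and one 8-byte movq, then breaks
+ // so the remaining 4 bytes are written by the GP stores after the loop.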
+ while (bytesWritten < size)
{
+#ifdef TARGET_X86
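+ // Downgrade to an 8-byte movq when 16-byte stores are unsafe or when
+ // fewer than 16 bytes remain to be written.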
+ if (!canUse16BytesSimdMov || (bytesWritten + regSize > size))
+ {
+ simdMov = INS_movq;
+ regSize = 8;
+ }
+#endif
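+ // Any remainder smaller than regSize is left to the GP-register stores
+ // after the loop; src is not contained in that case, so srcIntReg is valid.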
+ if (bytesWritten + regSize > size)
+ {
+ assert(srcIntReg != REG_NA);
+ break;
+ }
+
if (dstLclNum != BAD_VAR_NUM)
{
emit->emitIns_S_R(simdMov, EA_ATTR(regSize), srcXmmReg, dstLclNum, dstOffset);
}
else
{
emit->emitIns_ARX_R(simdMov, EA_ATTR(regSize), srcXmmReg, dstAddrBaseReg, dstAddrIndexReg,
dstAddrIndexScale, dstOffset);
}
+
+ dstOffset += regSize;
+ bytesWritten += regSize;
}
- // TODO-CQ-XArch: On x86 we could initialize 8 byte at once by using MOVQ instead of two 4 byte MOV stores.
- // On x64 it may also be worth zero initializing a 4/8 byte remainder using MOVD/MOVQ, that avoids the need
- // to allocate a GPR just for the remainder.
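+ // Reduce 'size' to the unwritten remainder so the GP-store path below fills it.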
+ size -= bytesWritten;
}
// Fill the remainder using normal stores.
// The VM doesn't allow such large array elements but let's be sure.
noway_assert(scale <= INT32_MAX);
#else // !TARGET_64BIT
- tmpReg = node->GetSingleTempReg();
+ tmpReg = node->GetSingleTempReg();
#endif // !TARGET_64BIT
GetEmitter()->emitIns_R_I(emitter::inst3opImulForReg(tmpReg), EA_PTRSIZE, indexReg,
"type operand incompatible with type of address");
}
- size = info.compCompHnd->getClassSize(resolvedToken.hClass); // Size
- op2 = gtNewIconNode(0); // Value
- op1 = impPopStack().val; // Dest
- op1 = gtNewBlockVal(op1, size);
- op1 = gtNewBlkOpNode(op1, op2, (prefixFlags & PREFIX_VOLATILE) != 0, false);
+ op2 = gtNewIconNode(0); // Value
+ op1 = impPopStack().val; // Dest
+
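+ // For value classes, build a struct node so the block store sees the GC
+ // layout; for reference types, INITOBJ merely zeroes the pointer-sized
+ // location itself.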
+ if (eeIsValueClass(resolvedToken.hClass))
+ {
+ op1 = gtNewStructVal(resolvedToken.hClass, op1);
+ if (op1->OperIs(GT_OBJ))
+ {
+ gtSetObjGcInfo(op1->AsObj());
+ }
+ }
+ else
+ {
+ size = info.compCompHnd->getClassSize(resolvedToken.hClass);
+ assert(size == TARGET_POINTER_SIZE);
+ op1 = gtNewBlockVal(op1, size);
+ }
+
+ op1 = gtNewBlkOpNode(op1, op2, (prefixFlags & PREFIX_VOLATILE) != 0, false);
goto SPILL_APPEND;
case CEE_INITBLK:
// The return value will be on the X87 stack, and we will need to move it.
dstCandidates = allRegs(registerType);
#else // !TARGET_X86
- dstCandidates = RBM_FLOATRET;
+ dstCandidates = RBM_FLOATRET;
#endif // !TARGET_X86
}
else if (registerType == TYP_LONG)
switch (blkNode->gtBlkOpKind)
{
case GenTreeBlk::BlkOpKindUnroll:
- if (size >= XMM_REGSIZE_BYTES)
+ {
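+ // Keep this in sync with the willUseSimdMov computation in the unrolled
+ // INITBLK codegen above.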
+#ifdef TARGET_AMD64
+ const bool canUse16BytesSimdMov = !blkNode->IsOnHeapAndContainsReferences();
+ const bool willUseSimdMov = canUse16BytesSimdMov && (size >= XMM_REGSIZE_BYTES);
+#else
+ const bool willUseSimdMov = (size >= XMM_REGSIZE_BYTES);
+#endif
+ if (willUseSimdMov)
{
buildInternalFloatRegisterDefForNode(blkNode, internalFloatRegCandidates());
SetContainsAVXFlags();
srcRegMask = allByteRegs();
}
#endif
- break;
+ }
+ break;
case GenTreeBlk::BlkOpKindRepInstr:
dstAddrRegMask = RBM_RDI;