case Instruction::ZExt:
case Instruction::AddrSpaceCast:
return TargetTTI->getCastInstrCost(Opcode, Ty, OpTy, CostKind, I);
+ case Instruction::Store: {
+ auto *SI = cast<StoreInst>(U);
+ Type *ValTy = U->getOperand(0)->getType();
+ return TargetTTI->getMemoryOpCost(Opcode, ValTy, SI->getAlign(),
+ SI->getPointerAddressSpace(),
+ CostKind, I);
+ }
+ case Instruction::Load: {
+ auto *LI = cast<LoadInst>(U);
+ return TargetTTI->getMemoryOpCost(Opcode, U->getType(), LI->getAlign(),
+ LI->getPointerAddressSpace(),
+ CostKind, I);
+ }
}
// By default, just classify everything as 'basic'.
return TTI::TCC_Basic;
// Assuming that all loads of legal types cost 1.
unsigned Cost = LT.first;
+ if (CostKind != TTI::TCK_RecipThroughput)
+ return Cost;
if (Src->isVectorTy() &&
Src->getPrimitiveSizeInBits() < LT.second.getSizeInBits()) {
return getCmpSelInstrCost(I->getOpcode(), ValTy, I->getType(),
CostKind, I);
}
- case Instruction::Store: {
- const StoreInst *SI = cast<StoreInst>(I);
- Type *ValTy = SI->getValueOperand()->getType();
- return getMemoryOpCost(I->getOpcode(), ValTy, SI->getAlign(),
- SI->getPointerAddressSpace(), CostKind, I);
- }
- case Instruction::Load: {
- const LoadInst *LI = cast<LoadInst>(I);
- return getMemoryOpCost(I->getOpcode(), I->getType(), LI->getAlign(),
- LI->getPointerAddressSpace(), CostKind, I);
- }
+ case Instruction::Store:
+ case Instruction::Load:
+ return getUserCost(I, CostKind);
case Instruction::ZExt:
case Instruction::SExt:
case Instruction::FPToUI:
MaybeAlign Alignment, unsigned AddressSpace,
TTI::TargetCostKind CostKind,
const Instruction *I) {
+ // TODO: Handle other cost kinds.
+ if (CostKind != TTI::TCK_RecipThroughput)
+ return 1;
+
auto LT = TLI->getTypeLegalizationCost(DL, Ty);
if (ST->isMisaligned128StoreSlow() && Opcode == Instruction::Store &&
MaybeAlign Alignment, unsigned AddressSpace,
TTI::TargetCostKind CostKind,
const Instruction *I) {
+ // TODO: Handle other cost kinds.
+ if (CostKind != TTI::TCK_RecipThroughput)
+ return 1;
+
std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Src);
if (ST->hasNEON() && Src->isVectorTy() &&
TTI::TargetCostKind CostKind,
const Instruction *I) {
assert(Opcode == Instruction::Load || Opcode == Instruction::Store);
+ // TODO: Handle other cost kinds.
+ if (CostKind != TTI::TCK_RecipThroughput)
+ return 1;
+
if (Opcode == Instruction::Store)
return BaseT::getMemoryOpCost(Opcode, Src, Alignment, AddressSpace,
CostKind, I);
unsigned
PPCTTIImpl::getUserCost(const User *U, ArrayRef<const Value *> Operands,
TTI::TargetCostKind CostKind) {
- // We already implement getCastInstrCost and perform the vector adjustment there.
- if (!isa<CastInst>(U) && U->getType()->isVectorTy()) {
+ // We already implement getCastInstrCost and getMemoryOpCost and perform
+ // the vector adjustment there.
+ if (isa<CastInst>(U) || isa<LoadInst>(U) || isa<StoreInst>(U))
+ return BaseT::getUserCost(U, Operands, CostKind);
+
+ if (U->getType()->isVectorTy()) {
// Instructions that need to be split should cost more.
std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, U->getType());
return LT.first * BaseT::getUserCost(U, Operands, CostKind);
int Cost = BaseT::getMemoryOpCost(Opcode, Src, Alignment, AddressSpace,
CostKind);
+ // TODO: Handle other cost kinds.
+ if (CostKind != TTI::TCK_RecipThroughput)
+ return Cost;
+
Cost = vectorCostAdjustment(Cost, Opcode, Src, nullptr);
bool IsAltivecType = ST->hasAltivec() &&
const Instruction *I) {
assert(!Src->isVoidTy() && "Invalid type");
+ // TODO: Handle other cost kinds.
+ if (CostKind != TTI::TCK_RecipThroughput)
+ return 1;
+
if (!Src->isVectorTy() && Opcode == Instruction::Load && I != nullptr) {
// Store the load or its truncated or extended value in FoldedValue.
const Instruction *FoldedValue = nullptr;
MaybeAlign Alignment, unsigned AddressSpace,
TTI::TargetCostKind CostKind,
const Instruction *I) {
+ // TODO: Handle other cost kinds.
+ if (CostKind != TTI::TCK_RecipThroughput) {
+ if (isa_and_nonnull<StoreInst>(I)) {
+ Value *Ptr = I->getOperand(1);
+ // Store instruction with index and scale costs 2 Uops.
+ // Check the preceding GEP to identify non-const indices.
+ if (auto *GEP = dyn_cast<GetElementPtrInst>(Ptr)) {
+ if (!all_of(GEP->indices(), [](Value *V) { return isa<Constant>(V); }))
+ return TTI::TCC_Basic * 2;
+ }
+ }
+ return TTI::TCC_Basic;
+ }
+
// Handle non-power-of-two vectors such as <3 x float>
if (VectorType *VTy = dyn_cast<VectorType>(Src)) {
unsigned NumElem = VTy->getNumElements();
return X86TTIImpl::getIntImmCost(Imm, Ty, CostKind);
}
-unsigned
-X86TTIImpl::getUserCost(const User *U, ArrayRef<const Value *> Operands,
- TTI::TargetCostKind CostKind) {
- if (isa<StoreInst>(U)) {
- Value *Ptr = U->getOperand(1);
- // Store instruction with index and scale costs 2 Uops.
- // Check the preceding GEP to identify non-const indices.
- if (auto GEP = dyn_cast<GetElementPtrInst>(Ptr)) {
- if (!all_of(GEP->indices(), [](Value *V) { return isa<Constant>(V); }))
- return TTI::TCC_Basic * 2;
- }
- return TTI::TCC_Basic;
- }
- return BaseT::getUserCost(U, Operands, CostKind);
-}
-
// Return an average cost of Gather / Scatter instruction, maybe improved later
int X86TTIImpl::getGSVectorCost(unsigned Opcode, Type *SrcVTy, Value *Ptr,
unsigned Alignment, unsigned AddressSpace) {
int getIntImmCost(const APInt &Imm, Type *Ty, TTI::TargetCostKind CostKind);
- unsigned getUserCost(const User *U, ArrayRef<const Value *> Operands,
- TTI::TargetCostKind);
-
int getIntImmCostInst(unsigned Opcode, unsigned Idx, const APInt &Imm, Type *Ty,
TTI::TargetCostKind CostKind);
int getIntImmCostIntrin(Intrinsic::ID IID, unsigned Idx, const APInt &Imm,