From 067c660ac99b3d8cfe86264c16aad7d75c512793 Mon Sep 17 00:00:00 2001 From: Guillaume Chatelet Date: Wed, 24 Jun 2020 21:26:03 +0000 Subject: [PATCH] Use concrete natural type alignment for masked load/store operations instead of 0. Summary: By the time the cost of a masked load/store intrinsic is computed, the alignment must be a concrete value, so replace the placeholder alignment of 0 with the type's ABI alignment queried from the DataLayout. --- llvm/include/llvm/CodeGen/BasicTTIImpl.h | 16 +++++++++++----- 1 file changed, 11 insertions(+), 5 deletions(-) diff --git a/llvm/include/llvm/CodeGen/BasicTTIImpl.h b/llvm/include/llvm/CodeGen/BasicTTIImpl.h index 4962c0d..497f6f7 100644 --- a/llvm/include/llvm/CodeGen/BasicTTIImpl.h +++ b/llvm/include/llvm/CodeGen/BasicTTIImpl.h @@ -1387,12 +1387,18 @@ public: case Intrinsic::lifetime_end: case Intrinsic::sideeffect: return 0; - case Intrinsic::masked_store: - return ConcreteTTI->getMaskedMemoryOpCost(Instruction::Store, Tys[0], 0, + case Intrinsic::masked_store: { + Type *Ty = Tys[0]; + unsigned TyAlign = ConcreteTTI->DL.getABITypeAlignment(Ty); + return ConcreteTTI->getMaskedMemoryOpCost(Instruction::Store, Ty, TyAlign, 0, CostKind); - case Intrinsic::masked_load: - return ConcreteTTI->getMaskedMemoryOpCost(Instruction::Load, RetTy, 0, 0, - CostKind); + } + case Intrinsic::masked_load: { + Type *Ty = RetTy; + unsigned TyAlign = ConcreteTTI->DL.getABITypeAlignment(Ty); + return ConcreteTTI->getMaskedMemoryOpCost(Instruction::Load, Ty, TyAlign, + 0, CostKind); + } case Intrinsic::experimental_vector_reduce_add: return ConcreteTTI->getArithmeticReductionCost(Instruction::Add, VecOpTy, /*IsPairwiseForm=*/false, -- 2.7.4