From 7c8647b26f053c01071b8efb3a98d0cd1229f413 Mon Sep 17 00:00:00 2001
From: Philip Reames
Date: Thu, 25 Apr 2019 01:18:56 +0000
Subject: [PATCH] [InstCombine] Be consistent w/handling of masked intrinsics
 style wise [NFC]

llvm-svn: 359160
---
 llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp  | 9 ++++-----
 llvm/lib/Transforms/InstCombine/InstCombineInternal.h | 2 ++
 2 files changed, 6 insertions(+), 5 deletions(-)

diff --git a/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp b/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
index b1bb928..fbdc1b7 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
@@ -1179,8 +1179,7 @@ static APInt possiblyDemandedEltsInMask(Value *Mask) {
 
 // TODO, Obvious Missing Transforms:
 // * Narrow width by halfs excluding zero/undef lanes
-static Value *simplifyMaskedLoad(const IntrinsicInst &II,
-                                 InstCombiner::BuilderTy &Builder) {
+Value *InstCombiner::simplifyMaskedLoad(IntrinsicInst &II) {
   Value *LoadPtr = II.getArgOperand(0);
   unsigned Alignment = cast<ConstantInt>(II.getArgOperand(1))->getZExtValue();
 
@@ -1241,7 +1240,7 @@ Instruction *InstCombiner::simplifyMaskedStore(IntrinsicInst &II) {
 // * Narrow width by halfs excluding zero/undef lanes
 // * Vector splat address w/known mask -> scalar load
 // * Vector incrementing address -> vector masked load
-static Instruction *simplifyMaskedGather(IntrinsicInst &II, InstCombiner &IC) {
+Instruction *InstCombiner::simplifyMaskedGather(IntrinsicInst &II) {
   return nullptr;
 }
 
@@ -2018,13 +2017,13 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
     break;
   }
   case Intrinsic::masked_load:
-    if (Value *SimplifiedMaskedOp = simplifyMaskedLoad(*II, Builder))
+    if (Value *SimplifiedMaskedOp = simplifyMaskedLoad(*II))
       return replaceInstUsesWith(CI, SimplifiedMaskedOp);
     break;
   case Intrinsic::masked_store:
     return simplifyMaskedStore(*II);
   case Intrinsic::masked_gather:
-    return simplifyMaskedGather(*II, *this);
+    return simplifyMaskedGather(*II);
   case Intrinsic::masked_scatter:
     return simplifyMaskedScatter(*II);
   case Intrinsic::launder_invariant_group:
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineInternal.h b/llvm/lib/Transforms/InstCombine/InstCombineInternal.h
index 0488f7b..27b8ea8 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineInternal.h
+++ b/llvm/lib/Transforms/InstCombine/InstCombineInternal.h
@@ -478,7 +478,9 @@ private:
   Instruction *transformCallThroughTrampoline(CallBase &Call,
                                               IntrinsicInst &Tramp);
 
+  Value *simplifyMaskedLoad(IntrinsicInst &II);
   Instruction *simplifyMaskedStore(IntrinsicInst &II);
+  Instruction *simplifyMaskedGather(IntrinsicInst &II);
   Instruction *simplifyMaskedScatter(IntrinsicInst &II);
 
   /// Transform (zext icmp) to bitwise / integer operations in order to
-- 
2.7.4