From 98f08752f76b80a509037986d620b402c48f5115 Mon Sep 17 00:00:00 2001
From: Peter Waller <peter.waller@arm.com>
Date: Thu, 28 Oct 2021 12:14:52 +0000
Subject: [PATCH] [InstCombine][ConstantFolding] Make
 ConstantFoldLoadThroughBitcast TypeSize-aware

The newly added test previously caused the compiler to fail an
assertion. It looks like a straightforward TypeSize upgrade.

Reviewed By: paulwalker-arm

Differential Revision: https://reviews.llvm.org/D112142
---
 llvm/lib/Analysis/ConstantFolding.cpp           |  6 +++---
 llvm/test/Transforms/InstCombine/vscale_load.ll | 27 +++++++++++++++++++++++++
 2 files changed, 30 insertions(+), 3 deletions(-)
 create mode 100644 llvm/test/Transforms/InstCombine/vscale_load.ll

diff --git a/llvm/lib/Analysis/ConstantFolding.cpp b/llvm/lib/Analysis/ConstantFolding.cpp
index 4cc3fcd..3ed3b89 100644
--- a/llvm/lib/Analysis/ConstantFolding.cpp
+++ b/llvm/lib/Analysis/ConstantFolding.cpp
@@ -352,9 +352,9 @@ Constant *llvm::ConstantFoldLoadThroughBitcast(Constant *C, Type *DestTy,
                                                const DataLayout &DL) {
   do {
     Type *SrcTy = C->getType();
-    uint64_t DestSize = DL.getTypeSizeInBits(DestTy);
-    uint64_t SrcSize = DL.getTypeSizeInBits(SrcTy);
-    if (SrcSize < DestSize)
+    TypeSize DestSize = DL.getTypeSizeInBits(DestTy);
+    TypeSize SrcSize = DL.getTypeSizeInBits(SrcTy);
+    if (!TypeSize::isKnownGE(SrcSize, DestSize))
       return nullptr;
 
     // Catch the obvious splat cases (since all-zeros can coerce non-integral
diff --git a/llvm/test/Transforms/InstCombine/vscale_load.ll b/llvm/test/Transforms/InstCombine/vscale_load.ll
new file mode 100644
index 0000000..4041ffc
--- /dev/null
+++ b/llvm/test/Transforms/InstCombine/vscale_load.ll
@@ -0,0 +1,27 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt < %s -instcombine -S -verify | FileCheck %s
+
+define <2 x i8> @constprop_load_bitcast(<vscale x 16 x i8>* %ptr) {
+; CHECK-LABEL: @constprop_load_bitcast(
+; CHECK-NEXT:    store <vscale x 16 x i8> zeroinitializer, <vscale x 16 x i8>* [[PTR:%.*]], align 16
+; CHECK-NEXT:    ret <2 x i8> zeroinitializer
+;
+  store <vscale x 16 x i8> zeroinitializer, <vscale x 16 x i8>* %ptr, align 16
+  %cast_to_fixed = bitcast <vscale x 16 x i8>* %ptr to <2 x i8>*
+  %a = load <2 x i8>, <2 x i8>* %cast_to_fixed, align 16
+  ret <2 x i8> %a
+}
+
+; vscale-sized vec not guaranteed to fill destination.
+define <8 x i8> @constprop_load_bitcast_neg(<vscale x 1 x i8>* %ptr) {
+; CHECK-LABEL: @constprop_load_bitcast_neg(
+; CHECK-NEXT:    store <vscale x 1 x i8> zeroinitializer, <vscale x 1 x i8>* [[PTR:%.*]], align 16
+; CHECK-NEXT:    [[CAST_TO_FIXED:%.*]] = bitcast <vscale x 1 x i8>* [[PTR]] to <8 x i8>*
+; CHECK-NEXT:    [[A:%.*]] = load <8 x i8>, <8 x i8>* [[CAST_TO_FIXED]], align 16
+; CHECK-NEXT:    ret <8 x i8> [[A]]
+;
+  store <vscale x 1 x i8> zeroinitializer, <vscale x 1 x i8>* %ptr, align 16
+  %cast_to_fixed = bitcast <vscale x 1 x i8>* %ptr to <8 x i8>*
+  %a = load <8 x i8>, <8 x i8>* %cast_to_fixed, align 16
+  ret <8 x i8> %a
+}
-- 
2.7.4