From 091fed94ae701005ccfde290713c4908e25008d4 Mon Sep 17 00:00:00 2001
From: Evgeniy Stepanov
Date: Tue, 15 May 2018 21:28:25 +0000
Subject: [PATCH] [msan] Instrument masked.store, masked.load intrinsics.

Summary: Instrument masked store/load intrinsics.

Reviewers: kcc

Subscribers: hiraditya, llvm-commits

Differential Revision: https://reviews.llvm.org/D46785

llvm-svn: 332402
---
 .../Transforms/Instrumentation/MemorySanitizer.cpp |  87 +++++++++++++++
 .../MemorySanitizer/masked-store-load.ll           | 124 +++++++++++++++++++++
 2 files changed, 211 insertions(+)
 create mode 100644 llvm/test/Instrumentation/MemorySanitizer/masked-store-load.ll

diff --git a/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp b/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
index 2fad41a..eb2af4f 100644
--- a/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
+++ b/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
@@ -2537,11 +2537,98 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
     insertShadowCheck(Shadow, Origin, &I);
   }
 
+  void handleMaskedStore(IntrinsicInst &I) {
+    IRBuilder<> IRB(&I);
+    Value *V = I.getArgOperand(0);
+    Value *Addr = I.getArgOperand(1);
+    unsigned Align = cast<ConstantInt>(I.getArgOperand(2))->getZExtValue();
+    Value *Mask = I.getArgOperand(3);
+    Value *Shadow = getShadow(V);
+
+    Value *ShadowPtr;
+    Value *OriginPtr;
+    std::tie(ShadowPtr, OriginPtr) = getShadowOriginPtr(
+        Addr, IRB, Shadow->getType(), Align, /*isStore*/ true);
+
+    if (ClCheckAccessAddress) {
+      insertShadowCheck(Addr, &I);
+      // Uninitialized mask is kind of like uninitialized address, but not as
+      // scary.
+      insertShadowCheck(Mask, &I);
+    }
+
+    IRB.CreateMaskedStore(Shadow, ShadowPtr, Align, Mask);
+
+    if (MS.TrackOrigins) {
+      auto &DL = F.getParent()->getDataLayout();
+      paintOrigin(IRB, getOrigin(V), OriginPtr,
+                  DL.getTypeStoreSize(Shadow->getType()),
+                  std::max(Align, kMinOriginAlignment));
+    }
+  }
+
+  bool handleMaskedLoad(IntrinsicInst &I) {
+    IRBuilder<> IRB(&I);
+    Value *Addr = I.getArgOperand(0);
+    unsigned Align = cast<ConstantInt>(I.getArgOperand(1))->getZExtValue();
+    Value *Mask = I.getArgOperand(2);
+    Value *PassThru = I.getArgOperand(3);
+
+    Type *ShadowTy = getShadowTy(&I);
+    Value *ShadowPtr, *OriginPtr;
+    if (PropagateShadow) {
+      std::tie(ShadowPtr, OriginPtr) =
+          getShadowOriginPtr(Addr, IRB, ShadowTy, Align, /*isStore*/ false);
+      setShadow(&I, IRB.CreateMaskedLoad(ShadowPtr, Align, Mask,
+                                         getShadow(PassThru), "_msmaskedld"));
+    } else {
+      setShadow(&I, getCleanShadow(&I));
+    }
+
+    if (ClCheckAccessAddress) {
+      insertShadowCheck(Addr, &I);
+      insertShadowCheck(Mask, &I);
+    }
+
+    if (MS.TrackOrigins) {
+      if (PropagateShadow) {
+        // Choose between PassThru's and the loaded value's origins.
+        Value *MaskedPassThruShadow = IRB.CreateAnd(
+            getShadow(PassThru), IRB.CreateSExt(IRB.CreateNeg(Mask), ShadowTy));
+
+        Value *Acc = IRB.CreateExtractElement(
+            MaskedPassThruShadow, ConstantInt::get(IRB.getInt32Ty(), 0));
+        for (int i = 1, N = PassThru->getType()->getVectorNumElements(); i < N;
+             ++i) {
+          Value *More = IRB.CreateExtractElement(
+              MaskedPassThruShadow, ConstantInt::get(IRB.getInt32Ty(), i));
+          Acc = IRB.CreateOr(Acc, More);
+        }
+
+        Value *Origin = IRB.CreateSelect(
+            IRB.CreateICmpNE(Acc, Constant::getNullValue(Acc->getType())),
+            getOrigin(PassThru), IRB.CreateLoad(OriginPtr));
+
+        setOrigin(&I, Origin);
+      } else {
+        setOrigin(&I, getCleanOrigin());
+      }
+    }
+    return true;
+  }
+
   void visitIntrinsicInst(IntrinsicInst &I) {
     switch (I.getIntrinsicID()) {
     case Intrinsic::bswap:
       handleBswap(I);
       break;
+    case Intrinsic::masked_store:
+      handleMaskedStore(I);
+      break;
+    case Intrinsic::masked_load:
+      handleMaskedLoad(I);
+      break;
     case Intrinsic::x86_sse_stmxcsr:
       handleStmxcsr(I);
       break;
diff --git a/llvm/test/Instrumentation/MemorySanitizer/masked-store-load.ll b/llvm/test/Instrumentation/MemorySanitizer/masked-store-load.ll
new file mode 100644
index 0000000..9391c9f
--- /dev/null
+++ b/llvm/test/Instrumentation/MemorySanitizer/masked-store-load.ll
@@ -0,0 +1,124 @@
+; RUN: opt < %s -msan -msan-check-access-address=0 -S | FileCheck %s
+; RUN: opt < %s -msan -msan-check-access-address=0 -msan-track-origins=1 -S | FileCheck %s --check-prefixes=CHECK,CHECK-ORIGIN
+; RUN: opt < %s -msan -msan-check-access-address=1 -S | FileCheck %s --check-prefix=ADDR
+
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+declare void @llvm.masked.store.v4i64.p0v4i64(<4 x i64>, <4 x i64>*, i32, <4 x i1>)
+declare <4 x double> @llvm.masked.load.v4f64.p0v4f64(<4 x double>*, i32, <4 x i1>, <4 x double>)
+
+define void @Store(<4 x i64>* %p, <4 x i64> %v, <4 x i1> %mask) sanitize_memory {
+entry:
+  tail call void @llvm.masked.store.v4i64.p0v4i64(<4 x i64> %v, <4 x i64>* %p, i32 1, <4 x i1> %mask)
+  ret void
+}
+
+; CHECK-LABEL: @Store(
+; CHECK: %[[A:.*]] = load <4 x i64>, {{.*}}@__msan_param_tls to i64), i64 8)
+; CHECK-ORIGIN: %[[O:.*]] = load i32, {{.*}}@__msan_param_origin_tls to i64), i64 8)
+; CHECK: %[[B:.*]] = ptrtoint <4 x i64>* %p to i64
+; CHECK: %[[C:.*]] = xor i64 %[[B]], 87960930222080
+; CHECK: %[[D:.*]] = inttoptr i64 %[[C]] to <4 x i64>*
+; CHECK-ORIGIN: %[[E:.*]] = add i64 %[[C]], 17592186044416
+; CHECK-ORIGIN: %[[F:.*]] = and i64 %[[E]], -4
+; CHECK-ORIGIN: %[[G:.*]] = inttoptr i64 %[[F]] to i32*
+; CHECK: call void @llvm.masked.store.v4i64.p0v4i64(<4 x i64> %[[A]], <4 x i64>* %[[D]], i32 1, <4 x i1> %mask)
+; CHECK-ORIGIN: store i32 %[[O]], i32* %[[G]], align 4
+; CHECK-ORIGIN: getelementptr i32, i32* %[[G]], i32 1
+; CHECK-ORIGIN: store i32 %[[O]], i32* {{.*}}, align 4
+; CHECK-ORIGIN: getelementptr i32, i32* %[[G]], i32 2
+; CHECK-ORIGIN: store i32 %[[O]], i32* {{.*}}, align 4
+; CHECK-ORIGIN: getelementptr i32, i32* %[[G]], i32 3
+; CHECK-ORIGIN: store i32 %[[O]], i32* {{.*}}, align 4
+; CHECK-ORIGIN: getelementptr i32, i32* %[[G]], i32 4
+; CHECK-ORIGIN: store i32 %[[O]], i32* {{.*}}, align 4
+; CHECK-ORIGIN: getelementptr i32, i32* %[[G]], i32 5
+; CHECK-ORIGIN: store i32 %[[O]], i32* {{.*}}, align 4
+; CHECK-ORIGIN: getelementptr i32, i32* %[[G]], i32 6
+; CHECK-ORIGIN: store i32 %[[O]], i32* {{.*}}, align 4
+; CHECK-ORIGIN: getelementptr i32, i32* %[[G]], i32 7
+; CHECK-ORIGIN: store i32 %[[O]], i32* {{.*}}, align 4
+; CHECK: tail call void @llvm.masked.store.v4i64.p0v4i64(<4 x i64> %v, <4 x i64>* %p, i32 1, <4 x i1> %mask)
+; CHECK: ret void
+
+; ADDR-LABEL: @Store(
+; ADDR: %[[MASKSHADOW:.*]] = load <4 x i1>, {{.*}}@__msan_param_tls to i64), i64 40)
+; ADDR: %[[ADDRSHADOW:.*]] = load i64, {{.*}}[100 x i64]* @__msan_param_tls, i32 0, i32 0)
+
+; ADDR: %[[ADDRBAD:.*]] = icmp ne i64 %[[ADDRSHADOW]], 0
+; ADDR: br i1 %[[ADDRBAD]], label {{.*}}, label {{.*}}
+; ADDR: call void @__msan_warning_noreturn()
+
+; ADDR: %[[MASKSHADOWFLAT:.*]] = bitcast <4 x i1> %[[MASKSHADOW]] to i4
+; ADDR: %[[MASKBAD:.*]] = icmp ne i4 %[[MASKSHADOWFLAT]], 0
+; ADDR: br i1 %[[MASKBAD]], label {{.*}}, label {{.*}}
+; ADDR: call void @__msan_warning_noreturn()
+
+; ADDR: tail call void @llvm.masked.store.v4i64.p0v4i64(<4 x i64> %v, <4 x i64>* %p, i32 1, <4 x i1> %mask)
+; ADDR: ret void
+
+
+define <4 x double> @Load(<4 x double>* %p, <4 x double> %v, <4 x i1> %mask) sanitize_memory {
+entry:
+  %x = call <4 x double> @llvm.masked.load.v4f64.p0v4f64(<4 x double>* %p, i32 1, <4 x i1> %mask, <4 x double> %v)
+  ret <4 x double> %x
+}
+
+; CHECK-LABEL: @Load(
+; CHECK: %[[A:.*]] = load <4 x i64>, {{.*}}@__msan_param_tls to i64), i64 8)
+; CHECK-ORIGIN: %[[O:.*]] = load i32, {{.*}}@__msan_param_origin_tls to i64), i64 8)
+; CHECK: %[[B:.*]] = ptrtoint <4 x double>* %p to i64
+; CHECK: %[[C:.*]] = xor i64 %[[B]], 87960930222080
+; CHECK: %[[D:.*]] = inttoptr i64 %[[C]] to <4 x i64>*
+; CHECK-ORIGIN: %[[E:.*]] = add i64 %[[C]], 17592186044416
+; CHECK-ORIGIN: %[[F:.*]] = and i64 %[[E]], -4
+; CHECK-ORIGIN: %[[G:.*]] = inttoptr i64 %[[F]] to i32*
+; CHECK: %[[E:.*]] = call <4 x i64> @llvm.masked.load.v4i64.p0v4i64(<4 x i64>* %[[D]], i32 1, <4 x i1> %mask, <4 x i64> %[[A]])
+; CHECK-ORIGIN: %[[H:.*]] = load i32, i32* %[[G]]
+; CHECK-ORIGIN: %[[O2:.*]] = select i1 %{{.*}}, i32 %[[O]], i32 %[[H]]
+; CHECK: %[[X:.*]] = call <4 x double> @llvm.masked.load.v4f64.p0v4f64(<4 x double>* %p, i32 1, <4 x i1> %mask, <4 x double> %v)
+; CHECK: store <4 x i64> %[[E]], {{.*}}@__msan_retval_tls
+; CHECK-ORIGIN: store i32 %[[O2]], i32* @__msan_retval_origin_tls
+; CHECK: ret <4 x double> %[[X]]
+
+; ADDR-LABEL: @Load(
+; ADDR: %[[MASKSHADOW:.*]] = load <4 x i1>, {{.*}}@__msan_param_tls to i64), i64 40)
+; ADDR: %[[ADDRSHADOW:.*]] = load i64, {{.*}}[100 x i64]* @__msan_param_tls, i32 0, i32 0)
+
+; ADDR: %[[ADDRBAD:.*]] = icmp ne i64 %[[ADDRSHADOW]], 0
+; ADDR: br i1 %[[ADDRBAD]], label {{.*}}, label {{.*}}
+; ADDR: call void @__msan_warning_noreturn()
+
+; ADDR: %[[MASKSHADOWFLAT:.*]] = bitcast <4 x i1> %[[MASKSHADOW]] to i4
+; ADDR: %[[MASKBAD:.*]] = icmp ne i4 %[[MASKSHADOWFLAT]], 0
+; ADDR: br i1 %[[MASKBAD]], label {{.*}}, label {{.*}}
+; ADDR: call void @__msan_warning_noreturn()
+
+; ADDR: = call <4 x double> @llvm.masked.load.v4f64.p0v4f64(<4 x double>* %p, i32 1, <4 x i1> %mask, <4 x double> %v)
+; ADDR: ret <4 x double>
+
+define void @StoreNoSanitize(<4 x i64>* %p, <4 x i64> %v, <4 x i1> %mask) {
+entry:
+  tail call void @llvm.masked.store.v4i64.p0v4i64(<4 x i64> %v, <4 x i64>* %p, i32 1, <4 x i1> %mask)
+  ret void
+}
+
+; CHECK-LABEL: @StoreNoSanitize(
+; CHECK: %[[B:.*]] = ptrtoint <4 x i64>* %p to i64
+; CHECK: %[[C:.*]] = xor i64 %[[B]], 87960930222080
+; CHECK: %[[D:.*]] = inttoptr i64 %[[C]] to <4 x i64>*
+; CHECK: call void @llvm.masked.store.v4i64.p0v4i64(<4 x i64> zeroinitializer, <4 x i64>* %[[D]], i32 1, <4 x i1> %mask)
+; CHECK: tail call void @llvm.masked.store.v4i64.p0v4i64(<4 x i64> %v, <4 x i64>* %p, i32 1, <4 x i1> %mask)
+; CHECK: ret void
+
+define <4 x double> @LoadNoSanitize(<4 x double>* %p, <4 x double> %v, <4 x i1> %mask) {
+entry:
+  %x = call <4 x double> @llvm.masked.load.v4f64.p0v4f64(<4 x double>* %p, i32 1, <4 x i1> %mask, <4 x double> %v)
+  ret <4 x double> %x
+}
+
+; CHECK-LABEL: @LoadNoSanitize(
+; CHECK: %[[X:.*]] = call <4 x double> @llvm.masked.load.v4f64.p0v4f64(<4 x double>* %p, i32 1, <4 x i1> %mask, <4 x double> %v)
+; CHECK: store <4 x i64> zeroinitializer, {{.*}}@__msan_retval_tls to <4 x i64>*)
+; CHECK: ret <4 x double> %[[X]]
-- 
2.7.4
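
Editorial note, appended for context (not part of the patch): the module below is a small, hypothetical input for opt -msan. The function @CopyMasked and the names %dst, %src, %passthru, %mask and %val are invented for this sketch; the datalayout and triple are copied from the test above. With this change, the masked store is routed to handleMaskedStore, which writes the shadow of %val through a mirrored llvm.masked.store to the shadow of %dst with the same mask and alignment, and the masked load is routed to handleMaskedLoad, which computes the result's shadow with a mirrored llvm.masked.load whose pass-through operand is the shadow of %passthru.

; Hypothetical example module (editorial sketch, not from the patch).
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
target triple = "x86_64-unknown-linux-gnu"

declare <4 x i64> @llvm.masked.load.v4i64.p0v4i64(<4 x i64>*, i32, <4 x i1>, <4 x i64>)
declare void @llvm.masked.store.v4i64.p0v4i64(<4 x i64>, <4 x i64>*, i32, <4 x i1>)

; Copy the lanes selected by %mask from %src to %dst; lanes disabled in the
; load fall back to %passthru.
define void @CopyMasked(<4 x i64>* %dst, <4 x i64>* %src, <4 x i64> %passthru, <4 x i1> %mask) sanitize_memory {
entry:
  %val = call <4 x i64> @llvm.masked.load.v4i64.p0v4i64(<4 x i64>* %src, i32 1, <4 x i1> %mask, <4 x i64> %passthru)
  call void @llvm.masked.store.v4i64.p0v4i64(<4 x i64> %val, <4 x i64>* %dst, i32 1, <4 x i1> %mask)
  ret void
}

Run through the first RUN line above (opt -msan -msan-check-access-address=0 -S), this should yield a shadow masked load and a shadow masked store analogous to the CHECK patterns for @Load and @Store.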