static const uint64_t kDefaultShadowScale = 3;
static const uint64_t kDefaultShadowOffset32 = 1ULL << 29;
static const uint64_t kDefaultShadowOffset64 = 1ULL << 44;
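+// All-ones sentinel: no platform uses this value as a real shadow offset, so
+// it can safely mark mappings whose base is only known at runtime.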
+static const uint64_t kDynamicShadowSentinel = ~(uint64_t)0;
static const uint64_t kIOSShadowOffset32 = 1ULL << 30;
static const uint64_t kIOSShadowOffset64 = 0x120200000;
static const uint64_t kIOSSimShadowOffset32 = 1ULL << 30;
static const uint64_t kFreeBSD_ShadowOffset32 = 1ULL << 30;
static const uint64_t kFreeBSD_ShadowOffset64 = 1ULL << 46;
static const uint64_t kWindowsShadowOffset32 = 3ULL << 28;
-// TODO(wwchrome): Experimental for asan Win64, may change.
-static const uint64_t kWindowsShadowOffset64 = 0x1ULL << 45; // 32TB.
+// The shadow memory space is dynamically allocated.
+static const uint64_t kWindowsShadowOffset64 = kDynamicShadowSentinel;
static const size_t kMinStackMallocSize = 1 << 6; // 64B
static const size_t kMaxStackMallocSize = 1 << 16; // 64K
static const char *const kAsanOptionDetectUseAfterReturn =
"__asan_option_detect_stack_use_after_return";
+static const char *const kAsanShadowMemoryDynamicAddress =
+ "__asan_shadow_memory_dynamic_address";
+
static const char *const kAsanAllocaPoison = "__asan_alloca_poison";
static const char *const kAsanAllocasUnpoison = "__asan_allocas_unpoison";
"asan-always-slow-path",
cl::desc("use instrumentation with slow path for all accesses"), cl::Hidden,
cl::init(false));
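+// Primarily a testing knob: forces the dynamic-shadow code path even on
+// targets whose shadow offset is a compile-time constant.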
+static cl::opt<bool> ClForceDynamicShadow(
+ "asan-force-dynamic-shadow",
+ cl::desc("Load shadow address into a local variable for each function"),
+ cl::Hidden, cl::init(false));
+
// This flag limits the number of instructions to be instrumented
// in any given BB. Normally, this should be set to unlimited (INT_MAX),
// but due to http://llvm.org/bugs/show_bug.cgi?id=12652 we temporarily
// we could OR the constant in a single instruction, but it's more
// efficient to load it once and use indexed addressing.
Mapping.OrShadowOffset = !IsAArch64 && !IsPPC64 && !IsSystemZ
- && !(Mapping.Offset & (Mapping.Offset - 1));
+ && !(Mapping.Offset & (Mapping.Offset - 1))
+ && Mapping.Offset != kDynamicShadowSentinel;
return Mapping;
}
bool UseAfterScope = false)
: FunctionPass(ID), CompileKernel(CompileKernel || ClEnableKasan),
Recover(Recover || ClRecover),
- UseAfterScope(UseAfterScope || ClUseAfterScope) {
+ UseAfterScope(UseAfterScope || ClUseAfterScope),
+ LocalDynamicShadow(nullptr) {
initializeAddressSanitizerPass(*PassRegistry::getPassRegistry());
}
const char *getPassName() const override {
Value *memToShadow(Value *Shadow, IRBuilder<> &IRB);
bool runOnFunction(Function &F) override;
bool maybeInsertAsanInitAtFunctionEntry(Function &F);
+ void maybeInsertDynamicShadowAtFunctionEntry(Function &F);
void markEscapedLocalAllocas(Function &F);
bool doInitialization(Module &M) override;
bool doFinalization(Module &M) override;
FunctionStateRAII(AddressSanitizer *Pass) : Pass(Pass) {
assert(Pass->ProcessedAllocas.empty() &&
"last pass forgot to clear cache");
+ assert(!Pass->LocalDynamicShadow &&
+ "last pass forgot to clear dynamic shadow");
+ }
+ ~FunctionStateRAII() {
+ Pass->LocalDynamicShadow = nullptr;
+ Pass->ProcessedAllocas.clear();
}
- ~FunctionStateRAII() { Pass->ProcessedAllocas.clear(); }
};
LLVMContext *C;
Function *AsanMemoryAccessCallbackSized[2][2];
Function *AsanMemmove, *AsanMemcpy, *AsanMemset;
InlineAsm *EmptyAsm;
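+ // Load of the dynamic shadow base, cached per function; null when the
+ // shadow offset is a compile-time constant.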
+ Value *LocalDynamicShadow;
GlobalsMetadata GlobalsMD;
DenseMap<const AllocaInst *, bool> ProcessedAllocas;
Shadow = IRB.CreateLShr(Shadow, Mapping.Scale);
if (Mapping.Offset == 0) return Shadow;
// (Shadow >> scale) | offset
+ Value *ShadowBase;
+ if (LocalDynamicShadow)
+ ShadowBase = LocalDynamicShadow;
+ else
+ ShadowBase = ConstantInt::get(IntptrTy, Mapping.Offset);
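+ // E.g. with the default 64-bit mapping (Scale = 3, Offset = 1ULL << 44),
+ // address A is checked via the shadow byte at (A >> 3) + 0x100000000000;
+ // with a dynamic base, that constant is replaced by the value loaded at
+ // function entry.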
if (Mapping.OrShadowOffset)
- return IRB.CreateOr(Shadow, ConstantInt::get(IntptrTy, Mapping.Offset));
+ return IRB.CreateOr(Shadow, ShadowBase);
else
- return IRB.CreateAdd(Shadow, ConstantInt::get(IntptrTy, Mapping.Offset));
+ return IRB.CreateAdd(Shadow, ShadowBase);
}
// Instrument memset/memmove/memcpy
// Skip memory accesses inserted by another instrumentation.
if (I->getMetadata("nosanitize")) return nullptr;
+ // Do not instrument the load fetching the dynamic shadow address.
+ if (LocalDynamicShadow == I)
+ return nullptr;
+
Value *PtrOperand = nullptr;
const DataLayout &DL = I->getModule()->getDataLayout();
if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
return false;
}
+void AddressSanitizer::maybeInsertDynamicShadowAtFunctionEntry(Function &F) {
+ // Generate code only when dynamic addressing is needed.
+ if (!ClForceDynamicShadow && Mapping.Offset != kDynamicShadowSentinel)
+ return;
+
+ IRBuilder<> IRB(&F.front().front());
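+ // The runtime defines this global and stores the base of the dynamically
+ // allocated shadow region into it during initialization.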
+ Value *GlobalDynamicAddress = F.getParent()->getOrInsertGlobal(
+ kAsanShadowMemoryDynamicAddress, IntptrTy);
+ LocalDynamicShadow = IRB.CreateLoad(GlobalDynamicAddress);
+}
+
void AddressSanitizer::markEscapedLocalAllocas(Function &F) {
// Find the one possible call to llvm.localescape and pre-mark allocas passed
// to it as uninteresting. This assumes we haven't started processing allocas
FunctionStateRAII CleanupObj(this);
+ maybeInsertDynamicShadowAtFunctionEntry(F);
+
// We can't instrument allocas used with llvm.localescape. Only static allocas
// can be passed to that intrinsic.
markEscapedLocalAllocas(F);
--- /dev/null
+; Test basic address sanitizer instrumentation.
+;
+; RUN: opt -asan -asan-module -S < %s | FileCheck %s
+
+target triple = "x86_64-pc-windows-msvc"
+; CHECK: @llvm.global_ctors = {{.*}}@asan.module_ctor
+
+define i32 @test_load(i32* %a) sanitize_address {
+; The first instrumentation instruction in the function must be the load of
+; the dynamic shadow address into a local variable.
+; CHECK-LABEL: @test_load
+; CHECK: entry:
+; CHECK-NEXT: %[[SHADOW:[^ ]*]] = load i64, i64* @__asan_shadow_memory_dynamic_address
+
+; The loaded shadow base is added into the shadow address computation.
+; CHECK: add i64 %{{.*}}, %[[SHADOW]]
+
+entry:
+ %tmp1 = load i32, i32* %a, align 4
+ ret i32 %tmp1
+}
+
+define i32 @__asan_options(i32* %a) sanitize_address {
+; ASan functions themselves are not instrumented. An ASan function may be
+; called by __asan_init before the shadow is initialized, which may lead to
+; incorrect behavior of the instrumented code.
+; CHECK-LABEL: @__asan_options
+; CHECK: entry:
+; CHECK-NEXT: %tmp1 = load i32, i32* %a, align 4
+; CHECK-NEXT: ret i32 %tmp1
+
+entry:
+ %tmp1 = load i32, i32* %a, align 4
+ ret i32 %tmp1
+}
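+
+; A minimal companion sketch (not part of the original test): stores take the
+; same entry-block load of the dynamic shadow base. The name @test_store is
+; illustrative.
+define void @test_store(i32* %a) sanitize_address {
+; CHECK-LABEL: @test_store
+; CHECK: entry:
+; CHECK-NEXT: %[[SHADOW2:[^ ]*]] = load i64, i64* @__asan_shadow_memory_dynamic_address
+
+entry:
+ store i32 42, i32* %a, align 4
+ ret void
+}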