From 06e8101034e71b95e1338662d27472492dae4d4b Mon Sep 17 00:00:00 2001 From: Jacques Pienaar <jpienaar@google.com> Date: Fri, 30 Aug 2019 13:01:34 -0700 Subject: [PATCH] Add mechanism to dump JIT-compiled objects to files This commit introduces the bits to be able to dump JIT-compiled objects to external files by passing an object cache to OrcJit. The new functionality is tested in mlir-cpu-runner under the flag `dump-object-file`. Closes tensorflow/mlir#95 PiperOrigin-RevId: 266439265 --- .../include/mlir/ExecutionEngine/ExecutionEngine.h | 19 +++++++--- mlir/lib/ExecutionEngine/ExecutionEngine.cpp | 41 +++++++++++++++++----- mlir/lib/Support/JitRunner.cpp | 15 ++++++++ mlir/test/mlir-cpu-runner/simple.mlir | 9 +++++ 4 files changed, 70 insertions(+), 14 deletions(-) diff --git a/mlir/include/mlir/ExecutionEngine/ExecutionEngine.h b/mlir/include/mlir/ExecutionEngine/ExecutionEngine.h index e3ba490..72aacd0 100644 --- a/mlir/include/mlir/ExecutionEngine/ExecutionEngine.h +++ b/mlir/include/mlir/ExecutionEngine/ExecutionEngine.h @@ -49,8 +49,11 @@ public: llvm::MemoryBufferRef ObjBuffer) override; std::unique_ptr<llvm::MemoryBuffer> getObject(const llvm::Module *M) override; + /// Dump cached object to output file `filename`. + void dumpToObjectFile(llvm::StringRef filename); + private: - llvm::StringMap<std::unique_ptr<llvm::MemoryBuffer>> CachedObjects; + llvm::StringMap<std::unique_ptr<llvm::MemoryBuffer>> cachedObjects; }; /// JIT-backed execution engine for MLIR modules. Assumes the module can be /// converted to LLVM IR. For each function, creates a wrapper function with the /// fixed interface void _mlir_funcName(void **) where the only argument is /// interpreted as a list of pointers to the actual arguments of the function, /// followed by a pointer to the result. This allows the engine to provide the /// caller with a generic function pointer that can /// be used to invoke the JIT-compiled function. class ExecutionEngine { public: + ExecutionEngine(bool enableObjectCache); + /// Creates an execution engine for the given module. If `transformer` is /// provided, it will be called on the LLVM module during JIT-compilation and /// can be used, e.g., for reporting or optimization. /// If `sharedLibPaths` are provided, the underlying JIT-compilation will open /// and link the shared libraries for symbol resolution. 
- static llvm::Expected<std::unique_ptr<ExecutionEngine>> - create(ModuleOp m, - std::function<llvm::Error(llvm::Module *)> transformer = {}, - ArrayRef<StringRef> sharedLibPaths = {}); + /// If `enableObjectCache` is set (the default), the JIT compiler will create + /// an internal object cache storing the object generated for the module. + static llvm::Expected<std::unique_ptr<ExecutionEngine>> create( + ModuleOp m, std::function<llvm::Error(llvm::Module *)> transformer = {}, + ArrayRef<StringRef> sharedLibPaths = {}, bool enableObjectCache = true); /// Looks up a packed-argument function with the given name and returns a /// pointer to it. Propagates errors in case of failure. @@ -94,6 +100,9 @@ public: /// the engine. static bool setupTargetTriple(llvm::Module *llvmModule); + /// Dump object code to output file `filename`. + void dumpToObjectFile(llvm::StringRef filename); + private: // Ordering of llvmContext and jit is important for destruction purposes: the // jit must be destroyed before the context. diff --git a/mlir/lib/ExecutionEngine/ExecutionEngine.cpp b/mlir/lib/ExecutionEngine/ExecutionEngine.cpp index dbc59d0..08b8086 100644 --- a/mlir/lib/ExecutionEngine/ExecutionEngine.cpp +++ b/mlir/lib/ExecutionEngine/ExecutionEngine.cpp @@ -22,6 +22,7 @@ #include "mlir/ExecutionEngine/ExecutionEngine.h" #include "mlir/IR/Function.h" #include "mlir/IR/Module.h" +#include "mlir/Support/FileUtilities.h" #include "mlir/Target/LLVMIR.h" #include "llvm/Bitcode/BitcodeReader.h" @@ -37,6 +38,7 @@ #include "llvm/IR/IRBuilder.h" #include "llvm/Support/Error.h" #include "llvm/Support/TargetRegistry.h" +#include "llvm/Support/ToolOutputFile.h" using namespace mlir; using llvm::dbgs; @@ -68,13 +70,13 @@ namespace mlir { void SimpleObjectCache::notifyObjectCompiled(const Module *M, MemoryBufferRef ObjBuffer) { - CachedObjects[M->getModuleIdentifier()] = MemoryBuffer::getMemBufferCopy( + cachedObjects[M->getModuleIdentifier()] = MemoryBuffer::getMemBufferCopy( ObjBuffer.getBuffer(), ObjBuffer.getBufferIdentifier()); } std::unique_ptr<MemoryBuffer> SimpleObjectCache::getObject(const Module *M) { - auto I = CachedObjects.find(M->getModuleIdentifier()); - if (I == CachedObjects.end()) { + auto I = cachedObjects.find(M->getModuleIdentifier()); + if (I == cachedObjects.end()) { dbgs() << "No object for " << M->getModuleIdentifier() << " in cache. Compiling.\n"; return nullptr; @@ -84,6 +86,27 @@ std::unique_ptr<MemoryBuffer> SimpleObjectCache::getObject(const Module *M) { return MemoryBuffer::getMemBuffer(I->second->getMemBufferRef()); } +void SimpleObjectCache::dumpToObjectFile(llvm::StringRef outputFilename) { + // Set up the output file. + std::string errorMessage; + auto file = openOutputFile(outputFilename, &errorMessage); + if (!file) { + llvm::errs() << errorMessage << "\n"; + return; + } + + // Dump the object generated for a single module to the output file. + assert(cachedObjects.size() == 1 && "Expected only one object entry."); + auto &cachedObject = cachedObjects.begin()->second; + file->os() << cachedObject->getBuffer(); + file->keep(); +} + +void ExecutionEngine::dumpToObjectFile(llvm::StringRef filename) { + assert(cache && "object cache is disabled, cannot dump object"); + cache->dumpToObjectFile(filename); +} + // Setup LLVM target triple from the current machine. bool ExecutionEngine::setupTargetTriple(Module *llvmModule) { // Setup the machine properties from the current architecture. @@ -168,11 +191,13 @@ void packFunctionArguments(Module *module) { } } -Expected<std::unique_ptr<ExecutionEngine>> -ExecutionEngine::create(ModuleOp m, - std::function<llvm::Error(llvm::Module *)> transformer, - ArrayRef<StringRef> sharedLibPaths) { - auto engine = std::make_unique<ExecutionEngine>(); +ExecutionEngine::ExecutionEngine(bool enableObjectCache) : cache(enableObjectCache ? new SimpleObjectCache() : nullptr) {} + +Expected<std::unique_ptr<ExecutionEngine>> ExecutionEngine::create( + ModuleOp m, std::function<llvm::Error(llvm::Module *)> transformer, + ArrayRef<StringRef> sharedLibPaths, bool enableObjectCache) { + auto engine = std::make_unique<ExecutionEngine>(enableObjectCache); std::unique_ptr<llvm::LLVMContext> ctx(new llvm::LLVMContext); auto llvmModule = translateModuleToLLVMIR(m); @@ -280,5 +305,4 @@ Error ExecutionEngine::invoke(StringRef name, MutableArrayRef<void *> args) { return Error::success(); } - } // end namespace mlir diff --git a/mlir/lib/Support/JitRunner.cpp b/mlir/lib/Support/JitRunner.cpp index 40f1292..c79a55f 100644 --- a/mlir/lib/Support/JitRunner.cpp +++ b/mlir/lib/Support/JitRunner.cpp @@ -95,6 +95,16 @@ static llvm::cl::list<std::string> llvm::cl::ZeroOrMore, llvm::cl::MiscFlags::CommaSeparated, llvm::cl::cat(clOptionsCategory)); +// CLI variables for debugging. +static llvm::cl::opt<bool> dumpObjectFile( + "dump-object-file", + llvm::cl::desc("Dump JITted-compiled object to file specified with " + "-object-filename (<input file>.o by default).")); + +static llvm::cl::opt<std::string> objectFilename( + "object-filename", + llvm::cl::desc("Dump JITted-compiled object to file <input file>.o")); + static OwningModuleRef parseMLIRInput(StringRef inputFilename, MLIRContext *context) { // Set up the input file. @@ -181,6 +191,11 @@ compileAndExecute(ModuleOp module, StringRef entryPoint, auto expectedFPtr = engine->lookup(entryPoint); if (!expectedFPtr) return expectedFPtr.takeError(); + + if (dumpObjectFile) + engine->dumpToObjectFile(objectFilename.empty() ? 
inputFilename + ".o" + : objectFilename); + void (*fptr)(void **) = *expectedFPtr; (*fptr)(args); diff --git a/mlir/test/mlir-cpu-runner/simple.mlir b/mlir/test/mlir-cpu-runner/simple.mlir index 5fcbd22..6f8d0db 100644 --- a/mlir/test/mlir-cpu-runner/simple.mlir +++ b/mlir/test/mlir-cpu-runner/simple.mlir @@ -4,6 +4,15 @@ // RUN: mlir-cpu-runner %s -O3 -loop-distribute -loop-vectorize | FileCheck %s // RUN: mlir-cpu-runner %s -loop-distribute -loop-vectorize | FileCheck %s +// RUN: cp %s %t +// RUN: mlir-cpu-runner %t -dump-object-file | FileCheck %t +// RUN: ls %t.o +// RUN: rm %t.o + +// RUN: mlir-cpu-runner %s -dump-object-file -object-filename=%T/test.o | FileCheck %s +// RUN: ls %T/test.o +// RUN: rm %T/test.o + func @fabsf(f32) -> f32 func @main(%a : memref<2xf32>, %b : memref<1xf32>) { -- 2.7.4