/// that will be loaded are specified via `numPaths` and `sharedLibPaths`
/// respectively.
/// TODO: figure out other options.
-MLIR_CAPI_EXPORTED MlirExecutionEngine
-mlirExecutionEngineCreate(MlirModule op, int optLevel, int numPaths,
- const MlirStringRef *sharedLibPaths);
+MLIR_CAPI_EXPORTED MlirExecutionEngine mlirExecutionEngineCreate(
+ MlirModule op, int optLevel, int numPaths,
+ const MlirStringRef *sharedLibPaths, bool enableObjectDump);
/// Destroy an ExecutionEngine instance.
MLIR_CAPI_EXPORTED void mlirExecutionEngineDestroy(MlirExecutionEngine jit);
/// Dump cached object to output file `filename`.
void dumpToObjectFile(StringRef filename);
+ /// Returns `true` if cache hasn't been populated yet.
+ bool isEmpty();
+
private:
llvm::StringMap<std::unique_ptr<llvm::MemoryBuffer>> cachedObjects;
};
/// If `enableObjectCache` is set, the JIT compiler will create one to store
/// the object generated for the given module. The contents of the cache can
- /// be dumped to a file via the `dumpToObjectfile` method.
- bool enableObjectCache = false;
+ /// be dumped to a file via the `dumpToObjectFile` method.
+ bool enableObjectDump = false;
/// If enable `enableGDBNotificationListener` is set, the JIT compiler will
/// notify the llvm's global GDB notification listener.
/// be used to invoke the JIT-compiled function.
class ExecutionEngine {
public:
- ExecutionEngine(bool enableObjectCache, bool enableGDBNotificationListener,
+ ExecutionEngine(bool enableObjectDump, bool enableGDBNotificationListener,
bool enablePerfNotificationListener);
/// Creates an execution engine for the given MLIR IR.
/// Underlying cache.
std::unique_ptr<SimpleObjectCache> cache;
+ /// Names of functions that may be looked up.
+ std::vector<std::string> functionNames;
+
/// GDB notification listener.
llvm::JITEventListener *gdbListener;
//----------------------------------------------------------------------------
py::class_<PyExecutionEngine>(m, "ExecutionEngine", py::module_local())
.def(py::init<>([](MlirModule module, int optLevel,
- const std::vector<std::string> &sharedLibPaths) {
+ const std::vector<std::string> &sharedLibPaths,
+ bool enableObjectDump) {
llvm::SmallVector<MlirStringRef, 4> libPaths;
for (const std::string &path : sharedLibPaths)
libPaths.push_back({path.c_str(), path.length()});
- MlirExecutionEngine executionEngine = mlirExecutionEngineCreate(
- module, optLevel, libPaths.size(), libPaths.data());
+ MlirExecutionEngine executionEngine =
+ mlirExecutionEngineCreate(module, optLevel, libPaths.size(),
+ libPaths.data(), enableObjectDump);
if (mlirExecutionEngineIsNull(executionEngine))
throw std::runtime_error(
"Failure while creating the ExecutionEngine.");
}),
py::arg("module"), py::arg("opt_level") = 2,
py::arg("shared_libs") = py::list(),
+ py::arg("enable_object_dump") = true,
"Create a new ExecutionEngine instance for the given Module. The "
"module must contain only dialects that can be translated to LLVM. "
"Perform transformations and code generation at the optimization "
extern "C" MlirExecutionEngine
mlirExecutionEngineCreate(MlirModule op, int optLevel, int numPaths,
- const MlirStringRef *sharedLibPaths) {
+ const MlirStringRef *sharedLibPaths,
+ bool enableObjectDump) {
static bool initOnce = [] {
llvm::InitializeNativeTarget();
llvm::InitializeNativeTargetAsmParser(); // needed for inline_asm
jitOptions.transformer = transformer;
jitOptions.jitCodeGenOptLevel = llvmOptLevel;
jitOptions.sharedLibPaths = libPaths;
+ jitOptions.enableObjectDump = enableObjectDump;
auto jitOrError = ExecutionEngine::create(unwrap(op), jitOptions);
if (!jitOrError) {
consumeError(jitOrError.takeError());
file->keep();
}
+bool SimpleObjectCache::isEmpty() { return cachedObjects.size() == 0; }
+
void ExecutionEngine::dumpToObjectFile(StringRef filename) {
if (cache == nullptr) {
llvm::errs() << "cannot dump ExecutionEngine object code to file: "
"object cache is disabled\n";
return;
}
+  // Compilation is lazy, so the object cache is only populated once a lookup
+  // forces compilation. If a dump is requested before the cache has been
+  // populated, force compilation of all known entry points first.
+ if (cache->isEmpty()) {
+ for (std::string &functionName : functionNames) {
+ auto result = lookupPacked(functionName);
+ if (!result) {
+ llvm::errs() << "Could not compile " << functionName << ":\n "
+ << result.takeError() << "\n";
+ return;
+ }
+ }
+ }
cache->dumpToObjectFile(filename);
}
}
}
-ExecutionEngine::ExecutionEngine(bool enableObjectCache,
+ExecutionEngine::ExecutionEngine(bool enableObjectDump,
bool enableGDBNotificationListener,
bool enablePerfNotificationListener)
- : cache(enableObjectCache ? new SimpleObjectCache() : nullptr),
+ : cache(enableObjectDump ? new SimpleObjectCache() : nullptr),
+ functionNames(),
gdbListener(enableGDBNotificationListener
? llvm::JITEventListener::createGDBRegistrationListener()
: nullptr),
Expected<std::unique_ptr<ExecutionEngine>>
ExecutionEngine::create(Operation *m, const ExecutionEngineOptions &options) {
auto engine = std::make_unique<ExecutionEngine>(
- options.enableObjectCache, options.enableGDBNotificationListener,
+ options.enableObjectDump, options.enableGDBNotificationListener,
options.enablePerfNotificationListener);
+ // Remember all entry-points if object dumping is enabled.
+ if (options.enableObjectDump) {
+ for (auto funcOp : m->getRegion(0).getOps<LLVM::LLVMFuncOp>()) {
+ StringRef funcName = funcOp.getSymName();
+ engine->functionNames.push_back(funcName.str());
+ }
+ }
+
std::unique_ptr<llvm::LLVMContext> ctx(new llvm::LLVMContext);
auto llvmModule = options.llvmModuleBuilder
? options.llvmModuleBuilder(m, *ctx)
engineOptions.transformer = config.transformer;
engineOptions.jitCodeGenOptLevel = jitCodeGenOptLevel;
engineOptions.sharedLibPaths = executionEngineLibs;
- engineOptions.enableObjectCache = true;
+ engineOptions.enableObjectDump = true;
auto expectedEngine = mlir::ExecutionEngine::create(module, engineOptions);
if (!expectedEngine)
return expectedEngine.takeError();
lowerModuleToLLVM(ctx, module);
mlirRegisterAllLLVMTranslations(ctx);
MlirExecutionEngine jit = mlirExecutionEngineCreate(
- module, /*optLevel=*/2, /*numPaths=*/0, /*sharedLibPaths=*/NULL);
+ module, /*optLevel=*/2, /*numPaths=*/0, /*sharedLibPaths=*/NULL,
+ /*enableObjectDump=*/false);
if (mlirExecutionEngineIsNull(jit)) {
fprintf(stderr, "Execution engine creation failed");
exit(2);
# RUN: %PYTHON %s 2>&1 | FileCheck %s
# REQUIRES: host-supports-jit
-import gc, sys
+import gc, sys, os, tempfile
from mlir.ir import *
from mlir.passmanager import *
from mlir.execution_engine import *
run(testNanoTime)
+
+
+# Test that the JIT-compiled object can be dumped to an object file on disk.
+# CHECK-LABEL: TEST: testDumpToObjectFile
+def testDumpToObjectFile():
+ _, object_path = tempfile.mkstemp(suffix=".o")
+
+ try:
+ with Context():
+ module = Module.parse("""
+ module {
+ func.func @main() attributes { llvm.emit_c_interface } {
+ return
+ }
+ }""")
+
+ execution_engine = ExecutionEngine(
+ lowerToLLVM(module),
+ opt_level=3)
+
+ # CHECK: Object file exists: True
+ print(f"Object file exists: {os.path.exists(object_path)}")
+ # CHECK: Object file is empty: True
+ print(f"Object file is empty: {os.path.getsize(object_path) == 0}")
+
+ execution_engine.dump_to_object_file(object_path)
+
+ # CHECK: Object file exists: True
+ print(f"Object file exists: {os.path.exists(object_path)}")
+ # CHECK: Object file is empty: False
+ print(f"Object file is empty: {os.path.getsize(object_path) == 0}")
+
+ finally:
+ os.remove(object_path)
+
+
+run(testDumpToObjectFile)