// Create an MLIR execution engine. The execution engine eagerly JIT-compiles
// the module.
- auto maybeEngine = mlir::ExecutionEngine::create(
- module, /*llvmModuleBuilder=*/nullptr, optPipeline);
+ mlir::ExecutionEngineOptions engineOptions;
+ engineOptions.transformer = optPipeline;
+ auto maybeEngine = mlir::ExecutionEngine::create(module, engineOptions);
assert(maybeEngine && "failed to construct an execution engine");
auto &engine = maybeEngine.get();
// Create an MLIR execution engine. The execution engine eagerly JIT-compiles
// the module.
- auto maybeEngine = mlir::ExecutionEngine::create(
- module, /*llvmModuleBuilder=*/nullptr, optPipeline);
+ mlir::ExecutionEngineOptions engineOptions;
+ engineOptions.transformer = optPipeline;
+ auto maybeEngine = mlir::ExecutionEngine::create(module, engineOptions);
assert(maybeEngine && "failed to construct an execution engine");
auto &engine = maybeEngine.get();
llvm::StringMap<std::unique_ptr<llvm::MemoryBuffer>> cachedObjects;
};
+struct ExecutionEngineOptions {
+ /// If `llvmModuleBuilder` is provided, it will be used to create LLVM module
+ /// from the given MLIR module. Otherwise, a default `translateModuleToLLVMIR`
+ /// function will be used to translate MLIR module to LLVM IR.
+ llvm::function_ref<std::unique_ptr<llvm::Module>(ModuleOp,
+ llvm::LLVMContext &)>
+ llvmModuleBuilder = nullptr;
+
+ /// If `transformer` is provided, it will be called on the LLVM module during
+ /// JIT-compilation and can be used, e.g., for reporting or optimization.
+ llvm::function_ref<llvm::Error(llvm::Module *)> transformer = {};
+
+ /// `jitCodeGenOptLevel`, when provided, is used as the optimization level for
+ /// target code generation.
+ Optional<llvm::CodeGenOpt::Level> jitCodeGenOptLevel = llvm::None;
+
+ /// If `sharedLibPaths` are provided, the underlying JIT-compilation will
+ /// open and link the shared libraries for symbol resolution.
+ ArrayRef<StringRef> sharedLibPaths = {};
+
+ /// If `enableObjectCache` is set, the JIT compiler will create one to store
+ /// the object generated for the given module.
+ bool enableObjectCache = true;
+
+ /// If `enableGDBNotificationListener` is set, the JIT compiler will
+ /// notify LLVM's global GDB notification listener.
+ bool enableGDBNotificationListener = true;
+
+ /// If `enablePerfNotificationListener` is set, the JIT compiler will notify
+ /// LLVM's global Perf notification listener.
+ bool enablePerfNotificationListener = true;
+};
+
/// JIT-backed execution engine for MLIR modules. Assumes the module can be
/// converted to LLVM IR. For each function, creates a wrapper function with
/// the fixed interface
bool enablePerfNotificationListener);
/// Creates an execution engine for the given module.
- ///
- /// If `llvmModuleBuilder` is provided, it will be used to create LLVM module
- /// from the given MLIR module. Otherwise, a default `translateModuleToLLVMIR`
- /// function will be used to translate MLIR module to LLVM IR.
- ///
- /// If `transformer` is provided, it will be called on the LLVM module during
- /// JIT-compilation and can be used, e.g., for reporting or optimization.
- ///
- /// `jitCodeGenOptLevel`, when provided, is used as the optimization level for
- /// target code generation.
- ///
- /// If `sharedLibPaths` are provided, the underlying JIT-compilation will
- /// open and link the shared libraries for symbol resolution.
- ///
- /// If `enableObjectCache` is set, the JIT compiler will create one to store
- /// the object generated for the given module.
- ///
- /// If enable `enableGDBNotificationListener` is set, the JIT compiler will
- /// notify the llvm's global GDB notification listener.
- ///
- /// If `enablePerfNotificationListener` is set, the JIT compiler will notify
- /// the llvm's global Perf notification listener.
static llvm::Expected<std::unique_ptr<ExecutionEngine>>
- create(ModuleOp m,
- llvm::function_ref<std::unique_ptr<llvm::Module>(ModuleOp,
- llvm::LLVMContext &)>
- llvmModuleBuilder = nullptr,
- llvm::function_ref<llvm::Error(llvm::Module *)> transformer = {},
- Optional<llvm::CodeGenOpt::Level> jitCodeGenOptLevel = llvm::None,
- ArrayRef<StringRef> sharedLibPaths = {}, bool enableObjectCache = true,
- bool enableGDBNotificationListener = true,
- bool enablePerfNotificationListener = true);
+ create(ModuleOp m, const ExecutionEngineOptions &options = {});
/// Looks up a packed-argument function wrapping the function with the given
/// name and returns a pointer to it. Propagates errors in case of failure.
auto llvmOptLevel = static_cast<llvm::CodeGenOpt::Level>(optLevel);
auto transformer = mlir::makeLLVMPassesTransformer(
/*passes=*/{}, llvmOptLevel, /*targetMachine=*/tmOrError->get());
- auto jitOrError =
- ExecutionEngine::create(unwrap(op), /*llvmModuleBuilder=*/{}, transformer,
- llvmOptLevel, libPaths);
+ ExecutionEngineOptions jitOptions;
+ jitOptions.transformer = transformer;
+ jitOptions.jitCodeGenOptLevel = llvmOptLevel;
+ jitOptions.sharedLibPaths = libPaths;
+ auto jitOrError = ExecutionEngine::create(unwrap(op), jitOptions);
if (!jitOrError) {
consumeError(jitOrError.takeError());
return MlirExecutionEngine{nullptr};
}
}
-Expected<std::unique_ptr<ExecutionEngine>> ExecutionEngine::create(
- ModuleOp m,
- llvm::function_ref<std::unique_ptr<llvm::Module>(ModuleOp,
- llvm::LLVMContext &)>
- llvmModuleBuilder,
- llvm::function_ref<Error(llvm::Module *)> transformer,
- Optional<llvm::CodeGenOpt::Level> jitCodeGenOptLevel,
- ArrayRef<StringRef> sharedLibPaths, bool enableObjectCache,
- bool enableGDBNotificationListener, bool enablePerfNotificationListener) {
+Expected<std::unique_ptr<ExecutionEngine>>
+ExecutionEngine::create(ModuleOp m, const ExecutionEngineOptions &options) {
auto engine = std::make_unique<ExecutionEngine>(
- enableObjectCache, enableGDBNotificationListener,
- enablePerfNotificationListener);
+ options.enableObjectCache, options.enableGDBNotificationListener,
+ options.enablePerfNotificationListener);
std::unique_ptr<llvm::LLVMContext> ctx(new llvm::LLVMContext);
- auto llvmModule = llvmModuleBuilder ? llvmModuleBuilder(m, *ctx)
- : translateModuleToLLVMIR(m, *ctx);
+ auto llvmModule = options.llvmModuleBuilder
+ ? options.llvmModuleBuilder(m, *ctx)
+ : translateModuleToLLVMIR(m, *ctx);
if (!llvmModule)
return makeStringError("could not convert to LLVM IR");
// FIXME: the triple should be passed to the translation or dialect conversion
}
// Resolve symbols from shared libraries.
- for (auto libPath : sharedLibPaths) {
+ for (auto libPath : options.sharedLibPaths) {
auto mb = llvm::MemoryBuffer::getFile(libPath);
if (!mb) {
errs() << "Failed to create MemoryBuffer for: " << libPath
// LLJITWithObjectCache example.
auto compileFunctionCreator = [&](JITTargetMachineBuilder jtmb)
-> Expected<std::unique_ptr<IRCompileLayer::IRCompiler>> {
- if (jitCodeGenOptLevel)
- jtmb.setCodeGenOptLevel(jitCodeGenOptLevel.getValue());
+ if (options.jitCodeGenOptLevel)
+ jtmb.setCodeGenOptLevel(options.jitCodeGenOptLevel.getValue());
auto tm = jtmb.createTargetMachine();
if (!tm)
return tm.takeError();
// Add a ThreadSafemodule to the engine and return.
ThreadSafeModule tsm(std::move(llvmModule), std::move(ctx));
- if (transformer)
+ if (options.transformer)
cantFail(tsm.withModuleDo(
- [&](llvm::Module &module) { return transformer(&module); }));
+ [&](llvm::Module &module) { return options.transformer(&module); }));
cantFail(jit->addIRModule(std::move(tsm)));
engine->jit = std::move(jit);
return symbolMap;
};
- auto expectedEngine = mlir::ExecutionEngine::create(
- module, config.llvmModuleBuilder, config.transformer, jitCodeGenOptLevel,
- executionEngineLibs);
+ mlir::ExecutionEngineOptions engineOptions;
+ engineOptions.llvmModuleBuilder = config.llvmModuleBuilder;
+ engineOptions.transformer = config.transformer;
+ engineOptions.jitCodeGenOptLevel = jitCodeGenOptLevel;
+ engineOptions.sharedLibPaths = executionEngineLibs;
+ auto expectedEngine = mlir::ExecutionEngine::create(module, engineOptions);
if (!expectedEngine)
return expectedEngine.takeError();