}
/// Get the current Function
- mlir::FuncOp getFunction() {
- return getRegion().getParentOfType<mlir::FuncOp>();
+ mlir::func::FuncOp getFunction() {
+ return getRegion().getParentOfType<mlir::func::FuncOp>();
}
/// Get a reference to the kind map.
/// Get a function by name. If the function exists in the current module, it
/// is returned. Otherwise, a null FuncOp is returned.
- mlir::FuncOp getNamedFunction(llvm::StringRef name) {
+ mlir::func::FuncOp getNamedFunction(llvm::StringRef name) {
return getNamedFunction(getModule(), name);
}
- static mlir::FuncOp getNamedFunction(mlir::ModuleOp module,
- llvm::StringRef name);
+ static mlir::func::FuncOp getNamedFunction(mlir::ModuleOp module,
+ llvm::StringRef name);
/// Get a function by symbol name. The result will be null if there is no
/// function with the given symbol in the module.
- mlir::FuncOp getNamedFunction(mlir::SymbolRefAttr symbol) {
+ mlir::func::FuncOp getNamedFunction(mlir::SymbolRefAttr symbol) {
return getNamedFunction(getModule(), symbol);
}
- static mlir::FuncOp getNamedFunction(mlir::ModuleOp module,
- mlir::SymbolRefAttr symbol);
+ static mlir::func::FuncOp getNamedFunction(mlir::ModuleOp module,
+ mlir::SymbolRefAttr symbol);
fir::GlobalOp getNamedGlobal(llvm::StringRef name) {
return getNamedGlobal(getModule(), name);
/// Create a new FuncOp. If the function may have already been created, use
/// `addNamedFunction` instead.
- mlir::FuncOp createFunction(mlir::Location loc, llvm::StringRef name,
- mlir::FunctionType ty) {
+ mlir::func::FuncOp createFunction(mlir::Location loc, llvm::StringRef name,
+ mlir::FunctionType ty) {
return createFunction(loc, getModule(), name, ty);
}
- static mlir::FuncOp createFunction(mlir::Location loc, mlir::ModuleOp module,
- llvm::StringRef name,
- mlir::FunctionType ty);
+ static mlir::func::FuncOp createFunction(mlir::Location loc,
+ mlir::ModuleOp module,
+ llvm::StringRef name,
+ mlir::FunctionType ty);
/// Determine if the named function is already in the module. Return the
/// instance if found, otherwise add a new named function to the module.
- mlir::FuncOp addNamedFunction(mlir::Location loc, llvm::StringRef name,
- mlir::FunctionType ty) {
+ mlir::func::FuncOp addNamedFunction(mlir::Location loc, llvm::StringRef name,
+ mlir::FunctionType ty) {
if (auto func = getNamedFunction(name))
return func;
return createFunction(loc, name, ty);
}
- static mlir::FuncOp addNamedFunction(mlir::Location loc,
- mlir::ModuleOp module,
- llvm::StringRef name,
- mlir::FunctionType ty) {
+ static mlir::func::FuncOp addNamedFunction(mlir::Location loc,
+ mlir::ModuleOp module,
+ llvm::StringRef name,
+ mlir::FunctionType ty) {
if (auto func = getNamedFunction(module, name))
return func;
return createFunction(loc, module, name, ty);
/// Get (or generate) the MLIR FuncOp for a given runtime function. Its template
/// argument is intended to be of the form: <mkRTKey(runtime function name)>.
template <typename RuntimeEntry>
-static mlir::FuncOp getRuntimeFunc(mlir::Location loc,
- fir::FirOpBuilder &builder) {
+static mlir::func::FuncOp getRuntimeFunc(mlir::Location loc,
+ fir::FirOpBuilder &builder) {
using namespace Fortran::runtime;
auto name = RuntimeEntry::name;
auto func = builder.getNamedFunction(name);
let hasCustomAssemblyFormat = 1;
let builders = [
- OpBuilder<(ins "mlir::FuncOp":$callee,
+ OpBuilder<(ins "mlir::func::FuncOp":$callee,
CArg<"mlir::ValueRange", "{}">:$operands)>,
OpBuilder<(ins "mlir::SymbolRefAttr":$callee,
"llvm::ArrayRef<mlir::Type>":$results,
///
/// If `module` already contains FuncOp `name`, it is returned. Otherwise, a new
/// FuncOp is created, and that new FuncOp is returned.
-mlir::FuncOp createFuncOp(mlir::Location loc, mlir::ModuleOp module,
- llvm::StringRef name, mlir::FunctionType type,
- llvm::ArrayRef<mlir::NamedAttribute> attrs = {});
+mlir::func::FuncOp
+createFuncOp(mlir::Location loc, mlir::ModuleOp module, llvm::StringRef name,
+ mlir::FunctionType type,
+ llvm::ArrayRef<mlir::NamedAttribute> attrs = {});
/// Get or create a GlobalOp in a module.
fir::GlobalOp createGlobalOp(mlir::Location loc, mlir::ModuleOp module,
/// Does the function, \p func, have a host-associations tuple argument?
/// Some internal procedures may have access to host procedure variables.
-bool hasHostAssociationArgument(mlir::FuncOp func);
+bool hasHostAssociationArgument(mlir::func::FuncOp func);
/// Tell if \p value is:
/// - a function argument that has attribute \p attributeName
/// Scan the arguments of a FuncOp to determine if any arguments have the
/// attribute `attr` placed on them. This can be used to determine if the
/// function has any host associations, for example.
-bool anyFuncArgsHaveAttr(mlir::FuncOp func, llvm::StringRef attr);
+bool anyFuncArgsHaveAttr(mlir::func::FuncOp func, llvm::StringRef attr);
} // namespace fir
include "mlir/Pass/PassBase.td"
-def AbstractResultOpt : Pass<"abstract-result-opt", "mlir::FuncOp"> {
+def AbstractResultOpt : Pass<"abstract-result-opt", "mlir::func::FuncOp"> {
let summary = "Convert fir.array, fir.box and fir.rec function result to "
"function argument";
let description = [{
];
}
-def AffineDialectPromotion : Pass<"promote-to-affine", "::mlir::FuncOp"> {
+def AffineDialectPromotion : Pass<"promote-to-affine", "::mlir::func::FuncOp"> {
let summary = "Promotes `fir.{do_loop,if}` to `affine.{for,if}`.";
let description = [{
Convert fir operations which satisfy affine constraints to the affine
];
}
-def AffineDialectDemotion : Pass<"demote-affine", "::mlir::FuncOp"> {
+def AffineDialectDemotion : Pass<"demote-affine", "::mlir::func::FuncOp"> {
let summary = "Converts `affine.{load,store}` back to fir operations";
let description = [{
Affine dialect's default lowering for loads and stores is different from
let dependentDialects = [ "fir::FIROpsDialect" ];
}
-def ArrayValueCopy : Pass<"array-value-copy", "::mlir::FuncOp"> {
+def ArrayValueCopy : Pass<"array-value-copy", "::mlir::func::FuncOp"> {
let summary = "Convert array value operations to memory operations.";
let description = [{
Transform the set of array value primitives to a memory-based array
];
}
-def CFGConversion : Pass<"cfg-conversion", "::mlir::FuncOp"> {
+def CFGConversion : Pass<"cfg-conversion", "::mlir::func::FuncOp"> {
let summary = "Convert FIR structured control flow ops to CFG ops.";
let description = [{
Transform the `fir.do_loop`, `fir.if`, and `fir.iterate_while` ops into
let constructor = "::fir::createExternalNameConversionPass()";
}
-def MemRefDataFlowOpt : Pass<"fir-memref-dataflow-opt", "::mlir::FuncOp"> {
+def MemRefDataFlowOpt : Pass<"fir-memref-dataflow-opt", "::mlir::func::FuncOp"> {
let summary =
"Perform store/load forwarding and potentially removing dead stores.";
let description = [{
];
}
-def MemoryAllocationOpt : Pass<"memory-allocation-opt", "mlir::FuncOp"> {
+def MemoryAllocationOpt : Pass<"memory-allocation-opt", "mlir::func::FuncOp"> {
let summary = "Convert stack to heap allocations and vice versa.";
let description = [{
Convert stack allocations to heap allocations and vice versa based on
}
inline void addCfgConversionPass(mlir::PassManager &pm) {
- addNestedPassConditionally<mlir::FuncOp>(
+ addNestedPassConditionally<mlir::func::FuncOp>(
pm, disableCfgConversion, fir::createFirToCfgPass);
}
inline void addAVC(mlir::PassManager &pm) {
- addNestedPassConditionally<mlir::FuncOp>(
+ addNestedPassConditionally<mlir::func::FuncOp>(
pm, disableFirAvc, fir::createArrayValueCopyPass);
}
inline void addMemoryAllocationOpt(mlir::PassManager &pm) {
- addNestedPassConditionally<mlir::FuncOp>(pm, disableFirMao, [&]() {
+ addNestedPassConditionally<mlir::func::FuncOp>(pm, disableFirMao, [&]() {
return fir::createMemoryAllocationPass(
dynamicArrayStackToHeapAllocation, arrayStackAllocationThreshold);
});
mlir::GreedyRewriteConfig config;
config.enableRegionSimplification = false;
fir::addAVC(pm);
- pm.addNestedPass<mlir::FuncOp>(fir::createCharacterConversionPass());
+ pm.addNestedPass<mlir::func::FuncOp>(fir::createCharacterConversionPass());
pm.addPass(mlir::createCanonicalizerPass(config));
pm.addPass(fir::createSimplifyRegionLitePass());
fir::addMemoryAllocationOpt(pm);
#if !defined(FLANG_EXCLUDE_CODEGEN)
inline void createDefaultFIRCodeGenPassPipeline(mlir::PassManager &pm) {
fir::addBoxedProcedurePass(pm);
- pm.addNestedPass<mlir::FuncOp>(fir::createAbstractResultOptPass());
+ pm.addNestedPass<mlir::func::FuncOp>(fir::createAbstractResultOptPass());
fir::addCodeGenRewritePass(pm);
fir::addTargetRewritePass(pm);
fir::addFIRToLLVMPass(pm);
const fir::MutableBoxValue &box,
mlir::Value dimIndex, mlir::Value lowerBound,
mlir::Value upperBound) {
- mlir::FuncOp callee =
+ mlir::func::FuncOp callee =
box.isPointer()
? fir::runtime::getRuntimeFunc<mkRTKey(PointerSetBounds)>(loc,
builder)
mlir::Location loc,
const fir::MutableBoxValue &box,
mlir::Value len) {
- mlir::FuncOp callee =
+ mlir::func::FuncOp callee =
box.isPointer()
? fir::runtime::getRuntimeFunc<mkRTKey(PointerNullifyCharacter)>(
loc, builder)
mlir::Location loc,
const fir::MutableBoxValue &box,
ErrorManager &errorManager) {
- mlir::FuncOp callee =
+ mlir::func::FuncOp callee =
box.isPointer()
? fir::runtime::getRuntimeFunc<mkRTKey(PointerAllocate)>(loc, builder)
: fir::runtime::getRuntimeFunc<mkRTKey(AllocatableAllocate)>(loc,
ErrorManager &errorManager) {
// Ensure fir.box is up-to-date before passing it to deallocate runtime.
mlir::Value boxAddress = fir::factory::getMutableIRBox(builder, loc, box);
- mlir::FuncOp callee =
+ mlir::func::FuncOp callee =
box.isPointer()
? fir::runtime::getRuntimeFunc<mkRTKey(PointerDeallocate)>(loc,
builder)
for (int entryIndex = 0, last = funit.entryPointList.size();
entryIndex < last; ++entryIndex) {
funit.setActiveEntry(entryIndex);
- // Calling CalleeInterface ctor will build a declaration mlir::FuncOp with
- // no other side effects.
+ // Calling CalleeInterface ctor will build a declaration
+ // mlir::func::FuncOp with no other side effects.
// TODO: when doing some compiler profiling on real apps, it may be worth
// to check it's better to save the CalleeInterface instead of recomputing
// it later when lowering the body. CalleeInterface ctor should be linear
return cond;
}
- mlir::FuncOp getFunc(llvm::StringRef name, mlir::FunctionType ty) {
- if (mlir::FuncOp func = builder->getNamedFunction(name)) {
+ mlir::func::FuncOp getFunc(llvm::StringRef name, mlir::FunctionType ty) {
+ if (mlir::func::FuncOp func = builder->getNamedFunction(name)) {
assert(func.getFunctionType() == ty);
return func;
}
void startNewFunction(Fortran::lower::pft::FunctionLikeUnit &funit) {
assert(!builder && "expected nullptr");
Fortran::lower::CalleeInterface callee(funit, *this);
- mlir::FuncOp func = callee.addEntryBlockAndMapArguments();
+ mlir::func::FuncOp func = callee.addEntryBlockAndMapArguments();
builder = new fir::FirOpBuilder(func, bridge.getKindMap());
assert(builder && "FirOpBuilder did not instantiate");
builder->setInsertionPointToStart(&func.front());
// FIXME: get rid of the bogus function context and instantiate the
// globals directly into the module.
mlir::MLIRContext *context = &getMLIRContext();
- mlir::FuncOp func = fir::FirOpBuilder::createFunction(
+ mlir::func::FuncOp func = fir::FirOpBuilder::createFunction(
mlir::UnknownLoc::get(context), getModuleOp(),
fir::NameUniquer::doGenerated("Sham"),
mlir::FunctionType::get(context, llvm::None, llvm::None));
return funit.isMainProgram();
}
-mlir::FuncOp Fortran::lower::CalleeInterface::addEntryBlockAndMapArguments() {
+mlir::func::FuncOp
+Fortran::lower::CalleeInterface::addEntryBlockAndMapArguments() {
// On the callee side, directly map the mlir::value argument of
// the function block to the Fortran symbols.
func.addEntryBlock();
// sides.
//===----------------------------------------------------------------------===//
-static void addSymbolAttribute(mlir::FuncOp func,
+static void addSymbolAttribute(mlir::func::FuncOp func,
const Fortran::semantics::Symbol &sym,
mlir::MLIRContext &mlirContext) {
// Only add this on bind(C) functions for which the symbol is not reflected in
}
/// Declare drives the different actions to be performed while analyzing the
-/// signature and building/finding the mlir::FuncOp.
+/// signature and building/finding the mlir::func::FuncOp.
template <typename T>
void Fortran::lower::CallInterface<T>::declare() {
if (!side().isMainProgram()) {
}
}
-/// Once the signature has been analyzed and the mlir::FuncOp was built/found,
-/// map the fir inputs to Fortran entities (the symbols or expressions).
+/// Once the signature has been analyzed and the mlir::func::FuncOp was
+/// built/found, map the fir inputs to Fortran entities (the symbols or
+/// expressions).
template <typename T>
void Fortran::lower::CallInterface<T>::mapPassedEntities() {
// map back fir inputs to passed entities
.getFunctionType();
}
-mlir::FuncOp Fortran::lower::getOrDeclareFunction(
+mlir::func::FuncOp Fortran::lower::getOrDeclareFunction(
llvm::StringRef name, const Fortran::evaluate::ProcedureDesignator &proc,
Fortran::lower::AbstractConverter &converter) {
mlir::ModuleOp module = converter.getModuleOp();
- mlir::FuncOp func = fir::FirOpBuilder::getNamedFunction(module, name);
+ mlir::func::FuncOp func = fir::FirOpBuilder::getNamedFunction(module, name);
if (func)
return func;
mlir::FunctionType ty = SignatureBuilder{characteristics.value(), converter,
/*forceImplicit=*/false}
.getFunctionType();
- mlir::FuncOp newFunc =
+ mlir::func::FuncOp newFunc =
fir::FirOpBuilder::createFunction(loc, module, name, ty);
addSymbolAttribute(newFunc, *symbol, converter.getMLIRContext());
return newFunc;
mlir::Type getSomeKindInteger() { return builder.getIndexType(); }
- mlir::FuncOp getFunction(llvm::StringRef name, mlir::FunctionType funTy) {
- if (mlir::FuncOp func = builder.getNamedFunction(name))
+ mlir::func::FuncOp getFunction(llvm::StringRef name,
+ mlir::FunctionType funTy) {
+ if (mlir::func::FuncOp func = builder.getNamedFunction(name))
return func;
return builder.createFunction(getLoc(), name, funTy);
}
fir::factory::extractCharacterProcedureTuple(builder, loc, funcPtr);
} else {
std::string name = converter.mangleName(*symbol);
- mlir::FuncOp func =
+ mlir::func::FuncOp func =
Fortran::lower::getOrDeclareFunction(name, proc, converter);
funcPtr = builder.create<fir::AddrOfOp>(loc, func.getFunctionType(),
builder.getSymbolRefAttr(name));
// Find the argument that corresponds to the host associations.
// Verify some assumptions about how the signature was built here.
- [[maybe_unused]] static unsigned findHostAssocTuplePos(mlir::FuncOp fn) {
+ [[maybe_unused]] static unsigned
+ findHostAssocTuplePos(mlir::func::FuncOp fn) {
// Scan the argument list from last to first as the host associations are
// appended for now.
for (unsigned i = fn.getNumArguments(); i > 0; --i)
// different view of what the function signature is in different locations.
// Casts are inserted as needed below to accommodate this.
- // The mlir::FuncOp type prevails, unless it has a different number of
+ // The mlir::func::FuncOp type prevails, unless it has a different number of
// arguments which can happen in legal program if it was passed as a dummy
// procedure argument earlier with no further type information.
mlir::SymbolRefAttr funcSymbolAttr;
/// Helper to generate calls to scalar user defined assignment procedures.
static void genScalarUserDefinedAssignmentCall(fir::FirOpBuilder &builder,
mlir::Location loc,
- mlir::FuncOp func,
+ mlir::func::FuncOp func,
const fir::ExtendedValue &lhs,
const fir::ExtendedValue &rhs) {
auto prepareUserDefinedArg =
const auto *rhs = procRef.arguments()[1].value().UnwrapExpr();
assert(lhs && rhs &&
"user defined assignment arguments must be expressions");
- mlir::FuncOp func =
+ mlir::func::FuncOp func =
Fortran::lower::CallerInterface(procRef, converter).getFuncOp();
ael.lowerElementalUserAssignment(func, *lhs, *rhs);
}
- void lowerElementalUserAssignment(mlir::FuncOp userAssignment,
+ void lowerElementalUserAssignment(mlir::func::FuncOp userAssignment,
const Fortran::lower::SomeExpr &lhs,
const Fortran::lower::SomeExpr &rhs) {
mlir::Location loc = getLoc();
Fortran::lower::AbstractConverter &converter,
Fortran::lower::SymMap &symMap, Fortran::lower::StatementContext &stmtCtx,
Fortran::lower::ExplicitIterSpace &explicitIterSpace,
- mlir::FuncOp userAssignmentFunction, const Fortran::lower::SomeExpr &lhs,
+ mlir::func::FuncOp userAssignmentFunction,
+ const Fortran::lower::SomeExpr &lhs,
const Fortran::lower::SomeExpr &rhs) {
Fortran::lower::ImplicitIterSpace implicit;
ArrayExprLowering ael(converter, stmtCtx, symMap,
return ael.lowerScalarUserAssignment(userAssignmentFunction, lhs, rhs);
}
- ExtValue lowerScalarUserAssignment(mlir::FuncOp userAssignment,
+ ExtValue lowerScalarUserAssignment(mlir::func::FuncOp userAssignment,
const Fortran::lower::SomeExpr &lhs,
const Fortran::lower::SomeExpr &rhs) {
mlir::Location loc = getLoc();
/// Create a call to the LLVM memcpy intrinsic.
void createCallMemcpy(llvm::ArrayRef<mlir::Value> args) {
mlir::Location loc = getLoc();
- mlir::FuncOp memcpyFunc = fir::factory::getLlvmMemcpy(builder);
+ mlir::func::FuncOp memcpyFunc = fir::factory::getLlvmMemcpy(builder);
mlir::SymbolRefAttr funcSymAttr =
builder.getSymbolRefAttr(memcpyFunc.getName());
mlir::FunctionType funcTy = memcpyFunc.getFunctionType();
mlir::Value bufferSize, mlir::Value buffSize,
mlir::Value eleSz) {
mlir::Location loc = getLoc();
- mlir::FuncOp reallocFunc = fir::factory::getRealloc(builder);
+ mlir::func::FuncOp reallocFunc = fir::factory::getRealloc(builder);
auto cond = builder.create<mlir::arith::CmpIOp>(
loc, mlir::arith::CmpIPredicate::sle, bufferSize, needed);
auto ifOp = builder.create<fir::IfOp>(loc, mem.getType(), cond,
call);
} else if (explicitIterSpace.isActive() && lhs->Rank() == 0) {
// Scalar defined assignment (elemental or not) in a FORALL context.
- mlir::FuncOp func =
+ mlir::func::FuncOp func =
Fortran::lower::CallerInterface(call, converter).getFuncOp();
ArrayExprLowering::lowerScalarUserAssignment(
converter, symMap, stmtCtx, explicitIterSpace, func, *lhs, *rhs);
mlir::Type argTy = getArgumentType(converter);
mlir::TupleType tupTy = unwrapTupleTy(argTy);
mlir::Location loc = converter.getCurrentLocation();
- mlir::FuncOp func = builder.getFunction();
+ mlir::func::FuncOp func = builder.getFunction();
mlir::Value tupleArg;
for (auto [ty, arg] : llvm::reverse(llvm::zip(
func.getFunctionType().getInputs(), func.front().getArguments())))
/// Get (or generate) the MLIR FuncOp for a given IO runtime function.
template <typename E>
-static mlir::FuncOp getIORuntimeFunc(mlir::Location loc,
- fir::FirOpBuilder &builder) {
+static mlir::func::FuncOp getIORuntimeFunc(mlir::Location loc,
+ fir::FirOpBuilder &builder) {
llvm::StringRef name = getName<E>();
- mlir::FuncOp func = builder.getNamedFunction(name);
+ mlir::func::FuncOp func = builder.getNamedFunction(name);
if (func)
return func;
auto funTy = getTypeModel<E>()(builder.getContext());
Fortran::lower::StatementContext &stmtCtx) {
fir::FirOpBuilder &builder = converter.getFirOpBuilder();
if (csi.ioMsgExpr) {
- mlir::FuncOp getIoMsg = getIORuntimeFunc<mkIOKey(GetIoMsg)>(loc, builder);
+ mlir::func::FuncOp getIoMsg =
+ getIORuntimeFunc<mkIOKey(GetIoMsg)>(loc, builder);
fir::ExtendedValue ioMsgVar =
converter.genExprAddr(csi.ioMsgExpr, stmtCtx, loc);
builder.create<fir::CallOp>(
builder.createConvert(loc, getIoMsg.getFunctionType().getInput(2),
fir::getLen(ioMsgVar))});
}
- mlir::FuncOp endIoStatement =
+ mlir::func::FuncOp endIoStatement =
getIORuntimeFunc<mkIOKey(EndIoStatement)>(loc, builder);
auto call = builder.create<fir::CallOp>(loc, endIoStatement,
mlir::ValueRange{cookie});
/// Generate a namelist IO call.
static void genNamelistIO(Fortran::lower::AbstractConverter &converter,
- mlir::Value cookie, mlir::FuncOp funcOp,
+ mlir::Value cookie, mlir::func::FuncOp funcOp,
Fortran::semantics::Symbol &symbol, bool checkResult,
mlir::Value &ok,
Fortran::lower::StatementContext &stmtCtx) {
}
/// Get the output function to call for a value of the given type.
-static mlir::FuncOp getOutputFunc(mlir::Location loc,
- fir::FirOpBuilder &builder, mlir::Type type,
- bool isFormatted) {
+static mlir::func::FuncOp getOutputFunc(mlir::Location loc,
+ fir::FirOpBuilder &builder,
+ mlir::Type type, bool isFormatted) {
if (!isFormatted)
return getIORuntimeFunc<mkIOKey(OutputDescriptor)>(loc, builder);
if (auto ty = type.dyn_cast<mlir::IntegerType>()) {
if (!expr)
fir::emitFatalError(loc, "internal error: could not get evaluate::Expr");
mlir::Type itemTy = converter.genType(*expr);
- mlir::FuncOp outputFunc = getOutputFunc(loc, builder, itemTy, isFormatted);
+ mlir::func::FuncOp outputFunc =
+ getOutputFunc(loc, builder, itemTy, isFormatted);
mlir::Type argType = outputFunc.getFunctionType().getInput(1);
assert((isFormatted || argType.isa<fir::BoxType>()) &&
"expect descriptor for unformatted IO runtime");
}
/// Get the input function to call for a value of the given type.
-static mlir::FuncOp getInputFunc(mlir::Location loc, fir::FirOpBuilder &builder,
- mlir::Type type, bool isFormatted) {
+static mlir::func::FuncOp getInputFunc(mlir::Location loc,
+ fir::FirOpBuilder &builder,
+ mlir::Type type, bool isFormatted) {
if (!isFormatted)
return getIORuntimeFunc<mkIOKey(InputDescriptor)>(loc, builder);
if (auto ty = type.dyn_cast<mlir::IntegerType>())
static mlir::Value createIoRuntimeCallForItem(mlir::Location loc,
fir::FirOpBuilder &builder,
- mlir::FuncOp inputFunc,
+ mlir::func::FuncOp inputFunc,
mlir::Value cookie,
const fir::ExtendedValue &item) {
mlir::Type argType = inputFunc.getFunctionType().getInput(1);
if (Fortran::evaluate::HasVectorSubscript(*expr)) {
auto vectorSubscriptBox =
Fortran::lower::genVectorSubscriptBox(loc, converter, stmtCtx, *expr);
- mlir::FuncOp inputFunc = getInputFunc(
+ mlir::func::FuncOp inputFunc = getInputFunc(
loc, builder, vectorSubscriptBox.getElementType(), isFormatted);
const bool mustBox =
inputFunc.getFunctionType().getInput(1).isa<fir::BoxType>();
continue;
}
mlir::Type itemTy = converter.genType(*expr);
- mlir::FuncOp inputFunc = getInputFunc(loc, builder, itemTy, isFormatted);
+ mlir::func::FuncOp inputFunc =
+ getInputFunc(loc, builder, itemTy, isFormatted);
auto itemExv = inputFunc.getFunctionType().getInput(1).isa<fir::BoxType>()
? converter.genExprBox(*expr, stmtCtx, loc)
: converter.genExprAddr(expr, stmtCtx, loc);
const B &spec) {
Fortran::lower::StatementContext localStatementCtx;
fir::FirOpBuilder &builder = converter.getFirOpBuilder();
- mlir::FuncOp ioFunc = getIORuntimeFunc<A>(loc, builder);
+ mlir::func::FuncOp ioFunc = getIORuntimeFunc<A>(loc, builder);
mlir::FunctionType ioFuncTy = ioFunc.getFunctionType();
mlir::Value expr = fir::getBase(converter.genExprValue(
Fortran::semantics::GetExpr(spec.v), localStatementCtx, loc));
const B &spec) {
Fortran::lower::StatementContext localStatementCtx;
fir::FirOpBuilder &builder = converter.getFirOpBuilder();
- mlir::FuncOp ioFunc = getIORuntimeFunc<A>(loc, builder);
+ mlir::func::FuncOp ioFunc = getIORuntimeFunc<A>(loc, builder);
mlir::FunctionType ioFuncTy = ioFunc.getFunctionType();
std::tuple<mlir::Value, mlir::Value, mlir::Value> tup =
lowerStringLit(converter, loc, localStatementCtx, spec,
Fortran::lower::StatementContext localStatementCtx;
fir::FirOpBuilder &builder = converter.getFirOpBuilder();
// has an extra KIND argument
- mlir::FuncOp ioFunc = getIORuntimeFunc<mkIOKey(SetFile)>(loc, builder);
+ mlir::func::FuncOp ioFunc = getIORuntimeFunc<mkIOKey(SetFile)>(loc, builder);
mlir::FunctionType ioFuncTy = ioFunc.getFunctionType();
std::tuple<mlir::Value, mlir::Value, mlir::Value> tup =
lowerStringLit(converter, loc, localStatementCtx, spec,
Fortran::lower::AbstractConverter &converter, mlir::Location loc,
mlir::Value cookie, const Fortran::parser::ConnectSpec::CharExpr &spec) {
fir::FirOpBuilder &builder = converter.getFirOpBuilder();
- mlir::FuncOp ioFunc;
+ mlir::func::FuncOp ioFunc;
switch (std::get<Fortran::parser::ConnectSpec::CharExpr::Kind>(spec.t)) {
case Fortran::parser::ConnectSpec::CharExpr::Kind::Access:
ioFunc = getIORuntimeFunc<mkIOKey(SetAccess)>(loc, builder);
Fortran::lower::AbstractConverter &converter, mlir::Location loc,
mlir::Value cookie, const Fortran::parser::IoControlSpec::CharExpr &spec) {
fir::FirOpBuilder &builder = converter.getFirOpBuilder();
- mlir::FuncOp ioFunc;
+ mlir::func::FuncOp ioFunc;
switch (std::get<Fortran::parser::IoControlSpec::CharExpr::Kind>(spec.t)) {
case Fortran::parser::IoControlSpec::CharExpr::Kind::Advance:
ioFunc = getIORuntimeFunc<mkIOKey(SetAdvance)>(loc, builder);
std::get_if<Fortran::parser::IoControlSpec::Size>(&spec.u)) {
fir::FirOpBuilder &builder = converter.getFirOpBuilder();
- mlir::FuncOp ioFunc = getIORuntimeFunc<mkIOKey(GetSize)>(loc, builder);
+ mlir::func::FuncOp ioFunc =
+ getIORuntimeFunc<mkIOKey(GetSize)>(loc, builder);
auto sizeValue =
builder.create<fir::CallOp>(loc, ioFunc, mlir::ValueRange{cookie})
.getResult(0);
if (!csi.hasAnyConditionSpec())
return;
fir::FirOpBuilder &builder = converter.getFirOpBuilder();
- mlir::FuncOp enableHandlers =
+ mlir::func::FuncOp enableHandlers =
getIORuntimeFunc<mkIOKey(EnableHandlers)>(loc, builder);
mlir::Type boolType = enableHandlers.getFunctionType().getInput(1);
auto boolValue = [&](bool specifierIsPresent) {
fir::FirOpBuilder &builder = converter.getFirOpBuilder();
Fortran::lower::StatementContext stmtCtx;
mlir::Location loc = converter.getCurrentLocation();
- mlir::FuncOp beginFunc = getIORuntimeFunc<K>(loc, builder);
+ mlir::func::FuncOp beginFunc = getIORuntimeFunc<K>(loc, builder);
mlir::FunctionType beginFuncTy = beginFunc.getFunctionType();
mlir::Value unit = fir::getBase(converter.genExprValue(
getExpr<Fortran::parser::FileUnitNumber>(stmt), stmtCtx, loc));
std::get_if<Fortran::parser::ConnectSpec::Newunit>(&spec.u)) {
Fortran::lower::StatementContext stmtCtx;
fir::FirOpBuilder &builder = converter.getFirOpBuilder();
- mlir::FuncOp ioFunc = getIORuntimeFunc<mkIOKey(GetNewUnit)>(loc, builder);
+ mlir::func::FuncOp ioFunc =
+ getIORuntimeFunc<mkIOKey(GetNewUnit)>(loc, builder);
mlir::FunctionType ioFuncTy = ioFunc.getFunctionType();
const auto *var = Fortran::semantics::GetExpr(newunit->v);
mlir::Value addr = builder.createConvert(
const Fortran::parser::OpenStmt &stmt) {
fir::FirOpBuilder &builder = converter.getFirOpBuilder();
Fortran::lower::StatementContext stmtCtx;
- mlir::FuncOp beginFunc;
+ mlir::func::FuncOp beginFunc;
llvm::SmallVector<mlir::Value> beginArgs;
mlir::Location loc = converter.getCurrentLocation();
bool hasNewunitSpec = false;
Fortran::lower::StatementContext stmtCtx;
mlir::Location loc = converter.getCurrentLocation();
bool hasId = hasSpec<Fortran::parser::IdExpr>(stmt);
- mlir::FuncOp beginFunc =
+ mlir::func::FuncOp beginFunc =
hasId ? getIORuntimeFunc<mkIOKey(BeginWait)>(loc, builder)
: getIORuntimeFunc<mkIOKey(BeginWaitAll)>(loc, builder);
mlir::FunctionType beginFuncTy = beginFunc.getFunctionType();
// Get the begin data transfer IO function to call for the given values.
template <bool isInput>
-mlir::FuncOp
+mlir::func::FuncOp
getBeginDataTransferFunc(mlir::Location loc, fir::FirOpBuilder &builder,
bool isFormatted, bool isListOrNml, bool isInternal,
bool isInternalWithDesc, bool isAsync) {
const bool isNml = isDataTransferNamelist(stmt);
// Generate the begin data transfer function call.
- mlir::FuncOp ioFunc = getBeginDataTransferFunc<isInput>(
+ mlir::func::FuncOp ioFunc = getBeginDataTransferFunc<isInput>(
loc, builder, isFormatted, isList || isNml, isInternal,
isInternalWithDesc, isAsync);
llvm::SmallVector<mlir::Value> ioArgs;
Fortran::parser::InquireSpec::CharVar::Kind::Iomsg)
return {};
fir::FirOpBuilder &builder = converter.getFirOpBuilder();
- mlir::FuncOp specFunc =
+ mlir::func::FuncOp specFunc =
getIORuntimeFunc<mkIOKey(InquireCharacter)>(loc, builder);
mlir::FunctionType specFuncTy = specFunc.getFunctionType();
const auto *varExpr = Fortran::semantics::GetExpr(
Fortran::parser::InquireSpec::IntVar::Kind::Iostat)
return {};
fir::FirOpBuilder &builder = converter.getFirOpBuilder();
- mlir::FuncOp specFunc =
+ mlir::func::FuncOp specFunc =
getIORuntimeFunc<mkIOKey(InquireInteger64)>(loc, builder);
mlir::FunctionType specFuncTy = specFunc.getFunctionType();
const auto *varExpr = Fortran::semantics::GetExpr(
bool pendId =
idExpr &&
logVarKind == Fortran::parser::InquireSpec::LogVar::Kind::Pending;
- mlir::FuncOp specFunc =
+ mlir::func::FuncOp specFunc =
pendId ? getIORuntimeFunc<mkIOKey(InquirePendingId)>(loc, builder)
: getIORuntimeFunc<mkIOKey(InquireLogical)>(loc, builder);
mlir::FunctionType specFuncTy = specFunc.getFunctionType();
fir::FirOpBuilder &builder = converter.getFirOpBuilder();
Fortran::lower::StatementContext stmtCtx;
mlir::Location loc = converter.getCurrentLocation();
- mlir::FuncOp beginFunc;
+ mlir::func::FuncOp beginFunc;
ConditionSpecInfo csi;
llvm::SmallVector<mlir::Value> beginArgs;
const auto *list =
llvm::ArrayRef<fir::ExtendedValue> args);
template <typename GeneratorType>
- mlir::FuncOp getWrapper(GeneratorType, llvm::StringRef name,
- mlir::FunctionType, bool loadRefArguments = false);
+ mlir::func::FuncOp getWrapper(GeneratorType, llvm::StringRef name,
+ mlir::FunctionType,
+ bool loadRefArguments = false);
/// Generate calls to ElementalGenerator, handling the elemental aspects
template <typename GeneratorType>
bool infinite = false; // When forbidden conversion or wrong argument number
};
-/// Build mlir::FuncOp from runtime symbol description and add
+/// Build mlir::func::FuncOp from runtime symbol description and add
/// fir.runtime attribute.
-static mlir::FuncOp getFuncOp(mlir::Location loc, fir::FirOpBuilder &builder,
- const RuntimeFunction &runtime) {
- mlir::FuncOp function = builder.addNamedFunction(
+static mlir::func::FuncOp getFuncOp(mlir::Location loc,
+ fir::FirOpBuilder &builder,
+ const RuntimeFunction &runtime) {
+ mlir::func::FuncOp function = builder.addNamedFunction(
loc, runtime.symbol, runtime.typeGenerator(builder.getContext()));
function->setAttr("fir.runtime", builder.getUnitAttr());
return function;
/// Select runtime function that has the smallest distance to the intrinsic
/// function type and that will not imply narrowing arguments or extending the
/// result.
-/// If nothing is found, the mlir::FuncOp will contain a nullptr.
-mlir::FuncOp searchFunctionInLibrary(
+/// If nothing is found, the mlir::func::FuncOp will contain a nullptr.
+mlir::func::FuncOp searchFunctionInLibrary(
mlir::Location loc, fir::FirOpBuilder &builder,
const Fortran::common::StaticMultimapView<RuntimeFunction> &lib,
llvm::StringRef name, mlir::FunctionType funcType,
/// Search runtime for the best runtime function given an intrinsic name
/// and interface. The interface may not be a perfect match in which case
/// the caller is responsible to insert argument and return value conversions.
-/// If nothing is found, the mlir::FuncOp will contain a nullptr.
-static mlir::FuncOp getRuntimeFunction(mlir::Location loc,
- fir::FirOpBuilder &builder,
- llvm::StringRef name,
- mlir::FunctionType funcType) {
+/// If nothing is found, the mlir::func::FuncOp will contain a nullptr.
+static mlir::func::FuncOp getRuntimeFunction(mlir::Location loc,
+ fir::FirOpBuilder &builder,
+ llvm::StringRef name,
+ mlir::FunctionType funcType) {
const RuntimeFunction *bestNearMatch = nullptr;
FunctionDistance bestMatchDistance{};
- mlir::FuncOp match;
+ mlir::func::FuncOp match;
using RtMap = Fortran::common::StaticMultimapView<RuntimeFunction>;
static constexpr RtMap pgmathF(pgmathFast);
static_assert(pgmathF.Verify() && "map must be sorted");
// mathRuntimeVersion == llvmOnly
static constexpr RtMap llvmIntr(llvmIntrinsics);
static_assert(llvmIntr.Verify() && "map must be sorted");
- if (mlir::FuncOp exactMatch =
+ if (mlir::func::FuncOp exactMatch =
searchFunctionInLibrary(loc, builder, llvmIntr, name, funcType,
&bestNearMatch, bestMatchDistance))
return exactMatch;
}
template <typename GeneratorType>
-mlir::FuncOp IntrinsicLibrary::getWrapper(GeneratorType generator,
- llvm::StringRef name,
- mlir::FunctionType funcType,
- bool loadRefArguments) {
+mlir::func::FuncOp IntrinsicLibrary::getWrapper(GeneratorType generator,
+ llvm::StringRef name,
+ mlir::FunctionType funcType,
+ bool loadRefArguments) {
std::string wrapperName = fir::mangleIntrinsicProcedure(name, funcType);
- mlir::FuncOp function = builder.getNamedFunction(wrapperName);
+ mlir::func::FuncOp function = builder.getNamedFunction(wrapperName);
if (!function) {
// First time this wrapper is needed, build it.
function = builder.createFunction(loc, wrapperName, funcType);
}
mlir::FunctionType funcType = getFunctionType(resultType, args, builder);
- mlir::FuncOp wrapper = getWrapper(generator, name, funcType);
+ mlir::func::FuncOp wrapper = getWrapper(generator, name, funcType);
return builder.create<fir::CallOp>(loc, wrapper, args).getResult(0);
}
for (const auto &extendedVal : args)
mlirArgs.emplace_back(toValue(extendedVal, builder, loc));
mlir::FunctionType funcType = getFunctionType(resultType, mlirArgs, builder);
- mlir::FuncOp wrapper = getWrapper(generator, name, funcType);
+ mlir::func::FuncOp wrapper = getWrapper(generator, name, funcType);
auto call = builder.create<fir::CallOp>(loc, wrapper, mlirArgs);
if (resultType)
return toExtendedValue(call.getResult(0), builder, loc);
IntrinsicLibrary::RuntimeCallGenerator
IntrinsicLibrary::getRuntimeCallGenerator(llvm::StringRef name,
mlir::FunctionType soughtFuncType) {
- mlir::FuncOp funcOp = getRuntimeFunction(loc, builder, name, soughtFuncType);
+ mlir::func::FuncOp funcOp =
+ getRuntimeFunction(loc, builder, name, soughtFuncType);
if (!funcOp) {
std::string buffer("not yet implemented: missing intrinsic lowering: ");
llvm::raw_string_ostream sstream(buffer);
// So instead of duplicating the runtime, just have the wrappers loading
// this before calling the code generators.
bool loadRefArguments = true;
- mlir::FuncOp funcOp;
+ mlir::func::FuncOp funcOp;
if (const IntrinsicHandler *handler = findIntrinsicHandler(name))
funcOp = std::visit(
[&](auto generator) {
mlir::Location loc = converter.getCurrentLocation();
Fortran::lower::StatementContext stmtCtx;
llvm::SmallVector<mlir::Value> operands;
- mlir::FuncOp callee;
+ mlir::func::FuncOp callee;
mlir::FunctionType calleeType;
// First operand is stop code (zero if absent)
if (const auto &code =
Fortran::lower::AbstractConverter &converter) {
fir::FirOpBuilder &builder = converter.getFirOpBuilder();
mlir::Location loc = converter.getCurrentLocation();
- mlir::FuncOp callee =
+ mlir::func::FuncOp callee =
fir::runtime::getRuntimeFunc<mkRTKey(FailImageStatement)>(loc, builder);
builder.create<fir::CallOp>(loc, callee, llvm::None);
genUnreachable(builder, loc);
const Fortran::parser::PauseStmt &) {
fir::FirOpBuilder &builder = converter.getFirOpBuilder();
mlir::Location loc = converter.getCurrentLocation();
- mlir::FuncOp callee =
+ mlir::func::FuncOp callee =
fir::runtime::getRuntimeFunc<mkRTKey(PauseStatement)>(loc, builder);
builder.create<fir::CallOp>(loc, callee, llvm::None);
}
mlir::Location loc,
mlir::Value pointer,
mlir::Value target) {
- mlir::FuncOp func =
+ mlir::func::FuncOp func =
fir::runtime::getRuntimeFunc<mkRTKey(PointerIsAssociatedWith)>(loc,
builder);
llvm::SmallVector<mlir::Value> args = fir::runtime::createArguments(
mlir::Value Fortran::lower::genCpuTime(fir::FirOpBuilder &builder,
mlir::Location loc) {
- mlir::FuncOp func =
+ mlir::func::FuncOp func =
fir::runtime::getRuntimeFunc<mkRTKey(CpuTime)>(loc, builder);
return builder.create<fir::CallOp>(loc, func, llvm::None).getResult(0);
}
llvm::Optional<fir::CharBoxValue> time,
llvm::Optional<fir::CharBoxValue> zone,
mlir::Value values) {
- mlir::FuncOp callee =
+ mlir::func::FuncOp callee =
fir::runtime::getRuntimeFunc<mkRTKey(DateAndTime)>(loc, builder);
mlir::FunctionType funcTy = callee.getFunctionType();
mlir::Type idxTy = builder.getIndexType();
void Fortran::lower::genRandomInit(fir::FirOpBuilder &builder,
mlir::Location loc, mlir::Value repeatable,
mlir::Value imageDistinct) {
- mlir::FuncOp func =
+ mlir::func::FuncOp func =
fir::runtime::getRuntimeFunc<mkRTKey(RandomInit)>(loc, builder);
llvm::SmallVector<mlir::Value> args = fir::runtime::createArguments(
builder, loc, func.getFunctionType(), repeatable, imageDistinct);
void Fortran::lower::genRandomNumber(fir::FirOpBuilder &builder,
mlir::Location loc, mlir::Value harvest) {
- mlir::FuncOp func =
+ mlir::func::FuncOp func =
fir::runtime::getRuntimeFunc<mkRTKey(RandomNumber)>(loc, builder);
mlir::FunctionType funcTy = func.getFunctionType();
mlir::Value sourceFile = fir::factory::locationToFilename(builder, loc);
void Fortran::lower::genRandomSeed(fir::FirOpBuilder &builder,
mlir::Location loc, int argIndex,
mlir::Value argBox) {
- mlir::FuncOp func;
+ mlir::func::FuncOp func;
// argIndex is the nth (0-origin) argument in declaration order,
// or -1 if no argument is present.
switch (argIndex) {
mlir::Value resultBox, mlir::Value sourceBox,
mlir::Value moldBox) {
- mlir::FuncOp func =
+ mlir::func::FuncOp func =
fir::runtime::getRuntimeFunc<mkRTKey(Transfer)>(loc, builder);
mlir::FunctionType fTy = func.getFunctionType();
mlir::Value sourceFile = fir::factory::locationToFilename(builder, loc);
mlir::Location loc, mlir::Value resultBox,
mlir::Value sourceBox, mlir::Value moldBox,
mlir::Value size) {
- mlir::FuncOp func =
+ mlir::func::FuncOp func =
fir::runtime::getRuntimeFunc<mkRTKey(TransferSize)>(loc, builder);
mlir::FunctionType fTy = func.getFunctionType();
mlir::Value sourceFile = fir::factory::locationToFilename(builder, loc);
void Fortran::lower::genSystemClock(fir::FirOpBuilder &builder,
mlir::Location loc, mlir::Value count,
mlir::Value rate, mlir::Value max) {
- auto makeCall = [&](mlir::FuncOp func, mlir::Value arg) {
+ auto makeCall = [&](mlir::func::FuncOp func, mlir::Value arg) {
mlir::Type kindTy = func.getFunctionType().getInput(0);
int integerKind = 8;
if (auto intType =
"name"),
llvm::cl::init(32));
-mlir::FuncOp fir::FirOpBuilder::createFunction(mlir::Location loc,
- mlir::ModuleOp module,
- llvm::StringRef name,
- mlir::FunctionType ty) {
+mlir::func::FuncOp fir::FirOpBuilder::createFunction(mlir::Location loc,
+ mlir::ModuleOp module,
+ llvm::StringRef name,
+ mlir::FunctionType ty) {
return fir::createFuncOp(loc, module, name, ty);
}
-mlir::FuncOp fir::FirOpBuilder::getNamedFunction(mlir::ModuleOp modOp,
- llvm::StringRef name) {
- return modOp.lookupSymbol<mlir::FuncOp>(name);
+mlir::func::FuncOp fir::FirOpBuilder::getNamedFunction(mlir::ModuleOp modOp,
+ llvm::StringRef name) {
+ return modOp.lookupSymbol<mlir::func::FuncOp>(name);
}
-mlir::FuncOp fir::FirOpBuilder::getNamedFunction(mlir::ModuleOp modOp,
- mlir::SymbolRefAttr symbol) {
- return modOp.lookupSymbol<mlir::FuncOp>(symbol);
+mlir::func::FuncOp
+fir::FirOpBuilder::getNamedFunction(mlir::ModuleOp modOp,
+ mlir::SymbolRefAttr symbol) {
+ return modOp.lookupSymbol<mlir::func::FuncOp>(symbol);
}
fir::GlobalOp fir::FirOpBuilder::getNamedGlobal(mlir::ModuleOp modOp,
#include "flang/Optimizer/Builder/LowLevelIntrinsics.h"
#include "flang/Optimizer/Builder/FIRBuilder.h"
-mlir::FuncOp fir::factory::getLlvmMemcpy(fir::FirOpBuilder &builder) {
+mlir::func::FuncOp fir::factory::getLlvmMemcpy(fir::FirOpBuilder &builder) {
auto ptrTy = builder.getRefType(builder.getIntegerType(8));
llvm::SmallVector<mlir::Type> args = {ptrTy, ptrTy, builder.getI64Type(),
builder.getI1Type()};
"llvm.memcpy.p0i8.p0i8.i64", memcpyTy);
}
-mlir::FuncOp fir::factory::getLlvmMemmove(fir::FirOpBuilder &builder) {
+mlir::func::FuncOp fir::factory::getLlvmMemmove(fir::FirOpBuilder &builder) {
auto ptrTy = builder.getRefType(builder.getIntegerType(8));
llvm::SmallVector<mlir::Type> args = {ptrTy, ptrTy, builder.getI64Type(),
builder.getI1Type()};
"llvm.memmove.p0i8.p0i8.i64", memmoveTy);
}
-mlir::FuncOp fir::factory::getLlvmMemset(fir::FirOpBuilder &builder) {
+mlir::func::FuncOp fir::factory::getLlvmMemset(fir::FirOpBuilder &builder) {
auto ptrTy = builder.getRefType(builder.getIntegerType(8));
llvm::SmallVector<mlir::Type> args = {ptrTy, ptrTy, builder.getI64Type(),
builder.getI1Type()};
"llvm.memset.p0i8.p0i8.i64", memsetTy);
}
-mlir::FuncOp fir::factory::getRealloc(fir::FirOpBuilder &builder) {
+mlir::func::FuncOp fir::factory::getRealloc(fir::FirOpBuilder &builder) {
auto ptrTy = builder.getRefType(builder.getIntegerType(8));
llvm::SmallVector<mlir::Type> args = {ptrTy, builder.getI64Type()};
auto reallocTy = mlir::FunctionType::get(builder.getContext(), args, {ptrTy});
reallocTy);
}
-mlir::FuncOp fir::factory::getLlvmStackSave(fir::FirOpBuilder &builder) {
+mlir::func::FuncOp fir::factory::getLlvmStackSave(fir::FirOpBuilder &builder) {
auto ptrTy = builder.getRefType(builder.getIntegerType(8));
auto funcTy =
mlir::FunctionType::get(builder.getContext(), llvm::None, {ptrTy});
funcTy);
}
-mlir::FuncOp fir::factory::getLlvmStackRestore(fir::FirOpBuilder &builder) {
+mlir::func::FuncOp
+fir::factory::getLlvmStackRestore(fir::FirOpBuilder &builder) {
auto ptrTy = builder.getRefType(builder.getIntegerType(8));
auto funcTy =
mlir::FunctionType::get(builder.getContext(), {ptrTy}, llvm::None);
funcTy);
}
-mlir::FuncOp fir::factory::getLlvmInitTrampoline(fir::FirOpBuilder &builder) {
+mlir::func::FuncOp
+fir::factory::getLlvmInitTrampoline(fir::FirOpBuilder &builder) {
auto ptrTy = builder.getRefType(builder.getIntegerType(8));
auto funcTy = mlir::FunctionType::get(builder.getContext(),
{ptrTy, ptrTy, ptrTy}, llvm::None);
"llvm.init.trampoline", funcTy);
}
-mlir::FuncOp fir::factory::getLlvmAdjustTrampoline(fir::FirOpBuilder &builder) {
+mlir::func::FuncOp
+fir::factory::getLlvmAdjustTrampoline(fir::FirOpBuilder &builder) {
auto ptrTy = builder.getRefType(builder.getIntegerType(8));
auto funcTy = mlir::FunctionType::get(builder.getContext(), {ptrTy}, {ptrTy});
return builder.addNamedFunction(builder.getUnknownLoc(),
///
/// \p resultBox must be an unallocated allocatable used for the temporary
/// result. \p stringBox must be a fir.box describing the adjustr string
-/// argument. The \p adjustFunc should be a mlir::FuncOp for the appropriate
-/// runtime entry function.
+/// argument. The \p adjustFunc should be a mlir::func::FuncOp for the
+/// appropriate runtime entry function.
static void genAdjust(fir::FirOpBuilder &builder, mlir::Location loc,
mlir::Value resultBox, mlir::Value stringBox,
- mlir::FuncOp &adjustFunc) {
+ mlir::func::FuncOp &adjustFunc) {
auto fTy = adjustFunc.getFunctionType();
auto sourceLine =
mlir::arith::CmpIPredicate cmp,
mlir::Value lhsBuff, mlir::Value lhsLen,
mlir::Value rhsBuff, mlir::Value rhsLen) {
- mlir::FuncOp beginFunc;
+ mlir::func::FuncOp beginFunc;
switch (discoverKind(lhsBuff.getType())) {
case 1:
beginFunc = fir::runtime::getRuntimeFunc<mkRTKey(CharacterCompareScalar1)>(
mlir::Value stringLen,
mlir::Value substringBase,
mlir::Value substringLen, mlir::Value back) {
- mlir::FuncOp indexFunc;
+ mlir::func::FuncOp indexFunc;
switch (kind) {
case 1:
indexFunc = fir::runtime::getRuntimeFunc<mkRTKey(Index1)>(loc, builder);
mlir::Value stringBase, mlir::Value stringLen,
mlir::Value setBase, mlir::Value setLen,
mlir::Value back) {
- mlir::FuncOp func;
+ mlir::func::FuncOp func;
switch (kind) {
case 1:
func = fir::runtime::getRuntimeFunc<mkRTKey(Scan1)>(loc, builder);
mlir::Value stringBase,
mlir::Value stringLen, mlir::Value setBase,
mlir::Value setLen, mlir::Value back) {
- mlir::FuncOp func;
+ mlir::func::FuncOp func;
switch (kind) {
case 1:
func = fir::runtime::getRuntimeFunc<mkRTKey(Verify1)>(loc, builder);
mlir::Value fir::runtime::genLboundDim(fir::FirOpBuilder &builder,
mlir::Location loc, mlir::Value array,
mlir::Value dim) {
- mlir::FuncOp lboundFunc =
+ mlir::func::FuncOp lboundFunc =
fir::runtime::getRuntimeFunc<mkRTKey(LboundDim)>(loc, builder);
auto fTy = lboundFunc.getFunctionType();
auto sourceFile = fir::factory::locationToFilename(builder, loc);
void fir::runtime::genUbound(fir::FirOpBuilder &builder, mlir::Location loc,
mlir::Value resultBox, mlir::Value array,
mlir::Value kind) {
- mlir::FuncOp uboundFunc =
+ mlir::func::FuncOp uboundFunc =
fir::runtime::getRuntimeFunc<mkRTKey(Ubound)>(loc, builder);
auto fTy = uboundFunc.getFunctionType();
auto sourceFile = fir::factory::locationToFilename(builder, loc);
mlir::Value fir::runtime::genSizeDim(fir::FirOpBuilder &builder,
mlir::Location loc, mlir::Value array,
mlir::Value dim) {
- mlir::FuncOp sizeFunc =
+ mlir::func::FuncOp sizeFunc =
fir::runtime::getRuntimeFunc<mkRTKey(SizeDim)>(loc, builder);
auto fTy = sizeFunc.getFunctionType();
auto sourceFile = fir::factory::locationToFilename(builder, loc);
/// the DIM argument is absent.
mlir::Value fir::runtime::genSize(fir::FirOpBuilder &builder,
mlir::Location loc, mlir::Value array) {
- mlir::FuncOp sizeFunc =
+ mlir::func::FuncOp sizeFunc =
fir::runtime::getRuntimeFunc<mkRTKey(Size)>(loc, builder);
auto fTy = sizeFunc.getFunctionType();
auto sourceFile = fir::factory::locationToFilename(builder, loc);
mlir::Value fir::runtime::genExponent(fir::FirOpBuilder &builder,
mlir::Location loc, mlir::Type resultType,
mlir::Value x) {
- mlir::FuncOp func;
+ mlir::func::FuncOp func;
mlir::Type fltTy = x.getType();
if (fltTy.isF32()) {
/// Generate call to Fraction intrinsic runtime routine.
mlir::Value fir::runtime::genFraction(fir::FirOpBuilder &builder,
mlir::Location loc, mlir::Value x) {
- mlir::FuncOp func;
+ mlir::func::FuncOp func;
mlir::Type fltTy = x.getType();
if (fltTy.isF32())
mlir::Value fir::runtime::genNearest(fir::FirOpBuilder &builder,
mlir::Location loc, mlir::Value x,
mlir::Value s) {
- mlir::FuncOp func;
+ mlir::func::FuncOp func;
mlir::Type fltTy = x.getType();
if (fltTy.isF32())
/// Generate call to RRSpacing intrinsic runtime routine.
mlir::Value fir::runtime::genRRSpacing(fir::FirOpBuilder &builder,
mlir::Location loc, mlir::Value x) {
- mlir::FuncOp func;
+ mlir::func::FuncOp func;
mlir::Type fltTy = x.getType();
if (fltTy.isF32())
mlir::Value fir::runtime::genScale(fir::FirOpBuilder &builder,
mlir::Location loc, mlir::Value x,
mlir::Value i) {
- mlir::FuncOp func;
+ mlir::func::FuncOp func;
mlir::Type fltTy = x.getType();
if (fltTy.isF32())
mlir::Value fir::runtime::genSetExponent(fir::FirOpBuilder &builder,
mlir::Location loc, mlir::Value x,
mlir::Value i) {
- mlir::FuncOp func;
+ mlir::func::FuncOp func;
mlir::Type fltTy = x.getType();
if (fltTy.isF32())
/// Generate call to Spacing intrinsic runtime routine.
mlir::Value fir::runtime::genSpacing(fir::FirOpBuilder &builder,
mlir::Location loc, mlir::Value x) {
- mlir::FuncOp func;
+ mlir::func::FuncOp func;
mlir::Type fltTy = x.getType();
if (fltTy.isF32())
mlir::Value fir::runtime::genMaxval(fir::FirOpBuilder &builder,
mlir::Location loc, mlir::Value arrayBox,
mlir::Value maskBox) {
- mlir::FuncOp func;
+ mlir::func::FuncOp func;
auto ty = arrayBox.getType();
auto arrTy = fir::dyn_cast_ptrOrBoxEleTy(ty);
auto eleTy = arrTy.cast<fir::SequenceType>().getEleTy();
mlir::Value fir::runtime::genMinval(fir::FirOpBuilder &builder,
mlir::Location loc, mlir::Value arrayBox,
mlir::Value maskBox) {
- mlir::FuncOp func;
+ mlir::func::FuncOp func;
auto ty = arrayBox.getType();
auto arrTy = fir::dyn_cast_ptrOrBoxEleTy(ty);
auto eleTy = arrTy.cast<fir::SequenceType>().getEleTy();
mlir::Location loc, mlir::Value arrayBox,
mlir::Value maskBox,
mlir::Value resultBox) {
- mlir::FuncOp func;
+ mlir::func::FuncOp func;
auto ty = arrayBox.getType();
auto arrTy = fir::dyn_cast_ptrOrBoxEleTy(ty);
auto eleTy = arrTy.cast<fir::SequenceType>().getEleTy();
mlir::Value vectorABox,
mlir::Value vectorBBox,
mlir::Value resultBox) {
- mlir::FuncOp func;
+ mlir::func::FuncOp func;
auto ty = vectorABox.getType();
auto arrTy = fir::dyn_cast_ptrOrBoxEleTy(ty);
auto eleTy = arrTy.cast<fir::SequenceType>().getEleTy();
mlir::Value fir::runtime::genSum(fir::FirOpBuilder &builder, mlir::Location loc,
mlir::Value arrayBox, mlir::Value maskBox,
mlir::Value resultBox) {
- mlir::FuncOp func;
+ mlir::func::FuncOp func;
auto ty = arrayBox.getType();
auto arrTy = fir::dyn_cast_ptrOrBoxEleTy(ty);
auto eleTy = arrTy.cast<fir::SequenceType>().getEleTy();
void fir::runtime::genReportFatalUserError(fir::FirOpBuilder &builder,
mlir::Location loc,
llvm::StringRef message) {
- mlir::FuncOp crashFunc =
+ mlir::func::FuncOp crashFunc =
fir::runtime::getRuntimeFunc<mkRTKey(ReportFatalUserError)>(loc, builder);
mlir::FunctionType funcTy = crashFunc.getFunctionType();
mlir::Value msgVal = fir::getBase(
rewriter.replaceOpWithNewOp<ConvertOp>(
addr, typeConverter.convertType(addr.getType()), addr.getVal());
}
- } else if (auto func = mlir::dyn_cast<mlir::FuncOp>(op)) {
+ } else if (auto func = mlir::dyn_cast<mlir::func::FuncOp>(op)) {
mlir::FunctionType ty = func.getFunctionType();
if (typeConverter.needsConversion(ty)) {
rewriter.startRootUpdate(func);
FixupTy(Codes code, std::size_t index, std::size_t second = 0)
: code{code}, index{index}, second{second} {}
FixupTy(Codes code, std::size_t index,
- std::function<void(mlir::FuncOp)> &&finalizer)
+ std::function<void(mlir::func::FuncOp)> &&finalizer)
: code{code}, index{index}, finalizer{finalizer} {}
FixupTy(Codes code, std::size_t index, std::size_t second,
- std::function<void(mlir::FuncOp)> &&finalizer)
+ std::function<void(mlir::func::FuncOp)> &&finalizer)
: code{code}, index{index}, second{second}, finalizer{finalizer} {}
Codes code;
std::size_t index;
std::size_t second{};
- llvm::Optional<std::function<void(mlir::FuncOp)>> finalizer{};
+ llvm::Optional<std::function<void(mlir::func::FuncOp)>> finalizer{};
}; // namespace
/// Target-specific rewriting of the FIR. This is a prerequisite pass to code
bool sret;
if constexpr (std::is_same_v<std::decay_t<A>, fir::CallOp>) {
sret = callOp.getCallee() &&
- functionArgIsSRet(index,
- getModule().lookupSymbol<mlir::FuncOp>(
- *callOp.getCallee()));
+ functionArgIsSRet(
+ index, getModule().lookupSymbol<mlir::func::FuncOp>(
+ *callOp.getCallee()));
} else {
// TODO: dispatch case; how do we put arguments on a call?
// We cannot put both an sret and the dispatch object first.
// confirm that this is a dummy procedure and should be split.
// It cannot be used to match because attributes are not
// available in case of indirect calls.
- auto funcOp =
- module.lookupSymbol<mlir::FuncOp>(*callOp.getCallee());
+ auto funcOp = module.lookupSymbol<mlir::func::FuncOp>(
+ *callOp.getCallee());
if (funcOp &&
!funcOp.template getArgAttrOfType<mlir::UnitAttr>(
index, charProcAttr))
/// As the type signature is being changed, this must also update the
/// function itself to use any new arguments, etc.
mlir::LogicalResult convertTypes(mlir::ModuleOp mod) {
- for (auto fn : mod.getOps<mlir::FuncOp>())
+ for (auto fn : mod.getOps<mlir::func::FuncOp>())
convertSignature(fn);
return mlir::success();
}
/// Determine if the signature has host associations. The host association
/// argument may need special target specific rewriting.
- static bool hasHostAssociations(mlir::FuncOp func) {
+ static bool hasHostAssociations(mlir::func::FuncOp func) {
std::size_t end = func.getFunctionType().getInputs().size();
for (std::size_t i = 0; i < end; ++i)
if (func.getArgAttrOfType<mlir::UnitAttr>(i, getHostAssocAttrName()))
/// Rewrite the signatures and body of the `FuncOp`s in the module for
/// the immediately subsequent target code gen.
- void convertSignature(mlir::FuncOp func) {
+ void convertSignature(mlir::func::FuncOp func) {
auto funcTy = func.getFunctionType().cast<mlir::FunctionType>();
if (hasPortableSignature(funcTy) && !hasHostAssociations(func))
return;
(*fixup.finalizer)(func);
}
- inline bool functionArgIsSRet(unsigned index, mlir::FuncOp func) {
+ inline bool functionArgIsSRet(unsigned index, mlir::func::FuncOp func) {
if (auto attr = func.getArgAttrOfType<mlir::UnitAttr>(index, "llvm.sret"))
return true;
return false;
/// value to a "hidden" first argument or packing the complex into a wide
/// GPR.
template <typename A, typename B, typename C>
- void doComplexReturn(mlir::FuncOp func, A cmplx, B &newResTys, B &newInTys,
- C &fixups) {
+ void doComplexReturn(mlir::func::FuncOp func, A cmplx, B &newResTys,
+ B &newInTys, C &fixups) {
if (noComplexConversion) {
newResTys.push_back(cmplx);
return;
if (attr.isSRet()) {
unsigned argNo = newInTys.size();
fixups.emplace_back(
- FixupTy::Codes::ReturnAsStore, argNo, [=](mlir::FuncOp func) {
+ FixupTy::Codes::ReturnAsStore, argNo, [=](mlir::func::FuncOp func) {
func.setArgAttr(argNo, "llvm.sret", rewriter->getUnitAttr());
});
newInTys.push_back(argTy);
/// a temporary memory location or factoring the value into two distinct
/// arguments.
template <typename A, typename B, typename C>
- void doComplexArg(mlir::FuncOp func, A cmplx, B &newInTys, C &fixups) {
+ void doComplexArg(mlir::func::FuncOp func, A cmplx, B &newInTys, C &fixups) {
if (noComplexConversion) {
newInTys.push_back(cmplx);
return;
if (attr.isByVal()) {
if (auto align = attr.getAlignment())
fixups.emplace_back(
- FixupTy::Codes::ArgumentAsLoad, argNo, [=](mlir::FuncOp func) {
+ FixupTy::Codes::ArgumentAsLoad, argNo,
+ [=](mlir::func::FuncOp func) {
func.setArgAttr(argNo, "llvm.byval", rewriter->getUnitAttr());
func.setArgAttr(argNo, "llvm.align",
rewriter->getIntegerAttr(
});
else
fixups.emplace_back(FixupTy::Codes::ArgumentAsLoad, newInTys.size(),
- [=](mlir::FuncOp func) {
+ [=](mlir::func::FuncOp func) {
func.setArgAttr(argNo, "llvm.byval",
rewriter->getUnitAttr());
});
} else {
if (auto align = attr.getAlignment())
- fixups.emplace_back(fixupCode, argNo, index, [=](mlir::FuncOp func) {
- func.setArgAttr(
- argNo, "llvm.align",
- rewriter->getIntegerAttr(rewriter->getIntegerType(32), align));
- });
+ fixups.emplace_back(
+ fixupCode, argNo, index, [=](mlir::func::FuncOp func) {
+ func.setArgAttr(argNo, "llvm.align",
+ rewriter->getIntegerAttr(
+ rewriter->getIntegerType(32), align));
+ });
else
fixups.emplace_back(fixupCode, argNo, index);
}
}
void fir::CallOp::build(mlir::OpBuilder &builder, mlir::OperationState &result,
- mlir::FuncOp callee, mlir::ValueRange operands) {
+ mlir::func::FuncOp callee, mlir::ValueRange operands) {
result.addOperands(operands);
result.addAttribute(getCalleeAttrNameStr(), SymbolRefAttr::get(callee));
result.addTypes(callee.getFunctionType().getResults());
type.isa<fir::PointerType>();
}
-mlir::FuncOp fir::createFuncOp(mlir::Location loc, mlir::ModuleOp module,
- StringRef name, mlir::FunctionType type,
- llvm::ArrayRef<mlir::NamedAttribute> attrs) {
- if (auto f = module.lookupSymbol<mlir::FuncOp>(name))
+mlir::func::FuncOp
+fir::createFuncOp(mlir::Location loc, mlir::ModuleOp module, StringRef name,
+ mlir::FunctionType type,
+ llvm::ArrayRef<mlir::NamedAttribute> attrs) {
+ if (auto f = module.lookupSymbol<mlir::func::FuncOp>(name))
return f;
mlir::OpBuilder modBuilder(module.getBodyRegion());
modBuilder.setInsertionPointToEnd(module.getBody());
- auto result = modBuilder.create<mlir::FuncOp>(loc, name, type, attrs);
+ auto result = modBuilder.create<mlir::func::FuncOp>(loc, name, type, attrs);
result.setVisibility(mlir::SymbolTable::Visibility::Private);
return result;
}
return result;
}
-bool fir::hasHostAssociationArgument(mlir::FuncOp func) {
+bool fir::hasHostAssociationArgument(mlir::func::FuncOp func) {
if (auto allArgAttrs = func.getAllArgAttrs())
for (auto attr : allArgAttrs)
if (auto dict = attr.template dyn_cast_or_null<mlir::DictionaryAttr>())
// If this is a function argument, look in the argument attributes.
if (auto blockArg = value.dyn_cast<mlir::BlockArgument>()) {
if (blockArg.getOwner() && blockArg.getOwner()->isEntryBlock())
- if (auto funcOp =
- mlir::dyn_cast<mlir::FuncOp>(blockArg.getOwner()->getParentOp()))
+ if (auto funcOp = mlir::dyn_cast<mlir::func::FuncOp>(
+ blockArg.getOwner()->getParentOp()))
if (funcOp.getArgAttr(blockArg.getArgNumber(), attributeName))
return true;
return false;
return false;
}
-bool fir::anyFuncArgsHaveAttr(mlir::FuncOp func, llvm::StringRef attr) {
+bool fir::anyFuncArgsHaveAttr(mlir::func::FuncOp func, llvm::StringRef attr) {
for (unsigned i = 0, end = func.getNumArguments(); i < end; ++i)
if (func.getArgAttr(i, attr))
return true;
/// these analysis are used twice, first for marking operations for rewrite and
/// second when doing rewrite.
struct AffineFunctionAnalysis {
- explicit AffineFunctionAnalysis(mlir::FuncOp funcOp) {
+ explicit AffineFunctionAnalysis(mlir::func::FuncOp funcOp) {
for (fir::DoLoopOp op : funcOp.getOps<fir::DoLoopOp>())
loopAnalysisMap.try_emplace(op, op, *this);
}
call.getCallableForCallee().dyn_cast<mlir::SymbolRefAttr>()) {
auto module = op->getParentOfType<mlir::ModuleOp>();
return hasHostAssociationArgument(
- module.lookupSymbol<mlir::FuncOp>(callee));
+ module.lookupSymbol<mlir::func::FuncOp>(callee));
}
return false;
});
}
};
-struct MangleNameOnFuncOp : public mlir::OpRewritePattern<mlir::FuncOp> {
+struct MangleNameOnFuncOp : public mlir::OpRewritePattern<mlir::func::FuncOp> {
public:
using OpRewritePattern::OpRewritePattern;
mlir::LogicalResult
- matchAndRewrite(mlir::FuncOp op,
+ matchAndRewrite(mlir::func::FuncOp op,
mlir::PatternRewriter &rewriter) const override {
rewriter.startRootUpdate(op);
auto result = fir::NameUniquer::deconstruct(op.getSymName());
return true;
});
- target.addDynamicallyLegalOp<mlir::FuncOp>([](mlir::FuncOp op) {
+ target.addDynamicallyLegalOp<mlir::func::FuncOp>([](mlir::func::FuncOp op) {
return !fir::NameUniquer::needExternalNameMangling(op.getSymName());
});
class MemDataFlowOpt : public fir::MemRefDataFlowOptBase<MemDataFlowOpt> {
public:
void runOnOperation() override {
- mlir::FuncOp f = getOperation();
+ mlir::func::FuncOp f = getOperation();
auto *domInfo = &getAnalysis<mlir::DominanceInfo>();
LoadStoreForwarding<fir::LoadOp, fir::StoreOp> lsf(domInfo);
MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(ReturnAnalysis)
ReturnAnalysis(mlir::Operation *op) {
- if (auto func = mlir::dyn_cast<mlir::FuncOp>(op))
+ if (auto func = mlir::dyn_cast<mlir::func::FuncOp>(op))
for (mlir::Block &block : func)
for (mlir::Operation &i : block)
if (mlir::isa<mlir::func::ReturnOp>(i)) {
// Set up a Module with a dummy function operation inside.
// Set the insertion point in the function entry block.
mlir::ModuleOp mod = builder.create<mlir::ModuleOp>(loc);
- mlir::FuncOp func = mlir::FuncOp::create(
+ mlir::func::FuncOp func = mlir::func::FuncOp::create(
loc, "func1", builder.getFunctionType(llvm::None, llvm::None));
auto *entryBlock = func.addEntryBlock();
mod.push_back(mod);
// Set up a Module with a dummy function operation inside.
// Set the insertion point in the function entry block.
mlir::ModuleOp mod = builder.create<mlir::ModuleOp>(loc);
- mlir::FuncOp func = mlir::FuncOp::create(
+ mlir::func::FuncOp func = mlir::func::FuncOp::create(
loc, "func1", builder.getFunctionType(llvm::None, llvm::None));
auto *entryBlock = func.addEntryBlock();
mod.push_back(mod);
// Set up a Module with a dummy function operation inside.
// Set the insertion point in the function entry block.
mlir::ModuleOp mod = builder.create<mlir::ModuleOp>(loc);
- mlir::FuncOp func = mlir::FuncOp::create(
+ mlir::func::FuncOp func = mlir::func::FuncOp::create(
loc, "func1", builder.getFunctionType(llvm::None, llvm::None));
auto *entryBlock = func.addEntryBlock();
mod.push_back(mod);
// Set up a Module with a dummy function operation inside.
// Set the insertion point in the function entry block.
mlir::ModuleOp mod = builder.create<mlir::ModuleOp>(loc);
- mlir::FuncOp func = mlir::FuncOp::create(loc, "runtime_unit_tests_func",
- builder.getFunctionType(llvm::None, llvm::None));
+ mlir::func::FuncOp func =
+ mlir::func::FuncOp::create(loc, "runtime_unit_tests_func",
+ builder.getFunctionType(llvm::None, llvm::None));
auto *entryBlock = func.addEntryBlock();
mod.push_back(mod);
builder.setInsertionPointToStart(entryBlock);
```c++
// Partial bufferization passes.
pm.addPass(createTensorConstantBufferizePass());
- pm.addNestedPass<FuncOp>(createTCPBufferizePass()); // Bufferizes the downstream `tcp` dialect.
- pm.addNestedPass<FuncOp>(createSCFBufferizePass());
- pm.addNestedPass<FuncOp>(createLinalgBufferizePass());
- pm.addNestedPass<FuncOp>(createTensorBufferizePass());
+ pm.addNestedPass<func::FuncOp>(createTCPBufferizePass()); // Bufferizes the downstream `tcp` dialect.
+ pm.addNestedPass<func::FuncOp>(createSCFBufferizePass());
+ pm.addNestedPass<func::FuncOp>(createLinalgBufferizePass());
+ pm.addNestedPass<func::FuncOp>(createTensorBufferizePass());
pm.addPass(createFuncBufferizePass());
// Finalizing bufferization pass.
- pm.addNestedPass<FuncOp>(createFinalizingBufferizePass());
+ pm.addNestedPass<func::FuncOp>(createFinalizingBufferizePass());
```
Looking first at the partial bufferization passes, we see that there are a
use the argument materialization hook on the `TypeConverter`. This hook also
takes an optional `TypeConverter::SignatureConversion` parameter that applies a
custom conversion to the entry block of the region. The types of the entry block
-arguments are often tied semantically to details on the operation, e.g. FuncOp,
+arguments are often tied semantically to details on the operation, e.g. func::FuncOp,
AffineForOp, etc. To convert the signature of just the region entry block, and
not any other blocks within the region, the `applySignatureConversion` hook may
be used instead. A signature conversion, `TypeConverter::SignatureConversion`,
/// in C++. Passes defined declaratively use a cleaner mechanism for providing
/// these utilities.
struct MyFunctionPass : public PassWrapper<MyFunctionPass,
- OperationPass<FuncOp>> {
+ OperationPass<func::FuncOp>> {
void runOnOperation() override {
- // Get the current FuncOp operation being operated on.
- FuncOp f = getOperation();
+ // Get the current func::FuncOp operation being operated on.
+ func::FuncOp f = getOperation();
// Walk the operations within the function.
f.walk([](Operation *inst) {
// Nest a pass manager that operates on functions within the nested SPIRV
// module.
-OpPassManager &nestedFunctionPM = nestedModulePM.nest<FuncOp>();
+OpPassManager &nestedFunctionPM = nestedModulePM.nest<func::FuncOp>();
nestedFunctionPM.addPass(std::make_unique<MyFunctionPass>());
// Run the pass manager on the top-level module.
MyModulePass
OpPassManager<spirv::ModuleOp>
MySPIRVModulePass
- OpPassManager<FuncOp>
+ OpPassManager<func::FuncOp>
MyFunctionPass
```
These pipelines are then run over a single operation at a time. This means that,
-for example, given a series of consecutive passes on FuncOp, it will execute all
+for example, given a series of consecutive passes on func::FuncOp, it will execute all
on the first function, then all on the second function, etc. until the entire
program has been run through the passes. This provides several benefits:
}
// Create a new non-toy function, with the same region.
- auto func = rewriter.create<mlir::FuncOp>(op.getLoc(), op.getName(),
- op.getFunctionType());
+ auto func = rewriter.create<mlir::func::FuncOp>(op.getLoc(), op.getName(),
+ op.getFunctionType());
rewriter.inlineRegionBefore(op.getRegion(), func.getBody(), func.end());
rewriter.eraseOp(op);
return success();
pm.addPass(mlir::toy::createLowerToAffinePass());
// Add a few cleanups post lowering.
- mlir::OpPassManager &optPM = pm.nest<mlir::FuncOp>();
+ mlir::OpPassManager &optPM = pm.nest<mlir::func::FuncOp>();
optPM.addPass(mlir::createCanonicalizerPass());
optPM.addPass(mlir::createCSEPass());
}
// Create a new non-toy function, with the same region.
- auto func = rewriter.create<mlir::FuncOp>(op.getLoc(), op.getName(),
- op.getFunctionType());
+ auto func = rewriter.create<mlir::func::FuncOp>(op.getLoc(), op.getName(),
+ op.getFunctionType());
rewriter.inlineRegionBefore(op.getRegion(), func.getBody(), func.end());
rewriter.eraseOp(op);
return success();
pm.addPass(mlir::toy::createLowerToAffinePass());
// Add a few cleanups post lowering.
- mlir::OpPassManager &optPM = pm.nest<mlir::FuncOp>();
+ mlir::OpPassManager &optPM = pm.nest<mlir::func::FuncOp>();
optPM.addPass(mlir::createCanonicalizerPass());
optPM.addPass(mlir::createCSEPass());
}
// Create a new non-toy function, with the same region.
- auto func = rewriter.create<mlir::FuncOp>(op.getLoc(), op.getName(),
- op.getFunctionType());
+ auto func = rewriter.create<mlir::func::FuncOp>(op.getLoc(), op.getName(),
+ op.getFunctionType());
rewriter.inlineRegionBefore(op.getRegion(), func.getBody(), func.end());
rewriter.eraseOp(op);
return success();
pm.addPass(mlir::toy::createLowerToAffinePass());
// Add a few cleanups post lowering.
- mlir::OpPassManager &optPM = pm.nest<mlir::FuncOp>();
+ mlir::OpPassManager &optPM = pm.nest<mlir::func::FuncOp>();
optPM.addPass(mlir::createCanonicalizerPass());
optPM.addPass(mlir::createCSEPass());
include "mlir/Pass/PassBase.td"
-def AffineDataCopyGeneration : Pass<"affine-data-copy-generate", "FuncOp"> {
+def AffineDataCopyGeneration : Pass<"affine-data-copy-generate", "func::FuncOp"> {
let summary = "Generate explicit copying for affine memory operations";
let constructor = "mlir::createAffineDataCopyGenerationPass()";
let dependentDialects = ["memref::MemRefDialect"];
];
}
-def AffineLoopFusion : Pass<"affine-loop-fusion", "FuncOp"> {
+def AffineLoopFusion : Pass<"affine-loop-fusion", "func::FuncOp"> {
let summary = "Fuse affine loop nests";
let description = [{
This pass performs fusion of loop nests using a slicing-based approach. It
}
def AffineLoopInvariantCodeMotion
- : Pass<"affine-loop-invariant-code-motion", "FuncOp"> {
+ : Pass<"affine-loop-invariant-code-motion", "func::FuncOp"> {
let summary = "Hoist loop invariant instructions outside of affine loops";
let constructor = "mlir::createAffineLoopInvariantCodeMotionPass()";
}
-def AffineLoopTiling : Pass<"affine-loop-tile", "FuncOp"> {
+def AffineLoopTiling : Pass<"affine-loop-tile", "func::FuncOp"> {
let summary = "Tile affine loop nests";
let constructor = "mlir::createLoopTilingPass()";
let options = [
];
}
-def AffineLoopUnroll : Pass<"affine-loop-unroll", "FuncOp"> {
+def AffineLoopUnroll : Pass<"affine-loop-unroll", "func::FuncOp"> {
let summary = "Unroll affine loops";
let constructor = "mlir::createLoopUnrollPass()";
let options = [
];
}
-def AffineLoopUnrollAndJam : Pass<"affine-loop-unroll-jam", "FuncOp"> {
+def AffineLoopUnrollAndJam : Pass<"affine-loop-unroll-jam", "func::FuncOp"> {
let summary = "Unroll and jam affine loops";
let constructor = "mlir::createLoopUnrollAndJamPass()";
let options = [
}
def AffinePipelineDataTransfer
- : Pass<"affine-pipeline-data-transfer", "FuncOp"> {
+ : Pass<"affine-pipeline-data-transfer", "func::FuncOp"> {
let summary = "Pipeline non-blocking data transfers between explicitly "
"managed levels of the memory hierarchy";
let description = [{
let constructor = "mlir::createPipelineDataTransferPass()";
}
-def AffineScalarReplacement : Pass<"affine-scalrep", "FuncOp"> {
+def AffineScalarReplacement : Pass<"affine-scalrep", "func::FuncOp"> {
  let summary = "Replace affine memref accesses by scalars by forwarding stores "
"to loads and eliminating redundant loads";
let description = [{
let constructor = "mlir::createAffineScalarReplacementPass()";
}
-def AffineVectorize : Pass<"affine-super-vectorize", "FuncOp"> {
+def AffineVectorize : Pass<"affine-super-vectorize", "func::FuncOp"> {
let summary = "Vectorize to a target independent n-D vector abstraction";
let constructor = "mlir::createSuperVectorizePass()";
let dependentDialects = ["vector::VectorDialect"];
];
}
-def AffineParallelize : Pass<"affine-parallelize", "FuncOp"> {
+def AffineParallelize : Pass<"affine-parallelize", "func::FuncOp"> {
let summary = "Convert affine.for ops into 1-D affine.parallel";
let constructor = "mlir::createAffineParallelizePass()";
let options = [
];
}
-def AffineLoopNormalize : Pass<"affine-loop-normalize", "FuncOp"> {
+def AffineLoopNormalize : Pass<"affine-loop-normalize", "func::FuncOp"> {
let summary = "Apply normalization transformations to affine loop-like ops";
let constructor = "mlir::createAffineLoopNormalizePass()";
}
-def LoopCoalescing : Pass<"affine-loop-coalescing", "FuncOp"> {
+def LoopCoalescing : Pass<"affine-loop-coalescing", "func::FuncOp"> {
let summary = "Coalesce nested loops with independent bounds into a single "
"loop";
let constructor = "mlir::createLoopCoalescingPass()";
let dependentDialects = ["arith::ArithmeticDialect"];
}
-def SimplifyAffineStructures : Pass<"affine-simplify-structures", "FuncOp"> {
+def SimplifyAffineStructures : Pass<"affine-simplify-structures", "func::FuncOp"> {
let summary = "Simplify affine expressions in maps/sets and normalize "
"memrefs";
let constructor = "mlir::createSimplifyAffineStructuresPass()";
Equivalent
};
-/// Return `true` if the given value is a BlockArgument of a FuncOp.
+/// Return `true` if the given value is a BlockArgument of a func::FuncOp.
bool isFunctionArgument(Value value);
/// Dialect-specific analysis state. Analysis/bufferization information
#include "mlir/Dialect/Func/IR/FuncOpsDialect.h.inc"
-namespace mlir {
-/// FIXME: This is a temporary using directive to ease the transition of FuncOp
-/// to the Func dialect. This will be removed after all uses are updated.
-using FuncOp = func::FuncOp;
-} // namespace mlir
-
namespace llvm {
/// Allow stealing the low bits of FuncOp.
let dependentDialects = ["mlir::DLTIDialect"];
}
-def GpuAsyncRegionPass : Pass<"gpu-async-region", "FuncOp"> {
+def GpuAsyncRegionPass : Pass<"gpu-async-region", "func::FuncOp"> {
let summary = "Make GPU ops async";
let constructor = "mlir::createGpuAsyncRegionPass()";
let dependentDialects = ["async::AsyncDialect"];
class FuncOp;
} // namespace func
-// TODO: Remove when all references have been updated.
-using FuncOp = func::FuncOp;
-
namespace bufferization {
struct OneShotBufferizationOptions;
} // namespace bufferization
let dependentDialects = ["linalg::LinalgDialect", "tensor::TensorDialect"];
}
-def LinalgInlineScalarOperands : Pass<"linalg-inline-scalar-operands", "FuncOp"> {
+def LinalgInlineScalarOperands : Pass<"linalg-inline-scalar-operands", "func::FuncOp"> {
let summary = "Inline scalar operands into linalg generic ops";
let constructor = "mlir::createLinalgInlineScalarOperandsPass()";
let dependentDialects = [
];
}
-def LinalgLowerToAffineLoops : Pass<"convert-linalg-to-affine-loops", "FuncOp"> {
+def LinalgLowerToAffineLoops : Pass<"convert-linalg-to-affine-loops", "func::FuncOp"> {
let summary = "Lower the operations from the linalg dialect into affine "
"loops";
let constructor = "mlir::createConvertLinalgToAffineLoopsPass()";
"AffineDialect", "linalg::LinalgDialect", "memref::MemRefDialect"];
}
-def LinalgLowerToLoops : Pass<"convert-linalg-to-loops", "FuncOp"> {
+def LinalgLowerToLoops : Pass<"convert-linalg-to-loops", "func::FuncOp"> {
let summary = "Lower the operations from the linalg dialect into loops";
let constructor = "mlir::createConvertLinalgToLoopsPass()";
let dependentDialects = [
}
def LinalgLowerToParallelLoops
- : Pass<"convert-linalg-to-parallel-loops", "FuncOp"> {
+ : Pass<"convert-linalg-to-parallel-loops", "func::FuncOp"> {
let summary = "Lower the operations from the linalg dialect into parallel "
"loops";
let constructor = "mlir::createConvertLinalgToParallelLoopsPass()";
];
}
-def LinalgBufferize : Pass<"linalg-bufferize", "FuncOp"> {
+def LinalgBufferize : Pass<"linalg-bufferize", "func::FuncOp"> {
let summary = "Bufferize the linalg dialect";
let constructor = "mlir::createLinalgBufferizePass()";
let dependentDialects = [
];
}
-def LinalgPromotion : Pass<"linalg-promote-subviews", "FuncOp"> {
+def LinalgPromotion : Pass<"linalg-promote-subviews", "func::FuncOp"> {
let summary = "Promote subview ops to local buffers";
let constructor = "mlir::createLinalgPromotionPass()";
let options = [
let dependentDialects = ["linalg::LinalgDialect"];
}
-def LinalgTiling : Pass<"linalg-tile", "FuncOp"> {
+def LinalgTiling : Pass<"linalg-tile", "func::FuncOp"> {
let summary = "Tile operations in the linalg dialect";
let constructor = "mlir::createLinalgTilingPass()";
let dependentDialects = [
];
}
-def LinalgGeneralization : Pass<"linalg-generalize-named-ops", "FuncOp"> {
+def LinalgGeneralization : Pass<"linalg-generalize-named-ops", "func::FuncOp"> {
let summary = "Convert named ops into generic ops";
let constructor = "mlir::createLinalgGeneralizationPass()";
let dependentDialects = ["linalg::LinalgDialect"];
}
def LinalgStrategyTileAndFusePass
- : Pass<"linalg-strategy-tile-and-fuse-pass", "FuncOp"> {
+ : Pass<"linalg-strategy-tile-and-fuse-pass", "func::FuncOp"> {
let summary = "Configurable pass to apply pattern-based tiling and fusion.";
let constructor = "mlir::createLinalgStrategyTileAndFusePass()";
let options = [
}
def LinalgStrategyTilePass
- : Pass<"linalg-strategy-tile-pass", "FuncOp"> {
+ : Pass<"linalg-strategy-tile-pass", "func::FuncOp"> {
let summary = "Configurable pass to apply pattern-based linalg tiling.";
let constructor = "mlir::createLinalgStrategyTilePass()";
let dependentDialects = ["linalg::LinalgDialect"];
}
def LinalgStrategyPadPass
- : Pass<"linalg-strategy-pad-pass", "FuncOp"> {
+ : Pass<"linalg-strategy-pad-pass", "func::FuncOp"> {
let summary = "Configurable pass to apply padding and hoisting.";
let constructor = "mlir::createLinalgStrategyPadPass()";
let dependentDialects = ["linalg::LinalgDialect"];
}
def LinalgStrategyPromotePass
- : Pass<"linalg-strategy-promote-pass", "FuncOp"> {
+ : Pass<"linalg-strategy-promote-pass", "func::FuncOp"> {
let summary = "Configurable pass to apply pattern-based linalg promotion.";
let constructor = "mlir::createLinalgStrategyPromotePass()";
let dependentDialects = ["linalg::LinalgDialect"];
}
def LinalgStrategyGeneralizePass
- : Pass<"linalg-strategy-generalize-pass", "FuncOp"> {
+ : Pass<"linalg-strategy-generalize-pass", "func::FuncOp"> {
let summary = "Configurable pass to apply pattern-based generalization.";
let constructor = "mlir::createLinalgStrategyGeneralizePass()";
let dependentDialects = ["linalg::LinalgDialect"];
// TODO: if/when we need finer control add an anchorOp option.
def LinalgStrategyDecomposePass
- : Pass<"linalg-strategy-decompose-pass", "FuncOp"> {
+ : Pass<"linalg-strategy-decompose-pass", "func::FuncOp"> {
  let summary = "Configurable pass to apply pattern-based decomposition.";
let constructor = "mlir::createLinalgStrategyDecomposePass()";
let dependentDialects = ["linalg::LinalgDialect"];
}
def LinalgStrategyInterchangePass
- : Pass<"linalg-strategy-interchange-pass", "FuncOp"> {
+ : Pass<"linalg-strategy-interchange-pass", "func::FuncOp"> {
let summary = "Configurable pass to apply pattern-based iterator interchange.";
let constructor = "mlir::createLinalgStrategyInterchangePass()";
let dependentDialects = ["linalg::LinalgDialect"];
}
def LinalgStrategyVectorizePass
- : Pass<"linalg-strategy-vectorize-pass", "FuncOp"> {
+ : Pass<"linalg-strategy-vectorize-pass", "func::FuncOp"> {
let summary = "Configurable pass to apply pattern-based linalg vectorization.";
let constructor = "mlir::createLinalgStrategyVectorizePass()";
let dependentDialects = ["linalg::LinalgDialect"];
}
def LinalgStrategyEnablePass
- : Pass<"linalg-strategy-enable-pass", "FuncOp"> {
+ : Pass<"linalg-strategy-enable-pass", "func::FuncOp"> {
let summary = "Configurable pass to enable the application of other "
"pattern-based linalg passes.";
let constructor = "mlir::createLinalgStrategyEnablePass()";
}
def LinalgStrategyLowerVectorsPass
- : Pass<"linalg-strategy-lower-vectors-pass", "FuncOp"> {
+ : Pass<"linalg-strategy-lower-vectors-pass", "func::FuncOp"> {
let summary = "Configurable pass to lower vector operations.";
let constructor = "mlir::createLinalgStrategyLowerVectorsPass()";
let dependentDialects = ["linalg::LinalgDialect"];
}
def LinalgStrategyRemoveMarkersPass
- : Pass<"linalg-strategy-remove-markers-pass", "FuncOp"> {
+ : Pass<"linalg-strategy-remove-markers-pass", "func::FuncOp"> {
let summary = "Cleanup pass that drops markers.";
let constructor = "mlir::createLinalgStrategyRemoveMarkersPass()";
let dependentDialects = ["linalg::LinalgDialect"];
include "mlir/Pass/PassBase.td"
-def QuantConvertConst : Pass<"quant-convert-const", "FuncOp"> {
+def QuantConvertConst : Pass<"quant-convert-const", "func::FuncOp"> {
let summary = "Converts constants followed by qbarrier to actual quantized "
"values";
let constructor = "mlir::quant::createConvertConstPass()";
}
def QuantConvertSimulatedQuant
- : Pass<"quant-convert-simulated-quantization", "FuncOp"> {
+ : Pass<"quant-convert-simulated-quantization", "func::FuncOp"> {
let summary = "Converts training-time simulated quantization ops to "
"corresponding quantize/dequantize casts";
let constructor = "mlir::quant::createConvertSimulatedQuantPass()";
include "mlir/Pass/PassBase.td"
-def SCFBufferize : Pass<"scf-bufferize", "FuncOp"> {
+def SCFBufferize : Pass<"scf-bufferize", "func::FuncOp"> {
let summary = "Bufferize the scf dialect.";
let constructor = "mlir::createSCFBufferizePass()";
let dependentDialects = ["bufferization::BufferizationDialect",
// Note: Making these canonicalization patterns would require a dependency
// of the SCF dialect on the Affine/Tensor/MemRef dialects or vice versa.
def SCFForLoopCanonicalization
- : Pass<"scf-for-loop-canonicalization", "FuncOp"> {
+ : Pass<"scf-for-loop-canonicalization", "func::FuncOp"> {
let summary = "Canonicalize operations within scf.for loop bodies";
let constructor = "mlir::createSCFForLoopCanonicalizationPass()";
let dependentDialects = ["AffineDialect", "tensor::TensorDialect",
"memref::MemRefDialect"];
}
-def SCFForLoopPeeling : Pass<"scf-for-loop-peeling", "FuncOp"> {
+def SCFForLoopPeeling : Pass<"scf-for-loop-peeling", "func::FuncOp"> {
let summary = "Peel `for` loops at their upper bounds.";
let constructor = "mlir::createForLoopPeelingPass()";
let options = [
let dependentDialects = ["AffineDialect"];
}
-def SCFForLoopSpecialization : Pass<"scf-for-loop-specialization", "FuncOp"> {
+def SCFForLoopSpecialization : Pass<"scf-for-loop-specialization", "func::FuncOp"> {
let summary = "Specialize `for` loops for vectorization";
let constructor = "mlir::createForLoopSpecializationPass()";
}
}
def SCFParallelLoopSpecialization
- : Pass<"scf-parallel-loop-specialization", "FuncOp"> {
+ : Pass<"scf-parallel-loop-specialization", "func::FuncOp"> {
let summary = "Specialize parallel loops for vectorization";
let constructor = "mlir::createParallelLoopSpecializationPass()";
}
-def SCFParallelLoopTiling : Pass<"scf-parallel-loop-tiling", "FuncOp"> {
+def SCFParallelLoopTiling : Pass<"scf-parallel-loop-tiling", "func::FuncOp"> {
let summary = "Tile parallel loops";
let constructor = "mlir::createParallelLoopTilingPass()";
let options = [
let constructor = "mlir::createForLoopRangeFoldingPass()";
}
-def SCFForToWhileLoop : Pass<"scf-for-to-while", "FuncOp"> {
+def SCFForToWhileLoop : Pass<"scf-for-to-while", "func::FuncOp"> {
let summary = "Convert SCF for loops to SCF while loops";
let constructor = "mlir::createForToWhileLoopPass()";
let description = [{
let extraClassDeclaration = [{
/// Returns an associated shape function for an operation if defined.
- FuncOp getShapeFunction(Operation *op);
+ func::FuncOp getShapeFunction(Operation *op);
}];
let builders = [OpBuilder<(ins "StringRef":$name)>];
include "mlir/Pass/PassBase.td"
-def RemoveShapeConstraints : Pass<"remove-shape-constraints", "FuncOp"> {
+def RemoveShapeConstraints : Pass<"remove-shape-constraints", "func::FuncOp"> {
let summary = "Replace all cstr_ ops with a true witness";
let constructor = "mlir::createRemoveShapeConstraintsPass()";
}
-def ShapeToShapeLowering : Pass<"shape-to-shape-lowering", "FuncOp"> {
+def ShapeToShapeLowering : Pass<"shape-to-shape-lowering", "func::FuncOp"> {
let summary = "Legalize Shape dialect to be convertible to Arithmetic";
let constructor = "mlir::createShapeToShapeLowering()";
}
// TODO: Generalize this to allow any type conversions desired.
-def ShapeBufferize : Pass<"shape-bufferize", "FuncOp"> {
+def ShapeBufferize : Pass<"shape-bufferize", "func::FuncOp"> {
let summary = "Bufferize the shape dialect.";
let constructor = "mlir::createShapeBufferizePass()";
let dependentDialects = ["bufferization::BufferizationDialect",
include "mlir/Pass/PassBase.td"
-def TensorBufferize : Pass<"tensor-bufferize", "FuncOp"> {
+def TensorBufferize : Pass<"tensor-bufferize", "func::FuncOp"> {
let summary = "Bufferize the `tensor` dialect";
let constructor = "mlir::createTensorBufferizePass()";
}
include "mlir/Pass/PassBase.td"
-def TosaInferShapes : Pass<"tosa-infer-shapes", "FuncOp"> {
+def TosaInferShapes : Pass<"tosa-infer-shapes", "func::FuncOp"> {
let summary = "Propagate shapes across TOSA operations";
let description = [{
Pass that uses operand types and propagates shapes to TOSA operations.
];
}
-def TosaMakeBroadcastable : Pass<"tosa-make-broadcastable", "FuncOp"> {
+def TosaMakeBroadcastable : Pass<"tosa-make-broadcastable", "func::FuncOp"> {
let summary = "TOSA rank Reshape to enable Broadcasting";
let description = [{
Pass that enables broadcast by making all input arrays have the same
}
def TosaOptionalDecompositions
- : Pass<"tosa-optional-decompositions", "FuncOp"> {
+ : Pass<"tosa-optional-decompositions", "func::FuncOp"> {
let summary = "Applies Tosa operations optional decompositions";
let description = [{
Pass to apply the Tosa operations decompositions
include "mlir/Pass/PassBase.td"
-def VectorBufferize : Pass<"vector-bufferize", "FuncOp"> {
+def VectorBufferize : Pass<"vector-bufferize", "func::FuncOp"> {
let summary = "Bufferize Vector dialect ops";
let constructor = "mlir::vector::createVectorBufferizePass()";
}
auto addFuncDecl = [&](StringRef name, FunctionType type) {
if (module.lookupSymbol(name))
return;
- builder.create<FuncOp>(name, type).setPrivate();
+ builder.create<func::FuncOp>(name, type).setPrivate();
};
MLIRContext *ctx = module.getContext();
llvmConverter.addConversion(AsyncRuntimeTypeConverter::convertAsyncTypes);
// Convert async types in function signatures and function calls.
- populateFunctionOpInterfaceTypeConversionPattern<FuncOp>(patterns, converter);
+ populateFunctionOpInterfaceTypeConversionPattern<func::FuncOp>(patterns,
+ converter);
populateCallOpTypeConversionPattern(patterns, converter);
// Convert return operations inside async.execute regions.
target.addIllegalDialect<AsyncDialect>();
// Add dynamic legality constraints to apply conversions defined above.
- target.addDynamicallyLegalOp<FuncOp>([&](FuncOp op) {
+ target.addDynamicallyLegalOp<func::FuncOp>([&](func::FuncOp op) {
return converter.isSignatureLegal(op.getFunctionType());
});
target.addDynamicallyLegalOp<func::ReturnOp>([&](func::ReturnOp op) {
/// the extra arguments.
static void wrapForExternalCallers(OpBuilder &rewriter, Location loc,
LLVMTypeConverter &typeConverter,
- FuncOp funcOp, LLVM::LLVMFuncOp newFuncOp) {
+ func::FuncOp funcOp,
+ LLVM::LLVMFuncOp newFuncOp) {
auto type = funcOp.getFunctionType();
SmallVector<NamedAttribute, 4> attributes;
filterFuncAttributes(funcOp->getAttrs(), /*filterArgAndResAttrs=*/false,
/// corresponding to a memref descriptor.
static void wrapExternalFunction(OpBuilder &builder, Location loc,
LLVMTypeConverter &typeConverter,
- FuncOp funcOp, LLVM::LLVMFuncOp newFuncOp) {
+ func::FuncOp funcOp,
+ LLVM::LLVMFuncOp newFuncOp) {
OpBuilder::InsertionGuard guard(builder);
Type wrapperType;
namespace {
-struct FuncOpConversionBase : public ConvertOpToLLVMPattern<FuncOp> {
+struct FuncOpConversionBase : public ConvertOpToLLVMPattern<func::FuncOp> {
protected:
- using ConvertOpToLLVMPattern<FuncOp>::ConvertOpToLLVMPattern;
+ using ConvertOpToLLVMPattern<func::FuncOp>::ConvertOpToLLVMPattern;
// Convert input FuncOp to LLVMFuncOp by using the LLVMTypeConverter provided
// to this legalization pattern.
LLVM::LLVMFuncOp
- convertFuncOpToLLVMFuncOp(FuncOp funcOp,
+ convertFuncOpToLLVMFuncOp(func::FuncOp funcOp,
ConversionPatternRewriter &rewriter) const {
// Convert the original function arguments. They are converted using the
// LLVMTypeConverter provided to this legalization pattern.
: FuncOpConversionBase(converter) {}
LogicalResult
- matchAndRewrite(FuncOp funcOp, OpAdaptor adaptor,
+ matchAndRewrite(func::FuncOp funcOp, OpAdaptor adaptor,
ConversionPatternRewriter &rewriter) const override {
auto newFuncOp = convertFuncOpToLLVMFuncOp(funcOp, rewriter);
if (!newFuncOp)
using FuncOpConversionBase::FuncOpConversionBase;
LogicalResult
- matchAndRewrite(FuncOp funcOp, OpAdaptor adaptor,
+ matchAndRewrite(func::FuncOp funcOp, OpAdaptor adaptor,
ConversionPatternRewriter &rewriter) const override {
// TODO: bare ptr conversion could be handled by argument materialization
} // namespace
void mlir::configureGpuToNVVMConversionLegality(ConversionTarget &target) {
- target.addIllegalOp<FuncOp>();
+ target.addIllegalOp<func::FuncOp>();
target.addLegalDialect<::mlir::LLVM::LLVMDialect>();
target.addLegalDialect<::mlir::NVVM::NVVMDialect>();
target.addIllegalDialect<gpu::GPUDialect>();
} // namespace
void mlir::configureGpuToROCDLConversionLegality(ConversionTarget &target) {
- target.addIllegalOp<FuncOp>();
+ target.addIllegalOp<func::FuncOp>();
target.addLegalDialect<::mlir::LLVM::LLVMDialect>();
target.addLegalDialect<ROCDL::ROCDLDialect>();
target.addIllegalDialect<gpu::GPUDialect>();
// Declare vulkan launch function.
auto funcType = builder.getFunctionType(vulkanLaunchTypes, {});
- builder.create<FuncOp>(loc, kVulkanLaunch, funcType).setPrivate();
+ builder.create<func::FuncOp>(loc, kVulkanLaunch, funcType).setPrivate();
return success();
}
// Allow builtin ops.
target->addLegalOp<ModuleOp>();
- target->addDynamicallyLegalOp<FuncOp>([&](FuncOp op) {
+ target->addDynamicallyLegalOp<func::FuncOp>([&](func::FuncOp op) {
return typeConverter.isSignatureLegal(op.getFunctionType()) &&
typeConverter.isLegal(&op.getBody());
});
// Insert before module terminator.
rewriter.setInsertionPoint(module.getBody(),
std::prev(module.getBody()->end()));
- FuncOp funcOp =
- rewriter.create<FuncOp>(op->getLoc(), fnNameAttr.getValue(), libFnType);
+ func::FuncOp funcOp = rewriter.create<func::FuncOp>(
+ op->getLoc(), fnNameAttr.getValue(), libFnType);
// Insert a function attribute that will trigger the emission of the
// corresponding `_mlir_ciface_xxx` interface so that external libraries see
// a normalized ABI. This interface is added during std to llvm conversion.
target.addLegalDialect<AffineDialect, arith::ArithmeticDialect,
func::FuncDialect, memref::MemRefDialect,
scf::SCFDialect>();
- target.addLegalOp<ModuleOp, FuncOp, func::ReturnOp>();
+ target.addLegalOp<ModuleOp, func::FuncOp, func::ReturnOp>();
RewritePatternSet patterns(&getContext());
populateLinalgToStandardConversionPatterns(patterns);
if (failed(applyFullConversion(module, target, std::move(patterns))))
rewriter.setInsertionPointToStart(&module->getRegion(0).front());
auto opFunctionTy = FunctionType::get(
rewriter.getContext(), op->getOperandTypes(), op->getResultTypes());
- opFunc =
- rewriter.create<FuncOp>(rewriter.getUnknownLoc(), name, opFunctionTy);
+ opFunc = rewriter.create<func::FuncOp>(rewriter.getUnknownLoc(), name,
+ opFunctionTy);
opFunc.setPrivate();
}
assert(isa<FunctionOpInterface>(SymbolTable::lookupSymbolIn(module, name)));
RewritePatternSet patterns(&getContext());
populateMemRefToLLVMConversionPatterns(typeConverter, patterns);
LLVMConversionTarget target(getContext());
- target.addLegalOp<FuncOp>();
+ target.addLegalOp<func::FuncOp>();
if (failed(applyPartialConversion(op, target, std::move(patterns))))
signalPassFailure();
}
ConversionTarget target(ctx);
target.addLegalDialect<arith::ArithmeticDialect, SCFDialect,
tensor::TensorDialect>();
- target.addLegalOp<CstrRequireOp, FuncOp, ModuleOp>();
+ target.addLegalOp<CstrRequireOp, func::FuncOp, ModuleOp>();
// Setup conversion patterns.
RewritePatternSet patterns(&ctx);
bool disableTosaDecompositions) {
// Optional decompositions are designed to benefit linalg.
if (!disableTosaDecompositions)
- pm.addNestedPass<FuncOp>(mlir::tosa::createTosaOptionalDecompositions());
- pm.addNestedPass<FuncOp>(mlir::createCanonicalizerPass());
+ pm.addNestedPass<func::FuncOp>(tosa::createTosaOptionalDecompositions());
+ pm.addNestedPass<func::FuncOp>(createCanonicalizerPass());
- pm.addNestedPass<FuncOp>(tosa::createTosaMakeBroadcastablePass());
- pm.addNestedPass<FuncOp>(tosa::createTosaToLinalgNamed());
- pm.addNestedPass<FuncOp>(mlir::createCanonicalizerPass());
- pm.addNestedPass<FuncOp>(tosa::createTosaMakeBroadcastablePass());
- pm.addNestedPass<FuncOp>(tosa::createTosaToLinalg());
+ pm.addNestedPass<func::FuncOp>(tosa::createTosaMakeBroadcastablePass());
+ pm.addNestedPass<func::FuncOp>(tosa::createTosaToLinalgNamed());
+ pm.addNestedPass<func::FuncOp>(createCanonicalizerPass());
+ pm.addNestedPass<func::FuncOp>(tosa::createTosaMakeBroadcastablePass());
+ pm.addNestedPass<func::FuncOp>(tosa::createTosaToLinalg());
}
}
void mlir::tosa::addTosaToSCFPasses(OpPassManager &pm) {
- pm.addNestedPass<FuncOp>(createTosaToSCF());
+ pm.addNestedPass<func::FuncOp>(createTosaToSCF());
}
if (numCommonLoops == 0) {
Block *block = srcAccess.opInst->getBlock();
- while (!llvm::isa<FuncOp>(block->getParentOp())) {
+ while (!llvm::isa<func::FuncOp>(block->getParentOp())) {
block = block->getParentOp()->getBlock();
}
return block;
/// buffers in 'fastMemorySpace', and replaces memory operations to the former
/// by the latter. Only load op's handled for now.
/// TODO: extend this to store op's.
-std::unique_ptr<OperationPass<FuncOp>> mlir::createAffineDataCopyGenerationPass(
- unsigned slowMemorySpace, unsigned fastMemorySpace, unsigned tagMemorySpace,
- int minDmaTransferSize, uint64_t fastMemCapacityBytes) {
+std::unique_ptr<OperationPass<func::FuncOp>>
+mlir::createAffineDataCopyGenerationPass(unsigned slowMemorySpace,
+ unsigned fastMemorySpace,
+ unsigned tagMemorySpace,
+ int minDmaTransferSize,
+ uint64_t fastMemCapacityBytes) {
return std::make_unique<AffineDataCopyGeneration>(
slowMemorySpace, fastMemorySpace, tagMemorySpace, minDmaTransferSize,
fastMemCapacityBytes);
}
-std::unique_ptr<OperationPass<FuncOp>>
+std::unique_ptr<OperationPass<func::FuncOp>>
mlir::createAffineDataCopyGenerationPass() {
return std::make_unique<AffineDataCopyGeneration>();
}
}
void AffineDataCopyGeneration::runOnOperation() {
- FuncOp f = getOperation();
+ func::FuncOp f = getOperation();
OpBuilder topBuilder(f.getBody());
zeroIndex = topBuilder.create<arith::ConstantIndexOp>(f.getLoc(), 0);
});
}
-std::unique_ptr<OperationPass<FuncOp>>
+std::unique_ptr<OperationPass<func::FuncOp>>
mlir::createAffineLoopInvariantCodeMotionPass() {
return std::make_unique<LoopInvariantCodeMotion>();
}
} // namespace
-std::unique_ptr<OperationPass<FuncOp>> mlir::createAffineLoopNormalizePass() {
+std::unique_ptr<OperationPass<func::FuncOp>>
+mlir::createAffineLoopNormalizePass() {
return std::make_unique<AffineLoopNormalizePass>();
}
} // namespace
void AffineParallelize::runOnOperation() {
- FuncOp f = getOperation();
+ func::FuncOp f = getOperation();
// The walker proceeds in pre-order to process the outer loops first
// and control the number of outer parallel loops.
}
}
-std::unique_ptr<OperationPass<FuncOp>> mlir::createAffineParallelizePass() {
+std::unique_ptr<OperationPass<func::FuncOp>>
+mlir::createAffineParallelizePass() {
return std::make_unique<AffineParallelize>();
}
} // namespace
-std::unique_ptr<OperationPass<FuncOp>>
+std::unique_ptr<OperationPass<func::FuncOp>>
mlir::createAffineScalarReplacementPass() {
return std::make_unique<AffineScalarReplacement>();
}
}
void runOnOperation() override {
- FuncOp func = getOperation();
+ func::FuncOp func = getOperation();
func.walk([&](Operation *op) {
if (auto scfForOp = dyn_cast<scf::ForOp>(op))
walkLoop(scfForOp);
} // namespace
-std::unique_ptr<OperationPass<FuncOp>> mlir::createLoopCoalescingPass() {
+std::unique_ptr<OperationPass<func::FuncOp>> mlir::createLoopCoalescingPass() {
return std::make_unique<LoopCoalescingPass>();
}
} // namespace
-std::unique_ptr<OperationPass<FuncOp>>
+std::unique_ptr<OperationPass<func::FuncOp>>
mlir::createLoopFusionPass(unsigned fastMemorySpace,
uint64_t localBufSizeThreshold, bool maximalFusion,
enum FusionMode affineFusionMode) {
// Initializes the dependence graph based on operations in 'f'.
// Returns true on success, false otherwise.
- bool init(FuncOp f);
+ bool init(func::FuncOp f);
// Returns the graph node for 'id'.
Node *getNode(unsigned id) {
// Assigns each node in the graph a node id based on program order in 'f'.
// TODO: Add support for taking a Block arg to construct the
// dependence graph at a different depth.
-bool MemRefDependenceGraph::init(FuncOp f) {
+bool MemRefDependenceGraph::init(func::FuncOp f) {
LLVM_DEBUG(llvm::dbgs() << "--- Initializing MDG ---\n");
DenseMap<Value, SetVector<unsigned>> memrefAccesses;
// Create builder to insert alloc op just before 'forOp'.
OpBuilder b(forInst);
// Builder to create constants at the top level.
- OpBuilder top(forInst->getParentOfType<FuncOp>().getBody());
+ OpBuilder top(forInst->getParentOfType<func::FuncOp>().getBody());
// Create new memref type based on slice bounds.
auto oldMemRef = cast<AffineWriteOpInterface>(srcStoreOpInst).getMemRef();
auto oldMemRefType = oldMemRef.getType().cast<MemRefType>();
};
// Search for siblings which load the same memref function argument.
- auto fn = dstNode->op->getParentOfType<FuncOp>();
+ auto fn = dstNode->op->getParentOfType<func::FuncOp>();
for (unsigned i = 0, e = fn.getNumArguments(); i != e; ++i) {
for (auto *user : fn.getArgument(i).getUsers()) {
if (auto loadOp = dyn_cast<AffineReadOpInterface>(user)) {
/// Creates a pass to perform loop tiling on all suitable loop nests of a
/// Function.
-std::unique_ptr<OperationPass<FuncOp>>
+std::unique_ptr<OperationPass<func::FuncOp>>
mlir::createLoopTilingPass(uint64_t cacheSizeBytes) {
return std::make_unique<LoopTiling>(cacheSizeBytes);
}
-std::unique_ptr<OperationPass<FuncOp>> mlir::createLoopTilingPass() {
+std::unique_ptr<OperationPass<func::FuncOp>> mlir::createLoopTilingPass() {
return std::make_unique<LoopTiling>();
}
}
/// Gathers loops that have no affine.for's nested within.
-static void gatherInnermostLoops(FuncOp f,
+static void gatherInnermostLoops(func::FuncOp f,
SmallVectorImpl<AffineForOp> &loops) {
f.walk([&](AffineForOp forOp) {
if (isInnermostAffineForOp(forOp))
}
void LoopUnroll::runOnOperation() {
- FuncOp func = getOperation();
+ func::FuncOp func = getOperation();
if (func.isExternal())
return;
return loopUnrollByFactor(forOp, unrollFactor);
}
-std::unique_ptr<OperationPass<FuncOp>> mlir::createLoopUnrollPass(
+std::unique_ptr<OperationPass<func::FuncOp>> mlir::createLoopUnrollPass(
int unrollFactor, bool unrollUpToFactor, bool unrollFull,
const std::function<unsigned(AffineForOp)> &getUnrollFactor) {
return std::make_unique<LoopUnroll>(
};
} // namespace
-std::unique_ptr<OperationPass<FuncOp>>
+std::unique_ptr<OperationPass<func::FuncOp>>
mlir::createLoopUnrollAndJamPass(int unrollJamFactor) {
return std::make_unique<LoopUnrollAndJam>(
unrollJamFactor == -1 ? None : Optional<unsigned>(unrollJamFactor));
/// Creates a pass to pipeline explicit movement of data across levels of the
/// memory hierarchy.
-std::unique_ptr<OperationPass<FuncOp>> mlir::createPipelineDataTransferPass() {
+std::unique_ptr<OperationPass<func::FuncOp>>
+mlir::createPipelineDataTransferPass() {
return std::make_unique<PipelineDataTransfer>();
}
} // namespace
-std::unique_ptr<OperationPass<FuncOp>>
+std::unique_ptr<OperationPass<func::FuncOp>>
mlir::createSimplifyAffineStructuresPass() {
return std::make_unique<SimplifyAffineStructures>();
}
LLVM_DEBUG(dbgs() << "\n");
}
-std::unique_ptr<OperationPass<FuncOp>>
+std::unique_ptr<OperationPass<func::FuncOp>>
createSuperVectorizePass(ArrayRef<int64_t> virtualVectorSize) {
return std::make_unique<Vectorize>(virtualVectorSize);
}
-std::unique_ptr<OperationPass<FuncOp>> createSuperVectorizePass() {
+std::unique_ptr<OperationPass<func::FuncOp>> createSuperVectorizePass() {
return std::make_unique<Vectorize>();
}
/// Applies vectorization to the current function by searching over a bunch of
/// predetermined patterns.
void Vectorize::runOnOperation() {
- FuncOp f = getOperation();
+ func::FuncOp f = getOperation();
if (!fastestVaryingPattern.empty() &&
fastestVaryingPattern.size() != vectorSizes.size()) {
f.emitRemark("Fastest varying pattern specified with different size than "
return vectorizeLoopNest(loops, strategy);
}
-std::unique_ptr<OperationPass<FuncOp>>
+std::unique_ptr<OperationPass<func::FuncOp>>
createSuperVectorizePass(ArrayRef<int64_t> virtualVectorSize) {
return std::make_unique<Vectorize>(virtualVectorSize);
}
-std::unique_ptr<OperationPass<FuncOp>> createSuperVectorizePass() {
+std::unique_ptr<OperationPass<func::FuncOp>> createSuperVectorizePass() {
return std::make_unique<Vectorize>();
}
auto walkResult = forOpRoot.walk([&](AffineForOp forOp) {
auto *childForOp = forOp.getOperation();
auto *parentForOp = forOp->getParentOp();
- if (!llvm::isa<FuncOp>(parentForOp)) {
+ if (!llvm::isa<func::FuncOp>(parentForOp)) {
if (!isa<AffineForOp>(parentForOp)) {
LLVM_DEBUG(llvm::dbgs() << "Expected parent AffineForOp\n");
return WalkResult::interrupt();
auto *parentBlock = forOp->getBlock();
if (!iv.use_empty()) {
if (forOp.hasConstantLowerBound()) {
- OpBuilder topBuilder(forOp->getParentOfType<FuncOp>().getBody());
+ OpBuilder topBuilder(forOp->getParentOfType<func::FuncOp>().getBody());
auto constOp = topBuilder.create<arith::ConstantIndexOp>(
forOp.getLoc(), forOp.getConstantLowerBound());
iv.replaceAllUsesWith(constOp);
/// Identify valid and profitable bands of loops to tile. This is currently just
/// a temporary placeholder to test the mechanics of tiled code generation.
/// Returns all maximal outermost perfect loop nests to tile.
-void mlir::getTileableBands(FuncOp f,
+void mlir::getTileableBands(func::FuncOp f,
std::vector<SmallVector<AffineForOp, 6>> *bands) {
// Get maximal perfect nest of 'affine.for' insts starting from root
// (inclusive).
*nBegin = begin;
*nEnd = end;
- FuncOp f = begin->getParentOfType<FuncOp>();
+ func::FuncOp f = begin->getParentOfType<func::FuncOp>();
OpBuilder topBuilder(f.getBody());
Value zeroIndex = topBuilder.create<arith::ConstantIndexOp>(f.getLoc(), 0);
OpBuilder &b = region.isWrite() ? epilogue : prologue;
// Builder to create constants at the top level.
- auto func = copyPlacementBlock->getParent()->getParentOfType<FuncOp>();
+ auto func = copyPlacementBlock->getParent()->getParentOfType<func::FuncOp>();
OpBuilder top(func.getBody());
auto loc = region.loc;
}
/// Gathers all AffineForOps in 'func.func' grouped by loop depth.
-void mlir::gatherLoops(FuncOp func,
+void mlir::gatherLoops(func::FuncOp func,
std::vector<SmallVector<AffineForOp, 2>> &depthToLoops) {
for (auto &block : func)
gatherLoopsInBlock(&block, /*currLoopDepth=*/0, depthToLoops);
// Walk up the parents past all for op that this conditional is invariant on.
auto ifOperands = ifOp.getOperands();
auto *res = ifOp.getOperation();
- while (!isa<FuncOp>(res->getParentOp())) {
+ while (!isa<func::FuncOp>(res->getParentOp())) {
auto *parentOp = res->getParentOp();
if (auto forOp = dyn_cast<AffineForOp>(parentOp)) {
if (llvm::is_contained(ifOperands, forOp.getInductionVar()))
// currently only eliminates the stores only if no other loads/uses (other
// than dealloc) remain.
//
-void mlir::affineScalarReplace(FuncOp f, DominanceInfo &domInfo,
+void mlir::affineScalarReplace(func::FuncOp f, DominanceInfo &domInfo,
PostDominanceInfo &postDomInfo) {
// Load op's whose results were replaced by those forwarded from stores.
SmallVector<Operation *, 8> opsToErase;
std::unique_ptr<DominanceInfo> domInfo;
std::unique_ptr<PostDominanceInfo> postDomInfo;
if (domOpFilter)
- domInfo =
- std::make_unique<DominanceInfo>(domOpFilter->getParentOfType<FuncOp>());
+ domInfo = std::make_unique<DominanceInfo>(
+ domOpFilter->getParentOfType<func::FuncOp>());
if (postDomOpFilter)
postDomInfo = std::make_unique<PostDominanceInfo>(
- postDomOpFilter->getParentOfType<FuncOp>());
+ postDomOpFilter->getParentOfType<func::FuncOp>());
// Walk all uses of old memref; collect ops to perform replacement. We use a
// DenseSet since an operation could potentially have multiple uses of a
struct ParallelComputeFunction {
unsigned numLoops;
- FuncOp func;
+ func::FuncOp func;
llvm::SmallVector<Value> captures;
};
getParallelComputeFunctionType(op, rewriter);
FunctionType type = computeFuncType.type;
- FuncOp func = FuncOp::create(op.getLoc(),
- numBlockAlignedInnerLoops > 0
- ? "parallel_compute_fn_with_aligned_loops"
- : "parallel_compute_fn",
- type);
+ func::FuncOp func = func::FuncOp::create(
+ op.getLoc(),
+ numBlockAlignedInnerLoops > 0 ? "parallel_compute_fn_with_aligned_loops"
+ : "parallel_compute_fn",
+ type);
func.setPrivate();
// Insert function into the module symbol table and assign it unique name.
// call @parallel_compute_fn(%block_start, %block_size, ...);
// }
//
-static FuncOp createAsyncDispatchFunction(ParallelComputeFunction &computeFunc,
- PatternRewriter &rewriter) {
+static func::FuncOp
+createAsyncDispatchFunction(ParallelComputeFunction &computeFunc,
+ PatternRewriter &rewriter) {
OpBuilder::InsertionGuard guard(rewriter);
Location loc = computeFunc.func.getLoc();
ImplicitLocOpBuilder b(loc, rewriter);
inputTypes.append(computeFuncInputTypes.begin(), computeFuncInputTypes.end());
FunctionType type = rewriter.getFunctionType(inputTypes, TypeRange());
- FuncOp func = FuncOp::create(loc, "async_dispatch_fn", type);
+ func::FuncOp func = func::FuncOp::create(loc, "async_dispatch_fn", type);
func.setPrivate();
// Insert function into the module symbol table and assign it unique name.
// Add one more level of indirection to dispatch parallel compute functions
// using async operations and recursive work splitting.
- FuncOp asyncDispatchFunction =
+ func::FuncOp asyncDispatchFunction =
createAsyncDispatchFunction(parallelComputeFunction, rewriter);
Value c0 = b.create<arith::ConstantIndexOp>(0);
const SmallVector<Value> &tripCounts) {
MLIRContext *ctx = op->getContext();
- FuncOp compute = parallelComputeFunction.func;
+ func::FuncOp compute = parallelComputeFunction.func;
Value c0 = b.create<arith::ConstantIndexOp>(0);
Value c1 = b.create<arith::ConstantIndexOp>(1);
/// operation to enable non-blocking waiting via coroutine suspension.
namespace {
struct CoroMachinery {
- FuncOp func;
+ func::FuncOp func;
// Async execute region returns a completion token, and an async value for
// each yielded value.
/// return %token, %value : !async.token, !async.value<T>
/// }
///
-static CoroMachinery setupCoroMachinery(FuncOp func) {
+static CoroMachinery setupCoroMachinery(func::FuncOp func) {
assert(!func.getBlocks().empty() && "Function must have an entry block");
MLIRContext *ctx = func.getContext();
/// function.
///
/// Note that this is not reversible transformation.
-static std::pair<FuncOp, CoroMachinery>
+static std::pair<func::FuncOp, CoroMachinery>
outlineExecuteOp(SymbolTable &symbolTable, ExecuteOp execute) {
ModuleOp module = execute->getParentOfType<ModuleOp>();
// TODO: Derive outlined function name from the parent FuncOp (support
// multiple nested async.execute operations).
- FuncOp func = FuncOp::create(loc, kAsyncFnPrefix, funcType, funcAttrs);
+ func::FuncOp func =
+ func::FuncOp::create(loc, kAsyncFnPrefix, funcType, funcAttrs);
symbolTable.insert(func);
SymbolTable::setSymbolVisibility(func, SymbolTable::Visibility::Private);
using AwaitAdaptor = typename AwaitType::Adaptor;
public:
- AwaitOpLoweringBase(MLIRContext *ctx,
- llvm::DenseMap<FuncOp, CoroMachinery> &outlinedFunctions)
+ AwaitOpLoweringBase(
+ MLIRContext *ctx,
+ llvm::DenseMap<func::FuncOp, CoroMachinery> &outlinedFunctions)
: OpConversionPattern<AwaitType>(ctx),
outlinedFunctions(outlinedFunctions) {}
return rewriter.notifyMatchFailure(op, "unsupported awaitable type");
// Check if await operation is inside the outlined coroutine function.
- auto func = op->template getParentOfType<FuncOp>();
+ auto func = op->template getParentOfType<func::FuncOp>();
auto outlined = outlinedFunctions.find(func);
const bool isInCoroutine = outlined != outlinedFunctions.end();
}
private:
- llvm::DenseMap<FuncOp, CoroMachinery> &outlinedFunctions;
+ llvm::DenseMap<func::FuncOp, CoroMachinery> &outlinedFunctions;
};
/// Lowering for `async.await` with a token operand.
public:
YieldOpLowering(
MLIRContext *ctx,
- const llvm::DenseMap<FuncOp, CoroMachinery> &outlinedFunctions)
+ const llvm::DenseMap<func::FuncOp, CoroMachinery> &outlinedFunctions)
: OpConversionPattern<async::YieldOp>(ctx),
outlinedFunctions(outlinedFunctions) {}
matchAndRewrite(async::YieldOp op, OpAdaptor adaptor,
ConversionPatternRewriter &rewriter) const override {
// Check if yield operation is inside the async coroutine function.
- auto func = op->template getParentOfType<FuncOp>();
+ auto func = op->template getParentOfType<func::FuncOp>();
auto outlined = outlinedFunctions.find(func);
if (outlined == outlinedFunctions.end())
return rewriter.notifyMatchFailure(
}
private:
- const llvm::DenseMap<FuncOp, CoroMachinery> &outlinedFunctions;
+ const llvm::DenseMap<func::FuncOp, CoroMachinery> &outlinedFunctions;
};
//===----------------------------------------------------------------------===//
class AssertOpLowering : public OpConversionPattern<cf::AssertOp> {
public:
- AssertOpLowering(MLIRContext *ctx,
- llvm::DenseMap<FuncOp, CoroMachinery> &outlinedFunctions)
+ AssertOpLowering(
+ MLIRContext *ctx,
+ llvm::DenseMap<func::FuncOp, CoroMachinery> &outlinedFunctions)
: OpConversionPattern<cf::AssertOp>(ctx),
outlinedFunctions(outlinedFunctions) {}
matchAndRewrite(cf::AssertOp op, OpAdaptor adaptor,
ConversionPatternRewriter &rewriter) const override {
// Check if assert operation is inside the async coroutine function.
- auto func = op->template getParentOfType<FuncOp>();
+ auto func = op->template getParentOfType<func::FuncOp>();
auto outlined = outlinedFunctions.find(func);
if (outlined == outlinedFunctions.end())
return rewriter.notifyMatchFailure(
}
private:
- llvm::DenseMap<FuncOp, CoroMachinery> &outlinedFunctions;
+ llvm::DenseMap<func::FuncOp, CoroMachinery> &outlinedFunctions;
};
//===----------------------------------------------------------------------===//
/// 2) Prepending the results with `async.token`.
/// 3) Setting up coroutine blocks.
/// 4) Rewriting return ops as yield op and branch op into the suspend block.
-static CoroMachinery rewriteFuncAsCoroutine(FuncOp func) {
+static CoroMachinery rewriteFuncAsCoroutine(func::FuncOp func) {
auto *ctx = func->getContext();
auto loc = func.getLoc();
SmallVector<Type> resultTypes;
///
/// The invocation of this function is safe only when call ops are traversed in
/// reverse order of how they appear in a single block. See `funcsToCoroutines`.
-static void rewriteCallsiteForCoroutine(func::CallOp oldCall, FuncOp func) {
+static void rewriteCallsiteForCoroutine(func::CallOp oldCall,
+ func::FuncOp func) {
auto loc = func.getLoc();
ImplicitLocOpBuilder callBuilder(loc, oldCall);
auto newCall = callBuilder.create<func::CallOp>(
oldCall.erase();
}
-static bool isAllowedToBlock(FuncOp func) {
+static bool isAllowedToBlock(func::FuncOp func) {
return !!func->getAttrOfType<UnitAttr>(AsyncDialect::kAllowedToBlockAttrName);
}
-static LogicalResult
-funcsToCoroutines(ModuleOp module,
- llvm::DenseMap<FuncOp, CoroMachinery> &outlinedFunctions) {
+static LogicalResult funcsToCoroutines(
+ ModuleOp module,
+ llvm::DenseMap<func::FuncOp, CoroMachinery> &outlinedFunctions) {
// The following code supports the general case when 2 functions mutually
// recurse into each other. Because of this and that we are relying on
// SymbolUserMap to find pointers to calling FuncOps, we cannot simply erase
// a FuncOp while inserting an equivalent coroutine, because that could lead
// to dangling pointers.
- SmallVector<FuncOp> funcWorklist;
+ SmallVector<func::FuncOp> funcWorklist;
// Careful, it's okay to add a func to the worklist multiple times if and only
// if the loop processing the worklist will skip the functions that have
// already been converted to coroutines.
- auto addToWorklist = [&](FuncOp func) {
+ auto addToWorklist = [&](func::FuncOp func) {
if (isAllowedToBlock(func))
return;
// N.B. To refactor this code into a separate pass the lookup in
};
// Traverse in post-order collecting for each func op the await ops it has.
- for (FuncOp func : module.getOps<FuncOp>())
+ for (func::FuncOp func : module.getOps<func::FuncOp>())
addToWorklist(func);
SymbolTableCollection symbolTable;
// Rewrite the callsites to await on results of the newly created coroutine.
for (Operation *op : users) {
if (func::CallOp call = dyn_cast<func::CallOp>(*op)) {
- FuncOp caller = call->getParentOfType<FuncOp>();
+ func::FuncOp caller = call->getParentOfType<func::FuncOp>();
rewriteCallsiteForCoroutine(call, func); // Careful, erases the call op.
addToWorklist(caller);
} else {
SymbolTable symbolTable(module);
// Outline all `async.execute` body regions into async functions (coroutines).
- llvm::DenseMap<FuncOp, CoroMachinery> outlinedFunctions;
+ llvm::DenseMap<func::FuncOp, CoroMachinery> outlinedFunctions;
module.walk([&](ExecuteOp execute) {
outlinedFunctions.insert(outlineExecuteOp(symbolTable, execute));
// Returns true if operation is inside the coroutine.
auto isInCoroutine = [&](Operation *op) -> bool {
- auto parentFunc = op->getParentOfType<FuncOp>();
+ auto parentFunc = op->getParentOfType<func::FuncOp>();
return outlinedFunctions.find(parentFunc) != outlinedFunctions.end();
};
// Assertions must be converted to runtime errors inside async functions.
runtimeTarget.addDynamicallyLegalOp<cf::AssertOp>(
[&](cf::AssertOp op) -> bool {
- auto func = op->getParentOfType<FuncOp>();
+ auto func = op->getParentOfType<func::FuncOp>();
return outlinedFunctions.find(func) == outlinedFunctions.end();
});
if (eliminateBlockingAwaitOps)
runtimeTarget.addDynamicallyLegalOp<RuntimeAwaitOp>(
[&](RuntimeAwaitOp op) -> bool {
- return isAllowedToBlock(op->getParentOfType<FuncOp>());
+ return isAllowedToBlock(op->getParentOfType<func::FuncOp>());
});
if (failed(applyPartialConversion(module, runtimeTarget,
auto bbArg = value.dyn_cast<BlockArgument>();
if (!bbArg)
return false;
- return isa<FuncOp>(bbArg.getOwner()->getParentOp());
+ return isa<func::FuncOp>(bbArg.getOwner()->getParentOp());
}
MemRefType bufferization::getContiguousMemRefType(ShapedType shapedType,
static bool validateSupportedControlFlow(Operation *op) {
WalkResult result = op->walk([&](Operation *operation) {
// Only check ops that are inside a function.
- if (!operation->getParentOfType<FuncOp>())
+ if (!operation->getParentOfType<func::FuncOp>())
return WalkResult::advance();
auto regions = operation->getRegions();
}
void runOnOperation() override {
- FuncOp func = getOperation();
+ func::FuncOp func = getOperation();
if (func.isExternal())
return;
LogicalResult bufferization::deallocateBuffers(Operation *op) {
if (isa<ModuleOp>(op)) {
- WalkResult result = op->walk([&](FuncOp funcOp) {
+ WalkResult result = op->walk([&](func::FuncOp funcOp) {
if (failed(deallocateBuffers(funcOp)))
return WalkResult::interrupt();
return WalkResult::advance();
// Updates the func op and entry block.
//
// Any args appended to the entry block are added to `appendedEntryArgs`.
-static void updateFuncOp(FuncOp func,
+static void updateFuncOp(func::FuncOp func,
SmallVectorImpl<BlockArgument> &appendedEntryArgs) {
auto functionType = func.getFunctionType();
appendedEntryArgs.push_back(func.front().addArgument(type, loc));
}
-// Updates all ReturnOps in the scope of the given FuncOp by either keeping them
-// as return values or copying the associated buffer contents into the given
-// out-params.
-static void updateReturnOps(FuncOp func,
+// Updates all ReturnOps in the scope of the given func::FuncOp by either
+// keeping them as return values or copying the associated buffer contents into
+// the given out-params.
+static void updateReturnOps(func::FuncOp func,
ArrayRef<BlockArgument> appendedEntryArgs) {
func.walk([&](func::ReturnOp op) {
SmallVector<Value, 6> copyIntoOutParams;
void runOnOperation() override {
ModuleOp module = getOperation();
- for (auto func : module.getOps<FuncOp>()) {
+ for (auto func : module.getOps<func::FuncOp>()) {
SmallVector<BlockArgument, 6> appendedEntryArgs;
updateFuncOp(func, appendedEntryArgs);
if (func.isExternal())
return std::make_unique<OneShotBufferizePass>(options);
}
-std::unique_ptr<OperationPass<FuncOp>>
+std::unique_ptr<OperationPass<func::FuncOp>>
mlir::bufferization::createFinalizingBufferizePass() {
return std::make_unique<FinalizingBufferizePass>();
}
/// Expand function arguments according to the provided TypeConverter and
/// ValueDecomposer.
struct DecomposeCallGraphTypesForFuncArgs
- : public DecomposeCallGraphTypesOpConversionPattern<FuncOp> {
+ : public DecomposeCallGraphTypesOpConversionPattern<func::FuncOp> {
using DecomposeCallGraphTypesOpConversionPattern::
DecomposeCallGraphTypesOpConversionPattern;
LogicalResult
- matchAndRewrite(FuncOp op, OpAdaptor adaptor,
+ matchAndRewrite(func::FuncOp op, OpAdaptor adaptor,
ConversionPatternRewriter &rewriter) const final {
auto functionType = op.getFunctionType();
getOperation().getRegion().walk(SingleTokenUseCallback());
}
-std::unique_ptr<OperationPass<FuncOp>> mlir::createGpuAsyncRegionPass() {
+std::unique_ptr<OperationPass<func::FuncOp>> mlir::createGpuAsyncRegionPass() {
return std::make_unique<GpuAsyncRegionPass>();
}
void runOnOperation() override {
SymbolTable symbolTable(getOperation());
bool modified = false;
- for (auto func : getOperation().getOps<FuncOp>()) {
+ for (auto func : getOperation().getOps<func::FuncOp>()) {
// Insert just after the function.
Block::iterator insertPt(func->getNextNode());
auto funcWalkResult = func.walk([&](gpu::LaunchOp op) {
SetVector<Value> operands;
std::string kernelFnName =
- Twine(op->getParentOfType<FuncOp>().getName(), "_kernel").str();
+ Twine(op->getParentOfType<func::FuncOp>().getName(), "_kernel")
+ .str();
gpu::GPUFuncOp outlinedFunc =
outlineKernelFuncImpl(op, kernelFnName, operands);
}
LinalgDependenceGraph
-LinalgDependenceGraph::buildDependenceGraph(Aliases &aliases, FuncOp f) {
+LinalgDependenceGraph::buildDependenceGraph(Aliases &aliases, func::FuncOp f) {
SmallVector<LinalgOp, 8> linalgOps;
f.walk([&](LinalgOp op) { linalgOps.push_back(op); });
return LinalgDependenceGraph(aliases, linalgOps);
using namespace mlir::bufferization;
/// A mapping of FuncOps to their callers.
-using FuncCallerMap = DenseMap<FuncOp, DenseSet<Operation *>>;
+using FuncCallerMap = DenseMap<func::FuncOp, DenseSet<Operation *>>;
namespace {
/// The state of analysis of a FuncOp.
/// A mapping of ReturnOp OpOperand indices to equivalent FuncOp BBArg
/// indices.
- DenseMap<FuncOp, IndexMapping> equivalentFuncArgs;
+ DenseMap<func::FuncOp, IndexMapping> equivalentFuncArgs;
/// A mapping of ReturnOp OpOperand indices to aliasing FuncOp BBArg indices.
- DenseMap<FuncOp, IndexToIndexListMapping> aliasingFuncArgs;
+ DenseMap<func::FuncOp, IndexToIndexListMapping> aliasingFuncArgs;
/// A mapping of FuncOp BBArg indices to aliasing ReturnOp OpOperand indices.
- DenseMap<FuncOp, IndexToIndexListMapping> aliasingReturnVals;
+ DenseMap<func::FuncOp, IndexToIndexListMapping> aliasingReturnVals;
/// A set of all read BlockArguments of FuncOps.
- DenseMap<FuncOp, BbArgIndexSet> readBbArgs;
+ DenseMap<func::FuncOp, BbArgIndexSet> readBbArgs;
/// A set of all written-to BlockArguments of FuncOps.
- DenseMap<FuncOp, BbArgIndexSet> writtenBbArgs;
+ DenseMap<func::FuncOp, BbArgIndexSet> writtenBbArgs;
/// Keep track of which FuncOps are fully analyzed or currently being
/// analyzed.
- DenseMap<FuncOp, FuncOpAnalysisState> analyzedFuncOps;
+ DenseMap<func::FuncOp, FuncOpAnalysisState> analyzedFuncOps;
/// This function is called right before analyzing the given FuncOp. It
/// initializes the data structures for the FuncOp in this state object.
- void startFunctionAnalysis(FuncOp funcOp) {
+ void startFunctionAnalysis(func::FuncOp funcOp) {
analyzedFuncOps[funcOp] = FuncOpAnalysisState::InProgress;
auto createdEquiv = equivalentFuncArgs.try_emplace(funcOp, IndexMapping());
auto createdAliasingOperands =
/// Return the state (phase) of analysis of the FuncOp.
static FuncOpAnalysisState getFuncOpAnalysisState(const AnalysisState &state,
- FuncOp funcOp) {
+ func::FuncOp funcOp) {
const FuncAnalysisState &moduleState = getFuncAnalysisState(state);
auto it = moduleState.analyzedFuncOps.find(funcOp);
if (it == moduleState.analyzedFuncOps.end())
/// Return the unique ReturnOp that terminates `funcOp`.
/// Return nullptr if there is no such unique ReturnOp.
-static func::ReturnOp getAssumedUniqueReturnOp(FuncOp funcOp) {
+static func::ReturnOp getAssumedUniqueReturnOp(func::FuncOp funcOp) {
func::ReturnOp returnOp;
for (Block &b : funcOp.getBody()) {
if (auto candidateOp = dyn_cast<func::ReturnOp>(b.getTerminator())) {
FuncAnalysisState &funcState = getFuncAnalysisState(state);
// Support only single return-terminated block in the function.
- auto funcOp = cast<FuncOp>(op);
+ auto funcOp = cast<func::FuncOp>(op);
func::ReturnOp returnOp = getAssumedUniqueReturnOp(funcOp);
assert(returnOp && "expected func with single return op");
const BufferizationAliasInfo &aliasInfo) {
#ifndef NDEBUG
assert(value.getType().isa<TensorType>() && "expected TensorType");
- FuncOp funcOp;
+ func::FuncOp funcOp;
if (auto bbArg = value.dyn_cast<BlockArgument>()) {
Operation *owner = bbArg.getOwner()->getParentOp();
- funcOp = isa<FuncOp>(owner) ? cast<FuncOp>(owner)
- : owner->getParentOfType<FuncOp>();
+ funcOp = isa<func::FuncOp>(owner) ? cast<func::FuncOp>(owner)
+ : owner->getParentOfType<func::FuncOp>();
} else {
- funcOp = value.getDefiningOp()->getParentOfType<FuncOp>();
+ funcOp = value.getDefiningOp()->getParentOfType<func::FuncOp>();
}
assert(getFuncOpAnalysisState(state, funcOp) !=
FuncOpAnalysisState::NotAnalyzed &&
return isWritten;
}
-static void annotateFuncArgAccess(FuncOp funcOp, BlockArgument bbArg,
+static void annotateFuncArgAccess(func::FuncOp funcOp, BlockArgument bbArg,
bool isRead, bool isWritten) {
OpBuilder b(funcOp.getContext());
Attribute accessType;
BufferizationAliasInfo &aliasInfo,
SmallVector<Operation *> &newOps) {
FuncAnalysisState &funcState = getFuncAnalysisState(state);
- auto funcOp = cast<FuncOp>(op);
+ auto funcOp = cast<func::FuncOp>(op);
// If the function has no body, conservatively assume that all args are
// read + written.
return value;
}
-/// Remove the attribute that triggers inplace bufferization on a FuncOp
+/// Remove the attribute that triggers inplace bufferization on a func::FuncOp
/// argument `bbArg`.
static void removeBufferizationFuncArguments(BlockArgument bbArg) {
- auto funcOp = cast<FuncOp>(bbArg.getOwner()->getParentOp());
+ auto funcOp = cast<func::FuncOp>(bbArg.getOwner()->getParentOp());
funcOp.removeArgAttr(bbArg.getArgNumber(),
BufferizableOpInterface::kBufferLayoutAttrName);
funcOp.removeArgAttr(bbArg.getArgNumber(),
BufferizableOpInterface::kInplaceableAttrName);
}
-/// Return the FuncOp called by `callOp`.
-static FuncOp getCalledFunction(CallOpInterface callOp) {
+/// Return the func::FuncOp called by `callOp`.
+static func::FuncOp getCalledFunction(CallOpInterface callOp) {
SymbolRefAttr sym = callOp.getCallableForCallee().dyn_cast<SymbolRefAttr>();
if (!sym)
return nullptr;
- return dyn_cast_or_null<FuncOp>(
+ return dyn_cast_or_null<func::FuncOp>(
SymbolTable::lookupNearestSymbolFrom(callOp, sym));
}
/// Note: This only adds new equivalence info if the called function was already
/// analyzed.
// TODO: This does not handle cyclic function call graphs etc.
-static void equivalenceAnalysis(FuncOp funcOp,
+static void equivalenceAnalysis(func::FuncOp funcOp,
BufferizationAliasInfo &aliasInfo,
FuncAnalysisState &funcState) {
funcOp->walk([&](func::CallOp callOp) {
- FuncOp calledFunction = getCalledFunction(callOp);
- assert(calledFunction && "could not retrieved called FuncOp");
+ func::FuncOp calledFunction = getCalledFunction(callOp);
+ assert(calledFunction && "could not retrieved called func::FuncOp");
// No equivalence info available for the called function.
if (!funcState.equivalentFuncArgs.count(calledFunction))
/// Note: Returning a memref currently fails bufferization. If such memrefs
/// originate from an op with an Alloc effect, they could be hoisted in the
/// future.
-static LogicalResult bufferizeFuncOpBoundary(FuncOp funcOp,
+static LogicalResult bufferizeFuncOpBoundary(func::FuncOp funcOp,
RewriterBase &rewriter,
BufferizationState &state) {
const FuncAnalysisState &funcState =
/// retrieve the called FuncOp from any CallOpInterface.
static LogicalResult
getFuncOpsOrderedByCalls(ModuleOp moduleOp,
- SmallVectorImpl<FuncOp> &orderedFuncOps,
+ SmallVectorImpl<func::FuncOp> &orderedFuncOps,
FuncCallerMap &callerMap) {
// For each FuncOp, the set of functions called by it (i.e. the union of
// symbols of all nested CallOpInterfaceOp).
- DenseMap<FuncOp, DenseSet<FuncOp>> calledBy;
+ DenseMap<func::FuncOp, DenseSet<func::FuncOp>> calledBy;
// For each FuncOp, the number of CallOpInterface it contains.
- DenseMap<FuncOp, unsigned> numberCallOpsContainedInFuncOp;
- WalkResult res = moduleOp.walk([&](FuncOp funcOp) -> WalkResult {
+ DenseMap<func::FuncOp, unsigned> numberCallOpsContainedInFuncOp;
+ WalkResult res = moduleOp.walk([&](func::FuncOp funcOp) -> WalkResult {
if (!funcOp.getBody().empty()) {
func::ReturnOp returnOp = getAssumedUniqueReturnOp(funcOp);
if (!returnOp)
// Only support CallOp for now.
if (!isa<func::CallOp>(callOp.getOperation()))
return callOp->emitError() << "expected a CallOp";
- FuncOp calledFunction = getCalledFunction(callOp);
- assert(calledFunction && "could not retrieved called FuncOp");
+ func::FuncOp calledFunction = getCalledFunction(callOp);
+ assert(calledFunction && "could not retrieved called func::FuncOp");
auto it = callerMap.try_emplace(calledFunction, DenseSet<Operation *>{});
it.first->getSecond().insert(callOp);
if (calledBy[calledFunction].count(funcOp) == 0) {
return success();
}
-static void foreachCaller(const FuncCallerMap &callerMap, FuncOp callee,
+static void foreachCaller(const FuncCallerMap &callerMap, func::FuncOp callee,
llvm::function_ref<void(Operation *)> doit) {
auto itCallers = callerMap.find(callee);
if (itCallers == callerMap.end())
/// This is a purely mechanical process that may later become part of a
/// separate pass with its own layout assignment heuristic.
static void layoutPostProcessing(ModuleOp moduleOp) {
- SmallVector<FuncOp> orderedFuncOps;
- DenseMap<FuncOp, DenseSet<Operation *>> callerMap;
+ SmallVector<func::FuncOp> orderedFuncOps;
+ DenseMap<func::FuncOp, DenseSet<Operation *>> callerMap;
auto res = getFuncOpsOrderedByCalls(moduleOp, orderedFuncOps, callerMap);
(void)res;
assert(succeeded(res) && "unexpected getFuncOpsOrderedByCalls failure");
- for (FuncOp funcOp : orderedFuncOps) {
+ for (func::FuncOp funcOp : orderedFuncOps) {
DenseMap<Operation *, SmallVector<Value>> operandsPerCaller;
foreachCaller(callerMap, funcOp, [&](Operation *caller) {
operandsPerCaller.try_emplace(caller, SmallVector<Value>());
namespace comprehensive_bufferize {
namespace std_ext {
-/// Return the index of the bbArg in the given FuncOp that is equivalent to the
-/// specified return value (if any).
-static Optional<int64_t> getEquivalentFuncArgIdx(FuncOp funcOp,
+/// Return the index of the bbArg in the given func::FuncOp that is equivalent
+/// to the specified return value (if any).
+static Optional<int64_t> getEquivalentFuncArgIdx(func::FuncOp funcOp,
const FuncAnalysisState &state,
int64_t returnValIdx) {
auto funcOpIt = state.equivalentFuncArgs.find(funcOp);
bool bufferizesToMemoryRead(Operation *op, OpOperand &opOperand,
const AnalysisState &state) const {
func::CallOp callOp = cast<func::CallOp>(op);
- FuncOp funcOp = getCalledFunction(callOp);
- assert(funcOp && "expected CallOp to a FuncOp");
+ func::FuncOp funcOp = getCalledFunction(callOp);
+ assert(funcOp && "expected CallOp to a func::FuncOp");
const FuncAnalysisState &funcState = getFuncAnalysisState(state);
if (getFuncOpAnalysisState(state, funcOp) != FuncOpAnalysisState::Analyzed)
bool bufferizesToMemoryWrite(Operation *op, OpOperand &opOperand,
const AnalysisState &state) const {
func::CallOp callOp = cast<func::CallOp>(op);
- FuncOp funcOp = getCalledFunction(callOp);
- assert(funcOp && "expected CallOp to a FuncOp");
+ func::FuncOp funcOp = getCalledFunction(callOp);
+ assert(funcOp && "expected CallOp to a func::FuncOp");
const FuncAnalysisState &funcState = getFuncAnalysisState(state);
if (getFuncOpAnalysisState(state, funcOp) != FuncOpAnalysisState::Analyzed)
SmallVector<OpResult> getAliasingOpResult(Operation *op, OpOperand &opOperand,
const AnalysisState &state) const {
func::CallOp callOp = cast<func::CallOp>(op);
- FuncOp funcOp = getCalledFunction(callOp);
- assert(funcOp && "expected CallOp to a FuncOp");
+ func::FuncOp funcOp = getCalledFunction(callOp);
+ assert(funcOp && "expected CallOp to a func::FuncOp");
const FuncAnalysisState &funcState = getFuncAnalysisState(state);
if (getFuncOpAnalysisState(state, funcOp) !=
FuncOpAnalysisState::Analyzed) {
getAliasingOpOperand(Operation *op, OpResult opResult,
const AnalysisState &state) const {
func::CallOp callOp = cast<func::CallOp>(op);
- FuncOp funcOp = getCalledFunction(callOp);
- assert(funcOp && "expected CallOp to a FuncOp");
+ func::FuncOp funcOp = getCalledFunction(callOp);
+ assert(funcOp && "expected CallOp to a func::FuncOp");
const FuncAnalysisState &funcState = getFuncAnalysisState(state);
if (getFuncOpAnalysisState(state, funcOp) !=
FuncOpAnalysisState::Analyzed) {
return BufferRelation::Equivalent;
}
- /// In a first approximation, all the function arguments of a FuncOp are
+ /// In a first approximation, all the function arguments of a func::FuncOp are
/// marked inplaceable. For now, it is the responsibility of the `callOp`
- /// bufferization to allow FuncOp that are inplaceable to write inPlace.
+ /// bufferization to allow func::FuncOp that are inplaceable to write inPlace.
LogicalResult bufferize(Operation *op, RewriterBase &rewriter,
BufferizationState &state) const {
func::CallOp callOp = cast<func::CallOp>(op);
unsigned numResults = callOp.getNumResults();
unsigned numOperands = callOp->getNumOperands();
- FuncOp funcOp = getCalledFunction(callOp);
- assert(funcOp && "expected CallOp to a FuncOp");
+ func::FuncOp funcOp = getCalledFunction(callOp);
+ assert(funcOp && "expected CallOp to a func::FuncOp");
const FuncAnalysisState &funcState =
getFuncAnalysisState(state.getAnalysisState());
const OneShotBufferizationOptions &options =
//
// Note: If a function has no body, no equivalence information is
// available. Consequently, a tensor return value cannot be proven to fold
- // onto a FuncOp bbArg, so calls to such functions are not bufferizable at
- // the moment.
+ // onto a func::FuncOp bbArg, so calls to such functions are not
+ // bufferizable at the moment.
// 1. Compute the result types of the new CallOp. Tensor results that are
- // equivalent to a FuncOp bbArg are no longer returned.
+ // equivalent to a func::FuncOp bbArg are no longer returned.
for (const auto &it : llvm::enumerate(callOp.getResultTypes())) {
unsigned returnValIdx = it.index();
Type returnType = it.value();
}
// Retrieve buffers for tensor operands. Tensor operand buffers, who's
- // corresponding FuncOp bbArgs are equivalent to a returned tensor, were
- // already stored in `newOperands` during Step 1.
+ // corresponding func::FuncOp bbArgs are equivalent to a returned tensor,
+ // were already stored in `newOperands` during Step 1.
Value buffer = newOperands[idx];
if (!buffer) {
FailureOr<Value> bufferOrFailure = state.getBuffer(rewriter, opOperand);
BufferizationState &state) const {
#ifndef NDEBUG
auto returnOp = cast<func::ReturnOp>(op);
- assert(isa<FuncOp>(returnOp->getParentOp()) &&
- "only support FuncOp parent for ReturnOp");
+ assert(isa<func::FuncOp>(returnOp->getParentOp()) &&
+ "only support func::FuncOp parent for ReturnOp");
#endif // NDEBUG
return failure();
};
struct FuncOpInterface
- : public BufferizableOpInterface::ExternalModel<FuncOpInterface, FuncOp> {
+ : public BufferizableOpInterface::ExternalModel<FuncOpInterface,
+ func::FuncOp> {
LogicalResult bufferize(Operation *op, RewriterBase &rewriter,
BufferizationState &state) const {
return failure();
/// Return `true` if the given function argument is writable.
bool isWritable(Operation *op, Value value,
const AnalysisState &state) const {
- auto funcOp = cast<FuncOp>(op);
+ auto funcOp = cast<func::FuncOp>(op);
BlockArgument bbArg = value.dyn_cast<BlockArgument>();
assert(bbArg && "expected BlockArgument");
});
}
-/// Set the attribute that triggers inplace bufferization on a FuncOp argument
-/// `bbArg`.
+/// Set the attribute that triggers inplace bufferization on a func::FuncOp
+/// argument `bbArg`.
static void setInPlaceFuncArgument(BlockArgument bbArg, bool inPlace) {
- auto funcOp = cast<FuncOp>(bbArg.getOwner()->getParentOp());
+ auto funcOp = cast<func::FuncOp>(bbArg.getOwner()->getParentOp());
funcOp.setArgAttr(bbArg.getArgNumber(),
BufferizableOpInterface::kInplaceableAttrName,
BoolAttr::get(bbArg.getContext(), inPlace));
}
/// Annotate the IR with the result of the analysis. For testing/debugging only.
-static void annotateOpsWithBufferizationMarkers(FuncOp funcOp,
+static void annotateOpsWithBufferizationMarkers(func::FuncOp funcOp,
const AnalysisState &state) {
auto bufferizableOp = cast<BufferizableOpInterface>(funcOp.getOperation());
for (BlockArgument bbArg : funcOp.getArguments())
BufferizationAliasInfo &aliasInfo = analysisState.getAliasInfo();
// A list of functions in the order in which they are analyzed + bufferized.
- SmallVector<FuncOp> orderedFuncOps;
+ SmallVector<func::FuncOp> orderedFuncOps;
// A mapping of FuncOps to their callers.
FuncCallerMap callerMap;
options.addPostAnalysisStep(funcOpBbArgReadWriteAnalysis);
// Analyze ops.
- for (FuncOp funcOp : orderedFuncOps) {
+ for (func::FuncOp funcOp : orderedFuncOps) {
// No body => no analysis.
if (funcOp.getBody().empty())
continue;
return success();
// Bufferize functions.
- for (FuncOp funcOp : orderedFuncOps) {
+ for (func::FuncOp funcOp : orderedFuncOps) {
// No body => no analysis.
if (!funcOp.getBody().empty())
if (failed(bufferizeOp(funcOp, bufferizationState)))
}
// Check result.
- for (FuncOp funcOp : orderedFuncOps) {
+ for (func::FuncOp funcOp : orderedFuncOps) {
if (!options.allowReturnAllocs &&
llvm::any_of(funcOp.getFunctionType().getResults(), [](Type t) {
return t.isa<MemRefType, UnrankedMemRefType>();
layoutPostProcessing(moduleOp);
// Post-pass cleanup of inplaceable and buffer_layout attributes.
- moduleOp.walk([&](FuncOp op) {
+ moduleOp.walk([&](func::FuncOp op) {
for (BlockArgument bbArg : op.getArguments())
removeBufferizationFuncArguments(bbArg);
});
};
} // namespace
-std::unique_ptr<OperationPass<FuncOp>> mlir::createLinalgBufferizePass() {
+std::unique_ptr<OperationPass<func::FuncOp>> mlir::createLinalgBufferizePass() {
return std::make_unique<LinalgBufferizePass>();
}
} // namespace
void LinalgGeneralizationPass::runOnOperation() {
- FuncOp func = getOperation();
+ func::FuncOp func = getOperation();
RewritePatternSet patterns(&getContext());
populateLinalgNamedOpsGeneralizationPatterns(patterns);
(void)applyPatternsAndFoldGreedily(func.getBody(), std::move(patterns));
patterns.add<LinalgGeneralizationPattern>(patterns.getContext(), marker);
}
-std::unique_ptr<OperationPass<FuncOp>> mlir::createLinalgGeneralizationPass() {
+std::unique_ptr<OperationPass<func::FuncOp>>
+mlir::createLinalgGeneralizationPass() {
return std::make_unique<LinalgGeneralizationPass>();
}
static void
getAtMostNEnclosingLoops(tensor::PadOp padOp, int nLevels,
SmallVector<scf::ForOp> &reverseEnclosingLoops) {
- AsmState state(padOp->getParentOfType<mlir::FuncOp>());
+ AsmState state(padOp->getParentOfType<func::FuncOp>());
(void)state;
scf::ForOp outermostEnclosingForOp = nullptr;
Operation *nextEnclosingOp = padOp->getParentOp();
// 4. Hoist the tensor_read/tensor_write and update the tensor SSA links.
// After this transformation the scf.forOp may have unused arguments that can be
// remove by the canonicalization pass.
-void mlir::linalg::hoistRedundantVectorTransfersOnTensor(FuncOp func) {
+void mlir::linalg::hoistRedundantVectorTransfersOnTensor(func::FuncOp func) {
bool changed = true;
while (changed) {
changed = false;
}
}
-void mlir::linalg::hoistRedundantVectorTransfers(FuncOp func) {
+void mlir::linalg::hoistRedundantVectorTransfers(func::FuncOp func) {
bool changed = true;
while (changed) {
changed = false;
struct LinalgInlineScalarOperandsPass
: public LinalgInlineScalarOperandsBase<LinalgInlineScalarOperandsPass> {
void runOnOperation() override {
- FuncOp funcOp = getOperation();
+ func::FuncOp funcOp = getOperation();
MLIRContext *context = funcOp.getContext();
RewritePatternSet patterns(context);
};
} // namespace
-std::unique_ptr<OperationPass<FuncOp>>
+std::unique_ptr<OperationPass<func::FuncOp>>
mlir::createLinalgInlineScalarOperandsPass() {
return std::make_unique<LinalgInlineScalarOperandsPass>();
}
} // namespace
/// Create a LinalgStrategyTileAndFusePass.
-std::unique_ptr<OperationPass<FuncOp>>
+std::unique_ptr<OperationPass<func::FuncOp>>
mlir::createLinalgStrategyTileAndFusePass(
StringRef opName, const LinalgTilingAndFusionOptions &options,
const LinalgTransformationFilter &filter) {
}
/// Create a LinalgStrategyTilePass.
-std::unique_ptr<OperationPass<FuncOp>>
+std::unique_ptr<OperationPass<func::FuncOp>>
mlir::createLinalgStrategyTilePass(StringRef opName,
const LinalgTilingOptions &opt,
const LinalgTransformationFilter &filter) {
}
/// Create a LinalgStrategyPadPass.
-std::unique_ptr<OperationPass<FuncOp>>
+std::unique_ptr<OperationPass<func::FuncOp>>
mlir::createLinalgStrategyPadPass(StringRef opName,
const LinalgPaddingOptions &opt,
const LinalgTransformationFilter &filter) {
}
/// Create a LinalgStrategyPromotePass.
-std::unique_ptr<OperationPass<FuncOp>> mlir::createLinalgStrategyPromotePass(
+std::unique_ptr<OperationPass<func::FuncOp>>
+mlir::createLinalgStrategyPromotePass(
StringRef opName, const LinalgPromotionOptions &opt,
const LinalgTransformationFilter &filter) {
return std::make_unique<LinalgStrategyPromotePass>(opName, opt, filter);
}
/// Create a LinalgStrategyGeneralizePass.
-std::unique_ptr<OperationPass<FuncOp>> mlir::createLinalgStrategyGeneralizePass(
+std::unique_ptr<OperationPass<func::FuncOp>>
+mlir::createLinalgStrategyGeneralizePass(
StringRef opName, const LinalgTransformationFilter &filter) {
return std::make_unique<LinalgStrategyGeneralizePass>(opName, filter);
}
/// Create a LinalgStrategyDecomposePass.
// TODO: if/when we need finer control add an `opName` parameter.
-std::unique_ptr<OperationPass<FuncOp>> mlir::createLinalgStrategyDecomposePass(
+std::unique_ptr<OperationPass<func::FuncOp>>
+mlir::createLinalgStrategyDecomposePass(
const LinalgTransformationFilter &filter) {
return std::make_unique<LinalgStrategyDecomposePass>(filter);
}
/// Create a LinalgStrategyInterchangePass.
-std::unique_ptr<OperationPass<FuncOp>>
+std::unique_ptr<OperationPass<func::FuncOp>>
mlir::createLinalgStrategyInterchangePass(
ArrayRef<int64_t> iteratorInterchange,
const LinalgTransformationFilter &filter) {
}
/// Create a LinalgStrategyVectorizePass.
-std::unique_ptr<OperationPass<FuncOp>> mlir::createLinalgStrategyVectorizePass(
+std::unique_ptr<OperationPass<func::FuncOp>>
+mlir::createLinalgStrategyVectorizePass(
StringRef opName, LinalgVectorizationOptions opt,
const LinalgTransformationFilter &filter, bool padVectorize) {
return std::make_unique<LinalgStrategyVectorizePass>(opName, opt, filter,
}
/// Create a LinalgStrategyEnablePass.
-std::unique_ptr<OperationPass<FuncOp>>
+std::unique_ptr<OperationPass<func::FuncOp>>
mlir::createLinalgStrategyEnablePass(LinalgEnablingOptions opt,
const LinalgTransformationFilter &filter) {
return std::make_unique<LinalgStrategyEnablePass>(opt, filter);
}
/// Create a LinalgStrategyLowerVectorsPass.
-std::unique_ptr<OperationPass<FuncOp>>
+std::unique_ptr<OperationPass<func::FuncOp>>
mlir::createLinalgStrategyLowerVectorsPass(
LinalgVectorLoweringOptions opt, const LinalgTransformationFilter &filter) {
return std::make_unique<LinalgStrategyLowerVectorsPass>(opt, filter);
}
/// Create a LinalgStrategyRemoveMarkersPass.
-std::unique_ptr<OperationPass<FuncOp>>
+std::unique_ptr<OperationPass<func::FuncOp>>
mlir::createLinalgStrategyRemoveMarkersPass() {
return std::make_unique<LinalgStrategyRemoveMarkersPass>();
}
};
template <typename LoopType>
-static void lowerLinalgToLoopsImpl(FuncOp funcOp) {
+static void lowerLinalgToLoopsImpl(func::FuncOp funcOp) {
MLIRContext *context = funcOp.getContext();
RewritePatternSet patterns(context);
patterns.add<LinalgRewritePattern<LoopType>>(context);
} // namespace
-std::unique_ptr<OperationPass<FuncOp>> mlir::createConvertLinalgToLoopsPass() {
+std::unique_ptr<OperationPass<func::FuncOp>>
+mlir::createConvertLinalgToLoopsPass() {
return std::make_unique<LowerToLoops>();
}
-std::unique_ptr<OperationPass<FuncOp>>
+std::unique_ptr<OperationPass<func::FuncOp>>
mlir::createConvertLinalgToParallelLoopsPass() {
return std::make_unique<LowerToParallelLoops>();
}
-std::unique_ptr<OperationPass<FuncOp>>
+std::unique_ptr<OperationPass<func::FuncOp>>
mlir::createConvertLinalgToAffineLoopsPass() {
return std::make_unique<LowerToAffineLoops>();
}
} // namespace
// TODO: support more transformation options in the pass.
-std::unique_ptr<OperationPass<FuncOp>>
+std::unique_ptr<OperationPass<func::FuncOp>>
mlir::createLinalgPromotionPass(bool dynamicBuffers, bool useAlloca) {
return std::make_unique<LinalgPromotionPass>(dynamicBuffers, useAlloca);
}
-std::unique_ptr<OperationPass<FuncOp>> mlir::createLinalgPromotionPass() {
+std::unique_ptr<OperationPass<func::FuncOp>> mlir::createLinalgPromotionPass() {
return std::make_unique<LinalgPromotionPass>();
}
patterns.add<PadOpTilingPattern>(ctx, options);
}
-static void applyExtractSliceOfPadTensorSwapPattern(FuncOp funcOp) {
+static void applyExtractSliceOfPadTensorSwapPattern(func::FuncOp funcOp) {
MLIRContext *ctx = funcOp.getContext();
RewritePatternSet patterns(ctx);
patterns.add<ExtractSliceOfPadTensorSwapPattern>(patterns.getContext());
}
void runOnOperation() override {
- FuncOp funcOp = getOperation();
+ func::FuncOp funcOp = getOperation();
LinalgTilingLoopType type =
llvm::StringSwitch<LinalgTilingLoopType>(loopType)
.Case("for", LinalgTilingLoopType::Loops)
} // namespace
-std::unique_ptr<OperationPass<FuncOp>>
+std::unique_ptr<OperationPass<func::FuncOp>>
mlir::createLinalgTilingPass(ArrayRef<int64_t> tileSizes,
linalg::LinalgTilingLoopType loopType) {
return std::make_unique<LinalgTilingPass>(tileSizes, loopType);
tileSizeComputationFunction = [tileSizes](OpBuilder &b, Operation *op) {
OpBuilder::InsertionGuard guard(b);
b.setInsertionPointToStart(
- &op->getParentOfType<FuncOp>().getBody().front());
+ &op->getParentOfType<func::FuncOp>().getBody().front());
return llvm::to_vector<4>(map_range(tileSizes, [&](int64_t s) {
Value v = b.create<arith::ConstantIndexOp>(op->getLoc(), s);
return v;
Operation *writeValue = rewriter.create<vector::TransferWriteOp>(
loc, readValue, copyOp.target(), indices,
rewriter.getMultiDimIdentityMap(srcType.getRank()));
- copyOp->getParentOfType<FuncOp>().dump();
rewriter.replaceOp(copyOp, writeValue->getResults());
return success();
}
/// non-normalizable as well. We assume external functions to be normalizable.
struct NormalizeMemRefs : public NormalizeMemRefsBase<NormalizeMemRefs> {
void runOnOperation() override;
- void normalizeFuncOpMemRefs(FuncOp funcOp, ModuleOp moduleOp);
- bool areMemRefsNormalizable(FuncOp funcOp);
- void updateFunctionSignature(FuncOp funcOp, ModuleOp moduleOp);
- void setCalleesAndCallersNonNormalizable(FuncOp funcOp, ModuleOp moduleOp,
- DenseSet<FuncOp> &normalizableFuncs);
- Operation *createOpResultsNormalized(FuncOp funcOp, Operation *oldOp);
+ void normalizeFuncOpMemRefs(func::FuncOp funcOp, ModuleOp moduleOp);
+ bool areMemRefsNormalizable(func::FuncOp funcOp);
+ void updateFunctionSignature(func::FuncOp funcOp, ModuleOp moduleOp);
+ void setCalleesAndCallersNonNormalizable(
+ func::FuncOp funcOp, ModuleOp moduleOp,
+ DenseSet<func::FuncOp> &normalizableFuncs);
+ Operation *createOpResultsNormalized(func::FuncOp funcOp, Operation *oldOp);
};
} // namespace
// normalizable are removed from this set.
// TODO: Change this to work on FuncLikeOp once there is an operation
// interface for it.
- DenseSet<FuncOp> normalizableFuncs;
+ DenseSet<func::FuncOp> normalizableFuncs;
// Initialize `normalizableFuncs` with all the functions within a module.
- moduleOp.walk([&](FuncOp funcOp) { normalizableFuncs.insert(funcOp); });
+ moduleOp.walk([&](func::FuncOp funcOp) { normalizableFuncs.insert(funcOp); });
// Traverse through all the functions applying a filter which determines
// whether that function is normalizable or not. All callers/callees of
// they aren't passing any or specific non-normalizable memrefs. So,
// functions which calls or get called by a non-normalizable becomes non-
// normalizable functions themselves.
- moduleOp.walk([&](FuncOp funcOp) {
+ moduleOp.walk([&](func::FuncOp funcOp) {
if (normalizableFuncs.contains(funcOp)) {
if (!areMemRefsNormalizable(funcOp)) {
LLVM_DEBUG(llvm::dbgs()
LLVM_DEBUG(llvm::dbgs() << "Normalizing " << normalizableFuncs.size()
<< " functions\n");
// Those functions which can be normalized are subjected to normalization.
- for (FuncOp &funcOp : normalizableFuncs)
+ for (func::FuncOp &funcOp : normalizableFuncs)
normalizeFuncOpMemRefs(funcOp, moduleOp);
}
/// Set all the calling functions and the callees of the function as not
/// normalizable.
void NormalizeMemRefs::setCalleesAndCallersNonNormalizable(
- FuncOp funcOp, ModuleOp moduleOp, DenseSet<FuncOp> &normalizableFuncs) {
+ func::FuncOp funcOp, ModuleOp moduleOp,
+ DenseSet<func::FuncOp> &normalizableFuncs) {
if (!normalizableFuncs.contains(funcOp))
return;
for (SymbolTable::SymbolUse symbolUse : *symbolUses) {
// TODO: Extend this for ops that are FunctionOpInterface. This would
// require creating an OpInterface for FunctionOpInterface ops.
- FuncOp parentFuncOp = symbolUse.getUser()->getParentOfType<FuncOp>();
- for (FuncOp &funcOp : normalizableFuncs) {
+ func::FuncOp parentFuncOp =
+ symbolUse.getUser()->getParentOfType<func::FuncOp>();
+ for (func::FuncOp &funcOp : normalizableFuncs) {
if (parentFuncOp == funcOp) {
setCalleesAndCallersNonNormalizable(funcOp, moduleOp,
normalizableFuncs);
// Functions called by this function.
funcOp.walk([&](func::CallOp callOp) {
StringAttr callee = callOp.getCalleeAttr().getAttr();
- for (FuncOp &funcOp : normalizableFuncs) {
- // We compare FuncOp and callee's name.
+ for (func::FuncOp &funcOp : normalizableFuncs) {
+ // We compare func::FuncOp and callee's name.
if (callee == funcOp.getNameAttr()) {
setCalleesAndCallersNonNormalizable(funcOp, moduleOp,
normalizableFuncs);
/// wherein even if the non-normalizable memref is not a part of the function's
/// argument or return type, we still label the entire function as
/// non-normalizable. We assume external functions to be normalizable.
-bool NormalizeMemRefs::areMemRefsNormalizable(FuncOp funcOp) {
+bool NormalizeMemRefs::areMemRefsNormalizable(func::FuncOp funcOp) {
// We assume external functions to be normalizable.
if (funcOp.isExternal())
return true;
/// the calling function's signature.
/// TODO: An update to the calling function signature is required only if the
/// returned value is in turn used in ReturnOp of the calling function.
-void NormalizeMemRefs::updateFunctionSignature(FuncOp funcOp,
+void NormalizeMemRefs::updateFunctionSignature(func::FuncOp funcOp,
ModuleOp moduleOp) {
FunctionType functionType = funcOp.getFunctionType();
SmallVector<Type, 4> resultTypes;
// function in ReturnOps, the caller function's signature will also change.
// Hence we record the caller function in 'funcOpsToUpdate' to update their
// signature as well.
- llvm::SmallDenseSet<FuncOp, 8> funcOpsToUpdate;
+ llvm::SmallDenseSet<func::FuncOp, 8> funcOpsToUpdate;
// We iterate over all symbolic uses of the function and update the return
// type at the caller site.
Optional<SymbolTable::UseRange> symbolUses = funcOp.getSymbolUses(moduleOp);
// required.
// TODO: Extend this for ops that are FunctionOpInterface. This would
// require creating an OpInterface for FunctionOpInterface ops.
- FuncOp parentFuncOp = newCallOp->getParentOfType<FuncOp>();
+ func::FuncOp parentFuncOp = newCallOp->getParentOfType<func::FuncOp>();
funcOpsToUpdate.insert(parentFuncOp);
}
}
// Updating the signature type of those functions which call the current
// function. Only if the return type of the current function has a normalized
// memref will the caller function become a candidate for signature update.
- for (FuncOp parentFuncOp : funcOpsToUpdate)
+ for (func::FuncOp parentFuncOp : funcOpsToUpdate)
updateFunctionSignature(parentFuncOp, moduleOp);
}
/// Normalizes the memrefs within a function which includes those arising as a
/// result of AllocOps, CallOps and function's argument. The ModuleOp argument
/// is used to help update function's signature after normalization.
-void NormalizeMemRefs::normalizeFuncOpMemRefs(FuncOp funcOp,
+void NormalizeMemRefs::normalizeFuncOpMemRefs(func::FuncOp funcOp,
ModuleOp moduleOp) {
// Turn memrefs' non-identity layouts maps into ones with identity. Collect
// alloc ops first and then process since normalizeMemRef replaces/erases ops
/// normalized, and new operation containing them in the operation results is
/// returned. If all of the results of `oldOp` have no memrefs or memrefs
/// without affine map, `oldOp` is returned without modification.
-Operation *NormalizeMemRefs::createOpResultsNormalized(FuncOp funcOp,
+Operation *NormalizeMemRefs::createOpResultsNormalized(func::FuncOp funcOp,
Operation *oldOp) {
// Prepare OperationState to create newOp containing normalized memref in
// the operation results.
(void)applyPatternsAndFoldGreedily(func, std::move(patterns));
}
-std::unique_ptr<OperationPass<FuncOp>> mlir::quant::createConvertConstPass() {
+std::unique_ptr<OperationPass<func::FuncOp>>
+mlir::quant::createConvertConstPass() {
return std::make_unique<ConvertConstPass>();
}
signalPassFailure();
}
-std::unique_ptr<OperationPass<FuncOp>>
+std::unique_ptr<OperationPass<func::FuncOp>>
mlir::quant::createConvertSimulatedQuantPass() {
return std::make_unique<ConvertSimulatedQuantPass>();
}
struct ForToWhileLoop : public SCFForToWhileLoopBase<ForToWhileLoop> {
void runOnOperation() override {
- FuncOp funcOp = getOperation();
+ func::FuncOp funcOp = getOperation();
MLIRContext *ctx = funcOp.getContext();
RewritePatternSet patterns(ctx);
patterns.add<ForLoopLoweringPattern>(ctx);
struct SCFForLoopCanonicalization
: public SCFForLoopCanonicalizationBase<SCFForLoopCanonicalization> {
void runOnOperation() override {
- FuncOp funcOp = getOperation();
+ func::FuncOp funcOp = getOperation();
MLIRContext *ctx = funcOp.getContext();
RewritePatternSet patterns(ctx);
scf::populateSCFForLoopCanonicalizationPatterns(patterns);
struct ForLoopPeeling : public SCFForLoopPeelingBase<ForLoopPeeling> {
void runOnOperation() override {
- FuncOp funcOp = getOperation();
+ func::FuncOp funcOp = getOperation();
MLIRContext *ctx = funcOp.getContext();
RewritePatternSet patterns(ctx);
patterns.add<ForLoopPeelingPattern>(ctx, skipPartial);
/// `outlinedFuncBody` to alloc simple canonicalizations.
// TODO: support more than single-block regions.
// TODO: more flexible constant handling.
-FailureOr<FuncOp> mlir::outlineSingleBlockRegion(RewriterBase &rewriter,
- Location loc, Region ®ion,
- StringRef funcName) {
+FailureOr<func::FuncOp> mlir::outlineSingleBlockRegion(RewriterBase &rewriter,
+ Location loc,
+ Region ®ion,
+ StringRef funcName) {
assert(!funcName.empty() && "funcName cannot be empty");
if (!region.hasOneBlock())
return failure();
// Outline before current function.
OpBuilder::InsertionGuard g(rewriter);
- rewriter.setInsertionPoint(region.getParentOfType<FuncOp>());
+ rewriter.setInsertionPoint(region.getParentOfType<func::FuncOp>());
SetVector<Value> captures;
getUsedValuesDefinedAbove(region, captures);
FunctionType outlinedFuncType =
FunctionType::get(rewriter.getContext(), outlinedFuncArgTypes,
originalTerminator->getOperandTypes());
- auto outlinedFunc = rewriter.create<FuncOp>(loc, funcName, outlinedFuncType);
+ auto outlinedFunc =
+ rewriter.create<func::FuncOp>(loc, funcName, outlinedFuncType);
Block *outlinedFuncBody = outlinedFunc.addEntryBlock();
// Merge blocks while replacing the original block operands.
return outlinedFunc;
}
-LogicalResult mlir::outlineIfOp(RewriterBase &b, scf::IfOp ifOp, FuncOp *thenFn,
- StringRef thenFnName, FuncOp *elseFn,
- StringRef elseFnName) {
+LogicalResult mlir::outlineIfOp(RewriterBase &b, scf::IfOp ifOp,
+ func::FuncOp *thenFn, StringRef thenFnName,
+ func::FuncOp *elseFn, StringRef elseFnName) {
IRRewriter rewriter(b);
Location loc = ifOp.getLoc();
- FailureOr<FuncOp> outlinedFuncOpOrFailure;
+ FailureOr<func::FuncOp> outlinedFuncOpOrFailure;
if (thenFn && !ifOp.getThenRegion().empty()) {
outlinedFuncOpOrFailure = outlineSingleBlockRegion(
rewriter, loc, ifOp.getThenRegion(), thenFnName);
populateSPIRVLayoutInfoPatterns(patterns);
ConversionTarget target(*(module.getContext()));
target.addLegalDialect<spirv::SPIRVDialect>();
- target.addLegalOp<FuncOp>();
+ target.addLegalOp<func::FuncOp>();
target.addDynamicallyLegalOp<spirv::GlobalVariableOp>(
[](spirv::GlobalVariableOp op) {
return VulkanLayoutUtils::isLegalType(op.type());
}
//===----------------------------------------------------------------------===//
-// FuncOp Conversion Patterns
+// func::FuncOp Conversion Patterns
//===----------------------------------------------------------------------===//
namespace {
/// A pattern for rewriting function signature to convert arguments of functions
/// to be of valid SPIR-V types.
-class FuncOpConversion final : public OpConversionPattern<FuncOp> {
+class FuncOpConversion final : public OpConversionPattern<func::FuncOp> {
public:
- using OpConversionPattern<FuncOp>::OpConversionPattern;
+ using OpConversionPattern<func::FuncOp>::OpConversionPattern;
LogicalResult
- matchAndRewrite(FuncOp funcOp, OpAdaptor adaptor,
+ matchAndRewrite(func::FuncOp funcOp, OpAdaptor adaptor,
ConversionPatternRewriter &rewriter) const override;
};
} // namespace
LogicalResult
-FuncOpConversion::matchAndRewrite(FuncOp funcOp, OpAdaptor adaptor,
+FuncOpConversion::matchAndRewrite(func::FuncOp funcOp, OpAdaptor adaptor,
ConversionPatternRewriter &rewriter) const {
auto fnType = funcOp.getFunctionType();
if (fnType.getNumResults() > 1)
::mlir::SymbolTable::getSymbolAttrName(), builder.getStringAttr(name)));
}
-FuncOp FunctionLibraryOp::getShapeFunction(Operation *op) {
+func::FuncOp FunctionLibraryOp::getShapeFunction(Operation *op) {
auto attr = getMapping()
.get(op->getName().getIdentifier())
.dyn_cast_or_null<FlatSymbolRefAttr>();
if (!attr)
return nullptr;
- return lookupSymbol<FuncOp>(attr);
+ return lookupSymbol<func::FuncOp>(attr);
}
ParseResult FunctionLibraryOp::parse(OpAsmParser &parser,
};
} // namespace
-std::unique_ptr<OperationPass<FuncOp>> mlir::createShapeBufferizePass() {
+std::unique_ptr<OperationPass<func::FuncOp>> mlir::createShapeBufferizePass() {
return std::make_unique<ShapeBufferizePass>();
}
patterns.getContext());
}
-std::unique_ptr<OperationPass<FuncOp>>
+std::unique_ptr<OperationPass<func::FuncOp>>
mlir::createRemoveShapeConstraintsPass() {
return std::make_unique<RemoveShapeConstraintsPass>();
}
void mlir::sparse_tensor::buildSparseCompiler(
OpPassManager &pm, const SparseCompilerOptions &options) {
// TODO(wrengr): ensure the original `pm` is for ModuleOp
- pm.addNestedPass<FuncOp>(createLinalgGeneralizationPass());
+ pm.addNestedPass<func::FuncOp>(createLinalgGeneralizationPass());
pm.addPass(createLinalgElementwiseOpFusionPass());
pm.addPass(createSparsificationPass(options.sparsificationOptions()));
pm.addPass(createSparseTensorConversionPass(
options.sparseTensorConversionOptions()));
- pm.addNestedPass<FuncOp>(createLinalgBufferizePass());
- pm.addNestedPass<FuncOp>(vector::createVectorBufferizePass());
- pm.addNestedPass<FuncOp>(createConvertLinalgToLoopsPass());
- pm.addNestedPass<FuncOp>(createConvertVectorToSCFPass());
- pm.addNestedPass<FuncOp>(createConvertSCFToCFPass());
+ pm.addNestedPass<func::FuncOp>(createLinalgBufferizePass());
+ pm.addNestedPass<func::FuncOp>(vector::createVectorBufferizePass());
+ pm.addNestedPass<func::FuncOp>(createConvertLinalgToLoopsPass());
+ pm.addNestedPass<func::FuncOp>(createConvertVectorToSCFPass());
+ pm.addNestedPass<func::FuncOp>(createConvertSCFToCFPass());
pm.addPass(func::createFuncBufferizePass());
pm.addPass(arith::createConstantBufferizePass());
- pm.addNestedPass<FuncOp>(createTensorBufferizePass());
- pm.addNestedPass<FuncOp>(
+ pm.addNestedPass<func::FuncOp>(createTensorBufferizePass());
+ pm.addNestedPass<func::FuncOp>(
mlir::bufferization::createFinalizingBufferizePass());
pm.addPass(createLowerAffinePass());
pm.addPass(createConvertVectorToLLVMPass(options.lowerVectorToLLVMOptions()));
pm.addPass(createMemRefToLLVMPass());
- pm.addNestedPass<FuncOp>(createConvertMathToLLVMPass());
+ pm.addNestedPass<func::FuncOp>(createConvertMathToLLVMPass());
pm.addPass(createConvertFuncToLLVMPass());
pm.addPass(createReconcileUnrealizedCastsPass());
}
MLIRContext *context = op->getContext();
auto module = op->getParentOfType<ModuleOp>();
auto result = SymbolRefAttr::get(context, name);
- auto func = module.lookupSymbol<FuncOp>(result.getAttr());
+ auto func = module.lookupSymbol<func::FuncOp>(result.getAttr());
if (!func) {
OpBuilder moduleBuilder(module.getBodyRegion());
- func = moduleBuilder.create<FuncOp>(
+ func = moduleBuilder.create<func::FuncOp>(
op->getLoc(), name,
FunctionType::get(context, operands.getTypes(), resultType));
func.setPrivate();
// All dynamic rules below accept new function, call, return, and tensor
// dim and cast operations as legal output of the rewriting provided that
// all sparse tensor types have been fully rewritten.
- target.addDynamicallyLegalOp<FuncOp>([&](FuncOp op) {
+ target.addDynamicallyLegalOp<func::FuncOp>([&](func::FuncOp op) {
return converter.isSignatureLegal(op.getFunctionType());
});
target.addDynamicallyLegalOp<func::CallOp>([&](func::CallOp op) {
SparseTensorConversionOptions options(
sparseToSparseConversionStrategy(sparseToSparse));
// Populate with rules and apply rewriting rules.
- populateFunctionOpInterfaceTypeConversionPattern<FuncOp>(patterns,
- converter);
+ populateFunctionOpInterfaceTypeConversionPattern<func::FuncOp>(patterns,
+ converter);
populateCallOpTypeConversionPattern(patterns, converter);
populateSparseTensorConversionPatterns(converter, patterns, options);
if (failed(applyPartialConversion(getOperation(), target,
/// Returns true if tensor has an in-place annotation.
static bool isInPlace(Value val) {
if (auto arg = val.dyn_cast<BlockArgument>())
- if (auto funcOp = dyn_cast<FuncOp>(arg.getOwner()->getParentOp()))
+ if (auto funcOp = dyn_cast<func::FuncOp>(arg.getOwner()->getParentOp()))
if (auto attr = funcOp.getArgAttrOfType<BoolAttr>(
arg.getArgNumber(),
bufferization::BufferizableOpInterface::kInplaceableAttrName))
struct TosaInferShapes : public TosaInferShapesBase<TosaInferShapes> {
public:
void runOnOperation() override {
- FuncOp func = getOperation();
+ func::FuncOp func = getOperation();
IRRewriter rewriter(func.getContext());
- // Insert UnrealizedConversionCasts to guarantee ReturnOp agress with
- // the FuncOp type.
+ // Insert UnrealizedConversionCasts to guarantee ReturnOp agrees with
+ // the func::FuncOp type.
func.walk([&](func::ReturnOp op) {
- FuncOp parent = dyn_cast<FuncOp>(op->getParentOp());
+ func::FuncOp parent = dyn_cast<func::FuncOp>(op->getParentOp());
if (!parent)
return;
// -----
-// expected-error @+1 {{referencing to a 'FuncOp' symbol}}
+// expected-error @+1 {{referencing to a 'func::FuncOp' symbol}}
"test.symbol_ref_attr"() {symbol = @foo} : () -> ()
// -----
namespace {
struct TestAffineDataCopy
- : public PassWrapper<TestAffineDataCopy, OperationPass<FuncOp>> {
+ : public PassWrapper<TestAffineDataCopy, OperationPass<func::FuncOp>> {
MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(TestAffineDataCopy)
StringRef getArgument() const final { return PASS_NAME; }
namespace {
struct TestAffineLoopParametricTiling
: public PassWrapper<TestAffineLoopParametricTiling,
- OperationPass<FuncOp>> {
+ OperationPass<func::FuncOp>> {
MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(TestAffineLoopParametricTiling)
StringRef getArgument() const final { return "test-affine-parametric-tile"; }
assert(!band.empty() && "no loops in input band");
AffineForOp topLoop = band[0];
- if (FuncOp funcOp = dyn_cast<FuncOp>(topLoop->getParentOp()))
+ if (func::FuncOp funcOp = dyn_cast<func::FuncOp>(topLoop->getParentOp()))
assert(funcOp.getNumArguments() >= band.size() && "Too few tile sizes");
}
namespace {
struct TestLoopFusion
- : public PassWrapper<TestLoopFusion, OperationPass<FuncOp>> {
+ : public PassWrapper<TestLoopFusion, OperationPass<func::FuncOp>> {
MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(TestLoopFusion)
StringRef getArgument() const final { return "test-loop-fusion"; }
namespace {
struct VectorizerTestPass
- : public PassWrapper<VectorizerTestPass, OperationPass<FuncOp>> {
+ : public PassWrapper<VectorizerTestPass, OperationPass<func::FuncOp>> {
MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(VectorizerTestPass)
static constexpr auto kTestAffineMapOpName = "test_affine_map";
void VectorizerTestPass::runOnOperation() {
// Only support single block functions at this point.
- FuncOp f = getOperation();
+ func::FuncOp f = getOperation();
if (!llvm::hasSingleElement(f))
return;
/// attributes containing the results of data layout queries for operation
/// result types.
struct TestDataLayoutQuery
- : public PassWrapper<TestDataLayoutQuery, OperationPass<FuncOp>> {
+ : public PassWrapper<TestDataLayoutQuery, OperationPass<func::FuncOp>> {
MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(TestDataLayoutQuery)
StringRef getArgument() const final { return "test-data-layout-query"; }
StringRef getDescription() const final { return "Test data layout queries"; }
void runOnOperation() override {
- FuncOp func = getOperation();
+ func::FuncOp func = getOperation();
Builder builder(func.getContext());
const DataLayoutAnalysis &layouts = getAnalysis<DataLayoutAnalysis>();
});
target.addDynamicallyLegalOp<func::CallOp>(
[&](func::CallOp op) { return typeConverter.isLegal(op); });
- target.addDynamicallyLegalOp<FuncOp>([&](FuncOp op) {
+ target.addDynamicallyLegalOp<func::FuncOp>([&](func::FuncOp op) {
return typeConverter.isSignatureLegal(op.getFunctionType());
});
namespace {
struct TestLinalgCodegenStrategy
- : public PassWrapper<TestLinalgCodegenStrategy, OperationPass<FuncOp>> {
+ : public PassWrapper<TestLinalgCodegenStrategy,
+ OperationPass<func::FuncOp>> {
MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(TestLinalgCodegenStrategy)
StringRef getArgument() const final { return "test-linalg-codegen-strategy"; }
.enableContractionLowering()
.enableTransferToSCFConversion());
// Created a nested OpPassManager and run.
- FuncOp funcOp = getOperation();
+ func::FuncOp funcOp = getOperation();
OpPassManager dynamicPM("func.func");
strategy.configurePassPipeline(dynamicPM, funcOp.getContext(), runEnablePass);
if (failed(runPipeline(dynamicPM, funcOp)))
namespace {
struct TestLinalgElementwiseFusion
- : public PassWrapper<TestLinalgElementwiseFusion, OperationPass<FuncOp>> {
+ : public PassWrapper<TestLinalgElementwiseFusion,
+ OperationPass<func::FuncOp>> {
MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(TestLinalgElementwiseFusion)
TestLinalgElementwiseFusion() = default;
void runOnOperation() override {
MLIRContext *context = &this->getContext();
- FuncOp funcOp = this->getOperation();
+ func::FuncOp funcOp = this->getOperation();
if (fuseGenericOps) {
RewritePatternSet fusionPatterns(context);
template <LinalgTilingLoopType LoopType>
struct TestLinalgFusionTransforms
: public PassWrapper<TestLinalgFusionTransforms<LoopType>,
- OperationPass<FuncOp>> {
+ OperationPass<func::FuncOp>> {
MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(TestLinalgFusionTransforms)
void getDependentDialects(DialectRegistry ®istry) const override {
void runOnOperation() override {
MLIRContext *context = &this->getContext();
- FuncOp funcOp = this->getOperation();
+ func::FuncOp funcOp = this->getOperation();
RewritePatternSet fusionPatterns(context);
Aliases alias;
LinalgDependenceGraph dependenceGraph =
};
} // namespace
-static LogicalResult fuseLinalgOpsGreedily(FuncOp f) {
+static LogicalResult fuseLinalgOpsGreedily(func::FuncOp f) {
OpBuilder b(f);
DenseSet<Operation *> eraseSet;
namespace {
struct TestLinalgGreedyFusion
- : public PassWrapper<TestLinalgGreedyFusion, OperationPass<FuncOp>> {
+ : public PassWrapper<TestLinalgGreedyFusion, OperationPass<func::FuncOp>> {
MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(TestLinalgGreedyFusion)
void getDependentDialects(DialectRegistry ®istry) const override {
patterns.add<ExtractSliceOfPadTensorSwapPattern>(context);
scf::populateSCFForLoopCanonicalizationPatterns(patterns);
FrozenRewritePatternSet frozenPatterns(std::move(patterns));
- OpPassManager pm(FuncOp::getOperationName());
+ OpPassManager pm(func::FuncOp::getOperationName());
pm.addPass(createLoopInvariantCodeMotionPass());
pm.addPass(createCanonicalizerPass());
pm.addPass(createCSEPass());
/// testing.
struct TestLinalgTileAndFuseSequencePass
: public PassWrapper<TestLinalgTileAndFuseSequencePass,
- OperationPass<FuncOp>> {
+ OperationPass<func::FuncOp>> {
MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(
TestLinalgTileAndFuseSequencePass)
}
void runOnOperation() override {
- FuncOp funcOp = getOperation();
+ func::FuncOp funcOp = getOperation();
auto &blocks = funcOp.getBody().getBlocks();
if (!llvm::hasSingleElement(blocks)) {
return;
namespace {
struct TestLinalgHoisting
- : public PassWrapper<TestLinalgHoisting, OperationPass<FuncOp>> {
+ : public PassWrapper<TestLinalgHoisting, OperationPass<func::FuncOp>> {
MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(TestLinalgHoisting)
TestLinalgHoisting() = default;
namespace {
struct TestLinalgTransforms
- : public PassWrapper<TestLinalgTransforms, OperationPass<FuncOp>> {
+ : public PassWrapper<TestLinalgTransforms, OperationPass<func::FuncOp>> {
MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(TestLinalgTransforms)
TestLinalgTransforms() = default;
};
} // namespace
-static void applyPatterns(FuncOp funcOp) {
+static void applyPatterns(func::FuncOp funcOp) {
MLIRContext *ctx = funcOp.getContext();
RewritePatternSet patterns(ctx);
}
static void fillL1TilingAndMatmulToVectorPatterns(
- FuncOp funcOp, StringRef startMarker,
+ func::FuncOp funcOp, StringRef startMarker,
SmallVectorImpl<RewritePatternSet> &patternsVector) {
MLIRContext *ctx = funcOp.getContext();
patternsVector.emplace_back(
}
static void
-applyMatmulToVectorPatterns(FuncOp funcOp,
+applyMatmulToVectorPatterns(func::FuncOp funcOp,
bool testMatmulToVectorPatterns1dTiling,
bool testMatmulToVectorPatterns2dTiling) {
MLIRContext *ctx = funcOp.getContext();
(void)applyStagedPatterns(funcOp, frozenStage1Patterns, stage2Patterns);
}
-static void applyVectorTransferForwardingPatterns(FuncOp funcOp) {
+static void applyVectorTransferForwardingPatterns(func::FuncOp funcOp) {
RewritePatternSet forwardPattern(funcOp.getContext());
forwardPattern.add<LinalgCopyVTRForwardingPattern>(funcOp.getContext());
forwardPattern.add<LinalgCopyVTWForwardingPattern>(funcOp.getContext());
(void)applyPatternsAndFoldGreedily(funcOp, std::move(forwardPattern));
}
-static void applyLinalgToVectorPatterns(FuncOp funcOp) {
+static void applyLinalgToVectorPatterns(func::FuncOp funcOp) {
RewritePatternSet patterns(funcOp.getContext());
auto *ctx = funcOp.getContext();
patterns.add<LinalgVectorizationPattern>(
(void)applyPatternsAndFoldGreedily(funcOp, std::move(patterns));
}
-static void applyPadTensorToGenericPatterns(FuncOp funcOp) {
+static void applyPadTensorToGenericPatterns(func::FuncOp funcOp) {
RewritePatternSet patterns(funcOp.getContext());
patterns.add<PadOpTransformationPattern>(funcOp.getContext());
(void)applyPatternsAndFoldGreedily(funcOp, std::move(patterns));
}
-static void applyGeneralizePadTensorPatterns(FuncOp funcOp) {
+static void applyGeneralizePadTensorPatterns(func::FuncOp funcOp) {
RewritePatternSet patterns(funcOp.getContext());
patterns.add<GeneralizePadOpPattern>(funcOp.getContext());
(void)applyPatternsAndFoldGreedily(funcOp, std::move(patterns));
}
-static void applyExtractSliceOfPadTensorSwapPattern(FuncOp funcOp) {
+static void applyExtractSliceOfPadTensorSwapPattern(func::FuncOp funcOp) {
RewritePatternSet patterns(funcOp.getContext());
patterns.add<ExtractSliceOfPadTensorSwapPattern>(funcOp.getContext());
(void)applyPatternsAndFoldGreedily(funcOp, std::move(patterns));
}
-static void applyTilePattern(FuncOp funcOp, const std::string &loopType,
+static void applyTilePattern(func::FuncOp funcOp, const std::string &loopType,
ArrayRef<int64_t> tileSizes,
ArrayRef<int64_t> peeledLoops,
bool scalarizeDynamicDims) {
(void)applyPatternsAndFoldGreedily(funcOp, std::move(tilingPattern));
}
-static void applySplitReduction(FuncOp funcOp) {
+static void applySplitReduction(func::FuncOp funcOp) {
RewritePatternSet patterns(funcOp.getContext());
linalg::populateSplitReductionPattern(
patterns,
(void)applyPatternsAndFoldGreedily(funcOp, std::move(patterns));
}
-static void applyBubbleUpExtractSliceOpPattern(FuncOp funcOp) {
+static void applyBubbleUpExtractSliceOpPattern(func::FuncOp funcOp) {
RewritePatternSet patterns(funcOp.getContext());
populateBubbleUpExtractSliceOpPatterns(patterns);
(void)applyPatternsAndFoldGreedily(funcOp, std::move(patterns));
namespace {
struct TestSCFForUtilsPass
- : public PassWrapper<TestSCFForUtilsPass, OperationPass<FuncOp>> {
+ : public PassWrapper<TestSCFForUtilsPass, OperationPass<func::FuncOp>> {
MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(TestSCFForUtilsPass)
StringRef getArgument() const final { return "test-scf-for-utils"; }
explicit TestSCFForUtilsPass() = default;
void runOnOperation() override {
- FuncOp func = getOperation();
+ func::FuncOp func = getOperation();
SmallVector<scf::ForOp, 4> toErase;
func.walk([&](Operation *fakeRead) {
int count = 0;
getOperation().walk([&](scf::IfOp ifOp) {
auto strCount = std::to_string(count++);
- FuncOp thenFn, elseFn;
+ func::FuncOp thenFn, elseFn;
OpBuilder b(ifOp);
IRRewriter rewriter(b);
if (failed(outlineIfOp(rewriter, ifOp, &thenFn,
"__test_pipelining_iteration";
struct TestSCFPipeliningPass
- : public PassWrapper<TestSCFPipeliningPass, OperationPass<FuncOp>> {
+ : public PassWrapper<TestSCFPipeliningPass, OperationPass<func::FuncOp>> {
MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(TestSCFPipeliningPass)
TestSCFPipeliningPass() = default;
namespace {
/// A pass for testing SPIR-V op availability.
struct PrintOpAvailability
- : public PassWrapper<PrintOpAvailability, OperationPass<FuncOp>> {
+ : public PassWrapper<PrintOpAvailability, OperationPass<func::FuncOp>> {
MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(PrintOpAvailability)
void runOnOperation() override;
namespace {
/// A pass for testing SPIR-V op availability.
struct ConvertToTargetEnv
- : public PassWrapper<ConvertToTargetEnv, OperationPass<FuncOp>> {
+ : public PassWrapper<ConvertToTargetEnv, OperationPass<func::FuncOp>> {
MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(ConvertToTargetEnv)
StringRef getArgument() const override { return "test-spirv-target-env"; }
void ConvertToTargetEnv::runOnOperation() {
MLIRContext *context = &getContext();
- FuncOp fn = getOperation();
+ func::FuncOp fn = getOperation();
auto targetEnv = fn.getOperation()
->getAttr(spirv::getTargetEnvAttrName())
return true;
}
if (auto symbol = op->getAttrOfType<SymbolRefAttr>(shapeFnId)) {
- auto fn = cast<FuncOp>(SymbolTable::lookupSymbolIn(module, symbol));
+ auto fn = cast<func::FuncOp>(SymbolTable::lookupSymbolIn(module, symbol));
op->emitRemark() << "associated shape function: " << fn.getName();
return true;
}
}
}
- module.getBodyRegion().walk([&](FuncOp func) {
+ module.getBodyRegion().walk([&](func::FuncOp func) {
// Skip ops in the shape function library.
if (isa<shape::FunctionLibraryOp>(func->getParentOp()))
return;
def SymbolRefOp : TEST_Op<"symbol_ref_attr"> {
let arguments = (ins
- Confined<FlatSymbolRefAttr, [ReferToOp<"FuncOp">]>:$symbol
+ Confined<FlatSymbolRefAttr, [ReferToOp<"func::FuncOp">]>:$symbol
);
}
let description = [{
The "test.op_funcref" is a test op with a reference to a function symbol.
}];
- let builders = [OpBuilder<(ins "::mlir::FuncOp":$function)>];
+ let builders = [OpBuilder<(ins "::mlir::func::FuncOp":$function)>];
}
// Pattern add the argument plus a increasing static number hidden in
};
struct TestPatternDriver
- : public PassWrapper<TestPatternDriver, OperationPass<FuncOp>> {
+ : public PassWrapper<TestPatternDriver, OperationPass<func::FuncOp>> {
MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(TestPatternDriver)
StringRef getArgument() const final { return "test-patterns"; }
template <typename OpTy>
static void invokeCreateWithInferredReturnType(Operation *op) {
auto *context = op->getContext();
- auto fop = op->getParentOfType<FuncOp>();
+ auto fop = op->getParentOfType<func::FuncOp>();
auto location = UnknownLoc::get(context);
OpBuilder b(op);
b.setInsertionPointAfter(op);
}
struct TestReturnTypeDriver
- : public PassWrapper<TestReturnTypeDriver, OperationPass<FuncOp>> {
+ : public PassWrapper<TestReturnTypeDriver, OperationPass<func::FuncOp>> {
MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(TestReturnTypeDriver)
void getDependentDialects(DialectRegistry ®istry) const override {
namespace {
struct TestDerivedAttributeDriver
- : public PassWrapper<TestDerivedAttributeDriver, OperationPass<FuncOp>> {
+ : public PassWrapper<TestDerivedAttributeDriver,
+ OperationPass<func::FuncOp>> {
MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(TestDerivedAttributeDriver)
StringRef getArgument() const final { return "test-derived-attr"; }
TestNestedOpCreationUndoRewrite, TestReplaceEraseOp,
TestCreateUnregisteredOp>(&getContext());
patterns.add<TestDropOpSignatureConversion>(&getContext(), converter);
- mlir::populateFunctionOpInterfaceTypeConversionPattern<FuncOp>(patterns,
- converter);
+ mlir::populateFunctionOpInterfaceTypeConversionPattern<func::FuncOp>(
+ patterns, converter);
mlir::populateCallOpTypeConversionPattern(patterns, converter);
// Define the conversion target used for the test.
return llvm::none_of(op.getOperandTypes(),
[](Type type) { return type.isF32(); });
});
- target.addDynamicallyLegalOp<FuncOp>([&](FuncOp op) {
+ target.addDynamicallyLegalOp<func::FuncOp>([&](func::FuncOp op) {
return converter.isSignatureLegal(op.getFunctionType()) &&
converter.isLegal(&op.getBody());
});
});
// Check support for marking certain operations as recursively legal.
- target.markOpRecursivelyLegal<FuncOp, ModuleOp>([](Operation *op) {
+ target.markOpRecursivelyLegal<func::FuncOp, ModuleOp>([](Operation *op) {
return static_cast<bool>(
op->getAttrOfType<UnitAttr>("test.recursively_legal"));
});
};
struct TestRemappedValue
- : public mlir::PassWrapper<TestRemappedValue, OperationPass<FuncOp>> {
+ : public mlir::PassWrapper<TestRemappedValue, OperationPass<func::FuncOp>> {
MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(TestRemappedValue)
StringRef getArgument() const final { return "test-remapped-value"; }
patterns.add<TestRemapValueInRegion>(typeConverter, &getContext());
mlir::ConversionTarget target(getContext());
- target.addLegalOp<ModuleOp, FuncOp, TestReturnOp>();
+ target.addLegalOp<ModuleOp, func::FuncOp, TestReturnOp>();
// Expect the type_producer/type_consumer operations to only operate on f64.
target.addDynamicallyLegalOp<TestTypeProducerOp>(
};
struct TestUnknownRootOpDriver
- : public mlir::PassWrapper<TestUnknownRootOpDriver, OperationPass<FuncOp>> {
+ : public mlir::PassWrapper<TestUnknownRootOpDriver,
+ OperationPass<func::FuncOp>> {
MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(TestUnknownRootOpDriver)
StringRef getArgument() const final {
(recursiveType &&
recursiveType.getName() == "outer_converted_type");
});
- target.addDynamicallyLegalOp<FuncOp>([&](FuncOp op) {
+ target.addDynamicallyLegalOp<func::FuncOp>([&](func::FuncOp op) {
return converter.isSignatureLegal(op.getFunctionType()) &&
converter.isLegal(&op.getBody());
});
TestTestSignatureConversionNoConverter>(converter,
&getContext());
patterns.add<TestTypeConversionAnotherProducer>(&getContext());
- mlir::populateFunctionOpInterfaceTypeConversionPattern<FuncOp>(patterns,
- converter);
+ mlir::populateFunctionOpInterfaceTypeConversionPattern<func::FuncOp>(
+ patterns, converter);
if (failed(applyPartialConversion(getOperation(), target,
std::move(patterns))))
patterns.add<TestMergeBlock, TestUndoBlocksMerge, TestMergeSingleBlockOps>(
context);
ConversionTarget target(*context);
- target.addLegalOp<FuncOp, ModuleOp, TerminatorOp, TestBranchOp,
+ target.addLegalOp<func::FuncOp, ModuleOp, TerminatorOp, TestBranchOp,
TestTypeConsumerOp, TestTypeProducerOp, TestReturnOp>();
target.addIllegalOp<ILLegalOpF>();
namespace {
struct TestTraitFolder
- : public PassWrapper<TestTraitFolder, OperationPass<FuncOp>> {
+ : public PassWrapper<TestTraitFolder, OperationPass<func::FuncOp>> {
MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(TestTraitFolder)
StringRef getArgument() const final { return "test-trait-folder"; }
namespace {
struct TosaTestQuantUtilAPI
- : public PassWrapper<TosaTestQuantUtilAPI, OperationPass<FuncOp>> {
+ : public PassWrapper<TosaTestQuantUtilAPI, OperationPass<func::FuncOp>> {
MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(TosaTestQuantUtilAPI)
StringRef getArgument() const final { return PASS_NAME; }
namespace {
struct TestVectorToVectorLowering
- : public PassWrapper<TestVectorToVectorLowering, OperationPass<FuncOp>> {
+ : public PassWrapper<TestVectorToVectorLowering,
+ OperationPass<func::FuncOp>> {
MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(TestVectorToVectorLowering)
TestVectorToVectorLowering() = default;
};
struct TestVectorContractionLowering
- : public PassWrapper<TestVectorContractionLowering, OperationPass<FuncOp>> {
+ : public PassWrapper<TestVectorContractionLowering,
+ OperationPass<func::FuncOp>> {
MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(TestVectorContractionLowering)
StringRef getArgument() const final {
};
struct TestVectorTransposeLowering
- : public PassWrapper<TestVectorTransposeLowering, OperationPass<FuncOp>> {
+ : public PassWrapper<TestVectorTransposeLowering,
+ OperationPass<func::FuncOp>> {
MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(TestVectorTransposeLowering)
StringRef getArgument() const final {
};
struct TestVectorUnrollingPatterns
- : public PassWrapper<TestVectorUnrollingPatterns, OperationPass<FuncOp>> {
+ : public PassWrapper<TestVectorUnrollingPatterns,
+ OperationPass<func::FuncOp>> {
MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(TestVectorUnrollingPatterns)
StringRef getArgument() const final {
};
struct TestVectorDistributePatterns
- : public PassWrapper<TestVectorDistributePatterns, OperationPass<FuncOp>> {
+ : public PassWrapper<TestVectorDistributePatterns,
+ OperationPass<func::FuncOp>> {
MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(TestVectorDistributePatterns)
StringRef getArgument() const final {
void runOnOperation() override {
MLIRContext *ctx = &getContext();
RewritePatternSet patterns(ctx);
- FuncOp func = getOperation();
+ func::FuncOp func = getOperation();
func.walk([&](arith::AddFOp op) {
OpBuilder builder(op);
if (auto vecType = op.getType().dyn_cast<VectorType>()) {
};
struct TestVectorToLoopPatterns
- : public PassWrapper<TestVectorToLoopPatterns, OperationPass<FuncOp>> {
+ : public PassWrapper<TestVectorToLoopPatterns,
+ OperationPass<func::FuncOp>> {
MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(TestVectorToLoopPatterns)
StringRef getArgument() const final { return "test-vector-to-forloop"; }
void runOnOperation() override {
MLIRContext *ctx = &getContext();
RewritePatternSet patterns(ctx);
- FuncOp func = getOperation();
+ func::FuncOp func = getOperation();
func.walk([&](arith::AddFOp op) {
// Check that the operation type can be broken down into a loop.
VectorType type = op.getType().dyn_cast<VectorType>();
struct TestVectorTransferUnrollingPatterns
: public PassWrapper<TestVectorTransferUnrollingPatterns,
- OperationPass<FuncOp>> {
+ OperationPass<func::FuncOp>> {
MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(
TestVectorTransferUnrollingPatterns)
struct TestVectorTransferFullPartialSplitPatterns
: public PassWrapper<TestVectorTransferFullPartialSplitPatterns,
- OperationPass<FuncOp>> {
+ OperationPass<func::FuncOp>> {
MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(
TestVectorTransferFullPartialSplitPatterns)
};
struct TestVectorTransferOpt
- : public PassWrapper<TestVectorTransferOpt, OperationPass<FuncOp>> {
+ : public PassWrapper<TestVectorTransferOpt, OperationPass<func::FuncOp>> {
MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(TestVectorTransferOpt)
StringRef getArgument() const final { return "test-vector-transferop-opt"; }
struct TestVectorTransferLoweringPatterns
: public PassWrapper<TestVectorTransferLoweringPatterns,
- OperationPass<FuncOp>> {
+ OperationPass<func::FuncOp>> {
MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(
TestVectorTransferLoweringPatterns)
struct TestVectorMultiReductionLoweringPatterns
: public PassWrapper<TestVectorMultiReductionLoweringPatterns,
- OperationPass<FuncOp>> {
+ OperationPass<func::FuncOp>> {
MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(
TestVectorMultiReductionLoweringPatterns)
struct TestVectorTransferCollapseInnerMostContiguousDims
: public PassWrapper<TestVectorTransferCollapseInnerMostContiguousDims,
- OperationPass<FuncOp>> {
+ OperationPass<func::FuncOp>> {
MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(
TestVectorTransferCollapseInnerMostContiguousDims)
struct TestVectorReduceToContractPatternsPatterns
: public PassWrapper<TestVectorReduceToContractPatternsPatterns,
- OperationPass<FuncOp>> {
+ OperationPass<func::FuncOp>> {
MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(
TestVectorReduceToContractPatternsPatterns)
struct TestVectorTransferDropUnitDimsPatterns
: public PassWrapper<TestVectorTransferDropUnitDimsPatterns,
- OperationPass<FuncOp>> {
+ OperationPass<func::FuncOp>> {
MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(
TestVectorTransferDropUnitDimsPatterns)
struct TestFlattenVectorTransferPatterns
: public PassWrapper<TestFlattenVectorTransferPatterns,
- OperationPass<FuncOp>> {
+ OperationPass<func::FuncOp>> {
MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(
TestFlattenVectorTransferPatterns)
};
struct TestVectorScanLowering
- : public PassWrapper<TestVectorScanLowering, OperationPass<FuncOp>> {
+ : public PassWrapper<TestVectorScanLowering, OperationPass<func::FuncOp>> {
MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(TestVectorScanLowering)
StringRef getArgument() const final { return "test-vector-scan-lowering"; }
void runOnOperation() override {
Location loc = getOperation().getLoc();
OpBuilder builder(getOperation().getBodyRegion());
- auto funcOp = builder.create<FuncOp>(
+ auto funcOp = builder.create<func::FuncOp>(
loc, "test", FunctionType::get(getOperation().getContext(), {}, {}));
funcOp.addEntryBlock();
// The created function is invalid because there is no return op.
/// with name being the function name and a `suffix`.
static LogicalResult createBackwardSliceFunction(Operation *op,
StringRef suffix) {
- FuncOp parentFuncOp = op->getParentOfType<FuncOp>();
+ func::FuncOp parentFuncOp = op->getParentOfType<func::FuncOp>();
OpBuilder builder(parentFuncOp);
Location loc = op->getLoc();
std::string clonedFuncOpName = parentFuncOp.getName().str() + suffix.str();
- FuncOp clonedFuncOp = builder.create<FuncOp>(loc, clonedFuncOpName,
- parentFuncOp.getFunctionType());
+ func::FuncOp clonedFuncOp = builder.create<func::FuncOp>(
+ loc, clonedFuncOpName, parentFuncOp.getFunctionType());
BlockAndValueMapping mapper;
builder.setInsertionPointToEnd(clonedFuncOp.addEntryBlock());
for (const auto &arg : enumerate(parentFuncOp.getArguments()))
void SliceAnalysisTestPass::runOnOperation() {
ModuleOp module = getOperation();
- auto funcOps = module.getOps<FuncOp>();
+ auto funcOps = module.getOps<func::FuncOp>();
unsigned opNum = 0;
for (auto funcOp : funcOps) {
// TODO: For now this is just looking for Linalg ops. It can be generalized
return "Test detection of symbol uses";
}
WalkResult operateOnSymbol(Operation *symbol, ModuleOp module,
- SmallVectorImpl<FuncOp> &deadFunctions) {
+ SmallVectorImpl<func::FuncOp> &deadFunctions) {
// Test computing uses on a non symboltable op.
Optional<SymbolTable::UseRange> symbolUses =
SymbolTable::getSymbolUses(symbol);
// Test the functionality of symbolKnownUseEmpty.
if (SymbolTable::symbolKnownUseEmpty(symbol, &module.getBodyRegion())) {
- FuncOp funcSymbol = dyn_cast<FuncOp>(symbol);
+ func::FuncOp funcSymbol = dyn_cast<func::FuncOp>(symbol);
if (funcSymbol && funcSymbol.isExternal())
deadFunctions.push_back(funcSymbol);
auto module = getOperation();
// Walk nested symbols.
- SmallVector<FuncOp, 4> deadFunctions;
+ SmallVector<func::FuncOp, 4> deadFunctions;
module.getBodyRegion().walk([&](Operation *nestedOp) {
if (isa<SymbolOpInterface>(nestedOp))
return operateOnSymbol(nestedOp, module, deadFunctions);
namespace {
struct TestRecursiveTypesPass
- : public PassWrapper<TestRecursiveTypesPass, OperationPass<FuncOp>> {
+ : public PassWrapper<TestRecursiveTypesPass, OperationPass<func::FuncOp>> {
MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(TestRecursiveTypesPass)
LogicalResult createIRWithTypes();
return "Test support for recursive types";
}
void runOnOperation() override {
- FuncOp func = getOperation();
+ func::FuncOp func = getOperation();
// Just make sure recursive types are printed and parsed.
if (func.getName() == "roundtrip")
LogicalResult TestRecursiveTypesPass::createIRWithTypes() {
MLIRContext *ctx = &getContext();
- FuncOp func = getOperation();
+ func::FuncOp func = getOperation();
auto type = TestRecursiveType::get(ctx, "some_long_and_unique_name");
if (failed(type.setBody(type)))
return func.emitError("expected to be able to set the type body");
}
};
struct TestFunctionPass
- : public PassWrapper<TestFunctionPass, OperationPass<FuncOp>> {
+ : public PassWrapper<TestFunctionPass, OperationPass<func::FuncOp>> {
MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(TestFunctionPass)
void runOnOperation() final {}
}
};
struct TestOptionsPass
- : public PassWrapper<TestOptionsPass, OperationPass<FuncOp>> {
+ : public PassWrapper<TestOptionsPass, OperationPass<func::FuncOp>> {
MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(TestOptionsPass)
struct Options : public PassPipelineOptions<Options> {
auto &modulePM = pm.nest<ModuleOp>();
modulePM.addPass(std::make_unique<TestModulePass>());
/// A nested function pass.
- auto &nestedFunctionPM = modulePM.nest<FuncOp>();
+ auto &nestedFunctionPM = modulePM.nest<func::FuncOp>();
nestedFunctionPM.addPass(std::make_unique<TestFunctionPass>());
// Nest a function pipeline that contains a single pass.
- auto &functionPM = pm.nest<FuncOp>();
+ auto &functionPM = pm.nest<func::FuncOp>();
functionPM.addPass(std::make_unique<TestFunctionPass>());
}
/// This pass will sink ops named `test.sink_me` and tag them with an attribute
/// `was_sunk` into the first region of `test.sink_target` ops.
struct TestControlFlowSinkPass
- : public PassWrapper<TestControlFlowSinkPass, OperationPass<FuncOp>> {
+ : public PassWrapper<TestControlFlowSinkPass, OperationPass<func::FuncOp>> {
MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(TestControlFlowSinkPass)
/// Get the command-line argument of the test pass.
using namespace test;
namespace {
-struct Inliner : public PassWrapper<Inliner, OperationPass<FuncOp>> {
+struct Inliner : public PassWrapper<Inliner, OperationPass<func::FuncOp>> {
MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(Inliner)
StringRef getArgument() const final { return "test-inline"; }
static LogicalResult lowerToLLVMDialect(ModuleOp module) {
PassManager pm(module.getContext());
pm.addPass(mlir::createMemRefToLLVMPass());
- pm.addNestedPass<FuncOp>(mlir::arith::createConvertArithmeticToLLVMPass());
+ pm.addNestedPass<func::FuncOp>(
+ mlir::arith::createConvertArithmeticToLLVMPass());
pm.addPass(mlir::createConvertFuncToLLVMPass());
pm.addPass(mlir::createReconcileUnrealizedCastsPass());
return pm.run(module);
registry.insert<func::FuncDialect, arith::ArithmeticDialect>();
ctx.appendDialectRegistry(registry);
module = parseSourceString<ModuleOp>(ir, &ctx);
- mapFn = cast<FuncOp>(module->front());
+ mapFn = cast<func::FuncOp>(module->front());
}
// Create ValueShapeRange on the arith.addi operation.
DialectRegistry registry;
MLIRContext ctx;
OwningOpRef<ModuleOp> module;
- FuncOp mapFn;
+ func::FuncOp mapFn;
};
TEST_F(ValueShapeRangeTest, ShapesFromValues) {
// Create a function and a module.
OwningOpRef<ModuleOp> module(ModuleOp::create(UnknownLoc::get(&context)));
- FuncOp func1 =
- FuncOp::create(builder.getUnknownLoc(), "foo",
- builder.getFunctionType(llvm::None, llvm::None));
+ func::FuncOp func1 =
+ func::FuncOp::create(builder.getUnknownLoc(), "foo",
+ builder.getFunctionType(llvm::None, llvm::None));
func1.setPrivate();
module->push_back(func1);
// Create a function and a module.
OwningOpRef<ModuleOp> module(ModuleOp::create(UnknownLoc::get(&context)));
- FuncOp func1 =
- FuncOp::create(builder.getUnknownLoc(), "foo",
- builder.getFunctionType(llvm::None, llvm::None));
+ func::FuncOp func1 =
+ func::FuncOp::create(builder.getUnknownLoc(), "foo",
+ builder.getFunctionType(llvm::None, llvm::None));
func1.setPrivate();
module->push_back(func1);
struct GenericAnalysis {
MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(GenericAnalysis)
- GenericAnalysis(Operation *op) : isFunc(isa<FuncOp>(op)) {}
+ GenericAnalysis(Operation *op) : isFunc(isa<func::FuncOp>(op)) {}
const bool isFunc;
};
struct OpSpecificAnalysis {
MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(OpSpecificAnalysis)
- OpSpecificAnalysis(FuncOp op) : isSecret(op.getName() == "secret") {}
+ OpSpecificAnalysis(func::FuncOp op) : isSecret(op.getName() == "secret") {}
const bool isSecret;
};
-/// Simple pass to annotate a FuncOp with the results of analysis.
+/// Simple pass to annotate a func::FuncOp with the results of analysis.
struct AnnotateFunctionPass
- : public PassWrapper<AnnotateFunctionPass, OperationPass<FuncOp>> {
+ : public PassWrapper<AnnotateFunctionPass, OperationPass<func::FuncOp>> {
MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(AnnotateFunctionPass)
void runOnOperation() override {
- FuncOp op = getOperation();
+ func::FuncOp op = getOperation();
Builder builder(op->getParentOfType<ModuleOp>());
auto &ga = getAnalysis<GenericAnalysis>();
// Create a module with 2 functions.
OwningOpRef<ModuleOp> module(ModuleOp::create(UnknownLoc::get(&context)));
for (StringRef name : {"secret", "not_secret"}) {
- FuncOp func =
- FuncOp::create(builder.getUnknownLoc(), name,
- builder.getFunctionType(llvm::None, llvm::None));
+ auto func =
+ func::FuncOp::create(builder.getUnknownLoc(), name,
+ builder.getFunctionType(llvm::None, llvm::None));
func.setPrivate();
module->push_back(func);
}
// Instantiate and run our pass.
PassManager pm(&context);
- pm.addNestedPass<FuncOp>(std::make_unique<AnnotateFunctionPass>());
+ pm.addNestedPass<func::FuncOp>(std::make_unique<AnnotateFunctionPass>());
LogicalResult result = pm.run(module.get());
EXPECT_TRUE(succeeded(result));
// Verify that each function got annotated with expected attributes.
- for (FuncOp func : module->getOps<FuncOp>()) {
+ for (func::FuncOp func : module->getOps<func::FuncOp>()) {
ASSERT_TRUE(func->getAttr("isFunc").isa<BoolAttr>());
EXPECT_TRUE(func->getAttr("isFunc").cast<BoolAttr>().getValue());