struct LLVMInlinerInterface : public DialectInlinerInterface {
using DialectInlinerInterface::DialectInlinerInterface;
- /// Conservatively only allow inlining of pure ops.
+ /// Conservative allowlist-based inlining of operations supported so far.
bool isLegalToInline(Operation *op, Region *, bool,
BlockAndValueMapping &) const final {
- return isPure(op);
+ if (isPure(op))
+ return true;
+ return llvm::TypeSwitch<Operation *, bool>(op)
+ .Case<LLVM::LoadOp, LLVM::StoreOp>([&](auto memOp) {
+ // Some attributes on load and store operations require handling
+ // during inlining. Since this is not yet implemented, refuse to
+ // inline memory operations that have any of these attributes.
+ if (memOp.getAccessGroups())
+ return false;
+ if (memOp.getAliasScopes())
+ return false;
+ if (memOp.getNoaliasScopes())
+ return false;
+ return true;
+ })
+ .Default([](auto) { return false; });
}
};
} // end anonymous namespace
// RUN: mlir-opt %s -inline -split-input-file | FileCheck %s

func.func @inner_func_inlinable(%ptr : !llvm.ptr) -> i32 {
  %0 = llvm.mlir.constant(42 : i32) : i32
  llvm.store %0, %ptr { alignment = 8 } : i32, !llvm.ptr
  %1 = llvm.load %ptr { alignment = 8 } : !llvm.ptr -> i32
  return %1 : i32
}

// CHECK-LABEL: func.func @test_inline(
// CHECK-SAME: %[[PTR:[a-zA-Z0-9_]+]]
// CHECK-NEXT: %[[CST:.*]] = llvm.mlir.constant(42 : i32) : i32
// CHECK-NEXT: llvm.store %[[CST]], %[[PTR]]
// CHECK-NEXT: %[[RES:.+]] = llvm.load %[[PTR]]
// CHECK-NEXT: return %[[RES]] : i32
func.func @test_inline(%ptr : !llvm.ptr) -> i32 {
  %0 = call @inner_func_inlinable(%ptr) : (!llvm.ptr) -> i32
  return %0 : i32
}

// -----

func.func @inner_func_not_inlinable() -> !llvm.ptr<f64> {
  %0 = llvm.mlir.constant(0 : i32) : i32
  %1 = llvm.alloca %0 x f64 : (i32) -> !llvm.ptr<f64>
  return %1 : !llvm.ptr<f64>
}

// NOTE(review): the body of @test_not_inline below was truncated in the
// corrupted diff (only the trailing `return`/`}` survived); reconstructed to
// match the call-not-inlined pattern checked by the CHECK lines — confirm
// against the upstream test.
// CHECK-LABEL: func.func @test_not_inline() -> !llvm.ptr<f64> {
// CHECK-NEXT: %[[RES:.*]] = call @inner_func_not_inlinable() : () -> !llvm.ptr<f64>
// CHECK-NEXT: return %[[RES]] : !llvm.ptr<f64>
func.func @test_not_inline() -> !llvm.ptr<f64> {
  %0 = call @inner_func_not_inlinable() : () -> !llvm.ptr<f64>
  return %0 : !llvm.ptr<f64>
}

// -----

llvm.metadata @metadata {
  llvm.access_group @group
  llvm.return
}

func.func private @with_mem_attr(%ptr : !llvm.ptr) -> () {
  %0 = llvm.mlir.constant(42 : i32) : i32
  // Do not inline load/store operations that carry attributes requiring
  // handling while inlining, until this is supported by the inliner.
  llvm.store %0, %ptr { access_groups = [@metadata::@group] }: i32, !llvm.ptr
  return
}

// CHECK-LABEL: func.func @test_not_inline
// CHECK-NEXT: call @with_mem_attr
// CHECK-NEXT: return
func.func @test_not_inline(%ptr : !llvm.ptr) -> () {
  call @with_mem_attr(%ptr) : (!llvm.ptr) -> ()
  return
}