From 9185896a36457f8e6858c9a4c789975fc067228a Mon Sep 17 00:00:00 2001
From: Tobias Gysi <tobias.gysi@nextsilicon.com>
Date: Mon, 20 Feb 2023 08:46:33 +0100
Subject: [PATCH] [mlir][llvm] Add atomic support to the StoreOp.

This revision adds atomic support to the StoreOp. It chooses to print
the atomic keywords together with the syncscope and ordering arguments.
The revision also implements verifiers to ensure the constraints that
apply to atomic store operations are checked.

Depends on D144112

Reviewed By: Dinistro

Differential Revision: https://reviews.llvm.org/D144200
---
 mlir/include/mlir/Dialect/LLVMIR/LLVMOps.td    | 49 ++++++++++++++++++----
 mlir/lib/Dialect/LLVMIR/IR/LLVMDialect.cpp     | 56 ++++++++++++++++++--------
 mlir/test/Dialect/LLVMIR/invalid.mlir          | 35 ++++++++++++++++
 mlir/test/Dialect/LLVMIR/roundtrip.mlir        |  9 +++++
 mlir/test/Target/LLVMIR/Import/instructions.ll |  9 ++++-
 mlir/test/Target/LLVMIR/llvmir.mlir            | 17 +++++---
 6 files changed, 145 insertions(+), 30 deletions(-)

diff --git a/mlir/include/mlir/Dialect/LLVMIR/LLVMOps.td b/mlir/include/mlir/Dialect/LLVMIR/LLVMOps.td
index 8d02ae1..80818ff 100644
--- a/mlir/include/mlir/Dialect/LLVMIR/LLVMOps.td
+++ b/mlir/include/mlir/Dialect/LLVMIR/LLVMOps.td
@@ -430,16 +430,47 @@ def LLVM_StoreOp : LLVM_Op<"store">, MemoryOpBase {
                    OptionalAttr<SymbolRefArrayAttr>:$alias_scopes,
                    OptionalAttr<SymbolRefArrayAttr>:$noalias_scopes,
                    OptionalAttr<SymbolRefAttr>:$tbaa,
-                   OptionalAttr<I64Attr>:$alignment, UnitAttr:$volatile_,
-                   UnitAttr:$nontemporal);
+                   OptionalAttr<I64Attr>:$alignment,
+                   UnitAttr:$volatile_,
+                   UnitAttr:$nontemporal,
+                   DefaultValuedAttr<AtomicOrdering,
+                                     "AtomicOrdering::not_atomic">:$ordering,
+                   OptionalAttr<StrAttr>:$syncscope);
   string llvmInstName = "Store";
+  let description = [{
+    The `store` operation is used to write to memory. A store may be marked as
+    atomic, volatile, and/or nontemporal, and takes a number of optional
+    attributes that specify aliasing information.
+
+    An atomic store only supports a limited set of pointer, integer, and
+    floating point types, and requires an explicit alignment.
+
+    Examples:
+    ```mlir
+    // A volatile store of a float variable.
+    llvm.store volatile %val, %ptr : f32, !llvm.ptr
+
+    // A nontemporal store of a float variable.
+    llvm.store %val, %ptr {nontemporal} : f32, !llvm.ptr
+
+    // An atomic store of an integer variable.
+    llvm.store %val, %ptr atomic monotonic {alignment = 8 : i64}
+        : i64, !llvm.ptr
+    ```
+
+    See the following link for more details:
+    https://llvm.org/docs/LangRef.html#store-instruction
+  }];
   let assemblyFormat = [{
-    (`volatile` $volatile_^)? $value `,` $addr attr-dict `:`
-    custom<StoreType>(type($value), type($addr))
+    (`volatile` $volatile_^)? $value `,` $addr
+    (`atomic` (`syncscope` `(` $syncscope^ `)`)? $ordering^)?
+    attr-dict `:` custom<StoreType>(type($value), type($addr))
   }];
   string llvmBuilder = [{
     auto *inst = builder.CreateStore($value, $addr, $volatile_);
-  }] # setAlignmentCode
+  }] # setOrderingCode
+     # setSyncScopeCode
+     # setAlignmentCode
      # setNonTemporalMetadataCode
      # setAccessGroupsMetadataCode
      # setAliasScopeMetadataCode
@@ -449,12 +480,16 @@ def LLVM_StoreOp : LLVM_Op<"store">, MemoryOpBase {
     unsigned alignment = storeInst->getAlign().value();
     $_op = $_builder.create<LLVM::StoreOp>(
         $_location, $value, $addr, alignment, storeInst->isVolatile(),
-        storeInst->hasMetadata(llvm::LLVMContext::MD_nontemporal));
+        storeInst->hasMetadata(llvm::LLVMContext::MD_nontemporal),
+        convertAtomicOrderingFromLLVM(storeInst->getOrdering()),
+        getLLVMSyncScope(storeInst));
   }];
   let builders = [
     OpBuilder<(ins "Value":$value, "Value":$addr,
       CArg<"unsigned", "0">:$alignment, CArg<"bool", "false">:$isVolatile,
-      CArg<"bool", "false">:$isNonTemporal)>
+      CArg<"bool", "false">:$isNonTemporal,
+      CArg<"AtomicOrdering", "AtomicOrdering::not_atomic">:$ordering,
+      CArg<"StringRef", "StringRef()">:$syncscope)>
   ];
   let hasVerifier = 1;
 }
diff --git a/mlir/lib/Dialect/LLVMIR/IR/LLVMDialect.cpp b/mlir/lib/Dialect/LLVMIR/IR/LLVMDialect.cpp
index 66766aa..f13f56f 100644
--- a/mlir/lib/Dialect/LLVMIR/IR/LLVMDialect.cpp
+++ b/mlir/lib/Dialect/LLVMIR/IR/LLVMDialect.cpp
@@ -826,21 +826,35 @@ static bool isTypeCompatibleWithAtomicOp(Type type, bool isPointerTypeAllowed) {
          *bitWidth == 64;
 }
 
-LogicalResult LoadOp::verify() {
-  if (getOrdering() != AtomicOrdering::not_atomic) {
-    if (!isTypeCompatibleWithAtomicOp(getResult().getType(),
+/// Verifies the attributes and the type of atomic memory access operations.
+template <typename OpTy>
+LogicalResult verifyAtomicMemOp(OpTy memOp, Type valueType,
+                                ArrayRef<AtomicOrdering> unsupportedOrderings) {
+  if (memOp.getOrdering() != AtomicOrdering::not_atomic) {
+    if (!isTypeCompatibleWithAtomicOp(valueType,
                                       /*isPointerTypeAllowed=*/true))
-      return emitOpError("unsupported type ")
-             << getResult().getType() << " for atomic access";
-    if (getOrdering() == AtomicOrdering::release ||
-        getOrdering() == AtomicOrdering::acq_rel)
-      return emitOpError("unsupported ordering '")
-             << stringifyAtomicOrdering(getOrdering()) << "'";
-    if (!getAlignment())
-      return emitOpError("expected alignment for atomic access");
-  } else if (getSyncscope()) {
-    return emitOpError("expected syncscope to be null for non-atomic access");
+      return memOp.emitOpError("unsupported type ")
+             << valueType << " for atomic access";
+    if (llvm::is_contained(unsupportedOrderings, memOp.getOrdering()))
+      return memOp.emitOpError("unsupported ordering '")
+             << stringifyAtomicOrdering(memOp.getOrdering()) << "'";
+    if (!memOp.getAlignment())
+      return memOp.emitOpError("expected alignment for atomic access");
+    return success();
   }
+  if (memOp.getSyncscope())
+    return memOp.emitOpError(
+        "expected syncscope to be null for non-atomic access");
+  return success();
+}
+
+LogicalResult LoadOp::verify() {
+  Type valueType = getResult().getType();
+  if (failed(verifyAtomicMemOp(
+          *this, valueType,
+          {AtomicOrdering::release, AtomicOrdering::acq_rel})))
+    return failure();
+
   return verifyMemOpMetadata(*this);
 }
 
@@ -914,15 +928,25 @@ static void printLoadType(OpAsmPrinter &printer, Operation *op, Type type,
 // StoreOp
 //===----------------------------------------------------------------------===//
 
-LogicalResult StoreOp::verify() { return verifyMemOpMetadata(*this); }
+LogicalResult StoreOp::verify() {
+  Type valueType = getValue().getType();
+  if (failed(verifyAtomicMemOp(
+          *this, valueType,
+          {AtomicOrdering::acquire, AtomicOrdering::acq_rel})))
+    return failure();
+
+  return verifyMemOpMetadata(*this);
+}
 
 void StoreOp::build(OpBuilder &builder, OperationState &state, Value value,
                     Value addr, unsigned alignment, bool isVolatile,
-                    bool isNonTemporal) {
+                    bool isNonTemporal, AtomicOrdering ordering,
+                    StringRef syncscope) {
   build(builder, state, value, addr, /*access_groups=*/nullptr,
         /*alias_scopes=*/nullptr, /*noalias_scopes=*/nullptr, /*tbaa=*/nullptr,
         alignment ? builder.getI64IntegerAttr(alignment) : nullptr, isVolatile,
-        isNonTemporal);
+        isNonTemporal, ordering,
+        syncscope.empty() ? nullptr : builder.getStringAttr(syncscope));
 }
 
 /// Parses the StoreOp type either using the typed or opaque pointer format.
diff --git a/mlir/test/Dialect/LLVMIR/invalid.mlir b/mlir/test/Dialect/LLVMIR/invalid.mlir
index cbcc633..642639b 100644
--- a/mlir/test/Dialect/LLVMIR/invalid.mlir
+++ b/mlir/test/Dialect/LLVMIR/invalid.mlir
@@ -209,6 +209,41 @@ func.func @store_malformed_elem_type(%foo: !llvm.ptr, %bar: f32) {
 
 // -----
 
+func.func @store_syncscope(%val : f32, %ptr : !llvm.ptr) {
+  // expected-error@below {{expected syncscope to be null for non-atomic access}}
+  "llvm.store"(%val, %ptr) {syncscope = "singlethread"} : (f32, !llvm.ptr) -> ()
+}
+
+// -----
+
+func.func @store_unsupported_ordering(%val : f32, %ptr : !llvm.ptr) {
+  // expected-error@below {{unsupported ordering 'acquire'}}
+  llvm.store %val, %ptr atomic acquire {alignment = 4 : i64} : f32, !llvm.ptr
+}
+
+// -----
+
+func.func @store_unsupported_type(%val : f80, %ptr : !llvm.ptr) {
+  // expected-error@below {{unsupported type 'f80' for atomic access}}
+  llvm.store %val, %ptr atomic monotonic {alignment = 16 : i64} : f80, !llvm.ptr
+}
+
+// -----
+
+func.func @store_unsupported_type(%val : i1, %ptr : !llvm.ptr) {
+  // expected-error@below {{unsupported type 'i1' for atomic access}}
+  llvm.store %val, %ptr atomic monotonic {alignment = 16 : i64} : i1, !llvm.ptr
+}
+
+// -----
+
+func.func @store_unaligned_atomic(%val : f32, %ptr : !llvm.ptr) {
+  // expected-error@below {{expected alignment for atomic access}}
+  llvm.store %val, %ptr atomic monotonic : f32, !llvm.ptr
+}
+
+// -----
+
 func.func @invalid_call() {
   // expected-error@+1 {{'llvm.call' op must have either a `callee` attribute or at least an operand}}
   "llvm.call"() : () -> ()
diff --git a/mlir/test/Dialect/LLVMIR/roundtrip.mlir b/mlir/test/Dialect/LLVMIR/roundtrip.mlir
index ebab8c3..1e49b4e 100644
--- a/mlir/test/Dialect/LLVMIR/roundtrip.mlir
+++ b/mlir/test/Dialect/LLVMIR/roundtrip.mlir
@@ -348,6 +348,15 @@ func.func @atomic_load(%ptr : !llvm.ptr) {
   llvm.return
 }
 
+// CHECK-LABEL: @atomic_store
+func.func @atomic_store(%val : f32, %ptr : !llvm.ptr) {
+  // CHECK: llvm.store %{{.*}}, %{{.*}} atomic monotonic {alignment = 4 : i64} : f32, !llvm.ptr
+  llvm.store %val, %ptr atomic monotonic {alignment = 4 : i64} : f32, !llvm.ptr
+  // CHECK: llvm.store volatile %{{.*}}, %{{.*}} atomic syncscope("singlethread") monotonic {alignment = 16 : i64} : f32, !llvm.ptr
+  llvm.store volatile %val, %ptr atomic syncscope("singlethread") monotonic {alignment = 16 : i64} : f32, !llvm.ptr
+  llvm.return
+}
+
 // CHECK-LABEL: @atomicrmw
 func.func @atomicrmw(%ptr : !llvm.ptr, %val : f32) {
   // CHECK: llvm.atomicrmw fadd %{{.*}}, %{{.*}} monotonic : !llvm.ptr, f32
diff --git a/mlir/test/Target/LLVMIR/Import/instructions.ll b/mlir/test/Target/LLVMIR/Import/instructions.ll
index f66d7b9..47076a7 100644
--- a/mlir/test/Target/LLVMIR/Import/instructions.ll
+++ b/mlir/test/Target/LLVMIR/Import/instructions.ll
@@ -368,13 +368,18 @@ define void @load_store(ptr %ptr) {
 
 ; // -----
 
-; CHECK-LABEL: @atomic_load
+; CHECK-LABEL: @atomic_load_store
 ; CHECK-SAME:  %[[PTR:[a-zA-Z0-9]+]]
-define void @atomic_load(ptr %ptr) {
+define void @atomic_load_store(ptr %ptr) {
   ; CHECK:  %[[V1:[0-9]+]] = llvm.load %[[PTR]] atomic acquire {alignment = 8 : i64} : !llvm.ptr -> f64
   ; CHECK:  %[[V2:[0-9]+]] = llvm.load volatile %[[PTR]] atomic syncscope("singlethreaded") acquire {alignment = 16 : i64} : !llvm.ptr -> f64
   %1 = load atomic double, ptr %ptr acquire, align 8
   %2 = load atomic volatile double, ptr %ptr syncscope("singlethreaded") acquire, align 16
+
+  ; CHECK:  llvm.store %[[V1]], %[[PTR]] atomic release {alignment = 8 : i64} : f64, !llvm.ptr
+  ; CHECK:  llvm.store volatile %[[V2]], %[[PTR]] atomic syncscope("singlethreaded") release {alignment = 16 : i64} : f64, !llvm.ptr
+  store atomic double %1, ptr %ptr release, align 8
+  store atomic volatile double %2, ptr %ptr syncscope("singlethreaded") release, align 16
   ret void
 }
 
diff --git a/mlir/test/Target/LLVMIR/llvmir.mlir b/mlir/test/Target/LLVMIR/llvmir.mlir
index da7dc7f..a58dfde 100644
--- a/mlir/test/Target/LLVMIR/llvmir.mlir
+++ b/mlir/test/Target/LLVMIR/llvmir.mlir
@@ -1780,13 +1780,20 @@ llvm.func @nontemporal_store_and_load() {
 
 // -----
 
-llvm.func @atomic_load(%ptr : !llvm.ptr) {
+llvm.func @atomic_store_and_load(%ptr : !llvm.ptr) {
   // CHECK: load atomic
-  // CHECK-SAME:  monotonic, align 4
-  %1 = llvm.load %ptr atomic monotonic {alignment = 4 : i64} : !llvm.ptr -> f32
+  // CHECK-SAME:  acquire, align 4
+  %1 = llvm.load %ptr atomic acquire {alignment = 4 : i64} : !llvm.ptr -> f32
   // CHECK: load atomic
-  // CHECK-SAME:  syncscope("singlethread") monotonic, align 4
-  %2 = llvm.load %ptr atomic syncscope("singlethread") monotonic {alignment = 4 : i64} : !llvm.ptr -> f32
+  // CHECK-SAME:  syncscope("singlethread") acquire, align 4
+  %2 = llvm.load %ptr atomic syncscope("singlethread") acquire {alignment = 4 : i64} : !llvm.ptr -> f32
+
+  // CHECK: store atomic
+  // CHECK-SAME:  release, align 4
+  llvm.store %1, %ptr atomic release {alignment = 4 : i64} : f32, !llvm.ptr
+  // CHECK: store atomic
+  // CHECK-SAME:  syncscope("singlethread") release, align 4
+  llvm.store %2, %ptr atomic syncscope("singlethread") release {alignment = 4 : i64} : f32, !llvm.ptr
   llvm.return
 }
-- 
2.7.4