This patch adds lowering of the omp.atomic.write operation to LLVM IR.
It also changes the operation's assembly syntax to use an equals sign
instead of a comma between the address and the value, so the syntax
reads as the assignment it represents, which is more intuitive.
Reviewed By: kiranchandramohan, peixin
Differential Revision: https://reviews.llvm.org/D116416
SmallVector<ClauseType> clauses = {memoryOrderClause, hintClause};
SmallVector<int> segments;
- if (parser.parseOperand(address) || parser.parseComma() ||
+ if (parser.parseOperand(address) || parser.parseEqual() ||
parser.parseOperand(value) ||
parseClauses(parser, result, clauses, segments) ||
parser.parseColonType(addrType) || parser.parseComma() ||
/// Printer for AtomicWriteOp
static void printAtomicWriteOp(OpAsmPrinter &p, AtomicWriteOp op) {
- p << " " << op.address() << ", " << op.value() << " ";
+ p << " " << op.address() << " = " << op.value() << " ";
if (op.memory_order())
p << "memory_order(" << op.memory_order() << ") ";
if (op.hintAttr())
return success();
}
+/// Converts an omp.atomic.write operation to LLVM IR.
+///
+/// Looks up the translated LLVM values for the op's address and value
+/// operands and emits an atomic store through OpenMPIRBuilder, using the
+/// atomic ordering derived from the op's memory_order clause. Always
+/// returns success().
+static LogicalResult
+convertOmpAtomicWrite(Operation &opInst, llvm::IRBuilderBase &builder,
+ LLVM::ModuleTranslation &moduleTranslation) {
+ auto writeOp = cast<omp::AtomicWriteOp>(opInst);
+ llvm::OpenMPIRBuilder *ompBuilder = moduleTranslation.getOpenMPBuilder();
+
+ // Set up the source location value for OpenMP runtime.
+ llvm::DISubprogram *subprogram =
+ builder.GetInsertBlock()->getParent()->getSubprogram();
+ const llvm::DILocation *diLoc =
+ moduleTranslation.translateLoc(opInst.getLoc(), subprogram);
+ llvm::OpenMPIRBuilder::LocationDescription ompLoc(builder.saveIP(),
+ llvm::DebugLoc(diLoc));
+ // Map the MLIR memory_order clause onto an LLVM atomic ordering.
+ // NOTE(review): presumably convertAtomicOrdering maps an absent clause to
+ // a relaxed/monotonic default — confirm against its definition.
+ llvm::AtomicOrdering ao = convertAtomicOrdering(writeOp.memory_order());
+ // Resolve the MLIR SSA operands to their already-translated LLVM values.
+ llvm::Value *expr = moduleTranslation.lookupValue(writeOp.value());
+ llvm::Value *dest = moduleTranslation.lookupValue(writeOp.address());
+ // Signedness/volatility are hard-coded off here; only the destination
+ // pointer and the ordering vary per op.
+ llvm::OpenMPIRBuilder::AtomicOpValue x = {dest, /*isSigned=*/false,
+ /*isVolatile=*/false};
+ builder.restoreIP(ompBuilder->createAtomicWrite(ompLoc, x, expr, ao));
+ return success();
+}
+
/// Converts an OpenMP reduction operation using OpenMPIRBuilder. Expects the
/// mapping between reduction variables and their private equivalents to have
/// been stored on the ModuleTranslation stack. Currently only supports
.Case([&](omp::AtomicReadOp) {
return convertOmpAtomicRead(*op, builder, moduleTranslation);
})
+ .Case([&](omp::AtomicWriteOp) {
+ return convertOmpAtomicWrite(*op, builder, moduleTranslation);
+ })
.Case([&](omp::SectionsOp) {
return convertOmpSections(*op, builder, moduleTranslation);
})
func @omp_atomic_write1(%addr : memref<i32>, %val : i32) {
// expected-error @below {{the hints omp_sync_hint_uncontended and omp_sync_hint_contended cannot be combined}}
- omp.atomic.write %addr, %val hint(contended, uncontended) : memref<i32>, i32
+ omp.atomic.write %addr = %val hint(contended, uncontended) : memref<i32>, i32
return
}
func @omp_atomic_write2(%addr : memref<i32>, %val : i32) {
// expected-error @below {{memory-order must not be acq_rel or acquire for atomic writes}}
- omp.atomic.write %addr, %val memory_order(acq_rel) : memref<i32>, i32
+ omp.atomic.write %addr = %val memory_order(acq_rel) : memref<i32>, i32
return
}
func @omp_atomic_write3(%addr : memref<i32>, %val : i32) {
// expected-error @below {{memory-order must not be acq_rel or acquire for atomic writes}}
- omp.atomic.write %addr, %val memory_order(acquire) : memref<i32>, i32
+ omp.atomic.write %addr = %val memory_order(acquire) : memref<i32>, i32
return
}
func @omp_atomic_write4(%addr : memref<i32>, %val : i32) {
// expected-error @below {{at most one memory_order clause can appear on the omp.atomic.write operation}}
- omp.atomic.write %addr, %val memory_order(release) memory_order(seq_cst) : memref<i32>, i32
+ omp.atomic.write %addr = %val memory_order(release) memory_order(seq_cst) : memref<i32>, i32
return
}
func @omp_atomic_write5(%addr : memref<i32>, %val : i32) {
// expected-error @below {{at most one hint clause can appear on the omp.atomic.write operation}}
- omp.atomic.write %addr, %val hint(contended) hint(speculative) : memref<i32>, i32
+ omp.atomic.write %addr = %val hint(contended) hint(speculative) : memref<i32>, i32
return
}
func @omp_atomic_write6(%addr : memref<i32>, %val : i32) {
// expected-error @below {{attribute 'memory_order' failed to satisfy constraint: MemoryOrderKind Clause}}
- omp.atomic.write %addr, %val memory_order(xyz) : memref<i32>, i32
+ omp.atomic.write %addr = %val memory_order(xyz) : memref<i32>, i32
return
}
// CHECK-LABEL: omp_atomic_write
// CHECK-SAME: (%[[ADDR:.*]]: memref<i32>, %[[VAL:.*]]: i32)
func @omp_atomic_write(%addr : memref<i32>, %val : i32) {
- // CHECK: omp.atomic.write %[[ADDR]], %[[VAL]] : memref<i32>, i32
- omp.atomic.write %addr, %val : memref<i32>, i32
- // CHECK: omp.atomic.write %[[ADDR]], %[[VAL]] memory_order(seq_cst) : memref<i32>, i32
- omp.atomic.write %addr, %val memory_order(seq_cst) : memref<i32>, i32
- // CHECK: omp.atomic.write %[[ADDR]], %[[VAL]] memory_order(release) : memref<i32>, i32
- omp.atomic.write %addr, %val memory_order(release) : memref<i32>, i32
- // CHECK: omp.atomic.write %[[ADDR]], %[[VAL]] memory_order(relaxed) : memref<i32>, i32
- omp.atomic.write %addr, %val memory_order(relaxed) : memref<i32>, i32
- // CHECK: omp.atomic.write %[[ADDR]], %[[VAL]] hint(uncontended, speculative) : memref<i32>, i32
- omp.atomic.write %addr, %val hint(speculative, uncontended) : memref<i32>, i32
+ // CHECK: omp.atomic.write %[[ADDR]] = %[[VAL]] : memref<i32>, i32
+ omp.atomic.write %addr = %val : memref<i32>, i32
+ // CHECK: omp.atomic.write %[[ADDR]] = %[[VAL]] memory_order(seq_cst) : memref<i32>, i32
+ omp.atomic.write %addr = %val memory_order(seq_cst) : memref<i32>, i32
+ // CHECK: omp.atomic.write %[[ADDR]] = %[[VAL]] memory_order(release) : memref<i32>, i32
+ omp.atomic.write %addr = %val memory_order(release) : memref<i32>, i32
+ // CHECK: omp.atomic.write %[[ADDR]] = %[[VAL]] memory_order(relaxed) : memref<i32>, i32
+ omp.atomic.write %addr = %val memory_order(relaxed) : memref<i32>, i32
+ // CHECK: omp.atomic.write %[[ADDR]] = %[[VAL]] hint(uncontended, speculative) : memref<i32>, i32
+ omp.atomic.write %addr = %val hint(speculative, uncontended) : memref<i32>, i32
return
}
// -----
+// Checks omp.atomic.write lowering to LLVM IR: each form becomes an atomic
+// store with the requested ordering (relaxed/monotonic when no memory_order
+// clause is given); seq_cst and release additionally emit a __kmpc_flush
+// runtime call.
+// CHECK-LABEL: @omp_atomic_write
+// CHECK-SAME: (i32* %[[x:.*]], i32 %[[expr:.*]])
+llvm.func @omp_atomic_write(%x: !llvm.ptr<i32>, %expr: i32) -> () {
+ // No memory_order clause: lowers to a monotonic atomic store.
+ // CHECK: store atomic i32 %[[expr]], i32* %[[x]] monotonic, align 4
+ omp.atomic.write %x = %expr : !llvm.ptr<i32>, i32
+ // CHECK: store atomic i32 %[[expr]], i32* %[[x]] seq_cst, align 4
+ // CHECK: call void @__kmpc_flush(%struct.ident_t* @{{.*}})
+ omp.atomic.write %x = %expr memory_order(seq_cst) : !llvm.ptr<i32>, i32
+ // CHECK: store atomic i32 %[[expr]], i32* %[[x]] release, align 4
+ // CHECK: call void @__kmpc_flush(%struct.ident_t* @{{.*}})
+ omp.atomic.write %x = %expr memory_order(release) : !llvm.ptr<i32>, i32
+ // relaxed maps to LLVM's monotonic ordering, with no flush.
+ // CHECK: store atomic i32 %[[expr]], i32* %[[x]] monotonic, align 4
+ omp.atomic.write %x = %expr memory_order(relaxed) : !llvm.ptr<i32>, i32
+ llvm.return
+}
+
+// -----
+
// CHECK-LABEL: @omp_sections_empty
llvm.func @omp_sections_empty() -> () {
omp.sections {