// NOTE(review): this span is a unified-diff fragment — lines starting with '+'
// are patch additions, and the original indentation has been stripped by the
// extraction tooling. Code tokens below are preserved byte-for-byte.
/// Returns true if 'forOp' is parallel.
bool mlir::isLoopParallel(AffineForOp forOp) {
+ // Loop is not parallel if it has SSA loop-carried dependences.
+ // TODO: Conditionally support reductions and other loop-carried dependences
+ // that could be handled in the context of a parallel loop.
+ // An iter_args operand on the affine.for is exactly such a loop-carried SSA
+ // value (e.g. a reduction accumulator), so its presence means iterations are
+ // ordered and the loop is conservatively reported as not parallel.
+ if (forOp.getNumIterOperands() > 0)
+ return false;
+
// Collect all load and store ops in loop nest rooted at 'forOp'.
SmallVector<Operation *, 8> loadAndStoreOpInsts;
auto walkResult = forOp.walk([&](Operation *opInst) -> WalkResult {
// NOTE(review): the fragment is truncated here — the walk callback body and
// the remainder of the function (the memory-dependence analysis over the
// collected ops) are not visible in this chunk; confirm against the full file.
return
}
+// Regression test: a loop carrying an SSA value across iterations via
+// iter_args (a running-sum reduction over %red_iter here) must NOT be
+// parallelized — iteration i reads the value produced by iteration i-1,
+// so the verifier expects no parallel loop form in the output.
+// CHECK-LABEL: @unsupported_iter_args
+func @unsupported_iter_args(%in: memref<10xf32>) {
+ %cst = constant 0.000000e+00 : f32
+ // CHECK-NOT: affine.parallel
+ %final_red = affine.for %i = 0 to 10 iter_args(%red_iter = %cst) -> (f32) {
+ %ld = affine.load %in[%i] : memref<10xf32>
+ %add = addf %red_iter, %ld : f32
+ affine.yield %add : f32
+ }
+ return
+}
+// Regression test: only the loop that actually carries iter_args is rejected.
+// The outer loop has no loop-carried SSA values and is still expected to be
+// converted to a parallel form, while the inner reduction loop (which yields
+// %add back into %red_iter each iteration) is expected to remain serial.
+// CHECK-LABEL: @unsupported_nested_iter_args
+func @unsupported_nested_iter_args(%in: memref<20x10xf32>) {
+ %cst = constant 0.000000e+00 : f32
+ // CHECK: affine.parallel
+ affine.for %i = 0 to 20 {
+ // CHECK: affine.for
+ %final_red = affine.for %j = 0 to 10 iter_args(%red_iter = %cst) -> (f32) {
+ %ld = affine.load %in[%i, %j] : memref<20x10xf32>
+ %add = addf %red_iter, %ld : f32
+ affine.yield %add : f32
+ }
+ }
+ return
+}