From 7a43da60608f66f8fc03af4607fef02ee48de8a8 Mon Sep 17 00:00:00 2001
From: Amit Sabne
Date: Fri, 31 May 2019 13:56:47 -0700
Subject: [PATCH] Loop invariant code motion - remove reliance on
 getForwardSlice. Add more tests.
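A minimal sketch of the transformation this pass performs, in the affine
dialect syntax used by the tests below (illustrative only, not part of the
diff):

  // Before: %v0 is recomputed on every iteration although it never changes.
  affine.for %i0 = 0 to 10 {
    %v0 = addf %cf7, %cf8 : f32
    store %v0, %m[%i0] : memref<10xf32>
  }

  // After LICM: the invariant addf is computed once, before the loop.
  %v0 = addf %cf7, %cf8 : f32
  affine.for %i0 = 0 to 10 {
    store %v0, %m[%i0] : memref<10xf32>
  }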
--
PiperOrigin-RevId: 250950703
---
 mlir/lib/Transforms/LoopInvariantCodeMotion.cpp      | 173 ++++++++--
 mlir/test/Transforms/loop-invariant-code-motion.mlir | 352 ++++++++++++++++++++-
 2 files changed, 497 insertions(+), 28 deletions(-)

diff --git a/mlir/lib/Transforms/LoopInvariantCodeMotion.cpp b/mlir/lib/Transforms/LoopInvariantCodeMotion.cpp
index 402f7d9..3187566 100644
--- a/mlir/lib/Transforms/LoopInvariantCodeMotion.cpp
+++ b/mlir/lib/Transforms/LoopInvariantCodeMotion.cpp
@@ -35,15 +35,13 @@
 #include "mlir/Transforms/Utils.h"
 #include "llvm/ADT/DenseMap.h"
 #include "llvm/ADT/DenseSet.h"
-#include "llvm/ADT/SetVector.h"
+#include "llvm/ADT/SmallPtrSet.h"
 #include "llvm/Support/CommandLine.h"
 #include "llvm/Support/Debug.h"
 #include "llvm/Support/raw_ostream.h"
 
 #define DEBUG_TYPE "licm"
 
-using llvm::SetVector;
-
 using namespace mlir;
 
 namespace {
@@ -57,45 +55,177 @@ struct LoopInvariantCodeMotion : public FunctionPass<LoopInvariantCodeMotion> {
 };
 } // end anonymous namespace
 
+static bool
+checkInvarianceOfNestedIfOps(Operation *op, Value *indVar,
+                             SmallPtrSetImpl<Operation *> &definedOps,
+                             SmallPtrSetImpl<Operation *> &opsToHoist);
+static bool isOpLoopInvariant(Operation &op, Value *indVar,
+                              SmallPtrSetImpl<Operation *> &definedOps,
+                              SmallPtrSetImpl<Operation *> &opsToHoist);
+
+static bool
+areAllOpsInTheBlockListInvariant(Region &blockList, Value *indVar,
+                                 SmallPtrSetImpl<Operation *> &definedOps,
+                                 SmallPtrSetImpl<Operation *> &opsToHoist);
+
+static bool isMemRefDereferencingOp(Operation &op) {
+  // TODO(asabne): Support DMA Ops.
+  if (isa<LoadOp>(op) || isa<StoreOp>(op)) {
+    return true;
+  }
+  return false;
+}
+
 FunctionPassBase *mlir::createLoopInvariantCodeMotionPass() {
   return new LoopInvariantCodeMotion();
 }
 
+// Returns true if the individual op is loop invariant.
+bool isOpLoopInvariant(Operation &op, Value *indVar,
+                       SmallPtrSetImpl<Operation *> &definedOps,
+                       SmallPtrSetImpl<Operation *> &opsToHoist) {
+  LLVM_DEBUG(llvm::dbgs() << "iterating on op: " << op;);
+
+  if (isa<AffineIfOp>(op)) {
+    if (!checkInvarianceOfNestedIfOps(&op, indVar, definedOps, opsToHoist)) {
+      return false;
+    }
+  } else if (isa<AffineForOp>(op)) {
+    // If the body of a predicated region has a for loop, we don't hoist the
+    // 'affine.if'.
+    return false;
+  } else if (isa<DmaStartOp>(op) || isa<DmaWaitOp>(op)) {
+    // TODO(asabne): Support DMA ops.
+    return false;
+  } else if (!isa<ConstantOp>(op)) {
+    if (isMemRefDereferencingOp(op)) {
+      Value *memref = isa<LoadOp>(op) ? cast<LoadOp>(op).getMemRef()
+                                      : cast<StoreOp>(op).getMemRef();
+      for (auto *user : memref->getUsers()) {
+        // If this memref has a user that is a DMA, give up because these
+        // operations write to this memref.
+        if (isa<DmaStartOp>(user) || isa<DmaWaitOp>(user)) {
+          return false;
+        }
+        // If the memref used by the load/store is used in a store elsewhere in
+        // the loop nest, we do not hoist. Similarly, if the memref used in a
+        // load is also being stored too, we do not hoist the load.
+        if (isa<StoreOp>(user) || (isa<LoadOp>(user) && isa<StoreOp>(op))) {
+          if (&op != user) {
+            SmallVector<AffineForOp, 8> userIVs;
+            getLoopIVs(*user, &userIVs);
+            // Check that userIVs don't contain the for loop around the op.
+            if (llvm::is_contained(userIVs, getForInductionVarOwner(indVar))) {
+              return false;
+            }
+          }
+        }
+      }
+    }
+
+    // Insert this op in the defined ops list.
+    definedOps.insert(&op);
+
+    if (op.getNumOperands() == 0 && !isa<AffineTerminatorOp>(op)) {
+      LLVM_DEBUG(llvm::dbgs() << "\nNon-constant op with 0 operands\n");
+      return false;
+    }
+    for (unsigned int i = 0; i < op.getNumOperands(); ++i) {
+      auto *operandSrc = op.getOperand(i)->getDefiningOp();
+
+      LLVM_DEBUG(
+          op.getOperand(i)->print(llvm::dbgs() << "\nIterating on operand\n"));
+
+      // If the loop IV is the operand, this op isn't loop invariant.
+      if (indVar == op.getOperand(i)) {
+        LLVM_DEBUG(llvm::dbgs() << "\nLoop IV is the operand\n");
+        return false;
+      }
+
+      if (operandSrc != nullptr) {
+        LLVM_DEBUG(llvm::dbgs()
+                   << *operandSrc << "\nIterating on operand src\n");
+
+        // If the value was defined in the loop (outside of the
+        // if/else region), and that operation itself wasn't meant to
+        // be hoisted, then mark this operation loop dependent.
+        if (definedOps.count(operandSrc) && opsToHoist.count(operandSrc) == 0) {
+          return false;
+        }
+      }
+    }
+  }
+
+  // If no operand was loop variant, mark this op for motion.
+  opsToHoist.insert(&op);
+  return true;
+}
+
+// Checks if all ops in a region (i.e. list of blocks) are loop invariant.
+bool areAllOpsInTheBlockListInvariant(
+    Region &blockList, Value *indVar, SmallPtrSetImpl<Operation *> &definedOps,
+    SmallPtrSetImpl<Operation *> &opsToHoist) {
+
+  for (auto &b : blockList) {
+    for (auto &op : b) {
+      if (!isOpLoopInvariant(op, indVar, definedOps, opsToHoist)) {
+        return false;
+      }
+    }
+  }
+
+  return true;
+}
+
+// Returns true if the affine.if op can be hoisted.
+bool checkInvarianceOfNestedIfOps(Operation *op, Value *indVar,
+                                  SmallPtrSetImpl<Operation *> &definedOps,
+                                  SmallPtrSetImpl<Operation *> &opsToHoist) {
+  assert(isa<AffineIfOp>(op));
+  auto ifOp = cast<AffineIfOp>(op);
+
+  if (!areAllOpsInTheBlockListInvariant(ifOp.getThenBlocks(), indVar,
+                                        definedOps, opsToHoist)) {
+    return false;
+  }
+
+  if (!areAllOpsInTheBlockListInvariant(ifOp.getElseBlocks(), indVar,
+                                        definedOps, opsToHoist)) {
+    return false;
+  }
+
+  return true;
+}
+
 void LoopInvariantCodeMotion::runOnAffineForOp(AffineForOp forOp) {
   auto *loopBody = forOp.getBody();
+  auto *indVar = forOp.getInductionVar();
+  SmallPtrSet<Operation *, 8> definedOps;
 
   // This is the place where hoisted instructions would reside.
   FuncBuilder b(forOp.getOperation());
 
-  // This vector is used to place loop invariant operations.
+  SmallPtrSet<Operation *, 8> opsToHoist;
   SmallVector<Operation *, 8> opsToMove;
 
-  SetVector<Operation *> loopDefinedOps;
-  // Generate forward slice which contains ops that fall under the transitive
-  // definition closure following the loop induction variable.
-  getForwardSlice(forOp, &loopDefinedOps);
-
-  LLVM_DEBUG(for (auto i
-                  : loopDefinedOps) {
-    i->print(llvm::dbgs() << "\nLoop-dependent op\n");
-  });
-
   for (auto &op : *loopBody) {
-    // If the operation is loop invariant, insert it into opsToMove.
-    if (!isa<AffineForOp>(op) && !isa<AffineTerminatorOp>(op) &&
-        loopDefinedOps.count(&op) != 1) {
-      LLVM_DEBUG(op.print(llvm::dbgs() << "\nLICM'ing op\n"));
-      opsToMove.push_back(&op);
+    // We don't hoist for loops.
+    if (!isa<AffineForOp>(op)) {
+      if (!isa<AffineTerminatorOp>(op)) {
+        if (isOpLoopInvariant(op, indVar, definedOps, opsToHoist)) {
+          opsToMove.push_back(&op);
+        }
+      }
     }
   }
 
-  // For all instructions that we found to be invariant, place them sequentially
+  // For all instructions that we found to be invariant, place them sequentially
   // right before the for loop.
   for (auto *op : opsToMove) {
     op->moveBefore(forOp);
   }
 
-  LLVM_DEBUG(forOp.getOperation()->print(llvm::dbgs() << "\nModified loop\n"));
+  LLVM_DEBUG(forOp.getOperation()->print(llvm::dbgs() << "Modified loop\n"));
 
   // If the for loop body has a single operation (the terminator), erase it.
   if (forOp.getBody()->getOperations().size() == 1) {
@@ -105,7 +235,6 @@ void LoopInvariantCodeMotion::runOnAffineForOp(AffineForOp forOp) {
 }
 
 void LoopInvariantCodeMotion::runOnFunction() {
-
   // Walk through all loops in a function in innermost-loop-first order. This
   // way, we first LICM from the inner loop, and place the ops in
   // the outer loop, which in turn can be further LICM'ed.
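The crux of the rewrite is the memref check in isOpLoopInvariant: a load or
store is hoisted only when no other store (or DMA) in the nest may write the
memref it touches. A minimal illustration of a rejected case, mirroring
@nested_load_store_same_memref in the tests below (illustrative only, not
part of the diff):

  affine.for %i0 = 0 to 10 {
    // No operand of the load depends on %i0, but %m is written inside the
    // nest, so the load is not hoisted above the loop that encloses the
    // store.
    %v0 = load %m[%c0] : memref<10xf32>
    affine.for %i1 = 0 to 10 {
      store %cst, %m[%i1] : memref<10xf32>
    }
  }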
diff --git a/mlir/test/Transforms/loop-invariant-code-motion.mlir b/mlir/test/Transforms/loop-invariant-code-motion.mlir
index af9560b..58cdfc6 100644
--- a/mlir/test/Transforms/loop-invariant-code-motion.mlir
+++ b/mlir/test/Transforms/loop-invariant-code-motion.mlir
@@ -113,7 +113,7 @@ func @invariant_code_inside_affine_if() {
 }
 
-func @nested_loops_with_common_and_uncommon_invariant_code() {
+func @dependent_stores() {
   %m = alloc() : memref<10xf32>
   %cf7 = constant 7.0 : f32
   %cf8 = constant 8.0 : f32
@@ -122,7 +122,7 @@
     %v0 = addf %cf7, %cf8 : f32
     affine.for %i1 = 0 to 10 {
       %v1 = addf %cf7, %cf7 : f32
-      store %v0, %m[%i1] : memref<10xf32>
+      store %v1, %m[%i1] : memref<10xf32>
       store %v0, %m[%i0] : memref<10xf32>
     }
   }
@@ -133,9 +133,97 @@
   // CHECK: %0 = alloc() : memref<10xf32>
   // CHECK-NEXT: %1 = addf %cst, %cst_0 : f32
   // CHECK-NEXT: %2 = addf %cst, %cst : f32
   // CHECK-NEXT: affine.for %i0 = 0 to 10 {
-  // CHECK-NEXT: store %1, %0[%i0] : memref<10xf32>
+
+  // CHECK-NEXT: affine.for %i1 = 0 to 10 {
+  // CHECK-NEXT: store %2, %0[%i1] : memref<10xf32>
+  // CHECK-NEXT: store %1, %0[%i0] : memref<10xf32>
+  // CHECK-NEXT: }
   // CHECK-NEXT: }
   // CHECK-NEXT: return
   return
 }
+
+func @independent_stores() {
+  %m = alloc() : memref<10xf32>
+  %cf7 = constant 7.0 : f32
+  %cf8 = constant 8.0 : f32
+
+  affine.for %i0 = 0 to 10 {
+    %v0 = addf %cf7, %cf8 : f32
+    affine.for %i1 = 0 to 10 {
+      %v1 = addf %cf7, %cf7 : f32
+      store %v0, %m[%i0] : memref<10xf32>
+      store %v1, %m[%i1] : memref<10xf32>
+    }
+  }
+
+  // CHECK: %0 = alloc() : memref<10xf32>
+  // CHECK-NEXT: %cst = constant 7.000000e+00 : f32
+  // CHECK-NEXT: %cst_0 = constant 8.000000e+00 : f32
+  // CHECK-NEXT: %1 = addf %cst, %cst_0 : f32
+  // CHECK-NEXT: %2 = addf %cst, %cst : f32
+  // CHECK-NEXT: affine.for %i0 = 0 to 10 {
+  // CHECK-NEXT: affine.for %i1 = 0 to 10 {
+  // CHECK-NEXT: store %1, %0[%i0] : memref<10xf32>
+  // CHECK-NEXT: store %2, %0[%i1] : memref<10xf32>
+  // CHECK-NEXT: }
+  // CHECK-NEXT: }
+  // CHECK-NEXT: return
+  return
+}
+
+func @load_dependent_store() {
+  %m = alloc() : memref<10xf32>
+  %cf7 = constant 7.0 : f32
+  %cf8 = constant 8.0 : f32
+
+  affine.for %i0 = 0 to 10 {
+    %v0 = addf %cf7, %cf8 : f32
+    affine.for %i1 = 0 to 10 {
+      %v1 = addf %cf7, %cf7 : f32
+      store %v0, %m[%i1] : memref<10xf32>
+      %v2 = load %m[%i0] : memref<10xf32>
+    }
+  }
+
+  // CHECK: %0 = alloc() : memref<10xf32>
+  // CHECK-NEXT: %cst = constant 7.000000e+00 : f32
+  // CHECK-NEXT: %cst_0 = constant 8.000000e+00 : f32
+  // CHECK-NEXT: %1 = addf %cst, %cst_0 : f32
+  // CHECK-NEXT: %2 = addf %cst, %cst : f32
+  // CHECK-NEXT: affine.for %i0 = 0 to 10 {
+  // CHECK-NEXT: affine.for %i1 = 0 to 10 {
+  // CHECK-NEXT: store %1, %0[%i1] : memref<10xf32>
+  // CHECK-NEXT: %3 = load %0[%i0] : memref<10xf32>
+  // CHECK-NEXT: }
+  // CHECK-NEXT: }
+  // CHECK-NEXT: return
+  return
+}
+
+func @load_after_load() {
+  %m = alloc() : memref<10xf32>
+  %cf7 = constant 7.0 : f32
+  %cf8 = constant 8.0 : f32
+
+  affine.for %i0 = 0 to 10 {
+    %v0 = addf %cf7, %cf8 : f32
+    affine.for %i1 = 0 to 10 {
+      %v1 = addf %cf7, %cf7 : f32
+      %v3 = load %m[%i1] : memref<10xf32>
+      %v2 = load %m[%i0] : memref<10xf32>
+    }
+  }
+
+  // CHECK: %0 = alloc() : memref<10xf32>
+  // CHECK-NEXT: %cst = constant 7.000000e+00 : f32
+  // CHECK-NEXT: %cst_0 = constant 8.000000e+00 : f32
+  // CHECK-NEXT: %1 = addf %cst, %cst_0 : f32
+  // CHECK-NEXT: %2 = addf %cst, %cst : f32
+  // CHECK-NEXT: affine.for %i0 = 0 to 10 {
+  // CHECK-NEXT: %3 = load %0[%i0] : memref<10xf32>
+  // CHECK-NEXT: affine.for %i1 = 0 to 10 {
+  // CHECK-NEXT: %4 = load %0[%i1] : memref<10xf32>
+  // CHECK-NEXT: }
+  // CHECK-NEXT: }
+  // CHECK-NEXT: return
+  return
+}
@@ -168,22 +256,274 @@
   return
 }
 
+func @invariant_affine_if2() {
+  %m = alloc() : memref<10xf32>
+  %cf8 = constant 8.0 : f32
+  affine.for %i0 = 0 to 10 {
+    affine.for %i1 = 0 to 10 {
+      affine.if (d0, d1) : (d1 - d0 >= 0) (%i0, %i0) {
+        %cf9 = addf %cf8, %cf8 : f32
+        store %cf9, %m[%i1] : memref<10xf32>
+      }
+    }
+  }
+
+  // CHECK: %0 = alloc() : memref<10xf32>
+  // CHECK-NEXT: %cst = constant 8.000000e+00 : f32
+  // CHECK-NEXT: affine.for %i0 = 0 to 10 {
+  // CHECK-NEXT: affine.for %i1 = 0 to 10 {
+  // CHECK-NEXT: affine.if #set0(%i0, %i0) {
+  // CHECK-NEXT: %1 = addf %cst, %cst : f32
+  // CHECK-NEXT: store %1, %0[%i1] : memref<10xf32>
+  // CHECK-NEXT: }
+  // CHECK-NEXT: }
+  // CHECK-NEXT: }
+  // CHECK-NEXT: return
+
+  return
+}
+
+func @invariant_affine_nested_if() {
+  %m = alloc() : memref<10xf32>
+  %cf8 = constant 8.0 : f32
+  affine.for %i0 = 0 to 10 {
+    affine.for %i1 = 0 to 10 {
+      affine.if (d0, d1) : (d1 - d0 >= 0) (%i0, %i0) {
+        %cf9 = addf %cf8, %cf8 : f32
+        store %cf9, %m[%i0] : memref<10xf32>
+        affine.if (d0, d1) : (d1 - d0 >= 0) (%i0, %i0) {
+          store %cf9, %m[%i1] : memref<10xf32>
+        }
+      }
+    }
+  }
+
+  // CHECK: %0 = alloc() : memref<10xf32>
+  // CHECK-NEXT: %cst = constant 8.000000e+00 : f32
+  // CHECK-NEXT: affine.for %i0 = 0 to 10 {
+  // CHECK-NEXT: affine.for %i1 = 0 to 10 {
+  // CHECK-NEXT: affine.if #set0(%i0, %i0) {
+  // CHECK-NEXT: %1 = addf %cst, %cst : f32
+  // CHECK-NEXT: store %1, %0[%i0] : memref<10xf32>
+  // CHECK-NEXT: affine.if #set0(%i0, %i0) {
+  // CHECK-NEXT: store %1, %0[%i1] : memref<10xf32>
+  // CHECK-NEXT: }
+  // CHECK-NEXT: }
+  // CHECK-NEXT: }
+  // CHECK-NEXT: }
+  // CHECK-NEXT: return
+
+  return
+}
+
+func @invariant_affine_nested_if_else() {
+  %m = alloc() : memref<10xf32>
+  %cf8 = constant 8.0 : f32
+  affine.for %i0 = 0 to 10 {
+    affine.for %i1 = 0 to 10 {
+      affine.if (d0, d1) : (d1 - d0 >= 0) (%i0, %i0) {
+        %cf9 = addf %cf8, %cf8 : f32
+        store %cf9, %m[%i0] : memref<10xf32>
+        affine.if (d0, d1) : (d1 - d0 >= 0) (%i0, %i0) {
+          store %cf9, %m[%i0] : memref<10xf32>
+        } else {
+          store %cf9, %m[%i1] : memref<10xf32>
+        }
+      }
+    }
+  }
+
+  // CHECK: %0 = alloc() : memref<10xf32>
+  // CHECK-NEXT: %cst = constant 8.000000e+00 : f32
+  // CHECK-NEXT: affine.for %i0 = 0 to 10 {
+  // CHECK-NEXT: affine.for %i1 = 0 to 10 {
+  // CHECK-NEXT: affine.if #set0(%i0, %i0) {
+  // CHECK-NEXT: %1 = addf %cst, %cst : f32
+  // CHECK-NEXT: store %1, %0[%i0] : memref<10xf32>
+  // CHECK-NEXT: affine.if #set0(%i0, %i0) {
+  // CHECK-NEXT: store %1, %0[%i0] : memref<10xf32>
+  // CHECK-NEXT: } else {
+  // CHECK-NEXT: store %1, %0[%i1] : memref<10xf32>
+  // CHECK-NEXT: }
+  // CHECK-NEXT: }
+  // CHECK-NEXT: }
+  // CHECK-NEXT: }
+  // CHECK-NEXT: return
+
+  return
+}
+
+func @invariant_affine_nested_if_else2() {
+  %m = alloc() : memref<10xf32>
+  %m2 = alloc() : memref<10xf32>
+  %cf8 = constant 8.0 : f32
+  affine.for %i0 = 0 to 10 {
+    affine.for %i1 = 0 to 10 {
+      affine.if (d0, d1) : (d1 - d0 >= 0) (%i0, %i0) {
+        %cf9 = addf %cf8, %cf8 : f32
+        %tload1 = load %m[%i0] : memref<10xf32>
+        affine.if (d0, d1) : (d1 - d0 >= 0) (%i0, %i0) {
+          store %cf9, %m2[%i0] : memref<10xf32>
+        } else {
+          %tload2 = load %m[%i0] : memref<10xf32>
+        }
+      }
+    }
+  }
+
+  // CHECK: %0 = alloc() : memref<10xf32>
+  // CHECK-NEXT: %1 = alloc() : memref<10xf32>
+  // CHECK-NEXT: %cst = constant 8.000000e+00 : f32
+  // CHECK-NEXT: affine.for %i0 = 0 to 10 {
+  // CHECK-NEXT: affine.if #set0(%i0, %i0) {
+  // CHECK-NEXT: %2 = addf %cst, %cst : f32
+  // CHECK-NEXT: %3 = load %0[%i0] : memref<10xf32>
+  // CHECK-NEXT: affine.if #set0(%i0, %i0) {
+  // CHECK-NEXT: store %2, %1[%i0] : memref<10xf32>
+  // CHECK-NEXT: } else {
+  // CHECK-NEXT: %4 = load %0[%i0] : memref<10xf32>
+  // CHECK-NEXT: }
+  // CHECK-NEXT: }
+  // CHECK-NEXT: }
+  // CHECK-NEXT: return
+
+  return
+}
+
+func @invariant_affine_nested_if2() {
+  %m = alloc() : memref<10xf32>
+  %cf8 = constant 8.0 : f32
+  affine.for %i0 = 0 to 10 {
+    affine.for %i1 = 0 to 10 {
+      affine.if (d0, d1) : (d1 - d0 >= 0) (%i0, %i0) {
+        %cf9 = addf %cf8, %cf8 : f32
+        %v1 = load %m[%i0] : memref<10xf32>
+        affine.if (d0, d1) : (d1 - d0 >= 0) (%i0, %i0) {
+          %v2 = load %m[%i0] : memref<10xf32>
+        }
+      }
+    }
+  }
+
+  // CHECK: %0 = alloc() : memref<10xf32>
+  // CHECK-NEXT: %cst = constant 8.000000e+00 : f32
+  // CHECK-NEXT: affine.for %i0 = 0 to 10 {
+  // CHECK-NEXT: affine.if #set0(%i0, %i0) {
+  // CHECK-NEXT: %1 = addf %cst, %cst : f32
+  // CHECK-NEXT: %2 = load %0[%i0] : memref<10xf32>
+  // CHECK-NEXT: affine.if #set0(%i0, %i0) {
+  // CHECK-NEXT: %3 = load %0[%i0] : memref<10xf32>
+  // CHECK-NEXT: }
+  // CHECK-NEXT: }
+  // CHECK-NEXT: }
+  // CHECK-NEXT: return
+
+  return
+}
+
+func @invariant_affine_for_inside_affine_if() {
+  %m = alloc() : memref<10xf32>
+  %cf8 = constant 8.0 : f32
+  affine.for %i0 = 0 to 10 {
+    affine.for %i1 = 0 to 10 {
+      affine.if (d0, d1) : (d1 - d0 >= 0) (%i0, %i0) {
+        %cf9 = addf %cf8, %cf8 : f32
+        store %cf9, %m[%i0] : memref<10xf32>
+        affine.for %i2 = 0 to 10 {
+          store %cf9, %m[%i2] : memref<10xf32>
+        }
+      }
+    }
+  }
+
+  // CHECK: %0 = alloc() : memref<10xf32>
+  // CHECK-NEXT: %cst = constant 8.000000e+00 : f32
+  // CHECK-NEXT: affine.for %i0 = 0 to 10 {
+  // CHECK-NEXT: affine.for %i1 = 0 to 10 {
+  // CHECK-NEXT: affine.if #set0(%i0, %i0) {
+  // CHECK-NEXT: %1 = addf %cst, %cst : f32
+  // CHECK-NEXT: store %1, %0[%i0] : memref<10xf32>
+  // CHECK-NEXT: affine.for %i2 = 0 to 10 {
+  // CHECK-NEXT: store %1, %0[%i2] : memref<10xf32>
+  // CHECK-NEXT: }
+  // CHECK-NEXT: }
+  // CHECK-NEXT: }
+  // CHECK-NEXT: }
+  // CHECK-NEXT: return
+
+  return
+}
+
 func @invariant_constant_and_load() {
   %m = alloc() : memref<100xf32>
+  %m2 = alloc() : memref<100xf32>
   affine.for %i0 = 0 to 5 {
     %c0 = constant 0 : index
-    %v = load %m[%c0] : memref<100xf32>
+    %v = load %m2[%c0] : memref<100xf32>
     store %v, %m[%i0] : memref<100xf32>
   }
 
   // CHECK: %0 = alloc() : memref<100xf32>
+  // CHECK-NEXT: %1 = alloc() : memref<100xf32>
   // CHECK-NEXT: %c0 = constant 0 : index
-  // CHECK-NEXT: %1 = load %0[%c0] : memref<100xf32>
+  // CHECK-NEXT: %2 = load %1[%c0] : memref<100xf32>
   // CHECK-NEXT: affine.for %i0 = 0 to 5 {
-  // CHECK-NEXT: store %1, %0[%i0] : memref<100xf32>
+  // CHECK-NEXT: store %2, %0[%i0] : memref<100xf32>
   // CHECK-NEXT: }
   // CHECK-NEXT: return
   return
 }
+
+func @nested_load_store_same_memref() {
+  %m = alloc() : memref<10xf32>
+  %cst = constant 8.0 : f32
+  %c0 = constant 0 : index
+  affine.for %i0 = 0 to 10 {
+    %v0 = load %m[%c0] : memref<10xf32>
+    affine.for %i1 = 0 to 10 {
+      store %cst, %m[%i1] : memref<10xf32>
+    }
+  }
+
+  // CHECK: %0 = alloc() : memref<10xf32>
+  // CHECK-NEXT: %cst = constant 8.000000e+00 : f32
+  // CHECK-NEXT: %c0 = constant 0 : index
+  // CHECK-NEXT: affine.for %i0 = 0 to 10 {
+  // CHECK-NEXT: %1 = load %0[%c0] : memref<10xf32>
+  // CHECK-NEXT: affine.for %i1 = 0 to 10 {
+  // CHECK-NEXT: store %cst, %0[%i1] : memref<10xf32>
+  // CHECK-NEXT: }
+  // CHECK-NEXT: }
+  // CHECK-NEXT: return
+
+  return
+}
+
+func @nested_load_store_same_memref2() {
+  %m = alloc() : memref<10xf32>
+  %cst = constant 8.0 : f32
+  %c0 = constant 0 : index
+  affine.for %i0 = 0 to 10 {
+    store %cst, %m[%c0] : memref<10xf32>
+    affine.for %i1 = 0 to 10 {
+      %v0 = load %m[%i0] : memref<10xf32>
+    }
+  }
+
+  // CHECK: %0 = alloc() : memref<10xf32>
+  // CHECK-NEXT: %cst = constant 8.000000e+00 : f32
+  // CHECK-NEXT: %c0 = constant 0 : index
+  // CHECK-NEXT: affine.for %i0 = 0 to 10 {
+  // CHECK-NEXT: store %cst, %0[%c0] : memref<10xf32>
+  // CHECK-NEXT: %1 = load %0[%i0] : memref<10xf32>
+  // CHECK-NEXT: }
+  // CHECK-NEXT: return
+
+  return
+}
-- 
2.7.4
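The RUN line at the top of loop-invariant-code-motion.mlir sits above the
hunks shown, so it does not appear in this diff. Assuming the flag the pass
is registered under matches its file name (an assumption, since the
registration is outside the diff context), the updated tests are driven in
the usual FileCheck fashion:

  // RUN: mlir-opt %s -loop-invariant-code-motion | FileCheck %s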