From cbb0981388b6b2c4ccc574a674841ecd517115e5 Mon Sep 17 00:00:00 2001
From: Fangrui Song
Date: Sat, 17 Dec 2022 19:07:38 +0000
Subject: [PATCH] [mlir] llvm::Optional::value => operator*/operator->

std::optional::value() has undesired exception checking semantics and is
unavailable in older Xcode (see _LIBCPP_AVAILABILITY_BAD_OPTIONAL_ACCESS).
The call sites block std::optional migration.
---
 .../Dialect/Affine/Analysis/AffineStructures.h      |  2 +-
 mlir/include/mlir/IR/OpDefinition.h                 |  2 +-
 mlir/include/mlir/Transforms/DialectConversion.h    |  4 ++--
 mlir/lib/AsmParser/AffineParser.cpp                 |  4 ++--
 mlir/lib/AsmParser/LocationParser.cpp               |  2 +-
 mlir/lib/AsmParser/Parser.cpp                       |  2 +-
 mlir/lib/AsmParser/TypeParser.cpp                   |  2 +-
 mlir/lib/Conversion/TosaToLinalg/TosaToLinalg.cpp   |  4 ++--
 .../Conversion/TosaToLinalg/TosaToLinalgNamed.cpp   |  4 ++--
 mlir/lib/Conversion/VectorToSCF/VectorToSCF.cpp     |  5 ++--
 mlir/lib/Dialect/AMDGPU/IR/AMDGPUDialect.cpp        |  2 +-
 .../Dialect/Affine/Analysis/AffineStructures.cpp    | 18 +++++++--------
 mlir/lib/Dialect/Affine/Analysis/LoopAnalysis.cpp   |  4 ++--
 mlir/lib/Dialect/Affine/Analysis/Utils.cpp          |  6 ++---
 .../Affine/Transforms/AffineDataCopyGeneration.cpp  |  3 +--
 mlir/lib/Dialect/Affine/Utils/LoopUtils.cpp         | 26 ++++++++++-----------
 mlir/lib/Dialect/Affine/Utils/Utils.cpp             |  4 ++--
 .../lib/Dialect/GPU/Transforms/SerializeToBlob.cpp  |  2 +-
 mlir/lib/Dialect/LLVMIR/IR/LLVMDialect.cpp          |  3 +--
 .../Linalg/Transforms/DataLayoutPropagation.cpp     |  2 +-
 .../Linalg/Transforms/ElementwiseOpFusion.cpp       |  2 +-
 mlir/lib/Dialect/Linalg/Transforms/Promotion.cpp    |  2 +-
 mlir/lib/Dialect/Linalg/Transforms/Tiling.cpp       |  4 ++--
 .../MemRef/TransformOps/MemRefTransformOps.cpp      |  2 +-
 mlir/lib/Dialect/Quant/IR/QuantDialectBytecode.cpp  |  9 ++++----
 .../Dialect/SCF/Transforms/TileUsingInterface.cpp   | 22 ++++++++----------
 .../lib/Dialect/SPIRV/IR/SPIRVCanonicalization.cpp  |  8 +++----
 .../SPIRV/Transforms/LowerABIAttributesPass.cpp     |  4 ++--
 .../SparseTensor/Transforms/Sparsification.cpp      | 16 ++++++-------
 mlir/lib/Dialect/SparseTensor/Utils/Merger.cpp      | 10 ++++----
 mlir/lib/Dialect/Tensor/IR/TensorOps.cpp            |  2 +-
 .../Tensor/IR/TensorTilingInterfaceImpl.cpp         |  4 ++--
 .../Transforms/ExtractSliceFromReshapeUtils.cpp     |  2 +-
 .../SwapExtractSliceWithProducerPatterns.cpp        |  2 +-
 mlir/lib/Dialect/Utils/ReshapeOpsUtils.cpp          |  2 +-
 .../Dialect/Vector/Transforms/VectorTransforms.cpp  | 17 +++++++-------
 mlir/lib/Target/LLVMIR/ConvertFromLLVMIR.cpp        | 27 +++++++++++-----------
 .../Dialect/LLVMIR/LLVMToLLVMIRTranslation.cpp      |  2 +-
 .../lib/Analysis/TestMemRefDependenceCheck.cpp      |  8 +++----
 .../lib/Dialect/Shape/TestShapeMappingAnalysis.cpp  |  2 +-
 .../TilingInterface/TestTilingInterface.cpp         |  2 +-
 mlir/tools/mlir-tblgen/LLVMIRConversionGen.cpp      |  2 +-
 42 files changed, 120 insertions(+), 132 deletions(-)

diff --git a/mlir/include/mlir/Dialect/Affine/Analysis/AffineStructures.h b/mlir/include/mlir/Dialect/Affine/Analysis/AffineStructures.h
index d6cd025..cd23e12 100644
--- a/mlir/include/mlir/Dialect/Affine/Analysis/AffineStructures.h
+++ b/mlir/include/mlir/Dialect/Affine/Analysis/AffineStructures.h
@@ -413,7 +413,7 @@ public:
   inline Value getValue(unsigned pos) const {
     assert(pos < getNumDimAndSymbolVars() && "Invalid position");
     assert(hasValue(pos) && "variable's Value not set");
-    return values[pos].value();
+    return *values[pos];
   }
 
   /// Returns true if the pos^th variable has an associated Value.
diff --git a/mlir/include/mlir/IR/OpDefinition.h b/mlir/include/mlir/IR/OpDefinition.h index 268f2e45..002bec3 100644 --- a/mlir/include/mlir/IR/OpDefinition.h +++ b/mlir/include/mlir/IR/OpDefinition.h @@ -47,7 +47,7 @@ public: bool has_value() const { return impl.has_value(); } /// Access the internal ParseResult value. - ParseResult value() const { return impl.value(); } + ParseResult value() const { return *impl; } ParseResult operator*() const { return value(); } private: diff --git a/mlir/include/mlir/Transforms/DialectConversion.h b/mlir/include/mlir/Transforms/DialectConversion.h index 343d8db..871f4b0 100644 --- a/mlir/include/mlir/Transforms/DialectConversion.h +++ b/mlir/include/mlir/Transforms/DialectConversion.h @@ -252,9 +252,9 @@ private: [callback = std::forward(callback)]( T type, SmallVectorImpl &results, ArrayRef) { if (Optional resultOpt = callback(type)) { - bool wasSuccess = static_cast(resultOpt.value()); + bool wasSuccess = static_cast(*resultOpt); if (wasSuccess) - results.push_back(resultOpt.value()); + results.push_back(*resultOpt); return Optional(success(wasSuccess)); } return Optional(); diff --git a/mlir/lib/AsmParser/AffineParser.cpp b/mlir/lib/AsmParser/AffineParser.cpp index c6af9ee..82433be 100644 --- a/mlir/lib/AsmParser/AffineParser.cpp +++ b/mlir/lib/AsmParser/AffineParser.cpp @@ -332,11 +332,11 @@ AffineExpr AffineParser::parseSymbolSSAIdExpr() { /// affine-expr ::= integer-literal AffineExpr AffineParser::parseIntegerExpr() { auto val = getToken().getUInt64IntegerValue(); - if (!val.has_value() || (int64_t)val.value() < 0) + if (!val.has_value() || (int64_t)*val < 0) return emitError("constant too large for index"), nullptr; consumeToken(Token::integer); - return builder.getAffineConstantExpr((int64_t)val.value()); + return builder.getAffineConstantExpr((int64_t)*val); } /// Parses an expression that can be a valid operand of an affine expression. diff --git a/mlir/lib/AsmParser/LocationParser.cpp b/mlir/lib/AsmParser/LocationParser.cpp index 02cb3ed..61b2017 100644 --- a/mlir/lib/AsmParser/LocationParser.cpp +++ b/mlir/lib/AsmParser/LocationParser.cpp @@ -122,7 +122,7 @@ ParseResult Parser::parseNameOrFileLineColLocation(LocationAttr &loc) { return emitError("expected integer column number in FileLineColLoc"); consumeToken(Token::integer); - loc = FileLineColLoc::get(ctx, str, line.value(), column.value()); + loc = FileLineColLoc::get(ctx, str, *line, *column); return success(); } diff --git a/mlir/lib/AsmParser/Parser.cpp b/mlir/lib/AsmParser/Parser.cpp index 6fb32de..864ed2d 100644 --- a/mlir/lib/AsmParser/Parser.cpp +++ b/mlir/lib/AsmParser/Parser.cpp @@ -2057,7 +2057,7 @@ ParseResult OperationParser::parseRegionBody(Region ®ion, SMLoc startLoc, << "previously referenced here"; } Location loc = entryArg.sourceLoc.has_value() - ? entryArg.sourceLoc.value() + ? 
*entryArg.sourceLoc : getEncodedSourceLocation(argInfo.location); BlockArgument arg = block->addArgument(entryArg.type, loc); diff --git a/mlir/lib/AsmParser/TypeParser.cpp b/mlir/lib/AsmParser/TypeParser.cpp index 2595722..38ae3ff 100644 --- a/mlir/lib/AsmParser/TypeParser.cpp +++ b/mlir/lib/AsmParser/TypeParser.cpp @@ -273,7 +273,7 @@ Type Parser::parseNonFunctionType() { auto width = getToken().getIntTypeBitwidth(); if (!width.has_value()) return (emitError("invalid integer width"), nullptr); - if (width.value() > IntegerType::kMaxWidth) { + if (*width > IntegerType::kMaxWidth) { emitError(getToken().getLoc(), "integer bitwidth is limited to ") << IntegerType::kMaxWidth << " bits"; return nullptr; diff --git a/mlir/lib/Conversion/TosaToLinalg/TosaToLinalg.cpp b/mlir/lib/Conversion/TosaToLinalg/TosaToLinalg.cpp index ebc63bd..7cb72f4 100644 --- a/mlir/lib/Conversion/TosaToLinalg/TosaToLinalg.cpp +++ b/mlir/lib/Conversion/TosaToLinalg/TosaToLinalg.cpp @@ -1453,7 +1453,7 @@ public: SmallVector affineMaps = { rewriter.getMultiDimIdentityMap(resultTy.getRank())}; auto emptyTensor = b.create(resultTy.getShape(), resultETy, - dynamicDimsOr.value()); + *dynamicDimsOr); auto genericOp = b.create( resultTy, ValueRange({}), ValueRange{emptyTensor}, affineMaps, getNParallelLoopsAttrs(resultTy.getRank())); @@ -2051,7 +2051,7 @@ public: if (!dynamicDimsOr.has_value()) return rewriter.notifyMatchFailure( op, "tosa.gather currently only supports dynamic batch dimensions"); - SmallVector dynamicDims = dynamicDimsOr.value(); + SmallVector dynamicDims = *dynamicDimsOr; auto resultElementTy = resultTy.getElementType(); diff --git a/mlir/lib/Conversion/TosaToLinalg/TosaToLinalgNamed.cpp b/mlir/lib/Conversion/TosaToLinalg/TosaToLinalgNamed.cpp index 89cfd7c..287f841 100644 --- a/mlir/lib/Conversion/TosaToLinalg/TosaToLinalgNamed.cpp +++ b/mlir/lib/Conversion/TosaToLinalg/TosaToLinalgNamed.cpp @@ -695,7 +695,7 @@ public: checkHasDynamicBatchDims(rewriter, op, {input, op.getOutput()}); if (!dynamicDimsOr.has_value()) return failure(); - SmallVector dynamicDims = dynamicDimsOr.value(); + SmallVector dynamicDims = *dynamicDimsOr; // Determine what the initial value needs to be for the max pool op. Attribute initialAttr; @@ -772,7 +772,7 @@ public: checkHasDynamicBatchDims(rewriter, op, {input, op.getOutput()}); if (!dynamicDimsOr.has_value()) return failure(); - SmallVector dynamicDims = dynamicDimsOr.value(); + SmallVector dynamicDims = *dynamicDimsOr; // Apply padding as necessary. 
llvm::SmallVector pad; diff --git a/mlir/lib/Conversion/VectorToSCF/VectorToSCF.cpp b/mlir/lib/Conversion/VectorToSCF/VectorToSCF.cpp index f7eb550..82ec921 100644 --- a/mlir/lib/Conversion/VectorToSCF/VectorToSCF.cpp +++ b/mlir/lib/Conversion/VectorToSCF/VectorToSCF.cpp @@ -98,9 +98,8 @@ static void getXferIndices(OpBuilder &b, OpTy xferOp, Value iv, if (!isBroadcast) { AffineExpr d0, d1; bindDims(xferOp.getContext(), d0, d1); - Value offset = adaptor.getIndices()[dim.value()]; - indices[dim.value()] = - makeComposedAffineApply(b, loc, d0 + d1, {offset, iv}); + Value offset = adaptor.getIndices()[*dim]; + indices[*dim] = makeComposedAffineApply(b, loc, d0 + d1, {offset, iv}); } } diff --git a/mlir/lib/Dialect/AMDGPU/IR/AMDGPUDialect.cpp b/mlir/lib/Dialect/AMDGPU/IR/AMDGPUDialect.cpp index c681f2d..d7732b8 100644 --- a/mlir/lib/Dialect/AMDGPU/IR/AMDGPUDialect.cpp +++ b/mlir/lib/Dialect/AMDGPU/IR/AMDGPUDialect.cpp @@ -103,7 +103,7 @@ static bool staticallyOutOfBounds(OpType op) { Optional idxVal = getConstantUint32(idx); if (!idxVal) return false; - indexVal += stride * idxVal.value(); + indexVal += stride * *idxVal; } result += indexVal; if (result > std::numeric_limits::max()) diff --git a/mlir/lib/Dialect/Affine/Analysis/AffineStructures.cpp b/mlir/lib/Dialect/Affine/Analysis/AffineStructures.cpp index 39f3a1d..453f20c 100644 --- a/mlir/lib/Dialect/Affine/Analysis/AffineStructures.cpp +++ b/mlir/lib/Dialect/Affine/Analysis/AffineStructures.cpp @@ -1036,16 +1036,15 @@ void FlatAffineValueConstraints::getSliceBounds( auto ubConst = getConstantBound64(BoundType::UB, pos); if (lbConst.has_value() && ubConst.has_value()) { // Detect equality to a constant. - if (lbConst.value() == ubConst.value()) { - memo[pos] = getAffineConstantExpr(lbConst.value(), context); + if (*lbConst == *ubConst) { + memo[pos] = getAffineConstantExpr(*lbConst, context); changed = true; continue; } // Detect an variable as modulo of another variable w.r.t a // constant. - if (detectAsMod(*this, pos, lbConst.value(), ubConst.value(), memo, - context)) { + if (detectAsMod(*this, pos, *lbConst, *ubConst, memo, context)) { changed = true; continue; } @@ -1146,9 +1145,8 @@ void FlatAffineValueConstraints::getSliceBounds( << "WARNING: Potentially over-approximating slice lb\n"); auto lbConst = getConstantBound64(BoundType::LB, pos + offset); if (lbConst.has_value()) { - lbMap = - AffineMap::get(numMapDims, numMapSymbols, - getAffineConstantExpr(lbConst.value(), context)); + lbMap = AffineMap::get(numMapDims, numMapSymbols, + getAffineConstantExpr(*lbConst, context)); } } if (!ubMap || ubMap.getNumResults() > 1) { @@ -1158,7 +1156,7 @@ void FlatAffineValueConstraints::getSliceBounds( if (ubConst.has_value()) { ubMap = AffineMap::get( numMapDims, numMapSymbols, - getAffineConstantExpr(ubConst.value() + ubAdjustment, context)); + getAffineConstantExpr(*ubConst + ubAdjustment, context)); } } } @@ -1698,12 +1696,12 @@ void FlatAffineRelation::compose(const FlatAffineRelation &other) { // Add and match domain of `rel` to domain of `this`. for (unsigned i = 0, e = rel.getNumDomainDims(); i < e; ++i) if (relMaybeValues[i].has_value()) - setValue(i, relMaybeValues[i].value()); + setValue(i, *relMaybeValues[i]); // Add and match range of `this` to range of `rel`. 
for (unsigned i = 0, e = getNumRangeDims(); i < e; ++i) { unsigned rangeIdx = rel.getNumDomainDims() + i; if (thisMaybeValues[rangeIdx].has_value()) - rel.setValue(rangeIdx, thisMaybeValues[rangeIdx].value()); + rel.setValue(rangeIdx, *thisMaybeValues[rangeIdx]); } // Append `this` to `rel` and simplify constraints. diff --git a/mlir/lib/Dialect/Affine/Analysis/LoopAnalysis.cpp b/mlir/lib/Dialect/Affine/Analysis/LoopAnalysis.cpp index babaab3..10d7426 100644 --- a/mlir/lib/Dialect/Affine/Analysis/LoopAnalysis.cpp +++ b/mlir/lib/Dialect/Affine/Analysis/LoopAnalysis.cpp @@ -96,8 +96,8 @@ Optional mlir::getConstantTripCount(AffineForOp forOp) { for (auto resultExpr : map.getResults()) { if (auto constExpr = resultExpr.dyn_cast()) { if (tripCount.has_value()) - tripCount = std::min(tripCount.value(), - static_cast(constExpr.getValue())); + tripCount = + std::min(*tripCount, static_cast(constExpr.getValue())); else tripCount = constExpr.getValue(); } else diff --git a/mlir/lib/Dialect/Affine/Analysis/Utils.cpp b/mlir/lib/Dialect/Affine/Analysis/Utils.cpp index bbcf0e9..47e26e0 100644 --- a/mlir/lib/Dialect/Affine/Analysis/Utils.cpp +++ b/mlir/lib/Dialect/Affine/Analysis/Utils.cpp @@ -1010,7 +1010,7 @@ bool mlir::buildSliceTripCountMap( } Optional maybeConstTripCount = getConstantTripCount(forOp); if (maybeConstTripCount.has_value()) { - (*tripCountMap)[op] = maybeConstTripCount.value(); + (*tripCountMap)[op] = *maybeConstTripCount; continue; } return false; @@ -1019,7 +1019,7 @@ bool mlir::buildSliceTripCountMap( // Slice bounds are created with a constant ub - lb difference. if (!tripCount.has_value()) return false; - (*tripCountMap)[op] = tripCount.value(); + (*tripCountMap)[op] = *tripCount; } return true; } @@ -1319,7 +1319,7 @@ static Optional getMemoryFootprintBytes(Block &block, Optional size = region.second->getRegionSize(); if (!size.has_value()) return std::nullopt; - totalSizeInBytes += size.value(); + totalSizeInBytes += *size; } return totalSizeInBytes; } diff --git a/mlir/lib/Dialect/Affine/Transforms/AffineDataCopyGeneration.cpp b/mlir/lib/Dialect/Affine/Transforms/AffineDataCopyGeneration.cpp index 6efef00..43b1637 100644 --- a/mlir/lib/Dialect/Affine/Transforms/AffineDataCopyGeneration.cpp +++ b/mlir/lib/Dialect/Affine/Transforms/AffineDataCopyGeneration.cpp @@ -149,8 +149,7 @@ void AffineDataCopyGeneration::runOnBlock(Block *block, getMemoryFootprintBytes(forOp, /*memorySpace=*/0); return (footprint.has_value() && - static_cast(footprint.value()) > - fastMemCapacityBytes); + static_cast(*footprint) > fastMemCapacityBytes); }; // If the memory footprint of the 'affine.for' loop is higher than fast diff --git a/mlir/lib/Dialect/Affine/Utils/LoopUtils.cpp b/mlir/lib/Dialect/Affine/Utils/LoopUtils.cpp index bb626e2..963602b 100644 --- a/mlir/lib/Dialect/Affine/Utils/LoopUtils.cpp +++ b/mlir/lib/Dialect/Affine/Utils/LoopUtils.cpp @@ -405,12 +405,10 @@ checkTilingLegalityImpl(MutableArrayRef origLoops) { for (unsigned k = 0, e = depComps.size(); k < e; k++) { DependenceComponent depComp = depComps[k]; if (depComp.lb.has_value() && depComp.ub.has_value() && - depComp.lb.value() < depComp.ub.value() && - depComp.ub.value() < 0) { + *depComp.lb < *depComp.ub && *depComp.ub < 0) { LLVM_DEBUG(llvm::dbgs() - << "Dependence component lb = " - << Twine(depComp.lb.value()) - << " ub = " << Twine(depComp.ub.value()) + << "Dependence component lb = " << Twine(*depComp.lb) + << " ub = " << Twine(*depComp.ub) << " is negative at depth: " << Twine(d) << " and thus violates the legality rule.\n"); 
return false; @@ -801,11 +799,11 @@ constructTiledIndexSetHyperRect(MutableArrayRef origLoops, newLoops[width + i].setStep(origLoops[i].getStep()); // Set the upper bound. - if (mayBeConstantCount && mayBeConstantCount.value() < tileSizes[i]) { + if (mayBeConstantCount && *mayBeConstantCount < tileSizes[i]) { // Trip count is less than the tile size: upper bound is lower bound + // trip count * stepSize. - AffineMap ubMap = b.getSingleDimShiftAffineMap( - mayBeConstantCount.value() * origLoops[i].getStep()); + AffineMap ubMap = b.getSingleDimShiftAffineMap(*mayBeConstantCount * + origLoops[i].getStep()); newLoops[width + i].setUpperBound( /*operands=*/newLoops[i].getInductionVar(), ubMap); } else if (largestDiv % tileSizes[i] != 0) { @@ -974,7 +972,7 @@ void mlir::getTileableBands(func::FuncOp f, LogicalResult mlir::loopUnrollFull(AffineForOp forOp) { Optional mayBeConstantTripCount = getConstantTripCount(forOp); if (mayBeConstantTripCount.has_value()) { - uint64_t tripCount = mayBeConstantTripCount.value(); + uint64_t tripCount = *mayBeConstantTripCount; if (tripCount == 0) return success(); if (tripCount == 1) @@ -990,8 +988,8 @@ LogicalResult mlir::loopUnrollUpToFactor(AffineForOp forOp, uint64_t unrollFactor) { Optional mayBeConstantTripCount = getConstantTripCount(forOp); if (mayBeConstantTripCount.has_value() && - mayBeConstantTripCount.value() < unrollFactor) - return loopUnrollByFactor(forOp, mayBeConstantTripCount.value()); + *mayBeConstantTripCount < unrollFactor) + return loopUnrollByFactor(forOp, *mayBeConstantTripCount); return loopUnrollByFactor(forOp, unrollFactor); } @@ -1159,8 +1157,8 @@ LogicalResult mlir::loopUnrollJamUpToFactor(AffineForOp forOp, uint64_t unrollJamFactor) { Optional mayBeConstantTripCount = getConstantTripCount(forOp); if (mayBeConstantTripCount.has_value() && - mayBeConstantTripCount.value() < unrollJamFactor) - return loopUnrollJamByFactor(forOp, mayBeConstantTripCount.value()); + *mayBeConstantTripCount < unrollJamFactor) + return loopUnrollJamByFactor(forOp, *mayBeConstantTripCount); return loopUnrollJamByFactor(forOp, unrollJamFactor); } @@ -1582,7 +1580,7 @@ AffineForOp mlir::sinkSequentialLoops(AffineForOp forOp) { for (unsigned j = 0; j < maxLoopDepth; ++j) { DependenceComponent &depComp = depComps[j]; assert(depComp.lb.has_value() && depComp.ub.has_value()); - if (depComp.lb.value() != 0 || depComp.ub.value() != 0) + if (*depComp.lb != 0 || *depComp.ub != 0) isParallelLoop[j] = false; } } diff --git a/mlir/lib/Dialect/Affine/Utils/Utils.cpp b/mlir/lib/Dialect/Affine/Utils/Utils.cpp index eb7224f..9359fde 100644 --- a/mlir/lib/Dialect/Affine/Utils/Utils.cpp +++ b/mlir/lib/Dialect/Affine/Utils/Utils.cpp @@ -1796,13 +1796,13 @@ MemRefType mlir::normalizeMemRefType(MemRefType memrefType, // always bounded. However, when we have symbols, we may not be able to // obtain a constant upper bound. Also, mapping to a negative space is // invalid for normalization. - if (!ubConst.has_value() || ubConst.value() < 0) { + if (!ubConst.has_value() || *ubConst < 0) { LLVM_DEBUG(llvm::dbgs() << "can't normalize map due to unknown/invalid upper bound"); return memrefType; } // If dimension of new memrefType is dynamic, the value is -1. 
- newShape[d] = ubConst.value() + 1; + newShape[d] = *ubConst + 1; } } diff --git a/mlir/lib/Dialect/GPU/Transforms/SerializeToBlob.cpp b/mlir/lib/Dialect/GPU/Transforms/SerializeToBlob.cpp index fd745bd..6493ab6 100644 --- a/mlir/lib/Dialect/GPU/Transforms/SerializeToBlob.cpp +++ b/mlir/lib/Dialect/GPU/Transforms/SerializeToBlob.cpp @@ -78,7 +78,7 @@ void gpu::SerializeToBlobPass::runOnOperation() { if (!maybeTargetISA.has_value()) return signalPassFailure(); - std::string targetISA = std::move(maybeTargetISA.value()); + std::string targetISA = std::move(*maybeTargetISA); LLVM_DEBUG({ llvm::dbgs() << "ISA for module: " << getOperation().getNameAttr() << "\n"; diff --git a/mlir/lib/Dialect/LLVMIR/IR/LLVMDialect.cpp b/mlir/lib/Dialect/LLVMIR/IR/LLVMDialect.cpp index 33e7a7c..211080e 100644 --- a/mlir/lib/Dialect/LLVMIR/IR/LLVMDialect.cpp +++ b/mlir/lib/Dialect/LLVMIR/IR/LLVMDialect.cpp @@ -220,8 +220,7 @@ ParseResult AllocaOp::parse(OpAsmParser &parser, OperationState &result) { Optional alignmentAttr = result.attributes.getNamed("alignment"); if (alignmentAttr.has_value()) { - auto alignmentInt = - alignmentAttr.value().getValue().dyn_cast(); + auto alignmentInt = alignmentAttr->getValue().dyn_cast(); if (!alignmentInt) return parser.emitError(parser.getNameLoc(), "expected integer alignment"); diff --git a/mlir/lib/Dialect/Linalg/Transforms/DataLayoutPropagation.cpp b/mlir/lib/Dialect/Linalg/Transforms/DataLayoutPropagation.cpp index 603faf2..5e54097 100644 --- a/mlir/lib/Dialect/Linalg/Transforms/DataLayoutPropagation.cpp +++ b/mlir/lib/Dialect/Linalg/Transforms/DataLayoutPropagation.cpp @@ -298,7 +298,7 @@ struct BubbleUpPackOpThroughElemGenericOpPattern auto genericOp = bubbleUpPackOpThroughElemGenericOp(rewriter, packOp); if (failed(genericOp)) return failure(); - rewriter.replaceOp(packOp, genericOp.value().getResults()); + rewriter.replaceOp(packOp, genericOp->getResults()); return success(); } }; diff --git a/mlir/lib/Dialect/Linalg/Transforms/ElementwiseOpFusion.cpp b/mlir/lib/Dialect/Linalg/Transforms/ElementwiseOpFusion.cpp index 1f8d7af..7f2b915 100644 --- a/mlir/lib/Dialect/Linalg/Transforms/ElementwiseOpFusion.cpp +++ b/mlir/lib/Dialect/Linalg/Transforms/ElementwiseOpFusion.cpp @@ -413,7 +413,7 @@ public: FailureOr fusedOp = fuseElementwiseOps(rewriter, &opOperand); if (succeeded(fusedOp)) { auto replacements = - fusedOp.value()->getResults().take_back(genericOp.getNumResults()); + (*fusedOp)->getResults().take_back(genericOp.getNumResults()); rewriter.replaceOp(genericOp, replacements); return success(); } diff --git a/mlir/lib/Dialect/Linalg/Transforms/Promotion.cpp b/mlir/lib/Dialect/Linalg/Transforms/Promotion.cpp index 049f5cc..fd41ed3 100644 --- a/mlir/lib/Dialect/Linalg/Transforms/Promotion.cpp +++ b/mlir/lib/Dialect/Linalg/Transforms/Promotion.cpp @@ -235,7 +235,7 @@ FailureOr mlir::linalg::promoteSubviewAsNewBuffer( getConstantUpperBoundForIndex(materializedSize); size = failed(upperBound) ? materializedSize - : b.create(loc, upperBound.value()); + : b.create(loc, *upperBound); } LLVM_DEBUG(llvm::dbgs() << "Extracted tightest: " << size << "\n"); fullSizes.push_back(size); diff --git a/mlir/lib/Dialect/Linalg/Transforms/Tiling.cpp b/mlir/lib/Dialect/Linalg/Transforms/Tiling.cpp index 8123b66..7ffc586 100644 --- a/mlir/lib/Dialect/Linalg/Transforms/Tiling.cpp +++ b/mlir/lib/Dialect/Linalg/Transforms/Tiling.cpp @@ -650,7 +650,7 @@ linalg::tileReductionUsingForeachThread(RewriterBase &b, // 2. Create the ForeachThreadOp with an empty region. 
scf::ForeachThreadOp foreachThreadOp = b.create( - loc, identityTensor.value()->getResults(), + loc, (*identityTensor)->getResults(), ValueRange(materializedNonZeroNumThreads), mapping); // 3. Calculate the tile offsets and sizes for the subsequent loop that will @@ -768,7 +768,7 @@ linalg::tileReductionUsingForeachThread(RewriterBase &b, // 8. Return. ForeachThreadReductionTilingResult results; - results.initialOp = identityTensor.value(); + results.initialOp = *identityTensor; results.loops = foreachThreadOp; results.parallelTiledOp = tiledOp; results.mergeOp = mergeOp; diff --git a/mlir/lib/Dialect/MemRef/TransformOps/MemRefTransformOps.cpp b/mlir/lib/Dialect/MemRef/TransformOps/MemRefTransformOps.cpp index 391164c..c1755ab 100644 --- a/mlir/lib/Dialect/MemRef/TransformOps/MemRefTransformOps.cpp +++ b/mlir/lib/Dialect/MemRef/TransformOps/MemRefTransformOps.cpp @@ -32,7 +32,7 @@ transform::MemRefMultiBufferOp::applyToOne(memref::AllocOp target, return DiagnosedSilenceableFailure::silenceableFailure(std::move(diag)); } - results.push_back(newBuffer.value()); + results.push_back(*newBuffer); return DiagnosedSilenceableFailure::success(); } diff --git a/mlir/lib/Dialect/Quant/IR/QuantDialectBytecode.cpp b/mlir/lib/Dialect/Quant/IR/QuantDialectBytecode.cpp index a44bfa4..87cfe7d 100644 --- a/mlir/lib/Dialect/Quant/IR/QuantDialectBytecode.cpp +++ b/mlir/lib/Dialect/Quant/IR/QuantDialectBytecode.cpp @@ -205,9 +205,8 @@ QuantDialectBytecodeInterface::readCalibratedQuantizedType( llvm::APFloat::IEEEdouble()))) return reader.emitError("invalid CalibratedQuantizedType"), CalibratedQuantizedType(); - return CalibratedQuantizedType::get(expressedType, - min.value().convertToDouble(), - max.value().convertToDouble()); + return CalibratedQuantizedType::get(expressedType, min->convertToDouble(), + max->convertToDouble()); } void QuantDialectBytecodeInterface::write(CalibratedQuantizedType type, DialectBytecodeWriter &writer) const { @@ -234,7 +233,7 @@ UniformQuantizedType QuantDialectBytecodeInterface::readUniformQuantizedType( return reader.emitError("invalid UniformQuantizedType"), UniformQuantizedType(); return UniformQuantizedType::get(flags, storageType, expressedType, - scale.value().convertToDouble(), zeroPoint, + scale->convertToDouble(), zeroPoint, storageTypeMin, storageTypeMax); } void QuantDialectBytecodeInterface::write(UniformQuantizedType type, @@ -263,7 +262,7 @@ QuantDialectBytecodeInterface::readUniformQuantizedPerAxisType( FailureOr fl = reader.readAPFloatWithKnownSemantics(APFloat::IEEEdouble()); if (succeeded(fl)) { - val = fl.value().convertToDouble(); + val = fl->convertToDouble(); return success(); } return failure(); diff --git a/mlir/lib/Dialect/SCF/Transforms/TileUsingInterface.cpp b/mlir/lib/Dialect/SCF/Transforms/TileUsingInterface.cpp index 7fe9b4b..9d39a13 100644 --- a/mlir/lib/Dialect/SCF/Transforms/TileUsingInterface.cpp +++ b/mlir/lib/Dialect/SCF/Transforms/TileUsingInterface.cpp @@ -379,7 +379,7 @@ mlir::scf::tileUsingSCFForOp(RewriterBase &rewriter, TilingInterface op, innerMostLoop.getRegionIterArgs()); } - tilingResult.replacements = replacementOr.value(); + tilingResult.replacements = *replacementOr; LLVM_DEBUG({ if (!tilingResult.loops.empty()) { @@ -438,9 +438,8 @@ mlir::scf::tileReductionUsingScf(PatternRewriter &b, // 3. Generate the tiled implementation within the inner most loop. 
b.setInsertionPoint(loops.back().getBody()->getTerminator()); - Operation *parallelOp = - op.tileToPartialReduction(b, loc, identityTensor.value()->getResults(), - offsets, sizes, reductionDim); + Operation *parallelOp = op.tileToPartialReduction( + b, loc, (*identityTensor)->getResults(), offsets, sizes, reductionDim); SmallVector resultSizesList; for (size_t i = 0; i < offsets.size(); i++) @@ -448,8 +447,8 @@ mlir::scf::tileReductionUsingScf(PatternRewriter &b, b.createOrFold(loc, parallelOp->getResult(0), i)); SmallVector outOffsets(offsets.size(), b.getIndexAttr(0)); FailureOr> replacementOr = yieldTiledValues( - b, identityTensor.value()->getResults(), parallelOp->getResults(), - outOffsets, resultSizesList, loops); + b, (*identityTensor)->getResults(), parallelOp->getResults(), outOffsets, + resultSizesList, loops); if (failed(replacementOr)) return b.notifyMatchFailure(op, "failed to yield replacement"); @@ -464,12 +463,11 @@ mlir::scf::tileReductionUsingScf(PatternRewriter &b, // 4. Apply the merge reduction to combine all the partial values. b.setInsertionPointAfter(*loops.begin()); - Operation *mergeOp = - op.mergeReductions(b, loc, replacementOr.value(), reductionDim); + Operation *mergeOp = op.mergeReductions(b, loc, *replacementOr, reductionDim); b.replaceOp(op, mergeOp->getResults()); SCFReductionTilingResult results; - results.initialOp = identityTensor.value(); + results.initialOp = *identityTensor; results.loops = std::move(loops); results.parallelTiledOp = parallelOp; results.mergeOp = mergeOp; @@ -574,7 +572,7 @@ mlir::scf::tileConsumerAndFuseProducerGreedilyUsingSCFForOp( fusableProducer); if (failed(fusedProducerValue)) continue; - rewriter.replaceOp(candidateSliceOp, fusedProducerValue.value()); + rewriter.replaceOp(candidateSliceOp, *fusedProducerValue); // 2d. The operands of the fused producer might themselved be slices of // values produced by operations that implement the `TilingInterface`. 
@@ -646,8 +644,8 @@ mlir::scf::tileConsumerAndFuseProducerGreedilyUsingSCFForOp( iterArgNumber.value(), dstOp.getTiedOpOperand(fusableProducer)->get()); } - if (auto dstOp = fusedProducerValue.value() - .getDefiningOp()) { + if (auto dstOp = fusedProducerValue + ->getDefiningOp()) { scf::ForOp innerMostLoop = tileAndFuseResult.loops.back(); updateDestinationOperandsForTiledOp( rewriter, dstOp.getDpsInitOperand(resultNumber)->get(), diff --git a/mlir/lib/Dialect/SPIRV/IR/SPIRVCanonicalization.cpp b/mlir/lib/Dialect/SPIRV/IR/SPIRVCanonicalization.cpp index 306434c..bca91e5 100644 --- a/mlir/lib/Dialect/SPIRV/IR/SPIRVCanonicalization.cpp +++ b/mlir/lib/Dialect/SPIRV/IR/SPIRVCanonicalization.cpp @@ -240,11 +240,11 @@ OpFoldResult spirv::LogicalAndOp::fold(ArrayRef operands) { if (Optional rhs = getScalarOrSplatBoolAttr(operands.back())) { // x && true = x - if (rhs.value()) + if (*rhs) return getOperand1(); // x && false = false - if (!rhs.value()) + if (!*rhs) return operands.back(); } @@ -271,12 +271,12 @@ OpFoldResult spirv::LogicalOrOp::fold(ArrayRef operands) { assert(operands.size() == 2 && "spirv.LogicalOr should take two operands"); if (auto rhs = getScalarOrSplatBoolAttr(operands.back())) { - if (rhs.value()) + if (*rhs) // x || true = true return operands.back(); // x || false = x - if (!rhs.value()) + if (!*rhs) return getOperand1(); } diff --git a/mlir/lib/Dialect/SPIRV/Transforms/LowerABIAttributesPass.cpp b/mlir/lib/Dialect/SPIRV/Transforms/LowerABIAttributesPass.cpp index d77f7b5..04ebea7 100644 --- a/mlir/lib/Dialect/SPIRV/Transforms/LowerABIAttributesPass.cpp +++ b/mlir/lib/Dialect/SPIRV/Transforms/LowerABIAttributesPass.cpp @@ -143,8 +143,8 @@ static LogicalResult lowerEntryPointABIAttr(spirv::FuncOp funcOp, return funcOp.emitRemark("lower entry point failure: could not select " "execution model based on 'spirv.target_env'"); - builder.create(funcOp.getLoc(), executionModel.value(), - funcOp, interfaceVars); + builder.create(funcOp.getLoc(), *executionModel, funcOp, + interfaceVars); // Specifies the spirv.ExecutionModeOp. if (DenseI32ArrayAttr workgroupSizeAttr = entryPointAttr.getWorkgroupSize()) { diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/Sparsification.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/Sparsification.cpp index 8fbbf6a..4bc24af 100644 --- a/mlir/lib/Dialect/SparseTensor/Transforms/Sparsification.cpp +++ b/mlir/lib/Dialect/SparseTensor/Transforms/Sparsification.cpp @@ -505,7 +505,7 @@ static bool computeIterationGraph(Merger &merger, linalg::GenericOp op, // Filter loops should be constructed after all the dependent loops, // i.e., d0 + d1 < filter_loop(d0 + d1) - if (tldx && merger.isFilterLoop(tldx.value())) { + if (tldx && merger.isFilterLoop(*tldx)) { assert(!ta.isa() && !isDenseDLT(getDimLevelType(enc, d))); addAffineOrderings(adjM, inDegree, ta, AffineExpr(), std::nullopt, @@ -1042,11 +1042,11 @@ static void genInvariants(Merger &merger, CodeGen &codegen, OpBuilder &builder, for (unsigned d = 0, rank = map.getNumResults(); d < rank; d++) { AffineExpr a = map.getResult(toOrigDim(enc, d)); Optional sldx = merger.getLoopIdx(t.getOperandNumber(), d); - if (sldx && merger.isFilterLoop(sldx.value())) { - if (!codegen.getLoopIdxValue(sldx.value())) + if (sldx && merger.isFilterLoop(*sldx)) { + if (!codegen.getLoopIdxValue(*sldx)) // The filter loops has not been constructed. 
return; - if (sldx.value() == ldx) + if (*sldx == ldx) atLevel = true; } else if (!isInvariantAffine(codegen, a, ldx, atLevel)) return; // still in play @@ -1351,7 +1351,7 @@ static bool startLoopSeq(Merger &merger, CodeGen &codegen, OpBuilder &builder, } else { // sparse/singleton dim levels. tids.push_back(tid); - dims.push_back(dim.value()); + dims.push_back(*dim); } }); @@ -1435,11 +1435,11 @@ static void translateBitsToTidDimPairs( return; } condTids.push_back(tid); - condDims.push_back(dim.value()); + condDims.push_back(*dim); } else if (isDenseDLT(dlt)) { // TODO: get rid of extraTids and extraDims. extraTids.push_back(tid); - extraDims.push_back(dim.value()); + extraDims.push_back(*dim); } else { assert(isUndefDLT(dlt)); if (tid >= op.getNumDpsInputs()) @@ -1680,7 +1680,7 @@ public: if (!optExp.has_value()) return failure(); - unsigned exp = optExp.value(); + unsigned exp = *optExp; OpOperand *sparseOut = nullptr; unsigned outerParNest = 0; // Computes a topologically sorted iteration graph to ensure tensors diff --git a/mlir/lib/Dialect/SparseTensor/Utils/Merger.cpp b/mlir/lib/Dialect/SparseTensor/Utils/Merger.cpp index bf66120..b9f8d26 100644 --- a/mlir/lib/Dialect/SparseTensor/Utils/Merger.cpp +++ b/mlir/lib/Dialect/SparseTensor/Utils/Merger.cpp @@ -1053,7 +1053,7 @@ Optional Merger::buildTensorExp(linalg::GenericOp op, Value v) { if (def->getNumOperands() == 1) { auto x = buildTensorExp(op, def->getOperand(0)); if (x.has_value()) { - unsigned e = x.value(); + unsigned e = *x; if (isa(def)) return addExp(kAbsF, e); if (isa(def)) @@ -1132,8 +1132,8 @@ Optional Merger::buildTensorExp(linalg::GenericOp op, Value v) { auto x = buildTensorExp(op, def->getOperand(0)); auto y = buildTensorExp(op, def->getOperand(1)); if (x.has_value() && y.has_value()) { - unsigned e0 = x.value(); - unsigned e1 = y.value(); + unsigned e0 = *x; + unsigned e1 = *y; if (isa(def)) return addExp(kMulF, e0, e1); if (isa(def)) @@ -1188,8 +1188,8 @@ Optional Merger::buildTensorExp(linalg::GenericOp op, Value v) { auto y = buildTensorExp(op, def->getOperand(1)); auto z = buildTensorExp(op, def->getOperand(2)); if (x.has_value() && y.has_value() && z.has_value()) { - unsigned e0 = x.value(); - unsigned e1 = y.value(); + unsigned e0 = *x; + unsigned e1 = *y; if (auto redop = dyn_cast(def)) { if (isAdmissibleBranch(redop, redop.getRegion())) return addExp(kReduce, e0, e1, Value(), def); diff --git a/mlir/lib/Dialect/Tensor/IR/TensorOps.cpp b/mlir/lib/Dialect/Tensor/IR/TensorOps.cpp index a469574..99898f6 100644 --- a/mlir/lib/Dialect/Tensor/IR/TensorOps.cpp +++ b/mlir/lib/Dialect/Tensor/IR/TensorOps.cpp @@ -3197,7 +3197,7 @@ void PackOp::build(OpBuilder &builder, OperationState &state, Value source, SmallVector dynamicTileSizes; dispatchIndexOpFoldResults(innerTiles, dynamicTileSizes, staticTileSizes); build(builder, state, dest.getType(), source, dest, - paddingValue ? paddingValue.value() : nullptr, + paddingValue ? *paddingValue : nullptr, outerDimsPerm.empty() ? 
nullptr : builder.getDenseI64ArrayAttr(outerDimsPerm), builder.getDenseI64ArrayAttr(innerDimsPos), dynamicTileSizes, diff --git a/mlir/lib/Dialect/Tensor/IR/TensorTilingInterfaceImpl.cpp b/mlir/lib/Dialect/Tensor/IR/TensorTilingInterfaceImpl.cpp index c476cd1..4632467 100644 --- a/mlir/lib/Dialect/Tensor/IR/TensorTilingInterfaceImpl.cpp +++ b/mlir/lib/Dialect/Tensor/IR/TensorTilingInterfaceImpl.cpp @@ -267,12 +267,12 @@ static UnpackTileDimInfo getUnpackTileDimInfo(OpBuilder &b, UnPackOp unpackOp, getValueOrCreateConstantIndexOp(b, loc, tileSize)); std::optional cstInnerSize = getConstantIntValue(innerTileSize); if (!failed(cstSize) && cstInnerSize) { - if (cstSize.value() % cstInnerSize.value() == 0) + if (*cstSize % *cstInnerSize == 0) info.isAlignedToInnerTileSize = true; // If the tiling size equals to the inner tiling size, the outer dims are // always 1. - if (cstInnerSize.value() == cstSize.value()) { + if (*cstInnerSize == *cstSize) { auto lhs = AV(dim0).bind(tileOffset); auto rhs = AV(dim1).bind(innerTileSize); info.sourceOffset = ab.floor(lhs, rhs); diff --git a/mlir/lib/Dialect/Tensor/Transforms/ExtractSliceFromReshapeUtils.cpp b/mlir/lib/Dialect/Tensor/Transforms/ExtractSliceFromReshapeUtils.cpp index 67c949c..fbae33e 100644 --- a/mlir/lib/Dialect/Tensor/Transforms/ExtractSliceFromReshapeUtils.cpp +++ b/mlir/lib/Dialect/Tensor/Transforms/ExtractSliceFromReshapeUtils.cpp @@ -210,6 +210,6 @@ tensor::simplifyCollapseShapeWithRankReducingExtractSlice( return rewriter .replaceOpWithNewOp( - op, sliceOp.getResult(), info->newReassociationIndices.value()) + op, sliceOp.getResult(), *info->newReassociationIndices) .getOperation(); } diff --git a/mlir/lib/Dialect/Tensor/Transforms/SwapExtractSliceWithProducerPatterns.cpp b/mlir/lib/Dialect/Tensor/Transforms/SwapExtractSliceWithProducerPatterns.cpp index 43c0cdc..65176ed 100644 --- a/mlir/lib/Dialect/Tensor/Transforms/SwapExtractSliceWithProducerPatterns.cpp +++ b/mlir/lib/Dialect/Tensor/Transforms/SwapExtractSliceWithProducerPatterns.cpp @@ -38,5 +38,5 @@ FailureOr tensor::replaceExtractSliceWithTiledProducer( if (failed(tiledResult)) return failure(); - return tiledResult.value(); + return *tiledResult; } diff --git a/mlir/lib/Dialect/Utils/ReshapeOpsUtils.cpp b/mlir/lib/Dialect/Utils/ReshapeOpsUtils.cpp index cb5b93b..c0f55b3 100644 --- a/mlir/lib/Dialect/Utils/ReshapeOpsUtils.cpp +++ b/mlir/lib/Dialect/Utils/ReshapeOpsUtils.cpp @@ -414,7 +414,7 @@ mlir::getSimplifyCollapseShapeWithRankReducingSliceInfo( for (const auto &[nonUnitDim, indices] : llvm::zip(*trivialSegments, reassociationIndices)) { if (nonUnitDim) { - sliceShape.push_back(sourceType.getDimSize(nonUnitDim.value())); + sliceShape.push_back(sourceType.getDimSize(*nonUnitDim)); continue; } llvm::append_range(sliceShape, llvm::map_range(indices, [&](int64_t idx) { diff --git a/mlir/lib/Dialect/Vector/Transforms/VectorTransforms.cpp b/mlir/lib/Dialect/Vector/Transforms/VectorTransforms.cpp index 048c329..e67f14739 100644 --- a/mlir/lib/Dialect/Vector/Transforms/VectorTransforms.cpp +++ b/mlir/lib/Dialect/Vector/Transforms/VectorTransforms.cpp @@ -557,7 +557,7 @@ public: loc, op.getLhs(), b, acc, kind, rewriter, isInt); if (!mult.has_value()) return failure(); - rewriter.replaceOp(op, mult.value()); + rewriter.replaceOp(op, *mult); return success(); } @@ -575,8 +575,7 @@ public: createContractArithOp(loc, a, op.getRhs(), r, kind, rewriter, isInt); if (!m.has_value()) return failure(); - result = rewriter.create(loc, resType, m.value(), - result, pos); + result = 
rewriter.create(loc, resType, *m, result, pos); } rewriter.replaceOp(op, result); return success(); @@ -1878,7 +1877,7 @@ ContractionOpLowering::matchAndRewrite(vector::ContractionOp op, auto newOp = lowerParallel(op, lhsIndex, rhsIndex, rewriter); if (failed(newOp)) return failure(); - rewriter.replaceOp(op, newOp.value()); + rewriter.replaceOp(op, *newOp); return success(); } @@ -1899,7 +1898,7 @@ ContractionOpLowering::matchAndRewrite(vector::ContractionOp op, auto newOp = lowerParallel(op, lhsIndex, /*rhsIndex=*/-1, rewriter); if (failed(newOp)) return failure(); - rewriter.replaceOp(op, newOp.value()); + rewriter.replaceOp(op, *newOp); return success(); } } @@ -1911,7 +1910,7 @@ ContractionOpLowering::matchAndRewrite(vector::ContractionOp op, auto newOp = lowerParallel(op, /*lhsIndex=*/-1, rhsIndex, rewriter); if (failed(newOp)) return failure(); - rewriter.replaceOp(op, newOp.value()); + rewriter.replaceOp(op, *newOp); return success(); } } @@ -1921,7 +1920,7 @@ ContractionOpLowering::matchAndRewrite(vector::ContractionOp op, auto newOp = lowerReduction(op, rewriter); if (failed(newOp)) return failure(); - rewriter.replaceOp(op, newOp.value()); + rewriter.replaceOp(op, *newOp); return success(); } @@ -2021,8 +2020,8 @@ ContractionOpLowering::lowerReduction(vector::ContractionOp op, return rewriter.notifyMatchFailure(op, [&](Diagnostic &diag) { diag << "expected iterIndex=" << iterIndex << "to map to a RHS dimension"; }); - int64_t lhsIndex = lookupLhs.value(); - int64_t rhsIndex = lookupRhs.value(); + int64_t lhsIndex = *lookupLhs; + int64_t rhsIndex = *lookupRhs; int64_t dimSize = lhsType.getDimSize(lhsIndex); if (dimSize != rhsType.getDimSize(rhsIndex)) return rewriter.notifyMatchFailure(op, [&](Diagnostic &diag) { diff --git a/mlir/lib/Target/LLVMIR/ConvertFromLLVMIR.cpp b/mlir/lib/Target/LLVMIR/ConvertFromLLVMIR.cpp index 77de573..c1a83e8 100644 --- a/mlir/lib/Target/LLVMIR/ConvertFromLLVMIR.cpp +++ b/mlir/lib/Target/LLVMIR/ConvertFromLLVMIR.cpp @@ -702,7 +702,7 @@ GlobalOp Importer::processGlobal(llvm::GlobalVariable *globalVar) { convertConstantExpr(globalVar->getInitializer()); if (failed(initializer)) return {}; - builder.create(globalOp.getLoc(), initializer.value()); + builder.create(globalOp.getLoc(), *initializer); } if (globalVar->hasAtLeastLocalUnnamedAddr()) { globalOp.setUnnamedAddr( @@ -865,7 +865,7 @@ FailureOr Importer::convertConstantExpr(llvm::Constant *constant) { FailureOr converted = convertConstant(constantToConvert); if (failed(converted)) return failure(); - mapValue(constantToConvert, converted.value()); + mapValue(constantToConvert, *converted); } // Update the constant insertion point and return the converted constant. 
@@ -903,7 +903,7 @@ Importer::convertValues(ArrayRef values) { FailureOr converted = convertValue(value); if (failed(converted)) return failure(); - remapped.push_back(converted.value()); + remapped.push_back(*converted); } return remapped; } @@ -912,7 +912,7 @@ IntegerAttr Importer::matchIntegerAttr(llvm::Value *value) { IntegerAttr integerAttr; FailureOr converted = convertValue(value); bool success = succeeded(converted) && - matchPattern(converted.value(), m_Constant(&integerAttr)); + matchPattern(*converted, m_Constant(&integerAttr)); assert(success && "expected a constant value"); (void)success; return integerAttr; @@ -933,7 +933,7 @@ Importer::convertBranchArgs(llvm::Instruction *branch, llvm::BasicBlock *target, FailureOr converted = convertValue(value); if (failed(converted)) return failure(); - blockArguments.push_back(converted.value()); + blockArguments.push_back(*converted); } return success(); } @@ -949,13 +949,13 @@ Importer::convertCallTypeAndOperands(llvm::CallBase *callInst, FailureOr called = convertValue(callInst->getCalledOperand()); if (failed(called)) return failure(); - operands.push_back(called.value()); + operands.push_back(*called); } SmallVector args(callInst->args()); FailureOr> arguments = convertValues(args); if (failed(arguments)) return failure(); - llvm::append_range(operands, arguments.value()); + llvm::append_range(operands, *arguments); return success(); } @@ -1004,7 +1004,7 @@ LogicalResult Importer::convertOperation(OpBuilder &odsBuilder, FailureOr condition = convertValue(brInst->getCondition()); if (failed(condition)) return failure(); - builder.create(loc, condition.value(), succBlocks.front(), + builder.create(loc, *condition, succBlocks.front(), succBlockArgs.front(), succBlocks.back(), succBlockArgs.back()); } else { @@ -1041,7 +1041,7 @@ LogicalResult Importer::convertOperation(OpBuilder &odsBuilder, caseBlocks[it.index()] = lookupBlock(succBB); } - builder.create(loc, condition.value(), lookupBlock(defaultBB), + builder.create(loc, *condition, lookupBlock(defaultBB), defaultBlockArgs, caseValues, caseBlocks, caseOperandRefs); return success(); @@ -1081,7 +1081,7 @@ LogicalResult Importer::convertOperation(OpBuilder &odsBuilder, FailureOr operand = convertConstantExpr(lpInst->getClause(i)); if (failed(operand)) return failure(); - operands.push_back(operand.value()); + operands.push_back(*operand); } Type type = convertType(lpInst->getType()); @@ -1136,13 +1136,12 @@ LogicalResult Importer::convertOperation(OpBuilder &odsBuilder, FailureOr index = convertValue(operand); if (failed(index)) return failure(); - indices.push_back(index.value()); + indices.push_back(*index); } Type type = convertType(inst->getType()); - Value res = - builder.create(loc, type, sourceElementType, basePtr.value(), - indices, gepInst->isInBounds()); + Value res = builder.create(loc, type, sourceElementType, *basePtr, + indices, gepInst->isInBounds()); mapValue(inst, res); return success(); } diff --git a/mlir/lib/Target/LLVMIR/Dialect/LLVMIR/LLVMToLLVMIRTranslation.cpp b/mlir/lib/Target/LLVMIR/Dialect/LLVMIR/LLVMToLLVMIRTranslation.cpp index f8f1637..697121e 100644 --- a/mlir/lib/Target/LLVMIR/Dialect/LLVMIR/LLVMToLLVMIRTranslation.cpp +++ b/mlir/lib/Target/LLVMIR/Dialect/LLVMIR/LLVMToLLVMIRTranslation.cpp @@ -310,7 +310,7 @@ convertCallLLVMIntrinsicOp(CallIntrinsicOp &op, llvm::IRBuilderBase &builder, getOverloadedDeclaration(op, id, module, moduleTranslation); if (failed(fnOrFailure)) return failure(); - fn = fnOrFailure.value(); + fn = *fnOrFailure; } else { fn = 
llvm::Intrinsic::getDeclaration(module, id, {}); } diff --git a/mlir/test/lib/Analysis/TestMemRefDependenceCheck.cpp b/mlir/test/lib/Analysis/TestMemRefDependenceCheck.cpp index d1bcb8d..7c384e1 100644 --- a/mlir/test/lib/Analysis/TestMemRefDependenceCheck.cpp +++ b/mlir/test/lib/Analysis/TestMemRefDependenceCheck.cpp @@ -53,13 +53,13 @@ getDirectionVectorStr(bool ret, unsigned numCommonLoops, unsigned loopNestDepth, for (const auto &dependenceComponent : dependenceComponents) { std::string lbStr = "-inf"; if (dependenceComponent.lb.has_value() && - dependenceComponent.lb.value() != std::numeric_limits::min()) - lbStr = std::to_string(dependenceComponent.lb.value()); + *dependenceComponent.lb != std::numeric_limits::min()) + lbStr = std::to_string(*dependenceComponent.lb); std::string ubStr = "+inf"; if (dependenceComponent.ub.has_value() && - dependenceComponent.ub.value() != std::numeric_limits::max()) - ubStr = std::to_string(dependenceComponent.ub.value()); + *dependenceComponent.ub != std::numeric_limits::max()) + ubStr = std::to_string(*dependenceComponent.ub); result += "[" + lbStr + ", " + ubStr + "]"; } diff --git a/mlir/test/lib/Dialect/Shape/TestShapeMappingAnalysis.cpp b/mlir/test/lib/Dialect/Shape/TestShapeMappingAnalysis.cpp index f50988e..7310958 100644 --- a/mlir/test/lib/Dialect/Shape/TestShapeMappingAnalysis.cpp +++ b/mlir/test/lib/Dialect/Shape/TestShapeMappingAnalysis.cpp @@ -26,7 +26,7 @@ struct TestShapeMappingPass llvm::Optional> maybeAnalysis = getCachedAnalysis(); if (maybeAnalysis.has_value()) - maybeAnalysis.value().get().print(llvm::errs()); + maybeAnalysis->get().print(llvm::errs()); else llvm::errs() << "No cached ShapeMappingAnalysis existed."; } diff --git a/mlir/test/lib/Interfaces/TilingInterface/TestTilingInterface.cpp b/mlir/test/lib/Interfaces/TilingInterface/TestTilingInterface.cpp index b8c67c3..382847d 100644 --- a/mlir/test/lib/Interfaces/TilingInterface/TestTilingInterface.cpp +++ b/mlir/test/lib/Interfaces/TilingInterface/TestTilingInterface.cpp @@ -135,7 +135,7 @@ LinalgTransformationFilter::checkAndNotify(PatternRewriter &rewriter, void LinalgTransformationFilter::replaceLinalgTransformationFilter( PatternRewriter &rewriter, Operation *op) const { if (replacement.has_value()) - op->setAttr(kLinalgTransformMarker, replacement.value()); + op->setAttr(kLinalgTransformMarker, *replacement); else op->removeAttr(rewriter.getStringAttr(kLinalgTransformMarker)); } diff --git a/mlir/tools/mlir-tblgen/LLVMIRConversionGen.cpp b/mlir/tools/mlir-tblgen/LLVMIRConversionGen.cpp index 555835f..2297c55 100644 --- a/mlir/tools/mlir-tblgen/LLVMIRConversionGen.cpp +++ b/mlir/tools/mlir-tblgen/LLVMIRConversionGen.cpp @@ -256,7 +256,7 @@ static LogicalResult emitOneMLIRBuilder(const Record &record, raw_ostream &os, as << formatv("if (failed(_llvmir_gen_operand_{0}))\n" " return failure();\n", name); - bs << formatv("_llvmir_gen_operand_{0}.value()", name); + bs << formatv("*_llvmir_gen_operand_{0}", name); } } else if (isResultName(op, name)) { if (op.getNumResults() != 1) -- 2.7.4
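Note for reviewers: a minimal, self-contained C++17 sketch of the rationale stated in
the commit message. It is not MLIR code; the helper names below are invented for
illustration. value() carries a throwing std::bad_optional_access path whose
availability is gated by _LIBCPP_AVAILABILITY_BAD_OPTIONAL_ACCESS on older Apple
toolchains, while operator*/operator-> are unchecked and compile everywhere; they are
safe at the touched call sites because, as the hunks above show, each access is already
guarded by has_value(), a boolean test, an early return, or an assert.

#include <cassert>
#include <iostream>
#include <optional>

// Hypothetical stand-in for a patched call site: the caller establishes that
// the optional is engaged before reading it.
int tripCountOrDefault(std::optional<int> maybeTripCount, int fallback) {
  if (!maybeTripCount.has_value())
    return fallback;
  // Before the patch: maybeTripCount.value(), which may throw
  // std::bad_optional_access (availability-gated on older Apple SDKs).
  // After the patch: *maybeTripCount, an unchecked dereference that is valid
  // here because has_value() was tested above.
  return *maybeTripCount;
}

// Illustrative member-access case: operator-> replaces value().member() the
// same way (e.g. min->convertToDouble() in the Quant bytecode hunks).
struct Attr {
  double convertToDouble() const { return 1.5; }
};

double attrValue(std::optional<Attr> attr) {
  assert(attr.has_value() && "expected an attribute");
  return attr->convertToDouble();
}

int main() {
  std::cout << tripCountOrDefault(8, 1) << "\n";            // prints 8
  std::cout << tripCountOrDefault(std::nullopt, 1) << "\n"; // prints 1
  std::cout << attrValue(Attr{}) << "\n";                   // prints 1.5
}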