let extraClassDeclaration = [{
bool isExternal() { return !initial_value(); }
bool isUninitialized() {
- return !isExternal() && initial_value().getValue().isa<UnitAttr>();
+ return !isExternal() && initial_value()->isa<UnitAttr>();
}
/// Returns the constant initial value if the memref.global is a constant,
/// or null otherwise.
if (parser.parseKeyword(&keyword))
return failure();
if (Optional<EnumClass> attr = spirv::symbolizeEnum<EnumClass>(keyword)) {
- value = attr.getValue();
+ value = *attr;
return success();
}
return parser.emitError(loc, "invalid ")
auto element = FieldParser<ElementT>::parse(parser);
if (failed(element))
return failure();
- elements.push_back(element.getValue());
+ elements.push_back(*element);
return success();
};
if (parser.parseCommaSeparatedList(elementParser))
SmallVector<int64_t, 8> coneSample(llvm::map_range(shrunkenConeSample, ceil));
// 6) Return transform * concat(boundedSample, coneSample).
- SmallVector<int64_t, 8> &sample = boundedSample.getValue();
+ SmallVector<int64_t, 8> &sample = *boundedSample;
sample.append(coneSample.begin(), coneSample.end());
return transform.postMultiplyWithColumn(sample);
}
arith::symbolizeAtomicRMWKind(
static_cast<uint64_t>(reduction.cast<IntegerAttr>().getInt()));
assert(reductionOp && "Reduction operation cannot be of None Type");
- arith::AtomicRMWKind reductionOpValue = reductionOp.getValue();
+ arith::AtomicRMWKind reductionOpValue = *reductionOp;
identityVals.push_back(
arith::getIdentityValue(reductionOpValue, resultType, rewriter, loc));
}
arith::symbolizeAtomicRMWKind(
reductions[i].cast<IntegerAttr>().getInt());
assert(reductionOp && "Reduction Operation cannot be of None Type");
- arith::AtomicRMWKind reductionOpValue = reductionOp.getValue();
+ arith::AtomicRMWKind reductionOpValue = *reductionOp;
rewriter.setInsertionPoint(&parOp.getBody()->back());
auto reduceOp = rewriter.create<scf::ReduceOp>(
loc, affineParOpTerminator->getOperand(i));
// Add a keyword to the module name to avoid symbolic conflict.
std::string spvModuleName = (kSPIRVModule + moduleOp.getName()).str();
auto spvModule = rewriter.create<spirv::ModuleOp>(
- moduleOp.getLoc(), addressingModel, memoryModel.getValue(), llvm::None,
+ moduleOp.getLoc(), addressingModel, *memoryModel, llvm::None,
StringRef(spvModuleName));
// Move the region from the module op into the SPIR-V module.
/// Checks whether the given LLVM::CallOp is a vulkan launch call op.
bool isVulkanLaunchCallOp(LLVM::CallOp callOp) {
- return (callOp.getCallee() &&
- callOp.getCallee().getValue() == kVulkanLaunch &&
+ return (callOp.getCallee() && *callOp.getCallee() == kVulkanLaunch &&
callOp.getNumOperands() >= kVulkanLaunchNumConfigOperands);
}
/// op.
bool isCInterfaceVulkanLaunchCallOp(LLVM::CallOp callOp) {
return (callOp.getCallee() &&
- callOp.getCallee().getValue() == kCInterfaceVulkanLaunch &&
+ *callOp.getCallee() == kCInterfaceVulkanLaunch &&
callOp.getNumOperands() >= kVulkanLaunchNumConfigOperands);
}
// Take advantage if the index is constant.
MemRefType memRefType = operandType.cast<MemRefType>();
if (Optional<int64_t> index = getConstantDimIndex(dimOp)) {
- int64_t i = index.getValue();
+ int64_t i = *index;
if (memRefType.isDynamicDim(i)) {
// extract dynamic size from the memref descriptor.
MemRefDescriptor descriptor(adaptor.source());
/// Encodes the SPIR-V module's symbolic name into the name of the entry point
/// function.
static LogicalResult encodeKernelName(spirv::ModuleOp module) {
- StringRef spvModuleName = module.sym_name().getValue();
+ StringRef spvModuleName = *module.sym_name();
// We already know that the module contains exactly one entry point function
// based on `getKernelGlobalVariables()` call. Update this function's name
// to:
ElementsAttr branchWeights = nullptr;
if (auto weights = op.branch_weights()) {
VectorType weightType = VectorType::get(2, rewriter.getI32Type());
- branchWeights =
- DenseElementsAttr::get(weightType, weights.getValue().getValue());
+ branchWeights = DenseElementsAttr::get(weightType, weights->getValue());
}
rewriter.replaceOpWithNewOp<LLVM::CondBrOp>(
/*isVolatile=*/false,
/*isNonTemporal=*/false);
}
- auto memoryAccess = op.memory_access().getValue();
+ auto memoryAccess = *op.memory_access();
switch (memoryAccess) {
case spirv::MemoryAccess::Aligned:
case spirv::MemoryAccess::None:
} else if (elementTy.isa<IntegerType>() && !padOp.quantization_info()) {
constantAttr = rewriter.getIntegerAttr(elementTy, 0);
} else if (elementTy.isa<IntegerType>() && padOp.quantization_info()) {
- int64_t value = padOp.quantization_info().getValue().getInputZp();
+ int64_t value = padOp.quantization_info()->getInputZp();
constantAttr = rewriter.getIntegerAttr(elementTy, value);
}
if (constantAttr)
return success();
}
- auto quantizationInfo = op.quantization_info().getValue();
+ auto quantizationInfo = *op.quantization_info();
auto aZp = rewriter.create<arith::ConstantOp>(
loc, rewriter.getI32IntegerAttr(quantizationInfo.getAZp()));
auto bZp = rewriter.create<arith::ConstantOp>(
return success();
}
- auto quantizationInfo = op.quantization_info().getValue();
+ auto quantizationInfo = *op.quantization_info();
auto inputZp = rewriter.create<arith::ConstantOp>(
loc, rewriter.getI32IntegerAttr(quantizationInfo.getInputZp()));
auto outputZp = rewriter.create<arith::ConstantOp>(
// If we have quantization information we need to apply an offset
// for the input zp value.
if (op.quantization_info()) {
- auto quantizationInfo = op.quantization_info().getValue();
+ auto quantizationInfo = *op.quantization_info();
auto inputZp = rewriter.create<arith::ConstantOp>(
loc, b.getIntegerAttr(accETy, quantizationInfo.getInputZp()));
Value offset =
// If we have quantization information we need to apply output
// zeropoint.
if (op.quantization_info()) {
- auto quantizationInfo = op.quantization_info().getValue();
+ auto quantizationInfo = *op.quantization_info();
auto outputZp = rewriter.create<arith::ConstantOp>(
loc, b.getIntegerAttr(scaled.getType(),
quantizationInfo.getOutputZp()));
vector::createOrFoldDimOp(b, loc, xferOp.getSource(), *dim);
AffineExpr d0, d1;
bindDims(xferOp.getContext(), d0, d1);
- Value base = xferOp.getIndices()[dim.getValue()];
+ Value base = xferOp.getIndices()[*dim];
Value memrefIdx = makeComposedAffineApply(b, loc, d0 + d1, {base, iv});
cond = lb.create<arith::CmpIOp>(arith::CmpIPredicate::sgt, memrefDim,
memrefIdx);
LLVM_DEBUG(llvm::dbgs() << "Dynamic shapes not yet supported\n");
return None;
}
- return getMemRefEltSizeInBytes(memRefType) * numElements.getValue();
+ return getMemRefEltSizeInBytes(memRefType) * *numElements;
}
/// Returns the size of memref data in bytes if it's statically shaped, None
LLVM_DEBUG(llvm::dbgs() << "Cannot determine if the slice is valid\n");
return SliceComputationResult::GenericFailure;
}
- if (!isSliceValid.getValue())
+ if (!*isSliceValid)
return SliceComputationResult::IncorrectSliceFailure;
return SliceComputationResult::Success;
return false;
}
- if (!isMaximal.getValue()) {
+ if (!*isMaximal) {
LLVM_DEBUG(llvm::dbgs()
<< "Src loop can't be removed: fusion is not maximal\n");
return false;
}
// Set dstLoopDepth based on best values from search.
- *dstLoopDepth = bestDstLoopDepth.getValue();
+ *dstLoopDepth = *bestDstLoopDepth;
LLVM_DEBUG(
llvm::dbgs() << " LoopFusion fusion stats:"
continue;
// Adjust the tile size to the largest factor of the trip count less than
// tSize.
- uint64_t constTripCount = mayConst.getValue();
+ uint64_t constTripCount = *mayConst;
if (constTripCount > 1 && tSizeAdjusted > constTripCount / 2)
tSizeAdjusted = constTripCount / 2;
while (constTripCount % tSizeAdjusted != 0)
// Check how many times larger the cache size is when compared to footprint.
uint64_t cacheSizeBytes = cacheSizeInKiB * 1024;
- uint64_t excessFactor = llvm::divideCeil(fp.getValue(), cacheSizeBytes);
+ uint64_t excessFactor = llvm::divideCeil(*fp, cacheSizeBytes);
if (excessFactor <= 1) {
// No need of any tiling - set tile size to 1.
std::fill(tileSizes->begin(), tileSizes->end(), 1);
bool siblingFusionUser) {
// Check if the reduction loop is a single iteration loop.
Optional<uint64_t> tripCount = getConstantTripCount(forOp);
- if (!tripCount || tripCount.getValue() != 1)
+ if (!tripCount || *tripCount != 1)
return failure();
auto iterOperands = forOp.getIterOperands();
auto *parentOp = forOp->getParentOp();
return WalkResult::interrupt();
}
- stats->tripCountMap[childForOp] = maybeConstTripCount.getValue();
+ stats->tripCountMap[childForOp] = *maybeConstTripCount;
return WalkResult::advance();
});
return !walkResult.wasInterrupted();
// TODO: extend this for arbitrary affine bounds.
LogicalResult mlir::promoteIfSingleIteration(AffineForOp forOp) {
Optional<uint64_t> tripCount = getConstantTripCount(forOp);
- if (!tripCount || tripCount.getValue() != 1)
+ if (!tripCount || *tripCount != 1)
return failure();
if (forOp.getLowerBoundMap().getNumResults() != 1)
LLVM_DEBUG(forOp.emitRemark("non-constant trip count loop not handled"));
return success();
}
- uint64_t tripCount = mayBeConstTripCount.getValue();
+ uint64_t tripCount = *mayBeConstTripCount;
assert(isOpwiseShiftValid(forOp, shifts) &&
"shifts will lead to an invalid transformation\n");
for (unsigned j = 0; j < maxLoopDepth; ++j) {
unsigned permIndex = loopPermMapInv[j];
assert(depComps[permIndex].lb);
- int64_t depCompLb = depComps[permIndex].lb.getValue();
+ int64_t depCompLb = *depComps[permIndex].lb;
if (depCompLb > 0)
break;
if (depCompLb < 0)
return failure();
}
- if (numElements.getValue() == 0) {
+ if (*numElements == 0) {
LLVM_DEBUG(llvm::dbgs() << "Nothing to copy\n");
*sizeInBytes = 0;
return success();
// Record it.
fastBufferMap[memref] = fastMemRef;
// fastMemRefType is a constant shaped memref.
- *sizeInBytes = getMemRefSizeInBytes(fastMemRefType).getValue();
+ *sizeInBytes = *getMemRefSizeInBytes(fastMemRefType);
LLVM_DEBUG(emitRemarkForBlock(*block)
<< "Creating fast buffer of type " << fastMemRefType
<< " and size " << llvm::divideCeil(*sizeInBytes, 1024)
*sizeInBytes = 0;
}
- auto numElementsSSA =
- top.create<arith::ConstantIndexOp>(loc, numElements.getValue());
+ auto numElementsSSA = top.create<arith::ConstantIndexOp>(loc, *numElements);
Value dmaStride = nullptr;
Value numEltPerDmaStride = nullptr;
getGlobalFor(constantOp, options.bufferAlignment);
if (failed(globalOp))
return failure();
- memref::GlobalOp globalMemref = globalOp.getValue();
+ memref::GlobalOp globalMemref = *globalOp;
replaceOpWithNewBufferizedOp<memref::GetGlobalOp>(
rewriter, op, globalMemref.type(), globalMemref.getName());
if (failed(refCount))
return failure();
- int cnt = refCount.getValue();
+ int cnt = *refCount;
// Create `add_ref` operation before the operand owner.
if (cnt > 0) {
getAliasingOpOperand(op, opResult, state);
assert(aliasingOpOperands.size() == 1 &&
"expected exactly 1 aliasing OpOperand");
- assert(aliasingOpOperands.front()->getOperandNumber() ==
- maybeEquiv.getValue() &&
+ assert(aliasingOpOperands.front()->getOperandNumber() == *maybeEquiv &&
"inconsistent analysis state");
#endif
return BufferRelation::Equivalent;
return failure();
rewriter.replaceOpWithNewOp<SwitchOp>(op, op.getFlag(), defaultDest,
- defaultOperands, caseValues.getValue(),
+ defaultOperands, *caseValues,
newCaseDests, newCaseOperands);
return success();
}
return emitOpError("callee must not be empty");
if (Optional<ArrayAttr> argsAttr = args()) {
- for (Attribute arg : argsAttr.getValue()) {
+ for (Attribute arg : *argsAttr) {
if (arg.getType().isa<IndexType>()) {
int64_t index = arg.cast<IntegerAttr>().getInt();
// Args with elements of type index must be in range
}
if (Optional<ArrayAttr> templateArgsAttr = template_args()) {
- for (Attribute tArg : templateArgsAttr.getValue()) {
+ for (Attribute tArg : *templateArgsAttr) {
if (!tArg.isa<TypeAttr>() && !tArg.isa<IntegerAttr>() &&
!tArg.isa<FloatAttr>() && !tArg.isa<emitc::OpaqueAttr>())
return emitOpError("template argument has invalid type");
}
llvm::Linker linker(*ret);
- for (std::unique_ptr<llvm::Module> &libModule : mbModules.getValue()) {
+ for (std::unique_ptr<llvm::Module> &libModule : *mbModules) {
// This bitcode linking code is substantially similar to what is used in
// hip-clang. It imports the library functions into the module, allowing LLVM
// optimization passes (which must run after linking) to optimize across the
return parser.emitError(predicateLoc)
<< "'" << predicateAttr.getValue()
<< "' is an incorrect value of the 'predicate' attribute";
- predicateValue = static_cast<int64_t>(predicate.getValue());
+ predicateValue = static_cast<int64_t>(*predicate);
} else {
Optional<FCmpPredicate> predicate =
symbolizeFCmpPredicate(predicateAttr.getValue());
return parser.emitError(predicateLoc)
<< "'" << predicateAttr.getValue()
<< "' is an incorrect value of the 'predicate' attribute";
- predicateValue = static_cast<int64_t>(predicate.getValue());
+ predicateValue = static_cast<int64_t>(*predicate);
}
result.attributes.set("predicate",
<< "' attribute";
}
- auto value = static_cast<int64_t>(kind.getValue());
+ auto value = static_cast<int64_t>(*kind);
auto attr = parser.getBuilder().getI64IntegerAttr(value);
result.addAttribute(attrName, attr);
<< "' attribute";
}
- auto value = static_cast<int64_t>(kind.getValue());
+ auto value = static_cast<int64_t>(*kind);
auto attr = parser.getBuilder().getI64IntegerAttr(value);
result.addAttribute(attrName, attr);
if (mmaShape[0] == 16) {
int64_t kFactor;
Type multiplicandFragType;
- switch (getMultiplicandAPtxType().getValue()) {
+ switch (*getMultiplicandAPtxType()) {
case MMATypes::tf32:
kFactor = 4;
multiplicandFragType = i32Ty;
// In the M=8 case, there is only 1 possible case per data type.
if (mmaShape[0] == 8) {
- if (getMultiplicandAPtxType().getValue() == MMATypes::f16) {
+ if (*getMultiplicandAPtxType() == MMATypes::f16) {
expectedA.emplace_back(2, f16x2Ty);
expectedB.emplace_back(2, f16x2Ty);
expectedResult.push_back(f16x2x4StructTy);
expectedC.emplace_back(8, f32Ty);
allowedShapes.push_back({8, 8, 4});
}
- if (getMultiplicandAPtxType().getValue() == MMATypes::f64) {
+ if (*getMultiplicandAPtxType() == MMATypes::f64) {
Type f64Ty = Float64Type::get(context);
expectedA.emplace_back(1, f64Ty);
expectedB.emplace_back(1, f64Ty);
unsigned rank,
MLIRContext *context) {
if (maybeMap)
- return maybeMap.getValue();
+ return *maybeMap;
if (rank == 0)
return AffineMap::get(context);
return AffineMap::getMultiDimIdentityMap(rank, context);
fuseWithReshapeByExpansion(genericOp, reshapeOp, opOperand, rewriter);
if (!replacementValues)
return failure();
- rewriter.replaceOp(genericOp, replacementValues.getValue());
+ rewriter.replaceOp(genericOp, *replacementValues);
return success();
}
return failure();
producer, reshapeOp, producer.getOutputOperand(0), rewriter);
if (!replacementValues)
return failure();
- rewriter.replaceOp(reshapeOp, replacementValues.getValue());
+ rewriter.replaceOp(reshapeOp, *replacementValues);
return success();
}
genericOp, "failed to do the fusion by collapsing transformation");
}
- rewriter.replaceOp(genericOp, replacements.getValue());
+ rewriter.replaceOp(genericOp, *replacements);
return success();
}
return failure();
elem.getIndexingOpViewOperandNum();
return isa<LinalgOp>(elem.getDependentOp()) &&
v == consumerOpOperand.get() && operandNum &&
- operandNum.getValue() ==
- consumerOpOperand.getOperandNumber();
+ *operandNum == consumerOpOperand.getOperandNumber();
})) {
// Consumer consumes this view; `isStructurallyFusableProducer` also
// checks whether it is a strict subview of the producer view.
lastFusableLoop = pos;
continue;
}
- if (pos <= lastFusableLoop.getValue())
+ if (pos <= *lastFusableLoop)
return true;
lastFusableLoop = pos;
}
.setTileSizes(tileSizes)
.setLoopType(LinalgTilingLoopType::Loops);
if (tileDistribution)
- tilingOptions =
- tilingOptions.setDistributionOptions(tileDistribution.getValue());
+ tilingOptions = tilingOptions.setDistributionOptions(*tileDistribution);
// TODO: Propagate RewriterBase everywhere.
IRRewriter rewriter(b);
return false;
auto attr = op->getAttr(LinalgTransforms::kLinalgTransformMarker)
.dyn_cast<StringAttr>();
- return attr && attr == replacement.getValue();
+ return attr && attr == *replacement;
}
LinalgTilingOptions &
LLVM_DEBUG(DBGS() << "No constant bounding box can be found for padding");
return failure();
}
- paddedShape[shapeIdx++] = upperBound.getValue();
+ paddedShape[shapeIdx++] = *upperBound;
}
assert(shapeIdx == static_cast<int64_t>(shape.size()) &&
"expect the dynamic and static ranks to match");
// could assert, but continue if this is not the case.
if (!operandNumber)
continue;
- if (!fusionOptions.indicesToFuse.count(operandNumber.getValue()))
+ if (!fusionOptions.indicesToFuse.count(*operandNumber))
continue;
if (isa<LinalgOp>(dependence.getDependentOp()))
producers.insert(dependence.getDependentOp());
padOp, en.value(), transposeVector, hoistedOp, transposeOps);
if (failed(newResult))
continue;
- rewriter.replaceOp(padOp, newResult.getValue());
+ rewriter.replaceOp(padOp, *newResult);
// Do not apply hoist padding to the newly introduced transpose operations.
for (GenericOp transposeOp : transposeOps)
}
// Replace the original operation to pad.
- rewriter.replaceOp(linalgOp, newResults.getValue());
+ rewriter.replaceOp(linalgOp, *newResults);
filter.replaceLinalgTransformationFilter(rewriter, paddedOp);
return paddedOp;
bool zeroSliceGuard = true;
if (controlFn) {
if (Optional<bool> control = controlFn(sliceOp))
- zeroSliceGuard = control.getValue();
+ zeroSliceGuard = *control;
else
return failure();
}
if (!ubConst)
return;
- boundMap =
- AffineMap::getConstantMap(ubConst.getValue(), value.getContext());
+ boundMap = AffineMap::getConstantMap(*ubConst, value.getContext());
return;
}
// Modify the lb, ub, and step based on the distribution options.
SmallVector<DistributionMethod, 0> distributionMethod;
if (distributionOptions) {
- auto &options = distributionOptions.getValue();
+ auto &options = *distributionOptions;
distributionMethod.assign(distributionOptions->distributionMethod.begin(),
distributionOptions->distributionMethod.end());
SmallVector<Range, 2> parallelLoopRanges;
// Check that constant index is not knowingly out of range.
auto type = source().getType();
if (auto memrefType = type.dyn_cast<MemRefType>()) {
- if (index.getValue() >= memrefType.getRank())
+ if (*index >= memrefType.getRank())
return emitOpError("index is out of range");
} else if (type.isa<UnrankedMemRefType>()) {
// Assume index to be in range.
}
if (Optional<uint64_t> alignAttr = alignment()) {
- uint64_t alignment = alignAttr.getValue();
+ uint64_t alignment = *alignAttr;
if (!llvm::isPowerOf2_64(alignment))
return emitError() << "alignment attribute value " << alignment
}
AffineMap layoutMap = nonRankReducedType.getLayout().getAffineMap();
if (!layoutMap.isIdentity())
- layoutMap = getProjectedMap(layoutMap, unusedDims.getValue());
+ layoutMap = getProjectedMap(layoutMap, *unusedDims);
return MemRefType::get(shape, nonRankReducedType.getElementType(), layoutMap,
nonRankReducedType.getMemorySpace());
}
ArrayRef<int64_t> sourceShape = subViewOp.getSourceType().getShape();
for (const auto &size : llvm::enumerate(mixedSizes)) {
Optional<int64_t> intValue = getConstantIntValue(size.value());
- if (!intValue || intValue.getValue() != sourceShape[size.index()])
+ if (!intValue || *intValue != sourceShape[size.index()])
return false;
}
// All conditions met. The `SubViewOp` is foldable as a no-op.
<< "nested inside a worksharing-loop with ordered "
<< "clause with parameter present";
- if (container.ordered_valAttr().getInt() !=
- (int64_t)num_loops_val().getValue())
+ if (container.ordered_valAttr().getInt() != (int64_t)*num_loops_val())
return emitOpError() << "number of variables in depend clause does not "
<< "match number of iteration variables in the "
<< "doacross loop";
}
// Otherwise, the loop may branch back to itself or the parent operation.
- assert(index.getValue() == 0 && "expected loop region");
+ assert(*index == 0 && "expected loop region");
regions.push_back(RegionSuccessor(&getLoopBody(), getRegionIterArgs()));
regions.push_back(RegionSuccessor(getResults()));
}
if (!optStride)
return failure();
- if (!(stride = optStride.getValue())) {
+ if (!(stride = *optStride)) {
parser.emitError(strideLoc, "ArrayStride must be greater than zero");
return failure();
}
Optional<std::tuple<ParseType>> operator()(SPIRVDialect const &dialect,
DialectAsmParser &parser) const {
if (auto value = parseAndVerify<ParseType>(dialect, parser))
- return std::tuple<ParseType>(value.getValue());
+ return std::tuple<ParseType>(*value);
return llvm::None;
}
};
if (parser.parseGreater())
return Type();
- return ImageType::get(value.getValue());
+ return ImageType::get(*value);
}
// sampledImage-type ::= `!spv.sampledImage<` image-type `>`
return parser.emitError(loc, "invalid ")
<< attrName << " attribute specification: " << attrVal;
}
- value = attrOptional.getValue();
+ value = *attrOptional;
return success();
}
// Print optional initializer
if (auto initializer = this->initializer()) {
printer << " " << kInitializerAttrName << '(';
- printer.printSymbolName(initializer.getValue());
+ printer.printSymbolName(*initializer);
printer << ')';
elidedAttrs.push_back(kInitializerAttrName);
}
auto elementSize = getTypeNumBytes(options, vecType.getElementType());
if (!elementSize)
return llvm::None;
- return vecType.getNumElements() * elementSize.getValue();
+ return vecType.getNumElements() * *elementSize;
}
if (auto memRefType = type.dyn_cast<MemRefType>()) {
for (const auto &shape : enumerate(dims))
memrefSize = std::max(memrefSize, shape.value() * strides[shape.index()]);
- return (offset + memrefSize) * elementSize.getValue();
+ return (offset + memrefSize) * *elementSize;
}
if (auto tensorType = type.dyn_cast<TensorType>()) {
if (!elementSize)
return llvm::None;
- int64_t size = elementSize.getValue();
+ int64_t size = *elementSize;
for (auto shape : tensorType.getShape())
size *= shape;
spirv::SPIRVDialect::getAttributeName(
spirv::Decoration::BuiltIn))) {
auto varBuiltIn = spirv::symbolizeBuiltIn(builtinAttr.getValue());
- if (varBuiltIn && varBuiltIn.getValue() == builtin) {
+ if (varBuiltIn && *varBuiltIn == builtin) {
return varOp;
}
}
return failure();
// Generate the call.
Value src = adaptor.getOperands()[0];
- int64_t idx = index.getValue();
+ int64_t idx = *index;
rewriter.replaceOp(op, genDimSizeCall(rewriter, op, enc, src, idx));
return success();
}
// Check that constant index is not knowingly out of range.
auto type = source().getType();
if (auto tensorType = type.dyn_cast<RankedTensorType>()) {
- if (index.getValue() >= tensorType.getRank())
+ if (*index >= tensorType.getRank())
return emitOpError("index is out of range");
} else if (type.isa<UnrankedTensorType>()) {
// Assume index to be in range.
// If the size is not 1, or if the current matched dimension of the result
// is the same static shape as the size value (which is 1), then the
// dimension is preserved.
- if (!sizeVal || sizeVal.getValue() != 1 ||
+ if (!sizeVal || *sizeVal != 1 ||
(shapePos < resultShape.size() && resultShape[shapePos] == 1)) {
shapePos++;
continue;
/// Returns true if the given `attrOrValue` is a constant zero.
static bool isZero(OpFoldResult attrOrValue) {
if (Optional<int64_t> val = getConstantIntValue(attrOrValue))
- return val.getValue() == 0;
+ return *val == 0;
return false;
}
} else if (elementTy.isa<IntegerType>() && !op.quantization_info()) {
constantAttr = rewriter.getIntegerAttr(elementTy, 0);
} else if (elementTy.isa<IntegerType>() && op.quantization_info()) {
- auto value = op.quantization_info().getValue().getInputZp();
+ auto value = op.quantization_info()->getInputZp();
constantAttr = rewriter.getIntegerAttr(elementTy, value);
}
rewriter
.create<tosa::FullyConnectedOp>(
op.getLoc(), fullyConnectedShapeType, reshapedInput,
- reshapedWeight, op.bias(), op.quantization_info().getValue())
+ reshapedWeight, op.bias(), *op.quantization_info())
.getResult();
} else {
fullyConnectedValue = rewriter
conv2d = rewriter.create<tosa::Conv2DOp>(
loc, resultTy, input, reverse2, bias,
rewriter.getI64ArrayAttr(convPad), rewriter.getI64ArrayAttr(stride),
- rewriter.getI64ArrayAttr(dilation),
- op.quantization_info().getValue());
+ rewriter.getI64ArrayAttr(dilation), *op.quantization_info());
} else {
conv2d = rewriter.create<tosa::Conv2DOp>(
loc, resultTy, input, reverse2, bias,
/*pad=*/rewriter.getI64ArrayAttr({0, 0, 0, 0}),
/*stride=*/rewriter.getI64ArrayAttr({1, 1}),
/*dilation=*/rewriter.getI64ArrayAttr({1, 1}),
- op.quantization_info().getValue())
+ *op.quantization_info())
.getResult();
} else {
conv2d = createOpAndInfer<tosa::Conv2DOp>(
if (failed(parser.parseGreater()))
return {};
- return CombiningKindAttr::get(kind.getValue(), parser.getContext());
+ return CombiningKindAttr::get(*kind, parser.getContext());
}
Attribute VectorDialect::parseAttribute(DialectAsmParser &parser,
auto compileFunctionCreator = [&](JITTargetMachineBuilder jtmb)
-> Expected<std::unique_ptr<IRCompileLayer::IRCompiler>> {
if (options.jitCodeGenOptLevel)
- jtmb.setCodeGenOptLevel(options.jitCodeGenOptLevel.getValue());
+ jtmb.setCodeGenOptLevel(*options.jitCodeGenOptLevel);
auto tm = jtmb.createTargetMachine();
if (!tm)
return tm.takeError();
CompileAndExecuteConfig config, void **args) {
Optional<llvm::CodeGenOpt::Level> jitCodeGenOptLevel;
if (auto clOptLevel = getCommandLineOptLevel(options))
- jitCodeGenOptLevel =
- static_cast<llvm::CodeGenOpt::Level>(clOptLevel.getValue());
+ jitCodeGenOptLevel = static_cast<llvm::CodeGenOpt::Level>(*clOptLevel);
// If shared library implements custom mlir-runner library init and destroy
// functions, we'll use them to register the library with the execution
if (rLhsConst && rRhsConst && firstExpr == secondExpr)
return getAffineBinaryOpExpr(
AffineExprKind::Mul, firstExpr,
- getAffineConstantExpr(rLhsConst.getValue() + rRhsConst.getValue(),
- lhs.getContext()));
+ getAffineConstantExpr(*rLhsConst + *rRhsConst, lhs.getContext()));
// When doing successive additions, bring constant to the right: turn (d0 + 2)
// + d1 into (d0 + d1) + 2.
if (!type.isa<FloatType>())
return (emitError("floating point value not valid for specified type"),
nullptr);
- return FloatAttr::get(type, isNegative ? -val.getValue() : val.getValue());
+ return FloatAttr::get(type, isNegative ? -*val : *val);
}
/// Construct an APInt from a parsed value, a known attribute type and
}
std::string data;
- if (parseElementAttrHexValues(p, hexStorage.getValue(), data))
+ if (parseElementAttrHexValues(p, *hexStorage, data))
return nullptr;
ArrayRef<char> rawData(data.data(), data.size());
return emitError("result number not allowed in argument list");
if (auto value = getToken().getHashIdentifierNumber())
- result.number = value.getValue();
+ result.number = *value;
else
return emitError("invalid SSA value result number");
consumeToken(Token::hash_identifier);
bool question = getToken().is(Token::question);
if (!maybeOffset && !question)
return emitWrongTokenError("invalid offset");
- offset = maybeOffset ? static_cast<int64_t>(maybeOffset.getValue())
+ offset = maybeOffset ? static_cast<int64_t>(*maybeOffset)
: MemRefType::getDynamicStrideOrOffset();
consumeToken();
signSemantics = *signedness ? IntegerType::Signed : IntegerType::Unsigned;
consumeToken(Token::inttype);
- return IntegerType::get(getContext(), width.getValue(), signSemantics);
+ return IntegerType::get(getContext(), *width, signSemantics);
}
// float-type
if (!dimension ||
*dimension > (uint64_t)std::numeric_limits<int64_t>::max())
return emitError("invalid dimension");
- value = (int64_t)dimension.getValue();
+ value = (int64_t)*dimension;
consumeToken(Token::integer);
}
return success();
omp::ClauseDepend dependType = *orderedOp.depend_type_val();
bool isDependSource = dependType == omp::ClauseDepend::dependsource;
- unsigned numLoops = orderedOp.num_loops_val().getValue();
+ unsigned numLoops = *orderedOp.num_loops_val();
SmallVector<llvm::Value *> vecValues =
moduleTranslation.lookupValues(orderedOp.depend_vec_vars());
llvm::SmallVector<llvm::Metadata *, 2> operands;
operands.push_back({}); // Placeholder for self-reference
if (Optional<StringRef> description = op.getDescription())
- operands.push_back(llvm::MDString::get(ctx, description.getValue()));
+ operands.push_back(llvm::MDString::get(ctx, *description));
llvm::MDNode *domain = llvm::MDNode::get(ctx, operands);
domain->replaceOperandWith(0, domain); // Self-reference for uniqueness
aliasScopeDomainMetadataMapping.insert({op, domain});
operands.push_back({}); // Placeholder for self-reference
operands.push_back(domain);
if (Optional<StringRef> description = op.getDescription())
- operands.push_back(llvm::MDString::get(ctx, description.getValue()));
+ operands.push_back(llvm::MDString::get(ctx, *description));
llvm::MDNode *scope = llvm::MDNode::get(ctx, operands);
scope->replaceOperandWith(0, scope); // Self-reference for uniqueness
aliasScopeMetadataMapping.insert({op, scope});
return failure();
}
- operands.push_back(static_cast<uint32_t>(enclosedOpcode.getValue()));
+ operands.push_back(static_cast<uint32_t>(*enclosedOpcode));
// Append operands to the enclosed op to the list of operands.
for (Value operand : enclosedOp.getOperands()) {
// Encode initialization.
if (auto initializer = varOp.initializer()) {
- auto initializerID = getVariableID(initializer.getValue());
+ auto initializerID = getVariableID(*initializer);
if (!initializerID) {
return emitError(varOp.getLoc(),
"invalid usage of undefined variable as initializer");
<< attrName;
}
SmallVector<uint32_t, 1> args;
- switch (decoration.getValue()) {
+ switch (*decoration) {
case spirv::Decoration::Binding:
case spirv::Decoration::DescriptorSet:
case spirv::Decoration::Location:
if (auto strAttr = attr.getValue().dyn_cast<StringAttr>()) {
auto enumVal = spirv::symbolizeBuiltIn(strAttr.getValue());
if (enumVal) {
- args.push_back(static_cast<uint32_t>(enumVal.getValue()));
+ args.push_back(static_cast<uint32_t>(*enumVal));
break;
}
return emitError(loc, "invalid ")
default:
return emitError(loc, "unhandled decoration ") << decorationName;
}
- return emitDecoration(resultID, decoration.getValue(), args);
+ return emitDecoration(resultID, *decoration, args);
}
LogicalResult Serializer::processName(uint32_t resultID, StringRef name) {
argReplacements);
if (failed(result))
return failure();
- if (Block *newBlock = result.getValue()) {
+ if (Block *newBlock = *result) {
if (newBlock != block)
blockActions.push_back(BlockAction::getTypeConversion(newBlock));
}
OpBuilder &builder, Location loc, Type resultType, ValueRange inputs) {
for (MaterializationCallbackFn &fn : llvm::reverse(materializations))
if (Optional<Value> result = fn(builder, resultType, inputs, loc))
- return result.getValue();
+ return *result;
return nullptr;
}
auto value = FieldParser<int>::parse(parser);
if (failed(value))
return failure();
- return test::CustomParam{value.getValue()};
+ return test::CustomParam{*value};
}
};
it.value().kind != LinalgOperandDefKind::Scalar &&
it.value().kind != LinalgOperandDefKind::OutputTensor)
continue;
- if (it.value().typeVar.getValue() == typeVar)
+ if (*it.value().typeVar == typeVar)
return llvm::formatv("block.getArgument({0}).getType()", it.index())
.str();
}
{
llvm::raw_string_ostream os(attrBuilderStr);
os << tgfmt(enumAttr.getConstBuilderTemplate(), &attrTypeCtx,
- "attrOptional.getValue()");
+ "attrOptional.value()");
}
// Build a string containing the cases that can be formatted as a keyword.