def IsScalarOrSplatOne :
Constraint<And<[
CPred<"succeeded(getIntOrSplatIntValue($0))">,
- CPred<"getIntOrSplatIntValue($0).value() == 1">]>>;
+ CPred<"*getIntOrSplatIntValue($0) == 1">]>>;
// mulsi_extended(x, 1) -> [x, extsi(cmpi slt, x, 0)]
def MulSIExtendedRHSOne :

// Check that the shift amount ($2) equals the bitwidth difference between
// the source ($0) and the truncated result ($1).
def TruncationMatchesShiftAmount :
  Constraint<And<[
CPred<"succeeded(getIntOrSplatIntValue($2))">,
CPred<"(getScalarOrElementWidth($0) - getScalarOrElementWidth($1)) == "
- "getIntOrSplatIntValue($2).value()">]>>;
+ "*getIntOrSplatIntValue($2)">]>>;
// trunci(shrsi(x, c)) -> trunci(shrui(x, c))
def TruncIShrSIToTrunciShrUI :
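The width check above is what makes this rewrite sound: when the shift amount equals the bitwidth difference, every bit where shrsi (sign-filling) and shrui (zero-filling) disagree lies above the truncation width, so trunci discards it. A small numeric check of that claim in plain C++ (illustrative only, not part of the pattern; signed right shift is assumed to be arithmetic, as it is on mainstream targets):

#include <cassert>
#include <cstdint>

int main() {
  int32_t x = -12345; // 0xFFFFCFC7, an arbitrary negative input
  // Truncate i32 -> i16 after shifting, i.e. shift amount == 32 - 16.
  auto viaShrsi = static_cast<int16_t>(x >> 16); // arithmetic shift
  auto viaShrui =
      static_cast<int16_t>(static_cast<uint32_t>(x) >> 16); // logical shift
  // The sign-filled bits were all truncated away, so the results agree.
  assert(viaShrsi == viaShrui);
  return 0;
}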
bool isParallel = isParallelFor(codegen, isOuter, isSparse);
Operation *loop =
- genLoopBoundary(codegen, merger, [&](MutableArrayRef<Value> reduc) {
+ *genLoopBoundary(codegen, merger, [&](MutableArrayRef<Value> reduc) {
if (merger.isFilterLoop(idx)) {
// extraTids/extraDims must be empty because filter loops only
// correspond to the one and only sparse tensor level.
}
return codegen.loopEmitter.enterLoopOverTensorAtDim(
builder, loc, tid, dim, reduc, isParallel, extraTids, extraDims);
- }).value();
+ });
assert(loop);
return loop;
}
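This hunk and the next make the same mechanical change: dereference the Optional<Operation *> returned by genLoopBoundary with operator* instead of calling .value(). A minimal sketch of why the surrounding code still works, using std::optional as a stand-in for the llvm::Optional of this diff (makeLoop is a hypothetical placeholder for genLoopBoundary):

#include <cassert>
#include <optional>

struct Operation {};

// Hypothetical placeholder for genLoopBoundary's optional return.
static std::optional<Operation *> makeLoop(bool ok, Operation *op) {
  if (!ok)
    return std::nullopt;
  return op;
}

int main() {
  Operation op;
  // operator* reads the same contained pointer that .value() would return,
  // minus the emptiness check, so the assert on the raw pointer below is
  // still the guard that matters after the change.
  Operation *loop = *makeLoop(/*ok=*/true, &op);
  assert(loop && "expected a non-null loop operation");
  return 0;
}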
ArrayRef<size_t> extraDims) {
Operation *loop =
- genLoopBoundary(codegen, merger, [&](MutableArrayRef<Value> reduc) {
+ *genLoopBoundary(codegen, merger, [&](MutableArrayRef<Value> reduc) {
// Construct the while-loop with a parameter for each index.
return codegen.loopEmitter.enterCoIterationOverTensorsAtDims(
builder, op.getLoc(), condTids, condDims, needsUniv, reduc,
extraTids, extraDims);
- }).value();
+ });
assert(loop);
return loop;
}
Value clause;
if (isCompressedDLT(merger.getDimLevelType(b)) ||
isSingletonDLT(merger.getDimLevelType(b))) {
- auto dim = merger.getDimNum(tensor, idx).value();
+ auto dim = *merger.getDimNum(tensor, idx);
Value op1 = codegen.loopEmitter.getCoord()[tensor][dim];
Value op2 = codegen.getLoopIdxValue(idx);
clause = builder.create<arith::CmpIOp>(loc, arith::CmpIPredicate::eq, op1,
                                       op2);
// Note that we generate dense indices of the output tensor
// unconditionally, since they may not appear in the lattice, but may be
// needed for linearized codegen.
- auto dim = merger.getDimNum(merger.getOutTensorID(), idx).value();
+ auto dim = *merger.getDimNum(merger.getOutTensorID(), idx);
extraTids.push_back(merger.getOutTensorID());
extraDims.push_back(dim);
}