/// Enum that represents the different kinds of diagnostic arguments
/// supported.
enum class DiagnosticArgumentKind {
+ Double,
Integer,
String,
Type,
/// Returns the kind of this argument.
DiagnosticArgumentKind getKind() const { return kind; }
- /// Returns this argument as a string.
- StringRef getAsString() const {
- assert(getKind() == DiagnosticArgumentKind::String);
- return stringVal;
+ /// Returns this argument as a double.
+ double getAsDouble() const {
+ assert(getKind() == DiagnosticArgumentKind::Double);
+ return doubleVal;
}
/// Returns this argument as a signed integer.
return static_cast<int64_t>(opaqueVal);
}
+ /// Returns this argument as a string.
+ StringRef getAsString() const {
+ assert(getKind() == DiagnosticArgumentKind::String);
+ return stringVal;
+ }
+
/// Returns this argument as a Type.
Type getAsType() const;
private:
friend class Diagnostic;
+ // Construct from a floating point number.
+ explicit DiagnosticArgument(double val)
+ : kind(DiagnosticArgumentKind::Double), doubleVal(val) {}
+ explicit DiagnosticArgument(float val) : DiagnosticArgument(double(val)) {}
+
// Construct from a signed integer.
explicit DiagnosticArgument(int64_t val)
: kind(DiagnosticArgumentKind::Integer), opaqueVal(val) {}
/// The value of this argument.
union {
+ double doubleVal;
intptr_t opaqueVal;
StringRef stringVal;
};
retType.isa<UnrankedTensorType>() ||
isSameShapedVectorOrTensor(retType, broadcastedType);
if (!hasCompatRetType)
- return op->emitOpError(
- llvm::formatv("result type '{0}' does not have the same shape as the "
- "broadcasted type '{1}' computed from the operand types",
- retType, broadcastedType));
+ return op->emitOpError()
+ << "result type '" << retType
+ << "' does not have the same shape as the broadcasted type '"
+ << broadcastedType << "' computed from the operand types";
return success();
}
/// Outputs this argument to a stream.
void DiagnosticArgument::print(raw_ostream &os) const {
switch (kind) {
+ case DiagnosticArgumentKind::Double:
+ os << getAsDouble();
+ break;
case DiagnosticArgumentKind::Integer:
os << getAsInteger();
break;
NamedAttribute argAttr) {
// Check that llvm.noalias is a boolean attribute.
if (argAttr.first == "llvm.noalias" && !argAttr.second.isa<BoolAttr>())
- return func->emitError(
- "llvm.noalias argument attribute of non boolean type");
+ return func->emitError()
+ << "llvm.noalias argument attribute of non boolean type";
return success();
}
return result;
auto *mlirContext = llvmDialect->getContext();
- std::string message;
- llvm::raw_string_ostream os(message);
- os << "unsupported type: ";
- t.print(os);
- mlirContext->emitError(UnknownLoc::get(mlirContext), os.str());
+ mlirContext->emitError(UnknownLoc::get(mlirContext))
+ << "unsupported type: " << t;
return {};
}
return emitOpError("first operand must come from a ViewOp");
unsigned rank = getBaseViewRank();
if (llvm::size(getIndexings()) != rank) {
- return emitOpError("requires at least a view operand followed by " +
- Twine(rank) + " indexings");
+ return emitOpError("requires at least a view operand followed by ")
+ << rank << " indexings";
}
unsigned index = 0;
for (auto indexing : getIndexings()) {
if (!indexing->getType().isa<RangeType>() &&
!indexing->getType().isa<IndexType>()) {
- return emitOpError(Twine(index) +
- "^th index must be of range or index type");
+ return emitOpError() << index
+ << "^th index must be of range or index type";
}
if (indexing->getType().isa<IndexType>())
--rank;
++index;
}
if (getRank() != rank) {
- return emitOpError("the rank of the view must be the number of its range "
- "indices (" +
- Twine(rank) + ") but got: " + Twine(getRank()));
+ return emitOpError()
+ << "the rank of the view must be the number of its range indices ("
+ << rank << ") but got: " << getRank();
}
return success();
}
unsigned index = 0;
for (auto indexing : getIndexings()) {
if (!indexing->getType().isa<RangeType>()) {
- return emitOpError(Twine(index) + "^th index must be of range type");
+ return emitOpError() << index << "^th index must be of range type";
}
++index;
}
if (getViewType().getRank() != index)
- return emitOpError(
- "the rank of the view must be the number of its indexings");
+ return emitOpError()
+ << "the rank of the view must be the number of its indexings";
return success();
}
// Verify storage width.
if (integralWidth == 0 || integralWidth > MaxStorageBits) {
if (loc) {
- context->emitError(*loc,
- "illegal storage type size: " + Twine(integralWidth));
+ context->emitError(*loc, "illegal storage type size: ") << integralWidth;
}
return failure();
}
storageTypeMin < defaultIntegerMin ||
storageTypeMax > defaultIntegerMax) {
if (loc) {
- context->emitError(*loc, "illegal storage min and storage max: (" +
- Twine(storageTypeMin) + ":" +
- Twine(storageTypeMax) + ")");
+ context->emitError(*loc, "illegal storage min and storage max: (")
+ << storageTypeMin << ":" << storageTypeMax << ")";
}
return failure();
}
// Verify scale.
if (scale <= 0.0 || std::isinf(scale) || std::isnan(scale)) {
if (loc) {
- context->emitError(*loc,
- "illegal scale: " + Twine(std::to_string(scale)));
+ context->emitError(*loc) << "illegal scale: " << scale;
}
return failure();
}
// Ensure that the number of scales and zeroPoints match.
if (scales.size() != zeroPoints.size()) {
if (loc) {
- context->emitError(*loc, "illegal number of scales and zeroPoints: " +
- Twine(scales.size()) + ", " +
- Twine(zeroPoints.size()));
+ context->emitError(*loc, "illegal number of scales and zeroPoints: ")
+ << scales.size() << ", " << zeroPoints.size();
}
return failure();
}
for (double scale : scales) {
if (scale <= 0.0 || std::isinf(scale) || std::isnan(scale)) {
if (loc) {
- context->emitError(*loc,
- "illegal scale: " + Twine(std::to_string(scale)));
+ context->emitError(*loc) << "illegal scale: " << scale;
}
return failure();
}
qmin = -32768;
qmax = 32767;
} else {
- ctx->emitError(loc,
- "unsupported FakeQuant number of bits: " + Twine(numBits));
+ ctx->emitError(loc, "unsupported FakeQuant number of bits: ") << numBits;
return nullptr;
}
// Range must straddle zero.
if (rmin > 0.0 || rmax < 0.0) {
- return (ctx->emitError(loc, "FakeQuant range must straddle zero: [" +
- Twine(std::to_string(rmin)) + "," +
- Twine(std::to_string(rmax)) + "]"),
+ return (ctx->emitError(loc, "FakeQuant range must straddle zero: [")
+ << rmin << "," << rmax << "]",
nullptr);
}
(optionalPaddingValue ? 1 : 0);
// Checks on the actual operands and their types.
if (getNumOperands() != expectedNumOperands) {
- return emitOpError("expects " + Twine(expectedNumOperands) + " operands " +
- "(of which " + Twine(memrefType.getRank()) +
- " indices)");
+ return emitOpError("expects ")
+ << expectedNumOperands << " operands (of which "
+ << memrefType.getRank() << " indices)";
}
// Consistency of padding value with vector type.
if (optionalPaddingValue) {
++numIndices;
}
if (numIndices != memrefType.getRank()) {
- return emitOpError("requires at least a memref operand followed by " +
- Twine(memrefType.getRank()) + " indices");
+ return emitOpError("requires at least a memref operand followed by ")
+ << memrefType.getRank() << " indices";
}
// Consistency of AffineMap attribute.
}
if (permutationMap.getNumResults() != vectorType.getRank()) {
return emitOpError("requires a permutation_map with result dims of the "
- "same rank as the vector type (" +
- Twine(permutationMap.getNumResults()) + " vs " +
- Twine(vectorType.getRank()));
+ "same rank as the vector type (")
+ << permutationMap.getNumResults() << " vs " << vectorType.getRank();
}
return verifyPermutationMap(permutationMap,
[this](Twine t) { return emitOpError(t); });
Offsets::FirstIndexOffset + memrefType.getRank();
// Checks on the actual operands and their types.
if (getNumOperands() != expectedNumOperands) {
- return emitOpError("expects " + Twine(expectedNumOperands) + " operands " +
- "(of which " + Twine(memrefType.getRank()) +
- " indices)");
+ return emitOpError() << "expects " << expectedNumOperands
+ << " operands (of which " << memrefType.getRank()
+ << " indices)";
}
// Consistency of indices types.
unsigned numIndices = 0;
numIndices++;
}
if (numIndices != memrefType.getRank()) {
- return emitOpError("requires at least a memref operand followed by " +
- Twine(memrefType.getRank()) + " indices");
+ return emitOpError("requires at least a memref operand followed by ")
+ << memrefType.getRank() << " indices";
}
// Consistency of AffineMap attribute.
}
if (permutationMap.getNumResults() != vectorType.getRank()) {
return emitOpError("requires a permutation_map with result dims of the "
- "same rank as the vector type (" +
- Twine(permutationMap.getNumResults()) + " vs " +
- Twine(vectorType.getRank()));
+ "same rank as the vector type (")
+ << permutationMap.getNumResults() << " vs " << vectorType.getRank();
}
return verifyPermutationMap(permutationMap,
[this](Twine t) { return emitOpError(t); });
// Verify that a mismatched range errors.
func @fakeQuantArgs(tensor<8x4x3xf32>) -> tensor<8x4x3xf32> {
^bb0(%arg0: tensor<8x4x3xf32>):
- // expected-error@+1 {{FakeQuant range must straddle zero: [1.100000,1.500000]}}
+ // expected-error@+1 {{FakeQuant range must straddle zero: [1.100000e+00,1.500000e+00]}}
%0 = "quant.const_fake_quant"(%arg0) {
min: 1.1 : f32, max: 1.5 : f32, num_bits: 8
} : (tensor<8x4x3xf32>) -> tensor<8x4x3xf32>
// Verify that an invalid range (min > max) errors.
func @fakeQuantArgs(tensor<8x4x3xf32>) -> tensor<8x4x3xf32> {
^bb0(%arg0: tensor<8x4x3xf32>):
- // expected-error@+1 {{FakeQuant range must straddle zero: [1.100000,1.000000}}
+ // expected-error@+1 {{FakeQuant range must straddle zero: [1.100000e+00,1.000000e+00}}
%0 = "quant.const_fake_quant"(%arg0) {
min: 1.1 : f32, max: 1.0 : f32, num_bits: 8
} : (tensor<8x4x3xf32>) -> tensor<8x4x3xf32>