auto add_node = to<Add>(v->value());
auto mul_node = to<Mul>(v->value());
// For now, v->value() can only be an Add or a Mul node.
- TORCH_INTERNAL_ASSERT((add_node || mul_node));
+ TORCH_INTERNAL_ASSERT(add_node || mul_node, buildErrorMessage());
map_input_to_tensor_bufs_.emplace(v->buf()->name_hint(), v->buf());
}
v->value()->accept(this);
}
auto vtbIt = varToBuf.find(access->var());
- TORCH_INTERNAL_ASSERT(vtbIt != varToBuf.end());
+ TORCH_INTERNAL_ASSERT(vtbIt != varToBuf.end(), buildErrorMessage());
BufPtr buf = vtbIt->second;
std::vector<TensorAccessBoundsInfo>& infos = ret[buf];
for (auto& TABI : infos) {
TensorAccessKind kind = access->isWrite() ? kStore : kLoad;
if (!distinctAccessKinds || kind == TABI.kind) {
- TORCH_INTERNAL_ASSERT(TABI.start.size() == access->bounds().size());
- TORCH_INTERNAL_ASSERT(TABI.stop.size() == access->bounds().size());
+ TORCH_INTERNAL_ASSERT(
+ TABI.start.size() == access->bounds().size(), buildErrorMessage());
+ TORCH_INTERNAL_ASSERT(
+ TABI.stop.size() == access->bounds().size(), buildErrorMessage());
for (size_t i = 0; i < TABI.start.size(); ++i) {
TABI.start[i] = IRSimplifier::simplify(
alloc<Min>(TABI.start[i], access->bounds()[i].start, true));
}
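// A minimal sketch of the merge the loop above performs, using plain ints in
// place of simplified IR exprs (illustrative only; the elided lines
// presumably apply the symmetric Max over the stops): merging accesses
// a[2..7] and a[0..5] widens the recorded bounds to a[0..7].
#include <algorithm>
struct IntBounds {
  int start;
  int stop;
};
inline IntBounds mergeIntBounds(IntBounds cur, IntBounds access) {
  // Mirrors alloc<Min>(cur.start, access.start) on the starts and the
  // assumed Max on the stops.
  return {std::min(cur.start, access.start), std::max(cur.stop, access.stop)};
}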
IndexBounds getIndexBounds(const TensorAccessBoundsInfo& tabi) {
- TORCH_INTERNAL_ASSERT(tabi.start.size() == tabi.stop.size());
+ TORCH_INTERNAL_ASSERT(
+ tabi.start.size() == tabi.stop.size(), buildErrorMessage());
IndexBounds ret(tabi.start.size());
if (tabi.start.empty()) {
return ret;
}
// All accesses to a buf must have the same dimensionality.
- TORCH_INTERNAL_ASSERT(A.size() == B.size());
+ TORCH_INTERNAL_ASSERT(A.size() == B.size(), buildErrorMessage());
// Each dimension can be sliced into multiple bound segments.
std::vector<IndexBounds> boundSlices;
for (const auto& slice : slices) {
IndexBounds newRegion;
newRegion.reserve(A.size());
- TORCH_INTERNAL_ASSERT(remainingOuterBounds.size() == i);
+ TORCH_INTERNAL_ASSERT(
+ remainingOuterBounds.size() == i, buildErrorMessage());
for (size_t j = 0; j < i; ++j) {
newRegion.push_back(remainingOuterBounds[j]);
remaining = A[i];
} else {
auto remainingSlices = subtractBound(remaining, slice);
- TORCH_INTERNAL_ASSERT(remainingSlices.size() == 1);
+ TORCH_INTERNAL_ASSERT(remainingSlices.size() == 1, buildErrorMessage());
remaining = remainingSlices[0];
}
}
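// A self-contained sketch of the 1-D bound subtraction this loop relies on
// (types and names here are illustrative, not the tensorexpr API):
// subtracting [3, 5] from [0, 10] yields the segments [0, 2] and [6, 10].
#include <utility>
#include <vector>
inline std::vector<std::pair<int, int>> subtractIntBound(
    std::pair<int, int> a, std::pair<int, int> b) {
  std::vector<std::pair<int, int>> out;
  if (b.second < a.first || b.first > a.second) {
    out.push_back(a); // no overlap: nothing is removed
    return out;
  }
  if (a.first < b.first) {
    out.push_back({a.first, b.first - 1}); // remaining piece below b
  }
  if (a.second > b.second) {
    out.push_back({b.second + 1, a.second}); // remaining piece above b
  }
  return out; // empty when b fully covers a
}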
bool need_sync = false;
// We never mask loops, they'll mask their contents.
if (!segment.mask()) {
- TORCH_INTERNAL_ASSERT(segment.stmts().size() == 1);
+ TORCH_INTERNAL_ASSERT(segment.stmts().size() == 1, buildErrorMessage());
stmts.push_back(segment.stmts()[0]);
continue;
}
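// An illustration of the masking rule above, as hypothetical generated
// output (names and shapes are illustrative, not the codegen's actual
// emission): plain statements get wrapped in a thread guard, while loops
// are emitted unguarded and their contents carry the masks instead.
//
//   if (threadIdx.x < 1) {
//     out[0] = 0.f;                 // masked segment
//   }
//   for (int i = 0; i < N; i++) {   // loop segment: never masked here
//     ...                           // body statements carry their own masks
//   }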
"MALFORMED IR: " + err + " - " + std::to_string(stmt)) {}
};
-TORCH_API std::string buildErrorMessage(const std::string& s);
+TORCH_API std::string buildErrorMessage(const std::string& s = "");
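// With the default argument, asserts can attach the generic fuser hint with
// no extra context, and call sites that have context can still prepend it
// (the second call below is a hypothetical example, not from this patch):
//   TORCH_INTERNAL_ASSERT(cond, buildErrorMessage());
//   TORCH_INTERNAL_ASSERT(cond, buildErrorMessage("Unexpected node in fuser input."));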
} // namespace tensorexpr
} // namespace jit
#include <ATen/core/dispatch/Dispatcher.h>
#include <ATen/native/xnnpack/OpContext.h>
#include <c10/util/irange.h>
+#include <torch/csrc/jit/tensorexpr/exceptions.h>
#include <torch/csrc/jit/tensorexpr/external_functions_registry.h>
namespace torch {
if (args_num > 0) {
// Check that if the extra arguments are provided, then the bias tensor is
// also present
- TORCH_INTERNAL_ASSERT(args_num == 7 && bufs_num == 4);
+ TORCH_INTERNAL_ASSERT(args_num == 7 && bufs_num == 4, buildErrorMessage());
const at::Tensor& b = tensors[3];
int64_t strideH = extra_args[0];
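// A sketch of the argument packing this check guards. The layout is inferred
// from the surrounding code (strideH = extra_args[0]); treat the remaining
// slots as an assumption about the convention, not confirmed by the excerpt:
// bufs = {output, input, weight, bias} (bufs_num == 4) and seven scalars
// (args_num == 7).
#include <cstdint>
#include <vector>
struct Conv2dScalarArgs {
  int64_t strideH, strideW, padH, padW, dilationH, dilationW, groups;
};
inline Conv2dScalarArgs unpackConv2dArgs(const std::vector<int64_t>& extra_args) {
  return {extra_args[0], extra_args[1], extra_args[2], extra_args[3],
          extra_args[4], extra_args[5], extra_args[6]};
}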
std::string buildErrorMessage(const std::string& s) {
static const std::string generic_error_message =
"This error occured in the fuser. You can turn off the fuser with "
- "torch._C._jit_override_can_fuse_on_cpu(False)";
+ "torch.jit.enable_fusion(False).";
if (s.empty()) {
return generic_error_message;
}