This replaces throw statements with oops InternalExn.
Signed-off-by: Hyun Sik Yoon <hyunsik.yoon@samsung.com>
target_link_libraries(exo_test exo)
target_link_libraries(exo_test hermes_std)
target_link_libraries(exo_test logo)
+target_link_libraries(exo_test oops)
target_link_libraries(exo_test locoex_customop)
// TODO Add macro for Release version
-#define EXO_THROW(msg) throw std::runtime_error(pepper::str(msg, " at ", __FILE__, ":", __LINE__))
-
#define EXO_ASSERT(condition, msg) \
{ \
if (!(condition)) \
#include <loco/Service/ShapeInference.h>
#include <locoex/COpCall.h>
+#include <oops/InternalExn.h>
+
#include <flatbuffers/flexbuffers.h>
using namespace flatbuffers;
// TODO Support more reduce type operation
default:
- throw std::runtime_error("Not supported reduce type");
+ INTERNAL_EXN_V("Unsupported reduce type", oops::to_uint32(node->func()));
}
// Create a vector for axes data
void OperationExporter::visit(loco::TensorBroadcast *)
{
- throw std::runtime_error("TensorBroadcast should not exist in the graph");
+ INTERNAL_EXN("loco graph has loco::TensorBroadcast, which should not exist in the graph");
}
void OperationExporter::visit(loco::TensorConstantPad *node)
flexbuf.Float(name.c_str(), float_val->val());
else
// TODO Support more attribute types
- throw std::runtime_error("Not supported type while writing flexbuffer");
+ INTERNAL_EXN_V("Unsupported dtype while writing flexbuffer for customop attr", name);
}
flexbuf.EndMap(map_start);
}
else
{
- assert(false && "unsupported node found");
+ INTERNAL_EXN("Node with unsupported dialect found");
}
}
#include "Dialect/IR/TFLNodes.h"
-#include "Check.h"
+#include <oops/InternalExn.h>
using namespace circle;
using namespace flatbuffers;
return encodeOpBufferByDType<loco::DataType::S32>(builder, c);
}
- EXO_THROW("not supported datatype");
+ INTERNAL_EXN_V("Unsupported datatype", oops::to_uint32(c->dtype()));
}
} // namespace
#include <locoex/COpDialect.h>
#include <locoex/Service/COpTypeInference.h>
+#include <oops/InternalExn.h>
+
#include <stdex/Memory.h>
#include <stdexcept>
break;
}
- throw std::invalid_argument{"dtype"};
+ INTERNAL_EXN_V("Invalid loco dtype", oops::to_uint32(dtype));
}
} // namespace
#include <loco.h>
+#include <oops/InternalExn.h>
+
namespace exo
{
}
}
else
- EXO_THROW("Unsupported DataType");
+ INTERNAL_EXN_V("Unsupported DataType", oops::to_uint32(constgen->dtype()));
}
loco::replace(constgen).with(tfl_const);
#include "Dialect/IR/TFLNodes.h"
#include "Check.h"
+#include <oops/InternalExn.h>
+
#include <loco.h>
#include <loco/Service/ShapeInference.h>
if (origin->func() == loco::ReduceFunc::Mean)
return convert_as_mean(origin);
else
- throw std::runtime_error("NYI ReduceFunc");
+ INTERNAL_EXN_V("Unsupported ReduceFunc", oops::to_uint32(origin->func()));
}
} // namespace exo
#include "TensorTransposeConverter.h"
#include "Dialect/IR/TFLNodes.h"
-#include "Check.h"
#include <loco.h>
#include <loco/Service/ShapeInference.h>
+#include <oops/InternalExn.h>
+
#include <algorithm>
#include <cassert>
#include <vector>
}
if (!std::is_permutation(base_perms.begin(), base_perms.end(), perms.begin()))
- EXO_THROW("wrong perm value");
+ INTERNAL_EXN("wrong perm value");
}
} // namespace
auto input_rank = loco::shape_get(origin->input()).as<loco::TensorShape>().rank();
if (input_rank != origin->perm()->size())
- EXO_THROW("perm size should be same with input rank");
+ INTERNAL_EXN_V("perm size should be same with input rank",
+ oops::to_uint32(origin->perm()->size()));
validate_perm(origin);
}
#include "CircleNodes.h"
#include "CircleNodeVisitor.h"
+#include <oops/InternalExn.h>
+
+#include <cassert>
+
namespace locoex
{
break;
}
- // TODO throw as internal error
- throw std::runtime_error{"CircleNode::accept(CircleNodeVisitorBase) not handled"};
+ INTERNAL_EXN("CircleNode::accept(CircleNodeVisitorBase) not handled");
}
template <typename T> T CircleNode::accept(CircleNodeMutableVisitorBase<T> *v)
break;
}
- // TODO throw as internal error
- throw std::runtime_error{"CircleNode::accept(CircleNodeMutableVisitorBase) not handled"};
+ INTERNAL_EXN("CircleNode::accept(CircleNodeMutableVisitorBase) not handled");
}
} // namespace locoex
#include "CircleNode.h"
#include "CircleNodes.h"
+#include <oops/InternalExn.h>
+
namespace locoex
{
#undef CIRCLE_NODE
/// @brief Default fallback
- virtual T visit(const CircleNode *) { throw std::runtime_error{"CircleNodeVisistor: NYI node"}; }
+ virtual T visit(const CircleNode *) { INTERNAL_EXN("CircleNodeVisistor: NYI node"); }
};
/**
#undef CIRCLE_NODE
/// @brief Default fallback
- virtual T visit(CircleNode *) { throw std::runtime_error{"CircleMutableNodeVisistor: NYI node"}; }
+ virtual T visit(CircleNode *) { INTERNAL_EXN("CircleMutableNodeVisistor: NYI node"); }
};
} // namespace locoex
#include "TFLNodes.h"
#include "TFLNodeVisitor.h"
+#include <oops/InternalExn.h>
+
+#include <cassert>
+
namespace locoex
{
break;
}
- // TODO throw as internal error
- throw std::runtime_error{"TFLNode::accept(TFLNodeVisitorBase) not handled"};
+ INTERNAL_EXN("TFLNode::accept(TFLNodeVisitorBase) not handled");
}
template <typename T> T TFLNode::accept(TFLNodeMutableVisitorBase<T> *v)
break;
}
- // TODO throw as internal error
- throw std::runtime_error{"TFLNode::accept(TFLNodeMutableVisitorBase) not handled"};
+ INTERNAL_EXN("TFLNode::accept(TFLNodeMutableVisitorBase) not handled");
}
} // namespace locoex
#include "TFLNode.h"
#include "TFLNodes.h"
+#include <oops/InternalExn.h>
+
namespace locoex
{
#undef TFL_NODE
/// @brief Default fallback
- virtual T visit(const TFLNode *) { throw std::runtime_error{"TFLNodeVisitor: NYI node"}; }
+ virtual T visit(const TFLNode *) { INTERNAL_EXN("TFLNodeVisitor: NYI node"); }
};
/**
#undef TFL_NODE
/// @brief Default fallback
- virtual T visit(TFLNode *) { throw std::runtime_error{"TFLNodeMutableVisitor: NYI node"}; }
+ virtual T visit(TFLNode *) { INTERNAL_EXN("TFLNodeMutableVisitor: NYI node"); }
};
} // namespace locoex
#include "Check.h"
+#include <oops/InternalExn.h>
+
#include <algorithm>
#include <cassert>
#include <stdexcept>
// each dimension of x and y should be same or one must be 1 if different
if (!((x_dim == y_dim) || (x_dim == 1 || y_dim == 1)))
- throw std::runtime_error("Cannot produce expand_dimension of two shapes");
+ INTERNAL_EXN("Cannot produce expand_dimension of two shapes");
output_shape.dim(axis) = std::max(x_dim, y_dim);
}
if (axis < 0)
axis += input_shape.rank();
if (not(0 <= axis and axis < static_cast<int32_t>(input_shape.rank())))
- EXO_THROW("Invalid reduction axis for MEAN");
+ INTERNAL_EXN_V("Invalid reduction axis for MEAN", oops::to_uint32(axis));
reduction_values.push_back(axis);
}
EXO_ASSERT(const_shape_node->dtype() == S32, "Only support int32 TFLConst");
if (const_shape_node->rank() != 1)
- EXO_THROW("Only support rank 1 TFLConst");
+ INTERNAL_EXN_V("Only support rank 1 TFLConst", oops::to_uint32(const_shape_node->rank()));
shape_by_input.rank(const_shape_node->dim(0).value());
return loco::NodeShape{output_shape_of_transpose(input_shape, tfl_perm)};
}
else
- EXO_THROW("perm of TFLTranspose should be either ConstGen or TFLConst");
+ INTERNAL_EXN("perm of TFLTranspose should be either ConstGen or TFLConst");
}
loco::NodeShape visit(const locoex::TFLTransposeConv *node) final
#include "ExporterUtils.h"
-#include "Check.h"
+#include <oops/InternalExn.h>
+
+#include <cassert>
namespace exo
{
break;
}
- throw std::runtime_error{"Not implemented yet"};
+ INTERNAL_EXN_V("Unsupported loco domain", oops::to_uint32(shape.domain()));
}
} // namespace exo
#ifndef __GRAPH_BLOCK_H__
#define __GRAPH_BLOCK_H__
-#include "Check.h"
-
#include <loco.h>
#include <loco/Service/ShapeInference.h>
+#include <oops/InternalExn.h>
+
#include <functional>
namespace exo
return tfl_node;
}
else
- EXO_THROW("Not yet supported loco::Domain");
+ INTERNAL_EXN_V("Unsupported loco::Domain", oops::to_uint32(loco::shape_get(origin).domain()));
}
} // namespace exo
#include <loco/Service/ShapeInference.h>
+#include <oops/InternalExn.h>
+
namespace
{
EXO_ASSERT(const_orig, "Only support for Reshape-Const pair");
// TODO support other data types
if (const_orig->dtype() != FLOAT32)
- EXO_THROW("NYI for this data type");
+ INTERNAL_EXN_V("NYI for this data type", oops::to_uint32(const_orig->dtype()));
if (volume(const_orig) != volume(reshape))
- EXO_THROW("New shape of Reshape is not matched");
+ INTERNAL_EXN("New shape of Reshape is not matched");
}
auto new_shape = loco::shape_get(reshape).as<loco::TensorShape>();
#include <nncc/core/ADT/tensor/IndexEnumerator.h>
#include <nncc/core/ADT/tensor/LexicalLayout.h>
+#include <oops/InternalExn.h>
+
namespace
{
EXO_ASSERT(const_orig, "Only support for Transpose-Const pair");
// TODO support other data types
if (const_orig->dtype() != FLOAT32)
- EXO_THROW("NYI for this data type");
+ INTERNAL_EXN_V("NYI for this data type", oops::to_uint32(const_orig->dtype()));
EXO_ASSERT(perm, "Only support for constant permutation for Transpose");
// TODO support other data types
if (perm->dtype() != S32)
- EXO_THROW("NYI for this data type");
+ INTERNAL_EXN_V("NYI for this data type", oops::to_uint32(perm->dtype()));
auto okay = [&]() {
if (perm->rank() != 1)
return true;
};
if (not okay())
- EXO_THROW("Input and permutation for Transpose is not congruent");
+ INTERNAL_EXN("Input and permutation for Transpose is not congruent");
}
uint32_t rank = const_orig->rank();
#include "Dialect/IR/TFLDialect.h"
#include "Dialect/IR/TFLNodeVisitor.h"
-#include "Check.h"
-
#include <loco/Service/TypeInference.h>
#include <loco/Service/ShapeInference.h>
+#include <oops/InternalExn.h>
+
#include <set>
/*
else if (const_shape.rank() == 0 or (const_shape.rank() == 1 and const_shape.dim(0) == 1))
{
if (!(loco::dtype_get(as_loco_node(former)) == loco::DataType::FLOAT32))
- EXO_THROW("unsupported data type");
+ INTERNAL_EXN_V("Unsupported data type",
+ oops::to_uint32(loco::dtype_get(as_loco_node(former))));
if (!(const_node->dtype() == loco::DataType::FLOAT32))
- EXO_THROW("unsupported data type");
+ INTERNAL_EXN_V("Unsupported data type", oops::to_uint32(const_node->dtype()));
auto new_bias_node = create_widened(const_node, depth);
#include "MergeConcatNodesPass.h"
#include "Dialect/IR/TFLNodes.h"
+#include <oops/InternalExn.h>
+
#include <vector>
namespace
// return false;
default:
- throw std::runtime_error("Unknown FusedActFunc");
+ INTERNAL_EXN_V("Unknown FusedActFunc", oops::to_uint32(node1->fusedActivationFunction()));
}
}
#include "Log.h"
#include "Knob.h"
+#include <oops/InternalExn.h>
+
#include <cassert>
#include <unordered_map>
#include <string>
auto opCode = it.first;
auto custom_code = custom_opcodes.find(opCode);
if (custom_code == custom_opcodes.end())
- throw std::runtime_error("Cannot find code for custom op");
+ INTERNAL_EXN("Cannot find code for custom op");
operator_codes_vec[idx] =
CreateOperatorCode(builder, it.first.opcode, builder.CreateString(custom_code->second));
#include "TFLExporterUtils.h"
-#include "Check.h"
+#include <oops/InternalExn.h>
namespace exo
{
case locoex::FusedActFunc::RELU6:
return tflite::ActivationFunctionType_RELU6;
default:
- throw std::runtime_error("Not supported locoex FusedActFunc Type");
+ INTERNAL_EXN_V("Unsupported locoex FusedActFunc Type", oops::to_uint32(func));
}
}
if (same_padding_criterion_1 && same_padding_criterion_2)
return tflite::Padding_SAME;
- throw std::runtime_error("NYI for custom PAD");
+ INTERNAL_EXN("NYI for custom PAD");
}
tflite::Padding getOpPadding(const locoex::Padding pad)
if (pad == locoex::Padding::SAME)
return tflite::Padding_SAME;
- EXO_THROW("Unknown padding");
+ INTERNAL_EXN_V("Unknown padding", oops::to_uint32(pad));
}
void registerGraphIOName(loco::Graph *graph, SerializedModelData &gd)
#include <loco/Service/ShapeInference.h>
#include <locoex/COpCall.h>
+#include <oops/InternalExn.h>
+
#include <flatbuffers/flexbuffers.h>
using namespace flatbuffers;
// TODO Support more reduce type operation
default:
- throw std::runtime_error("Not supported reduce type");
+ INTERNAL_EXN_V("Not supported reduce type", oops::to_uint32(node->func()));
}
// Create a vector for axes data
void OperationExporter::visit(loco::TensorBroadcast *)
{
- throw std::runtime_error("TensorBroadcast should not exist in the graph");
+ INTERNAL_EXN("TensorBroadcast should not exist in the graph");
}
void OperationExporter::visit(loco::TensorConstantPad *node)
flexbuf.Float(name.c_str(), float_val->val());
else
// TODO Support more attribute types
- throw std::runtime_error("Not supported type while writing flexbuffer");
+ INTERNAL_EXN("Not supported type while writing flexbuffer");
}
flexbuf.EndMap(map_start);
#include "Dialect/IR/TFLNodes.h"
-#include "Check.h"
+#include <oops/InternalExn.h>
using namespace tflite;
using namespace flatbuffers;
return encodeOpBufferByDType<loco::DataType::S32>(builder, c);
}
- EXO_THROW("not supported datatype");
+ INTERNAL_EXN_V("Unsupported datatype", oops::to_uint32(c->dtype()));
}
} // namespace
#include <locoex/COpDialect.h>
#include <locoex/Service/COpTypeInference.h>
+#include <oops/InternalExn.h>
+
#include <stdex/Memory.h>
#include <stdexcept>
break;
}
- throw std::invalid_argument{"dtype"};
+ INTERNAL_EXN_V("Trying to converte unsupported loco dtype", oops::to_uint32(dtype));
}
} // namespace