#include <loco/IR/PermutingCodec.h>
#include <stdex/Memory.h>
#include <plier/tf/Convert.h>
+#include <oops/UserExn.h>
#include <cassert>
#include <vector>
// Validates a TF BiasAdd node before import.
// - Malformed graphs (wrong input count) now return false instead of
//   tripping a debug-only assert, so release builds reject them too.
// - An unsupported "data_format" value is user input, so it raises
//   oops::UserExn (user-facing error) rather than an internal error.
bool BiasAddGraphBuilder::validate(const tensorflow::NodeDef &node) const
{
- assert(node.input_size() == 2);
+ if (node.input_size() != 2)
+ return false;
// note: even though "data_format" is not entered when a model is written,
// TF seems to generate "data_format" field into a pb file
// but "data_format: A string. 'NHWC' and 'NCHW' are supported."
// Not sure if value should be 4-D tensor. Let's skip this check for now.
+ auto data_layout = plier::tf::get_string_attr(node, "data_format");
+ if (!(data_layout == "NHWC" || data_layout == "NCHW"))
+ {
+ throw oops::UserExn("BiasAdd Unsupported data_format", node.name());
+ }
+
return true;
}
// Concat node SHOULD have 3 or more inputs, that is 2 + axis
const int num_inputs = node.input_size() - 1;
- assert(num_inputs >= 2);
- assert(num_inputs == plier::tf::get_int_attr(node, "N"));
return (num_inputs >= 2) && (num_inputs == plier::tf::get_int_attr(node, "N"));
}
#include <loco.h>
#include <plier/tf/Convert.h>
+#include <oops/UserExn.h>
#include <cassert>
+#include <limits>
#include <stdexcept>
}
else
{
- throw std::runtime_error("Error: Invalid Const values");
+ throw oops::UserExn("Invalid Const values", const_node->name());
}
}
}
else
{
- throw std::runtime_error("Error: Invalid Const values");
+ throw oops::UserExn("Invalid Const values", const_node->name());
}
}
// Validates a TF Const node before import.
// Checks moved here from build(): attribute presence, tensor rank (<= 6),
// per-dimension sanity, and supported dtype. Shape problems caused by bad
// model input throw oops::UserExn; unsupported-but-well-formed cases
// simply return false so the node is skipped.
// NOTE(review): std::numeric_limits requires <limits>; confirm it is
// included (directly or transitively) in this translation unit.
bool ConstGraphBuilder::validate(const tensorflow::NodeDef &node) const
{
- return plier::tf::has_attrs(node, {"dtype", "value"});
+ if (!plier::tf::has_attrs(node, {"dtype", "value"}))
+ return false;
+
+ const auto &input_tensor = plier::tf::get_tensor_attr(node, "value");
+ const auto &input_shape = input_tensor.tensor_shape();
+ const auto &input_dims = input_shape.dim();
+
+ if (!(input_shape.dim_size() <= 6))
+ return false;
+
+ for (auto &d : input_dims)
+ {
+ // d.size() is int64; a value above INT_MAX cannot be stored in the
+ // loco dim (see build(), which indexes dims with int) — reject early.
+ if (d.size() > std::numeric_limits<int>::max())
+ throw oops::UserExn("Const Shape element overflows", node.name());
+
+ // negative size means an unknown dimension, which Const cannot have
+ if (d.size() < 0)
+ throw oops::UserExn("Unknown dim size", node.name());
+ }
+
+ auto dtype = plier::tf::as_loco_datatype(plier::tf::get_datatype_attr(node, "dtype"));
+ if (!(dtype == loco::DataType::S32 || dtype == loco::DataType::FLOAT32))
+ return false;
+ // TODO support other dtype
+
+ return true;
}
void ConstGraphBuilder::build(const tensorflow::NodeDef &node, GraphBuilderContext *context) const
bool zero_sized_shape = false;
for (auto &d : input_dims)
{
- if (d.size() > std::numeric_limits<int>::max())
- throw std::runtime_error("Shape element overflows");
+ assert(d.size() <= std::numeric_limits<int>::max());
if (d.size() == 0)
zero_sized_shape = true;
- if (d.size() >= 0)
- const_node->dim(index++) = d.size();
- else
- throw std::runtime_error{"Error: Unknown dim size for " + node.name()};
+ assert(d.size() >= 0);
+ const_node->dim(index++) = d.size();
}
int num_elements = 1;
// TODO support other types
default:
- throw std::runtime_error{"Error: Unsupported data type for " + node.name()};
+ assert(false);
}
// register string-name to node
#include <loco/IR/PermutingCodec.h>
#include <stdex/Memory.h>
#include <plier/tf/Convert.h>
+#include <oops/UserExn.h>
#include <cassert>
#include <stdexcept>
// Validates a TF Conv2D node before import.
// - Wrong input count / missing attributes return false (no debug-only assert).
// - An unsupported "data_format" string is a user-input error -> oops::UserExn.
// - Non-default dilations are not supported yet -> return false (skip node).
bool Conv2DGraphBuilder::validate(const tensorflow::NodeDef &node) const
{
- assert(node.input_size() == 2);
+ if (node.input_size() != 2)
+ return false;
// note: even though "data_format" is not entered when a model is written,
// TF seems to generate "data_format" field into a pb file
- bool has_mandatory_attrs = plier::tf::has_attrs(node, {"T", "data_format", "padding", "strides"});
+ if (!plier::tf::has_attrs(node, {"T", "data_format", "padding", "strides"}))
+ return false;
+
+ auto data_layout = plier::tf::get_string_attr(node, "data_format");
+ if (!(data_layout == "NHWC" || data_layout == "NCHW"))
+ {
+ throw oops::UserExn("Conv2D Unsupported data_format", node.name());
+ }
+
// dilation attribute is not fully supported
- bool supported_dilations = true;
if (plier::tf::has_attr(node, "dilations"))
{
+ // TODO Support non-default dilations
auto dilation = plier::tf::get_list_attr(node, "dilations").i();
- supported_dilations =
- std::all_of(dilation.begin(), dilation.end(), [](std::int64_t dil) { return dil == 1; });
+ if (!std::all_of(dilation.begin(), dilation.end(), [](std::int64_t dil) { return dil == 1; }))
+ return false;
}
- return has_mandatory_attrs && supported_dilations;
+ // Else, dilations are automatically set to default [1,1,1,1] which we assumes now
+
+ return true;
}
void Conv2DGraphBuilder::build(const tensorflow::NodeDef &node, GraphBuilderContext *context) const
// read attributes
auto data_layout = plier::tf::get_string_attr(node, "data_format");
- if (!(data_layout == "NHWC" || data_layout == "NCHW"))
- {
- throw std::runtime_error("Not yet supported");
- }
+ assert(data_layout == "NHWC" || data_layout == "NCHW");
conv2d->data_layout(data_layout);
auto tf_strides = plier::tf::get_list_attr(node, "strides");
#include <loco.h>
#include <stdex/Memory.h>
#include <plier/tf/Convert.h>
+#include <oops/UserExn.h>
namespace
{
if (node.input_size() != 3)
return false;
- auto check_default_dilations = [](std::vector<int64_t> dilations) -> bool {
- assert(dilations.size() == 4);
- for (auto d : dilations)
- {
- if (d != 1)
- return false;
- }
- return true;
- };
+ if (!plier::tf::has_attrs(node, {"T", "data_format", "padding", "strides"}))
+ return false;
- if (plier::tf::has_attr(node, "dilations"))
+ auto data_layout = plier::tf::get_string_attr(node, "data_format");
+ if (!(data_layout == "NHWC" || data_layout == "NCHW"))
{
- auto tf_dilations = plier::tf::get_list_attr(node, "dilations");
- auto dilations = plier::tf::as_int64_list(tf_dilations);
+ throw oops::UserExn("Conv2DBackprop Unsupported data_format", node.name());
+ }
+ // dilation attribute is not fully supported
+ if (plier::tf::has_attr(node, "dilations"))
+ {
// TODO Support non-default dilations
- if (!check_default_dilations(dilations))
+ auto dilation = plier::tf::get_list_attr(node, "dilations").i();
+ if (!std::all_of(dilation.begin(), dilation.end(), [](std::int64_t dil) { return dil == 1; }))
return false;
}
// Else, dilations are automatically set to default [1,1,1,1] which we assumes now
- return plier::tf::has_attrs(node, {"T", "data_format", "padding", "strides"});
+ return true;
}
void Conv2DBackpropInputGraphBuilder::build(const tensorflow::NodeDef &node,
// read attributes
auto data_layout = plier::tf::get_string_attr(node, "data_format");
- if (!(data_layout == "NHWC" || data_layout == "NCHW"))
- throw std::runtime_error("Not yet supported");
+ assert(data_layout == "NHWC" || data_layout == "NCHW");
conv2d_backprop->data_layout(data_layout);
auto tf_strides = plier::tf::get_list_attr(node, "strides");
#include <plier/tf/Convert.h>
#include <loco/IR/PermutingCodec.h>
#include <stdex/Memory.h>
+#include <oops/UserExn.h>
#include <cassert>
if (node.input_size() != 2)
return false;
- auto data_layout = get_string_attr(node, "data_format");
+ // note: even though "data_format" and "dilations" are not entered when a model is written,
+ // TF seems to generate those field into a pb file.
+ if (!has_attrs(node, {"T", "data_format", "dilations", "padding", "strides"}))
+ return false;
+
+ auto data_layout = plier::tf::get_string_attr(node, "data_format");
if (!(data_layout == "NHWC" || data_layout == "NCHW"))
{
- throw std::runtime_error("Not yet supported data layout of DepthwiseConv2dNative: " +
- data_layout);
+ throw oops::UserExn("DepthwiseConv2dNative Unsupported data_format", node.name());
}
auto padding = moco::str_toupper(get_string_attr(node, "padding"));
auto strides = as_int64_list(tf_strides);
if (!(strides.size() == 4))
{
- throw std::runtime_error("DepthwiseConv2dNative strides requires rank 4");
+ throw oops::UserExn("DepthwiseConv2dNative strides requires rank 4", node.name());
}
auto stride_n = strides.at(0);
auto stride_h = strides.at(1);
if (!(stride_n == 1 && stride_c == 1) || !(stride_h == stride_w))
{
// TODO this message may need to be refined
- throw std::runtime_error("DepthwiseConv2dNative strides requires N==C==1, H==W");
+ throw oops::UserExn("DepthwiseConv2dNative strides requires N=C=1, H=W", node.name());
}
- // note: even though "data_format" and "dilations" are not entered when a model is written,
- // TF seems to generate those field into a pb file.
- return has_attrs(node, {"T", "data_format", "dilations", "padding", "strides"});
+ return true;
}
void DepthwiseConv2dNativeGraphBuilder::build(const tensorflow::NodeDef &node,
// Validates a TF FusedBatchNorm node: exactly 5 inputs and an "epsilon"
// attribute are required. Wrong input count now returns false instead of
// asserting, so release builds reject malformed graphs too.
bool FusedBatchNormGraphBuilder::validate(const tensorflow::NodeDef &node) const
{
- assert(node.input_size() == 5);
+ if (node.input_size() != 5)
+ return false;
return plier::tf::has_attrs(node, {"epsilon"});
}
#include <loco/IR/PermutingCodec.h>
#include <stdex/Memory.h>
#include <plier/tf/Convert.h>
+#include <oops/UserExn.h>
#include <cassert>
#include <stdexcept>
{
// note: even though "data_format" is not entered when a model is written,
// TF seems to generate "data_format" field into a pb file
- return plier::tf::has_attrs(node, {"T", "data_format", "ksize", "padding", "strides"});
+ if (!plier::tf::has_attrs(node, {"T", "data_format", "ksize", "padding", "strides"}))
+ return false;
+
+ auto data_layout = plier::tf::get_string_attr(node, "data_format");
+ if (!(data_layout == "NHWC" || data_layout == "NCHW"))
+ {
+ throw oops::UserExn("MaxPool Unsupported data_format", node.name());
+ }
+
+ auto tf_ksize = plier::tf::get_list_attr(node, "ksize");
+ auto ksize = plier::tf::as_int64_list(tf_ksize);
+ if (ksize.size() != 4)
+ {
+ // TODO support ksize length for 1 and 2
+ throw oops::UserExn("MaxPool ksize requires rank 4", node.name());
+ }
+
+ auto tf_strides = plier::tf::get_list_attr(node, "strides");
+ auto strides = plier::tf::as_int64_list(tf_strides);
+ if (strides.size() != 4)
+ {
+ // TODO support strides length for 1 and 2
+ throw oops::UserExn("MaxPool strides requires rank 4", node.name());
+ }
+
+ return true;
}
void MaxPoolGraphBuilder::build(const tensorflow::NodeDef &node, GraphBuilderContext *context) const
// ksize
auto tf_ksize = plier::tf::get_list_attr(node, "ksize");
auto ksize = plier::tf::as_int64_list(tf_ksize);
- if (ksize.size() != 4)
- {
- // TODO support ksize length for 1 and 2
- throw std::runtime_error("MaxPool only supports ksize length 4");
- }
+ assert(ksize.size() == 4);
maxPool_node->ksize(ksize);
// strides
auto tf_strides = plier::tf::get_list_attr(node, "strides");
auto strides = plier::tf::as_int64_list(tf_strides);
- if (strides.size() != 4)
- {
- // TODO support strides length for 1 and 2
- throw std::runtime_error("MaxPool only supports strides length 4");
- }
+ assert(strides.size() == 4);
maxPool_node->strides(strides);
// To set the input node of encode_node with node_name
if (node.input_size() != 2)
return false;
- if (plier::tf::has_attrs(node, {"T", "Tidx", "keep_dims"}) == false)
+ if (!plier::tf::has_attrs(node, {"T", "Tidx", "keep_dims"}))
return false;
auto dtype = plier::tf::get_datatype_attr(node, "Tidx");
// Validates a TF Placeholder node: requires "dtype" and "shape" attributes,
// and currently accepts only FLOAT32 placeholders (returns false for other
// dtypes so the node is rejected rather than mis-imported).
bool PlaceholderGraphBuilder::validate(const tensorflow::NodeDef &node) const
{
- return plier::tf::has_attrs(node, {"dtype", "shape"});
+ if (!plier::tf::has_attrs(node, {"dtype", "shape"}))
+ return false;
+
+ loco::DataType dtype = plier::tf::as_loco_datatype(plier::tf::get_datatype_attr(node, "dtype"));
+ if (dtype != loco::DataType::FLOAT32)
+ return false;
+ // TODO support other types
+
+ return true;
}
void PlaceholderGraphBuilder::build(const tensorflow::NodeDef &node,
// Validates a TF Shape node: exactly 1 input and a "T" attribute.
// Wrong input count returns false instead of asserting.
bool ShapeGraphBuilder::validate(const tensorflow::NodeDef &node) const
{
- assert(node.input_size() == 1);
+ if (node.input_size() != 1)
+ return false;
return plier::tf::has_attrs(node, {"T"});
}
// Validates a TF Softmax node: exactly 1 input and a "T" attribute.
// Wrong input count returns false instead of asserting.
bool SoftmaxGraphBuilder::validate(const tensorflow::NodeDef &node) const
{
- assert(node.input_size() == 1);
+ if (node.input_size() != 1)
+ return false;
return plier::tf::has_attrs(node, {"T"});
}
#include <loco.h>
#include <stdex/Memory.h>
#include <plier/tf/Convert.h>
+#include <oops/UserExn.h>
namespace
{
// Validates a TF Squeeze node before import.
// - Requires exactly 1 input and a "T" attribute (returns false otherwise).
// - The 'axis' attribute is not supported yet; since its presence means the
//   model genuinely needs unsupported behavior, raise oops::UserExn.
// BUG FIX: the exception was constructed but never thrown (`oops::UserExn(...)`
// as an expression statement has no effect), so validate() silently returned
// true for nodes with 'axis' — replicating the identical pre-patch bug where
// `std::runtime_error(...)` also lacked `throw`. Added the missing `throw`.
bool SqueezeGraphBuilder::validate(const tensorflow::NodeDef &node) const
{
- assert(node.input_size() == 1);
+ if (node.input_size() != 1)
+ return false;
- return plier::tf::has_attrs(node, {"T"});
+ if (!plier::tf::has_attrs(node, {"T"}))
+ return false;
+
+ if (plier::tf::has_attrs(node, {"axis"}))
+ {
+ // TODO support 'axis' attribute
+ throw oops::UserExn("Squeeze: Unsupported 'axis' attribute", node.name());
+ }
+
+ return true;
}
void SqueezeGraphBuilder::build(const tensorflow::NodeDef &node, GraphBuilderContext *context) const
SymbolTable *tensor_names = context->tensor_names();
UpdateQueue *updates = context->updates();
- if (plier::tf::has_attrs(node, {"axis"}))
- {
- // TODO support 'axis' attribute
- std::runtime_error("Import Squeeze: 'axis' attribute is not supported yet");
- }
+ // TODO support 'axis' attribute
+ assert(!plier::tf::has_attrs(node, {"axis"}));
std::vector<int64_t> squeeze_dims;
if (plier::tf::has_attrs(node, {"squeeze_dims"}))
// Validates a TF StopGradient node: exactly 1 input and a "T" attribute.
// Wrong input count returns false instead of asserting.
bool StopGradientGraphBuilder::validate(const tensorflow::NodeDef &node) const
{
- assert(node.input_size() == 1);
+ if (node.input_size() != 1)
+ return false;
return plier::tf::has_attrs(node, {"T"});
}