return loco::NodeShape(output_tensor_shape);
}
+ loco::NodeShape visit(const moco::tf::TFConst *node) final
+ {
+ loco::TensorShape output_tensor_shape;
+
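+ // TFConst itself carries shape information; copy its rank and each known dimension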
+ uint32_t rank = node->rank();
+ output_tensor_shape.rank(rank);
+ for (uint32_t index = 0; index < rank; ++index)
+ {
+ if (node->dim(index).known())
+ output_tensor_shape.dim(index) = node->dim(index).value();
+ else
+ output_tensor_shape.dim(index).unset();
+ }
+
+ return loco::NodeShape(output_tensor_shape);
+ }
+
+ loco::NodeShape visit(const moco::tf::TFConv2D *node) final
+ {
+ auto input_shape = moco::tf::node_shape(node->input());
+ auto ker_shape = moco::tf::node_shape(node->filter());
+ auto ker_tensor_shape = ker_shape.as<loco::TensorShape>(); // in HWIO
+ auto node_stride = moco::tf::stride_of(node->strides(), node->data_layout());
+ auto node_window = moco::tf::window_of(ker_tensor_shape, "HWIO");
+
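+ // PlaneInference computes the output plane (height/width) from the input plane,
+ // given the padding scheme, stride, and kernel window configured below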
+ moco::tf::PlaneInference infer_plane_shape;
+
+ infer_plane_shape.padding(node->padding());
+ infer_plane_shape.stride(node_stride);
+ infer_plane_shape.window(node_window);
+
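+ // View the input as a feature shape for the node's data layout, then take its plane (H/W) part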
+ auto input_feature_shape = moco::tf::as_feature_shape(input_shape, node->data_layout());
+ auto input_plane_shape = moco::tf::make_plane_shape(input_feature_shape);
+ // output count comes from the input count; output depth comes from the kernel 'O' dimension, which is dim(3) in HWIO
+ auto output_feature_shape = input_feature_shape;
+ output_feature_shape.depth() = ker_tensor_shape.dim(3).value();
+
+ auto output_plane_shape = infer_plane_shape(input_plane_shape);
+
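+ // Write the inferred output height/width back into the output feature shape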
+ moco::tf::update(output_feature_shape).with(output_plane_shape);
+
+ return moco::tf::as_tensor_shape(output_feature_shape, node->data_layout());
+ }
+
+ loco::NodeShape visit(const moco::tf::TFConv2DBackpropInput *node) final
+ {
+ // TFConv2DBackpropInput's first input, named 'input_sizes', actually contains the shape of the
+ // node output's feature map. We can get the shape of TFConv2DBackpropInput by just copying it.
+ // TODO Support when 'input_sizes' is not TFConst, or support constant folding
+ auto input_sizes_node = dynamic_cast<moco::tf::TFConst *>(node->input_sizes());
+ assert(input_sizes_node);
+
+ // Let's support S32 for the time being
+ // TODO Support other integer types
+ assert(input_sizes_node->dtype() == loco::DataType::S32);
+ assert(input_sizes_node->size<loco::DataType::S32>() == 4);
+
+ // copy!
+ loco::TensorShape ofm_tensor_shape;
+ ofm_tensor_shape.rank(4);
+ for (uint32_t i = 0; i < 4; ++i)
+ {
+ int32_t dim = input_sizes_node->at<loco::DataType::S32>(i);
+ assert(dim > 0);
+ ofm_tensor_shape.dim(i) = (uint32_t)dim;
+ }
+
+ return loco::NodeShape(ofm_tensor_shape);
+ }
+
loco::NodeShape visit(const moco::tf::TFDepthwiseConv2dNative *node) final
{
auto input_shape = moco::tf::node_shape(node->input()); // NHWC
bool fix_shape(moco::tf::TFConcatV2 *node) { return false; }
-bool fix_shape(moco::tf::TFConst *node)
-{
- if (shape_inference_done(node))
- return false;
-
- // TFConst itself has shape information, copy them
- auto shape_data = make_shape_inference_data(node);
- node->annot(std::move(shape_data));
+bool fix_shape(moco::tf::TFConst *node) { return false; }
- {
- LOGGER(l);
- auto shapedata = node->annot<ShapeInferenceData>();
- assert(shapedata != nullptr);
- INFO(l) << "Fix TFConst shape = " << shapedata->tensor_shape();
- }
+bool fix_shape(moco::tf::TFConv2D *node) { return false; }
- return true;
-}
-
-bool fix_shape(moco::tf::TFConv2D *node)
-{
- LOGGER(l);
-
- if (shape_inference_done(node))
- return false;
-
- auto ifm = node->input();
- loco::NodeShape ifm_shape;
- if (!node_shape(ifm, ifm_shape))
- {
- // input node shape inference is not ready
- return false;
- }
-
- auto ker = node->filter();
- loco::NodeShape ker_shape;
- if (!node_shape(ker, ker_shape))
- {
- return false;
- }
-
- auto padding = node->padding();
- assert(padding == "VALID" || padding == "SAME");
-
- update_stride_data(node);
-
- auto stride_data = node->annot<StrideData>();
- assert(stride_data != nullptr);
- // TODO add and use 'stride_data->stride()' stream out
- INFO(l) << "Fix TFConv2D strides = " << stride_data->stride()->vertical() << ", "
- << stride_data->stride()->horizontal();
-
- auto ifm_tensor_shape = ifm_shape.as<loco::TensorShape>(); // in NHWC
- auto ker_tensor_shape = ker_shape.as<loco::TensorShape>(); // in HWIO
- assert(ifm_tensor_shape.rank() == 4);
- assert(ker_tensor_shape.rank() == 4);
-
- uint32_t input_height = ifm_tensor_shape.dim(1).value();
- uint32_t input_width = ifm_tensor_shape.dim(2).value();
- uint32_t stride_height = stride_data->stride()->vertical();
- uint32_t stride_width = stride_data->stride()->horizontal();
- uint32_t ker_height = ker_tensor_shape.dim(0).value();
- uint32_t ker_width = ker_tensor_shape.dim(1).value();
- uint32_t dilation_height = 1; // TODO Consider dilation
- uint32_t dilation_width = 1;
- uint32_t effective_ker_height = dilation_height * (ker_height - 1) + 1;
- uint32_t effective_ker_width = dilation_width * (ker_width - 1) + 1;
- uint32_t output_height;
- uint32_t output_width;
-
- if (padding == "VALID")
- {
- output_height = (input_height + stride_height - effective_ker_height) / stride_height;
- output_width = (input_width + stride_width - effective_ker_width) / stride_width;
- }
- else if (padding == "SAME")
- {
- output_height = (input_height + stride_height - 1) / stride_height;
- output_width = (input_width + stride_width - 1) / stride_width;
- }
- else
- {
- assert(false && "Unknown padding in fix_shape for TFConv2D");
- }
-
- loco::TensorShape ofm_tensor_shape;
- ofm_tensor_shape.rank(4);
- ofm_tensor_shape.dim(0) = ifm_tensor_shape.dim(0);
- ofm_tensor_shape.dim(1) = output_height;
- ofm_tensor_shape.dim(2) = output_width;
- ofm_tensor_shape.dim(3) = ker_tensor_shape.dim(3);
-
- auto shape_data = stdex::make_unique<ShapeInferenceData>();
- shape_data->tensor_shape(ofm_tensor_shape);
- node->annot(std::move(shape_data));
-
- FixPadContext ctx = {input_height, input_width, output_height, output_width,
- stride_height, stride_width, effective_ker_height, effective_ker_width};
-
- calc_annot_paddata(node, ctx);
-
- INFO(l) << "Fix TFConv2D shape = ifm" << ifm_tensor_shape << " ker" << ker_tensor_shape
- << " --> ofm" << ofm_tensor_shape;
- INFO(l) << " pad = " << *node->annot<PadData>();
-
- return true;
-}
-
-bool fix_shape(moco::tf::TFConv2DBackpropInput *node)
-{
- LOGGER(l);
-
- if (shape_inference_done(node))
- return false;
-
- // TFConv2DBackpropInput's first input, named 'input_sizes', actually contains shape of node
- // output's feature map. We can get shape of TFConv2DBackpropInput by just copying this.
- // TODO Support when 'input_sizes' is not TFConst, or support constant folding
- auto input_sizes_node = dynamic_cast<TFConst *>(node->input_sizes());
- assert(input_sizes_node);
-
- // Let's support S32 for time being
- // TODO Support other integer types
- assert(input_sizes_node->dtype() == loco::DataType::S32);
- assert(input_sizes_node->size<loco::DataType::S32>() == 4);
-
- // copy!
- loco::TensorShape ofm_tensor_shape;
- ofm_tensor_shape.rank(4);
- for (uint32_t i = 0; i < 4; ++i)
- {
- int32_t dim = input_sizes_node->at<loco::DataType::S32>(i);
- assert(dim > 0);
- ofm_tensor_shape.dim(i) = (uint32_t)dim;
- }
-
- auto shape_data = stdex::make_unique<ShapeInferenceData>();
- shape_data->tensor_shape(ofm_tensor_shape);
- node->annot(std::move(shape_data));
-
- INFO(l) << "Fix TFConv2DBackpropInput shape = " << ofm_tensor_shape;
- return true;
-}
+bool fix_shape(moco::tf::TFConv2DBackpropInput *node) { return false; }
bool fix_shape(moco::tf::TFDepthwiseConv2dNative *node) { return false; }