}
}
+/// @brief Export a BiasAdd<Domain::Tensor> node as a TFLite ADD operator.
+///
+/// The ADD's inputs are the node's value and bias tensors (looked up in
+/// gd._node_to_tensor_id); the output is the tensor registered for the node
+/// itself. The built operator is appended to gd._operators.
+void exportBiasAdd(loco::BiasAdd<loco::Domain::Tensor> *node, FlatBufferBuilder &builder,
+ SerializedModelData &gd)
+{
+ uint32_t op_idx = gd.registerBuiltinOpcode(tflite::BuiltinOperator_ADD);
+ std::vector<int32_t> inputs_vec{gd._node_to_tensor_id[node->value()],
+ gd._node_to_tensor_id[node->bias()]};
+ // Output tensor id is keyed by the node itself, upcast to the generic
+ // loco::Node key type used by _node_to_tensor_id.
+ std::vector<int32_t> outputs_vec{gd._node_to_tensor_id[static_cast<loco::Node *>(node)]};
+ auto inputs = builder.CreateVector(inputs_vec);
+ auto outputs = builder.CreateVector(outputs_vec);
+ auto options = CreateAddOptions(builder); // dummy option: ADD needs no BiasAdd-specific attributes
+ auto op_offset = CreateOperator(builder, op_idx, inputs, outputs,
+ tflite::BuiltinOptions_AddOptions, options.Union());
+ gd._operators.push_back(op_offset);
+}
+
/// @brief Export CONCATENATION of **TWO** tensors only
void exportConcat(loco::TensorConcat *node, FlatBufferBuilder &builder, SerializedModelData &gd)
{
{
exportIdentity(encode, builder, data);
}
+ else if (auto *biasadd = dynamic_cast<loco::BiasAdd<loco::Domain::Tensor> *>(node))
+ {
+ exportBiasAdd(biasadd, builder, data);
+ }
else
{
assert(false && "unsupported node found");
{
exportOpDefinedTensor(encode, builder, gd);
}
+ else if (auto *biasadd = dynamic_cast<loco::BiasAdd<loco::Domain::Tensor> *>(node))
+ {
+ exportOpDefinedTensor(biasadd, builder, gd);
+ }
else
{
assert(false && "unsupported node type");
return gd._node_to_type[node->input()];
}
+/// @brief Infer the TFLite element type of a BiasAdd<Domain::Tensor> result.
+///
+/// The result type is taken from the value operand; both operands' types must
+/// already be recorded in gd._node_to_type and must match.
+tflite::TensorType getOpResultType(loco::BiasAdd<loco::Domain::Tensor> *node,
+ SerializedModelData &gd)
+{
+ tflite::TensorType value_type = gd._node_to_type[node->value()];
+ tflite::TensorType bias_type = gd._node_to_type[node->bias()];
+
+ // TODO support heterogeneous type combination
+ assert(value_type == bias_type);
+
+ return value_type;
+}
+
int32_t decodeShapeDimension(const loco::Dimension &dim)
{
if (!dim.known())
return input_shape;
}
+/// @brief Infer the output shape of a BiasAdd<Domain::Tensor> node.
+///
+/// The output shape equals the value operand's shape. The bias must be a
+/// rank-1 tensor whose length matches the value's dimension along the add
+/// axis; both operand shapes must already be recorded in gd._node_to_shape.
+ShapeDescription getOpResultShape(loco::BiasAdd<loco::Domain::Tensor> *node,
+ SerializedModelData &gd)
+{
+ const ShapeDescription &value_shape = gd._node_to_shape[node->value()];
+ const ShapeDescription &bias_shape = gd._node_to_shape[node->bias()];
+
+ // TFLite ADD broadcasting only matches a rank-1 bias against the last axis,
+ // so restrict BiasAdd to the last-axis case; otherwise broadcasting would
+ // not behave as expected.
+ assert(node->axis() == value_shape._dims.size() - 1);
+
+ // Bias should be rank 1
+ assert(bias_shape._dims.size() == 1);
+
+ // Channel count coherency for proper broadcast
+ assert(bias_shape._dims[0] == value_shape._dims[node->axis()]);
+
+ return value_shape;
+}
+
} // namespace loco_exporter
tflite::TensorType getOpResultType(loco::BiasEncode *node, SerializedModelData &gd);
+/// @brief Type inference for BiasAdd<Domain::Tensor> (result = value operand's type)
+tflite::TensorType getOpResultType(loco::BiasAdd<loco::Domain::Tensor> *node,
+ SerializedModelData &gd);
+
// Shape inference functions
ShapeDescription getOpResultShape(loco::Pull *node, SerializedModelData &);
ShapeDescription getOpResultShape(loco::TensorConcat *node, SerializedModelData &gd);
ShapeDescription getOpResultShape(loco::BiasEncode *node, SerializedModelData &gd);
+
+/// @brief Shape inference for BiasAdd<Domain::Tensor> (result = value operand's shape)
+ShapeDescription getOpResultShape(loco::BiasAdd<loco::Domain::Tensor> *node,
+ SerializedModelData &gd);
}
#endif //__LOCO_EXPORTER_TYPEINFERENCE_H__