--- /dev/null
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "TensorConcatConverter.h"
+
+#include "GraphBlock.h"
+#include "Check.h"
+
+#include "Dialect/IR/TFLNodes.h"
+
+#include <loco/Service/ShapeInference.h>
+
+namespace exo
+{
+/**
+ * @brief Converts loco::TensorConcat to locoex::TFLConcatenation
+ *
+ * Before:
+ *    input:0 ----- loco::TensorConcat ------- C
+ *    input:1 ----/
+ *
+ * After:
+ *    input:0 ----- locoex::TFLConcatenation --- C
+ *    input:1 ----/
+ *
+ *    loco::TensorConcat remains in the graph, dangling, with both of its
+ *    inputs detached (see below).
+ *
+ */
+bool TensorConcatConverter::convert(loco::TensorConcat *origin)
+{
+  // Check that the shape is known BEFORE querying it: calling shape_get()
+  // on a shape-unknown node is invalid, so an unknown shape must make the
+  // conversion bail out rather than trip the domain assertion below.
+  if (!loco::shape_known(origin))
+  {
+    return false;
+  }
+
+  assert(loco::shape_get(origin).domain() == loco::Domain::Tensor);
+
+  // TFLConcatenation with exactly two inputs, mirroring TensorConcat's lhs/rhs
+  auto tfl_concat = origin->graph()->nodes()->create<locoex::TFLConcatenation>(2);
+  tfl_concat->values(0, origin->lhs());
+  tfl_concat->values(1, origin->rhs());
+  tfl_concat->axis(origin->axis());
+  // Canonical TensorConcat carries no fused activation
+  tfl_concat->fusedActivationFunction(locoex::FusedActFunc::NONE);
+
+  // Redirect all users of origin to the new node, then detach origin's
+  // inputs so the dead node no longer holds references into the graph.
+  loco::replace(origin).with(tfl_concat);
+
+  origin->lhs(nullptr);
+  origin->rhs(nullptr);
+
+  return true;
+}
+
+} // namespace exo
--- /dev/null
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __CONVERSION_TENSORCONCAT_CONVERTER_H__
+#define __CONVERSION_TENSORCONCAT_CONVERTER_H__
+
+#include "CanonicalNodeConverter.h"
+
+#include <loco.h>
+
+namespace exo
+{
+
+/**
+ * @brief Convert loco::TensorConcat to locoex::TFLConcatenation
+ */
+class TensorConcatConverter : public CanonicalNodeConverter<loco::TensorConcat>
+{
+public:
+  /// @brief Identifier of this converter
+  const char *name(void) const final { return "exo::TensorConcatConverter"; }
+
+public:
+  /// @brief Rewrite origin as locoex::TFLConcatenation; returns false when shape is not known
+  bool convert(loco::TensorConcat *origin) final;
+};
+
+} // namespace exo
+
+#endif // __CONVERSION_TENSORCONCAT_CONVERTER_H__
#include "Conversion/ReluConverter.h"
// TODO loco::ReLU6
// TODO loco::Tanh
-// TODO loco::TensorConcat
+#include "Conversion/TensorConcatConverter.h"
// TODO loco::TensorBiasAdd
#include "Conversion/TensorBroadcastConverter.h"
// TODO loco::TensorSoftmax
// TODO loco::ReLU6
// TODO loco::Tanh
- // TODO loco::TensorConcat
+
+ if (get<Knob::ConvertTensorConcat>())
+ phase.emplace_back(stdex::make_unique<TensorConcatConverter>());
+
// TODO loco::TensorBiasAdd
if (get<Knob::ConvertTensorBroadcast>())
phase.emplace_back(stdex::make_unique<TensorBroadcastConverter>());
return infer_pool_2d_shape(node);
}
- // TODO TFLConcatenation
+  /// @brief Infer output shape of TFLConcatenation: identical to the inputs on
+  ///        every dimension except `axis`, where input extents are summed.
+  loco::NodeShape visit(const locoex::TFLConcatenation *node) final
+  {
+    // TODO Support when TFLConcatenation has 0 input
+    assert(node->numValues() > 0);
+
+    // NOTE(review): axis is used as-is to index dimensions; a negative axis is
+    // not normalized here — presumably callers provide a non-negative axis
+    // within [0, rank). TODO confirm.
+    auto axis = node->axis();
+    auto first_shape = loco::shape_get(node->values(0)).as<loco::TensorShape>();
+
+    loco::TensorShape output_shape;
+
+    // Start from the first input's shape; rank of every input must match it.
+    output_shape.rank(first_shape.rank());
+    for (uint32_t i = 0; i < output_shape.rank(); ++i)
+      output_shape.dim(i) = first_shape.dim(i);
+
+    // Fold in the remaining inputs: accumulate the extent along `axis`,
+    // and require all other dimensions to agree.
+    for (uint32_t i = 1; i < node->numValues(); ++i)
+    {
+      auto input_shape = loco::shape_get(node->values(i)).as<loco::TensorShape>();
+
+      for (uint32_t j = 0; j < output_shape.rank(); ++j)
+      {
+        if (j == axis)
+          output_shape.dim(j) = output_shape.dim(j).value() + input_shape.dim(j).value();
+        else
+          assert(output_shape.dim(j) == input_shape.dim(j));
+      }
+    }
+
+    return loco::NodeShape{output_shape};
+  }
loco::NodeShape visit(const locoex::TFLConst *node) final
{
return loco::dtype_get(node->value());
}
- // TODO TFLConcatenation
+  /// @brief Infer output dtype of TFLConcatenation: all inputs must share one
+  ///        dtype (checked in debug builds), which is propagated to the output.
+  loco::DataType visit(const locoex::TFLConcatenation *node) final
+  {
+    // TODO Support when TFLConcatenation has 0 input
+    assert(node->numValues() > 0);
+
+    // Every adjacent pair must agree, hence all inputs share the same dtype.
+    for (uint32_t i = 1; i < node->numValues(); ++i)
+      assert(loco::dtype_get(node->values(i - 1)) == loco::dtype_get(node->values(i)));
+
+    return loco::dtype_get(node->values(0));
+  }
loco::DataType visit(const locoex::TFLConst *node) final { return node->dtype(); }
KNOB_BOOL(ConvertRelu, true, Convert loco::Relu to TFLRelu)
// TODO loco::ReLU6
// TODO loco::Tanh
-// TODO loco::TensorConcat
+KNOB_BOOL(ConvertTensorConcat, false, Convert loco::TensorConcat to TFLConcatenate)
// TODO loco::TensorBiasAdd
KNOB_BOOL(ConvertTensorBroadcast, true, Resolve loco::TensorBroadcast)
// TODO loco::TensorSoftmax