file(GLOB SOURCES "src/*.cc")
file(GLOB_RECURSE SOURCES_FRONTEND "src/frontend/*.cc")
+file(GLOB_RECURSE SOURCES_MIDDLEEND "src/middleend/*.cc")
file(GLOB SOURCES_BACKEND "src/backend/*.cc")
file(GLOB_RECURSE SOURCES_INTERNAL "src/internal/*.cc")
file(GLOB_RECURSE SOURCES_GRAPH "src/graph/*.cc")
file(GLOB_RECURSE SOURCES_VERIFIER "src/verifier/*.cc")
# FIX: src/util was previously globbed into SOURCES_VERIFIER, silently discarding
# the verifier sources collected on the line above. Use a dedicated variable.
file(GLOB_RECURSE SOURCES_UTIL "src/util/*.cc")
-set(SOURCES ${SOURCES} ${SOURCES_FRONTEND} ${SOURCES_BACKEND} ${SOURCES_INTERNAL} ${SOURCES_GRAPH} ${SOURCES_LINEAR} ${SOURCES_CODEGEN} ${SOURCES_DUMPER} ${SOURCES_VERIFIER})
+set(SOURCES ${SOURCES} ${SOURCES_FRONTEND} ${SOURCES_MIDDLEEND} ${SOURCES_BACKEND} ${SOURCES_INTERNAL} ${SOURCES_GRAPH} ${SOURCES_LINEAR} ${SOURCES_CODEGEN} ${SOURCES_DUMPER} ${SOURCES_VERIFIER} ${SOURCES_UTIL})
# NOTE For now ARMCompute is necessary
# TODO Remove the required package below (it should be optional)
#include "codegen/IPlanBuilder.h"
#include "codegen/Planner.h"
#include "codegen/PlanBuilder.h"
+#include "middleend/SubTensorAnalyzer.h"
#include "linear/Linear.h"
#include "util/EnvVar.h"
// Dump ops
linear->accept(neurun::graph::dumper::Dumper{});
+ // SubTensorInfo should be generated after lower, before planner & finalize
+ // lower: decide backend and insert permutation
+ // planner: stage generate (use SubTensorInfo to return stage. prepare to optimization)
+ // finalize: generate tensor using subtensor info, then execute stage
+ // Generated SubTensorInfo is in operand(Object)
+ // for easy pass SubTensorInfo to plan builder and tensor builder
+ linear->accept(neurun::middleend::SubTensorAnalyzer{graph.operands()});
+
neurun::codegen::PlanBuilder plan_builder{plan};
auto tensor_builders = linear->markTensors();
--- /dev/null
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "SubTensorAnalyzer.h"
+
+#include <typeinfo>
+
+#include "nnfw/std/memory.h"
+#include "internal/Convert.h"
+#include "graph/operand/Set.h"
+#include "codegen/IPlanBuilder.h"
+#include "graph/operation/LowerInfo.h"
+#include "logging.h"
+
+namespace neurun
+{
+namespace middleend
+{
+
+void SubTensorAnalyzer::visit(const graph::operation::Conv2D::Implicit::Node &)
+{
+  // DO NOTHING - Conv2D does not produce any sub-tensor (parent/child) relation
+}
+
+void SubTensorAnalyzer::visit(const graph::operation::MaxPool2D::Implicit::Node &)
+{
+  // DO NOTHING - MaxPool2D does not produce any sub-tensor (parent/child) relation
+}
+
+void SubTensorAnalyzer::visit(const graph::operation::AvgPool2D::Implicit::Node &)
+{
+  // DO NOTHING - AvgPool2D does not produce any sub-tensor (parent/child) relation
+}
+
+void SubTensorAnalyzer::visit(const graph::operation::Concat::Node &node)
+{
+  // If operator is concat (or other operators related with subsumption), fill subsumption info
+  // TODO: if one tensor is subset of many parents or model input
+  // Solution 1. Handle 1st parent only, ignore others (need to invert for other children)
+  // Solution 2. Insert copy operation for other parents
+  auto axis_index = node.param().axis_index;
+
+  // To prepare concat elimination, axis should be constant
+  // (a runtime-variable axis cannot be analyzed statically)
+  if (_ctx.at(axis_index).getUsage() != graph::operand::OperandUsage::CONSTANT)
+  {
+    VERBOSE(SUBTENSOR) << "Cannot handle non-constant axis" << std::endl;
+    return;
+  }
+
+  // NOTE This implementation assumes concat over feature depth
+  // TODO Remove this assumption
+  int32_t axis = _ctx.at(axis_index).asScalar<int32_t>();
+  if (axis != 3)
+  {
+    VERBOSE(SUBTENSOR) << "Cannot handle axis is not channel" << std::endl;
+    return;
+  }
+
+  auto &output_index = node.getOutputs().at(0);
+  auto &inputs = node.getInputs();
+
+  // Each input becomes a sub-tensor of the concat output, offset along the
+  // concat axis by the accumulated channel count of the preceding inputs
+  int32_t axis_point = 0;
+  for (auto &input_index : inputs)
+  {
+    auto input_shape_4D = _ctx.at(input_index).lower_info()->shape();
+    std::vector<int32_t> offset = {0, 0, 0, 0};
+    offset[axis] = axis_point;
+    graph::operand::ParentInfo::Coordinate4D coordinate_info(offset[0], offset[1], offset[2],
+                                                             offset[3]);
+    std::unique_ptr<graph::operand::ParentInfo> parentInfo =
+        nnfw::make_unique<graph::operand::ParentInfo>(output_index, coordinate_info);
+
+    // NOTE Not support multiple parent tensor yet
+    assert(_ctx.at(input_index).parent_info() == nullptr);
+    _ctx.at(input_index).parent_info(std::move(parentInfo));
+
+    // NOTE Only support when axis is 3(channel)
+    axis_point += input_shape_4D.c();
+  }
+}
+
+void SubTensorAnalyzer::visit(const graph::operation::FullyConnected::Node &)
+{
+  // DO NOTHING - FullyConnected does not produce any sub-tensor (parent/child) relation
+}
+
+void SubTensorAnalyzer::visit(const graph::operation::Reshape::Node &)
+{
+  // DO NOTHING - Reshape does not produce any sub-tensor (parent/child) relation
+}
+
+void SubTensorAnalyzer::visit(const graph::operation::Softmax::Node &)
+{
+  // DO NOTHING - Softmax does not produce any sub-tensor (parent/child) relation
+}
+
+void SubTensorAnalyzer::visit(const graph::operation::NOP::Node &)
+{
+  // DO NOTHING - NOP has no tensor effect at all
+}
+
+void SubTensorAnalyzer::visit(const graph::operation::Permute::Node &)
+{
+  // DO NOTHING - Permute does not produce any sub-tensor (parent/child) relation
+}
+
+void SubTensorAnalyzer::visit(const graph::operation::AddNode &)
+{
+  // DO NOTHING - element-wise Add does not produce any sub-tensor (parent/child) relation
+}
+
+} // namespace middleend
+} // namespace neurun
--- /dev/null
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * @file SubTensorAnalyzer.h
+ * @brief This file contains SubTensorAnalyzer to analyze tensor subsumption
+ * using operation visitor
+ */
+
+#ifndef __NEURUN_MIDDLEEND_SUBTENSOR_ANALYZER_H__
+#define __NEURUN_MIDDLEEND_SUBTENSOR_ANALYZER_H__
+
+#include "graph/operation/NodeVisitor.h"
+
+// Forward declaration only - keeps this header free of the heavy Set include
+namespace neurun
+{
+namespace graph
+{
+namespace operand
+{
+class Set;
+} // namespace operand
+} // namespace graph
+} // namespace neurun
+
+namespace neurun
+{
+namespace middleend
+{
+
+/**
+ * @brief Class to analyze tensor subsumption
+ *
+ * Visits each operation of a (lowered) linear graph; only Concat fills
+ * sub-tensor (parent) info into the operand set, all other visits are no-ops.
+ */
+class SubTensorAnalyzer : public graph::operation::NodeVisitor
+{
+public:
+  /**
+   * @brief Construct a new SubTensorAnalyzer object
+   * @param[in] ctx Graph operand set to annotate (not owned; must outlive this object)
+   */
+  // explicit: a single-argument constructor should not allow implicit conversion
+  explicit SubTensorAnalyzer(neurun::graph::operand::Set &ctx) : _ctx{ctx} {}
+
+public:
+  // NOTE `virtual` is redundant on overriding declarations; `override` alone suffices
+  void visit(const graph::operation::Conv2D::Implicit::Node &) override;
+  void visit(const graph::operation::MaxPool2D::Implicit::Node &) override;
+  void visit(const graph::operation::AvgPool2D::Implicit::Node &) override;
+  void visit(const graph::operation::Concat::Node &) override;
+  void visit(const graph::operation::Reshape::Node &) override;
+  void visit(const graph::operation::FullyConnected::Node &) override;
+  void visit(const graph::operation::Softmax::Node &) override;
+  void visit(const graph::operation::NOP::Node &) override;
+  void visit(const graph::operation::Permute::Node &) override;
+  void visit(const graph::operation::AddNode &) override;
+
+private:
+  neurun::graph::operand::Set &_ctx; // borrowed reference to the graph's operands
+};
+
+} // namespace middleend
+} // namespace neurun
+
+#endif // __NEURUN_MIDDLEEND_SUBTENSOR_ANALYZER_H__