$(NNTRAINER_ROOT)/nntrainer/compiler/recurrent_realizer.cpp \
$(NNTRAINER_ROOT)/nntrainer/compiler/remap_realizer.cpp \
$(NNTRAINER_ROOT)/nntrainer/compiler/slice_realizer.cpp \
+ $(NNTRAINER_ROOT)/nntrainer/compiler/input_realizer.cpp \
$(NNTRAINER_ROOT)/nntrainer/app_context.cpp
ifeq ($(ENABLE_TFLITE_INTERPRETER), 1)
--- /dev/null
+// SPDX-License-Identifier: Apache-2.0
+/**
+ * Copyright (C) 2021 Jihoon Lee <jhoon.it.lee@samsung.com>
+ *
+ * @file input_realizer.cpp
+ * @date 14 October 2021
+ * @brief NNTrainer graph realizer which remaps input to the external graph
+ * @see https://github.com/nnstreamer/nntrainer
+ * @author Jihoon Lee <jhoon.it.lee@samsung.com>
+ * @bug No known bugs except for NYI items
+ */
+#include <input_realizer.h>
+#include <layer_node.h>
+#include <nntrainer_error.h>
+#include <nntrainer_log.h>
+
+#include <algorithm>
+#include <unordered_map>
+
+namespace nntrainer {
+/// @brief Construct with the layers whose inputs will be patched
+/// (start_layers) and the external input identifiers to patch them with
+/// (input_layers); both lists are consumed positionally by realize().
+InputRealizer::InputRealizer(const std::vector<std::string> &start_layers,
+ const std::vector<std::string> &input_layers) :
+  start_layers(start_layers),
+  input_layers(input_layers) {}
+
+InputRealizer::~InputRealizer() {}
+
+/**
+ * @brief remap dangling inputs of start_layers to the given input_layers
+ *
+ * @param reference graph to base the realization on (shallow copied)
+ * @return GraphRepresentation realized graph
+ * @throw std::invalid_argument if a start layer is missing from the graph or
+ * more inputs are needed than were provided
+ */
+GraphRepresentation
+InputRealizer::realize(const GraphRepresentation &reference) {
+ std::unordered_map<std::string, LayerNode *> existing_nodes;
+
+ /// build name -> node lookup over the reference graph (non-owning pointers)
+ std::transform(
+ reference.begin(), reference.end(),
+ std::inserter(existing_nodes, existing_nodes.end()),
+ [](auto &node) { return std::pair(node->getName(), node.get()); });
+
+ /// if start_layers is empty, it's not a hard error but likely to be wrong if
+ /// there is more than one input; warn only in that case instead of on every
+ /// realize call
+ if (start_layers.empty()) {
+ ml_logw("trying to realize without start_layer specified, if there is "
+ "more than one input, the sort order may make the resulting graph "
+ "non-deterministic");
+ }
+
+ /// hands out the next unused entry of input_layers in order; throws when the
+ /// graph needs more inputs than the caller provided
+ auto get_next_input_ref = [input_ref_iter = input_layers.begin(),
+ this]() mutable {
+ NNTR_THROW_IF(input_ref_iter == input_layers.end(), std::invalid_argument)
+ << "there is no more input layers";
+ return input_ref_iter++;
+ };
+
+ for (auto &start_name : start_layers) {
+ auto iter = existing_nodes.find(start_name);
+ /// documented contract is std::invalid_argument on an ill-formed graph, so
+ /// do not let unordered_map::at throw std::out_of_range here
+ NNTR_THROW_IF(iter == existing_nodes.end(), std::invalid_argument)
+ << "cannot find the start layer named: " << start_name;
+ auto node = iter->second;
+ auto node_input_layers = node->getInputLayers();
+
+ if (node_input_layers.empty()) {
+ // case1. There is no input layers presented -> push single input
+ node_input_layers.push_back(*get_next_input_ref());
+ } else {
+ /// case2. There is multiple input layers -> substitute orphaned node
+ /// Orphaned node probably is being created from slicing or it is also a
+ /// possible scenario that the graph in the first place is designed to
+ /// have a orphaned node. In the latter case, the graph was non-compilable
+ /// from the first time.
+ for (auto &name : node_input_layers) {
+ if (!existing_nodes.count(name)) {
+ name = *get_next_input_ref();
+ }
+ }
+ }
+
+ node->setInputLayers(node_input_layers);
+ }
+
+ return reference;
+}
+
+} // namespace nntrainer
--- /dev/null
+// SPDX-License-Identifier: Apache-2.0
+/**
+ * Copyright (C) 2021 Jihoon Lee <jhoon.it.lee@samsung.com>
+ *
+ * @file input_realizer.h
+ * @date 14 October 2021
+ * @brief NNTrainer graph realizer which remaps input to the external graph
+ * @see https://github.com/nnstreamer/nntrainer
+ * @author Jihoon Lee <jhoon.it.lee@samsung.com>
+ * @bug No known bugs except for NYI items
+ */
+#ifndef __INPUTREMAP_REALIZER_H__
+#define __INPUTREMAP_REALIZER_H__
+
+#include <memory>
+#include <string>
+#include <vector>
+
+#include <realizer.h>
+
+namespace nntrainer {
+
+/**
+ * @brief Graph realizer class which remaps input from start -> input layers
+ * @note This class finds orphaned identifiers in order from start_layers and
+ * changes each identifier to one of input_layers. If a start layer does not
+ * have any input layers, a single input identifier is pushed; if it does have
+ * input layers, each one is checked for existence in the graph starting from
+ * the first, and every identifier that does not exist is replaced with the
+ * next given input layer in order.
+ *
+ */
+class InputRealizer final : public GraphRealizer {
+public:
+ /**
+ * @brief Construct a new Input Realizer object
+ *
+ * @param start_layers start layers whose inputs are to be (re)connected
+ * @param input_layers external input identifiers, consumed in order
+ */
+ InputRealizer(const std::vector<std::string> &start_layers,
+ const std::vector<std::string> &input_layers);
+
+ /**
+ * @brief Destroy the Graph Realizer object
+ *
+ */
+ ~InputRealizer();
+
+ /**
+ * @brief graph realizer creates a shallow copied graph based on the reference
+ * @note input realizer resets input_layers of start_layers so that it can be
+ * connected to the external network
+ * @param reference graph to base the realization on
+ * @return GraphRepresentation realized graph
+ * @throw std::invalid_argument if graph is ill formed
+ *
+ */
+ GraphRepresentation realize(const GraphRepresentation &reference) override;
+
+private:
+ std::vector<std::string> start_layers; /**< layers to receive new inputs */
+ std::vector<std::string> input_layers; /**< external input identifiers */
+};
+
+} // namespace nntrainer
+
+#endif // __INPUTREMAP_REALIZER_H__
'flatten_realizer.cpp',
'recurrent_realizer.cpp',
'remap_realizer.cpp',
- 'slice_realizer.cpp'
+ 'slice_realizer.cpp',
+ 'input_realizer.cpp'
]
compiler_headers = []
auto iter = cur_end_layers.find(name);
return iter != cur_end_layers.end();
};
+
while (!dfs_stack.empty()) {
auto &node_info = mp.at(dfs_stack.back());
auto &path = node_info.path;
#include <flatten_realizer.h>
#include <ini_interpreter.h>
#include <ini_wrapper.h>
+#include <input_realizer.h>
#include <model_loader.h>
#include <neuralnet.h>
#include <nntrainer_error.h>
std::vector<std::unique_ptr<GraphRealizer>> realizers;
realizers.emplace_back(new SliceRealizer(start_layers, end_layers));
+ if (!input_layers.empty()) {
+ realizers.emplace_back(new InputRealizer(start_layers, input_layers));
+ }
+
if (type == ml::train::ReferenceLayersType::RECURRENT) {
realizers.emplace_back(
new RecurrentRealizer(type_properties, input_layers));
}
- if (input_layers.empty()) {
- /// @todo add input setter realizer
- }
-
if (!scope.empty()) {
realizers.emplace_back(
new RemapRealizer([&scope, &input_layers](std::string &name) {
#include <vector>
#include <flatten_realizer.h>
+#include <input_realizer.h>
#include <realizer.h>
#include <recurrent_realizer.h>
#include <remap_realizer.h>
realizeAndEqual(r, before, after);
}
+
+TEST(InputRealizer, remap_p) {
+ /// start layers consume the input identifiers positionally: fc1 (no inputs)
+ /// gets in1 pushed; fc2 has its orphaned "none1" replaced by in2 while the
+ /// valid "fc1" connection is kept; fc3 has "none2"/"none3" replaced by
+ /// in3/in4 around the surviving "fc2" connection.
+
+ std::vector<LayerRepresentation> before = {
+ {"fully_connected", {"name=fc1"}}, // no input_layers specified
+ {"fully_connected",
+ {"name=fc2", "input_layers=none1,fc1"}}, // single orphaned node
+ {"fully_connected",
+ {"name=fc3", "input_layers=none2,fc2,none3"}}, // multi orphaned node
+ };
+
+ std::vector<LayerRepresentation> after = {
+ {"fully_connected",
+ {"name=fc1", "input_layers=in1"}}, // no input_layers specified
+ {"fully_connected",
+ {"name=fc2", "input_layers=in2,fc1"}}, // single orphaned node
+ {"fully_connected",
+ {"name=fc3", "input_layers=in3,fc2,in4"}}, // multi orphaned node
+ };
+
+ InputRealizer r({"fc1", "fc2", "fc3"}, {"in1", "in2", "in3", "in4"});
+ realizeAndEqual(r, before, after);
+}
EXPECT_THROW(NN.save("model.bin"), std::runtime_error);
}
-TEST(nntrainerModels, DISABLED_loadFromLayersBackbone_p) {
- // std::vector<std::shared_ptr<ml::train::Layer>> reference;
- // reference.emplace_back(ml::train::layer::FullyConnected({"name=fc1"}));
- // reference.emplace_back(ml::train::layer::FullyConnected({"name=layer1"}));
-
- // nntrainer::NeuralNetwork nn;
- // EXPECT_NO_THROW(nn.addWithReferenceLayers(
- // reference, ml::train::ReferenceLayersType::BACKBONE, "backbone",
- // {"out_source"}));
-
- // auto graph = nn.getFlatGraph();
- // for (unsigned int i = 0; i < graph.size(); ++i) {
- // EXPECT_EQ(graph.at(i)->getName(), "backbone/" +
- // reference.at(i)->getName());
- // };
+TEST(nntrainerModels, loadFromLayersBackbone_p) {
+ /// every layer of the backbone reference must appear in the flat graph with
+ /// the "backbone/" scope prefix prepended to its original name
+ std::vector<std::shared_ptr<ml::train::Layer>> reference;
+ reference.emplace_back(ml::train::layer::FullyConnected({"name=fc1"}));
+ reference.emplace_back(
+ ml::train::layer::FullyConnected({"name=fc2", "input_layers=fc1"}));
+
+ nntrainer::NeuralNetwork nn;
+ nn.addWithReferenceLayers(reference, "backbone", {}, {"fc1"}, {"fc2"},
+ ml::train::ReferenceLayersType::BACKBONE, {});
+
+ auto graph = nn.getFlatGraph();
+ for (unsigned int i = 0; i < graph.size(); ++i) {
+ EXPECT_EQ(graph.at(i)->getName(), "backbone/" + reference.at(i)->getName());
+ };
+}
TEST(nntrainerModels, DISABLED_loadFromLayersRecurrent_p) {
- // std::vector<std::shared_ptr<ml::train::Layer>> reference;
- // reference.emplace_back(
- // ml::train::layer::FullyConnected({"name=fc1", "input_layers=init"}));
- // reference.emplace_back(
- // ml::train::layer::FullyConnected({"name=fc2", "input_layers=fc1"}));
-
- // nntrainer::NeuralNetwork nn;
- // EXPECT_NO_THROW(nn.addWithReferenceLayers(
- // reference, ml::train::ReferenceLayersType::RECURRENT, "recurrent",
- // {"out_source"},
- // {
- // "unroll_for=3",
- // "return_sequences=true",
- // "input_layers=init",
- // "output_layers=fc2",
- // "recurrent_input=fc1",
- // "recurrent_output=fc2",
- // }));
-
- // std::vector<std::string> expected_node_names = {
- // "recurrent/fc1/0", "recurrent/fc2/0", "recurrent/fc1/1",
- // "recurrent/fc2/1", "recurrent/fc1/2", "recurrent/fc2/2",
- // "recurrent/fc2"};
- // std::vector<std::string> expected_input_layers = {
- // "out_source" /**< input substituted with external_input */,
- // "recurrent/fc1/0",
- // "recurrent/fc2/0",
- // "recurrent/fc1/1",
- // "recurrent/fc2/1",
- // "recurrent/fc1/2",
- // "recurrent/fc2/0" /**< out source's first input */,
- // };
-
- // auto graph = nn.getFlatGraph();
- // for (unsigned int i = 0; i < graph.size(); ++i) {
- // EXPECT_EQ(graph.at(i)->getName(), expected_node_names.at(i)) << "at " <<
- // i; EXPECT_EQ(graph.at(i)->getInputLayers().front(),
- // expected_input_layers.at(i))
- // << "at " << i;
- // };
+ /// the two-layer reference is unrolled 3 times (fc1/fc2 per step plus a
+ /// trailing output node); the external "out_source" is expected to replace
+ /// the dangling "init" input of the first unrolled fc1
+ std::vector<std::shared_ptr<ml::train::Layer>> reference;
+ reference.emplace_back(
+ ml::train::layer::FullyConnected({"name=fc1", "input_layers=init"}));
+ reference.emplace_back(
+ ml::train::layer::FullyConnected({"name=fc2", "input_layers=fc1"}));
+
+ nntrainer::NeuralNetwork nn;
+ nn.addWithReferenceLayers(reference, "recurrent", {"out_source"}, {"fc1"},
+ {"fc2"}, ml::train::ReferenceLayersType::RECURRENT,
+ {
+ "unroll_for=3",
+ "return_sequences=true",
+ "recurrent_input=fc1",
+ "recurrent_output=fc2",
+ });
+
+ std::vector<std::string> expected_node_names = {
+ "recurrent/fc1/0", "recurrent/fc2/0", "recurrent/fc1/1", "recurrent/fc2/1",
+ "recurrent/fc1/2", "recurrent/fc2/2", "recurrent/fc2"};
+ std::vector<std::string> expected_input_layers = {
+ "out_source" /**< input substituted with external_input */,
+ "recurrent/fc1/0",
+ "recurrent/fc2/0",
+ "recurrent/fc1/1",
+ "recurrent/fc2/1",
+ "recurrent/fc1/2",
+ "recurrent/fc2/0" /**< out source's first input */,
+ };
+
+ auto graph = nn.getFlatGraph();
+ for (unsigned int i = 0; i < graph.size(); ++i) {
+ EXPECT_EQ(graph.at(i)->getName(), expected_node_names.at(i)) << "at " << i;
+ EXPECT_EQ(graph.at(i)->getInputLayers().front(),
+ expected_input_layers.at(i))
+ << "at " << i;
+ };
}
/**