[ Compiler ] Implement bn realizer with test
author: jijoong.moon <jijoong.moon@samsung.com>
Mon, 18 Apr 2022 05:55:34 +0000 (14:55 +0900)
committer: Jijoong Moon <jijoong.moon@samsung.com>
Tue, 26 Apr 2022 01:36:33 +0000 (10:36 +0900)
This patch completes the bn realizer for inference.
This realizer is used only for inference, so each bn layer is assumed
to have a 1-to-1 connection (one input and one output) in the model
graph. If a layer has multiple output connections, a multi-out layer
follows it during compilation, which guarantees that every bn layer
keeps the required 1-to-1 in/output.

**Self evaluation:**
1. Build test:  [X]Passed [ ]Failed [ ]Skipped
2. Run test:  [X]Passed [ ]Failed [ ]Skipped

Signed-off-by: jijoong.moon <jijoong.moon@samsung.com>
nntrainer/compiler/bn_realizer.cpp
nntrainer/layers/layer_node.cpp
test/include/nntrainer_test_util.h
test/nntrainer_test_util.cpp
test/unittest/compiler/unittest_realizer.cpp

index d441e4c..a3cbbe0 100644 (file)
 #include <unordered_map>
 
 namespace nntrainer {
+
+static constexpr size_t SINGLE_INOUT_IDX = 0;
+
 BnRealizer::BnRealizer() {}
 
 BnRealizer::~BnRealizer() {}
 
 GraphRepresentation BnRealizer::realize(const GraphRepresentation &reference) {
   std::unordered_map<std::string, LayerNode *> existing_nodes;
+  std::vector<LayerNode *> bn_layers;
 
   std::transform(
     reference.begin(), reference.end(),
     std::inserter(existing_nodes, existing_nodes.end()),
     [](auto &node) { return std::pair(node->getName(), node.get()); });
 
-  // NYI
+  for (auto &node : reference) {
+    if (istrequal(node->getType(), "batch_normalization")) {
+      bn_layers.push_back(node.get());
+    }
+  }
+
+  for (auto iter = bn_layers.begin(); iter != bn_layers.end(); ++iter) {
+    auto node = (*iter);
+    auto &input_name = node->getInputConnectionName(SINGLE_INOUT_IDX);
+    auto input_node = existing_nodes.at(input_name);
+
+    for (unsigned int i = 0; i < input_node->getNumOutputConnections(); ++i) {
+      if (istrequal(node->getName(),
+                    input_node->getOutputConnection(i)->getName())) {
+        input_node->setOutputConnection(
+          i, node->getOutputConnection(SINGLE_INOUT_IDX)->getName(),
+          SINGLE_INOUT_IDX);
+      }
+    }
+
+    auto &output_name = node->getOutputConnection(SINGLE_INOUT_IDX)->getName();
+    auto output_node = existing_nodes.at(output_name);
+
+    for (unsigned int i = 0; i < output_node->getNumInputConnections(); ++i) {
+      if (istrequal(node->getName(), output_node->getInputConnectionName(i))) {
+        output_node->setInputConnectionName(
+          i, node->getInputConnectionName(SINGLE_INOUT_IDX));
+      }
+    }
+  }
+
+  GraphRepresentation processed;
+  for (auto &node : reference) {
+    if (!istrequal(node->getType(), "batch_normalization")) {
+      processed.push_back(node);
+    }
+  }
 
-  return reference;
+  return processed;
 }
 
 } // namespace nntrainer
index 458df5b..4c669f7 100644 (file)
@@ -223,9 +223,10 @@ void LayerNode::setOutputConnection(unsigned nth, const std::string &name,
   }
 
   auto &con = output_connections[nth];
-  NNTR_THROW_IF(con, std::invalid_argument)
-    << "cannot override connection, this slot is reserved for "
-    << con->toString();
+  // Should be override connection for the batch normalization realizer
+  // NNTR_THROW_IF(con, std::invalid_argument)
+  //   << "cannot override connection, this slot is reserved for "
+  //   << con->toString();
 
   con = std::make_unique<Connection>(name, index);
 }
index 97bfab6..d28a09f 100644 (file)
@@ -208,6 +208,15 @@ nntrainer::GraphRepresentation
 makeGraph(const std::vector<LayerRepresentation> &layer_reps);
 
 /**
+ * @brief make graph of a representation after compile
+ *
+ * @param layer_reps layer representation (pair of type, properties)
+ * @return nntrainer::GraphRepresentation synthesized graph representation
+ */
+nntrainer::GraphRepresentation
+makeGraph_V2(const std::vector<LayerRepresentation> &layer_reps);
+
+/**
  * @brief read tensor after reading tensor size
  *
  * @param t tensor to fill
index dbfefc0..44f6ae1 100644 (file)
@@ -218,6 +218,30 @@ makeGraph(const std::vector<LayerRepresentation> &layer_reps) {
   return graph_rep;
 }
 
+nntrainer::GraphRepresentation
+makeGraph_V2(const std::vector<LayerRepresentation> &layer_reps) {
+  static auto &ac = nntrainer::AppContext::Global();
+
+  nntrainer::GraphRepresentation graph_rep;
+  auto model_graph = nntrainer::NetworkGraph();
+  for (auto &layer_representation : layer_reps) {
+    std::shared_ptr<nntrainer::LayerNode> layer = nntrainer::createLayerNode(
+      ac.createObject<nntrainer::Layer>(layer_representation.first),
+      layer_representation.second);
+    model_graph.addLayer(layer);
+  }
+  // compile with loss
+  model_graph.compile("mse");
+
+  for (auto &node : model_graph.getLayerNodes()) {
+    graph_rep.push_back(node);
+  }
+
+  // remove loss layer
+  graph_rep.pop_back();
+  return graph_rep;
+}
+
 void sizeCheckedReadTensor(nntrainer::Tensor &t, std::ifstream &file,
                            const std::string &error_msg) {
   unsigned int sz = 0;
index 43d74cc..b77a54d 100644 (file)
@@ -14,6 +14,7 @@
 #include <vector>
 
 #include <activation_realizer.h>
+#include <bn_realizer.h>
 #include <connection.h>
 #include <flatten_realizer.h>
 #include <input_realizer.h>
@@ -44,6 +45,15 @@ static void realizeAndEqual(GraphRealizer &realizer,
   graphEqual(processed, expected_graph);
 }
 
+static void
+compileAndRealizeAndEqual(GraphRealizer &realizer,
+                          const std::vector<LayerRepresentation> &input,
+                          const std::vector<LayerRepresentation> &expected) {
+  auto processed = realizer.realize(makeGraph_V2(input));
+  auto expected_graph = makeGraph(expected);
+  graphEqual(processed, expected_graph);
+}
+
 TEST(FlattenRealizer, flatten_p) {
   FlattenRealizer fr;
 
@@ -764,3 +774,38 @@ TEST(ActivationRealizer, activation_unknown_n) {
 
   EXPECT_ANY_THROW(realizeAndEqual(ar, before, {}));
 }
+
+TEST(BnRealizer, bn_realizer_p) {
+  /// realization without identifying custom input
+  std::vector<LayerRepresentation> before = {
+    {"fully_connected", {"name=fc1"}},
+    {"batch_normalization",
+     {"name=bn1", "input_layers=fc1"}}, // auto connected to fc 1
+    {"activation",
+     {"name=ac1", "activation=relu",
+      "input_layers=bn1"}}, // auto connected to bn 1
+    {"fully_connected",
+     {"name=fc2", "input_layers=ac1"}}, // auto connected to ac 1
+    {"batch_normalization",
+     {"name=bn2", "input_layers=fc2"}}, // auto connected to fc 2
+    {"activation",
+     {"name=ac2", "activation=relu",
+      "input_layers=bn2"}}, // auto connected to fc 2
+    {"fully_connected",
+     {"name=fc3", "input_layers=ac2"}}, // auto connected to ac 2
+  };
+  std::vector<LayerRepresentation> after = {
+    {"fully_connected", {"name=fc1"}},
+    {"activation",
+     {"name=ac1", "activation=relu",
+      "input_layers=fc1"}}, // auto connected to fc 1
+    {"fully_connected", {"name=fc2", "input_layers=ac1"}},
+    {"activation",
+     {"name=ac2", "activation=relu",
+      "input_layers=fc2"}}, // auto connected to fc 1
+    {"fully_connected",
+     {"name=fc3", "input_layers=ac2"}}, // auto connected to fc 3
+  };
+  BnRealizer r({});
+  compileAndRealizeAndEqual(r, before, after);
+}