[Test] Add test with dangling output
author Jihoon Lee <jhoon.it.lee@samsung.com>
Mon, 20 Dec 2021 07:26:21 +0000 (16:26 +0900)
committer Jijoong Moon <jijoong.moon@samsung.com>
Wed, 29 Dec 2021 07:48:32 +0000 (16:48 +0900)
This patch adds a test with a dangling output. The test passes the normal
run but fails the optimized run.

The current aim is to make this test pass the optimized run as well.
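
For context, a "dangling output" is a tensor the graph computes that never
reaches the loss. A minimal standalone sketch of the pattern in plain
PyTorch (the names below are illustrative, not taken from the patch):

```python
import torch

fc = torch.nn.Linear(3, 4)
head = torch.nn.Linear(1, 3)  # applied twice below, i.e. shared weights
loss_fn = torch.nn.MSELoss()

x = torch.randn(5, 3)
label = torch.randn(5, 3)

# split the fc output into four 1-wide slices; only a0 and a3
# reach the loss, so a1 and a2 are computed but left dangling
a0, a1, a2, a3 = torch.split(torch.sigmoid(fc(x)), 1, dim=1)
c0 = head(a0) + head(a3)
loss = loss_fn(torch.sigmoid(c0), label)
loss.backward()  # a1 and a2 receive no gradient
```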

**Self evaluation:**
1. Build test: [X]Passed [ ]Failed [ ]Skipped
2. Run test: [X]Passed [ ]Failed [ ]Skipped

Signed-off-by: Jihoon Lee <jhoon.it.lee@samsung.com>
packaging/unittest_models_multiout.tar.gz
test/input_gen/genModelsMultiout_v2.py
test/unittest/models/unittest_models_multiout.cpp

index 14c42f0..02c486b 100644 (file)
Binary files a/packaging/unittest_models_multiout.tar.gz and b/packaging/unittest_models_multiout.tar.gz differ
index aa3a8ea..6fff417 100644 (file)
@@ -28,6 +28,39 @@ class SplitAndJoin(torch.nn.Module):
         return out, loss
 
 
+class SplitAndJoinDangle(torch.nn.Module):
+    def __init__(self):
+        super().__init__()
+        self.fc = torch.nn.Linear(3, 4)
+        self.sigmoid = torch.nn.Sigmoid()
+        self.fc1 = torch.nn.Linear(1, 3)
+        self.loss = torch.nn.MSELoss()
+
+    def forward(self, inputs, labels):
+        out = self.fc(inputs[0])
+        #         input
+        #        (split)
+        #   a0   a1   a2   a3
+        #  (fc)           (fc)
+        #   b0             b3
+        #         (add)
+        #          c0
+        #
+        # only c0 is fed to the loss; a1 and a2 are left dangling
+        a0, a1, a2, a3 = torch.split(out, 1, dim=1)
+        a0 = self.sigmoid(a0)
+        a1 = self.sigmoid(a1)
+        a2 = self.sigmoid(a2)
+        a3 = self.sigmoid(a3)
+
+        b0 = self.fc1(a0)
+        b3 = self.fc1(a3)  # reuses the same fc1 weights as b0
+        c0 = b0 + b3
+        out = self.sigmoid(c0)
+        loss = self.loss(out, labels[0])
+        return out, loss
+
+
 class OneToOne(torch.nn.Module):
     def __init__(self):
         super().__init__()
@@ -41,6 +74,7 @@ class OneToOne(torch.nn.Module):
         loss = self.loss(out, labels[0])
         return out, loss
 
+
 class OneToMany(torch.nn.Module):
     def __init__(self):
         super().__init__()
@@ -56,6 +90,7 @@ class OneToMany(torch.nn.Module):
         loss = self.loss(d0, labels[0])
         return d0, loss
 
+
 if __name__ == "__main__":
     record_v2(
         SplitAndJoin(),
@@ -66,6 +101,14 @@ if __name__ == "__main__":
     )
 
     record_v2(
+        SplitAndJoinDangle(),
+        iteration=3,
+        input_dims=[(5, 3)],
+        label_dims=[(5, 3)],
+        name="split_and_join_dangle"
+    )
+
+    record_v2(
         OneToOne(),
         iteration=2,
         input_dims=[(5, 3)],
@@ -81,4 +124,4 @@ if __name__ == "__main__":
         name="one_to_many"
     )
 
-    inspect_file("one_to_many.nnmodelgolden")
+#    inspect_file("split_and_join_dangle.nnmodelgolden")
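
The inspect_file call above is left commented out so generation runs stay
quiet; to eyeball the recorded golden data locally it can be re-enabled. A
sketch of the intended usage from this script's __main__ (assuming
inspect_file comes from the same recorder module as record_v2):

```python
# hypothetical import path, mirroring this script's existing imports
from recorder_v2 import record_v2, inspect_file

record_v2(
    SplitAndJoinDangle(),
    iteration=3,
    input_dims=[(5, 3)],
    label_dims=[(5, 3)],
    name="split_and_join_dangle",
)
inspect_file("split_and_join_dangle.nnmodelgolden")  # dump recorded tensors
```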
index db1452f..6d874dc 100644 (file)
@@ -137,6 +137,42 @@ static std::unique_ptr<NeuralNetwork> one_to_many() {
   return nn;
 }
 
+///         input
+///        (split)
+///   a0   a1   a2   a3
+///  (fc)           (fc)
+///   b0             b1
+///         (add)
+///          c0
+///
+/// only c0 is fed to the loss; a1 and a2 are left dangling
+/// @note we do not support an explicit layer with a dangling output; it is
+/// also unclear whether this should be supported.
+static std::unique_ptr<NeuralNetwork> split_and_join_dangle() {
+  std::unique_ptr<NeuralNetwork> nn(new NeuralNetwork());
+  nn->setProperty({"batch_size=5"});
+
+  auto graph = makeGraph({
+    {"fully_connected", {"name=fc", "input_shape=1:1:3", "unit=4"}},
+    {"split", {"name=a", "input_layers=fc", "axis=3"}},
+    {"activation", {"name=a0", "activation=sigmoid", "input_layers=a(0)"}},
+    {"activation", {"name=a3", "activation=sigmoid", "input_layers=a(3)"}},
+    {"fully_connected",
+     {"name=b0", "input_layers=a0", "unit=3", "shared_from=b0"}},
+    {"fully_connected",
+     {"name=b1", "input_layers=a3", "unit=3", "shared_from=b0"}},
+    {"addition", {"name=c0", "input_layers=b0,b1", "activation=sigmoid"}},
+    {"mse", {"name=loss", "input_layers=c0"}},
+  });
+
+  for (auto &node : graph) {
+    nn->addLayer(node);
+  }
+
+  nn->setOptimizer(ml::train::createOptimizer("sgd", {"learning_rate = 0.1"}));
+  return nn;
+}
+
 INSTANTIATE_TEST_CASE_P(
   multiInoutModels, nntrainerModelTest,
   ::testing::ValuesIn({
@@ -145,6 +181,9 @@ INSTANTIATE_TEST_CASE_P(
     mkModelTc_V2(one_to_one_reversed, "one_to_one__reversed",
                  ModelTestOption::ALL_V2),
     mkModelTc_V2(one_to_many, "one_to_many", ModelTestOption::ALL_V2),
+    mkModelTc_V2(split_and_join_dangle, "split_and_join_dangle",
+                 ModelTestOption::NO_THROW_RUN_V2),
+    // TODO: switch to ModelTestOption::ALL_V2 once the optimized run passes
   }),
   [](const testing::TestParamInfo<nntrainerModelTest::ParamType> &info) {
     return std::get<1>(info.param);