#include <app_context.h>
#include <base_properties.h>
#include <common_properties.h>
+#include <connection.h>
#include <layer_node.h>
#include <nntrainer_error.h>
#include <nntrainer_log.h>
std::get<std::vector<props::InputConnection>>(*l.layer_node_props);
out << "[" << l.getName() << '/' << l.getType() << "]\n";
- auto print_vector = [&out](const auto &layers, const std::string &title) {
- out << title << "[" << layers.size() << "] ";
- for (auto &layer : layers) {
- out << to_string(layer) << ' ';
+ auto print_vector = [&out](const auto &cons, const std::string &title) {
+ out << title << "[" << cons.size() << "] ";
+ for (auto &con : cons) {
+ out << con.toString() << ' ';
}
out << '\n';
};
- print_vector(input_connections, " input_connections");
- // print_vector(l.output_connections, "output_connections");
+ auto print_vector_2 = [&out](const auto &cons, const std::string &title) {
+ out << title << "[" << cons.size() << "] ";
+ for (auto &con : cons) {
+ out << con->toString() << ' ';
+ }
+ out << '\n';
+ };
+
+ print_vector(
+ std::vector<Connection>(input_connections.begin(), input_connections.end()),
+ " input_connections");
+ print_vector_2(l.output_connections, "output_connections");
return out;
}
--- /dev/null
+#!/usr/bin/env python3
+# SPDX-License-Identifier: Apache-2.0
+##
+# Copyright (C) 2021 Jihoon Lee <jhoon.it.lee@samsung.com>
+#
+# @file genModelsMultiout_v2.py
+# @date 24 November 2021
+# @brief Generate multiout model tcs
+# @author Jihoon Lee <jhoon.it.lee@samsung.com>
+
+from recorder_v2 import record_v2, inspect_file
+import torch
+
+
+class SplitAndJoin(torch.nn.Module):
+    # Reference model: split a 2-wide fc output into two 1-wide slices,
+    # run each slice through its own fc branch, then join by addition.
+    def __init__(self):
+        super().__init__()
+        self.fc = torch.nn.Linear(3, 2)  # 3 -> 2, output feeds the split
+        self.fc1 = torch.nn.Linear(1, 3)  # branch for split slice 0
+        self.fc2 = torch.nn.Linear(1, 3)  # branch for split slice 1
+        self.loss = torch.nn.MSELoss()
+
+    def forward(self, inputs, labels):
+        # inputs/labels are sequences of tensors; only the first is used
+        out = self.fc(inputs[0])
+        a0, a1 = torch.split(out, 1, dim=1)  # two (batch, 1) slices
+        out = self.fc1(a0) + self.fc2(a1)  # join: elementwise sum of branches
+        loss = self.loss(out, labels[0])
+        return out, loss
+
+
+class OneToOne(torch.nn.Module):
+    # Reference model: split the fc output and immediately re-join by
+    # addition, so each split slice is consumed exactly once.
+    def __init__(self):
+        super().__init__()
+        self.fc = torch.nn.Linear(3, 2)  # 3 -> 2, output feeds the split
+        self.loss = torch.nn.MSELoss()
+
+    def forward(self, inputs, labels):
+        out = self.fc(inputs[0])
+        a0, a1 = torch.split(out, 1, dim=1)  # two (batch, 1) slices
+        out = a0 + a1  # rejoin without any intermediate layers
+        loss = self.loss(out, labels[0])
+        return out, loss
+
+class OneToMany(torch.nn.Module):
+    # Reference model: split slices are consumed more than once
+    # (a0 and a1 each feed both b0 and c0; a2 feeds d0 directly).
+    def __init__(self):
+        super().__init__()
+        self.fc = torch.nn.Linear(2, 3)  # 2 -> 3, output feeds a 3-way split
+        self.loss = torch.nn.MSELoss()
+
+    def forward(self, inputs, labels):
+        out = self.fc(inputs[0])
+        a0, a1, a2 = torch.split(out, 1, dim=1)  # three (batch, 1) slices
+        b0 = a0 + a1
+        c0 = a0 + a1  # intentionally same inputs as b0: a0/a1 fan out twice
+        d0 = b0 + c0 + a2
+        loss = self.loss(d0, labels[0])
+        return d0, loss
+
+if __name__ == "__main__":
+    # Record 2-iteration golden traces for each multi-out topology.
+    # The resulting *.nnmodelgolden files are consumed by the C++ model
+    # tests; input/label dims are (batch=5, feature) per model.
+    record_v2(
+        SplitAndJoin(),
+        iteration=2,
+        input_dims=[(5, 3)],
+        label_dims=[(5, 3)],
+        name="split_and_join"
+    )
+
+    record_v2(
+        OneToOne(),
+        iteration=2,
+        input_dims=[(5, 3)],
+        label_dims=[(5, 1)],
+        name="one_to_one"
+    )
+
+    record_v2(
+        OneToMany(),
+        iteration=2,
+        input_dims=[(5, 2)],
+        label_dims=[(5, 1)],
+        name="one_to_many"
+    )
+
+    # debug aid: dump the last recorded golden file for manual inspection
+    inspect_file("one_to_many.nnmodelgolden")
/// D
static std::unique_ptr<NeuralNetwork> split_and_join() {
std::unique_ptr<NeuralNetwork> nn(new NeuralNetwork());
- nn->setProperty({"batch_size=1"});
+ nn->setProperty({"batch_size=5"});
auto graph = makeGraph({
- {"fully_connected", {"name=fc", "input_shape=1:1:2", "unit=2"}},
+ {"fully_connected", {"name=fc", "input_shape=1:1:3", "unit=2"}},
{"split", {"name=a", "input_layers=fc", "axis=3"}},
- {"fully_connected", {"name=b", "input_layers=a(0)", "unit=2"}},
- {"fully_connected", {"name=c", "input_layers=a(1)", "unit=2"}},
+ {"fully_connected", {"name=c", "input_layers=a(1)", "unit=3"}},
+ {"fully_connected", {"name=b", "input_layers=a(0)", "unit=3"}},
{"addition", {"name=d", "input_layers=b,c"}},
- {"constant_derivative", {"name=loss", "input_layers=d"}},
+ {"mse", {"name=loss", "input_layers=d"}},
});
for (auto &node : graph) {
nn->addLayer(node);
/// v v
/// (a0, a1)
/// B
-[[maybe_unused]] static std::unique_ptr<NeuralNetwork> one_to_one() {
+static std::unique_ptr<NeuralNetwork> one_to_one() {
std::unique_ptr<NeuralNetwork> nn(new NeuralNetwork());
- nn->setProperty({"batch_size=1"});
+ nn->setProperty({"batch_size=5"});
auto graph = makeGraph({
- {"fully_connected", {"name=fc", "input_shape=1:1:2", "unit=2"}},
+ {"fully_connected", {"name=fc", "input_shape=1:1:3", "unit=2"}},
{"split", {"name=a", "input_layers=fc", "axis=3"}},
{"addition", {"name=b", "input_layers=a(0),a(1)"}},
- {"constant_derivative", {"name=loss", "input_layers=b"}},
+ {"mse", {"name=loss", "input_layers=b"}},
});
for (auto &node : graph) {
nn->addLayer(node);
/// v v
/// (a0, a1)
/// B
-[[maybe_unused]] static std::unique_ptr<NeuralNetwork> one_to_one_reversed() {
+static std::unique_ptr<NeuralNetwork> one_to_one_reversed() {
std::unique_ptr<NeuralNetwork> nn(new NeuralNetwork());
- nn->setProperty({"batch_size=1"});
+ nn->setProperty({"batch_size=5"});
auto graph = makeGraph({
- {"fully_connected", {"name=fc", "input_shape=1:1:2", "unit=2"}},
+ {"fully_connected", {"name=fc", "input_shape=1:1:3", "unit=2"}},
{"split", {"name=a", "input_layers=fc", "axis=3"}},
{"addition", {"name=b", "input_layers=a(1),a(0)"}},
- {"constant_derivative", {"name=loss", "input_layers=b"}},
+ {"mse", {"name=loss", "input_layers=b"}},
});
for (auto &node : graph) {
nn->addLayer(node);
/// A has two output tensor a1, a2 and B and C takes it
/// A
-/// (a0, a1)
-/// | \ |-------
-/// | - + - \ |
-/// v v v v
-/// (a0, a1) (a0, a1)
-/// B C
-/// (b0) (c0)
-/// \ /
-/// \ /
+/// (a0, a1, a2)---------------->(a2)
+/// | \ |------- E
+/// | - + - \ | /
+/// v v v v /
+/// (a0, a1) (a0, a1) /
+/// B C /
+/// (b0) (c0) /
+/// \ / /
+/// \ /----------
/// v
/// (d0)
/// D
-[[maybe_unused]] static std::unique_ptr<NeuralNetwork> one_to_many() {
+static std::unique_ptr<NeuralNetwork> one_to_many() {
std::unique_ptr<NeuralNetwork> nn(new NeuralNetwork());
- nn->setProperty({"batch_size=1"});
+ nn->setProperty({"batch_size=5"});
auto graph = makeGraph({
- {"fully_connected", {"name=fc", "input_shape=1:1:2", "unit=2"}},
+ {"fully_connected", {"name=fc", "input_shape=1:1:2", "unit=3"}},
{"split", {"name=a", "input_layers=fc", "axis=3"}},
{"addition", {"name=b", "input_layers=a(0),a(1)"}},
{"addition", {"name=c", "input_layers=a(0),a(1)"}},
- {"addition", {"name=d", "input_layers=b,c"}},
- {"constant_derivative", {"name=loss", "input_layers=d"}},
+ {"addition", {"name=d", "input_layers=b,c,a(2)"}},
+ {"mse", {"name=loss", "input_layers=d"}},
});
+
for (auto &node : graph) {
nn->addLayer(node);
}
INSTANTIATE_TEST_CASE_P(
multiInoutModels, nntrainerModelTest,
::testing::ValuesIn({
- mkModelTc_V2(split_and_join, "split_and_join",
- ModelTestOption::SAVE_AND_LOAD_V2),
+ mkModelTc_V2(split_and_join, "split_and_join", ModelTestOption::ALL_V2),
+ mkModelTc_V2(one_to_one, "one_to_one", ModelTestOption::ALL_V2),
+ mkModelTc_V2(one_to_one_reversed, "one_to_one__reversed",
+ ModelTestOption::ALL_V2),
+ mkModelTc_V2(one_to_many, "one_to_many", ModelTestOption::ALL_V2),
}),
[](const testing::TestParamInfo<nntrainerModelTest::ParamType> &info) {
return std::get<1>(info.param);