Remove SetIO test's dependency with frontend (#4742)
author오형석/On-Device Lab(SR)/Staff Engineer/삼성전자 <hseok82.oh@samsung.com>
Thu, 14 Mar 2019 23:41:28 +0000 (08:41 +0900)
committer박세희/On-Device Lab(SR)/Principal Engineer/삼성전자 <saehie.park@samsung.com>
Thu, 14 Mar 2019 23:41:28 +0000 (08:41 +0900)
Remove SetIO test's dependency on the frontend by using neurun core APIs directly
Prepare to divide the tests into frontend-unittest, core-unittest and backend-unittest.

Signed-off-by: Hyeongseok Oh <hseok82.oh@samsung.com>
runtimes/neurun/test/frontend/nnapi/model.cc [moved from runtimes/neurun/test/model.cc with 100% similarity]
runtimes/neurun/test/graph/operation/SetIO.cc

index ccba87d..ec57774 100644 (file)
 #include "graph/Graph.h"
 #include "model/operand/Index.h"
 #include "model/operand/IndexSet.h"
-#include "frontend/wrapper/OperationFactory.h"
+#include "model/operation/Conv2DNode.h"
+#include "model/operation/ConcatNode.h"
+
+#include <cpp14/memory.h>
 
 #include <stdexcept>
 
@@ -35,22 +38,29 @@ TEST(graph_operation_setIO, operation_setIO_conv)
   shape.dim(0) = 3;
 
   // Add Conv
-  std::vector<uint32_t> params;
-  for (int i = 0; i < 7; ++i)
-  {
-    params.emplace_back(graph.addOperand(shape, type).value());
-  }
-  uint32_t outoperand = graph.addOperand(shape, type).value();
+  using GraphNode = neurun::model::operation::Conv2DNode;
+
+  auto input_operand = graph.addOperand(shape, type);
+  auto kernel_operand = graph.addOperand(shape, type);
+  auto bias_operand = graph.addOperand(shape, type);
+  IndexSet inputs{input_operand, kernel_operand, bias_operand};
 
-  using GraphNode = neurun::model::operation::Node;
+  GraphNode::Param conv_params;
+  conv_params.explicit_padding = false;
+  conv_params.padding_code_index = graph.addOperand(shape, type);
+  conv_params.hstride_index = graph.addOperand(shape, type);
+  conv_params.vstride_index = graph.addOperand(shape, type);
+  conv_params.activation_index = graph.addOperand(shape, type);
 
-  auto conv = std::unique_ptr<GraphNode>{OperationFactory::instance().create(
-      ANEURALNETWORKS_CONV_2D, {7, params.data(), 1, &outoperand})};
+  auto output_operand = graph.addOperand(shape, type).value();
+  IndexSet outputs{output_operand};
+
+  auto conv = nnfw::cpp14::make_unique<GraphNode>(inputs, outputs, conv_params);
 
   ASSERT_NE(conv, nullptr);
-  ASSERT_EQ(conv->getInputs().at(Index{0}).value(), params[0]);
+  ASSERT_EQ(conv->getInputs().at(Index{0}).value(), inputs.at(0).value());
   conv->setInputs({8, 9, 10});
-  ASSERT_NE(conv->getInputs().at(Index{0}).value(), params[0]);
+  ASSERT_NE(conv->getInputs().at(Index{0}).value(), inputs.at(0).value());
   ASSERT_EQ(conv->getInputs().at(Index{0}).value(), 8);
 }
 
@@ -62,26 +72,30 @@ TEST(graph_operation_setIO, operation_setIO_concat)
   neurun::model::operand::TypeInfo type{neurun::model::operand::DataType::TENSOR_INT32, 0, 0};
   shape.dim(0) = 3;
 
+  using GraphNode = neurun::model::operation::ConcatNode;
+
   // Add Concat
-  std::vector<uint32_t> params;
-  for (int i = 0; i < 7; ++i)
+  IndexSet inputs;
+  for (int i = 0; i < 6; ++i)
   {
-    params.emplace_back(graph.addOperand(shape, type).value());
+    inputs.append(graph.addOperand(shape, type));
   }
-  uint32_t outoperand = graph.addOperand(shape, type).value();
 
-  using GraphNode = neurun::model::operation::Node;
+  GraphNode::Param concat_params;
+  concat_params.axis_index = graph.addOperand(shape, type);
+
+  auto output_operand = graph.addOperand(shape, type).value();
+  IndexSet outputs{output_operand};
 
-  auto concat = std::unique_ptr<GraphNode>{OperationFactory::instance().create(
-      ANEURALNETWORKS_CONCATENATION, {7, params.data(), 1, &outoperand})};
+  auto concat = nnfw::cpp14::make_unique<GraphNode>(inputs, outputs, concat_params);
 
   ASSERT_NE(concat, nullptr);
   ASSERT_EQ(concat->getInputs().size(), 6);
-  ASSERT_EQ(concat->getInputs().at(Index{0}).value(), params[0]);
+  ASSERT_EQ(concat->getInputs().at(Index{0}).value(), inputs.at(0).value());
 
   concat->setInputs({80, 6, 9, 11});
   ASSERT_EQ(concat->getInputs().size(), 4);
-  ASSERT_NE(concat->getInputs().at(Index{0}).value(), params[0]);
+  ASSERT_NE(concat->getInputs().at(Index{0}).value(), inputs.at(0).value());
   ASSERT_EQ(concat->getInputs().at(Index{0}).value(), 80);
   ASSERT_EQ(concat->getInputs().at(Index{2}).value(), 9);
   ASSERT_THROW(concat->getInputs().at(Index{5}), std::out_of_range);