[unittest/gpu] Added LayerSemanticsGpu test suite
author Debadri Samaddar <s.debadri@samsung.com>
Tue, 21 May 2024 11:29:36 +0000 (16:59 +0530)
committer Jijoong Moon <jijoong.moon@samsung.com>
Tue, 4 Jun 2024 09:47:12 +0000 (18:47 +0900)
Added semantics test for layers on GPU.
Removed redundant run_context compute engine setter call.

Signed-off-by: Debadri Samaddar <s.debadri@samsung.com>
nntrainer/layers/layer_node.cpp
nntrainer/layers/layer_node.h
test/unittest/layers/layers_common_tests.h
test/unittest/layers/layers_dependent_common_tests.cpp
test/unittest/layers/layers_standalone_common_tests.cpp

index 8b18d80762c311d39bb37f5ef91ca9652cfe9451..36563b6570c716945c406dc42058f58930355c2a 100644 (file)
@@ -152,7 +152,15 @@ createLayerNode(const ml::train::LayerType &type,
  */
 std::unique_ptr<LayerNode>
 createLayerNode(const std::string &type,
-                const std::vector<std::string> &properties) {
+                const std::vector<std::string> &properties,
+                const ml::train::LayerComputeEngine &compute_engine) {
+#ifdef ENABLE_OPENCL
+  if (compute_engine == ml::train::LayerComputeEngine::GPU) {
+    auto &cc = nntrainer::ClContext::Global();
+    return createLayerNode(cc.createObject<nntrainer::Layer>(type), properties,
+                           compute_engine);
+  }
+#endif
   auto &ac = nntrainer::AppContext::Global();
   return createLayerNode(ac.createObject<nntrainer::Layer>(type), properties);
 }
@@ -269,7 +277,9 @@ void LayerNode::setOutputConnection(unsigned nth, const std::string &name,
 
 void LayerNode::setComputeEngine(
   const ml::train::LayerComputeEngine &compute_engine) {
-  run_context->setComputeEngine(compute_engine);
+  // setting compute_engine of LayerNode
+  // can be reused later to propagate this info
+  this->compute_engine = compute_engine;
 }
 
 const std::string LayerNode::getName() const noexcept {
index 93e7ac7069161c77bee88b03fc3baf5fdc44a57d..f37338660565c2a45d6783a501d7905a8b2cd78a 100644 (file)
@@ -925,6 +925,13 @@ private:
   std::vector<std::unique_ptr<Connection>>
     output_connections; /**< output layer names */
 
+  /**
+   * @brief compute_engine Information about the compute backend being used
+   *
+   */
+  ml::train::LayerComputeEngine compute_engine =
+    ml::train::LayerComputeEngine::CPU;
+
 #ifdef ENABLE_TEST
   /**
    * @brief   Init context which is stored for debugging issue
@@ -1023,7 +1030,9 @@ createLayerNode(const ml::train::LayerType &type,
  */
 std::unique_ptr<LayerNode>
 createLayerNode(const std::string &type,
-                const std::vector<std::string> &properties = {});
+                const std::vector<std::string> &properties = {},
+                const ml::train::LayerComputeEngine &compute_engine =
+                  ml::train::LayerComputeEngine::CPU);
 
 /**
  * @brief LayerNode creator with constructor
index d63357c80503da03c82317672dda42a2b53be3c7..48c1a8a242733362b0e3ea0930d4e1d12693e9b8 100644 (file)
@@ -7,6 +7,7 @@
  * @brief Common test for nntrainer layers (Param Tests)
  * @see        https://github.com/nnstreamer/nntrainer
  * @author Jihoon Lee <jhoon.it.lee@samsung.com>
+ * @author Debadri Samaddar <s.debadri@samsung.com>
  * @bug No known bugs except for NYI items
  */
 #ifndef __LAYERS_COMMON_TESTS_H__
@@ -31,6 +32,8 @@ typedef enum {
 using LayerFactoryType = std::function<std::unique_ptr<nntrainer::Layer>(
   const std::vector<std::string> &)>;
 
+using ComputeEngine = ml::train::LayerComputeEngine;
+
 using LayerSemanticsParamType =
   std::tuple<LayerFactoryType /** layer factory */,
              std::string /** Type of Layer */,
@@ -84,6 +87,12 @@ protected:
   unsigned int num_inputs;
 };
 
+/**
+ * @brief LayerSemanticsGpu
+ * @details Inherit LayerSemantics to test layers on GPU
+ */
+class LayerSemanticsGpu : public LayerSemantics {};
+
 /**
  * @brief LayerPropertySemantics
  * @details Inherit LayerSemantics to solely test negative property cases
index 5db3fe40c5f85a1153a1383cc4e2dcfb16a1d69b..f1c87b2426ec5e12dd417ef22b52e9c521d8507b 100644 (file)
@@ -8,6 +8,7 @@
  * @see        https://github.com/nnstreamer/nntrainer
  * @author Parichay Kapoor <pk.kapoor@samsung.com>
  * @author Jihoon Lee <jhoon.it.lee@samsung.com>
+ * @author Debadri Samaddar <s.debadri@samsung.com>
  * @bug No known bugs except for NYI items
  */
 
@@ -33,18 +34,6 @@ TEST_P(LayerSemantics, createFromAppContext_pn) {
             expected_type);
 }
 
-#ifdef ENABLE_OPENCL
-TEST_P(LayerSemantics, createFromClContext_pn) {
-  auto &ac = nntrainer::ClContext::Global();
-  if (!(options & LayerCreateSetPropertyOptions::AVAILABLE_FROM_APP_CONTEXT)) {
-    ac.registerFactory<nntrainer::Layer>(std::get<0>(GetParam()));
-  }
-
-  EXPECT_EQ(ac.createObject<nntrainer::Layer>(expected_type)->getType(),
-            expected_type);
-}
-#endif
-
 TEST_P(LayerPropertySemantics, setPropertiesInvalid_n) {
   auto lnode = nntrainer::createLayerNode(expected_type);
   EXPECT_THROW(layer->setProperty({valid_properties}), std::invalid_argument);
@@ -124,3 +113,101 @@ TEST_P(LayerSemantics, setBatchValidateLayerNode_p) {
     EXPECT_THROW(lnode->finalize(), nntrainer::exception::not_supported);
   }
 }
+
+#ifdef ENABLE_OPENCL
+TEST_P(LayerSemanticsGpu, createFromClContext_pn) {
+  auto &ac = nntrainer::ClContext::Global();
+  if (!(options & LayerCreateSetPropertyOptions::AVAILABLE_FROM_APP_CONTEXT)) {
+    ac.registerFactory<nntrainer::Layer>(std::get<0>(GetParam()));
+  }
+
+  EXPECT_EQ(ac.createObject<nntrainer::Layer>(expected_type)->getType(),
+            expected_type);
+}
+
+TEST_P(LayerPropertySemantics, setPropertiesInvalid_n_gpu) {
+  auto lnode =
+    nntrainer::createLayerNode(expected_type, {}, ComputeEngine::GPU);
+  EXPECT_THROW(layer->setProperty({valid_properties}), std::invalid_argument);
+}
+
+TEST_P(LayerSemanticsGpu, setPropertiesInvalid_n) {
+  auto lnode =
+    nntrainer::createLayerNode(expected_type, {}, ComputeEngine::GPU);
+  /** must not crash */
+  EXPECT_THROW(layer->setProperty({"unknown_props=2"}), std::invalid_argument);
+}
+
+TEST_P(LayerSemanticsGpu, finalizeValidateLayerNode_p) {
+  auto lnode =
+    nntrainer::createLayerNode(expected_type, {}, ComputeEngine::GPU);
+  std::vector<std::string> props = {"name=test"};
+  std::string input_shape = "input_shape=1:1:1";
+  std::string input_layers = "input_layers=a";
+  for (auto idx = 1u; idx < num_inputs; idx++) {
+    input_shape += ",1:1:1";
+    input_layers += ",a";
+  }
+  props.push_back(input_shape);
+  props.push_back(input_layers);
+  lnode->setProperty(props);
+  lnode->setOutputLayers({"dummy"});
+
+  EXPECT_NO_THROW(lnode->setProperty(valid_properties));
+
+  if (!must_fail) {
+    nntrainer::InitLayerContext init_context = lnode->finalize();
+
+    for (auto const &spec : init_context.getOutSpecs())
+      EXPECT_GT(spec.variable_spec.dim.getDataLen(), size_t(0));
+    for (auto const &ws : init_context.getWeightsSpec())
+      EXPECT_GT(std::get<0>(ws).getDataLen(), size_t(0));
+    for (auto const &ts : init_context.getTensorsSpec())
+      EXPECT_GT(std::get<0>(ts).getDataLen(), size_t(0));
+  } else {
+    EXPECT_THROW(lnode->finalize(), nntrainer::exception::not_supported);
+  }
+}
+
+TEST_P(LayerSemanticsGpu, getTypeValidateLayerNode_p) {
+  auto lnode =
+    nntrainer::createLayerNode(expected_type, {}, ComputeEngine::GPU);
+  std::string type;
+
+  EXPECT_NO_THROW(type = lnode->getType());
+  EXPECT_GT(type.size(), size_t(0));
+}
+
+TEST_P(LayerSemanticsGpu, gettersValidateLayerNode_p) {
+  auto lnode =
+    nntrainer::createLayerNode(expected_type, {}, ComputeEngine::GPU);
+
+  EXPECT_NO_THROW(lnode->supportInPlace());
+  EXPECT_NO_THROW(lnode->requireLabel());
+  EXPECT_NO_THROW(lnode->supportBackwarding());
+}
+
+TEST_P(LayerSemanticsGpu, setBatchValidateLayerNode_p) {
+  auto lnode =
+    nntrainer::createLayerNode(expected_type, {}, ComputeEngine::GPU);
+  std::vector<std::string> props = {"name=test"};
+  std::string input_shape = "input_shape=1:1:1";
+  std::string input_layers = "input_layers=a";
+  for (auto idx = 1u; idx < num_inputs; idx++) {
+    input_shape += ",1:1:1";
+    input_layers += ",a";
+  }
+  props.push_back(input_shape);
+  props.push_back(input_layers);
+  lnode->setProperty(props);
+  lnode->setOutputLayers({"dummy"});
+
+  EXPECT_NO_THROW(lnode->setProperty(valid_properties));
+
+  if (!must_fail) {
+    EXPECT_NO_THROW(lnode->finalize());
+  } else {
+    EXPECT_THROW(lnode->finalize(), nntrainer::exception::not_supported);
+  }
+}
+#endif
index 4c602218d239f0e86091cd7d3d0df2278b3658bf..a266ac3a70d0817262a3ff5a287d62c0d1d280c0 100644 (file)
@@ -7,6 +7,7 @@
  * @brief Common test for nntrainer layers (Param Tests)
  * @see        https://github.com/nnstreamer/nntrainer
  * @author Jihoon Lee <jhoon.it.lee@samsung.com>
+ * @author Debadri Samaddar <s.debadri@samsung.com>
  * @bug No known bugs except for NYI items
  */
 
@@ -80,3 +81,63 @@ TEST_P(LayerSemantics, setBatchValidate_p) {
                  nntrainer::exception::not_supported);
   }
 }
+
+#ifdef ENABLE_OPENCL
+TEST_P(LayerSemanticsGpu, setProperties_n) {
+  /** must not crash */
+  EXPECT_THROW(layer->setProperty({"unknown_props=2"}), std::invalid_argument);
+}
+
+TEST_P(LayerSemanticsGpu, gettersValidate_p) {
+  std::string type;
+
+  EXPECT_NO_THROW(type = layer->getType());
+  EXPECT_GT(type.size(), size_t(0));
+  EXPECT_NO_THROW(layer->supportInPlace());
+  EXPECT_NO_THROW(layer->requireLabel());
+  EXPECT_NO_THROW(layer->supportBackwarding());
+}
+
+TEST_P(LayerSemanticsGpu, finalizeValidate_p) {
+  ml::train::TensorDim in_dim({1, 1, 1, 1});
+  std::vector<ml::train::TensorDim> input_dims(num_inputs, in_dim);
+  nntrainer::InitLayerContext init_context =
+    nntrainer::InitLayerContext(input_dims, {true}, false, "layer");
+  EXPECT_EQ(init_context.validate(), true);
+
+  // set necessary properties only
+  EXPECT_NO_THROW(layer->setProperty(valid_properties));
+
+  if (!must_fail) {
+    EXPECT_NO_THROW(layer->finalize(init_context));
+
+    for (auto const &spec : init_context.getOutSpecs())
+      EXPECT_GT(spec.variable_spec.dim.getDataLen(), size_t(0));
+    for (auto const &ws : init_context.getWeightsSpec())
+      EXPECT_GT(std::get<0>(ws).getDataLen(), size_t(0));
+    for (auto const &ts : init_context.getTensorsSpec())
+      EXPECT_GT(std::get<0>(ts).getDataLen(), size_t(0));
+  } else {
+    EXPECT_THROW(layer->finalize(init_context),
+                 nntrainer::exception::not_supported);
+  }
+}
+
+TEST_P(LayerSemanticsGpu, setBatchValidate_p) {
+  ml::train::TensorDim in_dim({1, 1, 1, 1});
+  std::vector<ml::train::TensorDim> input_dims(num_inputs, in_dim);
+  nntrainer::InitLayerContext init_context =
+    nntrainer::InitLayerContext(input_dims, {true}, false, "layer");
+  EXPECT_EQ(init_context.validate(), true);
+
+  // set necessary properties only
+  EXPECT_NO_THROW(layer->setProperty(valid_properties));
+
+  if (!must_fail) {
+    EXPECT_NO_THROW(layer->finalize(init_context));
+  } else {
+    EXPECT_THROW(layer->finalize(init_context),
+                 nntrainer::exception::not_supported);
+  }
+}
+#endif