Added semantics tests for layers on GPU.
Removed redundant run_context compute engine setter call.
Signed-off-by: Debadri Samaddar <s.debadri@samsung.com>
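
With this change the compute engine can be selected at layer-node creation time.
A minimal usage sketch of the new createLayerNode() overload (assuming an
OpenCL-enabled build; the layer type and property below are illustrative only):

    #include <layer_node.h>

    // Hypothetical call site: request a GPU-backed "fully_connected" node.
    // When ENABLE_OPENCL is not defined, creation falls back to the
    // AppContext (CPU) path, as the #ifdef guard below shows.
    auto lnode = nntrainer::createLayerNode(
      "fully_connected", {"unit=1"}, ml::train::LayerComputeEngine::GPU);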
*/
std::unique_ptr<LayerNode>
createLayerNode(const std::string &type,
- const std::vector<std::string> &properties) {
+ const std::vector<std::string> &properties,
+ const ml::train::LayerComputeEngine &compute_engine) {
+#ifdef ENABLE_OPENCL
+ if (compute_engine == ml::train::LayerComputeEngine::GPU) {
+ auto &cc = nntrainer::ClContext::Global();
+ return createLayerNode(cc.createObject<nntrainer::Layer>(type), properties,
+ compute_engine);
+ }
+#endif
auto &ac = nntrainer::AppContext::Global();
return createLayerNode(ac.createObject<nntrainer::Layer>(type), properties);
}
void LayerNode::setComputeEngine(
const ml::train::LayerComputeEngine &compute_engine) {
- run_context->setComputeEngine(compute_engine);
+  // store the compute engine on the LayerNode itself so that this
+  // information can be propagated to other components later
+ this->compute_engine = compute_engine;
}
const std::string LayerNode::getName() const noexcept {
std::vector<std::unique_ptr<Connection>>
output_connections; /**< output layer names */
+  /**
+   * @brief Compute backend used by this layer node
+   */
+ ml::train::LayerComputeEngine compute_engine =
+ ml::train::LayerComputeEngine::CPU;
+
#ifdef ENABLE_TEST
/**
* @brief Init context which is stored for debugging issue
*/
std::unique_ptr<LayerNode>
createLayerNode(const std::string &type,
- const std::vector<std::string> &properties = {});
+ const std::vector<std::string> &properties = {},
+ const ml::train::LayerComputeEngine &compute_engine =
+ ml::train::LayerComputeEngine::CPU);
/**
* @brief LayerNode creator with constructor
* @brief Common test for nntrainer layers (Param Tests)
* @see https://github.com/nnstreamer/nntrainer
* @author Jihoon Lee <jhoon.it.lee@samsung.com>
+ * @author Debadri Samaddar <s.debadri@samsung.com>
* @bug No known bugs except for NYI items
*/
#ifndef __LAYERS_COMMON_TESTS_H__
using LayerFactoryType = std::function<std::unique_ptr<nntrainer::Layer>(
const std::vector<std::string> &)>;
+using ComputeEngine = ml::train::LayerComputeEngine;
+
using LayerSemanticsParamType =
std::tuple<LayerFactoryType /** layer factory */,
std::string /** Type of Layer */,
unsigned int num_inputs;
};
+/**
+ * @brief LayerSemanticsGpu
+ * @details Inherit LayerSemantics to test layers on GPU
+ */
+class LayerSemanticsGpu : public LayerSemantics {};
+
/**
* @brief LayerPropertySemantics
* @details Inherit LayerSemantics to solely test negative property cases
* @see https://github.com/nnstreamer/nntrainer
* @author Parichay Kapoor <pk.kapoor@samsung.com>
* @author Jihoon Lee <jhoon.it.lee@samsung.com>
+ * @author Debadri Samaddar <s.debadri@samsung.com>
* @bug No known bugs except for NYI items
*/
expected_type);
}
-#ifdef ENABLE_OPENCL
-TEST_P(LayerSemantics, createFromClContext_pn) {
- auto &ac = nntrainer::ClContext::Global();
- if (!(options & LayerCreateSetPropertyOptions::AVAILABLE_FROM_APP_CONTEXT)) {
- ac.registerFactory<nntrainer::Layer>(std::get<0>(GetParam()));
- }
-
- EXPECT_EQ(ac.createObject<nntrainer::Layer>(expected_type)->getType(),
- expected_type);
-}
-#endif
-
TEST_P(LayerPropertySemantics, setPropertiesInvalid_n) {
auto lnode = nntrainer::createLayerNode(expected_type);
EXPECT_THROW(layer->setProperty({valid_properties}), std::invalid_argument);
EXPECT_THROW(lnode->finalize(), nntrainer::exception::not_supported);
}
}
+
+#ifdef ENABLE_OPENCL
+TEST_P(LayerSemanticsGpu, createFromClContext_pn) {
+  auto &cc = nntrainer::ClContext::Global();
+  if (!(options & LayerCreateSetPropertyOptions::AVAILABLE_FROM_APP_CONTEXT)) {
+    cc.registerFactory<nntrainer::Layer>(std::get<0>(GetParam()));
+  }
+
+  EXPECT_EQ(cc.createObject<nntrainer::Layer>(expected_type)->getType(),
+ expected_type);
+}
+
+TEST_P(LayerPropertySemantics, setPropertiesInvalid_n_gpu) {
+ auto lnode =
+ nntrainer::createLayerNode(expected_type, {}, ComputeEngine::GPU);
+ EXPECT_THROW(layer->setProperty({valid_properties}), std::invalid_argument);
+}
+
+TEST_P(LayerSemanticsGpu, setPropertiesInvalid_n) {
+ auto lnode =
+ nntrainer::createLayerNode(expected_type, {}, ComputeEngine::GPU);
+ /** must not crash */
+ EXPECT_THROW(layer->setProperty({"unknown_props=2"}), std::invalid_argument);
+}
+
+TEST_P(LayerSemanticsGpu, finalizeValidateLayerNode_p) {
+ auto lnode =
+ nntrainer::createLayerNode(expected_type, {}, ComputeEngine::GPU);
+ std::vector<std::string> props = {"name=test"};
+ std::string input_shape = "input_shape=1:1:1";
+ std::string input_layers = "input_layers=a";
+ for (auto idx = 1u; idx < num_inputs; idx++) {
+ input_shape += ",1:1:1";
+ input_layers += ",a";
+ }
+ props.push_back(input_shape);
+ props.push_back(input_layers);
+ lnode->setProperty(props);
+ lnode->setOutputLayers({"dummy"});
+
+ EXPECT_NO_THROW(lnode->setProperty(valid_properties));
+
+ if (!must_fail) {
+ nntrainer::InitLayerContext init_context = lnode->finalize();
+
+ for (auto const &spec : init_context.getOutSpecs())
+ EXPECT_GT(spec.variable_spec.dim.getDataLen(), size_t(0));
+ for (auto const &ws : init_context.getWeightsSpec())
+ EXPECT_GT(std::get<0>(ws).getDataLen(), size_t(0));
+ for (auto const &ts : init_context.getTensorsSpec())
+ EXPECT_GT(std::get<0>(ts).getDataLen(), size_t(0));
+ } else {
+ EXPECT_THROW(lnode->finalize(), nntrainer::exception::not_supported);
+ }
+}
+
+TEST_P(LayerSemanticsGpu, getTypeValidateLayerNode_p) {
+ auto lnode =
+ nntrainer::createLayerNode(expected_type, {}, ComputeEngine::GPU);
+ std::string type;
+
+ EXPECT_NO_THROW(type = lnode->getType());
+ EXPECT_GT(type.size(), size_t(0));
+}
+
+TEST_P(LayerSemanticsGpu, gettersValidateLayerNode_p) {
+ auto lnode =
+ nntrainer::createLayerNode(expected_type, {}, ComputeEngine::GPU);
+
+ EXPECT_NO_THROW(lnode->supportInPlace());
+ EXPECT_NO_THROW(lnode->requireLabel());
+ EXPECT_NO_THROW(lnode->supportBackwarding());
+}
+
+TEST_P(LayerSemanticsGpu, setBatchValidateLayerNode_p) {
+ auto lnode =
+ nntrainer::createLayerNode(expected_type, {}, ComputeEngine::GPU);
+ std::vector<std::string> props = {"name=test"};
+ std::string input_shape = "input_shape=1:1:1";
+ std::string input_layers = "input_layers=a";
+ for (auto idx = 1u; idx < num_inputs; idx++) {
+ input_shape += ",1:1:1";
+ input_layers += ",a";
+ }
+ props.push_back(input_shape);
+ props.push_back(input_layers);
+ lnode->setProperty(props);
+ lnode->setOutputLayers({"dummy"});
+
+ EXPECT_NO_THROW(lnode->setProperty(valid_properties));
+
+ if (!must_fail) {
+ EXPECT_NO_THROW(lnode->finalize());
+ } else {
+ EXPECT_THROW(lnode->finalize(), nntrainer::exception::not_supported);
+ }
+}
+#endif
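
Per-layer test files can then instantiate the new LayerSemanticsGpu suite for
the types they want to exercise on GPU. A hypothetical registration sketch,
assuming the remaining LayerSemanticsParamType tuple elements mirror the
existing CPU suites (necessary properties, creation options, must-fail flag,
number of inputs) and using plain GoogleTest instantiation; the
fully-connected values are illustrative only:

    // Hypothetical parameter set; element order assumed to match the
    // CPU LayerSemantics suites in the per-layer unittest files.
    auto semantic_fc_gpu = LayerSemanticsParamType(
      nntrainer::createLayer<nntrainer::FullyConnectedLayer>,
      nntrainer::FullyConnectedLayer::type, {"unit=1"},
      LayerCreateSetPropertyOptions::AVAILABLE_FROM_APP_CONTEXT, false, 1);

    INSTANTIATE_TEST_SUITE_P(FullyConnectedGpu, LayerSemanticsGpu,
                             ::testing::Values(semantic_fc_gpu));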
* @brief Common test for nntrainer layers (Param Tests)
* @see https://github.com/nnstreamer/nntrainer
* @author Jihoon Lee <jhoon.it.lee@samsung.com>
+ * @author Debadri Samaddar <s.debadri@samsung.com>
* @bug No known bugs except for NYI items
*/
nntrainer::exception::not_supported);
}
}
+
+#ifdef ENABLE_OPENCL
+TEST_P(LayerSemanticsGpu, setProperties_n) {
+ /** must not crash */
+ EXPECT_THROW(layer->setProperty({"unknown_props=2"}), std::invalid_argument);
+}
+
+TEST_P(LayerSemanticsGpu, gettersValidate_p) {
+ std::string type;
+
+ EXPECT_NO_THROW(type = layer->getType());
+ EXPECT_GT(type.size(), size_t(0));
+ EXPECT_NO_THROW(layer->supportInPlace());
+ EXPECT_NO_THROW(layer->requireLabel());
+ EXPECT_NO_THROW(layer->supportBackwarding());
+}
+
+TEST_P(LayerSemanticsGpu, finalizeValidate_p) {
+ ml::train::TensorDim in_dim({1, 1, 1, 1});
+ std::vector<ml::train::TensorDim> input_dims(num_inputs, in_dim);
+ nntrainer::InitLayerContext init_context =
+ nntrainer::InitLayerContext(input_dims, {true}, false, "layer");
+ EXPECT_EQ(init_context.validate(), true);
+
+ // set necessary properties only
+ EXPECT_NO_THROW(layer->setProperty(valid_properties));
+
+ if (!must_fail) {
+ EXPECT_NO_THROW(layer->finalize(init_context));
+
+ for (auto const &spec : init_context.getOutSpecs())
+ EXPECT_GT(spec.variable_spec.dim.getDataLen(), size_t(0));
+ for (auto const &ws : init_context.getWeightsSpec())
+ EXPECT_GT(std::get<0>(ws).getDataLen(), size_t(0));
+ for (auto const &ts : init_context.getTensorsSpec())
+ EXPECT_GT(std::get<0>(ts).getDataLen(), size_t(0));
+ } else {
+ EXPECT_THROW(layer->finalize(init_context),
+ nntrainer::exception::not_supported);
+ }
+}
+
+TEST_P(LayerSemanticsGpu, setBatchValidate_p) {
+ ml::train::TensorDim in_dim({1, 1, 1, 1});
+ std::vector<ml::train::TensorDim> input_dims(num_inputs, in_dim);
+ nntrainer::InitLayerContext init_context =
+ nntrainer::InitLayerContext(input_dims, {true}, false, "layer");
+ EXPECT_EQ(init_context.validate(), true);
+
+ // set necessary properties only
+ EXPECT_NO_THROW(layer->setProperty(valid_properties));
+
+ if (!must_fail) {
+ EXPECT_NO_THROW(layer->finalize(init_context));
+ } else {
+ EXPECT_THROW(layer->finalize(init_context),
+ nntrainer::exception::not_supported);
+ }
+}
+#endif