From: Jihoon Lee Date: Tue, 28 Dec 2021 08:01:07 +0000 (+0900) Subject: [Clean] Remove getOutputDimensions() X-Git-Tag: accepted/tizen/unified/20220323.062643~54 X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=e6ecd5aca940a3fe4521dde37336c354d9dd29e6;p=platform%2Fcore%2Fml%2Fnntrainer.git [Clean] Remove getOutputDimensions() This patch removes initContext::getOutputDimensions(). This function is only used in the test so removed (It is used in network_graph but soon will be substituted) **Self evaluation:** 1. Build test: [X]Passed [ ]Failed [ ]Skipped 2. Run test: [X]Passed [ ]Failed [ ]Skipped Signed-off-by: Jihoon Lee --- diff --git a/nntrainer/graph/network_graph.cpp b/nntrainer/graph/network_graph.cpp index 36cc112..e682d79 100644 --- a/nntrainer/graph/network_graph.cpp +++ b/nntrainer/graph/network_graph.cpp @@ -12,6 +12,7 @@ * @todo Support multi-input graph. */ +#include "tensor.h" #include #include #include @@ -696,10 +697,13 @@ NetworkGraph::finalizeContext(const std::shared_ptr &lnode, * allocated input. This is neccesary for manager to know when this output * node is going to be used with in-place optimizations. 
*/ + std::vector out_dims; + for (auto &spec : init_context.getOutSpecs()) { + out_dims.push_back(spec.variable_spec.dim); + } unsigned int max_fwd_exec_order = graph.size(); const std::vector &outputs = tensor_manager->requestOutputs( - gnode, init_context.getOutputDimensions(), inputs_name, max_fwd_exec_order, - shared_var, shared_grad); + gnode, out_dims, inputs_name, max_fwd_exec_order, shared_var, shared_grad); /** create shared weight names if requested */ std::vector shared_weight_names; diff --git a/nntrainer/layers/layer_context.cpp b/nntrainer/layers/layer_context.cpp index d75a743..66e8b64 100644 --- a/nntrainer/layers/layer_context.cpp +++ b/nntrainer/layers/layer_context.cpp @@ -116,7 +116,7 @@ void InitLayerContext::requestOutputs(std::vector &&out_specs) { } } -const std::vector &InitLayerContext::getOutSpecs() { +const std::vector &InitLayerContext::getOutSpecs() const { return output_specs; } diff --git a/nntrainer/layers/layer_context.h b/nntrainer/layers/layer_context.h index 0db6447..3f0a0c7 100644 --- a/nntrainer/layers/layer_context.h +++ b/nntrainer/layers/layer_context.h @@ -105,19 +105,6 @@ public: } /** - * @brief Get the Output Dimensions object - * - * @return std::vector& Output dimensions - */ - const std::vector getOutputDimensions() const { - std::vector output_dim; - for (auto &spec : output_specs) { - output_dim.push_back(spec.variable_spec.dim); - } - return output_dim; - } - - /** * @brief Set the Output Dimensions object * * @param out_dim the output dimension to set to @@ -261,7 +248,7 @@ public: * * @return std::vector out specification */ - const std::vector &getOutSpecs(); + const std::vector &getOutSpecs() const; /** * @brief Validate the context diff --git a/test/unittest/layers/layers_dependent_common_tests.cpp b/test/unittest/layers/layers_dependent_common_tests.cpp index 6d37c87..e2ee521 100644 --- a/test/unittest/layers/layers_dependent_common_tests.cpp +++ b/test/unittest/layers/layers_dependent_common_tests.cpp @@ 
-58,8 +58,8 @@ TEST_P(LayerSemantics, finalizeValidateLayerNode_p) { if (!must_fail) { nntrainer::InitLayerContext init_context = lnode->finalize(); - for (auto const &dim : init_context.getOutputDimensions()) - EXPECT_GT(dim.getDataLen(), size_t(0)); + for (auto const &spec : init_context.getOutSpecs()) + EXPECT_GT(spec.variable_spec.dim.getDataLen(), size_t(0)); for (auto const &ws : init_context.getWeightsSpec()) EXPECT_GT(std::get<0>(ws).getDataLen(), size_t(0)); for (auto const &ts : init_context.getTensorsSpec()) diff --git a/test/unittest/layers/layers_golden_tests.cpp b/test/unittest/layers/layers_golden_tests.cpp index d53985f..4842f1f 100644 --- a/test/unittest/layers/layers_golden_tests.cpp +++ b/test/unittest/layers/layers_golden_tests.cpp @@ -10,6 +10,7 @@ * @bug No known bugs except for NYI items */ #include +#include #include #include @@ -80,6 +81,18 @@ static TensorPacks prepareTensors(const InitLayerContext &context, return vg; }; + auto allocate_tensors_v2 = [](const std::vector &specs) { + std::vector vg; + vg.reserve(specs.size()); + + for (auto &spec : specs) { + /// todo initializer should be depending is as well + vg.emplace_back(spec.variable_spec.dim, Tensor::Initializer::NONE, true, + true, "golden"); + } + return vg; + }; + auto allocate_weights = [&file](const auto &specs) { std::vector weights; weights.reserve(specs.size()); @@ -96,7 +109,7 @@ static TensorPacks prepareTensors(const InitLayerContext &context, return { allocate_weights(context.getWeightsSpec()), allocate_inouts(context.getInputDimensions()), - allocate_inouts(context.getOutputDimensions()), + allocate_tensors_v2(context.getOutSpecs()), allocate_tensors(context.getTensorsSpec()), }; } diff --git a/test/unittest/layers/layers_standalone_common_tests.cpp b/test/unittest/layers/layers_standalone_common_tests.cpp index 223ae47..a828c36 100644 --- a/test/unittest/layers/layers_standalone_common_tests.cpp +++ b/test/unittest/layers/layers_standalone_common_tests.cpp @@ -51,8 
+51,8 @@ TEST_P(LayerSemantics, finalizeValidate_p) { if (!must_fail) { EXPECT_NO_THROW(layer->finalize(init_context)); - for (auto const &dim : init_context.getOutputDimensions()) - EXPECT_GT(dim.getDataLen(), size_t(0)); + for (auto const &spec : init_context.getOutSpecs()) + EXPECT_GT(spec.variable_spec.dim.getDataLen(), size_t(0)); for (auto const &ws : init_context.getWeightsSpec()) EXPECT_GT(std::get<0>(ws).getDataLen(), size_t(0)); for (auto const &ts : init_context.getTensorsSpec())