From: Parichay Kapoor <pk.kapoor@samsung.com>
Date: Tue, 20 Jul 2021 09:51:19 +0000 (+0900)
Subject: [layer_v2] Merge commit for branch layer_v2
X-Git-Tag: accepted/tizen/unified/20210829.234903~118
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=70855c456a0d42b0b1b372dc349e2cdc11b968bd;p=platform%2Fcore%2Fml%2Fnntrainer.git

[layer_v2] Merge commit for branch layer_v2

This is the merge commit for branch layer_v2. It contains the minor
updates made while rebasing layer_v2 onto the main branch so that the
branch can be applied.

Signed-off-by: Parichay Kapoor <pk.kapoor@samsung.com>
---

diff --git a/Applications/Custom/mae_loss.cpp b/Applications/Custom/mae_loss.cpp
index 4460633..2293f6a 100644
--- a/Applications/Custom/mae_loss.cpp
+++ b/Applications/Custom/mae_loss.cpp
@@ -21,12 +21,8 @@ namespace custom {
 
 static constexpr size_t SINGLE_INOUT_IDX = 0;
 
-int MaeLossLayer::setProperty(std::vector<std::string> values) {
-  /// this implementation makes to pass the test, this will change soon.
-  return values.size();
-}
-
-void MaeLossLayer::forwarding(nntrainer::RunLayerContext &context, bool training) {
+void MaeLossLayer::forwarding(nntrainer::RunLayerContext &context,
+                              bool training) {
   nntrainer::Tensor &predicted = context.getInput(SINGLE_INOUT_IDX);
   nntrainer::Tensor &output = context.getOutput(SINGLE_INOUT_IDX);
 
@@ -40,7 +36,7 @@ void MaeLossLayer::forwarding(nntrainer::RunLayerContext &context, bool training
   }
 }
 
-void MaeLossLayer::calcDerivative(RunLayerContext &context) {
+void MaeLossLayer::calcDerivative(nntrainer::RunLayerContext &context) {
   nntrainer::Tensor &predicted = context.getInput(SINGLE_INOUT_IDX);
   nntrainer::Tensor &label = context.getLabel(SINGLE_INOUT_IDX);
 
@@ -58,7 +54,6 @@ void MaeLossLayer::calcDerivative(RunLayerContext &context) {
   });
 }
 
-
 #ifdef PLUGGABLE
 
 nntrainer::Layer *create_mae_loss_layer() {
diff --git a/Applications/Custom/mae_loss.h b/Applications/Custom/mae_loss.h
index b84f2f2..9f68375 100644
--- a/Applications/Custom/mae_loss.h
+++ b/Applications/Custom/mae_loss.h
@@ -49,12 +49,12 @@ public:
   /**
    * @copydoc Layer::forwarding(RunLayerContext &context, bool training)
    */
-  void forwarding(nntrainer::RunLayerContext &context, bool training) override
+  void forwarding(nntrainer::RunLayerContext &context, bool training) override;
 
   /**
    * @copydoc Layer::calcDerivative(RunLayerContext &context)
   */
-  void calcDerivative(nntrainer::RunLayerContext &context) override
+  void calcDerivative(nntrainer::RunLayerContext &context) override;
 
   /**
    * @copydoc bool supportBackwarding() const
@@ -86,7 +86,7 @@ public:
   /**
    * @copydoc Layer::requireLabel()
   */
-  bool MaeLossLayer::requireLabel() const { return true; }
+  bool requireLabel() const { return true; }
 
   inline static const std::string type = "mae_loss";
 };
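For context, the pair of member functions touched above implements mean
absolute error: forwarding computes L = mean(|predicted - label|) and
calcDerivative computes dL/dpredicted = sign(predicted - label) / n. A
minimal, framework-free sketch of that math follows; the function names
and the std::vector-based signatures are illustrative, not the nntrainer
API.

#include <cmath>
#include <cstddef>
#include <vector>

/// Forward pass: mean absolute error over a flat buffer.
float mae_forward(const std::vector<float> &predicted,
                  const std::vector<float> &label) {
  float sum = 0.0f;
  for (std::size_t i = 0; i < predicted.size(); ++i)
    sum += std::fabs(predicted[i] - label[i]);
  return sum / static_cast<float>(predicted.size());
}

/// Backward pass: elementwise sign(predicted - label), scaled by 1/n.
std::vector<float> mae_derivative(const std::vector<float> &predicted,
                                  const std::vector<float> &label) {
  const float n = static_cast<float>(predicted.size());
  std::vector<float> grad(predicted.size());
  for (std::size_t i = 0; i < predicted.size(); ++i) {
    float diff = predicted[i] - label[i];
    grad[i] = (diff > 0.0f ? 1.0f : (diff < 0.0f ? -1.0f : 0.0f)) / n;
  }
  return grad;
}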
diff --git a/nntrainer/layers/conv2d_layer.cpp b/nntrainer/layers/conv2d_layer.cpp
index d7ec447..953702e 100644
--- a/nntrainer/layers/conv2d_layer.cpp
+++ b/nntrainer/layers/conv2d_layer.cpp
@@ -114,17 +114,19 @@ static void col2im(const Tensor &col_matrix, const TensorDim &kdim,
 
 static TensorDim calcIm2ColOutputDim(const TensorDim &in, const TensorDim &kdim,
-                                     const std::array<unsigned int, CONV2D_DIM> &padding,
+                                     const std::array<unsigned int, CONV2D_DIM * 2> &padding,
                                      const std::array<unsigned int, CONV2D_DIM> &mstride,
                                      const std::array<unsigned int, CONV2D_DIM> &dilation) {
 
-  unsigned int ph = padding[0];
-  unsigned int pw = padding[1];
+  unsigned pt = padding[0];
+  unsigned pb = padding[1];
+  unsigned pl = padding[2];
+  unsigned pr = padding[3];
 
   int in_height = in.height();
   int in_width = in.width();
-  unsigned int height = in_height + ph * 2;
-  unsigned int width = in_width + pw * 2;
+  unsigned int height = in_height + pt + pb;
+  unsigned int width = in_width + pl + pr;
 
   unsigned int k_height = kdim.height();
   unsigned int k_width = kdim.width();
@@ -205,7 +207,6 @@ static void im2col(const Tensor &in, const TensorDim &kdim,
   // }
   */
 
-  const int pad_value = 0;
   unsigned pt = padding[0];
   unsigned pb = padding[1];
   unsigned pl = padding[2];
diff --git a/nntrainer/layers/conv2d_layer.h b/nntrainer/layers/conv2d_layer.h
index a0fecec..510b6c6 100644
--- a/nntrainer/layers/conv2d_layer.h
+++ b/nntrainer/layers/conv2d_layer.h
@@ -35,7 +35,9 @@ public:
   Conv2DLayer(unsigned int filter_size_ = 0,
               const std::array<unsigned int, CONV2D_DIM> &kernel_size_ = {0, 0},
               const std::array<unsigned int, CONV2D_DIM> &stride_ = {1, 1},
-              const std::array<unsigned int, CONV2D_DIM * 2> &padding_ = {0, 0, 0, 0}) :
+              const std::array<unsigned int, CONV2D_DIM * 2> &padding_ = {0, 0,
+                                                                          0,
+                                                                          0}) :
     LayerImpl(),
     filter_size(filter_size_),
     kernel_size(kernel_size_),
diff --git a/nntrainer/layers/layer_devel.h b/nntrainer/layers/layer_devel.h
index 28a21c4..b3fdd68 100644
--- a/nntrainer/layers/layer_devel.h
+++ b/nntrainer/layers/layer_devel.h
@@ -88,6 +88,7 @@ public:
    * 33. split_dimension : string (type)
    * 34. return_sequences : bool (type) - lstm
    * 35. hidden_state_activation : string (type) - lstm
+   * 36. dropout : bool
    */
  enum class PropertyType {
     input_shape = 0,
@@ -126,6 +127,7 @@ public:
     split_dimension = 33,
     return_sequences = 34,
     hidden_state_activation = 35,
+    dropout = 36,
     unknown
   };
 
diff --git a/nntrainer/layers/pooling2d_layer.h b/nntrainer/layers/pooling2d_layer.h
index 3954320..81cfced 100644
--- a/nntrainer/layers/pooling2d_layer.h
+++ b/nntrainer/layers/pooling2d_layer.h
@@ -60,7 +60,8 @@ public:
                  PoolingType pooling_type_ = PoolingType::average,
                  const std::array<unsigned int, POOLING2D_DIM> &pool_size_ = {0, 0},
                  const std::array<unsigned int, POOLING2D_DIM> &stride_ = {1, 1},
-                 const std::array<unsigned int, POOLING2D_DIM * 2> &padding_ = {0, 0, 0, 0}) :
+                 const std::array<unsigned int, POOLING2D_DIM * 2> &padding_ = {0, 0, 0,
+                                                                                0}) :
     Layer(),
     pool_size(pool_size_),
     stride(stride_),
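The conv2d and pooling hunks above switch from symmetric two-value padding
({height, width}, each applied to both sides) to per-side four-value
padding ({top, bottom, left, right}). A standalone sketch of the resulting
output-dimension arithmetic follows; out_dim and the values in main are
illustrative stand-ins, not the nntrainer helpers.

#include <array>
#include <cstdio>

// Output size of a strided window over a padded input, using per-side
// padding as in the patched calcIm2ColOutputDim.
unsigned int out_dim(unsigned int in, unsigned int pad_lo, unsigned int pad_hi,
                     unsigned int kernel, unsigned int stride,
                     unsigned int dilation) {
  unsigned int eff_kernel = dilation * (kernel - 1) + 1; // dilated extent
  return (in + pad_lo + pad_hi - eff_kernel) / stride + 1;
}

int main() {
  // Per-side padding {top, bottom, left, right}; no longer forced symmetric.
  std::array<unsigned int, 4> padding = {1, 2, 0, 3};
  // 28x28 input, 3x3 kernel, stride 1, dilation 1:
  unsigned int h = out_dim(28, padding[0], padding[1], 3, 1, 1); // 28+1+2-3+1 = 29
  unsigned int w = out_dim(28, padding[2], padding[3], 3, 1, 1); // 28+0+3-3+1 = 29
  std::printf("output: %u x %u\n", h, w);
  return 0;
}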
out+"input_layers=inputlayer"}, ALLFAIL) ), [](const testing::TestParamInfo& info){ return std::get<0>(info.param); });