[layer_v2] Merge commit for branch layer_v2
author    Parichay Kapoor <pk.kapoor@samsung.com>
Tue, 20 Jul 2021 09:51:19 +0000 (18:51 +0900)
committer Jijoong Moon <jijoong.moon@samsung.com>
Thu, 22 Jul 2021 11:47:24 +0000 (20:47 +0900)
Merge commit for layer_v2, including minor updates made while
rebasing the branch onto main so that it can be applied.

Signed-off-by: Parichay Kapoor <pk.kapoor@samsung.com>
Applications/Custom/mae_loss.cpp
Applications/Custom/mae_loss.h
nntrainer/layers/conv2d_layer.cpp
nntrainer/layers/conv2d_layer.h
nntrainer/layers/layer_devel.h
nntrainer/layers/pooling2d_layer.h
nntrainer/layers/rnn.h
test/unittest/unittest_nntrainer_modelfile.cpp

diff --git a/Applications/Custom/mae_loss.cpp b/Applications/Custom/mae_loss.cpp
index 4460633..2293f6a 100644
@@ -21,12 +21,8 @@ namespace custom {
 
 static constexpr size_t SINGLE_INOUT_IDX = 0;
 
-int MaeLossLayer::setProperty(std::vector<std::string> values) {
-  /// this implementation makes to pass the test, this will change soon.
-  return values.size();
-}
-
-void MaeLossLayer::forwarding(nntrainer::RunLayerContext &context, bool training) {
+void MaeLossLayer::forwarding(nntrainer::RunLayerContext &context,
+                              bool training) {
   nntrainer::Tensor &predicted = context.getInput(SINGLE_INOUT_IDX);
   nntrainer::Tensor &output = context.getOutput(SINGLE_INOUT_IDX);
 
@@ -40,7 +36,7 @@ void MaeLossLayer::forwarding(nntrainer::RunLayerContext &context, bool training
   }
 }
 
-void MaeLossLayer::calcDerivative(RunLayerContext &context) {
+void MaeLossLayer::calcDerivative(nntrainer::RunLayerContext &context) {
   nntrainer::Tensor &predicted = context.getInput(SINGLE_INOUT_IDX);
   nntrainer::Tensor &label = context.getLabel(SINGLE_INOUT_IDX);
 
@@ -58,7 +54,6 @@ void MaeLossLayer::calcDerivative(RunLayerContext &context) {
   });
 }
 
-
 #ifdef PLUGGABLE
 
 nntrainer::Layer *create_mae_loss_layer() {
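
For context, MaeLossLayer computes mean absolute error. A minimal standalone sketch of that math, with illustrative names that are not the nntrainer API (the 1/n scaling of the derivative is an assumption, not taken from the diff):

#include <cmath>
#include <cstddef>

// forward: mean absolute error over n elements
float mae_forward(const float *predicted, const float *label, std::size_t n) {
  float sum = 0.0f;
  for (std::size_t i = 0; i < n; ++i)
    sum += std::fabs(predicted[i] - label[i]);
  return sum / static_cast<float>(n);
}

// derivative w.r.t. predicted: sign(predicted - label); the 1/n scaling
// is an assumption (see note above)
void mae_derivative(const float *predicted, const float *label, float *deriv,
                    std::size_t n) {
  for (std::size_t i = 0; i < n; ++i) {
    const float d = predicted[i] - label[i];
    deriv[i] =
      (d > 0.0f ? 1.0f : d < 0.0f ? -1.0f : 0.0f) / static_cast<float>(n);
  }
}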
diff --git a/Applications/Custom/mae_loss.h b/Applications/Custom/mae_loss.h
index b84f2f2..9f68375 100644
@@ -49,12 +49,12 @@ public:
   /**
    * @copydoc Layer::forwarding(RunLayerContext &context, bool training)
    */
-  void forwarding(nntrainer::RunLayerContext &context, bool training) override
+  void forwarding(nntrainer::RunLayerContext &context, bool training) override;
 
   /**
    * @copydoc Layer::calcDerivative(RunLayerContext &context)
    */
-  void calcDerivative(nntrainer::RunLayerContext &context) override
+  void calcDerivative(nntrainer::RunLayerContext &context) override;
 
   /**
    * @copydoc bool supportBackwarding() const
@@ -86,7 +86,7 @@ public:
   /**
    * @copydoc Layer::requireLabel()
    */
-  bool MaeLossLayer::requireLabel() const { return true; }
+  bool requireLabel() const { return true; }
 
   inline static const std::string type = "mae_loss";
 };
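
After these fixes the header has the usual shape for a custom layer: out-of-line members are declared with a trailing semicolon, and in-class members are not qualified with the class name (the removed bool MaeLossLayer::requireLabel() form is ill-formed inside the class body). A trimmed sketch, assuming the nntrainer layer headers and <string> are included:

class MaeLossLayer : public nntrainer::Layer {
public:
  // defined out of line in mae_loss.cpp, so the declarations end with ';'
  void forwarding(nntrainer::RunLayerContext &context, bool training) override;
  void calcDerivative(nntrainer::RunLayerContext &context) override;

  // in-class definition: unqualified name, body inline
  bool requireLabel() const { return true; }

  inline static const std::string type = "mae_loss";
};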
diff --git a/nntrainer/layers/conv2d_layer.cpp b/nntrainer/layers/conv2d_layer.cpp
index d7ec447..953702e 100644
@@ -114,17 +114,19 @@ static void col2im(const Tensor &col_matrix, const TensorDim &kdim,
 
 static TensorDim
 calcIm2ColOutputDim(const TensorDim &in, const TensorDim &kdim,
-                    const std::array<unsigned int, CONV2D_DIM> &padding,
+                    const std::array<unsigned int, CONV2D_DIM * 2> &padding,
                     const std::array<unsigned int, CONV2D_DIM> &mstride,
                     const std::array<unsigned int, CONV2D_DIM> &dilation) {
 
-  unsigned int ph = padding[0];
-  unsigned int pw = padding[1];
+  unsigned pt = padding[0];
+  unsigned pb = padding[1];
+  unsigned pl = padding[2];
+  unsigned pr = padding[3];
 
   int in_height = in.height();
   int in_width = in.width();
-  unsigned int height = in_height + ph * 2;
-  unsigned int width = in_width + pw * 2;
+  unsigned int height = in_height + pt + pb;
+  unsigned int width = in_width + pl + pr;
   unsigned int k_height = kdim.height();
   unsigned int k_width = kdim.width();
 
@@ -205,7 +207,6 @@ static void im2col(const Tensor &in, const TensorDim &kdim,
   //   }
   */
 
-  const int pad_value = 0;
   unsigned pt = padding[0];
   unsigned pb = padding[1];
   unsigned pl = padding[2];
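
The net effect: the two symmetric pads ph/pw become four independent pads, so the padded extents are in_height + pt + pb and in_width + pl + pr. A standalone sketch of the per-axis output arithmetic (the function name is illustrative; the dilated-kernel term is the standard formula):

// Output extent along one spatial axis with asymmetric padding.
unsigned int convOutExtent(unsigned int in, unsigned int k,
                           unsigned int pad_begin, unsigned int pad_end,
                           unsigned int stride, unsigned int dilation) {
  const unsigned int eff_k = dilation * (k - 1) + 1; // dilated kernel extent
  return (in + pad_begin + pad_end - eff_k) / stride + 1;
}

// e.g. in = 5, k = 3, pad_begin = 1, pad_end = 0, stride = 1, dilation = 1:
// (5 + 1 + 0 - 3) / 1 + 1 = 4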
diff --git a/nntrainer/layers/conv2d_layer.h b/nntrainer/layers/conv2d_layer.h
index a0fecec..510b6c6 100644
@@ -35,7 +35,9 @@ public:
   Conv2DLayer(unsigned int filter_size_ = 0,
               const std::array<unsigned int, CONV2D_DIM> &kernel_size_ = {0, 0},
               const std::array<unsigned int, CONV2D_DIM> &stride_ = {1, 1},
-              const std::array<unsigned int, CONV2D_DIM * 2> &padding_ = {0, 0, 0, 0}) :
+              const std::array<unsigned int, CONV2D_DIM * 2> &padding_ = {0, 0,
+                                                                          0,
+                                                                          0}) :
     LayerImpl(),
     filter_size(filter_size_),
     kernel_size(kernel_size_),
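
A hypothetical construction call with the widened default; the {top, bottom, left, right} ordering is inferred from the im2col change above:

Conv2DLayer conv(/*filter_size_=*/32,
                 /*kernel_size_=*/{3, 3},
                 /*stride_=*/{1, 1},
                 /*padding_=*/{1, 1, 2, 2}); // {pt, pb, pl, pr}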
diff --git a/nntrainer/layers/layer_devel.h b/nntrainer/layers/layer_devel.h
index 28a21c4..b3fdd68 100644
@@ -88,6 +88,7 @@ public:
    *            33. split_dimension : string (type)
    *            34. return_sequences :  bool (type) - lstm
    *            35. hidden_state_activation :  string (type) - lstm
+   *            36. dropout : bool
    */
   enum class PropertyType {
     input_shape = 0,
@@ -126,6 +127,7 @@ public:
     split_dimension = 33,
     return_sequences = 34,
     hidden_state_activation = 35,
+    dropout = 36,
     unknown
   };
 
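
A minimal sketch of how a string key could map onto the new entry; parsePropertyType is an illustrative name, not a function from this diff:

#include <string>

enum class PropertyType {
  /* ... entries 0..35 as listed above ... */
  dropout = 36,
  unknown
};

PropertyType parsePropertyType(const std::string &key) {
  return key == "dropout" ? PropertyType::dropout : PropertyType::unknown;
}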
diff --git a/nntrainer/layers/pooling2d_layer.h b/nntrainer/layers/pooling2d_layer.h
index 3954320..81cfced 100644
@@ -60,7 +60,8 @@ public:
     PoolingType pooling_type_ = PoolingType::average,
     const std::array<unsigned int, POOLING2D_DIM> &pool_size_ = {0, 0},
     const std::array<unsigned int, POOLING2D_DIM> &stride_ = {1, 1},
-    const std::array<unsigned int, POOLING2D_DIM * 2> &padding_ = {0, 0, 0, 0}) :
+    const std::array<unsigned int, POOLING2D_DIM * 2> &padding_ = {0, 0, 0,
+                                                                   0}) :
     Layer(),
     pool_size(pool_size_),
     stride(stride_),
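
Pooling gets the same widening; a hypothetical call under the same {top, bottom, left, right} assumption (the scope of PoolingType is taken from the default argument above):

Pooling2DLayer pool(PoolingType::average,
                    /*pool_size_=*/{2, 2},
                    /*stride_=*/{2, 2},
                    /*padding_=*/{0, 1, 0, 1}); // bottom/right-only padding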
diff --git a/nntrainer/layers/rnn.h b/nntrainer/layers/rnn.h
index 0bd82c6..84d3ed8 100644
@@ -125,7 +125,6 @@ private:
    */
   float dropout_rate;
 
-
   /**
    * @brief setProperty by type and value separated
    * @param[in] type property type to be passed
diff --git a/test/unittest/unittest_nntrainer_modelfile.cpp b/test/unittest/unittest_nntrainer_modelfile.cpp
index c40636f..d09c4ef 100644
@@ -378,13 +378,13 @@ INSTANTIATE_TEST_CASE_P(
     mkIniTc("unknown_layer_type2_n", {nw_base_cross, adam, input, out + "Type = asdf"+"input_layers=inputlayer", I(out, "outlayer", "")}, ALLFAIL),
 
  /**< negative: slight tweaks to check determinacy (5 negative cases) */
-    mkIniTc("wrong_nw_dataset_n", {nw_base_cross, adam, input, out+"input_layers=inputlayer", dataset + "-LabelData"}, ALLFAIL),
-    mkIniTc("wrong_nw_dataset2_n", {nw_base_cross, adam, dataset + "-LabelData", input, out+"input_layers=inputlayer"}, ALLFAIL),
+    mkIniTc("wrong_nw_dataset_n", {nw_base_cross, adam, input, out+"input_layers=inputlayer", dataset + "-TrainData"}, ALLFAIL),
+    mkIniTc("wrong_nw_dataset2_n", {nw_base_cross, adam, dataset + "-TrainData", input, out+"input_layers=inputlayer"}, ALLFAIL),
 
   /**< negative: dataset is not complete (5 negative cases) */
     mkIniTc("no_trainingSet_n", {nw_base_cross, adam, dataset + "-TrainData", input, out+"input_layers=inputlayer"}, ALLFAIL),
 
-    mkIniTc("backbone_filemissing_n", {nw_base_cross, adam, dataset + "-LabelData", input, out+"input_layers=inputlayer"}, ALLFAIL)
+    mkIniTc("backbone_filemissing_n", {nw_base_cross, adam, backbone_random, out+"input_layers=inputlayer"}, ALLFAIL)
 ), [](const testing::TestParamInfo<nntrainerIniTest::ParamType>& info){
  return std::get<0>(info.param);
 });