[layer] Update the layer constructors
author   Parichay Kapoor <pk.kapoor@samsung.com>
Thu, 8 Oct 2020 08:47:29 +0000 (17:47 +0900)
committer   Jijoong Moon <jijoong.moon@samsung.com>
Tue, 13 Oct 2020 05:01:17 +0000 (14:01 +0900)
Update the layer constructors to take their configuration as arguments
Add a layer_factory creator method
Update model_loader and the capi to use the factory creator

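A minimal sketch of the new creation path (the FC type is just an example; error handling omitted):

    // create a layer through the factory instead of new-ing a concrete class
    std::unique_ptr<nntrainer::Layer> layer =
      nntrainer::createLayer(nntrainer::LayerType::LAYER_FC);
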
**Self evaluation:**
1. Build test: [x]Passed [ ]Failed [ ]Skipped
2. Run test: [x]Passed [ ]Failed [ ]Skipped

Signed-off-by: Parichay Kapoor <pk.kapoor@samsung.com>
20 files changed:
api/capi/include/nntrainer_internal.h
api/capi/src/nntrainer.cpp
api/capi/src/nntrainer_util.cpp
nntrainer/include/activation_layer.h
nntrainer/include/addition_layer.h
nntrainer/include/bn_layer.h
nntrainer/include/conv2d_layer.h
nntrainer/include/fc_layer.h
nntrainer/include/flatten_layer.h
nntrainer/include/input_layer.h
nntrainer/include/layer.h
nntrainer/include/layer_factory.h [new file with mode: 0644]
nntrainer/include/loss_layer.h
nntrainer/include/pooling2d_layer.h
nntrainer/meson.build
nntrainer/src/activation_layer.cpp
nntrainer/src/conv2d_layer.cpp
nntrainer/src/model_loader.cpp
nntrainer/src/pooling2d_layer.cpp
packaging/nntrainer.spec

index b0e3d1c..ed1a2c4 100644 (file)
@@ -337,4 +337,11 @@ void ml_tizen_set_feature_state(feature_state_t state);
 nntrainer::OptType
 ml_optimizer_to_nntrainer_type(ml_train_optimizer_type_e type);
 
+/**
+ * @brief Convert nntrainer API layer type to neural network layer type
+ * @param[in] type Layer type API enum
+ * @return nntrainer::LayerType layer type
+ */
+nntrainer::LayerType ml_layer_to_nntrainer_type(ml_train_layer_type_e type);
+
 #endif
index 05d0287..8c9ff35 100644 (file)
@@ -24,6 +24,7 @@
 #include <databuffer.h>
 #include <databuffer_file.h>
 #include <databuffer_func.h>
+#include <layer_factory.h>
 #include <neuralnet.h>
 #include <nntrainer_error.h>
 #include <nntrainer_internal.h>
@@ -513,38 +514,27 @@ int ml_train_model_get_layer(ml_train_model_h model, const char *layer_name,
 
 int ml_train_layer_create(ml_train_layer_h *layer, ml_train_layer_type_e type) {
   int status = ML_ERROR_NONE;
-  returnable f;
   ml_train_layer *nnlayer;
 
   check_feature_state();
 
   nnlayer = new ml_train_layer;
   nnlayer->magic = ML_NNTRAINER_MAGIC;
+  nnlayer->in_use = false;
 
-  switch (type) {
-  case ML_TRAIN_LAYER_TYPE_INPUT:
-    status =
-      exception_bounded_make_shared<nntrainer::InputLayer>(nnlayer->layer);
-    break;
-  case ML_TRAIN_LAYER_TYPE_FC:
-    status = exception_bounded_make_shared<nntrainer::FullyConnectedLayer>(
-      nnlayer->layer);
-    break;
-  default:
-    delete nnlayer;
-    ml_loge("Error: Unknown layer type");
-    status = ML_ERROR_INVALID_PARAMETER;
-    return status;
-  }
+  returnable f = [&]() {
+    nnlayer->layer = createLayer(ml_layer_to_nntrainer_type(type));
+    return ML_ERROR_NONE;
+  };
 
+  status = nntrainer_exception_boundary(f);
   if (status != ML_ERROR_NONE) {
     delete nnlayer;
     ml_loge("Error: Create layer failed");
-    return status;
+  } else {
+    *layer = nnlayer;
   }
 
-  nnlayer->in_use = false;
-  *layer = nnlayer;
   return status;
 }
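With the switch gone, creating a layer from the C API is a single factory call behind the exception boundary; a caller-side sketch (inside a function, error handling elided):

    ml_train_layer_h layer = NULL;
    int status = ml_train_layer_create(&layer, ML_TRAIN_LAYER_TYPE_FC);
    if (status != ML_ERROR_NONE) {
      /* unknown types now fail inside the factory instead of the switch */
    }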
 
index 74bcd29..43716ff 100644 (file)
@@ -26,3 +26,17 @@ ml_optimizer_to_nntrainer_type(ml_train_optimizer_type_e type) {
     return nntrainer::OptType::unknown;
   }
 }
+
+/**
+ * @brief Convert nntrainer API layer type to neural network layer type
+ */
+nntrainer::LayerType ml_layer_to_nntrainer_type(ml_train_layer_type_e type) {
+  switch (type) {
+  case ML_TRAIN_LAYER_TYPE_FC:
+    return nntrainer::LayerType::LAYER_FC;
+  case ML_TRAIN_LAYER_TYPE_INPUT:
+    return nntrainer::LayerType::LAYER_IN;
+  default:
+    return nntrainer::LayerType::LAYER_UNKNOWN;
+  }
+}
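Together with the factory, this gives the capi a two-step creation path; a sketch assuming the internal headers are visible:

    nntrainer::LayerType t =
      ml_layer_to_nntrainer_type(ML_TRAIN_LAYER_TYPE_INPUT);
    // createLayer() throws std::invalid_argument for LAYER_UNKNOWN;
    // nntrainer_exception_boundary turns that back into a status code
    auto layer = nntrainer::createLayer(t);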
index 26118ea..b811efe 100644 (file)
@@ -30,7 +30,11 @@ public:
   /**
    * @brief     Constructor of Activation Layer
    */
-  ActivationLayer(ActivationType at = ActivationType::ACT_NONE);
+  template <typename... Args>
+  ActivationLayer(ActivationType at = ActivationType::ACT_NONE, Args... args) :
+    Layer(LayerType::LAYER_ACTIVATION, args...) {
+    setActivation(at);
+  }
 
   /**
    * @brief     Destructor of Activation Layer
index e5407ce..1027401 100644 (file)
@@ -29,10 +29,11 @@ public:
   /**
    * @brief     Constructor of Addition Layer
    */
-  AdditionLayer() {
-    setType(LayerType::LAYER_ADDITION);
-    num_inputs = 0;
-  };
+  template <typename... Args>
+  AdditionLayer(unsigned int num_inputs_ = 0, Args... args) :
+    Layer(LayerType::LAYER_ADDITION, args...) {
+    num_inputs = num_inputs_;
+  }
 
   /**
    * @brief     Destructor of Addition Layer
index fab3d4b..5198eeb 100644 (file)
@@ -42,21 +42,28 @@ public:
   /**
   * @brief     Constructor of Batch Normalization Layer
    */
-  BatchNormalizationLayer(float epsilon = 0.001, float momentum = 0.99,
-                          int axis = -1) :
+  template <typename... Args>
+  BatchNormalizationLayer(
+    int axis = -1, float momentum = 0.99, float epsilon = 0.001,
+    WeightInitializer moving_mean_initializer = WeightInitializer::WEIGHT_ZEROS,
+    WeightInitializer moving_variance_initializer =
+      WeightInitializer::WEIGHT_ZEROS,
+    WeightInitializer gamma_initializer = WeightInitializer::WEIGHT_ONES,
+    WeightInitializer beta_initializer = WeightInitializer::WEIGHT_ONES,
+    Args... args) :
+    Layer(LayerType::LAYER_BN, args...),
     epsilon(epsilon),
     momentum(momentum),
     axis(axis),
-    initializers{
-      WeightInitializer::WEIGHT_ZEROS, WeightInitializer::WEIGHT_ONES,
-      WeightInitializer::WEIGHT_ZEROS, WeightInitializer::WEIGHT_ONES} {
-    setType(LayerType::LAYER_BN);
-  };
+    initializers{moving_mean_initializer, moving_variance_initializer,
+                 gamma_initializer, beta_initializer} {}
 
   /**
    * @brief     Destructor of BatchNormalizationLayer
    */
-  ~BatchNormalizationLayer(){};
+  ~BatchNormalizationLayer() {}
 
   /**
   *  @brief  Move constructor of Batch Normalization Layer.
index 76e665e..18526e1 100644 (file)
@@ -31,20 +31,25 @@ public:
   /**
    * @brief     Constructor of Conv 2D Layer
    */
-  Conv2DLayer() :
-    filter_size(0),
-    kernel_size{0, 0},
-    stride{1, 1},
-    padding{0, 0},
-    normalization(false),
-    standardization(false) {
-    setType(LayerType::LAYER_CONV2D);
-  };
+  template <typename... Args>
+  Conv2DLayer(unsigned int filter_size_ = 0,
+              const std::array<unsigned int, CONV2D_DIM> &kernel_size_ = {0, 0},
+              const std::array<unsigned int, CONV2D_DIM> &stride_ = {1, 1},
+              const std::array<unsigned int, CONV2D_DIM> &padding_ = {0, 0},
+              bool normalization_ = false, bool standardization_ = false,
+              Args... args) :
+    Layer(LayerType::LAYER_CONV2D, args...),
+    filter_size(filter_size_),
+    kernel_size(kernel_size_),
+    stride(stride_),
+    padding(padding_),
+    normalization(normalization_),
+    standardization(standardization_) {}
 
   /**
    * @brief     Destructor of Conv 2D Layer
    */
-  ~Conv2DLayer(){};
+  ~Conv2DLayer() {}
 
   /**
    *  @brief  Move constructor of Conv 2D Layer.
@@ -117,9 +122,9 @@ public:
 
 private:
   unsigned int filter_size;
-  unsigned int kernel_size[CONV2D_DIM];
-  unsigned int stride[CONV2D_DIM];
-  unsigned int padding[CONV2D_DIM];
+  std::array<unsigned int, CONV2D_DIM> kernel_size;
+  std::array<unsigned int, CONV2D_DIM> stride;
+  std::array<unsigned int, CONV2D_DIM> padding;
 
   bool normalization;
   bool standardization;
@@ -183,9 +188,10 @@ private:
    * @retval #ML_ERROR_INVALID_PARAMETER invalid parameter.
    */
   int conv2d_gemm(const float *mkernel, TensorDim kdim, Tensor const &in,
-                  TensorDim outdim, unsigned int const *mstride,
-                  unsigned int const *pad, float *out, unsigned int osize,
-                  bool channel_mode);
+                  TensorDim outdim,
+                  const std::array<unsigned int, CONV2D_DIM> &stride,
+                  const std::array<unsigned int, CONV2D_DIM> &pad, float *out,
+                  unsigned int osize, bool channel_mode);
 
   /**
    * @brief     reform the data to 2d matrix
@@ -199,7 +205,8 @@ private:
    * @retval #ML_ERROR_INVALID_PARAMETER invalid parameter.
    */
   int im2col(Tensor in_padded, TensorDim kdim, float *inCol, TensorDim outdim,
-             unsigned int const *mstride, bool channel_mode);
+             const std::array<unsigned int, CONV2D_DIM> &mstride,
+             bool channel_mode);
 };
 
 } // namespace nntrainer
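The std::array members keep the same element layout as the old C arrays, so call sites that still expect raw pointers (zero_pad, strip_pad, getValues) only need .data(); an illustrative sketch, assuming CONV2D_DIM is 2 as in conv2d_layer.h:

    void pad_interop() {
      std::array<unsigned int, CONV2D_DIM> padding = {1, 1};
      const unsigned int *raw = padding.data(); // same memory, size known at compile time
      (void)raw;
    }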
index 0581a5b..3d4185d 100644 (file)
@@ -29,7 +29,10 @@ public:
   /**
    * @brief     Constructor of Fully Connected Layer
    */
-  FullyConnectedLayer() : unit(0) { setType(LayerType::LAYER_FC); };
+  template <typename... Args>
+  FullyConnectedLayer(unsigned int unit_ = 0, Args... args) :
+    Layer(LayerType::LAYER_FC, args...),
+    unit(unit_) {}
 
   /**
    * @brief     Destructor of Fully Connected Layer
index 54501c2..d10253a 100644 (file)
@@ -29,7 +29,8 @@ public:
   /**
    * @brief     Constructor of Flatten Layer
    */
-  FlattenLayer() { setType(LayerType::LAYER_FLATTEN); };
+  template <typename... Args>
+  FlattenLayer(Args... args) : Layer(LayerType::LAYER_FLATTEN, args...) {}
 
   /**
    * @brief     Destructor of Flatten Layer
index ac1ae2d..188bc1c 100644 (file)
@@ -38,14 +38,17 @@ public:
   /**
    * @brief     Constructor of InputLayer
    */
-  InputLayer() : normalization(false), standardization(false) {
-    setType(LayerType::LAYER_IN);
-  };
+  template <typename... Args>
+  InputLayer(bool normalization = false, bool standardization = false,
+             Args... args) :
+    Layer(LayerType::LAYER_IN, args...),
+    normalization(normalization),
+    standardization(standardization) {}
 
   /**
    * @brief     Destructor of InputLayer
    */
-  ~InputLayer(){};
+  ~InputLayer() {}
 
   /**
   *  @brief  Move constructor of Input Layer.
index f3caaf1..8345b1b 100644 (file)
@@ -84,17 +84,24 @@ public:
   /**
    * @brief     Constructor of Layer Class
    */
-  Layer() :
+  Layer(
+    LayerType type_, ActivationType activation_type_ = ActivationType::ACT_NONE,
+    WeightRegularizerType weight_regularizer_ = WeightRegularizerType::unknown,
+    const float weight_regularizer_constant_ = 1.0f,
+    WeightInitializer weight_initializer_ =
+      WeightInitializer::WEIGHT_XAVIER_UNIFORM,
+    WeightInitializer bias_initializer_ = WeightInitializer::WEIGHT_ZEROS,
+    bool trainable_ = true, bool flatten_ = false) :
     name(std::string()),
-    type(LayerType::LAYER_UNKNOWN),
+    type(type_),
     loss(0.0f),
-    activation_type(ActivationType::ACT_NONE),
-    weight_regularizer(WeightRegularizerType::unknown),
-    weight_regularizer_constant(0.0f),
-    weight_initializer(WeightInitializer::WEIGHT_XAVIER_UNIFORM),
-    bias_initializer(WeightInitializer::WEIGHT_ZEROS),
-    flatten(false),
-    trainable(true),
+    activation_type(activation_type_),
+    weight_regularizer(weight_regularizer_),
+    weight_regularizer_constant(weight_regularizer_constant_),
+    weight_initializer(weight_initializer_),
+    bias_initializer(bias_initializer_),
+    flatten(flatten_),
+    trainable(trainable_),
     num_weights(0),
     num_inputs(1),
     num_outputs(1) {}
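Each derived constructor now forwards its trailing parameter pack into this base constructor, so the common options ride along after the layer-specific ones; a hedged example with illustrative values:

    // 10 units for the FC layer, then Layer's first argument: the activation type
    nntrainer::FullyConnectedLayer fc(10, nntrainer::ActivationType::ACT_RELU);

Note the pack is taken by value (Args... rather than Args &&...), which is adequate for the enum and scalar parameters Layer accepts.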
diff --git a/nntrainer/include/layer_factory.h b/nntrainer/include/layer_factory.h
new file mode 100644 (file)
index 0000000..06f6180
--- /dev/null
@@ -0,0 +1,70 @@
+// SPDX-License-Identifier: Apache-2.0
+/**
+ * Copyright (C) 2020 Parichay Kapoor <pk.kapoor@samsung.com>
+ *
+ * @file       layer_factory.h
+ * @date       7 October 2020
+ * @see                https://github.com/nnstreamer/nntrainer
+ * @author     Parichay Kapoor <pk.kapoor@samsung.com>
+ * @bug                No known bugs except for NYI items
+ * @brief      This is the layer factory.
+ */
+
+#ifndef __LAYER_FACTORY_H__
+#define __LAYER_FACTORY_H__
+#ifdef __cplusplus
+
+#include <activation_layer.h>
+#include <addition_layer.h>
+#include <bn_layer.h>
+#include <conv2d_layer.h>
+#include <fc_layer.h>
+#include <flatten_layer.h>
+#include <input_layer.h>
+#include <layer.h>
+#include <loss_layer.h>
+#include <memory>
+#include <pooling2d_layer.h>
+
+namespace nntrainer {
+
+/**
+ * @brief Factory creator with copy constructor
+ */
+// std::unique_ptr<Optimizer> createOptimizer(OptType type, const Optimizer
+// &opt);
+
+/**
+ * @brief Factory creator with constructor
+ */
+template <typename... Args>
+std::unique_ptr<Layer> createLayer(LayerType type, Args... args) {
+  switch (type) {
+  case LayerType::LAYER_IN:
+    return std::make_unique<InputLayer>(args...);
+  case LayerType::LAYER_FC:
+    return std::make_unique<FullyConnectedLayer>(args...);
+  case LayerType::LAYER_BN:
+    return std::make_unique<BatchNormalizationLayer>(args...);
+  case LayerType::LAYER_CONV2D:
+    return std::make_unique<Conv2DLayer>(args...);
+  case LayerType::LAYER_POOLING2D:
+    return std::make_unique<Pooling2DLayer>(args...);
+  case LayerType::LAYER_FLATTEN:
+    return std::make_unique<FlattenLayer>(args...);
+  case LayerType::LAYER_ACTIVATION:
+    return std::make_unique<ActivationLayer>(args...);
+  case LayerType::LAYER_ADDITION:
+    return std::make_unique<AdditionLayer>(args...);
+  case LayerType::LAYER_LOSS:
+    return std::make_unique<LossLayer>(args...);
+  case LayerType::LAYER_UNKNOWN:
+    /** fallthrough intended */
+  default:
+    throw std::invalid_argument("Unknown type for the layer");
+  }
+}
+
+} /* namespace nntrainer */
+
+#endif /* __cplusplus */
+#endif /* __LAYER_FACTORY_H__ */
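A usage sketch of the factory. Note that the whole switch is instantiated for a given argument pack, so extra arguments must be convertible to every layer's constructor parameters; callers in this commit therefore pass the type alone:

    #include <layer_factory.h>

    void factory_demo() {
      auto in = nntrainer::createLayer(nntrainer::LayerType::LAYER_IN);
      try {
        auto bad = nntrainer::createLayer(nntrainer::LayerType::LAYER_UNKNOWN);
      } catch (const std::invalid_argument &e) {
        // e.what() == "Unknown type for the layer"
      }
    }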
index 6256df5..23c61f1 100644 (file)
@@ -42,9 +42,10 @@ public:
   /**
    * @brief     Constructor of Loss Layer
    */
-  LossLayer() : loss_type(LossType::LOSS_UNKNOWN) {
-    setType(LayerType::LAYER_LOSS);
-  };
+  template <typename... Args>
+  LossLayer(LossType loss_type_ = LossType::LOSS_UNKNOWN, Args... args) :
+    Layer(LayerType::LAYER_LOSS, args...),
+    loss_type(loss_type_) {}
 
   /**
    * @brief     Destructor of Loss Layer
index e42557f..aedf74a 100644 (file)
@@ -40,18 +40,23 @@ public:
   /**
    * @brief     Constructor of Pooling 2D Layer
    */
-  Pooling2DLayer() :
-    pool_size{0, 0},
-    stride{1, 1},
-    padding{0, 0},
-    pooling_type(PoolingType::average) {
-    setType(LayerType::LAYER_POOLING2D);
-  };
+  template <typename... Args>
+  Pooling2DLayer(
+    PoolingType pooling_type_ = PoolingType::average,
+    const std::array<unsigned int, POOLING2D_DIM> &pool_size_ = {0, 0},
+    const std::array<unsigned int, POOLING2D_DIM> &stride_ = {1, 1},
+    const std::array<unsigned int, POOLING2D_DIM> &padding_ = {0, 0},
+    Args... args) :
+    Layer(LayerType::LAYER_POOLING2D, args...),
+    pool_size(pool_size_),
+    stride(stride_),
+    padding(padding_),
+    pooling_type(pooling_type_) {}
 
   /**
    * @brief     Destructor of Pooling 2D Layer
    */
-  ~Pooling2DLayer(){};
+  ~Pooling2DLayer() {}
 
   /**
    *  @brief  Move constructor of Pooling 2D Layer.
@@ -128,9 +133,9 @@ public:
   void setProperty(const PropertyType type, const std::string &value = "");
 
 private:
-  unsigned int pool_size[POOLING2D_DIM];
-  unsigned int stride[POOLING2D_DIM];
-  unsigned int padding[POOLING2D_DIM];
+  std::array<unsigned int, POOLING2D_DIM> pool_size;
+  std::array<unsigned int, POOLING2D_DIM> stride;
+  std::array<unsigned int, POOLING2D_DIM> padding;
   std::vector<unsigned int> max_idx;
   std::vector<std::vector<unsigned int>> max_idx_global;
   PoolingType pooling_type;
index b656cb1..e8cc189 100644 (file)
@@ -67,6 +67,7 @@ nntrainer_headers = [
   'include/flatten_layer.h',
   'include/input_layer.h',
   'include/layer.h',
+  'include/layer_factory.h',
   'include/lazy_tensor.h',
   'include/loss_layer.h',
   'include/model_loader.h',
index f0c798d..c796fb1 100644 (file)
 namespace nntrainer {
 
 /**
- * @brief     Constructor of Activation Layer
- */
-ActivationLayer::ActivationLayer(ActivationType at) : Layer() {
-  setType(LayerType::LAYER_ACTIVATION);
-  setActivation(at);
-}
-
-/**
  * @brief     Initialize the layer
  *
  * @retval #ML_ERROR_NONE Successful.
index 17808f6..57d0876 100644 (file)
@@ -167,7 +167,7 @@ sharedConstTensor Conv2DLayer::forwarding(sharedConstTensor in) {
 sharedConstTensor Conv2DLayer::backwarding(sharedConstTensor derivative,
                                            int iteration) {
 
-  unsigned int same_pad[CONV2D_DIM];
+  std::array<unsigned int, CONV2D_DIM> same_pad;
 
   same_pad[0] = kernel_size[0] - 1;
   same_pad[1] = kernel_size[1] - 1;
@@ -351,7 +351,7 @@ sharedConstTensor Conv2DLayer::backwarding(sharedConstTensor derivative,
     opt->apply_gradients(weight_list, num_weights, iteration);
   }
 
-  return MAKE_SHARED_TENSOR(std::move(strip_pad(ret, padding)));
+  return MAKE_SHARED_TENSOR(std::move(strip_pad(ret, padding.data())));
 }
 
 void Conv2DLayer::copy(std::shared_ptr<Layer> l) {
@@ -415,7 +415,7 @@ void Conv2DLayer::setProperty(const PropertyType type,
   } break;
   case PropertyType::kernel_size:
     if (!value.empty()) {
-      status = getValues(CONV2D_DIM, value, (int *)(kernel_size));
+      status = getValues(CONV2D_DIM, value, (int *)(kernel_size.data()));
       throw_status(status);
       if (kernel_size[0] == 0 || kernel_size[1] == 0) {
         throw std::invalid_argument(
@@ -425,7 +425,7 @@ void Conv2DLayer::setProperty(const PropertyType type,
     break;
   case PropertyType::stride:
     if (!value.empty()) {
-      status = getValues(CONV2D_DIM, value, (int *)(stride));
+      status = getValues(CONV2D_DIM, value, (int *)(stride.data()));
       throw_status(status);
       if (stride[0] == 0 || stride[1] == 0) {
         throw std::invalid_argument(
@@ -435,7 +435,7 @@ void Conv2DLayer::setProperty(const PropertyType type,
     break;
   case PropertyType::padding:
     if (!value.empty()) {
-      status = getValues(CONV2D_DIM, value, (int *)(padding));
+      status = getValues(CONV2D_DIM, value, (int *)(padding.data()));
       throw_status(status);
     }
     break;
@@ -498,11 +498,11 @@ int Conv2DLayer::conv2d(float *in, TensorDim indim, const float *kernel,
   return status;
 }
 
-int Conv2DLayer::conv2d_gemm(const float *mkernel, TensorDim kdim,
-                             Tensor const &in, TensorDim outdim,
-                             unsigned int const *mstride,
-                             unsigned int const *pad, float *out,
-                             unsigned int osize, bool channel_mode) {
+int Conv2DLayer::conv2d_gemm(
+  const float *mkernel, TensorDim kdim, Tensor const &in, TensorDim outdim,
+  const std::array<unsigned int, CONV2D_DIM> &mstride,
+  const std::array<unsigned int, CONV2D_DIM> &pad, float *out,
+  unsigned int osize, bool channel_mode) {
   int status = ML_ERROR_NONE;
   std::vector<float> in_col;
 
@@ -512,7 +512,7 @@ int Conv2DLayer::conv2d_gemm(const float *mkernel, TensorDim kdim,
     in_col.resize(kdim.width() * kdim.height() * outdim.width());
   }
 
-  Tensor in_padded = zero_pad(0, in, pad);
+  Tensor in_padded = zero_pad(0, in, pad.data());
   status =
     im2col(in_padded, kdim, in_col.data(), outdim, mstride, channel_mode);
   if (status != ML_ERROR_NONE)
@@ -543,7 +543,8 @@ int Conv2DLayer::conv2d_gemm(const float *mkernel, TensorDim kdim,
 }
 
 int Conv2DLayer::im2col(Tensor in_padded, TensorDim kdim, float *in_col,
-                        TensorDim outdim, unsigned int const *mstride,
+                        TensorDim outdim,
+                        const std::array<unsigned int, CONV2D_DIM> &mstride,
                         bool channel_mode) {
 
   int status = ML_ERROR_NONE;
index 36c3b21..3a8b2d9 100644 (file)
@@ -14,6 +14,7 @@
 
 #include <databuffer_file.h>
 #include <databuffer_func.h>
+#include <layer_factory.h>
 #include <model_loader.h>
 #include <neuralnet.h>
 #include <nntrainer_error.h>
@@ -169,36 +170,16 @@ int ModelLoader::loadLayerConfigIni(dictionary *ini,
     iniparser_getstring(ini, (layer_name + ":Type").c_str(), unknown);
   LayerType layer_type = (LayerType)parseType(layer_type_str, TOKEN_LAYER);
 
-  switch (layer_type) {
-  case LayerType::LAYER_IN:
-    layer = std::make_shared<InputLayer>();
-    break;
-  case LayerType::LAYER_CONV2D:
-    layer = std::make_shared<Conv2DLayer>();
-    break;
-  case LayerType::LAYER_POOLING2D:
-    layer = std::make_shared<Pooling2DLayer>();
-    break;
-  case LayerType::LAYER_FLATTEN:
-    layer = std::make_shared<FlattenLayer>();
-    break;
-  case LayerType::LAYER_FC:
-    layer = std::make_shared<FullyConnectedLayer>();
-    break;
-  case LayerType::LAYER_BN:
-    layer = std::make_shared<BatchNormalizationLayer>();
-    break;
-  case LayerType::LAYER_ACTIVATION:
-    layer = std::make_shared<ActivationLayer>();
-    break;
-  case LayerType::LAYER_UNKNOWN:
-  default:
-    ml_loge("Error: Unknown layer type from %s, parsed to %d",
-            layer_type_str.c_str(),
-            static_cast<std::underlying_type<LayerType>::type>(layer_type));
+  try {
+    layer = createLayer(layer_type);
+  } catch (const std::exception &e) {
+    ml_loge("%s %s", typeid(e).name(), e.what());
+    status = ML_ERROR_INVALID_PARAMETER;
+  } catch (...) {
+    ml_loge("unknown error type thrown");
     status = ML_ERROR_INVALID_PARAMETER;
-    NN_RETURN_STATUS();
   }
+  NN_RETURN_STATUS();
 
   unsigned int property_end =
     static_cast<unsigned int>(Layer::PropertyType::unknown);
index f2deb74..6cb6bc9 100644 (file)
@@ -63,7 +63,7 @@ sharedConstTensor Pooling2DLayer::forwarding(sharedConstTensor in) {
   hidden.setZero();
 
   for (unsigned int b = 0; b < input_dim.batch(); ++b) {
-    Tensor in_padded = zero_pad(b, input, padding);
+    Tensor in_padded = zero_pad(b, input, padding.data());
     Tensor result = pooling2d(b, in_padded);
     memcpy(hidden.getAddress(b * hidden.getDim().getFeatureLen()),
            result.getData(), result.getDim().getDataLen() * sizeof(float));
@@ -208,7 +208,7 @@ void Pooling2DLayer::setProperty(const PropertyType type,
     }
   case PropertyType::pool_size:
     if (!value.empty()) {
-      status = getValues(POOLING2D_DIM, value, (int *)(pool_size));
+      status = getValues(POOLING2D_DIM, value, (int *)(pool_size.data()));
       throw_status(status);
       if (pool_size[0] == 0 || pool_size[1] == 0) {
         throw std::invalid_argument(
@@ -218,7 +218,7 @@ void Pooling2DLayer::setProperty(const PropertyType type,
     break;
   case PropertyType::stride:
     if (!value.empty()) {
-      status = getValues(POOLING2D_DIM, value, (int *)(stride));
+      status = getValues(POOLING2D_DIM, value, (int *)(stride.data()));
       throw_status(status);
       if (stride[0] == 0 || stride[1] == 0) {
         throw std::invalid_argument(
@@ -228,7 +228,7 @@ void Pooling2DLayer::setProperty(const PropertyType type,
     break;
   case PropertyType::padding:
     if (!value.empty()) {
-      status = getValues(POOLING2D_DIM, value, (int *)(padding));
+      status = getValues(POOLING2D_DIM, value, (int *)(padding.data()));
       throw_status(status);
       if ((int)padding[0] < 0 || (int)padding[1] < 0) {
         throw std::invalid_argument(
index 282c085..aad9840 100644 (file)
@@ -316,6 +316,7 @@ cp -r result %{buildroot}%{_datadir}/nntrainer/unittest/
 %{_includedir}/nntrainer/databuffer_file.h
 %{_includedir}/nntrainer/databuffer_func.h
 %{_includedir}/nntrainer/layer.h
+%{_includedir}/nntrainer/layer_factory.h
 %{_includedir}/nntrainer/input_layer.h
 %{_includedir}/nntrainer/fc_layer.h
 %{_includedir}/nntrainer/bn_layer.h