[ini] update ini bias init and flatten as feature
author Parichay Kapoor <pk.kapoor@samsung.com>
Thu, 2 Jul 2020 04:17:29 +0000 (13:17 +0900)
committer Jijoong Moon <jijoong.moon@samsung.com>
Mon, 6 Jul 2020 00:42:06 +0000 (09:42 +0900)
bias init property name is changed to bias_init_zero to make it more readable
flatten is now exposed externally as a layer property rather than as a separate layer
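
For example, a layer section in the model INI can now request flattening
directly instead of declaring a separate flatten layer. A minimal sketch
(the section name and the conv2d type token are assumptions; the remaining
properties mirror the configs updated in this commit):

[conv2dlayer]
Type = conv2d
Input_Shape = 32:3:28:28
Bias_init_zero = true   # renamed from Bias_zero
Activation = sigmoid
Flatten = true          # flattens this layer's output, no separate layer section

The same property strings go through the C API as well, e.g.
ml_nnlayer_set_property(layer, "bias_init_zero=true", "flatten=true", NULL).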

Signed-off-by: Parichay Kapoor <pk.kapoor@samsung.com>
27 files changed:
Applications/Classification/res/Classification.ini
Applications/Classification/res/Classification_func.ini
Applications/Classification/res/Classification_new.ini
Applications/LogisticRegression/res/LogisticRegression.ini
Applications/ReinforcementLearning/DeepQ/jni/DeepQ.ini
Applications/Tizen_CAPI/Tizen_CAPI_config.ini
Applications/Tizen_CAPI/capi_file.c
Applications/Tizen_CAPI/capi_func.c
Applications/Training/res/Training.ini
jni/Android.mk
nntrainer/include/flatten_layer.h
nntrainer/include/layer.h
nntrainer/include/neuralnet.h
nntrainer/include/parse_util.h
nntrainer/src/bn_layer.cpp
nntrainer/src/conv2d_layer.cpp
nntrainer/src/fc_layer.cpp
nntrainer/src/flatten_layer.cpp
nntrainer/src/input_layer.cpp
nntrainer/src/neuralnet.cpp
nntrainer/src/parse_util.cpp
test/include/nntrainer_test_util.h
test/tizen_capi/test_conf.ini
test/tizen_capi/unittest_tizen_capi.cpp
test/tizen_capi/unittest_tizen_capi_layer.cpp
test/unittest/unittest_nntrainer_internal.cpp
test/unittest/unittest_nntrainer_layers.cpp

index fc2b9b9..6eaf73c 100644 (file)
@@ -30,13 +30,13 @@ LabelData="label.dat"
 [inputlayer]
 Type = input
 Input_Shape = 32:1:1:62720     # Input Layer Dimension
-Bias_zero = true       # Zero Bias
+Bias_init_zero = true  # Zero Bias
 Normalization = true
 
 [outputlayer]
 Type = fully_connected
 Unit = 10              # Output Layer Dimension ( = Weight Width )
-Bias_zero = true
+Bias_init_zero = true
 Activation = softmax   # activation : sigmoid, softmax
 Weight_Decay = l2norm
 weight_Decay_Lambda = 0.005
index 99b649b..a91c6c1 100644 (file)
@@ -19,14 +19,14 @@ epsilon = 1e-7      # epsilon for adam
 [inputlayer]
 Type = input
 HiddenSize = 62720             # Input Layer Dimension
-Bias_zero = true       # Zero Bias
+Bias_init_zero = true  # Zero Bias
 Normalization = true
 Activation = sigmoid   # activation : sigmoid, tanh
 
 [outputlayer]
 Type = fully_connected
 HiddenSize = 10                # Output Layer Dimension ( = Weight Width )
-Bias_zero = true
+Bias_init_zero = true
 Activation = softmax   # activation : sigmoid, softmax
 Weight_Decay = l2norm
 weight_Decay_Lambda = 0.005
index 7b203fb..27bf1a9 100644 (file)
@@ -21,13 +21,13 @@ epsilon = 1e-8      # epsilon for adam
 Type = InputLayer
 Id = 0                 # Layer Id
 HiddenSize = 62720     # Input Layer Dimension
-Bias_zero = true       # Zero Bias
+Bias_init_zero = true  # Zero Bias
 
 [fc1layer]
 Type = FullyConnectedLayer
 Id = 1
 HiddenSize = 128       # Hidden Layer Dimension ( = Weight Width )
-Bias_zero = true
+Bias_init_zero = true
 
 [batchnormalization]
 Type = BatchNormalizationLayer
@@ -37,5 +37,5 @@ Id = 2
 Type = OutputLayer
 Id = 3
 HiddenSize = 10                # Output Layer Dimension ( = Weight Width )
-Bias_zero = true
+Bias_init_zero = true
 Softmax = true
index 1a41605..c0b8103 100644 (file)
@@ -15,11 +15,11 @@ epsilon = 1e-5
 [inputlayer]
 Type = input
 Input_Shape = 1:1:1:2
-Bias_zero = true       # Zero Bias
+Bias_init_zero = true  # Zero Bias
 Activation = sigmoid
 
 [outputlayer]
 Type = fully_connected
 Unit = 1
-Bias_zero = true
+Bias_init_zero = true
 Activation = sigmoid
index 279a969..ec9d2c0 100644 (file)
@@ -18,23 +18,23 @@ epsilon = 1e-8              # epsilon for adam
 [inputlayer]
 Type = input
 Input_Shape = 32:1:1:4         # Input Layer Dimension
-Bias_zero = true       # Zero Bias
+Bias_init_zero = true  # Zero Bias
 Activation = tanh      # activation : sigmoid, tanh
 
 [fc1layer]
 Type = fully_connected
 Unit = 50              # Hidden Layer Dimension ( = Weight Width )
-Bias_zero = true
+Bias_init_zero = true
 Activation = tanh      # activation : sigmoid, tanh
 
 [fc2layer]
 Type = fully_connected
 Unit = 50
-Bias_zero = true
+Bias_init_zero = true
 Activation = tanh      # activation : sigmoid, tanh
 
 [outputlayer]
 Type = fully_connected
 Unit = 2               # Output Layer Dimension ( = Weight Width )
-Bias_zero = true
+Bias_init_zero = true
 Activation = sigmoid   # activation : sigmoid, tanh
index 918477f..ba96f76 100644 (file)
@@ -30,14 +30,14 @@ LabelData="label.dat"
 [inputlayer]
 Type = input
 Input_Shape = 32:1:1:62720             # Input Layer Dimension
-Bias_zero = true       # Zero Bias
+Bias_init_zero = true  # Zero Bias
 Normalization = true
 Activation = sigmoid   # activation : sigmoid, tanh
 
 [outputlayer]
 Type = fully_connected
 Unit = 10              # Output Layer Dimension ( = Weight Width )
-Bias_zero = true
+Bias_init_zero = true
 Activation = softmax   # activation : sigmoid, softmax
 Weight_Decay = l2norm
 weight_Decay_Lambda = 0.005
index c506d98..780b395 100644 (file)
@@ -55,7 +55,7 @@ main (int argc, char *argv[])
   /* set property for input layer */
   status =
       ml_nnlayer_set_property (layers[0], "input_shape= 32:1:1:62720",
-      "normalization=true", "bias_zero=true", NULL);
+      "normalization=true", "bias_init_zero=true", NULL);
   NN_RETURN_STATUS ();
 
   /* add input layer into model */
@@ -68,7 +68,7 @@ main (int argc, char *argv[])
 
   /* set property for fc layer */
   status = ml_nnlayer_set_property (layers[1], "unit= 10", "activation=softmax",
-      "bias_zero=true", "weight_decay=l2norm",
+      "bias_init_zero=true", "weight_decay=l2norm",
       "weight_decay_lambda=0.005", "weight_ini=xavier_uniform", NULL);
   NN_RETURN_STATUS ();
 
index 09fc9b6..8b6f708 100644 (file)
@@ -291,7 +291,7 @@ main (int argc, char *argv[])
   /* set property for input layer */
   status =
       ml_nnlayer_set_property (layers[0], "input_shape= 32:1:1:62720",
-      "normalization=true", "bias_zero=true", NULL);
+      "normalization=true", "bias_init_zero=true", NULL);
   NN_RETURN_STATUS ();
 
   /* add input layer into model */
@@ -304,7 +304,7 @@ main (int argc, char *argv[])
 
   /* set property for fc layer */
   status = ml_nnlayer_set_property (layers[1], "unit= 10", "activation=softmax",
-      "bias_zero=true", "weight_decay=l2norm",
+      "bias_init_zero=true", "weight_decay=l2norm",
       "weight_decay_lambda=0.005", "weight_ini=xavier_uniform", NULL);
   NN_RETURN_STATUS ();
 
index 518d525..886c135 100644 (file)
@@ -17,17 +17,17 @@ minibatch = 1               # mini batch size
 [inputlayer]
 Type = input
 Input_Shape = 1:1:1:128        # Input Layer Dimension
-Bias_zero = true       # Zero Bias
+Bias_init_zero = true  # Zero Bias
 Activation = sigmoid
 
 [fc1layer]
 Type = fully_connected
 Unit = 20              # Hidden Layer Dimension ( = Weight Width )
-Bias_zero = true
+Bias_init_zero = true
 Activation = sigmoid
 
 [outputlayer]
 Type = fully_connected
 Unit = 3               # Output Layer Dimension ( = Weight Width )
-Bias_zero = true
+Bias_init_zero = true
 Activation = sigmoid
index 2539b35..b472863 100644 (file)
@@ -37,7 +37,8 @@ NNTRAINER_SRCS := $(NNTRAINER_ROOT)/nntrainer/src/neuralnet.cpp \
                   $(NNTRAINER_ROOT)/nntrainer/src/tensor_dim.cpp \
                   $(NNTRAINER_ROOT)/nntrainer/src/conv2d_layer.cpp \
                   $(NNTRAINER_ROOT)/nntrainer/src/pooling2d_layer.cpp \
-                  $(NNTRAINER_ROOT)/nntrainer/src/activation_layer.cpp
+                  $(NNTRAINER_ROOT)/nntrainer/src/activation_layer.cpp \
+                  $(NNTRAINER_ROOT)/nntrainer/src/flatten_layer.cpp
 
 NNTRAINER_INCLUDES := $(NNTRAINER_ROOT)/nntrainer/include
 
index 31b9700..711b22f 100644 (file)
@@ -19,7 +19,6 @@
 #include <fstream>
 #include <iostream>
 #include <layer.h>
-#include <nntrainer_error.h>
 #include <tensor.h>
 #include <vector>
 
@@ -92,9 +91,7 @@ public:
    * @param[in] values values of property
    * @retval #ML_ERROR_NOT_SUPPORTED Successful.
    */
-  int setProperty(std::vector<std::string> values) {
-    return ML_ERROR_NOT_SUPPORTED;
-  };
+  int setProperty(std::vector<std::string> values);
 
   /**
    * @brief     copy layer
index 97d6aee..8988e54 100644 (file)
@@ -119,14 +119,15 @@ class Layer {
 public:
   Layer()
     : last_layer(false),
-      init_zero(false),
+      bias_init_zero(false),
       type(LAYER_UNKNOWN),
       loss(0.0),
       cost(COST_UNKNOWN),
       activation_type(ACT_NONE),
       bn_follow(false),
       weight_decay(),
-      weight_ini_type(WEIGHT_UNKNOWN) {}
+      weight_ini_type(WEIGHT_UNKNOWN),
+      flatten(false) {}
 
   /**
    * @brief     Destructor of Layer Class
@@ -258,7 +259,7 @@ public:
    * @brief  set bias initialize with zero
    * @param[in] zero true/false
    */
-  void setBiasZero(bool zero) { init_zero = zero; }
+  void setBiasZero(bool zero) { bias_init_zero = zero; }
 
   /**
    * @brief  set Weight Initialization Type
@@ -310,6 +311,12 @@ public:
   std::shared_ptr<std::vector<Tensor>> getWeights() { return getObjFromRef(weights); }
 
   /**
+   * @brief     get whether the output of this layer must be flattened
+   * @retval    flatten value
+   */
+  bool getFlatten() { return flatten; }
+
+  /**
    * @brief     Property Enumeration
    *            0. input shape : string
    *            1. bias zero : bool
@@ -330,7 +337,7 @@ public:
    */
   enum class PropertyType {
     input_shape = 0,
-    bias_zero = 1,
+    bias_init_zero = 1,
     normalization = 2,
     standardization = 3,
     activation = 4,
@@ -345,7 +352,8 @@ public:
     padding = 13,
     pooling_size = 14,
     pooling = 15,
-    unknown = 16
+    flatten = 16,
+    unknown = 17
   };
 
 protected:
@@ -396,7 +404,7 @@ protected:
   /**
    * @brief     Boolean for the Bias to set zero
    */
-  bool init_zero;
+  bool bias_init_zero;
 
   /**
    * @brief     Layer type
@@ -422,6 +430,11 @@ protected:
   WeightIniType weight_ini_type;
 
   /**
+   * @brief   Output of this layer should be flattened
+   */
+  bool flatten;
+
+  /**
    * @brief     Gradient for the weights in this layer
    */
   std::vector<std::reference_wrapper<Tensor>> gradients;
index c1deee0..d1b9f3d 100644 (file)
@@ -29,6 +29,7 @@
 #include <conv2d_layer.h>
 #include <databuffer.h>
 #include <fc_layer.h>
+#include <flatten_layer.h>
 #include <fstream>
 #include <input_layer.h>
 #include <iostream>
@@ -384,6 +385,20 @@ private:
    * @returns   Create activation layer
    */
   std::shared_ptr<Layer> _make_act_layer(ActiType act, std::shared_ptr<Layer>);
+
+  /**
+   * @brief     Add flatten layer to layers
+   * @param[in/out] position Position to insert the layer at;
+   *                incremented once the layer is inserted.
+   * @note layer is inserted right after the given position
+   */
+  int initFlattenLayer(unsigned int &position);
+
+  /**
+   * @brief     Add flatten layer to layers
+   * @note layer is inserted at the back of layers
+   */
+  int initFlattenLayer();
 };
 
 } /* namespace nntrainer */
index 25b37fe..26b7d72 100644 (file)
@@ -46,6 +46,7 @@ namespace nntrainer {
  *            5. WEIGHTINI  ( Weight Initialization Token )
  *            7. WEIGHT_DECAY  ( Weight Decay Token )
  *            8. PADDING  ( Padding Token )
- *            9. UNKNOWN
+ *            9. POOLING  ( Pooling Token )
+ *            10. UNKNOWN
  */
 typedef enum {
index 4efd6a6..f660b47 100644 (file)
@@ -68,8 +68,8 @@ int BatchNormalizationLayer::setProperty(std::vector<std::string> values) {
     case PropertyType::input_shape:
       status = dim.setTensorDim(values[0].c_str());
       break;
-    case PropertyType::bias_zero: {
-      status = setBoolean(init_zero, value);
+    case PropertyType::bias_init_zero: {
+      status = setBoolean(bias_init_zero, value);
       NN_RETURN_STATUS();
     } break;
     case PropertyType::epsilon:
index 47a5833..4150c29 100644 (file)
@@ -45,7 +45,7 @@ int Conv2DLayer::initialize(bool last) {
     filters.push_back(Knl);
 
     Tensor B(input_dim.batch(), 1, 1, 1);
-    if (!init_zero) {
+    if (!bias_init_zero) {
       B.apply([&](float x) { return random(); });
     }
     bias.push_back(B);
@@ -282,14 +282,18 @@ int Conv2DLayer::setProperty(std::vector<std::string> values) {
       status = input_dim.setTensorDim(value.c_str());
       NN_RETURN_STATUS();
       break;
-    case PropertyType::bias_zero:
-      status = setBoolean(init_zero, value);
+    case PropertyType::bias_init_zero:
+      status = setBoolean(bias_init_zero, value);
       NN_RETURN_STATUS();
       break;
     case PropertyType::activation:
       status = setActivation((ActiType)parseType(value, TOKEN_ACTI));
       NN_RETURN_STATUS();
       break;
+    case PropertyType::flatten:
+      status = setBoolean(flatten, value);
+      NN_RETURN_STATUS();
+      break;
     case PropertyType::weight_decay:
       weight_decay.type = (WeightDecayType)parseType(value, TOKEN_WEIGHT_DECAY);
       if (weight_decay.type == WeightDecayType::unknown) {
index c45bf8f..7b75e62 100644 (file)
@@ -47,7 +47,7 @@ int FullyConnectedLayer::initialize(bool last) {
   output_dim = input_dim;
   output_dim.width(unit);
 
-  if (init_zero) {
+  if (bias_init_zero) {
     bias.setZero();
   } else {
     bias.setRandUniform(-0.5, 0.5);
@@ -79,14 +79,18 @@ int FullyConnectedLayer::setProperty(std::vector<std::string> values) {
       unit = width;
       output_dim.width(unit);
     } break;
-    case PropertyType::bias_zero: {
-      status = setBoolean(init_zero, value);
+    case PropertyType::bias_init_zero: {
+      status = setBoolean(this->bias_init_zero, value);
       NN_RETURN_STATUS();
     } break;
     case PropertyType::activation:
       status = setActivation((ActiType)parseType(value, TOKEN_ACTI));
       NN_RETURN_STATUS();
       break;
+    case PropertyType::flatten:
+      status = setBoolean(flatten, value);
+      NN_RETURN_STATUS();
+      break;
     case PropertyType::weight_decay:
       weight_decay.type = (WeightDecayType)parseType(value, TOKEN_WEIGHT_DECAY);
       if (weight_decay.type == WeightDecayType::unknown) {
index 0a5b6af..29716a5 100644 (file)
@@ -67,4 +67,8 @@ void FlattenLayer::copy(std::shared_ptr<Layer> l) {
   this->last_layer = from->last_layer;
 }
 
+int FlattenLayer::setProperty(std::vector<std::string> values) {
+  return ML_ERROR_NOT_SUPPORTED;
+}
+
 } /* namespace nntrainer */
index bb948c8..c485dc4 100644 (file)
@@ -51,8 +51,8 @@ int InputLayer::setProperty(std::vector<std::string> values) {
       status = input_dim.setTensorDim(value.c_str());
       NN_RETURN_STATUS();
       break;
-    case PropertyType::bias_zero:
-      status = setBoolean(init_zero, value);
+    case PropertyType::bias_init_zero:
+      status = setBoolean(bias_init_zero, value);
       NN_RETURN_STATUS();
       break;
     case PropertyType::normalization:
index 7155ab2..040bfcb 100644 (file)
@@ -243,7 +243,7 @@ int NeuralNetwork::init() {
       iniparser_getstring(ini, (layer_name + ":Type").c_str(), unknown);
     LayerType layer_type = (LayerType) parseType(layer_type_str, TOKEN_LAYER);
     bool b_zero =
-      iniparser_getboolean(ini, (layer_name + ":Bias_zero").c_str(), true);
+      iniparser_getboolean(ini, (layer_name + ":bias_init_zero").c_str(), true);
 
     last = (section_names_iter + 1) == section_names.end();
 
@@ -281,7 +281,7 @@ int NeuralNetwork::init() {
         std::make_shared<Conv2DLayer>();
 
       std::string input_shape_str = iniparser_getstring(
-        ini, (layer_name + ":Input_Shape").c_str(), unknown);
+          ini, (layer_name + ":Input_Shape").c_str(), unknown);
 
       if (input_shape_str.compare("Unknown") != 0) {
         TensorDim d;
@@ -307,38 +307,38 @@ int NeuralNetwork::init() {
 
       status =
         getValues(CONV2D_DIM,
-                  iniparser_getstring(
-                    ini, (layer_name + ":kernel_size").c_str(), unknown),
-                  (int *)size);
+            iniparser_getstring(
+              ini, (layer_name + ":kernel_size").c_str(), unknown),
+            (int *)size);
       NN_INI_RETURN_STATUS();
       status = conv2d_layer->setSize(size, Layer::PropertyType::kernel_size);
       NN_INI_RETURN_STATUS();
 
       status = getValues(
-        CONV2D_DIM,
-        iniparser_getstring(ini, (layer_name + ":stride").c_str(), unknown),
-        (int *)size);
+          CONV2D_DIM,
+          iniparser_getstring(ini, (layer_name + ":stride").c_str(), unknown),
+          (int *)size);
       NN_INI_RETURN_STATUS();
       status = conv2d_layer->setSize(size, Layer::PropertyType::stride);
       NN_INI_RETURN_STATUS();
 
       status = getValues(CONV2D_DIM,
-                         iniparser_getstring(
-                           ini, (layer_name + ":padding").c_str(), unknown),
-                         (int *)size);
+          iniparser_getstring(
+            ini, (layer_name + ":padding").c_str(), unknown),
+          (int *)size);
       NN_INI_RETURN_STATUS();
       status = conv2d_layer->setSize(size, Layer::PropertyType::padding);
       NN_INI_RETURN_STATUS();
 
       status = conv2d_layer->setFilter(
-        iniparser_getint(ini, (layer_name + ":filter").c_str(), 0));
+          iniparser_getint(ini, (layer_name + ":filter").c_str(), 0));
       NN_INI_RETURN_STATUS();
 
       conv2d_layer->setBiasZero(b_zero);
       conv2d_layer->setWeightInit((WeightIniType)parseType(
-        iniparser_getstring(ini, (layer_name + ":WeightIni").c_str(),
-                            unknown),
-        TOKEN_WEIGHTINI));
+            iniparser_getstring(ini, (layer_name + ":WeightIni").c_str(),
+              unknown),
+            TOKEN_WEIGHTINI));
 
       status = parseWeightDecay(ini, layer_name, weight_decay);
       NN_INI_RETURN_STATUS();
@@ -431,6 +431,8 @@ int NeuralNetwork::init() {
       NN_INI_RETURN_STATUS();
       break;
     }
+
+    /** Add activation layer */
     const char *acti_str = iniparser_getstring(
       ini, (layer_name + ":Activation").c_str(), unknown);
     ActiType act = (ActiType)parseType(acti_str, TOKEN_ACTI);
@@ -439,6 +441,13 @@ int NeuralNetwork::init() {
     status = initActivationLayer(act);
     NN_INI_RETURN_STATUS();
 
+    /** Add flatten layer */
+    bool flatten =
+      iniparser_getboolean(ini, (layer_name + ":Flatten").c_str(), false);
+    if (flatten) {
+      status = initFlattenLayer();
+      NN_INI_RETURN_STATUS();
+    }
     previous_dim = layers.back()->getOutputDimension();
   }
 
@@ -657,8 +666,13 @@ int NeuralNetwork::init(std::shared_ptr<Optimizer> optimizer,
     default:
       break;
     }
-    status = initActivationLayer(layers[i]->getActivationType(), i);
+    std::shared_ptr<Layer> last_layer = layers[i];
+    status = initActivationLayer(last_layer->getActivationType(), i);
     NN_RETURN_STATUS();
+    if (last_layer->getFlatten()) {
+      status = initFlattenLayer(i);
+      NN_RETURN_STATUS();
+    }
     previous_dim = layers[i]->getOutputDimension();
   }
 
@@ -1046,4 +1060,20 @@ int NeuralNetwork::initActivationLayer(ActiType act, unsigned int &position) {
   return ML_ERROR_INVALID_PARAMETER;
 }
 
+int NeuralNetwork::initFlattenLayer(unsigned int &position) {
+  std::shared_ptr<FlattenLayer> flatten_layer =
+    std::make_shared<FlattenLayer>();
+
+  flatten_layer->setInputDimension(layers[position]->getOutputDimension());
+  flatten_layer->initialize(layers[position]->getLast());
+  layers.insert(layers.begin() + position + 1, flatten_layer);
+  position++;
+  return ML_ERROR_NONE;
+}
+
+int NeuralNetwork::initFlattenLayer() {
+  unsigned int position = layers.size() - 1;
+  return initFlattenLayer(position);
+}
+
 } /* namespace nntrainer */
index d67d81a..1b4a88a 100644 (file)
@@ -243,7 +243,7 @@ unsigned int parseLayerProperty(std::string property) {
   /**
    * @brief     Layer Properties
    * input_shape = 0,
-   * bias_zero = 1,
+   * bias_init_zero = 1,
    * normalization = 2,
    * standardization = 3,
    * activation = 4,
@@ -258,6 +258,7 @@ unsigned int parseLayerProperty(std::string property) {
    * padding = 13
    * pooling_size = 14
    * pooling = 15
+   * flatten = 16
    *
    * InputLayer has 0, 1, 2, 3 properties.
    * FullyConnectedLayer has 1, 4, 6, 7, 8, 9 properties.
@@ -265,12 +266,12 @@ unsigned int parseLayerProperty(std::string property) {
    * Pooling2DLayer has 12, 13, 14, 15 properties.
    * BatchNormalizationLayer has 0, 1, 5, 6, 7 properties.
    */
-  std::array<std::string, 17> property_string = {
-    "input_shape", "bias_zero",  "normalization", "standardization",
+  std::array<std::string, 18> property_string = {
+    "input_shape", "bias_init_zero",  "normalization", "standardization",
     "activation",  "epsilon",    "weight_decay",  "weight_decay_lambda",
     "unit",        "weight_ini", "filter",        "kernel_size",
     "stride",      "padding",    "pooling_size",  "pooling",
-    "unknown"};
+    "flatten",     "unknown"};
 
   for (i = 0; i < property_string.size(); i++) {
     unsigned int size = (property_string[i].size() > property.size())
index 4b1734f..5e170dd 100644 (file)
@@ -78,7 +78,7 @@ const std::string config_str = "[Network]"
                                "\n"
                                "Input_Shape = 32:1:1:62720"
                                "\n"
-                               "Bias_zero = true"
+                               "bias_init_zero = true"
                                "\n"
                                "Normalization = true"
                                "\n"
@@ -90,7 +90,7 @@ const std::string config_str = "[Network]"
                                "\n"
                                "Unit = 10"
                                "\n"
-                               "Bias_zero = true"
+                               "bias_init_zero = true"
                                "\n"
                                "Activation = softmax"
                                "\n";
@@ -141,7 +141,7 @@ const std::string config_str2 = "[Network]"
                                 "\n"
                                 "Input_Shape = 32:3:28:28"
                                 "\n"
-                                "Bias_zero = true"
+                                "bias_init_zero = true"
                                 "\n"
                                 "Activation = sigmoid"
                                 "\n"
@@ -159,13 +159,15 @@ const std::string config_str2 = "[Network]"
                                 "\n"
                                 "weightIni=xavier_uniform"
                                 "\n"
+                                "flatten = false"
+                                "\n"
                                 "[outputlayer]"
                                 "\n"
                                 "Type = fully_connected"
                                 "\n"
                                 "Unit = 10"
                                 "\n"
-                                "Bias_zero = true"
+                                "bias_init_zero = true"
                                 "\n"
                                 "Activation = softmax"
                                 "\n";
index 6d7a549..725f68d 100644 (file)
@@ -23,19 +23,19 @@ LabelData = "label.dat"
 Type = input
 Id = 0                 # Layer Id
 HiddenSize = 128       # Input Layer Dimension
-Bias_zero = true       # Zero Bias
+Bias_init_zero = true  # Zero Bias
 Activation = sigmoid
 
 [fc1layer]
 Type = fully_connected
 Id = 1
 HiddenSize = 20                # Hidden Layer Dimension ( = Weight Width )
-Bias_zero = true
+Bias_init_zero = true
 Activation = sigmoid
 
 [outputlayer]
 Type = output
 Id = 3
 HiddenSize = 3         # Output Layer Dimension ( = Weight Width )
-Bias_zero = true
+Bias_init_zero = true
 Activation = sigmoid
index 268e2de..bd684a7 100644 (file)
@@ -132,7 +132,7 @@ TEST(nntrainer_capi_nnmodel, compile_05_p) {
 
   status =
     ml_nnlayer_set_property(layers[0], "input_shape= 32:1:1:62720",
-                            "normalization=true", "bias_zero=true", NULL);
+                            "normalization=true", "bias_init_zero=true", NULL);
   EXPECT_EQ(status, ML_ERROR_NONE);
 
   status = ml_nnmodel_add_layer(model, layers[0]);
@@ -142,7 +142,7 @@ TEST(nntrainer_capi_nnmodel, compile_05_p) {
   EXPECT_EQ(status, ML_ERROR_NONE);
 
   status = ml_nnlayer_set_property(layers[1], "unit= 10", "activation=softmax",
-                                   "bias_zero=true", "weight_decay=l2norm",
+                                   "bias_init_zero=true", "weight_decay=l2norm",
                                    "weight_decay_lambda=0.005",
                                    "weight_ini=xavier_uniform", NULL);
   EXPECT_EQ(status, ML_ERROR_NONE);
@@ -306,7 +306,7 @@ TEST(nntrainer_capi_nnmodel, addLayer_04_p) {
 
   status =
     ml_nnlayer_set_property(layers[0], "input_shape= 32:1:1:62720",
-                            "normalization=true", "bias_zero=true", NULL);
+                            "normalization=true", "bias_init_zero=true", NULL);
   EXPECT_EQ(status, ML_ERROR_NONE);
 
   status = ml_nnmodel_add_layer(model, layers[0]);
@@ -316,7 +316,7 @@ TEST(nntrainer_capi_nnmodel, addLayer_04_p) {
   EXPECT_EQ(status, ML_ERROR_NONE);
 
   status = ml_nnlayer_set_property(layers[1], "unit= 10", "activation=softmax",
-                                   "bias_zero=true", "weight_decay=l2norm",
+                                   "bias_init_zero=true", "weight_decay=l2norm",
                                    "weight_decay_lambda=0.005", NULL);
   EXPECT_EQ(status, ML_ERROR_NONE);
 
@@ -356,7 +356,7 @@ TEST(nntrainer_capi_nnmodel, addLayer_05_n) {
   EXPECT_EQ(status, ML_ERROR_NONE);
 
   status = ml_nnlayer_set_property(layer, "unit= 10", "activation=softmax",
-                                   "bias_zero=true", "weight_decay=l2norm",
+                                   "bias_init_zero=true", "weight_decay=l2norm",
                                    "weight_decay_lambda=0.005", NULL);
   EXPECT_EQ(status, ML_ERROR_NONE);
 
@@ -388,7 +388,7 @@ TEST(nntrainer_capi_nnmodel, create_optimizer_05_p) {
 
   status =
     ml_nnlayer_set_property(layers[0], "input_shape= 32:1:1:62720",
-                            "normalization=true", "bias_zero=true", NULL);
+                            "normalization=true", "bias_init_zero=true", NULL);
   EXPECT_EQ(status, ML_ERROR_NONE);
 
   status = ml_nnmodel_add_layer(model, layers[0]);
@@ -398,7 +398,7 @@ TEST(nntrainer_capi_nnmodel, create_optimizer_05_p) {
   EXPECT_EQ(status, ML_ERROR_NONE);
 
   status = ml_nnlayer_set_property(layers[1], "unit= 10", "activation=softmax",
-                                   "bias_zero=true", "weight_decay=l2norm",
+                                   "bias_init_zero=true", "weight_decay=l2norm",
                                    "weight_decay_lambda=0.005", NULL);
   EXPECT_EQ(status, ML_ERROR_NONE);
 
@@ -442,7 +442,7 @@ TEST(nntrainer_capi_nnmodel, train_with_file_01_p) {
 
   status =
     ml_nnlayer_set_property(layers[0], "input_shape= 16:1:1:62720",
-                            "normalization=true", "bias_zero=true", NULL);
+                            "normalization=true", "bias_init_zero=true", NULL);
   EXPECT_EQ(status, ML_ERROR_NONE);
 
   status = ml_nnmodel_add_layer(model, layers[0]);
@@ -452,7 +452,7 @@ TEST(nntrainer_capi_nnmodel, train_with_file_01_p) {
   EXPECT_EQ(status, ML_ERROR_NONE);
 
   status = ml_nnlayer_set_property(layers[1], "unit= 10", "activation=softmax",
-                                   "bias_zero=true", "weight_decay=l2norm",
+                                   "bias_init_zero=true", "weight_decay=l2norm",
                                    "weight_decay_lambda=0.005",
                                    "weight_ini=xavier_uniform", NULL);
   EXPECT_EQ(status, ML_ERROR_NONE);
@@ -505,7 +505,7 @@ TEST(nntrainer_capi_nnmodel, train_with_generator_01_p) {
 
   status =
     ml_nnlayer_set_property(layers[0], "input_shape= 16:1:1:62720",
-                            "normalization=true", "bias_zero=true", NULL);
+                            "normalization=true", "bias_init_zero=true", NULL);
   EXPECT_EQ(status, ML_ERROR_NONE);
 
   status = ml_nnmodel_add_layer(model, layers[0]);
@@ -515,7 +515,7 @@ TEST(nntrainer_capi_nnmodel, train_with_generator_01_p) {
   EXPECT_EQ(status, ML_ERROR_NONE);
 
   status = ml_nnlayer_set_property(layers[1], "unit= 10", "activation=softmax",
-                                   "bias_zero=true", "weight_decay=l2norm",
+                                   "bias_init_zero=true", "weight_decay=l2norm",
                                    "weight_decay_lambda=0.005",
                                    "weight_ini=xavier_uniform", NULL);
   EXPECT_EQ(status, ML_ERROR_NONE);
index b026237..3ad6656 100644 (file)
@@ -87,7 +87,7 @@ TEST(nntrainer_capi_nnlayer, setproperty_02_p) {
   status = ml_nnlayer_set_property(handle, "unit=10", NULL);
   EXPECT_EQ(status, ML_ERROR_NONE);
 
-  status = ml_nnlayer_set_property(handle, "bias_zero=true", NULL);
+  status = ml_nnlayer_set_property(handle, "bias_init_zero=true", NULL);
   EXPECT_EQ(status, ML_ERROR_NONE);
 
   status = ml_nnlayer_set_property(handle, "activation =sigmoid", NULL);
index 05706b0..8e1f854 100644 (file)
@@ -264,6 +264,21 @@ TEST(nntrainer_Conv2DLayer, initialize_01_p) {
 }
 
 /**
+ * @brief Flatten layer added via the conv2d flatten property
+ */
+TEST(nntrainer_Conv2DLayer, initialize_02_p) {
+  int status = ML_ERROR_NONE;
+  std::string config_file = "./test.ini";
+  RESET_CONFIG(config_file.c_str());
+  replaceString("flatten = false", "flatten = true", config_file, config_str2);
+  nntrainer::NeuralNetwork NN;
+  status = NN.setConfig(config_file);
+  EXPECT_EQ(status, ML_ERROR_NONE);
+  status = NN.init();
+  EXPECT_EQ(status, ML_ERROR_NONE);
+}
+
+/**
  * @brief Main gtest
  */
 int main(int argc, char **argv) {
index 7e42bc9..a0b5dae 100644 (file)
@@ -438,7 +438,7 @@ TEST(nntrainer_Conv2DLayer, setProperty_01_p) {
   std::vector<std::string> input_str;
 
   input_str.push_back("input_shape=32:3:28:28");
-  input_str.push_back("bias_zero=true");
+  input_str.push_back("bias_init_zero=true");
   input_str.push_back("activation=sigmoid");
   input_str.push_back("weight_decay=l2norm");
   input_str.push_back("weight_decay_lambda = 0.005");
@@ -464,7 +464,7 @@ TEST(nntrainer_Conv2DLayer, initialize_01_p) {
   previous_dim.setTensorDim("32:3:28:28");
 
   input_str.push_back("input_shape=32:3:28:28");
-  input_str.push_back("bias_zero=true");
+  input_str.push_back("bias_init_zero=true");
   input_str.push_back("activation=sigmoid");
   input_str.push_back("weight_decay=l2norm");
   input_str.push_back("weight_decay_lambda = 0.005");
@@ -493,7 +493,7 @@ TEST(nntrainer_Conv2DLayer, save_read_01_p) {
   previous_dim.setTensorDim("32:3:28:28");
 
   input_str.push_back("input_shape=32:3:28:28");
-  input_str.push_back("bias_zero=true");
+  input_str.push_back("bias_init_zero=true");
   input_str.push_back("activation=sigmoid");
   input_str.push_back("weight_decay=l2norm");
   input_str.push_back("weight_decay_lambda = 0.005");
@@ -542,7 +542,7 @@ TEST(nntrainer_Conv2DLayer, forwarding_01_p) {
   previous_dim.setTensorDim("1:3:7:7");
 
   input_str.push_back("input_shape=1:3:7:7");
-  input_str.push_back("bias_zero=true");
+  input_str.push_back("bias_init_zero=true");
   input_str.push_back("weight_decay=l2norm");
   input_str.push_back("weight_decay_lambda = 0.005");
   input_str.push_back("weight_ini=xavier_uniform");
@@ -595,7 +595,7 @@ TEST(nntrainer_Conv2DLayer, forwarding_02_p) {
   previous_dim.setTensorDim("2:3:7:7");
 
   input_str.push_back("input_shape=2:3:7:7");
-  input_str.push_back("bias_zero=true");
+  input_str.push_back("bias_init_zero=true");
   input_str.push_back("weight_decay=l2norm");
   input_str.push_back("weight_decay_lambda = 0.005");
   input_str.push_back("weight_ini=xavier_uniform");
@@ -650,7 +650,7 @@ TEST(nntrainer_Conv2D, backwarding_01_p) {
   previous_dim.setTensorDim("1:3:7:7");
 
   input_str.push_back("input_shape=1:3:7:7");
-  input_str.push_back("bias_zero=true");
+  input_str.push_back("bias_init_zero=true");
   input_str.push_back("weight_decay=l2norm");
   input_str.push_back("weight_decay_lambda = 0.005");
   input_str.push_back("weight_ini=xavier_uniform");