[ TENSOR ] Change to get Tensor Vector
author jijoong.moon <jijoong.moon@samsung.com>
Wed, 21 Oct 2020 11:56:26 +0000 (20:56 +0900)
committer Jijoong Moon <jijoong.moon@samsung.com>
Tue, 27 Oct 2020 01:13:13 +0000 (10:13 +0900)
Currently, each layer takes and produces only one tensor. For skip
connections and other layers that take or produce multiple tensors, we
need to handle a list of tensors as the input and output of a layer.

In this PR, the tensor input and output of layers are changed to tensor vectors.
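
For example, a caller that previously passed a single shared tensor now wraps
it in a brace-initialized vector and indexes the returned vector. A minimal
sketch of the call-site change (NN, X and stepFunction are placeholders taken
from the touched applications):

  // before: single tensor in, single tensor out
  sharedConstTensor out = NN.forwarding(MAKE_SHARED_TENSOR(X));
  // after: vector of tensors in, vector of tensors out
  // (sharedConstTensors is typedef'd as std::vector<sharedConstTensor> in tensor.h)
  sharedConstTensors outs = NN.forwarding({MAKE_SHARED_TENSOR(X)});
  outs[0]->apply(stepFunction);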

**Self evaluation:**
1. Build test:  [X]Passed [ ]Failed [ ]Skipped
2. Run test:  [X]Passed [ ]Failed [ ]Skipped

Signed-off-by: jijoong.moon <jijoong.moon@samsung.com>
28 files changed:
Applications/LogisticRegression/jni/main.cpp
Applications/ReinforcementLearning/DeepQ/jni/main.cpp
Applications/TransferLearning/CIFAR_Classification/jni/main.cpp
nnstreamer/tensor_filter/tensor_filter_nntrainer.cc
nntrainer/include/activation_layer.h
nntrainer/include/addition_layer.h
nntrainer/include/bn_layer.h
nntrainer/include/conv2d_layer.h
nntrainer/include/fc_layer.h
nntrainer/include/flatten_layer.h
nntrainer/include/input_layer.h
nntrainer/include/layer_internal.h
nntrainer/include/loss_layer.h
nntrainer/include/neuralnet.h
nntrainer/include/pooling2d_layer.h
nntrainer/include/tensor.h
nntrainer/src/activation_layer.cpp
nntrainer/src/addition_layer.cpp
nntrainer/src/bn_layer.cpp
nntrainer/src/conv2d_layer.cpp
nntrainer/src/fc_layer.cpp
nntrainer/src/flatten_layer.cpp
nntrainer/src/input_layer.cpp
nntrainer/src/loss_layer.cpp
nntrainer/src/neuralnet.cpp
nntrainer/src/pooling2d_layer.cpp
test/unittest/unittest_nntrainer_layers.cpp
test/unittest/unittest_nntrainer_models.cpp

index 16a411c..f8ebb47 100644 (file)
@@ -215,9 +215,10 @@ int main(int argc, char *argv[]) {
       getData(dataFile, o, l, j);
 
       try {
-        float answer = NN.forwarding(MAKE_SHARED_TENSOR(nntrainer::Tensor({o})))
-                         ->apply(stepFunction)
-                         .getValue(0, 0, 0, 0);
+        float answer =
+          NN.forwarding({MAKE_SHARED_TENSOR(nntrainer::Tensor({o}))})[0]
+            ->apply(stepFunction)
+            .getValue(0, 0, 0, 0);
         std::cout << answer << " : " << l[0] << std::endl;
         cn += answer == l[0];
       } catch (...) {
index b3ca61c..a5dba5b 100644 (file)
@@ -341,7 +341,7 @@ int main(int argc, char **argv) {
           return 0;
         }
         try {
-          test = mainNet.forwarding(MAKE_SHARED_TENSOR(in_tensor));
+          test = mainNet.forwarding({MAKE_SHARED_TENSOR(in_tensor)})[0];
         } catch (...) {
           std::cerr << "Error while forwarding the network" << std::endl;
           return 0;
@@ -448,7 +448,7 @@ int main(int argc, char **argv) {
          */
         nntrainer::sharedConstTensor Q;
         try {
-          Q = mainNet.forwarding(MAKE_SHARED_TENSOR(q_in));
+          Q = mainNet.forwarding({MAKE_SHARED_TENSOR(q_in)})[0];
         } catch (...) {
           std::cerr << "Error during forwarding main network" << std::endl;
           return -1;
@@ -459,7 +459,7 @@ int main(int argc, char **argv) {
          */
         nntrainer::sharedConstTensor NQ;
         try {
-          NQ = targetNet.forwarding(MAKE_SHARED_TENSOR(nq_in));
+          NQ = targetNet.forwarding({MAKE_SHARED_TENSOR(nq_in)})[0];
         } catch (...) {
           std::cerr << "Error during forwarding target network" << std::endl;
           return -1;
@@ -495,7 +495,7 @@ int main(int argc, char **argv) {
         nntrainer::Tensor in_tensor;
         try {
           in_tensor = nntrainer::Tensor(inbatch);
-          mainNet.backwarding(MAKE_SHARED_TENSOR(in_tensor), Q, iter);
+          mainNet.backwarding({MAKE_SHARED_TENSOR(in_tensor)}, {Q}, iter);
         } catch (...) {
           std::cerr << "Error during backwarding the network" << std::endl;
           return -1;
index 898fd5c..0f4e432 100644 (file)
@@ -435,7 +435,7 @@ int main(int argc, char *argv[]) {
     nntrainer::Tensor X;
     try {
       X = nntrainer::Tensor({featureVector});
-      NN.forwarding(MAKE_SHARED_TENSOR(X))->apply(stepFunction);
+      NN.forwarding({MAKE_SHARED_TENSOR(X)})[0]->apply(stepFunction);
     } catch (...) {
       std::cerr << "Error while forwarding the model" << std::endl;
       return 0;
index 57d1126..09c0974 100644 (file)
@@ -183,7 +183,7 @@ int NNTrainer::run(const GstTensorMemory *input, GstTensorMemory *output) {
   std::shared_ptr<const nntrainer::Tensor> o;
 
   try {
-    o = model->inference(X);
+    o = model->inference({MAKE_SHARED_TENSOR(X)})[0];
   } catch (std::exception &e) {
     ml_loge("%s %s", typeid(e).name(), e.what());
     return -2;
index dff488d..7dc6e10 100644 (file)
@@ -62,14 +62,14 @@ public:
   void save(std::ofstream &file){/* noop */};
 
   /**
-   * @copydoc Layer::forwarding(sharedConstTensor in)
+   * @copydoc Layer::forwarding(sharedConstTensors in)
    */
-  sharedConstTensor forwarding(sharedConstTensor in);
+  sharedConstTensors forwarding(sharedConstTensors in);
 
   /**
-   * @copydoc Layer::backwarding(sharedConstTensor in, int iteration)
+   * @copydoc Layer::backwarding(sharedConstTensors in, int iteration)
    */
-  sharedConstTensor backwarding(sharedConstTensor in, int iteration);
+  sharedConstTensors backwarding(sharedConstTensors in, int iteration);
 
   /**
    * @brief setActivation by preset ActivationType
index 7b89a1e..faed92a 100644 (file)
@@ -73,14 +73,14 @@ public:
   void save(std::ofstream &file){};
 
   /**
-   * @copydoc Layer::forwarding(sharedConstTensor in)
+   * @copydoc Layer::forwarding(sharedConstTensors in)
    */
-  sharedConstTensor forwarding(sharedConstTensor in);
+  sharedConstTensors forwarding(sharedConstTensors in);
 
   /**
-   * @copydoc Layer::backwarding(sharedConstTensor in, int iteration)
+   * @copydoc Layer::backwarding(sharedConstTensors in, int iteration)
    */
-  sharedConstTensor backwarding(sharedConstTensor in, int iteration);
+  sharedConstTensors backwarding(sharedConstTensors in, int iteration);
 
   /**
    * @brief     get the base name for the layer
index 41d5e09..d4dcc1b 100644 (file)
@@ -78,14 +78,14 @@ public:
   BatchNormalizationLayer &operator=(BatchNormalizationLayer &&rhs) = default;
 
   /**
-   * @copydoc Layer::forwarding(sharedConstTensor in)
+   * @copydoc Layer::forwarding(sharedConstTensors in)
    */
-  sharedConstTensor forwarding(sharedConstTensor in);
+  sharedConstTensors forwarding(sharedConstTensors in);
 
   /**
-   * @copydoc Layer::backwarding(sharedConstTensor in, int iteration)
+   * @copydoc Layer::backwarding(sharedConstTensors in, int iteration)
    */
-  sharedConstTensor backwarding(sharedConstTensor in, int iteration);
+  sharedConstTensors backwarding(sharedConstTensors in, int iteration);
 
   /**
    * @brief     copy layer
index 5e9157e..8efdd4f 100644 (file)
@@ -83,14 +83,14 @@ public:
   void save(std::ofstream &file);
 
   /**
-   * @copydoc Layer::forwarding(sharedConstTensor in)
+   * @copydoc Layer::forwarding(sharedConstTensors in)
    */
-  sharedConstTensor forwarding(sharedConstTensor in);
+  sharedConstTensors forwarding(sharedConstTensors in);
 
   /**
-   * @copydoc Layer::backwarding(sharedConstTensor in, int iteration)
+   * @copydoc Layer::backwarding(sharedConstTensors in, int iteration)
    */
-  sharedConstTensor backwarding(sharedConstTensor in, int iteration);
+  sharedConstTensors backwarding(sharedConstTensors in, int iteration);
 
   /**
    * @brief     copy layer
index a73e0f8..7e2bb7d 100644 (file)
@@ -64,14 +64,14 @@ public:
   void save(std::ofstream &file);
 
   /**
-   * @copydoc Layer::forwarding(sharedConstTensor in)
+   * @copydoc Layer::forwarding(sharedConstTensors in)
    */
-  sharedConstTensor forwarding(sharedConstTensor in);
+  sharedConstTensors forwarding(sharedConstTensors in);
 
   /**
-   * @copydoc Layer::backwarding(sharedConstTensor in, int iteration)
+   * @copydoc Layer::backwarding(sharedConstTensors in, int iteration)
    */
-  sharedConstTensor backwarding(sharedConstTensor in, int iteration);
+  sharedConstTensors backwarding(sharedConstTensors in, int iteration);
 
   /**
    * @brief     copy layer
index 8080157..a1be63d 100644 (file)
@@ -69,14 +69,14 @@ public:
   void save(std::ofstream &file){};
 
   /**
-   * @copydoc Layer::forwarding(sharedConstTensor in)
+   * @copydoc Layer::forwarding(sharedConstTensors in)
    */
-  sharedConstTensor forwarding(sharedConstTensor in);
+  sharedConstTensors forwarding(sharedConstTensors in);
 
   /**
-   * @copydoc Layer::backwarding(sharedConstTensor in, int iteration)
+   * @copydoc Layer::backwarding(sharedConstTensors in, int iteration)
    */
-  sharedConstTensor backwarding(sharedConstTensor in, int iteration);
+  sharedConstTensors backwarding(sharedConstTensors in, int iteration);
 
   /**
    * @brief     get the base name for the layer
index 14215e9..12f7734 100644 (file)
@@ -73,14 +73,14 @@ public:
   void save(std::ofstream &file){};
 
   /**
-   * @copydoc Layer::forwarding(sharedConstTensor in)
+   * @copydoc Layer::forwarding(sharedConstTensors in)
    */
-  sharedConstTensor forwarding(sharedConstTensor in);
+  sharedConstTensors forwarding(sharedConstTensors in);
 
   /**
-   * @copydoc Layer::backwarding(sharedConstTensor in, int iteration)
+   * @copydoc Layer::backwarding(sharedConstTensors in, int iteration)
    */
-  sharedConstTensor backwarding(sharedConstTensor in, int iteration);
+  sharedConstTensors backwarding(sharedConstTensors in, int iteration);
 
   /**
    * @brief     Initializer of Input Layer
index 031489e..78aac16 100644 (file)
@@ -102,7 +102,7 @@ public:
    * @param[in] in List of Input Tensors taken by this layer
    * @retval    List of Output Tensors
    */
-  virtual sharedConstTensor forwarding(sharedConstTensor in) = 0;
+  virtual sharedConstTensors forwarding(sharedConstTensors in) = 0;
 
   /**
    * @brief     Back Propagation of a layer
@@ -110,8 +110,8 @@ public:
    * @param[in] iteration Iteration value for the Optimizer
    * @retval    Derivative List of Tensor for the previous layer
    */
-  virtual sharedConstTensor backwarding(sharedConstTensor in,
-                                        int iteration) = 0;
+  virtual sharedConstTensors backwarding(sharedConstTensors in,
+                                         int iteration) = 0;
 
   /**
    * @brief     read layer Weight & Bias data from file
index 9b436cb..b40b743 100644 (file)
@@ -53,9 +53,9 @@ public:
   ~LossLayer(){};
 
   /**
-   * @copydoc Layer::forwarding(sharedConstTensor in)
+   * @copydoc Layer::forwarding(sharedConstTensors in)
    */
-  sharedConstTensor forwarding(sharedConstTensor in);
+  sharedConstTensors forwarding(sharedConstTensors in);
 
   /**
    * @brief     Forward Propagation of a layer
@@ -63,12 +63,13 @@ public:
    * @param[in] label List of Label Tensors for the model
    * @retval    List of Input Tensors as it is.
    */
-  sharedConstTensor forwarding(sharedConstTensor in, sharedConstTensor label);
+  sharedConstTensors forwarding(sharedConstTensors in,
+                                sharedConstTensors label);
 
   /**
-   * @copydoc Layer::backwarding(sharedConstTensor in, int iteration)
+   * @copydoc Layer::backwarding(sharedConstTensors in, int iteration)
    */
-  sharedConstTensor backwarding(sharedConstTensor in, int iteration);
+  sharedConstTensors backwarding(sharedConstTensors in, int iteration);
 
   /**
    * @brief     read layer Weight & Bias data from file
index 3101151..e0edbc1 100644 (file)
@@ -149,7 +149,7 @@ public:
    * @param[in] input List of Input Tensors taken by the neural network
    * @retval    List of Output Tensors
    */
-  sharedConstTensor forwarding(sharedConstTensor input);
+  sharedConstTensors forwarding(sharedConstTensors input);
 
   /**
    * @brief     Forward Propagation of the neural network
@@ -157,8 +157,8 @@ public:
    * @param[in] label List of Label Tensors for the model
    * @retval    List of Output Tensors
    */
-  sharedConstTensor forwarding(sharedConstTensor input,
-                               sharedConstTensor label);
+  sharedConstTensors forwarding(sharedConstTensors input,
+                                sharedConstTensors label);
 
   /**
    * @brief     Backward Propagation of the neural network
@@ -166,7 +166,7 @@ public:
    * @param[in] label List of Label Tensors for the model
    * @param[in] iteration Iteration Number for the optimizer
    */
-  void backwarding(sharedConstTensor input, sharedConstTensor label,
+  void backwarding(sharedConstTensors input, sharedConstTensors label,
                    int iteration);
 
   /**
@@ -205,7 +205,7 @@ public:
    * @param[in] X input tensor
    * @retval shared_ptr<const Tensor>
    */
-  sharedConstTensor inference(const Tensor X);
+  sharedConstTensors inference(sharedConstTensors X);
 
   /**
    * @brief     Run NeuralNetwork train with callback function by user
index 81f40c4..3ba497d 100644 (file)
@@ -90,14 +90,14 @@ public:
   void save(std::ofstream &file){};
 
   /**
-   * @copydoc Layer::forwarding(sharedConstTensor in)
+   * @copydoc Layer::forwarding(sharedConstTensors in)
    */
-  sharedConstTensor forwarding(sharedConstTensor in);
+  sharedConstTensors forwarding(sharedConstTensors in);
 
   /**
-   * @copydoc Layer::backwarding(sharedConstTensor in, int iteration)
+   * @copydoc Layer::backwarding(sharedConstTensors in, int iteration)
    */
-  sharedConstTensor backwarding(sharedConstTensor in, int iteration);
+  sharedConstTensors backwarding(sharedConstTensors in, int iteration);
 
   /**
    * @copydoc Layer::setBatch(unsigned int batch)
index 1588113..ea0662f 100644 (file)
@@ -629,6 +629,10 @@ typedef std::shared_ptr<Tensor> sharedTensor;
 
 typedef std::shared_ptr<const Tensor> sharedConstTensor;
 
+typedef std::vector<sharedConstTensor> sharedConstTensors;
+
+typedef std::vector<sharedTensor> sharedTensors;
+
 } /* namespace nntrainer */
 
 #endif /* __cplusplus */
index 40b8a38..a08d27e 100644 (file)
@@ -44,24 +44,24 @@ int ActivationLayer::initialize() {
   return ML_ERROR_NONE;
 }
 
-sharedConstTensor ActivationLayer::forwarding(sharedConstTensor in) {
-  input = *in;
+sharedConstTensors ActivationLayer::forwarding(sharedConstTensors in) {
+  input = *in[0];
   /// @note @a _act_fn is expected to work out of place and not modify @a input
   hidden = _act_fn(input);
 
-  return MAKE_SHARED_TENSOR(hidden);
+  return {MAKE_SHARED_TENSOR(hidden)};
 }
 
-sharedConstTensor ActivationLayer::backwarding(sharedConstTensor derivative,
-                                               int iteration) {
-  Tensor deriv = *derivative;
+sharedConstTensors ActivationLayer::backwarding(sharedConstTensors derivative,
+                                                int iteration) {
+  Tensor deriv = *derivative[0];
   Tensor ret;
   if (activation_type == ActivationType::ACT_SOFTMAX)
     ret = _act_prime_fn(hidden, deriv);
   else
     ret = _act_prime_fn(input, deriv);
 
-  return MAKE_SHARED_TENSOR(std::move(ret));
+  return {MAKE_SHARED_TENSOR(std::move(ret))};
 }
 
 int ActivationLayer::setActivation(
index d8dfce3..85bef1e 100644 (file)
@@ -37,31 +37,31 @@ int AdditionLayer::initialize() {
   return status;
 }
 
-sharedConstTensor AdditionLayer::forwarding(sharedConstTensor in) {
+sharedConstTensors AdditionLayer::forwarding(sharedConstTensors in) {
   hidden = Tensor(input_dim);
   hidden.setZero();
 
   for (unsigned int idx = 0; idx < num_inputs; ++idx) {
-    if (input_dim != in.get()[idx].getDim())
+    if (input_dim != in[0].get()[idx].getDim())
       throw std::runtime_error("Error: addition layer requires same "
                                "shape from all input layers");
-    hidden.add_i(in.get()[idx]);
+    hidden.add_i(in[0].get()[idx]);
   }
 
-  return MAKE_SHARED_TENSOR(hidden);
+  return {MAKE_SHARED_TENSOR(hidden)};
 }
 
-sharedConstTensor AdditionLayer::backwarding(sharedConstTensor derivative,
-                                             int iteration) {
+sharedConstTensors AdditionLayer::backwarding(sharedConstTensors derivative,
+                                              int iteration) {
   sharedTensor ret = std::shared_ptr<Tensor>(new Tensor[num_inputs],
                                              std::default_delete<Tensor[]>());
 
   for (unsigned int idx = 0; idx < num_inputs; ++idx) {
     Tensor &t = ret.get()[idx];
-    t = *derivative;
+    t = *derivative[0];
   }
 
-  return ret;
+  return {ret};
 }
 
 void AdditionLayer::setProperty(const PropertyType type,
index fce4be9..d1b1907 100644 (file)
@@ -114,13 +114,13 @@ void BatchNormalizationLayer::setProperty(const PropertyType type,
   }
 }
 
-sharedConstTensor BatchNormalizationLayer::forwarding(sharedConstTensor in) {
+sharedConstTensors BatchNormalizationLayer::forwarding(sharedConstTensors in) {
   Tensor &mu = weightAt(static_cast<int>(BNParams::mu)).getVariableRef();
   Tensor &var = weightAt(static_cast<int>(BNParams::var)).getVariableRef();
   Tensor &gamma = weightAt(static_cast<int>(BNParams::gamma)).getVariableRef();
   Tensor &beta = weightAt(static_cast<int>(BNParams::beta)).getVariableRef();
 
-  input = *in;
+  input = *in[0];
   /// @todo change trainable #524
   if (trainable) {
     Tensor cmu = input.average(axes_to_reduce);
@@ -145,18 +145,18 @@ sharedConstTensor BatchNormalizationLayer::forwarding(sharedConstTensor in) {
     this->hidden.add(beta);
   }
 
-  return MAKE_SHARED_TENSOR(hidden);
+  return {MAKE_SHARED_TENSOR(hidden)};
 }
 
-sharedConstTensor
-BatchNormalizationLayer::backwarding(sharedConstTensor derivative,
+sharedConstTensors
+BatchNormalizationLayer::backwarding(sharedConstTensors derivative,
                                      int iteration) {
   Tensor &gamma = weightAt(static_cast<int>(BNParams::gamma)).getVariableRef();
   Tensor &dgamma = weightAt(static_cast<int>(BNParams::gamma)).getGradientRef();
   Tensor &dbeta = weightAt(static_cast<int>(BNParams::beta)).getGradientRef();
   Tensor dx_normalized;
 
-  Tensor deriv = *derivative;
+  Tensor deriv = *derivative[0];
 
   int N = 1;
 
@@ -178,7 +178,7 @@ BatchNormalizationLayer::backwarding(sharedConstTensor derivative,
 
   opt->apply_gradients(weight_list, num_weights, iteration);
 
-  return MAKE_SHARED_TENSOR(std::move(dx));
+  return {MAKE_SHARED_TENSOR(std::move(dx))};
 }
 
 void BatchNormalizationLayer::copy(std::shared_ptr<Layer> l) {
index 00c68f5..436cb92 100644 (file)
@@ -64,9 +64,9 @@ void Conv2DLayer::read(std::ifstream &file) { Layer::read(file); }
 
 void Conv2DLayer::save(std::ofstream &file) { Layer::save(file); }
 
-sharedConstTensor Conv2DLayer::forwarding(sharedConstTensor in) {
+sharedConstTensors Conv2DLayer::forwarding(sharedConstTensors in) {
   int status = ML_ERROR_NONE;
-  input = *in;
+  input = *in[0];
 
   if (normalization) {
     input = input.normalization();
@@ -161,13 +161,14 @@ sharedConstTensor Conv2DLayer::forwarding(sharedConstTensor in) {
     loss /= filter_size;
   }
 
-  return MAKE_SHARED_TENSOR(hidden);
+  return {MAKE_SHARED_TENSOR(hidden)};
 };
 
-sharedConstTensor Conv2DLayer::backwarding(sharedConstTensor derivative,
-                                           int iteration) {
+sharedConstTensors Conv2DLayer::backwarding(sharedConstTensors derivatives,
+                                            int iteration) {
 
   std::array<unsigned int, CONV2D_DIM> same_pad;
+  sharedConstTensor derivative = derivatives[0];
 
   same_pad[0] = kernel_size[0] - 1;
   same_pad[1] = kernel_size[1] - 1;
@@ -351,7 +352,7 @@ sharedConstTensor Conv2DLayer::backwarding(sharedConstTensor derivative,
     opt->apply_gradients(weight_list, num_weights, iteration);
   }
 
-  return MAKE_SHARED_TENSOR(std::move(strip_pad(ret, padding.data())));
+  return {MAKE_SHARED_TENSOR(std::move(strip_pad(ret, padding.data())))};
 }
 
 void Conv2DLayer::copy(std::shared_ptr<Layer> l) {
index ee14749..85e5f9f 100644 (file)
@@ -70,12 +70,12 @@ void FullyConnectedLayer::setProperty(const PropertyType type,
   }
 }
 
-sharedConstTensor FullyConnectedLayer::forwarding(sharedConstTensor in) {
+sharedConstTensors FullyConnectedLayer::forwarding(sharedConstTensors in) {
   Tensor &weight =
     weightAt(static_cast<int>(FCParams::weight)).getVariableRef();
   Tensor &bias = weightAt(static_cast<int>(FCParams::bias)).getVariableRef();
 
-  input = *in;
+  input = *in[0];
   hidden = input.dot(weight);
   hidden.add_i(bias);
 
@@ -83,7 +83,7 @@ sharedConstTensor FullyConnectedLayer::forwarding(sharedConstTensor in) {
     loss = weight_regularizer_constant * 0.5f * (weight.l2norm());
   }
 
-  return MAKE_SHARED_TENSOR(hidden);
+  return {MAKE_SHARED_TENSOR(hidden)};
 }
 
 void FullyConnectedLayer::read(std::ifstream &file) {
@@ -106,18 +106,18 @@ void FullyConnectedLayer::copy(std::shared_ptr<Layer> l) {
   this->unit = from->unit;
 }
 
-sharedConstTensor FullyConnectedLayer::backwarding(sharedConstTensor derivative,
-                                                   int iteration) {
+sharedConstTensors
+FullyConnectedLayer::backwarding(sharedConstTensors derivative, int iteration) {
   unsigned int weight_idx = static_cast<int>(FCParams::weight);
   unsigned int bias_idx = static_cast<int>(FCParams::bias);
   Tensor &weight = weightAt(weight_idx).getVariableRef();
   Tensor &djdw = weightAt(weight_idx).getGradientRef();
   Tensor &djdb = weightAt(bias_idx).getGradientRef();
 
-  Tensor ret = derivative->dot(weight, false, true);
-  djdb = derivative->sum(0);
+  Tensor ret = derivative[0]->dot(weight, false, true);
+  djdb = derivative[0]->sum(0);
 
-  djdw = input.dot(*derivative, true, false);
+  djdw = input.dot(*derivative[0], true, false);
   if (isWeightRegularizerL2Norm())
     djdw.add_i(weight, weight_regularizer_constant);
   djdw = djdw.sum(0);
@@ -126,6 +126,6 @@ sharedConstTensor FullyConnectedLayer::backwarding(sharedConstTensor derivative,
     opt->apply_gradients(weight_list, num_weights, iteration);
   }
 
-  return MAKE_SHARED_TENSOR(std::move(ret));
+  return {MAKE_SHARED_TENSOR(std::move(ret))};
 }
 } /* namespace nntrainer */
index 389cecf..bfd9345 100644 (file)
@@ -34,21 +34,21 @@ int FlattenLayer::initialize() {
   return status;
 }
 
-sharedConstTensor FlattenLayer::forwarding(sharedConstTensor in) {
-  input = *in;
+sharedConstTensors FlattenLayer::forwarding(sharedConstTensors in) {
+  input = *in[0];
   hidden = input;
 
   hidden.reshape(output_dim);
 
-  return MAKE_SHARED_TENSOR(hidden);
+  return {MAKE_SHARED_TENSOR(hidden)};
 }
 
-sharedConstTensor FlattenLayer::backwarding(sharedConstTensor in,
-                                            int iteration) {
-  Tensor temp = *in;
+sharedConstTensors FlattenLayer::backwarding(sharedConstTensors in,
+                                             int iteration) {
+  Tensor temp = *in[0];
   temp.reshape(input_dim);
 
-  return MAKE_SHARED_TENSOR(std::move(temp));
+  return {MAKE_SHARED_TENSOR(std::move(temp))};
 }
 
 } /* namespace nntrainer */
index d1cfec7..19373b6 100644 (file)
@@ -51,8 +51,8 @@ void InputLayer::setProperty(const PropertyType type,
   }
 }
 
-sharedConstTensor InputLayer::forwarding(sharedConstTensor in) {
-  input = *in;
+sharedConstTensors InputLayer::forwarding(sharedConstTensors in) {
+  input = *in[0];
 
   hidden = input;
   if (normalization)
@@ -60,10 +60,11 @@ sharedConstTensor InputLayer::forwarding(sharedConstTensor in) {
   if (standardization)
     hidden = hidden.standardization();
 
-  return MAKE_SHARED_TENSOR(hidden);
+  return {MAKE_SHARED_TENSOR(hidden)};
 }
 
-sharedConstTensor InputLayer::backwarding(sharedConstTensor in, int iteration) {
+sharedConstTensors InputLayer::backwarding(sharedConstTensors in,
+                                           int iteration) {
   return in;
 }
 
index 917e9a5..614c434 100644 (file)
@@ -40,10 +40,10 @@ int LossLayer::initialize() {
   return status;
 }
 
-sharedConstTensor LossLayer::forwarding(sharedConstTensor in,
-                                        sharedConstTensor label) {
-  input = *in;
-  Tensor y2 = *label;
+sharedConstTensors LossLayer::forwarding(sharedConstTensors in,
+                                         sharedConstTensors label) {
+  input = *in[0];
+  Tensor y2 = *label[0];
   Tensor y = input;
   Tensor l;
 
@@ -90,21 +90,21 @@ sharedConstTensor LossLayer::forwarding(sharedConstTensor in,
   }
 
   updateLoss(l);
-  return MAKE_SHARED_TENSOR(std::move(y));
+  return {MAKE_SHARED_TENSOR(std::move(y))};
 }
 
-sharedConstTensor LossLayer::forwarding(sharedConstTensor in) {
+sharedConstTensors LossLayer::forwarding(sharedConstTensors in) {
   Tensor ret;
 
   switch (loss_type) {
   case LossType::LOSS_MSE:
     return in;
   case LossType::LOSS_ENTROPY_SIGMOID:
-    ret = in->apply(ActivationLayer::sigmoid);
-    return MAKE_SHARED_TENSOR(std::move(ret));
+    ret = in[0]->apply(ActivationLayer::sigmoid);
+    return {MAKE_SHARED_TENSOR(std::move(ret))};
   case LossType::LOSS_ENTROPY_SOFTMAX:
-    ret = in->apply(ActivationLayer::softmax);
-    return MAKE_SHARED_TENSOR(std::move(ret));
+    ret = in[0]->apply(ActivationLayer::softmax);
+    return {MAKE_SHARED_TENSOR(std::move(ret))};
   case LossType::LOSS_ENTROPY:
     throw std::runtime_error(
       "Error: Cross Entropy not supported without softmax or sigmoid.");
@@ -132,10 +132,10 @@ void LossLayer::copy(std::shared_ptr<Layer> l) {
   this->loss_type = from->loss_type;
 }
 
-sharedConstTensor LossLayer::backwarding(sharedConstTensor derivative,
-                                         int iteration) {
+sharedConstTensors LossLayer::backwarding(sharedConstTensors derivative,
+                                          int iteration) {
   Tensor ret_derivative;
-  Tensor y2 = *derivative;
+  Tensor y2 = *derivative[0];
   Tensor y = input;
 
   switch (loss_type) {
@@ -159,7 +159,7 @@ sharedConstTensor LossLayer::backwarding(sharedConstTensor derivative,
     throw std::runtime_error("Unknown loss_type.");
   }
 
-  return MAKE_SHARED_TENSOR(std::move(ret_derivative));
+  return {MAKE_SHARED_TENSOR(std::move(ret_derivative))};
 }
 
 int LossLayer::setLoss(LossType l) {
index 0c4508e..6d7862b 100644 (file)
@@ -267,8 +267,8 @@ NeuralNetwork::~NeuralNetwork() {
 /**
  * @brief     forward propagation using layers object which has layer
  */
-sharedConstTensor NeuralNetwork::forwarding(sharedConstTensor input) {
-  sharedConstTensor X = input;
+sharedConstTensors NeuralNetwork::forwarding(sharedConstTensors input) {
+  sharedConstTensors X = input;
   /** Do not forward the loss layer, as label is not available */
   for (unsigned int i = 0; i < layers.size() - 1; i++) {
     X = layers[i]->forwarding(X);
@@ -280,11 +280,11 @@ sharedConstTensor NeuralNetwork::forwarding(sharedConstTensor input) {
 /**
  * @brief     forward propagation using layers object which has layer
  */
-sharedConstTensor NeuralNetwork::forwarding(sharedConstTensor input,
-                                            sharedConstTensor label) {
-  sharedConstTensor X;
+sharedConstTensors NeuralNetwork::forwarding(sharedConstTensors input,
+                                             sharedConstTensors label) {
+  sharedConstTensors X;
 
-  if (input->getDim().batch() > batch_size)
+  if (input[0]->getDim().batch() > batch_size)
     throw std::logic_error("Error: mismatch in batchsize for data and model.");
 
   X = forwarding(input);
@@ -299,8 +299,8 @@ sharedConstTensor NeuralNetwork::forwarding(sharedConstTensor input,
  *            Call backwarding function of layer in reverse order
  *            No need to call at first Input Layer (No data to be updated)
  */
-void NeuralNetwork::backwarding(sharedConstTensor input,
-                                sharedConstTensor label, int iteration) {
+void NeuralNetwork::backwarding(sharedConstTensors input,
+                                sharedConstTensors label, int iteration) {
 
   if (layers.empty() || layers.back()->getType() != LayerType::LAYER_LOSS) {
     throw std::invalid_argument("last layer is not loss layer");
@@ -308,7 +308,7 @@ void NeuralNetwork::backwarding(sharedConstTensor input,
 
   forwarding(input, label);
 
-  sharedConstTensor output = label;
+  sharedConstTensors output = label;
   for (unsigned int i = layers.size() - 1; i > 0; i--)
     output = layers[i]->backwarding(output, iteration);
 }
@@ -375,24 +375,24 @@ void NeuralNetwork::setBatchSize(unsigned int batch) {
     throw std::invalid_argument("Error setting batchsize for the dataset");
 }
 
-sharedConstTensor NeuralNetwork::inference(const Tensor X) {
-  if (batch_size != X.batch()) {
+sharedConstTensors NeuralNetwork::inference(sharedConstTensors X) {
+  if (batch_size != X[0]->batch()) {
     /**
      * Note that inference resets batch_size of the previous train configuration
      * Next train must set its batch_size if inference is run with this model.
      */
-    setBatchSize(X.batch());
+    setBatchSize(X[0]->batch());
   }
 
-  sharedConstTensor out;
+  sharedConstTensors out;
   try {
-    out = forwarding(MAKE_SHARED_TENSOR(X));
+    out = forwarding(X);
     /** Forward loss layer without label as well */
     out = std::static_pointer_cast<LossLayer>(layers[layers.size() - 1])
             ->forwarding(out);
   } catch (...) {
     ml_loge("Failed to inference Model");
-    return nullptr;
+    return out;
   }
   return out;
 }
@@ -455,7 +455,7 @@ int NeuralNetwork::train_run() {
       if (data_buffer->getDataFromBuffer(nntrainer::BufferType::BUF_TRAIN,
                                          in->getData(), label->getData())) {
         try {
-          backwarding(in, label, iter++);
+          backwarding({in}, {label}, iter++);
         } catch (...) {
           data_buffer->clear(nntrainer::BufferType::BUF_TRAIN);
           ml_loge("Error: training error in #%d/%d.", epoch_idx, epochs);
@@ -494,8 +494,8 @@ int NeuralNetwork::train_run() {
       while (true) {
         if (data_buffer->getDataFromBuffer(nntrainer::BufferType::BUF_VAL,
                                            in->getData(), label->getData())) {
-          sharedConstTensor Y = forwarding(in, label);
-          auto model_out = Y->argmax();
+          sharedConstTensors Y = forwarding({in}, {label});
+          auto model_out = Y[0]->argmax();
           auto label_out = label->argmax();
           for (unsigned int b = 0; b < batch_size; b++) {
             if (model_out[b] == label_out[b])
index 527422a..8d81efc 100644 (file)
@@ -55,8 +55,8 @@ int Pooling2DLayer::initialize() {
   return status;
 }
 
-sharedConstTensor Pooling2DLayer::forwarding(sharedConstTensor in) {
-  input = *in;
+sharedConstTensors Pooling2DLayer::forwarding(sharedConstTensors in) {
+  input = *in[0];
 
   TensorDim hidden_dim = output_dim;
   hidden = Tensor(hidden_dim);
@@ -69,11 +69,11 @@ sharedConstTensor Pooling2DLayer::forwarding(sharedConstTensor in) {
            result.getData(), result.getDim().getDataLen() * sizeof(float));
   }
 
-  return MAKE_SHARED_TENSOR(hidden);
+  return {MAKE_SHARED_TENSOR(hidden)};
 }
 
-sharedConstTensor Pooling2DLayer::backwarding(sharedConstTensor derivative,
-                                              int iteration) {
+sharedConstTensors Pooling2DLayer::backwarding(sharedConstTensors derivative,
+                                               int iteration) {
   unsigned int batch = input_dim.batch();
   unsigned int channel = input_dim.channel();
   unsigned int height = input_dim.height();
@@ -88,8 +88,8 @@ sharedConstTensor Pooling2DLayer::backwarding(sharedConstTensor derivative,
   float *out = result.getData();
   switch (pooling_type) {
   case PoolingType::max: {
-    for (unsigned int i = 0; i < derivative->getDim().getDataLen(); ++i) {
-      out[max_idx[i]] += derivative->getData()[i];
+    for (unsigned int i = 0; i < derivative[0]->getDim().getDataLen(); ++i) {
+      out[max_idx[i]] += derivative[0]->getData()[i];
     }
   } break;
   case PoolingType::average: {
@@ -100,7 +100,7 @@ sharedConstTensor Pooling2DLayer::backwarding(sharedConstTensor derivative,
           K = 0;
           for (unsigned int k = 0; k <= width - p_width; k += stride[1]) {
             float del =
-              derivative->getValue(b, i, J, K) / static_cast<float>(p_size);
+              derivative[0]->getValue(b, i, J, K) / static_cast<float>(p_size);
             for (unsigned int pi = 0; pi < p_height; ++pi) {
               for (unsigned int pj = 0; pj < p_width; ++pj) {
                 result.setValue(b, i, j + pi, k + pj,
@@ -115,8 +115,8 @@ sharedConstTensor Pooling2DLayer::backwarding(sharedConstTensor derivative,
     }
   } break;
   case PoolingType::global_max: {
-    for (unsigned int i = 0; i < derivative->getDim().getDataLen(); ++i) {
-      float der = derivative->getData()[i] / max_idx_global[i].size();
+    for (unsigned int i = 0; i < derivative[0]->getDim().getDataLen(); ++i) {
+      float der = derivative[0]->getData()[i] / max_idx_global[i].size();
       for (unsigned int m = 0; m < max_idx_global[i].size(); m++) {
         out[max_idx_global[i][m]] += der;
       }
@@ -126,7 +126,7 @@ sharedConstTensor Pooling2DLayer::backwarding(sharedConstTensor derivative,
     unsigned int p_size = width * height;
     for (unsigned int b = 0; b < batch; ++b) {
       for (unsigned int i = 0; i < channel; ++i) {
-        float del = derivative->getValue(b, i, 0, 0) / (p_size);
+        float del = derivative[0]->getValue(b, i, 0, 0) / (p_size);
         for (unsigned int j = 0; j < height; ++j) {
           for (unsigned int k = 0; k < width; ++k) {
             result.setValue(b, i, j, k, del);
@@ -139,7 +139,7 @@ sharedConstTensor Pooling2DLayer::backwarding(sharedConstTensor derivative,
   default:
     throw std::runtime_error("Error: Unknown Pooling Type");
   }
-  return MAKE_SHARED_TENSOR(std::move(result));
+  return {MAKE_SHARED_TENSOR(std::move(result))};
 }
 
 int Pooling2DLayer::setSize(int *size, PropertyType type) {
index 80935fb..2951ec5 100644 (file)
@@ -447,7 +447,7 @@ protected:
 
     loadFile("tc_fc_1_FCLayer.in", in);
     loadFile("tc_fc_1_FCKernel.in", layer);
-    loadFile("tc_fc_1_FCLabel.in", label.get()[0]);
+    loadFile("tc_fc_1_FCLabel.in", *label);
     layers.clear();
 
     return status;
@@ -483,29 +483,29 @@ protected:
     if (type == nntrainer::LossType::LOSS_ENTROPY_SOFTMAX) {
       loadFile("tc_fc_1_FCLayer_sensible.in", in);
       loadFile("tc_fc_1_FCKernel_sensible.in", layer);
-      loadFile("tc_fc_1_FCLabel_sensible.in", label.get()[0]);
+      loadFile("tc_fc_1_FCLabel_sensible.in", *label);
     }
   }
 
   void matchForwarding(const char *file) {
     sharedConstTensor out;
-    EXPECT_NO_THROW(out = layer.forwarding(MAKE_SHARED_TENSOR(in)));
+    EXPECT_NO_THROW(out = layer.forwarding({MAKE_SHARED_TENSOR(in)})[0]);
 
     if (layers.size() > 0) {
       for (unsigned int idx = 0; idx < layers.size() - 1; idx++) {
-        EXPECT_NO_THROW(out = layers[idx]->forwarding(out));
+        EXPECT_NO_THROW(out = layers[idx]->forwarding({out})[0]);
       }
 
       if (layers.back()->getType() == nntrainer::LayerType::LAYER_LOSS) {
         std::shared_ptr<nntrainer::LossLayer> loss_layer =
           std::static_pointer_cast<nntrainer::LossLayer>(layers.back());
-        EXPECT_NO_THROW(out = loss_layer->forwarding(out, label));
+        EXPECT_NO_THROW(out = loss_layer->forwarding({out}, {label})[0]);
       } else {
-        EXPECT_NO_THROW(out = layers.back()->forwarding(out));
+        EXPECT_NO_THROW(out = layers.back()->forwarding({out})[0]);
       }
       EXPECT_EQ(status, ML_ERROR_NONE);
     }
-    matchOutput(out.get()[0], file);
+    matchOutput(*out, file);
   }
 
   void matchLoss(const char *file) {
@@ -525,7 +525,7 @@ protected:
     if (layers.size() &&
         layers.back()->getType() == nntrainer::LayerType::LAYER_LOSS) {
       if (with_loss) {
-        EXPECT_NO_THROW(back_out = layers.back()->backwarding(label, 1));
+        EXPECT_NO_THROW(back_out = layers.back()->backwarding({label}, 1)[0]);
       } else {
         back_out = def_derivative;
       }
@@ -535,9 +535,9 @@ protected:
     }
 
     for (; idx >= 0; --idx)
-      EXPECT_NO_THROW(back_out = layers[idx]->backwarding(back_out, 1));
+      EXPECT_NO_THROW(back_out = layers[idx]->backwarding({back_out}, 1)[0]);
 
-    EXPECT_NO_THROW(back_out = layer.backwarding(back_out, 1));
+    EXPECT_NO_THROW(back_out = layer.backwarding({back_out}, 1)[0]);
     matchOutput(*back_out.get(), file_dx);
 
     loadUpdatedWeightsGradients(file_uw, file_g);
@@ -591,7 +591,7 @@ TEST_F(nntrainer_FullyConnectedLayer_TFmatch,
 
   sharedConstTensor out;
 
-  EXPECT_NO_THROW(out = layer.forwarding(MAKE_SHARED_TENSOR(in)));
+  EXPECT_NO_THROW(out = layer.forwarding({MAKE_SHARED_TENSOR(in)})[0]);
 
   nntrainer::Tensor derivatives(3, 1, 1, 15);
 
@@ -601,7 +601,7 @@ TEST_F(nntrainer_FullyConnectedLayer_TFmatch,
 
   nntrainer::Tensor result;
   EXPECT_NO_THROW(
-    result = layer.backwarding(MAKE_SHARED_TENSOR(derivatives), 1).get()[0]);
+    result = *layer.backwarding({MAKE_SHARED_TENSOR(derivatives)}, 1)[0]);
 
   matchOutput(result, "tc_fc_1_goldenFCGradientAdam.out");
 
@@ -702,7 +702,7 @@ TEST_F(nntrainer_FullyConnectedLayer_TFmatch, forwarding_backwarding_04_p) {
 
   /** Verify forwarding value */
   matchForwarding("tc_fc_1_goldenFCResultActNone.out");
-  matchOutput(label.get()[0], "tc_fc_1_FCLabel.in");
+  matchOutput(*label, "tc_fc_1_FCLabel.in");
 
   /** Verify loss value */
   matchLoss("tc_fc_1_goldenFCLossActNoneMse.out");
@@ -876,14 +876,15 @@ TEST_F(nntrainer_BatchNormalizationLayer, forward_backward_training_01_p) {
   layer.setTrainable(true);
   sharedConstTensor forward_result;
 
-  EXPECT_NO_THROW(forward_result = layer.forwarding(MAKE_SHARED_TENSOR(in)));
+  EXPECT_NO_THROW(forward_result =
+                    layer.forwarding({MAKE_SHARED_TENSOR(in)})[0]);
   matchOutput(*forward_result, "tc_bn_fc_1_goldenBNResultForward.out");
 
   nntrainer::Tensor backward_in(layer.getOutputDimension());
   loadFile("tc_bn_fc_1_goldenBNLayerBackwardDxIn.out", backward_in);
 
   nntrainer::Tensor backward_result =
-    *layer.backwarding(MAKE_SHARED_TENSOR(backward_in), 1);
+    *layer.backwarding({MAKE_SHARED_TENSOR(backward_in)}, 1)[0];
 
   matchOutput(backward_result, "tc_bn_fc_1_goldenBNLayerBackwardDx.out");
 }
@@ -911,14 +912,14 @@ TEST_F(nntrainer_BatchNormalizationLayer_Conv, forward_backward_training_01_p) {
   layer.setTrainable(true);
   sharedConstTensor forward_result;
 
-  forward_result = layer.forwarding(MAKE_SHARED_TENSOR(in));
+  forward_result = layer.forwarding({MAKE_SHARED_TENSOR(in)})[0];
   matchOutput(*forward_result, "tc_bn_conv_1_goldenBNResultForward.out");
 
   nntrainer::Tensor backward_in(layer.getOutputDimension());
   loadFile("tc_bn_conv_1_goldenBNLayerBackwardDxIn.out", backward_in);
 
   nntrainer::Tensor backward_result =
-    *layer.backwarding(MAKE_SHARED_TENSOR(backward_in), 1);
+    *layer.backwarding({MAKE_SHARED_TENSOR(backward_in)}, 1)[0];
 
   matchOutput(backward_result, "tc_bn_conv_1_goldenBNLayerBackwardDx.out");
 }
@@ -947,14 +948,14 @@ TEST_F(nntrainer_BatchNormalizationLayer_Conv2,
   layer.setTrainable(true);
   sharedConstTensor forward_result;
 
-  forward_result = layer.forwarding(MAKE_SHARED_TENSOR(in));
+  forward_result = layer.forwarding({MAKE_SHARED_TENSOR(in)})[0];
   matchOutput(*forward_result, "tc_bn_conv_2_goldenBNResultForward.out");
 
   nntrainer::Tensor backward_in(layer.getOutputDimension());
   loadFile("tc_bn_conv_2_goldenBNLayerBackwardDxIn.out", backward_in);
 
   nntrainer::Tensor backward_result =
-    *layer.backwarding(MAKE_SHARED_TENSOR(backward_in), 1);
+    *layer.backwarding({MAKE_SHARED_TENSOR(backward_in)}, 1)[0];
 
   matchOutput(backward_result, "tc_bn_conv_2_goldenBNLayerBackwardDx.out");
 }
@@ -1040,7 +1041,7 @@ TEST_F(nntrainer_Conv2DLayer, forwarding_01_p) {
   loadFile("tc_conv2d_1_conv2DLayer.in", in);
   loadFile("tc_conv2d_1_conv2DKernel.in", layer);
 
-  EXPECT_NO_THROW(out = layer.forwarding(MAKE_SHARED_TENSOR(in)).get()[0]);
+  EXPECT_NO_THROW(out = *layer.forwarding({MAKE_SHARED_TENSOR(in)})[0]);
   matchOutput(out, "tc_conv2d_1_goldenConv2DResult.out");
 }
 
@@ -1062,7 +1063,7 @@ TEST_F(nntrainer_Conv2DLayer, forwarding_02_p) {
   loadFile("tc_conv2d_2_conv2DLayer.in", in);
   loadFile("tc_conv2d_2_conv2DKernel.in", layer);
 
-  EXPECT_NO_THROW(out = layer.forwarding(MAKE_SHARED_TENSOR(in)).get()[0]);
+  EXPECT_NO_THROW(out = *layer.forwarding({MAKE_SHARED_TENSOR(in)})[0]);
   matchOutput(out, "tc_conv2d_2_goldenConv2DResult.out");
 }
 
@@ -1087,13 +1088,13 @@ TEST_F(nntrainer_Conv2DLayer, backwarding_01_p) {
   loadFile("tc_conv2d_1_conv2DLayer.in", in);
   loadFile("tc_conv2d_1_conv2DKernel.in", layer);
 
-  EXPECT_NO_THROW(out = layer.forwarding(MAKE_SHARED_TENSOR(in)).get()[0]);
+  EXPECT_NO_THROW(out = *layer.forwarding({MAKE_SHARED_TENSOR(in)})[0]);
 
   for (unsigned int i = 0; i < derivatives.getDim().getDataLen(); ++i) {
     derivatives.getData()[i] = 1.0;
   }
   EXPECT_NO_THROW(
-    result = layer.backwarding(MAKE_SHARED_TENSOR(derivatives), 1).get()[0]);
+    result = *layer.backwarding({MAKE_SHARED_TENSOR(derivatives)}, 1)[0]);
 
   nntrainer::Weight *param_data = layer.getWeights().get();
 
@@ -1140,13 +1141,13 @@ TEST_F(nntrainer_Conv2DLayer, backwarding_04_p) {
   loadFile("tc_conv2d_3_conv2DLayer.in", in);
   loadFile("tc_conv2d_3_conv2DKernel.in", layer);
 
-  EXPECT_NO_THROW(out = layer.forwarding(MAKE_SHARED_TENSOR(in)).get()[0]);
+  EXPECT_NO_THROW(out = *layer.forwarding({MAKE_SHARED_TENSOR(in)})[0]);
 
   for (unsigned int i = 0; i < derivatives.getDim().getDataLen(); ++i) {
     derivatives.getData()[i] = 1.0;
   }
   EXPECT_NO_THROW(
-    result = layer.backwarding(MAKE_SHARED_TENSOR(derivatives), 1).get()[0]);
+    result = *layer.backwarding({MAKE_SHARED_TENSOR(derivatives)}, 1)[0]);
 
   nntrainer::Weight *param_data = layer.getWeights().get();
 
@@ -1196,13 +1197,13 @@ TEST_F(nntrainer_Conv2DLayer, backwarding_02_p) {
   loadFile("tc_conv2d_2_conv2DLayer.in", in);
   loadFile("tc_conv2d_2_conv2DKernel.in", layer);
 
-  EXPECT_NO_THROW(out = layer.forwarding(MAKE_SHARED_TENSOR(in)).get()[0]);
+  EXPECT_NO_THROW(out = *layer.forwarding({MAKE_SHARED_TENSOR(in)})[0]);
 
   for (unsigned int i = 0; i < derivatives.getDim().getDataLen(); ++i) {
     derivatives.getData()[i] = 1.0;
   }
   EXPECT_NO_THROW(
-    result = layer.backwarding(MAKE_SHARED_TENSOR(derivatives), 1).get()[0]);
+    result = *layer.backwarding({MAKE_SHARED_TENSOR(derivatives)}, 1)[0]);
   param_data = layer.getWeights().get();
 
   for (unsigned int i = 0; i < filter_size * 2; ++i) {
@@ -1227,9 +1228,9 @@ TEST_F(nntrainer_Conv2DLayer, backwarding_02_p) {
   matchOutput(bias_grad, "tc_conv2d_2_goldenBiasGrad.out");
 
   for (int i = 0; i < 4; i++) {
-    EXPECT_NO_THROW(out = layer.forwarding(MAKE_SHARED_TENSOR(in)).get()[0]);
+    EXPECT_NO_THROW(out = *layer.forwarding({MAKE_SHARED_TENSOR(in)})[0]);
     EXPECT_NO_THROW(
-      result = layer.backwarding(MAKE_SHARED_TENSOR(derivatives), 1).get()[0]);
+      result = *layer.backwarding({MAKE_SHARED_TENSOR(derivatives)}, 1)[0]);
   }
 
   param_data = layer.getWeights().get();
@@ -1318,11 +1319,11 @@ TEST_F(nntrainer_Conv2DLayer, backwarding_03_p) {
   loadFile("tc_conv2d_int_conv2DKernel2.in", layer2);
 
   nntrainer::Tensor out1;
-  EXPECT_NO_THROW(out1 = layer1.forwarding(MAKE_SHARED_TENSOR(in)).get()[0]);
+  EXPECT_NO_THROW(out1 = *layer1.forwarding({MAKE_SHARED_TENSOR(in)})[0]);
 
   nntrainer::Tensor out2;
 
-  EXPECT_NO_THROW(out2 = layer2.forwarding(MAKE_SHARED_TENSOR(out1)).get()[0]);
+  EXPECT_NO_THROW(out2 = *layer2.forwarding({MAKE_SHARED_TENSOR(out1)})[0]);
 
   matchOutput(out1, "tc_conv2d_int_goldenConv2DResult.out");
   matchOutput(out2, "tc_conv2d_int_goldenConv2DResult2.out");
@@ -1333,10 +1334,10 @@ TEST_F(nntrainer_Conv2DLayer, backwarding_03_p) {
 
   nntrainer::Tensor result2;
   EXPECT_NO_THROW(
-    result2 = layer2.backwarding(MAKE_SHARED_TENSOR(derivatives), 1).get()[0]);
+    result2 = *layer2.backwarding({MAKE_SHARED_TENSOR(derivatives)}, 1)[0]);
 
-  EXPECT_NO_THROW(
-    result = layer1.backwarding(MAKE_SHARED_TENSOR(result2), 1).get()[0]);
+  EXPECT_NO_THROW(result =
+                    *layer1.backwarding({MAKE_SHARED_TENSOR(result2)}, 1)[0]);
 
   /** Compare second conv */
   param_data = layer2.getWeights().get();
@@ -1429,7 +1430,7 @@ TEST_F(nntrainer_Pooling2DLayer, forwarding_01_p) {
 
   loadFile("tc_pooling2d_1.in", in);
 
-  EXPECT_NO_THROW(out = layer.forwarding(MAKE_SHARED_TENSOR(in)).get()[0]);
+  EXPECT_NO_THROW(out = *layer.forwarding({MAKE_SHARED_TENSOR(in)})[0]);
 
   matchOutput(out, "tc_pooling2d_1_goldenPooling2Dmax.out");
 }
@@ -1442,7 +1443,7 @@ TEST_F(nntrainer_Pooling2DLayer, forwarding_02_p) {
 
   loadFile("tc_pooling2d_1.in", in);
 
-  EXPECT_NO_THROW(out = layer.forwarding(MAKE_SHARED_TENSOR(in)).get()[0]);
+  EXPECT_NO_THROW(out = *layer.forwarding({MAKE_SHARED_TENSOR(in)})[0]);
 
   matchOutput(out, "tc_pooling2d_1_goldenPooling2Daverage.out");
 }
@@ -1455,7 +1456,7 @@ TEST_F(nntrainer_Pooling2DLayer, forwarding_03_p) {
 
   loadFile("tc_pooling2d_1.in", in);
 
-  EXPECT_NO_THROW(out = layer.forwarding(MAKE_SHARED_TENSOR(in)).get()[0]);
+  EXPECT_NO_THROW(out = *layer.forwarding({MAKE_SHARED_TENSOR(in)})[0]);
 
   matchOutput(out, "tc_pooling2d_1_goldenPooling2Dglobal_max.out");
 }
@@ -1468,7 +1469,7 @@ TEST_F(nntrainer_Pooling2DLayer, forwarding_04_p) {
 
   loadFile("tc_pooling2d_1.in", in);
 
-  EXPECT_NO_THROW(out = layer.forwarding(MAKE_SHARED_TENSOR(in)).get()[0]);
+  EXPECT_NO_THROW(out = *layer.forwarding({MAKE_SHARED_TENSOR(in)})[0]);
 
   matchOutput(out, "tc_pooling2d_1_goldenPooling2Dglobal_average.out");
 }
@@ -1481,7 +1482,7 @@ TEST_F(nntrainer_Pooling2DLayer, forwarding_05_p) {
   reinitialize();
 
   loadFile("tc_pooling2d_2.in", in);
-  EXPECT_NO_THROW(out = layer.forwarding(MAKE_SHARED_TENSOR(in)).get()[0]);
+  EXPECT_NO_THROW(out = *layer.forwarding({MAKE_SHARED_TENSOR(in)})[0]);
   matchOutput(out, "tc_pooling2d_2_goldenPooling2Dglobal_max.out");
 }
 
@@ -1494,7 +1495,7 @@ TEST_F(nntrainer_Pooling2DLayer, forwarding_06_p) {
 
   loadFile("tc_pooling2d_2.in", in);
 
-  EXPECT_NO_THROW(out = layer.forwarding(MAKE_SHARED_TENSOR(in)).get()[0]);
+  EXPECT_NO_THROW(out = *layer.forwarding({MAKE_SHARED_TENSOR(in)})[0]);
   matchOutput(out, "tc_pooling2d_2_goldenPooling2Dglobal_average.out");
 }
 
@@ -1506,7 +1507,7 @@ TEST_F(nntrainer_Pooling2DLayer, backwarding_01_p) {
   reinitialize();
   loadFile("tc_pooling2d_1.in", in);
 
-  EXPECT_NO_THROW(out = layer.forwarding(MAKE_SHARED_TENSOR(in)).get()[0]);
+  EXPECT_NO_THROW(out = *layer.forwarding({MAKE_SHARED_TENSOR(in)})[0]);
 
   nntrainer::Tensor grad(out.getDim());
 
@@ -1514,7 +1515,7 @@ TEST_F(nntrainer_Pooling2DLayer, backwarding_01_p) {
     grad.getData()[i] = 1.0;
   }
 
-  EXPECT_NO_THROW(in = layer.backwarding(MAKE_SHARED_TENSOR(grad), 0).get()[0]);
+  EXPECT_NO_THROW(in = *layer.backwarding({MAKE_SHARED_TENSOR(grad)}, 0)[0]);
 
   matchOutput(in, "tc_pooling2d_1_goldenPooling2DmaxGrad.out");
 }
@@ -1526,7 +1527,7 @@ TEST_F(nntrainer_Pooling2DLayer, backwarding_02_p) {
   reinitialize();
   loadFile("tc_pooling2d_1.in", in);
 
-  EXPECT_NO_THROW(out = layer.forwarding(MAKE_SHARED_TENSOR(in)).get()[0]);
+  EXPECT_NO_THROW(out = *layer.forwarding({MAKE_SHARED_TENSOR(in)})[0]);
 
   sharedTensor grad = MAKE_SHARED_TENSOR(out.getDim());
 
@@ -1534,7 +1535,7 @@ TEST_F(nntrainer_Pooling2DLayer, backwarding_02_p) {
     grad->getData()[i] = 1.0;
   }
 
-  EXPECT_NO_THROW(in = layer.backwarding(grad, 0).get()[0]);
+  EXPECT_NO_THROW(in = *layer.backwarding({grad}, 0)[0]);
 
   matchOutput(in, "tc_pooling2d_1_goldenPooling2DaverageGrad.out");
 }
@@ -1547,7 +1548,7 @@ TEST_F(nntrainer_Pooling2DLayer, backwarding_03_p) {
 
   loadFile("tc_pooling2d_1.in", in);
 
-  EXPECT_NO_THROW(out = layer.forwarding(MAKE_SHARED_TENSOR(in)).get()[0]);
+  EXPECT_NO_THROW(out = *layer.forwarding({MAKE_SHARED_TENSOR(in)})[0]);
 
   nntrainer::Tensor grad(out.getDim());
 
@@ -1555,7 +1556,7 @@ TEST_F(nntrainer_Pooling2DLayer, backwarding_03_p) {
     grad.getData()[i] = 1.0;
   }
 
-  EXPECT_NO_THROW(in = layer.backwarding(MAKE_SHARED_TENSOR(grad), 0).get()[0]);
+  EXPECT_NO_THROW(in = *layer.backwarding({MAKE_SHARED_TENSOR(grad)}, 0)[0]);
 
   matchOutput(in, "tc_pooling2d_1_goldenPooling2Dglobal_maxGrad.out");
 }
@@ -1567,7 +1568,7 @@ TEST_F(nntrainer_Pooling2DLayer, backwarding_04_p) {
   reinitialize();
   loadFile("tc_pooling2d_1.in", in);
 
-  EXPECT_NO_THROW(out = layer.forwarding(MAKE_SHARED_TENSOR(in)).get()[0]);
+  EXPECT_NO_THROW(out = *layer.forwarding({MAKE_SHARED_TENSOR(in)})[0]);
 
   nntrainer::Tensor grad(out.getDim());
 
@@ -1575,7 +1576,7 @@ TEST_F(nntrainer_Pooling2DLayer, backwarding_04_p) {
     grad.getData()[i] = 1.0;
   }
 
-  EXPECT_NO_THROW(in = layer.backwarding(MAKE_SHARED_TENSOR(grad), 0).get()[0]);
+  EXPECT_NO_THROW(in = *layer.backwarding({MAKE_SHARED_TENSOR(grad)}, 0)[0]);
 
   matchOutput(in, "tc_pooling2d_1_goldenPooling2Dglobal_averageGrad.out");
 }
@@ -1599,7 +1600,7 @@ TEST_F(nntrainer_FlattenLayer, forwarding_01_p) {
 
   loadFile("tc_pooling2d_1_goldenPooling2Dmax.out", in);
 
-  EXPECT_NO_THROW(out = layer.forwarding(MAKE_SHARED_TENSOR(in)).get()[0]);
+  EXPECT_NO_THROW(out = *layer.forwarding({MAKE_SHARED_TENSOR(in)})[0]);
 
   matchOutput(out, "tc_pooling2d_1_goldenPooling2Dmax.out");
 }
@@ -1616,7 +1617,7 @@ TEST_F(nntrainer_FlattenLayer, forwarding_02_p) {
 
   loadFile("tc_pooling2d_2_goldenPooling2Dmax.out", in);
 
-  EXPECT_NO_THROW(out = layer.forwarding(MAKE_SHARED_TENSOR(in)).get()[0]);
+  EXPECT_NO_THROW(out = *layer.forwarding({MAKE_SHARED_TENSOR(in)})[0]);
 
   matchOutput(out, "tc_pooling2d_2_goldenPooling2Dmax.out");
 }
@@ -1631,7 +1632,7 @@ TEST_F(nntrainer_FlattenLayer, backwarding_01_p) {
 
   loadFile("tc_pooling2d_1_goldenPooling2Dmax.out", out);
 
-  EXPECT_NO_THROW(in = layer.backwarding(MAKE_SHARED_TENSOR(out), 0).get()[0]);
+  EXPECT_NO_THROW(in = *layer.backwarding({MAKE_SHARED_TENSOR(out)}, 0)[0]);
   EXPECT_EQ(in.getDim(), nntrainer::TensorDim(1, 2, 4, 4));
 
   matchOutput(in, "tc_pooling2d_1_goldenPooling2Dmax.out");
@@ -1649,7 +1650,7 @@ TEST_F(nntrainer_FlattenLayer, backwarding_02_p) {
 
   loadFile("tc_pooling2d_2_goldenPooling2Dmax.out", out);
 
-  EXPECT_NO_THROW(in = layer.backwarding(MAKE_SHARED_TENSOR(out), 0).get()[0]);
+  EXPECT_NO_THROW(in = *layer.backwarding({MAKE_SHARED_TENSOR(out)}, 0)[0]);
   EXPECT_EQ(in.getDim(), nntrainer::TensorDim(2, 2, 4, 4));
 
   matchOutput(in, "tc_pooling2d_2_goldenPooling2Dmax.out");
@@ -1678,21 +1679,23 @@ TEST(nntrainer_LossLayer, setLoss_02_n) {
 TEST(nntrainer_LossLayer, forward_nolabel_n) {
   nntrainer::LossLayer layer;
   nntrainer::Tensor a = constant(1.0, 1, 1, 1, 1);
-  EXPECT_THROW(layer.forwarding(MAKE_SHARED_TENSOR(a)), std::runtime_error);
+  EXPECT_THROW(layer.forwarding({MAKE_SHARED_TENSOR(a)}), std::runtime_error);
 }
 
 TEST(nntrainer_LossLayer, forward_loss_unknown_n) {
   nntrainer::LossLayer layer;
   nntrainer::Tensor a = constant(1.0, 1, 1, 1, 1);
   nntrainer::Tensor b = constant(1.0, 1, 1, 1, 1);
-  EXPECT_THROW(layer.forwarding(MAKE_SHARED_TENSOR(a), MAKE_SHARED_TENSOR(b)),
-               std::runtime_error);
+  EXPECT_THROW(
+    layer.forwarding({MAKE_SHARED_TENSOR(a)}, {MAKE_SHARED_TENSOR(b)}),
+    std::runtime_error);
 }
 
 TEST(nntrainer_LossLayer, backward_loss_unknown_n) {
   nntrainer::LossLayer layer;
   nntrainer::Tensor a = constant(1.0, 1, 1, 1, 1);
-  EXPECT_THROW(layer.backwarding(MAKE_SHARED_TENSOR(a), 1), std::runtime_error);
+  EXPECT_THROW(layer.backwarding({MAKE_SHARED_TENSOR(a)}, 1),
+               std::runtime_error);
 }
 
 TEST(nntrainer_LossLayer, forward_loss_forward_entropy_n) {
@@ -1700,15 +1703,17 @@ TEST(nntrainer_LossLayer, forward_loss_forward_entropy_n) {
   layer.setLoss(nntrainer::LossType::LOSS_ENTROPY);
   nntrainer::Tensor a = constant(1.0, 1, 1, 1, 1);
   nntrainer::Tensor b = constant(1.0, 1, 1, 1, 1);
-  EXPECT_THROW(layer.forwarding(MAKE_SHARED_TENSOR(a), MAKE_SHARED_TENSOR(b)),
-               std::runtime_error);
+  EXPECT_THROW(
+    layer.forwarding({MAKE_SHARED_TENSOR(a)}, {MAKE_SHARED_TENSOR(b)}),
+    std::runtime_error);
 }
 
 TEST(nntrainer_LossLayer, backward_loss_backward_entropy_n) {
   nntrainer::LossLayer layer;
   layer.setLoss(nntrainer::LossType::LOSS_ENTROPY);
   nntrainer::Tensor a = constant(1.0, 1, 1, 1, 1);
-  EXPECT_THROW(layer.backwarding(MAKE_SHARED_TENSOR(a), 1), std::runtime_error);
+  EXPECT_THROW(layer.backwarding({MAKE_SHARED_TENSOR(a)}, 1),
+               std::runtime_error);
 }
 
 /**
@@ -1788,15 +1793,12 @@ TEST(nntrainer_ActivationLayer, forward_backward_01_p) {
   GEN_TEST_INPUT(expected,
                  nntrainer::ActivationLayer::relu((l - 4) * 0.1 * (i + 1)));
   nntrainer::Tensor result;
-  EXPECT_NO_THROW(result =
-                    layer.forwarding(MAKE_SHARED_TENSOR(input)).get()[0]);
+  EXPECT_NO_THROW(result = *layer.forwarding({MAKE_SHARED_TENSOR(input)})[0]);
   EXPECT_TRUE(result == expected);
 
   expected.copy(input);
-  EXPECT_NO_THROW(
-    result =
-      layer.backwarding(MAKE_SHARED_TENSOR(constant(1.0, 3, 1, 1, 10)), 1)
-        .get()[0]);
+  EXPECT_NO_THROW(result = *layer.backwarding(
+                    {MAKE_SHARED_TENSOR(constant(1.0, 3, 1, 1, 10))}, 1)[0]);
   GEN_TEST_INPUT(expected,
                  nntrainer::ActivationLayer::reluPrime(
                    nntrainer::ActivationLayer::relu((l - 4) * 0.1 * (i + 1))));
@@ -1849,11 +1851,11 @@ TEST_F(nntrainer_AdditionLayer, forwarding_01_n) {
 
   sharedTensor input = std::shared_ptr<nntrainer::Tensor>(
     new nntrainer::Tensor[1], std::default_delete<nntrainer::Tensor[]>());
-  nntrainer::Tensor &in = input.get()[0];
+  nntrainer::Tensor &in = *input;
 
   in = nntrainer::Tensor();
 
-  EXPECT_THROW(layer.forwarding(input), std::runtime_error);
+  EXPECT_THROW(layer.forwarding({input}), std::runtime_error);
 }
 
 TEST_F(nntrainer_AdditionLayer, forwarding_02_n) {
@@ -1861,11 +1863,11 @@ TEST_F(nntrainer_AdditionLayer, forwarding_02_n) {
 
   sharedTensor input = std::shared_ptr<nntrainer::Tensor>(
     new nntrainer::Tensor[1], std::default_delete<nntrainer::Tensor[]>());
-  nntrainer::Tensor &in = input.get()[0];
+  nntrainer::Tensor &in = *input;
 
   in = nntrainer::Tensor(layer.getInputDimension());
 
-  EXPECT_THROW(layer.forwarding(input), std::runtime_error);
+  EXPECT_THROW(layer.forwarding({input}), std::runtime_error);
 }
 
 TEST_F(nntrainer_AdditionLayer, forwarding_03_p) {
@@ -1873,12 +1875,12 @@ TEST_F(nntrainer_AdditionLayer, forwarding_03_p) {
 
   sharedTensor input = std::shared_ptr<nntrainer::Tensor>(
     new nntrainer::Tensor[2], std::default_delete<nntrainer::Tensor[]>());
-  nntrainer::Tensor &in = input.get()[0];
+  nntrainer::Tensor &in = *input;
   in = nntrainer::Tensor(layer.getInputDimension());
 
-  input.get()[1] = input.get()[0];
+  input.get()[1] = *input;
 
-  EXPECT_NO_THROW(layer.forwarding(input));
+  EXPECT_NO_THROW(layer.forwarding({input}));
 }
 
 /**
index 9cc2656..64cd51d 100644 (file)
@@ -211,7 +211,7 @@ NodeWatcher::forward(nntrainer::sharedConstTensor in, int iteration) {
   std::string err_msg = ss.str();
 
   verify(*in, expected_input, err_msg + " at input ");
-  nntrainer::sharedConstTensor out = node->forwarding(in);
+  nntrainer::sharedConstTensor out = node->forwarding({in})[0];
   verify(*out, expected_output, err_msg + " at output ");
   return out;
 }
@@ -224,8 +224,8 @@ NodeWatcher::lossForward(nntrainer::sharedConstTensor pred,
   std::string err_msg = ss.str();
 
   nntrainer::sharedConstTensor out =
-    std::static_pointer_cast<nntrainer::LossLayer>(node)->forwarding(pred,
-                                                                     answer);
+    std::static_pointer_cast<nntrainer::LossLayer>(node)->forwarding(
+      {pred}, {answer})[0];
 
   return out;
 }
@@ -238,7 +238,7 @@ NodeWatcher::backward(nntrainer::sharedConstTensor deriv, int iteration,
      << iteration;
   std::string err_msg = ss.str();
 
-  nntrainer::sharedConstTensor out = node->backwarding(deriv, iteration);
+  nntrainer::sharedConstTensor out = node->backwarding({deriv}, iteration)[0];
 
   if (should_verify) {
     verify(*out, expected_dx, err_msg);