[layer] Move layer input/output management to manager
author Parichay Kapoor <pk.kapoor@samsung.com>
Thu, 17 Dec 2020 07:29:28 +0000 (16:29 +0900)
committer Jijoong Moon <jijoong.moon@samsung.com>
Mon, 28 Dec 2020 04:48:51 +0000 (13:48 +0900)
Move the memory management of layer inputs/outputs to the manager.
This is accomplished by replacing the use of NetBuffers with Var_Grad.

Now, the memory for weights, gradients, inputs, outputs, and derivatives
is managed by the manager, which allows further optimizations on the
inputs/outputs.
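
At a glance, a layer no longer allocates NetBuffers itself; the model
registers each layer's input/output dimensions with the Manager, which
returns shared Var_Grad buffers and allocates them in one place. A rough
sketch of the new flow (illustrative only, condensed from the changes below):

    // during NeuralNetwork::initialize(): register the layer's inputs
    manager->TrackLayerInOuts(l.getName(), l.getInputDimension());
    l.setInputBuffers(manager->getInputsLayer(-1)); // -1: most recently tracked entry

    // later, allocate all tracked inputs/outputs in one place
    manager->initializeInOuts();

    // batch size changes are propagated through the manager as well
    manager->setBatchSize(batch);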

**Self evaluation:**
1. Build test: [x]Passed [ ]Failed [ ]Skipped
2. Run test: [x]Passed [ ]Failed [ ]Skipped

Signed-off-by: Parichay Kapoor <pk.kapoor@samsung.com>
23 files changed:
Applications/Custom/LayerClient/jni/pow.cpp
nntrainer/graph/network_graph.cpp
nntrainer/graph/network_graph.h
nntrainer/layers/activation_layer.cpp
nntrainer/layers/addition_layer.cpp
nntrainer/layers/bn_layer.cpp
nntrainer/layers/concat_layer.cpp
nntrainer/layers/conv2d_layer.cpp
nntrainer/layers/fc_layer.cpp
nntrainer/layers/flatten_layer.cpp
nntrainer/layers/input_layer.cpp
nntrainer/layers/layer.cpp
nntrainer/layers/layer_internal.h
nntrainer/layers/loss_layer.cpp
nntrainer/layers/nnstreamer_layer.cpp
nntrainer/layers/output_layer.cpp
nntrainer/layers/pooling2d_layer.cpp
nntrainer/layers/tflite_layer.cpp
nntrainer/models/neuralnet.cpp
nntrainer/tensor/manager.cpp
nntrainer/tensor/manager.h
nntrainer/tensor/var_grad.h
test/unittest/unittest_nntrainer_layers.cpp

index b0f1b32..5bbb4b2 100644 (file)
@@ -107,11 +107,12 @@ void PowLayer::forwarding(nntrainer::sharedConstTensors in) {
 #endif
 
   /// net hidden are used to save var,
-  net_hidden[0]->var = net_input[0]->var.pow(exponent);
+  net_hidden[0]->getVariableRef() =
+    net_input[0]->getVariableRef().pow(exponent);
 
 #ifdef DEBUG
-  std::cout << "input: " << net_input[0]->var;
-  std::cout << "output: " << net_hidden[0]->var;
+  std::cout << "input: " << net_input[0]->getVariable();
+  std::cout << "output: " << net_hidden[0]->getVariable();
   PowUtil::pause();
 #endif
 }
@@ -122,14 +123,14 @@ void PowLayer::calcDerivative(nntrainer::sharedConstTensors in) {
   std::cout << "pow layer backward is called\n";
 #endif
 
-  nntrainer::Tensor &derivative_ = net_hidden[0]->var;
-  nntrainer::Tensor &dx = net_input[0]->var;
+  nntrainer::Tensor &derivative_ = net_hidden[0]->getVariableRef();
+  nntrainer::Tensor &dx = net_input[0]->getVariableRef();
 
   dx = derivative_.multiply(exponent);
 
 #ifdef DEBUG
-  std::cout << "input: " << net_hidden[0]->var;
-  std::cout << "output: " << net_input[0]->var;
+  std::cout << "input: " << net_hidden[0]->getVariable();
+  std::cout << "output: " << net_input[0]->getVariable();
   PowUtil::pause();
 #endif
 }
index 3a94258..ace490f 100644 (file)
@@ -526,8 +526,8 @@ sharedConstTensors NetworkGraph::forwarding(sharedConstTensors input) {
 
   for (unsigned int i = 0; i < Sorted[Sorted.size() - 2].layer->num_outputs;
        ++i) {
-    out.push_back(
-      MAKE_SHARED_TENSOR(Sorted[Sorted.size() - 2].layer->net_hidden[i]->var));
+    out.push_back(MAKE_SHARED_TENSOR(
+      Sorted[Sorted.size() - 2].layer->net_hidden[i]->getVariable()));
   }
 
   return out;
index 5eda129..4740a9e 100644 (file)
@@ -245,7 +245,7 @@ private:
   std::vector<LayerNode> Sorted;         /**< Ordered Graph Node List  */
   std::set<std::string>
     layer_names; /**< Set containing all the names of layers in the model */
-  std::vector<std::shared_ptr<NetBuffers>>
+  std::vector<std::shared_ptr<Var_Grad>>
     netBuffers;       /**< List of Buffers used to calculate layer */
   int def_name_count; /**< Count assigned to layer names declared by default */
   unsigned int
index 0a5087b..228fd96 100644 (file)
@@ -47,21 +47,21 @@ int ActivationLayer::initialize(Manager &manager) {
 }
 
 void ActivationLayer::forwarding(sharedConstTensors in) {
-  Tensor &hidden_ = net_hidden[0]->var;
+  Tensor &hidden_ = net_hidden[0]->getVariableRef();
   /// @note @a _act_fn is expected to work out of place and not modify @a input
-  _act_fn(net_input[0]->var, hidden_);
+  _act_fn(net_input[0]->getVariableRef(), hidden_);
   if (activation_type == ActivationType::ACT_SOFTMAX)
     backup_hidden = hidden_.clone();
 }
 
 void ActivationLayer::calcDerivative(sharedConstTensors derivative) {
-  Tensor &deriv = net_hidden[0]->var;
-  Tensor &ret = net_input[0]->var;
+  Tensor &deriv = net_hidden[0]->getVariableRef();
+  Tensor &ret = net_input[0]->getVariableRef();
 
   if (activation_type == ActivationType::ACT_SOFTMAX) {
     ret = _act_prime_fn(backup_hidden, ret, deriv);
   } else {
-    ret = _act_prime_fn(net_input[0]->var, ret, deriv);
+    ret = _act_prime_fn(net_input[0]->getVariableRef(), ret, deriv);
   }
 }
 
index 8568477..231c68a 100644 (file)
@@ -42,21 +42,21 @@ int AdditionLayer::initialize(Manager &manager) {
 }
 
 void AdditionLayer::forwarding(sharedConstTensors in) {
-  Tensor &hidden_ = net_hidden[0]->var;
+  Tensor &hidden_ = net_hidden[0]->getVariableRef();
   TensorDim &in_dim = input_dim[0];
 
   for (unsigned int idx = 0; idx < num_inputs; ++idx) {
-    if (in_dim != net_input[idx]->var.getDim())
+    if (in_dim != net_input[idx]->getDim())
       throw std::invalid_argument("Error: addition layer requires same "
                                   "shape from all input layers");
-    hidden_.add_i(net_input[idx]->var);
+    hidden_.add_i(net_input[idx]->getVariableRef());
   }
 }
 
 void AdditionLayer::calcDerivative(sharedConstTensors derivative) {
 
   for (unsigned int i = 0; i < num_inputs; ++i) {
-    net_input[i]->var = net_hidden[0]->var;
+    net_input[i]->getVariableRef() = net_hidden[0]->getVariableRef();
   }
 }
 
index 093a532..ac0ed6d 100644 (file)
@@ -131,8 +131,8 @@ void BatchNormalizationLayer::forwarding(sharedConstTensors in) {
   Tensor &gamma = weightAt(BNParams::gamma).getVariableRef();
   Tensor &beta = weightAt(BNParams::beta).getVariableRef();
 
-  Tensor &input_ = net_input[0]->var;
-  Tensor &hidden_ = net_hidden[0]->var;
+  Tensor &input_ = net_input[0]->getVariableRef();
+  Tensor &hidden_ = net_hidden[0]->getVariableRef();
 
   /// @todo change trainable to train/eval mode #524
   if (trainable) {
@@ -162,7 +162,7 @@ void BatchNormalizationLayer::forwarding(sharedConstTensors in) {
 void BatchNormalizationLayer::calcDerivative(sharedConstTensors derivative) {
 
   Tensor &gamma = weightAt(BNParams::gamma).getVariableRef();
-  Tensor &deriv = net_hidden[0]->var;
+  Tensor &deriv = net_hidden[0]->getVariableRef();
 
   int N = 1;
   for (auto &axis : axes_to_reduce) {
@@ -175,7 +175,7 @@ void BatchNormalizationLayer::calcDerivative(sharedConstTensors derivative) {
   dx_2.subtract_i(deviation.divide(cvar).multiply(
     deviation.multiply(deriv).sum(axes_to_reduce)));
 
-  Tensor &dx = net_input[0]->var;
+  Tensor &dx = net_input[0]->getVariableRef();
   dx = dx_2.multiply(dx_1, dx);
   dx.divide_i(N);
 }
@@ -184,7 +184,7 @@ void BatchNormalizationLayer::calcGradient(sharedConstTensors derivative) {
 
   Tensor &dgamma = weightAt(BNParams::gamma).getGradientRef();
   Tensor &dbeta = weightAt(BNParams::beta).getGradientRef();
-  Tensor &deriv = net_hidden[0]->var;
+  Tensor &deriv = net_hidden[0]->getVariableRef();
 
   dbeta = deriv.sum(axes_to_reduce);
   Tensor dev = deviation.multiply(invstd);
index f3c0142..6173a21 100644 (file)
@@ -52,14 +52,14 @@ int ConcatLayer::initialize(Manager &manager) {
 }
 
 void ConcatLayer::forwarding(sharedConstTensors in) {
-  Tensor &hidden_ = net_hidden[0]->var;
+  Tensor &hidden_ = net_hidden[0]->getVariableRef();
 
 #ifdef DEBUG
   unsigned int channel = 0;
-  const TensorDim &d = net_input[0]->var.getDim();
+  const TensorDim &d = net_input[0]->getDim();
   channel += d.channel();
   for (unsigned int idx = 1; idx < num_inputs; ++idx) {
-    const TensorDim &dim = net_input[idx]->var.getDim();
+    const TensorDim &dim = net_input[idx]->getDim();
 
     for (unsigned int i = 2; i < d.rank(); ++i) {
       if (d[i] != dim[i])
@@ -79,26 +79,29 @@ void ConcatLayer::forwarding(sharedConstTensors in) {
   for (unsigned int b = 0; b < input_dim[0].batch(); ++b) {
     unsigned int position = 0;
     for (unsigned int idx = 0; idx < num_inputs; ++idx) {
-      TensorDim in_dim = net_input[idx]->var.getDim();
-      memcpy(hidden_.getAddress(b * f_size + position),
-             net_input[idx]->var.getAddress(b * in_dim.getFeatureLen()),
-             in_dim.getFeatureLen() * sizeof(float));
+      TensorDim in_dim = net_input[idx]->getDim();
+      memcpy(
+        hidden_.getAddress(b * f_size + position),
+        net_input[idx]->getVariable().getAddress(b * in_dim.getFeatureLen()),
+        in_dim.getFeatureLen() * sizeof(float));
       position += in_dim.getFeatureLen();
     }
   }
 }
 
 void ConcatLayer::calcDerivative(sharedConstTensors derivative) {
-  TensorDim d = net_hidden[0]->var.getDim();
+  TensorDim d = net_hidden[0]->getDim();
 
   unsigned int position = 0;
   for (unsigned int idx = 0; idx < num_inputs; ++idx) {
     TensorDim in_dim = input_dim[idx];
 
     for (unsigned int b = 0; b < in_dim.batch(); ++b) {
-      memcpy(net_input[idx]->var.getAddress(b * in_dim.getFeatureLen()),
-             net_hidden[0]->var.getAddress(b * d.getFeatureLen() + position),
-             in_dim.getFeatureLen() * sizeof(float));
+      memcpy(
+        net_input[idx]->getVariable().getAddress(b * in_dim.getFeatureLen()),
+        net_hidden[0]->getVariable().getAddress(b * d.getFeatureLen() +
+                                                position),
+        in_dim.getFeatureLen() * sizeof(float));
     }
     position += in_dim.getFeatureLen();
   }
index b397e4b..c12bf2e 100644 (file)
@@ -77,16 +77,12 @@ void Conv2DLayer::forwarding(sharedConstTensors in) {
   if (num_inputs != 1)
     throw std::invalid_argument("Convolution layer only takes one input");
 
-  Tensor &input_ = net_input[0]->var;
+  Tensor &input_ = net_input[0]->getVariableRef();
 
   TensorDim &in_dim = input_dim[0];
   TensorDim &out_dim = output_dim[0];
 
-  Tensor &hidden_ = net_hidden[0]->var;
-  /** @todo This check is redundant, remove it later */
-  if (hidden_.uninitialized()) {
-    hidden_ = Tensor(out_dim);
-  }
+  Tensor &hidden_ = net_hidden[0]->getVariableRef();
 
   Tensor &filter_kernel = weightAt(ConvParams::weight).getVariableRef();
   Tensor &bias_kernel = weightAt(ConvParams::bias).getVariableRef();
@@ -168,7 +164,7 @@ void Conv2DLayer::calcDerivative(sharedConstTensors derivatives) {
   int status = ML_ERROR_NONE;
   TensorDim &in_dim = input_dim[0];
 
-  Tensor &derivative = net_hidden[0]->var;
+  Tensor &derivative = net_hidden[0]->getVariableRef();
   Tensor &filter_kernel = weightAt(ConvParams::weight).getVariableRef();
 
   std::array<unsigned int, CONV2D_DIM> same_pad;
@@ -268,7 +264,7 @@ void Conv2DLayer::calcDerivative(sharedConstTensors derivatives) {
     if (status != ML_ERROR_NONE)
       throw std::runtime_error("calcDerivative Convolution failed.");
 
-    strip_pad(ret, padding.data(), net_input[0]->var, b);
+    strip_pad(ret, padding.data(), net_input[0]->getVariableRef(), b);
   }
 }
 
@@ -276,8 +272,8 @@ void Conv2DLayer::calcGradient(sharedConstTensors derivatives) {
   TensorDim &in_dim = input_dim[0];
 
   Tensor &filter_kernel = weightAt(ConvParams::weight).getVariableRef();
-  Tensor &derivative = net_hidden[0]->var;
-  Tensor &input_ = net_input[0]->var;
+  Tensor &derivative = net_hidden[0]->getVariableRef();
+  Tensor &input_ = net_input[0]->getVariableRef();
 
   Tensor &delK = weightAt(ConvParams::weight).getGradientRef();
   Tensor &delBias = weightAt(ConvParams::bias).getGradientRef();
index d971412..575648e 100644 (file)
@@ -87,8 +87,8 @@ void FullyConnectedLayer::forwarding(sharedConstTensors in) {
     weightAt(static_cast<int>(FCParams::weight)).getVariableRef();
   Tensor &bias = weightAt(static_cast<int>(FCParams::bias)).getVariableRef();
 
-  Tensor &hidden_ = net_hidden[0]->var;
-  Tensor &input_ = net_input[0]->var;
+  Tensor &hidden_ = net_hidden[0]->getVariableRef();
+  Tensor &input_ = net_input[0]->getVariableRef();
   hidden_ = input_.dot(weight, hidden_);
   hidden_.add_i(bias);
 
@@ -108,8 +108,8 @@ void FullyConnectedLayer::copy(std::shared_ptr<Layer> l) {
 void FullyConnectedLayer::calcDerivative(sharedConstTensors derivative) {
   unsigned int weight_idx = static_cast<int>(FCParams::weight);
   Tensor &weight = weightAt(weight_idx).getVariableRef();
-  Tensor &derivative_ = net_hidden[0]->var;
-  Tensor &ret_ = net_input[0]->var;
+  Tensor &derivative_ = net_hidden[0]->getVariableRef();
+  Tensor &ret_ = net_input[0]->getVariableRef();
 
   ret_ = derivative_.dot(weight, ret_, false, true);
 }
@@ -121,10 +121,10 @@ void FullyConnectedLayer::calcGradient(sharedConstTensors derivative) {
   Tensor &djdw = weightAt(weight_idx).getGradientRef();
   Tensor &djdb = weightAt(bias_idx).getGradientRef();
 
-  Tensor &derivative_ = net_hidden[0]->var;
+  Tensor &derivative_ = net_hidden[0]->getVariableRef();
 
   djdb = derivative_.sum(0);
-  djdw = net_input[0]->var.dot(derivative_, djdw, true, false);
+  djdw = net_input[0]->getVariableRef().dot(derivative_, djdw, true, false);
 
   if (isWeightRegularizerL2Norm())
     djdw.add_i(weight, weight_regularizer_constant);
index e2fb020..58f4b19 100644 (file)
@@ -42,15 +42,15 @@ int FlattenLayer::initialize(Manager &manager) {
 }
 
 void FlattenLayer::forwarding(sharedConstTensors in) {
-  Tensor temp = net_input[0]->var;
-  temp.reshape(net_hidden[0]->var.getDim());
-  net_hidden[0]->var = temp;
+  Tensor temp = net_input[0]->getVariableRef();
+  temp.reshape(net_hidden[0]->getDim());
+  net_hidden[0]->getVariableRef() = temp;
 }
 
 void FlattenLayer::calcDerivative(sharedConstTensors in) {
-  Tensor temp = net_hidden[0]->var;
-  temp.reshape(net_input[0]->var.getDim());
-  net_input[0]->var = temp;
+  Tensor temp = net_hidden[0]->getVariableRef();
+  temp.reshape(net_input[0]->getDim());
+  net_input[0]->getVariableRef() = temp;
 }
 
 } /* namespace nntrainer */
index 4b94bf5..c72c936 100644 (file)
@@ -54,7 +54,7 @@ void InputLayer::setProperty(const PropertyType type,
 }
 
 void InputLayer::forwarding(sharedConstTensors in) {
-  Tensor &hidden_ = net_hidden[0]->var;
+  Tensor &hidden_ = net_hidden[0]->getVariableRef();
   hidden_ = *in[0];
 
   if (normalization)
index 69254c2..38fb8df 100644 (file)
@@ -59,7 +59,7 @@ void Layer::setBatch(unsigned int batch) {
 std::vector<Tensor> Layer::getOutputs() {
   std::vector<Tensor> ret;
   for (unsigned int i = 0; i < num_outputs; ++i) {
-    ret.push_back(net_hidden[i]->var);
+    ret.push_back(net_hidden[i]->getVariableRef());
   }
   return ret;
 }
@@ -67,13 +67,13 @@ std::vector<Tensor> Layer::getOutputs() {
 std::vector<Tensor> Layer::getDerivatives() {
   std::vector<Tensor> ret;
   for (unsigned int i = 0; i < num_inputs; ++i) {
-    ret.push_back(net_input[i]->var);
+    ret.push_back(net_input[i]->getVariableRef());
   }
   return ret;
 }
 
 void Layer::copy(std::shared_ptr<Layer> l) {
-  for (auto const &w : weights)
+  for (auto const &w : l->weights)
     weights.push_back(w.clone());
 
   this->input_dim = l->input_dim;
@@ -94,7 +94,7 @@ void Layer::copy(std::shared_ptr<Layer> l) {
 sharedConstTensors Layer::forwarding_with_val(sharedConstTensors input) {
 
   for (unsigned int i = 0; i < num_inputs; ++i) {
-    net_input[i]->var = input[i]->clone();
+    net_input[i]->getVariableRef() = input[i]->clone();
   }
 
   if (num_outputs != net_hidden.size())
@@ -105,7 +105,7 @@ sharedConstTensors Layer::forwarding_with_val(sharedConstTensors input) {
   nntrainer::sharedConstTensors out;
 
   for (unsigned int i = 0; i < num_outputs; ++i) {
-    out.push_back(MAKE_SHARED_TENSOR(net_hidden[i]->var));
+    out.push_back(MAKE_SHARED_TENSOR(net_hidden[i]->getVariable()));
   }
 
   return out;
@@ -117,7 +117,7 @@ Layer::backwarding_with_val(int iteration, sharedConstTensors deriv,
                             std::shared_ptr<Optimizer> optimizer) {
 
   for (unsigned int i = 0; i < num_outputs; ++i) {
-    net_hidden[i]->var = deriv[i]->clone();
+    net_hidden[i]->getVariableRef() = deriv[i]->clone();
   }
 
   if (num_inputs != net_input.size())
@@ -135,7 +135,7 @@ Layer::backwarding_with_val(int iteration, sharedConstTensors deriv,
   nntrainer::sharedConstTensors out;
 
   for (unsigned int i = 0; i < num_inputs; ++i) {
-    out.push_back(MAKE_SHARED_TENSOR(net_input[i]->var));
+    out.push_back(MAKE_SHARED_TENSOR(net_input[i]->getVariable()));
   }
 
   return out;
index a3a0511..4167ec2 100644 (file)
 
 namespace nntrainer {
 
-struct NetBuffers {
-  Tensor var;
-  /* TODO : We could remove this. for now, We are not allocate memory. This
-   * exists only for the unittest.  */
-  Tensor grad;
-};
-
-typedef std::shared_ptr<nntrainer::NetBuffers> sharedNetBuffer;
-typedef std::vector<sharedNetBuffer> sharedNetBuffers;
-
 /**
  * @brief     Enumeration of activation function type
  */
@@ -330,25 +320,17 @@ public:
    */
   std::vector<Weight> &getWeightsRef() { return weights; }
 
-#ifdef ENABLE_TEST
-  void resizeNetInput(unsigned int size) { net_input.resize(size); }
+  void setInputBuffers(std::vector<std::shared_ptr<Var_Grad>> inputs) {
+    net_input = inputs;
+  }
 
-  void resizeNetOutput(unsigned int size) { net_hidden.resize(size); }
+  void setOutputBuffers(std::vector<std::shared_ptr<Var_Grad>> outputs) {
+    net_hidden = outputs;
+  }
 
+#ifdef ENABLE_TEST
   unsigned int getNumInputs() { return num_inputs; }
   unsigned int getNumOutputs() { return num_outputs; }
-
-  void setInputBuffer(unsigned int i, std::shared_ptr<NetBuffers> n_buffer) {
-    if (i >= net_input.size())
-      throw std::invalid_argument("Error: exceed num_input size");
-    net_input[i] = n_buffer;
-  }
-
-  void setOutputBuffer(unsigned int i, std::shared_ptr<NetBuffers> n_buffer) {
-    if (i >= net_hidden.size())
-      throw std::invalid_argument("Error: exceed num_input size");
-    net_hidden[i] = n_buffer;
-  }
 #endif
 
 protected:
@@ -384,7 +366,7 @@ protected:
    */
   Tensor input;
 
-  std::vector<std::shared_ptr<NetBuffers>> net_input;
+  std::vector<std::shared_ptr<Var_Grad>> net_input;
 
   /**
    * @brief     Hidden Layer Tensor which store the
@@ -393,7 +375,7 @@ protected:
   Tensor hidden;
   Tensor ret_derivative; /** derivative to be returned to previous layer */
 
-  std::vector<std::shared_ptr<NetBuffers>> net_hidden;
+  std::vector<std::shared_ptr<Var_Grad>> net_hidden;
 
   /**
    * @brief     Dimension of input activation
index 8cb9dff..91dd11c 100644 (file)
@@ -44,11 +44,11 @@ int LossLayer::initialize(Manager &manager) {
 
 sharedConstTensors LossLayer::forwarding(sharedConstTensors in,
                                          sharedConstTensors label) {
-  net_input[0]->var = *in[0];
-  Tensor &hidden_ = net_hidden[0]->var;
+  net_input[0]->getVariableRef() = *in[0];
+  Tensor &hidden_ = net_hidden[0]->getVariableRef();
 
   Tensor y2 = *label[0];
-  Tensor y = net_input[0]->var;
+  Tensor y = net_input[0]->getVariableRef();
   Tensor l;
 
   switch (loss_type) {
@@ -92,21 +92,21 @@ sharedConstTensors LossLayer::forwarding(sharedConstTensors in,
 
   updateLoss(l);
 
-  return {MAKE_SHARED_TENSOR(net_hidden[0]->var)};
+  return {MAKE_SHARED_TENSOR(net_hidden[0]->getVariable())};
 }
 
 void LossLayer::forwarding(sharedConstTensors in) {
   switch (loss_type) {
   case LossType::LOSS_MSE:
-    net_hidden[0]->var = net_input[0]->var;
+    net_hidden[0]->getVariableRef() = net_input[0]->getVariableRef();
     break;
   case LossType::LOSS_ENTROPY_SIGMOID:
-    net_hidden[0]->var =
-      net_input[0]->var.apply(ActivationLayer::sigmoid, net_hidden[0]->var);
+    net_hidden[0]->getVariableRef() = net_input[0]->getVariableRef().apply(
+      ActivationLayer::sigmoid, net_hidden[0]->getVariableRef());
     break;
   case LossType::LOSS_ENTROPY_SOFTMAX:
-    net_hidden[0]->var =
-      net_input[0]->var.apply(ActivationLayer::softmax, net_hidden[0]->var);
+    net_hidden[0]->getVariableRef() = net_input[0]->getVariableRef().apply(
+      ActivationLayer::softmax, net_hidden[0]->getVariableRef());
     break;
   case LossType::LOSS_ENTROPY:
     throw std::runtime_error(
@@ -136,9 +136,9 @@ void LossLayer::copy(std::shared_ptr<Layer> l) {
 }
 
 void LossLayer::calcDerivative(sharedConstTensors derivative) {
-  Tensor &ret_derivative = net_input[0]->var;
+  Tensor &ret_derivative = net_input[0]->getVariableRef();
   Tensor y2 = *derivative[0];
-  Tensor &y = net_input[0]->var;
+  Tensor &y = net_input[0]->getVariableRef();
   Tensor ret;
 
   switch (loss_type) {
index c53a5b7..7d5a032 100644 (file)
@@ -168,7 +168,7 @@ void NNStreamerLayer::setProperty(const PropertyType type,
 void NNStreamerLayer::forwarding(sharedConstTensors in) {
   size_t data_size;
   Tensor input = *in[0];
-  Tensor &hidden_ = net_hidden[0]->var;
+  Tensor &hidden_ = net_hidden[0]->getVariableRef();
 
   std::copy(input.getData(), input.getData() + input.length(),
             (float *)in_data);
index 83866ca..1ddb611 100644 (file)
@@ -44,18 +44,18 @@ int OutputLayer::initialize(Manager &manager) {
 }
 
 void OutputLayer::forwarding(sharedConstTensors in) {
-  Tensor &input_ = net_input[0]->var;
+  Tensor &input_ = net_input[0]->getVariableRef();
   for (unsigned int idx = 0; idx < num_outputs; ++idx) {
-    net_hidden[idx]->var = input_;
+    net_hidden[idx]->getVariableRef() = input_;
   }
 }
 
 void OutputLayer::calcDerivative(sharedConstTensors derivative) {
 
-  Tensor &ret = net_input[0]->var;
+  Tensor &ret = net_input[0]->getVariableRef();
 
   for (unsigned int idx = 0; idx < num_outputs; ++idx) {
-    ret.add_i(net_hidden[idx]->var);
+    ret.add_i(net_hidden[idx]->getVariableRef());
   }
 }
 
index 966a71e..bb87d3f 100644 (file)
@@ -67,8 +67,8 @@ int Pooling2DLayer::initialize(Manager &manager) {
 }
 
 void Pooling2DLayer::forwarding(sharedConstTensors in) {
-  Tensor &input_ = net_input[0]->var;
-  Tensor &hidden_ = net_hidden[0]->var;
+  Tensor &input_ = net_input[0]->getVariableRef();
+  Tensor &hidden_ = net_hidden[0]->getVariableRef();
 
   TensorDim &hidden_dim = output_dim[0];
   TensorDim &in_dim = input_dim[0];
@@ -96,8 +96,8 @@ void Pooling2DLayer::calcDerivative(sharedConstTensors derivative) {
 
   unsigned int J, K;
 
-  Tensor &deriv = net_hidden[0]->var;
-  Tensor &result = net_input[0]->var;
+  Tensor &deriv = net_hidden[0]->getVariableRef();
+  Tensor &result = net_input[0]->getVariableRef();
 
   result.setZero();
   float *out = result.getData();
index 76970b5..b607381 100644 (file)
@@ -126,7 +126,7 @@ void TfLiteLayer::forwarding(sharedConstTensors in) {
   if (status != kTfLiteOk)
     throw std::runtime_error("Invoke failed");
 
-  net_hidden[0]->var = *out[0];
+  net_hidden[0]->getVariableRef() = *out[0];
 }
 
 void TfLiteLayer::copy(std::shared_ptr<Layer> l) {
index 2398399..4324750 100644 (file)
@@ -230,7 +230,6 @@ int NeuralNetwork::initialize() {
       }
 
       for (unsigned int i = 0; i < l.input_layers.size(); ++i) {
-        std::shared_ptr<NetBuffers> n_buffer = std::make_unique<NetBuffers>();
         Layer &in_layer = *model_graph.getLayerNode(l.input_layers[i]).layer;
 
         unsigned int location = 0;
@@ -242,17 +241,29 @@ int NeuralNetwork::initialize() {
         }
 
         l.setInputDimension(in_layer.getOutputDimension()[location], i);
+      }
 
-        l.net_input[i] = n_buffer;
+      manager->TrackLayerInOuts(l.getName(), l.getInputDimension());
+      auto in_out = manager->getInputsLayer(-1);
+      l.setInputBuffers(in_out);
+
+      for (unsigned int i = 0; i < l.input_layers.size(); ++i) {
+        Layer &in_layer = *model_graph.getLayerNode(l.input_layers[i]).layer;
+
+        unsigned int location = 0;
+        for (unsigned int j = 0; j < in_layer.output_layers.size(); ++j) {
+          if (in_layer.output_layers[j] == l.getName()) {
+            location = j;
+            break;
+          }
+        }
 
         model_graph.getLayerNode(l.input_layers[i])
-          .layer->net_hidden[location] = n_buffer;
+          .layer->net_hidden[location] = in_out[i];
       }
     } else {
-      for (unsigned int i = 0; i < l.input_layers.size(); ++i) {
-        std::shared_ptr<NetBuffers> n_buffer = std::make_unique<NetBuffers>();
-        l.net_input[i] = n_buffer;
-      }
+      manager->TrackLayerInOuts(l.getName(), l.getInputDimension());
+      l.setInputBuffers(manager->getInputsLayer(-1));
     }
 
     status = l.initialize(*manager);
@@ -262,11 +273,13 @@ int NeuralNetwork::initialize() {
     opt->addOptimizerVariable(l.getWeightsRef());
   }
 
-  for (unsigned int i = 0; i < model_graph.Sorted.back().layer->num_outputs;
-       ++i) {
-    std::shared_ptr<NetBuffers> last_hidden_buffer =
-      std::make_unique<NetBuffers>();
-    model_graph.Sorted.back().layer->net_hidden[i] = last_hidden_buffer;
+  auto &last_layer = model_graph.Sorted.back().layer;
+  manager->TrackLayerInOuts(last_layer->getName(),
+                            last_layer->getOutputDimension());
+  auto in_out = manager->getInputsLayer(-1);
+
+  for (unsigned int i = 0; i < last_layer->num_outputs; ++i) {
+    last_layer->net_hidden[i] = in_out[i];
   }
 
   setBatchSize(batch_size);
@@ -429,6 +442,7 @@ void NeuralNetwork::setBatchSize(unsigned int batch) {
   batch_size = batch;
 
   model_graph.setBatchSize(batch);
+  manager->setBatchSize(batch);
 
   if (data_buffer && data_buffer->setBatchSize(batch_size) != ML_ERROR_NONE)
     throw std::invalid_argument("Error setting batchsize for the dataset");
@@ -464,34 +478,15 @@ sharedConstTensors NeuralNetwork::inference(sharedConstTensors X) {
     out.push_back(
       MAKE_SHARED_TENSOR(model_graph.Sorted[model_graph.Sorted.size() - 1]
                            .layer->net_hidden[i]
-                           ->var));
+                           ->getVariable()));
   }
   return out;
 }
 
 int NeuralNetwork::assignMem() {
-  int status = ML_ERROR_NONE;
-  unsigned int n_layers = (unsigned int)model_graph.Sorted.size();
-
-  for (unsigned int idx = 0; idx < n_layers; ++idx) {
-    bool first = idx == 0;
-    Layer &l = *model_graph.getSortedLayerNode(idx).layer;
-
-    if (!first && l.getType() != BatchNormalizationLayer::type) {
-      for (unsigned int i = 0; i < l.input_layers.size(); ++i) {
-
-        l.net_input[i]->var = Tensor(l.getInputDimension()[i]);
-      }
-    }
-  }
-
-  for (unsigned int i = 0; i < model_graph.Sorted.back().layer->num_outputs;
-       ++i) {
-    model_graph.Sorted.back().layer->net_hidden[i]->var =
-      Tensor(model_graph.Sorted.back().layer->getOutputDimension()[i]);
-  }
-
-  return status;
+  // TODO: directly replace this
+  manager->initializeInOuts();
+  return ML_ERROR_NONE;
 }
 
 int NeuralNetwork::train(std::vector<std::string> values) {
index e5f19ad..b251538 100644 (file)
@@ -234,4 +234,23 @@ void Manager::initialize() {
   }
 }
 
+/**
+ * @brief Track the inputs/outputs of the layer
+ */
+void Manager::TrackLayerInOuts(const std::string layer_name,
+                               const std::vector<TensorDim> &input_dim) {
+  int cnt = 0;
+  auto base_name = layer_name + ":Input";
+
+  std::vector<std::shared_ptr<Var_Grad>> in_out;
+  in_out.reserve(input_dim.size());
+
+  for (auto const &dim : input_dim) {
+    in_out.emplace_back(std::make_shared<Var_Grad>(
+      dim, false, base_name + std::to_string(cnt++)));
+  }
+
+  in_outs.push_back(in_out);
+}
+
 } // namespace nntrainer
index 6cbeb30..b29ca33 100644 (file)
@@ -21,6 +21,7 @@
 #include <memory>
 #include <vector>
 
+#include <var_grad.h>
 #include <weight.h>
 
 namespace nntrainer {
@@ -88,9 +89,9 @@ public:
   Manager(bool enable_gradient_memory_opt_ = true,
           bool use_shared_memory_ = true);
 
-  Manager(const Manager &) = delete;
+  Manager(const Manager &) = default;
 
-  Manager &operator=(const Manager &) = delete;
+  Manager &operator=(const Manager &) = default;
 
   Manager(Manager &&) noexcept = default;
 
@@ -147,17 +148,61 @@ public:
     total_grad_size = 0;
     weight_mmaped_memory.reset();
     grad_mmaped_memory.reset();
+    in_outs.clear();
+  }
+
+  /**
+   * @brief Track the inputs/outputs of the layer
+   * @param[in] layer_name Name of the layer
+   * @param[in] input_dim Dimension of the input for the layer
+   * @note Manager is kept independent from the layer object itself
+   */
+  void TrackLayerInOuts(const std::string layer_name,
+                        const std::vector<TensorDim> &input_dim);
+
+  /**
+   * @brief Get input tensor list for a layer by index
+   * @param[in] layer_idx Index of the layer in the order of layer tracked
+   * @note The order of layers tracked is same as the order of sorted layers
+   */
+  std::vector<std::shared_ptr<Var_Grad>> getInputsLayer(int layer_idx) {
+    if (layer_idx == -1)
+      return in_outs.back();
+    return in_outs[layer_idx];
+  }
+
+  /**
+   * @brief Initialize the inputs/outputs for the layers
+   * @todo Make initialize() and initializeInOuts() coherent but still separated
+   */
+  void initializeInOuts() {
+    // TODO: remove assign mem and do this
+    for (auto &in_out : in_outs)
+      for (auto &vg : in_out)
+        vg->initialize();
+  }
+
+  /**
+   * @brief Set the batch size for the inputs/outputs of the layers
+   */
+  void setBatchSize(unsigned int batch) {
+    for (auto &in_out : in_outs)
+      for (auto &vg : in_out)
+        vg->setBatchSize(batch);
   }
 
 private:
   // TODO: ensure that names of these weights are unique
-  /**< Weights all the layer in the model to be managed */
+  /**< Weights of all the layer in the model to be managed */
   std::vector<std::vector<std::reference_wrapper<Weight>>> weights;
 
   size_t total_weight_size; /**< total weight size */
   size_t total_grad_size;   /**< total weight size */
   size_t max_grad_size;     /**< max trainable weight required by a layer */
 
+  /**< Inputs/outputs of all the layer in the model */
+  std::vector<std::vector<std::shared_ptr<Var_Grad>>> in_outs;
+
   bool enable_gradient_memory_opt; /**< share memory among all the gradients */
 
   /**< shared memory related */
index 876d59f..2991d8f 100644 (file)
@@ -169,7 +169,6 @@ public:
    *
    * @note New dimension must maintain the shape of the variable
    */
-
   void reset(const TensorDim &tdim, bool train) {
     dim = tdim;
     if (!var->uninitialized())
@@ -180,7 +179,16 @@ public:
     resetGradient();
   }
 
-protected:
+  void setBatchSize(unsigned int batch) {
+    dim.batch(batch);
+    /** @note This will reshape when changing batch size with initialized
+     * variables */
+    if (!var->uninitialized())
+      var->reshape(dim);
+    if (!grad->uninitialized())
+      grad->reshape(dim);
+  }
+
   /**
    * @brief Get the variable tensor (by reference)
    *
@@ -195,6 +203,7 @@ protected:
    */
   Tensor &getGradientRef() { return *grad.get(); }
 
+protected:
   TensorDim dim;                /**< dimension of the tensor */
   std::shared_ptr<Tensor> var;  /**< variable to be updated and used */
   std::shared_ptr<Tensor> grad; /**< gradient for the variable */
index 75ba454..e1b2f67 100644 (file)
@@ -48,37 +48,26 @@ template <typename LayerType>
 class nntrainer_abstractLayer : public ::testing::Test {
 protected:
   virtual void SetUp() {
-    manager = std::make_shared<nntrainer::Manager>(true, false);
+    manager = nntrainer::Manager(true, false);
     status = ML_ERROR_NONE;
     prepareLayer();
     reinitialize();
   }
 
   virtual int reinitialize() {
-    int status = layer.initialize(*manager);
+    int status = layer.initialize(manager);
     EXPECT_EQ(status, ML_ERROR_NONE);
 
     in = nntrainer::Tensor(layer.getInputDimension()[0]);
     out = nntrainer::Tensor(layer.getOutputDimension()[0]);
 
-    layer.resizeNetInput(layer.getNumInputs());
-    layer.resizeNetOutput(layer.getNumOutputs());
+    manager.TrackLayerInOuts(layer.getName(), layer.getInputDimension());
+    layer.setInputBuffers(manager.getInputsLayer(-1));
+    manager.TrackLayerInOuts(layer.getName(), layer.getOutputDimension());
+    layer.setOutputBuffers(manager.getInputsLayer(-1));
 
-    for (unsigned int i = 0; i < layer.getNumInputs(); ++i) {
-      std::shared_ptr<nntrainer::NetBuffers> n_buffer =
-        std::make_unique<nntrainer::NetBuffers>();
-      n_buffer->var = nntrainer::Tensor(layer.getInputDimension()[i]);
-      layer.setInputBuffer(i, n_buffer);
-    }
-
-    for (unsigned int i = 0; i < layer.getNumOutputs(); ++i) {
-      std::shared_ptr<nntrainer::NetBuffers> n_buffer =
-        std::make_unique<nntrainer::NetBuffers>();
-      n_buffer->var = nntrainer::Tensor(layer.getOutputDimension()[i]);
-      layer.setOutputBuffer(i, n_buffer);
-    }
-
-    manager->initialize();
+    manager.initializeInOuts();
+    manager.initialize();
 
     return status;
   }
@@ -98,7 +87,7 @@ protected:
 
   virtual void resetLayer() {
     layer = LayerType();
-    manager->reset();
+    manager.reset();
   }
 
   virtual void setInputDim(const std::string &dimension) {
@@ -209,7 +198,7 @@ protected:
   nntrainer::Tensor in;
   nntrainer::Tensor out;
   float local_tolerance = tolerance;
-  std::shared_ptr<nntrainer::Manager> manager;
+  nntrainer::Manager manager;
   std::shared_ptr<nntrainer::Optimizer> opt;
 };
 
@@ -503,26 +492,15 @@ protected:
 
     act_layer->setBatch(layer.getOutputDimension()[0].batch());
 
-    status = act_layer->initialize(*manager);
+    status = act_layer->initialize(manager);
     EXPECT_EQ(status, ML_ERROR_NONE);
 
-    act_layer->resizeNetInput(act_layer->getNumInputs());
-    act_layer->resizeNetOutput(act_layer->getNumOutputs());
-
-    for (unsigned int i = 0; i < act_layer->getNumInputs(); ++i) {
-      std::shared_ptr<nntrainer::NetBuffers> n_buffer =
-        std::make_unique<nntrainer::NetBuffers>();
-      n_buffer->var = nntrainer::Tensor(act_layer->getInputDimension()[i]);
-      act_layer->setInputBuffer(i, n_buffer);
-    }
-
-    for (unsigned int i = 0; i < act_layer->getNumOutputs(); ++i) {
-      std::shared_ptr<nntrainer::NetBuffers> n_buffer =
-        std::make_unique<nntrainer::NetBuffers>();
-      n_buffer->var = nntrainer::Tensor(act_layer->getOutputDimension()[i]);
-      act_layer->setOutputBuffer(i, n_buffer);
-    }
+    manager.TrackLayerInOuts(layer.getName(), layer.getInputDimension());
+    act_layer->setInputBuffers(manager.getInputsLayer(-1));
+    manager.TrackLayerInOuts(layer.getName(), layer.getOutputDimension());
+    act_layer->setOutputBuffers(manager.getInputsLayer(-1));
 
+    manager.initializeInOuts();
     layers.push_back(act_layer);
   }
 
@@ -536,28 +514,17 @@ protected:
 
     loss_layer->setBatch(layer.getOutputDimension()[0].batch());
 
-    status = loss_layer->initialize(*manager);
+    status = loss_layer->initialize(manager);
     EXPECT_EQ(status, ML_ERROR_NONE);
     status = loss_layer->setLoss(type);
     EXPECT_EQ(status, ML_ERROR_NONE);
 
-    loss_layer->resizeNetInput(loss_layer->getNumInputs());
-    loss_layer->resizeNetOutput(loss_layer->getNumOutputs());
-
-    for (unsigned int i = 0; i < loss_layer->getNumInputs(); ++i) {
-      std::shared_ptr<nntrainer::NetBuffers> n_buffer =
-        std::make_unique<nntrainer::NetBuffers>();
-      n_buffer->var = nntrainer::Tensor(loss_layer->getInputDimension()[i]);
-      loss_layer->setInputBuffer(i, n_buffer);
-    }
-
-    for (unsigned int i = 0; i < loss_layer->getNumOutputs(); ++i) {
-      std::shared_ptr<nntrainer::NetBuffers> n_buffer =
-        std::make_unique<nntrainer::NetBuffers>();
-      n_buffer->var = nntrainer::Tensor(loss_layer->getOutputDimension()[i]);
-      loss_layer->setOutputBuffer(i, n_buffer);
-    }
+    manager.TrackLayerInOuts(layer.getName(), layer.getInputDimension());
+    loss_layer->setInputBuffers(manager.getInputsLayer(-1));
+    manager.TrackLayerInOuts(layer.getName(), layer.getOutputDimension());
+    loss_layer->setOutputBuffers(manager.getInputsLayer(-1));
 
+    manager.initializeInOuts();
     layers.push_back(loss_layer);
 
     if (type == nntrainer::LossType::LOSS_ENTROPY_SOFTMAX) {
@@ -1270,9 +1237,9 @@ TEST_F(nntrainer_Conv2DLayer, DISABLED_backwarding_03_p) {
                         "padding=0, 0");
 
   loadFile("tc_conv2d_int_conv2DLayer.in", in);
-  auto manager = std::make_shared<nntrainer::Manager>();
+  auto manager = nntrainer::Manager();
 
-  manager->setGradientMemoryOptimization(false);
+  manager.setGradientMemoryOptimization(false);
 
   nntrainer::Conv2DLayer layer1;
   status =
@@ -1281,7 +1248,7 @@ TEST_F(nntrainer_Conv2DLayer, DISABLED_backwarding_03_p) {
                         "kernel_size= 5,5", "stride=1, 1", "padding=0, 0"});
   EXPECT_EQ(status, ML_ERROR_NONE);
   layer1.setBatch(1);
-  status = layer1.initialize(*manager);
+  status = layer1.initialize(manager);
   EXPECT_EQ(status, ML_ERROR_NONE);
 
   loadFile("tc_conv2d_int_conv2DKernel.in", layer1);
@@ -1300,7 +1267,7 @@ TEST_F(nntrainer_Conv2DLayer, DISABLED_backwarding_03_p) {
   status = layer2.setProperty(
     {"input_shape=" + getDimensionString(layer1.getOutputDimension()[0])});
   EXPECT_EQ(status, ML_ERROR_NONE);
-  status = layer2.initialize(*manager);
+  status = layer2.initialize(manager);
   EXPECT_EQ(status, ML_ERROR_NONE);
 
   loadFile("tc_conv2d_int_conv2DKernel2.in", layer2);
@@ -1703,14 +1670,14 @@ TEST(nntrainer_LossLayer, forward_loss_unknown_n) {
   nntrainer::LossLayer layer;
   nntrainer::Tensor a = constant(1.0, 1, 1, 1, 1);
   nntrainer::Tensor b = constant(1.0, 1, 1, 1, 1);
-  layer.resizeNetInput(1);
-  layer.resizeNetOutput(1);
-  nntrainer::sharedNetBuffer in_buffer =
-    std::make_unique<nntrainer::NetBuffers>();
-  nntrainer::sharedNetBuffer out_buffer =
-    std::make_unique<nntrainer::NetBuffers>();
-  layer.setInputBuffer(0, in_buffer);
-  layer.setOutputBuffer(0, out_buffer);
+
+  nntrainer::Manager manager;
+  manager.TrackLayerInOuts(layer.getName(), layer.getInputDimension());
+  layer.setInputBuffers(manager.getInputsLayer(-1));
+  manager.TrackLayerInOuts(layer.getName(), layer.getOutputDimension());
+  layer.setOutputBuffers(manager.getInputsLayer(-1));
+
+  manager.initializeInOuts();
   EXPECT_THROW(
     layer.forwarding({MAKE_SHARED_TENSOR(a)}, {MAKE_SHARED_TENSOR(b)}),
     std::runtime_error);
@@ -1719,14 +1686,14 @@ TEST(nntrainer_LossLayer, forward_loss_unknown_n) {
 TEST(nntrainer_LossLayer, backward_loss_unknown_n) {
   nntrainer::LossLayer layer;
   nntrainer::Tensor a = constant(1.0, 1, 1, 1, 1);
-  layer.resizeNetInput(1);
-  layer.resizeNetOutput(1);
-  nntrainer::sharedNetBuffer in_buffer =
-    std::make_unique<nntrainer::NetBuffers>();
-  nntrainer::sharedNetBuffer out_buffer =
-    std::make_unique<nntrainer::NetBuffers>();
-  layer.setInputBuffer(0, in_buffer);
-  layer.setOutputBuffer(0, out_buffer);
+
+  nntrainer::Manager manager;
+  manager.TrackLayerInOuts(layer.getName(), layer.getInputDimension());
+  layer.setInputBuffers(manager.getInputsLayer(-1));
+  manager.TrackLayerInOuts(layer.getName(), layer.getOutputDimension());
+  layer.setOutputBuffers(manager.getInputsLayer(-1));
+
+  manager.initializeInOuts();
   EXPECT_THROW(layer.backwarding({MAKE_SHARED_TENSOR(a)}), std::runtime_error);
 }
 
@@ -1735,14 +1702,14 @@ TEST(nntrainer_LossLayer, forward_loss_forward_entropy_n) {
   layer.setLoss(nntrainer::LossType::LOSS_ENTROPY);
   nntrainer::Tensor a = constant(1.0, 1, 1, 1, 1);
   nntrainer::Tensor b = constant(1.0, 1, 1, 1, 1);
-  layer.resizeNetInput(1);
-  layer.resizeNetOutput(1);
-  nntrainer::sharedNetBuffer in_buffer =
-    std::make_unique<nntrainer::NetBuffers>();
-  nntrainer::sharedNetBuffer out_buffer =
-    std::make_unique<nntrainer::NetBuffers>();
-  layer.setInputBuffer(0, in_buffer);
-  layer.setOutputBuffer(0, out_buffer);
+
+  nntrainer::Manager manager;
+  manager.TrackLayerInOuts(layer.getName(), layer.getInputDimension());
+  layer.setInputBuffers(manager.getInputsLayer(-1));
+  manager.TrackLayerInOuts(layer.getName(), layer.getOutputDimension());
+  layer.setOutputBuffers(manager.getInputsLayer(-1));
+
+  manager.initializeInOuts();
   EXPECT_THROW(
     layer.forwarding({MAKE_SHARED_TENSOR(a)}, {MAKE_SHARED_TENSOR(b)}),
     std::runtime_error);
@@ -1752,14 +1719,14 @@ TEST(nntrainer_LossLayer, backward_loss_backward_entropy_n) {
   nntrainer::LossLayer layer;
   layer.setLoss(nntrainer::LossType::LOSS_ENTROPY);
   nntrainer::Tensor a = constant(1.0, 1, 1, 1, 1);
-  layer.resizeNetInput(1);
-  layer.resizeNetOutput(1);
-  nntrainer::sharedNetBuffer in_buffer =
-    std::make_unique<nntrainer::NetBuffers>();
-  nntrainer::sharedNetBuffer out_buffer =
-    std::make_unique<nntrainer::NetBuffers>();
-  layer.setInputBuffer(0, in_buffer);
-  layer.setOutputBuffer(0, out_buffer);
+
+  nntrainer::Manager manager;
+  manager.TrackLayerInOuts(layer.getName(), layer.getInputDimension());
+  layer.setInputBuffers(manager.getInputsLayer(-1));
+  manager.TrackLayerInOuts(layer.getName(), layer.getOutputDimension());
+  layer.setOutputBuffers(manager.getInputsLayer(-1));
+
+  manager.initializeInOuts();
   EXPECT_THROW(layer.backwarding({MAKE_SHARED_TENSOR(a)}), std::runtime_error);
 }
 
@@ -1842,14 +1809,13 @@ TEST(nntrainer_ActivationLayer, forward_backward_01_p) {
   GEN_TEST_INPUT(expected,
                  nntrainer::ActivationLayer::relu((l - 4) * 0.1 * (i + 1)));
 
-  layer.resizeNetInput(1);
-  layer.resizeNetOutput(1);
-  nntrainer::sharedNetBuffer in_buffer =
-    std::make_unique<nntrainer::NetBuffers>();
-  nntrainer::sharedNetBuffer out_buffer =
-    std::make_unique<nntrainer::NetBuffers>();
-  layer.setInputBuffer(0, in_buffer);
-  layer.setOutputBuffer(0, out_buffer);
+  nntrainer::Manager manager;
+  manager.TrackLayerInOuts(layer.getName(), layer.getInputDimension());
+  layer.setInputBuffers(manager.getInputsLayer(-1));
+  manager.TrackLayerInOuts(layer.getName(), layer.getOutputDimension());
+  layer.setOutputBuffers(manager.getInputsLayer(-1));
+
+  manager.initializeInOuts();
 
   nntrainer::Tensor result;
   EXPECT_NO_THROW(result =
@@ -1886,7 +1852,7 @@ TEST_F(nntrainer_AdditionLayer, initialize_01_p) {
 TEST_F(nntrainer_AdditionLayer, initialize_02_n) {
   nntrainer::AdditionLayer layer;
   layer.setProperty({"input_shape=1:1:1:1"});
-  status = layer.initialize(*manager);
+  status = layer.initialize(manager);
   EXPECT_EQ(status, ML_ERROR_INVALID_PARAMETER);
 }
 
@@ -1897,7 +1863,7 @@ TEST_F(nntrainer_AdditionLayer, checkValidation_01_p) {
 
 TEST_F(nntrainer_AdditionLayer, setProperty_01_p) {
   setProperty("num_inputs=10");
-  status = layer.initialize(*manager);
+  status = layer.initialize(manager);
   EXPECT_EQ(status, ML_ERROR_NONE);
 }
 
@@ -1915,14 +1881,13 @@ TEST_F(nntrainer_AdditionLayer, forwarding_01_n) {
 
   in = nntrainer::Tensor();
 
-  layer.resizeNetInput(1);
-  layer.resizeNetOutput(1);
-  nntrainer::sharedNetBuffer in_buffer =
-    std::make_unique<nntrainer::NetBuffers>();
-  nntrainer::sharedNetBuffer out_buffer =
-    std::make_unique<nntrainer::NetBuffers>();
-  layer.setInputBuffer(0, in_buffer);
-  layer.setOutputBuffer(0, out_buffer);
+  nntrainer::Manager manager;
+  manager.TrackLayerInOuts(layer.getName(), layer.getInputDimension());
+  layer.setInputBuffers(manager.getInputsLayer(-1));
+  manager.TrackLayerInOuts(layer.getName(), layer.getOutputDimension());
+  layer.setOutputBuffers(manager.getInputsLayer(-1));
+
+  manager.initializeInOuts();
 
   EXPECT_THROW(layer.forwarding_with_val({input}), std::invalid_argument);
 }
@@ -1940,14 +1905,13 @@ TEST_F(nntrainer_AdditionLayer, DISABLED_forwarding_02_n) {
 
   in = nntrainer::Tensor(layer.getInputDimension()[0]);
 
-  layer.resizeNetInput(1);
-  layer.resizeNetOutput(1);
-  nntrainer::sharedNetBuffer in_buffer =
-    std::make_unique<nntrainer::NetBuffers>();
-  nntrainer::sharedNetBuffer out_buffer =
-    std::make_unique<nntrainer::NetBuffers>();
-  layer.setInputBuffer(0, in_buffer);
-  layer.setOutputBuffer(0, out_buffer);
+  nntrainer::Manager manager;
+  manager.TrackLayerInOuts(layer.getName(), layer.getInputDimension());
+  layer.setInputBuffers(manager.getInputsLayer(-1));
+  manager.TrackLayerInOuts(layer.getName(), layer.getOutputDimension());
+  layer.setOutputBuffers(manager.getInputsLayer(-1));
+
+  manager.initializeInOuts();
 
   EXPECT_THROW(layer.forwarding_with_val({input}), std::runtime_error);
 }
@@ -1962,14 +1926,11 @@ TEST_F(nntrainer_AdditionLayer, DISABLED_forwarding_03_p) {
 
   input.get()[1] = *input;
 
-  layer.resizeNetInput(1);
-  layer.resizeNetOutput(1);
-  nntrainer::sharedNetBuffer in_buffer =
-    std::make_unique<nntrainer::NetBuffers>();
-  nntrainer::sharedNetBuffer out_buffer =
-    std::make_unique<nntrainer::NetBuffers>();
-  layer.setInputBuffer(0, in_buffer);
-  layer.setOutputBuffer(0, out_buffer);
+  nntrainer::Manager manager;
+  manager.TrackLayerInOuts(layer.getName(), layer.getInputDimension());
+  layer.setInputBuffers(manager.getInputsLayer(-1));
+  manager.TrackLayerInOuts(layer.getName(), layer.getOutputDimension());
+  layer.setOutputBuffers(manager.getInputsLayer(-1));
 
   EXPECT_NO_THROW(layer.forwarding_with_val({input}));
 }