[rebase] Fix issues introduced by rebase
author    Parichay Kapoor <pk.kapoor@samsung.com>
          Wed, 6 Oct 2021 02:35:50 +0000 (11:35 +0900)
committer Jijoong Moon <jijoong.moon@samsung.com>
          Wed, 6 Oct 2021 08:07:09 +0000 (17:07 +0900)
This patch fixes issues introduced by the rebase. It also removes some of
the temporary fixes added in previous commits.
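
The bulk of the manager.cpp changes replace positional std::get<N> access
on the execution-order tuple with C++17 structured bindings. A minimal
standalone sketch of the pattern, assuming getExecutionOrder() returns a
tuple of three orders as the diff suggests (the free function here is a
hypothetical stand-in for GraphNode::getExecutionOrder()):

    #include <tuple>
    #include <vector>

    // Hypothetical stand-in for GraphNode::getExecutionOrder().
    std::tuple<unsigned int, unsigned int, unsigned int> getExecutionOrder() {
      return {0, 1, 2}; // forwarding, calcGradient, calcDerivative
    }

    int main() {
      // Names now document the tuple elements at the point of use,
      // instead of std::get<0>/<1>/<2> with trailing comments.
      const auto [forwarding_order, calcGradient_order, calcDerivative_order] =
        getExecutionOrder();
      std::vector<unsigned int> var_exec_order(
        {forwarding_order, calcGradient_order, calcDerivative_order});
      return static_cast<int>(var_exec_order.size()) == 3 ? 0 : 1;
    }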

Signed-off-by: Parichay Kapoor <pk.kapoor@samsung.com>
nntrainer/tensor/manager.cpp
nntrainer/tensor/var_grad.cpp
test/unittest/memory/memory_planner_validate.cpp

nntrainer/tensor/manager.cpp
index a42dc74..7f21adc 100644
@@ -255,14 +255,12 @@ void Manager::initializeTensorsTrain(unsigned int max_exec_order_) {
 std::vector<Weight *>
 Manager::requestWeights(const GraphNode &node,
                         const std::vector<Weight::Spec> &weights_spec) {
-  const auto &exec_order = node.getExecutionOrder();
+  const auto [forwarding_order, calcGradient_order, calcDerivative_order] =
+    node.getExecutionOrder();
   std::vector<unsigned int> var_exec_order(
-    {std::get<0>(exec_order), /** forwarding */
-     std::get<1>(exec_order), /** calcGradient */
-     std::get<2>(exec_order) /** calcDerivative */});
+    {forwarding_order, calcGradient_order, calcDerivative_order});
   std::vector<unsigned int> grad_exec_order(
-    {std::get<1>(exec_order), /** calcGradient */
-    std::get<2>(exec_order) /** calcDerivative as gradient is applied after calcDerivative */ });
+    {calcGradient_order, calcDerivative_order});
 
   TensorLifespan var_ls = TensorLifespan::MAX_LIFESPAN;
   TensorLifespan grad_ls = TensorLifespan::BACKWARD_FUNC_LIFESPAN;
@@ -308,7 +306,8 @@ Manager::requestWeights(const GraphNode &node,
 std::vector<Var_Grad *>
 Manager::requestTensors(const GraphNode &node,
                         const std::vector<Var_Grad::Spec> &tensors_spec) {
-  const auto &exec_order = node.getExecutionOrder();
+  const auto [forwarding_order, calcGradient_order, calcDerivative_order] =
+    node.getExecutionOrder();
 
   std::vector<Var_Grad *> ret;
   size_t current_size = tensors_v2.size();
@@ -321,16 +320,16 @@ Manager::requestTensors(const GraphNode &node,
     /** usage for tensors */
     if (enum_class_logical_and<TensorLifespan>(
           tspan, TensorLifespan::FORWARD_FUNC_LIFESPAN))
-      var_exec_order.push_back(std::get<0>(exec_order));
+      var_exec_order.push_back(forwarding_order);
 
     /** usage for tensors gradient in backwarding */
     if (enum_class_logical_and<TensorLifespan>(
           tspan, TensorLifespan::BACKWARD_FUNC_LIFESPAN)) {
-      var_exec_order.push_back(std::get<1>(exec_order));
-      grad_exec_order.push_back(std::get<1>(exec_order));
+      var_exec_order.push_back(calcGradient_order);
+      grad_exec_order.push_back(calcGradient_order);
 
-      var_exec_order.push_back(std::get<2>(exec_order));
-      grad_exec_order.push_back(std::get<2>(exec_order));
+      var_exec_order.push_back(calcDerivative_order);
+      grad_exec_order.push_back(calcDerivative_order);
     }
 
     Tensor *var =
@@ -368,12 +367,11 @@ std::vector<Var_Grad *>
 Manager::requestInputs(const GraphNode &node,
                        const std::vector<TensorDim> &inputs_dim,
                        const std::vector<std::string> &outputs_name) {
-  const auto &exec_order = node.getExecutionOrder();
+  const auto [forwarding_order, calcGradient_order, calcDerivative_order] =
+    node.getExecutionOrder();
   std::vector<unsigned int> var_exec_order(
-    {std::get<0>(exec_order), /** forwarding */
-     std::get<1>(exec_order) /** calcGradient */});
-  std::vector<unsigned int> grad_exec_order(
-    {std::get<2>(exec_order) /** calcDerivative */});
+    {forwarding_order, calcGradient_order});
+  std::vector<unsigned int> grad_exec_order({calcDerivative_order});
 
   TensorLifespan var_ls = TensorLifespan::ITERATION_LIFESPAN;
   TensorLifespan grad_ls = TensorLifespan::ITERATION_LIFESPAN;
@@ -440,17 +438,7 @@ Manager::requestInputs(const GraphNode &node,
 #endif
     }
 
-    /**
-     * TODO: This a temporary fix to handle external tensors due to rebase.
-     * This is properly fixed with #1544 using
-     * context.requestExternallyAllocatedTensor().
-     */
-    if (var && grad)
-      inputs_v2.emplace_back(std::make_unique<Var_Grad>(var, grad));
-    else
-      inputs_v2.emplace_back(std::make_unique<Var_Grad>(
-        dim, Tensor::Initializer::NONE, true, false,
-        node.getName() + std::string(":input") + std::to_string(idx)));
+    inputs_v2.emplace_back(std::make_unique<Var_Grad>(var, grad));
   }
 
   ret.reserve(inputs_dim.size());
@@ -468,15 +456,14 @@ std::vector<Var_Grad *>
 Manager::requestOutputs(const GraphNode &node,
                         const std::vector<TensorDim> &outputs_dim,
                         const std::vector<std::string> &inputs_name) {
-  const auto &exec_order = node.getExecutionOrder();
-  std::vector<unsigned int> var_exec_order(
-    {std::get<0>(exec_order)}); /** forwarding */
+  const auto [forwarding_order, calcGradient_order, calcDerivative_order] =
+    node.getExecutionOrder();
+  std::vector<unsigned int> var_exec_order({forwarding_order});
   if (node.getType() == ActivationLayer::type)
     /** TODO: if removing this reduces memory consumption, resolve this */
-    var_exec_order.push_back(std::get<2>(exec_order)); /** calcDerivative */
+    var_exec_order.push_back(calcDerivative_order);
   std::vector<unsigned int> grad_exec_order(
-    {std::get<1>(exec_order), /** calcGradient */
-     std::get<2>(exec_order) /** calcDerivative */});
+    {calcGradient_order, calcDerivative_order});
 
   TensorLifespan var_ls = TensorLifespan::ITERATION_LIFESPAN;
   TensorLifespan grad_ls = TensorLifespan::ITERATION_LIFESPAN;

nntrainer/tensor/var_grad.cpp
index 3dc37e0..12223c3 100644
@@ -40,11 +40,7 @@ void Var_Grad::initializeVariable(const Tensor &preallocated) {
    * Making a new tensor is intentional here as this tensor is not shared
    * with other layers but the internal memory is.
    */
-  if (var)
-    var->setData(preallocated.getData());
-  else
-    var = std::make_shared<Tensor>(preallocated);
-  // var = std::make_shared<Tensor>(preallocated);
+  var = std::make_shared<Tensor>(preallocated);
   /** intentionally not initialized tensor memory for shared tensors */
 }
 
@@ -53,18 +49,7 @@ void Var_Grad::initializeGradient(const Tensor &preallocated) {
    * Making a new tensor is intentional here as this tensor is not shared
    * with other layers but the internal memory is.
    */
-  /**
-   * This is a temporary fix till requestExternallyAllocatedTensors is enabled
-   * from #1544.
-   */
-  if (grad) {
-    if (grad->empty())
-      grad = std::make_shared<Tensor>(
-        preallocated.getSharedDataTensor(preallocated.getDim(), 0));
-    else
-      grad->setData(preallocated.getData());
-  } else
-    grad = std::make_shared<Tensor>(preallocated);
+  grad = std::make_shared<Tensor>(preallocated);
   /** intentionally not initialized tensor memory for shared tensors */
 }
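
Both Var_Grad hunks collapse the temporary branching (reuse an existing
tensor via setData() vs. wrap the preallocated one) into an unconditional
shallow copy. This relies on Tensor's copy sharing the underlying buffer
while creating a distinct tensor object, which the in-code comments
("this tensor is not shared with other layers but the internal memory is")
confirm. A minimal sketch with a toy Tensor type, not the nntrainer one:

    #include <cassert>
    #include <cstddef>
    #include <memory>

    // Toy stand-in for nntrainer's Tensor: copies share the buffer.
    struct Tensor {
      std::shared_ptr<float[]> buf;
      explicit Tensor(std::size_t n) : buf(new float[n]()) {}
    };

    int main() {
      Tensor preallocated(4);
      // Equivalent of `var = std::make_shared<Tensor>(preallocated);`
      auto var = std::make_shared<Tensor>(preallocated);
      var->buf[0] = 1.0f;
      assert(preallocated.buf[0] == 1.0f); // memory shared, object distinct
      return 0;
    }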
 
test/unittest/memory/memory_planner_validate.cpp
index 168b8dc..12c1788 100644
@@ -229,7 +229,7 @@ TEST_P(MemoryPlannerValidate, partial_overlap) {
     EXPECT_GE(pool_size,
               *std::max_element(memory_size.begin(), memory_size.end()));
     EXPECT_LE(pool_size,
-              std::accumulate(memory_size.begin(), memory_size.end(), 0));
+              std::accumulate(memory_size.begin(), memory_size.end(), 0u));
     EXPECT_TRUE(
       validateIntervalOverlap(memory_validity, memory_size, memory_offset));
   }
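
The one-character test fix matters because std::accumulate deduces its
accumulator type from the initial value: with a plain 0 the unsigned
memory sizes are summed in int, which can trip signed/unsigned comparison
against the (presumably unsigned) pool_size in EXPECT_LE; 0u keeps the
sum unsigned. A minimal illustration:

    #include <numeric>
    #include <type_traits>
    #include <vector>

    int main() {
      std::vector<unsigned int> memory_size = {1u, 2u, 3u};
      // The accumulator type follows the init argument, not the elements:
      auto as_int  = std::accumulate(memory_size.begin(), memory_size.end(), 0);
      auto as_uint = std::accumulate(memory_size.begin(), memory_size.end(), 0u);
      static_assert(std::is_same<decltype(as_int), int>::value,
                    "init 0 sums in int");
      static_assert(std::is_same<decltype(as_uint), unsigned int>::value,
                    "init 0u sums in unsigned int");
      return 0;
    }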