[layer] multi-batch incremental forwarding
author     hyeonseok lee <hs89.lee@samsung.com>
Thu, 4 Apr 2024 12:23:37 +0000 (21:23 +0900)
committer  Jijoong Moon <jijoong.moon@samsung.com>
Fri, 5 Apr 2024 01:38:37 +0000 (10:38 +0900)
 - Enable multi-batch incremental forwarding by looping over the batch dimension (a minimal sketch of the slicing follows the diff)

Signed-off-by: hyeonseok lee <hs89.lee@samsung.com>
nntrainer/layers/addition_layer.cpp

index e5399d331132a473ace4d082276715252ba2fc37..592463051f0caa3d57f2e9fb5539a1461b74160f 100644 (file)
@@ -55,23 +55,29 @@ void AdditionLayer::incremental_forwarding(RunLayerContext &context,
     to = 1;
   }
 
+  hidden_step_dim.batch(1);
   hidden_step_dim.height(to - from);
 
-  Tensor hidden_step = hidden_.getSharedDataTensor(hidden_step_dim, 0, true);
-
-  /** @todo check possibility for in-place of addition layer */
-  for (unsigned int idx = 0; idx < context.getNumInputs(); ++idx) {
-    const Tensor &input_ = context.getInput(idx);
-    TensorDim input_dim = input_.getDim();
-
-    TensorDim input_step_dim = input_dim;
-    input_step_dim.height(to - from);
-
-    Tensor input_step = input_.getSharedDataTensor(input_step_dim, 0, true);
-    if (!idx) {
-      hidden_step.copy(input_step);
-    } else {
-      hidden_step.add_i(input_step);
+  for (unsigned int b = 0; b < hidden_.batch(); ++b) {
+    Tensor hidden_step = hidden_.getSharedDataTensor(
+      hidden_step_dim, b * hidden_dim.getFeatureLen(), true);
+
+    /** @todo check possibility for in-place of addition layer */
+    for (unsigned int idx = 0; idx < context.getNumInputs(); ++idx) {
+      const Tensor &input_ = context.getInput(idx);
+      TensorDim input_dim = input_.getDim();
+
+      TensorDim input_step_dim = input_dim;
+      input_step_dim.batch(1);
+      input_step_dim.height(to - from);
+
+      Tensor input_step = input_.getSharedDataTensor(
+        input_step_dim, b * input_dim.getFeatureLen(), true);
+      if (!idx) {
+        hidden_step.copy(input_step);
+      } else {
+        hidden_step.add_i(input_step);
+      }
     }
   }
 }
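
For reference, here is a minimal, self-contained sketch of the offset arithmetic the new loop relies on: each batch's sub-tensor starts b * featureLen elements into the flat buffer, and the per-step slice covers to - from rows of it, mirroring getSharedDataTensor(step_dim, b * dim.getFeatureLen(), true) in the patch. This is plain C++ for illustration only, not NNTrainer's Tensor API; the shapes, the [from, to) step, and the two hard-coded inputs are assumptions.

    // Sketch only: batchwise slicing of a flat [batch, channel, height, width]
    // buffer, as the patched incremental_forwarding does per batch and per step.
    #include <cassert>
    #include <cstddef>
    #include <vector>

    int main() {
      // Assumed contiguous layout: [batch, channel, height, width].
      const size_t batch = 2, channel = 1, height = 4, width = 3;
      const size_t feature_len = channel * height * width; // elements per batch
      const size_t from = 0, to = 1;                        // incremental step
      const size_t step_len = channel * (to - from) * width;

      // Two inputs to be summed, plus the hidden (output) buffer.
      std::vector<float> in0(batch * feature_len, 1.0f);
      std::vector<float> in1(batch * feature_len, 2.0f);
      std::vector<float> hidden(batch * feature_len, 0.0f);

      // Loop over batches; batch b's step slice starts b * feature_len
      // elements into the flat buffer.
      for (size_t b = 0; b < batch; ++b) {
        const size_t offset = b * feature_len;
        for (size_t i = 0; i < step_len; ++i) {
          hidden[offset + i] = in0[offset + i];  // first input: copy
          hidden[offset + i] += in1[offset + i]; // remaining inputs: accumulate
        }
      }

      assert(hidden[0] == 3.0f);
      return 0;
    }

Presumably getSharedDataTensor returns a view backed by the original buffer rather than a copy, so the per-batch copy/add_i in the patch operates in place on the output without extra allocations per step.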