[neurun] Eliminate OperandIndex in ArgMax::Param (#9015)
author: Sergei Barannikov/AI Tools Lab/SRR/Engineer/Samsung Electronics <s.barannikov@samsung.com>
Tue, 19 Nov 2019 07:20:05 +0000 (10:20 +0300)
committer: 오형석/On-Device Lab(SR)/Staff Engineer/삼성전자 <hseok82.oh@samsung.com>
Tue, 19 Nov 2019 07:20:05 +0000 (16:20 +0900)
Replace `OperandIndex` in `ArgMax::Param` with `int`.

Signed-off-by: Sergei Barannikov <s.barannikov@samsung.com>
runtime/neurun/backend/acl_cl/KernelGenerator.cc
runtime/neurun/backend/acl_neon/KernelGenerator.cc
runtime/neurun/core/include/model/operation/ArgMax.h
runtime/neurun/frontend/nnapi/wrapper/OperationFactory.cc

index 7138e62..908b779 100644 (file)
@@ -1796,45 +1796,30 @@ void KernelGenerator::visit(const model::operation::ArgMax &node)
 {
   const auto ofm_index{node.getOutputs().at(0)};
   const auto ifm_index{node.getInputs().at(model::operation::ArgMax::Input::INPUT)};
-  const auto axis_index{node.param().axis_index};
 
   auto ifm_shape = _ctx.at(ifm_index).shape();
   auto ofm_shape = _ctx.at(ofm_index).shape();
-  auto axis_shape = _ctx.at(axis_index).shape();
 
-  assert(_ctx.at(axis_index).isConstant());
-  // Axis dimension is always 1.
-  assert(axis_shape.rank() == 1);
   assert((ifm_shape.rank() - 1) == ofm_shape.rank());
 
-  const int axis_size = axis_shape.num_elements();
-  auto axis_base = _ctx.at(axis_index).data().base();
-  // TODO Should support axis size > 1.
-  assert(axis_size == 1);
-  // axis is tensor with 1 dimension - always a vector.
-  assert(axis_base != nullptr);
-
   auto ofm_alloc = _tensor_builder->at(ofm_index).get();
   auto ifm_alloc = _tensor_builder->at(ifm_index).get();
   const auto ifm_rank = ifm_shape.rank();
   auto frontend_layout = _current_subg_layout;
   auto backend_layout = ifm_alloc->layout();
-  std::set<uint32_t> axes;
-  for (int32_t n = 0; n < axis_size; ++n)
+
+  int axis_value = node.param().axis;
+  if (axis_value < 0)
   {
-    int32_t axis_value = *(reinterpret_cast<const int32_t *>(axis_base) + n);
-    if (axis_value < 0)
-    {
-      axis_value += ifm_rank;
-    }
-    axes.insert(acl_common::ToARMComputeAxis(ifm_rank, axis_value, frontend_layout, backend_layout)
-                    .value());
+    axis_value += ifm_rank;
   }
-  std::vector<uint32_t> fixed_axes(axes.begin(), axes.end());
+
+  auto acl_axis =
+      acl_common::ToARMComputeAxis(ifm_rank, axis_value, frontend_layout, backend_layout).value();
 
   auto fn = nnfw::cpp14::make_unique<::arm_compute::CLArgOperation>();
 
-  fn->configure(ifm_alloc->handle(), ofm_alloc->handle(), fixed_axes,
+  fn->configure(ifm_alloc->handle(), ofm_alloc->handle(), {acl_axis},
                 ::arm_compute::ArgOperation::MAX);
 
   auto acl_fn = asAclFunction(std::move(fn));
index b6c7fe4..398c65a 100644 (file)
@@ -185,22 +185,15 @@ void KernelGenerator::visit(const model::operation::ArgMax &node)
 {
   const auto ofm_index{node.getOutputs().at(0)};
   const auto ifm_index{node.getInputs().at(model::operation::ArgMax::Input::INPUT)};
-  const auto axis_index{node.param().axis_index};
-
-  auto ifm_shape = _ctx.at(ifm_index).shape();
-  auto ofm_shape = _ctx.at(ofm_index).shape();
-  auto axis_shape = _ctx.at(axis_index).shape();
 
-  assert(_ctx.at(axis_index).isConstant());
-  // Axis rank is always 1.
-  assert(axis_shape.rank() == 1);
+  const auto ifm_rank = _ctx.at(ifm_index).shape().rank();
 
   auto ofm_alloc = _tensor_builder->at(ofm_index).get();
   auto ifm_alloc = _tensor_builder->at(ifm_index).get();
-  const auto ifm_rank = ifm_shape.rank();
   auto frontend_layout = _current_subg_layout;
   auto backend_layout = ifm_alloc->layout();
-  int32_t axis_value = _ctx.at(axis_index).asScalar<int32_t>();
+
+  int axis_value = node.param().axis;
   if (axis_value < 0)
   {
     axis_value += ifm_rank;
index e6d28f8..9a34275 100644 (file)
@@ -36,7 +36,7 @@ public:
 
   struct Param
   {
-    OperandIndex axis_index;
+    int axis;
   };
 
 public:
index 98ead3b..26288a9 100644 (file)
@@ -1401,7 +1401,7 @@ OperationFactory::OperationFactory()
   };
 
   _map[ANEURALNETWORKS_ARGMAX_EX] = [](const OperationFactory::Param &init_param,
-                                       neurun::model::Operands &) {
+                                       neurun::model::Operands &operands) {
     assert(init_param.input_count == 2 && init_param.output_count == 1);
 
     OperandIndexSequence outputs{init_param.outputs[0]};
@@ -1413,7 +1413,7 @@ OperationFactory::OperationFactory()
     OperandIndexSequence inputs{init_param.inputs[0]};
 
     operation::ArgMax::Param param;
-    param.axis_index = OperandIndex{init_param.inputs[1]};
+    param.axis = operands.at(OperandIndex{init_param.inputs[1]}).asScalar<std::int32_t>();
 
     return new operation::ArgMax{inputs, outputs, param};
   };