{
const auto ofm_index{node.getOutputs().at(0)};
const auto ifm_index{node.getInputs().at(model::operation::ArgMax::Input::INPUT)};
- const auto axis_index{node.param().axis_index};
auto ifm_shape = _ctx.at(ifm_index).shape();
auto ofm_shape = _ctx.at(ofm_index).shape();
- auto axis_shape = _ctx.at(axis_index).shape();
- assert(_ctx.at(axis_index).isConstant());
- // Axis dimension is always 1.
- assert(axis_shape.rank() == 1);
assert((ifm_shape.rank() - 1) == ofm_shape.rank());
- const int axis_size = axis_shape.num_elements();
- auto axis_base = _ctx.at(axis_index).data().base();
- // TODO Should support axis size > 1.
- assert(axis_size == 1);
- // axis is tensor with 1 dimension - always a vector.
- assert(axis_base != nullptr);
-
auto ofm_alloc = _tensor_builder->at(ofm_index).get();
auto ifm_alloc = _tensor_builder->at(ifm_index).get();
const auto ifm_rank = ifm_shape.rank();
auto frontend_layout = _current_subg_layout;
auto backend_layout = ifm_alloc->layout();
- std::set<uint32_t> axes;
- for (int32_t n = 0; n < axis_size; ++n)
+
+ int axis_value = node.param().axis;
+ if (axis_value < 0)
{
- int32_t axis_value = *(reinterpret_cast<const int32_t *>(axis_base) + n);
- if (axis_value < 0)
- {
- axis_value += ifm_rank;
- }
- axes.insert(acl_common::ToARMComputeAxis(ifm_rank, axis_value, frontend_layout, backend_layout)
- .value());
+ axis_value += ifm_rank;
}
- std::vector<uint32_t> fixed_axes(axes.begin(), axes.end());
+
+ auto acl_axis =
+ acl_common::ToARMComputeAxis(ifm_rank, axis_value, frontend_layout, backend_layout).value();
auto fn = nnfw::cpp14::make_unique<::arm_compute::CLArgOperation>();
- fn->configure(ifm_alloc->handle(), ofm_alloc->handle(), fixed_axes,
+ fn->configure(ifm_alloc->handle(), ofm_alloc->handle(), {acl_axis},
::arm_compute::ArgOperation::MAX);
auto acl_fn = asAclFunction(std::move(fn));
{
const auto ofm_index{node.getOutputs().at(0)};
const auto ifm_index{node.getInputs().at(model::operation::ArgMax::Input::INPUT)};
- const auto axis_index{node.param().axis_index};
-
- auto ifm_shape = _ctx.at(ifm_index).shape();
- auto ofm_shape = _ctx.at(ofm_index).shape();
- auto axis_shape = _ctx.at(axis_index).shape();
- assert(_ctx.at(axis_index).isConstant());
- // Axis rank is always 1.
- assert(axis_shape.rank() == 1);
+ const auto ifm_rank = _ctx.at(ifm_index).shape().rank();
auto ofm_alloc = _tensor_builder->at(ofm_index).get();
auto ifm_alloc = _tensor_builder->at(ifm_index).get();
- const auto ifm_rank = ifm_shape.rank();
auto frontend_layout = _current_subg_layout;
auto backend_layout = ifm_alloc->layout();
- int32_t axis_value = _ctx.at(axis_index).asScalar<int32_t>();
+
+ int axis_value = node.param().axis;
if (axis_value < 0)
{
axis_value += ifm_rank;
};
// Factory for the ARGMAX_EX custom NN API operation.
//
// Expected operands: two inputs and one output —
//   inputs[0]  : the tensor to reduce,
//   inputs[1]  : a constant scalar operand holding the reduction axis,
//   outputs[0] : the resulting index tensor.
//
// The axis operand is resolved to a concrete int32 value here, at
// operation-build time, and stored in ArgMax::Param. Only the data tensor
// is recorded as a model input, so backends never have to read the axis
// tensor themselves.
_map[ANEURALNETWORKS_ARGMAX_EX] = [](const OperationFactory::Param &init_param,
                                     neurun::model::Operands &operands) {
  assert(init_param.input_count == 2 && init_param.output_count == 1);

  OperandIndexSequence outputs{init_param.outputs[0]};

  // Deliberately exclude inputs[1]: the axis is folded into Param below.
  OperandIndexSequence inputs{init_param.inputs[0]};

  operation::ArgMax::Param param;
  // NOTE(review): assumes the axis operand is constant scalar data available
  // at this point — asScalar reads it directly from the operand storage.
  param.axis = operands.at(OperandIndex{init_param.inputs[1]}).asScalar<std::int32_t>();

  return new operation::ArgMax{inputs, outputs, param};
};