Replace `Param::axis_index` of type `OperandIndex` with `Param::axes` of type `vector<int>` in the reduction operations (ReduceSum, ReduceMax, ReduceMin, Mean), resolving the constant axes operand at load time instead of at kernel generation.
Signed-off-by: Sergei Barannikov <s.barannikov@samsung.com>
{
const auto output_index{node.getOutputs().at(0)};
const auto input_index{node.getInputs().at(model::operation::ReduceSum::Input::INPUT)};
- const auto &axes{_ctx.at(node.param().axis_index).asVector<int>()};
+ const auto &axes{node.param().axes};
auto output_alloc = _tensor_builder->at(output_index).get();
auto input_alloc = _tensor_builder->at(input_index).get();
{
const auto output_index{node.getOutputs().at(0)};
const auto input_index{node.getInputs().at(model::operation::ReduceMax::Input::INPUT)};
- const auto &axes{_ctx.at(node.param().axis_index).asVector<int>()};
+ const auto &axes{node.param().axes};
auto ofm_alloc = _tensor_builder->at(output_index).get();
auto ifm_alloc = _tensor_builder->at(input_index).get();
{
const auto ofm_index{node.getOutputs().at(0)};
const auto ifm_index{node.getInputs().at(model::operation::Mean::Input::INPUT)};
- const auto &axes{_ctx.at(node.param().axis_index).asVector<int>()};
+ const auto &axes{node.param().axes};
const auto keep_dims{node.param().keep_dims};
auto ofm_alloc = _tensor_builder->at(ofm_index).get();
{
const auto ofm_index{node.getOutputs().at(0)};
const auto ifm_index{node.getInputs().at(model::operation::ReduceMin::Input::INPUT)};
- const auto &axes{_ctx.at(node.param().axis_index).asVector<int>()};
+ const auto &axes{node.param().axes};
auto ofm_alloc = _tensor_builder->at(ofm_index).get();
auto ifm_alloc = _tensor_builder->at(ifm_index).get();
{
const auto ofm_index{node.getOutputs().at(0)};
const auto ifm_index{node.getInputs().at(model::operation::Mean::Input::INPUT)};
- const auto &axes{_ctx.at(node.param().axis_index).asVector<int>()};
+ const auto &axes{node.param().axes};
const auto keep_dims{node.param().keep_dims};
auto ofm_alloc = _tensor_builder->at(ofm_index).get();
{
const auto ofm_index{node.getOutputs().at(0)};
const auto ifm_index{node.getInputs().at(model::operation::ReduceMax::Input::INPUT)};
- const auto &axes{_ctx.at(node.param().axis_index).asVector<int>()};
+ const auto &axes{node.param().axes};
auto ofm_alloc = _tensor_builder->at(ofm_index).get();
auto ifm_alloc = _tensor_builder->at(ifm_index).get();
{
const auto ofm_index{node.getOutputs().at(0)};
const auto ifm_index{node.getInputs().at(model::operation::ReduceMin::Input::INPUT)};
- const auto &axes{_ctx.at(node.param().axis_index).asVector<int>()};
+ const auto &axes{node.param().axes};
auto ofm_alloc = _tensor_builder->at(ofm_index).get();
auto ifm_alloc = _tensor_builder->at(ifm_index).get();
{
const auto output_index{node.getOutputs().at(0)};
const auto input_index{node.getInputs().at(model::operation::ReduceSum::Input::INPUT)};
- const auto &axes{_ctx.at(node.param().axis_index).asVector<int>()};
+ const auto &axes{node.param().axes};
auto output_alloc = _tensor_builder->at(output_index).get();
auto input_alloc = _tensor_builder->at(input_index).get();
struct Param
{
- OperandIndex axis_index;
+ std::vector<int> axes;
bool keep_dims;
};
struct Param
{
- OperandIndex axis_index;
+ std::vector<int> axes;
};
public:
struct Param
{
- OperandIndex axis_index;
+ std::vector<int> axes;
};
public:
struct Param
{
- OperandIndex axis_index;
+ std::vector<int> axes;
};
public:
const auto output_index{node.getOutputs().at(0)};
const auto input_index{node.getInputs().at(model::operation::ReduceSum::Input::INPUT)};
- const auto axis_index{node.param().axis_index};
+ const auto &axes = node.param().axes;
UNUSED_RELEASE(output_index);
UNUSED_RELEASE(input_index);
- UNUSED_RELEASE(axis_index);
+ UNUSED_RELEASE(axes);
const auto input_shape = _ctx.at(input_index).shape();
const auto output_shape = _ctx.at(output_index).shape();
- const auto axis_shape = _ctx.at(axis_index).shape();
UNUSED_RELEASE(output_shape);
UNUSED_RELEASE(input_shape);
- UNUSED_RELEASE(axis_shape);
assert(input_shape.rank() <= 4);
assert(output_shape.rank() <= input_shape.rank());
- assert(_ctx.at(axis_index).isConstant());
- assert(axis_shape.rank() == 0 || axis_shape.rank() == 1);
// NOTE For the 4-dimensions, if the rank of input and output are different, this runtime only
// supports cases reducing height and width or reducing depth.
{
const auto output_index{node.getOutputs().at(0)};
const auto input_index{node.getInputs().at(model::operation::ReduceMax::Input::INPUT)};
- const auto axis_index{node.param().axis_index};
+ const auto &axes = node.param().axes;
auto output_shape = _ctx.at(output_index).shape();
auto input_shape = _ctx.at(input_index).shape();
- auto axis_shape = _ctx.at(axis_index).shape();
UNUSED_RELEASE(output_shape);
UNUSED_RELEASE(input_shape);
- UNUSED_RELEASE(axis_shape);
+ UNUSED_RELEASE(axes);
assert(input_shape.rank() <= 4);
assert(output_shape.rank() <= input_shape.rank());
- assert(_ctx.at(axis_index).isConstant());
- assert(axis_shape.rank() == 0 || axis_shape.rank() == 1);
// NOTE For the 4-dimensions, if the rank of input and output are different, this runtime only
// supports cases reducing height and width or reducing depth.
{
const auto ofm_index{node.getOutputs().at(0)};
const auto ifm_index{node.getInputs().at(model::operation::ReduceMin::Input::INPUT)};
- const auto axis_index{node.param().axis_index};
+ const auto &axes = node.param().axes;
auto ifm_shape = _ctx.at(ifm_index).shape();
auto ofm_shape = _ctx.at(ofm_index).shape();
- auto axis_shape = _ctx.at(axis_index).shape();
UNUSED_RELEASE(ifm_shape);
UNUSED_RELEASE(ofm_shape);
- UNUSED_RELEASE(axis_shape);
+ UNUSED_RELEASE(axes);
assert(ifm_shape.rank() <= 4);
assert(ofm_shape.rank() <= ifm_shape.rank());
- assert(_ctx.at(axis_index).isConstant());
- assert(axis_shape.rank() == 0 || axis_shape.rank() == 1);
// NOTE For the 4-dimensions, if the rank of input and output are different, this runtime only
// supports cases reducing height and width or reducing depth.
model::OperandIndexSequence inputs;
model::OperandIndexSequence outputs;
- const auto input_index = (*op->inputs())[0];
- inputs.append(model::OperandIndex(input_index));
- const auto output_index = (*op->outputs())[0];
- outputs.append(model::OperandIndex(output_index));
+ loadOperationIO(op, inputs, outputs);
+ auto input = inputs.at(0);
+ auto axes = inputs.at(1);
+
+ if (!_graph.operands().at(axes).isConstant())
+ throw std::runtime_error("Mean: non-constant 'axes' is not supported.");
model::operation::Mean::Param param;
- param.axis_index = model::OperandIndex((*op->inputs())[1]);
+ param.axes = _graph.operands().at(axes).template asVector<int>();
param.keep_dims = op->builtin_options_as_ReducerOptions()->keep_dims();
- std::unique_ptr<model::Operation> new_op(new model::operation::Mean(inputs, outputs, param));
+ std::unique_ptr<model::Operation> new_op(new model::operation::Mean({input}, outputs, param));
_graph.addOperation(std::move(new_op));
}
model::OperandIndexSequence inputs;
model::OperandIndexSequence outputs;
- const auto input_index = (*op->inputs())[0];
- inputs.append(model::OperandIndex(input_index));
- const auto output_index = (*op->outputs())[0];
- outputs.append(model::OperandIndex(output_index));
+ loadOperationIO(op, inputs, outputs);
+ auto input = inputs.at(0);
+ auto axes = inputs.at(1);
+
+ // FIXME Handle ReducerOptions.
+ if (!_graph.operands().at(axes).isConstant())
+ throw std::runtime_error("ReduceMax: non-constant 'axes' is not supported.");
model::operation::ReduceMax::Param param;
- param.axis_index = model::OperandIndex((*op->inputs())[1]);
+ param.axes = _graph.operands().at(axes).template asVector<int>();
- std::unique_ptr<model::Operation> new_op(new model::operation::ReduceMax(inputs, outputs, param));
+ std::unique_ptr<model::Operation> new_op(
+ new model::operation::ReduceMax({input}, outputs, param));
_graph.addOperation(std::move(new_op));
}
template <typename LoaderDomain, typename SpecificLoader>
void BaseLoader<LoaderDomain, SpecificLoader>::loadReduceSum(const Operator *op)
{
- model::OperandIndexSequence inputs{(*op->inputs())[0]};
- model::OperandIndexSequence outputs{(*op->outputs())[0]};
+ model::OperandIndexSequence inputs;
+ model::OperandIndexSequence outputs;
+
+ loadOperationIO(op, inputs, outputs);
+ auto input = inputs.at(0);
+ auto axes = inputs.at(1);
+
+ // FIXME Handle ReducerOptions.
+ if (!_graph.operands().at(axes).isConstant())
+ throw std::runtime_error("ReduceSum: non-constant 'axes' is not supported.");
model::operation::ReduceSum::Param param;
- param.axis_index = model::OperandIndex{static_cast<uint32_t>((*op->inputs())[1])};
+ param.axes = _graph.operands().at(axes).template asVector<int>();
- std::unique_ptr<model::Operation> new_op{new model::operation::ReduceSum{inputs, outputs, param}};
+ std::unique_ptr<model::Operation> new_op{
+ new model::operation::ReduceSum{{input}, outputs, param}};
_graph.addOperation(std::move(new_op));
}
};
_map[ANEURALNETWORKS_REDUCE_SUM_EX] = [](const OperationFactory::Param &init_param,
- neurun::model::Operands &) {
+ neurun::model::Operands &operands) {
assert(init_param.input_count == 2);
assert(init_param.output_count == 1);
OperandIndexSequence inputs{init_param.inputs[0]};
OperandIndexSequence outputs{init_param.outputs[0]};
+ std::vector<std::int32_t> axes =
+ operands.at(OperandIndex{init_param.inputs[1]}).asVector<std::int32_t>();
operation::ReduceSum::Param param;
-
- param.axis_index = OperandIndex{init_param.inputs[1]};
+ param.axes.assign(axes.cbegin(), axes.cend());
return new operation::ReduceSum{inputs, outputs, param};
};
};
_map[ANEURALNETWORKS_REDUCE_MAX_EX] = [](const OperationFactory::Param &init_param,
- neurun::model::Operands &) {
+ neurun::model::Operands &operands) {
assert(init_param.input_count == 2 && init_param.output_count == 1);
OperandIndexSequence outputs{init_param.outputs[0]};
// 0 -> Input Tensor Index
// 1 -> Axis Tensor Index
OperandIndexSequence inputs{init_param.inputs[0]};
+ std::vector<std::int32_t> axes =
+ operands.at(OperandIndex{init_param.inputs[1]}).asVector<std::int32_t>();
operation::ReduceMax::Param param;
- param.axis_index = OperandIndex{init_param.inputs[1]};
+ param.axes.assign(axes.cbegin(), axes.cend());
return new operation::ReduceMax{inputs, outputs, param};
};
// 1 -> axis Tensor Index
// 2 -> keep_dims Index
OperandIndexSequence inputs{init_param.inputs[0]};
+ std::vector<std::int32_t> axes =
+ operands.at(OperandIndex{init_param.inputs[1]}).asVector<std::int32_t>();
operation::Mean::Param param;
- param.axis_index = OperandIndex{init_param.inputs[1]};
+ param.axes.assign(axes.cbegin(), axes.cend());
param.keep_dims = operands.at(OperandIndex{init_param.inputs[2]}).asScalar<int32_t>() != 0;
return new operation::Mean{inputs, outputs, param};
};
_map[ANEURALNETWORKS_REDUCE_MIN_EX] = [](const OperationFactory::Param &init_param,
- neurun::model::Operands &) {
+ neurun::model::Operands &operands) {
assert(init_param.input_count == 2 && init_param.output_count == 1);
OperandIndexSequence outputs{init_param.outputs[0]};
// 0 -> Input Tensor Index
// 1 -> Axis Tensor Index
OperandIndexSequence inputs{init_param.inputs[0]};
+ std::vector<std::int32_t> axes =
+ operands.at(OperandIndex{init_param.inputs[1]}).asVector<std::int32_t>();
operation::ReduceMin::Param param;
- param.axis_index = OperandIndex{init_param.inputs[1]};
+ param.axes.assign(axes.cbegin(), axes.cend());
return new operation::ReduceMin{inputs, outputs, param};
};