Replace `OperandIndex` in `Transpose::Param` with `vector<int>`.
Signed-off-by: Sergei Barannikov <s.barannikov@samsung.com>
{
const auto ofm_idx{node.getOutputs().at(0)};
const auto ifm_idx{node.getInputs().at(model::operation::Transpose::Input::INPUT)};
- const auto perm{node.param().perm};
+ const auto &perm{node.param().perm};
const auto rank = _ctx.at(ifm_idx).shape().rank();
- std::vector<int32_t> pv;
- const auto perm_base = _ctx.at(perm).data().base();
- const int perm_size = _ctx.at(perm).shape().num_elements();
-
- assert(perm_base != nullptr);
- for (int32_t n = 0; n < perm_size; ++n)
- {
- int32_t perm_value = *(reinterpret_cast<const int32_t *>(perm_base) + n);
- assert(perm_value < rank);
- pv.emplace_back(perm_value);
- }
auto ofm_alloc = _tensor_builder->at(ofm_idx).get();
auto ifm_alloc = _tensor_builder->at(ifm_idx).get();
const auto frontend_layout = _current_subg_layout;
const auto backend_layout = ifm_alloc->layout();
+
+ std::vector<std::int32_t> pv(perm.cbegin(), perm.cend());
// Reversed
auto backend_pv = ::neurun::backend::acl_common::getARMComputePermutationVector(
rank, pv, frontend_layout, backend_layout);
{
const auto ofm_idx{node.getOutputs().at(0)};
const auto ifm_idx{node.getInputs().at(model::operation::Transpose::Input::INPUT)};
- const auto perm{node.param().perm};
-
- const auto rank = _ctx.at(ifm_idx).shape().rank();
- std::vector<int32_t> pv;
- const auto perm_base = _ctx.at(perm).data().base();
- const int perm_size = _ctx.at(perm).shape().num_elements();
-
- assert(perm_base != nullptr);
- for (int32_t n = 0; n < perm_size; ++n)
- {
- const int32_t perm_value = *(reinterpret_cast<const int32_t *>(perm_base) + n);
- assert(perm_value < rank);
- pv.emplace_back(perm_value);
- }
+ const auto &perm{node.param().perm};
auto ofm_alloc = _tensor_builder->at(ofm_idx).get();
const auto ifm_alloc = _tensor_builder->at(ifm_idx).get();
const auto frontend_layout = _current_subg_layout;
const auto backend_layout = ifm_alloc->layout();
+ const auto rank = _ctx.at(ifm_idx).shape().rank();
+ std::vector<std::int32_t> pv(perm.cbegin(), perm.cend());
auto backend_pv = ::neurun::backend::acl_common::getARMComputePermutationVector(
rank, pv, frontend_layout, backend_layout);
struct Param
{
- // permutation vector is optional.
- // if permutation vector is provided, set perm.first to true
- // if permutation vector is NOT provided, set perm.first to false
- OperandIndex perm;
+ std::vector<int> perm;
};
public:
{
const auto output_index{node.getOutputs().at(0)};
const auto input_index{node.getInputs().at(model::operation::Transpose::Input::INPUT)};
- const auto perm_idx{node.param().perm};
+ const auto &perm{node.param().perm};
const auto &output_shape = _ctx.at(output_index).shape();
const auto &input_shape = _ctx.at(input_index).shape();
- const auto &perm_shape = _ctx.at(perm_idx).shape();
UNUSED_RELEASE(output_shape);
UNUSED_RELEASE(input_shape);
- UNUSED_RELEASE(perm_shape);
+ UNUSED_RELEASE(perm);
- assert(perm_shape.rank() == 1);
- assert(input_shape.rank() == perm_shape.dim(0));
+ assert(input_shape.rank() == static_cast<int>(perm.size()));
assert(input_shape.rank() == output_shape.rank());
}
model::OperandIndexSequence inputs;
model::OperandIndexSequence outputs;
- const auto input_index = (*op->inputs())[0];
- inputs.append(model::OperandIndex(input_index));
- const auto output_index = (*op->outputs())[0];
- outputs.append(model::OperandIndex(output_index));
+ loadOperationIO(op, inputs, outputs);
+ auto input = inputs.at(0);
+ auto perm = inputs.at(1);
+
+ if (!_graph.operands().at(perm).isConstant())
+ throw std::runtime_error("Transpose: non-constant 'perm' is not supported.");
model::operation::Transpose::Param param;
- if (op->inputs()->size() == 2)
- {
- const auto perm_index = (*op->inputs())[1];
- param.perm = model::OperandIndex(perm_index);
- }
+ param.perm = _graph.operands().at(perm).template asVector<int>();
- std::unique_ptr<model::Operation> new_op(new model::operation::Transpose(inputs, outputs, param));
+ std::unique_ptr<model::Operation> new_op(
+ new model::operation::Transpose({input}, outputs, param));
_graph.addOperation(std::move(new_op));
}
};
_map[ANEURALNETWORKS_TRANSPOSE] = [](const OperationFactory::Param &init_param,
- neurun::model::Operands &) {
+ neurun::model::Operands &operands) {
// TODO make this work with init_param.input_count == 1 (when permutation vector is optional)
// Inputs
OperandIndexSequence inputs{init_param.inputs[0]};
OperandIndexSequence outputs{init_param.outputs[0]};
+ std::vector<std::int32_t> perm =
+ operands.at(OperandIndex{init_param.inputs[1]}).asVector<std::int32_t>();
operation::Transpose::Param param;
- param.perm = OperandIndex{init_param.inputs[1]};
+ param.perm.assign(perm.cbegin(), perm.cend());
return new operation::Transpose{inputs, outputs, param};
};