* Make `starts`, `ends` and `strides` inputs to the operation.
* Make `begin_mask`, `end_mask`, `shrink_axis_mask` static attributes of the operation.
Signed-off-by: Sergei Barannikov <s.barannikov@samsung.com>
{
const auto output_index{node.getOutputs().at(0)};
const auto input_index{node.getInputs().at(model::operation::StridedSlice::Input::INPUT)};
- const auto startData_index{node.param().startData_index};
- const auto endData_index{node.param().endData_index};
- const auto stridesData_index{node.param().stridesData_index};
- const auto beginMask_index{node.param().beginMask_index};
- const auto endMask_index{node.param().endMask_index};
- const auto shrinkAxisMask_index{node.param().shrinkAxisMask_index};
+ const auto starts_index{node.getInputs().at(model::operation::StridedSlice::Input::STARTS)};
+ const auto ends_index{node.getInputs().at(model::operation::StridedSlice::Input::ENDS)};
+ const auto strides_index{node.getInputs().at(model::operation::StridedSlice::Input::STRIDES)};
auto outputData_alloc = _tensor_builder->at(output_index).get();
auto inputData_alloc = _tensor_builder->at(input_index).get();
ends.resize(input_rank, 0);
strides.resize(input_rank, 0);
{
- auto input_shape = _ctx.at(input_index).shape();
- auto startData_base = _ctx.at(startData_index).data().base();
- auto endData_base = _ctx.at(endData_index).data().base();
- auto stridesData_base = _ctx.at(stridesData_index).data().base();
- const int startData_size = _ctx.at(startData_index).shape().num_elements();
- const int endData_size = _ctx.at(endData_index).shape().num_elements();
- const int stridesData_size = _ctx.at(stridesData_index).shape().num_elements();
+ auto startData_base = _ctx.at(starts_index).data().base();
+ auto endData_base = _ctx.at(ends_index).data().base();
+ auto stridesData_base = _ctx.at(strides_index).data().base();
+ const int startData_size = _ctx.at(starts_index).shape().num_elements();
+ const int endData_size = _ctx.at(ends_index).shape().num_elements();
+ const int stridesData_size = _ctx.at(strides_index).shape().num_elements();
using neurun::model::DataType;
UNUSED_RELEASE(endData_size);
UNUSED_RELEASE(stridesData_size);
- assert(_ctx.at(startData_index).typeInfo().type() == DataType::INT32);
- assert(_ctx.at(endData_index).typeInfo().type() == DataType::INT32);
- assert(_ctx.at(stridesData_index).typeInfo().type() == DataType::INT32);
+ assert(_ctx.at(starts_index).typeInfo().type() == DataType::INT32);
+ assert(_ctx.at(ends_index).typeInfo().type() == DataType::INT32);
+ assert(_ctx.at(strides_index).typeInfo().type() == DataType::INT32);
assert(startData_size == input_rank);
assert(endData_size == input_rank);
assert(stridesData_size == input_rank);
}
// Set mask bits such as order of inputData
- const auto beginMask = ::neurun::backend::acl_common::ReorderBits<int32_t>(
- _ctx.at(beginMask_index).asScalar<int32_t>(), input_rank, frontend_layout, backend_layout);
- const auto endMask = ::neurun::backend::acl_common::ReorderBits<int32_t>(
- _ctx.at(endMask_index).asScalar<int32_t>(), input_rank, frontend_layout, backend_layout);
- const auto shrinkAxisMask = ::neurun::backend::acl_common::ReorderBits<int32_t>(
- _ctx.at(shrinkAxisMask_index).asScalar<int32_t>(), input_rank, frontend_layout,
- backend_layout);
+ const auto begin_mask = acl_common::ReorderBits<int32_t>(node.param().begin_mask, input_rank,
+ frontend_layout, backend_layout);
+ const auto end_mask = acl_common::ReorderBits<int32_t>(node.param().end_mask, input_rank,
+ frontend_layout, backend_layout);
+ const auto shrink_axis_mask = acl_common::ReorderBits<int32_t>(
+ node.param().shrink_axis_mask, input_rank, frontend_layout, backend_layout);
::arm_compute::Coordinates starts_set;
::arm_compute::Coordinates ends_set;
auto fn = nnfw::cpp14::make_unique<::arm_compute::CLStridedSlice>();
fn->configure(inputData_alloc->handle(), outputData_alloc->handle(), starts_set, ends_set,
- strides_set, beginMask, endMask, shrinkAxisMask);
+ strides_set, begin_mask, end_mask, shrink_axis_mask);
auto acl_fn = asAclFunction(std::move(fn));
{
const auto output_index{node.getOutputs().at(0)};
const auto input_index{node.getInputs().at(model::operation::StridedSlice::Input::INPUT)};
- const auto startData_index{node.param().startData_index};
- const auto endData_index{node.param().endData_index};
- const auto stridesData_index{node.param().stridesData_index};
- const auto beginMask_index{node.param().beginMask_index};
- const auto endMask_index{node.param().endMask_index};
- const auto shrinkAxisMask_index{node.param().shrinkAxisMask_index};
+ const auto starts_index{node.getInputs().at(model::operation::StridedSlice::Input::STARTS)};
+ const auto ends_index{node.getInputs().at(model::operation::StridedSlice::Input::ENDS)};
+ const auto strides_index{node.getInputs().at(model::operation::StridedSlice::Input::STRIDES)};
// Set initializers for indices data such as order of inputData
int input_rank = _ctx.at(input_index).shape().rank();
ends.resize(input_rank, 0);
strides.resize(input_rank, 0);
{
- auto input_shape = _ctx.at(input_index).shape();
- auto startData_base = _ctx.at(startData_index).data().base();
- auto endData_base = _ctx.at(endData_index).data().base();
- auto stridesData_base = _ctx.at(stridesData_index).data().base();
- const int startData_size = _ctx.at(startData_index).shape().num_elements();
- const int endData_size = _ctx.at(endData_index).shape().num_elements();
- const int stridesData_size = _ctx.at(stridesData_index).shape().num_elements();
+ auto startData_base = _ctx.at(starts_index).data().base();
+ auto endData_base = _ctx.at(ends_index).data().base();
+ auto stridesData_base = _ctx.at(strides_index).data().base();
+ const int startData_size = _ctx.at(starts_index).shape().num_elements();
+ const int endData_size = _ctx.at(ends_index).shape().num_elements();
+ const int stridesData_size = _ctx.at(strides_index).shape().num_elements();
using neurun::model::DataType;
UNUSED_RELEASE(endData_size);
UNUSED_RELEASE(stridesData_size);
- assert(_ctx.at(startData_index).typeInfo().type() == DataType::INT32);
- assert(_ctx.at(endData_index).typeInfo().type() == DataType::INT32);
- assert(_ctx.at(stridesData_index).typeInfo().type() == DataType::INT32);
+ assert(_ctx.at(starts_index).typeInfo().type() == DataType::INT32);
+ assert(_ctx.at(ends_index).typeInfo().type() == DataType::INT32);
+ assert(_ctx.at(strides_index).typeInfo().type() == DataType::INT32);
assert(startData_size == input_rank);
assert(endData_size == input_rank);
assert(stridesData_size == input_rank);
assert(startData_base != nullptr);
for (int n = 0; n < input_rank; ++n)
{
+ // FIXME Take the layouts into account.
auto axis = ::neurun::backend::acl_common::ToARMComputeAxis(input_rank, n).value();
int32_t start_value = *(reinterpret_cast<const int32_t *>(startData_base) + n);
}
// Set mask bits such as order of inputData
- const auto beginMask = ::neurun::backend::acl_common::ReorderBits<int32_t>(
- _ctx.at(beginMask_index).asScalar<int32_t>(), input_rank);
- const auto endMask = ::neurun::backend::acl_common::ReorderBits<int32_t>(
- _ctx.at(endMask_index).asScalar<int32_t>(), input_rank);
- const auto shrinkAxisMask = ::neurun::backend::acl_common::ReorderBits<int32_t>(
- _ctx.at(shrinkAxisMask_index).asScalar<int32_t>(), input_rank);
+ // FIXME Take the layouts into account.
+ const auto begin_mask = acl_common::ReorderBits<int32_t>(node.param().begin_mask, input_rank);
+ const auto end_mask = acl_common::ReorderBits<int32_t>(node.param().end_mask, input_rank);
+ const auto shrink_axis_mask =
+ acl_common::ReorderBits<int32_t>(node.param().shrink_axis_mask, input_rank);
auto outputData_alloc = _tensor_builder->at(output_index).get();
auto inputData_alloc = _tensor_builder->at(input_index).get();
auto fn = nnfw::cpp14::make_unique<::arm_compute::NEStridedSlice>();
fn->configure(inputData_alloc->handle(), outputData_alloc->handle(), starts_set, ends_set,
- strides_set, beginMask, endMask, shrinkAxisMask);
+ strides_set, begin_mask, end_mask, shrink_axis_mask);
auto acl_fn = asAclFunction(std::move(fn));
public:
enum Input
{
- INPUT = 0
+ INPUT = 0,
+ STARTS = 1,
+ ENDS = 2,
+ STRIDES = 3
};
struct Param
{
- OperandIndex startData_index; //!< index where slicing start from
- OperandIndex endData_index; //!< index where slicing ends to
- OperandIndex stridesData_index; //!< index for stride value
- OperandIndex beginMask_index; //!< index for beginmask
- OperandIndex endMask_index; //!< index for endmask
- OperandIndex shrinkAxisMask_index; //!< index for shrink axis
+ std::int32_t begin_mask;
+ std::int32_t end_mask;
+ std::int32_t shrink_axis_mask;
};
public:
StridedSlice::StridedSlice(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs,
const Param &param)
- : model::Operation{OperandConstraint::createExact(1u), inputs, outputs}, _param{param}
+ : model::Operation{OperandConstraint::createExact(4u), inputs, outputs}, _param{param}
{
}
};
_map[ANEURALNETWORKS_STRIDED_SLICE] = [](const OperationFactory::Param &init_param,
- neurun::model::Operands &) {
+ neurun::model::Operands &operands) {
assert(init_param.input_count == 7 && init_param.output_count == 1);
- OperandIndexSequence inputs{init_param.inputs[0]};
+ OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1], init_param.inputs[2],
+ init_param.inputs[3]};
OperandIndexSequence outputs{init_param.outputs[0]};
// Each input should be interpreted as follows:
operation::StridedSlice::Param param;
- param.startData_index = OperandIndex{init_param.inputs[1]};
- param.endData_index = OperandIndex{init_param.inputs[2]};
- param.stridesData_index = OperandIndex{init_param.inputs[3]};
- param.beginMask_index = OperandIndex{init_param.inputs[4]};
- param.endMask_index = OperandIndex{init_param.inputs[5]};
- param.shrinkAxisMask_index = OperandIndex{init_param.inputs[6]};
+ param.begin_mask = operands.at(OperandIndex{init_param.inputs[4]}).asScalar<std::int32_t>();
+ param.end_mask = operands.at(OperandIndex{init_param.inputs[5]}).asScalar<std::int32_t>();
+ param.shrink_axis_mask =
+ operands.at(OperandIndex{init_param.inputs[6]}).asScalar<std::int32_t>();
return new operation::StridedSlice{inputs, outputs, param};
};