}
+// Copy a flat host vector (vec_base, vec_size elements of T) into an ACL
+// tensor, remapping each flat index n through ToARMComputeAxis so the data
+// lands in arm_compute's axis order.  Used to pre-load constant 1-D index
+// operands (e.g. StridedSlice start/end/strides) into backend tensors.
+// NOTE(review): assumes `tensor` was allocated rank-1 with length vec_size --
+// not checked here; confirm at the call sites.
template <typename T>
+static void initReorderVectorTensor(::arm_compute::ITensor &tensor, const uint8_t *vec_base,
+ const size_t vec_size)
+{
+ for (uint32_t n = 0; n < vec_size; ++n)
+ {
+ // Translate source index n into the corresponding ACL coordinate.
+ const ::arm_compute::Coordinates coordinate{ToARMComputeAxis(vec_size, n).value()};
+
+ T *into = reinterpret_cast<T *>(tensor.ptr_to_element(coordinate));
+
+ const T *from = reinterpret_cast<const T *>(vec_base) + n;
+ const auto value = *from;
+
+ *into = value;
+ }
+}
+
+// Initializer for a convolution kernel (weights) tensor: copies kernel_size
+// bytes from kernel_base into `tensor` laid out per kernel_shape.
+// NOTE(review): the body lies outside this hunk -- description inferred from
+// the signature only; confirm against the full definition.
+template <typename T>
static void initKernelTensor(::arm_compute::ITensor &tensor,
                             const nnfw::util::kernel::Shape &kernel_shape,
                             const uint8_t *kernel_base, const size_t kernel_size)
const ::internal::tflite::operand::Index endMask_index{node.param().endMask_index};
const ::internal::tflite::operand::Index shrinkAxisMask_index{node.param().shrinkAxisMask_index};
- // TODO Should move to the place where the operand is handled, if it is possible.
// Set Shape Constraints
+// The output reuses the input's quantization (scale/zeroPoint): slicing only
+// selects elements, it does not rescale them.
_builder.addShapeConstr(outputData_index, asTensorInfo(_ctx.at(outputData_index).shape(),
                                                       _ctx.at(outputData_index).type(),
                                                       _ctx.at(inputData_index).scale(),
                                                       _ctx.at(inputData_index).zeroPoint()));
- _builder.addShapeConstr(startData_index, asTensorInfo(_ctx.at(startData_index).shape().asVector(),
-                                                       _ctx.at(startData_index).type()));
- _builder.addShapeConstr(endData_index, asTensorInfo(_ctx.at(endData_index).shape().asVector(),
-                                                     _ctx.at(endData_index).type()));
+ // Hoist the 1-D operand lengths once; reused below both for the shape
+ // constraints and for the initializer closures.
+ const auto startData_size = _ctx.at(startData_index).shape().asVector();
+ const auto endData_size = _ctx.at(endData_index).shape().asVector();
+ const auto stridesData_size = _ctx.at(stridesData_index).shape().asVector();
+ _builder.addShapeConstr(startData_index,
+ asTensorInfo(startData_size, _ctx.at(startData_index).type()));
+ _builder.addShapeConstr(endData_index, asTensorInfo(endData_size, _ctx.at(endData_index).type()));
_builder.addShapeConstr(stridesData_index,
-                        asTensorInfo(_ctx.at(stridesData_index).shape().asVector(),
-                                     _ctx.at(stridesData_index).type()));
+                        asTensorInfo(stridesData_size, _ctx.at(stridesData_index).type()));
+
+ // Set initializers for indices data such as order of inputData
+ {
+ auto startData_base = _ctx.at(startData_index).data().base();
+ auto endData_base = _ctx.at(endData_index).data().base();
+ auto stridesData_base = _ctx.at(stridesData_index).data().base();
+
+ // NNAPI defines StridedSlice index operands as INT32 tensors, hence the
+ // initReorderVectorTensor<int32_t> instantiations below (asserted, not
+ // handled, since other types would be a model error upstream).
+ assert(_ctx.at(startData_index).type() == ANEURALNETWORKS_TENSOR_INT32);
+ auto startData_initializer =
+ std::bind(initReorderVectorTensor<int32_t>, _1, startData_base, startData_size);
+ _builder.addInitializer(startData_index, startData_initializer);
+
+ assert(_ctx.at(endData_index).type() == ANEURALNETWORKS_TENSOR_INT32);
+ auto endData_initializer =
+ std::bind(initReorderVectorTensor<int32_t>, _1, endData_base, endData_size);
+ _builder.addInitializer(endData_index, endData_initializer);
+
+ assert(_ctx.at(stridesData_index).type() == ANEURALNETWORKS_TENSOR_INT32);
+ auto stridesData_initializer =
+ std::bind(initReorderVectorTensor<int32_t>, _1, stridesData_base, stridesData_size);
+ _builder.addInitializer(stridesData_index, stridesData_initializer);
+ }
struct Param
{
param.endData_index = endData_index.asInt();
param.stridesData_index = stridesData_index.asInt();
- param.beginMask = _ctx.at(beginMask_index).asScalar<int32_t>();
- param.endMask = _ctx.at(endMask_index).asScalar<int32_t>();
- param.shrinkAxisMask = _ctx.at(shrinkAxisMask_index).asScalar<int32_t>();
+ // Set mask bits such as order of inputData
+ // Each mask is a per-axis bitfield; asReorderBits presumably permutes the
+ // bits with the same axis reordering applied to the start/end/strides
+ // vectors, sized by the input's rank.
+ // NOTE(review): confirm asReorderBits semantics in operand::Object.
+ const auto inputData_rank = _ctx.at(inputData_index).shape().rank();
+ param.beginMask = _ctx.at(beginMask_index).asReorderBits<int32_t>(inputData_rank);
+ param.endMask = _ctx.at(endMask_index).asReorderBits<int32_t>(inputData_rank);
+ param.shrinkAxisMask = _ctx.at(shrinkAxisMask_index).asReorderBits<int32_t>(inputData_rank);
+// Stage lambda: captures param by value and resolves the operand allocations
+// from the allocation context when the execution plan is built.
auto stage = [param](const IAllocationContext &ctx, IExecutionBuilder &builder) {
auto outputData_alloc = ctx.at(::internal::tflite::operand::Index{param.outputData_index});
auto inputData_alloc = ctx.at(::internal::tflite::operand::Index{param.inputData_index});
- // TODO: Let's alloc 1-D array for startData, endData and stridesData from operand
auto startData_alloc = ctx.at(::internal::tflite::operand::Index{param.startData_index});
auto endData_alloc = ctx.at(::internal::tflite::operand::Index{param.endData_index});
auto stridesData_alloc = ctx.at(::internal::tflite::operand::Index{param.stridesData_index});