This commit makes the converting functions support NCHW by passing the frontend and backend layouts explicitly.
Signed-off-by: jiseob.jang <jiseob.jang@samsung.com>
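
In short, the acl_common converters no longer take a single layout that implicitly assumes an NHWC frontend; they take the frontend layout and the backend layout as separate arguments. A side-by-side of the asTensorShape signature as changed in the header hunk below (asTensorCoordinate and asTensorInfo follow the same pattern):

// Before: one layout argument, frontend implicitly assumed to be NHWC.
::arm_compute::TensorShape asTensorShape(const ::neurun::model::Shape &shape,
                                         ::neurun::model::Layout layout,
                                         bool apply_dim_correction = true);

// After: frontend and backend layouts are passed explicitly. Call sites still pass
// model::Layout::NHWC for the frontend, marked with "TODO Support NCHW frontend".
::arm_compute::TensorShape asTensorShape(const ::neurun::model::Shape &shape,
                                         ::neurun::model::Layout frontend_layout,
                                         ::neurun::model::Layout backend_layout,
                                         bool apply_dim_correction = true);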
auto fn = nnfw::cpp14::make_unique<arm_compute::CLFullyConnectedReshapingLayer>(
_tensor_builder->acl_memory_manager()->internal_buffer_manager());
- fn->configure(input_alloc->handle(), weight_alloc->handle(), bias_alloc->handle(),
- output_alloc->handle(), needs_reshape,
- ::neurun::backend::acl_common::asTensorShape(
- reshape, ::neurun::backend::acl_common::asRuntimeLayout(acl_layout)));
+ fn->configure(
+ input_alloc->handle(), weight_alloc->handle(), bias_alloc->handle(), output_alloc->handle(),
+ needs_reshape,
+ ::neurun::backend::acl_common::asTensorShape(/* TODO Support NCHW frontend */
+ reshape, model::Layout::NHWC,
+ ::neurun::backend::acl_common::asRuntimeLayout(
+ acl_layout)));
auto acl_fn = asAclFunction(std::move(fn));
namespace acl_common
{
-// TODO Support front-end as NCHW for this function
::arm_compute::TensorShape asTensorShape(const ::neurun::model::Shape &shape,
- ::neurun::model::Layout layout, bool apply_dim_correction)
+ ::neurun::model::Layout frontend_layout,
+ ::neurun::model::Layout backend_layout,
+ bool apply_dim_correction)
{
const uint32_t rank = shape.rank();
// However, if the dimension correction is applied to input_to_input_weights with input_size
// equal to 1, it will be changed to 1-D.
// So input_to_input_weights is not used by the weight of FullyConnected.
- if (rank == 4 && layout == ::neurun::model::Layout::NCHW)
- {
- // NHWC -> NCHW
- int32_t permutation[4] = {0, 2, 3, 1};
- res.set(ToARMComputeAxis(rank, permutation[axis]).value(), shape.dim(axis),
- apply_dim_correction);
- }
- else
- {
- res.set(ToARMComputeAxis(rank, axis).value(), shape.dim(axis), apply_dim_correction);
- }
+ res.set(ToARMComputeAxis(rank, axis, frontend_layout, backend_layout).value(), shape.dim(axis),
+ apply_dim_correction);
}
return res;
}
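
The removed branch hard-coded the NHWC-to-NCHW index permutation {0, 2, 3, 1}; the extended ToARMComputeAxis overload is now expected to derive that permutation from the two layouts before reversing the axis into ACL's innermost-first order. A minimal self-contained sketch of that mapping, assuming ToARMComputeAxis(rank, axis) was the plain axis reversal used on the old same-layout path (names here are illustrative, not the actual Swizzle.h code):

#include <cassert>
#include <cstdint>
#include <iostream>

enum class Layout { NHWC, NCHW }; // stand-in for ::neurun::model::Layout

// Map a frontend axis to its position in the backend layout, then reverse it into
// ACL's TensorShape order (ACL stores dimensions innermost-first).
uint32_t to_acl_axis(uint32_t rank, uint32_t axis, Layout frontend, Layout backend)
{
  assert(axis < rank);
  if (rank == 4 && frontend == Layout::NHWC && backend == Layout::NCHW)
  {
    // Position of each NHWC axis (N, H, W, C) inside an NCHW shape.
    static const uint32_t nhwc_to_nchw[4] = {0, 2, 3, 1};
    axis = nhwc_to_nchw[axis];
  }
  return rank - axis - 1; // plain reversal, as on the old same-layout path
}

int main()
{
  // An NHWC frontend shape placed into an NCHW backend tensor:
  // N -> ACL axis 3, H -> 1, W -> 0, C -> 2.
  for (uint32_t axis = 0; axis < 4; ++axis)
    std::cout << axis << " -> " << to_acl_axis(4, axis, Layout::NHWC, Layout::NCHW) << '\n';
  return 0;
}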
::arm_compute::Coordinates asTensorCoordinate(const ::neurun::util::Coordinates &coord,
- ::neurun::model::Layout layout)
+ ::neurun::model::Layout frontend_layout,
+ ::neurun::model::Layout backend_layout)
{
const uint32_t rank = coord.size();
for (uint32_t axis = 0; axis < rank; ++axis)
{
- if (rank == 4 && layout == ::neurun::model::Layout::NCHW)
- {
- // NHWC -> NCHW
- int32_t permutation[4] = {0, 2, 3, 1};
- res.set(ToARMComputeAxis(rank, permutation[axis]).value(), coord[axis]);
- }
- else
- {
- res.set(ToARMComputeAxis(rank, axis).value(), coord[axis]);
- }
+ res.set(ToARMComputeAxis(rank, axis, frontend_layout, backend_layout).value(), coord[axis]);
}
return res;
return ::arm_compute::QuantizationInfo(scale, offset);
}
-// TODO Support front-end as NCHW for this function
::arm_compute::TensorInfo asTensorInfo(const ::neurun::model::Shape &shape,
const ::neurun::model::TypeInfo &typeInfo,
- ::neurun::model::Layout layout, bool apply_dim_correction)
+ ::neurun::model::Layout frontend_layout,
+ ::neurun::model::Layout backend_layout,
+ bool apply_dim_correction)
{
- ::arm_compute::TensorInfo info(asTensorShape(shape, layout, apply_dim_correction), 1,
- asDataType(typeInfo.type()),
- asQuantizationInfo(typeInfo.scale(), typeInfo.offset()));
- info.set_data_layout(asDataLayout(layout));
+ ::arm_compute::TensorInfo info(
+ asTensorShape(shape, frontend_layout, backend_layout, apply_dim_correction), 1,
+ asDataType(typeInfo.type()), asQuantizationInfo(typeInfo.scale(), typeInfo.offset()));
+ info.set_data_layout(asDataLayout(backend_layout));
return info;
}
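
With both layouts in hand, asTensorInfo interprets the shape in the frontend layout while tagging the ACL tensor with the backend data layout. A hypothetical call for an NHWC frontend operand on an NCHW backend (the shape and type objects are assumed to exist; only the converter call itself comes from this diff):

// const ::neurun::model::Shape &shape = ...;        // e.g. NHWC {1, 224, 224, 3} from the frontend
// const ::neurun::model::TypeInfo &type_info = ...; // operand element type and quantization
auto tensor_info = ::neurun::backend::acl_common::asTensorInfo(
    shape, type_info, ::neurun::model::Layout::NHWC, ::neurun::model::Layout::NCHW);
// tensor_info's TensorShape is permuted into NCHW order and its data layout is set to NCHW,
// mirroring the TensorBuilder call further down in this diff.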
{
::arm_compute::TensorShape asTensorShape(const ::neurun::model::Shape &shape,
- ::neurun::model::Layout layout,
+ ::neurun::model::Layout frontend_layout,
+ ::neurun::model::Layout backend_layout,
bool apply_dim_correction = true);
::arm_compute::Coordinates asTensorCoordinate(const ::neurun::util::Coordinates &coord,
- ::neurun::model::Layout layout);
+ ::neurun::model::Layout frontend_layout,
+ ::neurun::model::Layout backend_layout);
::arm_compute::DataType asDataType(const ::neurun::model::DataType &type);
::arm_compute::TensorInfo asTensorInfo(const ::neurun::model::Shape &shape,
const ::neurun::model::TypeInfo &typeInfo,
- ::neurun::model::Layout layout,
+ ::neurun::model::Layout frontend_layout,
+ ::neurun::model::Layout backend_layout,
bool apply_dim_correction = true);
::arm_compute::PadStrideInfo asPadStrideInfo(const model::ExplicitPadding &padding,
{
auto ind = entry.first;
const auto &info = entry.second;
- auto tensor_info =
- asTensorInfo(info.shape(), info.typeInfo(), _layout, _apply_dim_correction_map[ind]);
+ // TODO Support NCHW frontend
+ auto tensor_info = asTensorInfo(info.shape(), info.typeInfo(), model::Layout::NHWC, _layout,
+ _apply_dim_correction_map[ind]);
_mem_mgr->buildTensor(ind, tensor_info);
}
}
assert(info.type().scale() == parent_tensor->info()->quantization_info().scale);
assert(asDataType(info.type().type()) == parent_tensor->info()->data_type());
// TODO Change to set data_layout for each front-end
- auto shape = asTensorShape(info.shape(), _layout, _apply_dim_correction_map[current]);
- ::arm_compute::Coordinates coordinates = asTensorCoordinate(info.offset(), _layout);
+ auto shape = asTensorShape(info.shape(), model::Layout::NHWC, _layout,
+ _apply_dim_correction_map[current]);
+ ::arm_compute::Coordinates coordinates =
+ asTensorCoordinate(info.offset(), model::Layout::NHWC, _layout);
auto tensor = std::make_shared<T_SubTensor>(parent_tensor.get(), shape, coordinates, true);
subtensors[current] = tensor;
stack.pop();
auto fn = nnfw::cpp14::make_unique<arm_compute::NEFullyConnectedReshapingLayer>(
_tensor_builder->acl_memory_manager()->internal_buffer_manager());
- fn->configure(input_alloc->handle(), weight_alloc->handle(), bias_alloc->handle(),
- output_alloc->handle(), needs_reshape,
- ::neurun::backend::acl_common::asTensorShape(
- reshape, ::neurun::backend::acl_common::asRuntimeLayout(acl_layout)));
+ fn->configure(
+ input_alloc->handle(), weight_alloc->handle(), bias_alloc->handle(), output_alloc->handle(),
+ needs_reshape,
+ ::neurun::backend::acl_common::asTensorShape(/* TODO Support NCHW frontend */
+ reshape, model::Layout::NHWC,
+ ::neurun::backend::acl_common::asRuntimeLayout(
+ acl_layout)));
auto acl_fn = asAclFunction(std::move(fn));