* @param[in] layout Tensor data layout
*/
void registerTensorInfo(const ir::OperandIndex &ind, const ir::OperandInfo &info,
- ir::Layout backend_layout, bool as_const) override;
+ ir::Layout backend_layout) override;
void notifyFirstUse(const ir::OperandIndex &) override;
void notifyLastUse(const ir::OperandIndex &) override;
bool isRegistered(const ir::OperandIndex &) const override;
+ std::shared_ptr<backend::ITensorRegistry> tensorRegistry() override { return nullptr; }
void prepare(void) override;
void allocate() override;
std::shared_ptr<ITensor> tensorAt(const ir::OperandIndex &ind) override;
void iterate(const IterateFunction &fn) override;
- std::unique_ptr<ITensorManager> releaseTensorManager(void) override;
+ std::unique_ptr<ITensorManager> releaseStaticTensorManager(void) override;
std::shared_ptr<T_ITensor> at(const ir::OperandIndex &ind);
- void dimCorrection(const ir::OperandIndex &index, bool apply_dim_correction);
-
T_AclTensorManager *acl_tensor_manager(void) { return _tensor_mgr.get(); }
void setUsesCount(const ir::OperandIndex &index, size_t num_uses)
*/
bool isSubTensorOf(const ir::OperandIndex &parent, const ir::OperandIndex &child);
+ bool supportDynamicTensor() override { return false; }
+
private:
void buildTensors(void);
ir::OperandIndex findRootParent(ir::OperandIndex index);
private:
const ir::Operands &_operands;
ir::OperandIndexMap<ir::OperandInfo> _tensor_info_map;
- ir::OperandIndexMap<bool> _apply_dim_correction_map;
ir::OperandIndexMap<ir::Layout> _tensor_layout_map;
ir::OperandIndexMap<size_t> _uses_count_map;
std::unique_ptr<T_AclTensorManager> _tensor_mgr;
- ir::OperandIndexSequence _constants;
// for linear executor
std::vector<std::pair<UsesType, ir::OperandIndex>> _lifetime_seq;
template <typename T_ITensor, typename T_Tensor, typename T_SubTensor>
void AclTensorBuilder<T_ITensor, T_Tensor, T_SubTensor>::registerTensorInfo(
- const ir::OperandIndex &ind, const ir::OperandInfo &info, ir::Layout backend_layout,
- bool as_const)
+ const ir::OperandIndex &ind, const ir::OperandInfo &info, ir::Layout backend_layout)
{
assert(_tensor_mgr->constTensors().size() == 0);
assert(_tensor_mgr->nonconstTensors().size() == 0);
{
// Normal Tensors
_tensor_info_map.emplace(ind, info);
- _apply_dim_correction_map.emplace(ind, true);
_tensor_layout_map.insert({ind, backend_layout});
- if (as_const)
- _constants.append(ind);
}
else
{
// SubTensors
- assert(!as_const && "Subtensors of constants are not supported yet.");
+ assert(!info.isConstant() && "Subtensors of constants are not supported yet.");
// Update offset info and emplace
auto &parent_info = _parent_map[ind];
auto &offset = parent_info.coordinates;
auto frontend_layout = parent_info.frontend_layout;
- assert(obj.shape().rank() <= 4);
+ assert(obj.shape().rank() <= ir::Shape::MAX_RANK);
auto shape = obj.shape();
- if (_operands.at(parent_index).shape().rank() == 4 && frontend_layout == ir::Layout::NHWC &&
+ if (_operands.at(parent_index).shape().rank() >= 4 && frontend_layout == ir::Layout::NHWC &&
backend_layout == ir::Layout::NCHW)
{
- shape.extendRank(4);
+ // Permutation changing layout beyond 4-D is not supported yet
+ const auto parent_rank = _operands.at(parent_index).shape().rank();
+ assert(parent_rank == 4);
+ shape.extendRank(parent_rank);
offset = {offset[0], offset[3], offset[1], offset[2]};
}
- else if (_operands.at(parent_index).shape().rank() == 4 &&
+ else if (_operands.at(parent_index).shape().rank() >= 4 &&
           frontend_layout == ir::Layout::NCHW && backend_layout == ir::Layout::NHWC)
{
- shape.extendRank(4);
+ // Permutation changing layout beyond 4-D is not supported yet
+ const auto parent_rank = _operands.at(parent_index).shape().rank();
+ assert(parent_rank == 4);
+ shape.extendRank(parent_rank);
offset = {offset[0], offset[2], offset[3], offset[1]};
}
auto new_shape = permuteShape(shape, frontend_layout, backend_layout);
- ir::OperandInfo oi{new_shape, obj.typeInfo()};
+ auto oi = ir::OperandInfo::createStaticInfo(new_shape, obj.typeInfo());
_tensor_info_map.emplace(ind, oi);
-
- _apply_dim_correction_map.emplace(ind, true);
}
}
_tensor_mgr->finishLifetime(use_index);
}
- assert(_constants.size() == _tensor_mgr->constTensors().size());
_tensor_mgr->allocateConsts();
// TODO Since `_parent_map` is filled for all Concat nodes even if the node this backend uses
// After refactoring BackendContext we can uncomment this
// assert(_tensor_info_map.size() ==
- // _tensor_mgr->nonconstTensors().size() + _constants.size() + _parent_map.size());
+ // _tensor_mgr->nonconstTensors().size() + num of constants of _tensor_info_map +
+ // _parent_map.size());
_tensor_mgr->allocateNonconsts();
_tensor_mgr->allocateInternalBufferManager();
}
template <typename T_ITensor, typename T_Tensor, typename T_SubTensor>
-void AclTensorBuilder<T_ITensor, T_Tensor, T_SubTensor>::dimCorrection(
- const ir::OperandIndex &index, bool apply_dim_correction)
-{
- _apply_dim_correction_map[index] = apply_dim_correction;
-}
-
-template <typename T_ITensor, typename T_Tensor, typename T_SubTensor>
std::unique_ptr<ITensorManager>
-AclTensorBuilder<T_ITensor, T_Tensor, T_SubTensor>::releaseTensorManager(void)
+AclTensorBuilder<T_ITensor, T_Tensor, T_SubTensor>::releaseStaticTensorManager(void)
{
return std::move(_tensor_mgr);
}
const auto &info = entry.second;
const auto &backend_layout = _tensor_layout_map[ind];
- auto tensor_info = asTensorInfo(info.shape(), info.typeInfo(), ir::Layout::UNKNOWN,
- backend_layout, _apply_dim_correction_map[ind]);
- _tensor_mgr->buildTensor(ind, tensor_info, info.shape().rank(), _constants.contains(ind),
+ auto tensor_info =
+ asTensorInfo(info.shape(), info.typeInfo(), ir::Layout::UNKNOWN, backend_layout, true);
+ _tensor_mgr->buildTensor(ind, tensor_info, info.shape().rank(), info.isConstant(),
_uses_count_map[ind]);
}
parent_tensor->info()->quantization_info().uniform().offset);
assert(tensor_info.typeInfo().scale() ==
parent_tensor->info()->quantization_info().uniform().scale);
- assert(asDataType(tensor_info.typeInfo().type()) == parent_tensor->info()->data_type());
+ assert(tensor_info.typeInfo().type() == parent_tensor->data_type());
// NOTE SubTensor's layout must be the same with layout of parent tensor
const auto &root_parent = findRootParent(parent);
const auto &backend_layout = _tensor_layout_map[root_parent];
- auto shape = asTensorShape(tensor_info.shape(), ir::Layout::UNKNOWN, backend_layout,
- _apply_dim_correction_map[current]);
+ auto shape = asTensorShape(tensor_info.shape(), ir::Layout::UNKNOWN, backend_layout, true);
::arm_compute::Coordinates coordinates =
asTensorCoordinate(parent_info.coordinates, ir::Layout::UNKNOWN, backend_layout);
_tensor_mgr->buildSubtensor(parent, current, shape, coordinates, tensor_info.shape().rank(),