/**
* @brief Register tensor information to allocate on ACL-CL backend
- * @param[in] ind Operand index
- * @param[in] info Tensor information
+ * @param[in] ind Operand index
+ * @param[in] info Tensor information
+ * @param[in] layout Tensor data layout
*/
- void registerTensorInfo(const model::OperandIndex &ind, const model::OperandInfo &info) override;
+ void registerTensorInfo(const model::OperandIndex &ind, const model::OperandInfo &info,
+ const graph::operand::Layout &layout) override;
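For reference, a call against the new interface looks like this; a minimal sketch, where the index, shape, and type values are made up and the `model::OperandInfo` constructor form is assumed from its usage elsewhere in the codebase:

    // Hypothetical usage (not part of this patch). Shape/type values are
    // illustrative; Layout::NHWC is one of the two supported layouts.
    model::OperandIndex ind{0};
    model::OperandInfo info{model::Shape{1, 224, 224, 3},
                            model::TypeInfo{model::DataType::FLOAT32}};
    tensor_builder->registerTensorInfo(ind, info, graph::operand::Layout::NHWC);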
/**
* @brief Register subtensor information to allocate on ACL-CL backend
* @param[in] ind Operand index
model::OperandIndexMap<std::shared_ptr<T_Tensor>> _tensors;
model::OperandIndexMap<std::shared_ptr<T_SubTensor>> _subtensors;
model::OperandIndexMap<std::shared_ptr<T_Object>> _objects;
+ graph::operand::Layout _layout;
};
} // namespace acl_common
template <typename T_ITensor, typename T_Tensor, typename T_SubTensor, typename T_Object>
void TemplTensorBuilder<T_ITensor, T_Tensor, T_SubTensor, T_Object>::registerTensorInfo(
- const model::OperandIndex &ind, const model::OperandInfo &info)
+ const model::OperandIndex &ind, const model::OperandInfo &info,
+ const graph::operand::Layout &layout)
{
assert(_tensors.size() == 0);
_tensor_info_map.insert({ind, info});
_apply_dim_correction_map.insert({ind, true});
+ _layout = layout;
}
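One design note on the store above: `_layout` is a single member overwritten on every registration, so all tensors created through one builder end up sharing the layout of the last call. A sketch of that consequence, under this reading of the code:

    // Sketch (assumption): two registrations with different layouts leave
    // the builder holding only the second one, and every tensor allocated
    // afterwards is converted with it.
    builder.registerTensorInfo(ind0, info0, graph::operand::Layout::NHWC);
    builder.registerTensorInfo(ind1, info1, graph::operand::Layout::NCHW);
    // _layout == Layout::NCHW for both tensors from here on.

This is presumably safe because lowering assigns a single layout per backend, so every operand registered to one builder carries the same value.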
template <typename T_ITensor, typename T_Tensor, typename T_SubTensor, typename T_Object>
// Allocated subtensor will be mapped to _subtensors instead of _tensors
assert(_subtensors.size() == 0);
- const std::string layout_str =
- config::ConfigManager::instance().get<std::string>(config::ACL_DEFAULT_LAYOUT);
- ::neurun::graph::operand::Layout default_layout;
- if (layout_str == "NHWC")
- {
- default_layout = ::neurun::graph::operand::Layout::NHWC;
- }
- else if (layout_str == "NCHW")
- {
- default_layout = ::neurun::graph::operand::Layout::NCHW;
- }
- else
- {
- throw std::runtime_error("Invalid ACL_DEFAULT_LAYOUT settings");
- }
-
for (auto &entry : _tensor_info_map)
{
auto ind = entry.first;
const auto &info = entry.second;
- // The default data_layout of tensors depends on data_layout of front-end
- // TODO Change to set data_layout for each front-end
auto tensor_info =
- asTensorInfo(info.shape(), info.typeInfo(), default_layout, _apply_dim_correction_map[ind]);
+ asTensorInfo(info.shape(), info.typeInfo(), _layout, _apply_dim_correction_map[ind]);
auto tensor = std::make_shared<T_Tensor>(tensor_info);
_tensors[ind] = tensor;
}
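asTensorInfo is defined elsewhere in this backend; for orientation, a hedged sketch of the layout-dependent part of that conversion, assuming rank-4 shapes and ACL's W,H,C,N internal dimension order (this reimplements only the permutation, not the real converter):

    // Sketch only: how a 4-D neurun shape might map to an ACL shape for
    // each layout. asTensorShape/asTensorInfo are the actual converters.
    ::arm_compute::TensorShape toAclShape4D(const model::Shape &shape,
                                            graph::operand::Layout layout)
    {
      if (layout == graph::operand::Layout::NHWC) // source dims: N,H,W,C
        return ::arm_compute::TensorShape(shape.dim(2), shape.dim(1),
                                          shape.dim(3), shape.dim(0));
      // NCHW source dims: N,C,H,W
      return ::arm_compute::TensorShape(shape.dim(3), shape.dim(2),
                                        shape.dim(1), shape.dim(0));
    }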
assert(info.type().scale() == parent_tensor->info()->quantization_info().scale);
assert(asDataType(info.type().type()) == parent_tensor->info()->data_type());
- // TODO Change to set data_layout for each front-end
- auto shape = asTensorShape(info.shape(), default_layout, _apply_dim_correction_map[current]);
+ auto shape = asTensorShape(info.shape(), _layout, _apply_dim_correction_map[current]);
// Only support axis: 3 (channel)
::arm_compute::Coordinates coordinates;
/**
* @brief Register tensor information to allocate on CPU backend
- * @param[in] ind Operand index
- * @param[in] info Operand information
+ * @param[in] ind Operand index
+ * @param[in] info Operand information
+ * @param[in] layout Operand data layout
*/
- void registerTensorInfo(const model::OperandIndex &ind, const model::OperandInfo &info) override;
+ void registerTensorInfo(const model::OperandIndex &ind, const model::OperandInfo &info,
+ const graph::operand::Layout &layout) override;
/**
* @brief Register subtensor information to allocate on CPU backend
* @param[in] ind Operand index
{
auto tensor_builder = backend->tensor_builder();
const auto info = obj.info();
- tensor_builder->registerTensorInfo(ind, info);
+ const auto layout = lower_info->layout();
+ tensor_builder->registerTensorInfo(ind, info, layout);
tensor_builder->notifyFirstUse(ind);
tensor_builders.insert(tensor_builder);
}
{
auto tensor_builder = backend->tensor_builder();
const auto info = obj.info();
- tensor_builder->registerTensorInfo(ind, info);
+ const auto layout = lower_info->layout();
+ tensor_builder->registerTensorInfo(ind, info, layout);
tensor_builder->notifyFirstUse(ind);
tensor_builders.insert(tensor_builder);
}
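Because registerTensorInfo is an `override` of the tensor-builder interface, every remaining two-argument call site must be updated along with this patch. A hedged migration sketch for call sites with no lowering info at hand (`Layout::NHWC` is a placeholder here, not a prescribed default):

    // Before (no longer compiles against the new interface):
    //   tensor_builder->registerTensorInfo(ind, info);
    // After: thread a layout through explicitly.
    tensor_builder->registerTensorInfo(ind, info, graph::operand::Layout::NHWC);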