This commit removes the backend-wide operand layout.
- Remove getting the layout from the backend config
- Introduce the layout into operand LowerInfo
Signed-off-by: jiseob.jang <jiseob.jang@samsung.com>
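
With this change the operand layout is carried by operand::LowerInfo (and exposed by the
allocated tensor) instead of being a single property of the backend config, and Permute
kernels derive their permutation type from the two operands' layouts. A minimal standalone
sketch of that flow, using simplified stand-in types (Layout, LowerInfo, PermuteType here
are reduced for illustration and are not the actual neurun declarations):

    // Simplified stand-ins for illustration only (not the actual neurun types)
    enum class Layout { NHWC, NCHW };
    enum class PermuteType { COPY, NHWC_TO_NCHW, NCHW_TO_NHWC };

    // Per-operand lowering info now owns the layout (defaulting to NHWC, as in this diff)
    class LowerInfo
    {
    public:
      void setLayout(Layout layout) { _layout = layout; }
      Layout layout() const { return _layout; }

    private:
      Layout _layout{Layout::NHWC};
    };

    // The permutation type is decided from the input/output operand layouts,
    // no longer from the backends' configs
    PermuteType permuteTypeOf(const LowerInfo &input, const LowerInfo &output)
    {
      if (input.layout() == Layout::NHWC && output.layout() == Layout::NCHW)
        return PermuteType::NHWC_TO_NCHW;
      if (input.layout() == Layout::NCHW && output.layout() == Layout::NHWC)
        return PermuteType::NCHW_TO_NHWC;
      return PermuteType::COPY;
    }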
public:
std::string id() override { return "acl_cl"; }
void initialize() override;
- graph::operand::Layout getOperandLayout() override
- {
- // TODO Remove this method
- const std::string layout_str =
- config::ConfigManager::instance().get<std::string>(config::ACL_DEFAULT_LAYOUT);
- if (layout_str == "NHWC")
- {
- return graph::operand::Layout::NHWC;
- }
- else if (layout_str == "NCHW")
- {
- return graph::operand::Layout::NCHW;
- }
- else
- {
- throw std::runtime_error("Invalid ACL_DEFAULT_LAYOUT settings");
- }
- }
-
bool SupportSubTensorAlloc() override { return true; }
};
public:
std::string id() override { return "acl_neon"; }
void initialize() override;
- graph::operand::Layout getOperandLayout() override
- {
- // TODO Remove this method
- const std::string layout_str =
- config::ConfigManager::instance().get<std::string>(config::ACL_DEFAULT_LAYOUT);
- if (layout_str == "NHWC")
- {
- return ::neurun::graph::operand::Layout::NHWC;
- }
- else if (layout_str == "NCHW")
- {
- return ::neurun::graph::operand::Layout::NCHW;
- }
- else
- {
- throw std::runtime_error("Invalid ACL_DEFAULT_LAYOUT settings");
- }
- }
-
bool SupportSubTensorAlloc() override { return true; }
};
public:
std::string id() override { return "cpu"; }
void initialize() override;
- graph::operand::Layout getOperandLayout() override { return graph::operand::Layout::NHWC; }
bool SupportSubTensorAlloc() override
{
// NOTE CPU allocator cannot support subtensor allocation yet
const auto output_index{node.getOutputs().at(0)};
const auto input_index{node.getInputs().at(0)};
- using PermuteType = model::operation::PermuteNode::Type;
-
struct Param
{
model::OperandIndex output_index;
model::Shape shape;
model::DataType data_type;
- PermuteType type{PermuteType::COPY};
const backend::Backend *input_backend;
const backend::Backend *output_backend;
};
param.input_index = input_index;
param.shape = _ctx.at(output_index).shape();
- param.type = node.param().type;
param.input_backend = node.param().input_backend;
param.output_backend = node.param().output_backend;
param.data_type = node.getDataType();
shape.dim(3) = param.shape.dim(2);
}
- fn->configure(input_object, output_object, shape, param.type, param.data_type);
+ // Find Permutation Type
+ auto permuteType = [&]() {
+ if (input_object->ptr()->layout() == graph::operand::Layout::NHWC &&
+ output_object->ptr()->layout() == graph::operand::Layout::NCHW)
+ {
+ return model::operation::PermuteNode::Type::NHWC_TO_NCHW;
+ }
+ else if (input_object->ptr()->layout() == graph::operand::Layout::NCHW &&
+ output_object->ptr()->layout() == graph::operand::Layout::NHWC)
+ {
+ return model::operation::PermuteNode::Type::NCHW_TO_NHWC;
+ }
+ else
+ {
+ return model::operation::PermuteNode::Type::COPY;
+ }
+ }();
+
+ fn->configure(input_object, output_object, shape, permuteType, param.data_type);
builder.append(std::move(fn));
});
virtual std::string id() = 0;
virtual void initialize() = 0;
- // NOTE Assume backend has only one type of operand layout
- virtual graph::operand::Layout getOperandLayout() = 0;
// Support subtensor allocation
virtual bool SupportSubTensorAlloc() = 0;
};
#include <stdint.h>
#include "graph/BackendSet.h"
+#include "Layout.h"
namespace neurun
{
void removeDefBackend(const backend::Backend *backend) { _def_backends.remove(backend); }
void removeUseBackend(const backend::Backend *backend) { _use_backends.remove(backend); }
+public:
+ void setLayout(const Layout &layout) { _layout = layout; }
+ const Layout &layout() const { return _layout; }
+
private:
Shape4D _shape;
BackendSet _def_backends;
BackendSet _use_backends;
+ Layout _layout{Layout::NHWC};
};
} // namespace operand
struct Param
{
- Type type;
const backend::Backend *input_backend;
const backend::Backend *output_backend;
};
virtual std::string getName() const override { return "Permute"; }
public:
- PermuteNode(const OperandIndex &input, const OperandIndex &output, Type type,
+ PermuteNode(const OperandIndex &input, const OperandIndex &output,
const backend::Backend *input_backend, const backend::Backend *output_backend,
model::DataType data_type = model::DataType::FLOAT32);
return;
}
- const auto output_backend = operand_li->def_backends().getOnlyElement();
- const auto output_layout = output_backend->config()->getOperandLayout();
+ const auto tensor = _operand_context->at(operand_index)->ptr();
+ const auto output_layout = tensor->layout();
+ // TODO Set input_layout to the frontend model's input layout
auto input_layout = graph::operand::Layout::NHWC;
if ((input_layout == graph::operand::Layout::NHWC) &&
(output_layout == graph::operand::Layout::NCHW))
source<PermutateSource<T>>(index, buffer, length, operand.shape());
return;
}
+ // TODO Support NCHW -> NHWC
source<CopySource<T>>(index, buffer, length, operand.shape());
}
{
const auto operand_index = _model->outputs.at(index);
const auto &operand = _model->operands.at(operand_index);
- const auto operand_li = _lower_info->operand.at(operand_index).get();
- const auto input_backend = operand_li->def_backends().getOnlyElement();
- const auto input_layout = input_backend->config()->getOperandLayout();
+ const auto tensor = _operand_context->at(operand_index)->ptr();
+ const auto input_layout = tensor->layout();
+ // TODO Set output_layout to the frontend model's output layout
auto output_layout = graph::operand::Layout::NHWC;
if ((input_layout == graph::operand::Layout::NCHW) &&
(output_layout == graph::operand::Layout::NHWC))
sink<PermutateSink<T>>(index, buffer, length, operand.shape());
return;
}
+ // TODO Support NHWC -> NCHW
sink<CopySink<T>>(index, buffer, length, operand.shape());
}
subg = nullptr;
});
- _subg_ctx->iterate([&](const model::SubgraphIndex &, model::Subgraph &subg) {
+ _subg_ctx->iterate([&](const model::SubgraphIndex &ind, model::Subgraph &subg) {
assert(subg.operations().size() > 0);
std::reverse(std::begin(subg.operations()), std::end(subg.operations()));
+
+ // TODO Set the layout when creating the subgraph instead
+ const std::string layout_str =
+ config::ConfigManager::instance().get<std::string>(config::ACL_DEFAULT_LAYOUT);
+ graph::operand::Layout layout;
+ if (layout_str == "NHWC")
+ {
+ layout = graph::operand::Layout::NHWC;
+ }
+ else if (layout_str == "NCHW")
+ {
+ layout = graph::operand::Layout::NCHW;
+ }
+ else
+ {
+ throw std::runtime_error("Invalid ACL_DEFAULT_LAYOUT settings");
+ }
+
+ // CPU supports only NHWC now
+ if (getLowerInfo(ind)->backend()->config()->id() == "cpu")
+ {
+ layout = graph::operand::Layout::NHWC;
+ }
+
+ // TODO Remove this workaround
+ // This implementation is a workaround:
+ // the unit that carries the layout could be an operation or a subgraph instead of an operand
+ const auto &operations = subg.operations();
+ for (const auto &operation : operations)
+ {
+ const auto inputs = operation.node->getInputs();
+ for (auto it = inputs.begin(); it != inputs.end(); ++it)
+ {
+ // This is a workaround:
+ // the layout could be tracked separately for `def` and `use`, as the backends are
+ operands_lower_info.at(*it)->setLayout(layout);
+ }
+
+ const auto outputs = operation.node->getOutputs();
+ for (auto it = outputs.begin(); it != outputs.end(); ++it)
+ {
+ operands_lower_info.at(*it)->setLayout(layout);
+ }
+ }
});
_subg_ctx->dump("merged and sorted operations without permutation");
auto input_def_backends = _graph.getLowerInfo(inp_indexes.at(0))->def_backends();
auto output_def_backends = _graph.getLowerInfo(out_indexes.at(0))->def_backends();
- auto input_layout = input_def_backends.getOnlyElement()->config()->getOperandLayout();
- auto output_layout = output_def_backends.getOnlyElement()->config()->getOperandLayout();
+ auto input_layout = _graph.getLowerInfo(inp_indexes.at(0))->layout();
+ auto output_layout = _graph.getLowerInfo(out_indexes.at(0))->layout();
if (input_def_backends.size() != 1 || output_def_backends.size() != 1)
{
for (auto index : inp_indexes)
{
auto op_backend_set = _graph.getLowerInfo(index)->def_backends();
- if (op_backend_set.size() != 1 ||
- input_layout != op_backend_set.getOnlyElement()->config()->getOperandLayout())
+ if (op_backend_set.size() != 1 || input_layout != _graph.getLowerInfo(index)->layout())
{
return false;
}
for (auto index : out_indexes)
{
auto op_backend_set = _graph.getLowerInfo(index)->def_backends();
- if (op_backend_set.size() != 1 ||
- output_layout != op_backend_set.getOnlyElement()->config()->getOperandLayout())
+ if (op_backend_set.size() != 1 || output_layout != _graph.getLowerInfo(index)->layout())
{
return false;
}
auto &operand = _graph.operands().at(operand_index);
+ // TODO Get the layout from the defining Subgraph instead
+ const std::string layout_str =
+ config::ConfigManager::instance().get<std::string>(config::ACL_DEFAULT_LAYOUT);
+ graph::operand::Layout input_layout;
+ if (layout_str == "NHWC")
+ {
+ input_layout = graph::operand::Layout::NHWC;
+ }
+ else if (layout_str == "NCHW")
+ {
+ input_layout = graph::operand::Layout::NCHW;
+ }
+ else
+ {
+ throw std::runtime_error("Invalid ACL_DEFAULT_LAYOUT settings");
+ }
+
+ // CPU supports only NHWC now
+ if (_graph.getLowerInfo(operand_index)->def_backends().getOnlyElement()->config()->id() == "cpu")
+ {
+ input_layout = graph::operand::Layout::NHWC;
+ }
+
// Generate output operand and permute operation
auto out_operand_index = _graph.addOperand(operand.shape(), operand.typeInfo());
// change model output if operand_index is model output index
{
model_outputs.replace(operand_index, out_operand_index);
}
- auto out_operand_li =
- nnfw::cpp14::make_unique<operand::LowerInfo>(operand::asShape4D(operand.shape()));
- out_operand_li->addDefBackend(backend);
- out_operand_li->addUseBackend(backend);
- _graph.setLowerInfo(out_operand_index, std::move(out_operand_li));
// Update LowerInfo of input operand
auto operand_lower_info = _graph.getLowerInfo(operand_index);
+ const auto output_layout = operand_lower_info->layout();
operand_lower_info->removeUseBackend(backend);
operand_lower_info->addUseBackend(operand_lower_info->def_backends().getOnlyElement());
+ operand_lower_info->setLayout(input_layout);
+
+ auto out_operand_li =
+ nnfw::cpp14::make_unique<operand::LowerInfo>(operand::asShape4D(operand.shape()));
+ out_operand_li->addDefBackend(backend);
+ out_operand_li->addUseBackend(backend);
+ out_operand_li->setLayout(output_layout);
+ _graph.setLowerInfo(out_operand_index, std::move(out_operand_li));
using PermuteNode = model::operation::PermuteNode;
auto input_backend = _graph.getLowerInfo(operand_index)->def_backends().getOnlyElement();
auto output_backend = _graph.getLowerInfo(out_operand_index)->def_backends().getOnlyElement();
- // Find Permutation Type
- auto type = [&]() {
- auto input_layout = input_backend->config()->getOperandLayout();
- auto output_layout = output_backend->config()->getOperandLayout();
-
- if (input_layout == graph::operand::Layout::NHWC &&
- output_layout == graph::operand::Layout::NCHW)
- {
- return PermuteNode::Type::NHWC_TO_NCHW;
- }
- else if (input_layout == graph::operand::Layout::NCHW &&
- output_layout == graph::operand::Layout::NHWC)
- {
- return PermuteNode::Type::NCHW_TO_NHWC;
- }
- else
- {
- return PermuteNode::Type::COPY;
- }
- }();
-
// Insert permute operation to the graph
- auto insert_node = nnfw::cpp14::make_unique<PermuteNode>(operand_index, out_operand_index, type,
+ auto insert_node = nnfw::cpp14::make_unique<PermuteNode>(operand_index, out_operand_index,
input_backend, output_backend);
auto node_index = _graph.operations().append(std::move(insert_node));
void PermuteNode::accept(OperationVisitor &&v) const { v.visit(*this); }
-PermuteNode::PermuteNode(const OperandIndex &input, const OperandIndex &output, Type type,
+PermuteNode::PermuteNode(const OperandIndex &input, const OperandIndex &output,
const backend::Backend *input_backend,
const backend::Backend *output_backend, model::DataType data_type)
- : model::Operation{OperandConstraint::createExact(1u)},
- _param{type, input_backend, output_backend}, _dataType{data_type}
+ : model::Operation{OperandConstraint::createExact(1u)}, _param{input_backend, output_backend},
+ _dataType{data_type}
{
setInputs({input});
setOutputs({output});