// NOTE This assumes an operand can have one layout, and only Permutate can have
// different layouts for input and output
const auto &def = *obj.getDef().list().cbegin();
- auto frontend_layout = model::Layout::UNKNOWN;
-
- if (_subgraphs->containsOperation(def))
+ auto frontend_layout = _subgraphs->at(_subgraphs->getOperation(def)).getLayout();
+ if (frontend_layout == model::Layout::UNKNOWN)
{
- frontend_layout = _subgraphs->at(_subgraphs->getOperation(def)).getLayout();
- if (frontend_layout == model::Layout::UNKNOWN)
- {
- const auto &use = *obj.getUses().list().cbegin();
- frontend_layout = _subgraphs->at(_subgraphs->getOperation(use)).getLayout();
- }
+ const auto &use = *obj.getUses().list().cbegin();
+ frontend_layout = _subgraphs->at(_subgraphs->getOperation(use)).getLayout();
}
-
const auto backend_layout = lower_info->def_factors().getOnlyElement().layout();
tensor_builder->registerTensorInfo(ind, info, frontend_layout, backend_layout, is_const);
}
for (const auto &ind : _model->outputs)
{
--uses_map[ind];
- if (uses_map[ind] == 0) // To prevent notifyLastUse from being called twice
- {
- tensor_builder_map[ind]->notifyLastUse(ind);
- }
+ assert(uses_map[ind] == 0);
+ tensor_builder_map[ind]->notifyLastUse(ind);
}
for (const auto &ind : constants)
{
--uses_map[ind];
- if (uses_map[ind] == 0) // To prevent notifyLastUse from being called twice
- {
- tensor_builder_map[ind]->notifyLastUse(ind);
- }
+ assert(uses_map[ind] == 0);
+ tensor_builder_map[ind]->notifyLastUse(ind);
}
assert(std::all_of(
_subgraphs->dump("merged and sorted operations without permutation");
- const auto default_backend = backend::BackendManager::instance().getDefault();
- for (auto index : _model->inputs)
+// NOTE This is the desired way to handle model inputs and outputs. However, getDefaultBackend()
+// is cpu-backend dependent, so for now we cannot use it.
+#if 0
+ // Add def backend to model input/output operand as default backend
+ for (auto index : getInputs())
{
auto &&lower_info = operands_lower_info.at(index);
- lower_info->addDefPermuteFactor(lower_info->use_factors().getOnlyElement());
+ lower_info->addDefBackend(_backend_resolver->getDefaultBackend());
}
- for (auto index : _model->outputs)
+
+ for (auto index : getOutputs())
{
auto &&lower_info = operands_lower_info.at(index);
- if (_model->operands.at(index).isConstant())
- {
- lower_info->addDefPermuteFactor(operand::PermuteFactor{
- default_backend,
- model::Layout::NHWC // TODO Get frontend layout of this node from IR
- });
- }
+ lower_info->addUseBackend(_backend_resolver->getDefaultBackend());
}
+#endif
// Add DefFactor constants same as UseFactor
// NOTE This assumes a constant operand is used by only one operation
auto &&lower_info = operands_lower_info.at(operand);
if (lower_info->def_factors().empty())
{
+ // NOTE Handling model inputs here is not ideal. See above NOTE comment.
+ // If it is a model input, not a constant
+ if (_model->inputs.contains(operand))
+ {
+ // If there are one or more use factors, any PermuteFactor is acceptable, so pick the first one
+ if (!lower_info->use_factors().empty())
+ {
+ lower_info->addDefPermuteFactor(*lower_info->use_factors().begin());
+ }
+ }
// If it is a constant
- if (!_model->inputs.contains(operand))
+ else
{
lower_info->addDefPermuteFactor(lower_info->use_factors().getOnlyElement());
}
// verify input and output
+ if (num_inputs == 0)
+ {
+ std::cerr << "[ ERROR ] "
+ << "No inputs in model => execution is not possible" << std::endl;
+ exit(1);
+ }
+
auto verifyInputTypes = [session]() {
uint32_t sz;
NNPR_ENSURE_STATUS(nnfw_input_size(session, &sz));