for (auto it = _initializer_ctx.begin(); it != _initializer_ctx.end(); ++it)
{
const ::neurun::graph::operand::Index operand_index{it->first};
- auto objects = _plan.operands().at(operand_index);
+ auto object = _plan.operands().at(operand_index);
- for (auto object : objects)
- {
- object->access(it->second);
- }
+ object->access(it->second);
}
}
#include "Context.h"
+#include <cassert>
+
namespace neurun
{
namespace codegen
/**
 * @brief Registers @p object as THE object backing operand @p id.
 *
 * The holder keeps exactly one object per operand id; registering a second
 * object for the same id is a programming error (checked by assert in debug
 * builds).
 *
 * @param id     Operand index whose backing object is being registered
 * @param object Backend object to associate with @p id
 * @return *this, to allow chained set() calls
 */
Context &Context::set(const graph::operand::Index &id,
                      const std::shared_ptr<backend::operand::IObject> &object)
{
  // Only one object for an id
  assert(_objects.find(id.value()) == _objects.end());
  _objects[id.value()] = object;
  return (*this);
}
}
public:
- const std::vector<std::shared_ptr<backend::operand::IObject>> &
- at(const graph::operand::Index &ind) const
+ std::shared_ptr<backend::operand::IObject> at(const graph::operand::Index &ind) const
{
return _objects.at(ind.asInt());
}
- std::vector<std::shared_ptr<backend::operand::IObject>> &at(const graph::operand::Index &ind)
+ std::shared_ptr<backend::operand::IObject> &at(const graph::operand::Index &ind)
{
return _objects.at(ind.asInt());
}
private:
- std::map<int, std::vector<std::shared_ptr<backend::operand::IObject>>> _objects;
+ std::map<int, std::shared_ptr<backend::operand::IObject>> _objects;
};
} // namespace operand
neurun::graph::operand::IO::Index input_index{n};
::neurun::graph::operand::Index index{model.getInputs().at(input_index)};
- auto objects = plan.operands().at(index);
+ auto object = plan.operands().at(index);
- for (auto object : objects)
- {
- object->access(setter);
- }
+ object->access(setter);
}
const auto &operations = execution->plan().operations();
neurun::graph::operand::IO::Index output_index{n};
::neurun::graph::operand::Index index{model.getOutputs().at(output_index)};
- auto objects = plan.operands().at(index);
+ auto object = plan.operands().at(index);
- for (auto object : objects)
- {
- object->access(getter);
- }
+ object->access(getter);
}
return ANEURALNETWORKS_NO_ERROR;
lower_info->addUseBackend(_backend_resolver->getDefaultBackend());
}
+ // Add DefBackend constants same as UseBackend
+ // NOTE This assumes a constant operand is used by only one operation
+ _operations.iterate([&](const operation::Index &, operation::Node &node) {
+ // LowerInfo for input operands
+ for (auto operand : node.getInputs())
+ {
+ auto &&lower_info = operands_lower_info.at(operand);
+ if (lower_info->def_backends().empty())
+ {
+ lower_info->addDefBackend(lower_info->use_backends().getOnlyElement());
+ }
+ }
+ });
+
// Set LowerInfo for each operand from the operand::LowerInfo holder
_operands.iterate([&](const operand::Index &index, operand::Object &object) {
object.lower_info(std::move(operands_lower_info[index]));
/**
 * @brief Registers every operand's tensor info with the tensor builder of
 *        each backend that defines it, and collects the builders touched.
 *
 * Iterates all operands of the graph (rather than walking operations, which
 * would visit shared operands multiple times) and, for each backend listed in
 * the operand's def_backends lower-info, marks the operand's shape/type info
 * on that backend's tensor builder.
 *
 * @return The set of tensor builders that had at least one tensor marked
 */
backend::TensorBuilderSet Linear::markTensors() const
{
  backend::TensorBuilderSet tensor_builders;
  _graph.operands().iterate(
      [&](const graph::operand::Index &ind, const graph::operand::Object &obj) {
        for (auto backend : obj.lower_info()->def_backends())
        {
          auto tensor_builder = backend->tensor_builder();
          const auto info = ::internal::asTensorInfo(obj.shape(), obj.typeInfo());

          tensor_builder->mark(ind, info);

          tensor_builders.insert(tensor_builder);
        }
      });
  return tensor_builders;
}