// TODO Add optimization passes
plan_builder.finalize(tensor_builders);
- ConstantInitializer{*_model, *operand_context}();
+ ConstantInitializer{*_model, *operand_context, *linear->getLowerInfo()}();
/********************************
* Code generation phase finished
********************************/
auto plan = std::make_shared<Plan>(operand_context, operation_sequence);
_executor =
- std::make_shared<exec::Executor>(_model->shareModel(), _model->releaseLowerInfo(), plan);
+ std::make_shared<exec::Executor>(_model->shareModel(), linear->releaseLowerInfo(), plan);
_state = State::COMPILED;
}
#include "util/feature/nchw/View.h"
#include "misc/feature/IndexIterator.h"
#include "util/logging.h"
-#include "graph/operand/LowerInfo.h"
+#include "graph/LowerInfoMap.h"
namespace neurun
{
namespace compiler
{
-ConstantInitializer::ConstantInitializer(const graph::Graph &graph, operand::Context &operands)
- : _graph{graph}, _operands{operands}
+ConstantInitializer::ConstantInitializer(const graph::Graph &graph, operand::Context &operands,
+ const graph::LowerInfoMap &lower_info_map)
+ : _graph{graph}, _operands{operands}, _lower_info_map{lower_info_map}
{
}
const neurun::model::operand::Object &model_obj)
{
neurun::model::operand::Index index(ind);
- auto layout =
- _graph.getLowerInfo(ind)->def_backends().getOnlyElement()->config()->getOperandLayout();
+
+  graph::operand::LowerInfo *lower_info = nullptr;
+  {
+    auto itr = _lower_info_map.operand.find(ind);
+    assert(itr != _lower_info_map.operand.end());
+    lower_info = itr->second.get();
+    assert(lower_info);
+  }
+
+ auto layout = lower_info->def_backends().getOnlyElement()->config()->getOperandLayout();
const auto shape = model_obj.shape();
auto base = reinterpret_cast<const T *>(model_obj.data().base());
auto size = model_obj.data().size();
namespace neurun
{
+namespace graph
+{
+
+struct LowerInfoMap;
+
+} // namespace graph
+} // namespace neurun
+
+namespace neurun
+{
namespace compiler
{
class ConstantInitializer
{
public:
- ConstantInitializer(const graph::Graph &graph, operand::Context &operands);
+ // TODO Change std::shared_ptr<Model> instead of Graph
+ ConstantInitializer(const graph::Graph &graph, operand::Context &operands,
+ const graph::LowerInfoMap &lower_info_map);
void operator()();
private:
const graph::Graph &_graph;
operand::Context &_operands;
+ const graph::LowerInfoMap &_lower_info_map;
};
} // namespace compiler
{
assert(_phase == Phase::MODEL);
- auto linear = nnfw::cpp14::make_unique<linear::Linear>(*this);
+ auto linear = nnfw::cpp14::make_unique<linear::Linear>(*this, releaseLowerInfo());
// TODO Move the operations and operands to linear object
return std::move(linear);
namespace linear
{
-Linear::Linear(const graph::Graph &graph) : _graph(graph)
+Linear::Linear(const graph::Graph &graph, std::unique_ptr<graph::LowerInfoMap> lower_info_map)
+ : _graph(graph), _lower_info_map(std::move(lower_info_map))
{
+ assert(_lower_info_map);
+
// TODO: Move this code to graph
// Linearize graph with subgraphs by topological sort while assuming that
{
// Assume that the backend of all nodes on a subgraph are identified on the subgraph
const auto &first_ind = subgraph->operations()[0].index;
- auto lower_info = _graph.getLowerInfo(first_ind);
+ auto lower_info = getLowerInfo(first_ind);
_elements.emplace_back(std::move(subgraph), lower_info);
}
std::function<void(const model::operand::Index &ind, ITensorBuilderPtr)>;
const auto &graph = _graph;
- auto iterTensorBuilders = [&graph](const model::operand::Index &ind, FnOnTensorBuilder fn) {
- const auto lower_info = graph.getLowerInfo(ind);
+ auto iterTensorBuilders = [this, &graph](const model::operand::Index &ind, FnOnTensorBuilder fn) {
+ const auto lower_info = getLowerInfo(ind);
for (auto backend : lower_info->def_backends())
{
auto tensor_builder = backend->tensor_builder();
_graph.operands().iterate(
[&](const model::operand::Index &ind, const model::operand::Object &obj) {
- const auto lower_info = graph.getLowerInfo(ind);
+ const auto lower_info = getLowerInfo(ind);
uses_map[ind] = obj.getUses().size();
// If a tensor is a constant, increase the use of the tensor.
}
}
+const graph::operation::LowerInfo *Linear::getLowerInfo(const model::operation::Index &index) const
+{
+ if (!_lower_info_map)
+ return nullptr;
+ auto itr = _lower_info_map->operation.find(index);
+ if (itr == _lower_info_map->operation.end())
+ return nullptr;
+ return itr->second.get();
+}
+
+const graph::operand::LowerInfo *Linear::getLowerInfo(const model::operand::Index &index) const
+{
+ if (!_lower_info_map)
+ return nullptr;
+ auto itr = _lower_info_map->operand.find(index);
+ if (itr == _lower_info_map->operand.end())
+ return nullptr;
+ return itr->second.get();
+}
+
} // namespace linear
} // namespace neurun
#include "model/operation/Subgraph.h"
#include "backend/interface/ITensorBuilder.h"
+#include "graph/LowerInfoMap.h"
namespace neurun
{
class Linear
{
public:
- Linear(const graph::Graph &graph);
+ // TODO Change std::shared_ptr<Model> instead of Graph
+ Linear(const graph::Graph &graph, std::unique_ptr<graph::LowerInfoMap> lower_info_map);
public:
Linear(const Linear &linear) = delete;
void iterate(const std::function<void(const Element &element)> &fn) const;
+ std::unique_ptr<graph::LowerInfoMap> releaseLowerInfo() { return std::move(_lower_info_map); }
+
+ graph::LowerInfoMap *getLowerInfo() { return _lower_info_map.get(); }
+
+private:
+ // TODO Replace these getLowerInfo methods with ones of LowerInfoMap in the future
+ const graph::operation::LowerInfo *getLowerInfo(const model::operation::Index &index) const;
+
+ const graph::operand::LowerInfo *getLowerInfo(const model::operand::Index &index) const;
+
private:
const graph::Graph &_graph;
+ std::unique_ptr<graph::LowerInfoMap> _lower_info_map;
std::vector<Element> _elements;
};