void state(State state) { _state = state; }
State state(void) const { return _state; }
-private:
- static std::shared_ptr<exec::IExecutor> createLinearExecutor(graph::Graph &model);
- static std::shared_ptr<exec::IExecutor> createDataflowExecutor(graph::Graph &model);
- static std::shared_ptr<exec::IExecutor> createParallelExecutor(graph::Graph &model);
-
/**
* @brief Check if model can compile
* @return @c true if model can compile, otherwise @c false
#include "compiler/Compiler.h"
-#include "OperationValidator.h"
-#include "SubTensorAnalyzer.h"
-#include "PlanBuilder.h"
-#include "ConstantInitializer.h"
#include "ParamChecker.h"
+#include "ExecutorFactory.h"
-#include "graph/dumper/Dumper.h"
#include "graph/operation/LowerInfo.h"
#include "dumper/dot/DotDumper.h"
#include "linear/Linear.h"
-#include "exec/LinearExecutor.h"
-#include "exec/DataflowExecutor.h"
-#include "exec/ParallelExecutor.h"
#include "exec/interp/ExecManager.h"
#include "util/config/ConfigManager.h"
const std::string executor_str =
config::ConfigManager::instance().get<std::string>(config::EXECUTOR);
- // TODO Extract Executor generation as a class
- if (executor_str == "Dataflow")
- {
- _executor = createDataflowExecutor(*_model);
- }
- else if (executor_str == "Parallel")
- {
- _executor = createParallelExecutor(*_model);
- }
- else
- {
- // (executor_str == "Linear") or other values
- _executor = createLinearExecutor(*_model);
- }
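+  // Look up the executor named by the EXECUTOR config value. The factory
+  // returns a raw pointer; ownership is taken over by the shared_ptr below.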
+ _executor =
+ std::shared_ptr<exec::IExecutor>{ExecutorFactory::instance().create(executor_str, *_model)};
/********************************
* Code generation phase finished
_state = State::COMPILED;
}
-std::shared_ptr<exec::IExecutor> Compiler::createLinearExecutor(graph::Graph &model)
-{
- auto operand_context = std::make_shared<OperandContext>();
- const auto &operands = model.operands();
-
- // Compilation result will be filled in operand_context and operation_sequence
- auto operation_sequence = std::make_shared<operation::Sequence>();
-
- // linearize
- auto linear = model.linearize();
-
- // Dump ops
- linear->accept(neurun::graph::dumper::Dumper{});
-
- linear->accept(OperationValidator{operands});
-
- /*************************************************
- * Backend dependent analysis & optimization phase
- *************************************************/
-
- // SubTensorInfo should be generated after lower, before stage generation and finalize
- // because SubTensorAnalyzer assume that insert permutation is already finished
- // lower: decide backend and insert permutation
- // stage generation: prepare codegen to optimization
- // finalize: generate tensor using subtensor info, then execute stage
- // Generated SubTensorInfo is in operand(Object)
- // for easy pass SubTensorInfo to plan builder and tensor builder
- linear->accept(SubTensorAnalyzer{*linear->getLowerInfo(), model.operands()});
-
- /**********************************************************
- * Backend dependent analysis & optimization phase finished
- **********************************************************/
-
- /***********************
- * Code generation phase
- ***********************/
-
- PlanBuilder plan_builder{*operand_context, *operation_sequence};
-
- // Plan building
- linear->iterate([&](const linear::Element &element) {
- auto backend = element.lower_info->backend();
-
- // Generate Stage
- auto stage_gen = backend->stage_gen();
- plan_builder.addStage(stage_gen->generate(*element.subgraph));
- });
-
- auto tensor_builders = linear->planTensors();
-
- // TODO Add optimization passes
- plan_builder.finalize(tensor_builders);
-
- ConstantInitializer{model, *operand_context, *linear->getLowerInfo()}();
-
- auto plan = std::make_shared<Plan>(operation_sequence);
- return std::make_shared<exec::LinearExecutor>(
- model.shareModel(), linear->releaseSubgraphContext(), operand_context,
- linear->releaseLowerInfo(), linear->releaseElements(), plan);
-}
-
-std::shared_ptr<exec::IExecutor> Compiler::createDataflowExecutor(graph::Graph &model)
-{
- auto operand_context = std::make_shared<OperandContext>();
- std::unordered_map<model::SubgraphIndex, std::unique_ptr<backend::IStage>> stages;
-
- model.subg_ctx().iterate(
- [&](const model::SubgraphIndex &subg_index, const model::Subgraph &subg) {
- auto backend = model.getLowerInfo(subg_index)->backend();
-
- // Generate Stage
- auto stage_gen = backend->stage_gen();
- stages[subg_index] = stage_gen->generate(subg);
- });
-
- backend::TensorBuilderSet tensor_builders;
-
- model.operands().iterate([&](const model::OperandIndex &ind, const model::Operand &obj) {
- const auto lower_info = model.getLowerInfo(ind);
- for (auto backend : lower_info->def_backends())
- {
- auto tensor_builder = backend->tensor_builder();
- const auto info = obj.info();
- tensor_builder->registerTensorInfo(ind, info);
- tensor_builder->notifyFirstUse(ind);
- tensor_builders.insert(tensor_builder);
- }
- });
-
- for (auto &tensor_builder : tensor_builders)
- {
- tensor_builder->prepare();
-
- // Wrap tensors as Object and store them to plan
- tensor_builder->iterate([&](const model::OperandIndex &index) {
- auto object = tensor_builder->wrapTensor(index);
- operand_context->set(index, object);
- });
- }
-
- // TODO Extract this to another class
- // IExecutionBuilder should be moved to `compiler/IExecutionBuilder.h` from
- // `backend/IStageGenerator.h`.
- class ExecutionBuilder : public IExecutionBuilder
- {
- public:
- void append(std::unique_ptr<exec::IFunction> &&fn) override
- {
- auto itr = _code_map.find(_next_index);
- if (itr == _code_map.end())
- {
- _code_map[_next_index] = nnfw::cpp14::make_unique<exec::FunctionSequence>();
- }
- _code_map[_next_index]->append(std::move(fn));
- };
-
- // TODO Remove this method and make `append` to get index value as an argument
- void setNextIndex(const model::SubgraphIndex next_index) { _next_index = next_index; }
-
- exec::DataflowExecutor::CodeMap &&releaseCodeMap() { return std::move(_code_map); }
-
- private:
- model::SubgraphIndex _next_index;
- exec::DataflowExecutor::CodeMap _code_map;
- };
-
- ExecutionBuilder execution_builder;
-
- for (auto &&itr : stages)
- {
- // TODO This approach is temporal. See declaration of `setNextIndex`.
- execution_builder.setNextIndex(itr.first);
- (*itr.second)(execution_builder);
- }
-
- for (const auto &tensor_builder : tensor_builders)
- {
- tensor_builder->allocate();
- }
-
- auto lower_info = model.releaseLowerInfo();
-
- ConstantInitializer{model, *operand_context, *lower_info}();
-
- return std::make_shared<exec::DataflowExecutor>(
- model.shareModel(), std::move(model.releaseSubgraphContext()), operand_context,
- std::move(lower_info), std::move(execution_builder.releaseCodeMap()));
-}
-
-std::shared_ptr<exec::IExecutor> Compiler::createParallelExecutor(graph::Graph &model)
-{
- auto operand_context = std::make_shared<OperandContext>();
- std::unordered_map<model::SubgraphIndex, std::unique_ptr<backend::IStage>> stages;
-
- model.subg_ctx().iterate(
- [&](const model::SubgraphIndex &subg_index, const model::Subgraph &subg) {
- auto backend = model.getLowerInfo(subg_index)->backend();
-
- // Generate Stage
- auto stage_gen = backend->stage_gen();
- stages[subg_index] = stage_gen->generate(subg);
- });
-
- backend::TensorBuilderSet tensor_builders;
-
- model.operands().iterate([&](const model::OperandIndex &ind, const model::Operand &obj) {
- const auto lower_info = model.getLowerInfo(ind);
- for (auto backend : lower_info->def_backends())
- {
- auto tensor_builder = backend->tensor_builder();
- const auto info = obj.info();
- tensor_builder->registerTensorInfo(ind, info);
- tensor_builder->notifyFirstUse(ind);
- tensor_builders.insert(tensor_builder);
- }
- });
-
- for (auto &tensor_builder : tensor_builders)
- {
- tensor_builder->prepare();
-
- // Wrap tensors as Object and store them to plan
- tensor_builder->iterate([&](const model::OperandIndex &index) {
- auto object = tensor_builder->wrapTensor(index);
- operand_context->set(index, object);
- });
- }
-
- // TODO Extract this to another class
- // IExecutionBuilder should be moved to `compiler/IExecutionBuilder.h` from
- // `backend/IStageGenerator.h`.
- class ExecutionBuilder : public IExecutionBuilder
- {
- public:
- void append(std::unique_ptr<exec::IFunction> &&fn) override
- {
- auto key = _next_index;
- auto itr = _code_map.find(key);
- if (itr == _code_map.end())
- {
- _code_map[key] = nnfw::cpp14::make_unique<exec::FunctionSequence>();
- }
- _code_map[key]->append(std::move(fn));
- };
-
- // TODO Remove this method and make `append` to get index value as an argument
- void setNextIndex(const model::SubgraphIndex next_index) { _next_index = next_index; }
-
- exec::ParallelExecutor::CodeMap &&releaseCodeMap() { return std::move(_code_map); }
-
- private:
- model::SubgraphIndex _next_index;
- exec::ParallelExecutor::CodeMap _code_map;
- };
-
- ExecutionBuilder execution_builder;
-
- for (auto &&itr : stages)
- {
- // TODO This approach is temporal. See declaration of `setNextIndex`.
- execution_builder.setNextIndex(itr.first);
- (*itr.second)(execution_builder);
- }
-
- for (const auto &tensor_builder : tensor_builders)
- {
- tensor_builder->allocate();
- }
-
- auto lower_info = model.releaseLowerInfo();
-
- ConstantInitializer{model, *operand_context, *lower_info}();
-
- return std::make_shared<exec::ParallelExecutor>(
- model.shareModel(), std::move(model.releaseSubgraphContext()), operand_context,
- std::move(lower_info), std::move(execution_builder.releaseCodeMap()));
-}
-
bool Compiler::checkCompilable()
{
// Disable compile phase
--- /dev/null
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ExecutorFactory.h"
+
+#include "exec/LinearExecutor.h"
+#include "exec/DataflowExecutor.h"
+#include "exec/ParallelExecutor.h"
+#include "linear/Linear.h"
+#include "graph/dumper/Dumper.h"
+#include "OperationValidator.h"
+#include "SubTensorAnalyzer.h"
+#include "PlanBuilder.h"
+#include "ConstantInitializer.h"
+
+namespace neurun
+{
+namespace compiler
+{
+
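+// The factory is a function-local static: constructed lazily on first use and,
+// since C++11, initialized in a thread-safe manner.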
+ExecutorFactory &ExecutorFactory::instance()
+{
+ static ExecutorFactory singleton;
+ return singleton;
+}
+
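+// Register the built-in executor kinds. The keys are the strings accepted as
+// the EXECUTOR configuration value.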
+ExecutorFactory::ExecutorFactory()
+{
+ _map["Linear"] = createLinearExecutor;
+ _map["Dataflow"] = createDataflowExecutor;
+ _map["Parallel"] = createParallelExecutor;
+}
+
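+// Note: unlike the if/else chain this replaces in Compiler (which fell back to
+// the Linear executor for unknown values), _map.at throws std::out_of_range
+// when `id` was not registered.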
+exec::IExecutor *ExecutorFactory::create(const std::string &id, graph::Graph &graph)
+{
+ return _map.at(id)(graph);
+}
+
+exec::IExecutor *ExecutorFactory::createLinearExecutor(graph::Graph &graph)
+{
+ auto operand_context = std::make_shared<OperandContext>();
+ const auto &operands = graph.operands();
+
+  // The compilation result will be filled into operand_context and operation_sequence
+ auto operation_sequence = std::make_shared<operation::Sequence>();
+
+ // linearize
+ auto linear = graph.linearize();
+
+ // Dump ops
+ linear->accept(neurun::graph::dumper::Dumper{});
+
+ linear->accept(OperationValidator{operands});
+
+ /*************************************************
+ * Backend dependent analysis & optimization phase
+ *************************************************/
+
+  // SubTensorInfo must be generated after lowering, but before stage generation and
+  // finalization, because SubTensorAnalyzer assumes that permutation insertion has
+  // already been done:
+  //   lower: decide the backend and insert permutations
+  //   stage generation: prepare code generation for optimization
+  //   finalize: generate tensors using the subtensor info, then execute the stages
+  // The generated SubTensorInfo is stored in the operand (Object) so that it can be
+  // passed easily to the plan builder and the tensor builder.
+ linear->accept(SubTensorAnalyzer{*linear->getLowerInfo(), graph.operands()});
+
+ /**********************************************************
+ * Backend dependent analysis & optimization phase finished
+ **********************************************************/
+
+ /***********************
+ * Code generation phase
+ ***********************/
+
+ PlanBuilder plan_builder{*operand_context, *operation_sequence};
+
+ // Plan building
+ linear->iterate([&](const linear::Element &element) {
+ auto backend = element.lower_info->backend();
+
+ // Generate Stage
+ auto stage_gen = backend->stage_gen();
+ plan_builder.addStage(stage_gen->generate(*element.subgraph));
+ });
+
+ auto tensor_builders = linear->planTensors();
+
+ // TODO Add optimization passes
+ plan_builder.finalize(tensor_builders);
+
+ ConstantInitializer{graph, *operand_context, *linear->getLowerInfo()}();
+
+ auto plan = std::make_shared<Plan>(operation_sequence);
+ return new exec::LinearExecutor{graph.shareModel(), linear->releaseSubgraphContext(),
+ operand_context, linear->releaseLowerInfo(),
+ linear->releaseElements(), plan};
+}
+
+exec::IExecutor *ExecutorFactory::createDataflowExecutor(graph::Graph &graph)
+{
+ auto operand_context = std::make_shared<OperandContext>();
+ std::unordered_map<model::SubgraphIndex, std::unique_ptr<backend::IStage>> stages;
+
+ graph.subg_ctx().iterate(
+ [&](const model::SubgraphIndex &subg_index, const model::Subgraph &subg) {
+ auto backend = graph.getLowerInfo(subg_index)->backend();
+
+ // Generate Stage
+ auto stage_gen = backend->stage_gen();
+ stages[subg_index] = stage_gen->generate(subg);
+ });
+
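+  // Collect the tensor builders of every backend that defines an operand, and
+  // register each operand's tensor info with those builders.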
+ backend::TensorBuilderSet tensor_builders;
+
+ graph.operands().iterate([&](const model::OperandIndex &ind, const model::Operand &obj) {
+ const auto lower_info = graph.getLowerInfo(ind);
+ for (auto backend : lower_info->def_backends())
+ {
+ auto tensor_builder = backend->tensor_builder();
+ const auto info = obj.info();
+ tensor_builder->registerTensorInfo(ind, info);
+ tensor_builder->notifyFirstUse(ind);
+ tensor_builders.insert(tensor_builder);
+ }
+ });
+
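+  // Prepare each tensor builder and expose its tensors through operand_context.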
+ for (auto &tensor_builder : tensor_builders)
+ {
+ tensor_builder->prepare();
+
+    // Wrap tensors as Objects and store them in the plan
+ tensor_builder->iterate([&](const model::OperandIndex &index) {
+ auto object = tensor_builder->wrapTensor(index);
+ operand_context->set(index, object);
+ });
+ }
+
+  // TODO Extract this to another class
+  // IExecutionBuilder should be moved from `backend/IStageGenerator.h` to
+  // `compiler/IExecutionBuilder.h`.
+ class ExecutionBuilder : public IExecutionBuilder
+ {
+ public:
+ void append(std::unique_ptr<exec::IFunction> &&fn) override
+ {
+ auto itr = _code_map.find(_next_index);
+ if (itr == _code_map.end())
+ {
+ _code_map[_next_index] = nnfw::cpp14::make_unique<exec::FunctionSequence>();
+ }
+ _code_map[_next_index]->append(std::move(fn));
+    }
+
+    // TODO Remove this method and make `append` take the index value as an argument
+ void setNextIndex(const model::SubgraphIndex next_index) { _next_index = next_index; }
+
+ exec::DataflowExecutor::CodeMap &&releaseCodeMap() { return std::move(_code_map); }
+
+ private:
+ model::SubgraphIndex _next_index;
+ exec::DataflowExecutor::CodeMap _code_map;
+ };
+
+ ExecutionBuilder execution_builder;
+
+ for (auto &&itr : stages)
+ {
+    // TODO This approach is temporary. See the declaration of `setNextIndex`.
+ execution_builder.setNextIndex(itr.first);
+ (*itr.second)(execution_builder);
+ }
+
+ for (const auto &tensor_builder : tensor_builders)
+ {
+ tensor_builder->allocate();
+ }
+
+ auto lower_info = graph.releaseLowerInfo();
+
+ ConstantInitializer{graph, *operand_context, *lower_info}();
+
+  return new exec::DataflowExecutor{graph.shareModel(), graph.releaseSubgraphContext(),
+                                    operand_context, std::move(lower_info),
+                                    execution_builder.releaseCodeMap()};
+}
+
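+// NOTE This is nearly identical to createDataflowExecutor; only the concrete
+// executor type (and thus its CodeMap type) differs.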
+exec::IExecutor *ExecutorFactory::createParallelExecutor(graph::Graph &graph)
+{
+ auto operand_context = std::make_shared<OperandContext>();
+ std::unordered_map<model::SubgraphIndex, std::unique_ptr<backend::IStage>> stages;
+
+ graph.subg_ctx().iterate(
+ [&](const model::SubgraphIndex &subg_index, const model::Subgraph &subg) {
+ auto backend = graph.getLowerInfo(subg_index)->backend();
+
+ // Generate Stage
+ auto stage_gen = backend->stage_gen();
+ stages[subg_index] = stage_gen->generate(subg);
+ });
+
+ backend::TensorBuilderSet tensor_builders;
+
+ graph.operands().iterate([&](const model::OperandIndex &ind, const model::Operand &obj) {
+ const auto lower_info = graph.getLowerInfo(ind);
+ for (auto backend : lower_info->def_backends())
+ {
+ auto tensor_builder = backend->tensor_builder();
+ const auto info = obj.info();
+ tensor_builder->registerTensorInfo(ind, info);
+ tensor_builder->notifyFirstUse(ind);
+ tensor_builders.insert(tensor_builder);
+ }
+ });
+
+ for (auto &tensor_builder : tensor_builders)
+ {
+ tensor_builder->prepare();
+
+    // Wrap tensors as Objects and store them in the plan
+ tensor_builder->iterate([&](const model::OperandIndex &index) {
+ auto object = tensor_builder->wrapTensor(index);
+ operand_context->set(index, object);
+ });
+ }
+
+  // TODO Extract this to another class
+  // IExecutionBuilder should be moved from `backend/IStageGenerator.h` to
+  // `compiler/IExecutionBuilder.h`.
+ class ExecutionBuilder : public IExecutionBuilder
+ {
+ public:
+ void append(std::unique_ptr<exec::IFunction> &&fn) override
+ {
+ auto key = _next_index;
+ auto itr = _code_map.find(key);
+ if (itr == _code_map.end())
+ {
+ _code_map[key] = nnfw::cpp14::make_unique<exec::FunctionSequence>();
+ }
+ _code_map[key]->append(std::move(fn));
+    }
+
+    // TODO Remove this method and make `append` take the index value as an argument
+ void setNextIndex(const model::SubgraphIndex next_index) { _next_index = next_index; }
+
+ exec::ParallelExecutor::CodeMap &&releaseCodeMap() { return std::move(_code_map); }
+
+ private:
+ model::SubgraphIndex _next_index;
+ exec::ParallelExecutor::CodeMap _code_map;
+ };
+
+ ExecutionBuilder execution_builder;
+
+ for (auto &&itr : stages)
+ {
+    // TODO This approach is temporary. See the declaration of `setNextIndex`.
+ execution_builder.setNextIndex(itr.first);
+ (*itr.second)(execution_builder);
+ }
+
+ for (const auto &tensor_builder : tensor_builders)
+ {
+ tensor_builder->allocate();
+ }
+
+ auto lower_info = graph.releaseLowerInfo();
+
+ ConstantInitializer{graph, *operand_context, *lower_info}();
+
+  return new exec::ParallelExecutor{graph.shareModel(), graph.releaseSubgraphContext(),
+                                    operand_context, std::move(lower_info),
+                                    execution_builder.releaseCodeMap()};
+}
+
+} // namespace compiler
+} // namespace neurun
--- /dev/null
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_COMPILER_EXECUTOR_FACTORY_H__
+#define __NEURUN_COMPILER_EXECUTOR_FACTORY_H__
+
+#include <functional>
+#include <string>
+#include <unordered_map>
+
+#include "exec/IExecutor.h"
+#include "graph/Graph.h"
+
+namespace neurun
+{
+namespace compiler
+{
+
+class ExecutorFactory
+{
+public:
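+  // Returns the singleton factory instance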
+ static ExecutorFactory &instance();
+
+public:
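+  // Creates an executor for the given graph. Returns a raw pointer; the caller
+  // takes ownership (Compiler wraps it in a std::shared_ptr).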
+ exec::IExecutor *create(const std::string &id, graph::Graph &graph);
+
+private:
+ ExecutorFactory();
+
+private:
+ static exec::IExecutor *createLinearExecutor(graph::Graph &graph);
+ static exec::IExecutor *createDataflowExecutor(graph::Graph &graph);
+ static exec::IExecutor *createParallelExecutor(graph::Graph &graph);
+
+private:
+ std::unordered_map<std::string, std::function<exec::IExecutor *(graph::Graph &)>> _map;
+};
+
+} // namespace compiler
+} // namespace neurun
+
+#endif // __NEURUN_COMPILER_EXECUTOR_FACTORY_H__