--- /dev/null
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_COMPILER_CODE_WITH_INFO_H__
+#define __NEURUN_COMPILER_CODE_WITH_INFO_H__
+
+#include <memory>
+
+#include "compiler/Linear.h"
+#include "exec/IFunction.h"
+
+namespace neurun
+{
+namespace compiler
+{
+
+// Pairs one lowered element (its subgraph + lower info) with the kernel
+// function that was generated for it, so the per-element info survives
+// kernel generation and can travel with the code into the executor.
+struct CodeWithInfo
+{
+  Linear::Element elem;                // lowered element this code was generated from
+  std::unique_ptr<exec::IFunction> fn; // generated kernel function (owned)
+
+  // Copies the element info and takes ownership of the generated function.
+  CodeWithInfo(const Linear::Element &element, std::unique_ptr<exec::IFunction> &&function)
+      : elem{element}, fn{std::move(function)}
+  {
+  }
+};
+
+} // namespace compiler
+} // namespace neurun
+
+#endif // __NEURUN_COMPILER_CODE_WITH_INFO_H__
#include "backend/IShapeFixer.h"
#include "backend/ITensorRegister.h"
#include "cpp14/memory.h"
+#include "CodeWithInfo.h"
namespace neurun
{
auto operand_context = std::make_shared<OperandContext>();
const auto &operands = graph.operands();
- // Compilation result will be filled in operand_context and operation_sequence
- auto function_sequence = std::make_shared<exec::FunctionSequence>();
-
// linearize
assert(!graph.isBuildingPhase());
auto linear = nnfw::cpp14::make_unique<Linear>(graph);
class ExecutionBuilder final : public IExecutionBuilder
{
public:
-  ExecutionBuilder(exec::FunctionSequence &functions) : _functions{functions}
+  // Stores the generated function together with the element set by the most
+  // recent setNextElem() call, instead of appending to a FunctionSequence.
+  void append(std::unique_ptr<exec::IFunction> &&f) override
  {
-    // DO NOTHING
+    _code.emplace_back(_next_elem, std::move(f));
  }
-  public:
-  void append(std::unique_ptr<::neurun::exec::IFunction> &&f) override
-  {
-    _functions.append(std::move(f));
-  }
+  // Must be called before generating kernels for `next_elem`: every function
+  // appended afterwards is tagged with this element.
+  void setNextElem(const compiler::Linear::Element &next_elem) { _next_elem = next_elem; }
+  // Transfers the accumulated (element, function) pairs out via move.
+  std::vector<CodeWithInfo> releaseCode() { return std::move(_code); }
private:
-  exec::FunctionSequence &_functions;
+  compiler::Linear::Element _next_elem;
+  std::vector<CodeWithInfo> _code;
};
-  auto execution_builder = nnfw::cpp14::make_unique<ExecutionBuilder>(*function_sequence);
+  // Stack-local builder: collects (element, function) pairs rather than
+  // filling a shared FunctionSequence.
+  ExecutionBuilder builder;
// Generate kernels
linear->iterate([&](const compiler::Linear::Element &element) {
auto backend = element.lower_info->backend();
+    // Tag every function generated below with the element it belongs to.
+    builder.setNextElem(element);
auto kernel_gen = graph.backend_resolver()->getBackendContext(backend)->kernel_gen;
-    kernel_gen->generate(*element.subgraph, execution_builder.get());
+    kernel_gen->generate(*element.subgraph, &builder);
});
+  // Take ownership of the generated code; the builder is done after this.
+  auto code = builder.releaseCode();
+
for (auto &tensor_builder : tensor_builders)
{
tensor_builder->allocateConsts();
graph.backend_resolver()->getBackendContext(backend)->constant_initializer->run();
}
-  function_sequence->iterate([&](exec::IFunction &ifunc) {
+  // prepare() each generated function in order.
+  for (auto &&e : code)
+  {
// NOTE. It may need avoiding prepare() for some operations
// Ref: https://github.sec.samsung.net/STAR/nnfw/issues/7326
-    ifunc.prepare();
+    e.fn->prepare();
for (auto &tensor_builder : tensor_builders)
{
tensor_builder->postFunctionPrepare();
}
-  });
+  }
for (auto &tensor_builder : tensor_builders)
{
tensor_mgrs->insert(tensor_builder->releaseTensorManager());
}
-  return new exec::LinearExecutor{graph.shareModel(), graph.releaseSubgraphs(),
-                                  operand_context, graph.releaseLowerInfo(),
-                                  std::move(tensor_mgrs), function_sequence};
+  // The executor now owns the generated code (moved), not a shared sequence.
+  auto exec =
+      new exec::LinearExecutor{graph.shareModel(), graph.releaseSubgraphs(), operand_context,
+                               graph.releaseLowerInfo(), std::move(tensor_mgrs), std::move(code)};
+
+  // Optionally attach a Chrome-tracing observer when TRACE_FILEPATH is set.
+  const std::string trace_filepath = util::getConfigString(util::config::TRACE_FILEPATH);
+  if (!trace_filepath.empty())
+  {
+    std::unique_ptr<exec::IExecutionObserver> ctp =
+        nnfw::cpp14::make_unique<exec::ChromeTracingObserver>(trace_filepath);
+    exec->addObserver(std::move(ctp));
+  }
+
+  return exec;
}
exec::IExecutor *ExecutorFactory::createDataflowExecutor(graph::Graph &graph, bool parallel)
#include "ExecutorBase.h"
#include "compiler/Linear.h"
#include "exec/FunctionSequence.h"
+#include "compiler/CodeWithInfo.h"
namespace neurun
{
const std::shared_ptr<compiler::OperandContext> &operand_context,
std::unique_ptr<graph::LowerInfoMap> lower_info,
std::unique_ptr<backend::TensorManagerSet> tensor_mgrs,
-                   const std::shared_ptr<exec::FunctionSequence> &fn_seq)
+                   // Sink parameter: the executor takes sole ownership of the code.
+                   std::vector<compiler::CodeWithInfo> &&code)
: ExecutorBase{model, std::move(subgraphs), operand_context, std::move(lower_info),
std::move(tensor_mgrs)},
-      _fn_seq{fn_seq}
+      _code{std::move(code)}
{
}
void executeImpl(void) override;
private:
-  std::shared_ptr<exec::FunctionSequence> _fn_seq;
+  // (element, function) pairs produced at compile time — presumably consumed
+  // by executeImpl(); its body is defined elsewhere (TODO confirm).
+  std::vector<compiler::CodeWithInfo> _code;
};
} // namespace exec