class IShapeFixer;
struct ITensorBuilder;
// NOTE(review): diff hunk — converts BackendContext from `struct` to `class`
// with an explicit `public:` access specifier. All members remain publicly
// accessible, so this is a declaration-style change only; no behavior change.
-struct BackendContext
+class BackendContext
{
+public:
const Backend *backend;
std::shared_ptr<ITensorBuilder> tensor_builder;
std::shared_ptr<IConstantInitializer> constant_initializer;
std::shared_ptr<IShapeFixer> shape_fixer;
};
-struct Backend
+class Backend
{
+public:
virtual ~Backend() = default;
virtual std::shared_ptr<neurun::backend::IConfig> config() const = 0;
virtual std::unique_ptr<BackendContext> newContext(const model::Operands &operands) const = 0;
auto mem_mgrs = nnfw::cpp14::make_unique<backend::MemoryManagerSet>();
for (auto &tensor_builder : tensor_builders)
{
- mem_mgrs->insert(std::move(tensor_builder->releaseMemoryManager()));
+ mem_mgrs->insert(tensor_builder->releaseMemoryManager());
}
auto plan = std::make_shared<Plan>(function_sequence);
auto mem_mgrs = nnfw::cpp14::make_unique<backend::MemoryManagerSet>();
for (auto &tensor_builder : tensor_builders)
{
- mem_mgrs->insert(std::move(tensor_builder->releaseMemoryManager()));
+ mem_mgrs->insert(tensor_builder->releaseMemoryManager());
}
if (parallel)
{
return new exec::ParallelExecutor{
- graph.shareModel(), std::move(graph.releaseSubgraphs()),
+ graph.shareModel(), graph.releaseSubgraphs(),
operand_context, std::move(lower_info),
std::move(mem_mgrs), std::move(execution_builder->releaseCodeMap())};
}
else
{
auto exec = new exec::DataflowExecutor{
- graph.shareModel(), std::move(graph.releaseSubgraphs()),
+ graph.shareModel(), graph.releaseSubgraphs(),
operand_context, std::move(lower_info),
std::move(mem_mgrs), std::move(execution_builder->releaseCodeMap())};
if (util::getConfigBool(util::config::PROFILING_MODE))
throw std::runtime_error{"Too small length"};
}
- _io_desc.inputs.at(index.value()) =
- std::move(nnfw::cpp14::make_unique<InputDesc>(info, buffer, length));
+ _io_desc.inputs.at(index.value()) = nnfw::cpp14::make_unique<InputDesc>(info, buffer, length);
}
void Execution::setInput(const model::IOIndex &index, const model::TypeInfo &type,
throw std::runtime_error{"Too small length"};
}
- _io_desc.inputs.at(index.value()) =
- std::move(nnfw::cpp14::make_unique<InputDesc>(info, buffer, length));
+ _io_desc.inputs.at(index.value()) = nnfw::cpp14::make_unique<InputDesc>(info, buffer, length);
}
void Execution::setOutput(const model::IOIndex &index, void *buffer, size_t length)
throw std::runtime_error{"Too small length"};
}
- _io_desc.outputs.at(index.value()) =
- std::move(nnfw::cpp14::make_unique<OutputDesc>(info, buffer, length));
+ _io_desc.outputs.at(index.value()) = nnfw::cpp14::make_unique<OutputDesc>(info, buffer, length);
}
void Execution::setOutput(const model::IOIndex &index, const model::TypeInfo &type,
throw std::runtime_error{"Too small length"};
}
- _io_desc.outputs.at(index.value()) =
- std::move(nnfw::cpp14::make_unique<OutputDesc>(info, buffer, length));
+ _io_desc.outputs.at(index.value()) = nnfw::cpp14::make_unique<OutputDesc>(info, buffer, length);
}
void Execution::execute()
* limitations under the License.
*/
// NOTE(review): diff hunk — drops `static`, changing the array's linkage from
// internal to external (for a const namespace-scope variable, `static` is what
// forced internal linkage). `volatile` is kept, presumably so the otherwise
// unreferenced string is not optimized out of the binary — TODO confirm intent.
-volatile static const char info[] = "library information : runtime=neurun";
+volatile const char info[] = "library information : runtime=neurun";