namespace interp
{
-void Interpreter::setInput(const neurun::model::operand::IO::Index &,
- const neurun::model::operand::TypeInfo &,
- const neurun::model::operand::Shape &, const void *, size_t)
+void Interpreter::setInput(const neurun::model::operand::IO::Index &index,
+ const neurun::model::operand::TypeInfo &type,
+ const neurun::model::operand::Shape &shape, const void *buffer,
+ size_t length)
{
- throw std::runtime_error{"NYI: Interpreter setInput"};
+ using ::neurun::model::operand::DataType;
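+  // Wrap the caller's buffer in a Source of the matching element type;
+  // execute() is expected to push it into the input tensor later.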
+ switch (type.type())
+ {
+ case DataType::SCALAR_FLOAT32:
+ case DataType::TENSOR_FLOAT32:
+ source<Source<float>>(index, reinterpret_cast<const float *>(buffer), length);
+ break;
+ case DataType::SCALAR_INT32:
+ case DataType::TENSOR_INT32:
+ source<Source<int32_t>>(index, reinterpret_cast<const int32_t *>(buffer), length);
+ break;
+ case DataType::SCALAR_UINT32:
+ source<Source<uint32_t>>(index, reinterpret_cast<const uint32_t *>(buffer), length);
+ break;
+ case DataType::TENSOR_QUANT8_ASYMM:
+ source<Source<uint8_t>>(index, reinterpret_cast<const uint8_t *>(buffer), length);
+ break;
+    default:
+      throw std::runtime_error{"Interpreter setInput: unsupported operand type"};
+ }
+
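+  // Translate the I/O index into the model's operand index and remember the
+  // tensor shape/type for this input.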
+ const auto input_index = _model->inputs.at(index);
+ const auto info = compiler::TensorInfo(shape, type);
+ _tensor_info_map.insert({input_index, info});
}
-void Interpreter::setOutput(const neurun::model::operand::IO::Index &,
- const neurun::model::operand::TypeInfo &,
- const neurun::model::operand::Shape &, void *, size_t)
+void Interpreter::setOutput(const neurun::model::operand::IO::Index &index,
+ const neurun::model::operand::TypeInfo &type,
+ const neurun::model::operand::Shape &shape, void *buffer, size_t length)
{
- throw std::runtime_error{"NYI: Interpreter setOutput"};
+ using ::neurun::model::operand::DataType;
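+  // Symmetric to setInput: wrap the caller's buffer in a Sink of the matching
+  // element type; execute() is expected to pull the output tensor into it.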
+ switch (type.type())
+ {
+ case DataType::SCALAR_FLOAT32:
+ case DataType::TENSOR_FLOAT32:
+ sink<Sink<float>>(index, reinterpret_cast<float *>(buffer), length);
+ break;
+ case DataType::SCALAR_INT32:
+ case DataType::TENSOR_INT32:
+ sink<Sink<int32_t>>(index, reinterpret_cast<int32_t *>(buffer), length);
+ break;
+ case DataType::SCALAR_UINT32:
+ sink<Sink<uint32_t>>(index, reinterpret_cast<uint32_t *>(buffer), length);
+ break;
+ case DataType::TENSOR_QUANT8_ASYMM:
+ sink<Sink<uint8_t>>(index, reinterpret_cast<uint8_t *>(buffer), length);
+ break;
+    default:
+      throw std::runtime_error{"Interpreter setOutput: unsupported operand type"};
+ }
+
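+  // Remember the tensor shape/type for this output under its operand index.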
+ const auto output_index = _model->outputs.at(index);
+ const auto info = compiler::TensorInfo(shape, type);
+ _tensor_info_map.insert({output_index, info});
}
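+// NOTE execute() is still NYI in this patch. Once implemented, it would
+// presumably feed each stored Source into its input tensor and drain each
+// output tensor through its Sink, along these lines (ISource::push,
+// ISink::pull, and the tensor lookups are hypothetical names, not
+// confirmed by this diff):
+//
+//   for (uint32_t n = 0; n < _model->inputs.size(); ++n)
+//     _sources.at(n)->push(input_tensor(n));
+//   // ... run the operations ...
+//   for (uint32_t n = 0; n < _model->outputs.size(); ++n)
+//     _sinks.at(n)->pull(output_tensor(n));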
void Interpreter::execute(void) { throw std::runtime_error{"NYI: Interpreter execute"}; }
#ifndef __NEURUN_EXEC_INTERP_INTERPRETER_H_
#define __NEURUN_EXEC_INTERP_INTERPRETER_H_
-#include "compiler/Plan.h"
+#include "model/operand/IndexMap.h"
+#include "compiler/TensorInfo.h"
#include "exec/IExecutor.h"
+#include "exec/Source.h"
+#include "exec/Sink.h"
namespace neurun
{
void execute(void);
private:
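+  // Construct a concrete Source/Sink (e.g. Source<float>) in place and store
+  // it in the slot that corresponds to the given I/O index.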
+ template <typename T, typename... Args>
+ void source(const neurun::model::operand::IO::Index &index, Args &&... args)
+ {
+    _sources.at(index.value()) = std::unique_ptr<T>{new T{std::forward<Args>(args)...}};
+ }
+ template <typename T, typename... Args>
+ void sink(const neurun::model::operand::IO::Index &index, Args &&... args)
+ {
+    _sinks.at(index.value()) = std::unique_ptr<T>{new T{std::forward<Args>(args)...}};
+ }
+
+private:
std::shared_ptr<const model::Model> _model;
+  // TODO Use the interpreter's own TensorInfo instead of compiler's TensorInfo struct,
+  //      or define an independent TensorInfo struct shared by the compiler and interpreter
+  // TODO Use an execution environment to handle the tensors for each inference
+ model::operand::IndexMap<compiler::TensorInfo> _tensor_info_map;
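+  // One Source slot per model input and one Sink slot per model output.
+  // NOTE Both vectors are accessed with at(), so they must be sized to the
+  //      number of model inputs/outputs beforehand (presumably in the
+  //      constructor, which is not part of this diff).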
+ std::vector<std::unique_ptr<ISource>> _sources;
+ std::vector<std::unique_ptr<ISink>> _sinks;
};
} // namespace interp