-#ifndef __INTERNAL_SINK_H__
-#define __INTERNAL_SINK_H__
+#ifndef __NEURUN_EXEC_SINK_H__
+#define __NEURUN_EXEC_SINK_H__
#include <cassert>
#include "internal/nnapi/feature/View.h"
#include "internal/nnapi/feature/Reader.h"
+namespace neurun
+{
+namespace exec
+{
+
struct Sink
{
virtual ~Sink() = default;
const size_t _size;
};
+} // namespace exec
+} // namespace neurun
+
-#endif // __INTERNAL_SINK_H__
+#endif // __NEURUN_EXEC_SINK_H__
-#ifndef __INTERNAL_SOURCE_H__
-#define __INTERNAL_SOURCE_H__
+#ifndef __NEURUN_EXEC_SOURCE_H__
+#define __NEURUN_EXEC_SOURCE_H__
#include <cassert>
#include "backend/acl_cl/feature/View.h"
+namespace neurun
+{
+namespace exec
+{
+
struct Source
{
virtual ~Source() = default;
const size_t _size;
};
-#endif // __INTERNAL_SOURCE_H__
+} // namespace exec
+} // namespace neurun
+
+#endif // __NEURUN_EXEC_SOURCE_H__
#include "frontend/wrapper/execution.h"
#include "frontend/wrapper/event.h"
-#include "internal/Source.h"
#include "graph/operand/Index.h"
//
const auto len = operands.at(operand_index).shape().dim(1);
- execution->source<VectorSource>(index, len, reinterpret_cast<const uint8_t *>(buffer), length);
+ execution->source<neurun::exec::VectorSource>(
+ index, len, reinterpret_cast<const uint8_t *>(buffer), length);
}
else if (operands.at(operand_index).shape().rank() == 4)
{
const auto &operand_shape = operands.at(operand_index).shape().asFeature();
- execution->source<FeatureSource>(index, operand_shape,
- reinterpret_cast<const uint8_t *>(buffer), length);
+ execution->source<neurun::exec::FeatureSource>(
+ index, operand_shape, reinterpret_cast<const uint8_t *>(buffer), length);
}
else
{
const auto len = operands.at(operand_index).shape().dim(1);
- execution->sink<VectorSink>(index, len, reinterpret_cast<uint8_t *>(buffer), length);
+ execution->sink<neurun::exec::VectorSink>(index, len, reinterpret_cast<uint8_t *>(buffer),
+ length);
}
else if (operands.at(operand_index).shape().rank() == 4)
{
const auto &operand_shape = operands.at(operand_index).shape().asFeature();
- execution->sink<FeatureSink>(index, operand_shape, reinterpret_cast<uint8_t *>(buffer), length);
+ execution->sink<neurun::exec::FeatureSink>(index, operand_shape,
+ reinterpret_cast<uint8_t *>(buffer), length);
}
else
{
#define __EXECUTION_H__
#include "codegen/Plan.h"
-#include "internal/Source.h"
-#include "internal/Sink.h"
+#include "exec/Source.h"
+#include "exec/Sink.h"
struct ANeuralNetworksExecution
{
public:
// TODO Use InputIndex instead of int
- void source(int n, std::unique_ptr<Source> &&source) { _sources.at(n) = std::move(source); }
+ void source(int n, std::unique_ptr<neurun::exec::Source> &&source)
+ {
+ _sources.at(n) = std::move(source);
+ }
template <typename T, typename... Args> void source(int n, Args &&... args)
{
source(n, std::unique_ptr<T>{new T{std::forward<Args>(args)...}});
}
public:
- const Source &source(int n) const { return *(_sources.at(n)); }
+ const neurun::exec::Source &source(int n) const { return *(_sources.at(n)); }
public:
// TODO Use OutputIndex instead of int
- void sink(int n, std::unique_ptr<Sink> &&sink) { _sinks.at(n) = std::move(sink); }
+ void sink(int n, std::unique_ptr<neurun::exec::Sink> &&sink) { _sinks.at(n) = std::move(sink); }
template <typename T, typename... Args> void sink(int n, Args &&... args)
{
sink(n, std::unique_ptr<T>{new T{std::forward<Args>(args)...}});
}
public:
- const Sink &sink(int n) const { return *(_sinks.at(n)); }
+ const neurun::exec::Sink &sink(int n) const { return *(_sinks.at(n)); }
private:
- std::vector<std::unique_ptr<Source>> _sources;
- std::vector<std::unique_ptr<Sink>> _sinks;
+ std::vector<std::unique_ptr<neurun::exec::Source>> _sources;
+ std::vector<std::unique_ptr<neurun::exec::Sink>> _sinks;
};
#endif