add_library(nnapi_pure_arm_compute SHARED ${SOURCES})
target_include_directories(nnapi_pure_arm_compute PUBLIC ${NNAPI_INCLUDE_DIR})
target_include_directories(nnapi_pure_arm_compute PUBLIC src)
# PUBLIC keyword added: the keyword-less target_link_libraries signature has
# legacy semantics and must never be mixed with the keyword form on the same
# target. PUBLIC preserves the old propagate-to-consumers behavior.
target_link_libraries(nnapi_pure_arm_compute PUBLIC arm_compute)
# Install/load as "libneuralnetworks" so it can stand in for the NNAPI runtime.
set_target_properties(nnapi_pure_arm_compute PROPERTIES OUTPUT_NAME neuralnetworks)
endif()
--- /dev/null
+#include "internal/Model.h"
+
+namespace internal
+{
+namespace tflite
+{
+namespace operand
+{
+
+Shape::Shape(uint32_t rank)
+{
+ _dims.resize(rank);
+}
+
+} // namespace operand
+} // namespace tflite
+} // namespace internal
+
+namespace internal
+{
+namespace tflite
+{
+namespace operand
+{
+
+Index Set::append(const Shape &shape)
+{
+ int32_t index = _objects.size();
+
+ _objects.emplace_back(new Object{shape});
+
+ return Index{index};
+}
+
+const Object &Set::at(const Index &index) const
+{
+ return *(_objects.at(index.asInt()));
+}
+
+} // namespace operand
+} // namespace tflite
+} // namespace internal
--- /dev/null
+#ifndef __INTERNAL_MODEL_H__
+#define __INTERNAL_MODEL_H__
+
namespace internal
{
namespace tflite
{
namespace operand
{

// Strongly-typed handle for an operand's position inside a Set.
class Index
{
public:
  // Wraps a raw position; explicit so plain ints never convert silently.
  explicit Index(int value) : _value{value} {}

public:
  // The raw position, e.g. for container indexing.
  int asInt(void) const { return _value; }

private:
  int _value;
};

} // namespace operand
} // namespace tflite
} // namespace internal
+
+#include <vector>
+#include <cstdint>
+
namespace internal
{
namespace tflite
{
namespace operand
{

// Tensor shape: an ordered list of dimension extents, indexed by axis.
struct Shape
{
public:
  // Creates a shape of the given rank; the out-of-line definition
  // initializes every dimension to 0, to be filled in via dim().
  Shape(uint32_t rank);

public:
  // Number of dimensions. Cast is explicit because vector::size() returns
  // size_t, which is wider than uint32_t on common 64-bit targets; an
  // implicit narrowing here draws conversion warnings.
  uint32_t rank(void) const { return static_cast<uint32_t>(_dims.size()); }

public:
  // Extent of axis 'n'; vector::at throws std::out_of_range when n >= rank().
  uint32_t dim(uint32_t n) const { return _dims.at(n); }
  uint32_t &dim(uint32_t n) { return _dims.at(n); }

private:
  std::vector<uint32_t> _dims;
};

} // namespace operand
} // namespace tflite
} // namespace internal
+
+namespace internal
+{
+namespace tflite
+{
+namespace operand
+{
+
+class Object
+{
+public:
+ explicit Object(const Shape &shape) : _shape{shape}
+ {
+ // DO NOTHING
+ }
+
+public:
+ const Shape &shape(void) const { return _shape; }
+
+private:
+ const Shape _shape;
+};
+
+} // namespace operand
+} // namespace tflite
+} // namespace internal
+
+#include <memory>
+
namespace internal
{
namespace tflite
{
namespace operand
{

// Owning, append-only collection of operand Objects, addressed by Index.
class Set
{
public:
  // Creates a new Object from the given shape, appends it, and returns
  // the Index (position) at which it was inserted. Defined out-of-line.
  Index append(const Shape &);

public:
  // Bounds-checked read-only lookup; the out-of-line definition uses
  // std::vector::at, so an invalid index throws std::out_of_range.
  const Object &at(const Index &) const;

private:
  std::vector<std::unique_ptr<Object>> _objects;
};

} // namespace operand
} // namespace tflite
} // namespace internal
+
namespace internal
{
namespace tflite
{

// In-memory model under construction; currently it holds only the
// operand table.
class Model
{
public:
  // Mutable access, used while the model is being built.
  operand::Set &operands(void) { return _operands; }
  // Read-only access for consumers of a finished model.
  const operand::Set &operands(void) const { return _operands; }

private:
  operand::Set _operands;
};

} // namespace tflite
} // namespace internal
+
+#endif // __INTERNAL_MODEL_H__
#include <nnapi.h>

#include <cassert>
#include <memory>

#include "model.h"
ResultCode
ANeuralNetworksModel_create(ANeuralNetworksModel** model)
{
  // Guard against a null output pointer before writing through it, as the
  // NNAPI contract specifies instead of crashing.
  if (model == nullptr)
  {
    return ANEURALNETWORKS_UNEXPECTED_NULL;
  }

  *model = new ANeuralNetworksModel{};

  return ANEURALNETWORKS_NO_ERROR;
}
ResultCode
ANeuralNetworksModel_free(ANeuralNetworksModel* model)
{
  // Destroys a handle obtained from ANeuralNetworksModel_create.
  // 'delete' on a null pointer is a no-op, so a null 'model' is tolerated.
  delete model;

  return ANEURALNETWORKS_NO_ERROR;
}
ResultCode
ANeuralNetworksModel_addOperand(ANeuralNetworksModel* model, const ANeuralNetworksOperandType *type)
{
  // Both pointers are dereferenced below; reject nulls per the NNAPI contract.
  if ((model == nullptr) || (type == nullptr))
  {
    return ANEURALNETWORKS_UNEXPECTED_NULL;
  }

  // ASSUME A tensor operand always consists of fp32 values
  // NOTE We do not care about scalar operands.
  assert(!(type->dimensionCount > 1) || (type->type == 3 /* ANEURALNETWORKS_TENSOR_FLOAT32 */));

  // Record the operand's shape (rank + per-axis extents) in the internal model.
  internal::tflite::operand::Shape shape(type->dimensionCount);

  for (uint32_t axis = 0; axis < type->dimensionCount; ++axis)
  {
    shape.dim(axis) = type->dimensions[axis];
  }

  model->deref().operands().append(shape);

  // NOTE We do NOT allocate CLTensor here as we do not know how to interpret this one.
  //      TensorFlow Lite may interpret a rank-4 tensor either as a feature map (with batch) or
  //      a convolution kernel.

  return ANEURALNETWORKS_NO_ERROR;
}
{
return ANEURALNETWORKS_NO_ERROR;
}
+
+//
+// ANeuralNetworksModel
+//
+ANeuralNetworksModel::ANeuralNetworksModel() : _model{new internal::tflite::Model}
+{
+ // DO NOTHING
+}
#ifndef __MODEL_H__
#define __MODEL_H__
+#include "internal/Model.h"
+
struct ANeuralNetworksModel
{
+public:
+ ANeuralNetworksModel();
+
+public:
+ internal::tflite::Model &deref(void) { return *_model; }
+
+public:
+ void release(std::shared_ptr<const internal::tflite::Model> &model)
+ {
+ model = _model;
+ }
+
+private:
+ std::shared_ptr<internal::tflite::Model> _model;
};
#endif // __MODEL_H__