--- /dev/null
+#include <NeuralNetworks.h>
+#include <sys/mman.h>
+
+#include "memory.h"
+
+// Creates a memory object backed by a file descriptor.
+// NOTE No validation (fd >= 0, non-null 'memory', size/offset sanity) happens here;
+//      presumably the ANeuralNetworksMemory constructor mmap's the region itself
+//      (see the <sys/mman.h> include) -- constructor is not visible in this file.
+int ANeuralNetworksMemory_createFromFd(size_t size, int protect, int fd, size_t offset,
+ ANeuralNetworksMemory **memory)
+{
+ *memory = new ANeuralNetworksMemory{size, protect, fd, offset};
+
+ return ANEURALNETWORKS_NO_ERROR;
+}
+
+// Releases a memory object created by ANeuralNetworksMemory_createFromFd.
+// 'delete' on a null pointer is a no-op, so passing nullptr is safe.
+void ANeuralNetworksMemory_free(ANeuralNetworksMemory *memory) { delete memory; }
--- /dev/null
+#include <NeuralNetworks.h>
+#include <NeuralNetworksEx.h>
+
+#include <cassert>
+#include <stdexcept>
+
+#include "model.h"
+#include "memory.h"
+
+// Creates an empty model instance.
+// NOTE Always reports success; an allocation failure would surface as
+//      std::bad_alloc rather than as an NNAPI error code.
+int ANeuralNetworksModel_create(ANeuralNetworksModel **model)
+{
+ *model = new ANeuralNetworksModel{};
+
+ return ANEURALNETWORKS_NO_ERROR;
+}
+
+// Destroys a model created by ANeuralNetworksModel_create (nullptr-safe).
+void ANeuralNetworksModel_free(ANeuralNetworksModel *model) { delete model; }
+
+// Registers a new operand described by 'type' in the model.
+// Records the operand's rank, per-axis dimensions, element type, scale and
+// zero-point; no backing storage is allocated at this point (see NOTE below).
+int ANeuralNetworksModel_addOperand(ANeuralNetworksModel *model,
+ const ANeuralNetworksOperandType *type)
+{
+ // ASSUME A tensor operand always consists of fp32 values
+ // NOTE We do not care about scalar operands.
+ assert(!(type->dimensionCount > 1) || (type->type == 3 /* ANEURALNETWORKS_TENSOR_FLOAT32 */));
+
+ internal::tflite::operand::Shape shape(type->dimensionCount);
+
+ for (uint32_t axis = 0; axis < type->dimensionCount; ++axis)
+ {
+ shape.dim(axis) = type->dimensions[axis];
+ }
+
+ shape.set(type->type, type->scale, type->zeroPoint);
+
+ model->deref().operands().append(shape);
+
+ // NOTE We do NOT allocate CLTensor here as we do not know how to interpret this one.
+ // TensorFlow Lite may interpret a rank-4 tensor either as a feature map (with batch) or
+ // a convolution kernel.
+
+ return ANEURALNETWORKS_NO_ERROR;
+}
+
+// Sets the constant value of an existing operand from a caller-provided buffer.
+// The CachedData policy suggests the bytes are copied into the operand;
+// TODO confirm against the CachedData implementation (not visible here).
+int ANeuralNetworksModel_setOperandValue(ANeuralNetworksModel *model, int32_t index,
+ const void *buffer, size_t length)
+{
+ const internal::tflite::operand::Index ind{index};
+ auto &obj = model->deref().operands().at(ind);
+
+ using internal::tflite::operand::CachedData;
+
+ obj.data<CachedData>(reinterpret_cast<const uint8_t *>(buffer), length);
+
+ return ANEURALNETWORKS_NO_ERROR;
+}
+
+// Sets the constant value of an existing operand from a region inside a
+// shared memory object ('offset' bytes from its base, 'length' bytes long).
+// The ExternalData policy suggests the operand references the memory rather
+// than copying it, so 'memory' must outlive the model -- TODO confirm.
+int ANeuralNetworksModel_setOperandValueFromMemory(ANeuralNetworksModel *model, int32_t index,
+ const ANeuralNetworksMemory *memory,
+ size_t offset, size_t length)
+{
+ const internal::tflite::operand::Index ind{index};
+ auto &obj = model->deref().operands().at(ind);
+
+ using internal::tflite::operand::ExternalData;
+
+ // NOTE assumes memory->base() yields a byte-addressable pointer so that
+ //      '+ offset' advances by bytes (the uint8_t cast supports this reading).
+ obj.data<ExternalData>(reinterpret_cast<const uint8_t *>(memory->base() + offset), length);
+
+ return ANEURALNETWORKS_NO_ERROR;
+}
+
+// Appends one operation of the given NNAPI 'type' to the model.
+// 'inputs'/'outputs' are arrays of operand indices previously registered via
+// ANeuralNetworksModel_addOperand.
+//
+// Supported: CONV_2D, MAX_POOL_2D, AVERAGE_POOL_2D (implicit padding only),
+// CONCATENATION, RESHAPE, FULLY_CONNECTED, SOFTMAX.
+// Unsupported operation types, and explicit-padding variants of the conv/pool
+// operations, raise std::runtime_error.
+int ANeuralNetworksModel_addOperation(ANeuralNetworksModel *model,
+ ANeuralNetworksOperationType type, uint32_t inputCount,
+ const uint32_t *inputs, uint32_t outputCount,
+ const uint32_t *outputs)
+{
+ switch (type)
+ {
+ case ANEURALNETWORKS_CONV_2D:
+ {
+ // inputCount is either 7 or 10 according to NN API specification.
+ // - Padding is implicit when inputCount is 7
+ // - Padding is explicit when inputCount is 10
+ assert(inputCount == 7 || inputCount == 10);
+ assert(outputCount == 1);
+
+ if (inputCount == 7)
+ {
+ using internal::tflite::op::Conv2D::implicit::Param;
+ using internal::tflite::op::Conv2D::implicit::Node;
+
+ // Add 'operations'
+ auto &operations = model->deref().operations();
+
+ operations.emplace_back<Node>(Param{inputCount, inputs, outputCount, outputs});
+ }
+ else
+ {
+ throw std::runtime_error{"Explicit padding in Conv2D is not supported, yet"};
+ }
+
+ break;
+ }
+ case ANEURALNETWORKS_MAX_POOL_2D:
+ {
+ // inputCount is either 7 or 10 according to NN API specification.
+ // - Padding is implicit when inputCount is 7
+ // - Padding is explicit when inputCount is 10
+ assert(inputCount == 7 || inputCount == 10);
+ assert(outputCount == 1);
+
+ if (inputCount == 7)
+ {
+ using internal::tflite::op::MaxPool2D::implicit::Param;
+ using internal::tflite::op::MaxPool2D::implicit::Node;
+
+ // Add 'operations'
+ auto &operations = model->deref().operations();
+
+ operations.emplace_back<Node>(Param{inputCount, inputs, outputCount, outputs});
+ }
+ else
+ {
+ throw std::runtime_error{"Explicit padding in MaxPool2D is not supported, yet"};
+ }
+
+ break;
+ }
+ case ANEURALNETWORKS_AVERAGE_POOL_2D:
+ {
+ // inputCount is either 7 or 10 according to NN API specification.
+ // - Padding is implicit when inputCount is 7
+ // - Padding is explicit when inputCount is 10
+ assert(inputCount == 7 || inputCount == 10);
+ assert(outputCount == 1);
+
+ if (inputCount == 7)
+ {
+ using internal::tflite::op::AvgPool2D::implicit::Param;
+ using internal::tflite::op::AvgPool2D::implicit::Node;
+
+ // Add 'operations'
+ auto &operations = model->deref().operations();
+
+ operations.emplace_back<Node>(Param{inputCount, inputs, outputCount, outputs});
+ }
+ else
+ {
+ throw std::runtime_error{"Explicit padding in AvgPool2D is not supported, yet"};
+ }
+
+ break;
+ }
+ case ANEURALNETWORKS_CONCATENATION:
+ {
+ using internal::tflite::op::Concat::Param;
+ using internal::tflite::op::Concat::Node;
+
+ // Add 'operations'
+ auto &operations = model->deref().operations();
+
+ operations.emplace_back<Node>(Param{inputCount, inputs, outputCount, outputs});
+
+ break;
+ }
+ case ANEURALNETWORKS_RESHAPE:
+ {
+ using internal::tflite::op::Reshape::Param;
+ using internal::tflite::op::Reshape::Node;
+
+ // Add 'operations'
+ auto &operations = model->deref().operations();
+
+ operations.emplace_back<Node>(Param{inputCount, inputs, outputCount, outputs});
+
+ break;
+ }
+ case ANEURALNETWORKS_FULLY_CONNECTED:
+ {
+ using internal::tflite::op::FullyConnected::Param;
+ using internal::tflite::op::FullyConnected::Node;
+
+ // Add 'operations'
+ auto &operations = model->deref().operations();
+
+ operations.emplace_back<Node>(Param{inputCount, inputs, outputCount, outputs});
+
+ break;
+ }
+ case ANEURALNETWORKS_SOFTMAX:
+ {
+ using internal::tflite::op::Softmax::Param;
+ using internal::tflite::op::Softmax::Node;
+
+ // Add 'operations'
+ auto &operations = model->deref().operations();
+
+ operations.emplace_back<Node>(Param{inputCount, inputs, outputCount, outputs});
+
+ break;
+ }
+ default:
+ throw std::runtime_error{"Not supported operation"};
+ }
+
+ return ANEURALNETWORKS_NO_ERROR;
+}
+
+// Appends one vendor-extension operation to the model.
+// No extension operation is implemented yet, so every 'type' raises
+// std::runtime_error.  The trailing return mirrors the structure of
+// ANeuralNetworksModel_addOperation and avoids a missing-return warning
+// once a supported case (with 'break') is added to the switch.
+int ANeuralNetworksModel_addOperationEx(ANeuralNetworksModel *model,
+ ANeuralNetworksOperationTypeEx type, uint32_t inputCount,
+ const uint32_t *inputs, uint32_t outputCount,
+ const uint32_t *outputs)
+{
+ switch (type)
+ {
+ default:
+ throw std::runtime_error{"Not supported operation"};
+ }
+
+ return ANEURALNETWORKS_NO_ERROR;
+}
+
+// Declares which operands are the model's external inputs and outputs, in
+// the order given.  Indices are appended to the model's 'inputs'/'outputs'
+// lists; repeated calls would accumulate rather than replace entries.
+int ANeuralNetworksModel_identifyInputsAndOutputs(ANeuralNetworksModel *model, uint32_t inputCount,
+ const uint32_t *inputs, uint32_t outputCount,
+ const uint32_t *outputs)
+{
+ // NOTE ::internal::tflite::operand::Index uses int as its underlying type as various NNAPI
+ // functions such as ANeuralNetworksModel_setOperandValue use int to represent operand index
+ //
+ // ANeuralNetworksModel_identifyInputsAndOutputs, however, uses uint32_t to represent operand
+ // index.
+ //
+ // Below, static_cast<int>(...) is introduced to eliminate compiler warning.
+ for (uint32_t n = 0; n < inputCount; ++n)
+ {
+ const ::internal::tflite::operand::Index ind{static_cast<int>(inputs[n])};
+ model->deref().inputs.emplace_back(ind);
+ }
+
+ for (uint32_t n = 0; n < outputCount; ++n)
+ {
+ const ::internal::tflite::operand::Index ind{static_cast<int>(outputs[n])};
+ model->deref().outputs.emplace_back(ind);
+ }
+
+ return ANEURALNETWORKS_NO_ERROR;
+}
+
+// Marks the model as finished.  Currently a no-op: no validation or
+// freezing is performed here, and 'model' is intentionally unused.
+int ANeuralNetworksModel_finish(ANeuralNetworksModel *model) { return ANEURALNETWORKS_NO_ERROR; }
#include "memory.h"
-int ANeuralNetworksMemory_createFromFd(size_t size, int protect, int fd, size_t offset,
- ANeuralNetworksMemory **memory)
-{
- *memory = new ANeuralNetworksMemory{size, protect, fd, offset};
-
- return ANEURALNETWORKS_NO_ERROR;
-}
-
-void ANeuralNetworksMemory_free(ANeuralNetworksMemory *memory) { delete memory; }
-
//
// ANeuralNetworksMemory
//
#include <NeuralNetworks.h>
#include <NeuralNetworksEx.h>
-#include <cassert>
-#include <stdexcept>
-
#include "model.h"
-#include "memory.h"
-
-int ANeuralNetworksModel_create(ANeuralNetworksModel **model)
-{
- *model = new ANeuralNetworksModel{};
-
- return ANEURALNETWORKS_NO_ERROR;
-}
-
-void ANeuralNetworksModel_free(ANeuralNetworksModel *model) { delete model; }
-
-int ANeuralNetworksModel_addOperand(ANeuralNetworksModel *model,
- const ANeuralNetworksOperandType *type)
-{
- // ASSUME A tensor operand always consists of fp32 values
- // NOTE We do not care about scala operands.
- assert(!(type->dimensionCount > 1) || (type->type == 3 /* ANEURALNETWORKS_TENSOR_FLOAT32 */));
-
- internal::tflite::operand::Shape shape(type->dimensionCount);
-
- for (uint32_t axis = 0; axis < type->dimensionCount; ++axis)
- {
- shape.dim(axis) = type->dimensions[axis];
- }
-
- shape.set(type->type, type->scale, type->zeroPoint);
-
- model->deref().operands().append(shape);
-
- // NOTE We do NOT allocate CLTensor here as we do not how to interpret this one.
- // TensorFlow Lite may interpret a rank-4 tensor either as a feature map (with batch) or
- // a convolution kernel.
-
- return ANEURALNETWORKS_NO_ERROR;
-}
-
-int ANeuralNetworksModel_setOperandValue(ANeuralNetworksModel *model, int32_t index,
- const void *buffer, size_t length)
-{
- const internal::tflite::operand::Index ind{index};
- auto &obj = model->deref().operands().at(ind);
-
- using internal::tflite::operand::CachedData;
-
- obj.data<CachedData>(reinterpret_cast<const uint8_t *>(buffer), length);
-
- return ANEURALNETWORKS_NO_ERROR;
-}
-
-int ANeuralNetworksModel_setOperandValueFromMemory(ANeuralNetworksModel *model, int32_t index,
- const ANeuralNetworksMemory *memory,
- size_t offset, size_t length)
-{
- const internal::tflite::operand::Index ind{index};
- auto &obj = model->deref().operands().at(ind);
-
- using internal::tflite::operand::ExternalData;
-
- obj.data<ExternalData>(reinterpret_cast<const uint8_t *>(memory->base() + offset), length);
-
- return ANEURALNETWORKS_NO_ERROR;
-}
-
-int ANeuralNetworksModel_addOperation(ANeuralNetworksModel *model,
- ANeuralNetworksOperationType type, uint32_t inputCount,
- const uint32_t *inputs, uint32_t outputCount,
- const uint32_t *outputs)
-{
- switch (type)
- {
- case ANEURALNETWORKS_CONV_2D:
- {
- // inputCount is either 7 or 10 acccording to NN API specification.
- // - Padding is implicit when inputCount is 7
- // - Padding is explicit when inputCount is 10
- assert(inputCount == 7 || inputCount == 10);
- assert(outputCount == 1);
-
- if (inputCount == 7)
- {
- using internal::tflite::op::Conv2D::implicit::Param;
- using internal::tflite::op::Conv2D::implicit::Node;
-
- // Add 'operations'
- auto &operations = model->deref().operations();
-
- operations.emplace_back<Node>(Param{inputCount, inputs, outputCount, outputs});
- }
- else
- {
- throw std::runtime_error{"Explicit padding in Conv2D is not supported, yet"};
- }
-
- break;
- }
- case ANEURALNETWORKS_MAX_POOL_2D:
- {
- // inputCount is either 7 or 10 acccording to NN API specification.
- // - Padding is implicit when inputCount is 7
- // - Padding is explicit when inputCount is 10
- assert(inputCount == 7 || inputCount == 10);
- assert(outputCount == 1);
-
- if (inputCount == 7)
- {
- using internal::tflite::op::MaxPool2D::implicit::Param;
- using internal::tflite::op::MaxPool2D::implicit::Node;
-
- // Add 'operations'
- auto &operations = model->deref().operations();
-
- operations.emplace_back<Node>(Param{inputCount, inputs, outputCount, outputs});
- }
- else
- {
- throw std::runtime_error{"Explicit padding in MaxPool2D is not supported, yet"};
- }
-
- break;
- }
- case ANEURALNETWORKS_AVERAGE_POOL_2D:
- {
- // inputCount is either 7 or 10 acccording to NN API specification.
- // - Padding is implicit when inputCount is 7
- // - Padding is explicit when inputCount is 10
- assert(inputCount == 7 || inputCount == 10);
- assert(outputCount == 1);
-
- if (inputCount == 7)
- {
- using internal::tflite::op::AvgPool2D::implicit::Param;
- using internal::tflite::op::AvgPool2D::implicit::Node;
-
- // Add 'operations'
- auto &operations = model->deref().operations();
-
- operations.emplace_back<Node>(Param{inputCount, inputs, outputCount, outputs});
- }
- else
- {
- throw std::runtime_error{"Explicit padding in AvgPool2D is not supported, yet"};
- }
-
- break;
- }
- case ANEURALNETWORKS_CONCATENATION:
- {
- using internal::tflite::op::Concat::Param;
- using internal::tflite::op::Concat::Node;
-
- // Add 'operations'
- auto &operations = model->deref().operations();
-
- operations.emplace_back<Node>(Param{inputCount, inputs, outputCount, outputs});
-
- break;
- }
- case ANEURALNETWORKS_RESHAPE:
- {
- using internal::tflite::op::Reshape::Param;
- using internal::tflite::op::Reshape::Node;
-
- // Add 'operations'
- auto &operations = model->deref().operations();
-
- operations.emplace_back<Node>(Param{inputCount, inputs, outputCount, outputs});
-
- break;
- }
- case ANEURALNETWORKS_FULLY_CONNECTED:
- {
- using internal::tflite::op::FullyConnected::Param;
- using internal::tflite::op::FullyConnected::Node;
-
- // Add 'operations'
- auto &operations = model->deref().operations();
-
- operations.emplace_back<Node>(Param{inputCount, inputs, outputCount, outputs});
-
- break;
- }
- case ANEURALNETWORKS_SOFTMAX:
- {
- using internal::tflite::op::Softmax::Param;
- using internal::tflite::op::Softmax::Node;
-
- // Add 'operations'
- auto &operations = model->deref().operations();
-
- operations.emplace_back<Node>(Param{inputCount, inputs, outputCount, outputs});
-
- break;
- }
- default:
- throw std::runtime_error{"Not supported operation"};
- };
-
- return ANEURALNETWORKS_NO_ERROR;
-}
-
-int ANeuralNetworksModel_addOperationEx(ANeuralNetworksModel *model,
- ANeuralNetworksOperationTypeEx type, uint32_t inputCount,
- const uint32_t *inputs, uint32_t outputCount,
- const uint32_t *outputs)
-{
- switch (type)
- {
- default:
- throw std::runtime_error{"Not supported operation"};
- }
-}
-
-int ANeuralNetworksModel_identifyInputsAndOutputs(ANeuralNetworksModel *model, uint32_t inputCount,
- const uint32_t *inputs, uint32_t outputCount,
- const uint32_t *outputs)
-{
- // NOTE ::internal::tflite::operand::Index uses int as its underlying type as various NNAPI
- // functions such as ANeuralNetworksModel_setOperandValue use int to represent operand index
- //
- // ANeuralNetworksModel_identifyInputsAndOutputs, however, uses uint32_t to represent operand
- // index.
- //
- // Below, static_cast<int>(...) is introduced to eliminate compiler warning.
- for (uint32_t n = 0; n < inputCount; ++n)
- {
- const ::internal::tflite::operand::Index ind{static_cast<int>(inputs[n])};
- model->deref().inputs.emplace_back(ind);
- }
-
- for (uint32_t n = 0; n < outputCount; ++n)
- {
- const ::internal::tflite::operand::Index ind{static_cast<int>(outputs[n])};
- model->deref().outputs.emplace_back(ind);
- }
-
- return ANEURALNETWORKS_NO_ERROR;
-}
-
-int ANeuralNetworksModel_finish(ANeuralNetworksModel *model) { return ANEURALNETWORKS_NO_ERROR; }
//
// ANeuralNetworksModel