# kernel library
set(KERNELACL_SRCS "src/Init_acl.cpp"
"src/IO_accessor.cpp"
+ "src/shape.cpp"
"src/cl/Conv2D_acl.cpp"
"src/cl/FullyConnected.cpp"
"src/cl/Reshape.cpp"
# kernel test executable
set(KERNELACL_TEST_SRCS "src/cl/Conv2D_acl.test.cpp"
"src/util.cpp"
+ "src/shape.cpp"
"src/gtest_env.cpp"
"src/cl/FullyConnected.test.cpp"
"src/cl/Reshape.test.cpp"
#include <arm_compute/core/TensorShape.h>
#include <arm_compute/core/TensorInfo.h>
#include "../IO_accessor.h"
+#include "../util.h"
+#include "../shape.h"
#include <cassert>
-namespace android {
-namespace nn {
-
-// TODO remove from this source and use it from runtime
-uint32_t getSizeOfDimension(const Shape& shape, uint32_t dimensionIdx) {
- if (dimensionIdx >= shape.dimensions.size()) {
- // TODO, log the error
- return 0;
- }
- return shape.dimensions[dimensionIdx];
-}
-
-} // namespace nn
-} // namespace android
-
namespace nnfw {
namespace kernel {
namespace acl {
-arm_compute::TensorShape fromVectorNNShape(const android::nn::Shape& shape)
-{
- assert(shape.dimensions.size() == 1);
-
- const uint32_t len = android::nn::getSizeOfDimension(shape, 0);
-
- return arm_compute::TensorShape(len);
-}
-
-arm_compute::TensorShape fromNNShape(const android::nn::Shape& shape)
-{
- // NNAPI assumes the following ordering:
- //
- // dim(0) -> N
- // dim(1) -> H
- // dim(2) -> W
- // dim(3) -> C
- //
- uint32_t c = android::nn::getSizeOfDimension(shape, 3);
- uint32_t h = android::nn::getSizeOfDimension(shape, 1);
- uint32_t w = android::nn::getSizeOfDimension(shape, 2);
- uint32_t n = android::nn::getSizeOfDimension(shape, 0);
-
- return arm_compute::TensorShape(w, h, c, n);
-}
-
class CLUniqueTensor
{
public:
int32_t activation,
float* outputData, const android::nn::Shape& outputShape)
{
- arm_compute::TensorShape input_shape = fromNNShape(inputShape);
- arm_compute::TensorShape filter_shape = fromNNShape(filterShape);
- arm_compute::TensorShape bias_shape = fromVectorNNShape(biasShape);
- arm_compute::TensorShape output_shape = fromNNShape(outputShape);
+ arm_compute::TensorShape input_shape = util::fromNNShape(inputShape);
+ arm_compute::TensorShape filter_shape = util::fromNNShape(filterShape);
+ arm_compute::TensorShape bias_shape = util::fromVectorNNShape(biasShape);
+ arm_compute::TensorShape output_shape = util::fromNNShape(outputShape);
arm_compute::PadStrideInfo conv_info = arm_compute::PadStrideInfo(stride_width, stride_height,
padding_left, padding_right,
padding_top, padding_bottom,
// TODO: fix include path in CMakeFiles
#include "../IO_accessor.h"
-
-namespace android {
-namespace nn {
-
-// TODO remove from this source and use it from runtime
-uint32_t getSizeOfDimension(const Shape& shape, uint32_t dimensionIdx);
-
-} // namespace nn
-} // namespace android
+#include "../shape.h"
namespace nnfw {
namespace kernel {
namespace acl {
-arm_compute::TensorShape fromNNShape(const android::nn::Shape& shape);
-
bool fullyConnectedFloat32(const float* inputData, const android::nn::Shape& inputShape,
const float* weightsData, const android::nn::Shape& weightsShape,
const float* biasData, const android::nn::Shape& biasShape,
int32_t activation,
float* outputData, const android::nn::Shape& outputShape) {
- auto input_shape = fromNNShape(inputShape);
- auto filter_shape = fromNNShape(weightsShape);
- auto bias_shape = fromNNShape(biasShape);
- auto output_shape = fromNNShape(outputShape);
+ auto input_shape = util::fromNNShape(inputShape);
+ auto filter_shape = util::fromNNShape(weightsShape);
+ auto bias_shape = util::fromNNShape(biasShape);
+ auto output_shape = util::fromNNShape(outputShape);
arm_compute::CLTensor input, output, bias, filter;
// TODO: fix include path in CMakeFiles
#include "../IO_accessor.h"
-
-namespace android {
-namespace nn {
-
-// TODO remove from this source and use it from runtime
-uint32_t getSizeOfDimension(const Shape& shape, uint32_t dimensionIdx);
-
-} // namespace nn
-} // namespace android
+#include "../shape.h"
namespace nnfw {
namespace kernel {
namespace acl {
-arm_compute::TensorShape fromNNShape(const android::nn::Shape& shape);
-
bool reshapeGeneric(const void* inputData, const android::nn::Shape& inputShape,
void* outputData, const android::nn::Shape& outputShape) {
- auto input_shape = fromNNShape(inputShape);
- auto output_shape = fromNNShape(outputShape);
+ auto input_shape = util::fromNNShape(inputShape);
+ auto output_shape = util::fromNNShape(outputShape);
arm_compute::CLTensor input, output;
--- /dev/null
+#include <cassert>
+
+#include "shape.h"
+
+namespace android {
+namespace nn {
+
+// TODO remove from this source and use it from runtime
+// Returns the extent of dimension `dimensionIdx` of `shape`.
+// Out-of-range indices return 0 rather than failing, so callers may
+// query dim 0..3 of shapes with fewer dimensions (see fromNNShape).
+uint32_t getSizeOfDimension(const Shape& shape, uint32_t dimensionIdx) {
+  if (dimensionIdx >= shape.dimensions.size()) {
+    // TODO, log the error
+    return 0;
+  }
+  return shape.dimensions[dimensionIdx];
+}
+
+} // namespace nn
+} // namespace android
+
+namespace nnfw {
+namespace kernel {
+namespace acl {
+namespace util {
+
+// Converts a rank-1 NNAPI Shape (e.g. a bias vector) into an ACL
+// TensorShape holding that single length.
+arm_compute::TensorShape fromVectorNNShape(const android::nn::Shape& shape)
+{
+  assert(shape.dimensions.size() == 1);
+
+  const uint32_t len = android::nn::getSizeOfDimension(shape, 0);
+
+  return arm_compute::TensorShape(len);
+}
+
+// Converts a 4-D NNAPI Shape into an ACL TensorShape.
+// NOTE(review): the dimensions are passed to TensorShape as (w, h, c, n);
+// presumably this matches the layout the ACL CL kernels and the
+// IO_accessor copy routines expect — confirm against those callers.
+arm_compute::TensorShape fromNNShape(const android::nn::Shape& shape)
+{
+  // NNAPI assumes the following ordering:
+  //
+  //  dim(0) -> N
+  //  dim(1) -> H
+  //  dim(2) -> W
+  //  dim(3) -> C
+  //
+  uint32_t c = android::nn::getSizeOfDimension(shape, 3);
+  uint32_t h = android::nn::getSizeOfDimension(shape, 1);
+  uint32_t w = android::nn::getSizeOfDimension(shape, 2);
+  uint32_t n = android::nn::getSizeOfDimension(shape, 0);
+
+  return arm_compute::TensorShape(w, h, c, n);
+}
+
+} // namespace util
+} // namespace acl
+} // namespace kernel
+} // namespace nnfw
--- /dev/null
+// Shape conversion helpers: NNAPI android::nn::Shape -> ACL TensorShape.
+#pragma once
+
+#include <cstdint>
+
+#include <OperationsUtils.h>
+#include <arm_compute/core/TensorShape.h>
+#include <arm_compute/core/TensorInfo.h>
+
+namespace android {
+namespace nn {
+
+// TODO remove from this source and use it from runtime
+// Returns the extent of dimension `dimensionIdx`, or 0 when out of range.
+uint32_t getSizeOfDimension(const Shape& shape, uint32_t dimensionIdx);
+
+} // namespace nn
+} // namespace android
+
+namespace nnfw {
+namespace kernel {
+namespace acl {
+namespace util {
+
+// Rank-1 Shape (e.g. bias vector) -> 1-D ACL TensorShape.
+arm_compute::TensorShape fromVectorNNShape(const android::nn::Shape& shape);
+// 4-D NNAPI (N,H,W,C) Shape -> ACL TensorShape(w, h, c, n).
+arm_compute::TensorShape fromNNShape(const android::nn::Shape& shape);
+
+} // namespace util
+} // namespace acl
+} // namespace kernel
+} // namespace nnfw