#include <arm_compute/runtime/CL/functions/CLFullyConnectedLayer.h>
#include <arm_compute/runtime/CL/functions/CLSoftmaxLayer.h>
+#include "internal/arm_compute/Cast.h"
#include "internal/arm_compute/kernel/View.h"
#include "internal/nnapi/kernel/Reader.h"
#include "internal/layers/GenericReshapeLayer.h"
// output tensor shape
// TODO Check consistency of output shape
- // 'Feature Map' to 'Vector' reshape
- assert(_ctx.at(input_index).shape().rank() == 4);
- assert(_ctx.at(output_index).shape().rank() == 2);
- assert(_ctx.at(output_index).shape().dim(0) == 1);
-
- const auto ifm_shape = _ctx.at(input_index).shape().asFeature();
- const auto out_size = _ctx.at(output_index).shape().dim(1);
+ // TODO Re-enable this assert
+ // assert((ifm_shape.C * ifm_shape.H * ifm_shape.W) == out_size);
- assert((ifm_shape.C * ifm_shape.H * ifm_shape.W) == out_size);
-
- _builder.addShapeConstr(output_index, asTensorInfo(out_size));
- _builder.addShapeConstr(input_index, asTensorInfo(ifm_shape));
+ _builder.addShapeConstr(output_index, asTensorInfo(_ctx.at(output_index).shape()));
+ _builder.addShapeConstr(input_index, asTensorInfo(_ctx.at(input_index).shape()));
struct Param
{
assert(_ctx.at(output_index).shape().rank() == _ctx.at(input_index).shape().rank());
- // TODO Support 'feature map' input
- assert(_ctx.at(input_index).shape().rank() == 2);
- assert(_ctx.at(input_index).shape().dim(0) == 1);
- assert(_ctx.at(input_index).shape().dim(0) == _ctx.at(output_index).shape().dim(0));
- assert(_ctx.at(input_index).shape().dim(1) == _ctx.at(output_index).shape().dim(1));
-
- const uint32_t len = _ctx.at(output_index).shape().dim(1);
-
- _builder.addShapeConstr(output_index, asTensorInfo(len));
- _builder.addShapeConstr(input_index, asTensorInfo(len));
+ _builder.addShapeConstr(output_index, asTensorInfo(_ctx.at(output_index).shape()));
+ _builder.addShapeConstr(input_index, asTensorInfo(_ctx.at(input_index).shape()));
struct Param
{
--- /dev/null
#ifndef __SWIZZLE_H__
#define __SWIZZLE_H__

#include <cstdint> // uint32_t (was missing; relied on transitive includes)

// Wrapper for an axis index expressed in ARM Compute's dimension ordering.
// A distinct type (rather than a bare uint32_t) keeps NNAPI/TFLite axis
// numbers from being mixed up with ARM Compute axis numbers.
class ARMComputeAxis
{
public:
  ARMComputeAxis() = default;

public:
  explicit ARMComputeAxis(uint32_t value) : _value{value}
  {
    // DO NOTHING
  }

public:
  uint32_t value(void) const { return _value; }

private:
  // Brace-initialized so a default-constructed axis holds a well-defined
  // value instead of an indeterminate one.
  uint32_t _value{0};
};

// Convert a T/F Lite / NNAPI axis (based on ...NHWC, outermost first) to the
// corresponding ARM Compute axis (WHCN..., innermost first).
//
// For rank < 4 the dimension order is simply reversed. For rank >= 4 the three
// innermost reversed axes are additionally permuted to match ARM Compute's
// W/H/C layout:  C -> 2 (DEPTH), W -> 0 (WIDTH), H -> 1 (HEIGHT).
// Batch and any higher dimensions keep their reversed position.
inline ARMComputeAxis ToARMComputeAxis(uint32_t rank, uint32_t axis)
{
  const ARMComputeAxis reversed{(rank - axis) - 1};

  if (rank < 4)
  {
    return reversed;
  }

  switch (reversed.value())
  {
    case 0: // DEPTH (C)
      return ARMComputeAxis{2};
    case 1: // WIDTH (W)
      return ARMComputeAxis{0};
    case 2: // HEIGHT (H)
      return ARMComputeAxis{1};
    default: // ELSE (batch, ...)
      return reversed;
  }
}

#endif // __SWIZZLE_H__
--- /dev/null
+#ifndef __ARM_COMPUTE_CAST_H__
+
+#include <arm_compute/core/TensorShape.h>
+
+#include "internal/Swizzle.h"
+#include "internal/Model.h"
+
+// TODO Move asTensorShape and asTensorInfo in compilation.cc into this file
+inline ::arm_compute::TensorShape asTensorShape(const internal::tflite::operand::Shape &shape)
+{
+ const uint32_t rank = shape.rank();
+
+ ::arm_compute::TensorShape res{};
+
+ res.set_num_dimensions(rank);
+
+ for (uint32_t axis = 0; axis < rank; ++axis)
+ {
+ res[ToARMComputeAxis(rank, axis).value()] = shape.dim(axis);
+ }
+
+ return res;
+}
+
+::arm_compute::TensorInfo asTensorInfo(const internal::tflite::operand::Shape &shape)
+{
+ return ::arm_compute::TensorInfo(asTensorShape(shape), 1, ::arm_compute::DataType::F32);
+}
+
+#endif // __ARM_COMPUTE_CAST_H__