{
}
+// Accessor that stages a row-major 2-D (matrix) host float buffer into an
+// arm_compute tensor. Both the data pointer and the shape are borrowed, not
+// owned — the caller must keep them alive while the accessor is in use.
+MatrixInputAccessor::MatrixInputAccessor(const float* inputData, const nnfw::rt::Shape& inputShape)
+  : _inputData(inputData)
+  , _inputShape(inputShape)
+{
+}
+
VectorInputAccessor::VectorInputAccessor(const float* inputData, const nnfw::rt::Shape& inputShape)
: _inputData(inputData)
, _inputShape(inputShape)
return true;
}
+// Copies the borrowed host buffer into `tensor`, element by element, treating
+// the source as row-major with `id[1]` as the row and `id[0]` as the column.
+// Always returns true, signalling the accessor ran (ITensorAccessor contract).
+bool MatrixInputAccessor::access_tensor(arm_compute::ITensor &tensor)
+{
+  arm_compute::Window window;
+  window.use_tensor_dimensions(tensor.info()->tensor_shape());
+
+  // This accessor only supports rank-1/rank-2 tensors (vector or matrix).
+  assert(tensor.info()->tensor_shape().num_dimensions() <= 2);
+
+  execute_window_loop(window, [&](const arm_compute::Coordinates& id)
+  {
+    const auto row = id[1];
+    const auto col = id[0];
+    // NOTE(review): the row stride W is taken from the ACL tensor shape, not
+    // from _inputShape — this assumes the two agree (no padding/reordering
+    // between the NN runtime shape and the ACL shape). TODO: confirm.
+    const auto W = tensor.info()->tensor_shape().x();
+
+    const auto offset = row * W + col;
+
+    *reinterpret_cast<float *>(tensor.ptr_to_element(id)) =
+      *(_inputData + offset);
+  });
+  return true;
+}
+
bool VectorInputAccessor::access_tensor(arm_compute::ITensor &tensor)
{
arm_compute::Window window;
const nnfw::rt::Shape& _inputShape;
};
+// ITensorAccessor that feeds a 2-D (matrix) float input into an ACL tensor.
+// Move-only by construction (move ctor defaulted; the reference member makes
+// copy-assignment implicitly unavailable). Holds non-owning views of the
+// caller's buffer and shape, which must outlive the accessor.
+class MatrixInputAccessor : public arm_compute::graph::ITensorAccessor
+{
+public:
+  MatrixInputAccessor(const float* inputData, const nnfw::rt::Shape& inputShape);
+  MatrixInputAccessor(MatrixInputAccessor&&) = default;
+
+  // Inherited methods overridden:
+  bool access_tensor(arm_compute::ITensor& tensor) override;
+
+private:
+  const float* _inputData;             // borrowed host input buffer
+  const nnfw::rt::Shape& _inputShape;  // borrowed shape descriptor
+};
+
class VectorInputAccessor : public arm_compute::graph::ITensorAccessor
{
public:
}
else if (inputShape.dimensions.size() == 2)
{
- // Softmax comes with 1xN matrix and this is translated to N vector in arm_compute::TensorShape
- TensorAccess<VectorInputAccessor>(input, inputData, inputShape);
+ TensorAccess<MatrixInputAccessor>(input, inputData, inputShape);
softmax_f->run();
arm_compute::CLScheduler::get().sync();
- TensorAccess<VectorOutputAccessor>(output, outputData, outputShape);
+ TensorAccess<MatrixOutputAccessor>(output, outputData, outputShape);
}
else
{