--- /dev/null
+#ifndef __NNFW_SUPPORT_TFLITE_TENSOR_VIEW_H__
+#define __NNFW_SUPPORT_TFLITE_TENSOR_VIEW_H__
+
+#include "tensorflow/contrib/lite/interpreter.h"
+
+#include "util/tensor/Shape.h"
+#include "util/tensor/Index.h"
+#include "util/tensor/Reader.h"
+
+// Include what we use: std::vector and uint32_t appear below and must not
+// rely on transitive includes from the headers above.
+#include <cstdint>
+#include <vector>
+
+namespace nnfw
+{
+namespace support
+{
+namespace tflite
+{
+
+// Generic template; only the float specialization below is defined for now.
+template<typename T> class TensorView;
+
+// Non-owning, row-major view over a flat float buffer.
+//
+// The view never allocates or frees element storage; the caller must keep
+// the underlying buffer alive for the lifetime of the view.
+template<> class TensorView<float> final : public nnfw::util::tensor::Reader<float>
+{
+public:
+  // 'base' is borrowed, not owned. It is assumed to hold enough floats
+  // for 'shape' (not checked here).
+  TensorView(const nnfw::util::tensor::Shape &shape, float *base);
+
+public:
+  const nnfw::util::tensor::Shape &shape(void) const { return _shape; }
+
+public:
+  // Reader<float> implementation: element at 'index' (rank must match).
+  float at(const nnfw::util::tensor::Index &index) const override;
+
+private:
+  nnfw::util::tensor::Shape _shape;
+
+public:
+  // NOTE(review): exposed publicly despite the underscore naming --
+  // presumably for direct buffer/stride access by callers; consider
+  // accessors instead.
+  float *_base;
+  std::vector<uint32_t> _stride;
+
+public:
+  // TODO Introduce Operand ID class
+  // Wraps the interpreter-owned tensor at 'operand_id' without copying;
+  // the view is valid only while 'interp' stays alive.
+  static TensorView<float> make(::tflite::Interpreter &interp, int operand_id);
+};
+
+} // namespace tflite
+} // namespace support
+} // namespace nnfw
+
+#endif // __NNFW_SUPPORT_TFLITE_TENSOR_VIEW_H__
--- /dev/null
+#ifndef __NNFW_UTIL_TENSOR_READER_H__
+#define __NNFW_UTIL_TENSOR_READER_H__
+
+#include "util/tensor/Index.h"
+
+namespace nnfw
+{
+namespace util
+{
+namespace tensor
+{
+
+// Abstract read-only accessor for multi-dimensional, tensor-like data.
+//
+// An implementation maps an Index (one coordinate per axis) to the
+// element of type T stored at that position.
+template <typename T> struct Reader
+{
+  virtual ~Reader() = default;
+
+  // Returns the element at 'index'. Out-of-range behavior is left to the
+  // implementation (not specified here).
+  virtual T at(const Index &index) const = 0;
+};
+
+} // namespace tensor
+} // namespace util
+} // namespace nnfw
+
+#endif // __NNFW_UTIL_TENSOR_READER_H__
file(GLOB_RECURSE SOURCES "src/*.cpp")
+# Exclude unit-test sources (src/*.test.cpp) from the library; they are
+# built as standalone test executables below instead.
+file(GLOB_RECURSE TESTS "src/*.test.cpp")
+list(REMOVE_ITEM SOURCES ${TESTS})
add_library(nnfw_support_tflite ${SOURCES})
target_include_directories(nnfw_support_tflite PUBLIC ${CMAKE_SOURCE_DIR}/include)
target_link_libraries(nnfw_support_tflite nnfw_util tensorflow_lite)
+
+add_executable(nnfw_support_tflite_test_TensorView src/TensorView.test.cpp)
+target_link_libraries(nnfw_support_tflite_test_TensorView nnfw_support_tflite)
--- /dev/null
+#include "support/tflite/TensorView.h"
+
+#include <cassert>
+
+namespace nnfw
+{
+namespace support
+{
+namespace tflite
+{
+
+// Builds a row-major view over 'base': the last axis is contiguous
+// (stride 1) and each preceding axis strides over the product of the
+// dimensions that follow it.
+TensorView<float>::TensorView(const nnfw::util::tensor::Shape &shape, float *base) : _shape{shape}, _base{base}
+{
+  // Set 'stride'
+  _stride.resize(_shape.rank());
+
+  // Guard against rank-0 (scalar) shapes: 'rank() - 1' would underflow
+  // the unsigned index below. With an empty stride table, at() simply
+  // degenerates to reading *_base.
+  if (_shape.rank() > 0)
+  {
+    _stride.at(_shape.rank() - 1) = 1;
+
+    for (uint32_t axis = _shape.rank() - 1; axis > 0; --axis)
+    {
+      _stride.at(axis - 1) = _stride.at(axis) * _shape.dim(axis);
+    }
+  }
+}
+
+// Reads the element addressed by 'index' via the precomputed strides.
+// 'index' must have the same rank as the view's shape; each coordinate
+// is assumed to be in range (not checked here).
+float TensorView<float>::at(const nnfw::util::tensor::Index &index) const
+{
+  assert(index.rank() == _shape.rank());
+
+  uint32_t offset = 0;
+
+  for (size_t axis = 0; axis < _shape.rank(); ++axis)
+  {
+    offset += _stride.at(axis) * index.at(axis);
+  }
+
+  return *(_base + offset);
+}
+
+// Wraps the tensor stored at 'tensor_index' inside 'interp' without
+// copying its data. The returned view is only valid while the
+// interpreter (and its tensor allocations) stay alive.
+TensorView<float> TensorView<float>::make(::tflite::Interpreter &interp, int tensor_index)
+{
+  auto tensor_ptr = interp.tensor(tensor_index);
+
+  // TODO Enable the following asserts
+  // assert(isFloatTensor(tensor_ptr));
+  // assert(isFeatureTensor(tensor_ptr));
+
+  // Set 'shape' from the TfLite dims array.
+  nnfw::util::tensor::Shape shape(tensor_ptr->dims->size);
+
+  for (uint32_t axis = 0; axis < shape.rank(); ++axis)
+  {
+    shape.dim(axis) = tensor_ptr->dims->data[axis];
+  }
+
+  return TensorView<float>(shape, interp.typed_tensor<float>(tensor_index));
+}
+
+} // namespace tflite
+} // namespace support
+} // namespace nnfw
--- /dev/null
+#include "support/tflite/TensorView.h"
+
+#include <cassert>
+
+int main(int argc, char **argv)
+{
+  // Six floats laid out row-major as a 2x3 tensor.
+  float buffer[6] = { 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f };
+
+  const nnfw::util::tensor::Shape shape{2, 3};
+  const nnfw::support::tflite::TensorView<float> view{shape, buffer};
+
+  // Row 0 must map onto the first three elements of the buffer...
+  assert(view.at(nnfw::util::tensor::Index{0, 0}) == buffer[0]);
+  assert(view.at(nnfw::util::tensor::Index{0, 1}) == buffer[1]);
+  assert(view.at(nnfw::util::tensor::Index{0, 2}) == buffer[2]);
+
+  // ...and row 1 onto the last three.
+  assert(view.at(nnfw::util::tensor::Index{1, 0}) == buffer[3]);
+  assert(view.at(nnfw::util::tensor::Index{1, 1}) == buffer[4]);
+  assert(view.at(nnfw::util::tensor::Index{1, 2}) == buffer[5]);
+
+  return 0;
+}
list(APPEND SOURCES "src/nnapi_test.cc")
add_executable(nnapi_test ${SOURCES})
-target_link_libraries(nnapi_test tensorflow_lite)
-target_link_libraries(nnapi_test nnfw_util)
+# nnfw_support_tflite itself links tensorflow_lite and nnfw_util, so the
+# single dependency replaces the two direct ones removed above.
+target_link_libraries(nnapi_test nnfw_support_tflite)
install(TARGETS nnapi_test DESTINATION bin)
#include "util/environment.h"
#include "util/fp32.h"
+#include "util/tensor/IndexIterator.h"
+#include "support/tflite/TensorView.h"
#include <iostream>
#include <chrono>
return true;
}
+// Adapter that lets a tensor Index be streamed with operator<< as a
+// comma-separated coordinate list.
+//
+// Stores only a reference: the wrapped Index must outlive the formatter
+// (fine here, where formatters are temporaries inside a stream expression).
+class TensorIndexFormatter
+{
+public:
+ TensorIndexFormatter(const nnfw::util::tensor::Index &index) : _index(index)
+ {
+ // DO NOTHING
+ }
+
+public:
+ const nnfw::util::tensor::Index &index(void) const { return _index; }
+
+private:
+ const nnfw::util::tensor::Index &_index;
+};
+
+// Streams the wrapped index as "i0, i1, ..., iN" (no brackets; the
+// caller supplies any surrounding decoration).
+std::ostream &operator<<(std::ostream &os, const TensorIndexFormatter &fmt)
+{
+  const auto rank = fmt.index().rank();
+
+  assert(rank > 0);
+
+  for (uint32_t axis = 0; axis < rank; ++axis)
+  {
+    // Separator goes before every coordinate except the first.
+    if (axis != 0)
+    {
+      os << ", ";
+    }
+
+    os << fmt.index().at(axis);
+  }
+
+  return os;
+}
+
struct TfLiteTensorDiff
{
- uint32_t offset;
+ // Coordinates of the mismatching element (replaces the flat offset).
+ nnfw::util::tensor::Index index;
float expected;
float obtained;
+
+ TfLiteTensorDiff(const nnfw::util::tensor::Index &i) : index(i)
+ {
+ // DO NOTHING
+ }
};
class TfLiteTensorComparator
}
public:
- std::vector<TfLiteTensorDiff> compare(const TfLiteTensor &expected, const TfLiteTensor &obtained) const;
+ // Element-wise comparison of two float tensor views; returns one diff
+ // record per element pair rejected by the comparison predicate.
+ std::vector<TfLiteTensorDiff> compare(const nnfw::support::tflite::TensorView<float> &expected,
+ const nnfw::support::tflite::TensorView<float> &obtained) const;
private:
std::function<bool (float lhs, float rhs)> _compare_fn;
};
-std::vector<TfLiteTensorDiff> TfLiteTensorComparator::compare(const TfLiteTensor &expected, const TfLiteTensor &obtained) const
+std::vector<TfLiteTensorDiff>
+TfLiteTensorComparator::compare(const nnfw::support::tflite::TensorView<float> &expected,
+ const nnfw::support::tflite::TensorView<float> &obtained) const
{
std::vector<TfLiteTensorDiff> res;
- assert(expected.type == kTfLiteFloat32);
- assert(obtained.type == kTfLiteFloat32);
- assert(expected.bytes == obtained.bytes);
-
- const auto count = expected.bytes / sizeof(float);
+ // TODO Compare shape
- for (uint32_t offset = 0; offset < count; ++offset)
+ // Visit every index of 'expected' and record a diff entry for each
+ // element pair that fails the comparison predicate.
+ // NOTE(review): iteration is driven by expected.shape() alone; if
+ // 'obtained' has a different shape this may read out of range -- the
+ // old byte-count assertion was dropped, see TODO above.
+ nnfw::util::tensor::iterate(expected.shape()) << [&] (const nnfw::util::tensor::Index &index)
{
- const auto expected_value = expected.data.f[offset];
- const auto obtained_value = obtained.data.f[offset];
+ const auto expected_value = expected.at(index);
+ const auto obtained_value = obtained.at(index);
if (!_compare_fn(expected_value, obtained_value))
{
- TfLiteTensorDiff diff;
+ TfLiteTensorDiff diff(index);
- diff.offset = offset;
diff.expected = expected_value;
diff.obtained = obtained_value;
for (const auto &id : pure->outputs())
{
- auto diffs = comparator.compare(*(pure->tensor(id)), *(delegated->tensor(id)));
+ // Build non-owning views over both interpreters' output tensors; the
+ // views stay valid because 'pure' and 'delegated' outlive this loop.
+ const auto expected = nnfw::support::tflite::TensorView<float>::make(*pure, id);
+ const auto obtained = nnfw::support::tflite::TensorView<float>::make(*delegated, id);
+
+ auto diffs = comparator.compare(expected, obtained);
if (diffs.size() == 0)
{
{
for (const auto &diff : diffs)
{
- std::cout << " Diff at offset " << diff.offset << std::endl;
+ std::cout << " Diff at [" << TensorIndexFormatter(diff.index) << "]" << std::endl;
std::cout << " expected: " << diff.expected << std::endl;
std::cout << " obtained: " << diff.obtained << std::endl;
std::cout << " relative diff: " << nnfw::util::fp32::relative_diff(diff.expected, diff.obtained) << std::endl;