#include "tflite/Session.h"
#include "tflite/InterpreterSession.h"
#include "tflite/NNAPISession.h"
-#include "tflite/kernels/register.h"
+#include "tflite/ext/kernels/register.h"
#include "util/fp32.h"
auto lite_model = BuildModelFromFile(lite_model_path);
auto lite_interp = BuildInterpFromModel(lite_model);
- std::shared_ptr<nnfw::support::tflite::Session> lite_sess;
+ std::shared_ptr<nnfw::tflite::Session> lite_sess;
if (use_nnapi)
{
- lite_sess = std::make_shared<nnfw::support::tflite::NNAPISession>(lite_interp.get());
+ lite_sess = std::make_shared<nnfw::tflite::NNAPISession>(lite_interp.get());
}
else
{
- lite_sess = std::make_shared<nnfw::support::tflite::InterpreterSession>(lite_interp.get());
+ lite_sess = std::make_shared<nnfw::tflite::InterpreterSession>(lite_interp.get());
}
//
if (use_nnapi)
{
- _sess = std::make_shared<nnfw::support::tflite::NNAPISession>(_interpreter.get());
+ _sess = std::make_shared<nnfw::tflite::NNAPISession>(_interpreter.get());
}
else
{
- _sess = std::make_shared<nnfw::support::tflite::InterpreterSession>(_interpreter.get());
+ _sess = std::make_shared<nnfw::tflite::InterpreterSession>(_interpreter.get());
}
_sess->prepare();
#ifndef __TFLITE_CLASSIFY_INFERENCE_INTERFACE_H__
#define __TFLITE_CLASSIFY_INFERENCE_INTERFACE_H__
-#include "tflite/kernels/register.h"
+#include "tflite/ext/kernels/register.h"
#include "tensorflow/contrib/lite/model.h"
#include "tflite/InterpreterSession.h"
private:
std::unique_ptr<tflite::Interpreter> _interpreter;
std::unique_ptr<tflite::FlatBufferModel> _model;
- std::shared_ptr<nnfw::support::tflite::Session> _sess;
+ std::shared_ptr<nnfw::tflite::Session> _sess;
};
#endif // __TFLITE_CLASSIFY_INFERENCE_INTERFACE_H__
* @ingroup COM_AI_RUNTIME
*/
-#ifndef __NNFW_SUPPORT_TFLITE_ASSERT_H__
-#define __NNFW_SUPPORT_TFLITE_ASSERT_H__
+#ifndef __NNFW_TFLITE_ASSERT_H__
+#define __NNFW_TFLITE_ASSERT_H__
#include "tensorflow/contrib/lite/context.h"
} \
}
-#endif // __NNFW_SUPPORT_TFLITE_ASSERT_H__
+#endif // __NNFW_TFLITE_ASSERT_H__
* @ingroup COM_AI_RUNTIME
*/
-#ifndef __NNFW_SUPPORT_TFLITE_COMPARE_H__
-#define __NNFW_SUPPORT_TFLITE_COMPARE_H__
+#ifndef __NNFW_TFLITE_DIFF_H__
+#define __NNFW_TFLITE_DIFF_H__
#include "tensorflow/contrib/lite/interpreter.h"
* @return @c true if two TensorView values are same, otherwise @c false
*/
template <typename T>
- bool compareSingleTensorView(const nnfw::support::tflite::TensorView<T> &expected,
- const nnfw::support::tflite::TensorView<T> &obtained, int id) const;
+ bool compareSingleTensorView(const nnfw::tflite::TensorView<T> &expected,
+ const nnfw::tflite::TensorView<T> &obtained, int id) const;
private:
const nnfw::util::tensor::Comparator &_comparator;
* @param[in] builder Interpreter Builder used to run
* @return 0 if test succeeds, otherwise failure
*/
- int run(const nnfw::support::tflite::interp::Builder &builder);
+ int run(const nnfw::tflite::Builder &builder);
public:
/**
static RandomTestRunner make(int seed);
};
-#endif // __NNFW_SUPPORT_TFLITE_COMPARE_H__
+#endif // __NNFW_TFLITE_DIFF_H__
* @ingroup COM_AI_RUNTIME
*/
-#ifndef __NNFW_SUPPORT_TFLITE_FEATURE_VIEW_H__
-#define __NNFW_SUPPORT_TFLITE_FEATURE_VIEW_H__
+#ifndef __NNFW_TFLITE_FEATURE_VIEW_H__
+#define __NNFW_TFLITE_FEATURE_VIEW_H__
#include "tensorflow/contrib/lite/interpreter.h"
namespace nnfw
{
-namespace support
-{
namespace tflite
{
};
} // namespace tflite
-} // namespace support
} // namespace nnfw
-#endif // __NNFW_SUPPORT_TFLITE_FEATURE_VIEW_H__
+#endif // __NNFW_TFLITE_FEATURE_VIEW_H__
* @ingroup COM_AI_RUNTIME
*/
-#ifndef __NNFW_SUPPORT_TFLITE_INPUT_INDEX_H__
-#define __NNFW_SUPPORT_TFLITE_INPUT_INDEX_H__
+#ifndef __NNFW_TFLITE_INPUT_INDEX_H__
+#define __NNFW_TFLITE_INPUT_INDEX_H__
namespace nnfw
{
-namespace support
-{
namespace tflite
{
};
} // namespace tflite
-} // namespace support
} // namespace nnfw
-#endif // __NNFW_SUPPORT_TFLITE_INPUT_INDEX_H__
+#endif // __NNFW_TFLITE_INPUT_INDEX_H__
* @ingroup COM_AI_RUNTIME
*/
-#ifndef __NNFW_SUPPORT_TFLITE_INTERPRETER_SESSION_H__
-#define __NNFW_SUPPORT_TFLITE_INTERPRETER_SESSION_H__
+#ifndef __NNFW_TFLITE_INTERPRETER_SESSION_H__
+#define __NNFW_TFLITE_INTERPRETER_SESSION_H__
#include "Session.h"
namespace nnfw
{
-namespace support
-{
namespace tflite
{
};
} // namespace tflite
-} // namespace support
} // namespace nnfw
-#endif // __NNFW_SUPPORT_TFLITE_INTERPRETER_SESSION_H__
+#endif // __NNFW_TFLITE_INTERPRETER_SESSION_H__
* @ingroup COM_AI_RUNTIME
*/
-#ifndef __NNFW_SUPPORT_TFLITE_NNAPI_SESSION_H__
-#define __NNFW_SUPPORT_TFLITE_NNAPI_SESSION_H__
+#ifndef __NNFW_TFLITE_NNAPI_SESSION_H__
+#define __NNFW_TFLITE_NNAPI_SESSION_H__
#include "Session.h"
-#include "tflite/nnapi_delegate.h"
+#include "tflite/ext/nnapi_delegate.h"
namespace nnfw
{
-namespace support
-{
namespace tflite
{
};
} // namespace tflite
-} // namespace support
} // namespace nnfw
-#endif // __NNFW_SUPPORT_TFLITE_NNAPI_SESSION_H__
+#endif // __NNFW_TFLITE_NNAPI_SESSION_H__
* @ingroup COM_AI_RUNTIME
*/
-#ifndef __NNFW_SUPPORT_TFLITE_OUTPUT_INDEX_H__
-#define __NNFW_SUPPORT_TFLITE_OUTPUT_INDEX_H__
+#ifndef __NNFW_TFLITE_OUTPUT_INDEX_H__
+#define __NNFW_TFLITE_OUTPUT_INDEX_H__
namespace nnfw
{
-namespace support
-{
namespace tflite
{
};
} // namespace tflite
-} // namespace support
} // namespace nnfw
-#endif // __NNFW_SUPPORT_TFLITE_OUTPUT_INDEX_H__
+#endif // __NNFW_TFLITE_OUTPUT_INDEX_H__
* @ingroup COM_AI_RUNTIME
*/
-#ifndef __NNFW_SUPPORT_TFLITE_QUANTIZATION_H__
-#define __NNFW_SUPPORT_TFLITE_QUANTIZATION_H__
+#ifndef __NNFW_TFLITE_QUANTIZATION_H__
+#define __NNFW_TFLITE_QUANTIZATION_H__
/**
* @brief Union to provide bitwise conversion of integer and float
*/
TfLiteQuantizationParams make_default_quantization(void);
-#endif // __NNFW_SUPPORT_TFLITE_QUANTIZATION_H__
+#endif // __NNFW_TFLITE_QUANTIZATION_H__
* @ingroup COM_AI_RUNTIME
*/
-#ifndef __NNFW_SUPPORT_TFLITE_SESSION_H__
-#define __NNFW_SUPPORT_TFLITE_SESSION_H__
+#ifndef __NNFW_TFLITE_SESSION_H__
+#define __NNFW_TFLITE_SESSION_H__
#include <tensorflow/contrib/lite/interpreter.h>
namespace nnfw
{
-namespace support
-{
namespace tflite
{
};
} // namespace tflite
-} // namespace support
} // namespace nnfw
-#endif // __NNFW_SUPPORT_TFLITE_INTERP_SESSION_H__
+#endif // __NNFW_TFLITE_SESSION_H__
* @ingroup COM_AI_RUNTIME
*/
-#ifndef __NNFW_SUPPORT_TFLITE_TENSOR_LOGGER_H__
-#define __NNFW_SUPPORT_TFLITE_TENSOR_LOGGER_H__
+#ifndef __NNFW_TFLITE_TENSOR_LOGGER_H__
+#define __NNFW_TFLITE_TENSOR_LOGGER_H__
#include "util/tensor/IndexIterator.h"
#include "tflite/TensorView.h"
namespace nnfw
{
-namespace support
-{
namespace tflite
{
};
} // namespace tflite
-} // namespace support
} // namespace nnfw
-#endif // __NNFW_SUPPORT_TFLITE_TENSOR_LOGGER_H__
+#endif // __NNFW_TFLITE_TENSOR_LOGGER_H__
* @ingroup COM_AI_RUNTIME
*/
-#ifndef __NNFW_SUPPORT_TFLITE_TENSOR_SHAPE_UTILS_H__
-#define __NNFW_SUPPORT_TFLITE_TENSOR_SHAPE_UTILS_H__
+#ifndef __NNFW_TFLITE_TENSOR_SHAPE_UTILS_H__
+#define __NNFW_TFLITE_TENSOR_SHAPE_UTILS_H__
#include "util/tensor/Shape.h"
namespace nnfw
{
-namespace support
-{
namespace tflite
{
const nnfw::util::tensor::Shape &rhs_shape);
} // namespace tflite
-} // namespace support
} // namespace nnfw
-#endif // __NNFW_SUPPORT_TFLITE_TENSOR_SHAPE_UTILS_H__
+#endif // __NNFW_TFLITE_TENSOR_SHAPE_UTILS_H__
* @ingroup COM_AI_RUNTIME
*/
-#ifndef __NNFW_SUPPORT_TFLITE_TENSOR_UTILS_H__
-#define __NNFW_SUPPORT_TFLITE_TENSOR_UTILS_H__
+#ifndef __NNFW_TFLITE_TENSOR_UTILS_H__
+#define __NNFW_TFLITE_TENSOR_UTILS_H__
#include <tensorflow/contrib/lite/context.h>
namespace nnfw
{
-namespace support
-{
namespace tflite
{
}
} // namespace tflite
-} // namespace support
} // namespace nnfw
-#endif // __NNFW_SUPPORT_TFLITE_TENSOR_UTILS_H__
+#endif // __NNFW_TFLITE_TENSOR_UTILS_H__
* @ingroup COM_AI_RUNTIME
*/
-#ifndef __NNFW_SUPPORT_TFLITE_TENSOR_VIEW_H__
-#define __NNFW_SUPPORT_TFLITE_TENSOR_VIEW_H__
+#ifndef __NNFW_TFLITE_TENSOR_VIEW_H__
+#define __NNFW_TFLITE_TENSOR_VIEW_H__
#include "tensorflow/contrib/lite/interpreter.h"
namespace nnfw
{
-namespace support
-{
namespace tflite
{
};
} // namespace tflite
-} // namespace support
} // namespace nnfw
-#endif // __NNFW_SUPPORT_TFLITE_TENSOR_VIEW_H__
+#endif // __NNFW_TFLITE_TENSOR_VIEW_H__
* limitations under the License.
*/
-#ifndef __NNFW_SUPPORT_TFLITE_KERNELS_ABS_H__
-#define __NNFW_SUPPORT_TFLITE_KERNELS_ABS_H__
+#ifndef __NNFW_TFLITE_EXT_KERNELS_ABS_H__
+#define __NNFW_TFLITE_EXT_KERNELS_ABS_H__
#include "tensorflow/contrib/lite/context.h"
} // namespace ops
} // namespace tflite
-#endif
+#endif // __NNFW_TFLITE_EXT_KERNELS_ABS_H__
* @ingroup COM_AI_RUNTIME
*/
-#ifndef __NNFW_SUPPORT_TFLITE_KERNELS_CUSTOM_OP_H__
-#define __NNFW_SUPPORT_TFLITE_KERNELS_CUSTOM_OP_H__
+#ifndef __NNFW_TFLITE_EXT_KERNELS_CUSTOM_OP_H__
+#define __NNFW_TFLITE_EXT_KERNELS_CUSTOM_OP_H__
#include "tensorflow/contrib/lite/context.h"
-#include "tflite/kernels/TensorFlowMax.h"
-#include "tflite/kernels/SquaredDifference.h"
-#include "tflite/kernels/TensorFlowSum.h"
-#include "tflite/kernels/Abs.h"
+#include "tflite/ext/kernels/TensorFlowMax.h"
+#include "tflite/ext/kernels/SquaredDifference.h"
+#include "tflite/ext/kernels/TensorFlowSum.h"
+#include "tflite/ext/kernels/Abs.h"
namespace tflite
{
} // namespace ops
} // namespace tflite
-#endif // __NNFW_SUPPORT_TFLITE_KERNELS_CUSTOM_OP_H__
+#endif // __NNFW_TFLITE_EXT_KERNELS_CUSTOM_OP_H__
* @ingroup COM_AI_RUNTIME
*/
-#ifndef __NNFW_SUPPORT_TFLITE_KERNELS_SQUARED_DIFFERENCE_H__
-#define __NNFW_SUPPORT_TFLITE_KERNELS_SQUARED_DIFFERENCE_H__
+#ifndef __NNFW_TFLITE_EXT_KERNELS_SQUARED_DIFFERENCE_H__
+#define __NNFW_TFLITE_EXT_KERNELS_SQUARED_DIFFERENCE_H__
#include "tensorflow/contrib/lite/context.h"
} // namespace ops
} // namespace tflite
-#endif
+#endif // __NNFW_TFLITE_EXT_KERNELS_SQUARED_DIFFERENCE_H__
* @ingroup COM_AI_RUNTIME
*/
-#ifndef __NNFW_SUPPORT_TFLITE_KERNELS_TENSORFLOW_MAX_H__
-#define __NNFW_SUPPORT_TFLITE_KERNELS_TENSORFLOW_MAX_H__
+#ifndef __NNFW_TFLITE_EXT_KERNELS_TENSORFLOW_MAX_H__
+#define __NNFW_TFLITE_EXT_KERNELS_TENSORFLOW_MAX_H__
#include "tensorflow/contrib/lite/context.h"
} // namespace ops
} // namespace tflite
-#endif
+#endif // __NNFW_TFLITE_EXT_KERNELS_TENSORFLOW_MAX_H__
* limitations under the License.
*/
-#ifndef __NNFW_SUPPORT_TFLITE_KERNELS_TENSORFLOW_SUM_H__
-#define __NNFW_SUPPORT_TFLITE_KERNELS_TENSORFLOW_SUM_H__
+#ifndef __NNFW_TFLITE_EXT_KERNELS_TENSORFLOW_SUM_H__
+#define __NNFW_TFLITE_EXT_KERNELS_TENSORFLOW_SUM_H__
#include "tensorflow/contrib/lite/context.h"
} // namespace ops
} // namespace tflite
-#endif
+#endif // __NNFW_TFLITE_EXT_KERNELS_TENSORFLOW_SUM_H__
// NOTE This header is derived from the following file (in TensorFlow)
// 'externals/tensorflow/tensorflow/contrib/lite/kernels/register.h'
-#ifndef __NNFW_SUPPORT_TFLITE_KERNELS_REGISTER_H__
-#define __NNFW_SUPPORT_TFLITE_KERNELS_REGISTER_H__
+#ifndef __NNFW_TFLITE_EXT_KERNELS_REGISTER_H__
+#define __NNFW_TFLITE_EXT_KERNELS_REGISTER_H__
#include <unordered_map>
#include "tensorflow/contrib/lite/context.h"
} // namespace ops
} // namespace tflite
-#endif // __NNFW_SUPPORT_TFLITE_KERNELS_REGISTER_H__
+#endif // __NNFW_TFLITE_EXT_KERNELS_REGISTER_H__
// clang-format on
// NOTE This header is derived from the following file (in TensorFlow v1.12)
// 'externals/tensorflow/tensorflow/contrib/lite/nnapi_delegate.h'
-#ifndef __NNFW_SUPPORT_TFLITE_NNAPI_DELEGATE_H__
-#define __NNFW_SUPPORT_TFLITE_NNAPI_DELEGATE_H__
+#ifndef __NNFW_TFLITE_EXT_NNAPI_DELEGATE_H__
+#define __NNFW_TFLITE_EXT_NNAPI_DELEGATE_H__
#include "tensorflow/contrib/lite/allocation.h"
#ifdef OBS_BUILD
namespace nnfw {
-class NNAPIAllocation : public tflite::MMAPAllocation {
+class NNAPIAllocation : public ::tflite::MMAPAllocation {
public:
NNAPIAllocation(const char* filename, ::tflite::ErrorReporter* error_reporter);
~NNAPIAllocation();
} // namespace nnfw
-#endif // TENSORFLOW_CONTRIB_LITE_NNAPI_DELEGATE_H_
+#endif // __NNFW_TFLITE_EXT_NNAPI_DELEGATE_H__
// clang-format on
* @ingroup COM_AI_RUNTIME
*/
-#ifndef __NNFW_SUPPORT_TFLITE_INTERP_BUILDER_H__
-#define __NNFW_SUPPORT_TFLITE_INTERP_BUILDER_H__
+#ifndef __NNFW_TFLITE_INTERP_BUILDER_H__
+#define __NNFW_TFLITE_INTERP_BUILDER_H__
#include <tensorflow/contrib/lite/interpreter.h>
namespace nnfw
{
-namespace support
-{
namespace tflite
{
-namespace interp
-{
/**
* @brief Structure to Builder
virtual std::unique_ptr<::tflite::Interpreter> build(void) const = 0;
};
-} // namespace interp
} // namespace tflite
-} // namespace support
} // namespace nnfw
-#endif // __NNFW_SUPPORT_TFLITE_INTERP_BUILDER_H__
+#endif // __NNFW_TFLITE_INTERP_BUILDER_H__
* @ingroup COM_AI_RUNTIME
*/
-#ifndef __NNFW_SUPPORT_TFLITE_INTERP_FLAT_BUFFER_BUILDER_H__
-#define __NNFW_SUPPORT_TFLITE_INTERP_FLAT_BUFFER_BUILDER_H__
+#ifndef __NNFW_TFLITE_INTERP_FLAT_BUFFER_BUILDER_H__
+#define __NNFW_TFLITE_INTERP_FLAT_BUFFER_BUILDER_H__
#include <tensorflow/contrib/lite/model.h>
namespace nnfw
{
-namespace support
-{
namespace tflite
{
-namespace interp
-{
/**
* @brief Class to define FlatBufferBuilder which is inherited from Builder
const ::tflite::FlatBufferModel &_model;
};
-} // namespace interp
} // namespace tflite
-} // namespace support
} // namespace nnfw
-#endif // __NNFW_SUPPORT_TFLITE_INTERP_FLAT_BUFFER_BUILDER_H__
+#endif // __NNFW_TFLITE_INTERP_FLAT_BUFFER_BUILDER_H__
* @ingroup COM_AI_RUNTIME
*/
-#ifndef __NNFW_SUPPORT_TFLITE_INTERP_FUNCTION_BUILDER_H__
-#define __NNFW_SUPPORT_TFLITE_INTERP_FUNCTION_BUILDER_H__
+#ifndef __NNFW_TFLITE_INTERP_FUNCTION_BUILDER_H__
+#define __NNFW_TFLITE_INTERP_FUNCTION_BUILDER_H__
#include <tensorflow/contrib/lite/model.h>
namespace nnfw
{
-namespace support
-{
namespace tflite
{
-namespace interp
-{
/**
* @brief Class to define FunctionBuilder which is inherited from Builder
SetupFunc _fn;
};
-} // namespace interp
} // namespace tflite
-} // namespace support
} // namespace nnfw
-#endif // __NNFW_SUPPORT_TFLITE_INTERP_FUNCTION_BUILDER_H__
+#endif // __NNFW_TFLITE_INTERP_FUNCTION_BUILDER_H__
*/
#include "tflite/Diff.h"
-#include "tflite/nnapi_delegate.h"
+#include "tflite/ext/nnapi_delegate.h"
#include "util/fp32.h"
}
template <typename T>
-bool TfLiteInterpMatchApp::compareSingleTensorView(
- const nnfw::support::tflite::TensorView<T> &expected,
- const nnfw::support::tflite::TensorView<T> &obtained, int id) const
+bool TfLiteInterpMatchApp::compareSingleTensorView(const nnfw::tflite::TensorView<T> &expected,
+ const nnfw::tflite::TensorView<T> &obtained,
+ int id) const
{
std::vector<nnfw::util::tensor::Diff<T>> diffs;
assert(expected.shape() == obtained.shape());
template <>
bool TfLiteInterpMatchApp::compareSingleTensorView<float>(
- const nnfw::support::tflite::TensorView<float> &expected,
- const nnfw::support::tflite::TensorView<float> &obtained, int id) const
+ const nnfw::tflite::TensorView<float> &expected,
+ const nnfw::tflite::TensorView<float> &obtained, int id) const
{
DiffSummary summary;
comparators[kTfLiteUInt8] = [this](int id, ::tflite::Interpreter &interp,
::tflite::Interpreter &nnapi) {
- const auto expected = nnfw::support::tflite::TensorView<uint8_t>::make(interp, id);
- const auto obtained = nnfw::support::tflite::TensorView<uint8_t>::make(nnapi, id);
+ const auto expected = nnfw::tflite::TensorView<uint8_t>::make(interp, id);
+ const auto obtained = nnfw::tflite::TensorView<uint8_t>::make(nnapi, id);
return compareSingleTensorView(expected, obtained, id);
};
comparators[kTfLiteInt32] = [this](int id, ::tflite::Interpreter &interp,
::tflite::Interpreter &nnapi) {
- const auto expected = nnfw::support::tflite::TensorView<int32_t>::make(interp, id);
- const auto obtained = nnfw::support::tflite::TensorView<int32_t>::make(nnapi, id);
+ const auto expected = nnfw::tflite::TensorView<int32_t>::make(interp, id);
+ const auto obtained = nnfw::tflite::TensorView<int32_t>::make(nnapi, id);
return compareSingleTensorView(expected, obtained, id);
};
comparators[kTfLiteFloat32] = [this](int id, ::tflite::Interpreter &interp,
::tflite::Interpreter &nnapi) {
- const auto expected = nnfw::support::tflite::TensorView<float>::make(interp, id);
- const auto obtained = nnfw::support::tflite::TensorView<float>::make(nnapi, id);
+ const auto expected = nnfw::tflite::TensorView<float>::make(interp, id);
+ const auto obtained = nnfw::tflite::TensorView<float>::make(nnapi, id);
return compareSingleTensorView(expected, obtained, id);
};
comparators[kTfLiteBool] = [this](int id, ::tflite::Interpreter &interp,
::tflite::Interpreter &nnapi) {
- const auto expected = nnfw::support::tflite::TensorView<bool>::make(interp, id);
- const auto obtained = nnfw::support::tflite::TensorView<bool>::make(nnapi, id);
+ const auto expected = nnfw::tflite::TensorView<bool>::make(interp, id);
+ const auto obtained = nnfw::tflite::TensorView<bool>::make(nnapi, id);
return compareSingleTensorView(expected, obtained, id);
};
//
// Random Test Runner
//
-int RandomTestRunner::run(const nnfw::support::tflite::interp::Builder &builder)
+int RandomTestRunner::run(const nnfw::tflite::Builder &builder)
{
auto tfl_interp = builder.build();
auto nnapi = builder.build();
assert(tfl_interp->tensor(id)->type == kTfLiteInt32);
assert(nnapi->tensor(id)->type == kTfLiteInt32);
- auto tfl_interp_view = nnfw::support::tflite::TensorView<int32_t>::make(*tfl_interp, id);
- auto nnapi_view = nnfw::support::tflite::TensorView<int32_t>::make(*nnapi, id);
+ auto tfl_interp_view = nnfw::tflite::TensorView<int32_t>::make(*tfl_interp, id);
+ auto nnapi_view = nnfw::tflite::TensorView<int32_t>::make(*nnapi, id);
assert(tfl_interp_view.shape() == nnapi_view.shape());
assert(tfl_interp->tensor(id)->type == kTfLiteInt32);
assert(nnapi->tensor(id)->type == kTfLiteInt32);
- auto tfl_interp_view = nnfw::support::tflite::TensorView<int32_t>::make(*tfl_interp, id);
- auto nnapi_view = nnfw::support::tflite::TensorView<int32_t>::make(*nnapi, id);
+ auto tfl_interp_view = nnfw::tflite::TensorView<int32_t>::make(*tfl_interp, id);
+ auto nnapi_view = nnfw::tflite::TensorView<int32_t>::make(*nnapi, id);
assert(tfl_interp_view.shape() == nnapi_view.shape());
assert(tfl_interp->tensor(id)->type == kTfLiteUInt8);
assert(nnapi->tensor(id)->type == kTfLiteUInt8);
- auto tfl_interp_view = nnfw::support::tflite::TensorView<uint8_t>::make(*tfl_interp, id);
- auto nnapi_view = nnfw::support::tflite::TensorView<uint8_t>::make(*nnapi, id);
+ auto tfl_interp_view = nnfw::tflite::TensorView<uint8_t>::make(*tfl_interp, id);
+ auto nnapi_view = nnfw::tflite::TensorView<uint8_t>::make(*nnapi, id);
assert(tfl_interp_view.shape() == nnapi_view.shape());
assert(tfl_interp->tensor(id)->type == kTfLiteUInt8);
assert(nnapi->tensor(id)->type == kTfLiteUInt8);
- auto tfl_interp_view = nnfw::support::tflite::TensorView<uint8_t>::make(*tfl_interp, id);
- auto nnapi_view = nnfw::support::tflite::TensorView<uint8_t>::make(*nnapi, id);
+ auto tfl_interp_view = nnfw::tflite::TensorView<uint8_t>::make(*tfl_interp, id);
+ auto nnapi_view = nnfw::tflite::TensorView<uint8_t>::make(*nnapi, id);
assert(tfl_interp_view.shape() == nnapi_view.shape());
assert(tfl_interp->tensor(id)->type == kTfLiteFloat32);
assert(nnapi->tensor(id)->type == kTfLiteFloat32);
- auto tfl_interp_view = nnfw::support::tflite::TensorView<float>::make(*tfl_interp, id);
- auto nnapi_view = nnfw::support::tflite::TensorView<float>::make(*nnapi, id);
+ auto tfl_interp_view = nnfw::tflite::TensorView<float>::make(*tfl_interp, id);
+ auto nnapi_view = nnfw::tflite::TensorView<float>::make(*nnapi, id);
assert(tfl_interp_view.shape() == nnapi_view.shape());
assert(tfl_interp->tensor(id)->type == kTfLiteFloat32);
assert(nnapi->tensor(id)->type == kTfLiteFloat32);
- auto tfl_interp_view = nnfw::support::tflite::TensorView<float>::make(*tfl_interp, id);
- auto nnapi_view = nnfw::support::tflite::TensorView<float>::make(*nnapi, id);
+ auto tfl_interp_view = nnfw::tflite::TensorView<float>::make(*tfl_interp, id);
+ auto nnapi_view = nnfw::tflite::TensorView<float>::make(*nnapi, id);
assert(tfl_interp_view.shape() == nnapi_view.shape());
assert(tfl_interp->tensor(id)->type == kTfLiteBool);
assert(nnapi->tensor(id)->type == kTfLiteBool);
- auto tfl_interp_view = nnfw::support::tflite::TensorView<bool>::make(*tfl_interp, id);
- auto nnapi_view = nnfw::support::tflite::TensorView<bool>::make(*nnapi, id);
+ auto tfl_interp_view = nnfw::tflite::TensorView<bool>::make(*tfl_interp, id);
+ auto nnapi_view = nnfw::tflite::TensorView<bool>::make(*nnapi, id);
assert(tfl_interp_view.shape() == nnapi_view.shape());
assert(tfl_interp->tensor(id)->type == kTfLiteBool);
assert(nnapi->tensor(id)->type == kTfLiteBool);
- auto tfl_interp_view = nnfw::support::tflite::TensorView<bool>::make(*tfl_interp, id);
- auto nnapi_view = nnfw::support::tflite::TensorView<bool>::make(*nnapi, id);
+ auto tfl_interp_view = nnfw::tflite::TensorView<bool>::make(*tfl_interp, id);
+ auto nnapi_view = nnfw::tflite::TensorView<bool>::make(*nnapi, id);
assert(tfl_interp_view.shape() == nnapi_view.shape());
std::cout << "[NNAPI TEST] PASSED" << std::endl;
if (_param.tensor_logging)
- nnfw::support::tflite::TensorLogger::instance().save(_param.log_path, *tfl_interp);
+ nnfw::tflite::TensorLogger::instance().save(_param.log_path, *tfl_interp);
return 0;
}
namespace nnfw
{
-namespace support
-{
namespace tflite
{
}
} // namespace tflite
-} // namespace support
} // namespace nnfw
namespace nnfw
{
-namespace support
-{
namespace tflite
{
}
} // namespace tflite
-} // namespace support
} // namespace nnfw
int value[6] = {1, 2, 3, 4, 5, 6};
const nnfw::util::tensor::Shape shape{2, 3};
- const nnfw::support::tflite::TensorView<int> view{shape, value};
+ const nnfw::tflite::TensorView<int> view{shape, value};
assert(view.at(nnfw::util::tensor::Index{0, 0}) == 1);
assert(view.at(nnfw::util::tensor::Index{0, 1}) == 2);
float value[6] = {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f};
const nnfw::util::tensor::Shape shape{2, 3};
- const nnfw::support::tflite::TensorView<float> view{shape, value};
+ const nnfw::tflite::TensorView<float> view{shape, value};
assert(view.at(nnfw::util::tensor::Index{0, 0}) == 1.0f);
assert(view.at(nnfw::util::tensor::Index{0, 1}) == 2.0f);
* limitations under the License.
*/
-#include "tflite/kernels/Abs.h"
+#include "tflite/ext/kernels/Abs.h"
#include "tensorflow/contrib/lite/kernels/kernel_util.h"
#include <iostream>
* limitations under the License.
*/
-#include "tflite/kernels/SquaredDifference.h"
+#include "tflite/ext/kernels/SquaredDifference.h"
#include "tensorflow/contrib/lite/kernels/kernel_util.h"
#include <iostream>
* limitations under the License.
*/
-#include "tflite/kernels/TensorFlowMax.h"
+#include "tflite/ext/kernels/TensorFlowMax.h"
#include "tensorflow/contrib/lite/kernels/kernel_util.h"
#include <iostream>
* limitations under the License.
*/
-#include "tflite/kernels/TensorFlowSum.h"
+#include "tflite/ext/kernels/TensorFlowSum.h"
#include "tensorflow/contrib/lite/kernels/kernel_util.h"
#include <iostream>
// NOTE This code is derived from the following file (in TensorFlow)
// 'externals/tensorflow/tensorflow/contrib/lite/kernels/register.cc'
-#include "tflite/kernels/register.h"
-#include "tflite/kernels/CustomOps.h"
+#include "tflite/ext/kernels/register.h"
+#include "tflite/ext/kernels/CustomOps.h"
// TODO Use namespace nnfw
namespace tflite
// NOTE This code is derived from the following file (in TensorFlow v1.12)
// 'externals/tensorflow/tensorflow/contrib/lite/nnapi_delegate.cc'
-#include "tflite/nnapi_delegate.h"
+#include "tflite/ext/nnapi_delegate.h"
#include <fcntl.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include "tflite/interp/FlatBufferBuilder.h"
-#include "tflite/kernels/register.h"
+#include "tflite/ext/kernels/register.h"
namespace nnfw
{
-namespace support
-{
namespace tflite
{
-namespace interp
-{
std::unique_ptr<::tflite::Interpreter> FlatBufferBuilder::build(void) const
{
return std::move(interpreter);
}
-} // namespace interp
} // namespace tflite
-} // namespace support
} // namespace nnfw
namespace nnfw
{
-namespace support
-{
namespace tflite
{
-namespace interp
-{
std::unique_ptr<::tflite::Interpreter> FunctionBuilder::build(void) const
{
return std::move(res);
}
-} // namespace interp
} // namespace tflite
-} // namespace support
} // namespace nnfw
* limitations under the License.
*/
-#include "tflite/kernels/register.h"
+#include "tflite/ext/kernels/register.h"
#include "tensorflow/contrib/lite/model.h"
#include "tflite/interp/FlatBufferBuilder.h"
auto model = FlatBufferModel::BuildFromFile(filename, &error_reporter);
- const nnfw::support::tflite::interp::FlatBufferBuilder builder(*model);
+ const nnfw::tflite::FlatBufferBuilder builder(*model);
try
{
* limitations under the License.
*/
-#include "tflite/kernels/register.h"
+#include "tflite/ext/kernels/register.h"
#include "tensorflow/contrib/lite/model.h"
#include "tflite/Assert.h"
interpreter->SetNumThreads(thread_count);
- std::shared_ptr<nnfw::support::tflite::Session> sess;
+ std::shared_ptr<nnfw::tflite::Session> sess;
if (use_nnapi)
{
- sess = std::make_shared<nnfw::support::tflite::NNAPISession>(interpreter.get());
+ sess = std::make_shared<nnfw::tflite::NNAPISession>(interpreter.get());
}
else
{
- sess = std::make_shared<nnfw::support::tflite::InterpreterSession>(interpreter.get());
+ sess = std::make_shared<nnfw::tflite::InterpreterSession>(interpreter.get());
}
//
if (tensor->type == kTfLiteInt32)
{
// Generate singed 32-bit integer (s32) input
- auto tensor_view = nnfw::support::tflite::TensorView<int32_t>::make(*interpreter, id);
+ auto tensor_view = nnfw::tflite::TensorView<int32_t>::make(*interpreter, id);
int32_t value = 0;
else if (tensor->type == kTfLiteUInt8)
{
// Generate unsigned 8-bit integer input
- auto tensor_view = nnfw::support::tflite::TensorView<uint8_t>::make(*interpreter, id);
+ auto tensor_view = nnfw::tflite::TensorView<uint8_t>::make(*interpreter, id);
uint8_t value = 0;
#ifdef TFLITE_FLEX
#include "tensorflow/contrib/lite/delegates/flex/delegate.h"
#endif // TFLITE_FLEX
-#include "tflite/kernels/register.h"
+#include "tflite/ext/kernels/register.h"
#include "tensorflow/contrib/lite/model.h"
#include "tensorflow/contrib/lite/op_resolver.h"
#include "tensorflow/contrib/lite/string_util.h"
// For profiling nnapi_delegate
#include "util/profiling/profiling.h"
-#include "tflite/nnapi_delegate.h"
+#include "tflite/ext/nnapi_delegate.h"
namespace {
nnfw::NNAPIDelegate nnfw_delegate_;
* limitations under the License.
*/
-#include "tflite/kernels/register.h"
+#include "tflite/ext/kernels/register.h"
#include "tensorflow/contrib/lite/model.h"
#include "tensorflow/contrib/lite/builtin_op_data.h"
assert(tensor->bytes % sizeof(float) == 0);
offset += (tensor->bytes / sizeof(float));
- _tensor_map.insert(std::make_pair(o, nnfw::support::tflite::TensorView<float>(shape, base)));
+ _tensor_map.insert(std::make_pair(o, nnfw::tflite::TensorView<float>(shape, base)));
}
// The file size and total output tensor size must match
file.close();
}
-const nnfw::support::tflite::TensorView<float> &TensorLoader::get(int tensor_idx) const
+const nnfw::tflite::TensorView<float> &TensorLoader::get(int tensor_idx) const
{
auto found = _tensor_map.find(tensor_idx);
assert(found != _tensor_map.end());
public:
TensorLoader(tflite::Interpreter &interpreter);
void load(const std::string &filename);
- const nnfw::support::tflite::TensorView<float> &get(int tensor_idx) const;
+ const nnfw::tflite::TensorView<float> &get(int tensor_idx) const;
size_t getNums() const { return _tensor_map.size(); }
private:
tflite::Interpreter &_interpreter;
std::unique_ptr<float> _raw_data;
- std::unordered_map<int, nnfw::support::tflite::TensorView<float>> _tensor_map;
+ std::unordered_map<int, nnfw::tflite::TensorView<float>> _tensor_map;
};
} // end of namespace TFLiteRun
* limitations under the License.
*/
-#include "tflite/kernels/register.h"
+#include "tflite/ext/kernels/register.h"
#include "tensorflow/contrib/lite/model.h"
#include "bin_image.h"
interpreter->SetNumThreads(1);
};
- std::shared_ptr<nnfw::support::tflite::Session> sess;
+ std::shared_ptr<nnfw::tflite::Session> sess;
if (use_nnapi)
{
- sess = std::make_shared<nnfw::support::tflite::NNAPISession>(interpreter.get());
+ sess = std::make_shared<nnfw::tflite::NNAPISession>(interpreter.get());
}
else
{
- sess = std::make_shared<nnfw::support::tflite::InterpreterSession>(interpreter.get());
+ sess = std::make_shared<nnfw::tflite::InterpreterSession>(interpreter.get());
}
sess->prepare();
if (tensor->type == kTfLiteInt32)
{
// Generate singed 32-bit integer (s32) input
- auto tensor_view = nnfw::support::tflite::TensorView<int32_t>::make(*interpreter, o);
+ auto tensor_view = nnfw::tflite::TensorView<int32_t>::make(*interpreter, o);
int32_t value = 0;
else if (tensor->type == kTfLiteUInt8)
{
// Generate unsigned 8-bit integer input
- auto tensor_view = nnfw::support::tflite::TensorView<uint8_t>::make(*interpreter, o);
+ auto tensor_view = nnfw::tflite::TensorView<uint8_t>::make(*interpreter, o);
uint8_t value = 0;
for (const auto &o : interpreter->outputs())
{
auto expected = tensor_loader.get(o);
- auto obtained = nnfw::support::tflite::TensorView<float>::make(*interpreter, o);
+ auto obtained = nnfw::tflite::TensorView<float>::make(*interpreter, o);
res = res && app.compareSingleTensorView(expected, obtained, o);
}