Remove the old OutputTensorDumper/OutputTensorLoader and use TensorDumper/TensorLoader instead.
Signed-off-by: Hanjoung Lee <hanjoung.lee@samsung.com>
list(APPEND TFLITE_RUN_SRCS "src/operators.cc")
list(APPEND TFLITE_RUN_SRCS "src/bin_image.cc")
list(APPEND TFLITE_RUN_SRCS "src/args.cc")
-list(APPEND TFLITE_RUN_SRCS "src/output_tensor_dumper.cc")
-list(APPEND TFLITE_RUN_SRCS "src/output_tensor_loader.cc")
list(APPEND TFLITE_RUN_SRCS "src/tensor_dumper.cc")
list(APPEND TFLITE_RUN_SRCS "src/tensor_loader.cc")
+++ /dev/null
-#include "output_tensor_dumper.h"
-
-#include <fstream>
-#include <iostream>
-
-#include "tensorflow/contrib/lite/interpreter.h"
-
-namespace TFLiteRun
-{
-
-OutputTensorDumper::OutputTensorDumper(tflite::Interpreter &interpreter) : _interpreter(interpreter)
-{
- // DO NOTHING
-}
-
-void OutputTensorDumper::dump(const std::string &filename) const
-{
- // TODO Handle file open/write error
- std::ofstream file(filename, std::ios::out | std::ios::binary);
- for (const auto &o : _interpreter.outputs())
- {
- const TfLiteTensor *tensor = _interpreter.tensor(o);
- file.write(tensor->data.raw, tensor->bytes);
- }
- file.close();
-}
-
-} // end of namespace TFLiteRun
+++ /dev/null
-#ifndef __TFLITE_RUN_OUTPUT_TENSOR_DUMPER_H__
-#define __TFLITE_RUN_OUTPUT_TENSOR_DUMPER_H__
-
-#include <string>
-
-namespace tflite
-{
-class Interpreter;
-}
-
-namespace TFLiteRun
-{
-
-class OutputTensorDumper
-{
-public:
- OutputTensorDumper(tflite::Interpreter &interpreter);
- void dump(const std::string &filename) const;
-
-private:
- tflite::Interpreter &_interpreter;
-};
-
-} // end of namespace TFLiteRun
-
-#endif // __TFLITE_RUN_OUTPUT_TENSOR_DUMPER_H__
+++ /dev/null
-#include "output_tensor_loader.h"
-
-#include <assert.h>
-
-#include <fstream>
-
-#include "util/tensor/Shape.h"
-
-namespace TFLiteRun
-{
-
-OutputTensorLoader::OutputTensorLoader(tflite::Interpreter &interpreter)
- : _interpreter(interpreter), _raw_data(nullptr)
-{
-}
-
-void OutputTensorLoader::load(const std::string &filename)
-{
- // TODO Handle file open/read error
- std::ifstream file(filename, std::ios::ate | std::ios::binary);
- size_t file_size = file.tellg();
- file.seekg(0, std::ios::beg);
-
- _raw_data = std::unique_ptr<float>(new float[file_size]);
- file.read(reinterpret_cast<char *>(_raw_data.get()), file_size);
-
- size_t offset = 0;
- for (const auto &o : _interpreter.outputs())
- {
- const TfLiteTensor *tensor = _interpreter.tensor(o);
-
- // Convert tensor shape to `Shape` from `tensor->dims`
- nnfw::util::tensor::Shape shape(static_cast<size_t>(tensor->dims->size));
- for (int d = 0; d < tensor->dims->size; d++)
- {
- shape.dim(d) = tensor->dims->data[d];
- }
-
- float *base = _raw_data.get() + offset;
-
- assert(tensor->bytes % sizeof(float) == 0);
- offset += (tensor->bytes / sizeof(float));
-
- _tensor_map.insert(std::make_pair(o, nnfw::support::tflite::TensorView<float>(shape, base)));
- }
-
- // The file size and total output tensor size must match
- assert(file_size == offset * sizeof(float));
-
- file.close();
-}
-
-const nnfw::support::tflite::TensorView<float> &OutputTensorLoader::get(int tensor_idx) const
-{
- auto found = _tensor_map.find(tensor_idx);
- assert(found != _tensor_map.end());
- return found->second;
-}
-
-} // end of namespace TFLiteRun
+++ /dev/null
-#ifndef __TFLITE_RUN_OUTPUT_TENSOR_LOADER_H__
-#define __TFLITE_RUN_OUTPUT_TENSOR_LOADER_H__
-
-#include <sys/mman.h>
-
-#include <string>
-#include <unordered_map>
-
-#include "support/tflite/TensorView.h"
-
-namespace tflite
-{
-class Interpreter;
-}
-
-namespace TFLiteRun
-{
-
-class OutputTensorLoader
-{
-public:
- OutputTensorLoader(tflite::Interpreter &interpreter);
- void load(const std::string &filename);
- const nnfw::support::tflite::TensorView<float> &get(int tensor_idx) const;
- size_t getNumOutputs() const { return _tensor_map.size(); }
-
-private:
- tflite::Interpreter &_interpreter;
- std::unique_ptr<float> _raw_data;
- std::unordered_map<int, nnfw::support::tflite::TensorView<float>> _tensor_map;
-};
-
-} // end of namespace TFLiteRun
-
-#endif // __TFLITE_RUN_OUTPUT_TENSOR_LOADER_H__
#include "operators.h"
#include "bin_image.h"
#include "args.h"
-#include "output_tensor_dumper.h"
-#include "output_tensor_loader.h"
+#include "tensor_dumper.h"
+#include "tensor_loader.h"
#include "util/environment.h"
#include "util/fp32.h"
#include "support/tflite/Diff.h"
if (!args.getDumpFilename().empty())
{
const std::string &dump_filename = args.getDumpFilename();
- TFLiteRun::OutputTensorDumper output_dumper(*interpreter);
+ TFLiteRun::TensorDumper output_dumper(*interpreter);
output_dumper.dump(dump_filename);
std::cout << "Output tensors have been dumped to file \"" << dump_filename << "\"."
<< std::endl;
std::cout << "Comparing the results with \"" << compare_filename << "\"." << std::endl;
std::cout << "========================================" << std::endl;
- TFLiteRun::OutputTensorLoader output_loader(*interpreter);
+ TFLiteRun::TensorLoader output_loader(*interpreter);
output_loader.load(compare_filename);
// TODO Code duplication (copied from RandomTestRunner)