# Sources for the tflite_run executable.
list(APPEND TFLITE_RUN_SRCS "src/tflite_run.cc")
list(APPEND TFLITE_RUN_SRCS "src/operators.cc")
list(APPEND TFLITE_RUN_SRCS "src/bin_image.cc")
list(APPEND TFLITE_RUN_SRCS "src/args.cc")

add_executable(tflite_run ${TFLITE_RUN_SRCS})
target_include_directories(tflite_run PRIVATE src)

# PRIVATE: these libraries are implementation details of the executable and
# nothing links against an executable; the keyword-less target_link_libraries
# signature has legacy semantics and should not be mixed in.
target_link_libraries(tflite_run PRIVATE tensorflow_lite)
target_link_libraries(tflite_run PRIVATE boost_program_options boost_system boost_filesystem)

install(TARGETS tflite_run DESTINATION bin)
--- /dev/null
+#include "args.h"
+
+#include <iostream>
+
+#include <boost/filesystem.hpp>
+
+namespace TFLiteRun {
+
+Args::Args(const int argc, char** argv) {
+ Initialize();
+ Parse(argc, argv);
+}
+
+void Args::Initialize(void) {
+
+ // General options
+ po::options_description general("General options");
+ general.add_options()
+ ("help,h", "Display available options")
+ ("input,i", po::value<std::string>(&_input_filename)->default_value(""),
+ "Input filename");
+
+ _options.add(general);
+}
+
+void Args::Parse(const int argc, char** argv) {
+ po::variables_map vm;
+ po::store(po::parse_command_line(argc, argv, _options), vm);
+ po::notify(vm);
+
+ if(vm.count("help")) {
+ std::cout << "tflite_run\n\n";
+ std::cout << "Usage: " << argv[0] << " <.tflite> [<options>]\n\n";
+ std::cout << _options;
+ std::cout << "\n";
+
+ exit(0);
+ }
+
+ if( vm.count("input")) {
+ _input_filename = vm["input"].as<std::string>();
+
+ if( !_input_filename.empty() ) {
+ if( !boost::filesystem::exists(_input_filename) ) {
+ std::cerr << "input image file not found: " << _input_filename << "\n";
+ }
+ }
+ }
+}
+
+} // end of namespace TFLiteRun
--- /dev/null
+#ifndef __TFLITE_RUN_ARGS_H__
+#define __TFLITE_RUN_ARGS_H__
+
+#include <string>
+#include <boost/program_options.hpp>
+
+namespace po = boost::program_options;
+
+namespace TFLiteRun {
+
+class Args {
+public:
+ Args(const int argc, char** argv);
+ void print(void);
+
+ const std::string &getInputFilename(void) const { return _input_filename; }
+
+private:
+ void Initialize();
+ void Parse(const int argc, char** argv);
+
+private:
+ po::options_description _options;
+
+ std::string _input_filename;
+};
+
+} // end of namespace TFLiteRun
+
+#endif // __TFLITE_RUN_ARGS_H__
--- /dev/null
+#include <iostream>
+#include <fstream>
+
+#include "bin_image.h"
+
+BinImage::BinImage(unsigned int width, unsigned int height, unsigned int channels)
+ : _width(width), _height(height), _channels(channels)
+{
+}
+
+BinImage::~BinImage()
+{
+}
+
+void BinImage::loadImage(const std::string& filename)
+{
+ std::ifstream fin(filename);
+
+ if( !fin ) {
+ std::cerr << "image filename is not specified. "
+ << "Input image will not be set." << std::endl;
+ return;
+ }
+
+ _image.reserve(_width*_height*_channels);
+
+ // Assuption: binary image is stored in the order of [H,W,C]
+ for(unsigned int i = 0; i < _width*_height*_channels; ++i )
+ _image.push_back(fin.get());
+}
+
+void BinImage::AssignTensor(TfLiteTensor* t)
+{
+ float* p = t->data.f;
+ const int IMAGE_MEAN = 128;
+ const float IMAGE_STD = 128.0f;
+
+ // to prevent runtime exception
+ if( _image.size() < _width*_height*_channels ) {
+ std::cerr << "Input image size is smaller than the size required by the model."
+ << " Input will not be set." << std::endl;
+ return;
+ }
+
+ for(int x = 0; x < _width; ++x) {
+ for(int y = 0; y < _height; ++y) {
+ for(int c = 0; c < _channels; ++c) {
+ *p++ = (_image[ y*_width*_channels + x*_channels + c] - IMAGE_MEAN) / IMAGE_STD;
+ }
+ }
+ }
+}
--- /dev/null
+#ifndef __TFLITE_RUN_LIBJPEG_H__
+#define __TFLITE_RUN_LIBJPEG_H__
+
+#include <string>
+#include <vector>
+
+#include "tensorflow/contrib/lite/context.h"
+
+class BinImage
+{
+public:
+ BinImage(unsigned int width, unsigned int height, unsigned int channel);
+ ~BinImage();
+
+ void loadImage(const std::string& filename);
+
+ void AssignTensor(TfLiteTensor* t);
+
+private:
+ unsigned int _width;
+ unsigned int _height;
+ unsigned int _channels;
+
+ std::vector<unsigned char> _image;
+
+};
+
+#endif // __TFLITE_RUN_LIBJPEG_H__
#include "tensorflow/contrib/lite/model.h"
#include "operators.h"
+#include "bin_image.h"
+#include "args.h"
#include <iostream>
#include <chrono>
+#include <algorithm>
using namespace tflite;
using namespace tflite::ops::builtin;
// Prints "max:<index>" (no trailing newline) for the position of the largest
// value in f[0..size). For a non-positive size there is no maximum: print
// nothing instead of the bogus "max:0" the unguarded version produced.
void print_max_idx(float* f, int size) {
  if (size <= 0)
    return;
  float* p = std::max_element(f, f + size);
  std::cout << "max:" << (p - f);
}
+
// Benchmark support
namespace benchmark
{
} // namespace benchmark
-int main(int argc, char **argv)
+int main(const int argc, char **argv)
{
const auto filename = argv[1];
StderrReporter error_reporter;
+ TFLiteRun::Args args(argc, argv);
+
auto model = FlatBufferModel::BuildFromFile(filename, &error_reporter);
+ BinImage image(299, 299, 3);
+
+ if( args.getInputFilename().size() > 0 ) {
+ image.loadImage(args.getInputFilename());
+ }
std::unique_ptr<Interpreter> interpreter;
assert(status == kTfLiteOk);
};
- t_invoke.measure() << [&status, &interpreter](void)
+ t_invoke.measure() << [&status, &interpreter, &image](void)
{
+
+ std::cout << "input tensor indices = [";
+ for( const auto& o : interpreter->inputs() ) {
+ std::cout << o << ",";
+ }
+ std::cout << "]" << std::endl;
+
+ for( const auto& o : interpreter->inputs() ) {
+ image.AssignTensor(interpreter->tensor(o));
+ }
+
status = interpreter->Invoke();
assert(status == kTfLiteOk);
+
+ std::cout << "output tensor indices = [";
+ for( const auto& o : interpreter->outputs() ) {
+ std::cout << o << "(";
+
+ print_max_idx(interpreter->tensor(o)->data.f, interpreter->tensor(o)->bytes/sizeof(float));
+
+ std::cout << "),";
+ }
+ std::cout << "]" << std::endl;
+
};
std::cout << "Prepare takes " << t_prepare.count() << " seconds" << std::endl;