Introduce (Dummy) Custom Operators
author: Jonghyun Park <jh1302.park@samsung.com>
Tue, 6 Mar 2018 01:33:00 +0000 (10:33 +0900)
committer: 박세희/동작제어Lab(SR)/Principal Engineer/삼성전자 <saehie.park@samsung.com>
Tue, 6 Mar 2018 03:08:11 +0000 (12:08 +0900)
This commit introduces dummy custom operators.

Signed-off-by: Jonghyun Park <jh1302.park@samsung.com>
tools/tflite_run/CMakeLists.txt
tools/tflite_run/src/operators.cc [new file with mode: 0644]
tools/tflite_run/src/operators.h [new file with mode: 0644]
tools/tflite_run/src/tflite_run.cc

index 0c513a2..f4cbb31 100644 (file)
@@ -1,6 +1,8 @@
 list(APPEND TFLITE_RUN_SRCS "src/tflite_run.cc")
+list(APPEND TFLITE_RUN_SRCS "src/operators.cc")
 
 add_executable(tflite_run ${TFLITE_RUN_SRCS})
+target_include_directories(tflite_run PRIVATE src)
 target_link_libraries(tflite_run tensorflow_lite)
 
 install(TARGETS tflite_run DESTINATION bin)
diff --git a/tools/tflite_run/src/operators.cc b/tools/tflite_run/src/operators.cc
new file mode 100644 (file)
index 0000000..d01383a
--- /dev/null
@@ -0,0 +1,168 @@
+#include "tensorflow/contrib/lite/context.h"
+
+#include <iostream>
+
+//
+// Cast
+//
+namespace {
+
+struct CASTOp
+{
+};
+
+void* InitCAST(TfLiteContext* context, const char* buffer, size_t length)
+{
+  std::cerr << __FUNCTION__ << "(length: " << length << ")" << std::endl;
+  // TODO Implement this!
+  return new CASTOp;
+}
+
+void FreeCAST(TfLiteContext *context, void *buffer)
+{
+  // TODO Implement this!
+  delete static_cast<CASTOp *>(buffer);
+}
+
+TfLiteStatus PrepareCAST(TfLiteContext* context, TfLiteNode* node)
+{
+  std::cerr << __FUNCTION__ << "(...)" << std::endl;
+  // TODO Implement this!
+  return kTfLiteOk;
+}
+
+TfLiteStatus EvalCAST(TfLiteContext* context, TfLiteNode* node)
+{
+  std::cerr << __FUNCTION__ << "(...)" << std::endl;
+  // TODO Implement this!
+  return kTfLiteError;
+}
+
+}
+
+//
+// Stack
+//
+namespace {
+
+struct StackOp
+{
+};
+
+void* InitStack(TfLiteContext* context, const char* buffer, size_t length)
+{
+  std::cerr << __FUNCTION__ << "(length: " << length << ")" << std::endl;
+  // TODO Implement this!
+  return new StackOp;
+}
+
+void FreeStack(TfLiteContext *context, void *buffer)
+{
+  // TODO Implement this!
+  delete static_cast<StackOp *>(buffer);
+}
+
+TfLiteStatus PrepareStack(TfLiteContext* context, TfLiteNode* node)
+{
+  std::cerr << __FUNCTION__ << "(...)" << std::endl;
+  // TODO Implement this!
+  return kTfLiteOk;
+}
+
+TfLiteStatus EvalStack(TfLiteContext* context, TfLiteNode* node)
+{
+  std::cerr << __FUNCTION__ << "(...)" << std::endl;
+  // TODO Implement this!
+  return kTfLiteError;
+}
+
+}
+
+//
+// ArgMax
+//
+namespace {
+
+struct ArgMaxOp
+{
+};
+
+void* InitArgMax(TfLiteContext* context, const char* buffer, size_t length)
+{
+  std::cerr << __FUNCTION__ << "(length: " << length << ")" << std::endl;
+  // TODO Implement this!
+  return new ArgMaxOp;
+}
+
+void FreeArgMax(TfLiteContext *context, void *buffer)
+{
+  // TODO Implement this!
+  delete static_cast<ArgMaxOp *>(buffer);
+}
+
+TfLiteStatus PrepareArgMax(TfLiteContext* context, TfLiteNode* node)
+{
+  std::cerr << __FUNCTION__ << "(...)" << std::endl;
+  // TODO Implement this!
+  return kTfLiteOk;
+}
+
+TfLiteStatus EvalArgMax(TfLiteContext* context, TfLiteNode* node)
+{
+  std::cerr << __FUNCTION__ << "(...)" << std::endl;
+  // TODO Implement this!
+  return kTfLiteError;
+}
+
+}
+
+//
+// TensorFlowMax
+//
+namespace {
+
+struct TensorFlowMaxOp
+{
+};
+
+void* InitTensorFlowMax(TfLiteContext* context, const char* buffer, size_t length)
+{
+  std::cerr << __FUNCTION__ << "(length: " << length << ")" << std::endl;
+  // TODO Implement this!
+  return new TensorFlowMaxOp;
+}
+
+void FreeTensorFlowMax(TfLiteContext *context, void *buffer)
+{
+  // TODO Implement this!
+  delete static_cast<TensorFlowMaxOp *>(buffer);
+}
+
+TfLiteStatus PrepareTensorFlowMax(TfLiteContext* context, TfLiteNode* node)
+{
+  std::cerr << __FUNCTION__ << "(...)" << std::endl;
+  // TODO Implement this!
+  return kTfLiteOk;
+}
+
+TfLiteStatus EvalTensorFlowMax(TfLiteContext* context, TfLiteNode* node)
+{
+  std::cerr << __FUNCTION__ << "(...)" << std::endl;
+  // TODO Implement this!
+  return kTfLiteError;
+}
+
+}
+
+#define REGISTER_FUNCTION(Name)                                     \
+  TfLiteRegistration* Register_##Name(void)                          \
+  {                                                                 \
+    static TfLiteRegistration r = { ::Init##Name, ::Free##Name,     \
+                                   ::Prepare##Name, ::Eval##Name }; \
+    return &r;                                                      \
+  }
+REGISTER_FUNCTION(CAST)
+REGISTER_FUNCTION(Stack)
+REGISTER_FUNCTION(ArgMax)
+REGISTER_FUNCTION(TensorFlowMax)
+#undef REGISTER_FUNCTION
diff --git a/tools/tflite_run/src/operators.h b/tools/tflite_run/src/operators.h
new file mode 100644 (file)
index 0000000..b691126
--- /dev/null
@@ -0,0 +1,9 @@
+#ifndef __TFLITE_RUN_OPERATORS_H__
+#define __TFLITE_RUN_OPERATORS_H__
+
+TfLiteRegistration* Register_CAST(void);
+TfLiteRegistration* Register_Stack(void);
+TfLiteRegistration* Register_ArgMax(void);
+TfLiteRegistration* Register_TensorFlowMax(void);
+
+#endif // __TFLITE_RUN_OPERATORS_H__
index 143216b..e4c78ee 100644 (file)
@@ -1,6 +1,8 @@
 #include "tensorflow/contrib/lite/kernels/register.h"
 #include "tensorflow/contrib/lite/model.h"
 
+#include "operators.h"
+
 #include <iostream>
 
 using namespace tflite;
@@ -23,6 +25,13 @@ int main(int argc, char **argv)
 
   BuiltinOpResolver resolver;
 
+#define REGISTER(Name) { resolver.AddCustom(#Name, Register_##Name()); }
+  REGISTER(CAST);
+  REGISTER(Stack);
+  REGISTER(ArgMax);
+  REGISTER(TensorFlowMax);
+#undef REGISTER
+
   InterpreterBuilder builder(*model, resolver);
 
   std::unique_ptr<Interpreter> interpreter;