Add Mir2NNFW converter used to convert nnc's MIR into the neurun graph representation
Add example runner that loads a model using nnc's importer and runs it using the neurun backend
Signed-off-by: Vladimir Plazun <v.plazun@samsung.com>
--- /dev/null
+set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} ${CMAKE_CURRENT_LIST_DIR})
+
+find_package(NNFW QUIET)
+
+if(NOT NNFW_FOUND)
+  message(STATUS "NNFW not found. Skip mirunner build")
+  return()
+endif()
+
+# mirunner requires nnc's tflite importer; bail out before creating any
+# targets so a missing importer never leaves a half-configured library behind.
+if(NOT TARGET tflite_importer)
+  message(STATUS "tflite_importer from `nnc` is not available. Skip mirunner build")
+  return()
+endif()
+
+# Converter library: translates nnc's MIR graph into a neurun graph.
+add_library(mir_converter STATIC MirConverter.cpp)
+
+target_link_libraries(mir_converter PUBLIC mir tflite_importer)
+
+# Imported targets provided by FindNNFW.cmake (headers + neurun frontend lib).
+target_link_libraries(mir_converter PUBLIC
+  NNFW::NNAPI
+  NNFW::Neurun
+  NNFW::Misc
+  )
+
+# Example runner executable built on top of the converter.
+add_executable(mirunner Runner.cpp)
+
+target_link_libraries(mirunner PRIVATE mir_converter)
--- /dev/null
+#
+# Find module for the nnfw runtime.
+#
+# Usage: set NNFW_ROOT_DIR variable to nnfw source root
+#
+# NNFW_ROOT_DIR    - should be set to nnfw source root directory
+# NNFW_PRODUCT_DIR - (optional) nnfw build root;
+#                    defaults to ${NNFW_ROOT_DIR}/Product/obj
+#
+# On success defines NNFW_FOUND and the imported targets
+# NNFW::NNAPI, NNFW::Neurun and NNFW::Misc.
+
+mark_as_advanced(NEURUN_LIBRARY NEURUN_FRONTEND_INCLUDE_DIR NNFW_MISC_LIBRARY)
+
+# Paths below are relative to NNFW_ROOT_DIR (sources) or NNFW_PRODUCT_DIR
+# (build artifacts).
+set(NNAPI_INCLUDE_DIR include)
+
+set(NNFW_CORE_INCLUDE_DIR include)
+set(NNFW_LIBS_MISC libs/misc/include)
+
+set(NEURUN_BASE_DIR runtimes/neurun)
+set(NEURUN_NNAPI_FRONTEND ${NEURUN_BASE_DIR}/frontend/nnapi)
+set(NEURUN_CORE_INCLUDE_DIR ${NEURUN_BASE_DIR}/core/include)
+
+if(NOT DEFINED NNFW_PRODUCT_DIR)
+  set(NNFW_PRODUCT_DIR ${NNFW_ROOT_DIR}/Product/obj)
+  message(STATUS "NNFW: Assuming nnfw build dir: ${NNFW_ROOT_DIR}/Product/obj")
+endif()
+
+# Locate required include files
+
+find_path(NEURUN_FRONTEND_INCLUDE_DIR
+  NAMES wrapper/model.h
+  PATHS ${NNFW_ROOT_DIR}/${NEURUN_NNAPI_FRONTEND}
+  )
+
+find_path(NNFW_NNAPI_INCLUDE_DIR
+  NAMES NeuralNetworks.h
+  PATHS ${NNFW_ROOT_DIR}/include)
+
+find_path(NNFW_MISC_INCLUDE_DIR
+  NAMES misc/feature/Shape.h
+  PATHS ${NNFW_ROOT_DIR}/${NNFW_LIBS_MISC})
+
+# Locate required libraries (.so, .a)
+find_library(NEURUN_LIBRARY
+  NAMES neuralnetworks
+  PATHS ${NNFW_PRODUCT_DIR}/${NEURUN_BASE_DIR}/frontend/nnapi
+  )
+
+find_library(NNFW_MISC_LIBRARY
+  NAMES nnfw_lib_misc
+  PATHS ${NNFW_PRODUCT_DIR}/libs/misc
+  )
+
+include(FindPackageHandleStandardArgs)
+
+# Handle the QUIETLY and REQUIRED arguments and set NNFW_FOUND to TRUE.
+# NOTE: only find_path/find_library results are listed; plain set()
+# variables above are always defined and could never fail the check.
+find_package_handle_standard_args(NNFW
+  FOUND_VAR NNFW_FOUND
+  REQUIRED_VARS
+    NNFW_NNAPI_INCLUDE_DIR
+    NEURUN_FRONTEND_INCLUDE_DIR
+    NNFW_MISC_INCLUDE_DIR
+    NEURUN_LIBRARY
+    NNFW_MISC_LIBRARY
+  )
+
+if(NNFW_FOUND)
+  # Header-only usage requirements for the NN API headers.
+  add_library(NNFW::NNAPI INTERFACE IMPORTED)
+  set_target_properties(NNFW::NNAPI PROPERTIES
+    INTERFACE_INCLUDE_DIRECTORIES "${NNFW_ROOT_DIR}/${NNAPI_INCLUDE_DIR}"
+    )
+
+  # Neurun NN API frontend library together with its public headers.
+  add_library(NNFW::Neurun UNKNOWN IMPORTED)
+  set_target_properties(NNFW::Neurun PROPERTIES
+    INTERFACE_INCLUDE_DIRECTORIES "${NNFW_ROOT_DIR}/${NEURUN_CORE_INCLUDE_DIR};${NNFW_ROOT_DIR}/${NEURUN_NNAPI_FRONTEND}"
+    IMPORTED_LOCATION "${NEURUN_LIBRARY}"
+    )
+
+  # Misc helpers are consumed header-only for now (library located above
+  # but intentionally not attached to this target).
+  add_library(NNFW::Misc INTERFACE IMPORTED)
+  set_target_properties(NNFW::Misc PROPERTIES
+    INTERFACE_INCLUDE_DIRECTORIES "${NNFW_MISC_INCLUDE_DIR}"
+    )
+endif()
--- /dev/null
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "MirConverter.h"
+
+#include <utility>
+#include <iostream>
+#include <sstream>
+#include <string>
+
+using namespace std;
+
+namespace mirunner {
+
+shared_ptr<neurun::graph::Graph> MirConverter::convert() {
+  // Stub: MIR -> neurun graph translation is not implemented yet,
+  // so a null graph pointer is returned unconditionally.
+  return nullptr;
+}
+
+void MirConverter::import(const string& path){
+  // Stub: loading the model through nnc's importer is not implemented yet.
+  // Explicitly discard the argument to silence unused-parameter warnings.
+  static_cast<void>(path);
+}
+
+} // namespace mirunner
--- /dev/null
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef NNFW_NNCC_CONTRIB_MIR2NNFW
+#define NNFW_NNCC_CONTRIB_MIR2NNFW
+
+#include "mir/Graph.h"
+#include "graph/Graph.h"
+
+#include <memory>
+#include <string>
+
+namespace mirunner {
+
+/**
+ * @brief Converts a model loaded through nnc's importer (MIR)
+ *        into nnfw's neurun graph representation.
+ *
+ * Intended usage: call import() to load a model, then convert() to
+ * obtain the neurun graph. Both methods are currently stubs.
+ */
+class MirConverter {
+
+public:
+  // NOTE(review): `explicit` on a zero-argument defaulted constructor adds
+  // nothing (there is no conversion to prevent) and only blocks
+  // copy-list-initialization, so it was dropped.
+  MirConverter() = default;
+
+  /**
+   * @brief converts stored model into neurun graph
+   * @return neurun graph pointer (currently always null - stub)
+   */
+  std::shared_ptr<neurun::graph::Graph> convert();
+
+  /**
+   * @brief reads model from `path` file and stores it in mir representation
+   * @param path path to model
+   */
+  void import(const std::string& path);
+};
+
+} //namespace mirunner
+
+#endif //NNFW_NNCC_CONTRIB_MIR2NNFW
--- /dev/null
+## Mirunner
+Mirunner is a thin tflite (multiple import formats possible) model runner that uses nnc's frontends and IR to load a model and
+run it through nnfw's NEURUN backend.
+
+## Building mirunner
+You should configure project with 2 variables set:
+
+* NNFW_ROOT_DIR - should be set to the root directory of a checked-out and built nnfw project
+* NNFW_PRODUCT_DIR _(Optional)_ - set to the build root of nnfw (in case of a non-default build configuration)
+( "${NNFW_ROOT_DIR}/Product/obj" is used by default )
+
+## Running mirunner
+ Example:
+ ```sh
+ $ export LD_LIBRARY_PATH=<same as NNFW_PRODUCT_DIR>/runtimes/neurun/backend/cpu/
+ $ mirunner <path_to_model> <path_to_binary_input_data>
+ ```
+
+As a POC runner only supports mobilenet tflite network and uses 224x224x3 input data
+
+Resulting vector is saved in `./out.dat` file in current working directory
+Runner prints maximum element value and label index to stdout
--- /dev/null
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "MirConverter.h"
+
+#include "graph/Graph.h"
+#include "NeuralNetworks.h"
+#include "wrapper/compilation.h"
+#include "wrapper/execution.h"
+
+#include <fstream>
+#include <iostream>
+#include <stdexcept>
+#include <string>
+#include <vector>
+
+using namespace std;
+
+// Placeholder constants for the POC runner. They are not read by the
+// stubbed-out code below yet; per the README the eventual implementation
+// targets mobilenet (224x224x3 input, 1001 output classes).
+const int FILE_ERROR = 2;
+const int NUM_OUTPUT_CLASSES = 1001;
+const int INPUT_DIMENSION = 224;
+
+// Read vector of floats from selected file
+std::vector<float> readData(const string &path) {
+  // Stub: file parsing is not implemented yet; always yields no data.
+  static_cast<void>(path);
+  std::vector<float> result;
+  return result;
+}
+
+// Entry point: expects a model path and a binary input blob on the
+// command line. Currently a proof-of-concept skeleton that imports the
+// model and then aborts, since conversion/execution are still stubs.
+int main(int argc, char **argv) {
+  if (argc < 3) {
+    std::cout << "Usage:\n mir_run <path to tflite model> <path to binary input>\n";
+    return 1;
+  }
+  string input_data_path{argv[2]};
+  (void)input_data_path; // not consumed until inference is implemented
+
+  mirunner::MirConverter mir_to_nnfw_converter{};
+  mir_to_nnfw_converter.import(argv[1]);
+
+  auto graph = mir_to_nnfw_converter.convert();
+  (void)graph; // graph compilation/execution is not wired up yet
+
+  // Fail loudly instead of silently doing nothing (requires <stdexcept>).
+  throw std::runtime_error("mir_nnfw_runner is not implemented yet");
+
+  return 0;
+}
--- /dev/null
+# NOTE(review): appears to be an nncc `requires.cmake` dependency list,
+# declaring build-time dependencies on the nnc frontend and the MIR
+# intermediate representation - confirm against nncc's contrib conventions.
+require("nnc")
+require("mir")