Remove mirunner component (#6414)
author Sergei Barannikov/AI Tools Lab/SRR/Engineer/Samsung Electronics <s.barannikov@samsung.com>
Fri, 9 Aug 2019 09:33:07 +0000 (12:33 +0300)
committer Alexander Efimov/AI Tools Lab/./Samsung Electronics <a.efimov@samsung.com>
Fri, 9 Aug 2019 09:33:07 +0000 (12:33 +0300)
Remove the `mirunner` PoC; there is no need for it anymore.

Signed-off-by: Sergei Barannikov <s.barannikov@samsung.com>
compiler/mirunner/.FORMATDENY [deleted file]
compiler/mirunner/CMakeLists.txt [deleted file]
compiler/mirunner/FindNNFW.cmake [deleted file]
compiler/mirunner/Mir2nnfwVisitor.cpp [deleted file]
compiler/mirunner/Mir2nnfwVisitor.h [deleted file]
compiler/mirunner/MirConverter.cpp [deleted file]
compiler/mirunner/MirConverter.h [deleted file]
compiler/mirunner/README.md [deleted file]
compiler/mirunner/Runner.cpp [deleted file]
compiler/mirunner/requires.cmake [deleted file]

diff --git a/compiler/mirunner/.FORMATDENY b/compiler/mirunner/.FORMATDENY
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/compiler/mirunner/CMakeLists.txt b/compiler/mirunner/CMakeLists.txt
deleted file mode 100644 (file)
index 29691f1..0000000
+++ /dev/null
@@ -1,27 +0,0 @@
-set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} ${CMAKE_CURRENT_LIST_DIR})
-
-find_package(NNFW QUIET)
-
-if(NOT NNFW_FOUND)
-    message(STATUS "NNFW not found. Skip mirunner build")
-    return()
-endif(NOT NNFW_FOUND)
-
-if(NOT TARGET tflite_importer)
-    message(STATUS "tflite_importer from `nnc` is not available. Skip mirunner build")
-    return()
-endif()
-
-add_library(mir_converter STATIC MirConverter.cpp Mir2nnfwVisitor.cpp)
-
-target_link_libraries(mir_converter PUBLIC mir)
-
-target_link_libraries(mir_converter PUBLIC
-                      NNFW::NNAPI
-                      NNFW::Neurun
-                      NNFW::Misc
-                      )
-
-add_executable(mirunner Runner.cpp)
-target_link_libraries(mirunner PRIVATE mir_converter)
-target_link_libraries(mirunner PUBLIC tflite_importer)
diff --git a/compiler/mirunner/FindNNFW.cmake b/compiler/mirunner/FindNNFW.cmake
deleted file mode 100644 (file)
index 4805c17..0000000
+++ /dev/null
@@ -1,82 +0,0 @@
-#
-#   Usage: set the NNFW_ROOT_DIR variable to the nnfw source root
-#
-# NNFW_ROOT_DIR should be set to the nnfw source root directory
-# NNFW_BUILD_ROOT should be set to the nnfw build root
-
-mark_as_advanced(NEURUN_LIBRARY NEURUN_FRONTEND_INCLUDE_DIR)
-
-set(NNAPI_INCLUDE_DIR               include)
-
-set(NNFW_CORE_INCLUDE_DIR           include)
-set(NNFW_LIBS_MISC                  libs/misc/include)
-
-set(NEURUN_BASE_DIR                 runtimes/neurun)
-set(NEURUN_NNAPI_FRONTEND           ${NEURUN_BASE_DIR}/frontend/nnapi)
-set(NEURUN_CORE_INCLUDE_DIR         ${NEURUN_BASE_DIR}/core/include)
-
-if(NOT DEFINED NNFW_PRODUCT_DIR)
-    set(NNFW_PRODUCT_DIR ${NNFW_ROOT_DIR}/Product/obj)
-    message(STATUS "NNFW: Assuming nnfw build dir: ${NNFW_ROOT_DIR}/Product/obj")
-endif(NOT DEFINED NNFW_PRODUCT_DIR)
-
-# Locate required include files
-
-find_path(NEURUN_FRONTEND_INCLUDE_DIR
-          NAMES wrapper/model.h
-          PATHS ${NNFW_ROOT_DIR}/${NEURUN_NNAPI_FRONTEND}
-        )
-
-find_path(NNFW_NNAPI_INCLUDE_DIR
-          NAMES NeuralNetworks.h
-          PATHS ${NNFW_ROOT_DIR}/include)
-
-find_path(NNFW_MISC_INCLUDE_DIR
-          NAMES misc/feature/Shape.h
-          PATHS ${NNFW_ROOT_DIR}/${NNFW_LIBS_MISC})
-
-# Locate required libraries (.so, .a )
-find_library(NEURUN_LIBRARY
-             NAMES neuralnetworks
-             PATHS ${NNFW_PRODUCT_DIR}/${NEURUN_BASE_DIR}/frontend/nnapi
-        )
-
-
-find_library(NNFW_MISC_LIBRARY
-             NAMES nnfw_lib_misc
-             PATHS ${NNFW_PRODUCT_DIR}/libs/misc
-        )
-
-include(FindPackageHandleStandardArgs)
-
-# handle the QUIETLY and REQUIRED arguments and set NNFW_FOUND to TRUE
-find_package_handle_standard_args(NNFW
-        FOUND_VAR NNFW_FOUND
-        REQUIRED_VARS
-        NNFW_NNAPI_INCLUDE_DIR
-        NEURUN_CORE_INCLUDE_DIR
-        NEURUN_NNAPI_FRONTEND
-        NNFW_MISC_INCLUDE_DIR
-        NEURUN_LIBRARY
-        NNFW_MISC_LIBRARY
-        )
-
-if(NNFW_FOUND)
-    add_library(NNFW::NNAPI INTERFACE IMPORTED)
-    set_target_properties(NNFW::NNAPI PROPERTIES
-            INTERFACE_INCLUDE_DIRECTORIES "${NNFW_ROOT_DIR}/${NNAPI_INCLUDE_DIR}"
-            )
-
-    add_library(NNFW::Neurun UNKNOWN IMPORTED)
-    set_target_properties(NNFW::Neurun PROPERTIES
-            INTERFACE_INCLUDE_DIRECTORIES "${NNFW_ROOT_DIR}/${NEURUN_CORE_INCLUDE_DIR};${NNFW_ROOT_DIR}/${NEURUN_NNAPI_FRONTEND}"
-            IMPORTED_LOCATION ${NEURUN_LIBRARY}
-            )
-
-    add_library(NNFW::Misc INTERFACE IMPORTED)
-    set_target_properties(NNFW::Misc PROPERTIES
-            INTERFACE_INCLUDE_DIRECTORIES "${NNFW_MISC_INCLUDE_DIR}"
-    #        IMPORTED_LOCATION "${NNFW_MISC_LIBRARY}"
-            )
-
-endif(NNFW_FOUND)
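For context, the module above followed the stock CMake find-module pattern: locate headers and libraries under `NNFW_ROOT_DIR`, then expose them as `NNFW::*` imported targets. A consumer looked roughly like the sketch below (the `app` target and the path are hypothetical; the deleted `CMakeLists.txt` above was the real consumer):

```cmake
# Hypothetical consumer of the removed FindNNFW.cmake module.
list(APPEND CMAKE_MODULE_PATH ${CMAKE_CURRENT_LIST_DIR})

set(NNFW_ROOT_DIR "/path/to/nnfw")  # a checked-out and built nnfw tree
find_package(NNFW QUIET)

if(NNFW_FOUND)
  add_executable(app main.cpp)
  # Link against the imported targets the module defines.
  target_link_libraries(app PRIVATE NNFW::NNAPI NNFW::Neurun NNFW::Misc)
endif()
```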
diff --git a/compiler/mirunner/Mir2nnfwVisitor.cpp b/compiler/mirunner/Mir2nnfwVisitor.cpp
deleted file mode 100644 (file)
index ceb67bf..0000000
+++ /dev/null
@@ -1,404 +0,0 @@
-#include "Mir2nnfwVisitor.h"
-#include "mir/TensorUtil.h"
-
-#include "wrapper/model.h"
-
-#include <iostream>
-#include <cmath>
-#include <sstream>
-#include <string>
-
-namespace mirunner {
-
-MIR2NNFWVisitor::MIR2NNFWVisitor() {
-  _nnfw_model = std::make_shared<ANeuralNetworksModel>();
-}
-
-void MIR2NNFWVisitor::visit(ops::BiasAddOp& op) {
-  _s << "BiasAddOp " << op.getName() << "\n";
-  // TODO: this code is functional, but nnfw does not support a freestanding bias for now;
-  // assume bias always follows a conv2d
-  //  uint32_t input = _output_to_idx.at(op.getInput(0)->getProducer());
-  //  uint32_t weight = _output_to_idx.at(op.getInput(1)->getProducer());
-  //  uint32_t act = addOperand(ANEURALNETWORKS_FUSED_NONE);
-  //
-  //  uint32_t output = addOperandFromOpOutput(op, 0);
-  //  addOperation(ANEURALNETWORKS_ADD,
-  //               {input, weight, act}, {output});
-}
-
-void MIR2NNFWVisitor::visit(ops::CappedReluOp& op) {
-  _s << "CappedReluOp " << op.getName() << "\n";
-  // TODO: this code is functional, but nnfw does not support a freestanding activation for now;
-  // assume CappedRelu always follows a conv2d
-  //  uint32_t input = _output_to_idx.at(op.getInput(0)->getProducer());
-  //  uint32_t output = addOperandFromOpOutput(op, 0);
-  //  // To avoid float equality comparison:
-  //  OperationCode op_code = (op.getCap() - 6.0 < 0.01)
-  //                          ? ANEURALNETWORKS_RELU6 : ANEURALNETWORKS_RELU1;
-  //  addOperation(op_code, {input}, {output});
-}
-
-void MIR2NNFWVisitor::visit(ops::ConstantOp& op) {
-  _s << "ConstantOp " << op.getName() << "\n";
-  // Do not create operands from dangling ConstantOp
-  if (!op.getOutput(0)->getConsumers().empty()) {
-    addOperandFromOpOutput(op, 0);
-  }
-}
-
-void MIR2NNFWVisitor::visit(ops::Conv2DOp& op) {
-  _s << "Conv2DOp " << op.getName() << "\n";
-
-  auto& padding_before = op.getPaddingBefore();
-  auto& padding_after = op.getPaddingAfter();
-  uint32_t pad_l = addOperand(padding_before.at(1));
-  uint32_t pad_r = addOperand(padding_after.at(1));
-  uint32_t pad_t = addOperand(padding_before.at(0));
-  uint32_t pad_b = addOperand(padding_after.at(0));
-
-  uint32_t stride_w = addOperand(op.getStrides().dim(1));
-  uint32_t stride_h = addOperand(op.getStrides().dim(0));
-
-  uint32_t input = _output_to_idx.at(op.getInput(0)->getProducer());
-  uint32_t weights = _output_to_idx.at(op.getInput(1)->getProducer());
-
-  // TODO: handle the conv->relu->bias case (is it possible?)
-
-  auto bias = handleBias(&op);
-  const Operation* last_op = bias.first;
-
-  auto activation = handleActivation(last_op);
-
-  std::vector<uint32_t> inputs = {input, weights, bias.second, pad_l, pad_r, pad_t, pad_b, stride_w,
-                                  stride_h, activation.second};
-
-  uint32_t output = addOperandFromOpOutput(*activation.first, 0);
-  addOperation(ANEURALNETWORKS_CONV_2D, inputs, {output});
-}
-
-void MIR2NNFWVisitor::visit(ops::DepthwiseConv2DOp& op) {
-  _s << "DepthwiseConv2DOp " << op.getName() << "\n";
-
-  auto& padding_before = op.getPaddingBefore();
-  auto& padding_after = op.getPaddingAfter();
-  uint32_t pad_l = addOperand(padding_before.at(1));
-  uint32_t pad_r = addOperand(padding_after.at(1));
-  uint32_t pad_t = addOperand(padding_before.at(0));
-  uint32_t pad_b = addOperand(padding_after.at(0));
-
-  uint32_t stride_w = addOperand(op.getStrides().dim(1));
-  uint32_t stride_h = addOperand(op.getStrides().dim(0));
-
-  uint32_t d_mult = addOperand(static_cast<int32_t>(
-                                 op.getOutputShape(0).dim(3) / op.getInputShape(0).dim(3)));
-
-  uint32_t input = _output_to_idx.at(op.getInput(0)->getProducer());
-  uint32_t weights = _output_to_idx.at(op.getInput(1)->getProducer());
-
-  // TODO: handle the conv->relu->bias case (is it possible?)
-
-  auto bias = handleBias(&op);
-  const Operation* last_op = bias.first;
-
-  auto activation = handleActivation(last_op);
-
-  std::vector<uint32_t> inputs{input, weights, bias.second, pad_l, pad_r, pad_t, pad_b, stride_w, stride_h, d_mult, activation.second};
-
-  uint32_t output = addOperandFromOpOutput(*activation.first, 0);
-
-  addOperation(ANEURALNETWORKS_DEPTHWISE_CONV_2D,
-               inputs,
-               {output});
-}
-
-void MIR2NNFWVisitor::visit(ops::InputOp& op) {
-  _s << "InputOp " << op.getName() << "\n";
-
-  uint32_t id = addOperandFromOpOutput(op, 0);
-  _inputs.emplace_back(id);
-}
-
-void MIR2NNFWVisitor::visit(ops::PoolOp& op) {
-  _s << "PoolOp " << op.getName() << "\n";
-
-  auto& padding_before = op.getPaddingBefore();
-  auto& padding_after = op.getPaddingAfter();
-  uint32_t pad_l = addOperand(padding_before.at(1));
-  uint32_t pad_r = addOperand(padding_after.at(1));
-  uint32_t pad_t = addOperand(padding_before.at(0));
-  uint32_t pad_b = addOperand(padding_after.at(0));
-
-  uint32_t stride_w = addOperand(op.getStrides().dim(1));
-  uint32_t stride_h = addOperand(op.getStrides().dim(0));
-
-  auto win_shape = op.getWindowShape();
-  uint32_t filter_h = addOperand(win_shape.dim(0));
-  uint32_t filter_w = addOperand(win_shape.dim(1));
-  uint32_t act = addOperand(ANEURALNETWORKS_FUSED_NONE);
-
-  uint32_t input = _output_to_idx.at(op.getInput(0)->getProducer());
-  uint32_t output = addOperandFromOpOutput(op, 0);
-
-  OperationCode op_code;
-  switch (op.getPoolingType()) {
-    case ops::PoolOp::PoolingType::MAX:
-      op_code = ANEURALNETWORKS_MAX_POOL_2D;
-      break;
-    case ops::PoolOp::PoolingType::AVG:
-      op_code = ANEURALNETWORKS_AVERAGE_POOL_2D;
-      break;
-    case ops::PoolOp::PoolingType::MIN:
-      throw std::runtime_error("Not Implemented in NNFW");
-    default:
-      assert(false && "This should not happen");
-  }
-  addOperation(op_code,
-               {input, pad_l, pad_r, pad_t, pad_b, stride_w, stride_h, filter_w, filter_h, act},
-               {output});
-}
-
-void MIR2NNFWVisitor::visit(ops::OutputOp& op) {
-  _s << "OutputOp " << op.getName() << "\n";
-
-  _outputs.emplace_back(_output_to_idx.at(op.getInput(0)->getProducer()));
-}
-
-void MIR2NNFWVisitor::visit(ops::ReshapeOp& op) {
-  _s << "ReshapeOp " << op.getName() << "\n";
-  uint32_t input = _output_to_idx.at(op.getInput(0)->getProducer());
-  uint32_t output = addOperandFromOpOutput(op, 0);
-
-  uint32_t out_shape = addOperand(op.getOutputShape(0));
-
-  addOperation(ANEURALNETWORKS_RESHAPE, {input, out_shape}, {output});
-}
-
-void MIR2NNFWVisitor::visit(ops::SoftmaxOp& op) {
-  _s << "SoftmaxOp " << op.getName() << "\n";
-
-  // TODO: test that axis==-1
-
-  uint32_t input = _output_to_idx.at(op.getInput(0)->getProducer());
-  uint32_t beta = addOperand(1.0f);
-
-  uint32_t output = addOperandFromOpOutput(op, 0);
-
-  addOperation(ANEURALNETWORKS_SOFTMAX, {input, beta}, {output});
-}
-
-uint32_t MIR2NNFWVisitor::addOperandFromOpOutput(const Operation& mir_op, uint32_t output_idx) {
-  bool next_depthwise = false;
-  auto const_op = dynamic_cast<const ops::ConstantOp*>(&mir_op);
-
-  for (const auto in : mir_op.getOutput(0)->getConsumers()) {
-    // TODO: Check multiple output case
-    if (dynamic_cast<const ops::DepthwiseConv2DOp*>(in->getNode())) {
-      next_depthwise = true;
-      break;
-    }
-  }
-
-  // Prepare operand type (ANeuralNetworksOperandType)
-  auto shape = mir_op.getOutputShape(output_idx);
-  auto rank = shape.rank();
-  std::vector<uint32_t> dims(rank);
-  for (int32_t axis = 0; axis < rank; ++axis) {
-    dims[axis] = static_cast<uint32_t>(shape.dim(axis));
-  }
-
-  if (next_depthwise && const_op) {
-    auto tmp = dims[0];
-    dims[0] = dims[3];
-    dims[3] = dims[2];
-    dims[2] = dims[1];
-    dims[1] = tmp;
-  }
-  ANeuralNetworksOperandType type{
-    .type = ANEURALNETWORKS_TENSOR_FLOAT32, // TODO: take type from op
-    .dimensionCount = static_cast<uint32_t>(rank),
-    .dimensions = dims.data()
-  };
-
-  // Add operand to NEURUN graph
-  if (!_nnfw_model->addOperand(&type)) {
-    throw std::runtime_error("addOperandFromOpOutput error: " + mir_op.getName());
-  }
-
-  // Remember operand in our visitor _output_to_idx map
-  _output_to_idx.insert({mir_op.getOutput(output_idx), _operand_counter});
-
-  // Set operand value for ConstantOp
-
-  if (const_op) {
-    TensorVariant value_tensor = const_op->getValue();
-    if (next_depthwise) {
-      value_tensor = transposeTensor<3, 0, 1, 2>(value_tensor);
-    }
-    size_t data_size = value_tensor.getElementSize() * value_tensor.getShape().numElements();
-
-    if (!_nnfw_model->setOperandValue(_operand_counter, value_tensor.atOffset(0), data_size,
-      /*optional*/ false, /*copy*/ true)) {
-      throw std::runtime_error("addOperandFromOpOutput setOperandValue error: " + mir_op.getName());
-    }
-  }
-
-  return _operand_counter++;
-}
-
-uint32_t MIR2NNFWVisitor::addOperand(int32_t value) {
-  ANeuralNetworksOperandType type{
-    .type = ANEURALNETWORKS_INT32, .dimensionCount = 0, .dimensions = nullptr
-  };
-
-  if (!_nnfw_model->addOperand(&type)) {
-    throw std::runtime_error("addOperand int32_t error");
-  }
-
-  if (!_nnfw_model->setOperandValue(_operand_counter, &value, sizeof(int32_t) * 1,
-    /*optional*/ false, /*copy*/ true)) {
-    throw std::runtime_error("addOperand setOperandValue error");
-  }
-
-  return _operand_counter++;
-}
-
-uint32_t MIR2NNFWVisitor::addOperand(mir::Shape value) {
-  auto rank = static_cast<uint32_t>(value.rank());
-
-  ANeuralNetworksOperandType type{
-    .type = ANEURALNETWORKS_TENSOR_INT32, .dimensionCount = 1, .dimensions = &rank
-  };
-
-  if (!_nnfw_model->addOperand(&type)) {
-    throw std::runtime_error("addOperand tensor_int32_t error");
-  }
-
-  std::vector<int32_t> val_buf(value.rank());
-  for (int i = 0; i < value.rank(); i++) {
-    val_buf[i] = value.dim(i);
-  }
-  if (!_nnfw_model->setOperandValue(_operand_counter, val_buf.data(), sizeof(int32_t) * value.rank(),
-    /*optional*/ false, /*copy*/ true)) {
-    throw std::runtime_error("addOperand setOperandValue error");
-  }
-
-  return _operand_counter++;
-}
-
-uint32_t MIR2NNFWVisitor::addOperand(float value) {
-  ANeuralNetworksOperandType type{
-    .type = ANEURALNETWORKS_FLOAT32, .dimensionCount = 0, .dimensions = nullptr
-  };
-
-  if (!_nnfw_model->addOperand(&type)) {
-    throw std::runtime_error("addOperand float error");
-  }
-
-  if (!_nnfw_model->setOperandValue(_operand_counter, &value, sizeof(float) * 1,
-    /*optional*/ false, /*copy*/ true)) {
-    throw std::runtime_error("addOperand setOperandValue error");
-  }
-
-  return _operand_counter++;
-}
-
-uint32_t MIR2NNFWVisitor::addZeroBiasOperand(int32_t size) {
-  uint32_t dims[1]{static_cast<uint32_t>(size)};
-
-  ANeuralNetworksOperandType type{
-    .type = ANEURALNETWORKS_TENSOR_FLOAT32, .dimensionCount = 1, .dimensions = dims
-  };
-
-  if (!_nnfw_model->addOperand(&type)) {
-    throw std::runtime_error("addZeroBiasOperand error");
-  }
-
-  std::vector<float> val_buf(size, 0.0f);
-
-  size_t data_size = sizeof(float) * size;
-  if (!_nnfw_model->setOperandValue(_operand_counter, val_buf.data(), data_size,
-    /*optional*/ false, /*copy*/ true)) {
-    throw std::runtime_error("addZeroBiasOperand setOperandValue error");
-  }
-
-  return _operand_counter++;
-}
-
-void MIR2NNFWVisitor::addOperation(ANeuralNetworksOperationType type,
-                                   const std::vector<uint32_t>& inputs,
-                                   const std::vector<uint32_t>& outputs) {
-  if (!_nnfw_model->addOperation(type,
-                                 static_cast<uint32_t>(inputs.size()), inputs.data(),
-                                 static_cast<uint32_t>(outputs.size()), outputs.data())) {
-    throw std::runtime_error("addOperation error");
-  }
-}
-
-void MIR2NNFWVisitor::finish() {
-  for (const auto& i : _inputs) {
-    _nnfw_model->addModelInput(i);
-  }
-
-  for (const auto& i : _outputs) {
-    _nnfw_model->addModelOutput(i);
-  }
-
-  _nnfw_model->finish();
-}
-
-std::shared_ptr<neurun::graph::Graph> MIR2NNFWVisitor::getGraph() {
-  std::shared_ptr<neurun::graph::Graph> g;
-  _nnfw_model->release(g);
-  return g;
-}
-
-template <typename Op>
-std::pair<const Operation*, uint32_t> MIR2NNFWVisitor::handleBias(const Op* op) {
-  assert(op->getOutput(0)->getConsumers().size() == 1
-         && "More than one consumer for op with fused bias is unsupported for now");
-
-  const ops::BiasAddOp* bias_add = nullptr;
-  for (const Operation::Input* in : op->getOutput(0)->getConsumers()) {
-    if (in->getNode()->getType() == Operation::Type::biasAdd) {
-      bias_add = static_cast<const ops::BiasAddOp*>(in->getNode());
-      break;
-    }
-  }
-
-  if (bias_add) {
-    uint32_t bias = _output_to_idx.at(bias_add->getInput(1)->getProducer());
-    return {bias_add, bias};
-  } else {
-    uint32_t bias = addZeroBiasOperand(op->getOutputShape(0).dim(-1));
-    return {op, bias};
-  }
-}
-
-template <typename Op>
-std::pair<const Operation*, uint32_t> MIR2NNFWVisitor::handleActivation(const Op* op) {
-  assert(op->getOutput(0)->getConsumers().size() == 1
-         && "More than one consumer for op with fused activation is unsupported for now");
-
-  auto activation_code = ANEURALNETWORKS_FUSED_NONE;
-
-  const Operation* activation = nullptr;
-  for (const Operation::Input* in : op->getOutput(0)->getConsumers()) {
-    if (in->getNode()->getType() == Operation::Type::cappedReLU) {
-      auto capped_relu = static_cast<const ops::CappedReluOp*>(in->getNode());
-      activation = capped_relu;
-      assert(std::fabs(capped_relu->getCap() - 6.0f) < 0.01f
-             && "Capped Relu with custom cap is not supported");
-      activation_code = ANEURALNETWORKS_FUSED_RELU6;
-      break;
-    } // TODO: handle other activations supported in NEURUN
-  }
-
-  if (activation) {
-    return {activation, addOperand(activation_code)};
-  } else {
-    return {op, addOperand(activation_code)};
-  }
-}
-
-} // namespace mirunner
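The visitor above follows one NNAPI-style convention throughout: every scalar hyperparameter (padding, stride, fused activation) becomes its own constant operand, and the operation then references all of its operands by index in a fixed order. A condensed sketch of that pattern, using the same nnfw `wrapper/model.h` API as the deleted code; the helper names and the constant values are illustrative, not from the repo:

```cpp
#include "wrapper/model.h" // the ANeuralNetworksModel wrapper used above

#include <cstdint>
#include <stdexcept>
#include <vector>

// Illustrative helper: add a constant int32 scalar operand, return its index.
static uint32_t addInt32(ANeuralNetworksModel& model, uint32_t& counter, int32_t value) {
  ANeuralNetworksOperandType type{
    .type = ANEURALNETWORKS_INT32, .dimensionCount = 0, .dimensions = nullptr
  };
  if (!model.addOperand(&type) ||
      !model.setOperandValue(counter, &value, sizeof(value),
                             /*optional*/ false, /*copy*/ true)) {
    throw std::runtime_error("addInt32 failed");
  }
  return counter++;
}

// Illustrative lowering of one Conv2D; `input`, `weights`, `bias` and `output`
// are operand indices created elsewhere (cf. addOperandFromOpOutput above).
// The input order matches what visit(ops::Conv2DOp&) assembles.
static void lowerConv2D(ANeuralNetworksModel& model, uint32_t& counter,
                        uint32_t input, uint32_t weights, uint32_t bias,
                        uint32_t output) {
  std::vector<uint32_t> ins{input, weights, bias,
                            addInt32(model, counter, 0),  // pad_l
                            addInt32(model, counter, 0),  // pad_r
                            addInt32(model, counter, 0),  // pad_t
                            addInt32(model, counter, 0),  // pad_b
                            addInt32(model, counter, 1),  // stride_w
                            addInt32(model, counter, 1),  // stride_h
                            addInt32(model, counter, ANEURALNETWORKS_FUSED_NONE)};
  if (!model.addOperation(ANEURALNETWORKS_CONV_2D,
                          static_cast<uint32_t>(ins.size()), ins.data(),
                          1, &output)) {
    throw std::runtime_error("addOperation failed");
  }
}
```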
diff --git a/compiler/mirunner/Mir2nnfwVisitor.h b/compiler/mirunner/Mir2nnfwVisitor.h
deleted file mode 100644 (file)
index 69ca3a8..0000000
+++ /dev/null
@@ -1,139 +0,0 @@
-#ifndef NNFW_MIR2NNFW_VISITOR_H
-#define NNFW_MIR2NNFW_VISITOR_H
-
-#include "graph/Graph.h"
-#include "wrapper/model.h"
-
-#include "mir/Graph.h"
-#include "mir/Operation.h"
-#include "mir/ops/BatchNormOp.h"
-#include "mir/ops/BiasAddOp.h"
-#include "mir/ops/CappedReluOp.h"
-#include "mir/ops/ConcatOp.h"
-#include "mir/ops/ConstantOp.h"
-#include "mir/ops/Conv2DOp.h"
-#include "mir/ops/Deconv2DOp.h"
-#include "mir/ops/DepthwiseConv2DOp.h"
-#include "mir/ops/DropoutOp.h"
-#include "mir/ops/ElementwiseOp.h"
-#include "mir/ops/EluOp.h"
-#include "mir/ops/FullyConnectedOp.h"
-#include "mir/ops/GatherOp.h"
-#include "mir/ops/GemmOp.h"
-#include "mir/ops/InputOp.h"
-#include "mir/ops/OutputOp.h"
-#include "mir/ops/LeakyReluOp.h"
-#include "mir/ops/PadOp.h"
-#include "mir/ops/PoolOp.h"
-#include "mir/ops/ReduceOp.h"
-#include "mir/ops/ReluOp.h"
-#include "mir/ops/ReshapeOp.h"
-#include "mir/ops/ResizeOp.h"
-#include "mir/ops/ScaleOp.h"
-#include "mir/ops/SigmoidOp.h"
-#include "mir/ops/SliceOp.h"
-#include "mir/ops/SoftmaxOp.h"
-#include "mir/ops/SqrtOp.h"
-#include "mir/ops/SqueezeOp.h"
-#include "mir/ops/TanhOp.h"
-#include "mir/ops/TransposeOp.h"
-
-#include <iostream>
-#include <sstream>
-#include <string>
-
-namespace mirunner {
-
-using namespace mir;
-using neurun::graph::Graph;
-
-class MIR2NNFWVisitor : public IVisitor {
-public:
-
-  MIR2NNFWVisitor();
-
-  void visit(ops::BiasAddOp &op) override;
-  void visit(ops::CappedReluOp &op) override;
-  void visit(ops::ConstantOp &op) override;
-  void visit(ops::Conv2DOp &op) override;
-  void visit(ops::DepthwiseConv2DOp &op) override;
-  void visit(ops::InputOp &op) override;
-  void visit(ops::PoolOp &op) override;
-  void visit(ops::OutputOp &op) override;
-  void visit(ops::ReshapeOp &op) override;
-  void visit(ops::SoftmaxOp &op) override;
-
-  void visit(ops::BatchNormOp &op) override { assert(false && "BatchNormOp"); }
-  void visit(ops::ConcatOp &op) override { assert(false && "ConcatOp"); }
-  void visit(ops::DeConv2DOp &op) override { assert(false && "DeConv2DOp"); }
-  void visit(ops::DropoutOp &op) override { assert(false && "DropoutOp"); }
-  void visit(ops::ElementwiseOp &op) override { assert(false && "ElementwiseOp"); }
-  void visit(ops::EluOp &op) override { assert(false && "EluOp"); }
-  void visit(ops::FullyConnectedOp &op) override { assert(false && "FullyConnectedOp"); }
-  void visit(ops::GatherOp &op) override { assert(false && "GatherOp"); }
-  void visit(ops::GemmOp &op) override { assert(false && "GemmOp"); }
-  void visit(ops::LeakyReluOp &op) override { assert(false && "LeakyReluOp"); }
-  void visit(ops::PadOp &op) override { assert(false && "PadOp"); }
-  void visit(ops::ReduceOp &op) override { assert(false && "ReduceOp"); }
-  void visit(ops::ReluOp &op) override { assert(false && "ReluOp"); }
-  void visit(ops::ResizeOp &op) override { assert(false && "ResizeOp"); }
-  void visit(ops::ScaleOp &op) override { assert(false && "ScaleOp"); }
-  void visit(ops::SigmoidOp &op) override { assert(false && "SigmoidOp"); }
-  void visit(ops::SliceOp &op) override { assert(false && "SliceOp"); }
-  void visit(ops::SqrtOp &op) override { assert(false && "SqrtOp"); }
-  void visit(ops::SqueezeOp &op) override { assert(false && "SqueezeOp"); }
-  void visit(ops::TanhOp &op) override { assert(false && "TanhOp"); }
-  void visit(ops::TransposeOp &op) override { assert(false && "TransposeOp"); }
-
-  void dump() {
-    // disabled for now: the converter dumps to stdout directly
-    //std::cout << _s.str() << std::endl;
-  }
-
-  void finish();
-  std::shared_ptr<neurun::graph::Graph> getGraph();
-private:
-
-  // Create NEURUN operand from MIR op output
-  uint32_t addOperandFromOpOutput(const Operation& mir_op, uint32_t output_idx);
-  // Create int32_t NEURUN operand from value
-  uint32_t addOperand(int32_t value);
-  // Create a 1-D int32 tensor NEURUN operand from a mir::Shape
-  uint32_t addOperand(mir::Shape value);
-  // Create float32 NEURUN operand from value
-  uint32_t addOperand(float value);
-  // Create NEURUN 1-d operand filled with zeroes
-  uint32_t addZeroBiasOperand(int32_t size);
-
-  void addOperation(ANeuralNetworksOperationType type,
-                    const std::vector<uint32_t>& inputs,
-                    const std::vector<uint32_t>& outputs);
-
-  /**
-   * @return a pair of the next operation and the bias operand to use with NNAPI
-   */
-  template<typename Op>
-  std::pair<const Operation*, uint32_t> handleBias(const Op* op);
-
-  /**
-   * @return a pair of the next operation and the activation operand to use with NNAPI
-   */
-  template<typename Op>
-  std::pair<const Operation*, uint32_t> handleActivation(const Op* op);
-
-  std::ostream& _s = std::cout;
-  std::shared_ptr<ANeuralNetworksModel> _nnfw_model;
-
-  // Maps MIR Outputs to NEURUN operand indices
-  std::unordered_map<const Operation::Output*, uint32_t> _output_to_idx;
-  uint32_t _operand_counter = 0;
-
-  // Vector of NEURUN input indices
-  std::vector<uint32_t> _inputs;
-  // Vector of NEURUN output indices
-  std::vector<uint32_t> _outputs;
-};
-
-} // namespace mirunner
-
-#endif //NNFW_MIR2NNFW_VISITOR_H
diff --git a/compiler/mirunner/MirConverter.cpp b/compiler/mirunner/MirConverter.cpp
deleted file mode 100644 (file)
index d20078a..0000000
+++ /dev/null
@@ -1,38 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "MirConverter.h"
-
-#include "Mir2nnfwVisitor.h"
-
-#include <utility>
-#include <iostream>
-#include <sstream>
-#include <string>
-
-using namespace std;
-
-namespace mirunner {
-
-std::shared_ptr<neurun::graph::Graph> MirConverter::convert(mir::Graph* g) {
-  MIR2NNFWVisitor converter;
-  g->accept(&converter);
-  converter.dump();
-  converter.finish();
-  return converter.getGraph();
-}
-
-} // namespace mirunner
diff --git a/compiler/mirunner/MirConverter.h b/compiler/mirunner/MirConverter.h
deleted file mode 100644 (file)
index f471c52..0000000
+++ /dev/null
@@ -1,41 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef NNFW_NNCC_CONTRIB_MIR2NNFW
-#define NNFW_NNCC_CONTRIB_MIR2NNFW
-
-#include "mir/Graph.h"
-#include "graph/Graph.h"
-
-#include <memory>
-
-namespace mirunner {
-
-class MirConverter {
-
-public:
-  explicit MirConverter() = default;
-
-  /**
-   * @brief converts the given MIR graph into a neurun graph
-   * @return neurun graph pointer
-   */
-  std::shared_ptr<neurun::graph::Graph> convert(mir::Graph*);
-};
-
-} //namespace mirunner
-
-#endif //NNFW_NNCC_CONTRIB_MIR2NNFW
diff --git a/compiler/mirunner/README.md b/compiler/mirunner/README.md
deleted file mode 100644 (file)
index a1b204e..0000000
+++ /dev/null
@@ -1,22 +0,0 @@
-# Mirunner
-Mirunner is a thin tflite model runner (multiple import formats are possible) that uses nnc's frontends and IR to
-load a model and runs it through nnfw's NEURUN backend.
-
-## Building mirunner
-Configure the project with two variables set:
-
-* NNFW_ROOT_DIR - root directory of a checked out and built nnfw project
-* NNFW_BUILD_ROOT _(Optional)_ - build root of nnfw, in case of a non-default build configuration
-  (defaults to "${NNFW_ROOT_DIR}/Product/obj")
-
-## Running mirunner
-   Example:
-   ```sh
-    $ export LD_LIBRARY_PATH=<same as NNFW_BUILD_ROOT>/runtimes/neurun/backend/cpu/
-    $ mirunner <path_to_model> <path_to_binary_input_data>
-   ```
-
-As a PoC, the runner only supports the mobilenet tflite network and uses 224x224x3 input data.
-
-The resulting vector is saved to the `./out.dat` file in the current working directory.
-The runner prints the maximum element value and its label index to stdout.
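The binary input format is not documented in the README, but `readData()` in `Runner.cpp` below reveals it: raw float32 values, 224x224x3 of them. A hypothetical generator for a dummy input file; the file name and fill value are arbitrary, not part of the repo:

```cpp
// Hypothetical input generator: writes 224*224*3 raw float32 values,
// the format Runner.cpp's readData() expects.
#include <fstream>
#include <vector>

int main() {
  std::vector<float> data(224 * 224 * 3, 0.5f);  // arbitrary constant image
  std::ofstream out("input.dat", std::ios::binary);
  out.write(reinterpret_cast<const char*>(data.data()),
            data.size() * sizeof(float));
  return 0;
}
```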
diff --git a/compiler/mirunner/Runner.cpp b/compiler/mirunner/Runner.cpp
deleted file mode 100644 (file)
index 70d96cf..0000000
+++ /dev/null
@@ -1,116 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-#include "MirConverter.h"
-
-#include "NeuralNetworks.h"
-#include "wrapper/compilation.h"
-#include "wrapper/execution.h"
-
-#include "tflite_importer.h"
-
-#include <algorithm>
-#include <cassert>
-#include <fstream>
-#include <iostream>
-#include <string>
-#include <vector>
-
-using namespace std;
-
-const int FILE_ERROR = 2;
-const int NUM_OUTPUT_CLASSES = 1001;
-const int INPUT_DIMENSION = 224;
-
-// Read a vector of floats from the given file
-std::vector<float> readData(const string &path) {
-  ifstream in(path, ifstream::binary);
-  if (!in.good()) {
-    cerr << "can not open file " << path << "\n";
-    exit(FILE_ERROR);
-  }
-  in.seekg(0, ifstream::end);
-  size_t len = in.tellg();
-  in.seekg(0, ifstream::beg);
-  assert(len % sizeof(float) == 0);
-  size_t size = len / sizeof(float);
-  vector<float> vec(size);
-  for (size_t i = 0; i < size; ++i) {
-    in.read(reinterpret_cast<char*>(&vec[i]), sizeof(float));
-  }
-  return vec;
-}
-
-int main(int argc, char **argv) {
-  if (argc < 3) {
-    std::cout << "Usage:\n mir_run <path to tflte model> <path to binary input>\n";
-    return 1;
-  }
-  string model_path{argv[1]};
-  string input_data_path{argv[2]};
-
-  mir::Graph* g;
-  {
-    nnc::TfliteImporter importer{model_path};
-
-    importer.import();
-    g = importer.createIR();
-  }
-
-  std::shared_ptr<neurun::graph::Graph> neurun_graph;
-  {
-    mirunner::MirConverter converter{};
-    neurun_graph = converter.convert(g);
-  }
-  delete g;
-
-  auto compilation = new ANeuralNetworksCompilation(neurun_graph);
-  compilation->finish();
-  std::cout << (int)compilation->state() << std::endl;
-  std::shared_ptr<neurun::exec::IExecutor> executor;
-  compilation->publish(executor);
-  auto execution = new ANeuralNetworksExecution{executor};
-  uint32_t in_shape[] = {1, INPUT_DIMENSION, INPUT_DIMENSION, 3};
-  ANeuralNetworksOperandType in_type {
-    .type = ANEURALNETWORKS_TENSOR_FLOAT32, .dimensionCount = 4, .dimensions = in_shape
-  };
-
-  uint32_t out_shape[] = {1, NUM_OUTPUT_CLASSES};
-  ANeuralNetworksOperandType out_type{
-    .type = ANEURALNETWORKS_TENSOR_FLOAT32, .dimensionCount = 2, .dimensions = out_shape
-  };
-
-  vector<float> input = readData(input_data_path);
-  assert(input.size() == INPUT_DIMENSION*INPUT_DIMENSION*3);
-
-  float output[NUM_OUTPUT_CLASSES];
-  execution->setInput(0, &in_type, input.data(), input.size() * sizeof(float));
-  execution->setOutput(0, &out_type, output, sizeof(output));
-  executor->execute();
-  std::cout << "DONE\n";
-
-  auto max = std::max_element(std::begin(output), std::end(output));
-  auto maxid = max - std::begin(output);
-
-  std::cout << maxid << " : " << *max;
-  ofstream out("./out.dat", ofstream::binary);
-  out.write((char *)output, NUM_OUTPUT_CLASSES * sizeof(float));
-  out.close();
-
-  delete execution;
-  delete compilation;
-
-  return 0;
-}
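For completeness, the `./out.dat` file the runner writes holds the 1001 raw float32 scores; a sketch of reading it back and recovering the top label, mirroring what the runner already prints to stdout (hypothetical post-processing, not part of the repo):

```cpp
// Hypothetical post-processing: reload ./out.dat (1001 raw float32 scores,
// see Runner.cpp above) and print the top-scoring label index and value.
#include <algorithm>
#include <fstream>
#include <iostream>
#include <vector>

int main() {
  std::vector<float> scores(1001);
  std::ifstream in("./out.dat", std::ios::binary);
  in.read(reinterpret_cast<char*>(scores.data()),
          scores.size() * sizeof(float));
  auto max = std::max_element(scores.begin(), scores.end());
  std::cout << (max - scores.begin()) << " : " << *max << "\n";
  return 0;
}
```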
diff --git a/compiler/mirunner/requires.cmake b/compiler/mirunner/requires.cmake
deleted file mode 100644 (file)
index 3669dcf..0000000
+++ /dev/null
@@ -1,2 +0,0 @@
-require("nnc")
-require("mir")