[neurun] Move InternalType.h into ir directory (#9390)
author Sergei Barannikov/AI Tools Lab /SRR/Engineer/Samsung Electronics <s.barannikov@samsung.com>
Fri, 6 Dec 2019 11:05:05 +0000 (14:05 +0300)
committer 이한종/On-Device Lab(SR)/Engineer/삼성전자 <hanjoung.lee@samsung.com>
Fri, 6 Dec 2019 11:05:05 +0000 (20:05 +0900)
* Move `InternalType.h` into `ir` directory.
* Move `Activation`, `PaddingType`, `ExplicitPadding`, `Padding`, `Stride` to `neurun::ir` namespace.

Signed-off-by: Sergei Barannikov <s.barannikov@samsung.com>
59 files changed:
runtime/neurun/backend/acl_cl/KernelGenerator.cc
runtime/neurun/backend/acl_cl/ShapeFixer.cc
runtime/neurun/backend/acl_common/Convert.cc
runtime/neurun/backend/acl_common/Convert.h
runtime/neurun/backend/acl_neon/KernelGenerator.cc
runtime/neurun/backend/acl_neon/ShapeFixer.cc
runtime/neurun/backend/cpu/kernel/AddLayer.cc
runtime/neurun/backend/cpu/kernel/AddLayer.h
runtime/neurun/backend/cpu/kernel/AvgPoolLayer.cc
runtime/neurun/backend/cpu/kernel/AvgPoolLayer.h
runtime/neurun/backend/cpu/kernel/ConvolutionLayer.cc
runtime/neurun/backend/cpu/kernel/ConvolutionLayer.h
runtime/neurun/backend/cpu/kernel/DepthwiseConvolutionLayer.cc
runtime/neurun/backend/cpu/kernel/DepthwiseConvolutionLayer.h
runtime/neurun/backend/cpu/kernel/FullyConnectedLayer.cc
runtime/neurun/backend/cpu/kernel/FullyConnectedLayer.h
runtime/neurun/backend/cpu/kernel/MaxPoolLayer.cc
runtime/neurun/backend/cpu/kernel/MaxPoolLayer.h
runtime/neurun/backend/cpu/kernel/MulLayer.cc
runtime/neurun/backend/cpu/kernel/MulLayer.h
runtime/neurun/backend/cpu/kernel/OperationUtils.cc
runtime/neurun/backend/cpu/kernel/OperationUtils.h
runtime/neurun/backend/cpu/kernel/SubLayer.cc
runtime/neurun/backend/cpu/kernel/SubLayer.h
runtime/neurun/backend/srcn/KernelGenerator.cc
runtime/neurun/backend/srcn/kernel/AddLayer.cc
runtime/neurun/backend/srcn/kernel/AddLayer.h
runtime/neurun/backend/srcn/kernel/InstanceNormLayer.cc
runtime/neurun/backend/srcn/kernel/InstanceNormLayer.h
runtime/neurun/backend/srcn/kernel/OperationUtils.h
runtime/neurun/core/include/ir/InternalType.h [moved from runtime/neurun/core/include/model/InternalType.h with 76% similarity]
runtime/neurun/core/include/model/operation/Add.h
runtime/neurun/core/include/model/operation/AvgPool2D.h
runtime/neurun/core/include/model/operation/Conv2D.h
runtime/neurun/core/include/model/operation/DepthwiseConv2D.h
runtime/neurun/core/include/model/operation/Div.h
runtime/neurun/core/include/model/operation/FullyConnected.h
runtime/neurun/core/include/model/operation/InstanceNorm.h
runtime/neurun/core/include/model/operation/L2Pool2D.h
runtime/neurun/core/include/model/operation/LSTM.h
runtime/neurun/core/include/model/operation/MaxPool2D.h
runtime/neurun/core/include/model/operation/Mul.h
runtime/neurun/core/include/model/operation/RNN.h
runtime/neurun/core/include/model/operation/Sub.h
runtime/neurun/core/include/model/operation/TransposeConv.h
runtime/neurun/core/include/util/Padding.h
runtime/neurun/core/include/util/Utils.h
runtime/neurun/core/src/compiler/OperationValidator.cc
runtime/neurun/core/src/exec/interp/operations/OperationUtil.h
runtime/neurun/core/src/ir/dumper/Dumper.cc
runtime/neurun/core/src/util/Padding.cc
runtime/neurun/core/src/util/ShapeInference.cc
runtime/neurun/core/src/util/Utils.cc
runtime/neurun/frontend/base_loader/base_loader.h
runtime/neurun/frontend/nnapi/wrapper/NNAPIConvert.h
runtime/neurun/test/core/compiler/Scheduler.cc
runtime/neurun/test/core/exec/ExecInstance.cc
runtime/neurun/test/core/exec/interp/ExecManager.cc
runtime/neurun/test/graph/operation/SetIO.cc

index a8e4e0b..0be03c5 100644 (file)
@@ -26,7 +26,7 @@
 #include "kernel/ConcatLayer.h"
 #include "model/Index.h"
 #include "ir/DataType.h"
-#include "model/InternalType.h"
+#include "ir/InternalType.h"
 #include "compiler/IExecutionBuilder.h"
 #include "exec/NopFunction.h"
 #include "util/logging.h"
@@ -61,7 +61,7 @@ private:
   void appendReLU6(::arm_compute::ICLTensor *ifm_alloc);
 
 public:
-  void append(model::Activation code, ::arm_compute::ICLTensor *ifm_alloc);
+  void append(ir::Activation code, ::arm_compute::ICLTensor *ifm_alloc);
 
 private:
   IExecutionBuilder &_builder;
@@ -109,26 +109,26 @@ void ActivationBuilder::appendReLU6(::arm_compute::ICLTensor *ifm_alloc)
   _builder.append(std::move(acl_fn));
 }
 
-void ActivationBuilder::append(model::Activation code, ::arm_compute::ICLTensor *ifm_alloc)
+void ActivationBuilder::append(ir::Activation code, ::arm_compute::ICLTensor *ifm_alloc)
 {
   switch (code)
   {
-    case model::Activation::NONE:
+    case ir::Activation::NONE:
     {
       // DO NOTHING
       break;
     }
-    case model::Activation::RELU:
+    case ir::Activation::RELU:
     {
       appendReLU(ifm_alloc);
       break;
     }
-    case model::Activation::RELU1:
+    case ir::Activation::RELU1:
     {
       appendReLU1(ifm_alloc);
       break;
     }
-    case model::Activation::RELU6:
+    case ir::Activation::RELU6:
     {
       appendReLU6(ifm_alloc);
       break;
@@ -1555,14 +1555,14 @@ void KernelGenerator::visit(const model::operation::TransposeConv &node)
 
   const auto stride = node.param().stride;
 
-  assert((node.param().padding.type == model::PaddingType::SAME) ||
-         (node.param().padding.type == model::PaddingType::VALID));
+  assert((node.param().padding.type == ir::PaddingType::SAME) ||
+         (node.param().padding.type == ir::PaddingType::VALID));
   auto padding = neurun::util::calculatePadding(node.param().padding, ofm_shape, ifm_shape, stride,
                                                 ker_shape.W, ker_shape.H);
 
   uint32_t invalid_horizontal = 0;
   uint32_t invalid_vertical = 0;
-  if (node.param().padding.type == model::PaddingType::VALID)
+  if (node.param().padding.type == ir::PaddingType::VALID)
   {
     invalid_horizontal =
         ofm_shape.W - (1 + (ifm_shape.W - 1) * stride.horizontal) - (ker_shape.W - 1);
index 2448e2e..58efe0d 100644 (file)
@@ -25,7 +25,6 @@
 
 #include "kernel/ConcatLayer.h"
 #include "model/Index.h"
-#include "model/InternalType.h"
 #include "compiler/IExecutionBuilder.h"
 #include "exec/NopFunction.h"
 #include "util/logging.h"
index ed8258b..b3e22e6 100644 (file)
@@ -125,8 +125,8 @@ namespace acl_common
   return info;
 }
 
-::arm_compute::PadStrideInfo asPadStrideInfo(const model::ExplicitPadding &padding,
-                                             const model::Stride &stride)
+::arm_compute::PadStrideInfo asPadStrideInfo(const ir::ExplicitPadding &padding,
+                                             const ir::Stride &stride)
 {
   return ::arm_compute::PadStrideInfo{stride.horizontal,
                                       stride.vertical,
@@ -137,27 +137,26 @@ namespace acl_common
                                       ::arm_compute::DimensionRoundingType::FLOOR};
 }
 
-::arm_compute::ActivationLayerInfo
-asActivationLayerInfo(const ::neurun::model::Activation &act_code)
+::arm_compute::ActivationLayerInfo asActivationLayerInfo(const ir::Activation act_code)
 {
   switch (act_code)
   {
-    case ::neurun::model::Activation::NONE:
+    case ir::Activation::NONE:
       return ::arm_compute::ActivationLayerInfo{};
-    case ::neurun::model::Activation::RELU:
+    case ir::Activation::RELU:
       return ::arm_compute::ActivationLayerInfo{
           ::arm_compute::ActivationLayerInfo::ActivationFunction::RELU};
-    case ::neurun::model::Activation::RELU1:
+    case ir::Activation::RELU1:
       return ::arm_compute::ActivationLayerInfo{
           ::arm_compute::ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, 1.0f, -1.0f};
-    case ::neurun::model::Activation::RELU6:
+    case ir::Activation::RELU6:
       return ::arm_compute::ActivationLayerInfo{
           ::arm_compute::ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, 6.0f, 0.0f};
     // Cases for activation of LSTM.
-    case ::neurun::model::Activation::TANH:
+    case ir::Activation::TANH:
       return ::arm_compute::ActivationLayerInfo{
           ::arm_compute::ActivationLayerInfo::ActivationFunction::TANH, 1.0f, 1.0f};
-    case ::neurun::model::Activation::SIGMOID:
+    case ir::Activation::SIGMOID:
       // NOTE The sigmoid function is a special case of the Logistic function when L=1, k=1, x0=0.
       // TODO In ACL and nnapi sepc, currently, Logistic's L always is 1, k always is 1, x0 always
       // 0(always sigmoid) regardless of values of the parameter.
index 66d4405..f8564b7 100644 (file)
@@ -22,7 +22,7 @@
 #include <arm_compute/core/TensorShape.h>
 
 #include "ir/Layout.h"
-#include "model/InternalType.h"
+#include "ir/InternalType.h"
 #include "model/Operand.h"
 #include "model/Shape.h"
 #include "model/TypeInfo.h"
@@ -53,11 +53,10 @@ namespace acl_common
                                        ir::Layout frontend_layout, ir::Layout backend_layout,
                                        bool apply_dim_correction = true);
 
-::arm_compute::PadStrideInfo asPadStrideInfo(const model::ExplicitPadding &padding,
-                                             const model::Stride &stride);
+::arm_compute::PadStrideInfo asPadStrideInfo(const ir::ExplicitPadding &padding,
+                                             const ir::Stride &stride);
 
-::arm_compute::ActivationLayerInfo
-asActivationLayerInfo(const ::neurun::model::Activation &act_code);
+::arm_compute::ActivationLayerInfo asActivationLayerInfo(ir::Activation act_code);
 
 std::unique_ptr<AclFunction> asAclFunction(std::unique_ptr<::arm_compute::IFunction> &&layer);
 
index ea03b9e..84e9177 100644 (file)
@@ -26,7 +26,7 @@
 #include "util/Padding.h"
 #include "model/Index.h"
 #include "ir/DataType.h"
-#include "model/InternalType.h"
+#include "ir/InternalType.h"
 #include "compiler/IExecutionBuilder.h"
 #include "exec/NopFunction.h"
 #include "util/logging.h"
@@ -60,7 +60,7 @@ private:
   void appendReLU6(::arm_compute::ITensor *ifm_alloc);
 
 public:
-  void append(model::Activation act, ::arm_compute::ITensor *ifm_alloc);
+  void append(ir::Activation act, ::arm_compute::ITensor *ifm_alloc);
 
 private:
   IExecutionBuilder &_builder;
@@ -108,26 +108,26 @@ void ActivationBuilder::appendReLU6(::arm_compute::ITensor *ifm_alloc)
   _builder.append(std::move(acl_fn));
 }
 
-void ActivationBuilder::append(model::Activation act, ::arm_compute::ITensor *ifm_alloc)
+void ActivationBuilder::append(ir::Activation act, ::arm_compute::ITensor *ifm_alloc)
 {
   switch (act)
   {
-    case model::Activation::NONE:
+    case ir::Activation::NONE:
     {
       // DO NOTHING
       break;
     }
-    case model::Activation::RELU:
+    case ir::Activation::RELU:
     {
       appendReLU(ifm_alloc);
       break;
     }
-    case model::Activation::RELU1:
+    case ir::Activation::RELU1:
     {
       appendReLU1(ifm_alloc);
       break;
     }
-    case model::Activation::RELU6:
+    case ir::Activation::RELU6:
     {
       appendReLU6(ifm_alloc);
       break;
@@ -1842,14 +1842,14 @@ void KernelGenerator::visit(const model::operation::TransposeConv &node)
 
   const auto stride = node.param().stride;
 
-  assert((node.param().padding.type == model::PaddingType::SAME) ||
-         (node.param().padding.type == model::PaddingType::VALID));
+  assert((node.param().padding.type == ir::PaddingType::SAME) ||
+         (node.param().padding.type == ir::PaddingType::VALID));
   auto padding = neurun::util::calculatePadding(node.param().padding, ofm_shape, ifm_shape, stride,
                                                 ker_shape.W, ker_shape.H);
 
   uint32_t invalid_horizontal = 0;
   uint32_t invalid_vertical = 0;
-  if (node.param().padding.type == model::PaddingType::VALID)
+  if (node.param().padding.type == ir::PaddingType::VALID)
   {
     invalid_horizontal =
         ofm_shape.W - (1 + (ifm_shape.W - 1) * stride.horizontal) - (ker_shape.W - 1);
index 3d69d8f..80f539a 100644 (file)
@@ -34,7 +34,6 @@
 #include "kernel/ConcatLayer.h"
 #include "util/Padding.h"
 #include "model/Index.h"
-#include "model/InternalType.h"
 #include "compiler/IExecutionBuilder.h"
 #include "exec/NopFunction.h"
 #include "util/logging.h"
index fa9af6f..389e326 100644 (file)
@@ -60,7 +60,7 @@ void AddLayer::addQuant8()
 }
 
 void AddLayer::configure(uint8_t *lhsData, const TensorDescriptor &lhsDescr, uint8_t *rhsData,
-                         const TensorDescriptor &rhsDescr, const model::Activation activation,
+                         const TensorDescriptor &rhsDescr, const ir::Activation activation,
                          uint8_t *outputData, const TensorDescriptor &outputDescr)
 {
   _lhsData.u8 = lhsData;
index cf933c6..7018e4c 100644 (file)
@@ -44,7 +44,7 @@ public:
   void addQuant8();
 
   void configure(uint8_t *lhsData, const TensorDescriptor &lhsDescr, uint8_t *rhsData,
-                 const TensorDescriptor &rhsDescr, const model::Activation activation,
+                 const TensorDescriptor &rhsDescr, const ir::Activation activation,
                  uint8_t *outputData, const TensorDescriptor &outputDescr);
 
   void run();
@@ -64,7 +64,7 @@ private:
   TensorDescriptor _rhsDescr;
   TensorDescriptor _outputDescr;
 
-  model::Activation _activation{model::Activation::NONE};
+  ir::Activation _activation{ir::Activation::NONE};
 
   OperandType _inputType{OperandType::FLOAT32};
 };
index 15e015b..3899557 100644 (file)
@@ -42,7 +42,7 @@ namespace kernel
 AvgPoolLayer::AvgPoolLayer()
     : _inputData(), _outputData(), _inputDescr(), _outputDescr(), _paddingLeft(0), _paddingTop(0),
       _paddingRight(0), _paddingBottom(0), _strideWidth(0), _strideHeight(0), _kernelWidth(0),
-      _kernelHeight(0), _activation(model::Activation::NONE), _inputType(OperandType::FLOAT32)
+      _kernelHeight(0), _activation(ir::Activation::NONE), _inputType(OperandType::FLOAT32)
 {
   // DO NOTHING
 }
@@ -77,7 +77,7 @@ void AvgPoolLayer::configure(uint8_t *inputData, const TensorDescriptor inputDes
                              const uint32_t paddingTop, const uint32_t paddingBottom,
                              const uint32_t strideWidth, const uint32_t strideHeight,
                              const uint32_t kernelWidth, const uint32_t kernelHeight,
-                             const model::Activation activation, uint8_t *outputData,
+                             const ir::Activation activation, uint8_t *outputData,
                              const TensorDescriptor outputDescr)
 {
   _inputData.u8 = inputData;
index e42f403..6339efa 100644 (file)
@@ -44,8 +44,8 @@ public:
                  const uint32_t paddingRight, const uint32_t paddingTop,
                  const uint32_t paddingBottom, const uint32_t strideWidth,
                  const uint32_t strideHeight, const uint32_t kernelWidth,
-                 const uint32_t kernelHeight, const model::Activation activation,
-                 uint8_t *outputData, const TensorDescriptor outputDescr);
+                 const uint32_t kernelHeight, const ir::Activation activation, uint8_t *outputData,
+                 const TensorDescriptor outputDescr);
 
   void run();
   void runSync()
@@ -72,7 +72,7 @@ private:
   uint32_t _kernelWidth;
   uint32_t _kernelHeight;
 
-  model::Activation _activation;
+  ir::Activation _activation;
 
   OperandType _inputType;
 };
index 289b26c..2fdb0ba 100644 (file)
@@ -31,7 +31,7 @@ namespace kernel
 ConvolutionLayer::ConvolutionLayer()
     : _inputData(), _kernelData(), _outputData(), _biasData(), _inputDescr(), _kernelDescr(),
       _outputDescr(), _biasDescr(), _paddingLeft(0), _paddingTop(0), _paddingRight(0),
-      _paddingBottom(0), _strideWidth(0), _strideHeight(0), _activation(model::Activation::NONE),
+      _paddingBottom(0), _strideWidth(0), _strideHeight(0), _activation(ir::Activation::NONE),
       _inputType(OperandType::FLOAT32)
 {
   // DO NOTHING
@@ -99,7 +99,7 @@ void ConvolutionLayer::configure(uint8_t *inputData, const TensorDescriptor inpu
                                  const uint32_t paddingLeft, const uint32_t paddingRight,
                                  const uint32_t paddingTop, const uint32_t paddingBottom,
                                  const uint32_t strideWidth, const uint32_t strideHeight,
-                                 const model::Activation activation, uint8_t *outputData,
+                                 const ir::Activation activation, uint8_t *outputData,
                                  const TensorDescriptor outputDescr)
 {
   _inputData.u8 = inputData;
index 1efb1dc..16669f3 100644 (file)
@@ -45,7 +45,7 @@ public:
                  const TensorDescriptor biasDescr, const uint32_t paddingLeft,
                  const uint32_t paddingRight, const uint32_t paddingTop,
                  const uint32_t paddingBottom, const uint32_t strideW, const uint32_t strideH,
-                 const model::Activation activation, uint8_t *outputData,
+                 const ir::Activation activation, uint8_t *outputData,
                  const TensorDescriptor outputDescr);
 
   void run();
@@ -75,7 +75,7 @@ private:
   uint32_t _strideWidth;
   uint32_t _strideHeight;
 
-  model::Activation _activation;
+  ir::Activation _activation;
 
   OperandType _inputType;
 };
index 2ba5c42..e33e346 100644 (file)
@@ -31,7 +31,7 @@ DepthwiseConvolutionLayer::DepthwiseConvolutionLayer()
     : _inputData(), _kernelData(), _outputData(), _biasData(), _inputDescr(), _kernelDescr(),
       _outputDescr(), _biasDescr(), _paddingLeft(0), _paddingTop(0), _paddingRight(0),
       _paddingBottom(0), _strideWidth(0), _strideHeight(0), _multiplier(0),
-      _activation(model::Activation::NONE), _inputType(OperandType::FLOAT32)
+      _activation(ir::Activation::NONE), _inputType(OperandType::FLOAT32)
 {
   // DO NOTHING
 }
@@ -103,7 +103,7 @@ void DepthwiseConvolutionLayer::configure(uint8_t *inputData, const TensorDescri
                                           const uint32_t paddingTop, const uint32_t paddingBottom,
                                           const uint32_t strideWidth, const uint32_t strideHeight,
                                           const uint32_t multiplier,
-                                          const model::Activation activation, uint8_t *outputData,
+                                          const ir::Activation activation, uint8_t *outputData,
                                           const TensorDescriptor outputDescr)
 {
   _inputData.u8 = inputData;
index 85230e1..575cc0a 100644 (file)
@@ -45,7 +45,7 @@ public:
                  const TensorDescriptor biasDescr, const uint32_t paddingLeft,
                  const uint32_t paddingRight, const uint32_t paddingTop,
                  const uint32_t paddingBottom, const uint32_t strideW, const uint32_t strideH,
-                 const uint32_t multiplier, const model::Activation activation, uint8_t *outputData,
+                 const uint32_t multiplier, const ir::Activation activation, uint8_t *outputData,
                  const TensorDescriptor outputDescr);
 
   void run();
@@ -77,7 +77,7 @@ private:
 
   uint32_t _multiplier;
 
-  model::Activation _activation;
+  ir::Activation _activation;
 
   OperandType _inputType;
 };
index b9361b2..055f715 100644 (file)
@@ -32,7 +32,7 @@ namespace kernel
 
 FullyConnectedLayer::FullyConnectedLayer()
     : _inputData(), _weightsData(), _biasData(), _outputData(), _inputDescr(), _weightsDescr(),
-      _biasDescr(), _outputDescr(), _activation(model::Activation::NONE),
+      _biasDescr(), _outputDescr(), _activation(ir::Activation::NONE),
       _inputType(OperandType::FLOAT32)
 {
   // DO NOTHING
@@ -86,7 +86,7 @@ void FullyConnectedLayer::fullyConnectedQuant8()
 void FullyConnectedLayer::configure(uint8_t *inputData, const TensorDescriptor inputDescr,
                                     uint8_t *weightsData, const TensorDescriptor weightsDescr,
                                     uint8_t *biasData, const TensorDescriptor biasDescr,
-                                    model::Activation activation, uint8_t *outputData,
+                                    ir::Activation activation, uint8_t *outputData,
                                     const TensorDescriptor outputDescr)
 {
   _inputData.u8 = inputData;
index 83e493a..9fdc393 100644 (file)
@@ -42,8 +42,8 @@ public:
 
   void configure(uint8_t *inputData, const TensorDescriptor inputDescr, uint8_t *weightsData,
                  const TensorDescriptor weightsDescr, uint8_t *biasData,
-                 const TensorDescriptor biasDescr, model::Activation activation,
-                 uint8_t *outputData, const TensorDescriptor outputDescr);
+                 const TensorDescriptor biasDescr, ir::Activation activation, uint8_t *outputData,
+                 const TensorDescriptor outputDescr);
 
   void run();
   void runSync()
@@ -64,7 +64,7 @@ private:
   TensorDescriptor _biasDescr;
   TensorDescriptor _outputDescr;
 
-  model::Activation _activation;
+  ir::Activation _activation;
 
   OperandType _inputType;
 };
index 0bce4c3..095cd6d 100644 (file)
@@ -41,7 +41,7 @@ namespace kernel
 MaxPoolLayer::MaxPoolLayer()
     : _inputData(), _outputData(), _inputDescr(), _outputDescr(), _paddingLeft(0), _paddingTop(0),
       _paddingRight(0), _paddingBottom(0), _strideWidth(0), _strideHeight(0), _kernelWidth(0),
-      _kernelHeight(0), _activation(model::Activation::NONE), _inputType(OperandType::FLOAT32)
+      _kernelHeight(0), _activation(ir::Activation::NONE), _inputType(OperandType::FLOAT32)
 {
   // DO NOTHING
 }
@@ -76,7 +76,7 @@ void MaxPoolLayer::configure(uint8_t *inputData, const TensorDescriptor inputDes
                              const uint32_t paddingTop, const uint32_t paddingBottom,
                              const uint32_t strideWidth, const uint32_t strideHeight,
                              const uint32_t kernelWidth, const uint32_t kernelHeight,
-                             const model::Activation activation, uint8_t *outputData,
+                             const ir::Activation activation, uint8_t *outputData,
                              const TensorDescriptor outputDescr)
 {
   _inputData.u8 = inputData;
index bfd3481..88a574c 100644 (file)
@@ -44,8 +44,8 @@ public:
                  const uint32_t paddingRight, const uint32_t paddingTop,
                  const uint32_t paddingBottom, const uint32_t strideWidth,
                  const uint32_t strideHeight, const uint32_t kernelWidth,
-                 const uint32_t kernelHeight, const model::Activation activation,
-                 uint8_t *outputData, const TensorDescriptor outputDescr);
+                 const uint32_t kernelHeight, const ir::Activation activation, uint8_t *outputData,
+                 const TensorDescriptor outputDescr);
 
   void run();
   void runSync()
@@ -72,7 +72,7 @@ private:
   uint32_t _kernelWidth;
   uint32_t _kernelHeight;
 
-  model::Activation _activation;
+  ir::Activation _activation;
 
   OperandType _inputType;
 };
index 2a6f177..9848130 100644 (file)
@@ -60,7 +60,7 @@ void MulLayer::mulQuant8()
 }
 
 void MulLayer::configure(uint8_t *lhsData, const TensorDescriptor &lhsDescr, uint8_t *rhsData,
-                         const TensorDescriptor &rhsDescr, const model::Activation activation,
+                         const TensorDescriptor &rhsDescr, const ir::Activation activation,
                          uint8_t *outputData, const TensorDescriptor &outputDescr)
 {
   _lhsData.u8 = lhsData;
index f5bda8e..05fc305 100644 (file)
@@ -44,7 +44,7 @@ public:
   void mulQuant8();
 
   void configure(uint8_t *lhsData, const TensorDescriptor &lhsDescr, uint8_t *rhsData,
-                 const TensorDescriptor &rhsDescr, const model::Activation activation,
+                 const TensorDescriptor &rhsDescr, const ir::Activation activation,
                  uint8_t *outputData, const TensorDescriptor &outputDescr);
 
   void run();
@@ -64,7 +64,7 @@ private:
   TensorDescriptor _rhsDescr;
   TensorDescriptor _outputDescr;
 
-  model::Activation _activation{model::Activation::NONE};
+  ir::Activation _activation{ir::Activation::NONE};
 
   OperandType _inputType{OperandType::FLOAT32};
 };
index e9f1140..dbcbac2 100644 (file)
@@ -106,30 +106,30 @@ void QuantizeMultiplierGreaterThanOne(double double_multiplier, int32_t *quantiz
   *quantized_multiplier = static_cast<int32_t>(q_fixed);
 }
 
-void CalculateActivationRangeFloat(model::Activation activation, float *activation_min,
+void CalculateActivationRangeFloat(ir::Activation activation, float *activation_min,
                                    float *activation_max)
 {
-  if (activation == model::Activation::RELU)
+  if (activation == ir::Activation::RELU)
   {
     *activation_min = 0.f;
     *activation_max = std::numeric_limits<float>::max();
   }
-  else if (activation == model::Activation::RELU6)
+  else if (activation == ir::Activation::RELU6)
   {
     *activation_min = 0.f;
     *activation_max = 6.f;
   }
-  else if (activation == model::Activation::RELU1)
+  else if (activation == ir::Activation::RELU1)
   {
     *activation_min = -1.f;
     *activation_max = 1.f;
   }
-  else if (activation == model::Activation::SIGMOID)
+  else if (activation == ir::Activation::SIGMOID)
   {
     *activation_min = 0.f;
     *activation_max = 1.f;
   }
-  else if (activation == model::Activation::NONE)
+  else if (activation == ir::Activation::NONE)
   {
     *activation_min = std::numeric_limits<float>::lowest();
     *activation_max = std::numeric_limits<float>::max();
@@ -140,9 +140,8 @@ void CalculateActivationRangeFloat(model::Activation activation, float *activati
   }
 }
 
-void CalculateActivationRangeUint8(model::Activation activation,
-                                   const TensorDescriptor &outputDescr, int32_t *act_min,
-                                   int32_t *act_max)
+void CalculateActivationRangeUint8(ir::Activation activation, const TensorDescriptor &outputDescr,
+                                   int32_t *act_min, int32_t *act_max)
 {
   const int32_t qmin = std::numeric_limits<uint8_t>::min();
   const int32_t qmax = std::numeric_limits<uint8_t>::max();
@@ -151,27 +150,27 @@ void CalculateActivationRangeUint8(model::Activation activation,
   auto quantize = [scale, zero_point](float f) {
     return zero_point + static_cast<int32_t>(std::round(f / scale));
   };
-  if (activation == model::Activation::RELU)
+  if (activation == ir::Activation::RELU)
   {
     *act_min = std::max(qmin, quantize(0.0));
     *act_max = qmax;
   }
-  else if (activation == model::Activation::RELU6)
+  else if (activation == ir::Activation::RELU6)
   {
     *act_min = std::max(qmin, quantize(0.0));
     *act_max = std::min(qmax, quantize(6.0));
   }
-  else if (activation == model::Activation::RELU1)
+  else if (activation == ir::Activation::RELU1)
   {
     *act_min = std::max(qmin, quantize(-1.0));
     *act_max = std::min(qmax, quantize(1.0));
   }
-  else if (activation == model::Activation::SIGMOID)
+  else if (activation == ir::Activation::SIGMOID)
   {
     *act_min = std::max(qmin, quantize(0.0));
     *act_max = std::min(qmax, quantize(1.0));
   }
-  else if (activation == model::Activation::NONE)
+  else if (activation == ir::Activation::NONE)
   {
     *act_min = qmin;
     *act_max = qmax;
index c466f9f..f8ab905 100644 (file)
@@ -25,7 +25,7 @@
 
 #include "model/Operand.h"
 #include "ir/DataType.h"
-#include <model/InternalType.h>
+#include <ir/InternalType.h>
 
 using OperandType = neurun::ir::DataType;
 
@@ -130,12 +130,11 @@ void GetQuantizedConvolutionMultiplier(const TensorDescriptor &inputDescr,
 void QuantizeMultiplierGreaterThanOne(double double_multiplier, int32_t *quantized_multiplier,
                                       int *left_shift);
 
-void CalculateActivationRangeFloat(model::Activation activation, float *activation_min,
+void CalculateActivationRangeFloat(ir::Activation activation, float *activation_min,
                                    float *activation_max);
 
-void CalculateActivationRangeUint8(model::Activation activation,
-                                   const TensorDescriptor &outputDescr, int32_t *act_min,
-                                   int32_t *act_max);
+void CalculateActivationRangeUint8(ir::Activation activation, const TensorDescriptor &outputDescr,
+                                   int32_t *act_min, int32_t *act_max);
 
 int32_t CalculateInputRadius(int input_integer_bits, int input_left_shift);
 
index 984464a..946b978 100644 (file)
@@ -59,7 +59,7 @@ void SubLayer::subQuant8()
 }
 
 void SubLayer::configure(uint8_t *lhsData, const TensorDescriptor &lhsDescr, uint8_t *rhsData,
-                         const TensorDescriptor &rhsDescr, const model::Activation activation,
+                         const TensorDescriptor &rhsDescr, const ir::Activation activation,
                          uint8_t *outputData, const TensorDescriptor &outputDescr)
 {
   _lhsData.u8 = lhsData;
index 7036ca1..c9abdb4 100644 (file)
@@ -44,7 +44,7 @@ public:
   void subQuant8();
 
   void configure(uint8_t *lhsData, const TensorDescriptor &lhsDescr, uint8_t *rhsData,
-                 const TensorDescriptor &rhsDescr, const model::Activation activation,
+                 const TensorDescriptor &rhsDescr, const ir::Activation activation,
                  uint8_t *outputData, const TensorDescriptor &outputDescr);
 
   void run();
@@ -64,7 +64,7 @@ private:
   TensorDescriptor _rhsDescr;
   TensorDescriptor _outputDescr;
 
-  model::Activation _activation{model::Activation::NONE};
+  ir::Activation _activation{ir::Activation::NONE};
 
   OperandType _inputType{OperandType::FLOAT32};
 };
index c37109f..d3f86d4 100644 (file)
@@ -211,7 +211,7 @@ void KernelGenerator::visit(const model::operation::TransposeConv &node)
   const auto ker_height = ker_shape.H;
   const auto ker_width = ker_shape.W;
   const auto stride = node.param().stride;
-  const int padding_type = (node.param().padding.type == model::PaddingType::SAME);
+  const int padding_type = (node.param().padding.type == ir::PaddingType::SAME);
   const auto padding = neurun::util::calculatePadding(node.param().padding, ofm_shape, ifm_shape,
                                                       stride, ker_width, ker_height);
 
index 80b7a34..b53dfe8 100644 (file)
@@ -58,7 +58,7 @@ namespace kernel
 
 void AddLayer::addFloat32()
 {
-  assert(_activation == model::Activation::NONE);
+  assert(_activation == ir::Activation::NONE);
 
   // ncnn kernel support
   // 1. rank < 4
@@ -90,7 +90,7 @@ void AddLayer::addQuant8()
 }
 
 void AddLayer::configure(uint8_t *lhsData, const TensorDescriptor &lhsDescr, uint8_t *rhsData,
-                         const TensorDescriptor &rhsDescr, const model::Activation activation,
+                         const TensorDescriptor &rhsDescr, const ir::Activation activation,
                          uint8_t *outputData, const TensorDescriptor &outputDescr,
                          const ir::Layout backendLayout)
 {
index 9995754..1cae171 100644 (file)
@@ -44,7 +44,7 @@ public:
   void addQuant8();
 
   void configure(uint8_t *lhsData, const TensorDescriptor &lhsDescr, uint8_t *rhsData,
-                 const TensorDescriptor &rhsDescr, const model::Activation activation,
+                 const TensorDescriptor &rhsDescr, const ir::Activation activation,
                  uint8_t *outputData, const TensorDescriptor &outputDescr,
                  const ir::Layout backendLayout);
 
@@ -65,7 +65,7 @@ private:
   TensorDescriptor _rhsDescr;
   TensorDescriptor _outputDescr;
 
-  model::Activation _activation{model::Activation::NONE};
+  ir::Activation _activation{ir::Activation::NONE};
 
   OperandType _inputType{OperandType::FLOAT32};
 
index d7627bc..c83fe6d 100644 (file)
@@ -30,7 +30,7 @@ namespace kernel
 
 InstanceNormLayer::InstanceNormLayer()
     : _inputData(), _gammaData(), _betaData(), _outputData(), _inputDescr(), _gammaDescr(),
-      _betaDescr(), _outputDescr(), _epsilon(1e-5), _activation(model::Activation::NONE),
+      _betaDescr(), _outputDescr(), _epsilon(1e-5), _activation(ir::Activation::NONE),
       _inputType(OperandType::FLOAT32), _backendLayout(ir::Layout::UNKNOWN)
 {
   // DO NOTHING
@@ -60,12 +60,12 @@ void InstanceNormLayer::instanceNormFloat32()
     const int output_width = _outputDescr.dimensions[3];
     nnfw::ncnn::Mat out_mat(output_width, output_height, output_channels, _outputData.f);
 
-    if (_activation == model::Activation::NONE)
+    if (_activation == ir::Activation::NONE)
     {
       nnfw::ncnn::ncnn_instance_norm_rowmajor(in_mat, out_mat, gamma_mat, beta_mat, input_channels,
                                               _epsilon);
     }
-    else if (_activation == model::Activation::RELU)
+    else if (_activation == ir::Activation::RELU)
     {
       nnfw::ncnn::ncnn_instance_norm_with_relu_rowmajor(in_mat, out_mat, gamma_mat, beta_mat,
                                                         input_channels, _epsilon, 0.f);
@@ -97,12 +97,12 @@ void InstanceNormLayer::instanceNormFloat32()
     const int output_channels = _outputDescr.dimensions[3];
     nnfw::ncnn::Mat out_mat(output_channels, output_width, output_height, _outputData.f);
 
-    if (_activation == model::Activation::NONE)
+    if (_activation == ir::Activation::NONE)
     {
       nnfw::ncnn::ncnn_instance_norm_colmajor(in_mat, out_mat, gamma_mat, beta_mat, input_channels,
                                               _epsilon);
     }
-    else if (_activation == model::Activation::RELU)
+    else if (_activation == ir::Activation::RELU)
     {
       nnfw::ncnn::ncnn_instance_norm_with_relu_colmajor(in_mat, out_mat, gamma_mat, beta_mat,
                                                         input_channels, _epsilon, 0.f);
@@ -121,7 +121,7 @@ void InstanceNormLayer::configure(uint8_t *inputData, const TensorDescriptor inp
                                   uint8_t *gammaData, const TensorDescriptor gammaDescr,
                                   uint8_t *betaData, const TensorDescriptor betaDescr,
                                   uint8_t *outputData, const TensorDescriptor outputDescr,
-                                  float epsilon, model::Activation activation,
+                                  float epsilon, ir::Activation activation,
                                   ir::Layout backendLayout)
 {
   _inputData.u8 = inputData;
index 2cad9e0..0ac0cef 100644 (file)
@@ -40,7 +40,7 @@ public:
   void configure(uint8_t *inputData, const TensorDescriptor inputDescr, uint8_t *gammaData,
                  const TensorDescriptor gammaDescr, uint8_t *betaData,
                  const TensorDescriptor betaDescr, uint8_t *outputData,
-                 const TensorDescriptor outputDescr, float epsilon, model::Activation activation,
+                 const TensorDescriptor outputDescr, float epsilon, ir::Activation activation,
                  ir::Layout backendLayout);
 
   void run();
@@ -63,7 +63,7 @@ private:
   TensorDescriptor _outputDescr;
 
   float _epsilon;
-  model::Activation _activation;
+  ir::Activation _activation;
 
   OperandType _inputType;
   ir::Layout _backendLayout;
index dce00b5..a0610a2 100644 (file)
@@ -23,7 +23,7 @@
 
 #include "model/Operand.h"
 #include "ir/DataType.h"
-#include <model/InternalType.h>
+#include <ir/InternalType.h>
 #include <ncnn/srcn/conv_type.h>
 
 using OperandType = neurun::ir::DataType;
  * limitations under the License.
  */
 
-#ifndef __NEURUN_MODEL_INTERNAL_TYPE_H__
-#define __NEURUN_MODEL_INTERNAL_TYPE_H__
+#ifndef __NEURUN_IR_INTERNAL_TYPE_H__
+#define __NEURUN_IR_INTERNAL_TYPE_H__
 
 #include <cstdint>
 
 namespace neurun
 {
-namespace model
+namespace ir
 {
 
 enum class Activation
@@ -62,7 +62,17 @@ struct Stride
   uint32_t horizontal;
 };
 
+} // namespace ir
+
+// TODO Remove after merging 'graph' and 'model' namespaces.
+namespace model
+{
+using Activation = ir::Activation;
+using PaddingType = ir::PaddingType;
+using ExplicitPadding = ir::ExplicitPadding;
+using Padding = ir::Padding;
+using Stride = ir::Stride;
 } // namespace model
 } // namespace neurun
 
-#endif // __NEURUN_MODEL_INTERNAL_TYPE_H__
+#endif // __NEURUN_IR_INTERNAL_TYPE_H__
index 9efd128..045c7af 100644 (file)
@@ -18,7 +18,7 @@
 #define __NEURUN_MODEL_OPERATION_ADD_H__
 
 #include "model/Operation.h"
-#include "model/InternalType.h"
+#include "ir/InternalType.h"
 
 namespace neurun
 {
index e9eed79..17429c1 100644 (file)
@@ -20,7 +20,7 @@
 #include <memory>
 
 #include "model/Operation.h"
-#include "model/InternalType.h"
+#include "ir/InternalType.h"
 
 namespace neurun
 {
index a25cb1f..258b3e7 100644 (file)
@@ -20,7 +20,7 @@
 #include <memory>
 
 #include "model/Operation.h"
-#include "model/InternalType.h"
+#include "ir/InternalType.h"
 
 namespace neurun
 {
index 3205bd5..fb49401 100644 (file)
@@ -20,7 +20,7 @@
 #include <memory>
 
 #include "model/Operation.h"
-#include "model/InternalType.h"
+#include "ir/InternalType.h"
 
 namespace neurun
 {
index beb58eb..1b85d1d 100644 (file)
@@ -18,7 +18,7 @@
 #define __NEURUN_MODEL_OPERATION_DIV_H__
 
 #include "model/Operation.h"
-#include "model/InternalType.h"
+#include "ir/InternalType.h"
 
 namespace neurun
 {
index 1178aa0..1ee6f67 100644 (file)
@@ -20,7 +20,7 @@
 #include <memory>
 
 #include "model/Operation.h"
-#include "model/InternalType.h"
+#include "ir/InternalType.h"
 
 namespace neurun
 {
index 61c3057..bab24ef 100644 (file)
@@ -18,7 +18,7 @@
 #define __NEURUN_MODEL_OPERATION_INSTANCE_NORM_H__
 
 #include "model/Operation.h"
-#include "model/InternalType.h"
+#include "ir/InternalType.h"
 
 namespace neurun
 {
index 48ef431..858cc49 100644 (file)
@@ -20,7 +20,7 @@
 #include <memory>
 
 #include "model/Operation.h"
-#include "model/InternalType.h"
+#include "ir/InternalType.h"
 
 namespace neurun
 {
index db4eb2f..3244d0b 100644 (file)
@@ -16,7 +16,7 @@
 #ifndef __NEURUN_MODEL_OPERATION_LSTM_H__
 #define __NEURUN_MODEL_OPERATION_LSTM_H__
 
-#include "model/InternalType.h"
+#include "ir/InternalType.h"
 #include "model/Operation.h"
 
 namespace neurun
index 6533235..5cb40c0 100644 (file)
@@ -20,7 +20,7 @@
 #include <memory>
 
 #include "model/Operation.h"
-#include "model/InternalType.h"
+#include "ir/InternalType.h"
 
 namespace neurun
 {
index fa5bf14..1e37d0d 100644 (file)
@@ -18,7 +18,7 @@
 #define __NEURUN_MODEL_OPERATION_MUL_H__
 
 #include "model/Operation.h"
-#include "model/InternalType.h"
+#include "ir/InternalType.h"
 
 namespace neurun
 {
index bb1c7d0..2b5bf2d 100644 (file)
@@ -16,7 +16,7 @@
 #ifndef __NEURUN_MODEL_OPERATION_RNN_H__
 #define __NEURUN_MODEL_OPERATION_RNN_H__
 
-#include "model/InternalType.h"
+#include "ir/InternalType.h"
 #include "model/Operation.h"
 
 namespace neurun
index 5156814..a752428 100644 (file)
@@ -18,7 +18,7 @@
 #define __NEURUN_MODEL_OPERATION_SUB_H__
 
 #include "model/Operation.h"
-#include "model/InternalType.h"
+#include "ir/InternalType.h"
 
 namespace neurun
 {
index 1c5dcdd..7926e02 100644 (file)
@@ -20,7 +20,7 @@
 #include <memory>
 
 #include "model/Operation.h"
-#include "model/InternalType.h"
+#include "ir/InternalType.h"
 
 namespace neurun
 {
index 2300132..7bc8b65 100644 (file)
 #include <stdint.h>
 
 #include "model/Shape.h"
-#include "model/InternalType.h"
+#include "ir/InternalType.h"
 
 namespace neurun
 {
 namespace util
 {
 
-model::ExplicitPadding validPadding(void);
-model::ExplicitPadding samePadding(const model::FeatureShape &ifm_shape,
-                                   const model::FeatureShape &ofm_shape,
-                                   const model::Stride &stride, uint32_t kw, uint32_t kh);
-model::ExplicitPadding calculatePadding(const model::Padding &padding,
-                                        const model::FeatureShape &ifm_shape,
-                                        const model::FeatureShape &ofm_shape,
-                                        const model::Stride &stride, uint32_t kw, uint32_t kh);
+ir::ExplicitPadding validPadding(void);
+ir::ExplicitPadding samePadding(const model::FeatureShape &ifm_shape,
+                                const model::FeatureShape &ofm_shape, const ir::Stride &stride,
+                                uint32_t kw, uint32_t kh);
+ir::ExplicitPadding calculatePadding(const ir::Padding &padding,
+                                     const model::FeatureShape &ifm_shape,
+                                     const model::FeatureShape &ofm_shape, const ir::Stride &stride,
+                                     uint32_t kw, uint32_t kh);
 
 } // namespace util
 } // namespace neurun
index 06cd638..63a7a97 100644 (file)
@@ -23,7 +23,7 @@
 #ifndef __NEURUN_UTIL_UTILS_H__
 #define __NEURUN_UTIL_UTILS_H__
 
-#include "model/InternalType.h"
+#include "ir/InternalType.h"
 #include "ir/Layout.h"
 #include "model/Operand.h"
 #include "util/Coordinates.h"
@@ -37,10 +37,10 @@ namespace util
 
 /**
  * @brief Converts a internal padding type to const char*
- * @param[in] code Padding type to be converted
+ * @param[in] type Padding type to be converted
  * @return A string holding the converted value
  */
-const char *to_string(const model::PaddingType &type);
+const char *to_string(ir::PaddingType type);
 
 Coordinates convertCoordinates(const Coordinates &from_coordinates, ir::Layout from_layout,
                                ir::Layout to_layout);
index 3932be5..214139b 100644 (file)
@@ -493,8 +493,8 @@ void OperationValidator::visit(const model::operation::TransposeConv &node)
   UNUSED_RELEASE(ifm_shape);
   UNUSED_RELEASE(ker_shape);
 
-  assert((node.param().padding.type == model::PaddingType::SAME) ||
-         (node.param().padding.type == model::PaddingType::VALID));
+  assert((node.param().padding.type == ir::PaddingType::SAME) ||
+         (node.param().padding.type == ir::PaddingType::VALID));
   assert(ifm_shape.N == ofm_shape.N);
   assert(ifm_shape.C == ker_shape.C);
   assert(ker_shape.N == ofm_shape.C);
index 4d2b4e1..8df4d41 100644 (file)
@@ -18,7 +18,7 @@
 #define __NEURUN_EXEC_INTERP_OPERATIONS_OPERATION_UTILS_H_
 
 #include "model/Shape.h"
-#include "model/InternalType.h"
+#include "ir/InternalType.h"
 
 #include <cker/Shape.h>
 
@@ -75,24 +75,24 @@ inline nnfw::cker::Shape convertExtendShape(const model::Shape &shape)
 }
 
 template <typename T>
-void calculateActivationRange(model::Activation activation, T *activation_min, T *activation_max)
+void calculateActivationRange(ir::Activation activation, T *activation_min, T *activation_max)
 {
-  if (activation == model::Activation::RELU)
+  if (activation == ir::Activation::RELU)
   {
     *activation_min = 0;
     *activation_max = std::numeric_limits<T>::max();
   }
-  else if (activation == model::Activation::RELU6)
+  else if (activation == ir::Activation::RELU6)
   {
     *activation_min = 0;
     *activation_max = 6;
   }
-  else if (activation == model::Activation::RELU1)
+  else if (activation == ir::Activation::RELU1)
   {
     *activation_min = -1;
     *activation_max = 1;
   }
-  else if (activation == model::Activation::NONE)
+  else if (activation == ir::Activation::NONE)
   {
     *activation_min = std::numeric_limits<T>::lowest();
     *activation_max = std::numeric_limits<T>::max();
index d725ac1..961aa36 100644 (file)
@@ -93,7 +93,7 @@ void Dumper::visit(const Concat &node)
 void Dumper::visit(const Conv2D &node)
 {
   std::string padding_type =
-      node.param().padding.type == model::PaddingType::EXPLICIT ? "Explicit" : "Implicit";
+      node.param().padding.type == ir::PaddingType::EXPLICIT ? "Explicit" : "Implicit";
   VERBOSE(LIR) << "* Conv2D(" << padding_type << ")" << std::endl;
   VERBOSE(LIR) << "  - Inputs : IFM(" << node.getInputs().at(Conv2D::Input::INPUT).value()
                << ") Kernel(" << node.getInputs().at(Conv2D::Input::KERNEL).value() << ") Bias("
@@ -112,7 +112,7 @@ void Dumper::visit(const DepthToSpace &node)
 void Dumper::visit(const DepthwiseConv2D &node)
 {
   std::string padding_type =
-      node.param().padding.type == model::PaddingType::EXPLICIT ? "Explicit" : "Implicit";
+      node.param().padding.type == ir::PaddingType::EXPLICIT ? "Explicit" : "Implicit";
   VERBOSE(LIR) << "* DepthwiseConv2D(" << padding_type << ")" << std::endl;
   VERBOSE(LIR) << "  - Inputs : IFM(" << node.getInputs().at(DepthwiseConv2D::Input::INPUT).value()
                << ") Kernel(" << node.getInputs().at(DepthwiseConv2D::Input::KERNEL).value()
@@ -308,7 +308,7 @@ void Dumper::visit(const Logistic &node)
 void Dumper::visit(const MaxPool2D &node)
 {
   std::string padding_type =
-      node.param().padding.type == model::PaddingType::EXPLICIT ? "Explicit" : "Implicit";
+      node.param().padding.type == ir::PaddingType::EXPLICIT ? "Explicit" : "Implicit";
   VERBOSE(LIR) << "* MaxPool2D(" << padding_type << ")" << std::endl;
   VERBOSE(LIR) << "  - Inputs : IFM(" << node.getInputs().at(MaxPool2D::Input::INPUT).value() << ")"
                << std::endl;
@@ -559,7 +559,7 @@ void Dumper::visit(const TopKV2 &node)
 void Dumper::visit(const TransposeConv &node)
 {
   std::string padding_type =
-      node.param().padding.type == model::PaddingType::EXPLICIT ? "Explicit" : "Implicit";
+      node.param().padding.type == ir::PaddingType::EXPLICIT ? "Explicit" : "Implicit";
   VERBOSE(LIR) << "* TransposeConv(" << padding_type << ")" << std::endl;
   VERBOSE(LIR) << "  - Inputs : Output Shape("
                << node.getInputs().at(TransposeConv::Input::OUTPUT_SHAPE).value() << ") KERNEL("
index dd5a3b5..89e4577 100644 (file)
@@ -25,7 +25,7 @@ namespace neurun
 namespace util
 {
 
-model::ExplicitPadding validPadding(void)
+ir::ExplicitPadding validPadding(void)
 {
   //
   // ANEURALNETWORKS_PADDING_VALID
@@ -36,7 +36,7 @@ model::ExplicitPadding validPadding(void)
   // the input at the end that could not fill the whole filter tile
   // will simply be ignored.
   //
-  model::ExplicitPadding padding;
+  ir::ExplicitPadding padding;
 
   padding.top = 0;
   padding.bottom = 0;
@@ -46,10 +46,10 @@ model::ExplicitPadding validPadding(void)
   return padding;
 }
 
-model::ExplicitPadding samePaddingUsingIFM(const model::FeatureShape &ifm_shape,
-                                           const model::Stride &stride, uint32_t kw, uint32_t kh)
+ir::ExplicitPadding samePaddingUsingIFM(const model::FeatureShape &ifm_shape,
+                                        const ir::Stride &stride, uint32_t kw, uint32_t kh)
 {
-  model::ExplicitPadding padding;
+  ir::ExplicitPadding padding;
 
   // ANEURALNETWORKS_PADDING_SAME (from NNAPI spec)
   //
@@ -76,9 +76,9 @@ model::ExplicitPadding samePaddingUsingIFM(const model::FeatureShape &ifm_shape,
   return padding;
 }
 
-model::ExplicitPadding samePadding(const model::FeatureShape &ifm_shape,
-                                   const model::FeatureShape &ofm_shape,
-                                   const model::Stride &stride, uint32_t kw, uint32_t kh)
+ir::ExplicitPadding samePadding(const model::FeatureShape &ifm_shape,
+                                const model::FeatureShape &ofm_shape, const ir::Stride &stride,
+                                uint32_t kw, uint32_t kh)
 {
   const int32_t vertical_expected_output = (ifm_shape.H + stride.vertical - 1) / stride.vertical;
   const int32_t horizontal_expected_output =
@@ -93,20 +93,20 @@ model::ExplicitPadding samePadding(const model::FeatureShape &ifm_shape,
   return samePaddingUsingIFM(ifm_shape, stride, kw, kh);
 }
 
-model::ExplicitPadding calculatePadding(const model::Padding &padding,
-                                        const model::FeatureShape &ifm_shape,
-                                        const model::FeatureShape &ofm_shape,
-                                        const model::Stride &stride, uint32_t kw, uint32_t kh)
+ir::ExplicitPadding calculatePadding(const ir::Padding &padding,
+                                     const model::FeatureShape &ifm_shape,
+                                     const model::FeatureShape &ofm_shape, const ir::Stride &stride,
+                                     uint32_t kw, uint32_t kh)
 {
-  if (padding.type == model::PaddingType::EXPLICIT)
+  if (padding.type == ir::PaddingType::EXPLICIT)
   {
     return padding.param;
   }
-  else if (padding.type == model::PaddingType::SAME)
+  else if (padding.type == ir::PaddingType::SAME)
   {
     return samePadding(ifm_shape, ofm_shape, stride, kw, kh);
   }
-  else if (padding.type == model::PaddingType::VALID)
+  else if (padding.type == ir::PaddingType::VALID)
   {
     return validPadding();
   }
index ffb8dab..de30b9b 100644 (file)
@@ -15,7 +15,7 @@
  */
 
 #include "util/Utils.h"
-#include "model/InternalType.h"
+#include "ir/InternalType.h"
 #include "model/Shape.h"
 #include "model/operation/AvgPool2D.h"
 #include "model/operation/MaxPool2D.h"
@@ -68,22 +68,22 @@ model::Shape broadcastShapes(const model::Shape &lhs_shape, const model::Shape &
 
 // Calculate output height and width of convolution-like operation
 std::pair<int, int> calcConvLikeHeightAndWidth(const int in_h, const int in_w, const int ker_h,
-                                               const int ker_w, const model::Padding pad,
-                                               const model::Stride stride)
+                                               const int ker_w, const ir::Padding pad,
+                                               const ir::Stride stride)
 {
   int32_t out_h = 0, out_w = 0;
 
   switch (pad.type)
   {
-    case model::PaddingType::SAME:
+    case ir::PaddingType::SAME:
       out_h = ceil_div(in_h, stride.vertical);
       out_w = ceil_div(in_w, stride.horizontal);
       break;
-    case model::PaddingType::VALID:
+    case ir::PaddingType::VALID:
       out_h = ceil_div(in_h - ker_h + 1, stride.vertical);
       out_w = ceil_div(in_w - ker_w + 1, stride.horizontal);
       break;
-    case model::PaddingType::EXPLICIT:
+    case ir::PaddingType::EXPLICIT:
       out_h = (in_h + pad.param.top + pad.param.bottom - ker_h) / stride.vertical + 1;
       out_w = (in_w + pad.param.left + pad.param.right - ker_w) / stride.horizontal + 1;
       break;
index f8daa02..1e24e28 100644 (file)
@@ -23,18 +23,18 @@ namespace neurun
 namespace util
 {
 
-const char *to_string(const model::PaddingType &type)
+const char *to_string(const ir::PaddingType type)
 {
-  assert((type == model::PaddingType::EXPLICIT) || (type == model::PaddingType::SAME) ||
-         (type == model::PaddingType::VALID));
+  assert((type == ir::PaddingType::EXPLICIT) || (type == ir::PaddingType::SAME) ||
+         (type == ir::PaddingType::VALID));
 
   switch (type)
   {
-    case model::PaddingType::EXPLICIT:
+    case ir::PaddingType::EXPLICIT:
       return "Padding::EXPLICIT";
-    case model::PaddingType::SAME:
+    case ir::PaddingType::SAME:
       return "Padding::SAME";
-    case model::PaddingType::VALID:
+    case ir::PaddingType::VALID:
       return "Padding::VALID";
   }
 
index d95678a..153e07a 100644 (file)
@@ -65,7 +65,7 @@ protected:
   void loadModel();
 
   // Helper functions
-  model::Activation convertActivation(ActivationFunctionType type);
+  ir::Activation convertActivation(ActivationFunctionType type);
   ir::DataType tensorTypeToDataType(TensorType type);
 
   // Create operands form tflite::Tensor
@@ -152,21 +152,21 @@ void BaseLoader<LoaderDomain, SpecificLoader>::BaseLoader::loadFromFile(const ch
 }
 
 template <typename LoaderDomain, typename SpecificLoader>
-model::Activation BaseLoader<LoaderDomain, SpecificLoader>::BaseLoader::convertActivation(
+ir::Activation BaseLoader<LoaderDomain, SpecificLoader>::BaseLoader::convertActivation(
     const ActivationFunctionType type)
 {
   switch (type)
   {
     case ActivationFunctionType::ActivationFunctionType_NONE:
-      return model::Activation::NONE;
+      return ir::Activation::NONE;
     case ActivationFunctionType::ActivationFunctionType_RELU:
-      return model::Activation::RELU;
+      return ir::Activation::RELU;
     case ActivationFunctionType::ActivationFunctionType_RELU_N1_TO_1:
-      return model::Activation::RELU1;
+      return ir::Activation::RELU1;
     case ActivationFunctionType::ActivationFunctionType_RELU6:
-      return model::Activation::RELU6;
+      return ir::Activation::RELU6;
     case ActivationFunctionType::ActivationFunctionType_TANH:
-      return model::Activation::TANH;
+      return ir::Activation::TANH;
     default:
       throw std::runtime_error(std::string("Unsupported activation type: ")
                                    .append(EnumNameActivationFunctionType(type)));
@@ -284,9 +284,9 @@ void BaseLoader<LoaderDomain, SpecificLoader>::loadStridesAndPaddings(Param &par
   param.stride.horizontal = options->stride_h();
   // Paddings
   if (options->padding() == Padding::Padding_SAME)
-    param.padding.type = model::PaddingType::SAME;
+    param.padding.type = ir::PaddingType::SAME;
   if (options->padding() == Padding::Padding_VALID)
-    param.padding.type = model::PaddingType::VALID;
+    param.padding.type = ir::PaddingType::VALID;
   // param paddings indexes unused
 }
 
index d736414..093c66f 100644 (file)
@@ -26,7 +26,7 @@
 
 #include <model/TypeInfo.h>
 #include <model/Shape.h>
-#include <model/InternalType.h>
+#include <ir/InternalType.h>
 
 class NNAPIConvert
 {
@@ -65,14 +65,14 @@ public:
    * @param[in] act NNAPI's FuseCode type
    * @return    neurun's internal activation type
    */
-  static ::neurun::model::Activation getFusedActivation(FuseCode act);
+  static neurun::ir::Activation getFusedActivation(FuseCode act);
 
   /**
    * @brief     Convert NNAPI PaddingCode to internal padding type
-   * @param[in] act NNAPI's PaddingCode type
+   * @param[in] type NNAPI's PaddingCode type
    * @return    neurun's internal padding type
    */
-  static ::neurun::model::PaddingType getPaddingType(PaddingCode type);
+  static neurun::ir::PaddingType getPaddingType(PaddingCode type);
 };
 
 #endif // __NEURUN_NNAPI_CONVERT_H__
index 66bfc3e..3623608 100644 (file)
@@ -19,7 +19,7 @@
 #include <backend/IShapeFixer.h>
 
 #include <model/Shape.h>
-#include <model/InternalType.h>
+#include <ir/InternalType.h>
 #include <model/TypeInfo.h>
 #include <ir/DataType.h>
 
index bbe8ba7..c31af3d 100644 (file)
@@ -57,12 +57,12 @@ public:
                                                    16));
     // 2nd add operations (result2 <= result1 + rhs2)
     operation::Add::Param param1;
-    param1.activation = neurun::model::Activation::NONE;
+    param1.activation = Activation::NONE;
     auto input_set1 = OperandIndexSequence{operand_lhs, operand_rhs1};
     auto output_set1 = OperandIndexSequence{operand_result1};
     graph->addOperation(nnfw::cpp14::make_unique<operation::Add>(input_set1, output_set1, param1));
     operation::Add::Param param2;
-    param2.activation = neurun::model::Activation::NONE;
+    param2.activation = Activation::NONE;
     auto input_set2 = OperandIndexSequence{operand_result1, operand_rhs2};
     auto output_set2 = OperandIndexSequence{operand_result2};
     graph->addOperation(nnfw::cpp14::make_unique<operation::Add>(input_set2, output_set2, param2));
index 2eba0ab..69acb74 100644 (file)
@@ -58,7 +58,7 @@ protected:
     // Add operations
 
     operation::Add::Param param;
-    param.activation = neurun::model::Activation::NONE;
+    param.activation = Activation::NONE;
     auto input_set = OperandIndexSequence{operand_lhs, operand_rhs};
     auto output_set = OperandIndexSequence{operand_result};
     _graph->addOperation(nnfw::cpp14::make_unique<operation::Add>(input_set, output_set, param));
@@ -108,13 +108,13 @@ protected:
     // 2nd add operations (result2 <= result1 + rhs2)
 
     operation::Add::Param param1;
-    param1.activation = neurun::model::Activation::NONE;
+    param1.activation = Activation::NONE;
     auto input_set1 = OperandIndexSequence{operand_lhs, operand_rhs1};
     auto output_set1 = OperandIndexSequence{operand_result1};
     _graph->addOperation(nnfw::cpp14::make_unique<operation::Add>(input_set1, output_set1, param1));
 
     operation::Add::Param param2;
-    param2.activation = neurun::model::Activation::NONE;
+    param2.activation = Activation::NONE;
     auto input_set2 = OperandIndexSequence{operand_result1, operand_rhs2};
     auto output_set2 = OperandIndexSequence{operand_result2};
     _graph->addOperation(nnfw::cpp14::make_unique<operation::Add>(input_set2, output_set2, param2));
@@ -160,7 +160,7 @@ protected:
     // Add operations
 
     operation::Add::Param param;
-    param.activation = neurun::model::Activation::NONE;
+    param.activation = Activation::NONE;
     auto input_set = OperandIndexSequence{operand_lhs, operand_rhs};
     auto output_set = OperandIndexSequence{operand_result};
     _graph->addOperation(nnfw::cpp14::make_unique<operation::Add>(input_set, output_set, param));
index ab0193f..95f1f13 100644 (file)
@@ -45,10 +45,10 @@ TEST(graph_operation_setIO, operation_setIO_conv)
   IndexSet inputs{input_operand, kernel_operand, bias_operand};
 
   Graph::Param conv_params;
-  conv_params.padding.type = neurun::model::PaddingType::SAME;
+  conv_params.padding.type = neurun::ir::PaddingType::SAME;
   conv_params.stride.horizontal = 1;
   conv_params.stride.vertical = 1;
-  conv_params.activation = neurun::model::Activation::NONE;
+  conv_params.activation = neurun::ir::Activation::NONE;
 
   auto output_operand = graph.addOperand(shape, type).value();
   IndexSet outputs{output_operand};