Update .clang-format (#4361)
author Jonghyun Park / On-Device Lab (SR) / Staff Engineer / Samsung Electronics <jh1302.park@samsung.com>
Sun, 21 Jul 2019 23:43:18 +0000 (08:43 +0900)
committer GitHub Enterprise <noreply-CODE@samsung.com>
Sun, 21 Jul 2019 23:43:18 +0000 (08:43 +0900)
This commit updates .clang-format based on internal policy.

Signed-off-by: Jonghyun Park <jh1302.park@samsung.com>
41 files changed:
.clang-format
compiler/enco/core/src/CppGen/Subnet.cpp
compiler/enco/core/src/Transforms/ConcatLowering.cpp
compiler/enco/frontend/tflite/src/Op/Activation.cpp
compiler/enco/frontend/tflite/src/Op/Concatenation.cpp
compiler/exo-tflite/src/ShapeInference.cpp
compiler/exo-tflite/src/TFLExporterImpl.test.cpp
compiler/exo-tflite/src/TypeInference.cpp
compiler/i5diff/src/entry.cpp
compiler/loco-exporter/src/TypeInference.cpp
compiler/loco/include/loco/IR/CanonicalNodeImpl.h
compiler/loco/src/IR/PermutingCodec.cpp
compiler/locomotiv/src/Node/AvgPool2D.cpp
compiler/locomotiv/src/Node/BiasAdd.cpp
compiler/locomotiv/src/Node/BiasEncode.cpp
compiler/locomotiv/src/Node/ConstGen.cpp
compiler/locomotiv/src/Node/DepthwiseFilterEncode.cpp
compiler/locomotiv/src/Node/FeatureDecode.cpp
compiler/locomotiv/src/Node/FeatureEncode.cpp
compiler/locomotiv/src/Node/FilterEncode.cpp
compiler/locomotiv/src/Node/Forward.cpp
compiler/locomotiv/src/Node/MaxPool2D.cpp
compiler/locomotiv/src/Node/Push.cpp
compiler/locomotiv/src/Node/ReLU.cpp
compiler/locomotiv/src/Node/ReLU6.cpp
compiler/locomotiv/src/Node/Reshape.cpp
compiler/locomotiv/src/Node/TensorConcat.cpp
compiler/locop/src/FormattedGraph.cpp
compiler/moco-tf/src/Canonicalization/ConstCanonicalizer.cpp
compiler/moco-tf/src/Convert.cpp
compiler/moco-tf/src/Dialect/TFNodeImpl.h
compiler/moco-tf/src/NodeShape.cpp
compiler/moco-tf/src/Op/Const.cpp
compiler/moco-tf/src/TFFormattedGraph.cpp
compiler/pp/src/LinearDocument.cpp
compiler/tf2tflite/src/Driver.cpp
compiler/tfkit/src/PackCommand.cpp
compiler/tfkit/src/UnpackCommand.cpp
compiler/tflchef/core/src/Convert.cpp
compiler/tflchef/core/src/ModelChef.cpp
compiler/tflchef/tflite/src/Convert.cpp

diff --git a/.clang-format b/.clang-format
index 44dadf7..7dcf11c 100644
@@ -1,5 +1,6 @@
 ---
 Language:        Cpp
+BasedOnStyle: Google
 AccessModifierOffset: -2
 AlignAfterOpenBracket: Align
 AlignEscapedNewlinesLeft: true
@@ -52,9 +53,11 @@ IncludeCategories:
     Priority:        3
   - Regex:           '.*'
     Priority:        1
-IndentCaseLabels: false
+IndentCaseLabels: true
 IndentWidth:     2
 IndentWrappedFunctionNames: false
+JavaScriptQuotes: Leave
+JavaScriptWrapImports: true
 KeepEmptyLinesAtTheStartOfBlocks: true
 MacroBlockBegin: ''
 MacroBlockEnd:   ''
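 
For readers skimming the rest of this commit: the two changes that matter are the new `BasedOnStyle: Google` base and `IndentCaseLabels` flipping to `true`; every hunk below is the mechanical re-format that falls out of them. A minimal sketch of the resulting switch style under the updated .clang-format above (the enum and function names here are illustrative only, not from this commit):

#include <stdexcept>

// Hypothetical snippet formatted per the updated options
// (IndentWidth: 2, IndentCaseLabels: true, braces on their own line).
enum class Axis { Batch, Height };

int as_index(Axis axis)
{
  switch (axis)
  {
    case Axis::Batch: // case labels now sit one level inside the switch body
      return 0;
    case Axis::Height:
      return 1;
  }

  throw std::invalid_argument{"axis"};
}

The hunks that follow were presumably produced by re-running clang-format -i over the tree with this configuration in place; no behavior changes are intended.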
diff --git a/compiler/enco/core/src/CppGen/Subnet.cpp b/compiler/enco/core/src/CppGen/Subnet.cpp
index 9ee7610..9a636c6 100644
@@ -89,10 +89,10 @@ const char *scalar_operand_code(const ann::DType &dtype)
 {
   switch (dtype)
   {
-  case ann::DType::S32:
-    return "ANEURALNETWORKS_INT32";
-  default:
-    break;
+    case ann::DType::S32:
+      return "ANEURALNETWORKS_INT32";
+    default:
+      break;
   };
 
   throw std::invalid_argument("dtype");
@@ -102,12 +102,12 @@ const char *tensor_operand_code(const ann::DType &dtype)
 {
   switch (dtype)
   {
-  case ann::DType::S32:
-    return "ANEURALNETWORKS_TENSOR_INT32";
-  case ann::DType::F32:
-    return "ANEURALNETWORKS_TENSOR_FLOAT32";
-  default:
-    break;
+    case ann::DType::S32:
+      return "ANEURALNETWORKS_TENSOR_INT32";
+    case ann::DType::F32:
+      return "ANEURALNETWORKS_TENSOR_FLOAT32";
+    default:
+      break;
   };
 
   throw std::invalid_argument("dtype");
@@ -234,8 +234,8 @@ private:
     return #ENUM;
 #include "ANN/IR/Operation.def"
 #undef ANN_OPERATION
-    default:
-      throw std::invalid_argument{"code"};
+      default:
+        throw std::invalid_argument{"code"};
     };
   }
 
diff --git a/compiler/enco/core/src/Transforms/ConcatLowering.cpp b/compiler/enco/core/src/Transforms/ConcatLowering.cpp
index 9b929cb..bf613c9 100644
@@ -31,16 +31,16 @@ inline uint32_t as_tensor_axis(const coco::ConcatF::Axis &axis)
 {
   switch (axis)
   {
-  case coco::ConcatF::Axis::Batch:
-    return 0;
-  case coco::ConcatF::Axis::Depth:
-    return 1;
-  case coco::ConcatF::Axis::Height:
-    return 2;
-  case coco::ConcatF::Axis::Width:
-    return 3;
-  default:
-    break;
+    case coco::ConcatF::Axis::Batch:
+      return 0;
+    case coco::ConcatF::Axis::Depth:
+      return 1;
+    case coco::ConcatF::Axis::Height:
+      return 2;
+    case coco::ConcatF::Axis::Width:
+      return 3;
+    default:
+      break;
   };
 
   throw std::invalid_argument{"axis is unknown value"};
diff --git a/compiler/enco/frontend/tflite/src/Op/Activation.cpp b/compiler/enco/frontend/tflite/src/Op/Activation.cpp
index fb20513..d6215ba 100644
@@ -48,46 +48,46 @@ coco::FeatureObject *build_activation(tflite::ActivationFunctionType act, coco::
 
   switch (act)
   {
-  case tflite::ActivationFunctionType::ActivationFunctionType_NONE:
-  {
-    // Create Copy Instr (copying from ifm to output_obj),
-    // redundant layer but optimized by backend
-    auto copy_ins = instr_builder(m).copy(output_obj, ifm);
-
-    // Append the instruction to the block
-    block->instr()->append(copy_ins);
-    break;
-  }
-  case tflite::ActivationFunctionType::ActivationFunctionType_RELU:
-  {
-    // Create Eval(output_obj, ReLU(load(ifm)))
-    auto load_op = op_builder(m).load(ifm).pop();
-    auto relu_op = m->entity()->op()->create<coco::ReLU>();
-    relu_op->arg(load_op);
-
-    auto eval_ins = instr_builder(m).eval(output_obj, relu_op);
-
-    // Append the instruction to the block
-    block->instr()->append(eval_ins);
-    break;
-  }
-  case tflite::ActivationFunctionType::ActivationFunctionType_RELU6:
-  {
-    // Create Eval(output_obj, ReLU6(load(ifm)))
-    auto load_op = op_builder(m).load(ifm).pop();
-    auto relu6_op = m->entity()->op()->create<coco::ReLU6>();
-    relu6_op->arg(load_op);
-
-    auto eval_ins = instr_builder(m).eval(output_obj, relu6_op);
-
-    // Append the instruction to the block
-    block->instr()->append(eval_ins);
-    break;
-  }
-  default:
-    // TODO support other fused activations
-    assert(false);
-    break;
+    case tflite::ActivationFunctionType::ActivationFunctionType_NONE:
+    {
+      // Create Copy Instr (copying from ifm to output_obj),
+      // redundant layer but optimized by backend
+      auto copy_ins = instr_builder(m).copy(output_obj, ifm);
+
+      // Append the instruction to the block
+      block->instr()->append(copy_ins);
+      break;
+    }
+    case tflite::ActivationFunctionType::ActivationFunctionType_RELU:
+    {
+      // Create Eval(output_obj, ReLU(load(ifm)))
+      auto load_op = op_builder(m).load(ifm).pop();
+      auto relu_op = m->entity()->op()->create<coco::ReLU>();
+      relu_op->arg(load_op);
+
+      auto eval_ins = instr_builder(m).eval(output_obj, relu_op);
+
+      // Append the instruction to the block
+      block->instr()->append(eval_ins);
+      break;
+    }
+    case tflite::ActivationFunctionType::ActivationFunctionType_RELU6:
+    {
+      // Create Eval(output_obj, ReLU6(load(ifm)))
+      auto load_op = op_builder(m).load(ifm).pop();
+      auto relu6_op = m->entity()->op()->create<coco::ReLU6>();
+      relu6_op->arg(load_op);
+
+      auto eval_ins = instr_builder(m).eval(output_obj, relu6_op);
+
+      // Append the instruction to the block
+      block->instr()->append(eval_ins);
+      break;
+    }
+    default:
+      // TODO support other fused activations
+      assert(false);
+      break;
   }
 
   return output_obj;
diff --git a/compiler/enco/frontend/tflite/src/Op/Concatenation.cpp b/compiler/enco/frontend/tflite/src/Op/Concatenation.cpp
index dba64d9..ce0f47b 100644
@@ -45,20 +45,20 @@ coco::ConcatF::Axis as_ConcatF_axis(uint32_t axis)
 
   switch (axis)
   {
-  case 0:
-    res = coco::ConcatF::Axis::Batch;
-    break;
-  case 1:
-    res = coco::ConcatF::Axis::Height;
-    break;
-  case 2:
-    res = coco::ConcatF::Axis::Width;
-    break;
-  case 3:
-    res = coco::ConcatF::Axis::Depth;
-    break;
-  default:
-    break;
+    case 0:
+      res = coco::ConcatF::Axis::Batch;
+      break;
+    case 1:
+      res = coco::ConcatF::Axis::Height;
+      break;
+    case 2:
+      res = coco::ConcatF::Axis::Width;
+      break;
+    case 3:
+      res = coco::ConcatF::Axis::Depth;
+      break;
+    default:
+      break;
   }
 
   return res;
diff --git a/compiler/exo-tflite/src/ShapeInference.cpp b/compiler/exo-tflite/src/ShapeInference.cpp
index bd9e9b4..968aa13 100644
@@ -128,32 +128,33 @@ ShapeDescription ShapeGetter::visit(loco::MaxPool2D *node)
   tflite::Padding padding = getOpPadding(node->pad());
   switch (padding)
   {
-  case tflite::Padding_SAME:
-  {
-    auto height = static_cast<uint32_t>(pred_shape._dims[1]);
-    auto width = static_cast<uint32_t>(pred_shape._dims[2]);
+    case tflite::Padding_SAME:
+    {
+      auto height = static_cast<uint32_t>(pred_shape._dims[1]);
+      auto width = static_cast<uint32_t>(pred_shape._dims[2]);
 
-    int32_t proposed_res_height = ceil_div(height, node->stride()->vertical());
-    int32_t proposed_res_width = ceil_div(width, node->stride()->horizontal());
+      int32_t proposed_res_height = ceil_div(height, node->stride()->vertical());
+      int32_t proposed_res_width = ceil_div(width, node->stride()->horizontal());
 
-    shape._dims[1] = pred_shape._dims[1] == -1 ? -1 : proposed_res_height;
-    shape._dims[2] = pred_shape._dims[2] == -1 ? -1 : proposed_res_width;
-    break;
-  }
-  case tflite::Padding_VALID:
-  {
-    auto padded_h = static_cast<uint32_t>(pred_shape._dims[1] - (node->window()->vertical() - 1));
-    auto padded_w = static_cast<uint32_t>(pred_shape._dims[2] - (node->window()->horizontal() - 1));
+      shape._dims[1] = pred_shape._dims[1] == -1 ? -1 : proposed_res_height;
+      shape._dims[2] = pred_shape._dims[2] == -1 ? -1 : proposed_res_width;
+      break;
+    }
+    case tflite::Padding_VALID:
+    {
+      auto padded_h = static_cast<uint32_t>(pred_shape._dims[1] - (node->window()->vertical() - 1));
+      auto padded_w =
+          static_cast<uint32_t>(pred_shape._dims[2] - (node->window()->horizontal() - 1));
 
-    int32_t proposed_height = ceil_div(padded_h, node->stride()->vertical());
-    int32_t proposed_width = ceil_div(padded_w, node->stride()->horizontal());
+      int32_t proposed_height = ceil_div(padded_h, node->stride()->vertical());
+      int32_t proposed_width = ceil_div(padded_w, node->stride()->horizontal());
 
-    shape._dims[1] = pred_shape._dims[1] == -1 ? -1 : proposed_height;
-    shape._dims[2] = pred_shape._dims[2] == -1 ? -1 : proposed_width;
-    break;
-  }
-  default:
-    assert(false && "unknown padding type");
+      shape._dims[1] = pred_shape._dims[1] == -1 ? -1 : proposed_height;
+      shape._dims[2] = pred_shape._dims[2] == -1 ? -1 : proposed_width;
+      break;
+    }
+    default:
+      assert(false && "unknown padding type");
   }
   return shape;
 }
@@ -172,32 +173,33 @@ ShapeDescription ShapeGetter::visit(loco::AvgPool2D *node)
   tflite::Padding padding = getOpPadding(node->pad());
   switch (padding)
   {
-  case tflite::Padding_SAME:
-  {
-    auto height = static_cast<uint32_t>(ifm_shape._dims[1]);
-    auto width = static_cast<uint32_t>(ifm_shape._dims[2]);
+    case tflite::Padding_SAME:
+    {
+      auto height = static_cast<uint32_t>(ifm_shape._dims[1]);
+      auto width = static_cast<uint32_t>(ifm_shape._dims[2]);
 
-    int32_t proposed_res_height = ceil_div(height, node->stride()->vertical());
-    int32_t proposed_res_width = ceil_div(width, node->stride()->horizontal());
+      int32_t proposed_res_height = ceil_div(height, node->stride()->vertical());
+      int32_t proposed_res_width = ceil_div(width, node->stride()->horizontal());
 
-    shape._dims[1] = ifm_shape._dims[1] == -1 ? -1 : proposed_res_height;
-    shape._dims[2] = ifm_shape._dims[2] == -1 ? -1 : proposed_res_width;
-    break;
-  }
-  case tflite::Padding_VALID:
-  {
-    auto padded_h = static_cast<uint32_t>(ifm_shape._dims[1] - (node->window()->vertical() - 1));
-    auto padded_w = static_cast<uint32_t>(ifm_shape._dims[2] - (node->window()->horizontal() - 1));
+      shape._dims[1] = ifm_shape._dims[1] == -1 ? -1 : proposed_res_height;
+      shape._dims[2] = ifm_shape._dims[2] == -1 ? -1 : proposed_res_width;
+      break;
+    }
+    case tflite::Padding_VALID:
+    {
+      auto padded_h = static_cast<uint32_t>(ifm_shape._dims[1] - (node->window()->vertical() - 1));
+      auto padded_w =
+          static_cast<uint32_t>(ifm_shape._dims[2] - (node->window()->horizontal() - 1));
 
-    int32_t proposed_height = ceil_div(padded_h, node->stride()->vertical());
-    int32_t proposed_width = ceil_div(padded_w, node->stride()->horizontal());
+      int32_t proposed_height = ceil_div(padded_h, node->stride()->vertical());
+      int32_t proposed_width = ceil_div(padded_w, node->stride()->horizontal());
 
-    shape._dims[1] = ifm_shape._dims[1] == -1 ? -1 : proposed_height;
-    shape._dims[2] = ifm_shape._dims[2] == -1 ? -1 : proposed_width;
-    break;
-  }
-  default:
-    assert(false && "unknown padding type");
+      shape._dims[1] = ifm_shape._dims[1] == -1 ? -1 : proposed_height;
+      shape._dims[2] = ifm_shape._dims[2] == -1 ? -1 : proposed_width;
+      break;
+    }
+    default:
+      assert(false && "unknown padding type");
   }
   return shape;
 }
@@ -229,32 +231,32 @@ ShapeDescription ShapeGetter::visit(loco::Conv2D *node)
   tflite::Padding padding = getOpPadding(node->pad());
   switch (padding)
   {
-  case tflite::Padding_SAME:
-  {
-    auto height = static_cast<uint32_t>(ifm_shape._dims[1]);
-    auto width = static_cast<uint32_t>(ifm_shape._dims[2]);
+    case tflite::Padding_SAME:
+    {
+      auto height = static_cast<uint32_t>(ifm_shape._dims[1]);
+      auto width = static_cast<uint32_t>(ifm_shape._dims[2]);
 
-    int32_t proposed_res_height = ceil_div(height, node->stride()->vertical());
-    int32_t proposed_res_width = ceil_div(width, node->stride()->horizontal());
+      int32_t proposed_res_height = ceil_div(height, node->stride()->vertical());
+      int32_t proposed_res_width = ceil_div(width, node->stride()->horizontal());
 
-    shape._dims[1] = ifm_shape._dims[1] == -1 ? -1 : proposed_res_height;
-    shape._dims[2] = ifm_shape._dims[2] == -1 ? -1 : proposed_res_width;
-    break;
-  }
-  case tflite::Padding_VALID:
-  {
-    auto padded_h = static_cast<uint32_t>(ifm_shape._dims[1] - (ker_shape._dims[1] - 1));
-    auto padded_w = static_cast<uint32_t>(ifm_shape._dims[2] - (ker_shape._dims[2] - 1));
+      shape._dims[1] = ifm_shape._dims[1] == -1 ? -1 : proposed_res_height;
+      shape._dims[2] = ifm_shape._dims[2] == -1 ? -1 : proposed_res_width;
+      break;
+    }
+    case tflite::Padding_VALID:
+    {
+      auto padded_h = static_cast<uint32_t>(ifm_shape._dims[1] - (ker_shape._dims[1] - 1));
+      auto padded_w = static_cast<uint32_t>(ifm_shape._dims[2] - (ker_shape._dims[2] - 1));
 
-    int32_t proposed_height = ceil_div(padded_h, node->stride()->vertical());
-    int32_t proposed_width = ceil_div(padded_w, node->stride()->horizontal());
+      int32_t proposed_height = ceil_div(padded_h, node->stride()->vertical());
+      int32_t proposed_width = ceil_div(padded_w, node->stride()->horizontal());
 
-    shape._dims[1] = ifm_shape._dims[1] == -1 ? -1 : proposed_height;
-    shape._dims[2] = ifm_shape._dims[2] == -1 ? -1 : proposed_width;
-    break;
-  }
-  default:
-    assert(false && "unknown padding type");
+      shape._dims[1] = ifm_shape._dims[1] == -1 ? -1 : proposed_height;
+      shape._dims[2] = ifm_shape._dims[2] == -1 ? -1 : proposed_width;
+      break;
+    }
+    default:
+      assert(false && "unknown padding type");
   }
   return shape;
 }
diff --git a/compiler/exo-tflite/src/TFLExporterImpl.test.cpp b/compiler/exo-tflite/src/TFLExporterImpl.test.cpp
index b1403ed..08fd90d 100644
@@ -130,16 +130,16 @@ TEST_F(TFLExporterImplTests, Regression_0000)
 
       switch (model->operator_codes()->Get(opcode_index)->builtin_code())
       {
-      case tflite::BuiltinOperator_RELU:
-        ASSERT_EQ(relu_exeuction_index, -1);
-        relu_exeuction_index = static_cast<int64_t>(n);
-        break;
-      case tflite::BuiltinOperator_MAX_POOL_2D:
-        ASSERT_EQ(maxpool_execution_index, -1);
-        maxpool_execution_index = static_cast<int64_t>(n);
-        break;
-      default:
-        break;
+        case tflite::BuiltinOperator_RELU:
+          ASSERT_EQ(relu_exeuction_index, -1);
+          relu_exeuction_index = static_cast<int64_t>(n);
+          break;
+        case tflite::BuiltinOperator_MAX_POOL_2D:
+          ASSERT_EQ(maxpool_execution_index, -1);
+          maxpool_execution_index = static_cast<int64_t>(n);
+          break;
+        default:
+          break;
       }
     }
 
diff --git a/compiler/exo-tflite/src/TypeInference.cpp b/compiler/exo-tflite/src/TypeInference.cpp
index 4e1318e..e542f3d 100644
@@ -32,26 +32,26 @@ tflite::TensorType translateLocoTypeToTFLite(loco::DataType dtype)
 {
   switch (dtype)
   {
-  case loco::DataType::U8:
-    return tflite::TensorType_UINT8;
-  //  case loco::DataType::U16: unsupported
-  //  case loco::DataType::U32: unsupported
-  //  case loco::DataType::U64: unsupported
-  case loco::DataType::S8:
-    return tflite::TensorType_INT8;
-  case loco::DataType::S16:
-    return tflite::TensorType_INT16;
-  case loco::DataType::S32:
-    return tflite::TensorType_INT32;
-  case loco::DataType::S64:
-    return tflite::TensorType_INT64;
-  case loco::DataType::FLOAT16:
-    return tflite::TensorType_FLOAT16;
-  case loco::DataType::FLOAT32:
-    return tflite::TensorType_FLOAT32;
-  //  case loco::DataType::FLOAT64: unsupported
-  default:
-    assert(false && "unsupported data type");
+    case loco::DataType::U8:
+      return tflite::TensorType_UINT8;
+    //  case loco::DataType::U16: unsupported
+    //  case loco::DataType::U32: unsupported
+    //  case loco::DataType::U64: unsupported
+    case loco::DataType::S8:
+      return tflite::TensorType_INT8;
+    case loco::DataType::S16:
+      return tflite::TensorType_INT16;
+    case loco::DataType::S32:
+      return tflite::TensorType_INT32;
+    case loco::DataType::S64:
+      return tflite::TensorType_INT64;
+    case loco::DataType::FLOAT16:
+      return tflite::TensorType_FLOAT16;
+    case loco::DataType::FLOAT32:
+      return tflite::TensorType_FLOAT32;
+    //  case loco::DataType::FLOAT64: unsupported
+    default:
+      assert(false && "unsupported data type");
   }
 }
 
diff --git a/compiler/i5diff/src/entry.cpp b/compiler/i5diff/src/entry.cpp
index 12d948e..81d9ff4 100644
@@ -275,34 +275,34 @@ int entry(int argc, char **argv)
 
       switch (dtype)
       {
-      case DataType::FLOAT32:
-      {
-        auto lhs_vector = as_float_vector(lhs_dataset);
-        auto rhs_vector = as_float_vector(rhs_dataset);
-
-        assert(lhs_vector.size() == rhs_vector.size());
+        case DataType::FLOAT32:
+        {
+          auto lhs_vector = as_float_vector(lhs_dataset);
+          auto rhs_vector = as_float_vector(rhs_dataset);
 
-        LexicalLayout layout;
+          assert(lhs_vector.size() == rhs_vector.size());
 
-        for (TensorIndexEnumerator e{shape}; e.valid(); e.advance())
-        {
-          const auto &ind = e.current();
-          auto lhs_value = lhs_vector.at(layout.offset(shape, ind));
-          auto rhs_value = rhs_vector.at(layout.offset(shape, ind));
+          LexicalLayout layout;
 
-          // TODO Abstract equality criterion
-          if (std::abs(lhs_value - rhs_value) >= 0.001f)
+          for (TensorIndexEnumerator e{shape}; e.valid(); e.advance())
           {
-            ErrorDetail<ErrorCode::ValueMismatch> error{};
-            mux.notify(error);
-            continue;
+            const auto &ind = e.current();
+            auto lhs_value = lhs_vector.at(layout.offset(shape, ind));
+            auto rhs_value = rhs_vector.at(layout.offset(shape, ind));
+
+            // TODO Abstract equality criterion
+            if (std::abs(lhs_value - rhs_value) >= 0.001f)
+            {
+              ErrorDetail<ErrorCode::ValueMismatch> error{};
+              mux.notify(error);
+              continue;
+            }
           }
-        }
 
-        break;
-      }
-      default:
-        throw std::runtime_error{"Not supported, yet"};
+          break;
+        }
+        default:
+          throw std::runtime_error{"Not supported, yet"};
       };
     }
   } while (false);
diff --git a/compiler/loco-exporter/src/TypeInference.cpp b/compiler/loco-exporter/src/TypeInference.cpp
index 4d1b0eb..c83294a 100644
@@ -30,26 +30,26 @@ tflite::TensorType translateLocoTypeToTFLite(loco::DataType dtype)
 {
   switch (dtype)
   {
-  case loco::DataType::U8:
-    return tflite::TensorType_UINT8;
-  //  case loco::DataType::U16: unsupported
-  //  case loco::DataType::U32: unsupported
-  //  case loco::DataType::U64: unsupported
-  case loco::DataType::S8:
-    return tflite::TensorType_INT8;
-  case loco::DataType::S16:
-    return tflite::TensorType_INT16;
-  case loco::DataType::S32:
-    return tflite::TensorType_INT32;
-  case loco::DataType::S64:
-    return tflite::TensorType_INT64;
-  case loco::DataType::FLOAT16:
-    return tflite::TensorType_FLOAT16;
-  case loco::DataType::FLOAT32:
-    return tflite::TensorType_FLOAT32;
-  //  case loco::DataType::FLOAT64: unsupported
-  default:
-    assert(false && "unsupported data type");
+    case loco::DataType::U8:
+      return tflite::TensorType_UINT8;
+    //  case loco::DataType::U16: unsupported
+    //  case loco::DataType::U32: unsupported
+    //  case loco::DataType::U64: unsupported
+    case loco::DataType::S8:
+      return tflite::TensorType_INT8;
+    case loco::DataType::S16:
+      return tflite::TensorType_INT16;
+    case loco::DataType::S32:
+      return tflite::TensorType_INT32;
+    case loco::DataType::S64:
+      return tflite::TensorType_INT64;
+    case loco::DataType::FLOAT16:
+      return tflite::TensorType_FLOAT16;
+    case loco::DataType::FLOAT32:
+      return tflite::TensorType_FLOAT32;
+    //  case loco::DataType::FLOAT64: unsupported
+    default:
+      assert(false && "unsupported data type");
   }
 }
 
@@ -190,32 +190,33 @@ ShapeDescription getOpResultShape(loco::MaxPool2D *node, SerializedModelData &gd
   tflite::Padding padding = getOpPadding(node->pad());
   switch (padding)
   {
-  case tflite::Padding_SAME:
-  {
-    auto height = static_cast<uint32_t>(pred_shape._dims[1]);
-    auto width = static_cast<uint32_t>(pred_shape._dims[2]);
+    case tflite::Padding_SAME:
+    {
+      auto height = static_cast<uint32_t>(pred_shape._dims[1]);
+      auto width = static_cast<uint32_t>(pred_shape._dims[2]);
 
-    int32_t proposed_res_height = ceil_div(height, node->stride()->vertical());
-    int32_t proposed_res_width = ceil_div(width, node->stride()->horizontal());
+      int32_t proposed_res_height = ceil_div(height, node->stride()->vertical());
+      int32_t proposed_res_width = ceil_div(width, node->stride()->horizontal());
 
-    shape._dims[1] = pred_shape._dims[1] == -1 ? -1 : proposed_res_height;
-    shape._dims[2] = pred_shape._dims[2] == -1 ? -1 : proposed_res_width;
-    break;
-  }
-  case tflite::Padding_VALID:
-  {
-    auto padded_h = static_cast<uint32_t>(pred_shape._dims[1] - (node->window()->vertical() - 1));
-    auto padded_w = static_cast<uint32_t>(pred_shape._dims[2] - (node->window()->horizontal() - 1));
+      shape._dims[1] = pred_shape._dims[1] == -1 ? -1 : proposed_res_height;
+      shape._dims[2] = pred_shape._dims[2] == -1 ? -1 : proposed_res_width;
+      break;
+    }
+    case tflite::Padding_VALID:
+    {
+      auto padded_h = static_cast<uint32_t>(pred_shape._dims[1] - (node->window()->vertical() - 1));
+      auto padded_w =
+          static_cast<uint32_t>(pred_shape._dims[2] - (node->window()->horizontal() - 1));
 
-    int32_t proposed_height = ceil_div(padded_h, node->stride()->vertical());
-    int32_t proposed_width = ceil_div(padded_w, node->stride()->horizontal());
+      int32_t proposed_height = ceil_div(padded_h, node->stride()->vertical());
+      int32_t proposed_width = ceil_div(padded_w, node->stride()->horizontal());
 
-    shape._dims[1] = pred_shape._dims[1] == -1 ? -1 : proposed_height;
-    shape._dims[2] = pred_shape._dims[2] == -1 ? -1 : proposed_width;
-    break;
-  }
-  default:
-    assert(false && "unknown padding type");
+      shape._dims[1] = pred_shape._dims[1] == -1 ? -1 : proposed_height;
+      shape._dims[2] = pred_shape._dims[2] == -1 ? -1 : proposed_width;
+      break;
+    }
+    default:
+      assert(false && "unknown padding type");
   }
   return shape;
 }
@@ -234,32 +235,33 @@ ShapeDescription getOpResultShape(loco::AvgPool2D *node, SerializedModelData &gd
   tflite::Padding padding = getOpPadding(node->pad());
   switch (padding)
   {
-  case tflite::Padding_SAME:
-  {
-    auto height = static_cast<uint32_t>(ifm_shape._dims[1]);
-    auto width = static_cast<uint32_t>(ifm_shape._dims[2]);
+    case tflite::Padding_SAME:
+    {
+      auto height = static_cast<uint32_t>(ifm_shape._dims[1]);
+      auto width = static_cast<uint32_t>(ifm_shape._dims[2]);
 
-    int32_t proposed_res_height = ceil_div(height, node->stride()->vertical());
-    int32_t proposed_res_width = ceil_div(width, node->stride()->horizontal());
+      int32_t proposed_res_height = ceil_div(height, node->stride()->vertical());
+      int32_t proposed_res_width = ceil_div(width, node->stride()->horizontal());
 
-    shape._dims[1] = ifm_shape._dims[1] == -1 ? -1 : proposed_res_height;
-    shape._dims[2] = ifm_shape._dims[2] == -1 ? -1 : proposed_res_width;
-    break;
-  }
-  case tflite::Padding_VALID:
-  {
-    auto padded_h = static_cast<uint32_t>(ifm_shape._dims[1] - (node->window()->vertical() - 1));
-    auto padded_w = static_cast<uint32_t>(ifm_shape._dims[2] - (node->window()->horizontal() - 1));
+      shape._dims[1] = ifm_shape._dims[1] == -1 ? -1 : proposed_res_height;
+      shape._dims[2] = ifm_shape._dims[2] == -1 ? -1 : proposed_res_width;
+      break;
+    }
+    case tflite::Padding_VALID:
+    {
+      auto padded_h = static_cast<uint32_t>(ifm_shape._dims[1] - (node->window()->vertical() - 1));
+      auto padded_w =
+          static_cast<uint32_t>(ifm_shape._dims[2] - (node->window()->horizontal() - 1));
 
-    int32_t proposed_height = ceil_div(padded_h, node->stride()->vertical());
-    int32_t proposed_width = ceil_div(padded_w, node->stride()->horizontal());
+      int32_t proposed_height = ceil_div(padded_h, node->stride()->vertical());
+      int32_t proposed_width = ceil_div(padded_w, node->stride()->horizontal());
 
-    shape._dims[1] = ifm_shape._dims[1] == -1 ? -1 : proposed_height;
-    shape._dims[2] = ifm_shape._dims[2] == -1 ? -1 : proposed_width;
-    break;
-  }
-  default:
-    assert(false && "unknown padding type");
+      shape._dims[1] = ifm_shape._dims[1] == -1 ? -1 : proposed_height;
+      shape._dims[2] = ifm_shape._dims[2] == -1 ? -1 : proposed_width;
+      break;
+    }
+    default:
+      assert(false && "unknown padding type");
   }
   return shape;
 }
@@ -291,32 +293,32 @@ ShapeDescription getOpResultShape(loco::Conv2D *node, SerializedModelData &gd)
   tflite::Padding padding = getOpPadding(node->pad());
   switch (padding)
   {
-  case tflite::Padding_SAME:
-  {
-    auto height = static_cast<uint32_t>(ifm_shape._dims[1]);
-    auto width = static_cast<uint32_t>(ifm_shape._dims[2]);
+    case tflite::Padding_SAME:
+    {
+      auto height = static_cast<uint32_t>(ifm_shape._dims[1]);
+      auto width = static_cast<uint32_t>(ifm_shape._dims[2]);
 
-    int32_t proposed_res_height = ceil_div(height, node->stride()->vertical());
-    int32_t proposed_res_width = ceil_div(width, node->stride()->horizontal());
+      int32_t proposed_res_height = ceil_div(height, node->stride()->vertical());
+      int32_t proposed_res_width = ceil_div(width, node->stride()->horizontal());
 
-    shape._dims[1] = ifm_shape._dims[1] == -1 ? -1 : proposed_res_height;
-    shape._dims[2] = ifm_shape._dims[2] == -1 ? -1 : proposed_res_width;
-    break;
-  }
-  case tflite::Padding_VALID:
-  {
-    auto padded_h = static_cast<uint32_t>(ifm_shape._dims[1] - (ker_shape._dims[1] - 1));
-    auto padded_w = static_cast<uint32_t>(ifm_shape._dims[2] - (ker_shape._dims[2] - 1));
+      shape._dims[1] = ifm_shape._dims[1] == -1 ? -1 : proposed_res_height;
+      shape._dims[2] = ifm_shape._dims[2] == -1 ? -1 : proposed_res_width;
+      break;
+    }
+    case tflite::Padding_VALID:
+    {
+      auto padded_h = static_cast<uint32_t>(ifm_shape._dims[1] - (ker_shape._dims[1] - 1));
+      auto padded_w = static_cast<uint32_t>(ifm_shape._dims[2] - (ker_shape._dims[2] - 1));
 
-    int32_t proposed_height = ceil_div(padded_h, node->stride()->vertical());
-    int32_t proposed_width = ceil_div(padded_w, node->stride()->horizontal());
+      int32_t proposed_height = ceil_div(padded_h, node->stride()->vertical());
+      int32_t proposed_width = ceil_div(padded_w, node->stride()->horizontal());
 
-    shape._dims[1] = ifm_shape._dims[1] == -1 ? -1 : proposed_height;
-    shape._dims[2] = ifm_shape._dims[2] == -1 ? -1 : proposed_width;
-    break;
-  }
-  default:
-    assert(false && "unknown padding type");
+      shape._dims[1] = ifm_shape._dims[1] == -1 ? -1 : proposed_height;
+      shape._dims[2] = ifm_shape._dims[2] == -1 ? -1 : proposed_width;
+      break;
+    }
+    default:
+      assert(false && "unknown padding type");
   }
   return shape;
 }
diff --git a/compiler/loco/include/loco/IR/CanonicalNodeImpl.h b/compiler/loco/include/loco/IR/CanonicalNodeImpl.h
index bb8c8b7..73aa4ca 100644
@@ -35,8 +35,8 @@ template <typename T> T CanonicalNode::accept(CanonicalNodeVisitorBase<T> *v) co
 
 #include "CanonicalNodes.lst"
 #undef CANONICAL_NODE
-  default:
-    break;
+    default:
+      break;
   }
 
   throw std::runtime_error{"NYI"};
@@ -52,8 +52,8 @@ template <typename T> T CanonicalNode::accept(CanonicalNodeMutableVisitorBase<T>
 
 #include "CanonicalNodes.lst"
 #undef CANONICAL_NODE
-  default:
-    break;
+    default:
+      break;
   }
 
   throw std::runtime_error{"NYI"};
diff --git a/compiler/loco/src/IR/PermutingCodec.cpp b/compiler/loco/src/IR/PermutingCodec.cpp
index e91d5e7..7d57eed 100644
@@ -34,16 +34,16 @@ inline bool valid(const FeatureAxis &axis)
 {
   switch (axis)
   {
-  case FeatureAxis::Count:
-    return true;
-  case FeatureAxis::Depth:
-    return true;
-  case FeatureAxis::Height:
-    return true;
-  case FeatureAxis::Width:
-    return true;
-  default:
-    break;
+    case FeatureAxis::Count:
+      return true;
+    case FeatureAxis::Depth:
+      return true;
+    case FeatureAxis::Height:
+      return true;
+    case FeatureAxis::Width:
+      return true;
+    default:
+      break;
   }
 
   return false;
@@ -198,16 +198,16 @@ inline bool valid(const FilterAxis &axis)
 {
   switch (axis)
   {
-  case FilterAxis::Count:
-    return true;
-  case FilterAxis::Depth:
-    return true;
-  case FilterAxis::Height:
-    return true;
-  case FilterAxis::Width:
-    return true;
-  default:
-    break;
+    case FilterAxis::Count:
+      return true;
+    case FilterAxis::Depth:
+      return true;
+    case FilterAxis::Height:
+      return true;
+    case FilterAxis::Width:
+      return true;
+    default:
+      break;
   }
 
   return false;
@@ -317,16 +317,16 @@ inline bool valid(const DepthwiseFilterAxis &axis)
 {
   switch (axis)
   {
-  case DepthwiseFilterAxis::Depth:
-    return true;
-  case DepthwiseFilterAxis::Multiplier:
-    return true;
-  case DepthwiseFilterAxis::Height:
-    return true;
-  case DepthwiseFilterAxis::Width:
-    return true;
-  default:
-    break;
+    case DepthwiseFilterAxis::Depth:
+      return true;
+    case DepthwiseFilterAxis::Multiplier:
+      return true;
+    case DepthwiseFilterAxis::Height:
+      return true;
+    case DepthwiseFilterAxis::Width:
+      return true;
+    default:
+      break;
   }
 
   return false;
diff --git a/compiler/locomotiv/src/Node/AvgPool2D.cpp b/compiler/locomotiv/src/Node/AvgPool2D.cpp
index 33d9f37..7dcaed8 100644
@@ -157,17 +157,17 @@ void NodeExecution::execute(loco::AvgPool2D *avgpool2d)
 
   switch (ifm_data->dtype())
   {
-  case loco::DataType::FLOAT32:
-  {
-    auto ifm_buf = ifm_data->as_f32_bufptr();
+    case loco::DataType::FLOAT32:
+    {
+      auto ifm_buf = ifm_data->as_f32_bufptr();
 
-    auto avgpool2d_buf = avgPool2D<float>(avgpool2d, ifm_buf);
+      auto avgpool2d_buf = avgPool2D<float>(avgpool2d, ifm_buf);
 
-    avgpool2d_data = make_data(avgpool2d_buf);
-    break;
-  }
-  default:
-    throw std::runtime_error("NYI for this DataType");
+      avgpool2d_data = make_data(avgpool2d_buf);
+      break;
+    }
+    default:
+      throw std::runtime_error("NYI for this DataType");
   }
 
   assert(avgpool2d_data != nullptr);
diff --git a/compiler/locomotiv/src/Node/BiasAdd.cpp b/compiler/locomotiv/src/Node/BiasAdd.cpp
index eb19a8b..0e6de51 100644
@@ -53,26 +53,26 @@ void NodeExecution::execute(loco::BiasAdd<loco::Domain::Tensor> *bias_add)
 
   switch (input_data->dtype())
   {
-  case loco::DataType::FLOAT32:
-  {
-    auto input_bufptr = input_data->as_f32_bufptr();
-    auto bias_bufptr = bias_data->as_f32_bufptr();
-    auto bias_add_buf = make_buffer<float, LexicalLayout>(*input_data->shape());
+    case loco::DataType::FLOAT32:
+    {
+      auto input_bufptr = input_data->as_f32_bufptr();
+      auto bias_bufptr = bias_data->as_f32_bufptr();
+      auto bias_add_buf = make_buffer<float, LexicalLayout>(*input_data->shape());
 
-    auto *shape = input_data->shape();
+      auto *shape = input_data->shape();
 
-    for (IndexEnumerator e{*shape}; e.valid(); e.advance())
-    {
-      const auto &index = e.current();
-      nncc::core::ADT::tensor::Index bias_index({index.at(axis)});
-      bias_add_buf.at(index) = input_bufptr->at(index) + bias_bufptr->at(bias_index);
-    }
+      for (IndexEnumerator e{*shape}; e.valid(); e.advance())
+      {
+        const auto &index = e.current();
+        nncc::core::ADT::tensor::Index bias_index({index.at(axis)});
+        bias_add_buf.at(index) = input_bufptr->at(index) + bias_bufptr->at(bias_index);
+      }
 
-    bias_add_data = make_data(bias_add_buf);
-    break;
-  }
-  default:
-    throw std::runtime_error("NYI for this DataType");
+      bias_add_data = make_data(bias_add_buf);
+      break;
+    }
+    default:
+      throw std::runtime_error("NYI for this DataType");
   }
 
   assert(bias_add_data != nullptr);
diff --git a/compiler/locomotiv/src/Node/BiasEncode.cpp b/compiler/locomotiv/src/Node/BiasEncode.cpp
index ee23e99..4df05ce 100644
@@ -39,20 +39,20 @@ void NodeExecution::execute(loco::BiasEncode *bias_enc)
 
   switch (input_data->dtype())
   {
-  case loco::DataType::S32:
-  {
-    auto input_bufptr = input_data->as_s32_bufptr();
-    bias_enc_data = make_data(*input_bufptr);
-    break;
-  }
-  case loco::DataType::FLOAT32:
-  {
-    auto input_bufptr = input_data->as_f32_bufptr();
-    bias_enc_data = make_data(*input_bufptr);
-    break;
-  }
-  default:
-    throw std::runtime_error("NYI for this DataType");
+    case loco::DataType::S32:
+    {
+      auto input_bufptr = input_data->as_s32_bufptr();
+      bias_enc_data = make_data(*input_bufptr);
+      break;
+    }
+    case loco::DataType::FLOAT32:
+    {
+      auto input_bufptr = input_data->as_f32_bufptr();
+      bias_enc_data = make_data(*input_bufptr);
+      break;
+    }
+    default:
+      throw std::runtime_error("NYI for this DataType");
   }
 
   assert(bias_enc_data != nullptr);
diff --git a/compiler/locomotiv/src/Node/ConstGen.cpp b/compiler/locomotiv/src/Node/ConstGen.cpp
index aebf822..e62a2d8 100644
@@ -72,40 +72,40 @@ void NodeExecution::execute(loco::ConstGen *constgen)
 
   switch (constgen->dtype())
   {
-  case loco::DataType::S32:
-  {
-    assert(volume == constgen->size<loco::DataType::S32>());
+    case loco::DataType::S32:
+    {
+      assert(volume == constgen->size<loco::DataType::S32>());
 
-    auto buf = make_buffer<int32_t, LexicalLayout>(shape);
+      auto buf = make_buffer<int32_t, LexicalLayout>(shape);
 
-    for (IndexEnumerator e{shape}; e.valid(); e.advance())
-    {
-      const auto &index = e.current();
-      uint32_t offset = ::offset_by_index(shape, index);
-      buf.at(index) = constgen->at<loco::DataType::S32>(offset);
+      for (IndexEnumerator e{shape}; e.valid(); e.advance())
+      {
+        const auto &index = e.current();
+        uint32_t offset = ::offset_by_index(shape, index);
+        buf.at(index) = constgen->at<loco::DataType::S32>(offset);
+      }
+
+      data = locomotiv::make_data(buf);
+      break;
     }
+    case loco::DataType::FLOAT32:
+    {
+      assert(volume == constgen->size<loco::DataType::FLOAT32>());
 
-    data = locomotiv::make_data(buf);
-    break;
-  }
-  case loco::DataType::FLOAT32:
-  {
-    assert(volume == constgen->size<loco::DataType::FLOAT32>());
+      auto buf = make_buffer<float, LexicalLayout>(shape);
 
-    auto buf = make_buffer<float, LexicalLayout>(shape);
+      for (IndexEnumerator e{shape}; e.valid(); e.advance())
+      {
+        const auto &index = e.current();
+        uint32_t offset = ::offset_by_index(shape, index);
+        buf.at(index) = constgen->at<loco::DataType::FLOAT32>(offset);
+      }
 
-    for (IndexEnumerator e{shape}; e.valid(); e.advance())
-    {
-      const auto &index = e.current();
-      uint32_t offset = ::offset_by_index(shape, index);
-      buf.at(index) = constgen->at<loco::DataType::FLOAT32>(offset);
+      data = locomotiv::make_data(buf);
+      break;
     }
-
-    data = locomotiv::make_data(buf);
-    break;
-  }
-  default:
-    throw std::runtime_error("NYI for this DataType");
+    default:
+      throw std::runtime_error("NYI for this DataType");
   }
 
   assert(data != nullptr);
diff --git a/compiler/locomotiv/src/Node/DepthwiseFilterEncode.cpp b/compiler/locomotiv/src/Node/DepthwiseFilterEncode.cpp
index d462744..30e3908 100644
@@ -91,14 +91,14 @@ void NodeExecution::execute(loco::DepthwiseFilterEncode *enc)
 
   switch (input_data->dtype())
   {
-  case loco::DataType::FLOAT32:
-  {
-    auto input_buf = input_data->as_f32_bufptr();
-    enc_data = dw_filter_encode<float>(enc, input_buf);
-    break;
-  }
-  default:
-    throw std::runtime_error("NYI for this DataType");
+    case loco::DataType::FLOAT32:
+    {
+      auto input_buf = input_data->as_f32_bufptr();
+      enc_data = dw_filter_encode<float>(enc, input_buf);
+      break;
+    }
+    default:
+      throw std::runtime_error("NYI for this DataType");
   }
 
   assert(enc_data != nullptr);
diff --git a/compiler/locomotiv/src/Node/FeatureDecode.cpp b/compiler/locomotiv/src/Node/FeatureDecode.cpp
index 8f1a178..ee1b808 100644
@@ -88,20 +88,20 @@ void NodeExecution::execute(loco::FeatureDecode *dec)
 
   switch (input_data->dtype())
   {
-  case loco::DataType::S32:
-  {
-    auto input_buf = input_data->as_s32_bufptr();
-    dec_data = feature_decode<int32_t>(dec, input_buf);
-    break;
-  }
-  case loco::DataType::FLOAT32:
-  {
-    auto input_buf = input_data->as_f32_bufptr();
-    dec_data = feature_decode<float>(dec, input_buf);
-    break;
-  }
-  default:
-    throw std::runtime_error("NYI for this DataType");
+    case loco::DataType::S32:
+    {
+      auto input_buf = input_data->as_s32_bufptr();
+      dec_data = feature_decode<int32_t>(dec, input_buf);
+      break;
+    }
+    case loco::DataType::FLOAT32:
+    {
+      auto input_buf = input_data->as_f32_bufptr();
+      dec_data = feature_decode<float>(dec, input_buf);
+      break;
+    }
+    default:
+      throw std::runtime_error("NYI for this DataType");
   }
 
   assert(dec_data != nullptr);
diff --git a/compiler/locomotiv/src/Node/FeatureEncode.cpp b/compiler/locomotiv/src/Node/FeatureEncode.cpp
index aa1a52f..3d7db6c 100644
@@ -90,20 +90,20 @@ void NodeExecution::execute(loco::FeatureEncode *enc)
 
   switch (input_data->dtype())
   {
-  case loco::DataType::S32:
-  {
-    auto input_buf = input_data->as_s32_bufptr();
-    enc_data = feature_encode<int32_t>(enc, input_buf);
-    break;
-  }
-  case loco::DataType::FLOAT32:
-  {
-    auto input_buf = input_data->as_f32_bufptr();
-    enc_data = feature_encode<float>(enc, input_buf);
-    break;
-  }
-  default:
-    throw std::runtime_error("NYI for this DataType");
+    case loco::DataType::S32:
+    {
+      auto input_buf = input_data->as_s32_bufptr();
+      enc_data = feature_encode<int32_t>(enc, input_buf);
+      break;
+    }
+    case loco::DataType::FLOAT32:
+    {
+      auto input_buf = input_data->as_f32_bufptr();
+      enc_data = feature_encode<float>(enc, input_buf);
+      break;
+    }
+    default:
+      throw std::runtime_error("NYI for this DataType");
   }
 
   assert(enc_data != nullptr);
diff --git a/compiler/locomotiv/src/Node/FilterEncode.cpp b/compiler/locomotiv/src/Node/FilterEncode.cpp
index 686c7ae..d34a4ff 100644
@@ -90,20 +90,20 @@ void NodeExecution::execute(loco::FilterEncode *enc)
 
   switch (input_data->dtype())
   {
-  case loco::DataType::S32:
-  {
-    auto input_buf = input_data->as_s32_bufptr();
-    enc_data = filter_encode<int32_t>(enc, input_buf);
-    break;
-  }
-  case loco::DataType::FLOAT32:
-  {
-    auto input_buf = input_data->as_f32_bufptr();
-    enc_data = filter_encode<float>(enc, input_buf);
-    break;
-  }
-  default:
-    throw std::runtime_error("NYI for this DataType");
+    case loco::DataType::S32:
+    {
+      auto input_buf = input_data->as_s32_bufptr();
+      enc_data = filter_encode<int32_t>(enc, input_buf);
+      break;
+    }
+    case loco::DataType::FLOAT32:
+    {
+      auto input_buf = input_data->as_f32_bufptr();
+      enc_data = filter_encode<float>(enc, input_buf);
+      break;
+    }
+    default:
+      throw std::runtime_error("NYI for this DataType");
   }
 
   assert(enc_data != nullptr);
diff --git a/compiler/locomotiv/src/Node/Forward.cpp b/compiler/locomotiv/src/Node/Forward.cpp
index 7de2f5f..e7fe9b8 100644
@@ -38,20 +38,20 @@ void NodeExecution::execute(loco::Forward *forward)
 
   switch (input_data->dtype())
   {
-  case loco::DataType::S32:
-  {
-    auto input_bufptr = input_data->as_s32_bufptr();
-    forward_data = make_data(*input_bufptr);
-    break;
-  }
-  case loco::DataType::FLOAT32:
-  {
-    auto input_bufptr = input_data->as_f32_bufptr();
-    forward_data = make_data(*input_bufptr);
-    break;
-  }
-  default:
-    throw std::runtime_error("NYI for this DataType");
+    case loco::DataType::S32:
+    {
+      auto input_bufptr = input_data->as_s32_bufptr();
+      forward_data = make_data(*input_bufptr);
+      break;
+    }
+    case loco::DataType::FLOAT32:
+    {
+      auto input_bufptr = input_data->as_f32_bufptr();
+      forward_data = make_data(*input_bufptr);
+      break;
+    }
+    default:
+      throw std::runtime_error("NYI for this DataType");
   }
 
   assert(forward_data != nullptr);
diff --git a/compiler/locomotiv/src/Node/MaxPool2D.cpp b/compiler/locomotiv/src/Node/MaxPool2D.cpp
index 98e3dcb..000e5c8 100644
@@ -145,17 +145,17 @@ void NodeExecution::execute(loco::MaxPool2D *maxpool2d)
 
   switch (ifm_data->dtype())
   {
-  case loco::DataType::FLOAT32:
-  {
-    auto ifm_buf = ifm_data->as_f32_bufptr();
+    case loco::DataType::FLOAT32:
+    {
+      auto ifm_buf = ifm_data->as_f32_bufptr();
 
-    auto maxpool2d_buf = maxPool2D<float>(maxpool2d, ifm_buf);
+      auto maxpool2d_buf = maxPool2D<float>(maxpool2d, ifm_buf);
 
-    maxpool2d_data = make_data(maxpool2d_buf);
-    break;
-  }
-  default:
-    throw std::runtime_error("NYI for this DataType");
+      maxpool2d_data = make_data(maxpool2d_buf);
+      break;
+    }
+    default:
+      throw std::runtime_error("NYI for this DataType");
   }
 
   assert(maxpool2d_data != nullptr);
diff --git a/compiler/locomotiv/src/Node/Push.cpp b/compiler/locomotiv/src/Node/Push.cpp
index 7d23f8d..d96650a 100644
@@ -37,20 +37,20 @@ void NodeExecution::execute(loco::Push *push)
 
   switch (from_data->dtype())
   {
-  case loco::DataType::S32:
-  {
-    auto from_bufptr = from_data->as_s32_bufptr();
-    push_data = make_data(*from_bufptr);
-    break;
-  }
-  case loco::DataType::FLOAT32:
-  {
-    auto from_bufptr = from_data->as_f32_bufptr();
-    push_data = make_data(*from_bufptr);
-    break;
-  }
-  default:
-    throw std::runtime_error("NYI for this DataType");
+    case loco::DataType::S32:
+    {
+      auto from_bufptr = from_data->as_s32_bufptr();
+      push_data = make_data(*from_bufptr);
+      break;
+    }
+    case loco::DataType::FLOAT32:
+    {
+      auto from_bufptr = from_data->as_f32_bufptr();
+      push_data = make_data(*from_bufptr);
+      break;
+    }
+    default:
+      throw std::runtime_error("NYI for this DataType");
   }
 
   assert(push_data != nullptr);
diff --git a/compiler/locomotiv/src/Node/ReLU.cpp b/compiler/locomotiv/src/Node/ReLU.cpp
index 578a7b9..9918967 100644
@@ -55,23 +55,23 @@ void NodeExecution::execute(loco::ReLU *relu)
 
   switch (input_data->dtype())
   {
-  case loco::DataType::FLOAT32:
-  {
-    auto input_bufptr = input_data->as_f32_bufptr();
-    auto relu_buf = make_buffer<float, LexicalLayout>(*input_data->shape());
-    auto *shape = input_data->shape();
-
-    for (IndexEnumerator e{*shape}; e.valid(); e.advance())
+    case loco::DataType::FLOAT32:
     {
-      const auto &index = e.current();
-      relu_buf.at(index) = relu_ew(input_bufptr->at(index));
+      auto input_bufptr = input_data->as_f32_bufptr();
+      auto relu_buf = make_buffer<float, LexicalLayout>(*input_data->shape());
+      auto *shape = input_data->shape();
+
+      for (IndexEnumerator e{*shape}; e.valid(); e.advance())
+      {
+        const auto &index = e.current();
+        relu_buf.at(index) = relu_ew(input_bufptr->at(index));
+      }
+
+      relu_data = make_data(relu_buf);
+      break;
     }
-
-    relu_data = make_data(relu_buf);
-    break;
-  }
-  default:
-    throw std::runtime_error("NYI for this DataType");
+    default:
+      throw std::runtime_error("NYI for this DataType");
   }
 
   assert(relu_data != nullptr);
diff --git a/compiler/locomotiv/src/Node/ReLU6.cpp b/compiler/locomotiv/src/Node/ReLU6.cpp
index d0ec872..efa641f 100644
@@ -54,23 +54,23 @@ void NodeExecution::execute(loco::ReLU6 *relu6)
 
   switch (input_data->dtype())
   {
-  case loco::DataType::FLOAT32:
-  {
-    auto input_bufptr = input_data->as_f32_bufptr();
-    auto *shape = input_data->shape();
-    auto relu6_buf = make_buffer<float, LexicalLayout>(*shape);
-
-    for (IndexEnumerator e{*shape}; e.valid(); e.advance())
+    case loco::DataType::FLOAT32:
     {
-      const auto &index = e.current();
-      relu6_buf.at(index) = relu6_ew(input_bufptr->at(index));
+      auto input_bufptr = input_data->as_f32_bufptr();
+      auto *shape = input_data->shape();
+      auto relu6_buf = make_buffer<float, LexicalLayout>(*shape);
+
+      for (IndexEnumerator e{*shape}; e.valid(); e.advance())
+      {
+        const auto &index = e.current();
+        relu6_buf.at(index) = relu6_ew(input_bufptr->at(index));
+      }
+
+      relu6_data = make_data(relu6_buf);
+      break;
     }
-
-    relu6_data = make_data(relu6_buf);
-    break;
-  }
-  default:
-    throw std::runtime_error("NYI for this DataType");
+    default:
+      throw std::runtime_error("NYI for this DataType");
   }
 
   assert(relu6_data != nullptr);
diff --git a/compiler/locomotiv/src/Node/Reshape.cpp b/compiler/locomotiv/src/Node/Reshape.cpp
index 686d627..064ed2a 100644
@@ -51,36 +51,36 @@ void NodeExecution::execute(loco::Reshape<loco::ReshapeType::Fixed> *reshape)
 
   switch (input_data->dtype())
   {
-  case loco::DataType::FLOAT32:
-  {
-    auto input_bufptr = input_data->as_f32_bufptr();
-    auto *input_shape = input_data->shape();
+    case loco::DataType::FLOAT32:
+    {
+      auto input_bufptr = input_data->as_f32_bufptr();
+      auto *input_shape = input_data->shape();
 
-    using Shape = nncc::core::ADT::tensor::Shape;
-    std::unique_ptr<Shape> output_shape(new Shape());
+      using Shape = nncc::core::ADT::tensor::Shape;
+      std::unique_ptr<Shape> output_shape(new Shape());
 
-    output_shape->resize(reshape->rank());
-    for (uint32_t axis = 0; axis < output_shape->rank(); ++axis)
-    {
-      output_shape->dim(axis) = reshape->dim(axis).value();
-    }
+      output_shape->resize(reshape->rank());
+      for (uint32_t axis = 0; axis < output_shape->rank(); ++axis)
+      {
+        output_shape->dim(axis) = reshape->dim(axis).value();
+      }
 
-    auto reshape_bufptr = make_buffer<float, LexicalLayout>(*output_shape);
+      auto reshape_bufptr = make_buffer<float, LexicalLayout>(*output_shape);
 
-    float *input_ptr = const_cast<float *>(input_bufptr->base());
-    uint64_t input_len = num_elements(*input_shape) * sizeof(float);
+      float *input_ptr = const_cast<float *>(input_bufptr->base());
+      uint64_t input_len = num_elements(*input_shape) * sizeof(float);
 
-    float *output_ptr = reshape_bufptr.base();
-    uint64_t output_len = num_elements(*output_shape) * sizeof(float);
+      float *output_ptr = reshape_bufptr.base();
+      uint64_t output_len = num_elements(*output_shape) * sizeof(float);
 
-    assert(input_len == output_len);
-    memcpy(output_ptr, input_ptr, input_len);
+      assert(input_len == output_len);
+      memcpy(output_ptr, input_ptr, input_len);
 
-    reshape_data = make_data(reshape_bufptr);
-    break;
-  }
-  default:
-    throw std::runtime_error("NYI for this DataType");
+      reshape_data = make_data(reshape_bufptr);
+      break;
+    }
+    default:
+      throw std::runtime_error("NYI for this DataType");
   }
 
   assert(reshape_data != nullptr);
diff --git a/compiler/locomotiv/src/Node/TensorConcat.cpp b/compiler/locomotiv/src/Node/TensorConcat.cpp
index 12770ef..6e0a09a 100644
@@ -74,35 +74,35 @@ void NodeExecution::execute(loco::TensorConcat *tensor_concat)
   std::unique_ptr<NodeData> concat_data = nullptr;
   switch (lhs_data->dtype())
   {
-  case loco::DataType::FLOAT32:
-  {
-    auto lhs_bufptr = lhs_data->as_f32_bufptr();
-    auto rhs_bufptr = rhs_data->as_f32_bufptr();
-    auto concat_buf = make_buffer<float, LexicalLayout>(concat_shape);
-
-    for (IndexEnumerator e{concat_shape}; e.valid(); e.advance())
+    case loco::DataType::FLOAT32:
     {
-      const auto &e_index = e.current();
+      auto lhs_bufptr = lhs_data->as_f32_bufptr();
+      auto rhs_bufptr = rhs_data->as_f32_bufptr();
+      auto concat_buf = make_buffer<float, LexicalLayout>(concat_shape);
 
-      if (e_index.at(axis) < left_dim_size)
-      {
-        // Left index is same as output index
-        concat_buf.at(e_index) = lhs_bufptr->at(e_index);
-      }
-      else
+      for (IndexEnumerator e{concat_shape}; e.valid(); e.advance())
       {
-        // Adjust right index to valid range
-        Index r_index = e_index;
-        r_index.at(axis) -= left_dim_size;
-        concat_buf.at(e_index) = rhs_bufptr->at(r_index);
+        const auto &e_index = e.current();
+
+        if (e_index.at(axis) < left_dim_size)
+        {
+          // Left index is same as output index
+          concat_buf.at(e_index) = lhs_bufptr->at(e_index);
+        }
+        else
+        {
+          // Adjust right index to valid range
+          Index r_index = e_index;
+          r_index.at(axis) -= left_dim_size;
+          concat_buf.at(e_index) = rhs_bufptr->at(r_index);
+        }
       }
-    }
 
-    concat_data = make_data(concat_buf);
-    break;
-  }
-  default:
-    throw std::runtime_error("NYI for this DataType");
+      concat_data = make_data(concat_buf);
+      break;
+    }
+    default:
+      throw std::runtime_error("NYI for this DataType");
   }
 
   assert(concat_data != nullptr);
diff --git a/compiler/locop/src/FormattedGraph.cpp b/compiler/locop/src/FormattedGraph.cpp
index ff31301..ef9ab4a 100644
@@ -109,8 +109,8 @@ std::string opname(const loco::Node *node)
     return "canonical." #OPCODE;
 #include "loco/IR/CanonicalNodes.lst"
 #undef CANONICAL_NODE
-    default:
-      break;
+      default:
+        break;
     };
 
     return "canonical."
diff --git a/compiler/moco-tf/src/Canonicalization/ConstCanonicalizer.cpp b/compiler/moco-tf/src/Canonicalization/ConstCanonicalizer.cpp
index 1638cd2..f73588b 100644
@@ -72,28 +72,28 @@ bool canonicalize_const(loco::Graph *graph, moco::tf::TFConst *node)
 
   switch (dtype)
   {
-  case loco::DataType::S32:
-  {
-    uint32_t input_elements = node->size<loco::DataType::S32>();
-    const_node->size<loco::DataType::S32>(input_elements);
-    for (uint32_t i = 0; i < input_elements; ++i)
+    case loco::DataType::S32:
     {
-      const_node->at<loco::DataType::S32>(i) = node->at<loco::DataType::S32>(i);
+      uint32_t input_elements = node->size<loco::DataType::S32>();
+      const_node->size<loco::DataType::S32>(input_elements);
+      for (uint32_t i = 0; i < input_elements; ++i)
+      {
+        const_node->at<loco::DataType::S32>(i) = node->at<loco::DataType::S32>(i);
+      }
+      break;
     }
-    break;
-  }
-  case loco::DataType::FLOAT32:
-  {
-    uint32_t input_elements = node->size<loco::DataType::FLOAT32>();
-    const_node->size<loco::DataType::FLOAT32>(input_elements);
-    for (uint32_t i = 0; i < input_elements; ++i)
+    case loco::DataType::FLOAT32:
     {
-      const_node->at<loco::DataType::FLOAT32>(i) = node->at<loco::DataType::FLOAT32>(i);
+      uint32_t input_elements = node->size<loco::DataType::FLOAT32>();
+      const_node->size<loco::DataType::FLOAT32>(input_elements);
+      for (uint32_t i = 0; i < input_elements; ++i)
+      {
+        const_node->at<loco::DataType::FLOAT32>(i) = node->at<loco::DataType::FLOAT32>(i);
+      }
+      break;
     }
-    break;
-  }
-  default:
-    throw std::runtime_error("NYI for this DataType");
+    default:
+      throw std::runtime_error("NYI for this DataType");
   }
 
   // update graph
diff --git a/compiler/moco-tf/src/Convert.cpp b/compiler/moco-tf/src/Convert.cpp
index 0e05a1a..d0713fa 100644
@@ -122,20 +122,20 @@ loco::DataType as_loco_datatype(const tensorflow::DataType dtype)
 {
   switch (dtype)
   {
-  case tensorflow::DT_UINT8:
-    return loco::DataType::U8;
-  case tensorflow::DT_FLOAT:
-    return loco::DataType::FLOAT32;
-  case tensorflow::DT_BOOL:
-    return loco::DataType::U8;
-  case tensorflow::DT_INT32:
-    return loco::DataType::S32;
-  case tensorflow::DT_INT64:
-    return loco::DataType::S64;
-  case tensorflow::DT_STRING:
-  case tensorflow::DT_COMPLEX64:
-  default:
-    break;
+    case tensorflow::DT_UINT8:
+      return loco::DataType::U8;
+    case tensorflow::DT_FLOAT:
+      return loco::DataType::FLOAT32;
+    case tensorflow::DT_BOOL:
+      return loco::DataType::U8;
+    case tensorflow::DT_INT32:
+      return loco::DataType::S32;
+    case tensorflow::DT_INT64:
+      return loco::DataType::S64;
+    case tensorflow::DT_STRING:
+    case tensorflow::DT_COMPLEX64:
+    default:
+      break;
   }
   throw std::runtime_error{"Unsupported tensorflow dtype: " + tensorflow::DataType_Name(dtype)};
 }
diff --git a/compiler/moco-tf/src/Dialect/TFNodeImpl.h b/compiler/moco-tf/src/Dialect/TFNodeImpl.h
index 839a039..39e8830 100644
@@ -37,8 +37,8 @@ template <typename T> T TFNode::accept(TFNodeVisitorBase<T> *v) const
 
 #include "TFNodes.lst"
 #undef TENSORFLOW_NODE
-  default:
-    break;
+    default:
+      break;
   }
 
   throw std::runtime_error{"NYI"};
@@ -54,8 +54,8 @@ template <typename T> T TFNode::accept(TFNodeMutableVisitorBase<T> *v)
 
 #include "TFNodes.lst"
 #undef TENSORFLOW_NODE
-  default:
-    break;
+    default:
+      break;
   }
 
   throw std::runtime_error{"NYI"};
diff --git a/compiler/moco-tf/src/NodeShape.cpp b/compiler/moco-tf/src/NodeShape.cpp
index 5952c00..f95d75d 100644
@@ -111,40 +111,40 @@ std::unique_ptr<NodeShape> node_shape(loco::Node *node)
   {
     switch (shapedata->domain())
     {
-    case loco::Domain::Tensor:
-    {
-      loco::TensorShape shape = shapedata->tensor_shape();
-      std::unique_ptr<NodeShape> node_shape = stdex::make_unique<NodeShape>(shape);
-      return std::move(node_shape);
-    }
-    break;
-
-    case loco::Domain::Feature:
-    {
-      loco::FeatureShape shape = shapedata->feature_shape();
-      std::unique_ptr<NodeShape> node_shape = stdex::make_unique<NodeShape>(shape);
-      return std::move(node_shape);
-    }
-    break;
-
-    case loco::Domain::Filter:
-    {
-      loco::FilterShape shape = shapedata->filter_shape();
-      std::unique_ptr<NodeShape> node_shape = stdex::make_unique<NodeShape>(shape);
-      return std::move(node_shape);
-    }
-    break;
-
-    case loco::Domain::Bias:
-    {
-      loco_tobe::BiasShape shape = shapedata->bias_shape();
-      std::unique_ptr<NodeShape> node_shape = stdex::make_unique<NodeShape>(shape);
-      return std::move(node_shape);
-    }
-    break;
-
-    default:
-      throw std::runtime_error("Not supported loco::Domain");
+      case loco::Domain::Tensor:
+      {
+        loco::TensorShape shape = shapedata->tensor_shape();
+        std::unique_ptr<NodeShape> node_shape = stdex::make_unique<NodeShape>(shape);
+        return std::move(node_shape);
+      }
+      break;
+
+      case loco::Domain::Feature:
+      {
+        loco::FeatureShape shape = shapedata->feature_shape();
+        std::unique_ptr<NodeShape> node_shape = stdex::make_unique<NodeShape>(shape);
+        return std::move(node_shape);
+      }
+      break;
+
+      case loco::Domain::Filter:
+      {
+        loco::FilterShape shape = shapedata->filter_shape();
+        std::unique_ptr<NodeShape> node_shape = stdex::make_unique<NodeShape>(shape);
+        return std::move(node_shape);
+      }
+      break;
+
+      case loco::Domain::Bias:
+      {
+        loco_tobe::BiasShape shape = shapedata->bias_shape();
+        std::unique_ptr<NodeShape> node_shape = stdex::make_unique<NodeShape>(shape);
+        return std::move(node_shape);
+      }
+      break;
+
+      default:
+        throw std::runtime_error("Not supported loco::Domain");
     }
   }
 
diff --git a/compiler/moco-tf/src/Op/Const.cpp b/compiler/moco-tf/src/Op/Const.cpp
index ebe68ec..1cc677f 100644
@@ -263,18 +263,18 @@ void ConstGraphBuilderImpl<ImportTarget::Canonical>::build(const tensorflow::Nod
 
   switch (dtype)
   {
-  case loco::DataType::S32:
-    read_value_int32(const_node, num_elements, input_tensor);
-    break;
+    case loco::DataType::S32:
+      read_value_int32(const_node, num_elements, input_tensor);
+      break;
 
-  case loco::DataType::FLOAT32:
-    read_value_float32(const_node, num_elements, input_tensor);
-    break;
+    case loco::DataType::FLOAT32:
+      read_value_float32(const_node, num_elements, input_tensor);
+      break;
 
-  // TODO support other types
+    // TODO support other types
 
-  default:
-    throw std::runtime_error{"Error: Unsupported data type for " + node.name()};
+    default:
+      throw std::runtime_error{"Error: Unsupported data type for " + node.name()};
   }
 
   // register string-name to node
@@ -332,18 +332,18 @@ void ConstGraphBuilderImpl<ImportTarget::TensorFlow>::build(const tensorflow::No
 
   switch (dtype)
   {
-  case loco::DataType::S32:
-    read_value_int32(const_node, num_elements, input_tensor);
-    break;
+    case loco::DataType::S32:
+      read_value_int32(const_node, num_elements, input_tensor);
+      break;
 
-  case loco::DataType::FLOAT32:
-    read_value_float32(const_node, num_elements, input_tensor);
-    break;
+    case loco::DataType::FLOAT32:
+      read_value_float32(const_node, num_elements, input_tensor);
+      break;
 
-  // TODO support other types
+    // TODO support other types
 
-  default:
-    throw std::runtime_error{"Error: Unsupported data type for " + node.name()};
+    default:
+      throw std::runtime_error{"Error: Unsupported data type for " + node.name()};
   }
 
   // register string-name to node
index 2730ef5..18cc510 100644 (file)
@@ -117,14 +117,14 @@ bool TFNodeSummaryBuilder::summary(const TFConst *node, locop::NodeSummary &s) c
   auto dtype = node->dtype();
   switch (dtype)
   {
-  case loco::DataType::S32:
-    ss << node->size<loco::DataType::S32>();
-    break;
-  case loco::DataType::FLOAT32:
-    ss << node->size<loco::DataType::FLOAT32>();
-    break;
-  default:
-    throw std::runtime_error("NYI for this DataType");
+    case loco::DataType::S32:
+      ss << node->size<loco::DataType::S32>();
+      break;
+    case loco::DataType::FLOAT32:
+      ss << node->size<loco::DataType::FLOAT32>();
+      break;
+    default:
+      throw std::runtime_error("NYI for this DataType");
   }
   s.args().append("size", ss.str());
   s.state(locop::NodeSummary::State::PartiallyKnown);
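
The TFConst summary above shows the usual loco idiom for reading typed constant data: size<DT>() takes the data type as a compile-time template argument, so the runtime dtype() tag has to be translated through a switch, one case per supported type. A hedged sketch of the same dispatch factored into a helper (the function name is invented, not part of this commit):

  // Map the runtime dtype tag to the matching size<DT>() instantiation.
  uint32_t element_count(const TFConst *node)
  {
    switch (node->dtype())
    {
      case loco::DataType::S32:
        return node->size<loco::DataType::S32>();
      case loco::DataType::FLOAT32:
        return node->size<loco::DataType::FLOAT32>();
      default:
        throw std::runtime_error("NYI for this DataType");
    }
  }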
index 21ca6e5..2bc5f26 100644 (file)
@@ -55,14 +55,14 @@ const std::string &LinearDocument::line(uint32_t n) const
 {
   switch (_direction)
   {
-  case Direction::Forward:
-  {
-    return _lines.at(n);
-  }
-  case Direction::Reverse:
-  {
-    return _lines.at(lines() - n - 1);
-  }
+    case Direction::Forward:
+    {
+      return _lines.at(n);
+    }
+    case Direction::Reverse:
+    {
+      return _lines.at(lines() - n - 1);
+    }
   }
 
   throw std::runtime_error{"unreachable"};
index d390af4..a6dba61 100644 (file)
@@ -103,16 +103,16 @@ int main(int argc, char **argv)
     {
       switch (info->kind())
       {
-      case nnkit::support::tftestinfo::ParsedTensor::Kind::Input:
-        sig.add_input(moco::tf::TensorName{info->name()});
-        break;
+        case nnkit::support::tftestinfo::ParsedTensor::Kind::Input:
+          sig.add_input(moco::tf::TensorName{info->name()});
+          break;
 
-      case nnkit::support::tftestinfo::ParsedTensor::Kind::Output:
-        sig.add_output(moco::tf::TensorName{info->name()});
-        break;
+        case nnkit::support::tftestinfo::ParsedTensor::Kind::Output:
+          sig.add_output(moco::tf::TensorName{info->name()});
+          break;
 
-      default:
-        throw std::runtime_error{"Unknown kind"};
+        default:
+          throw std::runtime_error{"Unknown kind"};
       }
     }
   }
index 76ad0f1..36bf5a7 100644 (file)
@@ -127,14 +127,14 @@ void pack(tensorflow::GraphDef &graph_def)
 
       switch (dtype)
       {
-      case tensorflow::DT_FLOAT:
-        pack<float>(tensor);
-        break;
-      case tensorflow::DT_INT32:
-        pack<int32_t>(tensor);
-        break;
-      default:
-        throw std::runtime_error{"Unsupported dtype"};
+        case tensorflow::DT_FLOAT:
+          pack<float>(tensor);
+          break;
+        case tensorflow::DT_INT32:
+          pack<int32_t>(tensor);
+          break;
+        default:
+          throw std::runtime_error{"Unsupported dtype"};
       }
     }
   }
index 847f3db..46253ee 100644 (file)
@@ -118,14 +118,14 @@ void unpack(tensorflow::GraphDef &graph_def)
 
       switch (dtype)
       {
-      case tensorflow::DT_FLOAT:
-        unpack<float>(tensor);
-        break;
-      case tensorflow::DT_INT32:
-        unpack<int32_t>(tensor);
-        break;
-      default:
-        throw std::runtime_error{"Unsupported dtype"};
+        case tensorflow::DT_FLOAT:
+          unpack<float>(tensor);
+          break;
+        case tensorflow::DT_INT32:
+          unpack<int32_t>(tensor);
+          break;
+        default:
+          throw std::runtime_error{"Unsupported dtype"};
       }
     }
   }
index 554eb36..8e4f1d3 100644 (file)
@@ -22,12 +22,12 @@ tflite::Padding as_tflite_padding(const tflchef::Padding &value)
 {
   switch (value)
   {
-  case tflchef::SAME:
-    return tflite::Padding_SAME;
-  case tflchef::VALID:
-    return tflite::Padding_VALID;
-  default:
-    break;
+    case tflchef::SAME:
+      return tflite::Padding_SAME;
+    case tflchef::VALID:
+      return tflite::Padding_VALID;
+    default:
+      break;
   }
 
   throw std::runtime_error{"Unknown padding value"};
@@ -37,14 +37,14 @@ tflite::ActivationFunctionType as_tflite_activation(const tflchef::Activation &v
 {
   switch (value)
   {
-  case tflchef::NONE:
-    return tflite::ActivationFunctionType_NONE;
-  case tflchef::RELU:
-    return tflite::ActivationFunctionType_RELU;
-  case tflchef::RELU6:
-    return tflite::ActivationFunctionType_RELU6;
-  default:
-    break;
+    case tflchef::NONE:
+      return tflite::ActivationFunctionType_NONE;
+    case tflchef::RELU:
+      return tflite::ActivationFunctionType_RELU;
+    case tflchef::RELU6:
+      return tflite::ActivationFunctionType_RELU6;
+    default:
+      break;
   }
 
   throw std::runtime_error{"Unknown activation"};
@@ -54,12 +54,12 @@ tflite::TensorType as_tflite_tensortype(const tflchef::TensorType &value)
 {
   switch (value)
   {
-  case tflchef::FLOAT32:
-    return tflite::TensorType_FLOAT32;
-  case tflchef::INT32:
-    return tflite::TensorType_INT32;
-  default:
-    break;
+    case tflchef::FLOAT32:
+      return tflite::TensorType_FLOAT32;
+    case tflchef::INT32:
+      return tflite::TensorType_INT32;
+    default:
+      break;
   }
 
   throw std::runtime_error{"Unknown tensor type"};
index e4167d7..9588687 100644 (file)
@@ -171,12 +171,12 @@ DataChefRegistry &data_chef_registry(const tflchef::TensorType &type)
 
   switch (type)
   {
-  case tflchef::INT32:
-    return s32;
-  case tflchef::FLOAT32:
-    return fp32;
-  default:
-    break;
+    case tflchef::INT32:
+      return s32;
+    case tflchef::FLOAT32:
+      return fp32;
+    default:
+      break;
   }
 
   throw std::runtime_error{"Unknown tensor type"};
index da8e453..b7a93cb 100644 (file)
@@ -23,20 +23,20 @@ tflchef::TensorType as_tflchef_type(const tflite::TensorType type)
 {
   switch (type)
   {
-  case tflite::TensorType_FLOAT32:
-    return tflchef::FLOAT32;
-  case tflite::TensorType_INT32:
-    return tflchef::INT32;
-  // TODO handle other types
-  // TensorType_FLOAT16
-  // TensorType_UINT8
-  // TensorType_INT64
-  // TensorType_STRING
-  // TensorType_BOOL
-  // TensorType_INT16
-  // TensorType_COMPLEX64
-  default:
-    throw std::runtime_error{"unsupported tensor type"};
+    case tflite::TensorType_FLOAT32:
+      return tflchef::FLOAT32;
+    case tflite::TensorType_INT32:
+      return tflchef::INT32;
+    // TODO handle other types
+    // TensorType_FLOAT16
+    // TensorType_UINT8
+    // TensorType_INT64
+    // TensorType_STRING
+    // TensorType_BOOL
+    // TensorType_INT16
+    // TensorType_COMPLEX64
+    default:
+      throw std::runtime_error{"unsupported tensor type"};
   }
 }
 
@@ -44,18 +44,18 @@ tflchef::Activation as_tflchef_activation(const tflite::ActivationFunctionType t
 {
   switch (type)
   {
-  case tflite::ActivationFunctionType_NONE:
-    return tflchef::NONE;
-  case tflite::ActivationFunctionType_RELU:
-    return tflchef::RELU;
-  case tflite::ActivationFunctionType_RELU6:
-    return tflchef::RELU6;
-  // TODO handle other types
-  // ActivationFunctionType_RELU_N1_TO_1
-  // ActivationFunctionType_TANH
-  // ActivationFunctionType_SIGN_BIT
-  default:
-    throw std::runtime_error{"unsupported activation type"};
+    case tflite::ActivationFunctionType_NONE:
+      return tflchef::NONE;
+    case tflite::ActivationFunctionType_RELU:
+      return tflchef::RELU;
+    case tflite::ActivationFunctionType_RELU6:
+      return tflchef::RELU6;
+    // TODO handle other types
+    // ActivationFunctionType_RELU_N1_TO_1
+    // ActivationFunctionType_TANH
+    // ActivationFunctionType_SIGN_BIT
+    default:
+      throw std::runtime_error{"unsupported activation type"};
   }
 }
 
@@ -63,12 +63,12 @@ tflchef::Padding as_tflchef_padding(const tflite::Padding padding)
 {
   switch (padding)
   {
-  case tflite::Padding_SAME:
-    return tflchef::SAME;
-  case tflite::Padding_VALID:
-    return tflchef::VALID;
-  default:
-    throw std::runtime_error{"unsupported padding"};
+    case tflite::Padding_SAME:
+      return tflchef::SAME;
+    case tflite::Padding_VALID:
+      return tflchef::VALID;
+    default:
+      throw std::runtime_error{"unsupported padding"};
   }
 }
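
Since tflchef/core defines as_tflite_* and tflchef/tflite defines the inverse as_tflchef_* over the same supported subset, a round-trip check is a natural smoke test. A hedged sketch, assuming a GTest harness and that both conversion units can be linked into one test target (neither is part of this commit):

  TEST(ConvertRoundTrip, padding)
  {
    // Each supported value should survive tflchef -> tflite -> tflchef.
    EXPECT_EQ(tflchef::SAME, as_tflchef_padding(as_tflite_padding(tflchef::SAME)));
    EXPECT_EQ(tflchef::VALID, as_tflchef_padding(as_tflite_padding(tflchef::VALID)));
  }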