First implementation of some ONNX operators. (#2140)
author Andrew V. Tischenko/AI Tools Lab/SRR/Staff Engineer/Samsung Electronics <a.tischenko@partner.samsung.com>
Tue, 13 Nov 2018 16:24:40 +0000 (19:24 +0300)
committer Roman Rusyaev/AI Tools Lab/SRR/Staff Engineer/Samsung Electronics <r.rusyaev@samsung.com>
Tue, 13 Nov 2018 16:24:40 +0000 (19:24 +0300)
The following operators were implemented (initial versions, not yet debugged):
Dropout, Concat, Scale, Reshape, Relu, Softmax.

Signed-off-by: Andrew V. Tischenko <a.tischenko@partner.samsung.com>
contrib/nnc/driver/Driver.cpp
contrib/nnc/include/passes/onnx_frontend/ONNXImporter.h [moved from contrib/nnc/include/passes/onnx_frontend/ONNXFrontend.h with 79% similarity]
contrib/nnc/passes/onnx_frontend/ONNXImporter.cpp [new file with mode: 0644]
contrib/nnc/passes/onnx_frontend/ONNXImporterImpl.cpp [new file with mode: 0644]
contrib/nnc/passes/onnx_frontend/ONNXImporterImpl.h [new file with mode: 0644]
contrib/nnc/passes/onnx_frontend/ONNXOpCreator.cpp [new file with mode: 0644]
contrib/nnc/passes/onnx_frontend/ONNXOpCreator.h [new file with mode: 0644]
contrib/nnc/passes/onnx_frontend/ONNXOpType.h
contrib/nnc/passes/onnx_frontend/ONNXPerfect.gperf
contrib/nnc/passes/onnx_frontend/ONNXPerfectHash.h

index 511cb18..e25ff5f 100644 (file)
@@ -21,7 +21,7 @@
 #include "passes/interpreter/InterpreterPass.h"
 #include "passes/soft_backend/CPPGenerator.h"
 #include "passes/acl_soft_backend/AclCppGenerator.h"
-#include "passes/onnx_frontend/ONNXFrontend.h"
+#include "passes/onnx_frontend/ONNXImporter.h"
 #include "support/CommandLine.h"
 #include "Definitions.h"
 #include "option/Options.h"
@@ -75,7 +75,7 @@ void Driver::registerFrontendPass() {
   else if ( cli::onnxFrontend )
   {
 #ifdef NNC_FRONTEND_ONNX_ENABLED
-    pass = std::move(std::unique_ptr<Pass>(new ONNXFrontend()));
+    pass = std::move(std::unique_ptr<Pass>(new ONNXImporter()));
 #endif // NNC_FRONTEND_ONNX_ENABLED
   }
   else if ( cli::tflFrontend )
@@ -25,16 +25,11 @@ namespace nnc {
 /**
  * @brief class represent frontend of ONNX NN framework
  */
-class ONNXFrontend : public Pass
+class ONNXImporter : public Pass
 {
 public:
-  ONNXFrontend &operator=(const ONNXFrontend &) = delete;
-  ONNXFrontend(const ONNXFrontend &) = delete;
-
-  ONNXFrontend() = default;
-  ~ONNXFrontend() override = default;
-
-  static Pass &getInstance();
+  ONNXImporter() = default;
+  ~ONNXImporter() override = default;
   PassData run(PassData data) override;
 };
 } // namespace nnc
diff --git a/contrib/nnc/passes/onnx_frontend/ONNXImporter.cpp b/contrib/nnc/passes/onnx_frontend/ONNXImporter.cpp
new file mode 100644 (file)
index 0000000..72f7524
--- /dev/null
@@ -0,0 +1,28 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "option/Options.h"
+#include "pass/PassException.h"
+#include "passes/onnx_frontend/ONNXImporter.h"
+#include "ONNXImporterImpl.h"
+
+namespace nnc {
+PassData ONNXImporter::run(PassData data) {
+  ONNXImporterImpl importer{cli::inputFile};
+  importer.import();
+  return importer.createIR();
+}
+} // namespace nnc
diff --git a/contrib/nnc/passes/onnx_frontend/ONNXImporterImpl.cpp b/contrib/nnc/passes/onnx_frontend/ONNXImporterImpl.cpp
new file mode 100644 (file)
index 0000000..1848da7
--- /dev/null
@@ -0,0 +1,274 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <google/protobuf/io/zero_copy_stream_impl.h>
+#include <google/protobuf/io/coded_stream.h>
+#include <pass/PassException.h>
+#include <passes/common_frontend/shape_helper.h>
+#include <functional>
+#include "core/modelIR/ir_node.h"
+#include "core/modelIR/operations/VariableOp.h"
+#include "core/modelIR/TensorVariant.h"
+#include "onnx/onnx_pb.h"
+#include "onnx/proto_utils.h"
+#include "passes/common_frontend/model_allocation.h"
+#include "ONNXImporterImpl.h"
+#include "ONNXPerfectHash.h"
+
+namespace nnc {
+
+void ONNXImporterImpl::import() {
+  GOOGLE_PROTOBUF_VERIFY_VERSION;
+
+  ModelAllocation ma(_modelFilename);
+  size_t size = ma.getNumBytes();
+  if (!size)
+    throw PassException("Could not load model: " + _modelFilename + "\n");
+  _model.reset(new onnx::ModelProto());
+  if (!onnx::ParseProtoFromBytes(_model.get(), (const char*)ma.getDataPnt(), size))
+    throw PassException("Could not parse model: " + _modelFilename + "\n");
+}
+
+std::shared_ptr<mir::TensorVariant> ONNXImporterImpl::createTensor(const onnx::TensorProto *tensor) {
+  mir::TensorVariant::DTYPE type = mir::TensorVariant::DTYPE::FLOAT;
+  size_t element_size;
+  size_t buffer_size;
+  const char* src_data;
+  char data[] = "DATA buffer";
+
+  if (tensor == nullptr){
+    // It's 'data' input: create special kind of tensor
+    element_size = sizeof(float);
+    buffer_size = sizeof(data);
+    src_data = reinterpret_cast<const char*>(data);
+  } else if (tensor->float_data_size() != 0) {
+    element_size = sizeof(float);
+    buffer_size = tensor->float_data_size() * element_size;
+    src_data = reinterpret_cast<const char*>(tensor->float_data().data());
+  } else if (tensor->double_data_size() != 0) {
+    // TODO: we should copy element by element to convert the items
+    element_size = sizeof(double);
+    buffer_size = tensor->double_data_size() * element_size;
+    src_data = reinterpret_cast<const char*>(tensor->double_data().data());
+    std::cerr << "WARNING: We don't support double tensors yet, investigate\n";
+  } else if (tensor->int32_data_size() != 0) {
+    element_size = sizeof(int32_t);
+    buffer_size = tensor->int32_data_size() * element_size;
+    src_data = reinterpret_cast<const char*>(tensor->int32_data().data());
+    std::cerr << "WARNING: We don't support int32 tensors yet, investigate\n";
+  } else if (tensor->int64_data_size() != 0) {
+    element_size = sizeof(int64_t);
+    buffer_size = tensor->int64_data_size() * element_size;
+    src_data = reinterpret_cast<const char*>(tensor->int64_data().data());
+    std::cerr << "WARNING: We don't support int64 tensors yet, investigate\n";
+  } else {
+    throw PassException("Invalid data in Proto file, investigate");
+  }
+
+  // Create untyped tensor. Note, tensor contents will be *copied* here.
+  std::shared_ptr<char> tensor_buffer_copy(new char[buffer_size],
+                                           std::default_delete<char[]>());
+
+  char* dst_data = tensor_buffer_copy.get();
+  memcpy(dst_data, src_data, buffer_size);
+
+  mir::Shape tensor_shape;
+  if (tensor == nullptr)
+    tensor_shape = ShapeHelper::createShape(std::vector<int>(), 0);
+  else
+    tensor_shape = ShapeHelper::createShape(
+        tensor->dims(), static_cast<size_t>(tensor->dims_size()));
+
+  auto mir_tensor = std::make_shared<mir::TensorVariant>(tensor_shape, tensor_buffer_copy, type, element_size);
+  return mir_tensor;
+}
+
+void ONNXImporterImpl::createGraphInputs() {
+  const auto& graph = _model->graph();
+  std::map<std::string, const onnx::TensorProto *> onnx_tensors;
+
+  // Collect all initializers of the given graph
+  for (int i = 0; i < graph.initializer_size(); i++) {
+    const onnx::TensorProto& tensor = graph.initializer(i);
+    assert(onnx_tensors.find(tensor.name()) == onnx_tensors.end());
+    onnx_tensors[tensor.name()] = &tensor;
+  }
+
+  for (auto input : graph.input()) {
+    assert(input.has_name());
+    auto name = input.name();
+
+    // Every VariableOp relates to one graph input
+    auto node = _graph->create<mir::ops::VariableOp>(name);
+    _opsForBlobsTheyOutput[name] = node;
+
+    if (onnx_tensors.find(name) != onnx_tensors.end()) {
+      const onnx::TensorProto* onnx_tensor = onnx_tensors[name];
+      _inputTensors[name] = createTensor(onnx_tensor);
+      mir::Shape input_shape = ShapeHelper::createShape(onnx_tensor->dims(),
+                                                   static_cast<size_t>(onnx_tensor->dims_size()));
+      // WARNING! Temporary solution!
+      node->getOperation()->setOutputShape(0, input_shape);
+    } else {
+      assert(!name.compare("data"));
+      _inputTensors[name] = createTensor(nullptr);
+      // TODO: should we update node with special shape?
+      mir::Shape input_shape = ShapeHelper::createShape(std::vector<int>(), 0);
+      // WARNING! Temporary solution!
+      node->getOperation()->setOutputShape(0, input_shape);
+    }
+    std::cout << "Node name '" << name << "' added\n";
+  }
+}
+
+static std::pair<bool, int> getIntAttribute(const onnx::NodeProto& onnxNode, const std::string& name = "axis") {
+  for (const auto& att : onnxNode.attribute()) {
+    if (att.name() == name) {
+      assert(att.type() == onnx::AttributeProto_AttributeType::AttributeProto_AttributeType_INT);
+      return {true, att.i()};
+    }
+  }
+  return {false, 0};
+}
+
+mir::Graph *ONNXImporterImpl::createIR() {
+  createGraphInputs();
+  std::set <std::string> problems_op_set;
+
+  // for all nodes in onnx graph
+  for (auto onnxNode : _model->graph().node()) {
+    assert(onnxNode.has_op_type());
+    auto op_type = onnxNode.op_type().c_str();
+    std::vector<mir::INode::Ref> input_nodes;
+    // Fill inputs of the given node
+    for (auto name : onnxNode.input()) {
+      if (_opsForBlobsTheyOutput.find(name) != _opsForBlobsTheyOutput.end())
+        input_nodes.push_back(_opsForBlobsTheyOutput[name]);
+      else
+        std::cout << "Node name '" << name << "' was not found\n";
+    }
+    std::vector<std::shared_ptr<mir::TensorVariant>> params;
+    std::vector<mir::INode::Ref> outputs;
+    mir::INode *prev;
+    auto *opType = ONNXPerfectHash::getONNXOpType(op_type, onnxNode.op_type().size());
+    // Variables that receive the result of getIntAttribute()
+    bool found;
+    int  value;
+    switch (opType->opCode) {
+      case ONNXOpCode::opIdentity:
+        // TODO: We simply remove the operation because it does nothing. Is it OK?
+        break;
+      case ONNXOpCode::opConv:
+        outputs = _opCreator.createConv2D(input_nodes, params, onnxNode);
+        break;
+      // TODO: not sure it's OK for pooling
+      case ONNXOpCode::opAveragePool:
+      case ONNXOpCode::opMaxPool:
+/*
+        explicit PoolOp(const Shape &windowShape, const Shape &strides, PoolingType poolType,
+                        PaddingType padding, BorderType borderType)
+          : OpDescription(1, 1), _padding(padding), _poolingType(poolType),
+            _borderType(borderType), _windowShape(windowShape), _strides(strides)
+        {
+          _pads.resize(_windowShape.rank());
+        }
+*/
+        outputs = _opCreator.createPool(input_nodes, opType->opCode);
+        break;
+      case ONNXOpCode::opConcat:
+        std::tie (found, value) = getIntAttribute(onnxNode);
+        if (found)
+          outputs = _opCreator.createConcat(input_nodes, value);
+        else
+          throw PassException("Concat must have 'axis' attribute");
+        break;
+      case ONNXOpCode::opReshape:
+        outputs = _opCreator.createReshape(input_nodes[0], input_nodes[1]->getOperation()->getOutputShape(0));
+        break;
+      case ONNXOpCode::opRelu:
+        outputs = _opCreator.createRelu(input_nodes);
+        break;
+      case ONNXOpCode::opSoftmax: {
+        std::tie (found, value) = getIntAttribute(onnxNode);
+        int axis = found ? value : 1;
+        outputs = _opCreator.createSoftmax(input_nodes, axis);
+        break;
+      }
+      case ONNXOpCode::opScale:
+        outputs = _opCreator.createScale(input_nodes, params, onnxNode);
+        break;
+      case ONNXOpCode::opBatchNormalization:
+        outputs = _opCreator.createBatchNorm(input_nodes, params, onnxNode);
+        break;
+      case ONNXOpCode::opDropout: {
+        float ratio = 0.5;
+        if (onnxNode.attribute_size()) {
+          assert(onnxNode.attribute_size() == 1);
+          auto att = onnxNode.attribute(0);
+          // FIXME: it seems there could be optional attributes
+          assert (!att.name().compare("ratio"));
+          assert (att.type() == onnx::AttributeProto_AttributeType::AttributeProto_AttributeType_FLOAT);
+          // A FLOAT attribute carries its value in 'f', not in the 'floats' list
+          ratio = att.f();
+        }
+        outputs = _opCreator.createDropout(input_nodes, ratio);
+        break;
+      }
+      default:
+        problems_op_set.insert(op_type);
+    }
+
+    if (!outputs.size()) {
+      // FIXME: it's for debugging only
+      for (auto name : onnxNode.output()) {
+        auto node = _graph->create<mir::ops::VariableOp>(name);
+        outputs.push_back(node);
+      }
+    } else {
+      for (int i = 0; i < outputs.size(); i++) {
+        outputs[i]->setName(onnxNode.output(i));
+      }
+    }
+
+    for (auto item : outputs) {
+      if (_opsForBlobsTheyOutput.find(item->getName()) == _opsForBlobsTheyOutput.end()) {
+        _opsForBlobsTheyOutput[item->getName()] = item;
+        std::cout << "Node name '" << item->getName() << "' added\n";
+      } else
+        std::cout << "Name duplication: " << item->getName() << std::endl;
+    }
+    if (outputs.size())
+      // FIXME: it should be done properly via the given graph outputs
+      _graphOutputs.assign(outputs.begin(), outputs.end());
+  }
+  if (!problems_op_set.empty()) {
+    std::string msg("The following operations are not supported yet:\n");
+    for (auto op : problems_op_set)
+      msg.append(op + "\n");
+    std::cout << msg;
+    //throw PassException(msg);
+  }
+  // set graph outputs
+  for (auto &outputIdx : _graphOutputs)
+    _graph->markOutput(outputIdx);
+
+  return _graph;
+}
+} // namespace nnc
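
Note that createTensor() above only handles the typed float_data/double_data/int32_data/int64_data fields of onnx::TensorProto; many exporters serialize weights into raw_data instead. Below is a minimal sketch of a helper for that case (hypothetical, not part of this commit; the name readFloatTensor is made up, and it assumes the tensor actually holds FLOAT elements):

    #include <onnx/onnx.pb.h>
    #include <cstring>
    #include <vector>

    // Hypothetical helper: copy a FLOAT initializer whether it was serialized into
    // the typed float_data field or into raw_data.
    static std::vector<float> readFloatTensor(const onnx::TensorProto& t) {
      std::vector<float> out;
      if (t.float_data_size() != 0) {
        out.assign(t.float_data().begin(), t.float_data().end());
      } else if (!t.raw_data().empty()) {
        out.resize(t.raw_data().size() / sizeof(float));
        std::memcpy(out.data(), t.raw_data().data(), t.raw_data().size());
      }
      return out;
    }
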
diff --git a/contrib/nnc/passes/onnx_frontend/ONNXImporterImpl.h b/contrib/nnc/passes/onnx_frontend/ONNXImporterImpl.h
new file mode 100644 (file)
index 0000000..d8387a8
--- /dev/null
@@ -0,0 +1,63 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef NNCC_ONNX_IMPORTER_H
+#define NNCC_ONNX_IMPORTER_H
+
+#include <google/protobuf/stubs/common.h>
+#include <memory>
+#include <string>
+#include <onnx/onnx.pb.h>
+
+#include "core/modelIR/graph.h"
+#include "ONNXOpType.h"
+#include "ONNXOpCreator.h"
+#include "passes/common_frontend/nn_importer.h"
+
+namespace nnc {
+
+class ONNXImporterImpl : public NNImporter {
+public:
+    explicit ONNXImporterImpl(std::string filename) {
+      _modelFilename = std::move(filename);
+      _graph = new mir::Graph();
+      _opCreator.setMirGraph(_graph);
+    }
+
+    void import() override;
+    mir::Graph *createIR() override;
+
+private:
+    void createGraphInputs();
+    std::shared_ptr<mir::TensorVariant> createTensor(const onnx::TensorProto *tensor);
+    std::vector<std::shared_ptr<mir::TensorVariant>> createOpParams(::onnx::NodeProto node);
+
+    // This map maps ONNX tensor names to MIR operations/nodes
+    // that correspond to operations having these tensors as output.
+    std::map<std::string, mir::INode::Ref> _opsForBlobsTheyOutput;
+
+    // This map keeps named tensors used as graph input initializers.
+    std::map<std::string, std::shared_ptr<mir::TensorVariant>> _inputTensors;
+    std::vector<mir::INode::Ref> _graphOutputs;
+
+    std::string _modelFilename;
+    std::unique_ptr<onnx::ModelProto> _model;
+    mir::Graph* _graph;
+    ONNXOpCreator _opCreator;
+};
+} // namespace nnc
+
+#endif // NNCC_ONNX_IMPORTER_H
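
For reference, a minimal usage sketch of the class above (illustrative only; it mirrors what ONNXImporter::run() does, with the model path passed in explicitly, and the function name importOnnxModel is made up here):

    #include "ONNXImporterImpl.h"

    // Drive the importer directly, outside the pass manager.
    nnc::mir::Graph* importOnnxModel(const std::string& path) {
      nnc::ONNXImporterImpl importer(path); // stores the filename and allocates an empty mir::Graph
      importer.import();                    // parse the serialized onnx::ModelProto
      return importer.createIR();           // walk the graph nodes and build Model IR
    }
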
diff --git a/contrib/nnc/passes/onnx_frontend/ONNXOpCreator.cpp b/contrib/nnc/passes/onnx_frontend/ONNXOpCreator.cpp
new file mode 100644 (file)
index 0000000..7d3d96d
--- /dev/null
@@ -0,0 +1,93 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <set>
+#include <cmath>
+#include "core/modelIR/Index.h"
+#include "core/modelIR/graph.h"
+#include "core/modelIR/ShapeRange.h"
+#include "core/modelIR/Tensor.h"
+#include "core/modelIR/operations/BatchNormOp.h"
+#include "core/modelIR/operations/BiasAddOp.h"
+#include "core/modelIR/operations/CappedReluOp.h"
+#include "core/modelIR/operations/ConcatOp.h"
+#include "core/modelIR/operations/Conv2DOp.h"
+#include "core/modelIR/operations/DepthwiseConv2DOp.h"
+#include "core/modelIR/operations/DropoutOp.h"
+#include "core/modelIR/operations/FullyConnectedOp.h"
+#include "core/modelIR/operations/PoolOp.h"
+#include "core/modelIR/operations/ReluOp.h"
+#include "core/modelIR/operations/ReshapeOp.h"
+#include "core/modelIR/operations/ScaleOp.h"
+#include "core/modelIR/operations/SoftmaxOp.h"
+#include "core/modelIR/operations/VariableOp.h"
+#include "passes/common_frontend/shape_helper.h"
+#include "pass/PassException.h"
+#include "ONNXOpCreator.h"
+
+namespace nnc {
+
+using namespace mir;
+
+std::vector<INode::Ref> ONNXOpCreator::createConv2D(InputOps inputs, InputParams params,
+                                                    ::onnx::NodeProto node) {
+  return std::vector<INode::Ref>();
+}
+
+std::vector<INode::Ref> ONNXOpCreator::createConcat(InputOps inputs, int axis) {
+  return createOp<ops::ConcatOp>(inputs, inputs.size(), axis);
+}
+
+std::vector<INode::Ref> ONNXOpCreator::createPool(InputOps inputs, ONNXOpCode opCode) {
+  return std::vector<INode::Ref>();
+}
+
+std::vector<INode::Ref> ONNXOpCreator::createSoftmax(InputOps inputs, int axis) {
+  return createOp<ops::SoftmaxOp>(inputs, axis);
+}
+
+std::vector<INode::Ref> ONNXOpCreator::createReshape(INode::Ref inputData, Shape outputShape) {
+  std::vector<INode::Ref> inputNodes;
+  inputNodes.push_back(inputData);
+  auto outputs = createOp<ops::ReshapeOp>(inputNodes);
+  outputs[0]->getOperation()->setOutputShape(0, outputShape);
+  return outputs;
+}
+
+std::vector<INode::Ref> ONNXOpCreator::createRelu(InputOps inputs) {
+  assert(inputs.size() == 1);
+  return createOp<ops::ReluOp>(inputs);
+}
+
+std::vector<INode::Ref> ONNXOpCreator::createScale(InputOps inputs, InputParams params,  ::onnx::NodeProto node) {
+  return std::vector<INode::Ref>();
+}
+
+std::vector<INode::Ref> ONNXOpCreator::createBatchNorm(InputOps inputs, InputParams params,  ::onnx::NodeProto node) {
+  return std::vector<INode::Ref>();
+}
+
+std::vector<INode::Ref> ONNXOpCreator::createDropout(InputOps inputs, float ratio) {
+  return createOp<ops::DropoutOp>(inputs, ratio);
+}
+
+void ONNXOpCreator::connectInputs(INode::Ref op, InputOps inputs) {
+  // TODO: this part doesn't support the situation where an operator takes as input
+  // some tensor that is not the 0th output of some other operator
+  for (int i = 0; i < static_cast<int>(inputs.size()); ++i)
+    op->connectInputTo(i, inputs[i]->getOutput(0));
+}
+} // namespace nnc
diff --git a/contrib/nnc/passes/onnx_frontend/ONNXOpCreator.h b/contrib/nnc/passes/onnx_frontend/ONNXOpCreator.h
new file mode 100644 (file)
index 0000000..ef282e8
--- /dev/null
@@ -0,0 +1,70 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef NNCC_ONNX_OP_CREATOR_H
+#define NNCC_ONNX_OP_CREATOR_H
+
+#include <set>
+#include <map>
+#include <vector>
+#include <memory>
+#include "core/modelIR/graph.h"
+#include "core/modelIR/ir_node.h"
+#include "core/modelIR/TensorVariant.h"
+#include "core/modelIR/operations/common.h"
+#include "core/modelIR/Shape.h"
+#include "onnx/onnx.pb.h"
+#include "ONNXOpType.h"
+
+namespace nnc {
+
+class ONNXOpCreator {
+public:
+  using InputOps = std::vector<nnc::mir::INode::Ref>&;
+  using InputParams = std::vector<std::shared_ptr<nnc::mir::TensorVariant>>&;
+
+  ONNXOpCreator() = default;
+  std::vector<nnc::mir::INode::Ref> createConv2D(InputOps inputs, InputParams params, ::onnx::NodeProto node);
+  std::vector<nnc::mir::INode::Ref> createConcat(InputOps inputs, int axis);
+  std::vector<nnc::mir::INode::Ref> createPool(InputOps inputs, ONNXOpCode opCode);
+  std::vector<nnc::mir::INode::Ref> createSoftmax(InputOps inputs, int axis);
+  std::vector<nnc::mir::INode::Ref> createReshape(nnc::mir::INode::Ref inputData, nnc::mir::Shape outputShape);
+  std::vector<nnc::mir::INode::Ref> createRelu(InputOps inputs);
+  std::vector<nnc::mir::INode::Ref> createScale(InputOps inputs, InputParams params, ::onnx::NodeProto node);
+  std::vector<nnc::mir::INode::Ref> createBatchNorm(InputOps inputs, InputParams params, ::onnx::NodeProto node);
+  std::vector<nnc::mir::INode::Ref> createDropout(InputOps inputs, float ratio);
+  void setMirGraph(mir::Graph* g){
+    _graph = g;
+  }
+private:
+  void connectInputs(nnc::mir::INode::Ref op, std::vector<nnc::mir::INode::Ref>& inputs);
+  template <typename OpType, typename ...Types>
+  std::vector<nnc::mir::INode::Ref> createOp(std::vector<nnc::mir::INode::Ref>& inputs, Types&&... args);
+  mir::Graph* _graph = nullptr;
+};
+
+template<typename OpType, typename ...Types>
+std::vector<nnc::mir::INode::Ref> ONNXOpCreator::createOp(std::vector<nnc::mir::INode::Ref>& inputs, Types&&... args) {
+  std::vector<nnc::mir::INode::Ref> outputs;
+
+  // TODO: set operation names
+  auto op = _graph->create<OpType>("", std::forward<Types>(args)...);
+  connectInputs(op, inputs);
+  outputs.push_back(op);
+  return outputs;
+}
+} // namespace nnc
+#endif //NNCC_ONNX_OP_CREATOR_H
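
A small usage sketch of the creator interface above (hypothetical helper named addRelu, following the pattern used in ONNXImporterImpl::createIR()):

    #include <vector>
    #include "ONNXOpCreator.h"

    // Build a ReLU node on top of an existing node: createOp<ops::ReluOp> creates the
    // node in the given graph and connectInputs() wires input->getOutput(0) into it.
    std::vector<nnc::mir::INode::Ref> addRelu(nnc::mir::Graph* graph,
                                              nnc::mir::INode::Ref input) {
      nnc::ONNXOpCreator creator;
      creator.setMirGraph(graph);
      std::vector<nnc::mir::INode::Ref> inputs{input};
      return creator.createRelu(inputs);
    }
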
index 4ee5b07..88a1efe 100644 (file)
@@ -19,9 +19,9 @@
 
 namespace nnc {
 enum class ONNXOpSupportState {
-  Unsupported,
-  Fullysupported,
-  Partiallysupported
+  unSupported,
+  fullySupported,
+  partiallySupported
 };
 
 enum class ONNXOpCode {
@@ -131,7 +131,7 @@ enum class ONNXOpCode {
   opUnsqueeze,
   opUpsample,
   opXor,
-  op// experimental
+  // experimental
   opATen,
   opAffine,
   opConstantFill,
index 2dc3e9a..5bbc9c7 100644 (file)
@@ -31,121 +31,121 @@ using namespace nnc;
 
 struct ONNXOpType { char *name; ONNXOpType opCode; ONNXOpSupportState state};
 %%
-Abs, ONNXOpCode::Abs, ONNXOpSupportState::Unsupported
-Acos, ONNXOpCode::Acos, ONNXOpSupportState::Unsupported
-Add, ONNXOpCode::Add, ONNXOpSupportState::Unsupported
-And, ONNXOpCode::And, ONNXOpSupportState::Unsupported
-ArgMax, ONNXOpCode::ArgMax, ONNXOpSupportState::Unsupported
-ArgMin, ONNXOpCode::ArgMin, ONNXOpSupportState::Unsupported
-Asin, ONNXOpCode::Asin, ONNXOpSupportState::Unsupported
-Atan, ONNXOpCode::Atan, ONNXOpSupportState::Unsupported
-AveragePool, ONNXOpCode::AveragePool, ONNXOpSupportState::Fullysupported
-BatchNormalization, ONNXOpCode::BatchNormalization, ONNXOpSupportState::Fullysupported
-Cast, ONNXOpCode::Cast, ONNXOpSupportState::Unsupported
-Ceil, ONNXOpCode::Ceil, ONNXOpSupportState::Unsupported
-Clip, ONNXOpCode::Clip, ONNXOpSupportState::Unsupported
-Concat, ONNXOpCode::Concat, ONNXOpSupportState::Fullysupported
-Constant, ONNXOpCode::Constant, ONNXOpSupportState::Unsupported
-ConstantLike, ONNXOpCode::ConstantLike, ONNXOpSupportState::Unsupported
-Conv, ONNXOpCode::Conv, ONNXOpSupportState::Fullysupported
-ConvTranspose, ONNXOpCode::ConvTranspose, ONNXOpSupportState::Unsupported
-Cos, ONNXOpCode::Cos, ONNXOpSupportState::Unsupported
-DepthToSpace, ONNXOpCode::DepthToSpace, ONNXOpSupportState::Unsupported
-Div, ONNXOpCode::Div, ONNXOpSupportState::Unsupported
-Dropout, ONNXOpCode::Dropout, ONNXOpSupportState::Fullysupported
-Elu, ONNXOpCode::Elu, ONNXOpSupportState::Unsupported
-Equal, ONNXOpCode::Equal, ONNXOpSupportState::Unsupported
-Exp, ONNXOpCode::Exp, ONNXOpSupportState::Unsupported
-Expand, ONNXOpCode::Expand, ONNXOpSupportState::Unsupported
-EyeLike, ONNXOpCode::EyeLike, ONNXOpSupportState::Unsupported
-Flatten, ONNXOpCode::Flatten, ONNXOpSupportState::Unsupported
-Floor, ONNXOpCode::Floor, ONNXOpSupportState::Unsupported
-GRU, ONNXOpCode::GRU, ONNXOpSupportState::Unsupported
-Gather, ONNXOpCode::Gather, ONNXOpSupportState::Unsupported
-Gemm, ONNXOpCode::Gemm, ONNXOpSupportState::Unsupported
-GlobalAveragePool, ONNXOpCode::GlobalAveragePool, ONNXOpSupportState::Unsupported
-GlobalLpPool, ONNXOpCode::GlobalLpPool, ONNXOpSupportState::Unsupported
-GlobalMaxPool, ONNXOpCode::GlobalMaxPool, ONNXOpSupportState::Unsupported
-Greater, ONNXOpCode::Greater, ONNXOpSupportState::Unsupported
-HardSigmoid, ONNXOpCode::HardSigmoid, ONNXOpSupportState::Unsupported
-Hardmax, ONNXOpCode::Hardmax, ONNXOpSupportState::Unsupported
-Identity, ONNXOpCode::Identity, ONNXOpSupportState::Supported
-If, ONNXOpCode::If, ONNXOpSupportState::Unsupported
-InstanceNormalizati, ONNXOpCode::InstanceNormalizati, ONNXOpSupportState::Unsupported
-LRN, ONNXOpCode::LRN, ONNXOpSupportState::Unsupported
-LSTM, ONNXOpCode::LSTM, ONNXOpSupportState::Unsupported
-LeakyRelu, ONNXOpCode::LeakyRelu, ONNXOpSupportState::Unsupported
-Less, ONNXOpCode::Less, ONNXOpSupportState::Unsupported
-Log, ONNXOpCode::Log, ONNXOpSupportState::Unsupported
-LogSoftmax, ONNXOpCode::LogSoftmax, ONNXOpSupportState::Unsupported
-Loop, ONNXOpCode::Loop, ONNXOpSupportState::Unsupported
-LpNormalization, ONNXOpCode::LpNormalization, ONNXOpSupportState::Unsupported
-LpPool, ONNXOpCode::LpPool, ONNXOpSupportState::Unsupported
-MatMul, ONNXOpCode::MatMul, ONNXOpSupportState::Unsupported
-Max, ONNXOpCode::Max, ONNXOpSupportState::Unsupported
-MaxPool, ONNXOpCode::MaxPool, ONNXOpSupportState::Supported
-MaxRoiPool, ONNXOpCode::MaxRoiPool, ONNXOpSupportState::Unsupported
-Mean, ONNXOpCode::Mean, ONNXOpSupportState::Unsupported
-Min, ONNXOpCode::Min, ONNXOpSupportState::Unsupported
-Mul, ONNXOpCode::Mul, ONNXOpSupportState::Unsupported
-Multinomial, ONNXOpCode::Multinomial, ONNXOpSupportState::Unsupported
-Neg, ONNXOpCode::Neg, ONNXOpSupportState::Unsupported
-Not, ONNXOpCode::Not, ONNXOpSupportState::Unsupported
-Or, ONNXOpCode::Or, ONNXOpSupportState::Unsupported
-PRelu, ONNXOpCode::PRelu, ONNXOpSupportState::Unsupported
-Pad, ONNXOpCode::Pad, ONNXOpSupportState::Unsupported
-Pow, ONNXOpCode::Pow, ONNXOpSupportState::Unsupported
-RNN, ONNXOpCode::RNN, ONNXOpSupportState::Unsupported
-RandomNormal, ONNXOpCode::RandomNormal, ONNXOpSupportState::Unsupported
-RandomNormalLike, ONNXOpCode::RandomNormalLike, ONNXOpSupportState::Unsupported
-RandomUniform, ONNXOpCode::RandomUniform, ONNXOpSupportState::Unsupported
-RandomUniformLike, ONNXOpCode::RandomUniformLike, ONNXOpSupportState::Unsupported
-Reciprocal, ONNXOpCode::Reciprocal, ONNXOpSupportState::Unsupported
-ReduceL1, ONNXOpCode::ReduceL1, ONNXOpSupportState::Unsupported
-ReduceL2, ONNXOpCode::ReduceL2, ONNXOpSupportState::Unsupported
-ReduceLogSum, ONNXOpCode::ReduceLogSum, ONNXOpSupportState::Unsupported
-ReduceLogSumExp, ONNXOpCode::ReduceLogSumExp, ONNXOpSupportState::Unsupported
-ReduceMax, ONNXOpCode::ReduceMax, ONNXOpSupportState::Unsupported
-ReduceMean, ONNXOpCode::ReduceMean, ONNXOpSupportState::Unsupported
-ReduceMin, ONNXOpCode::ReduceMin, ONNXOpSupportState::Unsupported
-ReduceProd, ONNXOpCode::ReduceProd, ONNXOpSupportState::Unsupported
-ReduceSum, ONNXOpCode::ReduceSum, ONNXOpSupportState::Unsupported
-ReduceSumSquare, ONNXOpCode::ReduceSumSquare, ONNXOpSupportState::Unsupported
-Relu, ONNXOpCode::Relu, ONNXOpSupportState::Fullysupported
-Reshape, ONNXOpCode::Reshape, ONNXOpSupportState::Fullysupported
-Scan, ONNXOpCode::Scan, ONNXOpSupportState::Unsupported
-Selu, ONNXOpCode::Selu, ONNXOpSupportState::Unsupported
-Shape, ONNXOpCode::Shape, ONNXOpSupportState::Unsupported
-Sigmoid, ONNXOpCode::Sigmoid, ONNXOpSupportState::Unsupported
-Sin, ONNXOpCode::Sin, ONNXOpSupportState::Unsupported
-Size, ONNXOpCode::Size, ONNXOpSupportState::Unsupported
-Slice, ONNXOpCode::Slice, ONNXOpSupportState::Unsupported
-Softmax, ONNXOpCode::Softmax, ONNXOpSupportState::Fullysupported
-Softplus, ONNXOpCode::Softplus, ONNXOpSupportState::Unsupported
-Softsign, ONNXOpCode::Softsign, ONNXOpSupportState::Unsupported
-SpaceToDepth, ONNXOpCode::SpaceToDepth, ONNXOpSupportState::Unsupported
-Split, ONNXOpCode::Split, ONNXOpSupportState::Fullysupported
-Sqrt, ONNXOpCode::Sqrt, ONNXOpSupportState::Unsupported
-Squeeze, ONNXOpCode::Squeeze, ONNXOpSupportState::Unsupported
-Sub, ONNXOpCode::Sub, ONNXOpSupportState::Unsupported
-Sum, ONNXOpCode::Sum, ONNXOpSupportState::Unsupported
-Tan, ONNXOpCode::Tan, ONNXOpSupportState::Unsupported
-Tanh, ONNXOpCode::Tanh, ONNXOpSupportState::Unsupported
-Tile, ONNXOpCode::Tile, ONNXOpSupportState::Unsupported
-TopK, ONNXOpCode::TopK, ONNXOpSupportState::Unsupported
-Transpose, ONNXOpCode::Transpose, ONNXOpSupportState::Unsupported
-Unsqueeze, ONNXOpCode::Unsqueeze, ONNXOpSupportState::Unsupported
-Upsample, ONNXOpCode::Upsample, ONNXOpSupportState::Unsupported
-Xor, ONNXOpCode::Xor, ONNXOpSupportState::Unsupported
-ATen, ONNXOpCode::ATen, ONNXOpSupportState::Unsupported
-Affine, ONNXOpCode::Affine, ONNXOpSupportState::Unsupported
-ConstantFill, ONNXOpCode::ConstantFill, ONNXOpSupportState::Unsupported
-Crop, ONNXOpCode::Crop, ONNXOpSupportState::Unsupported
-DynamicSlice, ONNXOpCode::DynamicSlice, ONNXOpSupportState::Unsupported
-GRUUnit, ONNXOpCode::GRUUnit, ONNXOpSupportState::Unsupported
-GivenTensorFill, ONNXOpCode::GivenTensorFill, ONNXOpSupportState::Unsupported
-ImageScaler, ONNXOpCode::ImageScaler, ONNXOpSupportState::Unsupported
-ParametricSoftplus, ONNXOpCode::ParametricSoftplus, ONNXOpSupportState::Unsupported
-Scale, ONNXOpCode::Scale, ONNXOpSupportState::Partiallysupported
-ScaledTanh, ONNXOpCode::ScaledTanh, ONNXOpSupportState::Unsupported
-ThresholdedRelu, ONNXOpCode::ThresholdedRelu, ONNXOpSupportState::Unsupported
+Abs, ONNXOpCode::opAbs, ONNXOpSupportState::unSupported
+Acos, ONNXOpCode::opAcos, ONNXOpSupportState::unSupported
+Add, ONNXOpCode::opAdd, ONNXOpSupportState::unSupported
+And, ONNXOpCode::opAnd, ONNXOpSupportState::unSupported
+ArgMax, ONNXOpCode::opArgMax, ONNXOpSupportState::unSupported
+ArgMin, ONNXOpCode::opArgMin, ONNXOpSupportState::unSupported
+Asin, ONNXOpCode::opAsin, ONNXOpSupportState::unSupported
+Atan, ONNXOpCode::opAtan, ONNXOpSupportState::unSupported
+AveragePool, ONNXOpCode::opAveragePool, ONNXOpSupportState::fullySupported
+BatchNormalization, ONNXOpCode::opBatchNormalization, ONNXOpSupportState::fullySupported
+Cast, ONNXOpCode::opCast, ONNXOpSupportState::unSupported
+Ceil, ONNXOpCode::opCeil, ONNXOpSupportState::unSupported
+Clip, ONNXOpCode::opClip, ONNXOpSupportState::unSupported
+Concat, ONNXOpCode::opConcat, ONNXOpSupportState::fullySupported
+Constant, ONNXOpCode::opConstant, ONNXOpSupportState::unSupported
+ConstantLike, ONNXOpCode::opConstantLike, ONNXOpSupportState::unSupported
+Conv, ONNXOpCode::opConv, ONNXOpSupportState::fullySupported
+ConvTranspose, ONNXOpCode::opConvTranspose, ONNXOpSupportState::unSupported
+Cos, ONNXOpCode::opCos, ONNXOpSupportState::unSupported
+DepthToSpace, ONNXOpCode::opDepthToSpace, ONNXOpSupportState::unSupported
+Div, ONNXOpCode::opDiv, ONNXOpSupportState::unSupported
+Dropout, ONNXOpCode::opDropout, ONNXOpSupportState::fullySupported
+Elu, ONNXOpCode::opElu, ONNXOpSupportState::unSupported
+Equal, ONNXOpCode::opEqual, ONNXOpSupportState::unSupported
+Exp, ONNXOpCode::opExp, ONNXOpSupportState::unSupported
+Expand, ONNXOpCode::opExpand, ONNXOpSupportState::unSupported
+EyeLike, ONNXOpCode::opEyeLike, ONNXOpSupportState::unSupported
+Flatten, ONNXOpCode::opFlatten, ONNXOpSupportState::unSupported
+Floor, ONNXOpCode::opFloor, ONNXOpSupportState::unSupported
+GRU, ONNXOpCode::opGRU, ONNXOpSupportState::unSupported
+Gather, ONNXOpCode::opGather, ONNXOpSupportState::unSupported
+Gemm, ONNXOpCode::opGemm, ONNXOpSupportState::unSupported
+GlobalAveragePool, ONNXOpCode::opGlobalAveragePool, ONNXOpSupportState::unSupported
+GlobalLpPool, ONNXOpCode::opGlobalLpPool, ONNXOpSupportState::unSupported
+GlobalMaxPool, ONNXOpCode::opGlobalMaxPool, ONNXOpSupportState::unSupported
+Greater, ONNXOpCode::opGreater, ONNXOpSupportState::unSupported
+HardSigmoid, ONNXOpCode::opHardSigmoid, ONNXOpSupportState::unSupported
+Hardmax, ONNXOpCode::opHardmax, ONNXOpSupportState::unSupported
+Identity, ONNXOpCode::opIdentity, ONNXOpSupportState::fullySupported
+If, ONNXOpCode::opIf, ONNXOpSupportState::unSupported
+InstanceNormalizati, ONNXOpCode::opInstanceNormalizati, ONNXOpSupportState::unSupported
+LRN, ONNXOpCode::opLRN, ONNXOpSupportState::unSupported
+LSTM, ONNXOpCode::opLSTM, ONNXOpSupportState::unSupported
+LeakyRelu, ONNXOpCode::opLeakyRelu, ONNXOpSupportState::unSupported
+Less, ONNXOpCode::opLess, ONNXOpSupportState::unSupported
+Log, ONNXOpCode::opLog, ONNXOpSupportState::unSupported
+LogSoftmax, ONNXOpCode::opLogSoftmax, ONNXOpSupportState::unSupported
+Loop, ONNXOpCode::opLoop, ONNXOpSupportState::unSupported
+LpNormalization, ONNXOpCode::opLpNormalization, ONNXOpSupportState::unSupported
+LpPool, ONNXOpCode::opLpPool, ONNXOpSupportState::unSupported
+MatMul, ONNXOpCode::opMatMul, ONNXOpSupportState::unSupported
+Max, ONNXOpCode::opMax, ONNXOpSupportState::unSupported
+MaxPool, ONNXOpCode::opMaxPool, ONNXOpSupportState::fullySupported
+MaxRoiPool, ONNXOpCode::opMaxRoiPool, ONNXOpSupportState::unSupported
+Mean, ONNXOpCode::opMean, ONNXOpSupportState::unSupported
+Min, ONNXOpCode::opMin, ONNXOpSupportState::unSupported
+Mul, ONNXOpCode::opMul, ONNXOpSupportState::unSupported
+Multinomial, ONNXOpCode::opMultinomial, ONNXOpSupportState::unSupported
+Neg, ONNXOpCode::opNeg, ONNXOpSupportState::unSupported
+Not, ONNXOpCode::opNot, ONNXOpSupportState::unSupported
+Or, ONNXOpCode::opOr, ONNXOpSupportState::unSupported
+PRelu, ONNXOpCode::opPRelu, ONNXOpSupportState::unSupported
+Pad, ONNXOpCode::opPad, ONNXOpSupportState::unSupported
+Pow, ONNXOpCode::opPow, ONNXOpSupportState::unSupported
+RNN, ONNXOpCode::opRNN, ONNXOpSupportState::unSupported
+RandomNormal, ONNXOpCode::opRandomNormal, ONNXOpSupportState::unSupported
+RandomNormalLike, ONNXOpCode::opRandomNormalLike, ONNXOpSupportState::unSupported
+RandomUniform, ONNXOpCode::opRandomUniform, ONNXOpSupportState::unSupported
+RandomUniformLike, ONNXOpCode::opRandomUniformLike, ONNXOpSupportState::unSupported
+Reciprocal, ONNXOpCode::opReciprocal, ONNXOpSupportState::unSupported
+ReduceL1, ONNXOpCode::opReduceL1, ONNXOpSupportState::unSupported
+ReduceL2, ONNXOpCode::opReduceL2, ONNXOpSupportState::unSupported
+ReduceLogSum, ONNXOpCode::opReduceLogSum, ONNXOpSupportState::unSupported
+ReduceLogSumExp, ONNXOpCode::opReduceLogSumExp, ONNXOpSupportState::unSupported
+ReduceMax, ONNXOpCode::opReduceMax, ONNXOpSupportState::unSupported
+ReduceMean, ONNXOpCode::opReduceMean, ONNXOpSupportState::unSupported
+ReduceMin, ONNXOpCode::opReduceMin, ONNXOpSupportState::unSupported
+ReduceProd, ONNXOpCode::opReduceProd, ONNXOpSupportState::unSupported
+ReduceSum, ONNXOpCode::opReduceSum, ONNXOpSupportState::unSupported
+ReduceSumSquare, ONNXOpCode::opReduceSumSquare, ONNXOpSupportState::unSupported
+Relu, ONNXOpCode::opRelu, ONNXOpSupportState::fullySupported
+Reshape, ONNXOpCode::opReshape, ONNXOpSupportState::fullySupported
+Scan, ONNXOpCode::opScan, ONNXOpSupportState::unSupported
+Selu, ONNXOpCode::opSelu, ONNXOpSupportState::unSupported
+Shape, ONNXOpCode::opShape, ONNXOpSupportState::unSupported
+Sigmoid, ONNXOpCode::opSigmoid, ONNXOpSupportState::unSupported
+Sin, ONNXOpCode::opSin, ONNXOpSupportState::unSupported
+Size, ONNXOpCode::opSize, ONNXOpSupportState::unSupported
+Slice, ONNXOpCode::opSlice, ONNXOpSupportState::unSupported
+Softmax, ONNXOpCode::opSoftmax, ONNXOpSupportState::fullySupported
+Softplus, ONNXOpCode::opSoftplus, ONNXOpSupportState::unSupported
+Softsign, ONNXOpCode::opSoftsign, ONNXOpSupportState::unSupported
+SpaceToDepth, ONNXOpCode::opSpaceToDepth, ONNXOpSupportState::unSupported
+Split, ONNXOpCode::opSplit, ONNXOpSupportState::fullySupported
+Sqrt, ONNXOpCode::opSqrt, ONNXOpSupportState::unSupported
+Squeeze, ONNXOpCode::opSqueeze, ONNXOpSupportState::unSupported
+Sub, ONNXOpCode::opSub, ONNXOpSupportState::unSupported
+Sum, ONNXOpCode::opSum, ONNXOpSupportState::unSupported
+Tan, ONNXOpCode::opTan, ONNXOpSupportState::unSupported
+Tanh, ONNXOpCode::opTanh, ONNXOpSupportState::unSupported
+Tile, ONNXOpCode::opTile, ONNXOpSupportState::unSupported
+TopK, ONNXOpCode::opTopK, ONNXOpSupportState::unSupported
+Transpose, ONNXOpCode::opTranspose, ONNXOpSupportState::unSupported
+Unsqueeze, ONNXOpCode::opUnsqueeze, ONNXOpSupportState::unSupported
+Upsample, ONNXOpCode::opUpsample, ONNXOpSupportState::unSupported
+Xor, ONNXOpCode::opXor, ONNXOpSupportState::unSupported
+ATen, ONNXOpCode::opATen, ONNXOpSupportState::unSupported
+Affine, ONNXOpCode::opAffine, ONNXOpSupportState::unSupported
+ConstantFill, ONNXOpCode::opConstantFill, ONNXOpSupportState::unSupported
+Crop, ONNXOpCode::opCrop, ONNXOpSupportState::unSupported
+DynamicSlice, ONNXOpCode::opDynamicSlice, ONNXOpSupportState::unSupported
+GRUUnit, ONNXOpCode::opGRUUnit, ONNXOpSupportState::unSupported
+GivenTensorFill, ONNXOpCode::opGivenTensorFill, ONNXOpSupportState::unSupported
+ImageScaler, ONNXOpCode::opImageScaler, ONNXOpSupportState::unSupported
+ParametricSoftplus, ONNXOpCode::opParametricSoftplus, ONNXOpSupportState::unSupported
+Scale, ONNXOpCode::opScale, ONNXOpSupportState::partiallySupported
+ScaledTanh, ONNXOpCode::opScaledTanh, ONNXOpSupportState::unSupported
+ThresholdedRelu, ONNXOpCode::opThresholdedRelu, ONNXOpSupportState::unSupported
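
The keyword table above is compiled by gperf into the lookup function used in ONNXImporterImpl::createIR(). A small caller sketch follows (hypothetical helper named codeFor; note that the generated getONNXOpType() returns a null pointer for names that are not in the table, which the current createIR() does not check):

    #include <cassert>
    #include <string>
    #include "ONNXPerfectHash.h"

    namespace nnc {
    // Hypothetical helper: resolve an ONNX op_type string to its opcode.
    ONNXOpCode codeFor(const std::string& op_type) {
      const auto* info = ONNXPerfectHash::getONNXOpType(op_type.c_str(), op_type.size());
      assert(info != nullptr && "unknown ONNX op_type");
      return info->opCode;
    }
    } // namespace nnc
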
index 13ea69d..b7dd901 100644 (file)
@@ -112,274 +112,274 @@ ONNXPerfectHash::getONNXOpType (const char *str, size_t len)
     {
       {""}, {""}, {""}, {""}, {""}, {""},
 #line 84 "ONNXPerfect.gperf"
-      {"MatMul", ONNXOpCode::MatMul, ONNXOpSupportState::Unsupported},
+      {"MatMul", ONNXOpCode::opMatMul, ONNXOpSupportState::unSupported},
 #line 86 "ONNXPerfect.gperf"
-      {"MaxPool", ONNXOpCode::MaxPool, ONNXOpSupportState::Supported},
+      {"MaxPool", ONNXOpCode::opMaxPool, ONNXOpSupportState::fullySupported},
       {""},
 #line 45 "ONNXPerfect.gperf"
-      {"Ceil", ONNXOpCode::Ceil, ONNXOpSupportState::Unsupported},
+      {"Ceil", ONNXOpCode::opCeil, ONNXOpSupportState::unSupported},
 #line 87 "ONNXPerfect.gperf"
-      {"MaxRoiPool", ONNXOpCode::MaxRoiPool, ONNXOpSupportState::Unsupported},
+      {"MaxRoiPool", ONNXOpCode::opMaxRoiPool, ONNXOpSupportState::unSupported},
       {""},
 #line 99 "ONNXPerfect.gperf"
-      {"RandomNormal", ONNXOpCode::RandomNormal, ONNXOpSupportState::Unsupported},
+      {"RandomNormal", ONNXOpCode::opRandomNormal, ONNXOpSupportState::unSupported},
 #line 105 "ONNXPerfect.gperf"
-      {"ReduceL2", ONNXOpCode::ReduceL2, ONNXOpSupportState::Unsupported},
+      {"ReduceL2", ONNXOpCode::opReduceL2, ONNXOpSupportState::unSupported},
 #line 88 "ONNXPerfect.gperf"
-      {"Mean", ONNXOpCode::Mean, ONNXOpSupportState::Unsupported},
+      {"Mean", ONNXOpCode::opMean, ONNXOpSupportState::unSupported},
 #line 103 "ONNXPerfect.gperf"
-      {"Reciprocal", ONNXOpCode::Reciprocal, ONNXOpSupportState::Unsupported},
+      {"Reciprocal", ONNXOpCode::opReciprocal, ONNXOpSupportState::unSupported},
       {""},
 #line 115 "ONNXPerfect.gperf"
-      {"Reshape", ONNXOpCode::Reshape, ONNXOpSupportState::Fullysupported},
+      {"Reshape", ONNXOpCode::opReshape, ONNXOpSupportState::fullySupported},
 #line 75 "ONNXPerfect.gperf"
-      {"LRN", ONNXOpCode::LRN, ONNXOpSupportState::Unsupported},
+      {"LRN", ONNXOpCode::opLRN, ONNXOpSupportState::unSupported},
 #line 110 "ONNXPerfect.gperf"
-      {"ReduceMin", ONNXOpCode::ReduceMin, ONNXOpSupportState::Unsupported},
+      {"ReduceMin", ONNXOpCode::opReduceMin, ONNXOpSupportState::unSupported},
 #line 109 "ONNXPerfect.gperf"
-      {"ReduceMean", ONNXOpCode::ReduceMean, ONNXOpSupportState::Unsupported},
+      {"ReduceMean", ONNXOpCode::opReduceMean, ONNXOpSupportState::unSupported},
 #line 100 "ONNXPerfect.gperf"
-      {"RandomNormalLike", ONNXOpCode::RandomNormalLike, ONNXOpSupportState::Unsupported},
+      {"RandomNormalLike", ONNXOpCode::opRandomNormalLike, ONNXOpSupportState::unSupported},
 #line 102 "ONNXPerfect.gperf"
-      {"RandomUniformLike", ONNXOpCode::RandomUniformLike, ONNXOpSupportState::Unsupported},
+      {"RandomUniformLike", ONNXOpCode::opRandomUniformLike, ONNXOpSupportState::unSupported},
 #line 98 "ONNXPerfect.gperf"
-      {"RNN", ONNXOpCode::RNN, ONNXOpSupportState::Unsupported},
+      {"RNN", ONNXOpCode::opRNN, ONNXOpSupportState::unSupported},
 #line 46 "ONNXPerfect.gperf"
-      {"Clip", ONNXOpCode::Clip, ONNXOpSupportState::Unsupported},
+      {"Clip", ONNXOpCode::opClip, ONNXOpSupportState::unSupported},
 #line 113 "ONNXPerfect.gperf"
-      {"ReduceSumSquare", ONNXOpCode::ReduceSumSquare, ONNXOpSupportState::Unsupported},
+      {"ReduceSumSquare", ONNXOpCode::opReduceSumSquare, ONNXOpSupportState::unSupported},
       {""},
 #line 142 "ONNXPerfect.gperf"
-      {"ConstantFill", ONNXOpCode::ConstantFill, ONNXOpSupportState::Unsupported},
+      {"ConstantFill", ONNXOpCode::opConstantFill, ONNXOpSupportState::unSupported},
 #line 139 "ONNXPerfect.gperf"
-      {"Xor", ONNXOpCode::Xor, ONNXOpSupportState::Unsupported},
+      {"Xor", ONNXOpCode::opXor, ONNXOpSupportState::unSupported},
 #line 76 "ONNXPerfect.gperf"
-      {"LSTM", ONNXOpCode::LSTM, ONNXOpSupportState::Unsupported},
+      {"LSTM", ONNXOpCode::opLSTM, ONNXOpSupportState::unSupported},
 #line 122 "ONNXPerfect.gperf"
-      {"Slice", ONNXOpCode::Slice, ONNXOpSupportState::Unsupported},
+      {"Slice", ONNXOpCode::opSlice, ONNXOpSupportState::unSupported},
 #line 83 "ONNXPerfect.gperf"
-      {"LpPool", ONNXOpCode::LpPool, ONNXOpSupportState::Unsupported},
+      {"LpPool", ONNXOpCode::opLpPool, ONNXOpSupportState::unSupported},
 #line 49 "ONNXPerfect.gperf"
-      {"ConstantLike", ONNXOpCode::ConstantLike, ONNXOpSupportState::Unsupported},
+      {"ConstantLike", ONNXOpCode::opConstantLike, ONNXOpSupportState::unSupported},
 #line 51 "ONNXPerfect.gperf"
-      {"ConvTranspose", ONNXOpCode::ConvTranspose, ONNXOpSupportState::Unsupported},
+      {"ConvTranspose", ONNXOpCode::opConvTranspose, ONNXOpSupportState::unSupported},
 #line 143 "ONNXPerfect.gperf"
-      {"Crop", ONNXOpCode::Crop, ONNXOpSupportState::Unsupported},
+      {"Crop", ONNXOpCode::opCrop, ONNXOpSupportState::unSupported},
 #line 104 "ONNXPerfect.gperf"
-      {"ReduceL1", ONNXOpCode::ReduceL1, ONNXOpSupportState::Unsupported},
+      {"ReduceL1", ONNXOpCode::opReduceL1, ONNXOpSupportState::unSupported},
       {""},
 #line 67 "ONNXPerfect.gperf"
-      {"GlobalLpPool", ONNXOpCode::GlobalLpPool, ONNXOpSupportState::Unsupported},
+      {"GlobalLpPool", ONNXOpCode::opGlobalLpPool, ONNXOpSupportState::unSupported},
 #line 68 "ONNXPerfect.gperf"
-      {"GlobalMaxPool", ONNXOpCode::GlobalMaxPool, ONNXOpSupportState::Unsupported},
+      {"GlobalMaxPool", ONNXOpCode::opGlobalMaxPool, ONNXOpSupportState::unSupported},
 #line 44 "ONNXPerfect.gperf"
-      {"Cast", ONNXOpCode::Cast, ONNXOpSupportState::Unsupported},
+      {"Cast", ONNXOpCode::opCast, ONNXOpSupportState::unSupported},
 #line 107 "ONNXPerfect.gperf"
-      {"ReduceLogSumExp", ONNXOpCode::ReduceLogSumExp, ONNXOpSupportState::Unsupported},
+      {"ReduceLogSumExp", ONNXOpCode::opReduceLogSumExp, ONNXOpSupportState::unSupported},
 #line 64 "ONNXPerfect.gperf"
-      {"Gather", ONNXOpCode::Gather, ONNXOpSupportState::Unsupported},
+      {"Gather", ONNXOpCode::opGather, ONNXOpSupportState::unSupported},
 #line 66 "ONNXPerfect.gperf"
-      {"GlobalAveragePool", ONNXOpCode::GlobalAveragePool, ONNXOpSupportState::Unsupported},
+      {"GlobalAveragePool", ONNXOpCode::opGlobalAveragePool, ONNXOpSupportState::unSupported},
 #line 90 "ONNXPerfect.gperf"
-      {"Mul", ONNXOpCode::Mul, ONNXOpSupportState::Unsupported},
+      {"Mul", ONNXOpCode::opMul, ONNXOpSupportState::unSupported},
 #line 81 "ONNXPerfect.gperf"
-      {"Loop", ONNXOpCode::Loop, ONNXOpSupportState::Unsupported},
+      {"Loop", ONNXOpCode::opLoop, ONNXOpSupportState::unSupported},
 #line 82 "ONNXPerfect.gperf"
-      {"LpNormalization", ONNXOpCode::LpNormalization, ONNXOpSupportState::Unsupported},
+      {"LpNormalization", ONNXOpCode::opLpNormalization, ONNXOpSupportState::unSupported},
 #line 70 "ONNXPerfect.gperf"
-      {"HardSigmoid", ONNXOpCode::HardSigmoid, ONNXOpSupportState::Unsupported},
+      {"HardSigmoid", ONNXOpCode::opHardSigmoid, ONNXOpSupportState::unSupported},
 #line 53 "ONNXPerfect.gperf"
-      {"DepthToSpace", ONNXOpCode::DepthToSpace, ONNXOpSupportState::Unsupported},
+      {"DepthToSpace", ONNXOpCode::opDepthToSpace, ONNXOpSupportState::unSupported},
 #line 125 "ONNXPerfect.gperf"
-      {"Softsign", ONNXOpCode::Softsign, ONNXOpSupportState::Unsupported},
+      {"Softsign", ONNXOpCode::opSoftsign, ONNXOpSupportState::unSupported},
 #line 114 "ONNXPerfect.gperf"
-      {"Relu", ONNXOpCode::Relu, ONNXOpSupportState::Fullysupported},
+      {"Relu", ONNXOpCode::opRelu, ONNXOpSupportState::fullySupported},
 #line 111 "ONNXPerfect.gperf"
-      {"ReduceProd", ONNXOpCode::ReduceProd, ONNXOpSupportState::Unsupported},
+      {"ReduceProd", ONNXOpCode::opReduceProd, ONNXOpSupportState::unSupported},
 #line 91 "ONNXPerfect.gperf"
-      {"Multinomial", ONNXOpCode::Multinomial, ONNXOpSupportState::Unsupported},
+      {"Multinomial", ONNXOpCode::opMultinomial, ONNXOpSupportState::unSupported},
 #line 69 "ONNXPerfect.gperf"
-      {"Greater", ONNXOpCode::Greater, ONNXOpSupportState::Unsupported},
+      {"Greater", ONNXOpCode::opGreater, ONNXOpSupportState::unSupported},
 #line 101 "ONNXPerfect.gperf"
-      {"RandomUniform", ONNXOpCode::RandomUniform, ONNXOpSupportState::Unsupported},
+      {"RandomUniform", ONNXOpCode::opRandomUniform, ONNXOpSupportState::unSupported},
 #line 112 "ONNXPerfect.gperf"
-      {"ReduceSum", ONNXOpCode::ReduceSum, ONNXOpSupportState::Unsupported},
+      {"ReduceSum", ONNXOpCode::opReduceSum, ONNXOpSupportState::unSupported},
       {""},
 #line 47 "ONNXPerfect.gperf"
-      {"Concat", ONNXOpCode::Concat, ONNXOpSupportState::Fullysupported},
+      {"Concat", ONNXOpCode::opConcat, ONNXOpSupportState::fullySupported},
 #line 106 "ONNXPerfect.gperf"
-      {"ReduceLogSum", ONNXOpCode::ReduceLogSum, ONNXOpSupportState::Unsupported},
+      {"ReduceLogSum", ONNXOpCode::opReduceLogSum, ONNXOpSupportState::unSupported},
 #line 48 "ONNXPerfect.gperf"
-      {"Constant", ONNXOpCode::Constant, ONNXOpSupportState::Unsupported},
+      {"Constant", ONNXOpCode::opConstant, ONNXOpSupportState::unSupported},
 #line 77 "ONNXPerfect.gperf"
-      {"LeakyRelu", ONNXOpCode::LeakyRelu, ONNXOpSupportState::Unsupported},
+      {"LeakyRelu", ONNXOpCode::opLeakyRelu, ONNXOpSupportState::unSupported},
 #line 118 "ONNXPerfect.gperf"
-      {"Shape", ONNXOpCode::Shape, ONNXOpSupportState::Unsupported},
+      {"Shape", ONNXOpCode::opShape, ONNXOpSupportState::unSupported},
 #line 39 "ONNXPerfect.gperf"
-      {"ArgMin", ONNXOpCode::ArgMin, ONNXOpSupportState::Unsupported},
+      {"ArgMin", ONNXOpCode::opArgMin, ONNXOpSupportState::unSupported},
 #line 61 "ONNXPerfect.gperf"
-      {"Flatten", ONNXOpCode::Flatten, ONNXOpSupportState::Unsupported},
+      {"Flatten", ONNXOpCode::opFlatten, ONNXOpSupportState::unSupported},
 #line 93 "ONNXPerfect.gperf"
-      {"Not", ONNXOpCode::Not, ONNXOpSupportState::Unsupported},
+      {"Not", ONNXOpCode::opNot, ONNXOpSupportState::unSupported},
 #line 50 "ONNXPerfect.gperf"
-      {"Conv", ONNXOpCode::Conv, ONNXOpSupportState::Fullysupported},
+      {"Conv", ONNXOpCode::opConv, ONNXOpSupportState::fullySupported},
 #line 62 "ONNXPerfect.gperf"
-      {"Floor", ONNXOpCode::Floor, ONNXOpSupportState::Unsupported},
+      {"Floor", ONNXOpCode::opFloor, ONNXOpSupportState::unSupported},
       {""},
 #line 145 "ONNXPerfect.gperf"
-      {"GRUUnit", ONNXOpCode::GRUUnit, ONNXOpSupportState::Unsupported},
+      {"GRUUnit", ONNXOpCode::opGRUUnit, ONNXOpSupportState::unSupported},
 #line 85 "ONNXPerfect.gperf"
-      {"Max", ONNXOpCode::Max, ONNXOpSupportState::Unsupported},
+      {"Max", ONNXOpCode::opMax, ONNXOpSupportState::unSupported},
 #line 117 "ONNXPerfect.gperf"
-      {"Selu", ONNXOpCode::Selu, ONNXOpSupportState::Unsupported},
+      {"Selu", ONNXOpCode::opSelu, ONNXOpSupportState::unSupported},
       {""}, {""},
 #line 71 "ONNXPerfect.gperf"
-      {"Hardmax", ONNXOpCode::Hardmax, ONNXOpSupportState::Unsupported},
+      {"Hardmax", ONNXOpCode::opHardmax, ONNXOpSupportState::unSupported},
 #line 89 "ONNXPerfect.gperf"
-      {"Min", ONNXOpCode::Min, ONNXOpSupportState::Unsupported},
+      {"Min", ONNXOpCode::opMin, ONNXOpSupportState::unSupported},
 #line 65 "ONNXPerfect.gperf"
-      {"Gemm", ONNXOpCode::Gemm, ONNXOpSupportState::Unsupported},
+      {"Gemm", ONNXOpCode::opGemm, ONNXOpSupportState::unSupported},
       {""},
 #line 141 "ONNXPerfect.gperf"
-      {"Affine", ONNXOpCode::Affine, ONNXOpSupportState::Unsupported},
+      {"Affine", ONNXOpCode::opAffine, ONNXOpSupportState::unSupported},
 #line 55 "ONNXPerfect.gperf"
-      {"Dropout", ONNXOpCode::Dropout, ONNXOpSupportState::Fullysupported},
+      {"Dropout", ONNXOpCode::opDropout, ONNXOpSupportState::fullySupported},
 #line 56 "ONNXPerfect.gperf"
-      {"Elu", ONNXOpCode::Elu, ONNXOpSupportState::Unsupported},
+      {"Elu", ONNXOpCode::opElu, ONNXOpSupportState::unSupported},
 #line 108 "ONNXPerfect.gperf"
-      {"ReduceMax", ONNXOpCode::ReduceMax, ONNXOpSupportState::Unsupported},
+      {"ReduceMax", ONNXOpCode::opReduceMax, ONNXOpSupportState::unSupported},
 #line 127 "ONNXPerfect.gperf"
-      {"Split", ONNXOpCode::Split, ONNXOpSupportState::Fullysupported},
+      {"Split", ONNXOpCode::opSplit, ONNXOpSupportState::fullySupported},
       {""},
 #line 126 "ONNXPerfect.gperf"
-      {"SpaceToDepth", ONNXOpCode::SpaceToDepth, ONNXOpSupportState::Unsupported},
+      {"SpaceToDepth", ONNXOpCode::opSpaceToDepth, ONNXOpSupportState::unSupported},
 #line 37 "ONNXPerfect.gperf"
-      {"And", ONNXOpCode::And, ONNXOpSupportState::Unsupported},
+      {"And", ONNXOpCode::opAnd, ONNXOpSupportState::unSupported},
 #line 41 "ONNXPerfect.gperf"
-      {"Atan", ONNXOpCode::Atan, ONNXOpSupportState::Unsupported},
+      {"Atan", ONNXOpCode::opAtan, ONNXOpSupportState::unSupported},
       {""}, {""},
 #line 94 "ONNXPerfect.gperf"
-      {"Or", ONNXOpCode::Or, ONNXOpSupportState::Unsupported},
+      {"Or", ONNXOpCode::opOr, ONNXOpSupportState::unSupported},
 #line 130 "ONNXPerfect.gperf"
-      {"Sub", ONNXOpCode::Sub, ONNXOpSupportState::Unsupported},
+      {"Sub", ONNXOpCode::opSub, ONNXOpSupportState::unSupported},
 #line 116 "ONNXPerfect.gperf"
-      {"Scan", ONNXOpCode::Scan, ONNXOpSupportState::Unsupported},
+      {"Scan", ONNXOpCode::opScan, ONNXOpSupportState::unSupported},
 #line 149 "ONNXPerfect.gperf"
-      {"Scale", ONNXOpCode::Scale, ONNXOpSupportState::Partiallysupported},
+      {"Scale", ONNXOpCode::opScale, ONNXOpSupportState::partiallySupported},
       {""},
 #line 129 "ONNXPerfect.gperf"
-      {"Squeeze", ONNXOpCode::Squeeze, ONNXOpSupportState::Unsupported},
+      {"Squeeze", ONNXOpCode::opSqueeze, ONNXOpSupportState::unSupported},
 #line 120 "ONNXPerfect.gperf"
-      {"Sin", ONNXOpCode::Sin, ONNXOpSupportState::Unsupported},
+      {"Sin", ONNXOpCode::opSin, ONNXOpSupportState::unSupported},
 #line 121 "ONNXPerfect.gperf"
-      {"Size", ONNXOpCode::Size, ONNXOpSupportState::Unsupported},
+      {"Size", ONNXOpCode::opSize, ONNXOpSupportState::unSupported},
 #line 80 "ONNXPerfect.gperf"
-      {"LogSoftmax", ONNXOpCode::LogSoftmax, ONNXOpSupportState::Unsupported},
+      {"LogSoftmax", ONNXOpCode::opLogSoftmax, ONNXOpSupportState::unSupported},
 #line 42 "ONNXPerfect.gperf"
-      {"AveragePool", ONNXOpCode::AveragePool, ONNXOpSupportState::Fullysupported},
+      {"AveragePool", ONNXOpCode::opAveragePool, ONNXOpSupportState::fullySupported},
 #line 144 "ONNXPerfect.gperf"
-      {"DynamicSlice", ONNXOpCode::DynamicSlice, ONNXOpSupportState::Unsupported},
+      {"DynamicSlice", ONNXOpCode::opDynamicSlice, ONNXOpSupportState::unSupported},
 #line 43 "ONNXPerfect.gperf"
-      {"BatchNormalization", ONNXOpCode::BatchNormalization, ONNXOpSupportState::Fullysupported},
+      {"BatchNormalization", ONNXOpCode::opBatchNormalization, ONNXOpSupportState::fullySupported},
       {""},
 #line 57 "ONNXPerfect.gperf"
-      {"Equal", ONNXOpCode::Equal, ONNXOpSupportState::Unsupported},
+      {"Equal", ONNXOpCode::opEqual, ONNXOpSupportState::unSupported},
 #line 137 "ONNXPerfect.gperf"
-      {"Unsqueeze", ONNXOpCode::Unsqueeze, ONNXOpSupportState::Unsupported},
+      {"Unsqueeze", ONNXOpCode::opUnsqueeze, ONNXOpSupportState::unSupported},
 #line 60 "ONNXPerfect.gperf"
-      {"EyeLike", ONNXOpCode::EyeLike, ONNXOpSupportState::Unsupported},
+      {"EyeLike", ONNXOpCode::opEyeLike, ONNXOpSupportState::unSupported},
 #line 131 "ONNXPerfect.gperf"
-      {"Sum", ONNXOpCode::Sum, ONNXOpSupportState::Unsupported},
+      {"Sum", ONNXOpCode::opSum, ONNXOpSupportState::unSupported},
 #line 78 "ONNXPerfect.gperf"
-      {"Less", ONNXOpCode::Less, ONNXOpSupportState::Unsupported},
+      {"Less", ONNXOpCode::opLess, ONNXOpSupportState::unSupported},
 #line 146 "ONNXPerfect.gperf"
-      {"GivenTensorFill", ONNXOpCode::GivenTensorFill, ONNXOpSupportState::Unsupported},
+      {"GivenTensorFill", ONNXOpCode::opGivenTensorFill, ONNXOpSupportState::unSupported},
       {""},
 #line 123 "ONNXPerfect.gperf"
-      {"Softmax", ONNXOpCode::Softmax, ONNXOpSupportState::Fullysupported},
+      {"Softmax", ONNXOpCode::opSoftmax, ONNXOpSupportState::fullySupported},
 #line 52 "ONNXPerfect.gperf"
-      {"Cos", ONNXOpCode::Cos, ONNXOpSupportState::Unsupported},
+      {"Cos", ONNXOpCode::opCos, ONNXOpSupportState::unSupported},
       {""},
 #line 63 "ONNXPerfect.gperf"
-      {"GRU", ONNXOpCode::GRU, ONNXOpSupportState::Unsupported},
+      {"GRU", ONNXOpCode::opGRU, ONNXOpSupportState::unSupported},
       {""}, {""},
 #line 36 "ONNXPerfect.gperf"
-      {"Add", ONNXOpCode::Add, ONNXOpSupportState::Unsupported},
+      {"Add", ONNXOpCode::opAdd, ONNXOpSupportState::unSupported},
       {""},
 #line 138 "ONNXPerfect.gperf"
-      {"Upsample", ONNXOpCode::Upsample, ONNXOpSupportState::Unsupported},
+      {"Upsample", ONNXOpCode::opUpsample, ONNXOpSupportState::unSupported},
       {""},
 #line 73 "ONNXPerfect.gperf"
-      {"If", ONNXOpCode::If, ONNXOpSupportState::Unsupported},
+      {"If", ONNXOpCode::opIf, ONNXOpSupportState::unSupported},
 #line 132 "ONNXPerfect.gperf"
-      {"Tan", ONNXOpCode::Tan, ONNXOpSupportState::Unsupported},
+      {"Tan", ONNXOpCode::opTan, ONNXOpSupportState::unSupported},
 #line 128 "ONNXPerfect.gperf"
-      {"Sqrt", ONNXOpCode::Sqrt, ONNXOpSupportState::Unsupported},
+      {"Sqrt", ONNXOpCode::opSqrt, ONNXOpSupportState::unSupported},
 #line 150 "ONNXPerfect.gperf"
-      {"ScaledTanh", ONNXOpCode::ScaledTanh, ONNXOpSupportState::Unsupported},
+      {"ScaledTanh", ONNXOpCode::opScaledTanh, ONNXOpSupportState::unSupported},
 #line 38 "ONNXPerfect.gperf"
-      {"ArgMax", ONNXOpCode::ArgMax, ONNXOpSupportState::Unsupported},
+      {"ArgMax", ONNXOpCode::opArgMax, ONNXOpSupportState::unSupported},
       {""},
 #line 58 "ONNXPerfect.gperf"
-      {"Exp", ONNXOpCode::Exp, ONNXOpSupportState::Unsupported},
+      {"Exp", ONNXOpCode::opExp, ONNXOpSupportState::unSupported},
       {""}, {""},
 #line 147 "ONNXPerfect.gperf"
-      {"ImageScaler", ONNXOpCode::ImageScaler, ONNXOpSupportState::Unsupported},
+      {"ImageScaler", ONNXOpCode::opImageScaler, ONNXOpSupportState::unSupported},
 #line 119 "ONNXPerfect.gperf"
-      {"Sigmoid", ONNXOpCode::Sigmoid, ONNXOpSupportState::Unsupported},
+      {"Sigmoid", ONNXOpCode::opSigmoid, ONNXOpSupportState::unSupported},
 #line 96 "ONNXPerfect.gperf"
-      {"Pad", ONNXOpCode::Pad, ONNXOpSupportState::Unsupported},
+      {"Pad", ONNXOpCode::opPad, ONNXOpSupportState::unSupported},
 #line 135 "ONNXPerfect.gperf"
-      {"TopK", ONNXOpCode::TopK, ONNXOpSupportState::Unsupported},
+      {"TopK", ONNXOpCode::opTopK, ONNXOpSupportState::unSupported},
       {""}, {""}, {""},
 #line 124 "ONNXPerfect.gperf"
-      {"Softplus", ONNXOpCode::Softplus, ONNXOpSupportState::Unsupported},
+      {"Softplus", ONNXOpCode::opSoftplus, ONNXOpSupportState::unSupported},
 #line 136 "ONNXPerfect.gperf"
-      {"Transpose", ONNXOpCode::Transpose, ONNXOpSupportState::Unsupported},
+      {"Transpose", ONNXOpCode::opTranspose, ONNXOpSupportState::unSupported},
 #line 95 "ONNXPerfect.gperf"
-      {"PRelu", ONNXOpCode::PRelu, ONNXOpSupportState::Unsupported},
+      {"PRelu", ONNXOpCode::opPRelu, ONNXOpSupportState::unSupported},
       {""}, {""},
 #line 54 "ONNXPerfect.gperf"
-      {"Div", ONNXOpCode::Div, ONNXOpSupportState::Unsupported},
+      {"Div", ONNXOpCode::opDiv, ONNXOpSupportState::unSupported},
 #line 40 "ONNXPerfect.gperf"
-      {"Asin", ONNXOpCode::Asin, ONNXOpSupportState::Unsupported},
+      {"Asin", ONNXOpCode::opAsin, ONNXOpSupportState::unSupported},
       {""},
 #line 59 "ONNXPerfect.gperf"
-      {"Expand", ONNXOpCode::Expand, ONNXOpSupportState::Unsupported},
+      {"Expand", ONNXOpCode::opExpand, ONNXOpSupportState::unSupported},
       {""},
 #line 92 "ONNXPerfect.gperf"
-      {"Neg", ONNXOpCode::Neg, ONNXOpSupportState::Unsupported},
+      {"Neg", ONNXOpCode::opNeg, ONNXOpSupportState::unSupported},
 #line 133 "ONNXPerfect.gperf"
-      {"Tanh", ONNXOpCode::Tanh, ONNXOpSupportState::Unsupported},
+      {"Tanh", ONNXOpCode::opTanh, ONNXOpSupportState::unSupported},
       {""}, {""}, {""},
 #line 79 "ONNXPerfect.gperf"
-      {"Log", ONNXOpCode::Log, ONNXOpSupportState::Unsupported},
+      {"Log", ONNXOpCode::opLog, ONNXOpSupportState::unSupported},
       {""}, {""}, {""}, {""},
 #line 97 "ONNXPerfect.gperf"
-      {"Pow", ONNXOpCode::Pow, ONNXOpSupportState::Unsupported},
+      {"Pow", ONNXOpCode::opPow, ONNXOpSupportState::unSupported},
 #line 74 "ONNXPerfect.gperf"
-      {"InstanceNormalizati", ONNXOpCode::InstanceNormalizati, ONNXOpSupportState::Unsupported},
+      {"InstanceNormalizati", ONNXOpCode::opInstanceNormalizati, ONNXOpSupportState::unSupported},
       {""}, {""}, {""},
 #line 34 "ONNXPerfect.gperf"
-      {"Abs", ONNXOpCode::Abs, ONNXOpSupportState::Unsupported},
+      {"Abs", ONNXOpCode::opAbs, ONNXOpSupportState::unSupported},
 #line 140 "ONNXPerfect.gperf"
-      {"ATen", ONNXOpCode::ATen, ONNXOpSupportState::Unsupported},
+      {"ATen", ONNXOpCode::opATen, ONNXOpSupportState::unSupported},
       {""}, {""}, {""},
 #line 72 "ONNXPerfect.gperf"
-      {"Identity", ONNXOpCode::Identity, ONNXOpSupportState::Supported},
+      {"Identity", ONNXOpCode::opIdentity, ONNXOpSupportState::fullySupported},
       {""}, {""}, {""}, {""}, {""}, {""}, {""}, {""}, {""},
       {""}, {""}, {""}, {""}, {""}, {""}, {""}, {""}, {""},
       {""}, {""},
 #line 134 "ONNXPerfect.gperf"
-      {"Tile", ONNXOpCode::Tile, ONNXOpSupportState::Unsupported},
+      {"Tile", ONNXOpCode::opTile, ONNXOpSupportState::unSupported},
       {""}, {""}, {""}, {""}, {""}, {""}, {""}, {""}, {""},
 #line 35 "ONNXPerfect.gperf"
-      {"Acos", ONNXOpCode::Acos, ONNXOpSupportState::Unsupported},
+      {"Acos", ONNXOpCode::opAcos, ONNXOpSupportState::unSupported},
 #line 151 "ONNXPerfect.gperf"
-      {"ThresholdedRelu", ONNXOpCode::ThresholdedRelu, ONNXOpSupportState::Unsupported},
+      {"ThresholdedRelu", ONNXOpCode::opThresholdedRelu, ONNXOpSupportState::unSupported},
       {""}, {""},
 #line 148 "ONNXPerfect.gperf"
-      {"ParametricSoftplus", ONNXOpCode::ParametricSoftplus, ONNXOpSupportState::Unsupported}
+      {"ParametricSoftplus", ONNXOpCode::opParametricSoftplus, ONNXOpSupportState::unSupported}
     };
 
   if (len <= MAX_WORD_LENGTH && len >= MIN_WORD_LENGTH)