[nnc] The initial implementation of Shape op in ONNX (#2868)
author Andrew V. Tischenko / AI Tools Lab / SRR / Staff Engineer / Samsung Electronics <a.tischenko@partner.samsung.com>
Wed, 16 Jan 2019 15:32:41 +0000 (18:32 +0300)
committer Roman Rusyaev / AI Tools Lab / SRR / Staff Engineer / Samsung Electronics <r.rusyaev@samsung.com>
Wed, 16 Jan 2019 15:32:41 +0000 (18:32 +0300)
The inception_v3 model requires this operation.
In addition, the include order in some ONNX frontend files was changed.

Signed-off-by: Andrew V. Tischenko <a.tischenko@partner.samsung.com>
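
For reference, the semantics being added: ONNX Shape takes a single input tensor and returns its shape as a 1-D tensor with one element per dimension (int64 in the ONNX spec). A minimal stand-alone sketch of that contract (function name hypothetical, not part of this patch):

    #include <cstdint>
    #include <vector>
    #include <cassert>

    // ONNX Shape semantics: map a tensor's dimensions to a 1-D tensor.
    std::vector<int64_t> onnxShape(const std::vector<int64_t>& input_dims) {
      return input_dims;  // one output element per input dimension
    }

    int main() {
      // e.g. an inception_v3 input of shape [1, 3, 299, 299]
      auto out = onnxShape({1, 3, 299, 299});
      assert(out.size() == 4 && out[3] == 299);
      return 0;
    }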
contrib/nnc/passes/onnx_frontend/ONNXImporterImpl.cpp
contrib/nnc/passes/onnx_frontend/ONNXImporterImpl.h
contrib/nnc/passes/onnx_frontend/ONNXOpCreator.cpp
contrib/nnc/passes/onnx_frontend/ONNXOpCreator.h

index 3eb544c..04b7a54 100644 (file)
  * limitations under the License.
  */
 
-#include <google/protobuf/io/zero_copy_stream_impl.h>
-#include <google/protobuf/io/coded_stream.h>
-#include <functional>
-#include <iostream>
+#include "ONNXImporterImpl.h"
+#include "ONNXPerfectHash.h"
+#include "ONNXOpCreator.h"
 
 #include "core/modelIR/IrDotDumper.h"
 #include "core/modelIR/operations/ConstantOp.h"
 #include "passes/common_frontend/shape_helper.h"
 #include "pass/PassException.h"
 
-#include "ONNXImporterImpl.h"
-#include "ONNXPerfectHash.h"
-#include "ONNXOpCreator.h"
-
+#include <google/protobuf/io/zero_copy_stream_impl.h>
+#include <google/protobuf/io/coded_stream.h>
+#include <functional>
+#include <iostream>
 
 namespace nnc {
 
@@ -71,6 +70,7 @@ static void collectUnsupportedOps(std::unique_ptr<onnx::ModelProto>& model) {
       case ONNXOpCode::opRelu:
       case ONNXOpCode::opReshape:
       case ONNXOpCode::opUnsqueeze:
+      case ONNXOpCode::opShape:
       case ONNXOpCode::opSigmoid:
       case ONNXOpCode::opScale:
       case ONNXOpCode::opSoftmax:
@@ -304,6 +304,9 @@ mir::Graph *ONNXImporterImpl::createIR() {
       case ONNXOpCode::opSum:
         outputs = _opCreator.convertElementwise(inputs, mir::ops::ElementwiseOp::OpType::add);
         break;
+      case ONNXOpCode::opShape:
+        outputs = _opCreator.convertShape(inputs);
+        break;
       case ONNXOpCode::opMul:
         outputs = _opCreator.convertElementwise(inputs, mir::ops::ElementwiseOp::OpType::mul);
         break;
index 51d0693..31f4919 100644 (file)
@@ -49,7 +49,7 @@ public:
   // This map maps onnx tensor names to MIR operations/nodes
   std::map<std::string, mir::IODescriptor> _tensorNameToIODescriptor;
   // This map keeps named tensors used as graph input initializers.
-  // In addiotn here could be tensors from opGivenTensorFill and opConstant
+  // In addition, tensors from opGivenTensorFill and opConstant may be kept here
   std::map<std::string, mir::TensorVariant> _constantTensors;
   std::vector<mir::IODescriptor> _graphOutputs;
   std::string _modelFilename;
index 4d67f8a..12593dd 100644 (file)
  * limitations under the License.
  */
 
-#include <set>
-#include <cmath>
-#include <iostream>
-#include <core/modelIR/Scalar.h>
+#include "ONNXOpCreator.h"
+#include "ONNXImporterImpl.h"
+
 #include "core/modelIR/Index.h"
 #include "core/modelIR/Graph.h"
 #include "core/modelIR/Scalar.h"
 #include "passes/common_frontend/op_creator_helper.h"
 #include "passes/common_frontend/shape_helper.h"
 #include "pass/PassException.h"
-#include "ONNXOpCreator.h"
-#include "ONNXImporterImpl.h"
+
+#include <set>
+#include <cmath>
+#include <iostream>
 
 namespace nnc {
 
@@ -310,7 +311,7 @@ ONNXOpCreator::convertReshape(const std::vector<mir::IODescriptor>& inputs) {
   // The vector to build the new shape from
   std::vector<int32_t > shape_vector(cnt);
   ShapeRange out_range(shape_tensor_shape);
-  // FIXME: real type could be int64_t but _elementSize is correct that's why it works
+  // FIXME: real type should be int64_t
   Tensor<float> tensor_accessor(shape_tensor);
 
   int i = 0;
@@ -438,6 +439,19 @@ ONNXOpCreator::convertScale(const std::vector<mir::IODescriptor>& inputs,
 }
 
 std::vector<IODescriptor>
+ONNXOpCreator::convertShape(const std::vector<mir::IODescriptor>& inputs) {
+  const auto& input_shape = inputs[0].op->getOutputShape(inputs[0].index);
+  int size = input_shape.rank();
+  Shape output_shape({size});
+  std::vector<float> data(size);
+  for (int i = 0; i < size; i++) {
+    data[i] = input_shape.dim(i);
+  }
+  auto result = createOp<ops::ConstantOp>(createTensor(data.data(), output_shape));
+  return {result->getOutput(0)};
+}
+
+std::vector<IODescriptor>
 ONNXOpCreator::convertGivenTensorFill(const onnx::NodeProto& onnx_node,
                                       InputTensors& input_tensors) {
   auto values_att = findAttribute(onnx_node, "values");
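
The new convertShape above folds the statically known input shape into a ConstantOp at import time rather than emitting a runtime op. A simplified stand-alone equivalent of that loop (helper name hypothetical; note the patch stores the values as float, echoing the int64_t FIXME in convertReshape):

    #include <cstdint>
    #include <vector>

    // Materialize a static shape as constant tensor data, as convertShape does.
    std::vector<float> shapeAsConstantData(const std::vector<int32_t>& dims) {
      std::vector<float> data(dims.size());
      for (size_t i = 0; i < dims.size(); ++i)
        data[i] = static_cast<float>(dims[i]);  // float here; ONNX specifies int64
      return data;
    }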
index f5b405a..14f5234 100644 (file)
 #ifndef NNCC_ONNX_OP_CREATOR_H
 #define NNCC_ONNX_OP_CREATOR_H
 
-#include <set>
-#include <map>
-#include <vector>
-#include <memory>
 #include "core/modelIR/Graph.h"
 #include "core/modelIR/TensorVariant.h"
 #include "core/modelIR/operations/CommonProps.h"
 #include "onnx/onnx.pb.h"
 #include "ONNXOpType.h"
 
+#include <set>
+#include <map>
+#include <vector>
+#include <memory>
+
 namespace nnc {
 
 class ONNXOpCreator {
@@ -90,6 +91,9 @@ public:
                const onnx::NodeProto& onnx_node);
 
   std::vector<mir::IODescriptor>
+  convertShape(const std::vector<mir::IODescriptor>& inputs);
+
+  std::vector<mir::IODescriptor>
   convertBatchNorm(const std::vector<mir::IODescriptor>& inputs,
                    const onnx::NodeProto& onnx_node,
                    InputTensors& input_tensors);
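
The include reordering mentioned in the commit message follows one convention across all four files, visible in the hunks above: the file's own headers first, then project-wide headers, then system and third-party headers. Illustrated with paths from this patch:

    #include "ONNXOpCreator.h"        // the file's own header first
    #include "ONNXImporterImpl.h"     // other local frontend headers

    #include "core/modelIR/Graph.h"   // project-wide headers

    #include <set>                    // system / third-party headers last
    #include <iostream>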