The inception_v3 model requires this operation.
In addition, I reordered the includes in some ONNX files so that project headers come before system headers.
Signed-off-by: Andrew V. Tischenko a.tischenko@partner.samsung.com
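
For context: ONNX Shape returns the dimensions of its input as a 1-D int64 tensor. Since shapes are statically known at import time, the importer folds the result into a constant. A minimal standalone sketch of that folding (StaticShape and foldShape are illustrative names, not the MIR API):

    #include <cstdint>
    #include <vector>

    // Stand-in for a statically known tensor shape.
    struct StaticShape { std::vector<int32_t> dims; };

    // Shape semantics: an input of shape [2, 3, 4] yields the 1-D tensor [2, 3, 4].
    std::vector<int64_t> foldShape(const StaticShape& s) {
      return std::vector<int64_t>(s.dims.begin(), s.dims.end());
    }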
* limitations under the License.
*/
-#include <google/protobuf/io/zero_copy_stream_impl.h>
-#include <google/protobuf/io/coded_stream.h>
-#include <functional>
-#include <iostream>
+#include "ONNXImporterImpl.h"
+#include "ONNXPerfectHash.h"
+#include "ONNXOpCreator.h"
#include "core/modelIR/IrDotDumper.h"
#include "core/modelIR/operations/ConstantOp.h"
#include "passes/common_frontend/shape_helper.h"
#include "pass/PassException.h"
-#include "ONNXImporterImpl.h"
-#include "ONNXPerfectHash.h"
-#include "ONNXOpCreator.h"
-
+#include <google/protobuf/io/zero_copy_stream_impl.h>
+#include <google/protobuf/io/coded_stream.h>
+#include <functional>
+#include <iostream>
namespace nnc {
case ONNXOpCode::opRelu:
case ONNXOpCode::opReshape:
case ONNXOpCode::opUnsqueeze:
+ case ONNXOpCode::opShape:
case ONNXOpCode::opSigmoid:
case ONNXOpCode::opScale:
case ONNXOpCode::opSoftmax:
case ONNXOpCode::opSum:
outputs = _opCreator.convertElementwise(inputs, mir::ops::ElementwiseOp::OpType::add);
break;
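+ // Shape is resolved at import time into a constant holding the input's dimensions.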
+ case ONNXOpCode::opShape:
+ outputs = _opCreator.convertShape(inputs);
+ break;
case ONNXOpCode::opMul:
outputs = _opCreator.convertElementwise(inputs, mir::ops::ElementwiseOp::OpType::mul);
break;
// Maps ONNX tensor names to MIR operations/nodes
std::map<std::string, mir::IODescriptor> _tensorNameToIODescriptor;
// This map keeps named tensors used as graph input initializers.
- // In addiotn here could be tensors from opGivenTensorFill and opConstant
+ // In addition, tensors from opGivenTensorFill and opConstant can end up here
std::map<std::string, mir::TensorVariant> _constantTensors;
std::vector<mir::IODescriptor> _graphOutputs;
std::string _modelFilename;
* limitations under the License.
*/
-#include <set>
-#include <cmath>
-#include <iostream>
-#include <core/modelIR/Scalar.h>
+#include "ONNXOpCreator.h"
+#include "ONNXImporterImpl.h"
+
#include "core/modelIR/Index.h"
#include "core/modelIR/Graph.h"
#include "core/modelIR/Scalar.h"
#include "passes/common_frontend/op_creator_helper.h"
#include "passes/common_frontend/shape_helper.h"
#include "pass/PassException.h"
-#include "ONNXOpCreator.h"
-#include "ONNXImporterImpl.h"
+
+#include <set>
+#include <cmath>
+#include <iostream>
namespace nnc {
// The vector to build the new shape from
std::vector<int32_t> shape_vector(cnt);
ShapeRange out_range(shape_tensor_shape);
- // FIXME: real type could be int64_t but _elementSize is correct that's why it works
+ // FIXME: the real type should be int64_t; this works only because _elementSize matches
Tensor<float> tensor_accessor(shape_tensor);
int i = 0;
}
std::vector<IODescriptor>
+ONNXOpCreator::convertShape(const std::vector<mir::IODescriptor>& inputs) {
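+ // Fold the input's statically known shape into a 1-D constant tensor.
+ // Note: the values are stored as floats, although ONNX defines Shape output as int64.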
+ const auto& input_shape = inputs[0].op->getOutputShape(inputs[0].index);
+ int size = input_shape.rank();
+ Shape output_shape({size});
+ std::vector<float> data(size);
+ for (int i = 0; i < size; i++) {
+ data[i] = input_shape.dim(i);
+ }
+ auto result = createOp<ops::ConstantOp>(createTensor(data.data(), output_shape));
+ return {result->getOutput(0)};
+}
+
+std::vector<IODescriptor>
ONNXOpCreator::convertGivenTensorFill(const onnx::NodeProto& onnx_node,
InputTensors& input_tensors) {
auto values_att = findAttribute(onnx_node, "values");
#ifndef NNCC_ONNX_OP_CREATOR_H
#define NNCC_ONNX_OP_CREATOR_H
-#include <set>
-#include <map>
-#include <vector>
-#include <memory>
#include "core/modelIR/Graph.h"
#include "core/modelIR/TensorVariant.h"
#include "core/modelIR/operations/CommonProps.h"
#include "onnx/onnx.pb.h"
#include "ONNXOpType.h"
+#include <set>
+#include <map>
+#include <vector>
+#include <memory>
+
namespace nnc {
class ONNXOpCreator {
const onnx::NodeProto& onnx_node);
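+ // Converts ONNX Shape by folding the input's static shape into a 1-D constant.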
std::vector<mir::IODescriptor>
+ convertShape(const std::vector<mir::IODescriptor>& inputs);
+
+ std::vector<mir::IODescriptor>
convertBatchNorm(const std::vector<mir::IODescriptor>& inputs,
const onnx::NodeProto& onnx_node,
InputTensors& input_tensors);