nncc_include(ExternalSourceTools)
nncc_include(OptionTools)
-
envoption(ONNX_URL https://github.com/onnx/onnx/archive/v1.3.0.zip)
ExternalSource_Download(ONNX ${ONNX_URL})
case SupportedCaffe2OpType::givenTensorInt64Fill:
case SupportedCaffe2OpType::mul:
case SupportedCaffe2OpType::relu:
+ case SupportedCaffe2OpType::resizeNearest:
case SupportedCaffe2OpType::sigmoid:
case SupportedCaffe2OpType::softmax:
case SupportedCaffe2OpType::sum:
void Caffe2Importer::createMIRNodesFromOp(const OperatorDef& op) {
std::vector<mir::IODescriptor> outputs;
-
// If an op input has not been seen yet, treat it as a model input
if (op.input_size() > 0
&& _blobNameToIODescriptor.find(op.input(0)) == _blobNameToIODescriptor.end()) {
case SupportedCaffe2OpType::relu:
outputs = _opCreator->convertRelu(inputs);
break;
+ case SupportedCaffe2OpType::resizeNearest:
+ outputs = _opCreator->convertResize(inputs, op);
+ break;
case SupportedCaffe2OpType::sigmoid:
outputs = _opCreator->convertSigmoid(inputs);
break;
}
const std::map<std::string, SupportedCaffe2OpType> Caffe2Importer::_operatorTypes = {
-{"Add", SupportedCaffe2OpType::add},
-{"AveragePool", SupportedCaffe2OpType::averagePool},
-{"Conv", SupportedCaffe2OpType::conv},
-{"Concat", SupportedCaffe2OpType::concat},
-{"ConstantFill", SupportedCaffe2OpType::constantFill},
-{"Dropout", SupportedCaffe2OpType::dropout},
-{"FC", SupportedCaffe2OpType::FC},
-{"GivenTensorFill", SupportedCaffe2OpType::givenTensorFill},
-{"MaxPool", SupportedCaffe2OpType::maxPool},
-{"Mul", SupportedCaffe2OpType::mul},
-{"Relu", SupportedCaffe2OpType::relu},
-{"Sigmoid", SupportedCaffe2OpType::sigmoid},
-{"Softmax", SupportedCaffe2OpType::softmax},
-{"SpatialBN", SupportedCaffe2OpType::spatialBN},
-{"Sum", SupportedCaffe2OpType::sum},
-{"Clip", SupportedCaffe2OpType::clip},
-{"Reshape", SupportedCaffe2OpType::reshape},
-{"GivenTensorInt64Fill", SupportedCaffe2OpType::givenTensorInt64Fill},
+ {"Add", SupportedCaffe2OpType::add},
+ {"AveragePool", SupportedCaffe2OpType::averagePool},
+ {"Conv", SupportedCaffe2OpType::conv},
+ {"Concat", SupportedCaffe2OpType::concat},
+ {"ConstantFill", SupportedCaffe2OpType::constantFill},
+ {"Dropout", SupportedCaffe2OpType::dropout},
+ {"FC", SupportedCaffe2OpType::FC},
+ {"GivenTensorFill", SupportedCaffe2OpType::givenTensorFill},
+ {"MaxPool", SupportedCaffe2OpType::maxPool},
+ {"Mul", SupportedCaffe2OpType::mul},
+ {"Relu", SupportedCaffe2OpType::relu},
+ {"ResizeNearest", SupportedCaffe2OpType::resizeNearest},
+ {"Sigmoid", SupportedCaffe2OpType::sigmoid},
+ {"Softmax", SupportedCaffe2OpType::softmax},
+ {"SpatialBN", SupportedCaffe2OpType::spatialBN},
+ {"Sum", SupportedCaffe2OpType::sum},
+ {"Clip", SupportedCaffe2OpType::clip},
+ {"Reshape", SupportedCaffe2OpType::reshape},
+ {"GivenTensorInt64Fill", SupportedCaffe2OpType::givenTensorInt64Fill},
};
} // namespace nnc
#include "core/modelIR/operations/PoolOp.h"
#include "core/modelIR/operations/ReluOp.h"
#include "core/modelIR/operations/ReshapeOp.h"
+#include "core/modelIR/operations/ResizeOp.h"
#include "core/modelIR/operations/ScaleOp.h"
#include "core/modelIR/operations/SigmoidOp.h"
#include "core/modelIR/operations/SoftmaxOp.h"
return {relu->getOutput(0)};
}
+std::vector<IODescriptor> Caffe2OpCreator::convertResize(const std::vector<IODescriptor>& inputs,
+ const ::caffe2::OperatorDef& op) {
+ // assume NCHW and convert to MIR (NHWC)
+ std::vector<float> scales(4);
+ assert(inputs[0].op->getOutputShape(0).rank() == 4 && "only 4-D tensors are supported");
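+ // One scale per NHWC dimension; only H and W are actually resized.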
+ scales[0] = 1;
+ // scales default to 1 (no-op) when the attribute is absent
+ scales[1] = getSingleArgument(op, "height_scale", 1.0f);
+ scales[2] = getSingleArgument(op, "width_scale", 1.0f);
+ scales[3] = 1;
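+ // convertCaffeToMIR/convertMIRToCaffe wrap the op with the NCHW<->NHWC layout conversion.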
+ auto resize = createOp<ops::ResizeOp>(
+ convertCaffeToMIR(inputs[0]), ops::ResizeOp::ResizeMethod::nearestNeighbor, scales);
+ return {convertMIRToCaffe(resize->getOutput(0))};
+}
+
std::vector<IODescriptor>
Caffe2OpCreator::convertSigmoid(const std::vector<IODescriptor>& inputs) {
auto result = createOp<ops::SigmoidOp>("Sigmoid", inputs[0]);
std::vector<mir::IODescriptor> convertRelu(const std::vector<mir::IODescriptor>&);
+ std::vector<mir::IODescriptor> convertResize(const std::vector<mir::IODescriptor>&,
+ const ::caffe2::OperatorDef&);
+
std::vector<mir::IODescriptor> convertSigmoid(const std::vector<mir::IODescriptor>&);
std::vector<mir::IODescriptor> convertSoftmax(const std::vector<mir::IODescriptor>&,
mul,
relu,
reshape,
+ resizeNearest,
sigmoid,
softmax,
spatialBN,
case ONNXOpCode::opGemm:
case ONNXOpCode::opMax:
case ONNXOpCode::opMaxPool:
+ case ONNXOpCode::opUpsample:
case ONNXOpCode::opMul:
case ONNXOpCode::opPad:
case ONNXOpCode::opRelu:
buffer_size = tensor->int64_data_size() * element_size;
auto src_data64 = reinterpret_cast<const int64_t *>(tensor->int64_data().data());
std::shared_ptr<char> shared_buffer (new char[buffer_size], std::default_delete<char[]>());
- auto dst_data = reinterpret_cast<float *>(shared_buffer.get());
+ auto dst_data = reinterpret_cast<float*>(shared_buffer.get());
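+ // Convert each int64 element to float; very large values may lose precision.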
for (int i = 0; i < tensor->int64_data_size(); i++) {
- dst_data[i] = static_cast<float>(src_data64 [i]);
+ dst_data[i] = static_cast<float>(src_data64[i]);
}
return mir::TensorVariant(shape, shared_buffer, type, element_size);
} else if (tensor->raw_data().size() != 0) {
auto* onnx_op_type = ONNXPerfectHash::getONNXOpType(onnx_node.op_type().c_str(), onnx_node.op_type().size());
switch (onnx_op_type->opCode) {
case ONNXOpCode::opConv: {
- assert(dynamic_cast<mir::ops::TransposeOp *>(op) != nullptr);
- if (auto* conv = dynamic_cast<mir::ops::Conv2DOp *>(op->getPrevNodes()[0].op)) {
+ assert(dynamic_cast<mir::ops::TransposeOp*>(op) != nullptr);
+ if (auto* conv = dynamic_cast<mir::ops::Conv2DOp*>(op->getPrevNodes()[0].op)) {
std::cout << " (Conv2D)Weights" << conv->getKernel().getShape() << " Strides" <<
- conv->getStrides() << " Padding(" << conv->getPaddingBefore()[0] <<
- " " << conv->getPaddingBefore()[1] << ")" << ":(" <<
- conv->getPaddingAfter()[0] << " " << conv->getPaddingAfter()[1] << ")";
+ conv->getStrides() << " Padding(" << conv->getPaddingBefore()[0] <<
+ " " << conv->getPaddingBefore()[1] << ")" << ":(" <<
+ conv->getPaddingAfter()[0] << " " << conv->getPaddingAfter()[1] << ")";
} else {
- auto *dept = dynamic_cast<mir::ops::DepthwiseConv2DOp *>(op->getPrevNodes()[0].op);
+ auto* dept = dynamic_cast<mir::ops::DepthwiseConv2DOp*>(op->getPrevNodes()[0].op);
assert(dept);
std::cout << " (DepthwiseConv2D)Weights" << dept->getKernel().getShape() << " Strides" <<
dept->getStrides() << " Padding(" << dept->getPaddingBefore()[0] <<
pool = dynamic_cast<mir::ops::PoolOp *>(op->getPrevNodes()[0].op);
}
assert(pool);
- std::cout << " Kernel " << pool->getWindowShape() << " Strides " << pool->getStrides();
+ std::cout << " Kernel " << pool->getWindowShape() << " Strides " << pool->getStrides();
std::cout << " Padding before: " << pool->getPaddingBefore()[0] << " " <<
- pool->getPaddingBefore()[1];
+ pool->getPaddingBefore()[1];
std::cout << " After: " << pool->getPaddingAfter()[0] << " " << pool->getPaddingAfter()[1];
break;
}
case ONNXOpCode::opReshape:
outputs = _opCreator.convertReshape(inputs);
break;
+ case ONNXOpCode::opUpsample:
+ outputs = _opCreator.convertUpsample(inputs, onnx_node);
+ break;
case ONNXOpCode::opRelu:
outputs = _opCreator.convertRelu(inputs);
break;
#include "core/modelIR/operations/PoolOp.h"
#include "core/modelIR/operations/ReluOp.h"
#include "core/modelIR/operations/ReshapeOp.h"
+#include "core/modelIR/operations/ResizeOp.h"
#include "core/modelIR/operations/ScaleOp.h"
#include "core/modelIR/operations/SigmoidOp.h"
#include "core/modelIR/operations/SoftmaxOp.h"
namespace nnc {
using namespace mir;
-static const onnx::AttributeProto* findAttribute(const onnx::NodeProto& onnx_node,
- std::string name) {
+
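+// Attribute lookup helpers: each getter returns a {found, value} pair for the named attribute.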
+inline static const onnx::AttributeProto* findAttribute(const onnx::NodeProto& onnx_node,
+ const std::string& name) {
for (auto& att : onnx_node.attribute()) {
if (!att.name().compare(name)) {
return &att;
return nullptr;
}
-static std::pair<bool, int> getIntAttribute(const onnx::NodeProto& onnx_node,
- std::string name = "axis") {
- for (auto att : onnx_node.attribute()) {
- if (!att.name().compare(name)) {
- assert(att.type() == onnx::AttributeProto_AttributeType::AttributeProto_AttributeType_INT);
- return {true, att.i()};
- }
- }
- return {false, 0};
+inline static std::pair<bool, int> getIntAttribute(const onnx::NodeProto& onnx_node,
+ const std::string& name = "axis") {
+ auto result = findAttribute(onnx_node, name);
+ if (!result)
+ return {false, 0};
+ assert(result->type() == onnx::AttributeProto_AttributeType::AttributeProto_AttributeType_INT);
+ return {true, result->i()};
}
-static std::pair<bool, float> getFloatAttribute(const onnx::NodeProto& onnx_node,
- std::string name) {
- for (auto att : onnx_node.attribute()) {
- if (!att.name().compare(name)) {
- assert(att.type() == onnx::AttributeProto_AttributeType::AttributeProto_AttributeType_FLOAT);
- return {true, att.f()};
- }
- }
- return {false, 0.0};
+inline static std::pair<bool, std::string> getStringAttribute(const onnx::NodeProto& onnx_node,
+ const std::string& name) {
+ auto result = findAttribute(onnx_node, name);
+ if (!result)
+ return {false, ""};
+ assert(result->type() == onnx::AttributeProto_AttributeType::AttributeProto_AttributeType_STRING);
+ return {true, result->s()};
+}
+
+inline static std::pair<bool, float> getFloatAttribute(const onnx::NodeProto& onnx_node,
+ const std::string& name) {
+ auto result = findAttribute(onnx_node, name);
+ if (!result)
+ return {false, 0.0};
+ assert(result->type() == onnx::AttributeProto_AttributeType::AttributeProto_AttributeType_FLOAT);
+ return {true, result->f()};
}
// Create a vector tensor filled with the given value
cdata.strides_shape = ShapeHelper::createShape(strides->ints(), strides->ints_size());
if (pads) {
- // FIXME: it's for 2D only
- assert(pads->ints_size() == 4);
- // FIXME: how to use padding here?
+ assert(pads->ints_size() >= 2);
cdata.padding_before[0] = pads->ints(0);
cdata.padding_before[1] = pads->ints(1);
// TODO: ONNX padding could be for the beginning and ending along each axis that's why we
auto out_channels = kernel_tensor.getShape().dim(3);
bool found;
int num_groups;
- std::tie (found, num_groups) = getIntAttribute(onnx_node, "group");
+ std::tie(found, num_groups) = getIntAttribute(onnx_node, "group");
if (!found)
num_groups = 1;
bool is_depthwise = (num_groups != 1) && (in_group_size == 1) && (out_channels == num_groups);
}
std::vector<IODescriptor>
+ONNXOpCreator::convertUpsample(const std::vector<mir::IODescriptor>& inputs,
+ const onnx::NodeProto& node) {
+ bool success;
+ std::string mode;
+ std::tie(success, mode) = getStringAttribute(node, "mode");
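+ // Fall back to "nearest", the default Upsample mode in ONNX.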
+ if (!success) mode = "nearest";
+ assert(mode == "nearest" && "Unsupported upsample mode!");
+
+ assert(inputs.size() > 1); // relies on constants being lifted into the initializer list
+ auto* scales = dynamic_cast<mir::ops::ConstantOp*>(inputs[1].op);
+ assert(scales && "The scales input must be a constant tensor");
+ auto scales_tensor = Tensor<float>(scales->getValue());
+ int rank = inputs[0].op->getOutputShape(0).rank();
+ assert(scales_tensor.getShape().numElements() == rank &&
+        "The number of elements of 'scales' should be the same as the rank of input 'X'");
+ assert(rank == 4 && "Only rank 4 is supported");
+ std::vector<float> scales_vector(4);
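+ // Reorder the scale factors from ONNX NCHW to MIR NHWC.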
+ const int onnx2mir[] = {0, 3, 1, 2};
+ assert(scales_tensor.getShape().rank() == 1 && "Scales must be a 1-D tensor");
+ for (int i = 0; i < scales_tensor.getShape().numElements(); i++)
+ scales_vector[onnx2mir[i]] = scales_tensor.atOffset(i);
+ return {convertMIRToONNX(createOp<ops::ResizeOp>(convertONNXToMIR(inputs[0]),
+ ops::ResizeOp::ResizeMethod::nearestNeighbor,
+ scales_vector)->getOutput(0))};
+}
+
+std::vector<IODescriptor>
ONNXOpCreator::convertBatchNorm(const std::vector<mir::IODescriptor>& inputs,
const onnx::NodeProto& onnx_node,
InputTensors& input_tensors) {
(onnx_node.attribute(0).tensors_size() == 0));
assert(!onnx_node.attribute(0).name().compare("value"));
auto name = onnx_node.output(0);
- auto &onnx_tensor = onnx_node.attribute(0).t();
+ auto& onnx_tensor = onnx_node.attribute(0).t();
auto mir_tensor = ONNXImporterImpl::createTensor(&onnx_tensor);
input_tensors.insert(std::make_pair(name, mir_tensor));
auto op = _graph->create<mir::ops::ConstantOp>(name, mir_tensor)->getOutput(0);
bool trans_a = found ? ivalue : 0;
std::tie (found, ivalue) = getIntAttribute(onnx_node, "transB");
bool trans_b = found ? ivalue : 0;
- std::tie (found, ivalue) = getIntAttribute(onnx_node, "broadcast");
+ std::tie(found, ivalue) = getIntAttribute(onnx_node, "broadcast");
bool broadcast = found ? ivalue : 0;
std::tie (found, fvalue) = getFloatAttribute(onnx_node, "alpha");
float alpha_val = found ? fvalue : 1.0;
const onnx::NodeProto& onnx_node);
std::vector<mir::IODescriptor>
+ convertUpsample(const std::vector<mir::IODescriptor>& inputs,
+ const onnx::NodeProto& onnx_node);
+
+ std::vector<mir::IODescriptor>
convertElementwise(const std::vector<mir::IODescriptor>& inputs,
mir::ops::ElementwiseOp::OpType op_type);