std::vector<IODescriptor>
Caffe2OpCreator::createInput(const std::string& name, const mir::Shape& shape) {
- // TODO For now we only support convolutional networks with one element per batch.
- assert(shape.rank() == 4 && shape.dim(0) == 1);
// Register a graph input: create a VariableOp node with the caller-supplied
// name/shape and hand back its single output descriptor. With the old
// rank-4 / batch-1 assert removed (diff above), inputs of any rank are
// accepted — shape validity is now the caller's responsibility.
auto variable = _graph->create<ops::VariableOp>(name, shape);
return {variable->getOutput(0)};
}
auto constant = _graph->create<mir::ops::ConstantOp>(name, _constantTensors.at(name));
_tensorNameToIODescriptor[name] = constant->getOutput(0);
} else {
- // We're dealing with graph input (assuming the picture only)
- auto onnx_input_shape = input.type().tensor_type().shape();
- assert(onnx_input_shape.dim_size() == 4);
- mir::Shape shape(4);
+ const auto& onnx_input_shape = input.type().tensor_type().shape();
+ mir::Shape shape(onnx_input_shape.dim_size());
for (int i = 0; i < onnx_input_shape.dim_size(); i++) {
assert(onnx_input_shape.dim(i).has_dim_value());
- shape.dim(i) = onnx_input_shape.dim(i).dim_value();
+ shape.dim(i) = static_cast<int32_t>(onnx_input_shape.dim(i).dim_value());
}
// TODO: Temporary solution!
auto node = _graph->create<mir::ops::VariableOp>(name, shape);
const Tensor* t = (*s->tensors())[i];
Shape input_shape = ShapeHelper::createShape(*t->shape(), t->shape()->size());
- // TODO Remove this limitation.
- assert(input_shape.dim(0) == 1);
-
auto input = _graph->create<mir::ops::VariableOp>(t->name()->c_str(), input_shape);
_tensorMap[i] = input->getOutput(0);
}