#include "caffe2_op_creator.h"
#include "caffe2_proto_helper.h"

#include "core/modelIR/Shape.h"
#include "core/modelIR/operations/VariableOp.h"

#include "pass/PassException.h"

namespace nnc {

using namespace ::caffe2;
using VariableOp = nnc::mir::ops::VariableOp;
using nnc::mir::Shape;
Caffe2Importer::Caffe2Importer(std::string predictNet, std::string initNet,
}
std::vector<IODescriptor>
-Caffe2OpCreator::createInput(const std::string& input_name, const mir::Shape& input_shape) {
+Caffe2OpCreator::createInput(const std::string& name, const mir::Shape& shape) {
// TODO For now we only support convolutional networks with one element per batch.
- assert(input_shape.rank() == 4 && input_shape.dim(0) == 1);
-
- // TODO Do not transpose data on input and remove transpose.
- auto transposed_shape = mir::Shape{input_shape.dim(0), input_shape.dim(2),
- input_shape.dim(3), input_shape.dim(1)};
- auto variable = _graph->create<ops::VariableOp>(input_name, transposed_shape);
- return {convertMIRToCaffe(variable->getOutput(0))};
+ assert(shape.rank() == 4 && shape.dim(0) == 1);
+ auto variable = _graph->create<ops::VariableOp>(name, shape);
+ return {variable->getOutput(0)};
}
} // namespace nnc
const ::caffe2::OperatorDef&,
const MIRTensors&);
- std::vector<mir::IODescriptor> createInput(const std::string&, const mir::Shape&);
+ std::vector<mir::IODescriptor> createInput(const std::string& name, const mir::Shape& shape);
std::vector<mir::IODescriptor> convertMaxPool(const std::vector<mir::IODescriptor>&,
const ::caffe2::OperatorDef&);
const auto& blob_name = layer.top(i);
const auto& blob_shape = params.shape(num_shapes == 1 ? 0 : i);
Shape shape = ShapeHelper::createShape(blob_shape.dim(), blob_shape.dim_size());
-
- // TODO For now we only support convolutional networks with one element per batch.
- assert(shape.rank() == 4 && shape.dim(0) == 1);
-
- // TODO Do not transpose data on input and remove transpose.
- shape = Shape{shape.dim(0), shape.dim(2), shape.dim(3), shape.dim(1)};
auto variable = createOp<ops::VariableOp>(blob_name, shape);
- descriptors.push_back(convertMIRToCaffe(variable->getOutput(0)));
+ descriptors.push_back(variable->getOutput(0));
}
return descriptors;
result = createOp<ops::BiasAddOp>(layer.name() + ".bias", result->getOutput(0), bias_weights);
}
- // FIXME Workaround until the tests for style transfer network are regenerated.
- if (layer.top(0) == "output")
- return {result->getOutput(0)};
-
return {convertMIRToCaffe(result->getOutput(0))};
}