return outputs;
}
+template <class OperationAttributes>
static void convertConvolutionParam(const ConvolutionParameter &conv_param,
- std::vector<std::int32_t> &strides,
- std::vector<std::int32_t> &padding_before,
- std::vector<std::int32_t> &padding_after)
+ OperationAttributes &attributes)
{
std::int32_t stride_h, stride_w;
if (conv_param.has_stride_h() || conv_param.has_stride_w())
stride_h = conv_param.stride(0);
stride_w = conv_param.stride(1);
}
- strides = {stride_h, stride_w};
+ attributes.strides = {stride_h, stride_w};
std::int32_t pad_h, pad_w;
if (conv_param.has_pad_h() || conv_param.has_pad_w())
pad_h = conv_param.pad(0);
pad_w = conv_param.pad(1);
}
- padding_after = padding_before = {pad_h, pad_w};
+ attributes.padding_after = attributes.padding_before = {pad_h, pad_w};
}
// NOTE(review): this span is unresolved diff residue (literal "+"/"-" patch markers)
// that splices fragments of at least two functions: a checkConvolution() signature
// fused onto the body of a convolution-conversion routine. It references `layer` and
// `result`, neither of which is declared in the visible signature, and braces do not
// balance because context lines were elided. Left byte-identical; comments only.
void CaffeOpCreator::checkConvolution(const ConvolutionParameter &opts,
const std::vector<mir::Operation::Output *> &inputs)
{
// NOTE(review): "¶ms" is mojibake — almost certainly "&params" mangled by an
// HTML-entity pass ("&para" -> "¶"). TODO restore in the real file.
const auto ¶ms = layer.convolution_param();
// Refactor in flight: the strides/padding out-parameter triple is being replaced by
// a single Conv2DOpAttributes struct ("+" lines), with explicit NCHW data format.
- std::vector<std::int32_t> strides;
- std::vector<std::int32_t> padding_before;
- std::vector<std::int32_t> padding_after;
+ Conv2DOpAttributes attributes;
- convertConvolutionParam(params, strides, padding_before, padding_after);
+ convertConvolutionParam(params, attributes);
+ attributes.data_format = DataFormat::NCHW;
// Caffe convolution kernels are expected to be 4-D blobs.
assert(layer.blobs(0).shape().dim_size() == 4);
auto kernel_weights = convertBlob(layer.blobs(0));
// TODO handle properly kernel with layer multiplier
// Depthwise path: kernel axes 2 and 3 swapped before creating the constant op.
auto transposed_tensor = transposeTensor<0, 1, 3, 2>(kernel_weights);
auto kernel = createOp<ops::ConstantOp>(transposed_tensor)->getOutput(0);
// "+": the new DepthwiseConv2DOp overload takes the attributes struct instead of the
// loose strides/padding/data-format arguments.
- result = createOp<ops::DepthwiseConv2DOp>(inputs[0], kernel, strides, padding_before,
- padding_after, DataFormat::NCHW)
- ->getOutput(0);
+ result = createOp<ops::DepthwiseConv2DOp>(inputs[0], kernel, attributes)->getOutput(0);
}
else
{
}
// Regular (non-depthwise) path: kernel laid out for Conv2DOp via a different transpose.
kernel_weights = transposeTensor<3, 0, 1, 2>(kernel_weights);
auto kernel = createOp<ops::ConstantOp>(kernel_weights)->getOutput(0);
- result = createOp<ops::Conv2DOp>(inputs[0], kernel, strides, padding_before, padding_after,
- DataFormat::NCHW)
- ->getOutput(0);
+ result = createOp<ops::Conv2DOp>(inputs[0], kernel, attributes)->getOutput(0);
}
// Add the bias, if any.
// NOTE(review): diff-residue fragment of a deconvolution-conversion routine whose
// name/first parameter were elided (only the trailing `inputs` parameter survives).
// The tail (L78-style `return {concat->getOutput(0)};`) belongs to yet another
// function that was spliced on. Left byte-identical; comments only.
const std::vector<mir::Operation::Output *> &inputs)
{
auto &opts = layer.convolution_param();
// Same refactor as the convolution path: loose out-parameters replaced by a
// Deconv2DOpAttributes struct, data format set explicitly to NCHW.
- std::vector<std::int32_t> strides;
- std::vector<std::int32_t> padding_before;
- std::vector<std::int32_t> padding_after;
+ Deconv2DOpAttributes attributes;
- convertConvolutionParam(opts, strides, padding_before, padding_after);
+ convertConvolutionParam(opts, attributes);
+ attributes.data_format = DataFormat::NCHW;
auto kernel_weights = convertBlob(layer.blobs(0));
// Deconvolution kernel layout differs from convolution; note the grouped-kernel fixup.
kernel_weights = transposeTensor<2, 3, 1, 0>(kernel_weights);
kernel_weights = fixGroupedKernel(opts.group(), kernel_weights);
}
auto kernel = createOp<ops::ConstantOp>(kernel_weights)->getOutput(0);
// "+": DeConv2DOp now takes the attributes struct instead of five loose arguments.
- auto result = createOp<ops::DeConv2DOp>(inputs[0], kernel, strides, padding_before, padding_after,
- DataFormat::NCHW)
- ->getOutput(0);
+ auto result = createOp<ops::DeConv2DOp>(inputs[0], kernel, attributes)->getOutput(0);
// bias_term is optional (so might not be present) and defaults to true
if (opts.bias_term())
// NOTE(review): the lines below belong to a different (concat-producing) function.
return {concat->getOutput(0)};
}
-static void
-convertPoolingParam(const caffe::PoolingParameter ¶ms, const mir::Shape &input_shape,
- std::vector<std::int32_t> &window_size, std::vector<std::int32_t> &strides,
- std::vector<int32_t> &padding_before, std::vector<int32_t> &padding_after)
+template <class PoolingAttributes>
+static void convertPoolingParam(const caffe::PoolingParameter ¶ms,
+ const mir::Shape &input_shape, PoolingAttributes &attributes)
{
std::int32_t kernel_h, kernel_w;
assert(!params.global_pooling());
kernel_h = params.kernel_h();
kernel_w = params.kernel_w();
}
- window_size = {kernel_h, kernel_w};
+ attributes.window = {kernel_h, kernel_w};
std::int32_t stride_h, stride_w;
if (params.has_stride_h() || params.has_stride_w())
{
stride_h = stride_w = params.stride();
}
- strides = {stride_h, stride_w};
+ attributes.strides = {stride_h, stride_w};
std::int32_t pad_h, pad_w;
if (params.has_pad_h() || params.has_pad_w())
pad_h = pad_w = params.pad();
}
- padding_before = padding_after = {pad_h, pad_w};
+ attributes.padding_before = attributes.padding_after = {pad_h, pad_w};
// Caffe uses different formula for computing output shape than MIR. Adjust padding so that
// the output shape stays the same.
for (int i = 0; i < num_spatial_dims; ++i)
{
// Assuming NCHW format.
- const std::int32_t padded_input = input_shape.dim(2 + i) + padding_before[i] + padding_after[i];
- if ((padded_input - window_size[i]) % strides[i] != 0)
- ++padding_after[i];
+ const std::int32_t padded_input =
+ input_shape.dim(2 + i) + attributes.padding_before[i] + attributes.padding_after[i];
+ if ((padded_input - attributes.window[i]) % attributes.strides[i] != 0)
+ ++attributes.padding_after[i];
}
}
// NOTE(review): diff-residue interior of a pooling-conversion function — its head
// (name/signature) was elided above and its tail continues past this view. The "+"
// side wraps each switch case in braces so the per-case attributes struct can be
// declared locally. Left byte-identical; comments only.
assert(inputs.size() == 1);
auto input = inputs[0];
// "-": old API filled four loose out-parameters via a single convertPoolingParam call.
- std::vector<std::int32_t> window_size;
- std::vector<std::int32_t> strides;
- std::vector<std::int32_t> padding_before, padding_after;
- convertPoolingParam(params, input->getShape(), window_size, strides, padding_before,
- padding_after);
-
mir::Operation::Output *result;
switch (params.pool())
{
case PoolingParameter::AVE:
// "+": average pooling now builds AvgPool2DOpAttributes and passes the struct.
- result = createOp<ops::AvgPool2DOp>(input, window_size, strides, padding_before,
- padding_after, true, mir::DataFormat::NCHW)
- ->getOutput(0);
+ {
+ AvgPool2DOpAttributes attributes_avg;
+ convertPoolingParam(params, input->getShape(), attributes_avg);
+ result = createOp<ops::AvgPool2DOp>(input, attributes_avg)->getOutput(0);
break;
+ }
case PoolingParameter::MAX:
// "+": max pooling likewise switches to MaxPool2DOpAttributes.
- result = createOp<ops::MaxPool2DOp>(input, window_size, strides, padding_before,
- padding_after, mir::DataFormat::NCHW)
- ->getOutput(0);
+ {
+ MaxPool2DOpAttributes attributes_max;
+ convertPoolingParam(params, input->getShape(), attributes_max);
+ result = createOp<ops::MaxPool2DOp>(input, attributes_max)->getOutput(0);
break;
+ }
// NOTE(review): presumably other pooling modes (e.g. STOCHASTIC) are unsupported —
// any unexpected mode trips the assert below.
default:
assert(false);
}