From 65d6f1014a3689bef90cdc264e45de560f64b741 Mon Sep 17 00:00:00 2001 From: Dwarak Rajagopal Date: Thu, 14 Feb 2019 10:28:25 -0800 Subject: [PATCH] Add support of count_include_pad and end-to-end test for AveragePool (#17034) Summary: Add support of count_include_pad and an end-to-end test for AveragePool We can export AveragePool from PyTorch with the count_include_pad attribute. However, we don't directly support it in Caffe2's ONNX backend. We also want to check whether we can pass the end-to-end test for the average pool operator with the count_include_pad attribute (pytorch => onnx => caffe2) Pull Request resolved: https://github.com/pytorch/pytorch/pull/17034 Reviewed By: houseroad Differential Revision: D14060186 Pulled By: dwarakrajagopal fbshipit-source-id: 10dae532611c71f8c8cfc3fa701cc7c1c1c02695 --- caffe2/onnx/backend.cc | 63 ++--------------------------------- test/onnx/test_pytorch_onnx_caffe2.py | 9 ++++- 2 files changed, 10 insertions(+), 62 deletions(-) diff --git a/caffe2/onnx/backend.cc b/caffe2/onnx/backend.cc index 2c060f0..fae237a 100644 --- a/caffe2/onnx/backend.cc +++ b/caffe2/onnx/backend.cc @@ -343,8 +343,8 @@ Caffe2Backend::get_special_operators() const { {"Constant", &Caffe2Backend::CreateConstant}, {"ConstantOfShape", &Caffe2Backend::CreateConstantOfShape}, {"Conv", &Caffe2Backend::CreateConvPoolOpBase}, - {"AveragePool", &Caffe2Backend::CreatePadPool}, - {"GlobalAveragePool", &Caffe2Backend::CreatePadPool}, + {"AveragePool", &Caffe2Backend::CreateConvPoolOpBase}, + {"GlobalAveragePool", &Caffe2Backend::CreateConvPoolOpBase}, {"GlobalMaxPool", &Caffe2Backend::CreateConvPoolOpBase}, {"MaxPool", &Caffe2Backend::CreateConvPoolOpBase}, {"Reshape", &Caffe2Backend::CreateReshape}, @@ -545,65 +545,6 @@ Caffe2Ops Caffe2Backend::CreateConvPoolOpBase( return CommonOnnxNodeToCaffe2Ops(onnx_node, ctx); } -Caffe2Ops Caffe2Backend::CreatePadPool( - OnnxNode* onnx_node, - const ConversionContext& ctx) { - auto& node = onnx_node->node; - auto& attributes = 
onnx_node->attributes; - Caffe2Ops ret; - // Pad - bool padding = false; - const std::string pad_name = ctx.opset_version() < 2 ? "paddings" : "pads"; - const auto pad_input = dummy_->NewDummyName(); - if (attributes.HasAttribute("count_include_pad") && - attributes.HasAttribute(pad_name)) { - auto count_include_pad = attributes.get("count_include_pad", 0L); - ::google::protobuf::RepeatedField<::google::protobuf::int64> pads; - pads = - attributes - .get<::google::protobuf::RepeatedField<::google::protobuf::int64>>( - pad_name); - if (count_include_pad == 1 && pads.size() == 4 && - !(pads.Get(0) == 0 && pads.Get(1) == 0 && pads.Get(2) == 0 && - pads.Get(3) == 0)) { - padding = true; - attributes.remove(pad_name); - caffe2::Argument arg_pads; - arg_pads.add_ints(pads.Get(0)); - arg_pads.add_ints(pads.Get(1)); - arg_pads.add_ints(pads.Get(2)); - arg_pads.add_ints(pads.Get(3)); - arg_pads.set_name("pads"); - auto* c2_op = ret.ops.Add(); - BuildOperator( - c2_op, "PadImage", {node.input(0)}, {pad_input}, {arg_pads}); - } else if (count_include_pad == 1) { - std::string str; - bool pads_flag = false; - str += "["; - for (const auto& i : pads) { - str += c10::to_string(i) + ","; - pads_flag = pads_flag || i > 0; - } - str += "]"; - if (pads_flag == true) { - CAFFE_THROW( - "Caffe2 only supports padding 2D Tensor, whereas padding is ", str); - } - } - } - // Pool - auto c2_ops = Caffe2Backend::CreateConvPoolOpBase(onnx_node, ctx); - auto* pool_op = c2_ops.ops.Mutable(0); - if (padding) { - pool_op->set_input(0, pad_input); - } - auto* c2_op = ret.ops.Add(); - c2_op->CopyFrom(*pool_op); - - return ret; -} - Caffe2Ops Caffe2Backend::CreateReshape( OnnxNode* onnx_node, const ConversionContext& ctx) { diff --git a/test/onnx/test_pytorch_onnx_caffe2.py b/test/onnx/test_pytorch_onnx_caffe2.py index 58f255f..c31d7c2 100644 --- a/test/onnx/test_pytorch_onnx_caffe2.py +++ b/test/onnx/test_pytorch_onnx_caffe2.py @@ -664,11 +664,18 @@ class TestCaffe2Backend(unittest.TestCase): 
model = nn.MaxPool2d(5, padding=2) self.run_model_test(model, train=False, batch_size=BATCH_SIZE) - @unittest.skip("C2 and PyTorch have small difference in padding implementation") def test_avgpool2d(self): model = nn.AvgPool2d(5, padding=(2)) self.run_model_test(model, train=False, batch_size=BATCH_SIZE) + def test_avgpool2d_with_count_include_pad_set_false(self): + model = nn.AvgPool2d(7, padding=(2), count_include_pad=False) + self.run_model_test(model, train=False, batch_size=BATCH_SIZE) + + def test_avgpool2d_with_count_include_pad_set_true(self): + model = nn.AvgPool2d(7, padding=(2), count_include_pad=True) + self.run_model_test(model, train=False, batch_size=BATCH_SIZE) + def test_avgpool2d_no_padding(self): model = nn.AvgPool2d(5) self.run_model_test(model, train=False, batch_size=BATCH_SIZE) -- 2.7.4