From: Tongliang Liao
Date: Tue, 12 Feb 2019 21:18:13 +0000 (-0800)
Subject: Export ReduceMean/ReduceFrontMean/ReduceBackMean (Caffe2) to ReduceMean (ONNX). ...
X-Git-Tag: accepted/tizen/6.5/unified/20211028.231830~1333
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=0eee56fff7c5da9261a70328f089c4172b8c1367;p=platform%2Fupstream%2Fpytorch.git

Export ReduceMean/ReduceFrontMean/ReduceBackMean (Caffe2) to ReduceMean (ONNX). (#16727)

Summary:
The second input (`lengths`) is not supported.
Pull Request resolved: https://github.com/pytorch/pytorch/pull/16727

Differential Revision: D14054105

Pulled By: houseroad

fbshipit-source-id: 36b8d00460f9623696439e1bd2a6bc60b7bb263c
---

diff --git a/caffe2/onnx/onnx_exporter.cc b/caffe2/onnx/onnx_exporter.cc
index a0d3d38..8857898 100644
--- a/caffe2/onnx/onnx_exporter.cc
+++ b/caffe2/onnx/onnx_exporter.cc
@@ -6,6 +6,7 @@
 #include "caffe2/utils/map_utils.h"
 #include "caffe2/utils/proto_utils.h"
 
+#include <numeric>
 #include <unordered_set>
 
 namespace caffe2 {
@@ -301,6 +302,9 @@ OnnxExporter::get_special_operators() const {
           {"Reshape", &OnnxExporter::CreateReshapeNodes},
           {"Slice", &OnnxExporter::CreateSliceNodes},
           {"ChannelShuffle", &OnnxExporter::CreateChannelShuffleNodes},
+          {"ReduceMean", &OnnxExporter::CreateReduceMeanNodes},
+          {"ReduceFrontMean", &OnnxExporter::CreateReduceMeanNodes},
+          {"ReduceBackMean", &OnnxExporter::CreateReduceMeanNodes},
           {"ResizeNearest", &OnnxExporter::CreateUpsampleNodes}};
   return kSpecialOperators;
 }
@@ -790,6 +794,73 @@ ConvertedResult OnnxExporter::CreateChannelShuffleNodes(
   return result;
 }
 
+ConvertedResult OnnxExporter::CreateReduceMeanNodes(
+    const caffe2::OperatorDef& def,
+    const std::unordered_map<std::string, caffe2::TensorShape>& shapes) {
+  CAFFE_ENFORCE_GE(def.input_size(), 1);
+  CAFFE_ENFORCE_LE(def.input_size(), 2);
+  CAFFE_ENFORCE_EQ(def.input_size(), 1, "Input \"lengths\" is not supported.");
+  CAFFE_ENFORCE_GE(def.output_size(), 1);
+  const auto& x = def.input(0);
+  const auto& y = def.output(0);
+  const auto& dims = shapes.at(x).dims();
+
+  ConvertedResult result;
+  auto& nodes = result.first;
+  auto& const_tensors = result.second;
+  std::unordered_map<std::string, const caffe2::Argument*> args;
+  for (const auto& a : def.arg()) {
+    args.emplace(a.name(), &a);
+  }
+
+  std::vector<int64_t> axes;
+  int64_t keepdims = 1;
+
+  if (def.type() == "ReduceMean") {
+    // axes
+    auto it = args.find("axes");
+    if (it == args.end()) {
+      axes.resize(dims.size());
+      std::iota(axes.begin(), axes.end(), 0);
+    } else {
+      axes.assign(it->second->ints().begin(), it->second->ints().end());
+    }
+
+    // keepdims
+    it = args.find("keepdims");
+    if (it != args.end()) {
+      keepdims = it->second->i();
+    }
+  } else {
+    // num_reduce_dim
+    auto it = args.find("num_reduce_dim");
+    const int64_t num_reduce_dim = it == args.end() ? 1 : it->second->i();
+    CAFFE_ENFORCE_LE(num_reduce_dim, dims.size());
+    axes.resize(num_reduce_dim);
+
+    int64_t start_dim = 0;
+    if (def.type() == "ReduceFrontMean") {
+      start_dim = 0;
+    } else if (def.type() == "ReduceBackMean") {
+      start_dim = dims.size() - axes.size();
+    }
+    std::iota(axes.begin(), axes.end(), start_dim);
+
+    keepdims = 0;
+  }
+
+  nodes.emplace_back(MakeNode("ReduceMean",
+                              { x },
+                              { y },
+                              {
+                                  MakeAttribute("axes", axes),
+                                  MakeAttribute("keepdims", keepdims),
+                              },
+                              def.name()));
+
+  return result;
+}
+
 ConvertedResult OnnxExporter::CreateUpsampleNodes(
     const caffe2::OperatorDef& def,
     const std::unordered_map<std::string, caffe2::TensorShape>& shapes) {
diff --git a/caffe2/onnx/onnx_exporter.h b/caffe2/onnx/onnx_exporter.h
index 7ad8f7c..30a5233 100644
--- a/caffe2/onnx/onnx_exporter.h
+++ b/caffe2/onnx/onnx_exporter.h
@@ -89,6 +89,10 @@ class CAFFE2_API OnnxExporter {
       const caffe2::OperatorDef& def,
       const std::unordered_map<std::string, caffe2::TensorShape>& shapes);
 
+  ConvertedResult CreateReduceMeanNodes(
+      const caffe2::OperatorDef& def,
+      const std::unordered_map<std::string, caffe2::TensorShape>& shapes);
+
   ConvertedResult CreateConcatNodes(
       const caffe2::OperatorDef& def,
       const std::unordered_map<std::string, caffe2::TensorShape>& shapes);
diff --git a/caffe2/operators/reduce_front_back_mean_ops.cc b/caffe2/operators/reduce_front_back_mean_ops.cc
index 5d1a20d..564f266 100644
--- a/caffe2/operators/reduce_front_back_mean_ops.cc
+++ b/caffe2/operators/reduce_front_back_mean_ops.cc
@@ -193,7 +193,8 @@ Y: [4.3333335 2.1666667 6.]
     .TensorInferenceFunction([](const OperatorDef& def,
                                 const vector<TensorShape>& in) {
       REDUCTION_OP_SHAPE_INFERENCE(true)
-    });
+    })
+    .InheritOnnxSchema("ReduceMean");
 OPERATOR_SCHEMA(ReduceFrontMeanGradient).NumInputs(2, 3).NumOutputs(1);
 
 REGISTER_CPU_OPERATOR(ReduceBackMean, SumReduceDimsOp<CPUContext, false, true>);
@@ -285,7 +286,8 @@ Y: [[3.7777777 4.888889 ]]
     .TensorInferenceFunction([](const OperatorDef& def,
                                 const vector<TensorShape>& in) {
       REDUCTION_OP_SHAPE_INFERENCE(false)
-    });
+    })
+    .InheritOnnxSchema("ReduceMean");
 OPERATOR_SCHEMA(ReduceBackMeanGradient).NumInputs(2, 3).NumOutputs(1);
 
 #undef REDUCTION_OP_SHAPE_INFERENCE
diff --git a/caffe2/operators/reduce_ops.cc b/caffe2/operators/reduce_ops.cc
index 56ccf47..b4fce58 100644
--- a/caffe2/operators/reduce_ops.cc
+++ b/caffe2/operators/reduce_ops.cc
@@ -506,7 +506,8 @@ Y:
        "keepdims",
        "(*int*): set to 1 to keep the reduced dimension(s) (default=1), else set to 0 to not keep the reduced dimension(s)")
     .Input(0, "X", "(*Tensor`<float>`*): input tensor")
-    .Output(0, "Y", "(*Tensor`<float>`*): reduced tensor");
+    .Output(0, "Y", "(*Tensor`<float>`*): reduced tensor")
+    .InheritOnnxSchema("ReduceMean");
 
 OPERATOR_SCHEMA(ReduceL2Gradient).NumInputs(3).NumOutputs(1);
diff --git a/caffe2/python/onnx/tests/c2_ref_test.py b/caffe2/python/onnx/tests/c2_ref_test.py
index 6c1848a..629cc76 100644
--- a/caffe2/python/onnx/tests/c2_ref_test.py
+++ b/caffe2/python/onnx/tests/c2_ref_test.py
@@ -102,6 +102,59 @@ class TestCaffe2Basic(DownloadingTestCase):
         output = c2_rep.run({"X": X, "Y": Y})
         np.testing.assert_almost_equal(output["W3"], W_ref)
 
+    def test_reducemean(self):
+        X = np.random.randn(4, 6, 10, 5, 3).astype(np.float32)
+
+        predict_net = caffe2_pb2.NetDef()
+        predict_net.name = 'test-reducemean-net'
+        predict_net.external_input[:] = ['X']
+        predict_net.external_output[:] = [
+            'reduce_front_mean',
+            'reduce_back_mean',
+            'reduce_mean_0',
+            'reduce_mean_1',
+        ]
+        predict_net.op.extend([
+            core.CreateOperator(
+                'ReduceFrontMean',
+                inputs=['X'],
+                outputs=['reduce_front_mean'],
+                num_reduce_dim=2,
+            ),
+            core.CreateOperator(
+                'ReduceBackMean',
+                inputs=['X'],
+                outputs=['reduce_back_mean'],
+                num_reduce_dim=2,
+            ),
+            core.CreateOperator(
+                'ReduceMean',
+                inputs=['X'],
+                outputs=['reduce_mean_0'],
+                axes=[1, 3],
+                keepdims=0,
+            ),
+            core.CreateOperator(
+                'ReduceMean',
+                inputs=['X'],
+                outputs=['reduce_mean_1'],
+                axes=[1, 3],
+                keepdims=1,
+            ),
+        ])
+        ws, c2_outputs = c2_native_run_net(
+            init_net=None,
+            predict_net=predict_net,
+            inputs=[X])
+
+        onnx_model = c2_onnx.caffe2_net_to_onnx_model(
+            predict_net=predict_net,
+            value_info={
+                'X': (onnx.mapping.NP_TYPE_TO_TENSOR_TYPE[X.dtype], X.shape)
+            })
+        onnx_outputs = c2.run_model(onnx_model, inputs=[X])
+        self.assertSameOutputs(c2_outputs, onnx_outputs)
+
     def test_upsample(self):
         X = np.random.randn(1, 1, 2, 2).astype(np.float32)
         width_scale = 2.0
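
A minimal numpy sketch (illustrative only, not part of the commit) of the axis mapping CreateReduceMeanNodes performs: ReduceFrontMean/ReduceBackMean with num_reduce_dim=k are exported as ONNX ReduceMean over the first/last k axes with keepdims=0, while Caffe2 ReduceMean forwards axes (defaulting to all axes) and keepdims (defaulting to 1) unchanged. The reduce_mean_axes helper below is a hypothetical name used only for this sketch.

    import numpy as np

    def reduce_mean_axes(op_type, ndim, num_reduce_dim=1, axes=None):
        # Mirrors the axis selection logic described above (sketch only).
        if op_type == "ReduceMean":
            return list(range(ndim)) if axes is None else list(axes)
        if op_type == "ReduceFrontMean":
            return list(range(num_reduce_dim))               # first k dims
        if op_type == "ReduceBackMean":
            return list(range(ndim - num_reduce_dim, ndim))  # last k dims
        raise ValueError(op_type)

    X = np.random.randn(4, 6, 10, 5, 3).astype(np.float32)

    # ReduceFrontMean, num_reduce_dim=2 -> ONNX ReduceMean(axes=[0, 1], keepdims=0)
    front = X.mean(axis=tuple(reduce_mean_axes("ReduceFrontMean", X.ndim, 2)))
    assert front.shape == (10, 5, 3)

    # ReduceBackMean, num_reduce_dim=2 -> ONNX ReduceMean(axes=[3, 4], keepdims=0)
    back = X.mean(axis=tuple(reduce_mean_axes("ReduceBackMean", X.ndim, 2)))
    assert back.shape == (4, 6, 10)

    # Caffe2 ReduceMean(axes=[1, 3], keepdims=1) -> ONNX ReduceMean(axes=[1, 3], keepdims=1)
    kept = X.mean(axis=tuple(reduce_mean_axes("ReduceMean", X.ndim, axes=[1, 3])), keepdims=True)
    assert kept.shape == (4, 1, 10, 1, 3)

Note the differing keepdims defaults: Caffe2 ReduceMean keeps the reduced dimensions unless keepdims=0 is given, whereas the front/back variants always drop them, which is why the exporter sets keepdims = 0 in that branch.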