From f79fb58744ba70970de652e46ea039b03e9ce9ff Mon Sep 17 00:00:00 2001 From: Yinghai Lu Date: Thu, 22 Nov 2018 00:28:51 -0800 Subject: [PATCH] Make sure we bind input/output of Onnxifi op positionally (#14214) Summary: Pull Request resolved: https://github.com/pytorch/pytorch/pull/14214 This is to pick up the residual task of T36325466 to make sure that input/output binding of c2 Onnxifi op is positional. Reviewed By: dzhulgakov Differential Revision: D13134470 fbshipit-source-id: d1b916dade65c79133b86507cd54ea5166fa6810 --- caffe2/operators/onnxifi_op.cc | 6 ++++-- caffe2/operators/onnxifi_op.h | 18 ++++++++++++++++-- caffe2/opt/onnxifi_transformer.cc | 6 ++++++ caffe2/python/onnx/test_onnxifi.py | 18 ++++++++++++------ 4 files changed, 38 insertions(+), 10 deletions(-) diff --git a/caffe2/operators/onnxifi_op.cc b/caffe2/operators/onnxifi_op.cc index d965996..e988848 100644 --- a/caffe2/operators/onnxifi_op.cc +++ b/caffe2/operators/onnxifi_op.cc @@ -118,10 +118,11 @@ OnnxifiOp::BuildInitializationList( template <> bool OnnxifiOp::RunOnDevice() { + CAFFE_ENFORCE_EQ(input_desc_.size(), InputSize()); for (unsigned i = 0U; i < InputSize(); ++i) { const auto& input_tensor = Input(i); const auto tensor_dims = input_tensor.sizes(); - auto& tensor_descriptor = input_desc_.at(i); + auto& tensor_descriptor = input_desc_[i]; tensor_descriptor.tag = ONNXIFI_TAG_TENSOR_DESCRIPTOR_V1; tensor_descriptor.memoryType = ONNXIFI_MEMORY_TYPE_CPU; tensor_descriptor.dimensions = tensor_dims.size(); @@ -130,10 +131,11 @@ bool OnnxifiOp::RunOnDevice() { SetInputTensorDescriptorTypeAndBuffer(input_tensor, &tensor_descriptor); } + CAFFE_ENFORCE_EQ(output_desc_.size(), OutputSize()); for (unsigned i = 0U; i < OutputSize(); ++i) { std::vector tensor_dims; uint64_t type = SetOutputShapeAndType(i, &tensor_dims); - auto& tensor_descriptor = output_desc_.at(i); + auto& tensor_descriptor = output_desc_[i]; tensor_descriptor.tag = ONNXIFI_TAG_TENSOR_DESCRIPTOR_V1; tensor_descriptor.memoryType = 
ONNXIFI_MEMORY_TYPE_CPU; tensor_descriptor.dimensions = tensor_dims.size(); diff --git a/caffe2/operators/onnxifi_op.h b/caffe2/operators/onnxifi_op.h index 5e1329b..c3bbd2f 100644 --- a/caffe2/operators/onnxifi_op.h +++ b/caffe2/operators/onnxifi_op.h @@ -33,12 +33,18 @@ class OnnxifiOp final : public Operator { CAFFE_ENFORCE(!onnx_model_str.empty(), "onnx_model cannot be empty"); // Setup input/output descriptor templates - for (const auto& input : operator_def.input()) { + input_names_ = + this->template GetRepeatedArgument("input_names"); + output_names_ = + this->template GetRepeatedArgument("output_names"); + CAFFE_ENFORCE_EQ(input_names_.size(), operator_def.input_size()); + CAFFE_ENFORCE_EQ(output_names_.size(), operator_def.output_size()); + for (const auto& input : input_names_) { input_desc_.push_back(onnxTensorDescriptorV1()); input_desc_.back().name = input.c_str(); } int output_idx = 0; - for (const auto& output : operator_def.output()) { + for (const auto& output : output_names_) { output_desc_.push_back(onnxTensorDescriptorV1()); output_desc_.back().name = output.c_str(); @@ -174,6 +180,14 @@ class OnnxifiOp final : public Operator { // input/output descriptors std::vector input_desc_; std::vector output_desc_; + + // We bind the op input/output by position while ONNXIFI binds input/output by + // names. In addition, op input/output names can be rewritten by, for example, + // memonger. We cache the original input/output name of ONNX object here and + // bind them by position. 
+ std::vector input_names_; + std::vector output_names_; + std::vector> input_shapes_; std::vector> output_shapes_; diff --git a/caffe2/opt/onnxifi_transformer.cc b/caffe2/opt/onnxifi_transformer.cc index c7ecee0..6338b68 100644 --- a/caffe2/opt/onnxifi_transformer.cc +++ b/caffe2/opt/onnxifi_transformer.cc @@ -171,13 +171,19 @@ OperatorDef OnnxifiTransformer::BuildOnnxifiOp( } // Add the input/output + auto* input_names = op.add_arg(); + input_names->set_name("input_names"); for (const auto& input : net.external_input()) { if (!initialization_list.count(input)) { op.add_input(input); + input_names->add_strings(input); } } + auto* output_names = op.add_arg(); + output_names->set_name("output_names"); for (const auto& output : net.external_output()) { op.add_output(output); + output_names->add_strings(output); } // Add output size hints diff --git a/caffe2/python/onnx/test_onnxifi.py b/caffe2/python/onnx/test_onnxifi.py index f2cd8a9..08948cb 100644 --- a/caffe2/python/onnx/test_onnxifi.py +++ b/caffe2/python/onnx/test_onnxifi.py @@ -53,6 +53,8 @@ class OnnxifiTest(TestCase): ["X"], ["Y"], onnx_model=model_def.SerializeToString(), + input_names=["X"], + output_names=["Y"], output_shape_hint_0=[ONNXIFI_DATATYPE_FLOAT32, batch_size, 1, 3, 2]) workspace.FeedBlob("X", X) workspace.RunOperatorOnce(op) @@ -88,17 +90,21 @@ class OnnxifiTest(TestCase): outputs=[make_tensor_value_info("Y", onnx.TensorProto.FLOAT, [1, 1, 3, 3])]) model_def = make_model(graph_def, producer_name='conv-test') + # We intentionally rewrite the input/output names to test that the + # input/output binding of the c2 op is positional op = core.CreateOperator( "Onnxifi", - ["X"], - ["Y"], + ["X0"], + ["Y0"], onnx_model=model_def.SerializeToString(), - initializers=["W", "W"], + initializers=["W", "W0"], + input_names=["X"], + output_names=["Y"], output_shape_hint_0=[ONNXIFI_DATATYPE_FLOAT32, 1, 1, 3, 3]) - workspace.FeedBlob("X", X) - workspace.FeedBlob("W", W) + workspace.FeedBlob("X0", X) + 
workspace.FeedBlob("W0", W) workspace.RunOperatorOnce(op) - Y = workspace.FetchBlob("Y") + Y = workspace.FetchBlob("Y0") np.testing.assert_almost_equal(Y, Y_without_padding) -- 2.7.4