From: Lutz Roeder Date: Tue, 19 Mar 2019 03:51:12 +0000 (-0700) Subject: Fix Caffe2 operator schemas (#15462) (#13229) (#18109) X-Git-Tag: accepted/tizen/6.5/unified/20211028.231830~755 X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=195cba500f2c98c10f3d4b8fc3cca6493099b6df;p=platform%2Fupstream%2Fpytorch.git Fix Caffe2 operator schemas (#15462) (#13229) (#18109) Summary: Maratyszcza harouwu yinghai This is broken since #13065. `c_str()` returns a pointer that isn't permanent. Pull Request resolved: https://github.com/pytorch/pytorch/pull/18109 Differential Revision: D14516622 Pulled By: ezyang fbshipit-source-id: 7113d92eac4f61479c4c7b323cf78cc8aa00b17e --- diff --git a/caffe2/operators/quantized/int8_average_pool_op.cc b/caffe2/operators/quantized/int8_average_pool_op.cc index 70df83f..89d9d7b 100644 --- a/caffe2/operators/quantized/int8_average_pool_op.cc +++ b/caffe2/operators/quantized/int8_average_pool_op.cc @@ -20,19 +20,11 @@ data into the output blob Y for further processing. std::function AveragePoolDocGenerator( const char* dim, bool relu_fused = false) { - auto suffix = relu_fused ? " Output will go through rectified linear " - "function, where y = max(0, x)." - : ""; return [=](OpSchema& schema) { string doc = "AveragePool{dim} {pool_doc}"; c10::ReplaceAll(doc, "{dim}", dim); c10::ReplaceAll(doc, "{pool_doc}", kAveragePoolDoc_int8); schema.SetDoc(doc); - string output_doc = - "Output data tensor from average pooling across the input " - "tensor. Dimensions will vary based on various kernel, stride, and pad " - "sizes.{suffix}"; - c10::ReplaceAll(output_doc, "{suffix}", suffix); schema.Input( 0, "X", @@ -42,7 +34,14 @@ std::function AveragePoolDocGenerator( "size, C is the number of channels, and H and W are the height and the " "width of the data. The corresponding permutation of dimensions is " "used in the latter case."); - schema.Output(0, "Y", output_doc.c_str()); + schema.Output(0, "Y", relu_fused ? 
+ "Output data tensor from average pooling across the input " + "tensor. Dimensions will vary based on various kernel, stride, and pad " + "sizes. Output will go through rectified linear " + "function, where y = max(0, x)." : + "Output data tensor from average pooling across the input " + "tensor. Dimensions will vary based on various kernel, stride, and pad " + "sizes."); }; } diff --git a/caffe2/operators/quantized/int8_conv_op.cc b/caffe2/operators/quantized/int8_conv_op.cc index bc02158..68ab374 100644 --- a/caffe2/operators/quantized/int8_conv_op.cc +++ b/caffe2/operators/quantized/int8_conv_op.cc @@ -19,9 +19,6 @@ why they are separate files. std::function ConvDocGenerator( const char* dim, bool relu_fused = false) { - auto suffix = relu_fused ? " Output will go through rectified linear " - "function, where y = max(0, x)." - : ""; return [=](OpSchema& schema) { string doc = R"DOC( The convolution operator consumes an input vector, a {dim}filter blob @@ -29,11 +26,6 @@ and a bias blob and computes the output. {conv_doc})DOC"; c10::ReplaceAll(doc, "{dim}", dim); c10::ReplaceAll(doc, "{conv_doc}", kConvDoc_int8); schema.SetDoc(doc); - string output_doc = - "Output data blob that contains the result of the " - "convolution. The output dimensions are functions of the kernel size, " - "stride size, and pad lengths.{suffix}"; - c10::ReplaceAll(output_doc, "{suffix}", suffix); schema.Input( 0, "X", @@ -53,7 +45,14 @@ and a bias blob and computes the output. {conv_doc})DOC"; "bias", "The 1D bias blob that is added through the " "convolution; has size (M)."); - schema.Output(0, "Y", output_doc.c_str()); + schema.Output(0, "Y", relu_fused ? + "Output data blob that contains the result of the " + "convolution. The output dimensions are functions of the kernel size, " + "stride size, and pad lengths. Output will go through rectified linear " + "function, where y = max(0, x)." : + "Output data blob that contains the result of the " + "convolution. 
The output dimensions are functions of the kernel size, " + "stride size, and pad lengths."); }; } diff --git a/caffe2/operators/quantized/int8_max_pool_op.cc b/caffe2/operators/quantized/int8_max_pool_op.cc index 64a1507..d1a58ba 100644 --- a/caffe2/operators/quantized/int8_max_pool_op.cc +++ b/caffe2/operators/quantized/int8_max_pool_op.cc @@ -18,18 +18,10 @@ data into the output blob Y for further processing. std::function MaxPoolDocGenerator( const char* dim, bool relu_fused = false) { - auto suffix = relu_fused ? " Output will go through rectified linear " - "function, where y = max(0, x)." - : ""; return [=](OpSchema& schema) { string doc = "MaxPool{dim} {pool_doc}"; c10::ReplaceAll(doc, "{dim}", dim); c10::ReplaceAll(doc, "{pool_doc}", kMaxPoolDoc_int8); - string output_doc = - "Output data tensor from max pooling across the input " - "tensor. Dimensions will vary based on various kernel, stride, and pad " - "sizes.{suffix}"; - c10::ReplaceAll(output_doc, "{suffix}", suffix); schema.SetDoc(doc); schema.Input( 0, @@ -40,7 +32,14 @@ std::function MaxPoolDocGenerator( "size, C is the number of channels, and H and W are the height and the " "width of the data. The corresponding permutation of dimensions is " "used in the latter case."); - schema.Output(0, "Y", output_doc.c_str()); + schema.Output(0, "Y", relu_fused ? + "Output data tensor from max pooling across the input " + "tensor. Dimensions will vary based on various kernel, stride, and pad " + "sizes. Output will go through rectified linear " + "function, where y = max(0, x)." : + "Output data tensor from max pooling across the input " + "tensor. Dimensions will vary based on various kernel, stride, and pad " + "sizes."); }; }