From: Michael Liu
Date: Thu, 14 Feb 2019 04:51:55 +0000 (-0800)
Subject: Apply modernize-use-override - 2/2
X-Git-Tag: accepted/tizen/6.5/unified/20211028.231830~1294
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=92a516b9ff42225feaa5887eae8d9075239597fd;p=platform%2Fupstream%2Fpytorch.git

Apply modernize-use-override - 2/2

Summary: Use C++11’s override and remove virtual where applicable. Changes are automatically generated.

Reviewed By: Orvid

Differential Revision: D14054721

fbshipit-source-id: 15d266fa1779b1e3ea6270f00841d7fb1e4d44ee
---

diff --git a/caffe2/operators/conv_transpose_op_cudnn.cc b/caffe2/operators/conv_transpose_op_cudnn.cc
index cd2ec26..a4ee8d2 100644
--- a/caffe2/operators/conv_transpose_op_cudnn.cc
+++ b/caffe2/operators/conv_transpose_op_cudnn.cc
@@ -53,7 +53,7 @@ class CudnnConvTransposeOpBase : public ConvTransposeUnpoolBase {
     CUDNN_ENFORCE(cudnnCreateConvolutionDescriptor(&conv_desc_));
   }

-  ~CudnnConvTransposeOpBase() {
+  ~CudnnConvTransposeOpBase() override {
     CUDNN_ENFORCE(cudnnDestroyTensorDescriptor(bottom_desc_));
     CUDNN_ENFORCE(cudnnDestroyFilterDescriptor(filter_desc_));
     if (InputSize() == 3) {
@@ -88,7 +88,7 @@ class CudnnConvTransposeOp final : public CudnnConvTransposeOpBase {
   CudnnConvTransposeOp(const OperatorDef& operator_def, Workspace* ws)
       : CudnnConvTransposeOpBase(operator_def, ws) {}

-  ~CudnnConvTransposeOp() {}
+  ~CudnnConvTransposeOp() override {}

   bool RunOnDevice() override;

@@ -111,7 +111,7 @@ class CudnnConvTransposeGradientOp final : public CudnnConvTransposeOpBase {
         "If bias is not present, you should not have 3 grad output.");
   }

-  ~CudnnConvTransposeGradientOp() {}
+  ~CudnnConvTransposeGradientOp() override {}

   bool RunOnDevice() override;

diff --git a/caffe2/operators/counter_ops.cc b/caffe2/operators/counter_ops.cc
index 2a2278c..e971aae 100644
--- a/caffe2/operators/counter_ops.cc
+++ b/caffe2/operators/counter_ops.cc
@@ -136,7 +136,7 @@ namespace {
 class CounterSerializer : public BlobSerializerBase {
  public:
   CounterSerializer() {}
-  ~CounterSerializer() {}
+  ~CounterSerializer() override {}

   void Serialize(
       const void* pointer,
diff --git a/caffe2/operators/dataset_ops.cc b/caffe2/operators/dataset_ops.cc
index 4a8efc4..f371c9f 100644
--- a/caffe2/operators/dataset_ops.cc
+++ b/caffe2/operators/dataset_ops.cc
@@ -1419,7 +1419,7 @@ SHOULD_NOT_DO_GRADIENT(PackRecords);
 class TreeCursorSerializer : public BlobSerializerBase {
  public:
   TreeCursorSerializer() {}
-  ~TreeCursorSerializer() {}
+  ~TreeCursorSerializer() override {}

   void Serialize(
       const void* pointer,
diff --git a/caffe2/operators/dropout_op_cudnn.cc b/caffe2/operators/dropout_op_cudnn.cc
index ffec6b1..8f3241e 100644
--- a/caffe2/operators/dropout_op_cudnn.cc
+++ b/caffe2/operators/dropout_op_cudnn.cc
@@ -36,7 +36,7 @@ class CuDNNDropoutOp final : public Operator {
     }
   }

-  ~CuDNNDropoutOp() noexcept {
+  ~CuDNNDropoutOp() noexcept override {
     CUDNN_ENFORCE(cudnnDestroyTensorDescriptor(data_desc_));
     CUDNN_ENFORCE(cudnnDestroyDropoutDescriptor(dropout_desc_));
   }
@@ -98,7 +98,7 @@ class CuDNNDropoutGradientOp final : public Operator {
     CAFFE_ENFORCE(scratch_blob_);
   }

-  ~CuDNNDropoutGradientOp() noexcept {
+  ~CuDNNDropoutGradientOp() noexcept override {
     CUDNN_ENFORCE(cudnnDestroyTensorDescriptor(data_desc_));
     CUDNN_ENFORCE(cudnnDestroyDropoutDescriptor(dropout_desc_));
   }
diff --git a/caffe2/operators/index_ops.cc b/caffe2/operators/index_ops.cc
index b06c706..b4db62b 100644
--- a/caffe2/operators/index_ops.cc
+++ b/caffe2/operators/index_ops.cc
@@ -248,7 +248,7 @@ SHOULD_NOT_DO_GRADIENT(IndexSize);
 class IndexSerializer : public BlobSerializerBase {
  public:
   IndexSerializer() {}
-  ~IndexSerializer() {}
+  ~IndexSerializer() override {}

   void Serialize(
       const void* pointer,
diff --git a/caffe2/operators/local_response_normalization_op_cudnn.cc b/caffe2/operators/local_response_normalization_op_cudnn.cc
index 10d1a9a..02937a7 100644
--- a/caffe2/operators/local_response_normalization_op_cudnn.cc
+++ b/caffe2/operators/local_response_normalization_op_cudnn.cc
@@ -23,7 +23,7 @@ class CuDNNLRNOp final : public Operator {
         cudnnSetLRNDescriptor(norm_desc_, size_, alpha_, beta_, bias_));
   }

-  ~CuDNNLRNOp() {
+  ~CuDNNLRNOp() override {
     CUDNN_ENFORCE(cudnnDestroyTensorDescriptor(data_desc_));
     CUDNN_ENFORCE(cudnnDestroyLRNDescriptor(norm_desc_));
   }
@@ -65,7 +65,7 @@ class CuDNNLRNGradientOp final : public Operator {
         cudnnSetLRNDescriptor(norm_desc_, size_, alpha_, beta_, bias_));
   }

-  ~CuDNNLRNGradientOp() {
+  ~CuDNNLRNGradientOp() override {
     CUDNN_ENFORCE(cudnnDestroyTensorDescriptor(data_desc_));
     CUDNN_ENFORCE(cudnnDestroyLRNDescriptor(norm_desc_));
   }
diff --git a/caffe2/operators/mem_query_op.cu b/caffe2/operators/mem_query_op.cu
index fcd14a9..a9ea235 100644
--- a/caffe2/operators/mem_query_op.cu
+++ b/caffe2/operators/mem_query_op.cu
@@ -8,7 +8,7 @@ class GetGPUMemoryUsageOp final : public Operator {
  public:
   GetGPUMemoryUsageOp(const OperatorDef& operator_def, Workspace* ws)
       : Operator(operator_def, ws) {}
-  ~GetGPUMemoryUsageOp() {}
+  ~GetGPUMemoryUsageOp() override {}

   bool RunOnDevice() override {
     CHECK_EQ(InputSize(), 0);
diff --git a/caffe2/operators/norm_planar_yuv_op.cc b/caffe2/operators/norm_planar_yuv_op.cc
index 724ce14..19fc08a 100644
--- a/caffe2/operators/norm_planar_yuv_op.cc
+++ b/caffe2/operators/norm_planar_yuv_op.cc
@@ -12,7 +12,7 @@ class NormalizePlanarYUVOp : public Operator {
   USE_OPERATOR_FUNCTIONS(CPUContext);
   using Operator::Operator;

-  bool RunOnDevice() {
+  bool RunOnDevice() override {
     const auto& X = Input(0);
     const auto& M = Input(1); // mean
     const auto& S = Input(2); // standard deviation
diff --git a/caffe2/operators/operator_fallback_gpu_test.cc b/caffe2/operators/operator_fallback_gpu_test.cc
index 7b8bb97..5606bac 100644
--- a/caffe2/operators/operator_fallback_gpu_test.cc
+++ b/caffe2/operators/operator_fallback_gpu_test.cc
@@ -11,7 +11,7 @@ class IncrementByOneOp final : public Operator {
  public:
   IncrementByOneOp(const OperatorDef& def, Workspace* ws)
       : Operator(def, ws) {}
-  bool RunOnDevice() {
+  bool RunOnDevice() override {
     const auto& in = Input(0);
     auto* out = Output(0, in.sizes(), at::dtype());

diff --git a/caffe2/operators/order_switch_ops_cudnn.cc b/caffe2/operators/order_switch_ops_cudnn.cc
index 46704c8..6d4f7c5 100644
--- a/caffe2/operators/order_switch_ops_cudnn.cc
+++ b/caffe2/operators/order_switch_ops_cudnn.cc
@@ -22,7 +22,7 @@ class CuDNNOrderSwithOpBase : public Operator {
     CUDNN_ENFORCE(cudnnCreateTensorDescriptor(&Y_desc_));
   }

-  virtual ~CuDNNOrderSwithOpBase() {
+  ~CuDNNOrderSwithOpBase() override {
     CUDNN_ENFORCE(cudnnDestroyTensorDescriptor(X_desc_));
     CUDNN_ENFORCE(cudnnDestroyTensorDescriptor(Y_desc_));
   }
diff --git a/caffe2/operators/pool_op_cudnn.cc b/caffe2/operators/pool_op_cudnn.cc
index 120d29b..45dfc41 100644
--- a/caffe2/operators/pool_op_cudnn.cc
+++ b/caffe2/operators/pool_op_cudnn.cc
@@ -86,7 +86,7 @@ class CuDNNPoolOp final : public ConvPoolOpBase {
     }
   }

-  ~CuDNNPoolOp() {
+  ~CuDNNPoolOp() override {
     CUDNN_ENFORCE(cudnnDestroyTensorDescriptor(X_desc_));
     CUDNN_ENFORCE(cudnnDestroyTensorDescriptor(Y_desc_));
     CUDNN_ENFORCE(cudnnDestroyPoolingDescriptor(pooling_desc_));
@@ -226,7 +226,7 @@ class CuDNNPoolGradientOp final : public ConvPoolOpBase {
     }
   }

-  ~CuDNNPoolGradientOp() {
+  ~CuDNNPoolGradientOp() override {
     CUDNN_ENFORCE(cudnnDestroyTensorDescriptor(X_desc_));
     CUDNN_ENFORCE(cudnnDestroyTensorDescriptor(Y_desc_));
     CUDNN_ENFORCE(cudnnDestroyPoolingDescriptor(pooling_desc_));
diff --git a/caffe2/operators/softmax_op_cudnn.cc b/caffe2/operators/softmax_op_cudnn.cc
index 28880e9..38f9c40 100644
--- a/caffe2/operators/softmax_op_cudnn.cc
+++ b/caffe2/operators/softmax_op_cudnn.cc
@@ -22,7 +22,7 @@ class CuDNNSoftmaxOp final : public Operator {
     CUDNN_ENFORCE(cudnnCreateTensorDescriptor(&desc_));
   }

-  ~CuDNNSoftmaxOp() {
+  ~CuDNNSoftmaxOp() override {
     CUDNN_ENFORCE(cudnnDestroyTensorDescriptor(desc_));
   }

@@ -84,7 +84,7 @@ class CuDNNSoftmaxGradientOp final : public Operator {
     CUDNN_ENFORCE(cudnnCreateTensorDescriptor(&desc_));
   }

-  ~CuDNNSoftmaxGradientOp() {
+  ~CuDNNSoftmaxGradientOp() override {
     CUDNN_ENFORCE(cudnnDestroyTensorDescriptor(desc_));
   }

diff --git a/caffe2/operators/stylizer_ops.cc b/caffe2/operators/stylizer_ops.cc
index 76bdbce..9b4e69f 100644
--- a/caffe2/operators/stylizer_ops.cc
+++ b/caffe2/operators/stylizer_ops.cc
@@ -74,7 +74,7 @@ class PackedInt8BGRANHWCToNCHWCStylizerPreprocessOp
       Workspace* ws)
       : Operator(operator_def, ws), ws_(ws) {}

-  bool RunOnDevice() {
+  bool RunOnDevice() override {
     const auto& X = Input(0);
     const auto& mean = Input(1);

@@ -410,7 +410,7 @@ class BRGNCHWCToPackedInt8BGRAStylizerDeprocessOp
   // Expect this many channels as output
   static constexpr int kOutputChannels = 4;

-  bool RunOnDevice() {
+  bool RunOnDevice() override {
     const auto& X = Input(0);
     const auto& mean = Input(1);

diff --git a/caffe2/operators/text_file_reader_utils_test.cc b/caffe2/operators/text_file_reader_utils_test.cc
index 43d8d14..ddec638 100644
--- a/caffe2/operators/text_file_reader_utils_test.cc
+++ b/caffe2/operators/text_file_reader_utils_test.cc
@@ -66,7 +66,7 @@ TEST(TextFileReaderUtilsTest, TokenizeTest) {
     ChunkProvider(const std::string& str) : ch(str) {}
     std::string ch;
     size_t charIdx{0};
-    void operator()(CharRange& range) {
+    void operator()(CharRange& range) override {
       if (charIdx >= ch.size()) {
         range.start = nullptr;
         range.end = nullptr;
@@ -77,7 +77,7 @@ TEST(TextFileReaderUtilsTest, TokenizeTest) {
         charIdx = endIdx;
       }
     };
-    void reset() {
+    void reset() override {
       charIdx = 0;
     }
   };
diff --git a/caffe2/operators/transpose_op_cudnn.cc b/caffe2/operators/transpose_op_cudnn.cc
index 874ecf6..2b0f12e 100644
--- a/caffe2/operators/transpose_op_cudnn.cc
+++ b/caffe2/operators/transpose_op_cudnn.cc
@@ -34,7 +34,7 @@ class CuDNNTransposeOp final : public Operator {
     CUDNN_ENFORCE(cudnnCreateTensorDescriptor(&yDesc_));
   }

-  ~CuDNNTransposeOp() {
+  ~CuDNNTransposeOp() override {
     CUDNN_ENFORCE(cudnnDestroyTensorDescriptor(xDesc_));
     CUDNN_ENFORCE(cudnnDestroyTensorDescriptor(yDesc_));
   }
diff --git a/caffe2/operators/utility_ops_cudnn.cc b/caffe2/operators/utility_ops_cudnn.cc
index bd04f30..7fe4503 100644
--- a/caffe2/operators/utility_ops_cudnn.cc
+++ b/caffe2/operators/utility_ops_cudnn.cc
@@ -21,7 +21,7 @@ class CuDNNWeightedSumOp : public Operator {
         add_desc_, CUDNN_OP_TENSOR_ADD, CUDNN_DATA_FLOAT, CUDNN_PROPAGATE_NAN));
   }

-  ~CuDNNWeightedSumOp() {
+  ~CuDNNWeightedSumOp() override {
     CUDNN_ENFORCE(cudnnDestroyTensorDescriptor(data_desc_));
     CUDNN_ENFORCE(cudnnDestroyOpTensorDescriptor(add_desc_));
   }
diff --git a/caffe2/opt/converter.cc b/caffe2/opt/converter.cc
index 2f9c8fb..6f5cde5 100644
--- a/caffe2/opt/converter.cc
+++ b/caffe2/opt/converter.cc
@@ -137,7 +137,7 @@ class ConvConverter : public Converter {
   }
   // Does not override default converter to OperatorDef

-  virtual ~ConvConverter() {}
+  ~ConvConverter() override {}
 };

 REGISTER_CONVERTER(Conv, ConvConverter);
@@ -175,7 +175,7 @@ class ClipConverter : public Converter {
   }
   // Does not override default converter to OperatorDef

-  virtual ~ClipConverter() {}
+  ~ClipConverter() override {}
 };

 REGISTER_CONVERTER(Clip, ClipConverter);
@@ -190,7 +190,7 @@ class AveragePoolConverter : public Converter {
   }
   // Does not override default converter to OperatorDef

-  virtual ~AveragePoolConverter() {}
+  ~AveragePoolConverter() override {}
 };

 REGISTER_CONVERTER(AveragePool, AveragePoolConverter);
@@ -205,7 +205,7 @@ class MaxPoolConverter : public Converter {
   }
   // Does not override default converter to OperatorDef

-  virtual ~MaxPoolConverter() {}
+  ~MaxPoolConverter() override {}
 };

 REGISTER_CONVERTER(MaxPool, MaxPoolConverter);
@@ -231,7 +231,7 @@ class ConcatConverter : public Converter {
   }
   // Does not override default converter to OperatorDef

-  virtual ~ConcatConverter() {}
+  ~ConcatConverter() override {}
 };

 REGISTER_CONVERTER(Concat, ConcatConverter);
@@ -258,7 +258,7 @@ class FCConverter : public Converter {
   }
   // Does not override default converter to OperatorDef

-  virtual ~FCConverter() {}
+  ~FCConverter() override {}
 };

 REGISTER_CONVERTER(FC, FCConverter);
diff --git a/caffe2/python/pybind_state_ideep.cc b/caffe2/python/pybind_state_ideep.cc
index f829622..7057ca9 100644
--- a/caffe2/python/pybind_state_ideep.cc
+++ b/caffe2/python/pybind_state_ideep.cc
@@ -160,8 +160,11 @@ public:
 #endif
   }

-  void Feed(const DeviceOption &option, PyArrayObject *original_array,
-            Blob *blob, bool in_place) {
+  void Feed(
+      const DeviceOption& option,
+      PyArrayObject* original_array,
+      Blob* blob,
+      bool in_place) override {
 #ifdef USE_NUMPY
     try {
       PyArrayObject *array = PyArray_GETCONTIGUOUS(original_array);
diff --git a/caffe2/utils/threadpool/ThreadPool.cc b/caffe2/utils/threadpool/ThreadPool.cc
index cf0dbc2..b6e8d75 100644
--- a/caffe2/utils/threadpool/ThreadPool.cc
+++ b/caffe2/utils/threadpool/ThreadPool.cc
@@ -113,12 +113,12 @@ void ThreadPool::run(const std::function& fn, size_t range) {
   struct FnTask : public Task {
     FnTask(){};
-    virtual ~FnTask(){};
+    ~FnTask() override{};
     const std::function *fn_;
     int idx_;
     size_t start_;
     size_t end_;
-    virtual void Run() override {
+    void Run() override {
       for (auto i = start_; i < end_; ++i) {
         (*fn_)(idx_, i);
       }
diff --git a/test/cpp/api/module.cpp b/test/cpp/api/module.cpp
index 557b7de..4cf04d3 100644
--- a/test/cpp/api/module.cpp
+++ b/test/cpp/api/module.cpp
@@ -839,7 +839,7 @@ TEST_F(ModuleTest, PrettyPrint) {
   struct TestModule : torch::nn::Module {
     TestModule(int x, float y) : x_(x), y_(y) {}

-    void pretty_print(std::ostream& stream) const {
+    void pretty_print(std::ostream& stream) const override {
       stream << "TestModule(x=" << x_ << ", y=" << y_ << ")";
     }

diff --git a/test/cpp/api/tensor_options.cpp b/test/cpp/api/tensor_options.cpp
index a594f80..dc8c243 100644
--- a/test/cpp/api/tensor_options.cpp
+++ b/test/cpp/api/tensor_options.cpp
@@ -133,7 +133,7 @@ struct DefaultDtypeTest : ::testing::Test {
   DefaultDtypeTest() {
     set_default_dtype(caffe2::TypeMeta::Make());
   }
-  ~DefaultDtypeTest() {
+  ~DefaultDtypeTest() override {
     set_default_dtype(caffe2::TypeMeta::Make());
   }
 };
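
Note (not part of the commit): the pattern applied throughout this diff is the clang-tidy modernize-use-override transformation. A minimal sketch is below; Base, DerivedBefore, and DerivedAfter are hypothetical names used only for illustration, not classes from this change.

// Illustrative sketch of modernize-use-override.
class Base {
 public:
  virtual ~Base() {}
  virtual bool RunOnDevice() = 0;
};

// Before: 'virtual' is repeated in the derived class and the compiler
// cannot verify that these members really override anything.
class DerivedBefore : public Base {
 public:
  virtual ~DerivedBefore() {}
  virtual bool RunOnDevice() { return true; }
};

// After: the redundant 'virtual' is dropped and 'override' turns any
// signature mismatch with the base class into a compile-time error.
class DerivedAfter : public Base {
 public:
  ~DerivedAfter() override {}
  bool RunOnDevice() override { return true; }
};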