CUDNN_ENFORCE(cudnnCreateConvolutionDescriptor(&conv_desc_));
}
- ~CudnnConvTransposeOpBase() {
+ ~CudnnConvTransposeOpBase() override {
CUDNN_ENFORCE(cudnnDestroyTensorDescriptor(bottom_desc_));
CUDNN_ENFORCE(cudnnDestroyFilterDescriptor(filter_desc_));
if (InputSize() == 3) {
CudnnConvTransposeOp(const OperatorDef& operator_def, Workspace* ws)
: CudnnConvTransposeOpBase(operator_def, ws) {}
- ~CudnnConvTransposeOp() {}
+ ~CudnnConvTransposeOp() override {}
bool RunOnDevice() override;
"If bias is not present, you should not have 3 grad output.");
}
- ~CudnnConvTransposeGradientOp() {}
+ ~CudnnConvTransposeGradientOp() override {}
bool RunOnDevice() override;
class CounterSerializer : public BlobSerializerBase {
public:
CounterSerializer() {}
- ~CounterSerializer() {}
+ ~CounterSerializer() override {}
void Serialize(
const void* pointer,
class TreeCursorSerializer : public BlobSerializerBase {
public:
TreeCursorSerializer() {}
- ~TreeCursorSerializer() {}
+ ~TreeCursorSerializer() override {}
void Serialize(
const void* pointer,
}
}
- ~CuDNNDropoutOp() noexcept {
+ ~CuDNNDropoutOp() noexcept override {
CUDNN_ENFORCE(cudnnDestroyTensorDescriptor(data_desc_));
CUDNN_ENFORCE(cudnnDestroyDropoutDescriptor(dropout_desc_));
}
CAFFE_ENFORCE(scratch_blob_);
}
- ~CuDNNDropoutGradientOp() noexcept {
+ ~CuDNNDropoutGradientOp() noexcept override {
CUDNN_ENFORCE(cudnnDestroyTensorDescriptor(data_desc_));
CUDNN_ENFORCE(cudnnDestroyDropoutDescriptor(dropout_desc_));
}
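// (Note on the two dropout hunks above: destructors are implicitly noexcept
// in C++11, so the explicit `noexcept` is documentation; `override`
// additionally makes the compiler check that the base class destructor is
// virtual. If CUDNN_ENFORCE were to throw inside these noexcept destructors,
// the program would std::terminate rather than propagate the exception.)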
class IndexSerializer : public BlobSerializerBase {
public:
IndexSerializer() {}
- ~IndexSerializer() {}
+ ~IndexSerializer() override {}
void Serialize(
const void* pointer,
cudnnSetLRNDescriptor(norm_desc_, size_, alpha_, beta_, bias_));
}
- ~CuDNNLRNOp() {
+ ~CuDNNLRNOp() override {
CUDNN_ENFORCE(cudnnDestroyTensorDescriptor(data_desc_));
CUDNN_ENFORCE(cudnnDestroyLRNDescriptor(norm_desc_));
}
cudnnSetLRNDescriptor(norm_desc_, size_, alpha_, beta_, bias_));
}
- ~CuDNNLRNGradientOp() {
+ ~CuDNNLRNGradientOp() override {
CUDNN_ENFORCE(cudnnDestroyTensorDescriptor(data_desc_));
CUDNN_ENFORCE(cudnnDestroyLRNDescriptor(norm_desc_));
}
public:
GetGPUMemoryUsageOp(const OperatorDef& operator_def, Workspace* ws)
: Operator<CUDAContext>(operator_def, ws) {}
- ~GetGPUMemoryUsageOp() {}
+ ~GetGPUMemoryUsageOp() override {}
bool RunOnDevice() override {
CHECK_EQ(InputSize(), 0);
USE_OPERATOR_FUNCTIONS(CPUContext);
using Operator<CPUContext>::Operator;
- bool RunOnDevice() {
+ bool RunOnDevice() override {
const auto& X = Input(0);
const auto& M = Input(1); // mean
const auto& S = Input(2); // standard deviation
public:
IncrementByOneOp(const OperatorDef& def, Workspace* ws)
: Operator<CPUContext>(def, ws) {}
- bool RunOnDevice() {
+ bool RunOnDevice() override {
const auto& in = Input(0);
auto* out = Output(0, in.sizes(), at::dtype<float>());
CUDNN_ENFORCE(cudnnCreateTensorDescriptor(&Y_desc_));
}
- virtual ~CuDNNOrderSwithOpBase() {
+ ~CuDNNOrderSwithOpBase() override {
CUDNN_ENFORCE(cudnnDestroyTensorDescriptor(X_desc_));
CUDNN_ENFORCE(cudnnDestroyTensorDescriptor(Y_desc_));
}
}
}
- ~CuDNNPoolOp() {
+ ~CuDNNPoolOp() override {
CUDNN_ENFORCE(cudnnDestroyTensorDescriptor(X_desc_));
CUDNN_ENFORCE(cudnnDestroyTensorDescriptor(Y_desc_));
CUDNN_ENFORCE(cudnnDestroyPoolingDescriptor(pooling_desc_));
}
}
- ~CuDNNPoolGradientOp() {
+ ~CuDNNPoolGradientOp() override {
CUDNN_ENFORCE(cudnnDestroyTensorDescriptor(X_desc_));
CUDNN_ENFORCE(cudnnDestroyTensorDescriptor(Y_desc_));
CUDNN_ENFORCE(cudnnDestroyPoolingDescriptor(pooling_desc_));
CUDNN_ENFORCE(cudnnCreateTensorDescriptor(&desc_));
}
- ~CuDNNSoftmaxOp() {
+ ~CuDNNSoftmaxOp() override {
CUDNN_ENFORCE(cudnnDestroyTensorDescriptor(desc_));
}
CUDNN_ENFORCE(cudnnCreateTensorDescriptor(&desc_));
}
- ~CuDNNSoftmaxGradientOp() {
+ ~CuDNNSoftmaxGradientOp() override {
CUDNN_ENFORCE(cudnnDestroyTensorDescriptor(desc_));
}
Workspace* ws)
: Operator<CPUContext>(operator_def, ws), ws_(ws) {}
- bool RunOnDevice() {
+ bool RunOnDevice() override {
const auto& X = Input(0);
const auto& mean = Input(1);
// Expect this many channels as output
static constexpr int kOutputChannels = 4;
- bool RunOnDevice() {
+ bool RunOnDevice() override {
const auto& X = Input(0);
const auto& mean = Input(1);
ChunkProvider(const std::string& str) : ch(str) {}
std::string ch;
size_t charIdx{0};
- void operator()(CharRange& range) {
+ void operator()(CharRange& range) override {
if (charIdx >= ch.size()) {
range.start = nullptr;
range.end = nullptr;
charIdx = endIdx;
}
};
- void reset() {
+ void reset() override {
charIdx = 0;
}
};
CUDNN_ENFORCE(cudnnCreateTensorDescriptor(&yDesc_));
}
- ~CuDNNTransposeOp() {
+ ~CuDNNTransposeOp() override {
CUDNN_ENFORCE(cudnnDestroyTensorDescriptor(xDesc_));
CUDNN_ENFORCE(cudnnDestroyTensorDescriptor(yDesc_));
}
add_desc_, CUDNN_OP_TENSOR_ADD, CUDNN_DATA_FLOAT, CUDNN_PROPAGATE_NAN));
}
- ~CuDNNWeightedSumOp() {
+ ~CuDNNWeightedSumOp() override {
CUDNN_ENFORCE(cudnnDestroyTensorDescriptor(data_desc_));
CUDNN_ENFORCE(cudnnDestroyOpTensorDescriptor(add_desc_));
}
}
// Does not override default converter to OperatorDef
- virtual ~ConvConverter() {}
+ ~ConvConverter() override {}
};
REGISTER_CONVERTER(Conv, ConvConverter);
}
// Does not override default converter to OperatorDef
- virtual ~ClipConverter() {}
+ ~ClipConverter() override {}
};
REGISTER_CONVERTER(Clip, ClipConverter);
}
// Does not override default converter to OperatorDef
- virtual ~AveragePoolConverter() {}
+ ~AveragePoolConverter() override {}
};
REGISTER_CONVERTER(AveragePool, AveragePoolConverter);
}
// Does not override default converter to OperatorDef
- virtual ~MaxPoolConverter() {}
+ ~MaxPoolConverter() override {}
};
REGISTER_CONVERTER(MaxPool, MaxPoolConverter);
}
// Does not override default converter to OperatorDef
- virtual ~ConcatConverter() {}
+ ~ConcatConverter() override {}
};
REGISTER_CONVERTER(Concat, ConcatConverter);
}
// Does not override default converter to OperatorDef
- virtual ~FCConverter() {}
+ ~FCConverter() override {}
};
REGISTER_CONVERTER(FC, FCConverter);
#endif
}
- void Feed(const DeviceOption &option, PyArrayObject *original_array,
- Blob *blob, bool in_place) {
+ void Feed(
+ const DeviceOption& option,
+ PyArrayObject* original_array,
+ Blob* blob,
+ bool in_place) override {
#ifdef USE_NUMPY
try {
PyArrayObject *array = PyArray_GETCONTIGUOUS(original_array);
struct FnTask : public Task {
FnTask(){};
- virtual ~FnTask(){};
+ ~FnTask() override {}
const std::function<void(int, size_t)> *fn_;
int idx_;
size_t start_;
size_t end_;
- virtual void Run() override {
+ void Run() override {
for (auto i = start_; i < end_; ++i) {
(*fn_)(idx_, i);
}
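// (The Run() hunk above also drops the leading `virtual`: `override` already
// requires the member to override a virtual base-class function, so
// `virtual void Run() override` says the same thing twice and
// `void Run() override` is the idiomatic spelling.)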
struct TestModule : torch::nn::Module {
TestModule(int x, float y) : x_(x), y_(y) {}
- void pretty_print(std::ostream& stream) const {
+ void pretty_print(std::ostream& stream) const override {
stream << "TestModule(x=" << x_ << ", y=" << y_ << ")";
}
DefaultDtypeTest() {
set_default_dtype(caffe2::TypeMeta::Make<float>());
}
- ~DefaultDtypeTest() {
+ ~DefaultDtypeTest() override {
set_default_dtype(caffe2::TypeMeta::Make<float>());
}
};
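// A minimal standalone sketch (illustrative only, not part of the patch;
// Base, Good, and Bad below are made-up names echoing the RunOnDevice
// pattern in the hunks) of what these `override` annotations buy: the
// compiler verifies that the marked destructor or member function really
// overrides a virtual declaration in a base class.
struct Base {
  virtual ~Base() = default;
  virtual bool RunOnDevice() = 0;
};

struct Good : Base {
  ~Good() override = default;    // OK: Base::~Base() is virtual
  bool RunOnDevice() override {  // OK: signature matches the base exactly
    return true;
  }
};

struct Bad : Base {
  // bool RunOnDevice() const override { return true; }
  // ^ rejected at compile time: the extra `const` changes the signature, so
  //   nothing in Base is overridden. Without `override`, this line would
  //   silently declare a new, hiding function instead of failing to compile.
};

int main() {
  Good g;
  return g.RunOnDevice() ? 0 : 1;
}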