From 846a64e8058b7a061dbd172884799923df9db8dc Mon Sep 17 00:00:00 2001
From: Jerry Zhang
Date: Mon, 4 Feb 2019 15:46:00 -0800
Subject: [PATCH] Tensor method rename ndim()->dim() - 1/3 (#16678)

Summary:
Pull Request resolved: https://github.com/pytorch/pytorch/pull/16678

Codemod generated with clangr shard mode, 25 files per diff,

Reviewed By: houseroad

Differential Revision: D13929413

fbshipit-source-id: 677ce760bdbf9f5560630fdc40dd60af227fb696
---
 caffe2/operators/accuracy_op.cu                 |  4 ++--
 caffe2/operators/affine_channel_op.cu           |  2 +-
 caffe2/operators/boolean_mask_ops.cu            |  4 ++--
 caffe2/operators/boolean_unmask_ops.cu          |  4 ++--
 caffe2/operators/channel_backprop_stats_op.cu   |  6 +++---
 caffe2/operators/channel_shuffle_op.cu          |  4 ++--
 caffe2/operators/channel_stats_op.cu            |  6 +++---
 caffe2/operators/cross_entropy_op.cu            | 18 +++++++++---------
 caffe2/operators/deform_conv_op_impl.h          | 10 +++++-----
 caffe2/operators/depthwise_3x3_conv_op_cudnn.cu |  6 +++---
 10 files changed, 32 insertions(+), 32 deletions(-)

diff --git a/caffe2/operators/accuracy_op.cu b/caffe2/operators/accuracy_op.cu
index 3309665..4c682a6 100644
--- a/caffe2/operators/accuracy_op.cu
+++ b/caffe2/operators/accuracy_op.cu
@@ -48,10 +48,10 @@ bool AccuracyOp<float, CUDAContext>::RunOnDevice() {
   auto& X = Input(PREDICTION);
   auto& label = Input(LABEL);
 
-  CAFFE_ENFORCE_EQ(X.ndim(), 2);
+  CAFFE_ENFORCE_EQ(X.dim(), 2);
   int N = X.dim32(0);
   int D = X.dim32(1);
-  CAFFE_ENFORCE_EQ(label.ndim(), 1);
+  CAFFE_ENFORCE_EQ(label.dim(), 1);
   CAFFE_ENFORCE_EQ(label.dim32(0), N);
   auto* Y = Output(0, vector<int64_t>(), at::dtype<float>());
   float* Ydata = Y->template mutable_data<float>();
diff --git a/caffe2/operators/affine_channel_op.cu b/caffe2/operators/affine_channel_op.cu
index 4ac7d33..356c45f 100644
--- a/caffe2/operators/affine_channel_op.cu
+++ b/caffe2/operators/affine_channel_op.cu
@@ -103,7 +103,7 @@ bool AffineChannelGradientOp<float, CUDAContext>::RunOnDeviceWithOrderNHWC() {
   const auto& dY = Input(0);
   const auto& scale = is_learnable_ ? Input(2) : Input(1);
   auto* dX = Output(0, dY.sizes(), at::dtype<float>());
-  const int ndim = dY.ndim();
+  const int ndim = dY.dim();
   const int C = dY.dim32(ndim - 1);
   const int rows = dY.size() / C;
   const int cols = C;
diff --git a/caffe2/operators/boolean_mask_ops.cu b/caffe2/operators/boolean_mask_ops.cu
index d953e8f..4cbf368 100644
--- a/caffe2/operators/boolean_mask_ops.cu
+++ b/caffe2/operators/boolean_mask_ops.cu
@@ -33,8 +33,8 @@ class BooleanMaskOp final : public Operator<CUDAContext> {
     const auto& mask = Input(1);
     auto* dest = Output(0);
 
-    CAFFE_ENFORCE(src.ndim() >= 1);
-    CAFFE_ENFORCE_EQ(mask.ndim(), 1);
+    CAFFE_ENFORCE(src.dim() >= 1);
+    CAFFE_ENFORCE_EQ(mask.dim(), 1);
     CAFFE_ENFORCE(src.size(0) == mask.size(0));
 
     const auto* maskData = mask.data<bool>();
diff --git a/caffe2/operators/boolean_unmask_ops.cu b/caffe2/operators/boolean_unmask_ops.cu
index 21d3275..3ac555d 100644
--- a/caffe2/operators/boolean_unmask_ops.cu
+++ b/caffe2/operators/boolean_unmask_ops.cu
@@ -70,12 +70,12 @@ class BooleanUnmaskOp final : public Operator<CUDAContext> {
     auto* hostValueSizesData = hostValueSizes_.mutable_data();
     for (int i = 0; i < numMasks; ++i) {
       auto& mask = Input(i * 2);
-      CAFFE_ENFORCE_EQ(mask.ndim(), 1);
+      CAFFE_ENFORCE_EQ(mask.dim(), 1);
       CAFFE_ENFORCE_EQ(mask.size(), maskSize);
       hostMasksData[i] = const_cast<bool*>(mask.data<bool>());
 
       const auto& value = Input(i * 2 + 1);
-      CAFFE_ENFORCE_EQ(value.ndim(), 1);
+      CAFFE_ENFORCE_EQ(value.dim(), 1);
       hostValuesData[i] = (char*)value.raw_data();
       hostValueSizesData[i] = value.size();
     }
diff --git a/caffe2/operators/channel_backprop_stats_op.cu b/caffe2/operators/channel_backprop_stats_op.cu
index 1dc2a64..325d566 100644
--- a/caffe2/operators/channel_backprop_stats_op.cu
+++ b/caffe2/operators/channel_backprop_stats_op.cu
@@ -154,12 +154,12 @@ bool ChannelBackpropStatsOp<CUDAContext>::RunOnDevice() {
   const auto& dY = Input(OUTPUT_GRAD);
   const auto& mean = Input(SAVED_MEAN);
   const auto& invStddev = Input(SAVED_INV_STDDEV);
-  CAFFE_ENFORCE(X.ndim() >= 3 && X.ndim() <= 5);
+  CAFFE_ENFORCE(X.dim() >= 3 && X.dim() <= 5);
   const int N = X.dim32(0);
   const int C = X.dim32(1);
   const int H = X.dim32(2);
-  const int W = X.ndim() > 3 ? X.dim32(3) : 1;
-  const int D = X.ndim() > 4 ? X.dim32(4) : 1;
+  const int W = X.dim() > 3 ? X.dim32(3) : 1;
+  const int D = X.dim() > 4 ? X.dim32(4) : 1;
 
   const auto Xarr = X.data<float>();
   const auto dYarr = dY.data<float>();
diff --git a/caffe2/operators/channel_shuffle_op.cu b/caffe2/operators/channel_shuffle_op.cu
index c93181c..edaa260 100644
--- a/caffe2/operators/channel_shuffle_op.cu
+++ b/caffe2/operators/channel_shuffle_op.cu
@@ -88,7 +88,7 @@ bool ChannelShuffleOp<float, CUDAContext>::RunOnDeviceWithOrderNHWC() {
   const auto& X = Input(0);
 
   auto* Y = Output(0, X.sizes(), at::dtype<float>());
-  const int ndim = X.ndim();
+  const int ndim = X.dim();
   const int N = X.dim32(0);
   const int C = X.dim32(ndim - 1);
   const int G = this->group_;
@@ -158,7 +158,7 @@ bool ChannelShuffleGradientOp<float, CUDAContext>::RunOnDeviceWithOrderNHWC() {
   const auto& dY = Input(0);
 
   auto* dX = Output(0, dY.sizes(), at::dtype<float>());
-  const int ndim = dY.ndim();
+  const int ndim = dY.dim();
   const int N = dY.dim32(0);
   const int C = dY.dim32(ndim - 1);
   const int G = this->group_;
diff --git a/caffe2/operators/channel_stats_op.cu b/caffe2/operators/channel_stats_op.cu
index b4e8772..5243005 100644
--- a/caffe2/operators/channel_stats_op.cu
+++ b/caffe2/operators/channel_stats_op.cu
@@ -147,12 +147,12 @@ __global__ void ChannelStatsFinalSumsKernel(
 template <>
 bool ChannelStatsOp<CUDAContext>::RunOnDevice() {
   const auto& X = Input(INPUT);
-  CAFFE_ENFORCE(X.ndim() >= 3 && X.ndim() <= 5);
+  CAFFE_ENFORCE(X.dim() >= 3 && X.dim() <= 5);
   const int N = X.dim32(0);
   const int C = X.dim32(1);
   const int H = X.dim32(2);
-  const int W = X.ndim() > 3 ? X.dim32(3) : 1;
-  const int D = X.ndim() > 4 ? X.dim32(4) : 1;
+  const int W = X.dim() > 3 ? X.dim32(3) : 1;
+  const int D = X.dim() > 4 ? X.dim32(4) : 1;
 
   const auto X_arr = X.data<float>();
   const auto valsPerChannel = H * W * D;
diff --git a/caffe2/operators/cross_entropy_op.cu b/caffe2/operators/cross_entropy_op.cu
index 7f1fb84..345af22 100644
--- a/caffe2/operators/cross_entropy_op.cu
+++ b/caffe2/operators/cross_entropy_op.cu
@@ -32,7 +32,7 @@ bool LabelCrossEntropyOp<float, CUDAContext>::RunOnDevice() {
   auto& label = Input(1);
 
   int N, D;
-  if (X.ndim() > 1) {
+  if (X.dim() > 1) {
     N = X.dim32(0);
     D = X.size_from_dim(1);
   } else {
@@ -40,7 +40,7 @@ bool LabelCrossEntropyOp<float, CUDAContext>::RunOnDevice() {
     D = X.dim32(0);
   }
   CAFFE_ENFORCE(
-      (label.ndim() == 1) || (label.ndim() == 2 && label.dim32(1) == 1));
+      (label.dim() == 1) || (label.dim() == 2 && label.dim32(1) == 1));
   CAFFE_ENFORCE_EQ(label.dim32(0), N);
   auto* Y = Output(0, vector<int64_t>(size_t(1), N), at::dtype<float>());
   LabelCrossEntropyKernel<<<
@@ -64,7 +64,7 @@ bool LabelCrossEntropyGradientOp<float, CUDAContext>::RunOnDevice() {
   auto& dY = Input(2);
 
   int N, D;
-  if (X.ndim() > 1) {
+  if (X.dim() > 1) {
     N = X.dim32(0);
     D = X.size_from_dim(1);
   } else {
@@ -72,9 +72,9 @@ bool LabelCrossEntropyGradientOp<float, CUDAContext>::RunOnDevice() {
     D = X.dim32(0);
   }
   CAFFE_ENFORCE(
-      (label.ndim() == 1) || (label.ndim() == 2 && label.dim32(1) == 1));
+      (label.dim() == 1) || (label.dim() == 2 && label.dim32(1) == 1));
   CAFFE_ENFORCE_EQ(label.dim32(0), N);
-  CAFFE_ENFORCE_EQ(dY.ndim(), 1);
+  CAFFE_ENFORCE_EQ(dY.dim(), 1);
   CAFFE_ENFORCE_EQ(dY.dim32(0), N);
   auto* dX = Output(0, X.sizes(), at::dtype<float>());
   math::Set<float, CUDAContext>(
@@ -243,7 +243,7 @@ bool SigmoidCrossEntropyWithLogitsOp<float, CUDAContext>::RunOnDevice() {
   auto& logits = Input(0);
   auto& targets = Input(1);
   CAFFE_ENFORCE(logits.sizes() == targets.sizes());
-  const auto inner_size = logits.ndim() > 0 ? logits.sizes().back() : 1;
+  const auto inner_size = logits.dim() > 0 ? logits.sizes().back() : 1;
   const auto outer_size = logits.size() / inner_size;
 
   std::vector<int64_t> dims;
@@ -284,7 +284,7 @@ bool SigmoidCrossEntropyWithLogitsGradientOp<float, CUDAContext>::
   auto& logits = Input(1);
   auto& targets = Input(2);
   CAFFE_ENFORCE(logits.sizes() == targets.sizes());
-  const auto inner_size = logits.ndim() > 0 ? logits.sizes().back() : 1;
+  const auto inner_size = logits.dim() > 0 ? logits.sizes().back() : 1;
   const auto outer_size = logits.size() / inner_size;
 
   CAFFE_ENFORCE(g.size() == outer_size);
@@ -364,7 +364,7 @@ bool WeightedSigmoidCrossEntropyWithLogitsOp<float, CUDAContext>::
   auto& weights = Input(2);
   CAFFE_ENFORCE(logits.sizes() == targets.sizes());
   CAFFE_ENFORCE(weights.sizes() == targets.sizes());
-  const auto inner_size = logits.ndim() > 0 ? logits.sizes().back() : 1;
+  const auto inner_size = logits.dim() > 0 ? logits.sizes().back() : 1;
   const auto outer_size = logits.size() / inner_size;
 
   std::vector<int64_t> dims;
@@ -397,7 +397,7 @@ bool WeightedSigmoidCrossEntropyWithLogitsGradientOp<float, CUDAContext>::
   auto& weights = Input(3);
   CAFFE_ENFORCE(logits.sizes() == targets.sizes());
   CAFFE_ENFORCE(weights.sizes() == targets.sizes());
-  const auto inner_size = logits.ndim() > 0 ? logits.sizes().back() : 1;
+  const auto inner_size = logits.dim() > 0 ? logits.sizes().back() : 1;
   const auto outer_size = logits.size() / inner_size;
 
   CAFFE_ENFORCE(g.size() == outer_size);
diff --git a/caffe2/operators/deform_conv_op_impl.h b/caffe2/operators/deform_conv_op_impl.h
index 94dea27..488b8b4 100644
--- a/caffe2/operators/deform_conv_op_impl.h
+++ b/caffe2/operators/deform_conv_op_impl.h
@@ -19,7 +19,7 @@ bool DeformConvOp<T, Context>::RunOnDeviceWithOrderNCHW() {
   auto& filter = Input(FILTER);
   Tensor* Y = Output(0);
   const int N = X.dim32(0), C = X.dim32(1);
-  CAFFE_ENFORCE_EQ(X.dim(), filter.ndim());
+  CAFFE_ENFORCE_EQ(X.dim(), filter.dim());
   const int M = filter.dim32(0);
   CAFFE_ENFORCE(
       C == filter.dim32(1) * group_,
@@ -113,7 +113,7 @@ bool DeformConvOp<T, Context>::RunOnDeviceWithOrderNCHW() {
 
   if (InputSize() == 4) {
     auto& bias = Input(BIAS);
-    CAFFE_ENFORCE(bias.ndim() == 1);
+    CAFFE_ENFORCE(bias.dim() == 1);
     CAFFE_ENFORCE(bias.dim32(0) == M);
     if (bias_multiplier_.size() != output_image_size) {
       // If the helper bias multiplier is not image size, reshape and fill it
@@ -208,7 +208,7 @@ bool DeformConvGradientOp<T, Context>::RunOnDeviceWithOrderNCHW() {
 
   const int output_image_size = this->GetDimsSize(dY);
 
   ConvPoolOpBase<Context>::ComputePads(input_dims);
-  CAFFE_ENFORCE_EQ(X.ndim(), filter.ndim());
+  CAFFE_ENFORCE_EQ(X.dim(), filter.dim());
   const int M = filter.dim32(0);
   CAFFE_ENFORCE(filter.dim32(1) * group_ == C);
@@ -218,9 +218,9 @@ bool DeformConvGradientOp<T, Context>::RunOnDeviceWithOrderNCHW() {
       kernel_.size(),
       "d kernel.");
   CAFFE_ENFORCE(
-      offset.ndim() == 4,
+      offset.dim() == 4,
       "Deformable convolution only supports 4d offset, has ",
-      offset.ndim(),
+      offset.dim(),
       "d offset.");
   CAFFE_ENFORCE_EQ(offset.dim32(0), N);
   CAFFE_ENFORCE(
diff --git a/caffe2/operators/depthwise_3x3_conv_op_cudnn.cu b/caffe2/operators/depthwise_3x3_conv_op_cudnn.cu
index beefcfd..29eff53 100644
--- a/caffe2/operators/depthwise_3x3_conv_op_cudnn.cu
+++ b/caffe2/operators/depthwise_3x3_conv_op_cudnn.cu
@@ -290,7 +290,7 @@ class Depthwise3x3ConvOp final : public ConvPoolOpBase<CUDAContext> {
     auto& filter = Input(1);
     Tensor* Y = Output(0);
     const int N = X.dim32(0), C = X.dim32(1);
-    CAFFE_ENFORCE_EQ(X.ndim(), filter.ndim());
+    CAFFE_ENFORCE_EQ(X.dim(), filter.dim());
     const int M = filter.dim32(0);
     CAFFE_ENFORCE_EQ(M, X.dim32(1));
 
@@ -342,7 +342,7 @@ class Depthwise3x3ConvOp final : public ConvPoolOpBase<CUDAContext> {
         Y->dim32(2),
         Y->dim32(3)));
     auto& bias = Input(2);
-    CAFFE_ENFORCE_EQ(bias.ndim(), 1);
+    CAFFE_ENFORCE_EQ(bias.dim(), 1);
     CAFFE_ENFORCE_EQ(bias.dim32(0), M);
     CUDNN_ENFORCE(cudnnAddTensor(
         cudnn_wrapper_.inline_cudnn_handle(),
@@ -394,7 +394,7 @@ class Depthwise3x3ConvGradientOp final : public ConvPoolOpBase<CUDAContext> {
 
     const vector<int> input_dims = this->GetDims(X);
     ConvPoolOpBase<CUDAContext>::ComputePads(input_dims);
-    CAFFE_ENFORCE_EQ(X.ndim(), filter.ndim());
+    CAFFE_ENFORCE_EQ(X.dim(), filter.dim());
    const int M = filter.dim32(0);
     CAFFE_ENFORCE(filter.dim32(1) * group_ == C);
     CAFFE_ENFORCE(M % group_ == 0);
-- 
2.7.4
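
Note for reviewers: the codemod is purely mechanical. Every call to the
deprecated Tensor::ndim() accessor is replaced by the equivalent
Tensor::dim(); both return the tensor's rank (number of axes), not the size
of any axis, and per-axis sizes are still read via dim32(i)/size(i), as the
hunks above show. A minimal standalone sketch of the before/after pattern
follows; the FakeTensor type is hypothetical, for illustration only, and is
not the Caffe2 Tensor class.

// sketch.cc -- illustrates the ndim() -> dim() rename on a hypothetical
// stand-in type; compile with any C++11 compiler.
#include <cstdint>
#include <iostream>
#include <vector>

struct FakeTensor {
  std::vector<int64_t> sizes_;
  // Old spelling removed by the codemod: returns the rank of the tensor.
  int ndim() const { return static_cast<int>(sizes_.size()); }
  // New spelling introduced by the codemod: same value, same meaning.
  int dim() const { return static_cast<int>(sizes_.size()); }
  // Size of axis i, narrowed to 32 bits (mirrors Tensor::dim32(i)).
  int dim32(int i) const { return static_cast<int>(sizes_[i]); }
};

int main() {
  FakeTensor X{{20, 10}};  // e.g. an N x D prediction matrix
  // Before the codemod: CAFFE_ENFORCE_EQ(X.ndim(), 2);
  // After the codemod:  CAFFE_ENFORCE_EQ(X.dim(), 2);
  std::cout << "rank=" << X.dim() << " N=" << X.dim32(0)
            << " D=" << X.dim32(1) << "\n";
  return 0;
}

Because the rename is behavior-preserving, each hunk in this diff touches
only the method name and leaves the surrounding shape checks unchanged.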