Replace the remaining usages of IntList in caffe2 with IntArrayRef
author Junjie Bai <bai@in.tum.de>
Thu, 21 Mar 2019 23:24:45 +0000 (16:24 -0700)
committer Facebook Github Bot <facebook-github-bot@users.noreply.github.com>
Thu, 21 Mar 2019 23:34:38 +0000 (16:34 -0700)
Summary: Pull Request resolved: https://github.com/pytorch/pytorch/pull/18282

Differential Revision: D14569269

Pulled By: bddppq

fbshipit-source-id: 5fc33701b83f9efdec4b456d2691764831d10e7f

caffe2/operators/conv_pool_op_base.h
caffe2/quantization/server/conv_pool_dnnlowp_op_base.h
caffe2/quantization/server/dnnlowp_op.h

index 455afe1..5fb07d2 100644 (file)
@@ -295,7 +295,7 @@ class ConvPoolOpBase : public Operator<Context> {
   }
 
   static void InferOutputSize64(
-      const at::IntList& input_dims,
+      const at::IntArrayRef& input_dims,
       const int output_channel,
       const StorageOrder order,
       const bool global_pooling,
index 4a2c72b..8de6e18 100644 (file)
@@ -69,7 +69,7 @@ class ConvPoolDNNLowPOpBase : public ConvPoolOpBase<CPUContext> {
     return &Outputs()[idx]->template GetMutable<int8::Int8TensorCPU>()->t;
   }
 
-  Tensor* OutputTensorCPU_(int idx, at::IntList dims, at::TensorOptions options) {
+  Tensor* OutputTensorCPU_(int idx, at::IntArrayRef dims, at::TensorOptions options) {
     auto* t = &Outputs()[idx]->template GetMutable<int8::Int8TensorCPU>()->t;
     ReinitializeTensor(t, dims, options.device(CPU));
     return t;
index 88a5a1d..9db43c3 100644 (file)
@@ -122,7 +122,7 @@ class DNNLowPOp : public Operator<CPUContext> {
     }
   }
 
-  Tensor* OutputTensorCPU_(int idx, at::IntList dims, at::TensorOptions options) {
+  Tensor* OutputTensorCPU_(int idx, at::IntArrayRef dims, at::TensorOptions options) {
     if (dequantize_output_) {
       return Output(idx, dims, options.device(CPU));
     } else {