use pragma once (#14163)
author Jongsoo Park <jongsoo@fb.com>
Tue, 20 Nov 2018 08:53:29 +0000 (00:53 -0800)
committer Facebook Github Bot <facebook-github-bot@users.noreply.github.com>
Tue, 20 Nov 2018 08:56:04 +0000 (00:56 -0800)
Summary:
Pull Request resolved: https://github.com/pytorch/pytorch/pull/14163

Some of the macro names we were using to guard these header files were too short (e.g. DYNAMIC_HISTOGRAM_H), which risks colliding with identically named guards in unrelated headers, so switch them to #pragma once.
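
For illustration only (hypothetical headers, not part of this change): with a short
guard name, whichever header is included first silently suppresses the other,
whereas #pragma once ties the guard to the file itself rather than to a macro.

    // vendor/histogram.h -- unrelated library that also picked HISTOGRAM_H
    #ifndef HISTOGRAM_H
    #define HISTOGRAM_H
    struct VendorHistogram { int nbins; };
    #endif // HISTOGRAM_H

    // our histogram header -- if vendor/histogram.h was included first, this
    // whole file is skipped because HISTOGRAM_H is already defined.
    #ifndef HISTOGRAM_H
    #define HISTOGRAM_H
    namespace dnnlowp {
    class Histogram { /* ... */ };
    } // namespace dnnlowp
    #endif // HISTOGRAM_H

    // With #pragma once there is no guard macro to collide on:
    #pragma once
    namespace dnnlowp {
    class Histogram { /* ... */ };
    } // namespace dnnlowp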

Reviewed By: csummersea

Differential Revision: D13115451

fbshipit-source-id: cef8c84c62922616ceea17effff7bdf8d67302a2

13 files changed:
caffe2/quantization/server/activation_distribution_observer.h
caffe2/quantization/server/batch_permutation_dnnlowp_op.h
caffe2/quantization/server/conv_relu_op.h
caffe2/quantization/server/dequantize_dnnlowp_op.h
caffe2/quantization/server/dynamic_histogram.h
caffe2/quantization/server/elementwise_dnnlowp_op.h
caffe2/quantization/server/fully_connected_fake_lowp_op.h
caffe2/quantization/server/group_norm_dnnlowp_op.h
caffe2/quantization/server/kl_minimization.h
caffe2/quantization/server/l2_minimization.h
caffe2/quantization/server/resize_nearest_dnnlowp_op.h
caffe2/quantization/server/sigmoid.h
caffe2/quantization/server/tanh.h

diff --git a/caffe2/quantization/server/activation_distribution_observer.h b/caffe2/quantization/server/activation_distribution_observer.h
index bb96d25..1492f49 100644
@@ -1,5 +1,4 @@
-#ifndef CAFFE2_ACTIVATION_DISTRIBUTION_OBSERVER_H
-#define CAFFE2_ACTIVATION_DISTRIBUTION_OBSERVER_H
+#pragma once
 
 #include "caffe2/core/observer.h"
 #include "caffe2/core/operator.h"
@@ -147,5 +146,3 @@ class RegisterQuantizationParamsWithHistogramNetObserver final
 };
 
 } // namespace caffe2
-
-#endif // CAFFE2_ACTIVATION_DISTRIBUTION_OBSERVER_H
diff --git a/caffe2/quantization/server/batch_permutation_dnnlowp_op.h b/caffe2/quantization/server/batch_permutation_dnnlowp_op.h
index 34c8b94..ed1ee25 100644
@@ -1,5 +1,4 @@
-#ifndef DEEPLEARNING_QUANTIZATION_CAFFE2_BATCH_PERMUTATION_DNNLOWP_OP_H_
-#define DEEPLEARNING_QUANTIZATION_CAFFE2_BATCH_PERMUTATION_DNNLOWP_OP_H_
+#pragma once
 
 #include "caffe2/fb/operators/batch_permutation_op.h"
 #include "caffe2/quantization/server/dnnlowp_op.h"
@@ -26,5 +25,3 @@ class BatchPermutationDNNLowPOp final
 };
 
 } // namespace caffe2
-
-#endif // DEEPLEARNING_QUANTIZATION_CAFFE2_BATCH_PERMUTATION_DNNLOWP_OP_H_
diff --git a/caffe2/quantization/server/conv_relu_op.h b/caffe2/quantization/server/conv_relu_op.h
index 1564a8e..ee7a1cc 100644
@@ -1,5 +1,4 @@
-#ifndef CAFFE2_OPERATORS_CONV_RELU_OP_H_
-#define CAFFE2_OPERATORS_CONV_RELU_OP_H_
+#pragma once
 
 #include "caffe2/operators/conv_op.h"
 #include "caffe2/operators/conv_pool_op_base.h"
@@ -34,5 +33,3 @@ class ConvReluOp final : public ConvPoolOpBase<Context> {
 }; // class ConvReluOp
 
 } // namespace caffe2
-
-#endif // CAFFE2_OPERATORS_CONV_RELU_OP_H_
diff --git a/caffe2/quantization/server/dequantize_dnnlowp_op.h b/caffe2/quantization/server/dequantize_dnnlowp_op.h
index 80489f9..16eb1b0 100644
@@ -1,5 +1,4 @@
-#ifndef CAFFE2_OPERATORS_DEQUANTIZE_DNNLOWP_OP_H
-#define CAFFE2_OPERATORS_DEQUANTIZE_DNNLOWP_OP_H
+#pragma once
 
 #include "caffe2/core/operator.h"
 #include "caffe2/quantization/server/dnnlowp.h"
@@ -19,5 +18,3 @@ class DequantizeDNNLowPOp final : public Operator<CPUContext> {
 }; // class DequantizeDNNLowPOp
 
 } // namespace caffe2
-
-#endif // CAFFE2_OPERATORS_DEQUANTIZE_DNNLOWP_OP_H
diff --git a/caffe2/quantization/server/dynamic_histogram.h b/caffe2/quantization/server/dynamic_histogram.h
index 6e4e9e0..c24553d 100644
@@ -1,5 +1,4 @@
-#ifndef DYNAMIC_HISTOGRAM_H
-#define DYNAMIC_HISTOGRAM_H
+#pragma once
 
 #include <memory>
 #include <vector>
@@ -83,5 +82,3 @@ class DynamicHistogram {
 }; // class DynamicHistogram
 
 } // namespace dnnlowp
-
-#endif // DYNAMIC_HISTOGRAM_H
diff --git a/caffe2/quantization/server/elementwise_dnnlowp_op.h b/caffe2/quantization/server/elementwise_dnnlowp_op.h
index d92c561..03d9bf8 100644
@@ -1,5 +1,4 @@
-#ifndef CAFFE2_OPERATORS_ELEMENTWISE_DNNLOWP_OP_H_
-#define CAFFE2_OPERATORS_ELEMENTWISE_DNNLOWP_OP_H_
+#pragma once
 
 #include "caffe2/core/tensor_int8.h"
 #include "caffe2/operators/elementwise_ops.h"
@@ -136,5 +135,3 @@ class BinaryElementwiseDNNLowPOp : public DNNLowPOp<T, FP32_OP> {
     }                                                                        \
   };
 } // namespace caffe2
-
-#endif // CAFFE2_OPERATORS_ELEMENTWISE_DNNLOWP_OP_H_
diff --git a/caffe2/quantization/server/fully_connected_fake_lowp_op.h b/caffe2/quantization/server/fully_connected_fake_lowp_op.h
index 6f31aa4..20dac70 100644
@@ -14,8 +14,7 @@
  * limitations under the License.
  */
 
-#ifndef CAFFE2_OPERATORS_FULLY_CONNECTED_FP16_OP_H_
-#define CAFFE2_OPERATORS_FULLY_CONNECTED_FP16_OP_H_
+#pragma once
 
 #include <immintrin.h>
 #include "caffe2/core/context.h"
@@ -136,5 +135,3 @@ class FullyConnectedGradientFakeLowpFPOp : public Operator<Context> {
 };
 
 } // namespace caffe2
-
-#endif // CAFFE2_OPERATORS_FULLY_CONNECTED_FP16_OP_H_
diff --git a/caffe2/quantization/server/group_norm_dnnlowp_op.h b/caffe2/quantization/server/group_norm_dnnlowp_op.h
index 39b02c1..75d44d8 100644
@@ -1,5 +1,4 @@
-#ifndef DEEPLEARNING_QUANTIZATION_CAFFE2_GROUP_NORM_DNNLOWP_OP_H_
-#define DEEPLEARNING_QUANTIZATION_CAFFE2_GROUP_NORM_DNNLOWP_OP_H_
+#pragma once
 
 #include <vector>
 
@@ -172,5 +171,3 @@ class GroupNormDNNLowPOp final : public DNNLowPOp<T, GroupNormFP32Op> {
 };
 
 } // namespace caffe2
-
-#endif // DEEPLEARNING_QUANTIZATION_CAFFE2_GROUP_NORM_DNNLOWP_OP_H_
diff --git a/caffe2/quantization/server/kl_minimization.h b/caffe2/quantization/server/kl_minimization.h
index 2b0458e..edf95f5 100644
@@ -1,5 +1,4 @@
-#ifndef DNNLOWP_KL_MINIMIZATION_H
-#define DNNLOWP_KL_MINIMIZATION_H
+#pragma once
 
 #include "quantization_error_minimization.h"
 
@@ -17,5 +16,3 @@ class KLDivergenceMinimization final : public QuantizationErrorMinimization {
 };
 
 } // namespace dnnlowp
-
-#endif // DNNLOWP_KL_MINIMIZATION_H
diff --git a/caffe2/quantization/server/l2_minimization.h b/caffe2/quantization/server/l2_minimization.h
index f1e4a24..15ab92b 100644
@@ -1,5 +1,4 @@
-#ifndef DNNLOWP_L2_MINIMIZATION_H
-#define DNNLOWP_L2_MINIMIZATION_H
+#pragma once
 
 #include "quantization_error_minimization.h"
 
@@ -28,5 +27,3 @@ float L2MinimizationKernelAVX2(
     int start_bin);
 
 } // namespace dnnlowp
-
-#endif // DNNLOWP_L2_MINIMIZATION_H
diff --git a/caffe2/quantization/server/resize_nearest_dnnlowp_op.h b/caffe2/quantization/server/resize_nearest_dnnlowp_op.h
index 657b220..047d250 100644
@@ -1,5 +1,4 @@
-#ifndef DEEPLEARNING_QUANTIZATION_CAFFE2_RESIZE_NEAREST_DNNLOWP_OP_H_
-#define DEEPLEARNING_QUANTIZATION_CAFFE2_RESIZE_NEAREST_DNNLOWP_OP_H_
+#pragma once
 
 #include "caffe2/operators/resize_op.h"
 #include "caffe2/quantization/server/dnnlowp_op.h"
@@ -35,5 +34,3 @@ class ResizeNearestDNNLowPOp final : public DNNLowPOp<T, ResizeNearestFP32Op> {
 };
 
 } // namespace caffe2
-
-#endif // DEEPLEARNING_QUANTIZATION_CAFFE2_RESIZE_NEAREST_DNNLOWP_OP_H_
diff --git a/caffe2/quantization/server/sigmoid.h b/caffe2/quantization/server/sigmoid.h
index 7736173..1772240 100644
@@ -1,5 +1,4 @@
-#ifndef DNNLOWP_SIGMOID_H
-#define DNNLOWP_SIGMOID_H
+#pragma once
 
 #include "tanh.h"
 
@@ -32,5 +31,3 @@ class Sigmoid {
 }; // class Sigmoid
 
 } // namespace dnnlowp
-
-#endif // DNNLOWP_SIGMOID_H
diff --git a/caffe2/quantization/server/tanh.h b/caffe2/quantization/server/tanh.h
index 49f0f41..5012de3 100644
@@ -1,5 +1,4 @@
-#ifndef DNNLOWP_TANH_H
-#define DNNLOWP_TANH_H
+#pragma once
 
 #include "dnnlowp.h"
 
@@ -60,5 +59,3 @@ class Tanh {
 }; // class TanhApproximation
 
 } // namespace dnnlowp
-
-#endif