Change dnnlowp log level from warning to v2 (#18576)
author: Summer Deng <summerdeng@fb.com>
Fri, 29 Mar 2019 16:24:07 +0000 (09:24 -0700)
committer: Facebook Github Bot <facebook-github-bot@users.noreply.github.com>
Fri, 29 Mar 2019 16:29:25 +0000 (09:29 -0700)
Summary:
Pull Request resolved: https://github.com/pytorch/pytorch/pull/18576

As in title

Reviewed By: feiyu1990

Differential Revision: D14670898

fbshipit-source-id: 1983099b2ba57daab393278553f10dcdb1812fdf

caffe2/quantization/server/caffe2_dnnlowp_utils.cc

index 0e069c6..bafd7ba 100644 (file)
@@ -306,19 +306,18 @@ static unique_ptr<QuantizationFactory> GetQuantizationFactoryOf_(
           "weight_quantization_kind",
           FLAGS_caffe2_dnnlowp_weight_quantization_kind);
 
-  LOG(WARNING) << "Quantization method for op with output " << op_def.output(0)
-               << " engine " << op_def.engine() << " activation_precision "
-               << activation_precision << " weight_precision "
-               << weight_precision << " requantization_multiplier_precision "
-               << requantization_multiplier_precision
-               << " eltwise_quantization_precision "
-               << eltwise_quantization_precision
-               << " preserve_activation_sparsity "
-               << preserve_activation_sparsity << " preserve_weight_sparsity "
-               << preserve_weight_sparsity << " force_scale_power_of_two "
-               << force_scale_power_of_two << " activation_quantization_kind "
-               << activation_quantization_kind << " weight_quantization_kind "
-               << weight_quantization_kind;
+  VLOG(2) << "Quantization method for op with output " << op_def.output(0)
+          << " engine " << op_def.engine() << " activation_precision "
+          << activation_precision << " weight_precision " << weight_precision
+          << " requantization_multiplier_precision "
+          << requantization_multiplier_precision
+          << " eltwise_quantization_precision "
+          << eltwise_quantization_precision << " preserve_activation_sparsity "
+          << preserve_activation_sparsity << " preserve_weight_sparsity "
+          << preserve_weight_sparsity << " force_scale_power_of_two "
+          << force_scale_power_of_two << " activation_quantization_kind "
+          << activation_quantization_kind << " weight_quantization_kind "
+          << weight_quantization_kind;
 
   return unique_ptr<QuantizationFactory>(new QuantizationFactory(
       activation_precision,