From: Summer Deng
Date: Fri, 29 Mar 2019 16:24:07 +0000 (-0700)
Subject: Change dnnlowp log level from warning to v2 (#18576)
X-Git-Tag: accepted/tizen/6.5/unified/20211028.231830~557
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=7c438c82eb02d70325994100ef1b2a45e9fbc426;p=platform%2Fupstream%2Fpytorch.git

Change dnnlowp log level from warning to v2 (#18576)

Summary:
Pull Request resolved: https://github.com/pytorch/pytorch/pull/18576

As in title

Reviewed By: feiyu1990

Differential Revision: D14670898

fbshipit-source-id: 1983099b2ba57daab393278553f10dcdb1812fdf
---

diff --git a/caffe2/quantization/server/caffe2_dnnlowp_utils.cc b/caffe2/quantization/server/caffe2_dnnlowp_utils.cc
index 0e069c6..bafd7ba 100644
--- a/caffe2/quantization/server/caffe2_dnnlowp_utils.cc
+++ b/caffe2/quantization/server/caffe2_dnnlowp_utils.cc
@@ -306,19 +306,18 @@ static unique_ptr<QuantizationFactory> GetQuantizationFactoryOf_(
       "weight_quantization_kind",
       FLAGS_caffe2_dnnlowp_weight_quantization_kind);

-  LOG(WARNING) << "Quantization method for op with output " << op_def.output(0)
-               << " engine " << op_def.engine() << " activation_precision "
-               << activation_precision << " weight_precision "
-               << weight_precision << " requantization_multiplier_precision "
-               << requantization_multiplier_precision
-               << " eltwise_quantization_precision "
-               << eltwise_quantization_precision
-               << " preserve_activation_sparsity "
-               << preserve_activation_sparsity << " preserve_weight_sparsity "
-               << preserve_weight_sparsity << " force_scale_power_of_two "
-               << force_scale_power_of_two << " activation_quantization_kind "
-               << activation_quantization_kind << " weight_quantization_kind "
-               << weight_quantization_kind;
+  VLOG(2) << "Quantization method for op with output " << op_def.output(0)
+          << " engine " << op_def.engine() << " activation_precision "
+          << activation_precision << " weight_precision " << weight_precision
+          << " requantization_multiplier_precision "
+          << requantization_multiplier_precision
+          << " eltwise_quantization_precision "
+          << eltwise_quantization_precision << " preserve_activation_sparsity "
+          << preserve_activation_sparsity << " preserve_weight_sparsity "
+          << preserve_weight_sparsity << " force_scale_power_of_two "
+          << force_scale_power_of_two << " activation_quantization_kind "
+          << activation_quantization_kind << " weight_quantization_kind "
+          << weight_quantization_kind;

   return unique_ptr<QuantizationFactory>(new QuantizationFactory(
       activation_precision,
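
For readers unfamiliar with the logging macros involved: unlike LOG(WARNING), which is always emitted, VLOG(2) output is gated on the runtime verbosity level, so the per-op quantization summary is suppressed by default. Below is a minimal standalone sketch of that behavior, assuming a build against glog; the file name and its contents are illustrative only and are not part of this patch.

// vlog_demo.cc -- illustrative sketch, not part of the PyTorch/Caffe2 patch.
#include <glog/logging.h>

int main(int argc, char* argv[]) {
  google::InitGoogleLogging(argv[0]);
  FLAGS_logtostderr = true;  // print to stderr instead of log files

  // Always emitted: WARNING severity is on by default.
  LOG(WARNING) << "quantization summary at WARNING severity";

  // Only emitted when verbosity >= 2, e.g. run with GLOG_v=2
  // (or --v=2 when gflags command-line parsing is enabled).
  VLOG(2) << "quantization summary at verbose level 2";
  return 0;
}

Running the sketch plainly prints only the WARNING line; running it with GLOG_v=2 also prints the VLOG(2) line, which is the suppression-by-default effect the patch is after.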