From a6520995fefc5af316b765ff5d5686459ab49ab1 Mon Sep 17 00:00:00 2001
From: Andrey Kamaev
Date: Tue, 27 Oct 2020 12:24:57 +0300
Subject: [PATCH] MSVC warnings fix (#2620)

* Fix MSVC build warnings in Ngraph
* Fix MSVC build warnings in transformations library
* Fix MSVC build warnings in core,legacy,preprocessing
* Fix MSVC build warnings in XLink
* Fix MSVC build warnings in Myriad plugin
---
 .../src/inference_engine/ie_blob_stream.cpp        |  2 +-
 inference-engine/src/inference_engine/ie_data.cpp  |  3 +-
 .../threading/ie_istreams_executor.cpp             |  6 ++--
 .../convert_mul_or_add_finally.hpp                 |  8 ++---
 .../convert_opset1_to_legacy/reshape_fc_fusion.hpp |  2 +-
 .../src/legacy_api/src/graph_transformer.cpp       |  5 ++-
 .../legacy_api/src/ie_cnn_layer_builder_ngraph.cpp |  2 +-
 .../src/legacy_api/src/ie_layers_internal.cpp      | 10 +++---
 inference-engine/src/legacy_api/src/net_pass.cpp   |  8 ++---
 .../src/legacy_api/src/ngraph_ops/crop_ie.cpp      |  2 +-
 .../src/legacy_api/src/ngraph_ops/interp.cpp       |  5 +--
 .../convert_interpolate_to_interp_or_resample.cpp  |  8 ++---
 .../convert_lrn_to_lrn_ie.cpp                      |  2 +-
 .../convert_mul_add_to_scaleshift_or_power.cpp     |  2 +-
 .../convert_normalizel2_to_normalize_ie.cpp        |  4 +--
 .../convert_one_hot_to_one_hot_ie.cpp              |  4 +--
 .../convert_power_to_power_ie.cpp                  |  2 +-
 .../convert_sqrt_to_power_ie.cpp                   |  2 +-
 .../convert_strided_slice_to_crop.cpp              |  4 +--
 .../convert_opset1_to_legacy/fc_bias_fusion.cpp    |  2 +-
 .../include/low_precision/layer_transformation.hpp | 10 +++---
 .../src/common/eltwise_base_transformation.cpp     |  2 +-
 .../src/common/multiply_to_group_convolution.cpp   |  2 +-
 .../src/common/mvn.cpp                             |  2 +-
 .../src/common/normalize_l2.cpp                    |  2 +-
 .../src/common/reshape.cpp                         |  4 +--
 .../impl/ie_infer_async_request_internal.hpp       |  4 +++
 inference-engine/src/plugin_api/precision_utils.h  | 13 +++++--
 .../src/preprocessing/ie_preprocess_data.cpp       | 42 +++++++++++-----------
 .../preprocessing/ie_preprocess_gapi_kernels.cpp   | 12 +++----
 .../ie_preprocess_gapi_kernels_impl.hpp            |  2 +-
 .../src/readers/ir_reader/ie_ir_parser.cpp         |  4 +--
 .../ir_reader_v7/ie_cnn_net_reader_impl.cpp        |  2 +-
 .../src/readers/ir_reader_v7/ie_layer_parsers.cpp  |  4 +--
 .../readers/ir_reader_v7/ie_layer_validators.cpp   | 18 +++++-----
 .../op_conversions/convert_reduce_to_pooling.hpp   |  2 +-
 .../include/transformations/utils/utils.hpp        |  2 +-
 .../algebraic_simplification.cpp                   |  2 +-
 .../common_optimizations/depth_to_space_fusion.cpp |  6 ++--
 .../common_optimizations/nop_elimination.cpp       |  4 +--
 .../optimize_strided_slice.cpp                     |  8 ++---
 .../pull_transpose_through_fq.cpp                  |  2 +-
 .../control_flow/unroll_tensor_iterator.cpp        | 12 +++----
 .../src/transformations/convert_precision.cpp      |  2 +-
 .../op_conversions/batch_norm_decomposition.cpp    |  4 +--
 .../convert_scatter_elements_to_scatter.cpp        |  6 ++--
 .../vpu/common/include/vpu/utils/simple_math.hpp   |  3 +-
 .../vpu/common/include/vpu/utils/small_vector.hpp  |  6 +++-
 .../src/ngraph/operations/out_shape_of_reshape.cpp |  8 ++---
 .../src/ngraph/operations/static_shape_nonzero.cpp |  8 ++---
 .../dynamic_to_static_shape_matmul.cpp             |  2 +-
 .../dynamic_to_static_shape_strided_slice.cpp      |  4 +--
 .../src/vpu/common/src/utils/simple_math.cpp       |  2 +-
 .../graph_transformer/include/vpu/model/data.hpp   |  4 +--
 .../include/vpu/model/data_desc.hpp                |  4 +--
 .../graph_transformer/include/vpu/model/model.hpp  |  4 +--
 .../graph_transformer/include/vpu/model/stage.hpp  | 12 +++----
 .../graph_transformer/src/backend/serialize.cpp    |  6 ++--
 .../src/frontend/custom_kernel.cpp                 |  4 +--
 .../src/frontend/custom_layer.cpp                  |  4 +--
 .../graph_transformer/src/middleend/hw/tiling.cpp  |  2 +-
 .../src/middleend/passes/hw_extra_split.cpp        |  6 ++--
 .../src/middleend/passes/merge_hw_stages.cpp       |  2 +-
 .../src/middleend/passes/merge_permute_stages.cpp  |  2 +-
 .../src/middleend/passes/reshape_dilation_conv.cpp |  2 +-
 .../src/middleend/passes/split_conv3d_into_2d.cpp  |  2 +-
 .../src/middleend/passes/split_grouped_conv.cpp    |  2 +-
 .../src/middleend/passes/split_pool3d_into_2d.cpp  |  2 +-
 .../src/middleend/passes/weights_analysis.cpp      |  2 +-
 .../model/data_contents/batch_norm_contents.cpp    |  4 +--
 .../model/data_contents/deconvolution_contents.cpp |  8 ++---
 .../model/data_contents/hw_const_data_content.cpp  |  2 +-
 .../src/model/data_contents/priorbox_contents.cpp  |  8 ++---
 .../src/model/data_contents/scaled_content.cpp     |  2 +-
 .../vpu/graph_transformer/src/model/data_desc.cpp  | 12 +++----
 .../vpu/graph_transformer/src/stages/concat.cpp    |  2 +-
 .../graph_transformer/src/stages/convolution.cpp   |  4 +--
 .../vpu/graph_transformer/src/stages/custom.cpp    |  4 +--
 .../vpu/graph_transformer/src/stages/eltwise.cpp   |  4 +--
 .../src/stages/exp_detectionoutput.cpp             |  6 ++--
 .../src/vpu/graph_transformer/src/stages/fc.cpp    |  2 +-
 .../vpu/graph_transformer/src/stages/loop_end.cpp  | 10 +++---
 .../graph_transformer/src/stages/loop_start.cpp    |  6 ++--
 .../src/vpu/graph_transformer/src/stages/mtcnn.cpp |  2 +-
 .../vpu/graph_transformer/src/stages/mx_stage.cpp  | 12 +++----
 .../src/vpu/graph_transformer/src/stages/norm.cpp  |  2 +-
 .../vpu/graph_transformer/src/stages/pooling.cpp   |  4 +--
 .../vpu/graph_transformer/src/stages/proposal.cpp  |  6 ++--
 .../vpu/graph_transformer/src/stages/reduce.cpp    |  2 +-
 .../vpu/graph_transformer/src/stages/resample.cpp  |  2 +-
 .../src/vpu/graph_transformer/src/stages/rnn.cpp   | 16 ++++-----
 .../src/vpu/myriad_plugin/myriad_executor.cpp      | 10 +++---
 .../src/vpu/myriad_plugin/myriad_infer_request.cpp |  6 ++--
 .../thirdparty/movidius/XLink/pc/PlatformData.c    |  4 +--
 .../movidius/XLink/pc/PlatformDeviceControl.c      |  4 +--
 .../movidius/XLink/pc/Win/src/win_pthread.c        |  2 +-
 .../movidius/XLink/pc/protocols/pcie_host.c        |  6 ++--
 .../movidius/XLink/pc/protocols/usb_boot.c         |  2 +-
 .../thirdparty/movidius/mvnc/src/mvnc_api.c        | 28 ++++++++-------
 .../thirdparty/movidius/mvnc/src/mvnc_data.c       |  2 +-
 .../movidius/mvnc/src/watchdog/watchdog.cpp        |  4 +--
 .../movidius/mvnc/src/watchdog/xlink_device.cpp    |  2 +-
 ngraph/core/builder/src/builder/reshape.cpp        |  2 +-
 ngraph/core/include/ngraph/op/constant.hpp         |  8 +++++
 ngraph/core/include/ngraph/type/bfloat16.hpp       |  6 ++++
 ngraph/core/include/ngraph/type/float16.hpp        |  6 ++++
 .../include/ngraph/runtime/reference/roi_align.hpp |  2 +-
 .../include/onnx_import/core/tensor.hpp            |  7 ++++
 ngraph/frontend/onnx_import/src/utils/reshape.cpp  |  4 +--
 109 files changed, 313 insertions(+), 261 deletions(-)

diff --git a/inference-engine/src/inference_engine/ie_blob_stream.cpp b/inference-engine/src/inference_engine/ie_blob_stream.cpp
index 24cb368..ad24157 100644
--- a/inference-engine/src/inference_engine/ie_blob_stream.cpp
+++ b/inference-engine/src/inference_engine/ie_blob_stream.cpp
@@ -37,7 +37,7 @@ std::streampos InferenceEngine::details::BlobStream::BlobBuffer::seekoff(std::st
         setg(eback(), eback() + off, egptr());
         break;
     case std::ios_base::cur:
-        gbump(off);
+        gbump(static_cast<int>(off));
         break;
     case std::ios_base::end:
         setg(eback(), egptr() + off, egptr());
diff --git a/inference-engine/src/inference_engine/ie_data.cpp b/inference-engine/src/inference_engine/ie_data.cpp
index b43ac3c..c3dc492 100644
--- a/inference-engine/src/inference_engine/ie_data.cpp
+++ b/inference-engine/src/inference_engine/ie_data.cpp
@@ -44,7 +44,8 @@ Blob::Ptr Blob::CreateFromData(const DataPtr& data) {
     }
 }
 
-struct Data::Impl {
+class Data::Impl {
+public:
     /**
      * @brief A pointer to the layer that creates this data element, null for input data elements
      */
diff --git a/inference-engine/src/inference_engine/threading/ie_istreams_executor.cpp b/inference-engine/src/inference_engine/threading/ie_istreams_executor.cpp
index 40371fb..6ee3091 100644
--- a/inference-engine/src/inference_engine/threading/ie_istreams_executor.cpp
+++ b/inference-engine/src/inference_engine/threading/ie_istreams_executor.cpp
@@ -52,9 +52,9 @@ void IStreamsExecutor::Config::SetConfig(const std::string& key, const std::stri
         }
     } else if (key == CONFIG_KEY(CPU_THROUGHPUT_STREAMS)) {
         if (value == CONFIG_VALUE(CPU_THROUGHPUT_NUMA)) {
-            _streams = getAvailableNUMANodes().size();
+            _streams = static_cast<int>(getAvailableNUMANodes().size());
         } else if (value == CONFIG_VALUE(CPU_THROUGHPUT_AUTO)) {
-            const int sockets = getAvailableNUMANodes().size();
+            const int sockets = static_cast<int>(getAvailableNUMANodes().size());
             // bare minimum of streams (that evenly divides available number of core)
             const int num_cores = sockets == 1 ? std::thread::hardware_concurrency() : getNumberOfCPUCores();
             if (0 == num_cores % 4)
@@ -149,4 +149,4 @@ IStreamsExecutor::Config IStreamsExecutor::Config::MakeDefaultMultiThreaded(cons
     return streamExecutorConfig;
 }
 
-}  // namespace InferenceEngine
\ No newline at end of file
+}  // namespace InferenceEngine
diff --git a/inference-engine/src/legacy_api/include/legacy/transformations/convert_opset1_to_legacy/convert_mul_or_add_finally.hpp b/inference-engine/src/legacy_api/include/legacy/transformations/convert_opset1_to_legacy/convert_mul_or_add_finally.hpp
index 9d0252e..b34bee5 100755
--- a/inference-engine/src/legacy_api/include/legacy/transformations/convert_opset1_to_legacy/convert_mul_or_add_finally.hpp
+++ b/inference-engine/src/legacy_api/include/legacy/transformations/convert_opset1_to_legacy/convert_mul_or_add_finally.hpp
@@ -109,7 +109,7 @@ ngraph::graph_rewrite_callback get_callback() {
     3. data_shape{64, 64} and const_shape{1, 1, 1} - constant broadcasts data_shape with additional dimension
     */
     auto constant_broadcast_output = [](const ngraph::PartialShape & data_pshape, const ngraph::Shape & const_shape) -> bool {
-        if (data_pshape.rank().is_dynamic() || const_shape.size() > data_pshape.rank().get_length()) {
+        if (data_pshape.rank().is_dynamic() || const_shape.size() > static_cast<size_t>(data_pshape.rank().get_length())) {
             return true;
         }
@@ -276,11 +276,11 @@ ngraph::graph_rewrite_callback get_callback() {
         // In case Add we create fake scale equal to 1, in case of Multiply we create fake shift equal to 0
         std::shared_ptr<ngraph::op::PowerIE> power;
         if (std::is_same<T, ngraph::opset1::Add>()) {
-            power = std::make_shared<ngraph::op::PowerIE>(data_node, 1., 1., value, lin_op->get_output_element_type(0));
+            power = std::make_shared<ngraph::op::PowerIE>(data_node, 1.0f, 1.0f, value, lin_op->get_output_element_type(0));
         } else if (std::is_same<T, ngraph::opset1::Multiply>()) {
-            power = std::make_shared<ngraph::op::PowerIE>(data_node, 1., value, 0., lin_op->get_output_element_type(0));
+            power = std::make_shared<ngraph::op::PowerIE>(data_node, 1.0f, value, 0.0f, lin_op->get_output_element_type(0));
         } else if (std::is_same<T, ngraph::opset1::Subtract>()) {
-            power = std::make_shared<ngraph::op::PowerIE>(data_node, 1., 1., -value, lin_op->get_output_element_type(0));
+            power = std::make_shared<ngraph::op::PowerIE>(data_node, 1.0f, 1.0f, -value, lin_op->get_output_element_type(0));
         } else {
             return false;
         }
diff --git a/inference-engine/src/legacy_api/include/legacy/transformations/convert_opset1_to_legacy/reshape_fc_fusion.hpp b/inference-engine/src/legacy_api/include/legacy/transformations/convert_opset1_to_legacy/reshape_fc_fusion.hpp
index b91499e..e0e773c 100644
--- a/inference-engine/src/legacy_api/include/legacy/transformations/convert_opset1_to_legacy/reshape_fc_fusion.hpp
+++ b/inference-engine/src/legacy_api/include/legacy/transformations/convert_opset1_to_legacy/reshape_fc_fusion.hpp
@@ -73,7 +73,7 @@ private:
         // Check that Weights[O, C*H*W] consistent with Input[N, C, H, W]
         auto shape_w = fc->input_value(1).get_shape();
-        if (shape_in[0] != shape_out[0] || std::accumulate(shape_in.begin() + 1, shape_in.end(), 1UL, std::multiplies<size_t>()) != shape_w[1]) {
+        if (shape_in[0] != shape_out[0] || std::accumulate(shape_in.begin() + 1, shape_in.end(), size_t{1}, std::multiplies<size_t>()) != shape_w[1]) {
             return false;
         }
diff --git a/inference-engine/src/legacy_api/src/graph_transformer.cpp b/inference-engine/src/legacy_api/src/graph_transformer.cpp
index 0a252e4..ff7c98b 100644
--- a/inference-engine/src/legacy_api/src/graph_transformer.cpp
+++ b/inference-engine/src/legacy_api/src/graph_transformer.cpp
@@ -372,7 +372,10 @@ static CNNLayerPtr replace_with_static_reshape(CNNLayerPtr &layer) {
     // tensor statistic for particular reshape.
     auto reshape = std::make_shared<ReshapeLayer>(
         LayerParams{layer->name, "Reshape", precision});
-    reshape->shape = std::vector<int>(shape.begin(), shape.end());
+
+    reshape->shape.resize(shape.size());
+    for (size_t p = 0; p < shape.size(); ++p)
+        reshape->shape[p] = static_cast<int>(shape[p]);
 
     // replacement
     auto &input_to_map = getInputTo(in_data);
diff --git a/inference-engine/src/legacy_api/src/ie_cnn_layer_builder_ngraph.cpp b/inference-engine/src/legacy_api/src/ie_cnn_layer_builder_ngraph.cpp
index 07f74fe..e846c64 100644
--- a/inference-engine/src/legacy_api/src/ie_cnn_layer_builder_ngraph.cpp
+++ b/inference-engine/src/legacy_api/src/ie_cnn_layer_builder_ngraph.cpp
@@ -1532,7 +1532,7 @@ CNNLayer::Ptr NodeConverter::createLayer(const std::shar
     auto img_H = img_shape[2];
     auto data_H = data_shape[2];
     if (attr.step == -1)
-        attr.step = 1. * img_H / data_H;
+        attr.step = static_cast<float>(1. * img_H / data_H);
     else
         attr.step *= img_H;
     for (auto& size : attr.min_size)
diff --git a/inference-engine/src/legacy_api/src/ie_layers_internal.cpp b/inference-engine/src/legacy_api/src/ie_layers_internal.cpp
index 1a5ceaa..d37bf5e 100644
--- a/inference-engine/src/legacy_api/src/ie_layers_internal.cpp
+++ b/inference-engine/src/legacy_api/src/ie_layers_internal.cpp
@@ -50,9 +50,9 @@ Paddings getPaddingsInternal(const Layer& layer) {
             if (shape_size < 4 || shape_size > 5) THROW_IE_EXCEPTION << "input shape must be 4D or 5D";
             std::vector<int> shapes;
-            shapes.push_back(shape[shape_size - 1]);
-            shapes.push_back(shape[shape_size - 2]);
-            if (shape_size > 4) shapes.push_back(shape[shape_size - 3]);
+            shapes.push_back(static_cast<int>(shape[shape_size - 1]));
+            shapes.push_back(static_cast<int>(shape[shape_size - 2]));
+            if (shape_size > 4) shapes.push_back(static_cast<int>(shape[shape_size - 3]));
 
             PropertyVector<unsigned int> pad_begin, pad_end;
@@ -134,8 +134,8 @@ int getNumIteration(const TensorIterator& tensorIterator) {
                                << rule.axis << ", dimensions number = " << dimensions.size() << " (out of range)";
         }
         const auto space = dimensions[axis];
-        const int start = (rule.start < 0 ? (space + 1) : 0) + rule.start;
-        const int end = (rule.end < 0 ? (space + 1) : 0) + rule.end;
+        const int start = static_cast<int>((rule.start < 0 ? (space + 1) : 0) + rule.start);
+        const int end = static_cast<int>((rule.end < 0 ? (space + 1) : 0) + rule.end);
 
         const auto stride = rule.stride;
         if (stride == 0) {
diff --git a/inference-engine/src/legacy_api/src/net_pass.cpp b/inference-engine/src/legacy_api/src/net_pass.cpp
index d568c52..dafe806 100644
--- a/inference-engine/src/legacy_api/src/net_pass.cpp
+++ b/inference-engine/src/legacy_api/src/net_pass.cpp
@@ -199,7 +199,7 @@ inline bool is_full_ranged(const TensorIterator::PortMap& rule, const DataPtr& d
     if (rule.axis == -1 || !one_of(rule.stride, 1, -1)) return false;
 
     auto& shape = data->getDims();
-    int size = shape[rule.axis];
+    int size = static_cast<int>(shape[rule.axis]);
 
     int begin = rule.start >= 0 ? rule.start : size + rule.start + 1;
     int end = rule.end >= 0 ? rule.end : size + rule.end + 1;
@@ -406,7 +406,7 @@ bool convertToRNNSeq(CNNLayerPtr cur, const N& net) {
     // Check port mapping
     auto _indx_in = [&](const std::vector<DataPtr>& scope, const DataPtr& data) {
-        int indx = std::find(scope.begin(), scope.end(), data) - scope.begin();
+        int indx = static_cast<int>(std::find(scope.begin(), scope.end(), data) - scope.begin());
         return indx == scope.size() ? -1 : indx;
     };
@@ -670,7 +670,7 @@ static CNNLayerPtr _fc(std::string name, Precision prc, SizeVector dims, Blob::P
     res->_weights = W;
     res->_biases = B;
-    res->_out_num = dims[1];
+    res->_out_num = static_cast<unsigned>(dims[1]);
     res->blobs["weights"] = W;
     res->blobs["biases"] = B;
     res->params["out-size"] = std::to_string(dims[1]);
@@ -945,7 +945,7 @@ static bool unrollLSTMCellBody(CNNLayerPtr cur) {
     // operations
     auto concat = _concat(name + ":concat", prc, {N, D + S}, 2);
-    auto split = _split(name + ":split", prc, {N, S}, G);
+    auto split = _split(name + ":split", prc, {N, S}, static_cast<int>(G));
     auto fc = _fc(name + ":fc", prc, {N, S * G}, cell->_weights, cell->_biases);
 
     const std::string _f = cell->activations[0], _g = cell->activations[1], _h = cell->activations[2];
diff --git a/inference-engine/src/legacy_api/src/ngraph_ops/crop_ie.cpp b/inference-engine/src/legacy_api/src/ngraph_ops/crop_ie.cpp
index 950a1f1..575d84a 100644
--- a/inference-engine/src/legacy_api/src/ngraph_ops/crop_ie.cpp
+++ b/inference-engine/src/legacy_api/src/ngraph_ops/crop_ie.cpp
@@ -38,7 +38,7 @@ void op::CropIE::validate_and_infer_types() {
     ngraph::Shape output_shape(input_shape);
     for (int i = 0; i < axes.size(); ++i) {
-        NODE_VALIDATION_CHECK(this, axes[i] >= 0 && axes[i] < output_shape.size(),
+        NODE_VALIDATION_CHECK(this, axes[i] >= 0 && axes[i] < static_cast<int64_t>(output_shape.size()),
                               "axes should be positive and less than number of input dims");
         output_shape[axes[i]] = dim[i];
     }
diff --git a/inference-engine/src/legacy_api/src/ngraph_ops/interp.cpp b/inference-engine/src/legacy_api/src/ngraph_ops/interp.cpp
index ae60f7a..19ffed8 100644
--- a/inference-engine/src/legacy_api/src/ngraph_ops/interp.cpp
+++ b/inference-engine/src/legacy_api/src/ngraph_ops/interp.cpp
@@ -44,8 +44,9 @@ void op::Interp::validate_and_infer_types() {
                 scale /= m_attrs.shrink_factor;
             }
         }
-        output_shape[2] = input_shape[2] * scale;
-        output_shape[3] = input_shape[3] * scale;
+
+        output_shape[2] = static_cast<size_t>(input_shape[2] * scale);
+        output_shape[3] = static_cast<size_t>(input_shape[3] * scale);
     }
 
     if (m_attrs.height > 0) {
diff --git a/inference-engine/src/legacy_api/src/transformations/convert_opset1_to_legacy/convert_interpolate_to_interp_or_resample.cpp b/inference-engine/src/legacy_api/src/transformations/convert_opset1_to_legacy/convert_interpolate_to_interp_or_resample.cpp
index bc5e200..b9082d0 100644
--- a/inference-engine/src/legacy_api/src/transformations/convert_opset1_to_legacy/convert_interpolate_to_interp_or_resample.cpp
+++ b/inference-engine/src/legacy_api/src/transformations/convert_opset1_to_legacy/convert_interpolate_to_interp_or_resample.cpp
@@ -74,10 +74,10 @@ ngraph::pass::ConvertInterpolateToInterpOrResampleMatcher::ConvertInterpolateToI
         if (num_of_spatial_vars == 2 && interpolate_axes.size() == 2 && std::set<std::string>{"nearest", "cubic", "area"}.count(interpolate_mode) == 0) {
             auto attrs = ngraph::op::InterpolateIEAttrs();
-            attrs.pad_beg = interpolate_attrs.pads_begin[0];
-            attrs.pad_end = interpolate_attrs.pads_end[0];
-            attrs.height = out_spatial_shape[0];
-            attrs.width = out_spatial_shape[1];
+            attrs.pad_beg = static_cast<int>(interpolate_attrs.pads_begin[0]);
+            attrs.pad_end = static_cast<int>(interpolate_attrs.pads_end[0]);
+            attrs.height = static_cast<int>(out_spatial_shape[0]);
+            attrs.width = static_cast<int>(out_spatial_shape[1]);
             attrs.align_corners = interpolate_attrs.align_corners;
             attrs.mode = interpolate_mode;
             attrs.antialias = interpolate_attrs.antialias;
diff --git a/inference-engine/src/legacy_api/src/transformations/convert_opset1_to_legacy/convert_lrn_to_lrn_ie.cpp b/inference-engine/src/legacy_api/src/transformations/convert_opset1_to_legacy/convert_lrn_to_lrn_ie.cpp
index 6ce2e68..cde13b2 100644
--- a/inference-engine/src/legacy_api/src/transformations/convert_opset1_to_legacy/convert_lrn_to_lrn_ie.cpp
+++ b/inference-engine/src/legacy_api/src/transformations/convert_opset1_to_legacy/convert_lrn_to_lrn_ie.cpp
@@ -38,7 +38,7 @@ ngraph::pass::ConvertLRNToLegacyMatcher::ConvertLRNToLegacyMatcher() {
         } else {
             std::vector<bool> norm(lrn->get_shape().size(), false);
             for (auto & axis : axis_value) {
-                if (axis < 0 || axis >= norm.size()) {
+                if (axis < 0 || static_cast<size_t>(axis) >= norm.size()) {
                     return false;
                 }
                 norm[axis] = true;
diff --git a/inference-engine/src/legacy_api/src/transformations/convert_opset1_to_legacy/convert_mul_add_to_scaleshift_or_power.cpp b/inference-engine/src/legacy_api/src/transformations/convert_opset1_to_legacy/convert_mul_add_to_scaleshift_or_power.cpp
index d723802..000f4a5 100644
--- a/inference-engine/src/legacy_api/src/transformations/convert_opset1_to_legacy/convert_mul_add_to_scaleshift_or_power.cpp
+++ b/inference-engine/src/legacy_api/src/transformations/convert_opset1_to_legacy/convert_mul_add_to_scaleshift_or_power.cpp
@@ -200,7 +200,7 @@ void ngraph::pass::ConvertMulAddToScaleShiftOrPower::convert_mul_add_to_scaleshi
         }
 
         auto output_type = m.get_match_root()->get_output_element_type(0);
-        auto power = std::make_shared<ngraph::op::PowerIE>(data_node, 1., scale, shift, output_type);
+        auto power = std::make_shared<ngraph::op::PowerIE>(data_node, 1.0f, scale, shift, output_type);
         power->set_friendly_name(add_node->get_friendly_name());
         ngraph::copy_runtime_info({mul_node, add_node}, power);
         ngraph::replace_node(m.get_match_root(), power);
diff --git a/inference-engine/src/legacy_api/src/transformations/convert_opset1_to_legacy/convert_normalizel2_to_normalize_ie.cpp b/inference-engine/src/legacy_api/src/transformations/convert_opset1_to_legacy/convert_normalizel2_to_normalize_ie.cpp
index a5f376c..3dda385 100644
--- a/inference-engine/src/legacy_api/src/transformations/convert_opset1_to_legacy/convert_normalizel2_to_normalize_ie.cpp
+++ b/inference-engine/src/legacy_api/src/transformations/convert_opset1_to_legacy/convert_normalizel2_to_normalize_ie.cpp
@@ -19,7 +19,7 @@ ngraph::pass::ConvertNormalizeL2WithMulToNormalizeIE::ConvertNormalizeL2WithMulT
     auto input_1 = std::make_shared<ngraph::opset1::Parameter>(element::f32, Shape{1, 1, 1, 1});
     auto axis = std::make_shared<ngraph::opset1::Constant>(element::i64, Shape{1}, std::vector<int64_t>{0});
-    auto normalize = std::make_shared<ngraph::opset1::NormalizeL2>(input_0, axis, 0, ngraph::op::EpsMode::ADD);
+    auto normalize = std::make_shared<ngraph::opset1::NormalizeL2>(input_0, axis, 0.0f, ngraph::op::EpsMode::ADD);
     auto mul = std::make_shared<ngraph::opset1::Multiply>(normalize, input_1);
 
     ngraph::graph_rewrite_callback callback = [](pattern::Matcher& m) {
@@ -80,7 +80,7 @@ NGRAPH_RTTI_DEFINITION(ngraph::pass::ConvertNormalizeL2ToLegacyMatcher, "Convert
 ngraph::pass::ConvertNormalizeL2ToLegacyMatcher::ConvertNormalizeL2ToLegacyMatcher() {
     auto input_0 = std::make_shared<ngraph::opset1::Parameter>(element::f32, Shape{1, 1, 1, 1});
     auto axis = std::make_shared<ngraph::opset1::Constant>(element::i64, Shape{1}, std::vector<int64_t>{0});
-    auto normalize = std::make_shared<ngraph::opset1::NormalizeL2>(input_0, axis, 0, ngraph::op::EpsMode::ADD);
+    auto normalize = std::make_shared<ngraph::opset1::NormalizeL2>(input_0, axis, 0.0f, ngraph::op::EpsMode::ADD);
 
     ngraph::matcher_pass_callback callback = [](pattern::Matcher& m) {
         auto normalize = std::dynamic_pointer_cast<ngraph::op::NormalizeIE>(m.get_match_root());
diff --git a/inference-engine/src/legacy_api/src/transformations/convert_opset1_to_legacy/convert_one_hot_to_one_hot_ie.cpp b/inference-engine/src/legacy_api/src/transformations/convert_opset1_to_legacy/convert_one_hot_to_one_hot_ie.cpp
index f23ce0d..0da3b68 100644
--- a/inference-engine/src/legacy_api/src/transformations/convert_opset1_to_legacy/convert_one_hot_to_one_hot_ie.cpp
+++ b/inference-engine/src/legacy_api/src/transformations/convert_opset1_to_legacy/convert_one_hot_to_one_hot_ie.cpp
@@ -40,7 +40,7 @@ ngraph::pass::ConvertOneHotToOneHotIEMatcher::ConvertOneHotToOneHotIEMatcher() {
         auto off_value = std::stof(off_value_node->convert_value_to_string(0));
 
         auto one_hot_ie = std::make_shared<ngraph::op::OneHotIE>(one_hot->input_value(0),
-                one_hot->get_axis(), depth_value, on_value, off_value, m_output_type);
+                static_cast<int>(one_hot->get_axis()), depth_value, on_value, off_value, m_output_type);
         one_hot_ie->set_friendly_name(one_hot->get_friendly_name());
 
         // insert Convert layer to cast output to a correct data type defined by the on/off values
@@ -63,4 +63,4 @@ ngraph::pass::ConvertOneHotToOneHotIEMatcher::ConvertOneHotToOneHotIEMatcher() {
 
 void ngraph::pass::ConvertOneHotToOneHotIEMatcher::detect_output_type(const std::shared_ptr<Function> &f) {
     m_output_type = ngraph::op::util::has_f16_constants(f) ? element::Type_t::f16 : element::Type_t::f32;
-}
\ No newline at end of file
+}
diff --git a/inference-engine/src/legacy_api/src/transformations/convert_opset1_to_legacy/convert_power_to_power_ie.cpp b/inference-engine/src/legacy_api/src/transformations/convert_opset1_to_legacy/convert_power_to_power_ie.cpp
index 96a4ddc..14f6aa3 100644
--- a/inference-engine/src/legacy_api/src/transformations/convert_opset1_to_legacy/convert_power_to_power_ie.cpp
+++ b/inference-engine/src/legacy_api/src/transformations/convert_opset1_to_legacy/convert_power_to_power_ie.cpp
@@ -33,7 +33,7 @@ ngraph::pass::ConvertPowerToPowerIEMatcher::ConvertPowerToPowerIEMatcher() {
             return false;
         }
 
-        auto power_ie = std::make_shared<ngraph::op::PowerIE>(power->input(0).get_source_output(), value, 1, 0, power->output(0).get_element_type());
+        auto power_ie = std::make_shared<ngraph::op::PowerIE>(power->input(0).get_source_output(), value, 1.0f, 0.0f, power->output(0).get_element_type());
         power_ie->set_friendly_name(power->get_friendly_name());
         ngraph::copy_runtime_info(power, power_ie);
         ngraph::replace_node(power, power_ie);
diff --git a/inference-engine/src/legacy_api/src/transformations/convert_opset1_to_legacy/convert_sqrt_to_power_ie.cpp b/inference-engine/src/legacy_api/src/transformations/convert_opset1_to_legacy/convert_sqrt_to_power_ie.cpp
index 479ec57..08880bc 100644
--- a/inference-engine/src/legacy_api/src/transformations/convert_opset1_to_legacy/convert_sqrt_to_power_ie.cpp
+++ b/inference-engine/src/legacy_api/src/transformations/convert_opset1_to_legacy/convert_sqrt_to_power_ie.cpp
@@ -25,7 +25,7 @@ ngraph::pass::ConvertSqrtToPowerIEMatcher::ConvertSqrtToPowerIEMatcher() {
         if (!sqrt) {
             return false;
         }
-        auto power_ie = std::make_shared<ngraph::op::PowerIE>(sqrt->input(0).get_source_output(), 0.5f, 1, 0, sqrt->output(0).get_element_type());
+        auto power_ie = std::make_shared<ngraph::op::PowerIE>(sqrt->input(0).get_source_output(), 0.5f, 1.0f, 0.0f, sqrt->output(0).get_element_type());
         power_ie->set_friendly_name(sqrt->get_friendly_name());
         ngraph::copy_runtime_info(sqrt, power_ie);
         ngraph::replace_node(sqrt, power_ie);
diff --git a/inference-engine/src/legacy_api/src/transformations/convert_opset1_to_legacy/convert_strided_slice_to_crop.cpp b/inference-engine/src/legacy_api/src/transformations/convert_opset1_to_legacy/convert_strided_slice_to_crop.cpp
index 7b55925..543de6e 100644
--- a/inference-engine/src/legacy_api/src/transformations/convert_opset1_to_legacy/convert_strided_slice_to_crop.cpp
+++ b/inference-engine/src/legacy_api/src/transformations/convert_opset1_to_legacy/convert_strided_slice_to_crop.cpp
@@ -94,8 +94,8 @@ ngraph::pass::ConvertStridedSliceToCropMatcher::ConvertStridedSliceToCropMatcher
             }
 
             // -1 because it's a position of ellipses
-            unsigned long num_input_axis_after_ellipses = (begin.size() - axis - num_new_axis_after_ellipses - 1);
-            unsigned long num_of_hidden_dims = input_shape.size() - num_input_axis_after_ellipses
+            size_t num_input_axis_after_ellipses = (begin.size() - axis - num_new_axis_after_ellipses - 1);
+            size_t num_of_hidden_dims = input_shape.size() - num_input_axis_after_ellipses
                                                   - num_input_axis_before_ellipses;
             for (size_t i = 0; i < num_of_hidden_dims; ++i) {
                 axes.emplace_back(uniq_id);
diff --git a/inference-engine/src/legacy_api/src/transformations/convert_opset1_to_legacy/fc_bias_fusion.cpp b/inference-engine/src/legacy_api/src/transformations/convert_opset1_to_legacy/fc_bias_fusion.cpp
index 443f560..90e7b9f 100644
--- a/inference-engine/src/legacy_api/src/transformations/convert_opset1_to_legacy/fc_bias_fusion.cpp
+++ b/inference-engine/src/legacy_api/src/transformations/convert_opset1_to_legacy/fc_bias_fusion.cpp
@@ -47,7 +47,7 @@ ngraph::pass::FullyConnectedBiasFusion::FullyConnectedBiasFusion() {
         }
 
         Shape output_shape(m_fc->get_shape());
-        size_t bias_size = std::accumulate(bias_shape.begin(), bias_shape.end(), 1, std::multiplies<size_t>());
+        size_t bias_size = std::accumulate(bias_shape.begin(), bias_shape.end(), size_t{1}, std::multiplies<size_t>());
         if (bias_shape.empty() || bias_shape.back() != output_shape.back() || bias_shape.back() != bias_size) {
             return false;
         }
diff --git a/inference-engine/src/low_precision_transformations/include/low_precision/layer_transformation.hpp b/inference-engine/src/low_precision_transformations/include/low_precision/layer_transformation.hpp
index 0e89c8a..eaeea83 100644
--- a/inference-engine/src/low_precision_transformations/include/low_precision/layer_transformation.hpp
+++ b/inference-engine/src/low_precision_transformations/include/low_precision/layer_transformation.hpp
@@ -113,24 +113,24 @@ public:
     static float getMin(const size_t quantizationLevels, const bool signedInterval) {
         if (quantizationLevels == 255) {
-            return signedInterval ? -127.0 : 0.0;
+            return signedInterval ? -127.0f : 0.0f;
         } else if (quantizationLevels == 256) {
-            return signedInterval ? -128.0 : 0.0;
+            return signedInterval ? -128.0f : 0.0f;
         } else {
             // THROW_TRANSFORMATION_EXCEPTION << "quantization level " << quantizationLevels << " is not supported";
             // FIXME: not completed
-            return signedInterval ? -128.0 : 0.0;
+            return signedInterval ? -128.0f : 0.0f;
         }
     }
 
     static float getMax(const size_t quantizationLevels, const bool signedInterval) {
         if ((quantizationLevels == 255) || (quantizationLevels == 256)) {
-            return signedInterval ? 127.0 : 255.0;
+            return signedInterval ? 127.0f : 255.0f;
         } else {
             // THROW_TRANSFORMATION_EXCEPTION << "quantization level " << quantizationLevels << " is not supported";
             // FIXME: not completed
             // return quantizationLevels - 1.0;
-            return signedInterval ? 127.0 : 255.0;
+            return signedInterval ? 127.0f : 255.0f;
         }
     }
 };
diff --git a/inference-engine/src/low_precision_transformations/src/common/eltwise_base_transformation.cpp b/inference-engine/src/low_precision_transformations/src/common/eltwise_base_transformation.cpp
index 619a662..aa4a869 100644
--- a/inference-engine/src/low_precision_transformations/src/common/eltwise_base_transformation.cpp
+++ b/inference-engine/src/low_precision_transformations/src/common/eltwise_base_transformation.cpp
@@ -129,7 +129,7 @@ int EltwiseBaseTransformation::getNotEmpty(const std::shared_ptr<Node>& eltwise)
         const std::shared_ptr<Node>& data = dataNodes[i];
         if ((allBranchesAreEqual && isBroadcasted(data->get_output_shape(0))) ||
             (!allBranchesAreEqual && isBranchWithTargetType(as_type_ptr<opset1::FakeQuantize>(data)))) {
-            return i;
+            return static_cast<int>(i);
         }
     }
diff --git a/inference-engine/src/low_precision_transformations/src/common/multiply_to_group_convolution.cpp b/inference-engine/src/low_precision_transformations/src/common/multiply_to_group_convolution.cpp
index c874a04..48e48c6 100644
--- a/inference-engine/src/low_precision_transformations/src/common/multiply_to_group_convolution.cpp
+++ b/inference-engine/src/low_precision_transformations/src/common/multiply_to_group_convolution.cpp
@@ -49,7 +49,7 @@ bool MultiplyToGroupConvolutionTransformation::transform(TransformationContext&
             }
         }
     } else {
-        const float channelsInGroup = outputChannelsCount / group;
+        const size_t channelsInGroup = outputChannelsCount / group;
         for (size_t outputChannel = 0ul; outputChannel < outputChannelsCount; ++outputChannel) {
             const size_t groupIndex = outputChannel / channelsInGroup;
             for (size_t kernel = 0ul; kernel < kernelsCount; ++kernel) {
diff --git a/inference-engine/src/low_precision_transformations/src/common/mvn.cpp b/inference-engine/src/low_precision_transformations/src/common/mvn.cpp
index 1acf8a5..5998edf 100644
--- a/inference-engine/src/low_precision_transformations/src/common/mvn.cpp
+++ b/inference-engine/src/low_precision_transformations/src/common/mvn.cpp
@@ -27,7 +27,7 @@ std::shared_ptr<ngraph::op::Constant> createNewScalesConst(const ngraph::op::Con
     std::vector<T> newData(source.size());
     for (size_t i = 0; i < source.size(); ++i) {
-        newData[i] = source[i] < 0 ? -1 : 1;
+        newData[i] = source[i] < 0 ? T{-1} : T{1};
     }
 
     const ngraph::element::Type type = originalConst.get_output_element_type(0);
diff --git a/inference-engine/src/low_precision_transformations/src/common/normalize_l2.cpp b/inference-engine/src/low_precision_transformations/src/common/normalize_l2.cpp
index 8397066..15999cc 100644
--- a/inference-engine/src/low_precision_transformations/src/common/normalize_l2.cpp
+++ b/inference-engine/src/low_precision_transformations/src/common/normalize_l2.cpp
@@ -26,7 +26,7 @@ std::shared_ptr<ngraph::op::Constant> createNewScalesConst(const ngraph::op::Con
     std::vector<T> newData(source.size());
     for (size_t i = 0; i < source.size(); ++i) {
-        newData[i] = source[i] < 0 ? -1 : 1;
+        newData[i] = source[i] < 0 ? T{-1} : T{1};
     }
 
     const ngraph::element::Type type = originalConst.get_output_element_type(0);
diff --git a/inference-engine/src/low_precision_transformations/src/common/reshape.cpp b/inference-engine/src/low_precision_transformations/src/common/reshape.cpp
index 6693ad5..58d01b7 100644
--- a/inference-engine/src/low_precision_transformations/src/common/reshape.cpp
+++ b/inference-engine/src/low_precision_transformations/src/common/reshape.cpp
@@ -64,7 +64,7 @@ void reshapeDequantizationConstant(const std::shared_ptr<opset1::Reshape>& resha
     // update Reshape constant
     const std::vector<int> reshapeConstValues = as_type_ptr<opset1::Constant>(reshape->get_input_node_shared_ptr(1))->cast_vector<int>();
     std::vector<int> newReshapeConstValues(reshapeConstValues);
-    for (int i = newReshapeConstValues.size() - 1; i >= 0; --i) {
+    for (int i = static_cast<int>(newReshapeConstValues.size() - 1); i >= 0; --i) {
         if (newOperationConstantShape.size() <= i) {
             newReshapeConstValues[i] = 1;
         } else if (newOperationConstantShape[i] == 1ul) {
@@ -116,7 +116,7 @@ bool ReshapeTransformation::isPrecisionPreserved(std::shared_ptr<Node> op) const
 }
 
 size_t getLastNotBroadcastedChannel(const Shape& shape) {
-    for (int i = shape.size() - 1; i >= 0; --i) {
+    for (int i = static_cast<int>(shape.size()) - 1; i >= 0; --i) {
         if (shape[i] != 1ul) {
             return i;
         }
diff --git a/inference-engine/src/plugin_api/cpp_interfaces/impl/ie_infer_async_request_internal.hpp b/inference-engine/src/plugin_api/cpp_interfaces/impl/ie_infer_async_request_internal.hpp
index b32d305..ab5e264 100644
--- a/inference-engine/src/plugin_api/cpp_interfaces/impl/ie_infer_async_request_internal.hpp
+++ b/inference-engine/src/plugin_api/cpp_interfaces/impl/ie_infer_async_request_internal.hpp
@@ -13,6 +13,10 @@
 
 namespace InferenceEngine {
 
+#if defined(_MSC_VER)
+#pragma warning(disable : 4250)
+#endif
+
 /**
  * @brief minimum API to be implemented by plugin, which is used in InferRequestBase forwarding mechanism
  * @ingroup ie_dev_api_async_infer_request_api
diff --git a/inference-engine/src/plugin_api/precision_utils.h b/inference-engine/src/plugin_api/precision_utils.h
index 8450273..970c1c0 100644
--- a/inference-engine/src/plugin_api/precision_utils.h
+++ b/inference-engine/src/plugin_api/precision_utils.h
@@ -129,6 +129,11 @@ f16tof32Arrays(float* dst, const ie_fp16* src, size_t nelem, float scale = 1.f,
 INFERENCE_ENGINE_API_CPP(void)
 f32tof16Arrays(ie_fp16* dst, const float* src, size_t nelem, float scale = 1.f, float bias = 0.f);
 
+#if defined(_MSC_VER)
+#pragma warning(push)
+#pragma warning(disable : 4018)
+#endif
+
 /**
  * @brief Converts one integral type to another saturating the result if the source value doesn't fit
  * into destination type range
@@ -152,7 +157,7 @@ inline OutT saturate_cast(const InT& value) {
     const InT min = std::numeric_limits<InT>::min() > std::numeric_limits<OutT>::min() ?
                     std::numeric_limits<InT>::min() : std::numeric_limits<OutT>::min();
 
-    return std::min(std::max(value, min), max);
+    return static_cast<OutT>(std::min(std::max(value, min), max));
 }
 
 /**
@@ -175,9 +180,13 @@ inline OutT saturate_cast(const InT& value) {
     const InT max = std::numeric_limits<InT>::max() < std::numeric_limits<OutT>::max() ?
                     std::numeric_limits<InT>::max() : std::numeric_limits<OutT>::max();
 
-    return std::min(value, max);
+    return static_cast<OutT>(std::min(value, max));
 }
 
+#if defined(_MSC_VER)
+#pragma warning(pop)
+#endif
+
 /**
  * @brief Converts one integral type to another saturating the result if the source value doesn't fit
  * into destination type range
diff --git a/inference-engine/src/preprocessing/ie_preprocess_data.cpp b/inference-engine/src/preprocessing/ie_preprocess_data.cpp
index b5b2278..20c743a 100644
--- a/inference-engine/src/preprocessing/ie_preprocess_data.cpp
+++ b/inference-engine/src/preprocessing/ie_preprocess_data.cpp
@@ -78,7 +78,7 @@ void resize_bilinear(const Blob::Ptr inBlob, Blob::Ptr outBlob, uint8_t* buffer)
     for (int dx = dst_go_x; dx < dst_go_x + dwidth; dx++) {
         auto fx = static_cast<float>((dx + 0.5) * scale_x - 0.5);
-        int32_t sx = floor(fx);
+        int32_t sx = static_cast<int32_t>(floor(fx));
         fx -= sx;
 
         int32_t sx0 = sx;
@@ -98,7 +98,7 @@ void resize_bilinear(const Blob::Ptr inBlob, Blob::Ptr outBlob, uint8_t* buffer)
 
     for (int dy = dst_go_y; dy < dst_go_y + dheight; dy++) {
         auto fy = static_cast<float>((dy + 0.5) * scale_y - 0.5);
-        int32_t sy = floor(fy);
+        int32_t sy = static_cast<int32_t>(floor(fy));
         fy -= sy;
 
         int32_t sy0 = sy;
@@ -124,8 +124,8 @@ void resize_bilinear(const Blob::Ptr inBlob, Blob::Ptr outBlob, uint8_t* buffer)
             for (int x = 0; x < swidth; x++) {
                 bool use_constant0 = yofs[y] + 0 < 0 || yofs[y] + 0 >= src_full_height;
                 bool use_constant1 = yofs[y] + 1 < 0 || yofs[y] + 1 >= src_full_height;
-                float val0 = use_constant0 ? border.value : sptr_[(yofs[y] + 0) * sstep + x];
-                float val1 = use_constant1 ? border.value : sptr_[(yofs[y] + 1) * sstep + x];
+                float val0 = static_cast<float>(use_constant0 ? border.value : sptr_[(yofs[y] + 0) * sstep + x]);
+                float val1 = static_cast<float>(use_constant1 ? border.value : sptr_[(yofs[y] + 1) * sstep + x]);
 
                 float res = val0 + beta[y] * (val1 - val0);
                 tptr_[x] = res;
@@ -159,8 +159,8 @@ int getResizeAreaTabSize(int dst_go, int ssize, int dsize, float scale) {
         float fsx1 = col * scale;
         float fsx2 = fsx1 + scale;
 
-        int sx1 = ceil(fsx1);
-        int sx2 = floor(fsx2);
+        int sx1 = static_cast<int>(ceil(fsx1));
+        int sx2 = static_cast<int>(floor(fsx2));
 
         sx2 = (std::min)(sx2, ssize - 1);
         sx1 = (std::min)(sx1, sx2);
@@ -194,8 +194,8 @@ void computeResizeAreaTab(int src_go, int dst_go, int ssize, int dsize, float sc
         float fsx2 = fsx1 + scale;
         float cellWidth = (std::min)(scale, ssize - fsx1);
 
-        int sx1 = ceil(fsx1);
-        int sx2 = floor(fsx2);
+        int sx1 = static_cast<int>(ceil(fsx1));
+        int sx2 = static_cast<int>(floor(fsx2));
 
         sx2 = (std::min)(sx2, ssize - 1);
         sx1 = (std::min)(sx1, sx2);
@@ -263,8 +263,8 @@ int computeResizeAreaTabFP32(int src_go, int dst_go, int ssize, int dsize, float
         float fsx2 = fsx1 + scale;
         float cellWidth = (std::min)(scale, ssize - fsx1);
 
-        int sx1 = ceil(fsx1);
-        int sx2 = floor(fsx2);
+        int sx1 = static_cast<int>(ceil(fsx1));
+        int sx2 = static_cast<int>(floor(fsx2));
 
         sx2 = (std::min)(sx2, ssize - 1);
         sx1 = (std::min)(sx1, sx2);
@@ -447,7 +447,7 @@ void VResizeLinear(float** src, data_t* dst, const float* beta, int width) {
 
     if (sizeof(data_t) == 4) {
         for (int x = 0; x < width; x++)
-            dst[x] = (S0[x] * b0 + S1[x] * b1);
+            dst[x] = static_cast<data_t>(S0[x] * b0 + S1[x] * b1);
     } else {
         for (int x = 0; x < width; x++)
             dst[x] = saturateU32toU8(static_cast<uint32_t>(S0[x] * b0 + S1[x] * b1));
@@ -499,7 +499,7 @@ static void resize_area_upscale(const Blob::Ptr inBlob, Blob::Ptr outBlob, uint8
     float cbuf[2] = {0};
 
     for (int dx = 0; dx < dwidth; dx++) {
-        int sx = floor(dx*scale_x);
+        int sx = static_cast<int>(floor(dx*scale_x));
         float fx = (dx+1) - (sx+1)*inv_scale_x;
         fx = fx <= 0 ? 0.f : fx - floor(fx);
@@ -525,7 +525,7 @@ static void resize_area_upscale(const Blob::Ptr inBlob, Blob::Ptr outBlob, uint8
     }
 
     for (int dy = 0; dy < dheight; dy++) {
-        int sy = floor(dy*scale_y);
+        int sy = static_cast<int>(floor(dy*scale_y));
         float fy = (dy+1) - (sy+1)*inv_scale_y;
         fy = fy <= 0 ? 0.f : fy - floor(fy);
@@ -592,10 +592,10 @@ size_t resize_get_buffer_size(Blob::Ptr inBlob, Blob::Ptr outBlob, const ResizeA
     size_t origW = strides[2];
     size_t origH = strides[1] / strides[2];
 
-    const int src_full_width = origW;
-    const int src_full_height = origH;
-    const int dst_full_width = dstDims[3];
-    const int dst_full_height = dstDims[2];
+    const int src_full_width = static_cast<int>(origW);
+    const int src_full_height = static_cast<int>(origH);
+    const int dst_full_width = static_cast<int>(dstDims[3]);
+    const int dst_full_height = static_cast<int>(dstDims[2]);
 
     float scale_x = static_cast<float>(dstDims[3]) / srcDims[3];
     float scale_y = static_cast<float>(dstDims[2]) / srcDims[2];
@@ -619,9 +619,9 @@ size_t resize_get_buffer_size(Blob::Ptr inBlob, Blob::Ptr outBlob, const ResizeA
     };
 
     auto resize_area_u8_downscale_sse_buffer_size = [&]() {
-        const int dwidth = dstDims[3];
-        const int dheight = dstDims[2];
-        const int swidth = srcDims[3];
+        const int dwidth = static_cast<int>(dstDims[3]);
+        const int dheight = static_cast<int>(dstDims[2]);
+        const int swidth = static_cast<int>(srcDims[3]);
 
         const int dst_go_x = 0;
         const int dst_go_y = 0;
@@ -780,7 +780,7 @@ public:
     void isApplicable(const Blob::Ptr &src, const Blob::Ptr &dst) override;
 };
 
-StatusCode CreatePreProcessData(IPreProcessData *& data, ResponseDesc */*resp*/) noexcept {
+StatusCode CreatePreProcessData(IPreProcessData *& data, ResponseDesc * /*resp*/) noexcept {
     data = new PreProcessData();
     return StatusCode::OK;
 }
diff --git a/inference-engine/src/preprocessing/ie_preprocess_gapi_kernels.cpp b/inference-engine/src/preprocessing/ie_preprocess_gapi_kernels.cpp
index 153eaed..9e3cc25 100644
--- a/inference-engine/src/preprocessing/ie_preprocess_gapi_kernels.cpp
+++ b/inference-engine/src/preprocessing/ie_preprocess_gapi_kernels.cpp
@@ -1252,7 +1252,7 @@ struct Mapper {
     typedef MapperUnit Unit;
 
     static inline Unit map(double ratio, int start, int max, int outCoord) {
-        float f = ((outCoord + 0.5f) * ratio - 0.5f);
+        float f = static_cast<float>((outCoord + 0.5) * ratio - 0.5);
         int s = cvFloor(f);
         f -= s;
@@ -1278,7 +1278,7 @@ struct Mapper {
     typedef MapperUnit Unit;
 
     static inline Unit map(double ratio, int start, int max, int outCoord) {
-        float f = ((outCoord + 0.5f) * ratio - 0.5f);
+        float f = static_cast<float>((outCoord + 0.5) * ratio - 0.5);
         int s = cvFloor(f);
         f -= s;
@@ -1687,8 +1687,8 @@ static int getResizeAreaTabSize(int dst_go, int ssize, int dsize, float scale) {
         float fsx1 = col * scale;
         float fsx2 = fsx1 + scale;
 
-        int sx1 = ceil(fsx1);
-        int sx2 = floor(fsx2);
+        int sx1 = static_cast<int>(ceil(fsx1));
+        int sx2 = static_cast<int>(floor(fsx2));
 
         sx2 = (std::min)(sx2, ssize - 1);
         sx1 = (std::min)(sx1, sx2);
@@ -1723,8 +1723,8 @@ static void computeResizeAreaTab(int src_go, int dst_go, int ssize, int dsize, f
         float fsx2 = fsx1 + scale;
         float cellWidth = (std::min)(scale, ssize - fsx1);
 
-        int sx1 = ceil(fsx1);
-        int sx2 = floor(fsx2);
+        int sx1 = static_cast<int>(ceil(fsx1));
+        int sx2 = static_cast<int>(floor(fsx2));
 
         sx2 = (std::min)(sx2, ssize - 1);
         sx1 = (std::min)(sx1, sx2);
diff --git a/inference-engine/src/preprocessing/ie_preprocess_gapi_kernels_impl.hpp b/inference-engine/src/preprocessing/ie_preprocess_gapi_kernels_impl.hpp
index bce88e1..8862f5f 100644
--- a/inference-engine/src/preprocessing/ie_preprocess_gapi_kernels_impl.hpp
+++ b/inference-engine/src/preprocessing/ie_preprocess_gapi_kernels_impl.hpp
@@ -51,7 +51,7 @@ template<> inline uint16_t saturate_cast(uint8_t x) { return x; }
 template<> inline float saturate_cast(uint8_t x) { return x; }
 template<> inline uint8_t saturate_cast(uint8_t x) { return x; }
-template<> inline uint8_t saturate_cast(uint16_t x) { using lim = std::numeric_limits<uint8_t>; return std::min(static_cast<uint16_t>(lim::max()), std::max(static_cast<uint16_t>(lim::min()), x));}
+template<> inline uint8_t saturate_cast(uint16_t x) { using lim = std::numeric_limits<uint8_t>; return (uint8_t)std::min(static_cast<uint16_t>(lim::max()), std::max(static_cast<uint16_t>(lim::min()), x));}
 template<> inline uint8_t saturate_cast(float x) { return saturate_cast<uint8_t>(static_cast<int>(std::rint(x))); }
 
 //------------------------------------------------------------------------------
diff --git a/inference-engine/src/readers/ir_reader/ie_ir_parser.cpp b/inference-engine/src/readers/ir_reader/ie_ir_parser.cpp
index 1fa13c7..2dcc319 100644
--- a/inference-engine/src/readers/ir_reader/ie_ir_parser.cpp
+++ b/inference-engine/src/readers/ir_reader/ie_ir_parser.cpp
@@ -568,7 +568,7 @@ std::shared_ptr V10Parser::createNode(const std::vector
-    if (length < offset + size)
+    if (static_cast<size_t>(length) < offset + size)
         THROW_IE_EXCEPTION << "Cannot create " << params.type << " layer with name: " << params.name
                            << ". Layer has incorrect weights!";
 
     Blob::Ptr wBlob = make_blob_with_precision(TensorDesc(precision, {size / precision.size()}, Layout::C));
@@ -1514,7 +1514,7 @@ std::shared_ptr V10Parser::LayerCreator::cre
     if (!length)
         THROW_IE_EXCEPTION << "Cannot read network! The model requires weights data! "
                            << "Bin file cannot be found! Please specify the path to bin file.";
 
-    if (length < offset + size)
+    if (static_cast<size_t>(length) < offset + size)
         THROW_IE_EXCEPTION << "Cannot create " << getType() << " layer with name: " << layerParsePrms.name
                            << ". Layer has incorrect weights!";
diff --git a/inference-engine/src/readers/ir_reader_v7/ie_cnn_net_reader_impl.cpp b/inference-engine/src/readers/ir_reader_v7/ie_cnn_net_reader_impl.cpp
index 9286690..9f72537 100644
--- a/inference-engine/src/readers/ir_reader_v7/ie_cnn_net_reader_impl.cpp
+++ b/inference-engine/src/readers/ir_reader_v7/ie_cnn_net_reader_impl.cpp
@@ -149,7 +149,7 @@ StatusCode CNNNetReaderImpl::ReadNetwork(const pugi::xml_node& const_root, Respo
             _parser = parserCreator->create(_version);
             InferenceEngine::details::CNNNetworkImplPtr local_network = _parser->Parse(root);
             name = local_network->getName();
-            local_network->validate(_version);
+            local_network->validate(static_cast<int>(_version));
             network = local_network;
             parseSuccess = true;
         } else {
diff --git a/inference-engine/src/readers/ir_reader_v7/ie_layer_parsers.cpp b/inference-engine/src/readers/ir_reader_v7/ie_layer_parsers.cpp
index a66902d..2ca2a80 100644
--- a/inference-engine/src/readers/ir_reader_v7/ie_layer_parsers.cpp
+++ b/inference-engine/src/readers/ir_reader_v7/ie_layer_parsers.cpp
@@ -193,12 +193,12 @@ CNNLayer::Ptr TILayerCreator::CreateLayer(pugi::xml_node& node, LayerParseParame
     std::vector<DataPtr> inputs, outputs;
     for (const auto& p : all_inputs) {
         IE_ASSERT(ins.find(p) != ins.end());
-        p2i[p] = inputs.size();
+        p2i[p] = static_cast<int>(inputs.size());
         inputs.push_back(ins[p]);
     }
     for (const auto& p : all_outputs) {
         IE_ASSERT(outs.find(p) != outs.end());
-        p2i[p] = outputs.size();
+        p2i[p] = static_cast<int>(outputs.size());
         outputs.push_back(outs[p]);
     }
diff --git a/inference-engine/src/readers/ir_reader_v7/ie_layer_validators.cpp b/inference-engine/src/readers/ir_reader_v7/ie_layer_validators.cpp
index 7cd900a..3d3b89a 100644
--- a/inference-engine/src/readers/ir_reader_v7/ie_layer_validators.cpp
+++ b/inference-engine/src/readers/ir_reader_v7/ie_layer_validators.cpp
@@ -672,13 +672,13 @@ void GemmValidator::checkShapes(const CNNLayer* layer, const vector&
         THROW_IE_EXCEPTION << "Gemm input shapes must have at least 2 dimensions";
     }
 
-    unsigned long xAxis0 = dims0.size() - 1;
-    unsigned long yAxis0 = dims0.size() - 2;
+    unsigned long xAxis0 = static_cast<unsigned long>(dims0.size() - 1);
+    unsigned long yAxis0 = static_cast<unsigned long>(dims0.size() - 2);
     if (casted->transpose_a) std::swap(xAxis0, yAxis0);
 
-    unsigned long xAxis1 = dims1.size() - 1;
-    unsigned long yAxis1 = dims1.size() - 2;
+    unsigned long xAxis1 = static_cast<unsigned long>(dims1.size() - 1);
+    unsigned long yAxis1 = static_cast<unsigned long>(dims1.size() - 2);
     if (casted->transpose_b) std::swap(xAxis1, yAxis1);
@@ -692,8 +692,8 @@ void GemmValidator::checkShapes(const CNNLayer* layer, const vector&
             THROW_IE_EXCEPTION << "Gemm input shapes must have at least 2 dimensions";
         }
 
-        unsigned long xAxis2 = dims2.size() - 1;
-        unsigned long yAxis2 = dims2.size() - 2;
+        unsigned long xAxis2 = static_cast<unsigned long>(dims2.size() - 1);
+        unsigned long yAxis2 = static_cast<unsigned long>(dims2.size() - 2);
 
         if (dims2[xAxis2] != dims1[xAxis1])
             THROW_IE_EXCEPTION << "Gemm input2 x dimension must be equal to input1 x dimension (" << dims2[xAxis2]
@@ -820,7 +820,7 @@ void ShuffleChannelsValidator::checkShapes(const CNNLayer* layer, const vector
     int axis = casted->axis;
-    if (axis < 0) axis += inShapes[0].size();
+    if (axis < 0) axis += static_cast<int>(inShapes[0].size());
 
     if (inShapes[0][axis] % casted->group)
         THROW_IE_EXCEPTION << layer->name << " Group parameter must evenly divide the channel dimension!";
@@ -1200,7 +1200,7 @@ void ReverseSequenceValidator::checkShapes(const CNNLayer* layer, const vector
     int batch_axis = casted->batch_axis;
-    if (batch_axis < 0) batch_axis += inShapes[0].size();
+    if (batch_axis < 0) batch_axis += static_cast<int>(inShapes[0].size());
     if (inShapes[1][0] != inShapes[0][batch_axis])
         THROW_IE_EXCEPTION << layer->name << " Incorrect 'seq_lengths_dims' parameter dimensions!";
 }
@@ -1368,7 +1368,7 @@ void RNNBaseValidator::checkParams(const InferenceEngine::CNNLayer* layer) {
         if (!one_of(act, "sigmoid", "tanh", "relu"))
             THROW_IE_EXCEPTION << "Unsupported activation function (" << act << ") for RNN layer.";
 
-    int act_num_required = def_acts.size();
+    int act_num_required = static_cast<int>(def_acts.size());
     if (rnn->activations.size() != act_num_required)
         THROW_IE_EXCEPTION << "Expected " << act_num_required << " activations, but provided "
                            << rnn->activations.size();
diff --git a/inference-engine/src/transformations/include/transformations/op_conversions/convert_reduce_to_pooling.hpp b/inference-engine/src/transformations/include/transformations/op_conversions/convert_reduce_to_pooling.hpp
index 3e142cb..81d0126 100644
--- a/inference-engine/src/transformations/include/transformations/op_conversions/convert_reduce_to_pooling.hpp
+++ b/inference-engine/src/transformations/include/transformations/op_conversions/convert_reduce_to_pooling.hpp
@@ -158,7 +158,7 @@ ngraph::matcher_pass_callback ConvertReduceBase::convert_reduce_to_pooling() {
         // In case if reduction applies not to spatial dimensions
         // we have to fit it into 4D Pooling
         size_t dims_prod = 1, dims_begin = 1, dims_end = 1;
-        for (size_t i = 0; i < input_shape.size(); ++i) {
+        for (int64_t i = 0; static_cast<size_t>(i) < input_shape.size(); ++i) {
             if (i < *axes_vector.begin()) {
                 dims_begin *= input_shape[i];
             } else if (i >= axes_vector.front() && i <= axes_vector.back()) {
diff --git a/inference-engine/src/transformations/include/transformations/utils/utils.hpp b/inference-engine/src/transformations/include/transformations/utils/utils.hpp
index 4bd8586..3d30498 100644
--- a/inference-engine/src/transformations/include/transformations/utils/utils.hpp
+++ b/inference-engine/src/transformations/include/transformations/utils/utils.hpp
@@ -26,7 +26,7 @@ bool normalize_single_value(std::vector vec, float & value) {
         if (val != *vec.begin()) return false;
     }
 
-    float ref_val = *vec.begin();
+    float ref_val = static_cast<float>(*vec.begin());
 
     if (ref_val < std::numeric_limits<float>::lowest() || ref_val > std::numeric_limits<float>::max()) {
         return false;
diff --git a/inference-engine/src/transformations/src/transformations/common_optimizations/algebraic_simplification.cpp b/inference-engine/src/transformations/src/transformations/common_optimizations/algebraic_simplification.cpp
index 3ec4a88..4a9b90b 100644
--- a/inference-engine/src/transformations/src/transformations/common_optimizations/algebraic_simplification.cpp
+++ b/inference-engine/src/transformations/src/transformations/common_optimizations/algebraic_simplification.cpp
@@ -151,7 +151,7 @@ static bool replace_transpose_with_reshape(shared_ptr<Node> transpose) {
         return false;
     }
 
-    const auto input_shape_rank = input_shape.rank().get_length();
+    const size_t input_shape_rank = input_shape.rank().get_length();
 
     auto order = as_type_ptr<op::Constant>(transpose->input_value(1).get_node_shared_ptr());
     if (!order || !ngraph::shape_size(order->get_shape())) {
         return false;
     }
diff --git a/inference-engine/src/transformations/src/transformations/common_optimizations/depth_to_space_fusion.cpp b/inference-engine/src/transformations/src/transformations/common_optimizations/depth_to_space_fusion.cpp
index d46a9c1..bf916e7 100644
--- a/inference-engine/src/transformations/src/transformations/common_optimizations/depth_to_space_fusion.cpp
+++ b/inference-engine/src/transformations/src/transformations/common_optimizations/depth_to_space_fusion.cpp
@@ -18,7 +18,7 @@ bool check_block_first(const ngraph::Shape& shape_input, const ngraph::Shape& sh
     possible_block_size = shape_reshape_before[1];
     if (possible_block_size == 0)
         return false;
-    uint64_t c_dim = shape_input[1] / std::pow(possible_block_size, spatial_dims);
+    uint64_t c_dim = static_cast<uint64_t>(shape_input[1] / std::pow(possible_block_size, spatial_dims));
 
     // x' = reshape(data, [N, block_size, block_size, ..., block_size, C / (block_size ^ K), D1, D2, ..., DK])
     ngraph::Shape expected_shape = {shape_input[0]};
@@ -54,7 +54,7 @@ bool check_depth_first(const ngraph::Shape& shape_input, const ngraph::Shape& sh
     possible_block_size = shape_reshape_before[2];
     if (possible_block_size == 0)
         return false;
-    uint64_t c_dim = shape_input[1] / std::pow(possible_block_size, spatial_dims);
+    uint64_t c_dim = static_cast<uint64_t>(shape_input[1] / std::pow(possible_block_size, spatial_dims));
 
     // x' = reshape(data, [N, C / (block_size ^ K), block_size, block_size, ..., block_size, D1, D2, ..., DK])
     ngraph::Shape expected_shape = {shape_input[0], static_cast<size_t>(c_dim)};
@@ -161,4 +161,4 @@ void ngraph::pass::DepthToSpaceFusion::depth_to_space_fusion() {
 
     auto m = std::make_shared<ngraph::pattern::Matcher>(reshape_after, "DepthToSpaceFusion");
     this->add_matcher(m, callback, PassProperty::CHANGE_DYNAMIC_STATE);
-}
\ No newline at end of file
+}
diff --git a/inference-engine/src/transformations/src/transformations/common_optimizations/nop_elimination.cpp b/inference-engine/src/transformations/src/transformations/common_optimizations/nop_elimination.cpp
index 6a4e7be..4da31e8 100644
--- a/inference-engine/src/transformations/src/transformations/common_optimizations/nop_elimination.cpp
+++ b/inference-engine/src/transformations/src/transformations/common_optimizations/nop_elimination.cpp
@@ -165,7 +165,7 @@ static bool replace_squeeze_unsqueeze(const std::shared_ptr<Node>& node) {
 static std::vector<int64_t> get_unsqueeze_axes(const PartialShape& data_shape,
                                                const PartialShape& out_shape) {
     std::vector<int64_t> axes;
-    size_t i = 0;
+    int64_t i = 0;
     for (auto o = 0; o < out_shape.rank().get_length(); o++) {
         if (i < data_shape.rank().get_length() && data_shape[i].same_scheme(out_shape[o])) {
             i += 1;
@@ -181,7 +181,7 @@ static std::vector<int64_t> get_squeeze_axes(const PartialShape& data_shape,
                                              const PartialShape& out_shape) {
     std::vector<int64_t> axes;
-    size_t out_i = 0;
+    int64_t out_i = 0;
     for (auto i = 0; i < data_shape.rank().get_length(); i++) {
         if (out_i < out_shape.rank().get_length() && data_shape[i].same_scheme(out_shape[out_i])) {
             out_i += 1;
diff --git a/inference-engine/src/transformations/src/transformations/common_optimizations/optimize_strided_slice.cpp b/inference-engine/src/transformations/src/transformations/common_optimizations/optimize_strided_slice.cpp
index 67125a0..fbe60a8 100644
--- a/inference-engine/src/transformations/src/transformations/common_optimizations/optimize_strided_slice.cpp
+++ b/inference-engine/src/transformations/src/transformations/common_optimizations/optimize_strided_slice.cpp
@@ -169,7 +169,7 @@ bool ngraph::pass::GroupedStridedSliceOptimizer::run_on_function(std::shared_ptr
         for (const auto & ss_plan : pair.second) {
             if (ss_plan.second.begins[i] != 0 || ss_plan.second.ends[i] != input_shape[i]) {
                 if (axis == -1 || axis == i)
-                    axis = i;
+                    axis = static_cast<int>(i);
                 else
                     valid_for_replacement = false;
                 if (ss_plan.second.strides[i] != 1)
@@ -189,12 +189,12 @@ bool ngraph::pass::GroupedStridedSliceOptimizer::run_on_function(std::shared_ptr
                   {return lhs.begin < rhs.begin;});
 
         std::vector<std::pair<Output<Node>, uint64_t>> output_to_size;
-        uint64_t prev_r = 0;
+        int64_t prev_r = 0;
         for (auto & record : output_to_partition) {
             valid_for_replacement &= (record.begin >= prev_r);
             prev_r = record.end;
         }
-        valid_for_replacement &= (prev_r <= input_shape[axis]);
+        valid_for_replacement &= (static_cast<size_t>(prev_r) <= input_shape[axis]);
         if (!valid_for_replacement) continue;
 
         prev_r = 0;
@@ -205,7 +205,7 @@ bool ngraph::pass::GroupedStridedSliceOptimizer::run_on_function(std::shared_ptr
             prev_r = record.end;
             output_to_size.emplace_back(record.output, record.end - record.begin);
         }
-        if (prev_r < input_shape[axis]) {
+        if (static_cast<size_t>(prev_r) < input_shape[axis]) {
             output_to_size.emplace_back(fake_output, input_shape[axis] - prev_r);
         }
diff --git a/inference-engine/src/transformations/src/transformations/common_optimizations/pull_transpose_through_fq.cpp b/inference-engine/src/transformations/src/transformations/common_optimizations/pull_transpose_through_fq.cpp
index aea96b9..c09d88a 100644
--- a/inference-engine/src/transformations/src/transformations/common_optimizations/pull_transpose_through_fq.cpp
+++ b/inference-engine/src/transformations/src/transformations/common_optimizations/pull_transpose_through_fq.cpp
@@ -35,7 +35,7 @@ ngraph::pass::PullTransposeThroughFQUp::PullTransposeThroughFQUp() {
             auto fq_input = fq->input_value(i);
             auto fq_input_rank = fq_input.get_partial_shape().rank().get_length();
             std::vector<int64_t> unsqueeze_axes;
-            for (size_t j = 0; j < input_rank - fq_input_rank; ++j) {
+            for (int64_t j = 0; j < input_rank - fq_input_rank; ++j) {
                 unsqueeze_axes.push_back(j);
             }
             if (!unsqueeze_axes.empty()) {
diff --git a/inference-engine/src/transformations/src/transformations/control_flow/unroll_tensor_iterator.cpp b/inference-engine/src/transformations/src/transformations/control_flow/unroll_tensor_iterator.cpp
index 53a5262..45f96fb 100644
--- a/inference-engine/src/transformations/src/transformations/control_flow/unroll_tensor_iterator.cpp
+++ b/inference-engine/src/transformations/src/transformations/control_flow/unroll_tensor_iterator.cpp
@@ -34,7 +34,7 @@ ngraph::pass::UnrollTensorIterator::UnrollTensorIterator() : MatcherPass() {
         // Create copies of the TensorIterator body, the number of copies is equal to the number of iterations.
         // Assign names to the created layers.
         std::vector<std::shared_ptr<ngraph::Function>> body_functions(num_iter);
-        for (uint64_t idx = 0; idx < num_iter; ++idx) {
+        for (int64_t idx = 0; idx < num_iter; ++idx) {
            body_functions[idx] = clone_function(*function);
            for (auto &node : body_functions[idx]->get_ops()) {
                node->set_friendly_name(ti->get_friendly_name() + "/" + std::to_string(idx + 1) + "/" + node->get_friendly_name());
@@ -64,7 +64,7 @@ ngraph::pass::UnrollTensorIterator::UnrollTensorIterator() : MatcherPass() {
                 copy_runtime_info(ti, split);
                 auto stride = input_desc->m_stride;
                 // connect to the body
-                for (uint64_t j = 0; j < num_iter; j++) {
+                for (int64_t j = 0; j < num_iter; j++) {
                     auto idx = stride > 0 ? j : num_iter - j - 1;
                     auto param = body_functions[j]->get_parameters()[input_desc->m_body_parameter_index];
                     for (auto &output : param->outputs()) {
@@ -92,7 +92,7 @@ ngraph::pass::UnrollTensorIterator::UnrollTensorIterator() : MatcherPass() {
                 }
 
                 // Back-edge processing. Connect the copies of the body to each other.
-                for (uint64_t j = 1; j < num_iter; j++) {
+                for (int64_t j = 1; j < num_iter; j++) {
                     auto cur_param = body_functions[j]->get_parameters()[input_desc->m_body_parameter_index];
                     auto prev_val = body_functions[j - 1]->get_results()[input_desc->m_body_value_index];
                     for (auto &output : cur_param->outputs()) {
@@ -108,7 +108,7 @@ ngraph::pass::UnrollTensorIterator::UnrollTensorIterator() : MatcherPass() {
                 // Connect the input to the corresponding copy of the body.
                 auto in_data = ti->input_values()[input_desc->m_input_index].get_node_shared_ptr();
-                for (uint64_t j = 0; j < num_iter; j++) {
+                for (int64_t j = 0; j < num_iter; j++) {
                     auto param = body_functions[j]->get_parameters()[input_desc->m_body_parameter_index];
                     for (auto &output : param->outputs()) {
                         output.replace(in_data);
@@ -138,7 +138,7 @@ ngraph::pass::UnrollTensorIterator::UnrollTensorIterator() : MatcherPass() {
                 auto stride = output_desc->m_stride;
 
                 // Connect outputs of the bodies to the Concat layer
-                for (uint64_t j = 0; j < num_iter; j++) {
+                for (int64_t j = 0; j < num_iter; j++) {
                     auto idx = stride > 0 ? j : num_iter - j - 1;
 
                     std::shared_ptr<ngraph::op::Result> result = body_functions[idx]->get_results()[output_desc->m_body_value_index];
                     auto input_to_res = result->get_input_source_output(0);
@@ -188,4 +188,4 @@ ngraph::pass::UnrollTensorIterator::UnrollTensorIterator() : MatcherPass() {
 
     auto m = std::make_shared<ngraph::pattern::Matcher>(tensor_iterator, "UnrollTensorIterator");
     register_matcher(m, callback);
-}
\ No newline at end of file
+}
diff --git a/inference-engine/src/transformations/src/transformations/convert_precision.cpp b/inference-engine/src/transformations/src/transformations/convert_precision.cpp
index ef36b27..bb7aeac 100644
--- a/inference-engine/src/transformations/src/transformations/convert_precision.cpp
+++ b/inference-engine/src/transformations/src/transformations/convert_precision.cpp
@@ -322,7 +322,7 @@ inline int32_t convert_value(uint64_t val) {
 
 template <>
 inline int32_t convert_value(uint32_t val) {
-    if (val > std::numeric_limits<int32_t>::max()) {
+    if (val > static_cast<uint32_t>(std::numeric_limits<int32_t>::max())) {
         return std::numeric_limits<int32_t>::max();
     }
     return static_cast<int32_t>(val);
diff --git a/inference-engine/src/transformations/src/transformations/op_conversions/batch_norm_decomposition.cpp b/inference-engine/src/transformations/src/transformations/op_conversions/batch_norm_decomposition.cpp
index 384e916..b4bd2a9 100644
--- a/inference-engine/src/transformations/src/transformations/op_conversions/batch_norm_decomposition.cpp
+++ b/inference-engine/src/transformations/src/transformations/op_conversions/batch_norm_decomposition.cpp
@@ -49,7 +49,7 @@ ngraph::pass::BatchNormDecomposition::BatchNormDecomposition() {
         // TODO: instead of getting full shape we can concatenate sequence of ones with ShapeOf
         Shape input_aligned_shape = m_gamma.get_shape();
-        for (size_t i = 0; i < dims_to_add; ++i)
+        for (int64_t i = 0; i < dims_to_add; ++i)
             input_aligned_shape.push_back(1);
         auto new_shape = opset5::Constant::create(element::i64, Shape{input_aligned_shape.size()}, input_aligned_shape);
@@ -113,7 +113,7 @@ ngraph::pass::BatchNormV5Decomposition::BatchNormV5Decomposition() {
         // TODO: instead of getting full shape we can concatenate sequence of ones with ShapeOf
         Shape input_aligned_shape = m_gamma.get_shape();
-        for (size_t i = 0; i < dims_to_add; ++i)
+        for (int64_t i = 0; i < dims_to_add; ++i)
             input_aligned_shape.push_back(1);
         auto new_shape = opset5::Constant::create(element::i64, Shape{input_aligned_shape.size()}, input_aligned_shape);
diff --git a/inference-engine/src/transformations/src/transformations/op_conversions/convert_scatter_elements_to_scatter.cpp b/inference-engine/src/transformations/src/transformations/op_conversions/convert_scatter_elements_to_scatter.cpp
index 975c8e2..da2794a 100644
--- a/inference-engine/src/transformations/src/transformations/op_conversions/convert_scatter_elements_to_scatter.cpp
+++ b/inference-engine/src/transformations/src/transformations/op_conversions/convert_scatter_elements_to_scatter.cpp
@@ -87,8 +87,8 @@ void ngraph::pass::ConvertScatterElementsToScatter::convert_scatter_elements_to_
         auto compare_shapes_ranges = [](const PartialShape & lhsShape, const PartialShape & rhsShape, const Range & lhsRange, const Range & rhsRange) -> bool {
             // Check that ranges are equal and suits to Shapes sizes
             if (lhsRange != rhsRange ||
-                lhsRange.r > lhsShape.rank().get_length() ||
-                rhsRange.r > rhsShape.rank().get_length()) {
+                lhsRange.r > static_cast<size_t>(lhsShape.rank().get_length()) ||
+                rhsRange.r > static_cast<size_t>(rhsShape.rank().get_length())) {
                 return false;
             }
@@ -210,4 +210,4 @@ void ngraph::pass::ConvertScatterElementsToScatter::convert_scatter_elements_to_
 
     auto m = std::make_shared<ngraph::pattern::Matcher>(scatter, "ConvertScatterElementsToScatter");
     this->add_matcher(m, callback, PassProperty::CHANGE_DYNAMIC_STATE);
-}
\ No newline at end of file
+}
diff --git a/inference-engine/src/vpu/common/include/vpu/utils/simple_math.hpp b/inference-engine/src/vpu/common/include/vpu/utils/simple_math.hpp
index bd49139..1e85361 100644
--- a/inference-engine/src/vpu/common/include/vpu/utils/simple_math.hpp
+++ b/inference-engine/src/vpu/common/include/vpu/utils/simple_math.hpp
@@ -25,7 +25,7 @@ namespace vpu {
 
 template
-Optional parseNumber(const std::string& s) {
+Optional<T> parseNumber(const std::string& s) {
     T value{};
     if ((std::istringstream(s) >> value >> std::ws).eof()) {
         return {value};
@@ -78,6 +78,7 @@ public:
     }
 
     float toFloat() const { return isInt ? static_cast<float>(value.i) : value.f; }
+    int toInt() const { return isInt ? value.i : static_cast<int>(value.f); }
 
     OPERATOR(+)
     OPERATOR(-)
diff --git a/inference-engine/src/vpu/common/include/vpu/utils/small_vector.hpp b/inference-engine/src/vpu/common/include/vpu/utils/small_vector.hpp
index ee89d53..f39bbc2 100644
--- a/inference-engine/src/vpu/common/include/vpu/utils/small_vector.hpp
+++ b/inference-engine/src/vpu/common/include/vpu/utils/small_vector.hpp
@@ -305,7 +305,11 @@ public:
     const_reverse_iterator crend() const noexcept { return _base.crend(); }
 
     bool empty() const noexcept { return _base.empty(); }
-    size_type size() const noexcept { return _base.size(); }
+#if ENABLE_MYRIAD
+    int size() const noexcept { return static_cast<int>(_base.size()); }
+#else
+    size_t size() const noexcept { return _base.size(); }
+#endif
 
     void reserve(size_type cap) { _base.reserve(cap); }
diff --git a/inference-engine/src/vpu/common/src/ngraph/operations/out_shape_of_reshape.cpp b/inference-engine/src/vpu/common/src/ngraph/operations/out_shape_of_reshape.cpp
index c90b42d..fa91f5c 100644
--- a/inference-engine/src/vpu/common/src/ngraph/operations/out_shape_of_reshape.cpp
+++ b/inference-engine/src/vpu/common/src/ngraph/operations/out_shape_of_reshape.cpp
@@ -98,7 +98,7 @@ bool setShapeToHostTensorData(const HostTensorPtr& data, const Shape& shape) {
     }
 
     for (int i = 0; i < outputRank; i++) {
-        dataPtr[i] = shape[i];
+        dataPtr[i] = static_cast<T>(shape[i]);
     }
     return true;
 }
@@ -189,9 +189,9 @@ bool evaluateOutShapeOfReshape(
         return false;
     }
 
-    int zeroDimsCount = std::count_if(outputShape.begin(), outputShape.end(),
+    int64_t zeroDimsCount = std::count_if(outputShape.begin(), outputShape.end(),
                                       [](int64_t value) { return value == 0; });
-    int negativeDimsCount = std::count_if(outputShape.begin(), outputShape.end(),
+    int64_t negativeDimsCount = std::count_if(outputShape.begin(), outputShape.end(),
                                           [](int64_t value) { return value == -1; });
     if (negativeDimsCount > 1) {
         return false;
@@ -220,7 +220,7 @@ bool evaluateOutShapeOfReshape(
             outputShape[i] = inputShape[i];
             outputTotalDimCount *= inputShape[i];
         } else if (outputShape[i] == -1) {
-            negativeDimIdx = i;
+            negativeDimIdx = static_cast<int64_t>(i);
         } else {
             outputTotalDimCount *= outputShape[i];
         }
diff --git a/inference-engine/src/vpu/common/src/ngraph/operations/static_shape_nonzero.cpp b/inference-engine/src/vpu/common/src/ngraph/operations/static_shape_nonzero.cpp
index 6d9e224..c0120a9 100644
--- a/inference-engine/src/vpu/common/src/ngraph/operations/static_shape_nonzero.cpp
+++ b/inference-engine/src/vpu/common/src/ngraph/operations/static_shape_nonzero.cpp
@@ -66,14 +66,14 @@ void evaluateStaticShapeNonZero(const Shape& inputShape,
const auto inputRank = nonZeroOutput->get_partial_shape()[0].get_length(); const auto nonZeroCount = nonZeroOutput->get_partial_shape()[1].get_length(); - for (size_t i = 0; i < inputRank; ++i) { - for (size_t j = 0; j < nonZeroCount; j++) { + for (int64_t i = 0; i < inputRank; ++i) { + for (int64_t j = 0; j < nonZeroCount; j++) { outIndicesBuffer[i * totalInputSize + j] = nonZeroOutputBuffer[i * nonZeroCount + j]; } } - outShapeBuffer[0] = inputRank; - outShapeBuffer[1] = nonZeroCount; + outShapeBuffer[0] = static_cast::value_type>(inputRank); + outShapeBuffer[1] = static_cast::value_type>(nonZeroCount); } } // namespace diff --git a/inference-engine/src/vpu/common/src/ngraph/transformations/dynamic_to_static_shape_matmul.cpp b/inference-engine/src/vpu/common/src/ngraph/transformations/dynamic_to_static_shape_matmul.cpp index 5b2621c..a04c4e7 100644 --- a/inference-engine/src/vpu/common/src/ngraph/transformations/dynamic_to_static_shape_matmul.cpp +++ b/inference-engine/src/vpu/common/src/ngraph/transformations/dynamic_to_static_shape_matmul.cpp @@ -18,7 +18,7 @@ namespace vpu { void get_normalized_shape(ngraph::Output& shape, size_t actual_rank_value, size_t max_rank_value, bool transpose, const ngraph::element::Type& elementType) { - if (const unsigned rank_diff = max_rank_value - actual_rank_value) { + if (const size_t rank_diff = max_rank_value - actual_rank_value) { ngraph::OutputVector extended_shape_parts = {ngraph::opset3::Constant::create(elementType, {rank_diff}, std::vector(rank_diff, 1)), shape}; shape = std::make_shared(extended_shape_parts, 0); diff --git a/inference-engine/src/vpu/common/src/ngraph/transformations/dynamic_to_static_shape_strided_slice.cpp b/inference-engine/src/vpu/common/src/ngraph/transformations/dynamic_to_static_shape_strided_slice.cpp index dacf2dd..6542b8c 100644 --- a/inference-engine/src/vpu/common/src/ngraph/transformations/dynamic_to_static_shape_strided_slice.cpp +++ b/inference-engine/src/vpu/common/src/ngraph/transformations/dynamic_to_static_shape_strided_slice.cpp @@ -38,12 +38,12 @@ std::shared_ptr calculate_output_shape( VPU_THROW_UNLESS(begin.size() == end.size() && begin.size() == strides.size(), "Begin, end and strides inputs must be of the same size, but {}, {} and {} given accordingly", begin.size(), end.size(), strides.size()); - const auto inputShapeRank = input_shape.get_partial_shape()[0].get_length(); + const auto inputShapeRank = static_cast(input_shape.get_partial_shape()[0].get_length()); VPU_THROW_UNLESS(inputShapeRank >= begin.size(), "Input shape rank must not be less than begin/end/strides size, but {} and {} given accordingly", inputShapeRank, begin.size()); ngraph::OutputVector output_dimensions; - for (int64_t axis = 0; axis < begin.size(); ++axis) { + for (size_t axis = 0; axis < begin.size(); ++axis) { auto lb = begin[axis], ub = end[axis], stride = strides[axis]; ngraph::Output lower_bound = ngraph::opset3::Constant::create(shape_type, {1}, {lb}); diff --git a/inference-engine/src/vpu/common/src/utils/simple_math.cpp b/inference-engine/src/vpu/common/src/utils/simple_math.cpp index d8669f6..8e2a62b 100644 --- a/inference-engine/src/vpu/common/src/utils/simple_math.cpp +++ b/inference-engine/src/vpu/common/src/utils/simple_math.cpp @@ -189,7 +189,7 @@ int MathExpression::evaluate() const { VPU_THROW_EXCEPTION << "Illegal expression: not enough operators"; } - return values.top().toFloat(); + return values.top().toInt(); } } // namespace vpu diff --git 
a/inference-engine/src/vpu/graph_transformer/include/vpu/model/data.hpp b/inference-engine/src/vpu/graph_transformer/include/vpu/model/data.hpp index 32609aa..62dd2c2 100644 --- a/inference-engine/src/vpu/graph_transformer/include/vpu/model/data.hpp +++ b/inference-engine/src/vpu/graph_transformer/include/vpu/model/data.hpp @@ -189,7 +189,7 @@ public: } inline int numConsumers() const { - return _consumerEdges.size(); + return static_cast(_consumerEdges.size()); } inline auto consumers() const -> decltype(mapRange(consumerEdges())) { return mapRange(consumerEdges()); @@ -207,7 +207,7 @@ public: } inline int numChildDatas() const { - return _childDataToDataEdges.size(); + return static_cast(_childDataToDataEdges.size()); } inline auto childDatas() const -> decltype(mapRange(childDataToDataEdges())) { return mapRange(childDataToDataEdges()); diff --git a/inference-engine/src/vpu/graph_transformer/include/vpu/model/data_desc.hpp b/inference-engine/src/vpu/graph_transformer/include/vpu/model/data_desc.hpp index e0f17fd..f200a16 100644 --- a/inference-engine/src/vpu/graph_transformer/include/vpu/model/data_desc.hpp +++ b/inference-engine/src/vpu/graph_transformer/include/vpu/model/data_desc.hpp @@ -409,7 +409,7 @@ public: DimsOrder() = default; static DimsOrder fromCode(StorageOrder64 code); - static DimsOrder fromNumDims(int numDims); + static DimsOrder fromNumDims(size_t numDims); static DimsOrder fromPermutation(const DimVector& perm); static DimsOrder fromLayout(ie::Layout const& layout); @@ -513,7 +513,7 @@ public: int ind = 0; for (auto i = dimsBegin; i < dimsEnd; i++) { auto val = *i; - _dims.set(perm[ind], val); + _dims.set(perm[ind], static_cast(val)); ++ind; } } else { diff --git a/inference-engine/src/vpu/graph_transformer/include/vpu/model/model.hpp b/inference-engine/src/vpu/graph_transformer/include/vpu/model/model.hpp index f102418d..52d9024 100644 --- a/inference-engine/src/vpu/graph_transformer/include/vpu/model/model.hpp +++ b/inference-engine/src/vpu/graph_transformer/include/vpu/model/model.hpp @@ -308,12 +308,12 @@ public: // Nodes accessors // - inline int numDatas() const { return _dataPtrList.size(); } + inline int numDatas() const { return static_cast(_dataPtrList.size()); } inline auto datas() const -> decltype(_dataList | asRange()) { return _dataList | asRange(); } - inline int numStages() const { return _stagePtrList.size(); } + inline int numStages() const { return static_cast(_stagePtrList.size()); } inline auto initialStages() const -> decltype(_initialStages | asRange()) { return _initialStages | asRange(); } diff --git a/inference-engine/src/vpu/graph_transformer/include/vpu/model/stage.hpp b/inference-engine/src/vpu/graph_transformer/include/vpu/model/stage.hpp index e680cc8..da7b73a 100644 --- a/inference-engine/src/vpu/graph_transformer/include/vpu/model/stage.hpp +++ b/inference-engine/src/vpu/graph_transformer/include/vpu/model/stage.hpp @@ -466,11 +466,11 @@ private: public: inline int numInputs() const { return _inputEdges.size(); } inline StageInput inputEdge(int ind) const { - IE_ASSERT(ind >= 0 && static_cast(ind) < _inputEdges.size()); + IE_ASSERT(ind >= 0 && ind < _inputEdges.size()); return _inputEdges[ind]; } inline Data input(int ind) const { - IE_ASSERT(ind >= 0 && static_cast(ind) < _inputEdges.size()); + IE_ASSERT(ind >= 0 && ind < _inputEdges.size()); return _inputEdges[ind]->input(); } inline auto inputs() const -> decltype(mapRange(inputEdges())) { @@ -479,11 +479,11 @@ public: inline int numOutputs() const { return _outputEdges.size(); } 
inline StageOutput outputEdge(int ind) const { - IE_ASSERT(ind >= 0 && static_cast(ind) < _outputEdges.size()); + IE_ASSERT(ind >= 0 && ind < _outputEdges.size()); return _outputEdges[ind]; } inline Data output(int ind) const { - IE_ASSERT(ind >= 0 && static_cast(ind) < _outputEdges.size()); + IE_ASSERT(ind >= 0 && ind < _outputEdges.size()); return _outputEdges[ind]->output(); } inline auto outputs() const -> decltype(mapRange(outputEdges())) { @@ -499,11 +499,11 @@ public: inline int numTempBuffers() const { return _tempBufferEdges.size(); } inline StageTempBuffer tempBufferEdge(int ind) const { - IE_ASSERT(ind >= 0 && static_cast(ind) < _tempBufferEdges.size()); + IE_ASSERT(ind >= 0 && ind < _tempBufferEdges.size()); return _tempBufferEdges[ind]; } inline Data tempBuffer(int ind) const { - IE_ASSERT(ind >= 0 && static_cast(ind) < _tempBufferEdges.size()); + IE_ASSERT(ind >= 0 && ind < _tempBufferEdges.size()); return _tempBufferEdges[ind]->tempBuffer(); } inline auto tempBuffers() const -> decltype(mapRange(tempBufferEdges())) { diff --git a/inference-engine/src/vpu/graph_transformer/src/backend/serialize.cpp b/inference-engine/src/vpu/graph_transformer/src/backend/serialize.cpp index fc83a4e..8c86f1d 100644 --- a/inference-engine/src/vpu/graph_transformer/src/backend/serialize.cpp +++ b/inference-engine/src/vpu/graph_transformer/src/backend/serialize.cpp @@ -235,9 +235,9 @@ void BackEnd::serialize( blobHdr.bss_mem_size = checked_cast(usedMemory.BSS); blobHdr.number_of_cmx_slices = checked_cast(env.resources.numCMXSlices); blobHdr.number_of_shaves = checked_cast(env.resources.numSHAVEs); - blobHdr.has_hw_stage = checked_cast(modelStagesStat.hasHwStage); - blobHdr.has_shave_stage = checked_cast(modelStagesStat.hasShaveStage); - blobHdr.has_dma_stage = checked_cast(modelStagesStat.hasDmaStage); + blobHdr.has_hw_stage = static_cast(modelStagesStat.hasHwStage); + blobHdr.has_shave_stage = static_cast(modelStagesStat.hasShaveStage); + blobHdr.has_dma_stage = static_cast(modelStagesStat.hasDmaStage); blobHdr.input_info_section_offset = checked_cast(hdrSize); blobHdr.output_info_section_offset = checked_cast(blobHdr.input_info_section_offset + inputInfoSecSize); blobHdr.stage_section_offset = checked_cast(blobHdr.output_info_section_offset + outputInfoSecSize); diff --git a/inference-engine/src/vpu/graph_transformer/src/frontend/custom_kernel.cpp b/inference-engine/src/vpu/graph_transformer/src/frontend/custom_kernel.cpp index da70641..55fdf99 100644 --- a/inference-engine/src/vpu/graph_transformer/src/frontend/custom_kernel.cpp +++ b/inference-engine/src/vpu/graph_transformer/src/frontend/custom_kernel.cpp @@ -113,7 +113,7 @@ SmallVector deduceKernelParameters(const md_parser_t& parser, int k auto arguments = SmallVector{}; arguments.reserve(argCount); - for (size_t i = 0; i < argCount; i++) { + for (uint32_t i = 0; i < argCount; i++) { const auto arg = parser.get_argument(kernelDesc, i); VPU_THROW_UNLESS(arg, "Error while parsing custom layer elf file."); @@ -243,7 +243,7 @@ CustomKernel::CustomKernel(const pugi::xml_node& kernel, std::string configDir): param.type == CustomParamType::Data; }; - _inputDataCount = std::count_if(begin(_kernelParams), end(_kernelParams), isInputData); + _inputDataCount = static_cast(std::count_if(begin(_kernelParams), end(_kernelParams), isInputData)); } std::pair parseDimSource(const std::string& dims) { diff --git a/inference-engine/src/vpu/graph_transformer/src/frontend/custom_layer.cpp 
b/inference-engine/src/vpu/graph_transformer/src/frontend/custom_layer.cpp index a20ffbb..9c6cd32 100644 --- a/inference-engine/src/vpu/graph_transformer/src/frontend/custom_layer.cpp +++ b/inference-engine/src/vpu/graph_transformer/src/frontend/custom_layer.cpp @@ -178,10 +178,10 @@ CustomLayer::CustomLayer(std::string configDir, const pugi::xml_node& customLaye "each kernel should be provided with 'stage' attribute.", _layerName); const auto stageNum = std::stod(stageAttr.value()); - VPU_THROW_UNLESS(stageOrder.find(stageNum) == stageOrder.end(), + VPU_THROW_UNLESS(stageOrder.find(static_cast(stageNum)) == stageOrder.end(), "Error while binding %s custom layer: found duplicating stage id.", _layerName); - stageOrder.emplace(stageNum, CustomKernel{kernel, _configDir}); + stageOrder.emplace(static_cast(stageNum), CustomKernel{kernel, _configDir}); } VPU_THROW_UNLESS(!stageOrder.empty(), diff --git a/inference-engine/src/vpu/graph_transformer/src/middleend/hw/tiling.cpp b/inference-engine/src/vpu/graph_transformer/src/middleend/hw/tiling.cpp index 06e234a..d2be8f2 100644 --- a/inference-engine/src/vpu/graph_transformer/src/middleend/hw/tiling.cpp +++ b/inference-engine/src/vpu/graph_transformer/src/middleend/hw/tiling.cpp @@ -87,7 +87,7 @@ int calcOutputSize( int padBefore, int padAfter, bool useCeil) { if (useCeil) { - return std::ceil(static_cast(inputSize - kernelSize + padBefore + padAfter) / kernelStride + 1); + return static_cast(std::ceil(static_cast(inputSize - kernelSize + padBefore + padAfter) / kernelStride + 1)); } else { return (inputSize - kernelSize + padBefore + padAfter) / kernelStride + 1; } diff --git a/inference-engine/src/vpu/graph_transformer/src/middleend/passes/hw_extra_split.cpp b/inference-engine/src/vpu/graph_transformer/src/middleend/passes/hw_extra_split.cpp index 4066905..44e7fee 100644 --- a/inference-engine/src/vpu/graph_transformer/src/middleend/passes/hw_extra_split.cpp +++ b/inference-engine/src/vpu/graph_transformer/src/middleend/passes/hw_extra_split.cpp @@ -434,10 +434,10 @@ void PassImpl::splitHwConv( const auto tileInfo = tileInfos[i]; const auto tile = tileInfo.tile; - const std::string postfix = getPostfix(postfixDescription, i+1, tileInfos.size()); + const std::string postfix = getPostfix(postfixDescription, i+1, static_cast(tileInfos.size())); const auto startChannel = tileInfo.slice.start; - const auto numChannels = tileInfo.slice.size; + const auto numChannels = static_cast(tileInfo.slice.size); const auto newWeights = splitWeights(model, weights, postfix, startChannel, numChannels); const auto newBiases = splitBiases(model, biases, postfix, startChannel, numChannels); @@ -610,7 +610,7 @@ std::vector PassImpl::splitHwConvInMultipleOutChannelsTiles( tileInfo.cost = descCost; bestSol.clear(); - for (int i = 0; i < numDescr; ++i) { + for (uint32_t i = 0; i < numDescr; ++i) { if (i == numDescr-1) tileInfo.lastOutChans = (remOutChans > 0) ? 
remOutChans : outChansPerDescr; diff --git a/inference-engine/src/vpu/graph_transformer/src/middleend/passes/merge_hw_stages.cpp b/inference-engine/src/vpu/graph_transformer/src/middleend/passes/merge_hw_stages.cpp index c02d764..88494db 100644 --- a/inference-engine/src/vpu/graph_transformer/src/middleend/passes/merge_hw_stages.cpp +++ b/inference-engine/src/vpu/graph_transformer/src/middleend/passes/merge_hw_stages.cpp @@ -157,7 +157,7 @@ void PassImpl::run(const Model& model) { stage->attrs().set("reluScale", 1.0f); } else { stage->attrs().set("a0", 1); - stage->attrs().set("a1", 1.0f / negativeSlope); + stage->attrs().set("a1", static_cast(1.0f / negativeSlope)); stage->attrs().set("reluScale", negativeSlope); } } diff --git a/inference-engine/src/vpu/graph_transformer/src/middleend/passes/merge_permute_stages.cpp b/inference-engine/src/vpu/graph_transformer/src/middleend/passes/merge_permute_stages.cpp index 4ca636b..c762cbc 100644 --- a/inference-engine/src/vpu/graph_transformer/src/middleend/passes/merge_permute_stages.cpp +++ b/inference-engine/src/vpu/graph_transformer/src/middleend/passes/merge_permute_stages.cpp @@ -58,7 +58,7 @@ private: static bool isTrivialPermute(const PermutationIndexVector& permutation, const vpu::DimValues& dims) { InferenceEngine::SizeVector dimsVector(dims.size()); for (const auto& dim : dims) { - auto index = dimToIeInd(dim.first, dims.size()); + auto index = dimToIeInd(dim.first, static_cast(dims.size())); dimsVector[dims.size() - 1 - index] = dim.second; } diff --git a/inference-engine/src/vpu/graph_transformer/src/middleend/passes/reshape_dilation_conv.cpp b/inference-engine/src/vpu/graph_transformer/src/middleend/passes/reshape_dilation_conv.cpp index 09823ed..cf3c5b2 100644 --- a/inference-engine/src/vpu/graph_transformer/src/middleend/passes/reshape_dilation_conv.cpp +++ b/inference-engine/src/vpu/graph_transformer/src/middleend/passes/reshape_dilation_conv.cpp @@ -133,7 +133,7 @@ void PassImpl::run(const Model& model) { static_cast(InputExtended_height) / static_cast(input->desc().dim(Dim::H))); - const float MAX_INPUTEXTENDED_SCALE = 1.8; + const float MAX_INPUTEXTENDED_SCALE = 1.8f; const float MIN_INPUTEXTENDED_SCALE = 1; if (InputExtended_scale >= MAX_INPUTEXTENDED_SCALE) { diff --git a/inference-engine/src/vpu/graph_transformer/src/middleend/passes/split_conv3d_into_2d.cpp b/inference-engine/src/vpu/graph_transformer/src/middleend/passes/split_conv3d_into_2d.cpp index 130178d..b9f3d7c 100644 --- a/inference-engine/src/vpu/graph_transformer/src/middleend/passes/split_conv3d_into_2d.cpp +++ b/inference-engine/src/vpu/graph_transformer/src/middleend/passes/split_conv3d_into_2d.cpp @@ -78,7 +78,7 @@ void PassImpl::run(const Model& model) { auto groups = stage->attrs().get("groups"); auto try_hw = stage->attrs().get("try_hw"); - int kernelNDims = pads_begin.size(); + int kernelNDims = static_cast(pads_begin.size()); VPU_THROW_UNLESS(kernelNDims == pads_end.size(), "wrong pads ndims=%lu, expected=%d", pads_end.size(), kernelNDims); VPU_THROW_UNLESS(kernelNDims == strides.size(), diff --git a/inference-engine/src/vpu/graph_transformer/src/middleend/passes/split_grouped_conv.cpp b/inference-engine/src/vpu/graph_transformer/src/middleend/passes/split_grouped_conv.cpp index 57d7d92..e748df9 100644 --- a/inference-engine/src/vpu/graph_transformer/src/middleend/passes/split_grouped_conv.cpp +++ b/inference-engine/src/vpu/graph_transformer/src/middleend/passes/split_grouped_conv.cpp @@ -137,7 +137,7 @@ void PassImpl::run(const Model& model) { if 
(stage->type() == StageType::StubDeconv) { deconvolutionRelayout( origWeights, weights->desc().totalDimSize(), - newWeightsPtr, newWeightsSize, + newWeightsPtr, static_cast(newWeightsSize), kernelSizeX, kernelSizeY, input->desc().dim(Dim::C), output->desc().dim(Dim::C), diff --git a/inference-engine/src/vpu/graph_transformer/src/middleend/passes/split_pool3d_into_2d.cpp b/inference-engine/src/vpu/graph_transformer/src/middleend/passes/split_pool3d_into_2d.cpp index 358cc5b..8a08230 100644 --- a/inference-engine/src/vpu/graph_transformer/src/middleend/passes/split_pool3d_into_2d.cpp +++ b/inference-engine/src/vpu/graph_transformer/src/middleend/passes/split_pool3d_into_2d.cpp @@ -76,7 +76,7 @@ void PassImpl::run(const Model& model) { const auto try_hw = stage->attrs().get("try_hw"); - int kernelNDims = kernel_shape.size(); + auto kernelNDims = kernel_shape.size(); if (kernelNDims != 3) { continue; } diff --git a/inference-engine/src/vpu/graph_transformer/src/middleend/passes/weights_analysis.cpp b/inference-engine/src/vpu/graph_transformer/src/middleend/passes/weights_analysis.cpp index 4c2cf2c..f5fa3d7 100644 --- a/inference-engine/src/vpu/graph_transformer/src/middleend/passes/weights_analysis.cpp +++ b/inference-engine/src/vpu/graph_transformer/src/middleend/passes/weights_analysis.cpp @@ -67,7 +67,7 @@ int getMeanValue(const std::vector& exponents) { if (realSize == 0) { return smallestExp; } else { - return sum / realSize; + return static_cast(sum / realSize); } } diff --git a/inference-engine/src/vpu/graph_transformer/src/model/data_contents/batch_norm_contents.cpp b/inference-engine/src/vpu/graph_transformer/src/model/data_contents/batch_norm_contents.cpp index eb5eebb..a927bdf 100644 --- a/inference-engine/src/vpu/graph_transformer/src/model/data_contents/batch_norm_contents.cpp +++ b/inference-engine/src/vpu/graph_transformer/src/model/data_contents/batch_norm_contents.cpp @@ -31,7 +31,7 @@ void BatchNormalizationWeightsContent::fillTempBuf(void* tempBuf) const { auto srcPtr = _origContent->get(); auto dstPtr = static_cast(tempBuf); - ie::parallel_for(_origContent->byteSize() / sizeof(fp16_t), [this, srcPtr, dstPtr](int i) { + ie::parallel_for(_origContent->byteSize() / sizeof(fp16_t), [this, srcPtr, dstPtr](size_t i) { float val = ie::PrecisionUtils::f16tof32(srcPtr[i]) + _epsilon; val = 1.0f / std::sqrt(val); dstPtr[i] = ie::PrecisionUtils::f32tof16(val); @@ -58,7 +58,7 @@ void BatchNormalizationBiasesContent::fillTempBuf(void* tempBuf) const { auto dstPtr = static_cast(tempBuf); - ie::parallel_for(_origContent->byteSize() / sizeof(fp16_t), [origPtr, weightsPtr, dstPtr](int i) { + ie::parallel_for(_origContent->byteSize() / sizeof(fp16_t), [origPtr, weightsPtr, dstPtr](size_t i) { // TODO : need to be extracted from IE layer. 
float beta = 0.0f; diff --git a/inference-engine/src/vpu/graph_transformer/src/model/data_contents/deconvolution_contents.cpp b/inference-engine/src/vpu/graph_transformer/src/model/data_contents/deconvolution_contents.cpp index 5e04f32..0eb443a 100644 --- a/inference-engine/src/vpu/graph_transformer/src/model/data_contents/deconvolution_contents.cpp +++ b/inference-engine/src/vpu/graph_transformer/src/model/data_contents/deconvolution_contents.cpp @@ -64,8 +64,8 @@ DepthDeconvolutionCHWWeightsContent::DepthDeconvolutionCHWWeightsContent( void DepthDeconvolutionCHWWeightsContent::fillTempBuf(void* tempBuf) const { VPU_PROFILE(DepthDeconvolutionCHWWeightsContent); depthDeconvolutionRelayoutCHW( - _origContent->get(), _origContent->byteSize() / sizeof(fp16_t), - static_cast(tempBuf), _origContent->byteSize() / sizeof(fp16_t), + _origContent->get(), static_cast(_origContent->byteSize() / sizeof(fp16_t)), + static_cast(tempBuf), static_cast(_origContent->byteSize() / sizeof(fp16_t)), _KX, _KY, _channels); } @@ -105,8 +105,8 @@ DepthDeconvolutionHWCWeightsContent::DepthDeconvolutionHWCWeightsContent( void DepthDeconvolutionHWCWeightsContent::fillTempBuf(void* tempBuf) const { VPU_PROFILE(DepthDeconvolutionHWCWeightsContent); depthDeconvolutionRelayoutHWC( - _origContent->get(), _origContent->byteSize() / sizeof(fp16_t), - static_cast(tempBuf), _origContent->byteSize() / sizeof(fp16_t), + _origContent->get(), static_cast(_origContent->byteSize() / sizeof(fp16_t)), + static_cast(tempBuf), static_cast(_origContent->byteSize() / sizeof(fp16_t)), _KX, _KY, _channels); } diff --git a/inference-engine/src/vpu/graph_transformer/src/model/data_contents/hw_const_data_content.cpp b/inference-engine/src/vpu/graph_transformer/src/model/data_contents/hw_const_data_content.cpp index b3c393a..7cb5f4e 100644 --- a/inference-engine/src/vpu/graph_transformer/src/model/data_contents/hw_const_data_content.cpp +++ b/inference-engine/src/vpu/graph_transformer/src/model/data_contents/hw_const_data_content.cpp @@ -65,7 +65,7 @@ void HwConstData::fillTempBuf(void* outBuf) const { const auto inChannelStride = K * kernelStride; const auto outerStride = IC * inChannelStride; - ie::parallel_for(numOC, [=](int oc) { + ie::parallel_for(numOC, [=](size_t oc) { const auto ocSlice = oc; oc += startOC; diff --git a/inference-engine/src/vpu/graph_transformer/src/model/data_contents/priorbox_contents.cpp b/inference-engine/src/vpu/graph_transformer/src/model/data_contents/priorbox_contents.cpp index d2dadac..25397b8 100644 --- a/inference-engine/src/vpu/graph_transformer/src/model/data_contents/priorbox_contents.cpp +++ b/inference-engine/src/vpu/graph_transformer/src/model/data_contents/priorbox_contents.cpp @@ -91,14 +91,14 @@ void PriorBoxContent::fillTempBuf(void* tempBuf) const { if (!_densitys.empty()) { for (const auto& _density : _densitys) { if (!_fixed_ratios.empty()) { - _num_priors += _fixed_ratios.size() * (static_cast(pow(_density, 2)) - 1); + _num_priors += static_cast(_fixed_ratios.size()) * (static_cast(pow(_density, 2)) - 1); } else { - _num_priors += _aspect_ratios.size() * (static_cast(pow(_density, 2)) - 1); + _num_priors += static_cast(_aspect_ratios.size()) * (static_cast(pow(_density, 2)) - 1); } } } - _num_priors += _max_sizes.size(); + _num_priors += static_cast(_max_sizes.size()); auto W = _inDesc0.dim(Dim::W); auto H = _inDesc0.dim(Dim::H); @@ -317,7 +317,7 @@ void PriorBoxClusteredContent::fillTempBuf(void* tempBuf) const { auto num_priors_ = widths_.size(); if (variance_.empty()) { - 
variance_.push_back(0.1); + variance_.push_back(0.1f); } auto layer_width = _inDesc0.dim(Dim::W); diff --git a/inference-engine/src/vpu/graph_transformer/src/model/data_contents/scaled_content.cpp b/inference-engine/src/vpu/graph_transformer/src/model/data_contents/scaled_content.cpp index c96635a..9351ccb 100644 --- a/inference-engine/src/vpu/graph_transformer/src/model/data_contents/scaled_content.cpp +++ b/inference-engine/src/vpu/graph_transformer/src/model/data_contents/scaled_content.cpp @@ -29,7 +29,7 @@ void ScaledContent::fillTempBuf(void *tempBuf) const { auto dstPtr = static_cast(tempBuf); - ie::parallel_for(totalSize, [this, srcPtr, dstPtr](int i) { + ie::parallel_for(totalSize, [this, srcPtr, dstPtr](size_t i) { dstPtr[i] = ie::PrecisionUtils::f32tof16(ie::PrecisionUtils::f16tof32(srcPtr[i]) * _factor); }); } diff --git a/inference-engine/src/vpu/graph_transformer/src/model/data_desc.cpp b/inference-engine/src/vpu/graph_transformer/src/model/data_desc.cpp index d10b17f..0d297b6 100644 --- a/inference-engine/src/vpu/graph_transformer/src/model/data_desc.cpp +++ b/inference-engine/src/vpu/graph_transformer/src/model/data_desc.cpp @@ -103,7 +103,7 @@ DimsOrder DimsOrder::fromCode(StorageOrder64 code) { return out; } -DimsOrder DimsOrder::fromNumDims(int numDims) { +DimsOrder DimsOrder::fromNumDims(size_t numDims) { static const StorageOrder64 FULL_ORDER_DEFAULT = maskOrder(static_cast(0x0fedcba987654321ull), MAX_DIMS_64); @@ -118,7 +118,7 @@ DimsOrder DimsOrder::fromNumDims(int numDims) { } else if (numDims == 5) { return DimsOrder::NCDHW; } else { - return DimsOrder::fromCode(maskOrder(FULL_ORDER_DEFAULT, numDims)); + return DimsOrder::fromCode(maskOrder(FULL_ORDER_DEFAULT, static_cast(numDims))); } } @@ -300,7 +300,7 @@ void printTo(std::ostream& os, DimsOrder order) { } for (; i >= 0; i--) { - auto curDim = (code >> (i * 4)) & 0xF; + auto curDim = static_cast((code >> (i * 4)) & 0xF); auto it = DIM_NAMES.find(curDim); if (it != DIM_NAMES.end()) { @@ -343,7 +343,7 @@ DataDesc::DataDesc(const ie::TensorDesc& ieDesc) { // IE dims are always in ChannelMajor Layout, so we need to use fromNumDims() layout to perform permutation. 
const auto perm = DimsOrder::fromNumDims(ieDims.size()).toPermutation(); for (size_t i = 0; i < perm.size(); ++i) { - _dims.set(perm[i], ieDims[ieDims.size() - 1 - i]); + _dims.set(perm[i], static_cast(ieDims[ieDims.size() - 1 - i])); } } @@ -510,7 +510,7 @@ StridesRequirement StridesRequirement::fixed(const std::vector& strides, co }; for (const auto& dim : dimOrderVec) { - const auto idx = dimToIeInd(dim, dims.size()); + const auto idx = dimToIeInd(dim, static_cast(dims.size())); setStride(dim, strides[idx]); } @@ -576,7 +576,7 @@ DimValues calcStrides(const DataDesc& desc, const StridesRequirement& reqs) { for (std::size_t i = 1; i < perm.size(); i++) { strides.set(perm[i], strides[perm[i - 1]] * desc.dim(perm[i - 1])); - strides.set(perm[i], applyStrideRequirement(strides[perm[i]], i, reqs)); + strides.set(perm[i], applyStrideRequirement(strides[perm[i]], static_cast(i), reqs)); } } diff --git a/inference-engine/src/vpu/graph_transformer/src/stages/concat.cpp b/inference-engine/src/vpu/graph_transformer/src/stages/concat.cpp index 3ecc59a..acd6251 100644 --- a/inference-engine/src/vpu/graph_transformer/src/stages/concat.cpp +++ b/inference-engine/src/vpu/graph_transformer/src/stages/concat.cpp @@ -282,7 +282,7 @@ void FrontEnd::parseConcat( "{} layer with name {} must be able to convert to ie::ConcatLayer", layer->type, layer->name); - VPU_THROW_UNLESS(concat->_axis < output->desc().numDims(), + VPU_THROW_UNLESS(static_cast(concat->_axis) < output->desc().numDims(), "{} layer with name {} must have axis attribute no grater than number of " "dimensions, actually provided axis = {}, numDims = {}", layer->type, layer->name, concat->_axis, output->desc().numDims()); diff --git a/inference-engine/src/vpu/graph_transformer/src/stages/convolution.cpp b/inference-engine/src/vpu/graph_transformer/src/stages/convolution.cpp index da37a56..f4802ba 100644 --- a/inference-engine/src/vpu/graph_transformer/src/stages/convolution.cpp +++ b/inference-engine/src/vpu/graph_transformer/src/stages/convolution.cpp @@ -316,7 +316,7 @@ private: } static void append_pv(BlobSerializer& serializer, const PV& pv) { - int ndims = pv.size(); + int ndims = static_cast(pv.size()); append_i(serializer, ndims); for (int i = 0; i < ndims; i++) { append_i(serializer, pv[i]); @@ -345,7 +345,7 @@ void parseConvND(const Model & model, VPU_THROW_UNLESS(convLayer != nullptr, "failed dynamic cast to ConvolutionLayer"); auto kernelShape = convLayer->_kernel; - int kernelNDims = kernelShape.size(); + int kernelNDims = static_cast(kernelShape.size()); // Yet, only 3D kernel supported (NCDHW) // Later, if support 4D, 5D, etc, please // check if (kernelNDims >= 3), so that diff --git a/inference-engine/src/vpu/graph_transformer/src/stages/custom.cpp b/inference-engine/src/vpu/graph_transformer/src/stages/custom.cpp index cf71b2c..7aa3935 100644 --- a/inference-engine/src/vpu/graph_transformer/src/stages/custom.cpp +++ b/inference-engine/src/vpu/graph_transformer/src/stages/custom.cpp @@ -196,7 +196,7 @@ private: IE_ASSERT(origData != nullptr); auto dims = origData->getDims(); - int ndims = dims.size(); + auto ndims = dims.size(); if (ndims > 4) { VPU_THROW_UNLESS(dim.length() == 1, @@ -477,7 +477,7 @@ void FrontEnd::parseCustom(const Model& model, const ie::CNNLayerPtr& layer, con stage->attrs().set("inputOrders", std::move(inputOrders)); stage->attrs().set("outputOrders", std::move(outputOrders)); - int buffer_size = kernel.kernelBinary().length() + 1024; + auto buffer_size = kernel.kernelBinary().length() + 1024; 
model->addTempBuffer(stage, buffer_size); } } diff --git a/inference-engine/src/vpu/graph_transformer/src/stages/eltwise.cpp b/inference-engine/src/vpu/graph_transformer/src/stages/eltwise.cpp index 3010cee..b4fd261 100644 --- a/inference-engine/src/vpu/graph_transformer/src/stages/eltwise.cpp +++ b/inference-engine/src/vpu/graph_transformer/src/stages/eltwise.cpp @@ -286,14 +286,14 @@ void FrontEnd::parseEltwise(const Model& model, const ie::CNNLayerPtr& _layer, c if (type == DataType::FP16) { stage->attrs().set("coeff1", layer->coeff[0]); } else { - stage->attrs().set("coeff1", layer->coeff[0]); + stage->attrs().set("coeff1", static_cast(layer->coeff[0])); } } if (layer->coeff.size() > 1 || subCoefficient != 1) { if (type == DataType::FP16) { stage->attrs().set("coeff2", subCoefficient * (layer->coeff.size() > 1 ? layer->coeff[1] : 1.0f)); } else { - stage->attrs().set("coeff2", subCoefficient * (layer->coeff.size() > 1 ? layer->coeff[1] : 1)); + stage->attrs().set("coeff2", subCoefficient * (layer->coeff.size() > 1 ? static_cast(layer->coeff[1]) : 1)); } } } diff --git a/inference-engine/src/vpu/graph_transformer/src/stages/exp_detectionoutput.cpp b/inference-engine/src/vpu/graph_transformer/src/stages/exp_detectionoutput.cpp index 1fae7e9..34a57cc 100644 --- a/inference-engine/src/vpu/graph_transformer/src/stages/exp_detectionoutput.cpp +++ b/inference-engine/src/vpu/graph_transformer/src/stages/exp_detectionoutput.cpp @@ -94,9 +94,9 @@ void FrontEnd::parseExpDetectionOutput(const Model& model, const ie::CNNLayerPtr params.max_delta_log_wh = layer->GetParamAsFloat("max_delta_log_wh", 0.0f); params.nms_threshold = layer->GetParamAsFloat("nms_threshold", 0.0f); params.score_threshold = layer->GetParamAsFloat("score_threshold", 0.0f); - params.max_detections_per_image = layer->GetParamAsFloat("max_detections_per_image", 0); - params.num_classes = layer->GetParamAsFloat("num_classes", 0); - params.post_nms_count = layer->GetParamAsFloat("post_nms_count", 0); + params.max_detections_per_image = layer->GetParamAsInt("max_detections_per_image", 0); + params.num_classes = layer->GetParamAsInt("num_classes", 0); + params.post_nms_count = layer->GetParamAsInt("post_nms_count", 0); params.class_agnostic_box_regression = layer->GetParamAsFloat("class_agnostic_box_regression", 0) ? 
1 : 0; auto inputBoxes = inputs[0]; // [numRois][4] diff --git a/inference-engine/src/vpu/graph_transformer/src/stages/fc.cpp b/inference-engine/src/vpu/graph_transformer/src/stages/fc.cpp index c324f7c..5c56017 100644 --- a/inference-engine/src/vpu/graph_transformer/src/stages/fc.cpp +++ b/inference-engine/src/vpu/graph_transformer/src/stages/fc.cpp @@ -59,7 +59,7 @@ void FrontEnd::parseFullyConnected(const Model& model, const ie::CNNLayerPtr& _l std::tie(weights, biases) = getWeightsAndBiases(model, layer); IE_ASSERT(weights->desc().totalDimSize() >= - input->desc().totalDimSize() / input->desc().dim(Dim::N, 1) * layer->_out_num); + input->desc().totalDimSize() / input->desc().dim(Dim::N, 1) * static_cast(layer->_out_num)); weights = model->duplicateData( weights, "@fc", diff --git a/inference-engine/src/vpu/graph_transformer/src/stages/loop_end.cpp b/inference-engine/src/vpu/graph_transformer/src/stages/loop_end.cpp index 4bfb725..f78b216 100644 --- a/inference-engine/src/vpu/graph_transformer/src/stages/loop_end.cpp +++ b/inference-engine/src/vpu/graph_transformer/src/stages/loop_end.cpp @@ -28,8 +28,8 @@ protected: void propagateDataOrderImpl(StageDataInfo& orderInfo) override { const auto& endCopies = attrs().getOrDefault("end-iteration-components", {}); for (const auto& iteration : endCopies) { - const auto& dstIdx = iteration.first.first; - const auto& srcIdx = iteration.second; + const auto& dstIdx = static_cast(iteration.first.first); + const auto& srcIdx = static_cast(iteration.second); orderInfo.setOutput(outputEdge(dstIdx), inputEdge(srcIdx)->input()->desc().dimsOrder()); } @@ -71,7 +71,7 @@ protected: for (const auto& component : endCopies) { const auto& rule = component.first.second; auto axis = rule.axis; - auto axisInd = static_cast(output(component.first.first)->desc().dimsOrder().dimInd(axis)); + auto axisInd = static_cast(output(static_cast(component.first.first))->desc().dimsOrder().dimInd(axis)); serializer.append(axisInd); serializer.append(rule.start); @@ -89,8 +89,8 @@ protected: } for (const auto& iteration : endCopies) { - output(iteration.first.first)->serializeBuffer(serializer); - input(iteration.second)->serializeBuffer(serializer); + output(static_cast(iteration.first.first))->serializeBuffer(serializer); + input(static_cast(iteration.second))->serializeBuffer(serializer); } } }; diff --git a/inference-engine/src/vpu/graph_transformer/src/stages/loop_start.cpp b/inference-engine/src/vpu/graph_transformer/src/stages/loop_start.cpp index ae79ec6..baaee3f 100644 --- a/inference-engine/src/vpu/graph_transformer/src/stages/loop_start.cpp +++ b/inference-engine/src/vpu/graph_transformer/src/stages/loop_start.cpp @@ -60,7 +60,7 @@ protected: for (const auto& component : startCopies) { const auto& rule = component.first.second; auto axis = rule.axis; - auto axisInd = static_cast(input(component.first.first)->desc().dimsOrder().dimInd(axis)); + auto axisInd = static_cast(input(static_cast(component.first.first))->desc().dimsOrder().dimInd(axis)); serializer.append(axisInd); serializer.append(rule.start); @@ -78,8 +78,8 @@ protected: } for (const auto& iteration : startCopies) { - input(iteration.first.first)->serializeBuffer(serializer); - output(iteration.second)->serializeBuffer(serializer); + input(static_cast(iteration.first.first))->serializeBuffer(serializer); + output(static_cast(iteration.second))->serializeBuffer(serializer); } } }; diff --git a/inference-engine/src/vpu/graph_transformer/src/stages/mtcnn.cpp 
b/inference-engine/src/vpu/graph_transformer/src/stages/mtcnn.cpp index 405fa29..6b4306c 100644 --- a/inference-engine/src/vpu/graph_transformer/src/stages/mtcnn.cpp +++ b/inference-engine/src/vpu/graph_transformer/src/stages/mtcnn.cpp @@ -141,7 +141,7 @@ ie::CNNNetwork loadSubNetwork( ie::SizeVector inputShape; std::tie(inputName, inputShape) = *inputShapes.begin(); if (zdir_batchsize != nullptr) - *zdir_batchsize = inputShape[1]/3; + *zdir_batchsize = static_cast(inputShape[1]/3); inputShape[0] = 1; // set batch size to the first input dimension inputShape[2] = imgSize.second; // changes input height to the image one inputShape[3] = imgSize.first; // changes input width to the image one diff --git a/inference-engine/src/vpu/graph_transformer/src/stages/mx_stage.cpp b/inference-engine/src/vpu/graph_transformer/src/stages/mx_stage.cpp index 536093f..8c79f1a 100644 --- a/inference-engine/src/vpu/graph_transformer/src/stages/mx_stage.cpp +++ b/inference-engine/src/vpu/graph_transformer/src/stages/mx_stage.cpp @@ -128,7 +128,7 @@ void MyriadXHwStage::serializeParamsImpl(BlobSerializer& serializer) const { serializer.append(checked_cast(hwOpParams.opMode)); - serializer.append(checked_cast(hwOpParams.withPad)); + serializer.append(static_cast(hwOpParams.withPad)); if (hwOpParams.withPad) { serializer.append(checked_cast(hwOpParams.padMode)); } @@ -147,7 +147,7 @@ void MyriadXHwStage::serializeParamsImpl(BlobSerializer& serializer) const { serializer.append(checked_cast(hwOpParams.fcInputNum)); serializer.append(checked_cast(hwOpParams.fcOutputOffset)); serializer.append(checked_cast(hwOpParams.fcOutputNum)); - serializer.append(checked_cast(hwOpParams.fcAccum)); + serializer.append(static_cast(hwOpParams.fcAccum)); } if (hwOpParams.opType != HwOpType::FC) { @@ -161,20 +161,20 @@ void MyriadXHwStage::serializeParamsImpl(BlobSerializer& serializer) const { serializer.append(checked_cast(hwOpParams.poolKernelHeight)); } - serializer.append(checked_cast(hwOpParams.withReLU)); + serializer.append(static_cast(hwOpParams.withReLU)); if (hwOpParams.withReLU) { serializer.append(checked_cast(hwOpParams.t0)); serializer.append(checked_cast(hwOpParams.a0)); serializer.append(checked_cast(hwOpParams.a1)); } - serializer.append(checked_cast(hwOpParams.withClamp)); + serializer.append(static_cast(hwOpParams.withClamp)); if (hwOpParams.withClamp) { serializer.append(checked_cast(hwOpParams.clampMaxVal)); } - serializer.append(checked_cast(hwOpParams.reuseData)); - serializer.append(checked_cast(hwOpParams.reuseCoeff)); + serializer.append(static_cast(hwOpParams.reuseData)); + serializer.append(static_cast(hwOpParams.reuseCoeff)); } serializer.append(checked_cast(injectedStage() == nullptr ? 0 : 1)); diff --git a/inference-engine/src/vpu/graph_transformer/src/stages/norm.cpp b/inference-engine/src/vpu/graph_transformer/src/stages/norm.cpp index 3d65168..7f9cfb7 100644 --- a/inference-engine/src/vpu/graph_transformer/src/stages/norm.cpp +++ b/inference-engine/src/vpu/graph_transformer/src/stages/norm.cpp @@ -64,7 +64,7 @@ private: auto beta = attrs().get("beta"); serializer.append(static_cast(size)); - serializer.append(ie::PrecisionUtils::f32tof16(k)); + serializer.append(ie::PrecisionUtils::f32tof16(static_cast(k))); // why float? 
serializer.append(ie::PrecisionUtils::f32tof16(alpha)); serializer.append(ie::PrecisionUtils::f32tof16(beta)); serializer.append(ie::PrecisionUtils::f32tof16(0)); // for alignment diff --git a/inference-engine/src/vpu/graph_transformer/src/stages/pooling.cpp b/inference-engine/src/vpu/graph_transformer/src/stages/pooling.cpp index 5090530..a631e94 100644 --- a/inference-engine/src/vpu/graph_transformer/src/stages/pooling.cpp +++ b/inference-engine/src/vpu/graph_transformer/src/stages/pooling.cpp @@ -360,7 +360,7 @@ private: } static void append_pv(BlobSerializer& serializer, const PV& pv) { - int ndims = pv.size(); + int ndims = static_cast(pv.size()); append_i(serializer, ndims); for (int i = 0; i < ndims; i++) { append_i(serializer, pv[i]); @@ -387,7 +387,7 @@ void parsePoolND(const Model & model, VPU_THROW_UNLESS(poolLayer != nullptr, "failed dynamic cast to PoolingLayer"); auto kernel_shape = poolLayer->_kernel; - int kernel_ndims = kernel_shape.size(); + int kernel_ndims = static_cast(kernel_shape.size()); // Yet, only 3D kernel supported (NCDHW) // Later, if support 4D, 5D, etc, please // check if (kernelNDims >= 3), so that diff --git a/inference-engine/src/vpu/graph_transformer/src/stages/proposal.cpp b/inference-engine/src/vpu/graph_transformer/src/stages/proposal.cpp index 5237c7b..0b74764 100644 --- a/inference-engine/src/vpu/graph_transformer/src/stages/proposal.cpp +++ b/inference-engine/src/vpu/graph_transformer/src/stages/proposal.cpp @@ -178,7 +178,7 @@ void FrontEnd::parseProposal(const Model& model, const ie::CNNLayerPtr& layer, c stage->attrs().set("scales", scales); stage->attrs().set("ratios", ratios); - int number_of_anchors = ratios.size() * scales.size(); + int number_of_anchors = static_cast(ratios.size() * scales.size()); // Allocate slightly larger buffer than needed for handling remnant in distribution among SHAVEs int buffer_size = (inputs[0]->desc().dim(Dim::H) + 16) * inputs[0]->desc().dim(Dim::W) * number_of_anchors * 5 * sizeof(float); @@ -189,8 +189,8 @@ void FrontEnd::parseProposal(const Model& model, const ie::CNNLayerPtr& layer, c }; const int num_proposals = number_of_anchors * inputs[0]->desc().dim(Dim::H) * inputs[0]->desc().dim(Dim::W); const int pre_nms_topn = std::min(num_proposals, stage->attrs().get("pre_nms_topn")); - const int required_cmx_size_per_shave = std::max(2 * (1 + pre_nms_topn) * sizeof(SortItem), - (1 + pre_nms_topn) * sizeof(SortItem) + number_of_anchors * sizeof(float)); + const int required_cmx_size_per_shave = static_cast(std::max(2 * (1 + pre_nms_topn) * sizeof(SortItem), + (1 + pre_nms_topn) * sizeof(SortItem) + number_of_anchors * sizeof(float))); const auto& env = CompileEnv::get(); const int required_cmx_buffer_size = env.resources.numSHAVEs * required_cmx_size_per_shave; diff --git a/inference-engine/src/vpu/graph_transformer/src/stages/reduce.cpp b/inference-engine/src/vpu/graph_transformer/src/stages/reduce.cpp index b019617..cd58b33 100644 --- a/inference-engine/src/vpu/graph_transformer/src/stages/reduce.cpp +++ b/inference-engine/src/vpu/graph_transformer/src/stages/reduce.cpp @@ -69,7 +69,7 @@ private: auto irIndex = oldIndices[i]; if (irIndex < 0) { // handle negative indices - irIndex = ndims - std::abs(irIndex); + irIndex = static_cast(ndims - std::abs(irIndex)); } VPU_THROW_UNLESS(irIndex < ndims, "Stage {} of type {} expects input with index {} ({}) include values less than ", diff --git a/inference-engine/src/vpu/graph_transformer/src/stages/resample.cpp 
b/inference-engine/src/vpu/graph_transformer/src/stages/resample.cpp index af62437..9f94731 100644 --- a/inference-engine/src/vpu/graph_transformer/src/stages/resample.cpp +++ b/inference-engine/src/vpu/graph_transformer/src/stages/resample.cpp @@ -80,7 +80,7 @@ void FrontEnd::parseResample(const Model& model, const ie::CNNLayerPtr& layer, c auto stage = model->addNewStage(layer->name, StageType::Resample, layer, inputs, outputs); stage->attrs().set("antialias", layer->GetParamAsInt("antialias", 0)); - stage->attrs().set("factor", layer->GetParamAsInt("factor", -1.0f)); + stage->attrs().set("factor", layer->GetParamAsFloat("factor", -1.0f)); auto method = layer->GetParamAsString("type", "caffe.ResampleParameter.NEAREST"); if (cmp(method, "caffe.ResampleParameter.NEAREST")) { diff --git a/inference-engine/src/vpu/graph_transformer/src/stages/rnn.cpp b/inference-engine/src/vpu/graph_transformer/src/stages/rnn.cpp index c5a21e8..9f45840 100644 --- a/inference-engine/src/vpu/graph_transformer/src/stages/rnn.cpp +++ b/inference-engine/src/vpu/graph_transformer/src/stages/rnn.cpp @@ -88,7 +88,7 @@ private: inputEdges().size()); // check number of outputs, without temp buffer - const int outputsNumber = outputEdges().size(); + const int outputsNumber = static_cast(outputEdges().size()); const int useCellState = outputsNumber >= 2; const int outputEdgesExpected = 1 + (useCellState ? 1 : 0) @@ -193,8 +193,8 @@ void FrontEnd::parseRNN(const Model& model, const ie::CNNLayerPtr& _layer, const newWeightsPtr + ngates * stateSize * inputSize, ngates, - stateSize, - inputSize); + static_cast(stateSize), + static_cast(inputSize)); }; auto newWeights = model->addConstData(_layer->name + "@weights", weights->desc(), generator); @@ -215,8 +215,8 @@ void FrontEnd::parseRNN(const Model& model, const ie::CNNLayerPtr& _layer, const bool RNNForward = layer->direction == ie::RNNSequenceLayer::FWD; stage->attrs().set("RNNForward", RNNForward); - stage->attrs().set("nCells", nCells); - stage->attrs().set("nBatches", nBatches); + stage->attrs().set("nCells", static_cast(nCells)); + stage->attrs().set("nBatches", static_cast(nBatches)); } void FrontEnd::parseLSTMCell(const Model& model, const ie::CNNLayerPtr& _layer, const DataVector &inputs, const DataVector &outputs) { @@ -262,8 +262,8 @@ void FrontEnd::parseLSTMCell(const Model& model, const ie::CNNLayerPtr& _layer, newWeightsPtr, newWeightsPtr + ngates * stateSize * inputSize, ngates, - stateSize, - inputSize); + static_cast(stateSize), + static_cast(inputSize)); }; auto newWeights = model->addConstData(_layer->name + "@weights", weights->desc(), generator); @@ -306,7 +306,7 @@ void FrontEnd::parseLSTMCell(const Model& model, const ie::CNNLayerPtr& _layer, auto stage = model->addNewStage(layer->name, StageType::LSTMCell, layer, stageInputs, realOutputs); stage->attrs().set("RNNForward", true); stage->attrs().set("nCells", 1); - stage->attrs().set("nBatches", nBatches); + stage->attrs().set("nBatches", static_cast(nBatches)); } } // namespace vpu diff --git a/inference-engine/src/vpu/myriad_plugin/myriad_executor.cpp b/inference-engine/src/vpu/myriad_plugin/myriad_executor.cpp index 9e408dc..de52653 100644 --- a/inference-engine/src/vpu/myriad_plugin/myriad_executor.cpp +++ b/inference-engine/src/vpu/myriad_plugin/myriad_executor.cpp @@ -132,14 +132,14 @@ ncStatus_t MyriadExecutor::bootNextDevice(std::vector &devicePool, configDevName.copy(in_deviceDesc.name, NC_MAX_NAME_SIZE - 1); } - statusOpen = ncSetDeviceConnectTimeout(config.deviceConnectTimeout().count()); + 
statusOpen = ncSetDeviceConnectTimeout(static_cast(config.deviceConnectTimeout().count())); if (statusOpen) { return statusOpen; } ncDeviceOpenParams_t deviceOpenParams = {}; deviceOpenParams.watchdogHndl = _mvnc->watchdogHndl(); - deviceOpenParams.watchdogInterval = config.watchdogInterval().count(); + deviceOpenParams.watchdogInterval = static_cast(config.watchdogInterval().count()); deviceOpenParams.memoryType = checked_cast(config.memoryType()); deviceOpenParams.customFirmwareDirectory = dirName.c_str(); @@ -308,7 +308,7 @@ void MyriadExecutor::allocateGraph(DevicePtr &device, GraphDesc &graphDesc, const std::pair &graphHeaderDesc, size_t numStages, const std::string & networkName, int executors) { VPU_PROFILE(allocateGraph); - _numStages = numStages; + _numStages = static_cast(numStages); graphDesc._name = networkName; if (device->_deviceHandle == nullptr) { THROW_IE_EXCEPTION << "Failed to allocate graph: MYRIAD device is not opened."; @@ -331,7 +331,7 @@ void MyriadExecutor::allocateGraph(DevicePtr &device, GraphDesc &graphDesc, graphFileContent.data(), static_cast(graphFileContent.size()), graphHeaderDesc.first, - graphHeaderDesc.second); + static_cast(graphHeaderDesc.second)); if (status != NC_OK) { THROW_IE_EXCEPTION << "Failed to allocate graph: " << ncStatusToStr(nullptr, status); } @@ -418,7 +418,7 @@ void MyriadExecutor::queueInference(GraphDesc &graphDesc, void *input_data, size } if (result_data != nullptr && result_bytes != 0) { - getResult(graphDesc, result_data, result_bytes); + getResult(graphDesc, result_data, static_cast(result_bytes)); } } diff --git a/inference-engine/src/vpu/myriad_plugin/myriad_infer_request.cpp b/inference-engine/src/vpu/myriad_plugin/myriad_infer_request.cpp index 9414277..061a910 100644 --- a/inference-engine/src/vpu/myriad_plugin/myriad_infer_request.cpp +++ b/inference-engine/src/vpu/myriad_plugin/myriad_infer_request.cpp @@ -208,12 +208,12 @@ void MyriadInferRequest::GetResult() { const auto& blob = (*it).second; if (blob->getTensorDesc().getLayout() == getVpuLayout(name)) { - _executor->getResult(_graphDesc, blob->buffer(), blob->byteSize()); + _executor->getResult(_graphDesc, blob->buffer(), static_cast(blob->byteSize())); return; } } - _executor->getResult(_graphDesc, resultBuffer.data(), resultBuffer.size()); + _executor->getResult(_graphDesc, resultBuffer.data(), static_cast(resultBuffer.size())); for (const auto& output : _outputs) { const auto& ieBlobName = output.first; @@ -291,6 +291,6 @@ void MyriadInferRequest::GetPerformanceCounts(std::map(perfInfo.size()), _config.perfReport(), _config.printReceiveTensorTime()); } diff --git a/inference-engine/thirdparty/movidius/XLink/pc/PlatformData.c b/inference-engine/thirdparty/movidius/XLink/pc/PlatformData.c index 54d84d1..5c16d56 100644 --- a/inference-engine/thirdparty/movidius/XLink/pc/PlatformData.c +++ b/inference-engine/thirdparty/movidius/XLink/pc/PlatformData.c @@ -347,7 +347,7 @@ int usb_read(libusb_device_handle *f, void *data, size_t size) const int chunk_size = DEFAULT_CHUNKSZ; while(size > 0) { - int bt, ss = size; + int bt, ss = (int)size; if(ss > chunk_size) ss = chunk_size; #if (defined(_WIN32) || defined(_WIN64)) @@ -368,7 +368,7 @@ int usb_write(libusb_device_handle *f, const void *data, size_t size) const int chunk_size = DEFAULT_CHUNKSZ; while(size > 0) { - int bt, ss = size; + int bt, ss = (int)size; if(ss > chunk_size) ss = chunk_size; #if (defined(_WIN32) || defined(_WIN64) ) diff --git a/inference-engine/thirdparty/movidius/XLink/pc/PlatformDeviceControl.c 
b/inference-engine/thirdparty/movidius/XLink/pc/PlatformDeviceControl.c index 9d282bb..6481839 100644 --- a/inference-engine/thirdparty/movidius/XLink/pc/PlatformDeviceControl.c +++ b/inference-engine/thirdparty/movidius/XLink/pc/PlatformDeviceControl.c @@ -158,7 +158,7 @@ int XLinkPlatformBootFirmware(deviceDesc_t* deviceDesc, const char* firmware, si printf("Path to your boot util is too long for the char array here!\n"); } // Boot it - int rc = usb_boot(deviceDesc->name, firmware, length); + int rc = usb_boot(deviceDesc->name, firmware, (unsigned)length); if(!rc) { mvLog(MVLOG_DEBUG, "Boot successful, device address %s", deviceDesc->name); @@ -228,7 +228,7 @@ libusb_device_handle *usbLinkOpen(const char *path) libusb_device *dev = NULL; double waittm = seconds() + statuswaittimeout; while(seconds() < waittm){ - int size = strlen(path); + int size = (int)strlen(path); #if (!defined(_WIN32) && !defined(_WIN64)) uint16_t bcdusb = -1; diff --git a/inference-engine/thirdparty/movidius/XLink/pc/Win/src/win_pthread.c b/inference-engine/thirdparty/movidius/XLink/pc/Win/src/win_pthread.c index 79612cb..b45a920 100644 --- a/inference-engine/thirdparty/movidius/XLink/pc/Win/src/win_pthread.c +++ b/inference-engine/thirdparty/movidius/XLink/pc/Win/src/win_pthread.c @@ -110,7 +110,7 @@ int pthread_create(pthread_t *thread, pthread_attr_t *attr, if (attr) { thread->pthread_state = attr->pthread_state; - stack_size = attr->stack_size; + stack_size = (unsigned)attr->stack_size; } thread->handle = (HANDLE)_beginthreadex((void *)NULL, stack_size, _pthread_start_routine, thread, 0, NULL); diff --git a/inference-engine/thirdparty/movidius/XLink/pc/protocols/pcie_host.c b/inference-engine/thirdparty/movidius/XLink/pc/protocols/pcie_host.c index 68eeecd..e547829 100644 --- a/inference-engine/thirdparty/movidius/XLink/pc/protocols/pcie_host.c +++ b/inference-engine/thirdparty/movidius/XLink/pc/protocols/pcie_host.c @@ -158,7 +158,7 @@ int pcie_write(HANDLE fd, void * buf, size_t bufSize) Overlapped.hEvent = Event; ResetEvent(Overlapped.hEvent); - OutputCode = WriteFile(dev, buf, bufSize, NULL, &Overlapped); + OutputCode = WriteFile(dev, buf, (DWORD)bufSize, NULL, &Overlapped); if (OutputCode == FALSE) { if (GetLastError() == ERROR_IO_PENDING) { @@ -235,7 +235,7 @@ int pcie_read(HANDLE fd, void * buf, size_t bufSize) Overlapped.hEvent = Event; ResetEvent(Overlapped.hEvent); - OutputCode = ReadFile(dev, buf, bufSize, NULL, &Overlapped); + OutputCode = ReadFile(dev, buf, (DWORD)bufSize, NULL, &Overlapped); if (OutputCode == FALSE) { if (GetLastError() == ERROR_IO_PENDING) { @@ -583,7 +583,7 @@ pcieHostError_t pcie_boot_device(HANDLE fd, const char *buffer, size_t length) bResult = DeviceIoControl(fd, // device to be queried MXLK_BOOT_DEV, // operation to perform - (void*)buffer, length, + (void*)buffer, (DWORD)length, &output_buffer, sizeof(output_buffer), // output buffer &junk, // # bytes returned (LPOVERLAPPED) NULL); // synchronous I/O diff --git a/inference-engine/thirdparty/movidius/XLink/pc/protocols/usb_boot.c b/inference-engine/thirdparty/movidius/XLink/pc/protocols/usb_boot.c index 21e47b8..3ef9ca2 100644 --- a/inference-engine/thirdparty/movidius/XLink/pc/protocols/usb_boot.c +++ b/inference-engine/thirdparty/movidius/XLink/pc/protocols/usb_boot.c @@ -544,7 +544,7 @@ static int wait_findopen(const char *device_address, int timeout, libusb_device for(;;) { highres_gettime(&t1); - int addr_size = strlen(device_address); + int addr_size = (int)strlen(device_address); #if (!defined(_WIN32) && 
diff --git a/inference-engine/thirdparty/movidius/XLink/pc/protocols/usb_boot.c b/inference-engine/thirdparty/movidius/XLink/pc/protocols/usb_boot.c
index 21e47b8..3ef9ca2 100644
--- a/inference-engine/thirdparty/movidius/XLink/pc/protocols/usb_boot.c
+++ b/inference-engine/thirdparty/movidius/XLink/pc/protocols/usb_boot.c
@@ -544,7 +544,7 @@ static int wait_findopen(const char *device_address, int timeout, libusb_device
     for(;;)
     {
         highres_gettime(&t1);
-        int addr_size = strlen(device_address);
+        int addr_size = (int)strlen(device_address);
 #if (!defined(_WIN32) && !defined(_WIN64) )
         rc = usb_find_device_with_bcd(0, (char*)device_address, addr_size, (void**)dev,
                                       DEFAULT_VID, get_pid_by_name(device_address), bcdusb);
diff --git a/inference-engine/thirdparty/movidius/mvnc/src/mvnc_api.c b/inference-engine/thirdparty/movidius/mvnc/src/mvnc_api.c
index b44ea52..200848f 100644
--- a/inference-engine/thirdparty/movidius/mvnc/src/mvnc_api.c
+++ b/inference-engine/thirdparty/movidius/mvnc/src/mvnc_api.c
@@ -91,7 +91,9 @@ static int global_lock_fd = -1;

 // To suppress warning in the macro below
+#if defined __GNUC__ || defined __clang__
 #pragma GCC diagnostic ignored "-Wformat-extra-args"
+#endif

 /**
  * @brief The macro checks a stream id passed to it
@@ -286,7 +288,7 @@ static void resetAll()

         // Try to reboot them
         int i;
-        for (i = 0; i < stalled_count; ++i) {
+        for (i = 0; i < (int)stalled_count; ++i) {
             mvLog(MVLOG_DEBUG, "Found stalled device %s", stalledDevices[i].name);

             XLinkHandler_t* handler = calloc(1, sizeof(XLinkHandler_t));
@@ -388,7 +390,7 @@ static char getPathSeparator() {
  */
 static void addEndPathSeparator(char* buffer, const int buffer_length)
 {
-    const int filePathLen = strnlen(buffer, buffer_length);
+    const int filePathLen = (int)strnlen(buffer, buffer_length);
     if ((filePathLen > 1) && (filePathLen < buffer_length - 1) &&
         buffer[filePathLen - 1] != getPathSeparator()) {
         buffer[filePathLen] = getPathSeparator();
@@ -969,9 +971,9 @@ ncStatus_t ncDeviceOpen(struct ncDeviceHandle_t **deviceHandlePtr,
             device_disappear = 1;
         }
         int i, j;
-        for (i = 0; i < numberOfDevicesAfterBoot; ++i) {
+        for (i = 0; i < (int)numberOfDevicesAfterBoot; ++i) {
             int found_in_before_boot_list = 0;
-            for (j = 0; j < numberOfDevicesBeforeBoot; ++j) {
+            for (j = 0; j < (int)numberOfDevicesBeforeBoot; ++j) {
                 if(strcmp(afterBootDevices[i].name, beforeBootDevices[j].name) == 0) {
                     found_in_before_boot_list = 1;
                 }
@@ -1183,7 +1185,7 @@ ncStatus_t ncAvailableDevices(struct ncDeviceDescr_t *deviceDescrPtr,
     XLinkFindAllSuitableDevices(
             X_LINK_UNBOOTED, in_deviceDsc, deviceDescArray, NC_MAX_DEVICES, &amountOfFoundDevices);
     int i;
-    for (i = 0; i < amountOfFoundDevices; ++i) {
+    for (i = 0; i < (int)amountOfFoundDevices; ++i) {
         copyXLinkDeviceDescrToNc(&deviceDescArray[i], &deviceDescrPtr[i]);
     }

@@ -1834,9 +1836,9 @@ ncStatus_t ncDeviceClose(struct ncDeviceHandle_t **deviceHandlePtr, WatchdogHndl
             booted_disappeared = 1;
         }
         int i, j;
-        for (i = 0; i < foundDevicesAfterReset; ++i) {
+        for (i = 0; i < (int)foundDevicesAfterReset; ++i) {
             int found_in_before_reset_list = 0;
-            for (j = 0; j < foundDevicesBeforeReset; ++j) {
+            for (j = 0; j < (int)foundDevicesBeforeReset; ++j) {
                 if(strcmp(beforeResetDevices[i].name, afterResetDevices[j].name) == 0) {
                     found_in_before_reset_list = 1;
                 }
@@ -2008,7 +2010,7 @@ ncStatus_t ncGraphAllocate(struct ncDeviceHandle_t * deviceHandle,
     g->id = graphIdCount++;
     streamId_t streamId;

-    if (g->executors_number > d->dev_attr.max_executors) {
+    if (g->executors_number > (int)d->dev_attr.max_executors) {
         mvLog(MVLOG_ERROR, "Executors number is greater than max allowed!");
         unlockAllInferences();
         return NC_INVALID_PARAMETERS;
@@ -2473,7 +2475,7 @@ static ncStatus_t getGraphOption(struct _graphPrivate_t *g,
             break;
         }
     case NC_RW_GRAPH_EXECUTORS_NUM:{
-            int size = sizeof(int);
+            unsigned size = sizeof(int);
             if (*dataLength < size) {
                 mvLog(MVLOG_ERROR,
                       "data length of data (%d) is smaller that required (%d)!\n",
@@ -2637,7 +2639,7 @@ static ncStatus_t getDeviceOption(struct _devicePrivate_t *d,
         if (rc) {
             return rc;
         }
-        d->throttle_happened = d->thermal_stats[0];
+        d->throttle_happened = (int)d->thermal_stats[0];
         *(int *) data = d->throttle_happened;
         *dataLength = sizeof(int);
         break;
@@ -2654,10 +2656,10 @@ static ncStatus_t getDeviceOption(struct _devicePrivate_t *d,
             mvLog(MVLOG_ERROR,
                   "data length of output buffer (%d) is smaller that required (%zu)!\n",
                   *dataLength, strlen(d->dev_addr) + 1);
-            *dataLength = strlen(d->dev_addr) + 1;
+            *dataLength = (unsigned)(strlen(d->dev_addr) + 1);
             return NC_INVALID_DATA_LENGTH;
         }
-        *dataLength = strlen(d->dev_addr) + 1;
+        *dataLength = (unsigned)(strlen(d->dev_addr) + 1);
         mv_strncpy((char *) data, *dataLength, d->dev_addr, *dataLength - 1);
         break;
     case NC_RO_DEVICE_PLATFORM:
@@ -3256,7 +3258,7 @@ ncStatus_t ncFifoReadElem(struct ncFifoHandle_t * fifoHandle, void *outputData,
         return NC_UNAUTHORIZED;
     }

-    if (*outputDataLen < handle->datasize) {
+    if (*outputDataLen < (unsigned)handle->datasize) {
         mvLog(MVLOG_ERROR,
               "This datasize in tensorDesc (%d) is smaller than required (%d)!",
               *outputDataLen, handle->datasize);
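The #if defined __GNUC__ || defined __clang__ guard added at the top of mvnc_api.c keeps the GCC-specific pragma away from MSVC's preprocessor, the same way the #if defined(_MSC_VER) push/pop blocks later in this patch hide MSVC pragmas from other compilers. A compilable sketch of the combined pattern (truncate_to_int is a contrived trigger for the suppressed warning):

    #include <cstdio>

    #if defined(__GNUC__) || defined(__clang__)
    #pragma GCC diagnostic push
    #pragma GCC diagnostic ignored "-Wconversion"
    #elif defined(_MSC_VER)
    #pragma warning(push)
    #pragma warning(disable : 4244)  // conversion with possible loss of data
    #endif

    static int truncate_to_int(long long value) {
        return value;  // narrowing return: C4244 on MSVC, -Wconversion on GCC/Clang
    }

    #if defined(__GNUC__) || defined(__clang__)
    #pragma GCC diagnostic pop
    #elif defined(_MSC_VER)
    #pragma warning(pop)
    #endif

    int main() {
        std::printf("%d\n", truncate_to_int(42LL));
        return 0;
    }
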
diff --git a/inference-engine/thirdparty/movidius/mvnc/src/mvnc_data.c b/inference-engine/thirdparty/movidius/mvnc/src/mvnc_data.c
index bc199ec..6f88763 100644
--- a/inference-engine/thirdparty/movidius/mvnc/src/mvnc_data.c
+++ b/inference-engine/thirdparty/movidius/mvnc/src/mvnc_data.c
@@ -274,7 +274,7 @@ ncStatus_t bootDevice(deviceDesc_t* deviceDescToBoot,
         }
     }

-    XLinkError_t rc = XLinkBootFirmware(deviceDescToBoot, firmware, length);
+    XLinkError_t rc = XLinkBootFirmware(deviceDescToBoot, firmware, (unsigned long)length);
     free(firmware);

     if(rc) {
diff --git a/inference-engine/thirdparty/movidius/mvnc/src/watchdog/watchdog.cpp b/inference-engine/thirdparty/movidius/mvnc/src/watchdog/watchdog.cpp
index a1821c8..4cc6624 100644
--- a/inference-engine/thirdparty/movidius/mvnc/src/watchdog/watchdog.cpp
+++ b/inference-engine/thirdparty/movidius/mvnc/src/watchdog/watchdog.cpp
@@ -233,9 +233,9 @@ void WatchdogImpl::waitFor(const milliseconds sleepInterval) {

 #if (defined(__APPLE__) || defined(_WIN32))
     timeToWait.tv_sec = sec.count();
-    timeToWait.tv_nsec =
+    timeToWait.tv_nsec = (long)(
         std::chrono::duration_cast<std::chrono::nanoseconds>(sleepInterval).count() -
-        std::chrono::nanoseconds(sec).count();
+        std::chrono::nanoseconds(sec).count());
 #else
     clock_gettime(CLOCK_MONOTONIC, &timeToWait);
     const auto secondInNanoSeconds = 1000000000L;
diff --git a/inference-engine/thirdparty/movidius/mvnc/src/watchdog/xlink_device.cpp b/inference-engine/thirdparty/movidius/mvnc/src/watchdog/xlink_device.cpp
index 5662325..774686b 100644
--- a/inference-engine/thirdparty/movidius/mvnc/src/watchdog/xlink_device.cpp
+++ b/inference-engine/thirdparty/movidius/mvnc/src/watchdog/xlink_device.cpp
@@ -68,7 +68,7 @@ void XLinkDevice::keepAlive(const time_point &current_time) noexcept {
     m_lastPingTime = current_time;

-    int diff = duration_cast<milliseconds>(current_time - m_lastPongTime).count();
+    int diff = (int)duration_cast<milliseconds>(current_time - m_lastPongTime).count();

     if (bPong) {
         m_lastPongTime = current_time;
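Both chrono fixes above narrow a 64-bit count() result; that is well defined as long as the measured interval stays small, which the watchdog's bounded ping/pong cadence guarantees. A standalone illustration (the names are illustrative, not the watchdog's API):

    #include <chrono>
    #include <cstdio>

    int main() {
        using namespace std::chrono;
        const steady_clock::time_point last_pong = steady_clock::now();
        const steady_clock::time_point now = last_pong + seconds(2);
        // count() returns a 64-bit integer; the explicit cast documents the
        // assumption that the watchdog interval always fits in an int.
        const int diff = (int)duration_cast<milliseconds>(now - last_pong).count();
        std::printf("elapsed: %d ms\n", diff);
        return 0;
    }
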
diff --git a/ngraph/core/builder/src/builder/reshape.cpp b/ngraph/core/builder/src/builder/reshape.cpp
index e7bab23..56521ae 100644
--- a/ngraph/core/builder/src/builder/reshape.cpp
+++ b/ngraph/core/builder/src/builder/reshape.cpp
@@ -218,7 +218,7 @@ shared_ptr<Node> builder::opset1::collapse(const Output<Node>& value,
     // Multiply all alements of shape from start_axis to end_axis inclusive
     size_t collapsed_axis_size = accumulate(next(begin(shape), start_axis),
                                             next(begin(shape), end_axis + 1),
-                                            1UL,
+                                            size_t{1},
                                             multiplies<size_t>());

     Shape output_shape{};
     output_shape.insert(begin(output_shape), begin(shape), next(begin(shape), start_axis));
diff --git a/ngraph/core/include/ngraph/op/constant.hpp b/ngraph/core/include/ngraph/op/constant.hpp
index 3ca268f..243b7bd 100644
--- a/ngraph/core/include/ngraph/op/constant.hpp
+++ b/ngraph/core/include/ngraph/op/constant.hpp
@@ -342,6 +342,11 @@ namespace ngraph
             {
                 auto source_type = get_element_type();
                 std::vector<T> rc;
+
+#if defined(_MSC_VER)
+#pragma warning(push)
+#pragma warning(disable : 4244)
+#endif
                 switch (source_type)
                 {
                 case element::Type_t::boolean:
@@ -424,6 +429,9 @@ namespace ngraph
                 }
                 default: throw std::runtime_error("unsupported type");
                 }
+#if defined(_MSC_VER)
+#pragma warning(pop)
+#endif
                 return rc;
             }
diff --git a/ngraph/core/include/ngraph/type/bfloat16.hpp b/ngraph/core/include/ngraph/type/bfloat16.hpp
index 72d47f5..08f2745 100644
--- a/ngraph/core/include/ngraph/type/bfloat16.hpp
+++ b/ngraph/core/include/ngraph/type/bfloat16.hpp
@@ -53,6 +53,12 @@ namespace ngraph
         {
         }

+        template <typename I>
+        explicit bfloat16(I value)
+            : m_value{bfloat16{static_cast<float>(value)}.m_value}
+        {
+        }
+
         std::string to_string() const;
         size_t size() const;
         template
diff --git a/ngraph/core/include/ngraph/type/float16.hpp b/ngraph/core/include/ngraph/type/float16.hpp
index a63c3c8..7023323 100644
--- a/ngraph/core/include/ngraph/type/float16.hpp
+++ b/ngraph/core/include/ngraph/type/float16.hpp
@@ -48,6 +48,12 @@ namespace ngraph

         float16(float value);

+        template <typename I>
+        explicit float16(I value)
+            : m_value{float16{static_cast<float>(value)}.m_value}
+        {
+        }
+
         std::string to_string() const;
         size_t size() const;
         template
diff --git a/ngraph/core/reference/include/ngraph/runtime/reference/roi_align.hpp b/ngraph/core/reference/include/ngraph/runtime/reference/roi_align.hpp
index f586c10..b268b6c 100644
--- a/ngraph/core/reference/include/ngraph/runtime/reference/roi_align.hpp
+++ b/ngraph/core/reference/include/ngraph/runtime/reference/roi_align.hpp
@@ -106,7 +106,7 @@ namespace ngraph
                 {
                     // For this sample we save 4x point (0,0) with weight 0
                     pooling_points.insert(pooling_points.end(), 4, {0, 0});
-                    pooling_weights.insert(pooling_weights.end(), 4, {0});
+                    pooling_weights.insert(pooling_weights.end(), 4, T{0});
                     continue;
                 }
diff --git a/ngraph/frontend/onnx_import/include/onnx_import/core/tensor.hpp b/ngraph/frontend/onnx_import/include/onnx_import/core/tensor.hpp
index 94ae06e..67890b7 100644
--- a/ngraph/frontend/onnx_import/include/onnx_import/core/tensor.hpp
+++ b/ngraph/frontend/onnx_import/include/onnx_import/core/tensor.hpp
@@ -119,7 +119,14 @@ namespace ngraph
             template <typename T, typename Container>
             inline std::vector<T> __get_data(const Container& container)
             {
+#if defined(_MSC_VER)
+#pragma warning(push)
+#pragma warning(disable : 4267)
+#endif
                 return std::vector<T>(std::begin(container), std::end(container));
+#if defined(_MSC_VER)
+#pragma warning(pop)
+#endif
             }

             /// Returns the size if bytes of an ONNX data type.
diff --git a/ngraph/frontend/onnx_import/src/utils/reshape.cpp b/ngraph/frontend/onnx_import/src/utils/reshape.cpp
index ec8d963..ddd4674 100644
--- a/ngraph/frontend/onnx_import/src/utils/reshape.cpp
+++ b/ngraph/frontend/onnx_import/src/utils/reshape.cpp
@@ -74,12 +74,12 @@ namespace ngraph
                 std::size_t input_shape_product =
                     std::accumulate(std::begin(input_shape),
                                     std::end(input_shape),
-                                    1UL,
+                                    size_t{1},
                                     std::multiplies<std::size_t>());
                 std::size_t output_shape_product =
                     std::accumulate(std::begin(inferred_dims),
                                     std::end(inferred_dims),
-                                    1UL,
+                                    size_t{1},
                                     std::multiplies<std::size_t>());
                 *neg_value_it = input_shape_product / output_shape_product;
             }
--
2.7.4
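A closing note on the 1UL -> size_t{1} changes in the last two files: std::accumulate deduces its accumulator type from the init argument, and on LLP64 Windows unsigned long is 32 bits wide while size_t is 64, so a 1UL seed computes shape products in 32 bits and draws MSVC conversion warnings. Seeding with size_t{1} keeps the entire fold in size_t:

    #include <cstddef>
    #include <cstdio>
    #include <functional>
    #include <numeric>
    #include <vector>

    int main() {
        const std::vector<std::size_t> shape{1024, 1024, 1024, 16};
        // With 1UL the accumulator would be unsigned long (32-bit on LLP64
        // Windows) and this 2^34 product would wrap; size_t{1} makes the
        // fold 64-bit on every mainstream platform.
        const std::size_t elements = std::accumulate(
            shape.begin(), shape.end(), std::size_t{1}, std::multiplies<std::size_t>());
        std::printf("%zu elements\n", elements);
        return 0;
    }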