From 01c395188796577ac85eda0a7a6a7c88cf4f48fe Mon Sep 17 00:00:00 2001 From: Maxim Andronov <maxim.andronov@intel.com> Date: Sun, 22 Nov 2020 20:42:53 +0300 Subject: [PATCH] Fixed static analysis issues (#3254) --- .../samples/classification_sample_async/main.cpp | 4 ++++ inference-engine/samples/style_transfer_sample/main.cpp | 3 ++- .../src/mkldnn_plugin/mkldnn_exec_network.cpp | 2 ++ inference-engine/src/mkldnn_plugin/mkldnn_graph.cpp | 7 ++++--- .../src/mkldnn_plugin/mkldnn_graph_optimizer.cpp | 16 ++++++++++++++++ .../src/mkldnn_plugin/nodes/non_max_suppression.cpp | 2 +- inference-engine/src/multi_device/multi_device.cpp | 2 ++ 7 files changed, 31 insertions(+), 5 deletions(-) diff --git a/inference-engine/samples/classification_sample_async/main.cpp b/inference-engine/samples/classification_sample_async/main.cpp index 1e764ff..264d4ba 100644 --- a/inference-engine/samples/classification_sample_async/main.cpp +++ b/inference-engine/samples/classification_sample_async/main.cpp @@ -169,6 +169,8 @@ int main(int argc, char *argv[]) { auto minputHolder = minput->wmap(); auto data = minputHolder.as<PrecisionTrait<Precision::U8>::value_type *>(); + if (data == nullptr) + throw std::runtime_error("Input blob has not allocated buffer"); /** Iterate over all input images **/ for (size_t image_id = 0; image_id < imagesData.size(); ++image_id) { /** Iterate over all pixel in image (b,g,r) **/ @@ -217,6 +219,8 @@ int main(int argc, char *argv[]) { // --------------------------- 8. 
Process output ------------------------------------------------------- slog::info << "Processing output blobs" << slog::endl; OutputsDataMap outputInfo(network.getOutputsInfo()); + if (outputInfo.empty()) + throw std::runtime_error("Can't get output blobs"); Blob::Ptr outputBlob = inferRequest.GetBlob(outputInfo.begin()->first); /** Validating -nt value **/ diff --git a/inference-engine/samples/style_transfer_sample/main.cpp b/inference-engine/samples/style_transfer_sample/main.cpp index e410c71..6a036a9 100644 --- a/inference-engine/samples/style_transfer_sample/main.cpp +++ b/inference-engine/samples/style_transfer_sample/main.cpp @@ -177,7 +177,8 @@ int main(int argc, char *argv[]) { size_t image_size = minput->getTensorDesc().getDims()[3] * minput->getTensorDesc().getDims()[2]; auto data = ilmHolder.as<PrecisionTrait<Precision::FP32>::value_type *>(); - + if (data == nullptr) + throw std::runtime_error("Input blob has not allocated buffer"); /** Iterate over all input images **/ for (size_t image_id = 0; image_id < imagesData.size(); ++image_id) { /** Iterate over all pixel in image (b,g,r) **/ diff --git a/inference-engine/src/mkldnn_plugin/mkldnn_exec_network.cpp b/inference-engine/src/mkldnn_plugin/mkldnn_exec_network.cpp index e19708a..4bcd2d9 100644 --- a/inference-engine/src/mkldnn_plugin/mkldnn_exec_network.cpp +++ b/inference-engine/src/mkldnn_plugin/mkldnn_exec_network.cpp @@ -118,6 +118,8 @@ MKLDNNExecNetwork::MKLDNNExecNetwork(const InferenceEngine::ICNNNetwork &network createConstInputTo(layer, shiftBlob, "biases"); } else if (scalesBlob != nullptr) { Blob::Ptr biases = make_shared_blob<float>(scalesBlob->getTensorDesc()); + if (biases == nullptr) + THROW_IE_EXCEPTION << "Cannot make 'biases' shared blob"; biases->allocate(); auto biasesPtr = biases->buffer().as<float*>(); for (size_t i = 0; i < biases->size(); i++) diff --git a/inference-engine/src/mkldnn_plugin/mkldnn_graph.cpp b/inference-engine/src/mkldnn_plugin/mkldnn_graph.cpp index 32c9cff..7883277 100644 --- 
a/inference-engine/src/mkldnn_plugin/mkldnn_graph.cpp +++ b/inference-engine/src/mkldnn_plugin/mkldnn_graph.cpp @@ -1090,10 +1090,11 @@ void MKLDNNGraph::InsertReorder(MKLDNNEdgePtr edge, std::string layerName, const inDesc.getPrecision()})); MKLDNNNodePtr newReorder(new MKLDNNReorderNode(layer, getEngine(), weightsCache)); auto *reorderPtr = dynamic_cast<MKLDNNReorderNode *>(newReorder.get()); - if (reorderPtr) { - reorderPtr->setDescs(inDesc, outDesc); - reorderPtr->_scales = scales; + if (reorderPtr == nullptr) { + THROW_IE_EXCEPTION << "MKLDNNGraph::InsertReorder: Cannot cast to MKLDNNReorderNode"; } + reorderPtr->setDescs(inDesc, outDesc); + reorderPtr->_scales = scales; auto oIndex = edge->getOutputNum(); auto iIndex = edge->getInputNum(); diff --git a/inference-engine/src/mkldnn_plugin/mkldnn_graph_optimizer.cpp b/inference-engine/src/mkldnn_plugin/mkldnn_graph_optimizer.cpp index 9ca7177..05cec5d 100644 --- a/inference-engine/src/mkldnn_plugin/mkldnn_graph_optimizer.cpp +++ b/inference-engine/src/mkldnn_plugin/mkldnn_graph_optimizer.cpp @@ -254,7 +254,12 @@ void MKLDNNGraphOptimizer::FuseConvolutionAndZeroPoints(MKLDNNGraph &graph) { return false; auto zeroPointsBlob = dynamic_cast<TBlob<uint8_t>*>(arg0->getCnnLayer()->blobs["custom"].get()); + if (zeroPointsBlob == nullptr) + THROW_IE_EXCEPTION << "Cannot cast to TBlob internal zero points blob"; + auto zeroPointsData = zeroPointsBlob->buffer().as<uint8_t*>(); + if (zeroPointsData == nullptr) + THROW_IE_EXCEPTION << "zeroPointsBlob has not allocated buffer"; for (int j = 0; j < parent0->getParentEdgesAtPort(1)[0]->getDims()[1]; j++) { convNode->inputZeroPoints.push_back(zeroPointsData[j]); @@ -302,7 +307,12 @@ void MKLDNNGraphOptimizer::FuseConvolutionAndZeroPoints(MKLDNNGraph &graph) { return false; auto zeroPointsBlob = dynamic_cast<TBlob<int8_t>*>(arg0->getCnnLayer()->blobs["custom"].get()); + if (zeroPointsBlob == nullptr) + THROW_IE_EXCEPTION << "Cannot cast to TBlob internal zero points blob"; + auto zeroPointsData = zeroPointsBlob->buffer().as<int8_t*>(); + if 
(zeroPointsData == nullptr) + THROW_IE_EXCEPTION << "zeroPointsBlob has not allocated buffer"; for (int j = 0; j < parent0->getParentEdgesAtPort(1)[0]->getDims()[0]; j++) { convNode->weightsZeroPoints.push_back(static_cast<float>(zeroPointsData[j])); @@ -338,8 +348,14 @@ void MKLDNNGraphOptimizer::FuseConvolutionAndZeroPoints(MKLDNNGraph &graph) { weightsLayer = getCreatorLayer(weightsLayer->insData[0].lock()).lock(); } + auto weightsBlob = dynamic_cast<TBlob<int8_t>*>(weightsLayer->blobs["custom"].get()); + if (weightsBlob == nullptr) + THROW_IE_EXCEPTION << "Cannot cast to TBlob internal weights blob"; + auto weightsPtr = weightsBlob->buffer().as<int8_t*>(); + if (weightsPtr == nullptr) + THROW_IE_EXCEPTION << "weightsBlob has not allocated buffer"; ptrdiff_t G = convLayer->_group; ptrdiff_t OC = weightsLayer->outData[0]->getDims()[0] / G; diff --git a/inference-engine/src/mkldnn_plugin/nodes/non_max_suppression.cpp b/inference-engine/src/mkldnn_plugin/nodes/non_max_suppression.cpp index 9f8e326..9abad7b 100644 --- a/inference-engine/src/mkldnn_plugin/nodes/non_max_suppression.cpp +++ b/inference-engine/src/mkldnn_plugin/nodes/non_max_suppression.cpp @@ -184,7 +184,7 @@ public: int batch_index; int class_index; int box_index; - filteredBoxes() {} + filteredBoxes() = default; filteredBoxes(float _score, int _batch_index, int _class_index, int _box_index) : score(_score), batch_index(_batch_index), class_index(_class_index), box_index(_box_index) {} }; diff --git a/inference-engine/src/multi_device/multi_device.cpp b/inference-engine/src/multi_device/multi_device.cpp index 64ac092..72f95d6 100644 --- a/inference-engine/src/multi_device/multi_device.cpp +++ b/inference-engine/src/multi_device/multi_device.cpp @@ -551,6 +551,8 @@ QueryNetworkResult MultiDeviceInferencePlugin::QueryNetwork(const ICNNNetwork& THROW_IE_EXCEPTION << NOT_IMPLEMENTED_str; } else { auto cnnNetworkImpl = std::make_shared<details::CNNNetworkImpl>(network); + if (cnnNetworkImpl == nullptr) + THROW_IE_EXCEPTION << "Cannot create CNNNetworkImpl 
shared_ptr"; queryNetwork(*cnnNetworkImpl); } } else { -- 2.7.4