IVGCVSW-4246 Clean build end-to-end tests with -Wextra
author Derek Lamberti <derek.lamberti@arm.com>
Tue, 10 Dec 2019 21:20:10 +0000 (21:20 +0000)
committer Francis Murtagh <francis.murtagh@arm.com>
Mon, 30 Dec 2019 15:03:02 +0000 (15:03 +0000)
Change-Id: Ia25f919e45a210e1e2d5d50b0c9098bf01d88013
Signed-off-by: Derek Lamberti <derek.lamberti@arm.com>
14 files changed:
tests/InferenceTest.hpp
tests/InferenceTest.inl
tests/MobileNetSsdInferenceTest.hpp
tests/NetworkExecutionUtils/NetworkExecutionUtils.hpp
tests/TfLiteInceptionV3Quantized-Armnn/TfLiteInceptionV3Quantized-Armnn.cpp
tests/TfLiteInceptionV4Quantized-Armnn/TfLiteInceptionV4Quantized-Armnn.cpp
tests/TfLiteMnasNet-Armnn/TfLiteMnasNet-Armnn.cpp
tests/TfLiteMobileNetQuantizedSoftmax-Armnn/TfLiteMobileNetQuantizedSoftmax-Armnn.cpp
tests/TfLiteMobilenetQuantized-Armnn/TfLiteMobilenetQuantized-Armnn.cpp
tests/TfLiteMobilenetV2Quantized-Armnn/TfLiteMobilenetV2Quantized-Armnn.cpp
tests/TfLiteResNetV2-50-Quantized-Armnn/TfLiteResNetV2-50-Quantized-Armnn.cpp
tests/TfLiteResNetV2-Armnn/TfLiteResNetV2-Armnn.cpp
tests/TfLiteVGG16Quantized-Armnn/TfLiteVGG16Quantized-Armnn.cpp
tests/YoloInferenceTest.hpp

index 7b7dcec..6423d1c 100644 (file)
@@ -9,7 +9,7 @@
 #include <armnn/TypesUtils.hpp>
 #include "InferenceModel.hpp"
 
-
+#include <boost/core/ignore_unused.hpp>
 #include <boost/program_options.hpp>
 
 
@@ -91,8 +91,15 @@ class IInferenceTestCaseProvider
 public:
     virtual ~IInferenceTestCaseProvider() {}
 
-    virtual void AddCommandLineOptions(boost::program_options::options_description& options) {};
-    virtual bool ProcessCommandLineOptions(const InferenceTestOptions &commonOptions) { return true; };
+    virtual void AddCommandLineOptions(boost::program_options::options_description& options)
+    {
+        boost::ignore_unused(options);
+    };
+    virtual bool ProcessCommandLineOptions(const InferenceTestOptions &commonOptions)
+    {
+        boost::ignore_unused(commonOptions);
+        return true;
+    };
     virtual std::unique_ptr<IInferenceTestCase> GetTestCase(unsigned int testCaseId) = 0;
     virtual bool OnInferenceTestFinished() { return true; };
 };
index fd888e2..c05e70d 100644 (file)
@@ -79,6 +79,7 @@ struct ClassifierResultProcessor : public boost::static_visitor<>
 
     void operator()(const std::vector<int>& values)
     {
+        boost::ignore_unused(values);
         BOOST_ASSERT_MSG(false, "Non-float predictions output not supported.");
     }
 
index a950b93..c99844b 100644 (file)
@@ -33,6 +33,8 @@ public:
 
     TestCaseResult ProcessResult(const InferenceTestOptions& options) override
     {
+        boost::ignore_unused(options);
+
         const std::vector<float>& output1 = boost::get<std::vector<float>>(this->GetOutputs()[0]); // bounding boxes
         BOOST_ASSERT(output1.size() == k_OutputSize1);
 
index 279bf30..f9e9b14 100644 (file)
@@ -732,6 +732,7 @@ int RunCsvTest(const armnnUtils::CsvRow &csvRow, const std::shared_ptr<armnn::IR
                const bool enableProfiling, const bool enableFp16TurboMode, const double& thresholdTime,
                const bool printIntermediate, bool enableLayerDetails = false, bool parseUnuspported = false)
 {
+    boost::ignore_unused(runtime);
     std::string modelFormat;
     std::string modelPath;
     std::string inputNames;
index bf5a865..f4b3955 100644 (file)
@@ -37,7 +37,7 @@ int main(int argc, char* argv[])
                      "input",                             // input tensor name
                      "output",                            // output tensor name
                      { 0, 1, 2 },                         // test images to test with as above
-                     [&imageSet](const char* dataDir, const ModelType & model) {
+                     [&imageSet](const char* dataDir, const ModelType &) {
                          // we need to get the input quantization parameters from
                          // the parsed model
                          return DatabaseType(
index b0af830..169ecb0 100644 (file)
@@ -37,7 +37,7 @@ int main(int argc, char* argv[])
                      "input",                             // input tensor name
                      "InceptionV4/Logits/Predictions",    // output tensor name
                      { 0, 1, 2 },                         // test images to test with as above
-                     [&imageSet](const char* dataDir, const ModelType & model) {
+                     [&imageSet](const char* dataDir, const ModelType &) {
                          // we need to get the input quantization parameters from
                          // the parsed model
                          return DatabaseType(
index 4cf16d7..4194d4b 100644 (file)
@@ -37,7 +37,7 @@ int main(int argc, char* argv[])
                      "input",                             // input tensor name
                      "output",                            // output tensor name
                      { 0, 1, 2 },                         // test images to test with as above
-                     [&imageSet](const char* dataDir, const ModelType & model) {
+                     [&imageSet](const char* dataDir, const ModelType &) {
                          return DatabaseType(
                              dataDir,
                              224,
index f193a98..f497de5 100644 (file)
@@ -39,7 +39,7 @@ int main(int argc, char* argv[])
                 "input",      // input tensor name
                 "MobilenetV1/Predictions/Reshape_1",        // output tensor name
                 { 0, 1, 2 },               // test images to test with as above
-                [&imageSet](const char* dataDir, const ModelType & model) {
+                [&imageSet](const char* dataDir, const ModelType &) {
                     // we need to get the input quantization parameters from
                     // the parsed model
                     return DatabaseType(
index 1b411f9..b2d3f0f 100644 (file)
@@ -105,7 +105,7 @@ int main(int argc, char* argv[])
                      "input",                             // input tensor name
                      "MobilenetV1/Predictions/Reshape_1", // output tensor name
                      indices,                             // vector of indices to select which images to validate
-                     [&imageSet](const char* dataDir, const ModelType & model) {
+                     [&imageSet](const char* dataDir, const ModelType &) {
                          // we need to get the input quantization parameters from
                          // the parsed model
                          return DatabaseType(
index 9bc1034..b8def4f 100644 (file)
@@ -37,7 +37,7 @@ int main(int argc, char* argv[])
                      "input",                             // input tensor name
                      "output",                            // output tensor name
                      { 0, 1, 2 },                         // test images to test with as above
-                     [&imageSet](const char* dataDir, const ModelType & model) {
+                     [&imageSet](const char* dataDir, const ModelType &) {
                          // we need to get the input quantization parameters from
                          // the parsed model
                          return DatabaseType(
index 98235e3..7446809 100644 (file)
@@ -37,7 +37,7 @@ int main(int argc, char* argv[])
                      "input",                                    // input tensor name
                      "resnet_v2_50/predictions/Reshape_1",       // output tensor name
                      { 0, 1, 2 },                                // test images to test with as above
-                     [&imageSet](const char* dataDir, const ModelType & model) {
+                     [&imageSet](const char* dataDir, const ModelType &) {
                          // we need to get the input quantization parameters from
                          // the parsed model
                          return DatabaseType(
index 1e2ffbf..107660e 100644 (file)
@@ -37,7 +37,7 @@ int main(int argc, char* argv[])
                      "input",                             // input tensor name
                      "output",                            // output tensor name
                      { 0, 1, 2 },                         // test images to test with as above
-                     [&imageSet](const char* dataDir, const ModelType & model) {
+                     [&imageSet](const char* dataDir, const ModelType &) {
                          return DatabaseType(
                              dataDir,
                              299,
index 030f01c..8da553f 100644 (file)
@@ -39,7 +39,7 @@ int main(int argc, char* argv[])
                      "input",                   // input tensor name
                      "vgg_16/fc8/squeezed",     // output tensor name
                      { 0, 1, 2 },               // test images to test with as above
-                     [&imageSet](const char* dataDir, const ModelType & model) {
+                     [&imageSet](const char* dataDir, const ModelType &) {
                          // we need to get the input quantization parameters from
                          // the parsed model
                          return DatabaseType(
index 91ea977..16d0355 100644 (file)
@@ -32,6 +32,8 @@ public:
 
     virtual TestCaseResult ProcessResult(const InferenceTestOptions& options) override
     {
+        boost::ignore_unused(options);
+
         using Boost3dArray = boost::multi_array<float, 3>;
 
         const std::vector<float>& output = boost::get<std::vector<float>>(this->GetOutputs()[0]);