IVGCVSW-3675 Add ExecuteNetwork option to print intermediate layers
Author: Matthew Jackson <matthew.jackson@arm.com>
Tue, 27 Aug 2019 14:35:59 +0000 (15:35 +0100)
Committer: Matteo Martincigh <matteo.martincigh@arm.com>
Wed, 28 Aug 2019 14:07:28 +0000 (14:07 +0000)
Signed-off-by: Matthew Jackson <matthew.jackson@arm.com>
Change-Id: Id7ab186ec607ff6e5ee6869c4ad562af4c40b97a

tests/ExecuteNetwork/ExecuteNetwork.cpp
tests/InferenceModel.hpp
tests/NetworkExecutionUtils/NetworkExecutionUtils.hpp

index 1a03062..bccd50d 100644 (file)
@@ -87,7 +87,9 @@ int main(int argc, const char* argv[])
             ("threshold-time,r", po::value<double>(&thresholdTime)->default_value(0.0),
              "Threshold time is the maximum allowed time for inference measured in milliseconds. If the actual "
              "inference time is greater than the threshold time, the test will fail. By default, no threshold "
-             "time is used.");
+             "time is used.")
+            ("print-intermediate-layers,p", po::bool_switch()->default_value(false),
+             "If this option is enabled, the output of every graph layer will be printed.");
     }
     catch (const std::exception& e)
     {
@@ -128,6 +130,7 @@ int main(int argc, const char* argv[])
     bool enableProfiling = vm["event-based-profiling"].as<bool>();
     bool enableFp16TurboMode = vm["fp16-turbo-mode"].as<bool>();
     bool quantizeInput = vm["quantize-input"].as<bool>();
+    bool printIntermediate = vm["print-intermediate-layers"].as<bool>();
 
     // Check whether we have to load test cases from a file.
     if (CheckOption(vm, "test-cases"))
@@ -169,7 +172,7 @@ int main(int argc, const char* argv[])
             {
                 testCase.values.insert(testCase.values.begin(), executableName);
                 results.push_back(std::async(std::launch::async, RunCsvTest, std::cref(testCase), std::cref(runtime),
-                                             enableProfiling, enableFp16TurboMode, thresholdTime));
+                                             enableProfiling, enableFp16TurboMode, thresholdTime, printIntermediate));
             }
 
             // Check results
@@ -187,7 +190,8 @@ int main(int argc, const char* argv[])
             for (auto&  testCase : testCases)
             {
                 testCase.values.insert(testCase.values.begin(), executableName);
-                if (RunCsvTest(testCase, runtime, enableProfiling, enableFp16TurboMode, thresholdTime) != EXIT_SUCCESS)
+                if (RunCsvTest(testCase, runtime, enableProfiling,
+                               enableFp16TurboMode, thresholdTime, printIntermediate) != EXIT_SUCCESS)
                 {
                     return EXIT_FAILURE;
                 }
@@ -222,6 +226,6 @@ int main(int argc, const char* argv[])
 
         return RunTest(modelFormat, inputTensorShapes, computeDevices, dynamicBackendsPath, modelPath, inputNames,
                        inputTensorDataFilePaths, inputTypes, quantizeInput, outputTypes, outputNames,
-                       enableProfiling, enableFp16TurboMode, thresholdTime, subgraphId);
+                       enableProfiling, enableFp16TurboMode, thresholdTime, printIntermediate, subgraphId);
     }
 }
index 13e8031..0ede898 100644 (file)
@@ -91,6 +91,7 @@ struct Params
     bool                            m_IsModelBinary;
     bool                            m_VisualizePostOptimizationModel;
     bool                            m_EnableFp16TurboMode;
+    bool                            m_PrintIntermediateLayers;
 
     Params()
         : m_ComputeDevices{}
@@ -98,6 +99,7 @@ struct Params
         , m_IsModelBinary(true)
         , m_VisualizePostOptimizationModel(false)
         , m_EnableFp16TurboMode(false)
+        , m_PrintIntermediateLayers(false)
     {}
 };
 
@@ -395,6 +397,7 @@ public:
 
             armnn::OptimizerOptions options;
             options.m_ReduceFp32ToFp16 = params.m_EnableFp16TurboMode;
+            options.m_Debug = params.m_PrintIntermediateLayers;
 
             optNet = armnn::Optimize(*network, params.m_ComputeDevices, m_Runtime->GetDeviceSpec(), options);
             if (!optNet)
index 810f499..ddf797b 100644 (file)
@@ -265,6 +265,7 @@ int MainImpl(const char* modelPath,
              bool enableProfiling,
              bool enableFp16TurboMode,
              const double& thresholdTime,
+             bool printIntermediate,
              const size_t subgraphId,
              const std::shared_ptr<armnn::IRuntime>& runtime = nullptr)
 {
@@ -280,6 +281,7 @@ int MainImpl(const char* modelPath,
         params.m_IsModelBinary = isModelBinary;
         params.m_ComputeDevices = computeDevices;
         params.m_DynamicBackendsPath = dynamicBackendsPath;
+        params.m_PrintIntermediateLayers = printIntermediate;
 
         for(const std::string& inputName: inputNames)
         {
@@ -420,6 +422,7 @@ int RunTest(const std::string& format,
             bool enableProfiling,
             bool enableFp16TurboMode,
             const double& thresholdTime,
+            bool printIntermediate,
             const size_t subgraphId,
             const std::shared_ptr<armnn::IRuntime>& runtime = nullptr)
 {
@@ -519,7 +522,7 @@ int RunTest(const std::string& format,
         dynamicBackendsPath, inputNamesVector, inputTensorShapes,
         inputTensorDataFilePathsVector, inputTypesVector, quantizeInput,
         outputTypesVector, outputNamesVector, enableProfiling,
-        enableFp16TurboMode, thresholdTime, subgraphId, runtime);
+        enableFp16TurboMode, thresholdTime, printIntermediate, subgraphId, runtime);
 #else
     BOOST_LOG_TRIVIAL(fatal) << "Not built with serialization support.";
     return EXIT_FAILURE;
@@ -534,7 +537,7 @@ int RunTest(const std::string& format,
                                                                inputTensorDataFilePathsVector, inputTypesVector,
                                                                quantizeInput, outputTypesVector, outputNamesVector,
                                                                enableProfiling, enableFp16TurboMode, thresholdTime,
-                                                               subgraphId, runtime);
+                                                               printIntermediate, subgraphId, runtime);
 #else
         BOOST_LOG_TRIVIAL(fatal) << "Not built with Caffe parser support.";
         return EXIT_FAILURE;
@@ -549,7 +552,7 @@ int RunTest(const std::string& format,
                                                          inputTensorDataFilePathsVector, inputTypesVector,
                                                          quantizeInput, outputTypesVector, outputNamesVector,
                                                          enableProfiling, enableFp16TurboMode, thresholdTime,
-                                                         subgraphId, runtime);
+                                                         printIntermediate, subgraphId, runtime);
 #else
     BOOST_LOG_TRIVIAL(fatal) << "Not built with Onnx parser support.";
     return EXIT_FAILURE;
@@ -564,7 +567,7 @@ int RunTest(const std::string& format,
                                                          inputTensorDataFilePathsVector, inputTypesVector,
                                                          quantizeInput, outputTypesVector, outputNamesVector,
                                                          enableProfiling, enableFp16TurboMode, thresholdTime,
-                                                         subgraphId, runtime);
+                                                         printIntermediate, subgraphId, runtime);
 #else
         BOOST_LOG_TRIVIAL(fatal) << "Not built with Tensorflow parser support.";
         return EXIT_FAILURE;
@@ -585,7 +588,7 @@ int RunTest(const std::string& format,
                                                                  inputTensorDataFilePathsVector, inputTypesVector,
                                                                  quantizeInput, outputTypesVector, outputNamesVector,
                                                                  enableProfiling, enableFp16TurboMode, thresholdTime,
-                                                                 subgraphId, runtime);
+                                                                 printIntermediate, subgraphId, runtime);
 #else
         BOOST_LOG_TRIVIAL(fatal) << "Unknown model format: '" << modelFormat <<
             "'. Please include 'caffe', 'tensorflow', 'tflite' or 'onnx'";
@@ -601,7 +604,8 @@ int RunTest(const std::string& format,
 }
 
 int RunCsvTest(const armnnUtils::CsvRow &csvRow, const std::shared_ptr<armnn::IRuntime>& runtime,
-               const bool enableProfiling, const bool enableFp16TurboMode, const double& thresholdTime)
+               const bool enableProfiling, const bool enableFp16TurboMode, const double& thresholdTime,
+               const bool printIntermediate)
 {
     std::string modelFormat;
     std::string modelPath;
@@ -709,5 +713,5 @@ int RunCsvTest(const armnnUtils::CsvRow &csvRow, const std::shared_ptr<armnn::IR
 
     return RunTest(modelFormat, inputTensorShapes, computeDevices, dynamicBackendsPath, modelPath, inputNames,
                    inputTensorDataFilePaths, inputTypes, quantizeInput, outputTypes, outputNames,
-                   enableProfiling, enableFp16TurboMode, thresholdTime, subgraphId);
+                   enableProfiling, enableFp16TurboMode, thresholdTime, printIntermediate, subgraphId);
 }