IVGCVBENCH-1337 Added additional layer parameters to dot file and -v option
author Andre Ghattas <andre.ghattas@arm.com>
Wed, 7 Aug 2019 11:18:38 +0000 (12:18 +0100)
committer Matteo Martincigh <matteo.martincigh@arm.com>
Wed, 4 Sep 2019 09:41:41 +0000 (09:41 +0000)
* Generic layer parameters now show up in dot file
* Convolution layer parameters have also been added to dot file
* ExecuteNetwork has an additional -v flag which generates a dot file if the flag is set

Change-Id: I210bb19b45384eb3639b7e488c7a89049fa6f18d
Signed-off-by: Andre Ghattas <andre.ghattas@arm.com>
Signed-off-by: Szilard Papp <szilard.papp@arm.com>
12 files changed:
src/armnn/Layer.cpp
src/armnn/Layer.hpp
src/armnn/SerializeLayerParameters.cpp
src/armnn/layers/Convolution2dLayer.cpp
src/armnn/layers/Convolution2dLayer.hpp
src/armnn/layers/DepthwiseConvolution2dLayer.cpp
src/armnn/layers/DepthwiseConvolution2dLayer.hpp
src/armnn/layers/LayerWithParameters.hpp
src/backends/backendsCommon/test/OptimizedNetworkTests.cpp
src/backends/reference/test/RefOptimizedNetworkTests.cpp
tests/ExecuteNetwork/ExecuteNetwork.cpp
tests/NetworkExecutionUtils/NetworkExecutionUtils.hpp

index 528020b..1e38433 100644 (file)
@@ -400,4 +400,23 @@ std::vector<TensorShape> Layer::InferOutputShapes(const std::vector<TensorShape>
     }
     return inputShapes;
 }
+
+void Layer::SerializeLayerParameters(ParameterStringifyFunction& fn) const
+{
+    std::string layerType = GetLayerTypeAsCString(m_Type);
+    std::string backendId = std::string(m_BackendId);
+    if(!(m_LayerName.compare("") == 0) && !m_LayerName.empty())
+    {
+        fn("LayerName",m_LayerName);
+    }
+    if(!(layerType.compare("") == 0) && !layerType.empty())
+    {
+        fn("LayerType",layerType);
+    }
+    if(!(backendId.compare("") == 0) && !backendId.empty())
+    {
+        fn("BackendID",backendId);
+    }
+}
+
 } // namespace armnn
index 5944ea8..c571e50 100644 (file)
@@ -281,7 +281,7 @@ public:
 
     /// Helper to serialize the layer parameters to string.
     /// (currently used in DotSerializer and company).
-    virtual void SerializeLayerParameters(ParameterStringifyFunction &) const {}
+    virtual void SerializeLayerParameters(ParameterStringifyFunction& fn) const;
 
     // Free up the constant source data
     virtual void ReleaseConstantData();
index d416a28..1b0ec02 100644 (file)
@@ -68,6 +68,7 @@ StringifyLayerParameters<Convolution2dDescriptor>::Serialize(ParameterStringifyF
     }
 
     fn("BiasEnabled",(desc.m_BiasEnabled?"true":"false"));
+    fn("DataLayout",GetDataLayoutName(desc.m_DataLayout));
 }
 
 void
@@ -95,6 +96,7 @@ StringifyLayerParameters<DepthwiseConvolution2dDescriptor>::Serialize(ParameterS
     }
 
     fn("BiasEnabled",(desc.m_BiasEnabled?"true":"false"));
+    fn("DataLayout",std::to_string(int(desc.m_DataLayout)));
 }
 
 void
index 2c7a570..4300d55 100644 (file)
@@ -9,7 +9,7 @@
 #include <armnn/TypesUtils.hpp>
 #include <backendsCommon/CpuTensorHandle.hpp>
 #include <backendsCommon/WorkloadFactory.hpp>
-
+#include <string>
 #include <DataLayoutIndexed.hpp>
 
 using namespace armnnUtils;
@@ -20,6 +20,27 @@ namespace armnn
 Convolution2dLayer::Convolution2dLayer(const Convolution2dDescriptor& param, const char* name)
     : LayerWithParameters(1, 1, LayerType::Convolution2d, param, name)
 {
+
+}
+
+void Convolution2dLayer::SerializeLayerParameters(ParameterStringifyFunction& fn) const
+{
+    //using DescriptorType = Parameters;
+    const std::vector<TensorShape>& inputShapes =
+    {
+        GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape(),
+        m_Weight->GetTensorInfo().GetShape()
+    };
+    const TensorShape filterShape = inputShapes[1];
+    DataLayoutIndexed dataLayoutIndex(m_Param.m_DataLayout);
+    unsigned int filterWidth = filterShape[dataLayoutIndex.GetWidthIndex()];
+    unsigned int filterHeight = filterShape[dataLayoutIndex.GetHeightIndex()];
+    unsigned int outChannels = filterShape[0];
+
+    fn("OutputChannels",std::to_string(outChannels));
+    fn("FilterWidth",std::to_string(filterWidth));
+    fn("FilterHeight",std::to_string(filterHeight));
+    LayerWithParameters<Convolution2dDescriptor>::SerializeLayerParameters(fn);
 }
 
 std::unique_ptr<IWorkload> Convolution2dLayer::CreateWorkload(const Graph& graph, const IWorkloadFactory& factory) const
index 05a26da..0e85b33 100644 (file)
@@ -15,6 +15,7 @@ class ScopedCpuTensorHandle;
 class Convolution2dLayer : public LayerWithParameters<Convolution2dDescriptor>
 {
 public:
+
     /// A unique pointer to store Weight values.
     std::unique_ptr<ScopedCpuTensorHandle> m_Weight;
     /// A unique pointer to store Bias values.
@@ -43,6 +44,8 @@ public:
 
     void Accept(ILayerVisitor& visitor) const override;
 
+    void SerializeLayerParameters(ParameterStringifyFunction& fn) const override;
+
 protected:
     /// Constructor to create a Convolution2dLayer.
     /// @param [in] param Convolution2dDescriptor to configure the convolution2d operation.
index e49c179..a50a0f6 100644 (file)
@@ -10,7 +10,7 @@
 
 #include <backendsCommon/CpuTensorHandle.hpp>
 #include <backendsCommon/WorkloadFactory.hpp>
-
+#include <string>
 #include <DataLayoutIndexed.hpp>
 
 using namespace armnnUtils;
@@ -24,6 +24,28 @@ DepthwiseConvolution2dLayer::DepthwiseConvolution2dLayer(const DepthwiseConvolut
 {
 }
 
+void DepthwiseConvolution2dLayer::SerializeLayerParameters(ParameterStringifyFunction& fn) const
+{
+    const std::vector<TensorShape>& inputShapes =
+    {
+        GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape(),
+        m_Weight->GetTensorInfo().GetShape()
+    };
+    const TensorShape filterShape = inputShapes[1];
+    DataLayoutIndexed dataLayoutIndex(m_Param.m_DataLayout);
+    unsigned int inputChannels = filterShape[1];
+    unsigned int filterWidth = filterShape[3];
+    unsigned int filterHeight = filterShape[2];
+    unsigned int depthMultiplier = filterShape[0];
+
+    fn("FilterWidth",std::to_string(filterWidth));
+    fn("FilterHeight",std::to_string(filterHeight));
+    fn("DepthMultiplier",std::to_string(depthMultiplier));
+    fn("InputChannels",std::to_string(inputChannels));
+
+    LayerWithParameters<DepthwiseConvolution2dDescriptor>::SerializeLayerParameters(fn);
+}
+
 std::unique_ptr<IWorkload> DepthwiseConvolution2dLayer::CreateWorkload(const Graph& graph,
                                                                        const IWorkloadFactory& factory) const
 {
index 065ba6c..f575910 100644 (file)
@@ -43,6 +43,8 @@ public:
 
     void Accept(ILayerVisitor& visitor) const override;
 
+    void SerializeLayerParameters(ParameterStringifyFunction& fn) const override;
+
 protected:
     /// Constructor to create a DepthwiseConvolution2dLayer.
     /// @param [in] param DepthwiseConvolution2dDescriptor to configure the depthwise convolution2d.
index ba43d6f..cce9ca2 100644 (file)
@@ -19,9 +19,10 @@ public:
 
     /// Helper to serialize the layer parameters to string
     /// (currently used in DotSerializer and company).
-    void SerializeLayerParameters(ParameterStringifyFunction & fn) const
+    void SerializeLayerParameters(ParameterStringifyFunction& fn) const override
     {
         StringifyLayerParameters<Parameters>::Serialize(fn, m_Param);
+        Layer::SerializeLayerParameters(fn);
     }
 
 protected:
index 7b6135d..cbe74b8 100644 (file)
@@ -51,9 +51,9 @@ BOOST_AUTO_TEST_CASE(SerializeToDot)
         "digraph Optimized {\n"
         "    node [shape=\"record\"];\n"
         "    edge [fontsize=8 fontcolor=\"blue\" fontname=\"arial-bold\"];\n"
-        "    " << inputId << " [label=\"{Input}\"];\n"
-        "    " << addId << " [label=\"{Addition}\"];\n"
-        "    " << outputId << " [label=\"{Output}\"];\n"
+        "    " << inputId << " [label=\"{Input|LayerType : Input\\lBackendID : CpuRef\\l}\"];\n"
+        "    " << addId << " [label=\"{Addition|LayerType : Addition\\lBackendID : CpuRef\\l}\"];\n"
+        "    " << outputId << " [label=\"{Output|LayerType : Output\\lBackendID : CpuRef\\l}\"];\n"
         "    " << inputId << " -> " << addId << " [label=< [4] >];\n"
         "    " << inputId << " -> " << addId << " [label=< [4] >];\n"
         "    " << addId << " -> " << outputId << " [label=< [4] >];\n"
index 68617b9..1a29e73 100644 (file)
@@ -200,9 +200,9 @@ BOOST_AUTO_TEST_CASE(FP16TurboModeTestOnCpuRef)
              "digraph Optimized {\n"
              "    node [shape=\"record\"];\n"
              "    edge [fontsize=8 fontcolor=\"blue\" fontname=\"arial-bold\"];\n"
-             "    " << inputId << " [label=\"{Input}\"];\n"
-             "    " << floorId << " [label=\"{Floor}\"];\n"
-             "    " << outputId << " [label=\"{Output}\"];\n"
+             "    " << inputId << " [label=\"{Input|LayerType : Input\\lBackendID : CpuRef\\l}\"];\n"
+             "    " << floorId << " [label=\"{Floor|LayerType : Floor\\lBackendID : CpuRef\\l}\"];\n"
+             "    " << outputId << " [label=\"{Output|LayerType : Output\\lBackendID : CpuRef\\l}\"];\n"
              "    " << inputId << " -> " << floorId << " [label=< [4] >];\n"
              "    " << floorId << " -> " << outputId << " [label=< [4] >];\n"
              "}\n";
index 0761551..828d249 100644 (file)
@@ -86,6 +86,8 @@ int main(int argc, const char* argv[])
              "If left empty (the default), the output tensors will not be written to a file.")
             ("event-based-profiling,e", po::bool_switch()->default_value(false),
              "Enables built in profiler. If unset, defaults to off.")
+            ("visualize-optimized-model,v", po::bool_switch()->default_value(false),
+             "Enables built optimized model visualizer. If unset, defaults to off.")
             ("fp16-turbo-mode,h", po::bool_switch()->default_value(false), "If this option is enabled, FP32 layers, "
              "weights and biases will be converted to FP16 where the backend supports it")
             ("threshold-time,r", po::value<double>(&thresholdTime)->default_value(0.0),
@@ -132,6 +134,7 @@ int main(int argc, const char* argv[])
     // Get the value of the switch arguments.
     bool concurrent = vm["concurrent"].as<bool>();
     bool enableProfiling = vm["event-based-profiling"].as<bool>();
+    bool enableLayerDetails = vm["visualize-optimized-model"].as<bool>();
     bool enableFp16TurboMode = vm["fp16-turbo-mode"].as<bool>();
     bool quantizeInput = vm["quantize-input"].as<bool>();
     bool printIntermediate = vm["print-intermediate-layers"].as<bool>();
@@ -176,7 +179,8 @@ int main(int argc, const char* argv[])
             {
                 testCase.values.insert(testCase.values.begin(), executableName);
                 results.push_back(std::async(std::launch::async, RunCsvTest, std::cref(testCase), std::cref(runtime),
-                                             enableProfiling, enableFp16TurboMode, thresholdTime, printIntermediate));
+                                             enableProfiling, enableFp16TurboMode, thresholdTime, printIntermediate,
+                                             enableLayerDetails));
             }
 
             // Check results
@@ -195,7 +199,8 @@ int main(int argc, const char* argv[])
             {
                 testCase.values.insert(testCase.values.begin(), executableName);
                 if (RunCsvTest(testCase, runtime, enableProfiling,
-                               enableFp16TurboMode, thresholdTime, printIntermediate) != EXIT_SUCCESS)
+                               enableFp16TurboMode, thresholdTime, printIntermediate,
+                               enableLayerDetails) != EXIT_SUCCESS)
                 {
                     return EXIT_FAILURE;
                 }
@@ -231,6 +236,6 @@ int main(int argc, const char* argv[])
         return RunTest(modelFormat, inputTensorShapes, computeDevices, dynamicBackendsPath, modelPath, inputNames,
                        inputTensorDataFilePaths, inputTypes, quantizeInput, outputTypes, outputNames,
                        outputTensorFiles, enableProfiling, enableFp16TurboMode, thresholdTime, printIntermediate,
-                       subgraphId);
+                       subgraphId, enableLayerDetails);
     }
 }
index 1813600..635eaf3 100644 (file)
@@ -297,6 +297,7 @@ int MainImpl(const char* modelPath,
              const double& thresholdTime,
              bool printIntermediate,
              const size_t subgraphId,
+             bool enableLayerDetails = false,
              const std::shared_ptr<armnn::IRuntime>& runtime = nullptr)
 {
     using TContainer = boost::variant<std::vector<float>, std::vector<int>, std::vector<unsigned char>>;
@@ -312,6 +313,7 @@ int MainImpl(const char* modelPath,
         params.m_ComputeDevices = computeDevices;
         params.m_DynamicBackendsPath = dynamicBackendsPath;
         params.m_PrintIntermediateLayers = printIntermediate;
+        params.m_VisualizePostOptimizationModel = enableLayerDetails;
 
         for(const std::string& inputName: inputNames)
         {
@@ -456,6 +458,7 @@ int RunTest(const std::string& format,
             const double& thresholdTime,
             bool printIntermediate,
             const size_t subgraphId,
+            bool enableLayerDetails = false,
             const std::shared_ptr<armnn::IRuntime>& runtime = nullptr)
 {
     std::string modelFormat = boost::trim_copy(format);
@@ -568,7 +571,7 @@ int RunTest(const std::string& format,
         dynamicBackendsPath, inputNamesVector, inputTensorShapes,
         inputTensorDataFilePathsVector, inputTypesVector, quantizeInput,
         outputTypesVector, outputNamesVector, outputTensorFilesVector, enableProfiling,
-        enableFp16TurboMode, thresholdTime, printIntermediate, subgraphId, runtime);
+        enableFp16TurboMode, thresholdTime, printIntermediate, subgraphId, enableLayerDetails, runtime);
 #else
     BOOST_LOG_TRIVIAL(fatal) << "Not built with serialization support.";
     return EXIT_FAILURE;
@@ -584,7 +587,8 @@ int RunTest(const std::string& format,
                                                                quantizeInput, outputTypesVector, outputNamesVector,
                                                                outputTensorFilesVector, enableProfiling,
                                                                enableFp16TurboMode, thresholdTime,
-                                                               printIntermediate, subgraphId, runtime);
+                                                               printIntermediate, subgraphId, enableLayerDetails,
+                                                               runtime);
 #else
         BOOST_LOG_TRIVIAL(fatal) << "Not built with Caffe parser support.";
         return EXIT_FAILURE;
@@ -598,9 +602,9 @@ int RunTest(const std::string& format,
                                                          inputNamesVector, inputTensorShapes,
                                                          inputTensorDataFilePathsVector, inputTypesVector,
                                                          quantizeInput, outputTypesVector, outputNamesVector,
-                                                         outputTensorFilesVector, enableProfiling,
-                                                         enableFp16TurboMode, thresholdTime,
-                                                         printIntermediate, subgraphId, runtime);
+                                                         outputTensorFilesVector, enableProfiling, enableFp16TurboMode,
+                                                         thresholdTime,printIntermediate, subgraphId,
+                                                         enableLayerDetails, runtime);
 #else
     BOOST_LOG_TRIVIAL(fatal) << "Not built with Onnx parser support.";
     return EXIT_FAILURE;
@@ -614,9 +618,9 @@ int RunTest(const std::string& format,
                                                          inputNamesVector, inputTensorShapes,
                                                          inputTensorDataFilePathsVector, inputTypesVector,
                                                          quantizeInput, outputTypesVector, outputNamesVector,
-                                                         outputTensorFilesVector, enableProfiling,
-                                                         enableFp16TurboMode, thresholdTime,
-                                                         printIntermediate, subgraphId, runtime);
+                                                         outputTensorFilesVector, enableProfiling, enableFp16TurboMode,
+                                                         thresholdTime,printIntermediate, subgraphId,
+                                                         enableLayerDetails, runtime);
 #else
         BOOST_LOG_TRIVIAL(fatal) << "Not built with Tensorflow parser support.";
         return EXIT_FAILURE;
@@ -637,8 +641,8 @@ int RunTest(const std::string& format,
                                                                  inputTensorDataFilePathsVector, inputTypesVector,
                                                                  quantizeInput, outputTypesVector, outputNamesVector,
                                                                  outputTensorFilesVector, enableProfiling,
-                                                                 enableFp16TurboMode, thresholdTime,
-                                                                 printIntermediate, subgraphId, runtime);
+                                                                 enableFp16TurboMode, thresholdTime, printIntermediate,
+                                                                 subgraphId, enableLayerDetails, runtime);
 #else
         BOOST_LOG_TRIVIAL(fatal) << "Unknown model format: '" << modelFormat <<
             "'. Please include 'caffe', 'tensorflow', 'tflite' or 'onnx'";
@@ -655,7 +659,7 @@ int RunTest(const std::string& format,
 
 int RunCsvTest(const armnnUtils::CsvRow &csvRow, const std::shared_ptr<armnn::IRuntime>& runtime,
                const bool enableProfiling, const bool enableFp16TurboMode, const double& thresholdTime,
-               const bool printIntermediate)
+               const bool printIntermediate, bool enableLayerDetails = false)
 {
     std::string modelFormat;
     std::string modelPath;
@@ -767,5 +771,6 @@ int RunCsvTest(const armnnUtils::CsvRow &csvRow, const std::shared_ptr<armnn::IR
 
     return RunTest(modelFormat, inputTensorShapes, computeDevices, dynamicBackendsPath, modelPath, inputNames,
                    inputTensorDataFilePaths, inputTypes, quantizeInput, outputTypes, outputNames, outputTensorFiles,
-                   enableProfiling, enableFp16TurboMode, thresholdTime, printIntermediate, subgraphId);
+                   enableProfiling, enableFp16TurboMode, thresholdTime, printIntermediate, subgraphId,
+                   enableLayerDetails);
 }