IVGCVSW-2564 Add support for multiple input and output bindings in InferenceModel
author    Aron Virginas-Tar <Aron.Virginas-Tar@arm.com>
Thu, 24 Jan 2019 17:05:36 +0000 (17:05 +0000)
committer Aron Virginas-Tar <Aron.Virginas-Tar@arm.com>
Wed, 30 Jan 2019 11:25:56 +0000 (11:25 +0000)
Change-Id: I64d724367d42dca4b768b6c6e42acda714985950

tests/CaffeYolo-Armnn/CaffeYolo-Armnn.cpp
tests/ExecuteNetwork/ExecuteNetwork.cpp
tests/InferenceModel.hpp
tests/InferenceTest.hpp
tests/InferenceTest.inl
tests/MultipleNetworksCifar10/MultipleNetworksCifar10.cpp
tests/YoloInferenceTest.hpp
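
The core of the change is in tests/InferenceModel.hpp: the single-binding fields m_InputBinding, m_OutputBinding and m_InputTensorShape in InferenceModelInternal::Params become vectors, and InferenceModel::Run now takes one data container per binding. The sketch below shows how a caller might use the new API; it assumes tests/InferenceModel.hpp is included, and the parser type, model path, layer names and shape are illustrative placeholders rather than values from this patch.

    // Sketch only: exercises the multi-binding Params/Run API introduced by this patch.
    using Model = InferenceModel<armnnCaffeParser::ICaffeParser, float>;   // parser/data type are examples

    Model::Params params;
    params.m_ModelPath      = "model.caffemodel";                          // placeholder path
    params.m_InputBindings  = { "data" };                                  // one name per input layer
    params.m_InputShapes    = { armnn::TensorShape({ 1, 3, 224, 224 }) };  // optional, one shape per input
    params.m_OutputBindings = { "prob", "fc7" };                           // several outputs now allowed
    params.m_IsModelBinary  = true;

    std::shared_ptr<armnn::IRuntime> runtime;    // may be shared across models or left null, as in ExecuteNetwork
    Model model(params, runtime);

    std::vector<Model::TContainer> inputs  = { std::vector<float>(1 * 3 * 224 * 224) };  // sized to the input binding
    std::vector<Model::TContainer> outputs = { std::vector<float>(model.GetOutputSize(0)),
                                               std::vector<float>(model.GetOutputSize(1)) };
    model.Run(inputs, outputs);                  // one container per binding, in binding order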

diff --git a/tests/CaffeYolo-Armnn/CaffeYolo-Armnn.cpp b/tests/CaffeYolo-Armnn/CaffeYolo-Armnn.cpp
index 98db023..b752c7c 100644
@@ -31,9 +31,9 @@ int main(int argc, char* argv[])
 
                         typename YoloInferenceModel::Params modelParams;
                         modelParams.m_ModelPath = modelOptions.m_ModelDir + "yolov1_tiny_voc2007_model.caffemodel";
-                        modelParams.m_InputBinding = "data";
-                        modelParams.m_OutputBinding = "fc12";
-                        modelParams.m_InputTensorShape = &inputTensorShape;
+                        modelParams.m_InputBindings = { "data" };
+                        modelParams.m_OutputBindings = { "fc12" };
+                        modelParams.m_InputShapes = { inputTensorShape };
                         modelParams.m_IsModelBinary = true;
                         modelParams.m_ComputeDevice = modelOptions.m_ComputeDevice;
                         modelParams.m_VisualizePostOptimizationModel = modelOptions.m_VisualizePostOptimizationModel;
diff --git a/tests/ExecuteNetwork/ExecuteNetwork.cpp b/tests/ExecuteNetwork/ExecuteNetwork.cpp
index dd76975..d783a0e 100644
@@ -137,13 +137,12 @@ std::vector<unsigned int> ParseArray(std::istream& stream)
         [](const std::string& s) { return boost::numeric_cast<unsigned int>(std::stoi(s)); });
 }
 
-void PrintArray(const std::vector<float>& v)
+void PrintOutputData(const std::string& outputLayerName, const std::vector<float>& data)
 {
-    for (size_t i = 0; i < v.size(); i++)
-    {
-        printf("%f ", v[i]);
-    }
-    printf("\n");
+    std::cout << outputLayerName << ": ";
+    std::copy(data.begin(), data.end(),
+              std::ostream_iterator<float>(std::cout, " "));
+    std::cout << std::endl;
 }
 
 void RemoveDuplicateDevices(std::vector<armnn::BackendId>& computeDevices)
@@ -179,8 +178,10 @@ int MainImpl(const char* modelPath,
              const size_t subgraphId,
              const std::shared_ptr<armnn::IRuntime>& runtime = nullptr)
 {
+    using TContainer = std::vector<TDataType>;
+
     // Loads input tensor.
-    std::vector<TDataType> input;
+    TContainer inputDataContainer;
     {
         std::ifstream inputTensorFile(inputTensorDataFilePath);
         if (!inputTensorFile.good())
@@ -188,7 +189,7 @@ int MainImpl(const char* modelPath,
             BOOST_LOG_TRIVIAL(fatal) << "Failed to load input tensor data file from " << inputTensorDataFilePath;
             return EXIT_FAILURE;
         }
-        input = ParseArray<TDataType>(inputTensorFile);
+        inputDataContainer = ParseArray<TDataType>(inputTensorFile);
     }
 
     try
@@ -198,19 +199,23 @@ int MainImpl(const char* modelPath,
         params.m_ModelPath = modelPath;
         params.m_IsModelBinary = isModelBinary;
         params.m_ComputeDevice = computeDevice;
-        params.m_InputBinding = inputName;
-        params.m_InputTensorShape = inputTensorShape;
-        params.m_OutputBinding = outputName;
+        params.m_InputBindings = { inputName };
+        params.m_InputShapes = { *inputTensorShape };
+        params.m_OutputBindings = { outputName };
         params.m_EnableProfiling = enableProfiling;
         params.m_SubgraphId = subgraphId;
         InferenceModel<TParser, TDataType> model(params, runtime);
 
-        // Executes the model.
-        std::vector<TDataType> output(model.GetOutputSize());
-        model.Run(input, output);
+        // Executes the model
+        const size_t numOutputs = params.m_OutputBindings.size();
+        std::vector<TContainer> outputDataContainers(numOutputs);
+        model.Run({ inputDataContainer }, outputDataContainers);
 
-        // Prints the output tensor.
-        PrintArray(output);
+        // Print output tensors
+        for (size_t i = 0; i < numOutputs; i++)
+        {
+            PrintOutputData(params.m_OutputBindings[i], outputDataContainers[i]);
+        }
     }
     catch (armnn::Exception const& e)
     {
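
With this change ExecuteNetwork prints each output tensor prefixed with its binding name. A minimal sketch of the reworked helper in isolation, with made-up values:

    // Hypothetical call; the layer name and values are illustrative.
    PrintOutputData("fc12", { 0.25f, 0.5f, 0.75f });
    // writes to stdout: fc12: 0.25 0.5 0.75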
diff --git a/tests/InferenceModel.hpp b/tests/InferenceModel.hpp
index 5fefd05..1c89238 100644
@@ -27,6 +27,7 @@
 #include <fstream>
 #include <map>
 #include <string>
+#include <vector>
 #include <type_traits>
 
 namespace
@@ -73,20 +74,19 @@ using QuantizationParams = std::pair<float,int32_t>;
 
 struct Params
 {
-    std::string m_ModelPath;
-    std::string m_InputBinding;
-    std::string m_OutputBinding;
-    const armnn::TensorShape* m_InputTensorShape;
-    std::vector<armnn::BackendId> m_ComputeDevice;
-    bool m_EnableProfiling;
-    size_t m_SubgraphId;
-    bool m_IsModelBinary;
-    bool m_VisualizePostOptimizationModel;
-    bool m_EnableFp16TurboMode;
+    std::string                     m_ModelPath;
+    std::vector<std::string>        m_InputBindings;
+    std::vector<armnn::TensorShape> m_InputShapes;
+    std::vector<std::string>        m_OutputBindings;
+    std::vector<armnn::BackendId>   m_ComputeDevice;
+    bool                            m_EnableProfiling;
+    size_t                          m_SubgraphId;
+    bool                            m_IsModelBinary;
+    bool                            m_VisualizePostOptimizationModel;
+    bool                            m_EnableFp16TurboMode;
 
     Params()
-        : m_InputTensorShape(nullptr)
-        , m_ComputeDevice{armnn::Compute::CpuRef}
+        : m_ComputeDevice{armnn::Compute::CpuRef}
         , m_EnableProfiling(false)
         , m_SubgraphId(0)
         , m_IsModelBinary(true)
@@ -105,33 +105,54 @@ public:
     using BindingPointInfo = InferenceModelInternal::BindingPointInfo;
 
     static armnn::INetworkPtr Create(const Params& params,
-                                     BindingPointInfo& inputBindings,
-                                     BindingPointInfo& outputBindings)
+                                     std::vector<BindingPointInfo>& inputBindings,
+                                     std::vector<BindingPointInfo>& outputBindings)
     {
-      const std::string& modelPath = params.m_ModelPath;
-
-      // Create a network from a file on disk
-      auto parser(IParser::Create());
-
-      std::map<std::string, armnn::TensorShape> inputShapes;
-      if (params.m_InputTensorShape)
-      {
-          inputShapes[params.m_InputBinding] = *params.m_InputTensorShape;
-      }
-      std::vector<std::string> requestedOutputs{ params.m_OutputBinding };
-      armnn::INetworkPtr network{nullptr, [](armnn::INetwork *){}};
-
-      {
-          ARMNN_SCOPED_HEAP_PROFILING("Parsing");
-          // Handle text and binary input differently by calling the corresponding parser function
-          network = (params.m_IsModelBinary ?
-              parser->CreateNetworkFromBinaryFile(modelPath.c_str(), inputShapes, requestedOutputs) :
-              parser->CreateNetworkFromTextFile(modelPath.c_str(), inputShapes, requestedOutputs));
-      }
-
-      inputBindings  = parser->GetNetworkInputBindingInfo(params.m_InputBinding);
-      outputBindings = parser->GetNetworkOutputBindingInfo(params.m_OutputBinding);
-      return network;
+        const std::string& modelPath = params.m_ModelPath;
+
+        // Create a network from a file on disk
+        auto parser(IParser::Create());
+
+        std::map<std::string, armnn::TensorShape> inputShapes;
+        if (!params.m_InputShapes.empty())
+        {
+            const size_t numInputShapes   = params.m_InputShapes.size();
+            const size_t numInputBindings = params.m_InputBindings.size();
+            if (numInputShapes < numInputBindings)
+            {
+                throw armnn::Exception(boost::str(boost::format(
+                    "Not every input has its tensor shape specified: expected=%1%, got=%2%")
+                    % numInputBindings % numInputShapes));
+            }
+
+            for (size_t i = 0; i < numInputShapes; i++)
+            {
+                inputShapes[params.m_InputBindings[i]] = params.m_InputShapes[i];
+            }
+        }
+
+        std::vector<std::string> requestedOutputs = params.m_OutputBindings;
+        armnn::INetworkPtr network{nullptr, [](armnn::INetwork *){}};
+
+        {
+            ARMNN_SCOPED_HEAP_PROFILING("Parsing");
+            // Handle text and binary input differently by calling the corresponding parser function
+            network = (params.m_IsModelBinary ?
+                parser->CreateNetworkFromBinaryFile(modelPath.c_str(), inputShapes, requestedOutputs) :
+                parser->CreateNetworkFromTextFile(modelPath.c_str(), inputShapes, requestedOutputs));
+        }
+
+        for (const std::string& inputLayerName : params.m_InputBindings)
+        {
+            inputBindings.push_back(parser->GetNetworkInputBindingInfo(inputLayerName));
+        }
+
+        for (const std::string& outputLayerName : params.m_OutputBindings)
+        {
+            outputBindings.push_back(parser->GetNetworkOutputBindingInfo(outputLayerName));
+        }
+
+        return network;
     }
 };
 
@@ -145,24 +166,36 @@ public:
     using BindingPointInfo = InferenceModelInternal::BindingPointInfo;
 
     static armnn::INetworkPtr Create(const Params& params,
-                                     BindingPointInfo& inputBindings,
-                                     BindingPointInfo& outputBindings)
+                                     std::vector<BindingPointInfo>& inputBindings,
+                                     std::vector<BindingPointInfo>& outputBindings)
     {
-      const std::string& modelPath = params.m_ModelPath;
+        const std::string& modelPath = params.m_ModelPath;
 
-      // Create a network from a file on disk
-      auto parser(IParser::Create());
+        // Create a network from a file on disk
+        auto parser(IParser::Create());
 
-      armnn::INetworkPtr network{nullptr, [](armnn::INetwork *){}};
+        armnn::INetworkPtr network{nullptr, [](armnn::INetwork *){}};
 
-      {
-          ARMNN_SCOPED_HEAP_PROFILING("Parsing");
-          network = parser->CreateNetworkFromBinaryFile(modelPath.c_str());
-      }
+        {
+            ARMNN_SCOPED_HEAP_PROFILING("Parsing");
+            network = parser->CreateNetworkFromBinaryFile(modelPath.c_str());
+        }
+
+        for (const std::string& inputLayerName : params.m_InputBindings)
+        {
+            BindingPointInfo inputBinding =
+                parser->GetNetworkInputBindingInfo(params.m_SubgraphId, inputLayerName);
+            inputBindings.push_back(inputBinding);
+        }
 
-      inputBindings  = parser->GetNetworkInputBindingInfo(params.m_SubgraphId, params.m_InputBinding);
-      outputBindings = parser->GetNetworkOutputBindingInfo(params.m_SubgraphId, params.m_OutputBinding);
-      return network;
+        for (const std::string& outputLayerName : params.m_OutputBindings)
+        {
+            BindingPointInfo outputBinding =
+                parser->GetNetworkOutputBindingInfo(params.m_SubgraphId, outputLayerName);
+            outputBindings.push_back(outputBinding);
+        }
+
+        return network;
     }
 };
 #endif
@@ -177,67 +210,111 @@ public:
     using BindingPointInfo = InferenceModelInternal::BindingPointInfo;
 
     static armnn::INetworkPtr Create(const Params& params,
-                                     BindingPointInfo& inputBindings,
-                                     BindingPointInfo& outputBindings)
+                                     std::vector<BindingPointInfo>& inputBindings,
+                                     std::vector<BindingPointInfo>& outputBindings)
     {
-      const std::string& modelPath = params.m_ModelPath;
+        const std::string& modelPath = params.m_ModelPath;
+
+        // Create a network from a file on disk
+        auto parser(IParser::Create());
+
+        armnn::INetworkPtr network{nullptr, [](armnn::INetwork *){}};
 
-      // Create a network from a file on disk
-      auto parser(IParser::Create());
+        {
+            ARMNN_SCOPED_HEAP_PROFILING("Parsing");
+            network = (params.m_IsModelBinary ?
+                parser->CreateNetworkFromBinaryFile(modelPath.c_str()) :
+                parser->CreateNetworkFromTextFile(modelPath.c_str()));
+        }
 
-      armnn::INetworkPtr network{nullptr, [](armnn::INetwork *){}};
+        for (const std::string& inputLayerName : params.m_InputBindings)
+        {
+            BindingPointInfo inputBinding = parser->GetNetworkInputBindingInfo(inputLayerName);
+            inputBindings.push_back(inputBinding);
+        }
 
-      {
-          ARMNN_SCOPED_HEAP_PROFILING("Parsing");
-          network = (params.m_IsModelBinary ?
-              parser->CreateNetworkFromBinaryFile(modelPath.c_str()) :
-              parser->CreateNetworkFromTextFile(modelPath.c_str()));
-      }
+        for (const std::string& outputLayerName : params.m_OutputBindings)
+        {
+            BindingPointInfo outputBinding = parser->GetNetworkOutputBindingInfo(outputLayerName);
+            outputBindings.push_back(outputBinding);
+        }
 
-      inputBindings  = parser->GetNetworkInputBindingInfo(params.m_InputBinding);
-      outputBindings = parser->GetNetworkOutputBindingInfo(params.m_OutputBinding);
-      return network;
+        return network;
     }
 };
 #endif
 
 template<typename TContainer>
-inline armnn::InputTensors MakeInputTensors(const InferenceModelInternal::BindingPointInfo& input,
-    const TContainer& inputTensorData)
+inline armnn::InputTensors MakeInputTensors(
+    const std::vector<InferenceModelInternal::BindingPointInfo>& inputBindings,
+    const std::vector<TContainer>& inputDataContainers)
 {
-    if (inputTensorData.size() != input.second.GetNumElements())
+    armnn::InputTensors inputTensors;
+
+    const size_t numInputs = inputBindings.size();
+    if (numInputs != inputDataContainers.size())
     {
-        try
-        {
-            throw armnn::Exception(boost::str(boost::format("Input tensor has incorrect size. Expected %1% elements "
-                "but got %2%.") % input.second.GetNumElements() % inputTensorData.size()));
-        } catch (const boost::exception& e)
+        throw armnn::Exception(boost::str(boost::format("Number of inputs does not match number of "
+            "tensor data containers: %1% != %2%") % numInputs % inputDataContainers.size()));
+    }
+
+    for (size_t i = 0; i < numInputs; i++)
+    {
+        const InferenceModelInternal::BindingPointInfo& inputBinding = inputBindings[i];
+        const TContainer& inputData = inputDataContainers[i];
+
+        if (inputData.size() != inputBinding.second.GetNumElements())
         {
-            // Coverity fix: it should not be possible to get here but boost::str and boost::format can both
-            // throw uncaught exceptions, convert them to armnn exceptions and rethrow.
-            throw armnn::Exception(diagnostic_information(e));
+            throw armnn::Exception("Input tensor has incorrect size");
         }
+
+        armnn::ConstTensor inputTensor(inputBinding.second, inputData.data());
+        inputTensors.push_back(std::make_pair(inputBinding.first, inputTensor));
     }
-    return { { input.first, armnn::ConstTensor(input.second, inputTensorData.data()) } };
+
+    return inputTensors;
 }
 
 template<typename TContainer>
-inline armnn::OutputTensors MakeOutputTensors(const InferenceModelInternal::BindingPointInfo& output,
-    TContainer& outputTensorData)
+inline armnn::OutputTensors MakeOutputTensors(
+    const std::vector<InferenceModelInternal::BindingPointInfo>& outputBindings,
+    std::vector<TContainer>& outputDataContainers)
 {
-    if (outputTensorData.size() != output.second.GetNumElements())
+    armnn::OutputTensors outputTensors;
+
+    const size_t numOutputs = outputBindings.size();
+    if (numOutputs != outputDataContainers.size())
     {
-        throw armnn::Exception("Output tensor has incorrect size");
+        throw armnn::Exception(boost::str(boost::format("Number of outputs does not match number of "
+            "tensor data containers: %1% != %2%") % numOutputs % outputDataContainers.size()));
+    }
+
+    for (size_t i = 0; i < numOutputs; i++)
+    {
+        const InferenceModelInternal::BindingPointInfo& outputBinding = outputBindings[i];
+        TContainer& outputData = outputDataContainers[i];
+
+        if (outputData.size() != outputBinding.second.GetNumElements())
+        {
+            throw armnn::Exception("Output tensor has incorrect size");
+        }
+
+        armnn::Tensor outputTensor(outputBinding.second, outputData.data());
+        outputTensors.push_back(std::make_pair(outputBinding.first, outputTensor));
     }
-    return { { output.first, armnn::Tensor(output.second, outputTensorData.data()) } };
+
+    return outputTensors;
 }
 
 template <typename IParser, typename TDataType>
 class InferenceModel
 {
 public:
-    using DataType = TDataType;
-    using Params = InferenceModelInternal::Params;
+    using DataType           = TDataType;
+    using Params             = InferenceModelInternal::Params;
+    using BindingPointInfo   = InferenceModelInternal::BindingPointInfo;
+    using QuantizationParams = InferenceModelInternal::QuantizationParams;
+    using TContainer         = std::vector<TDataType>;
 
     struct CommandLineOptions
     {
@@ -290,8 +367,8 @@ public:
             throw armnn::Exception("Some backend IDs are invalid: " + invalidBackends);
         }
 
-        armnn::INetworkPtr network = CreateNetworkImpl<IParser>::Create(params, m_InputBindingInfo,
-           m_OutputBindingInfo);
+        armnn::INetworkPtr network =
+            CreateNetworkImpl<IParser>::Create(params, m_InputBindings, m_OutputBindings);
 
         armnn::IOptimizedNetworkPtr optNet{nullptr, [](armnn::IOptimizedNetwork *){}};
         {
@@ -327,14 +404,41 @@ public:
         }
     }
 
-    unsigned int GetOutputSize() const
+    void CheckInputIndexIsValid(unsigned int inputIndex) const
+    {
+        if (m_InputBindings.size() < inputIndex + 1)
+        {
+            throw armnn::Exception(boost::str(boost::format("Input index out of range: %1%") % inputIndex));
+        }
+    }
+
+    void CheckOutputIndexIsValid(unsigned int outputIndex) const
+    {
+        if (m_OutputBindings.size() < outputIndex + 1)
+        {
+            throw armnn::Exception(boost::str(boost::format("Output index out of range: %1%") % outputIndex));
+        }
+    }
+
+    unsigned int GetOutputSize(unsigned int outputIndex = 0u) const
     {
-        return m_OutputBindingInfo.second.GetNumElements();
+        CheckOutputIndexIsValid(outputIndex);
+        return m_OutputBindings[outputIndex].second.GetNumElements();
     }
 
-    void Run(const std::vector<TDataType>& input, std::vector<TDataType>& output)
+    void Run(const std::vector<TContainer>& inputContainers, std::vector<TContainer>& outputContainers)
     {
-        BOOST_ASSERT(output.size() == GetOutputSize());
+        for (unsigned int i = 0; i < outputContainers.size(); i++)
+        {
+            const unsigned int expectedOutputDataSize = GetOutputSize(i);
+            const unsigned int actualOutputDataSize   = boost::numeric_cast<unsigned int>(outputContainers[i].size());
+            if (actualOutputDataSize < expectedOutputDataSize)
+            {
+                unsigned int outputIndex = boost::numeric_cast<unsigned int>(i);
+                throw armnn::Exception(boost::str(boost::format("Not enough data for output #%1%: expected "
+                    "%2% elements, got %3%") % outputIndex % expectedOutputDataSize % actualOutputDataSize));
+            }
+        }
 
         std::shared_ptr<armnn::IProfiler> profiler = m_Runtime->GetProfiler(m_NetworkIdentifier);
         if (profiler)
@@ -343,8 +447,8 @@ public:
         }
 
         armnn::Status ret = m_Runtime->EnqueueWorkload(m_NetworkIdentifier,
-                                                       MakeInputTensors(input),
-                                                       MakeOutputTensors(output));
+                                                       MakeInputTensors(inputContainers),
+                                                       MakeOutputTensors(outputContainers));
 
         // if profiling is enabled print out the results
         if (profiler && profiler->IsProfilingEnabled())
@@ -358,39 +462,62 @@ public:
         }
     }
 
-    const InferenceModelInternal::BindingPointInfo & GetInputBindingInfo() const
+    const BindingPointInfo& GetInputBindingInfo(unsigned int inputIndex = 0u) const
+    {
+        CheckInputIndexIsValid(inputIndex);
+        return m_InputBindings[inputIndex];
+    }
+
+    const std::vector<BindingPointInfo>& GetInputBindingInfos() const
+    {
+        return m_InputBindings;
+    }
+
+    const BindingPointInfo& GetOutputBindingInfo(unsigned int outputIndex = 0u) const
+    {
+        CheckOutputIndexIsValid(outputIndex);
+        return m_OutputBindings[outputIndex];
+    }
+
+    const std::vector<BindingPointInfo>& GetOutputBindingInfos() const
     {
-        return m_InputBindingInfo;
+        return m_OutputBindings;
     }
 
-    const InferenceModelInternal::BindingPointInfo & GetOutputBindingInfo() const
+    QuantizationParams GetQuantizationParams(unsigned int outputIndex = 0u) const
     {
-        return m_OutputBindingInfo;
+        CheckOutputIndexIsValid(outputIndex);
+        return std::make_pair(m_OutputBindings[outputIndex].second.GetQuantizationScale(),
+                              m_OutputBindings[outputIndex].second.GetQuantizationOffset());
     }
 
-    InferenceModelInternal::QuantizationParams GetQuantizationParams() const
+    std::vector<QuantizationParams> GetAllQuantizationParams() const
     {
-        return std::make_pair(m_OutputBindingInfo.second.GetQuantizationScale(),
-                              m_OutputBindingInfo.second.GetQuantizationOffset());
+        std::vector<QuantizationParams> quantizationParams;
+        for (unsigned int i = 0u; i < m_OutputBindings.size(); i++)
+        {
+            quantizationParams.push_back(GetQuantizationParams(i));
+        }
+        return quantizationParams;
     }
 
 private:
     armnn::NetworkId m_NetworkIdentifier;
     std::shared_ptr<armnn::IRuntime> m_Runtime;
 
-    InferenceModelInternal::BindingPointInfo m_InputBindingInfo;
-    InferenceModelInternal::BindingPointInfo m_OutputBindingInfo;
+    std::vector<InferenceModelInternal::BindingPointInfo> m_InputBindings;
+    std::vector<InferenceModelInternal::BindingPointInfo> m_OutputBindings;
     bool m_EnableProfiling;
 
     template<typename TContainer>
-    armnn::InputTensors MakeInputTensors(const TContainer& inputTensorData)
+    armnn::InputTensors MakeInputTensors(const std::vector<TContainer>& inputDataContainers)
     {
-        return ::MakeInputTensors(m_InputBindingInfo, inputTensorData);
+        return ::MakeInputTensors(m_InputBindings, inputDataContainers);
     }
 
     template<typename TContainer>
-    armnn::OutputTensors MakeOutputTensors(TContainer& outputTensorData)
+    armnn::OutputTensors MakeOutputTensors(std::vector<TContainer>& outputDataContainers)
     {
-        return ::MakeOutputTensors(m_OutputBindingInfo, outputTensorData);
+        return ::MakeOutputTensors(m_OutputBindings, outputDataContainers);
     }
-};
+};
\ No newline at end of file
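
Callers can now query bindings, sizes and quantization parameters per output index, or in bulk. A brief sketch under the same assumptions as the example near the top of this change, with 'model' constructed as shown there:

    // Sketch only: per-index accessors added by this patch.
    const auto& firstInput = model.GetInputBindingInfo(0);      // throws armnn::Exception if out of range
    const auto& outputs    = model.GetOutputBindingInfos();
    for (unsigned int i = 0; i < outputs.size(); ++i)
    {
        auto quant = model.GetQuantizationParams(i);             // {scale, offset} pair for output i
        std::cout << "output " << i << ": " << model.GetOutputSize(i) << " elements, "
                  << "scale=" << quant.first << " offset=" << quant.second << "\n";
    }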
diff --git a/tests/InferenceTest.hpp b/tests/InferenceTest.hpp
index 3ea7096..3c22df9 100644
@@ -100,31 +100,40 @@ template <typename TModel>
 class InferenceModelTestCase : public IInferenceTestCase
 {
 public:
+    using TContainer = std::vector<typename TModel::DataType>;
+
     InferenceModelTestCase(TModel& model,
-        unsigned int testCaseId,
-        std::vector<typename TModel::DataType> modelInput,
-        unsigned int outputSize)
+                           unsigned int testCaseId,
+                           const std::vector<TContainer>& inputs,
+                           const std::vector<unsigned int>& outputSizes)
         : m_Model(model)
         , m_TestCaseId(testCaseId)
-        , m_Input(std::move(modelInput))
+        , m_Inputs(std::move(inputs))
     {
-        m_Output.resize(outputSize);
+        // Initialize output vector
+        const size_t numOutputs = outputSizes.size();
+        m_Outputs.resize(numOutputs);
+
+        for (size_t i = 0; i < numOutputs; i++)
+        {
+            m_Outputs[i].resize(outputSizes[i]);
+        }
     }
 
     virtual void Run() override
     {
-        m_Model.Run(m_Input, m_Output);
+        m_Model.Run(m_Inputs, m_Outputs);
     }
 
 protected:
     unsigned int GetTestCaseId() const { return m_TestCaseId; }
-    const std::vector<typename TModel::DataType>& GetOutput() const { return m_Output; }
+    const std::vector<TContainer>& GetOutputs() const { return m_Outputs; }
 
 private:
-    TModel& m_Model;
-    unsigned int m_TestCaseId;
-    std::vector<typename TModel::DataType> m_Input;
-    std::vector<typename TModel::DataType> m_Output;
+    TModel&                 m_Model;
+    unsigned int            m_TestCaseId;
+    std::vector<TContainer> m_Inputs;
+    std::vector<TContainer> m_Outputs;
 };
 
 template <typename TDataType>
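
Test cases derived from InferenceModelTestCase now supply one container per input and one element count per output. A hypothetical subclass illustrating the new constructor shape (the class name and output count are made up; it mirrors how ClassifierTestCase and YoloTestCase forward to the base constructor further down in this patch):

    // Hypothetical test case with one input and two outputs.
    template <typename TModel>
    class TwoOutputTestCase : public InferenceModelTestCase<TModel>
    {
    public:
        TwoOutputTestCase(TModel& model, unsigned int testCaseId,
                          std::vector<typename TModel::DataType> input)
            : InferenceModelTestCase<TModel>(model, testCaseId,
                                             { std::move(input) },                              // one input container
                                             { model.GetOutputSize(0), model.GetOutputSize(1) }) // one size per output
        {
        }
    };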
diff --git a/tests/InferenceTest.inl b/tests/InferenceTest.inl
index 7ce017c..4dde354 100644
@@ -1,4 +1,4 @@
-//
+//
 // Copyright © 2017 Arm Ltd. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
@@ -39,7 +39,7 @@ ClassifierTestCase<TTestCaseDatabase, TModel>::ClassifierTestCase(
     unsigned int testCaseId,
     unsigned int label,
     std::vector<typename TModel::DataType> modelInput)
-    : InferenceModelTestCase<TModel>(model, testCaseId, std::move(modelInput), model.GetOutputSize())
+    : InferenceModelTestCase<TModel>(model, testCaseId, { std::move(modelInput) }, { model.GetOutputSize() })
     , m_Label(label)
     , m_QuantizationParams(model.GetQuantizationParams())
     , m_NumInferencesRef(numInferencesRef)
@@ -52,7 +52,7 @@ ClassifierTestCase<TTestCaseDatabase, TModel>::ClassifierTestCase(
 template <typename TTestCaseDatabase, typename TModel>
 TestCaseResult ClassifierTestCase<TTestCaseDatabase, TModel>::ProcessResult(const InferenceTestOptions& params)
 {
-    auto& output = this->GetOutput();
+    auto& output = this->GetOutputs()[0];
     const auto testCaseId = this->GetTestCaseId();
 
     std::map<float,int> resultMap;
@@ -309,7 +309,12 @@ int ClassifierInferenceTestMain(int argc,
                                 const std::vector<unsigned int>& defaultTestCaseIds,
                                 TConstructDatabaseCallable constructDatabase,
                                 const armnn::TensorShape* inputTensorShape)
+
 {
+    BOOST_ASSERT(modelFilename);
+    BOOST_ASSERT(inputBindingName);
+    BOOST_ASSERT(outputBindingName);
+
     return InferenceTestMain(argc, argv, defaultTestCaseIds,
         [=]
         ()
@@ -328,9 +333,14 @@ int ClassifierInferenceTestMain(int argc,
 
                     typename InferenceModel::Params modelParams;
                     modelParams.m_ModelPath = modelOptions.m_ModelDir + modelFilename;
-                    modelParams.m_InputBinding = inputBindingName;
-                    modelParams.m_OutputBinding = outputBindingName;
-                    modelParams.m_InputTensorShape = inputTensorShape;
+                    modelParams.m_InputBindings  = { inputBindingName };
+                    modelParams.m_OutputBindings = { outputBindingName };
+
+                    if (inputTensorShape)
+                    {
+                        modelParams.m_InputShapes.push_back(*inputTensorShape);
+                    }
+
                     modelParams.m_IsModelBinary = isModelBinary;
                     modelParams.m_ComputeDevice = modelOptions.m_ComputeDevice;
                     modelParams.m_VisualizePostOptimizationModel = modelOptions.m_VisualizePostOptimizationModel;
diff --git a/tests/MultipleNetworksCifar10/MultipleNetworksCifar10.cpp b/tests/MultipleNetworksCifar10/MultipleNetworksCifar10.cpp
index f31e0c9..44b8890 100644
@@ -173,14 +173,23 @@ int main(int argc, char* argv[])
             // Loads test case data (including image data).
             std::unique_ptr<Cifar10Database::TTestCaseData> testCaseData = cifar10.GetTestCaseData(i);
 
-            // Tests inference.
-            std::vector<std::array<float, 10>> outputs(networksCount);
+            using TInputContainer  = std::vector<float>;
+            using TOutputContainer = std::array<float, 10>;
 
+            // Tests inference.
+            std::vector<TOutputContainer> outputs(networksCount);
             for (unsigned int k = 0; k < networksCount; ++k)
             {
+                using BindingPointInfo = InferenceModelInternal::BindingPointInfo;
+                std::vector<BindingPointInfo> inputBindings  = { networks[k].m_InputBindingInfo  };
+                std::vector<BindingPointInfo> outputBindings = { networks[k].m_OutputBindingInfo };
+
+                std::vector<TInputContainer> inputData   = { testCaseData->m_InputImage };
+                std::vector<TOutputContainer> outputData = { outputs[k] };
+
                 status = runtime->EnqueueWorkload(networks[k].m_Network,
-                    MakeInputTensors(networks[k].m_InputBindingInfo, testCaseData->m_InputImage),
-                    MakeOutputTensors(networks[k].m_OutputBindingInfo, outputs[k]));
+                    MakeInputTensors(inputBindings, inputData),
+                    MakeOutputTensors(outputBindings, outputData));
                 if (status == armnn::Status::Failure)
                 {
                     BOOST_LOG_TRIVIAL(fatal) << "armnn::IRuntime: Failed to enqueue workload";
diff --git a/tests/YoloInferenceTest.hpp b/tests/YoloInferenceTest.hpp
index 5e2a482..98a9d2f 100644
@@ -24,7 +24,7 @@ public:
     YoloTestCase(Model& model,
         unsigned int testCaseId,
         YoloTestCaseData& testCaseData)
-     : InferenceModelTestCase<Model>(model, testCaseId, std::move(testCaseData.m_InputImage), YoloOutputSize)
+     : InferenceModelTestCase<Model>(model, testCaseId, { std::move(testCaseData.m_InputImage) }, { YoloOutputSize })
      , m_FloatComparer(boost::math::fpc::percent_tolerance(1.0f))
      , m_TopObjectDetections(std::move(testCaseData.m_TopObjectDetections))
     {
@@ -34,7 +34,7 @@ public:
     {
         using Boost3dArray = boost::multi_array<float, 3>;
 
-        const std::vector<float>& output = this->GetOutput();
+        const std::vector<float>& output = this->GetOutputs()[0];
         BOOST_ASSERT(output.size() == YoloOutputSize);
 
         constexpr Boost3dArray::index gridSize = 7;
@@ -178,7 +178,7 @@ class YoloTestCaseProvider : public IInferenceTestCaseProvider
 {
 public:
     template <typename TConstructModelCallable>
-    YoloTestCaseProvider(TConstructModelCallable constructModel)
+    explicit YoloTestCaseProvider(TConstructModelCallable constructModel)
         : m_ConstructModel(constructModel)
     {
     }