// SPDX-License-Identifier: MIT
//
+#include "ImageTensorGenerator.hpp"
#include "../InferenceTestImage.hpp"
+#include <armnn/TypesUtils.hpp>
#include <boost/filesystem.hpp>
#include <boost/filesystem/operations.hpp>
#include <boost/filesystem/path.hpp>
#include <boost/log/trivial.hpp>
#include <boost/program_options.hpp>
+#include <boost/variant.hpp>
#include <algorithm>
#include <fstream>
return false;
}
- std::vector<std::string> supportedLayouts = {
- "NHWC",
- "NCHW"
- };
+ std::vector<std::string> supportedLayouts = { "NHWC", "NCHW" };
auto iterator = std::find(supportedLayouts.begin(), supportedLayouts.end(), layout);
if (iterator == supportedLayouts.end())
("help,h", "Display help messages")
("infile,i", po::value<std::string>(&m_InputFileName)->required(),
"Input image file to generate tensor from")
- ("layout,l", po::value<std::string>(&m_Layout)->default_value("NHWC"),
- "Output data layout, \"NHWC\" or \"NCHW\", default value NHWC")
+ ("model-format,f", po::value<std::string>(&m_ModelFormat)->required(),
+ "Format of the model file. Accepted values (caffe, tensorflow, tflite)")
("outfile,o", po::value<std::string>(&m_OutputFileName)->required(),
- "Output raw tensor file path");
+ "Output raw tensor file path")
+ ("output-type,z", po::value<std::string>(&m_OutputType)->default_value("float"),
+ "The data type of the output tensors. "
+ "If unset, defaults to \"float\". "
+ "Accepted values (float, int or qasymm8)")
+ ("new-width,w", po::value<std::string>(&m_NewWidth)->default_value("0"),
+ "Resize image to new width. Keep original width if unspecified")
+ ("new-height", po::value<std::string>(&m_NewHeight)->default_value("0"),
+ "Resize image to new height. Keep original height if unspecified")
+ ("layout,l", po::value<std::string>(&m_Layout)->default_value("NHWC"),
+ "Output data layout, \"NHWC\" or \"NCHW\", default value NHWC");
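+
+ // Example invocation (hypothetical file names; the short option for new-height is omitted
+ // because -h is already taken by help):
+ //   ImageTensorGenerator -i cat.jpg -o cat.raw -f tflite -z qasymm8 --new-width 224 --new-height 224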
}
catch (const std::exception& e)
{
}
std::string GetInputFileName() {return m_InputFileName;}
- std::string GetLayout() {return m_Layout;}
+ armnn::DataLayout GetLayout()
+ {
+ if (m_Layout == "NHWC")
+ {
+ return armnn::DataLayout::NHWC;
+ }
+ else if (m_Layout == "NCHW")
+ {
+ return armnn::DataLayout::NCHW;
+ }
+ else
+ {
+ throw armnn::Exception("Unsupported data layout: " + m_Layout);
+ }
+ }
std::string GetOutputFileName() {return m_OutputFileName;}
+ unsigned int GetNewWidth() {return static_cast<unsigned int>(std::stoi(m_NewWidth));}
+ unsigned int GetNewHeight() {return static_cast<unsigned int>(std::stoi(m_NewHeight));}
+ SupportedFrontend GetModelFormat()
+ {
+ if (m_ModelFormat == "caffe")
+ {
+ return SupportedFrontend::Caffe;
+ }
+ else if (m_ModelFormat == "tensorflow")
+ {
+ return SupportedFrontend::TensorFlow;
+ }
+ else if (m_ModelFormat == "tflite")
+ {
+ return SupportedFrontend::TFLite;
+ }
+ else
+ {
+ throw armnn::Exception("Unsupported model format: " + m_ModelFormat);
+ }
+ }
+ armnn::DataType GetOutputType()
+ {
+ if (m_OutputType == "float")
+ {
+ return armnn::DataType::Float32;
+ }
+ else if (m_OutputType == "int")
+ {
+ return armnn::DataType::Signed32;
+ }
+ else if (m_OutputType == "qasymm8")
+ {
+ return armnn::DataType::QuantisedAsymm8;
+ }
+ else
+ {
+ throw armnn::Exception("Unsupported output type: " + m_OutputType);
+ }
+ }
private:
std::string m_InputFileName;
std::string m_Layout;
std::string m_OutputFileName;
+ std::string m_NewWidth;
+ std::string m_NewHeight;
+ std::string m_ModelFormat;
+ std::string m_OutputType;
};
} // namespace anonymous
{
return -1;
}
-
const std::string imagePath(cmdline.GetInputFileName());
const std::string outputPath(cmdline.GetOutputFileName());
-
- // generate image tensor
- std::vector<float> imageData;
+ const SupportedFrontend& modelFormat(cmdline.GetModelFormat());
+ const armnn::DataType outputType(cmdline.GetOutputType());
+ const unsigned int newWidth = cmdline.GetNewWidth();
+ const unsigned int newHeight = cmdline.GetNewHeight();
+ const unsigned int batchSize = 1;
+ const armnn::DataLayout outputLayout(cmdline.GetLayout());
+
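+ // The image tensor is stored in a boost::variant so a single code path can carry float,
+ // int or quantised uint8 data; boost::apply_visitor below dispatches on whichever element
+ // type is actually stored when the tensor is written out.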
+ using TContainer = boost::variant<std::vector<float>, std::vector<int>, std::vector<uint8_t>>;
+ std::vector<TContainer> imageDataContainers;
+ const NormalizationParameters& normParams = GetNormalizationParameters(modelFormat, outputType);
try
{
- InferenceTestImage testImage(imagePath.c_str());
- imageData = cmdline.GetLayout() == "NHWC"
- ? GetImageDataAsNormalizedFloats(ImageChannelLayout::Rgb, testImage)
- : GetImageDataInArmNnLayoutAsNormalizedFloats(ImageChannelLayout::Rgb, testImage);
+ switch (outputType)
+ {
+ case armnn::DataType::Signed32:
+ imageDataContainers.push_back(PrepareImageTensor<int>(
+ imagePath, newWidth, newHeight, normParams, batchSize, outputLayout));
+ break;
+ case armnn::DataType::QuantisedAsymm8:
+ imageDataContainers.push_back(PrepareImageTensor<uint8_t>(
+ imagePath, newWidth, newHeight, normParams, batchSize, outputLayout));
+ break;
+ case armnn::DataType::Float32:
+ default:
+ imageDataContainers.push_back(PrepareImageTensor<float>(
+ imagePath, newWidth, newHeight, normParams, batchSize, outputLayout));
+ break;
+ }
}
catch (const InferenceTestImageException& e)
{
imageTensorFile.open(outputPath, std::ofstream::out);
if (imageTensorFile.is_open())
{
- std::copy(imageData.begin(), imageData.end(), std::ostream_iterator<float>(imageTensorFile, " "));
+ boost::apply_visitor([&imageTensorFile](auto&& imageData) { WriteImageTensorImpl(imageData, imageTensorFile); },
+ imageDataContainers[0]);
if (!imageTensorFile)
{
BOOST_LOG_TRIVIAL(fatal) << "Failed to write to output file: " << outputPath;
}
return 0;
-}
\ No newline at end of file
+}
--- /dev/null
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include "../InferenceTestImage.hpp"
+#include "Permute.hpp"
+#include <armnn/TypesUtils.hpp>
+
+#include <algorithm>
+#include <array>
+#include <fstream>
+#include <iterator>
+#include <string>
+#include <vector>
+
+struct NormalizationParameters
+{
+ float scale{ 1.0 };
+ std::array<float, 3> mean{ 0.0, 0.0, 0.0 };
+ std::array<float, 3> stddev{ 1.0, 1.0, 1.0 };
+};
+
+enum class SupportedFrontend
+{
+ Caffe = 0,
+ TensorFlow = 1,
+ TFLite = 2,
+};
+
+// Get normalization parameters.
+// Note that different flavours of models have different normalization methods.
+// This tool currently only supports Caffe, TensorFlow and TFLite models.
+NormalizationParameters GetNormalizationParameters(const SupportedFrontend& modelFormat,
+ const armnn::DataType& outputType)
+{
+ NormalizationParameters normParams;
+ // Explicitly set default parameters
+ normParams.scale = 1.0;
+ normParams.mean = { 0.0, 0.0, 0.0 };
+ normParams.stddev = { 1.0, 1.0, 1.0 };
+ switch (modelFormat)
+ {
+ case SupportedFrontend::Caffe:
+ break;
+ case SupportedFrontend::TensorFlow:
+ case SupportedFrontend::TFLite:
+ default:
+ switch (outputType)
+ {
+ case armnn::DataType::Float32:
+ normParams.scale = 127.5;
+ normParams.mean = { 1.0, 1.0, 1.0 };
+ break;
+ case armnn::DataType::Signed32:
+ normParams.mean = { 128.0, 128.0, 128.0 };
+ break;
+ case armnn::DataType::QuantisedAsymm8:
+ default:
+ break;
+ }
+ break;
+ }
+ return normParams;
+}
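+
+// Note: assuming the normalization is applied per channel as (v / scale - mean) / stddev, the
+// TensorFlow/TFLite float parameters above (scale = 127.5, mean = 1.0) map a pixel value v in
+// [0, 255] to v / 127.5 - 1.0, i.e. the [-1, 1] range these models typically expect.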
+
+// Prepare raw image tensor data by loading the image from imagePath and preprocessing it.
+template <typename ElemType>
+std::vector<ElemType> PrepareImageTensor(const std::string& imagePath,
+ unsigned int newWidth,
+ unsigned int newHeight,
+ const NormalizationParameters& normParams,
+ unsigned int batchSize = 1,
+ const armnn::DataLayout& outputLayout = armnn::DataLayout::NHWC);
+
+// Prepare float32 image tensor
+template <>
+std::vector<float> PrepareImageTensor<float>(const std::string& imagePath,
+ unsigned int newWidth,
+ unsigned int newHeight,
+ const NormalizationParameters& normParams,
+ unsigned int batchSize,
+ const armnn::DataLayout& outputLayout)
+{
+ // Generate image tensor
+ std::vector<float> imageData;
+ InferenceTestImage testImage(imagePath.c_str());
+ if (newWidth == 0)
+ {
+ newWidth = testImage.GetWidth();
+ }
+ if (newHeight == 0)
+ {
+ newHeight = testImage.GetHeight();
+ }
+ // Resize the image to the new width and height, or keep the original dimensions if the new
+ // width and height are specified as 0. Centre/normalise the image.
+ imageData = testImage.Resize(newWidth, newHeight, CHECK_LOCATION(),
+ InferenceTestImage::ResizingMethods::BilinearAndNormalized, normParams.mean,
+ normParams.stddev, normParams.scale);
+ if (outputLayout == armnn::DataLayout::NCHW)
+ {
+ // Convert to NCHW format
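+        // Each entry of the permutation vector gives the destination dimension of the
+        // corresponding source dimension, so { 0, 2, 3, 1 } sends N->0, H->2, W->3, C->1,
+        // i.e. NHWC to NCHW.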
+ const armnn::PermutationVector NHWCToArmNN = { 0, 2, 3, 1 };
+ armnn::TensorShape dstShape({ batchSize, 3, newHeight, newWidth });
+ std::vector<float> tempImage(imageData.size());
+ armnnUtils::Permute(dstShape, NHWCToArmNN, imageData.data(), tempImage.data(), sizeof(float));
+ imageData.swap(tempImage);
+ }
+ return imageData;
+}
+
+// Prepare int32 image tensor
+template <>
+std::vector<int> PrepareImageTensor<int>(const std::string& imagePath,
+ unsigned int newWidth,
+ unsigned int newHeight,
+ const NormalizationParameters& normParams,
+ unsigned int batchSize,
+ const armnn::DataLayout& outputLayout)
+{
+ // Get float32 image tensor
+ std::vector<float> imageDataFloat =
+ PrepareImageTensor<float>(imagePath, newWidth, newHeight, normParams, batchSize, outputLayout);
+ // Convert to int32 image tensor with static cast
+ std::vector<int> imageDataInt;
+ imageDataInt.reserve(imageDataFloat.size());
+ std::transform(imageDataFloat.begin(), imageDataFloat.end(), std::back_inserter(imageDataInt),
+ [](float val) { return static_cast<int>(val); });
+ return imageDataInt;
+}
+
+// Prepare qasymm8 image tensor
+template <>
+std::vector<uint8_t> PrepareImageTensor<uint8_t>(const std::string& imagePath,
+ unsigned int newWidth,
+ unsigned int newHeight,
+ const NormalizationParameters& normParams,
+ unsigned int batchSize,
+ const armnn::DataLayout& outputLayout)
+{
+ // Get float32 image tensor
+ std::vector<float> imageDataFloat =
+ PrepareImageTensor<float>(imagePath, newWidth, newHeight, normParams, batchSize, outputLayout);
+ std::vector<uint8_t> imageDataQasymm8;
+ imageDataQasymm8.reserve(imageDataFloat.size());
+ // Convert to uint8 image tensor with static cast
+ std::transform(imageDataFloat.begin(), imageDataFloat.end(), std::back_inserter(imageDataQasymm8),
+ [](float val) { return static_cast<uint8_t>(val); });
+ return imageDataQasymm8;
+}
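+
+// Note: both integer conversions above use static_cast, which truncates towards zero and does
+// not clamp, so the normalization parameters are expected to keep the float values within the
+// target type's range (e.g. [0, 255] for qasymm8).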
+
+// Write image tensor to ofstream
+template <typename ElemType>
+void WriteImageTensorImpl(const std::vector<ElemType>& imageData, std::ofstream& imageTensorFile)
+{
+ std::copy(imageData.begin(), imageData.end(), std::ostream_iterator<ElemType>(imageTensorFile, " "));
+}
\ No newline at end of file
// SPDX-License-Identifier: MIT
//
+#include "../ImageTensorGenerator/ImageTensorGenerator.hpp"
+#include "../InferenceTest.hpp"
#include "ModelAccuracyChecker.hpp"
-#include "../ImagePreprocessor.hpp"
#include "armnnDeserializer/IDeserializer.hpp"
-#include "../NetworkExecutionUtils/NetworkExecutionUtils.hpp"
#include <boost/filesystem.hpp>
-#include <boost/range/iterator_range.hpp>
#include <boost/program_options/variables_map.hpp>
+#include <boost/range/iterator_range.hpp>
+
+#include <map>
using namespace armnn::test;
std::vector<armnn::BackendId> computeDevice;
std::vector<armnn::BackendId> defaultBackends = {armnn::Compute::CpuAcc, armnn::Compute::CpuRef};
std::string modelPath;
+ std::string modelFormat;
std::string dataDir;
- std::string inputType = "float";
std::string inputName;
+ std::string inputLayout;
std::string outputName;
std::string validationLabelPath;
desc.add_options()
("help,h", "Display help messages")
("model-path,m", po::value<std::string>(&modelPath)->required(), "Path to armnn format model file")
- ("compute,c", po::value<std::vector<armnn::BackendId>>(&computeDevice)->default_value(defaultBackends),
- backendsMessage.c_str())
- ("data-dir,d", po::value<std::string>(&dataDir)->required(),
- "Path to directory containing the ImageNet test data")
- ("input-type,y", po::value(&inputType), "The data type of the input tensors."
- "If unset, defaults to \"float\" for all defined inputs. "
- "Accepted values (float, int or qasymm8)")
+ ("model-format,f", po::value<std::string>(&modelFormat)->required(),
+ "The model format. Supported values: caffe, tensorflow, tflite")
("input-name,i", po::value<std::string>(&inputName)->required(),
"Identifier of the input tensors in the network separated by comma.")
("output-name,o", po::value<std::string>(&outputName)->required(),
"Identifier of the output tensors in the network separated by comma.")
+ ("data-dir,d", po::value<std::string>(&dataDir)->required(),
+ "Path to directory containing the ImageNet test data")
("validation-labels-path,v", po::value<std::string>(&validationLabelPath)->required(),
- "Path to ImageNet Validation Label file");
+ "Path to ImageNet Validation Label file")
+ ("data-layout,l", po::value<std::string>(&inputLayout)->default_value("NHWC"),
+ "Data layout. Supported values: NHWC, NCHW. Default: NHWC")
+ ("compute,c", po::value<std::vector<armnn::BackendId>>(&computeDevice)->default_value(defaultBackends),
+ backendsMessage.c_str());
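+
+ // Example invocation (hypothetical paths):
+ //   ModelAccuracyTool -m mobilenet.armnn -f tflite -i input -o output \
+ //       -d /path/to/imagenet -v validation_labels.txt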
}
catch (const std::exception& e)
{
armnnUtils::ModelAccuracyChecker checker(validationLabels);
using TContainer = boost::variant<std::vector<float>, std::vector<int>, std::vector<uint8_t>>;
- if(ValidateDirectory(dataDir))
+ if (ValidateDirectory(dataDir))
{
InferenceModel<armnnDeserializer::IDeserializer, float>::Params params;
- params.m_ModelPath = modelPath;
- params.m_IsModelBinary = true;
+ params.m_ModelPath = modelPath;
+ params.m_IsModelBinary = true;
params.m_ComputeDevices = computeDevice;
params.m_InputBindings.push_back(inputName);
params.m_OutputBindings.push_back(outputName);
using TParser = armnnDeserializer::IDeserializer;
InferenceModel<TParser, float> model(params, false);
- for (auto & imageEntry : boost::make_iterator_range(directory_iterator(pathToDataDir), {}))
+ // Get input tensor information
+ const armnn::TensorInfo& inputTensorInfo = model.GetInputBindingInfo().second;
+ const armnn::TensorShape& inputTensorShape = inputTensorInfo.GetShape();
+ const armnn::DataType& inputTensorDataType = inputTensorInfo.GetDataType();
+ armnn::DataLayout inputTensorDataLayout;
+ if (inputLayout == "NCHW")
+ {
+ inputTensorDataLayout = armnn::DataLayout::NCHW;
+ }
+ else if (inputLayout == "NHWC")
+ {
+ inputTensorDataLayout = armnn::DataLayout::NHWC;
+ }
+ else
+ {
+ BOOST_LOG_TRIVIAL(fatal) << "Invalid data layout: " << inputLayout;
+ return 1;
+ }
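+ // With NCHW the tensor shape is { N, C, H, W }, so width and height are at indices 3 and 2;
+ // with NHWC it is { N, H, W, C }, so they are at indices 2 and 1.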
+ const unsigned int inputTensorWidth =
+ inputTensorDataLayout == armnn::DataLayout::NCHW ? inputTensorShape[3] : inputTensorShape[2];
+ const unsigned int inputTensorHeight =
+ inputTensorDataLayout == armnn::DataLayout::NCHW ? inputTensorShape[2] : inputTensorShape[1];
+ const unsigned int batchSize = 1;
+ // Get normalisation parameters
+ SupportedFrontend modelFrontend;
+ if (modelFormat == "caffe")
+ {
+ modelFrontend = SupportedFrontend::Caffe;
+ }
+ else if (modelFormat == "tensorflow")
+ {
+ modelFrontend = SupportedFrontend::TensorFlow;
+ }
+ else if (modelFormat == "tflite")
+ {
+ modelFrontend = SupportedFrontend::TFLite;
+ }
+ else
+ {
+ BOOST_LOG_TRIVIAL(fatal) << "Unsupported frontend: " << modelFormat;
+ return 1;
+ }
+ const NormalizationParameters& normParams = GetNormalizationParameters(modelFrontend, inputTensorDataType);
+ for (auto& imageEntry : boost::make_iterator_range(directory_iterator(pathToDataDir), {}))
{
cout << "Processing image: " << imageEntry << "\n";
- std::ifstream inputTensorFile(imageEntry.path().string());
vector<TContainer> inputDataContainers;
vector<TContainer> outputDataContainers;
- if (inputType.compare("float") == 0)
- {
- inputDataContainers.push_back(
- ParseDataArray<armnn::DataType::Float32>(inputTensorFile));
- outputDataContainers = {vector<float>(1001)};
- }
- else if (inputType.compare("int") == 0)
- {
- inputDataContainers.push_back(
- ParseDataArray<armnn::DataType::Signed32>(inputTensorFile));
- outputDataContainers = {vector<int>(1001)};
- }
- else if (inputType.compare("qasymm8") == 0)
- {
- auto inputBinding = model.GetInputBindingInfo();
- inputDataContainers.push_back(
- ParseDataArray<armnn::DataType::QuantisedAsymm8>(
- inputTensorFile,
- inputBinding.second.GetQuantizationScale(),
- inputBinding.second.GetQuantizationOffset()));
- outputDataContainers = {vector<uint8_t >(1001)};
- }
- else
+ const string& imagePath = imageEntry.path().string();
+ switch (inputTensorDataType)
{
- BOOST_LOG_TRIVIAL(fatal) << "Unsupported tensor data type \"" << inputType << "\". ";
- return EXIT_FAILURE;
+ case armnn::DataType::Signed32:
+ inputDataContainers.push_back(
+ PrepareImageTensor<int>(imagePath,
+ inputTensorWidth, inputTensorHeight,
+ normParams,
+ batchSize,
+ inputTensorDataLayout));
+ outputDataContainers = {vector<int>(1001)};
+ break;
+ case armnn::DataType::QuantisedAsymm8:
+ inputDataContainers.push_back(
+ PrepareImageTensor<uint8_t>(imagePath,
+ inputTensorWidth, inputTensorHeight,
+ normParams,
+ batchSize,
+ inputTensorDataLayout));
+ outputDataContainers = {vector<uint8_t>(1001)};
+ break;
+ case armnn::DataType::Float32:
+ default:
+ inputDataContainers.push_back(
+ PrepareImageTensor<float>(imagePath,
+ inputTensorWidth, inputTensorHeight,
+ normParams,
+ batchSize,
+ inputTensorDataLayout));
+ outputDataContainers = {vector<float>(1001)};
+ break;
}
status = runtime->EnqueueWorkload(networkId,