FullyConnectedDescriptor desc;
desc.m_BiasEnabled = false;
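+    // TfLite stores fully connected weights as [numUnits, inputSize],
+    // so flag the weight matrix as transposed for Arm NN.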
+ desc.m_TransposeWeightMatrix = true;
auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
FullyConnectedWithNoBiasFixture()
: FullyConnectedFixture("[ 1, 4, 1, 1 ]", // inputShape
"[ 1, 1 ]", // outputShape
- "[ 4, 1 ]", // filterShape
+ "[ 1, 4 ]", // filterShape
"[ 2, 3, 4, 5 ]") // filterData
{}
};
FullyConnectedWithBiasFixture()
: FullyConnectedFixture("[ 1, 4, 1, 1 ]", // inputShape
"[ 1, 1 ]", // outputShape
- "[ 4, 1 ]", // filterShape
+ "[ 1, 4 ]", // filterShape
"[ 2, 3, 4, 5 ]", // filterData
"[ 1 ]", // biasShape
"[ 10, 0, 0, 0 ]" ) // biasData
ImagePreprocessor.hpp
ImagePreprocessor.cpp)
TfLiteParserTest(TfLiteMobilenetQuantized-Armnn "${TfLiteMobilenetQuantized-Armnn_sources}")
+
+ set(TfLiteVGG16Quantized-Armnn_sources
+ TfLiteVGG16Quantized-Armnn/TfLiteVGG16Quantized-Armnn.cpp
+ ImagePreprocessor.hpp
+ ImagePreprocessor.cpp)
+ TfLiteParserTest(TfLiteVGG16Quantized-Armnn "${TfLiteVGG16Quantized-Armnn_sources}")
endif()
if (BUILD_ONNX_PARSER)
InferenceTestImage::ResizingMethods::BilinearAndNormalized,
m_Mean, m_Stddev);
+    // Duplicate the first image's data across the rest of the batch.
+    // Copy it into a temporary first: std::vector::insert has undefined
+    // behaviour when the source range points into the vector being modified.
+    const std::vector<float> firstImage(result.begin(),
+                                        result.begin() + GetNumImageElements());
+    for (unsigned int i = 1; i < m_BatchSize; i++)
+    {
+        result.insert(result.end(), firstImage.begin(), firstImage.end());
+    }
+
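+    // Convert interleaved (NHWC) image data to planar (NCHW) when the network requires it.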
if (m_DataFormat == DataFormat::NCHW)
{
const armnn::PermutationVector NHWCToArmNN = { 0, 2, 3, 1 };
- armnn::TensorShape dstShape({1, 3, m_Height, m_Width});
+ armnn::TensorShape dstShape({m_BatchSize, 3, m_Height, m_Width});
std::vector<float> tempImage(result.size());
armnnUtils::Permute<float>(dstShape, NHWCToArmNN, result.data(), tempImage.data());
result.swap(tempImage);
int32_t offset=0,
const std::array<float, 3> mean={{0, 0, 0}},
const std::array<float, 3> stddev={{1, 1, 1}},
- DataFormat dataFormat=DataFormat::NHWC)
+ DataFormat dataFormat=DataFormat::NHWC,
+        unsigned int batchSize=1)  // number of images per batch (the image data is duplicated across the batch)
: m_BinaryDirectory(binaryFileDirectory)
, m_Height(height)
, m_Width(width)
+ , m_BatchSize(batchSize)
, m_Scale(scale)
, m_Offset(offset)
, m_ImageSet(imageSet)
std::string m_BinaryDirectory;
unsigned int m_Height;
unsigned int m_Width;
+    unsigned int m_BatchSize;     // number of images per inference batch
// Quantization parameters
float m_Scale;
int32_t m_Offset;
--- /dev/null
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+#include "../InferenceTest.hpp"
+#include "../ImagePreprocessor.hpp"
+#include "armnnTfLiteParser/ITfLiteParser.hpp"
+
+using namespace armnnTfLiteParser;
+
+int main(int argc, char* argv[])
+{
+ int retVal = EXIT_FAILURE;
+ try
+ {
+ // Coverity fix: The following code may throw an exception of type std::length_error.
+ std::vector<ImageSet> imageSet =
+ {
+            // Class numbers in the probability print-out are offset by 1000 due to
+            // the batch size fix (two sets of 1000 class probabilities are flattened
+            // into a single output).
+ {"Dog.jpg", 669},
+ {"Cat.jpg", 669},
+ {"shark.jpg", 669},
+ };
+
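+        // Batch of two 224x224 RGB images.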
+ armnn::TensorShape inputTensorShape({ 2, 224, 224, 3 });
+
+ using DataType = uint8_t;
+ using DatabaseType = ImagePreprocessor<DataType>;
+ using ParserType = armnnTfLiteParser::ITfLiteParser;
+ using ModelType = InferenceModel<ParserType, DataType>;
+
+ // Coverity fix: ClassifierInferenceTestMain() may throw uncaught exceptions.
+ retVal = armnn::test::ClassifierInferenceTestMain<DatabaseType,
+ ParserType>(
+ argc, argv,
+ "vgg_16_u8.tflite", // model name
+ true, // model is binary
+ "content_vgg/concat", // input tensor name
+ "content_vgg/prob", // output tensor name
+ { 0, 1, 2 }, // test images to test with as above
+ [&imageSet](const char* dataDir, const ModelType & model) {
+ // we need to get the input quantization parameters from
+ // the parsed model
+ auto inputBinding = model.GetInputBindingInfo();
+ return DatabaseType(
+ dataDir,
+ 224,
+ 224,
+ imageSet,
+ inputBinding.second.GetQuantizationScale(),
+ inputBinding.second.GetQuantizationOffset(),
+                        {{0, 0, 0}},                     // mean
+                        {{1, 1, 1}},                     // stddev
+                        DatabaseType::DataFormat::NCHW,
+                        2);                              // batch size
+ },
+ &inputTensorShape);
+ }
+ catch (const std::exception& e)
+ {
+ // Coverity fix: BOOST_LOG_TRIVIAL (typically used to report errors) may throw an
+ // exception of type std::length_error.
+ // Using stderr instead in this context as there is no point in nesting try-catch blocks here.
+ std::cerr << "WARNING: " << *argv << ": An error has occurred when running "
+ "the classifier inference tests: " << e.what() << std::endl;
+ }
+ return retVal;
+}