From 169d2f120cc9021f170fede22a448fd6b66fc979 Mon Sep 17 00:00:00 2001
From: Aron Virginas-Tar <Aron.Virginas-Tar@arm.com>
Date: Mon, 1 Jul 2019 19:01:44 +0100
Subject: [PATCH] IVGCVSW-3382 Deprecate ResizeBilinear and use Resize with
 Bilinear method

!android-nn-driver:1451

Signed-off-by: Aron Virginas-Tar <Aron.Virginas-Tar@arm.com>
Change-Id: Ieedbce1f6e95891137a250fdd07e2f7e4e1f4828
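
Callers that used the deprecated AddResizeBilinearLayer can migrate by
filling in a ResizeDescriptor and setting m_Method explicitly. A minimal
sketch (target sizes, data layout, and layer name are illustrative only;
'network' stands for any INetworkPtr):

    armnn::ResizeDescriptor desc;
    desc.m_Method       = armnn::ResizeMethod::Bilinear;
    desc.m_TargetWidth  = 4;
    desc.m_TargetHeight = 2;
    desc.m_DataLayout   = armnn::DataLayout::NCHW;

    // Replaces: network->AddResizeBilinearLayer(oldDesc, "resize");
    armnn::IConnectableLayer* resize = network->AddResizeLayer(desc, "resize");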
---
 Android.mk                                         |  1 -
 CMakeLists.txt                                     |  2 -
 include/armnn/ILayerSupport.hpp                    |  1 +
 include/armnn/ILayerVisitor.hpp                    |  1 +
 include/armnn/INetwork.hpp                         |  1 +
 include/armnn/LayerSupport.hpp                     |  9 +++
 src/armnn/InternalTypes.cpp                        |  1 -
 src/armnn/InternalTypes.hpp                        |  1 -
 src/armnn/LayerSupport.cpp                         | 20 +++++-
 src/armnn/LayersFwd.hpp                            |  2 -
 src/armnn/Network.cpp                              | 14 ++--
 src/armnn/Network.hpp                              |  1 +
 src/armnn/QuantizerVisitor.cpp                     | 12 ++--
 src/armnn/QuantizerVisitor.hpp                     |  9 +--
 src/armnn/layers/ResizeBilinearLayer.cpp           | 74 ----------------------
 src/armnn/layers/ResizeBilinearLayer.hpp           | 49 --------------
 src/armnn/test/CreateWorkload.hpp                  | 29 +++++----
 src/armnn/test/OptimizerTests.cpp                  | 13 ++--
 src/armnn/test/QuantizerTest.cpp                   | 48 +-------------
 .../test/TestNameAndDescriptorLayerVisitor.cpp     | 18 +++---
 .../test/TestNameAndDescriptorLayerVisitor.hpp     | 30 +++++----
 src/armnnDeserializer/Deserializer.cpp             |  9 +--
 src/armnnSerializer/Serializer.hpp                 |  9 +--
 src/armnnSerializer/test/SerializerTests.cpp       | 67 ++------------------
 src/armnnTfLiteParser/TfLiteParser.cpp             |  9 +--
 src/armnnTfParser/TfParser.cpp                     |  9 +--
 src/backends/backendsCommon/LayerSupportBase.hpp   |  9 +--
 src/backends/backendsCommon/WorkloadFactory.cpp    | 11 +---
 src/backends/backendsCommon/WorkloadFactory.hpp    |  7 +-
 .../test/IsLayerSupportedTestImpl.hpp              |  2 -
 src/backends/backendsCommon/test/LayerTests.hpp    | 30 ++++++---
 src/backends/cl/ClLayerSupport.cpp                 | 18 ++++++
 src/backends/cl/ClLayerSupport.hpp                 |  6 ++
 src/backends/cl/ClWorkloadFactory.cpp              | 19 ++++++
 src/backends/cl/ClWorkloadFactory.hpp              |  4 ++
 src/backends/cl/test/ClCreateWorkloadTests.cpp     |  5 +-
 src/backends/neon/NeonLayerSupport.cpp             | 16 +++++
 src/backends/neon/NeonLayerSupport.hpp             |  6 ++
 src/backends/neon/NeonWorkloadFactory.cpp          | 19 ++++++
 src/backends/neon/NeonWorkloadFactory.hpp          |  4 ++
 src/backends/reference/RefWorkloadFactory.cpp      | 12 ++--
 .../reference/test/RefCreateWorkloadTests.cpp      |  8 +--
 42 files changed, 264 insertions(+), 351 deletions(-)
 delete mode 100644 src/armnn/layers/ResizeBilinearLayer.cpp
 delete mode 100644 src/armnn/layers/ResizeBilinearLayer.hpp

diff --git a/Android.mk b/Android.mk
index f57f3b6..e2daeea 100644
--- a/Android.mk
+++ b/Android.mk
@@ -126,7 +126,6 @@ LOCAL_SRC_FILES := \
         src/armnn/layers/PreluLayer.cpp \
         src/armnn/layers/QuantizeLayer.cpp \
         src/armnn/layers/ReshapeLayer.cpp \
-        src/armnn/layers/ResizeBilinearLayer.cpp \
         src/armnn/layers/ResizeLayer.cpp \
         src/armnn/layers/RsqrtLayer.cpp \
         src/armnn/layers/SpaceToBatchNdLayer.cpp \
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 52f1e1f..dc34b1a 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -300,8 +300,6 @@ list(APPEND armnn_sources
     src/armnn/layers/PreluLayer.cpp
     src/armnn/layers/ReshapeLayer.hpp
     src/armnn/layers/ReshapeLayer.cpp
-    src/armnn/layers/ResizeBilinearLayer.hpp
-    src/armnn/layers/ResizeBilinearLayer.cpp
     src/armnn/layers/ResizeLayer.hpp
     src/armnn/layers/ResizeLayer.cpp
     src/armnn/layers/RsqrtLayer.cpp
diff --git a/include/armnn/ILayerSupport.hpp b/include/armnn/ILayerSupport.hpp
index 635b9cc..58722fe 100644
--- a/include/armnn/ILayerSupport.hpp
+++ b/include/armnn/ILayerSupport.hpp
@@ -251,6 +251,7 @@ public:
                                     const ReshapeDescriptor& descriptor,
                                     Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;

+    ARMNN_DEPRECATED_MSG("Use IsResizeSupported instead")
     virtual bool IsResizeBilinearSupported(const TensorInfo& input,
                                            const TensorInfo& output,
                                            Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
diff --git a/include/armnn/ILayerVisitor.hpp b/include/armnn/ILayerVisitor.hpp
index 37cd383..86cf4a3 100644
--- a/include/armnn/ILayerVisitor.hpp
+++ b/include/armnn/ILayerVisitor.hpp
@@ -314,6 +314,7 @@ public:
     /// @param layer - pointer to the layer which is calling back to this visit function.
     /// @param resizeDesc - Parameters for the resize operation.
     /// @param name - Optional name for the layer.
+    ARMNN_DEPRECATED_MSG("Use VisitResizeLayer instead")
     virtual void VisitResizeBilinearLayer(const IConnectableLayer* layer,
                                           const ResizeBilinearDescriptor& resizeDesc,
                                           const char* name = nullptr) = 0;
diff --git a/include/armnn/INetwork.hpp b/include/armnn/INetwork.hpp
index 598e1eb..1f1b510 100644
--- a/include/armnn/INetwork.hpp
+++ b/include/armnn/INetwork.hpp
@@ -293,6 +293,7 @@ public:
     /// @param resizeDesc - Parameters for the resize operation.
     /// @param name - Optional name for the layer.
    /// @return - Interface for configuring the layer.
+    ARMNN_DEPRECATED_MSG("Use AddResizeLayer instead")
     virtual IConnectableLayer* AddResizeBilinearLayer(const ResizeBilinearDescriptor& resizeDesc,
                                                       const char* name = nullptr) = 0;
diff --git a/include/armnn/LayerSupport.hpp b/include/armnn/LayerSupport.hpp
index f0dca77..35336ed 100644
--- a/include/armnn/LayerSupport.hpp
+++ b/include/armnn/LayerSupport.hpp
@@ -305,6 +305,7 @@ bool IsReshapeSupported(const BackendId& backend,
                         size_t reasonIfUnsupportedMaxLength = 1024);

 /// Deprecated in favor of IBackend and ILayerSupport interfaces
+ARMNN_DEPRECATED_MSG("Use IsResizeSupported instead")
 bool IsResizeBilinearSupported(const BackendId& backend,
                                const TensorInfo& input,
                                const TensorInfo& output,
@@ -312,6 +313,14 @@ bool IsResizeBilinearSupported(const BackendId& backend,
                                size_t reasonIfUnsupportedMaxLength = 1024);

 /// Deprecated in favor of IBackend and ILayerSupport interfaces
+bool IsResizeSupported(const BackendId& backend,
+                       const TensorInfo& input,
+                       const TensorInfo& output,
+                       const ResizeDescriptor& descriptor,
+                       char* reasonIfUnsupported = nullptr,
+                       size_t reasonIfUnsupportedMaxLength = 1024);
+
+/// Deprecated in favor of IBackend and ILayerSupport interfaces
 bool IsRsqrtSupported(const BackendId& backend,
                       const TensorInfo& input,
                       const TensorInfo& output,
diff --git a/src/armnn/InternalTypes.cpp b/src/armnn/InternalTypes.cpp
index 393e744..417581f 100644
--- a/src/armnn/InternalTypes.cpp
+++ b/src/armnn/InternalTypes.cpp
@@ -52,7 +52,6 @@ char const* GetLayerTypeAsCString(LayerType type)
         case LayerType::Reshape: return "Reshape";
         case LayerType::Rsqrt: return "Rsqrt";
         case LayerType::Resize: return "Resize";
-        case LayerType::ResizeBilinear: return "ResizeBilinear";
         case LayerType::Softmax: return "Softmax";
         case LayerType::SpaceToBatchNd: return "SpaceToBatchNd";
         case LayerType::Splitter: return "Splitter";
diff --git a/src/armnn/InternalTypes.hpp b/src/armnn/InternalTypes.hpp
index 6c49eac..b097265 100644
--- a/src/armnn/InternalTypes.hpp
+++ b/src/armnn/InternalTypes.hpp
@@ -52,7 +52,6 @@ enum class LayerType
     Prelu,
     Quantize,
     Reshape,
-    ResizeBilinear,
     Resize,
     Rsqrt,
     Softmax,
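
The deprecated query now funnels into the descriptor-based overload added in
LayerSupport.cpp below. A hedged usage sketch of the new free function
(backend id, tensor infos, and target sizes are placeholders):

    armnn::ResizeDescriptor resizeDesc;
    resizeDesc.m_Method       = armnn::ResizeMethod::Bilinear;
    resizeDesc.m_TargetWidth  = 4;
    resizeDesc.m_TargetHeight = 2;

    char reason[1024];
    bool supported = armnn::IsResizeSupported(armnn::BackendId("CpuRef"),
                                              inputInfo, outputInfo, resizeDesc,
                                              reason, sizeof(reason));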
diff --git a/src/armnn/LayerSupport.cpp b/src/armnn/LayerSupport.cpp
index d1583a5..b2ca85c 100644
--- a/src/armnn/LayerSupport.cpp
+++ b/src/armnn/LayerSupport.cpp
@@ -507,13 +507,31 @@ bool IsReshapeSupported(const BackendId& backend,
     FORWARD_LAYER_SUPPORT_FUNC(backend, IsReshapeSupported, input, descriptor);
 }

+bool IsResizeSupported(const BackendId& backend,
+                       const TensorInfo& input,
+                       const TensorInfo& output,
+                       const ResizeDescriptor& descriptor,
+                       char* reasonIfUnsupported,
+                       size_t reasonIfUnsupportedMaxLength)
+{
+    FORWARD_LAYER_SUPPORT_FUNC(backend, IsResizeSupported, input, output, descriptor);
+}
+
+ARMNN_DEPRECATED_MSG("Use IsResizeSupported instead")
 bool IsResizeBilinearSupported(const BackendId& backend,
                                const TensorInfo& input,
                                const TensorInfo& output,
                                char* reasonIfUnsupported,
                                size_t reasonIfUnsupportedMaxLength)
 {
-    FORWARD_LAYER_SUPPORT_FUNC(backend, IsResizeBilinearSupported, input, output);
+    ResizeDescriptor descriptor;
+    descriptor.m_Method = ResizeMethod::Bilinear;
+
+    const TensorShape& outputShape = output.GetShape();
+    descriptor.m_TargetWidth  = outputShape[3];
+    descriptor.m_TargetHeight = outputShape[2];
+
+    FORWARD_LAYER_SUPPORT_FUNC(backend, IsResizeSupported, input, output, descriptor);
 }

 bool IsRsqrtSupported(const BackendId& backend,
diff --git a/src/armnn/LayersFwd.hpp b/src/armnn/LayersFwd.hpp
index 2e049ec..0f9633a 100644
--- a/src/armnn/LayersFwd.hpp
+++ b/src/armnn/LayersFwd.hpp
@@ -44,7 +44,6 @@
 #include "layers/PreluLayer.hpp"
 #include "layers/QuantizeLayer.hpp"
 #include "layers/ReshapeLayer.hpp"
-#include "layers/ResizeBilinearLayer.hpp"
 #include "layers/ResizeLayer.hpp"
 #include "layers/RsqrtLayer.hpp"
 #include "layers/SoftmaxLayer.hpp"
@@ -122,7 +121,6 @@ DECLARE_LAYER(Prelu)
 DECLARE_LAYER(Quantize)
 DECLARE_LAYER(Reshape)
 DECLARE_LAYER(Resize)
-DECLARE_LAYER(ResizeBilinear)
 DECLARE_LAYER(Rsqrt)
 DECLARE_LAYER(Softmax)
 DECLARE_LAYER(SpaceToBatchNd)
diff --git a/src/armnn/Network.cpp b/src/armnn/Network.cpp
index 63432da..f9115ea 100644
--- a/src/armnn/Network.cpp
+++ b/src/armnn/Network.cpp
@@ -1182,16 +1182,22 @@ IConnectableLayer* Network::AddBatchNormalizationLayer(const BatchNormalizationD
     return layer;
 }

-IConnectableLayer* Network::AddResizeBilinearLayer(const ResizeBilinearDescriptor&
-resizeDescriptor, const char* name)
+IConnectableLayer* Network::AddResizeBilinearLayer(const ResizeBilinearDescriptor& descriptor,
+                                                   const char* name)
 {
-    return m_Graph->AddLayer<ResizeBilinearLayer>(resizeDescriptor,name);
+    ResizeDescriptor resizeDescriptor;
+    resizeDescriptor.m_Method       = ResizeMethod::Bilinear;
+    resizeDescriptor.m_DataLayout   = descriptor.m_DataLayout;
+    resizeDescriptor.m_TargetWidth  = descriptor.m_TargetWidth;
+    resizeDescriptor.m_TargetHeight = descriptor.m_TargetHeight;
+
+    return m_Graph->AddLayer<ResizeLayer>(resizeDescriptor, name);
 }

 IConnectableLayer* Network::AddResizeLayer(const ResizeDescriptor&
 resizeDescriptor, const char* name)
 {
-    return m_Graph->AddLayer<ResizeLayer>(resizeDescriptor,name);
+    return m_Graph->AddLayer<ResizeLayer>(resizeDescriptor, name);
 }

 IConnectableLayer* Network::AddL2NormalizationLayer(const L2NormalizationDescriptor& desc,
diff --git a/src/armnn/Network.hpp b/src/armnn/Network.hpp
index f0dfb1d..7fc5b65 100644
--- a/src/armnn/Network.hpp
+++ b/src/armnn/Network.hpp
@@ -134,6 +134,7 @@ public:
                                                     const ConstTensor& gamma,
                                                     const char* name = nullptr) override;

+    ARMNN_DEPRECATED_MSG("Use AddResizeLayer instead")
     IConnectableLayer* AddResizeBilinearLayer(const ResizeBilinearDescriptor& resizeDesc,
                                               const char* name = nullptr) override;

diff --git a/src/armnn/QuantizerVisitor.cpp b/src/armnn/QuantizerVisitor.cpp
index f2e0506..37c2541 100644
--- a/src/armnn/QuantizerVisitor.cpp
+++ b/src/armnn/QuantizerVisitor.cpp
@@ -377,12 +377,16 @@ void QuantizerVisitor::VisitReshapeLayer(const IConnectableLayer* layer,
 }

 void QuantizerVisitor::VisitResizeBilinearLayer(const IConnectableLayer* layer,
-                                                const ResizeBilinearDescriptor& resizeDesc,
+                                                const ResizeBilinearDescriptor& resizeBilinearDescriptor,
                                                 const char* name)
 {
-    IConnectableLayer* newLayer = m_QuantizedNetwork->AddResizeBilinearLayer(resizeDesc, name);
-    RecordLayer(layer, newLayer);
-    SetQuantizedInputConnections(layer, newLayer);
+    ResizeDescriptor resizeDescriptor;
+    resizeDescriptor.m_Method       = ResizeMethod::Bilinear;
+    resizeDescriptor.m_TargetWidth  = resizeBilinearDescriptor.m_TargetWidth;
+    resizeDescriptor.m_TargetHeight = resizeBilinearDescriptor.m_TargetHeight;
+    resizeDescriptor.m_DataLayout   = resizeBilinearDescriptor.m_DataLayout;
+
+    VisitResizeLayer(layer, resizeDescriptor, name);
 }

 void QuantizerVisitor::VisitResizeLayer(const IConnectableLayer* layer,
diff --git a/src/armnn/QuantizerVisitor.hpp b/src/armnn/QuantizerVisitor.hpp
index 26158c3..688eea6 100644
--- a/src/armnn/QuantizerVisitor.hpp
+++ b/src/armnn/QuantizerVisitor.hpp
@@ -110,14 +110,15 @@ public:
                            const ReshapeDescriptor& reshapeDescriptor,
                            const char* name = nullptr) override;

-    void VisitResizeBilinearLayer(const IConnectableLayer* layer,
-                                  const ResizeBilinearDescriptor& resizeDesc,
-                                  const char* name = nullptr) override;
-
     void VisitResizeLayer(const IConnectableLayer* layer,
                           const ResizeDescriptor& resizeDescriptor,
                           const char* name = nullptr) override;

+    ARMNN_DEPRECATED_MSG("Use VisitResizeLayer instead")
+    void VisitResizeBilinearLayer(const IConnectableLayer* layer,
+                                  const ResizeBilinearDescriptor& resizeDesc,
+                                  const char* name = nullptr) override;
+
     void VisitRsqrtLayer(const IConnectableLayer*,
                          const char* name = nullptr) override;
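
The ResizeBilinearDescriptor-to-ResizeDescriptor conversion above recurs in
several hunks of this patch; a hypothetical helper (not part of the patch)
shows the field-for-field mapping in one place:

    // Hypothetical convenience function; every legacy bilinear descriptor
    // maps onto a ResizeDescriptor with m_Method forced to Bilinear.
    armnn::ResizeDescriptor MakeResizeDescriptor(const armnn::ResizeBilinearDescriptor& bilinear)
    {
        armnn::ResizeDescriptor resize;
        resize.m_Method       = armnn::ResizeMethod::Bilinear;
        resize.m_TargetWidth  = bilinear.m_TargetWidth;
        resize.m_TargetHeight = bilinear.m_TargetHeight;
        resize.m_DataLayout   = bilinear.m_DataLayout;
        return resize;
    }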
diff --git a/src/armnn/layers/ResizeBilinearLayer.cpp b/src/armnn/layers/ResizeBilinearLayer.cpp
deleted file mode 100644
index 03fe317..0000000
--- a/src/armnn/layers/ResizeBilinearLayer.cpp
+++ /dev/null
@@ -1,74 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-#include "ResizeBilinearLayer.hpp"
-
-#include "LayerCloneBase.hpp"
-
-#include <armnn/TypesUtils.hpp>
-
-#include <backendsCommon/WorkloadData.hpp>
-#include <backendsCommon/WorkloadFactory.hpp>
-
-#include <DataLayoutIndexed.hpp>
-
-using namespace armnnUtils;
-
-namespace armnn
-{
-
-ResizeBilinearLayer::ResizeBilinearLayer(const ResizeBilinearDescriptor& param, const char* name)
-    : LayerWithParameters(1, 1, LayerType::ResizeBilinear, param, name)
-{
-}
-
-std::unique_ptr<IWorkload> ResizeBilinearLayer::CreateWorkload(const Graph& graph,
-                                                               const IWorkloadFactory& factory) const
-{
-    ResizeBilinearQueueDescriptor descriptor;
-    return factory.CreateResizeBilinear(descriptor, PrepInfoAndDesc(descriptor, graph));
-}
-
-ResizeBilinearLayer* ResizeBilinearLayer::Clone(Graph& graph) const
-{
-    return CloneBase<ResizeBilinearLayer>(graph, m_Param, GetName());
-}
-
-std::vector<TensorShape> ResizeBilinearLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
-{
-    BOOST_ASSERT(inputShapes.size() == 1);
-    const TensorShape& inputShape = inputShapes[0];
-    const DataLayoutIndexed dimensionIndices = m_Param.m_DataLayout;
-    unsigned int outWidth = m_Param.m_TargetWidth;
-    unsigned int outHeight = m_Param.m_TargetHeight;
-    unsigned int outChannels = inputShape[dimensionIndices.GetChannelsIndex()];
-    unsigned int outBatch = inputShape[0];
-
-    TensorShape tensorShape = m_Param.m_DataLayout == armnn::DataLayout::NHWC ?
-        TensorShape( { outBatch, outHeight, outWidth, outChannels } ) :
-        TensorShape( { outBatch, outChannels, outHeight, outWidth });
-
-    return std::vector<TensorShape>({ tensorShape });
-}
-
-void ResizeBilinearLayer::ValidateTensorShapesFromInputs()
-{
-    VerifyLayerConnections(1, CHECK_LOCATION());
-
-    auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });
-
-    BOOST_ASSERT(inferredShapes.size() == 1);
-
-    ConditionalThrowIfNotEqual<LayerValidationException>(
-        "ResizeBilinearLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.",
-        GetOutputSlot(0).GetTensorInfo().GetShape(),
-        inferredShapes[0]);
-}
-
-void ResizeBilinearLayer::Accept(ILayerVisitor& visitor) const
-{
-    visitor.VisitResizeBilinearLayer(this, GetParameters(), GetName());
-}
-
-} // namespace armnn
diff --git a/src/armnn/layers/ResizeBilinearLayer.hpp b/src/armnn/layers/ResizeBilinearLayer.hpp
deleted file mode 100644
index 4bf264c..0000000
--- a/src/armnn/layers/ResizeBilinearLayer.hpp
+++ /dev/null
@@ -1,49 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-#pragma once
-
-#include "LayerWithParameters.hpp"
-
-namespace armnn
-{
-
-/// This layer represents a resize bilinear operation.
-class ResizeBilinearLayer : public LayerWithParameters<ResizeBilinearDescriptor>
-{
-public:
-    /// Makes a workload for the ResizeBilinear type.
-    /// @param [in] graph The graph where this layer can be found.
-    /// @param [in] factory The workload factory which will create the workload.
-    /// @return A pointer to the created workload, or nullptr if not created.
-    virtual std::unique_ptr<IWorkload> CreateWorkload(const Graph& graph,
-                                                      const IWorkloadFactory& factory) const override;
-
-    /// Creates a dynamically-allocated copy of this layer.
-    /// @param [in] graph The graph into which this layer is being cloned.
-    ResizeBilinearLayer* Clone(Graph& graph) const override;
-
-    /// Check if the input tensor shape(s)
-    /// will lead to a valid configuration of @ref ResizeBilinearLayer.
-    void ValidateTensorShapesFromInputs() override;
-
-    /// By default returns inputShapes if the number of inputs are equal to number of outputs,
-    /// otherwise infers the output shapes from given input shapes and layer properties.
-    /// @param [in] inputShapes The input shapes layer has.
-    /// @return A vector to the inferred output shape.
-    std::vector<TensorShape> InferOutputShapes(const std::vector<TensorShape>& inputShapes) const override;
-
-    void Accept(ILayerVisitor& visitor) const override;
-
-protected:
-    /// Constructor to create a ResizeBilinearLayerLayer.
-    /// @param [in] param ResizeBilinearDescriptor to configure the resize bilinear operation.
-    /// @param [in] name Optional name for the layer.
-    ResizeBilinearLayer(const ResizeBilinearDescriptor& param, const char* name);
-
-    /// Default destructor
-    ~ResizeBilinearLayer() = default;
-};
-
-} // namespace
diff --git a/src/armnn/test/CreateWorkload.hpp b/src/armnn/test/CreateWorkload.hpp
index 0048646..774df6a 100644
--- a/src/armnn/test/CreateWorkload.hpp
+++ b/src/armnn/test/CreateWorkload.hpp
@@ -836,10 +836,10 @@ void CreateSplitterMultipleInputsOneOutputWorkloadTest(armnn::IWorkloadFactory&
     wlActiv1_1 = std::move(workloadActiv1_1);
 }

-template <typename ResizeBilinearWorkload, armnn::DataType DataType>
-std::unique_ptr<ResizeBilinearWorkload> CreateResizeBilinearWorkloadTest(armnn::IWorkloadFactory& factory,
-                                                                         armnn::Graph& graph,
-                                                                         DataLayout dataLayout = DataLayout::NCHW)
+template <typename ResizeWorkload, armnn::DataType DataType>
+std::unique_ptr<ResizeWorkload> CreateResizeBilinearWorkloadTest(armnn::IWorkloadFactory& factory,
+                                                                 armnn::Graph& graph,
+                                                                 DataLayout dataLayout = DataLayout::NCHW)
 {
     TensorShape inputShape;
     TensorShape outputShape;
@@ -856,15 +856,16 @@ std::unique_ptr<ResizeBilinearWorkload> CreateResizeBilinearWorkloadTest(armnn::
     }

     // Creates the layer we're testing.
-    ResizeBilinearDescriptor resizeDesc;
+    ResizeDescriptor resizeDesc;
     armnnUtils::DataLayoutIndexed dimensionIndices = dataLayout;
-    resizeDesc.m_TargetWidth = outputShape[dimensionIndices.GetWidthIndex()];
+    resizeDesc.m_Method       = ResizeMethod::Bilinear;
+    resizeDesc.m_TargetWidth  = outputShape[dimensionIndices.GetWidthIndex()];
     resizeDesc.m_TargetHeight = outputShape[dimensionIndices.GetHeightIndex()];
-    resizeDesc.m_DataLayout = dataLayout;
-    Layer* const layer = graph.AddLayer<ResizeBilinearLayer>(resizeDesc, "layer");
+    resizeDesc.m_DataLayout   = dataLayout;
+    Layer* const layer = graph.AddLayer<ResizeLayer>(resizeDesc, "resize");

     // Creates extra layers.
-    Layer* const input = graph.AddLayer<InputLayer>(0, "input");
+    Layer* const input  = graph.AddLayer<InputLayer>(0, "input");
     Layer* const output = graph.AddLayer<OutputLayer>(0, "output");

     // Connects up.
@@ -875,12 +876,12 @@ std::unique_ptr<ResizeBilinearWorkload> CreateResizeBilinearWorkloadTest(armnn::
     CreateTensorHandles(graph, factory);

     // Makes the workload and checks it.
-    auto workload = MakeAndCheckWorkload<ResizeBilinearWorkload>(*layer, graph, factory);
+    auto workload = MakeAndCheckWorkload<ResizeWorkload>(*layer, graph, factory);

-    ResizeBilinearQueueDescriptor queueDescriptor = workload->GetData();
-    BOOST_TEST(queueDescriptor.m_Inputs.size() == 1);
-    BOOST_TEST(queueDescriptor.m_Outputs.size() == 1);
-    BOOST_TEST((queueDescriptor.m_Parameters.m_DataLayout == dataLayout));
+    auto queueDescriptor = workload->GetData();
+    BOOST_CHECK(queueDescriptor.m_Inputs.size() == 1);
+    BOOST_CHECK(queueDescriptor.m_Outputs.size() == 1);
+    BOOST_CHECK(queueDescriptor.m_Parameters.m_DataLayout == dataLayout);

     // Returns so we can do extra, backend-specific tests.
     return workload;
diff --git a/src/armnn/test/OptimizerTests.cpp b/src/armnn/test/OptimizerTests.cpp
index 97bd8de..b06403c 100644
--- a/src/armnn/test/OptimizerTests.cpp
+++ b/src/armnn/test/OptimizerTests.cpp
@@ -953,18 +953,19 @@ BOOST_AUTO_TEST_CASE(Pooling2dValidateTensorShapesFromInputsNhwc)
 void CreateResizeBilinearGraph(Graph &graph, const unsigned int* inputShape, const unsigned int* outputShape,
                                DataLayout dataLayout = DataLayout::NCHW)
 {
-    armnn::TensorInfo inputInfo(4, inputShape, DataType::Float32);
-    armnn::TensorInfo outputInfo(4, outputShape, DataType::Float32);
+    TensorInfo inputInfo(4, inputShape, DataType::Float32);
+    TensorInfo outputInfo(4, outputShape, DataType::Float32);

-    ResizeBilinearDescriptor desc;
+    ResizeDescriptor desc;
+    desc.m_Method       = ResizeMethod::Bilinear;
     desc.m_TargetHeight = 3;
-    desc.m_TargetWidth = 4;
-    desc.m_DataLayout = dataLayout;
+    desc.m_TargetWidth  = 4;
+    desc.m_DataLayout   = dataLayout;

     Layer* input = graph.AddLayer<InputLayer>(0, "input");
     input->GetOutputSlot().SetTensorInfo(inputInfo);

-    ResizeBilinearLayer* layer = graph.AddLayer<ResizeBilinearLayer>(desc, "resizeBilinear");
+    ResizeLayer* layer = graph.AddLayer<ResizeLayer>(desc, "resizeBilinear");
     layer->GetOutputSlot().SetTensorInfo(outputInfo);

     Layer* output = graph.AddLayer<OutputLayer>(0, "output");
diff --git a/src/armnn/test/QuantizerTest.cpp b/src/armnn/test/QuantizerTest.cpp
index 57f602d..09e71ae 100644
--- a/src/armnn/test/QuantizerTest.cpp
+++ b/src/armnn/test/QuantizerTest.cpp
@@ -1477,52 +1477,6 @@ BOOST_AUTO_TEST_CASE(QuantizeSplitter)
     VisitLayersTopologically(quantizedNetworkQSymm16.get(), validatorQSymm16);
 }

-BOOST_AUTO_TEST_CASE(QuantizeResizeBilinear)
-{
-    class TestResizeBilinearQuantization : public TestLeakyReLuActivationQuantization
-    {
-    public:
-        TestResizeBilinearQuantization(const TensorShape& inputShape, const TensorShape& outputShape)
-        : TestLeakyReLuActivationQuantization(inputShape, outputShape) {}
-
-        TestResizeBilinearQuantization(const QuantizerOptions& options,
-                                       const TensorShape& inputShape,
-                                       const TensorShape& outputShape)
-        : TestLeakyReLuActivationQuantization(options, inputShape, outputShape) {}
-
-        void VisitResizeBilinearLayer(const IConnectableLayer* layer,
-                                      const ResizeBilinearDescriptor& resizeDescriptor,
-                                      const char* name = nullptr) override
-        {
-            CheckForwardedQuantizationSettings(layer);
-        }
-    };
-
-    INetworkPtr network = INetwork::Create();
-
-    const TensorShape shape{1U};
-    TensorInfo info(shape, DataType::Float32);
-
-    IConnectableLayer* activation = CreateStartOfLeakyReluNetwork(network.get(), info);
-
-    // Add the layer under test
-    ResizeBilinearDescriptor descriptor;
-    descriptor.m_TargetHeight = 3;
-    descriptor.m_TargetWidth = 3;
-    IConnectableLayer* spaceToBatch = network->AddResizeBilinearLayer(descriptor);
-
-    CompleteLeakyReluNetwork(network.get(), activation, spaceToBatch, info);
-
-    INetworkPtr quantizedNetworkQAsymm8 = INetworkQuantizer::Create(network.get())->ExportNetwork();
-    TestResizeBilinearQuantization validatorQAsymm8(shape, shape);
-    VisitLayersTopologically(quantizedNetworkQAsymm8.get(), validatorQAsymm8);
-
-    const QuantizerOptions options(DataType::QuantisedSymm16);
-    INetworkPtr quantizedNetworkQSymm16 = INetworkQuantizer::Create(network.get(), options)->ExportNetwork();
-    TestResizeBilinearQuantization validatorQSymm16(options, shape, shape);
-    VisitLayersTopologically(quantizedNetworkQSymm16.get(), validatorQSymm16);
-}
-
 BOOST_AUTO_TEST_CASE(QuantizeResize)
 {
     class TestResizeQuantization : public TestLeakyReLuActivationQuantization
@@ -1556,7 +1510,7 @@ BOOST_AUTO_TEST_CASE(QuantizeResize)
     // Add the layer under test
     ResizeDescriptor descriptor;
     descriptor.m_TargetHeight = 3;
-    descriptor.m_TargetWidth = 3;
+    descriptor.m_TargetWidth  = 3;
     IConnectableLayer* resizeLayer = network->AddResizeLayer(descriptor);

     CompleteLeakyReluNetwork(network.get(), activation, resizeLayer, info);
diff --git a/src/armnn/test/TestNameAndDescriptorLayerVisitor.cpp b/src/armnn/test/TestNameAndDescriptorLayerVisitor.cpp
index 478f029..b841e72 100644
--- a/src/armnn/test/TestNameAndDescriptorLayerVisitor.cpp
+++ b/src/armnn/test/TestNameAndDescriptorLayerVisitor.cpp
@@ -255,30 +255,30 @@ BOOST_AUTO_TEST_CASE(CheckConcatLayerVisitorNameNullAndDescriptor)
     layer->Accept(visitor);
 }

-BOOST_AUTO_TEST_CASE(CheckResizeBilinearLayerVisitorNameAndDescriptor)
+BOOST_AUTO_TEST_CASE(CheckResizeLayerVisitorNameAndDescriptor)
 {
-    const char* layerName = "ResizeBilinearLayer";
-    ResizeBilinearDescriptor descriptor;
+    const char* layerName = "ResizeLayer";
+    ResizeDescriptor descriptor;
     descriptor.m_TargetHeight = 1;
     descriptor.m_TargetWidth  = 1;
     descriptor.m_DataLayout   = DataLayout::NHWC;
-    TestResizeBilinearLayerVisitor visitor(descriptor, layerName);
+    TestResizeLayerVisitor visitor(descriptor, layerName);
     Network net;

-    IConnectableLayer *const layer = net.AddResizeBilinearLayer(descriptor, layerName);
+    IConnectableLayer *const layer = net.AddResizeLayer(descriptor, layerName);
     layer->Accept(visitor);
 }

-BOOST_AUTO_TEST_CASE(CheckResizeBilinearLayerVisitorNameNullAndDescriptor)
+BOOST_AUTO_TEST_CASE(CheckResizeLayerVisitorNameNullAndDescriptor)
 {
-    ResizeBilinearDescriptor descriptor;
+    ResizeDescriptor descriptor;
     descriptor.m_TargetHeight = 1;
     descriptor.m_TargetWidth  = 1;
     descriptor.m_DataLayout   = DataLayout::NHWC;
-    TestResizeBilinearLayerVisitor visitor(descriptor);
+    TestResizeLayerVisitor visitor(descriptor);
     Network net;

-    IConnectableLayer *const layer = net.AddResizeBilinearLayer(descriptor);
+    IConnectableLayer *const layer = net.AddResizeLayer(descriptor);
     layer->Accept(visitor);
 }
diff --git a/src/armnn/test/TestNameAndDescriptorLayerVisitor.hpp b/src/armnn/test/TestNameAndDescriptorLayerVisitor.hpp
index 0db956d..f1936d6 100644
--- a/src/armnn/test/TestNameAndDescriptorLayerVisitor.hpp
+++ b/src/armnn/test/TestNameAndDescriptorLayerVisitor.hpp
@@ -385,33 +385,35 @@ public:
     };
 };

-class TestResizeBilinearLayerVisitor : public TestLayerVisitor
+class TestResizeLayerVisitor : public TestLayerVisitor
 {
 private:
-    ResizeBilinearDescriptor m_VisitorDescriptor;
+    ResizeDescriptor m_VisitorDescriptor;

 public:
-    explicit TestResizeBilinearLayerVisitor(const ResizeBilinearDescriptor& resizeDesc, const char* name = nullptr)
+    explicit TestResizeLayerVisitor(const ResizeDescriptor& descriptor, const char* name = nullptr)
         : TestLayerVisitor(name)
     {
-        m_VisitorDescriptor.m_TargetWidth = resizeDesc.m_TargetWidth;
-        m_VisitorDescriptor.m_TargetHeight = resizeDesc.m_TargetHeight;
-        m_VisitorDescriptor.m_DataLayout = resizeDesc.m_DataLayout;
+        m_VisitorDescriptor.m_Method       = descriptor.m_Method;
+        m_VisitorDescriptor.m_TargetWidth  = descriptor.m_TargetWidth;
+        m_VisitorDescriptor.m_TargetHeight = descriptor.m_TargetHeight;
+        m_VisitorDescriptor.m_DataLayout   = descriptor.m_DataLayout;
     };

-    void CheckDescriptor(const ResizeBilinearDescriptor& resizeDesc)
+    void CheckDescriptor(const ResizeDescriptor& descriptor)
     {
-        BOOST_CHECK_EQUAL(resizeDesc.m_TargetWidth, m_VisitorDescriptor.m_TargetWidth);
-        BOOST_CHECK_EQUAL(resizeDesc.m_TargetHeight, m_VisitorDescriptor.m_TargetHeight);
-        BOOST_CHECK(resizeDesc.m_DataLayout == m_VisitorDescriptor.m_DataLayout);
+        BOOST_CHECK(descriptor.m_Method == m_VisitorDescriptor.m_Method);
+        BOOST_CHECK(descriptor.m_TargetWidth == m_VisitorDescriptor.m_TargetWidth);
+        BOOST_CHECK(descriptor.m_TargetHeight == m_VisitorDescriptor.m_TargetHeight);
+        BOOST_CHECK(descriptor.m_DataLayout == m_VisitorDescriptor.m_DataLayout);
     }

-    void VisitResizeBilinearLayer(const IConnectableLayer* layer,
-                                  const ResizeBilinearDescriptor& resizeDesc,
-                                  const char* name = nullptr) override
+    void VisitResizeLayer(const IConnectableLayer* layer,
+                          const ResizeDescriptor& descriptor,
+                          const char* name = nullptr) override
     {
         CheckLayerPointer(layer);
-        CheckDescriptor(resizeDesc);
+        CheckDescriptor(descriptor);
         CheckLayerName(name);
     };
 };
diff --git a/src/armnnDeserializer/Deserializer.cpp b/src/armnnDeserializer/Deserializer.cpp
index d853a08..461208b 100644
--- a/src/armnnDeserializer/Deserializer.cpp
+++ b/src/armnnDeserializer/Deserializer.cpp
@@ -1699,13 +1699,14 @@ void Deserializer::ParseResizeBilinear(GraphPtr graph, unsigned int layerIndex)

     auto flatBufferDescriptor = graph->layers()->Get(layerIndex)->layer_as_ResizeBilinearLayer()->descriptor();

-    armnn::ResizeBilinearDescriptor descriptor;
-    descriptor.m_TargetWidth = flatBufferDescriptor->targetWidth();
+    armnn::ResizeDescriptor descriptor;
+    descriptor.m_TargetWidth  = flatBufferDescriptor->targetWidth();
     descriptor.m_TargetHeight = flatBufferDescriptor->targetHeight();
-    descriptor.m_DataLayout = ToDataLayout(flatBufferDescriptor->dataLayout());
+    descriptor.m_Method       = armnn::ResizeMethod::Bilinear;
+    descriptor.m_DataLayout   = ToDataLayout(flatBufferDescriptor->dataLayout());

     auto layerName = GetLayerName(graph, layerIndex);
-    IConnectableLayer* layer = m_Network->AddResizeBilinearLayer(descriptor, layerName.c_str());
+    IConnectableLayer* layer = m_Network->AddResizeLayer(descriptor, layerName.c_str());

     armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
     layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
diff --git a/src/armnnSerializer/Serializer.hpp b/src/armnnSerializer/Serializer.hpp
index 2529796..0383d10 100644
--- a/src/armnnSerializer/Serializer.hpp
+++ b/src/armnnSerializer/Serializer.hpp
@@ -170,14 +170,15 @@ public:
                            const armnn::ReshapeDescriptor& reshapeDescriptor,
                            const char* name = nullptr) override;

-    void VisitResizeBilinearLayer(const armnn::IConnectableLayer* layer,
-                                  const armnn::ResizeBilinearDescriptor& resizeDescriptor,
-                                  const char* name = nullptr) override;
-
     void VisitResizeLayer(const armnn::IConnectableLayer* layer,
                           const armnn::ResizeDescriptor& resizeDescriptor,
                           const char* name = nullptr) override;

+    ARMNN_DEPRECATED_MSG("Use VisitResizeLayer instead")
+    void VisitResizeBilinearLayer(const armnn::IConnectableLayer* layer,
+                                  const armnn::ResizeBilinearDescriptor& resizeDescriptor,
+                                  const char* name = nullptr) override;
+
     void VisitRsqrtLayer(const armnn::IConnectableLayer* layer,
                          const char* name = nullptr) override;
diff --git a/src/armnnSerializer/test/SerializerTests.cpp b/src/armnnSerializer/test/SerializerTests.cpp
index e51f76b..285d21c 100644
--- a/src/armnnSerializer/test/SerializerTests.cpp
+++ b/src/armnnSerializer/test/SerializerTests.cpp
@@ -2048,10 +2048,10 @@ BOOST_AUTO_TEST_CASE(SerializeResize)
     private:
         void VerifyDescriptor(const armnn::ResizeDescriptor& descriptor)
         {
-            BOOST_CHECK(descriptor.m_DataLayout == m_Descriptor.m_DataLayout);
-            BOOST_CHECK(descriptor.m_TargetWidth == m_Descriptor.m_TargetWidth);
+            BOOST_CHECK(descriptor.m_DataLayout   == m_Descriptor.m_DataLayout);
+            BOOST_CHECK(descriptor.m_TargetWidth  == m_Descriptor.m_TargetWidth);
             BOOST_CHECK(descriptor.m_TargetHeight == m_Descriptor.m_TargetHeight);
-            BOOST_CHECK(descriptor.m_Method == m_Descriptor.m_Method);
+            BOOST_CHECK(descriptor.m_Method       == m_Descriptor.m_Method);
         }

         armnn::ResizeDescriptor m_Descriptor;
@@ -2062,9 +2062,9 @@ BOOST_AUTO_TEST_CASE(SerializeResize)
     const armnn::TensorInfo outputInfo = armnn::TensorInfo({1, 3, 2, 4}, armnn::DataType::Float32);

     armnn::ResizeDescriptor desc;
-    desc.m_TargetWidth = 4;
+    desc.m_TargetWidth  = 4;
     desc.m_TargetHeight = 2;
-    desc.m_Method = armnn::ResizeMethod::NearestNeighbor;
+    desc.m_Method       = armnn::ResizeMethod::NearestNeighbor;

     armnn::INetworkPtr network = armnn::INetwork::Create();
     armnn::IConnectableLayer* const inputLayer = network->AddInputLayer(0);
@@ -2084,63 +2084,6 @@ BOOST_AUTO_TEST_CASE(SerializeResize)
     deserializedNetwork->Accept(verifier);
 }

-BOOST_AUTO_TEST_CASE(SerializeResizeBilinear)
-{
-    class ResizeBilinearLayerVerifier : public LayerVerifierBase
-    {
-    public:
-        ResizeBilinearLayerVerifier(const std::string& layerName,
-                                    const std::vector<armnn::TensorInfo>& inputInfos,
-                                    const std::vector<armnn::TensorInfo>& outputInfos,
-                                    const armnn::ResizeBilinearDescriptor& descriptor)
-        : LayerVerifierBase(layerName, inputInfos, outputInfos)
-        , m_Descriptor(descriptor) {}
-
-        void VisitResizeBilinearLayer(const armnn::IConnectableLayer* layer,
-                                      const armnn::ResizeBilinearDescriptor& descriptor,
-                                      const char* name) override
-        {
-            VerifyNameAndConnections(layer, name);
-            VerifyDescriptor(descriptor);
-        }
-
-    private:
-        void VerifyDescriptor(const armnn::ResizeBilinearDescriptor& descriptor)
-        {
-            BOOST_TEST(GetDataLayoutName(descriptor.m_DataLayout) == GetDataLayoutName(m_Descriptor.m_DataLayout));
-            BOOST_TEST(descriptor.m_TargetWidth == m_Descriptor.m_TargetWidth);
-            BOOST_TEST(descriptor.m_TargetHeight == m_Descriptor.m_TargetHeight);
-        }
-
-        armnn::ResizeBilinearDescriptor m_Descriptor;
-    };
-
-    const std::string layerName("resizeBilinear");
-    const armnn::TensorInfo inputInfo = armnn::TensorInfo({1, 3, 5, 5}, armnn::DataType::Float32);
-    const armnn::TensorInfo outputInfo = armnn::TensorInfo({1, 3, 2, 4}, armnn::DataType::Float32);
-
-    armnn::ResizeBilinearDescriptor desc;
-    desc.m_TargetWidth = 4;
-    desc.m_TargetHeight = 2;
-
-    armnn::INetworkPtr network = armnn::INetwork::Create();
-    armnn::IConnectableLayer* const inputLayer = network->AddInputLayer(0);
-    armnn::IConnectableLayer* const resizeLayer = network->AddResizeBilinearLayer(desc, layerName.c_str());
-    armnn::IConnectableLayer* const outputLayer = network->AddOutputLayer(0);
-
-    inputLayer->GetOutputSlot(0).Connect(resizeLayer->GetInputSlot(0));
-    resizeLayer->GetOutputSlot(0).Connect(outputLayer->GetInputSlot(0));
-
-    inputLayer->GetOutputSlot(0).SetTensorInfo(inputInfo);
-    resizeLayer->GetOutputSlot(0).SetTensorInfo(outputInfo);
-
-    armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(SerializeNetwork(*network));
-    BOOST_CHECK(deserializedNetwork);
-
-    ResizeBilinearLayerVerifier verifier(layerName, {inputInfo}, {outputInfo}, desc);
-    deserializedNetwork->Accept(verifier);
-}
-
 BOOST_AUTO_TEST_CASE(SerializeRsqrt)
 {
     class RsqrtLayerVerifier : public LayerVerifierBase
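
In both parser hunks below the target size arrives as a two-element tensor
ordered {height, width}, and each parser performs the same mapping; a
condensed sketch using the names from the hunks (sizeTensorData is the
parsed size tensor):

    armnn::ResizeDescriptor desc;
    desc.m_Method       = armnn::ResizeMethod::Bilinear;
    desc.m_TargetHeight = static_cast<uint32_t>(sizeTensorData[0]);
    desc.m_TargetWidth  = static_cast<uint32_t>(sizeTensorData[1]);
    desc.m_DataLayout   = armnn::DataLayout::NHWC;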
diff --git a/src/armnnTfLiteParser/TfLiteParser.cpp b/src/armnnTfLiteParser/TfLiteParser.cpp
index d0d130d..9ee3279 100644
--- a/src/armnnTfLiteParser/TfLiteParser.cpp
+++ b/src/armnnTfLiteParser/TfLiteParser.cpp
@@ -1642,13 +1642,14 @@ void TfLiteParser::ParseResizeBilinear(size_t subgraphIndex, size_t operatorInde
     BufferRawPtr sizeBufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
     ::memcpy(sizeTensorData.data(), sizeBufferPtr->data.data(), sizeTensorInfo.GetNumBytes());

-    ResizeBilinearDescriptor desc;
+    ResizeDescriptor desc;
+    desc.m_Method       = armnn::ResizeMethod::Bilinear;
     desc.m_TargetHeight = static_cast<uint32_t> (sizeTensorData[0]);
-    desc.m_TargetWidth = static_cast<uint32_t> (sizeTensorData[1]);
-    desc.m_DataLayout = armnn::DataLayout::NHWC;
+    desc.m_TargetWidth  = static_cast<uint32_t> (sizeTensorData[1]);
+    desc.m_DataLayout   = armnn::DataLayout::NHWC;

     auto layerName = boost::str(boost::format("ResizeBilinear:%1%:%2%") % subgraphIndex % operatorIndex);
-    IConnectableLayer* layer = m_Network->AddResizeBilinearLayer(desc, layerName.c_str());
+    IConnectableLayer* layer = m_Network->AddResizeLayer(desc, layerName.c_str());

     TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
     layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
diff --git a/src/armnnTfParser/TfParser.cpp b/src/armnnTfParser/TfParser.cpp
index 78f4790..39e6971 100755
--- a/src/armnnTfParser/TfParser.cpp
+++ b/src/armnnTfParser/TfParser.cpp
@@ -2255,12 +2255,13 @@ ParsedTfOperationPtr TfParser::ParseResizeBilinear(const tensorflow::NodeDef& no
     ConstTensor sizeTensor = sizeNode->GetConstTensor(sizeTensorData);

     // The descriptor only has target height and width attributes, which we get from the size tensor.
-    ResizeBilinearDescriptor desc;
+    ResizeDescriptor desc;
+    desc.m_Method       = armnn::ResizeMethod::Bilinear;
     desc.m_TargetHeight = static_cast<uint32_t> (sizeTensorData[0]);
-    desc.m_TargetWidth = static_cast<uint32_t> (sizeTensorData[1]);
-    desc.m_DataLayout = armnn::DataLayout::NHWC;
+    desc.m_TargetWidth  = static_cast<uint32_t> (sizeTensorData[1]);
+    desc.m_DataLayout   = armnn::DataLayout::NHWC;

-    IConnectableLayer* layer = m_Network->AddResizeBilinearLayer(desc, nodeDef.name().c_str());
+    IConnectableLayer* layer = m_Network->AddResizeLayer(desc, nodeDef.name().c_str());

     IOutputSlot& inputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
     TensorInfo inputTensorInfo = inputSlot.GetTensorInfo();
diff --git a/src/backends/backendsCommon/LayerSupportBase.hpp b/src/backends/backendsCommon/LayerSupportBase.hpp
index 8abd975..03a928a 100644
--- a/src/backends/backendsCommon/LayerSupportBase.hpp
+++ b/src/backends/backendsCommon/LayerSupportBase.hpp
@@ -238,15 +238,16 @@ public:
                             const ReshapeDescriptor& descriptor,
                             Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;

-    bool IsResizeBilinearSupported(const TensorInfo& input,
-                                   const TensorInfo& output,
-                                   Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
-
     bool IsResizeSupported(const TensorInfo& input,
                            const TensorInfo& output,
                            const ResizeDescriptor& descriptor,
                            Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;

+    ARMNN_DEPRECATED_MSG("Use IsResizeSupported instead")
+    bool IsResizeBilinearSupported(const TensorInfo& input,
+                                   const TensorInfo& output,
+                                   Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
     bool IsRsqrtSupported(const TensorInfo& input,
                           const TensorInfo& output,
                           Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
diff --git a/src/backends/backendsCommon/WorkloadFactory.cpp b/src/backends/backendsCommon/WorkloadFactory.cpp
index 8ef5985..1aca6bf 100644
--- a/src/backends/backendsCommon/WorkloadFactory.cpp
+++ b/src/backends/backendsCommon/WorkloadFactory.cpp
@@ -674,7 +674,7 @@ bool IWorkloadFactory::IsLayerSupported(const BackendId& backendId,
         case LayerType::Resize:
         {
             auto cLayer = boost::polymorphic_downcast<const ResizeLayer*>(&layer);
-            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
+            const TensorInfo& input  = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
             const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
             result = layerSupportObject->IsResizeSupported(OverrideDataType(input, dataType),
                                                            OverrideDataType(output, dataType),
                                                            cLayer->GetParameters(),
+                                                           reason);
             break;
         }
@@ -682,15 +682,6 @@ bool IWorkloadFactory::IsLayerSupported(const BackendId& backendId,
             reason);
             break;
         }
-        case LayerType::ResizeBilinear:
-        {
-            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
-            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
-            result = layerSupportObject->IsResizeBilinearSupported(OverrideDataType(input, dataType),
-                                                                   OverrideDataType(output, dataType),
-                                                                   reason);
-            break;
-        }
         case LayerType::Rsqrt:
         {
             const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
diff --git a/src/backends/backendsCommon/WorkloadFactory.hpp b/src/backends/backendsCommon/WorkloadFactory.hpp
index 02a8002..e09640f 100644
--- a/src/backends/backendsCommon/WorkloadFactory.hpp
+++ b/src/backends/backendsCommon/WorkloadFactory.hpp
@@ -164,12 +164,13 @@ public:
     virtual std::unique_ptr<IWorkload> CreateReshape(const ReshapeQueueDescriptor& descriptor,
                                                      const WorkloadInfo& info) const;

-    virtual std::unique_ptr<IWorkload> CreateResizeBilinear(const ResizeBilinearQueueDescriptor& descriptor,
-                                                            const WorkloadInfo& info) const;
-
     virtual std::unique_ptr<IWorkload> CreateResize(const ResizeQueueDescriptor& descriptor,
                                                     const WorkloadInfo& info) const;

+    ARMNN_DEPRECATED_MSG("Use CreateResize instead")
+    virtual std::unique_ptr<IWorkload> CreateResizeBilinear(const ResizeBilinearQueueDescriptor& descriptor,
+                                                            const WorkloadInfo& info) const;
+
     virtual std::unique_ptr<IWorkload> CreateRsqrt(const RsqrtQueueDescriptor& descriptor,
                                                    const WorkloadInfo& info) const;
diff --git a/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp b/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp
index 6f3a9d3..b02ab7b 100644
--- a/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp
+++ b/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp
@@ -396,8 +396,6 @@ DECLARE_LAYER_POLICY_1_PARAM(Division)

 DECLARE_LAYER_POLICY_2_PARAM(Resize)

-DECLARE_LAYER_POLICY_2_PARAM(ResizeBilinear)
-
 DECLARE_LAYER_POLICY_2_PARAM(Reshape)

 DECLARE_LAYER_POLICY_1_PARAM(Rsqrt)
diff --git a/src/backends/backendsCommon/test/LayerTests.hpp b/src/backends/backendsCommon/test/LayerTests.hpp
index 66324e1..259ad01 100644
--- a/src/backends/backendsCommon/test/LayerTests.hpp
+++ b/src/backends/backendsCommon/test/LayerTests.hpp
@@ -2557,13 +2557,15 @@ LayerTestResult<T, 4> ResizeBilinearNopTest(
     std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
     std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

-    armnn::ResizeBilinearQueueDescriptor descriptor;
+    armnn::ResizeQueueDescriptor descriptor;
+    descriptor.m_Parameters.m_Method     = armnn::ResizeMethod::Bilinear;
     descriptor.m_Parameters.m_DataLayout = dataLayout;
+
     armnn::WorkloadInfo info;
     AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
     AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

-    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);
+    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResize(descriptor, info);

     inputHandle->Allocate();
     outputHandle->Allocate();
@@ -2655,13 +2657,15 @@ LayerTestResult<T, 4> SimpleResizeBilinearTest(
     std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
     std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

-    armnn::ResizeBilinearQueueDescriptor descriptor;
+    armnn::ResizeQueueDescriptor descriptor;
+    descriptor.m_Parameters.m_Method     = armnn::ResizeMethod::Bilinear;
     descriptor.m_Parameters.m_DataLayout = dataLayout;
+
     armnn::WorkloadInfo info;
     AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
     AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

-    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);
+    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResize(descriptor, info);

     inputHandle->Allocate();
     outputHandle->Allocate();
@@ -2756,13 +2760,15 @@ LayerTestResult<T, 4> ResizeBilinearSqMinTest(
     std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
     std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

-    armnn::ResizeBilinearQueueDescriptor descriptor;
+    armnn::ResizeQueueDescriptor descriptor;
+    descriptor.m_Parameters.m_Method     = armnn::ResizeMethod::Bilinear;
     descriptor.m_Parameters.m_DataLayout = dataLayout;
+
     armnn::WorkloadInfo info;
     AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
     AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

-    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);
+    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResize(descriptor, info);

     inputHandle->Allocate();
     outputHandle->Allocate();
@@ -2852,13 +2858,15 @@ LayerTestResult<T, 4> ResizeBilinearMinTest(
     std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
     std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

-    armnn::ResizeBilinearQueueDescriptor descriptor;
+    armnn::ResizeQueueDescriptor descriptor;
+    descriptor.m_Parameters.m_Method     = armnn::ResizeMethod::Bilinear;
     descriptor.m_Parameters.m_DataLayout = dataLayout;
+
     armnn::WorkloadInfo info;
     AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
     AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

-    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);
+    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResize(descriptor, info);

     inputHandle->Allocate();
     outputHandle->Allocate();
@@ -2955,13 +2963,15 @@ LayerTestResult<T, 4> ResizeBilinearMagTest(
     std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
     std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

-    armnn::ResizeBilinearQueueDescriptor descriptor;
+    armnn::ResizeQueueDescriptor descriptor;
+    descriptor.m_Parameters.m_Method     = armnn::ResizeMethod::Bilinear;
     descriptor.m_Parameters.m_DataLayout = dataLayout;
+
     armnn::WorkloadInfo info;
     AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
     AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

-    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);
+    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResize(descriptor, info);

     inputHandle->Allocate();
     outputHandle->Allocate();
diff --git a/src/backends/cl/ClLayerSupport.cpp b/src/backends/cl/ClLayerSupport.cpp
index ec134a1..d79f612 100644
--- a/src/backends/cl/ClLayerSupport.cpp
+++ b/src/backends/cl/ClLayerSupport.cpp
@@ -591,6 +591,24 @@ bool ClLayerSupport::IsReshapeSupported(const TensorInfo& input,
     return true;
 }

+bool ClLayerSupport::IsResizeSupported(const TensorInfo& input,
+                                       const TensorInfo& output,
+                                       const ResizeDescriptor& descriptor,
+                                       Optional<std::string&> reasonIfUnsupported) const
+{
+    ignore_unused(output);
+
+    if (descriptor.m_Method == ResizeMethod::Bilinear)
+    {
+        return IsSupportedForDataTypeCl(reasonIfUnsupported,
+                                        input.GetDataType(),
+                                        &TrueFunc<>,
+                                        &FalseFuncU8<>);
+    }
+
+    return false;
+}
+
 bool ClLayerSupport::IsResizeBilinearSupported(const TensorInfo& input,
                                                const TensorInfo& output,
                                                Optional<std::string&> reasonIfUnsupported) const
diff --git a/src/backends/cl/ClLayerSupport.hpp b/src/backends/cl/ClLayerSupport.hpp
index 4d0f5bd..1461f41 100644
--- a/src/backends/cl/ClLayerSupport.hpp
+++ b/src/backends/cl/ClLayerSupport.hpp
@@ -198,6 +198,12 @@ public:
                             const ReshapeDescriptor& descriptor,
                             Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;

+    bool IsResizeSupported(const TensorInfo& input,
+                           const TensorInfo& output,
+                           const ResizeDescriptor& descriptor,
+                           Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
+    ARMNN_DEPRECATED_MSG("Use IsResizeSupported instead")
     bool IsResizeBilinearSupported(const TensorInfo& input,
                                    const TensorInfo& output,
                                    Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
diff --git a/src/backends/cl/ClWorkloadFactory.cpp b/src/backends/cl/ClWorkloadFactory.cpp
index 4bce653..c662a9d 100644
--- a/src/backends/cl/ClWorkloadFactory.cpp
+++ b/src/backends/cl/ClWorkloadFactory.cpp
@@ -251,6 +251,25 @@ std::unique_ptr<IWorkload> ClWorkloadFactory::CreateMemCopy(const MemCopy
     return MakeWorkload<CopyMemGenericWorkload>(descriptor, info);
 }

+std::unique_ptr<IWorkload> ClWorkloadFactory::CreateResize(const ResizeQueueDescriptor& descriptor,
+                                                           const WorkloadInfo& info) const
+{
+    if (descriptor.m_Parameters.m_Method == ResizeMethod::Bilinear)
+    {
+        ResizeBilinearQueueDescriptor resizeBilinearDescriptor;
+        resizeBilinearDescriptor.m_Inputs  = descriptor.m_Inputs;
+        resizeBilinearDescriptor.m_Outputs = descriptor.m_Outputs;
+
+        resizeBilinearDescriptor.m_Parameters.m_DataLayout   = descriptor.m_Parameters.m_DataLayout;
+        resizeBilinearDescriptor.m_Parameters.m_TargetWidth  = descriptor.m_Parameters.m_TargetWidth;
+        resizeBilinearDescriptor.m_Parameters.m_TargetHeight = descriptor.m_Parameters.m_TargetHeight;
+
+        return MakeWorkload<ClResizeBilinearFloatWorkload, NullWorkload>(resizeBilinearDescriptor, info);
+    }
+
+    return MakeWorkload<NullWorkload, NullWorkload>(descriptor, info);
+}
+
 std::unique_ptr<IWorkload> ClWorkloadFactory::CreateResizeBilinear(
     const ResizeBilinearQueueDescriptor& descriptor,
     const WorkloadInfo& info) const
diff --git a/src/backends/cl/ClWorkloadFactory.hpp b/src/backends/cl/ClWorkloadFactory.hpp
index 8c3e756..32925f7 100644
--- a/src/backends/cl/ClWorkloadFactory.hpp
+++ b/src/backends/cl/ClWorkloadFactory.hpp
@@ -91,6 +91,10 @@ public:
     std::unique_ptr<IWorkload> CreateMemCopy(const MemCopyQueueDescriptor& descriptor,
                                              const WorkloadInfo& info) const override;

+    std::unique_ptr<IWorkload> CreateResize(const ResizeQueueDescriptor& descriptor,
+                                            const WorkloadInfo& info) const override;
+
+    ARMNN_DEPRECATED_MSG("Use CreateResize instead")
     std::unique_ptr<IWorkload> CreateResizeBilinear(const ResizeBilinearQueueDescriptor& descriptor,
                                                     const WorkloadInfo& info) const override;
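
Backends expose the same check through ILayerSupport, as in the Cl overload
above; probing a backend's layer-support object directly looks like the
following sketch (the layer-support instance, tensor infos, and descriptor
values are placeholders):

    armnn::ClLayerSupport layerSupport;

    armnn::ResizeDescriptor desc;
    desc.m_Method = armnn::ResizeMethod::Bilinear;

    std::string reason;
    bool ok = layerSupport.IsResizeSupported(inputInfo, outputInfo, desc,
                                             armnn::Optional<std::string&>(reason));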
diff --git a/src/backends/cl/test/ClCreateWorkloadTests.cpp b/src/backends/cl/test/ClCreateWorkloadTests.cpp
index d401701..aa1393f 100644
--- a/src/backends/cl/test/ClCreateWorkloadTests.cpp
+++ b/src/backends/cl/test/ClCreateWorkloadTests.cpp
@@ -726,8 +726,9 @@ static void ClResizeBilinearWorkloadTest(DataLayout dataLayout)
     auto workload = CreateResizeBilinearWorkloadTest<ResizeBilinearWorkloadType, DataType>(factory, graph, dataLayout);

     // Checks that inputs/outputs are as we expect them (see definition of CreateResizeBilinearWorkloadTest).
-    ResizeBilinearQueueDescriptor queueDescriptor = workload->GetData();
-    auto inputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Inputs[0]);
+    auto queueDescriptor = workload->GetData();
+
+    auto inputHandle  = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Inputs[0]);
     auto outputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]);

     switch (dataLayout)
diff --git a/src/backends/neon/NeonLayerSupport.cpp b/src/backends/neon/NeonLayerSupport.cpp
index 32027d4..e49ad79 100644
--- a/src/backends/neon/NeonLayerSupport.cpp
+++ b/src/backends/neon/NeonLayerSupport.cpp
@@ -471,6 +471,22 @@ bool NeonLayerSupport::IsReshapeSupported(const TensorInfo& input,
                                       &TrueFunc<>);
 }

+bool NeonLayerSupport::IsResizeSupported(const TensorInfo& input,
+                                         const TensorInfo& output,
+                                         const ResizeDescriptor& descriptor,
+                                         Optional<std::string&> reasonIfUnsupported) const
+{
+    if (descriptor.m_Method == ResizeMethod::Bilinear)
+    {
+        FORWARD_WORKLOAD_VALIDATE_FUNC(NeonResizeBilinearWorkloadValidate,
+                                       reasonIfUnsupported,
+                                       input,
+                                       output);
+    }
+
+    return false;
+}
+
 bool NeonLayerSupport::IsResizeBilinearSupported(const TensorInfo& input,
                                                  const TensorInfo& output,
                                                  Optional<std::string&> reasonIfUnsupported) const
diff --git a/src/backends/neon/NeonLayerSupport.hpp b/src/backends/neon/NeonLayerSupport.hpp
index 1539ffe..781da42 100644
--- a/src/backends/neon/NeonLayerSupport.hpp
+++ b/src/backends/neon/NeonLayerSupport.hpp
@@ -157,6 +157,12 @@ public:
                             const ReshapeDescriptor& descriptor,
                             Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;

+    bool IsResizeSupported(const TensorInfo& input,
+                           const TensorInfo& output,
+                           const ResizeDescriptor& descriptor,
+                           Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
+    ARMNN_DEPRECATED_MSG("Use IsResizeSupported instead")
     bool IsResizeBilinearSupported(const TensorInfo& input,
                                    const TensorInfo& output,
                                    Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
diff --git a/src/backends/neon/NeonWorkloadFactory.cpp b/src/backends/neon/NeonWorkloadFactory.cpp
index d784a48..a991318 100644
--- a/src/backends/neon/NeonWorkloadFactory.cpp
+++ b/src/backends/neon/NeonWorkloadFactory.cpp
@@ -218,6 +218,25 @@ std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateMemCopy(const MemCo
     return MakeWorkloadHelper<CopyMemGenericWorkload, CopyMemGenericWorkload>(descriptor, info);
 }

+std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateResize(const ResizeQueueDescriptor& descriptor,
+                                                             const WorkloadInfo& info) const
+{
+    if (descriptor.m_Parameters.m_Method == ResizeMethod::Bilinear)
+    {
+        ResizeBilinearQueueDescriptor resizeBilinearDescriptor;
+        resizeBilinearDescriptor.m_Inputs  = descriptor.m_Inputs;
+        resizeBilinearDescriptor.m_Outputs = descriptor.m_Outputs;
+
+        resizeBilinearDescriptor.m_Parameters.m_DataLayout   = descriptor.m_Parameters.m_DataLayout;
+        resizeBilinearDescriptor.m_Parameters.m_TargetWidth  = descriptor.m_Parameters.m_TargetWidth;
+        resizeBilinearDescriptor.m_Parameters.m_TargetHeight = descriptor.m_Parameters.m_TargetHeight;
+
+        return std::make_unique<NeonResizeBilinearWorkload>(resizeBilinearDescriptor, info);
+    }
+
+    return MakeWorkloadHelper<NullWorkload, NullWorkload>(descriptor, info);
+}
+
 std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateResizeBilinear(
     const ResizeBilinearQueueDescriptor& descriptor,
     const WorkloadInfo& info) const
diff --git a/src/backends/neon/NeonWorkloadFactory.hpp b/src/backends/neon/NeonWorkloadFactory.hpp
index 4ad52e4..ebd5001 100644
--- a/src/backends/neon/NeonWorkloadFactory.hpp
+++ b/src/backends/neon/NeonWorkloadFactory.hpp
@@ -92,6 +92,10 @@ public:
     std::unique_ptr<IWorkload> CreateMemCopy(const MemCopyQueueDescriptor& descriptor,
                                              const WorkloadInfo& info) const override;

+    std::unique_ptr<IWorkload> CreateResize(const ResizeQueueDescriptor& descriptor,
+                                            const WorkloadInfo& info) const override;
+
+    ARMNN_DEPRECATED_MSG("Use CreateResize instead")
     std::unique_ptr<IWorkload> CreateResizeBilinear(const ResizeBilinearQueueDescriptor& descriptor,
                                                     const WorkloadInfo& info) const override;
diff --git a/src/backends/reference/RefWorkloadFactory.cpp b/src/backends/reference/RefWorkloadFactory.cpp
index 8d2a2b1..b16e856 100644
--- a/src/backends/reference/RefWorkloadFactory.cpp
+++ b/src/backends/reference/RefWorkloadFactory.cpp
@@ -253,11 +253,13 @@ std::unique_ptr<IWorkload> RefWorkloadFactory::CreateResize(const ResizeQueueDes
 std::unique_ptr<IWorkload> RefWorkloadFactory::CreateResizeBilinear(const ResizeBilinearQueueDescriptor& descriptor,
                                                                     const WorkloadInfo& info) const
 {
-    if (IsFloat16(info))
-    {
-        return MakeWorkload<NullWorkload, NullWorkload>(descriptor, info);
-    }
-    return std::make_unique<RefResizeBilinearWorkload>(descriptor, info);
+    ResizeQueueDescriptor resizeDescriptor;
+    resizeDescriptor.m_Parameters.m_Method       = ResizeMethod::Bilinear;
+    resizeDescriptor.m_Parameters.m_DataLayout   = descriptor.m_Parameters.m_DataLayout;
+    resizeDescriptor.m_Parameters.m_TargetWidth  = descriptor.m_Parameters.m_TargetWidth;
+    resizeDescriptor.m_Parameters.m_TargetHeight = descriptor.m_Parameters.m_TargetHeight;
+
+    return CreateResize(resizeDescriptor, info);
 }

 std::unique_ptr<IWorkload> RefWorkloadFactory::CreateFakeQuantization(
diff --git a/src/backends/reference/test/RefCreateWorkloadTests.cpp b/src/backends/reference/test/RefCreateWorkloadTests.cpp
index 9071679..945a874 100644
--- a/src/backends/reference/test/RefCreateWorkloadTests.cpp
+++ b/src/backends/reference/test/RefCreateWorkloadTests.cpp
@@ -643,22 +643,22 @@ static void RefCreateResizeBilinearTest(DataLayout dataLayout)

 BOOST_AUTO_TEST_CASE(CreateResizeBilinearFloat32)
 {
-    RefCreateResizeBilinearTest<RefResizeBilinearWorkload, armnn::DataType::Float32>(DataLayout::NCHW);
+    RefCreateResizeBilinearTest<RefResizeWorkload, armnn::DataType::Float32>(DataLayout::NCHW);
 }

 BOOST_AUTO_TEST_CASE(CreateResizeBilinearUint8)
 {
-    RefCreateResizeBilinearTest<RefResizeBilinearWorkload, armnn::DataType::QuantisedAsymm8>(DataLayout::NCHW);
+    RefCreateResizeBilinearTest<RefResizeWorkload, armnn::DataType::QuantisedAsymm8>(DataLayout::NCHW);
 }

 BOOST_AUTO_TEST_CASE(CreateResizeBilinearQuantisedAsymm16)
 {
-    RefCreateResizeBilinearTest<RefResizeBilinearWorkload, armnn::DataType::QuantisedSymm16>(DataLayout::NCHW);
+    RefCreateResizeBilinearTest<RefResizeWorkload, armnn::DataType::QuantisedSymm16>(DataLayout::NCHW);
 }

 BOOST_AUTO_TEST_CASE(CreateResizeBilinearFloat32Nhwc)
 {
-    RefCreateResizeBilinearTest<RefResizeBilinearWorkload, armnn::DataType::Float32>(DataLayout::NHWC);
+    RefCreateResizeBilinearTest<RefResizeWorkload, armnn::DataType::Float32>(DataLayout::NHWC);
 }

 template
-- 
2.7.4