src/armnn/layers/PreluLayer.cpp \
src/armnn/layers/QuantizeLayer.cpp \
src/armnn/layers/ReshapeLayer.cpp \
- src/armnn/layers/ResizeBilinearLayer.cpp \
src/armnn/layers/ResizeLayer.cpp \
src/armnn/layers/RsqrtLayer.cpp \
src/armnn/layers/SpaceToBatchNdLayer.cpp \
src/armnn/layers/PreluLayer.cpp
src/armnn/layers/ReshapeLayer.hpp
src/armnn/layers/ReshapeLayer.cpp
- src/armnn/layers/ResizeBilinearLayer.hpp
- src/armnn/layers/ResizeBilinearLayer.cpp
src/armnn/layers/ResizeLayer.hpp
src/armnn/layers/ResizeLayer.cpp
src/armnn/layers/RsqrtLayer.cpp
const ReshapeDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
+ ARMNN_DEPRECATED_MSG("Use IsResizeSupported instead")
virtual bool IsResizeBilinearSupported(const TensorInfo& input,
const TensorInfo& output,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
/// @param layer - pointer to the layer which is calling back to this visit function.
/// @param resizeDesc - Parameters for the resize operation.
/// @param name - Optional name for the layer.
+ ARMNN_DEPRECATED_MSG("Use VisitResizeLayer instead")
virtual void VisitResizeBilinearLayer(const IConnectableLayer* layer,
const ResizeBilinearDescriptor& resizeDesc,
const char* name = nullptr) = 0;
/// @param resizeDesc - Parameters for the resize operation.
/// @param name - Optional name for the layer.
/// @return - Interface for configuring the layer.
+ ARMNN_DEPRECATED_MSG("Use AddResizeLayer instead")
virtual IConnectableLayer* AddResizeBilinearLayer(const ResizeBilinearDescriptor& resizeDesc,
const char* name = nullptr) = 0;
size_t reasonIfUnsupportedMaxLength = 1024);
/// Deprecated in favor of IBackend and ILayerSupport interfaces
+ARMNN_DEPRECATED_MSG("Use IsResizeSupported instead")
bool IsResizeBilinearSupported(const BackendId& backend,
const TensorInfo& input,
const TensorInfo& output,
size_t reasonIfUnsupportedMaxLength = 1024);
/// Deprecated in favor of IBackend and ILayerSupport interfaces
+bool IsResizeSupported(const BackendId& backend,
+ const TensorInfo& input,
+ const TensorInfo& output,
+ const ResizeDescriptor& descriptor,
+ char* reasonIfUnsupported = nullptr,
+ size_t reasonIfUnsupportedMaxLength = 1024);
+
+/// Deprecated in favor of IBackend and ILayerSupport interfaces
bool IsRsqrtSupported(const BackendId& backend,
const TensorInfo& input,
const TensorInfo& output,
case LayerType::Reshape: return "Reshape";
case LayerType::Rsqrt: return "Rsqrt";
case LayerType::Resize: return "Resize";
- case LayerType::ResizeBilinear: return "ResizeBilinear";
case LayerType::Softmax: return "Softmax";
case LayerType::SpaceToBatchNd: return "SpaceToBatchNd";
case LayerType::Splitter: return "Splitter";
Prelu,
Quantize,
Reshape,
- ResizeBilinear,
Resize,
Rsqrt,
Softmax,
FORWARD_LAYER_SUPPORT_FUNC(backend, IsReshapeSupported, input, descriptor);
}
+// Backend-support query for the unified Resize operation (method carried in the
+// descriptor). The reasonIfUnsupported buffer/length are consumed inside
+// FORWARD_LAYER_SUPPORT_FUNC by name -- presumably; macro body not visible here.
+bool IsResizeSupported(const BackendId& backend,
+ const TensorInfo& input,
+ const TensorInfo& output,
+ const ResizeDescriptor& descriptor,
+ char* reasonIfUnsupported,
+ size_t reasonIfUnsupportedMaxLength)
+{
+ FORWARD_LAYER_SUPPORT_FUNC(backend, IsResizeSupported, input, output, descriptor);
+}
+
+// NOTE(review): ARMNN_DEPRECATED_MSG on a *definition* is typically rejected or
+// ignored by GCC/Clang when the declaration (in the public header, see the
+// matching hunk above) already carries the attribute. The usual pattern is to
+// keep the macro on the declaration only and wrap the definition in
+// ARMNN_NO_DEPRECATE_WARN_BEGIN/END -- TODO confirm this compiles warning-free.
+ARMNN_DEPRECATED_MSG("Use IsResizeSupported instead")
bool IsResizeBilinearSupported(const BackendId& backend,
const TensorInfo& input,
const TensorInfo& output,
char* reasonIfUnsupported,
size_t reasonIfUnsupportedMaxLength)
{
- FORWARD_LAYER_SUPPORT_FUNC(backend, IsResizeBilinearSupported, input, output);
+ // Legacy shim: translate the bilinear query into the unified Resize check.
+ ResizeDescriptor descriptor;
+ descriptor.m_Method = ResizeMethod::Bilinear;
+
+ // NOTE(review): indices assume an NCHW output shape (H at [2], W at [3]); the
+ // legacy API carries no DataLayout, so an NHWC caller would get swapped target
+ // dimensions here -- TODO confirm this matches the deprecated contract.
+ const TensorShape& outputShape = output.GetShape();
+ descriptor.m_TargetWidth = outputShape[3];
+ descriptor.m_TargetHeight = outputShape[2];
+
+ FORWARD_LAYER_SUPPORT_FUNC(backend, IsResizeSupported, input, output, descriptor);
}
bool IsRsqrtSupported(const BackendId& backend,
#include "layers/PreluLayer.hpp"
#include "layers/QuantizeLayer.hpp"
#include "layers/ReshapeLayer.hpp"
-#include "layers/ResizeBilinearLayer.hpp"
#include "layers/ResizeLayer.hpp"
#include "layers/RsqrtLayer.hpp"
#include "layers/SoftmaxLayer.hpp"
DECLARE_LAYER(Quantize)
DECLARE_LAYER(Reshape)
DECLARE_LAYER(Resize)
-DECLARE_LAYER(ResizeBilinear)
DECLARE_LAYER(Rsqrt)
DECLARE_LAYER(Softmax)
DECLARE_LAYER(SpaceToBatchNd)
return layer;
}
-IConnectableLayer* Network::AddResizeBilinearLayer(const ResizeBilinearDescriptor&
-resizeDescriptor, const char* name)
+// Deprecated entry point kept for backward compatibility: translate the legacy
+// ResizeBilinearDescriptor into the unified ResizeDescriptor and add a plain
+// ResizeLayer, so ResizeBilinearLayer no longer needs to exist in the graph.
+// Note: unlike the LayerSupport shim, m_DataLayout IS forwarded here.
+IConnectableLayer* Network::AddResizeBilinearLayer(const ResizeBilinearDescriptor& descriptor,
+ const char* name)
{
- return m_Graph->AddLayer<ResizeBilinearLayer>(resizeDescriptor,name);
+ ResizeDescriptor resizeDescriptor;
+ resizeDescriptor.m_Method = ResizeMethod::Bilinear;
+ resizeDescriptor.m_DataLayout = descriptor.m_DataLayout;
+ resizeDescriptor.m_TargetWidth = descriptor.m_TargetWidth;
+ resizeDescriptor.m_TargetHeight = descriptor.m_TargetHeight;
+
+ return m_Graph->AddLayer<ResizeLayer>(resizeDescriptor, name);
}
IConnectableLayer* Network::AddResizeLayer(const ResizeDescriptor&
resizeDescriptor, const char* name)
{
- return m_Graph->AddLayer<ResizeLayer>(resizeDescriptor,name);
+ // Whitespace-only change (space after comma); behavior unchanged.
+ return m_Graph->AddLayer<ResizeLayer>(resizeDescriptor, name);
}
IConnectableLayer* Network::AddL2NormalizationLayer(const L2NormalizationDescriptor& desc,
const ConstTensor& gamma,
const char* name = nullptr) override;
+ ARMNN_DEPRECATED_MSG("Use AddResizeLayer instead")
IConnectableLayer* AddResizeBilinearLayer(const ResizeBilinearDescriptor& resizeDesc,
const char* name = nullptr) override;
}
void QuantizerVisitor::VisitResizeBilinearLayer(const IConnectableLayer* layer,
- const ResizeBilinearDescriptor& resizeDesc,
+ const ResizeBilinearDescriptor& resizeBilinearDescriptor,
const char* name)
{
- IConnectableLayer* newLayer = m_QuantizedNetwork->AddResizeBilinearLayer(resizeDesc, name);
- RecordLayer(layer, newLayer);
- SetQuantizedInputConnections(layer, newLayer);
+ // Deprecated visit: translate the legacy descriptor and delegate to
+ // VisitResizeLayer so the quantization bookkeeping (RecordLayer /
+ // SetQuantizedInputConnections) lives in exactly one place.
+ ResizeDescriptor resizeDescriptor;
+ resizeDescriptor.m_Method = ResizeMethod::Bilinear;
+ resizeDescriptor.m_TargetWidth = resizeBilinearDescriptor.m_TargetWidth;
+ resizeDescriptor.m_TargetHeight = resizeBilinearDescriptor.m_TargetHeight;
+ resizeDescriptor.m_DataLayout = resizeBilinearDescriptor.m_DataLayout;
+
+ VisitResizeLayer(layer, resizeDescriptor, name);
}
void QuantizerVisitor::VisitResizeLayer(const IConnectableLayer* layer,
const ReshapeDescriptor& reshapeDescriptor,
const char* name = nullptr) override;
- void VisitResizeBilinearLayer(const IConnectableLayer* layer,
- const ResizeBilinearDescriptor& resizeDesc,
- const char* name = nullptr) override;
-
void VisitResizeLayer(const IConnectableLayer* layer,
const ResizeDescriptor& resizeDescriptor,
const char* name = nullptr) override;
+ ARMNN_DEPRECATED_MSG("Use VisitResizeLayer instead")
+ void VisitResizeBilinearLayer(const IConnectableLayer* layer,
+ const ResizeBilinearDescriptor& resizeDesc,
+ const char* name = nullptr) override;
+
void VisitRsqrtLayer(const IConnectableLayer*,
const char* name = nullptr) override;
+++ /dev/null
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-#include "ResizeBilinearLayer.hpp"
-
-#include "LayerCloneBase.hpp"
-
-#include <armnn/TypesUtils.hpp>
-
-#include <backendsCommon/WorkloadData.hpp>
-#include <backendsCommon/WorkloadFactory.hpp>
-
-#include <DataLayoutIndexed.hpp>
-
-using namespace armnnUtils;
-
-namespace armnn
-{
-
-ResizeBilinearLayer::ResizeBilinearLayer(const ResizeBilinearDescriptor& param, const char* name)
- : LayerWithParameters(1, 1, LayerType::ResizeBilinear, param, name)
-{
-}
-
-std::unique_ptr<IWorkload> ResizeBilinearLayer::CreateWorkload(const Graph& graph,
- const IWorkloadFactory& factory) const
-{
- ResizeBilinearQueueDescriptor descriptor;
- return factory.CreateResizeBilinear(descriptor, PrepInfoAndDesc(descriptor, graph));
-}
-
-ResizeBilinearLayer* ResizeBilinearLayer::Clone(Graph& graph) const
-{
- return CloneBase<ResizeBilinearLayer>(graph, m_Param, GetName());
-}
-
-std::vector<TensorShape> ResizeBilinearLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
-{
- BOOST_ASSERT(inputShapes.size() == 1);
- const TensorShape& inputShape = inputShapes[0];
- const DataLayoutIndexed dimensionIndices = m_Param.m_DataLayout;
- unsigned int outWidth = m_Param.m_TargetWidth;
- unsigned int outHeight = m_Param.m_TargetHeight;
- unsigned int outChannels = inputShape[dimensionIndices.GetChannelsIndex()];
- unsigned int outBatch = inputShape[0];
-
- TensorShape tensorShape = m_Param.m_DataLayout == armnn::DataLayout::NHWC ?
- TensorShape( { outBatch, outHeight, outWidth, outChannels } ) :
- TensorShape( { outBatch, outChannels, outHeight, outWidth });
-
- return std::vector<TensorShape>({ tensorShape });
-}
-
-void ResizeBilinearLayer::ValidateTensorShapesFromInputs()
-{
- VerifyLayerConnections(1, CHECK_LOCATION());
-
- auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });
-
- BOOST_ASSERT(inferredShapes.size() == 1);
-
- ConditionalThrowIfNotEqual<LayerValidationException>(
- "ResizeBilinearLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.",
- GetOutputSlot(0).GetTensorInfo().GetShape(),
- inferredShapes[0]);
-}
-
-void ResizeBilinearLayer::Accept(ILayerVisitor& visitor) const
-{
- visitor.VisitResizeBilinearLayer(this, GetParameters(), GetName());
-}
-
-} // namespace armnn
+++ /dev/null
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-#pragma once
-
-#include "LayerWithParameters.hpp"
-
-namespace armnn
-{
-
-/// This layer represents a resize bilinear operation.
-class ResizeBilinearLayer : public LayerWithParameters<ResizeBilinearDescriptor>
-{
-public:
- /// Makes a workload for the ResizeBilinear type.
- /// @param [in] graph The graph where this layer can be found.
- /// @param [in] factory The workload factory which will create the workload.
- /// @return A pointer to the created workload, or nullptr if not created.
- virtual std::unique_ptr<IWorkload>CreateWorkload(const Graph& graph,
- const IWorkloadFactory& factory) const override;
-
- /// Creates a dynamically-allocated copy of this layer.
- /// @param [in] graph The graph into which this layer is being cloned.
- ResizeBilinearLayer* Clone(Graph& graph) const override;
-
- /// Check if the input tensor shape(s)
- /// will lead to a valid configuration of @ref ResizeBilinearLayer.
- void ValidateTensorShapesFromInputs() override;
-
- /// By default returns inputShapes if the number of inputs are equal to number of outputs,
- /// otherwise infers the output shapes from given input shapes and layer properties.
- /// @param [in] inputShapes The input shapes layer has.
- /// @return A vector to the inferred output shape.
- std::vector<TensorShape> InferOutputShapes(const std::vector<TensorShape>& inputShapes) const override;
-
- void Accept(ILayerVisitor& visitor) const override;
-
-protected:
- /// Constructor to create a ResizeBilinearLayerLayer.
- /// @param [in] param ResizeBilinearDescriptor to configure the resize bilinear operation.
- /// @param [in] name Optional name for the layer.
- ResizeBilinearLayer(const ResizeBilinearDescriptor& param, const char* name);
-
- /// Default destructor
- ~ResizeBilinearLayer() = default;
-};
-
-} // namespace
wlActiv1_1 = std::move(workloadActiv1_1);
}
-template <typename ResizeBilinearWorkload, armnn::DataType DataType>
-std::unique_ptr<ResizeBilinearWorkload> CreateResizeBilinearWorkloadTest(armnn::IWorkloadFactory& factory,
- armnn::Graph& graph,
- DataLayout dataLayout = DataLayout::NCHW)
+template <typename ResizeWorkload, armnn::DataType DataType>
+std::unique_ptr<ResizeWorkload> CreateResizeBilinearWorkloadTest(armnn::IWorkloadFactory& factory,
+ armnn::Graph& graph,
+ DataLayout dataLayout = DataLayout::NCHW)
{
TensorShape inputShape;
TensorShape outputShape;
}
// Creates the layer we're testing.
- ResizeBilinearDescriptor resizeDesc;
+ ResizeDescriptor resizeDesc;
armnnUtils::DataLayoutIndexed dimensionIndices = dataLayout;
- resizeDesc.m_TargetWidth = outputShape[dimensionIndices.GetWidthIndex()];
+ resizeDesc.m_Method = ResizeMethod::Bilinear;
+ resizeDesc.m_TargetWidth = outputShape[dimensionIndices.GetWidthIndex()];
resizeDesc.m_TargetHeight = outputShape[dimensionIndices.GetHeightIndex()];
- resizeDesc.m_DataLayout = dataLayout;
- Layer* const layer = graph.AddLayer<ResizeBilinearLayer>(resizeDesc, "layer");
+ resizeDesc.m_DataLayout = dataLayout;
+ Layer* const layer = graph.AddLayer<ResizeLayer>(resizeDesc, "resize");
// Creates extra layers.
- Layer* const input = graph.AddLayer<InputLayer>(0, "input");
+ Layer* const input = graph.AddLayer<InputLayer>(0, "input");
Layer* const output = graph.AddLayer<OutputLayer>(0, "output");
// Connects up.
CreateTensorHandles(graph, factory);
// Makes the workload and checks it.
- auto workload = MakeAndCheckWorkload<ResizeBilinearWorkload>(*layer, graph, factory);
+ auto workload = MakeAndCheckWorkload<ResizeWorkload>(*layer, graph, factory);
- ResizeBilinearQueueDescriptor queueDescriptor = workload->GetData();
- BOOST_TEST(queueDescriptor.m_Inputs.size() == 1);
- BOOST_TEST(queueDescriptor.m_Outputs.size() == 1);
- BOOST_TEST((queueDescriptor.m_Parameters.m_DataLayout == dataLayout));
+ auto queueDescriptor = workload->GetData();
+ BOOST_CHECK(queueDescriptor.m_Inputs.size() == 1);
+ BOOST_CHECK(queueDescriptor.m_Outputs.size() == 1);
+ BOOST_CHECK(queueDescriptor.m_Parameters.m_DataLayout == dataLayout);
// Returns so we can do extra, backend-specific tests.
return workload;
void CreateResizeBilinearGraph(Graph &graph, const unsigned int* inputShape, const unsigned int* outputShape,
DataLayout dataLayout = DataLayout::NCHW)
{
- armnn::TensorInfo inputInfo(4, inputShape, DataType::Float32);
- armnn::TensorInfo outputInfo(4, outputShape, DataType::Float32);
+ TensorInfo inputInfo(4, inputShape, DataType::Float32);
+ TensorInfo outputInfo(4, outputShape, DataType::Float32);
- ResizeBilinearDescriptor desc;
+ ResizeDescriptor desc;
+ desc.m_Method = ResizeMethod::Bilinear;
desc.m_TargetHeight = 3;
- desc.m_TargetWidth = 4;
- desc.m_DataLayout = dataLayout;
+ desc.m_TargetWidth = 4;
+ desc.m_DataLayout = dataLayout;
Layer* input = graph.AddLayer<InputLayer>(0, "input");
input->GetOutputSlot().SetTensorInfo(inputInfo);
- ResizeBilinearLayer* layer = graph.AddLayer<ResizeBilinearLayer>(desc, "resizeBilinear");
+ ResizeLayer* layer = graph.AddLayer<ResizeLayer>(desc, "resizeBilinear");
layer->GetOutputSlot().SetTensorInfo(outputInfo);
Layer* output = graph.AddLayer<OutputLayer>(0, "output");
VisitLayersTopologically(quantizedNetworkQSymm16.get(), validatorQSymm16);
}
-BOOST_AUTO_TEST_CASE(QuantizeResizeBilinear)
-{
- class TestResizeBilinearQuantization : public TestLeakyReLuActivationQuantization
- {
- public:
- TestResizeBilinearQuantization(const TensorShape& inputShape, const TensorShape& outputShape)
- : TestLeakyReLuActivationQuantization(inputShape, outputShape) {}
-
- TestResizeBilinearQuantization(const QuantizerOptions& options,
- const TensorShape& inputShape,
- const TensorShape& outputShape)
- : TestLeakyReLuActivationQuantization(options, inputShape, outputShape) {}
-
- void VisitResizeBilinearLayer(const IConnectableLayer* layer,
- const ResizeBilinearDescriptor& resizeDescriptor,
- const char* name = nullptr) override
- {
- CheckForwardedQuantizationSettings(layer);
- }
- };
-
- INetworkPtr network = INetwork::Create();
-
- const TensorShape shape{1U};
- TensorInfo info(shape, DataType::Float32);
-
- IConnectableLayer* activation = CreateStartOfLeakyReluNetwork(network.get(), info);
-
- // Add the layer under test
- ResizeBilinearDescriptor descriptor;
- descriptor.m_TargetHeight = 3;
- descriptor.m_TargetWidth = 3;
- IConnectableLayer* spaceToBatch = network->AddResizeBilinearLayer(descriptor);
-
- CompleteLeakyReluNetwork(network.get(), activation, spaceToBatch, info);
-
- INetworkPtr quantizedNetworkQAsymm8 = INetworkQuantizer::Create(network.get())->ExportNetwork();
- TestResizeBilinearQuantization validatorQAsymm8(shape, shape);
- VisitLayersTopologically(quantizedNetworkQAsymm8.get(), validatorQAsymm8);
-
- const QuantizerOptions options(DataType::QuantisedSymm16);
- INetworkPtr quantizedNetworkQSymm16 = INetworkQuantizer::Create(network.get(), options)->ExportNetwork();
- TestResizeBilinearQuantization validatorQSymm16(options, shape, shape);
- VisitLayersTopologically(quantizedNetworkQSymm16.get(), validatorQSymm16);
-}
-
BOOST_AUTO_TEST_CASE(QuantizeResize)
{
class TestResizeQuantization : public TestLeakyReLuActivationQuantization
// Add the layer under test
ResizeDescriptor descriptor;
descriptor.m_TargetHeight = 3;
- descriptor.m_TargetWidth = 3;
+ descriptor.m_TargetWidth = 3;
IConnectableLayer* resizeLayer = network->AddResizeLayer(descriptor);
CompleteLeakyReluNetwork(network.get(), activation, resizeLayer, info);
layer->Accept(visitor);
}
-BOOST_AUTO_TEST_CASE(CheckResizeBilinearLayerVisitorNameAndDescriptor)
+BOOST_AUTO_TEST_CASE(CheckResizeLayerVisitorNameAndDescriptor)
{
- const char* layerName = "ResizeBilinearLayer";
- ResizeBilinearDescriptor descriptor;
+ const char* layerName = "ResizeLayer";
+ ResizeDescriptor descriptor;
descriptor.m_TargetHeight = 1;
descriptor.m_TargetWidth = 1;
descriptor.m_DataLayout = DataLayout::NHWC;
- TestResizeBilinearLayerVisitor visitor(descriptor, layerName);
+ TestResizeLayerVisitor visitor(descriptor, layerName);
Network net;
- IConnectableLayer *const layer = net.AddResizeBilinearLayer(descriptor, layerName);
+ IConnectableLayer *const layer = net.AddResizeLayer(descriptor, layerName);
layer->Accept(visitor);
}
-BOOST_AUTO_TEST_CASE(CheckResizeBilinearLayerVisitorNameNullAndDescriptor)
+BOOST_AUTO_TEST_CASE(CheckResizeLayerVisitorNameNullAndDescriptor)
{
- ResizeBilinearDescriptor descriptor;
+ ResizeDescriptor descriptor;
descriptor.m_TargetHeight = 1;
descriptor.m_TargetWidth = 1;
descriptor.m_DataLayout = DataLayout::NHWC;
- TestResizeBilinearLayerVisitor visitor(descriptor);
+ TestResizeLayerVisitor visitor(descriptor);
Network net;
- IConnectableLayer *const layer = net.AddResizeBilinearLayer(descriptor);
+ IConnectableLayer *const layer = net.AddResizeLayer(descriptor);
layer->Accept(visitor);
}
};
};
-class TestResizeBilinearLayerVisitor : public TestLayerVisitor
+// Test visitor: captures an expected ResizeDescriptor at construction and
+// verifies that VisitResizeLayer is called back with a matching layer pointer,
+// descriptor (now including m_Method), and layer name.
+class TestResizeLayerVisitor : public TestLayerVisitor
{
private:
- ResizeBilinearDescriptor m_VisitorDescriptor;
+ ResizeDescriptor m_VisitorDescriptor;
public:
- explicit TestResizeBilinearLayerVisitor(const ResizeBilinearDescriptor& resizeDesc, const char* name = nullptr)
+ explicit TestResizeLayerVisitor(const ResizeDescriptor& descriptor, const char* name = nullptr)
: TestLayerVisitor(name)
{
- m_VisitorDescriptor.m_TargetWidth = resizeDesc.m_TargetWidth;
- m_VisitorDescriptor.m_TargetHeight = resizeDesc.m_TargetHeight;
- m_VisitorDescriptor.m_DataLayout = resizeDesc.m_DataLayout;
+ m_VisitorDescriptor.m_Method = descriptor.m_Method;
+ m_VisitorDescriptor.m_TargetWidth = descriptor.m_TargetWidth;
+ m_VisitorDescriptor.m_TargetHeight = descriptor.m_TargetHeight;
+ m_VisitorDescriptor.m_DataLayout = descriptor.m_DataLayout;
};
- void CheckDescriptor(const ResizeBilinearDescriptor& resizeDesc)
+ // Field-by-field comparison; BOOST_CHECK (not CHECK_EQUAL) because the enum
+ // members have no stream operator.
+ void CheckDescriptor(const ResizeDescriptor& descriptor)
{
- BOOST_CHECK_EQUAL(resizeDesc.m_TargetWidth, m_VisitorDescriptor.m_TargetWidth);
- BOOST_CHECK_EQUAL(resizeDesc.m_TargetHeight, m_VisitorDescriptor.m_TargetHeight);
- BOOST_CHECK(resizeDesc.m_DataLayout == m_VisitorDescriptor.m_DataLayout);
+ BOOST_CHECK(descriptor.m_Method == m_VisitorDescriptor.m_Method);
+ BOOST_CHECK(descriptor.m_TargetWidth == m_VisitorDescriptor.m_TargetWidth);
+ BOOST_CHECK(descriptor.m_TargetHeight == m_VisitorDescriptor.m_TargetHeight);
+ BOOST_CHECK(descriptor.m_DataLayout == m_VisitorDescriptor.m_DataLayout);
}
- void VisitResizeBilinearLayer(const IConnectableLayer* layer,
- const ResizeBilinearDescriptor& resizeDesc,
- const char* name = nullptr) override
+ void VisitResizeLayer(const IConnectableLayer* layer,
+ const ResizeDescriptor& descriptor,
+ const char* name = nullptr) override
{
CheckLayerPointer(layer);
- CheckDescriptor(resizeDesc);
+ CheckDescriptor(descriptor);
CheckLayerName(name);
};
};
auto flatBufferDescriptor = graph->layers()->Get(layerIndex)->layer_as_ResizeBilinearLayer()->descriptor();
- armnn::ResizeBilinearDescriptor descriptor;
- descriptor.m_TargetWidth = flatBufferDescriptor->targetWidth();
+ armnn::ResizeDescriptor descriptor;
+ descriptor.m_TargetWidth = flatBufferDescriptor->targetWidth();
descriptor.m_TargetHeight = flatBufferDescriptor->targetHeight();
- descriptor.m_DataLayout = ToDataLayout(flatBufferDescriptor->dataLayout());
+ descriptor.m_Method = armnn::ResizeMethod::Bilinear;
+ descriptor.m_DataLayout = ToDataLayout(flatBufferDescriptor->dataLayout());
auto layerName = GetLayerName(graph, layerIndex);
- IConnectableLayer* layer = m_Network->AddResizeBilinearLayer(descriptor, layerName.c_str());
+ IConnectableLayer* layer = m_Network->AddResizeLayer(descriptor, layerName.c_str());
armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
const armnn::ReshapeDescriptor& reshapeDescriptor,
const char* name = nullptr) override;
- void VisitResizeBilinearLayer(const armnn::IConnectableLayer* layer,
- const armnn::ResizeBilinearDescriptor& resizeDescriptor,
- const char* name = nullptr) override;
-
void VisitResizeLayer(const armnn::IConnectableLayer* layer,
const armnn::ResizeDescriptor& resizeDescriptor,
const char* name = nullptr) override;
+ ARMNN_DEPRECATED_MSG("Use VisitResizeLayer instead")
+ void VisitResizeBilinearLayer(const armnn::IConnectableLayer* layer,
+ const armnn::ResizeBilinearDescriptor& resizeDescriptor,
+ const char* name = nullptr) override;
+
void VisitRsqrtLayer(const armnn::IConnectableLayer* layer,
const char* name = nullptr) override;
private:
void VerifyDescriptor(const armnn::ResizeDescriptor& descriptor)
{
- BOOST_CHECK(descriptor.m_DataLayout == m_Descriptor.m_DataLayout);
- BOOST_CHECK(descriptor.m_TargetWidth == m_Descriptor.m_TargetWidth);
+ BOOST_CHECK(descriptor.m_DataLayout == m_Descriptor.m_DataLayout);
+ BOOST_CHECK(descriptor.m_TargetWidth == m_Descriptor.m_TargetWidth);
BOOST_CHECK(descriptor.m_TargetHeight == m_Descriptor.m_TargetHeight);
- BOOST_CHECK(descriptor.m_Method == m_Descriptor.m_Method);
+ BOOST_CHECK(descriptor.m_Method == m_Descriptor.m_Method);
}
armnn::ResizeDescriptor m_Descriptor;
const armnn::TensorInfo outputInfo = armnn::TensorInfo({1, 3, 2, 4}, armnn::DataType::Float32);
armnn::ResizeDescriptor desc;
- desc.m_TargetWidth = 4;
+ desc.m_TargetWidth = 4;
desc.m_TargetHeight = 2;
- desc.m_Method = armnn::ResizeMethod::NearestNeighbor;
+ desc.m_Method = armnn::ResizeMethod::NearestNeighbor;
armnn::INetworkPtr network = armnn::INetwork::Create();
armnn::IConnectableLayer* const inputLayer = network->AddInputLayer(0);
deserializedNetwork->Accept(verifier);
}
-BOOST_AUTO_TEST_CASE(SerializeResizeBilinear)
-{
- class ResizeBilinearLayerVerifier : public LayerVerifierBase
- {
- public:
- ResizeBilinearLayerVerifier(const std::string& layerName,
- const std::vector<armnn::TensorInfo>& inputInfos,
- const std::vector<armnn::TensorInfo>& outputInfos,
- const armnn::ResizeBilinearDescriptor& descriptor)
- : LayerVerifierBase(layerName, inputInfos, outputInfos)
- , m_Descriptor(descriptor) {}
-
- void VisitResizeBilinearLayer(const armnn::IConnectableLayer* layer,
- const armnn::ResizeBilinearDescriptor& descriptor,
- const char* name) override
- {
- VerifyNameAndConnections(layer, name);
- VerifyDescriptor(descriptor);
- }
-
- private:
- void VerifyDescriptor(const armnn::ResizeBilinearDescriptor& descriptor)
- {
- BOOST_TEST(GetDataLayoutName(descriptor.m_DataLayout) == GetDataLayoutName(m_Descriptor.m_DataLayout));
- BOOST_TEST(descriptor.m_TargetWidth == m_Descriptor.m_TargetWidth);
- BOOST_TEST(descriptor.m_TargetHeight == m_Descriptor.m_TargetHeight);
- }
-
- armnn::ResizeBilinearDescriptor m_Descriptor;
- };
-
- const std::string layerName("resizeBilinear");
- const armnn::TensorInfo inputInfo = armnn::TensorInfo({1, 3, 5, 5}, armnn::DataType::Float32);
- const armnn::TensorInfo outputInfo = armnn::TensorInfo({1, 3, 2, 4}, armnn::DataType::Float32);
-
- armnn::ResizeBilinearDescriptor desc;
- desc.m_TargetWidth = 4;
- desc.m_TargetHeight = 2;
-
- armnn::INetworkPtr network = armnn::INetwork::Create();
- armnn::IConnectableLayer* const inputLayer = network->AddInputLayer(0);
- armnn::IConnectableLayer* const resizeLayer = network->AddResizeBilinearLayer(desc, layerName.c_str());
- armnn::IConnectableLayer* const outputLayer = network->AddOutputLayer(0);
-
- inputLayer->GetOutputSlot(0).Connect(resizeLayer->GetInputSlot(0));
- resizeLayer->GetOutputSlot(0).Connect(outputLayer->GetInputSlot(0));
-
- inputLayer->GetOutputSlot(0).SetTensorInfo(inputInfo);
- resizeLayer->GetOutputSlot(0).SetTensorInfo(outputInfo);
-
- armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(SerializeNetwork(*network));
- BOOST_CHECK(deserializedNetwork);
-
- ResizeBilinearLayerVerifier verifier(layerName, {inputInfo}, {outputInfo}, desc);
- deserializedNetwork->Accept(verifier);
-}
-
BOOST_AUTO_TEST_CASE(SerializeRsqrt)
{
class RsqrtLayerVerifier : public LayerVerifierBase
BufferRawPtr sizeBufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
::memcpy(sizeTensorData.data(), sizeBufferPtr->data.data(), sizeTensorInfo.GetNumBytes());
- ResizeBilinearDescriptor desc;
+ ResizeDescriptor desc;
+ desc.m_Method = armnn::ResizeMethod::Bilinear;
desc.m_TargetHeight = static_cast<uint32_t> (sizeTensorData[0]);
- desc.m_TargetWidth = static_cast<uint32_t> (sizeTensorData[1]);
- desc.m_DataLayout = armnn::DataLayout::NHWC;
+ desc.m_TargetWidth = static_cast<uint32_t> (sizeTensorData[1]);
+ desc.m_DataLayout = armnn::DataLayout::NHWC;
auto layerName = boost::str(boost::format("ResizeBilinear:%1%:%2%") % subgraphIndex % operatorIndex);
- IConnectableLayer* layer = m_Network->AddResizeBilinearLayer(desc, layerName.c_str());
+ IConnectableLayer* layer = m_Network->AddResizeLayer(desc, layerName.c_str());
TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
ConstTensor sizeTensor = sizeNode->GetConstTensor(sizeTensorData);
// The descriptor only has target height and width attributes, which we get from the size tensor.
- ResizeBilinearDescriptor desc;
+ ResizeDescriptor desc;
+ desc.m_Method = armnn::ResizeMethod::Bilinear;
desc.m_TargetHeight = static_cast<uint32_t> (sizeTensorData[0]);
- desc.m_TargetWidth = static_cast<uint32_t> (sizeTensorData[1]);
- desc.m_DataLayout = armnn::DataLayout::NHWC;
+ desc.m_TargetWidth = static_cast<uint32_t> (sizeTensorData[1]);
+ desc.m_DataLayout = armnn::DataLayout::NHWC;
- IConnectableLayer* layer = m_Network->AddResizeBilinearLayer(desc, nodeDef.name().c_str());
+ IConnectableLayer* layer = m_Network->AddResizeLayer(desc, nodeDef.name().c_str());
IOutputSlot& inputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
TensorInfo inputTensorInfo = inputSlot.GetTensorInfo();
const ReshapeDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
- bool IsResizeBilinearSupported(const TensorInfo& input,
- const TensorInfo& output,
- Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
-
bool IsResizeSupported(const TensorInfo& input,
const TensorInfo& output,
const ResizeDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+ ARMNN_DEPRECATED_MSG("Use IsResizeSupported instead")
+ bool IsResizeBilinearSupported(const TensorInfo& input,
+ const TensorInfo& output,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
bool IsRsqrtSupported(const TensorInfo& input,
const TensorInfo& output,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
case LayerType::Resize:
{
auto cLayer = boost::polymorphic_downcast<const ResizeLayer*>(&layer);
- const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
+ const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
result = layerSupportObject->IsResizeSupported(OverrideDataType(input, dataType),
OverrideDataType(output, dataType),
reason);
break;
}
- case LayerType::ResizeBilinear:
- {
- const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
- const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
- result = layerSupportObject->IsResizeBilinearSupported(OverrideDataType(input, dataType),
- OverrideDataType(output, dataType),
- reason);
- break;
- }
case LayerType::Rsqrt:
{
const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
virtual std::unique_ptr<IWorkload> CreateReshape(const ReshapeQueueDescriptor& descriptor,
const WorkloadInfo& info) const;
- virtual std::unique_ptr<IWorkload> CreateResizeBilinear(const ResizeBilinearQueueDescriptor& descriptor,
- const WorkloadInfo& info) const;
-
virtual std::unique_ptr<IWorkload> CreateResize(const ResizeQueueDescriptor& descriptor,
const WorkloadInfo& info) const;
+ ARMNN_DEPRECATED_MSG("Use CreateResize instead")
+ virtual std::unique_ptr<IWorkload> CreateResizeBilinear(const ResizeBilinearQueueDescriptor& descriptor,
+ const WorkloadInfo& info) const;
+
virtual std::unique_ptr<IWorkload> CreateRsqrt(const RsqrtQueueDescriptor& descriptor,
const WorkloadInfo& info) const;
DECLARE_LAYER_POLICY_2_PARAM(Resize)
-DECLARE_LAYER_POLICY_2_PARAM(ResizeBilinear)
-
DECLARE_LAYER_POLICY_2_PARAM(Reshape)
DECLARE_LAYER_POLICY_1_PARAM(Rsqrt)
std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
- armnn::ResizeBilinearQueueDescriptor descriptor;
+ armnn::ResizeQueueDescriptor descriptor;
+ descriptor.m_Parameters.m_Method = armnn::ResizeMethod::Bilinear;
descriptor.m_Parameters.m_DataLayout = dataLayout;
+
armnn::WorkloadInfo info;
AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
- std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);
+ std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResize(descriptor, info);
inputHandle->Allocate();
outputHandle->Allocate();
std::unique_ptr <armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
std::unique_ptr <armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
- armnn::ResizeBilinearQueueDescriptor descriptor;
+ armnn::ResizeQueueDescriptor descriptor;
+ descriptor.m_Parameters.m_Method = armnn::ResizeMethod::Bilinear;
descriptor.m_Parameters.m_DataLayout = dataLayout;
+
armnn::WorkloadInfo info;
AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
- std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);
+ std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResize(descriptor, info);
inputHandle->Allocate();
outputHandle->Allocate();
std::unique_ptr <armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
std::unique_ptr <armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
- armnn::ResizeBilinearQueueDescriptor descriptor;
+ armnn::ResizeQueueDescriptor descriptor;
+ descriptor.m_Parameters.m_Method = armnn::ResizeMethod::Bilinear;
descriptor.m_Parameters.m_DataLayout = dataLayout;
+
armnn::WorkloadInfo info;
AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
- std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);
+ std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResize(descriptor, info);
inputHandle->Allocate();
outputHandle->Allocate();
std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
- armnn::ResizeBilinearQueueDescriptor descriptor;
+ armnn::ResizeQueueDescriptor descriptor;
+ descriptor.m_Parameters.m_Method = armnn::ResizeMethod::Bilinear;
descriptor.m_Parameters.m_DataLayout = dataLayout;
+
armnn::WorkloadInfo info;
AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
- std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);
+ std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResize(descriptor, info);
inputHandle->Allocate();
outputHandle->Allocate();
std::unique_ptr <armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
std::unique_ptr <armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
- armnn::ResizeBilinearQueueDescriptor descriptor;
+ armnn::ResizeQueueDescriptor descriptor;
+ descriptor.m_Parameters.m_Method = armnn::ResizeMethod::Bilinear;
descriptor.m_Parameters.m_DataLayout = dataLayout;
+
armnn::WorkloadInfo info;
AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
- std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);
+ std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResize(descriptor, info);
inputHandle->Allocate();
outputHandle->Allocate();
return true;
}
+bool ClLayerSupport::IsResizeSupported(const TensorInfo& input,
+ const TensorInfo& output,
+ const ResizeDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported) const
+{
+ ignore_unused(output);
+
+ if (descriptor.m_Method == ResizeMethod::Bilinear)
+ {
+ return IsSupportedForDataTypeCl(reasonIfUnsupported,
+ input.GetDataType(),
+ &TrueFunc<>,
+ &FalseFuncU8<>);
+ }
+
+ return false;
+}
+
bool ClLayerSupport::IsResizeBilinearSupported(const TensorInfo& input,
const TensorInfo& output,
Optional<std::string&> reasonIfUnsupported) const
const ReshapeDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+ bool IsResizeSupported(const TensorInfo& input,
+ const TensorInfo& output,
+ const ResizeDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
+ ARMNN_DEPRECATED_MSG("Use IsResizeSupported instead")
bool IsResizeBilinearSupported(const TensorInfo& input,
const TensorInfo& output,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
return MakeWorkload<CopyMemGenericWorkload, CopyMemGenericWorkload>(descriptor, info);
}
+std::unique_ptr<armnn::IWorkload> ClWorkloadFactory::CreateResize(const ResizeQueueDescriptor& descriptor,
+ const WorkloadInfo& info) const
+{
+ if (descriptor.m_Parameters.m_Method == ResizeMethod::Bilinear)
+ {
+ ResizeBilinearQueueDescriptor resizeBilinearDescriptor;
+ resizeBilinearDescriptor.m_Inputs = descriptor.m_Inputs;
+ resizeBilinearDescriptor.m_Outputs = descriptor.m_Outputs;
+
+ resizeBilinearDescriptor.m_Parameters.m_DataLayout = descriptor.m_Parameters.m_DataLayout;
+ resizeBilinearDescriptor.m_Parameters.m_TargetWidth = descriptor.m_Parameters.m_TargetWidth;
+ resizeBilinearDescriptor.m_Parameters.m_TargetHeight = descriptor.m_Parameters.m_TargetHeight;
+
+ return MakeWorkload<ClResizeBilinearFloatWorkload, NullWorkload>(resizeBilinearDescriptor, info);
+ }
+
+ return MakeWorkload<NullWorkload, NullWorkload>(descriptor, info);
+}
+
std::unique_ptr<armnn::IWorkload> ClWorkloadFactory::CreateResizeBilinear(
const ResizeBilinearQueueDescriptor& descriptor,
const WorkloadInfo& info) const
std::unique_ptr<IWorkload> CreateMemCopy(const MemCopyQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
+ std::unique_ptr<IWorkload> CreateResize(const ResizeQueueDescriptor& descriptor,
+ const WorkloadInfo& info) const override;
+
+ ARMNN_DEPRECATED_MSG("Use CreateResize instead")
std::unique_ptr<IWorkload> CreateResizeBilinear(const ResizeBilinearQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
auto workload = CreateResizeBilinearWorkloadTest<ResizeBilinearWorkloadType, DataType>(factory, graph, dataLayout);
// Checks that inputs/outputs are as we expect them (see definition of CreateResizeBilinearWorkloadTest).
- ResizeBilinearQueueDescriptor queueDescriptor = workload->GetData();
- auto inputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Inputs[0]);
+ auto queueDescriptor = workload->GetData();
+
+ auto inputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Inputs[0]);
auto outputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]);
switch (dataLayout)
&TrueFunc<>);
}
+bool NeonLayerSupport::IsResizeSupported(const TensorInfo& input,
+ const TensorInfo& output,
+ const ResizeDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported) const
+{
+ if (descriptor.m_Method == ResizeMethod::Bilinear)
+ {
+ FORWARD_WORKLOAD_VALIDATE_FUNC(NeonResizeBilinearWorkloadValidate,
+ reasonIfUnsupported,
+ input,
+ output);
+ }
+
+ return false;
+}
+
bool NeonLayerSupport::IsResizeBilinearSupported(const TensorInfo& input,
const TensorInfo& output,
Optional<std::string&> reasonIfUnsupported) const
const ReshapeDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+ bool IsResizeSupported(const TensorInfo& input,
+ const TensorInfo& output,
+ const ResizeDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
+ ARMNN_DEPRECATED_MSG("Use IsResizeSupported instead")
bool IsResizeBilinearSupported(const TensorInfo& input,
const TensorInfo& output,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
return MakeWorkloadHelper<CopyMemGenericWorkload, CopyMemGenericWorkload>(descriptor, info);
}
+std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateResize(const ResizeQueueDescriptor& descriptor,
+ const WorkloadInfo& info) const
+{
+ if (descriptor.m_Parameters.m_Method == ResizeMethod::Bilinear)
+ {
+ ResizeBilinearQueueDescriptor resizeBilinearDescriptor;
+ resizeBilinearDescriptor.m_Inputs = descriptor.m_Inputs;
+ resizeBilinearDescriptor.m_Outputs = descriptor.m_Outputs;
+
+ resizeBilinearDescriptor.m_Parameters.m_DataLayout = descriptor.m_Parameters.m_DataLayout;
+ resizeBilinearDescriptor.m_Parameters.m_TargetWidth = descriptor.m_Parameters.m_TargetWidth;
+ resizeBilinearDescriptor.m_Parameters.m_TargetHeight = descriptor.m_Parameters.m_TargetHeight;
+
+ return std::make_unique<NeonResizeBilinearWorkload>(resizeBilinearDescriptor, info);
+ }
+
+ return MakeWorkloadHelper<NullWorkload, NullWorkload>(descriptor, info);
+}
+
std::unique_ptr<armnn::IWorkload> NeonWorkloadFactory::CreateResizeBilinear(
const ResizeBilinearQueueDescriptor& descriptor,
const WorkloadInfo& info) const
std::unique_ptr<IWorkload> CreateMemCopy(const MemCopyQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
+ std::unique_ptr<IWorkload> CreateResize(const ResizeQueueDescriptor& descriptor,
+ const WorkloadInfo& info) const override;
+
+ ARMNN_DEPRECATED_MSG("Use CreateResize instead")
std::unique_ptr<IWorkload> CreateResizeBilinear(const ResizeBilinearQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
std::unique_ptr<IWorkload> RefWorkloadFactory::CreateResizeBilinear(const ResizeBilinearQueueDescriptor& descriptor,
const WorkloadInfo& info) const
{
- if (IsFloat16(info))
- {
- return MakeWorkload<NullWorkload, NullWorkload>(descriptor, info);
- }
- return std::make_unique<RefResizeBilinearWorkload>(descriptor, info);
+    ResizeQueueDescriptor resizeDescriptor;
+    resizeDescriptor.m_Inputs  = descriptor.m_Inputs;
+    resizeDescriptor.m_Outputs = descriptor.m_Outputs;
+
+    resizeDescriptor.m_Parameters.m_Method       = ResizeMethod::Bilinear;
+    resizeDescriptor.m_Parameters.m_DataLayout   = descriptor.m_Parameters.m_DataLayout;
+    resizeDescriptor.m_Parameters.m_TargetWidth  = descriptor.m_Parameters.m_TargetWidth;
+    resizeDescriptor.m_Parameters.m_TargetHeight = descriptor.m_Parameters.m_TargetHeight;
+
+    return CreateResize(resizeDescriptor, info);
}
std::unique_ptr<IWorkload> RefWorkloadFactory::CreateFakeQuantization(
BOOST_AUTO_TEST_CASE(CreateResizeBilinearFloat32)
{
- RefCreateResizeBilinearTest<RefResizeBilinearWorkload, armnn::DataType::Float32>(DataLayout::NCHW);
+ RefCreateResizeBilinearTest<RefResizeWorkload, armnn::DataType::Float32>(DataLayout::NCHW);
}
BOOST_AUTO_TEST_CASE(CreateResizeBilinearUint8)
{
- RefCreateResizeBilinearTest<RefResizeBilinearWorkload, armnn::DataType::QuantisedAsymm8>(DataLayout::NCHW);
+ RefCreateResizeBilinearTest<RefResizeWorkload, armnn::DataType::QuantisedAsymm8>(DataLayout::NCHW);
}
BOOST_AUTO_TEST_CASE(CreateResizeBilinearQuantisedAsymm16)
{
- RefCreateResizeBilinearTest<RefResizeBilinearWorkload, armnn::DataType::QuantisedSymm16>(DataLayout::NCHW);
+ RefCreateResizeBilinearTest<RefResizeWorkload, armnn::DataType::QuantisedSymm16>(DataLayout::NCHW);
}
BOOST_AUTO_TEST_CASE(CreateResizeBilinearFloat32Nhwc)
{
- RefCreateResizeBilinearTest<RefResizeBilinearWorkload, armnn::DataType::Float32>(DataLayout::NHWC);
+ RefCreateResizeBilinearTest<RefResizeWorkload, armnn::DataType::Float32>(DataLayout::NHWC);
}
template <typename RsqrtWorkloadType, armnn::DataType DataType>