src/armnn/layers/QuantizeLayer.cpp \
src/armnn/layers/ReshapeLayer.cpp \
src/armnn/layers/ResizeBilinearLayer.cpp \
+ src/armnn/layers/ResizeLayer.cpp \
src/armnn/layers/RsqrtLayer.cpp \
src/armnn/layers/SpaceToBatchNdLayer.cpp \
src/armnn/layers/SpaceToDepthLayer.cpp \
src/armnn/layers/ReshapeLayer.cpp
src/armnn/layers/ResizeBilinearLayer.hpp
src/armnn/layers/ResizeBilinearLayer.cpp
+ src/armnn/layers/ResizeLayer.hpp
+ src/armnn/layers/ResizeLayer.cpp
src/armnn/layers/RsqrtLayer.cpp
src/armnn/layers/RsqrtLayer.hpp
src/armnn/layers/SoftmaxLayer.hpp
DataLayout m_DataLayout;
};
+/// A ResizeDescriptor for the ResizeLayer.
+struct ResizeDescriptor
+{
+ ResizeDescriptor()
+ : m_TargetWidth(0)
+ , m_TargetHeight(0)
+ , m_Method(ResizeMethod::NearestNeighbor)
+ , m_DataLayout(DataLayout::NCHW)
+ {}
+
+ /// Target width value (defaults to 0).
+ uint32_t m_TargetWidth;
+ /// Target height value (defaults to 0).
+ uint32_t m_TargetHeight;
+ /// The Interpolation method to use
+ /// (Bilinear, NearestNeighbor). Defaults to NearestNeighbor.
+ ResizeMethod m_Method;
+ /// The data layout to be used (NCHW, NHWC). Defaults to NCHW.
+ DataLayout m_DataLayout;
+};
+
+
/// A ReshapeDescriptor for the ReshapeLayer.
struct ReshapeDescriptor
{
struct PreCompiledDescriptor;
struct ReshapeDescriptor;
struct ResizeBilinearDescriptor;
+struct ResizeDescriptor;
struct SoftmaxDescriptor;
struct SpaceToBatchNdDescriptor;
struct SpaceToDepthDescriptor;
const TensorInfo& output,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
+ virtual bool IsResizeSupported(const TensorInfo& input,
+ const TensorInfo& output,
+ const ResizeDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
+
virtual bool IsRsqrtSupported(const TensorInfo& input,
const TensorInfo& output,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
const ResizeBilinearDescriptor& resizeDesc,
const char* name = nullptr) = 0;
+ /// Function that a resize layer should call back to when its Accept(ILayerVisitor&) function is invoked.
+ /// @param layer - pointer to the layer which is calling back to this visit function.
+ /// @param resizeDescriptor - Parameters for the resize operation.
+ /// @param name - Optional name for the layer.
+ virtual void VisitResizeLayer(const IConnectableLayer* layer,
+ const ResizeDescriptor& resizeDescriptor,
+ const char* name = nullptr) = 0;
+
/// Function a Reciprocal of square root layer should call back to when its Accept(ILayerVisitor&)
/// function is invoked.
/// @param layer - pointer to the layer which is calling back to this visit function.
virtual IConnectableLayer* AddResizeBilinearLayer(const ResizeBilinearDescriptor& resizeDesc,
const char* name = nullptr) = 0;
+ /// Adds a resize layer to the network.
+ /// @param resizeDescriptor - Parameters for the resize operation.
+ /// @param name - Optional name for the layer.
+ /// @return - Interface for configuring the layer.
+ virtual IConnectableLayer* AddResizeLayer(const ResizeDescriptor& resizeDescriptor,
+ const char* name = nullptr) = 0;
+
/// Adds an L2 normalization layer to the network.
/// Normalization is performed along dimension 1, but requires a 4d input.
/// @param desc - Parameters for the L2 normalization operation.
const ResizeBilinearDescriptor&,
const char*) override { DefaultPolicy::Apply(__func__); }
+ void VisitResizeLayer(const IConnectableLayer*,
+ const ResizeDescriptor&,
+ const char*) override { DefaultPolicy::Apply(__func__); }
+
void VisitRsqrtLayer(const IConnectableLayer*,
const char*) override { DefaultPolicy::Apply(__func__); }
L2 = 2
};
+/// The interpolation method to use for a resize operation
+/// (selected via ResizeDescriptor::m_Method).
+enum class ResizeMethod
+{
+ Bilinear = 0,
+ NearestNeighbor = 1
+};
+
///
/// The padding method modifies the output of pooling layers.
/// In both supported methods, the values are ignored (they are
Quantize,
Reshape,
ResizeBilinear,
+ Resize,
Rsqrt,
Softmax,
SpaceToBatchNd,
#include "layers/QuantizeLayer.hpp"
#include "layers/ReshapeLayer.hpp"
#include "layers/ResizeBilinearLayer.hpp"
+#include "layers/ResizeLayer.hpp"
#include "layers/RsqrtLayer.hpp"
#include "layers/SoftmaxLayer.hpp"
#include "layers/SpaceToBatchNdLayer.hpp"
DECLARE_LAYER(Prelu)
DECLARE_LAYER(Quantize)
DECLARE_LAYER(Reshape)
+DECLARE_LAYER(Resize)
DECLARE_LAYER(ResizeBilinear)
DECLARE_LAYER(Rsqrt)
DECLARE_LAYER(Softmax)
return m_Graph->AddLayer<ResizeBilinearLayer>(resizeDescriptor,name);
}
+// Adds a ResizeLayer to the network's graph.
+// @param resizeDescriptor - Parameters for the resize operation.
+// @param name - Optional name for the layer.
+IConnectableLayer* Network::AddResizeLayer(const ResizeDescriptor&
+resizeDescriptor, const char* name)
+{
+ return m_Graph->AddLayer<ResizeLayer>(resizeDescriptor,name);
+}
+
IConnectableLayer* Network::AddL2NormalizationLayer(const L2NormalizationDescriptor& desc,
const char* name)
{
IConnectableLayer* AddResizeBilinearLayer(const ResizeBilinearDescriptor& resizeDesc,
const char* name = nullptr) override;
+ IConnectableLayer* AddResizeLayer(const ResizeDescriptor& resizeDescriptor,
+ const char* name = nullptr) override;
+
IConnectableLayer* AddL2NormalizationLayer(const L2NormalizationDescriptor& desc,
const char* name = nullptr) override;
--- /dev/null
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+#include "ResizeLayer.hpp"
+
+#include "LayerCloneBase.hpp"
+
+#include <armnn/TypesUtils.hpp>
+
+#include <backendsCommon/WorkloadData.hpp>
+#include <backendsCommon/WorkloadFactory.hpp>
+
+#include <DataLayoutIndexed.hpp>
+
+using namespace armnnUtils;
+
+namespace armnn
+{
+
+// Constructs a ResizeLayer with exactly one input slot and one output slot.
+ResizeLayer::ResizeLayer(const ResizeDescriptor& param, const char* name)
+ : LayerWithParameters(1, 1, LayerType::Resize, param, name)
+{
+}
+
+// Asks the backend's workload factory to create the Resize workload.
+// PrepInfoAndDesc populates the queue descriptor with this layer's
+// parameters and the input/output tensor infos from the graph.
+std::unique_ptr<IWorkload> ResizeLayer::CreateWorkload(const Graph& graph,
+ const IWorkloadFactory& factory) const
+{
+ ResizeQueueDescriptor descriptor;
+ return factory.CreateResize(descriptor, PrepInfoAndDesc(descriptor, graph));
+}
+
+// Creates a copy of this layer (parameters and name included) in the given graph.
+ResizeLayer* ResizeLayer::Clone(Graph& graph) const
+{
+ return CloneBase<ResizeLayer>(graph, m_Param, GetName());
+}
+
+// Infers the single output shape from the single input shape: batch and
+// channel dimensions are carried over from the input (assumed 4D), while
+// width and height come from the descriptor's target values.
+std::vector<TensorShape> ResizeLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
+{
+ BOOST_ASSERT(inputShapes.size() == 1);
+
+ const TensorShape& inputShape = inputShapes[0];
+ // Maps the descriptor's data layout (NCHW/NHWC) to dimension indices.
+ const DataLayoutIndexed dimensionIndices = m_Param.m_DataLayout;
+
+ unsigned int outWidth = m_Param.m_TargetWidth;
+ unsigned int outHeight = m_Param.m_TargetHeight;
+ unsigned int outChannels = inputShape[dimensionIndices.GetChannelsIndex()];
+ unsigned int outBatch = inputShape[0];
+
+ // Assemble the output shape in the same data layout as specified by the descriptor.
+ TensorShape tensorShape = m_Param.m_DataLayout == armnn::DataLayout::NHWC ?
+ TensorShape( { outBatch, outHeight, outWidth, outChannels } ) :
+ TensorShape( { outBatch, outChannels, outHeight, outWidth });
+
+ return std::vector<TensorShape>({ tensorShape });
+}
+
+// Validates that the shape set on the output slot matches the shape inferred
+// from the connected input; throws LayerValidationException on mismatch.
+void ResizeLayer::ValidateTensorShapesFromInputs()
+{
+ VerifyLayerConnections(1, CHECK_LOCATION());
+
+ auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });
+
+ BOOST_ASSERT(inferredShapes.size() == 1);
+
+ ConditionalThrowIfNotEqual<LayerValidationException>(
+ "ResizeLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.",
+ GetOutputSlot(0).GetTensorInfo().GetShape(),
+ inferredShapes[0]);
+}
+
+// Visitor support: forwards this layer, its parameters and its name to the visitor.
+void ResizeLayer::Accept(ILayerVisitor& visitor) const
+{
+ visitor.VisitResizeLayer(this, GetParameters(), GetName());
+}
+
+} // namespace armnn
--- /dev/null
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+#pragma once
+
+#include "LayerWithParameters.hpp"
+
+namespace armnn
+{
+
+/// This layer represents a resize operation.
+class ResizeLayer : public LayerWithParameters<ResizeDescriptor>
+{
+public:
+ /// Makes a workload for the Resize type.
+ /// @param [in] graph The graph where this layer can be found.
+ /// @param [in] factory The workload factory which will create the workload.
+ /// @return A pointer to the created workload, or nullptr if not created.
+ virtual std::unique_ptr<IWorkload>CreateWorkload(const Graph& graph,
+ const IWorkloadFactory& factory) const override;
+
+ /// Creates a dynamically-allocated copy of this layer.
+ /// @param [in] graph The graph into which this layer is being cloned.
+ ResizeLayer* Clone(Graph& graph) const override;
+
+ /// Check if the input tensor shape(s)
+ /// will lead to a valid configuration of @ref ResizeLayer.
+ void ValidateTensorShapesFromInputs() override;
+
+ /// By default returns inputShapes if the number of inputs are equal to number of outputs,
+ /// otherwise infers the output shapes from given input shapes and layer properties.
+ /// @param [in] inputShapes The input shapes layer has.
+ /// @return A vector to the inferred output shape.
+ std::vector<TensorShape> InferOutputShapes(const std::vector<TensorShape>& inputShapes) const override;
+
+ /// Visitor support: calls VisitResizeLayer on the given visitor.
+ void Accept(ILayerVisitor& visitor) const override;
+
+protected:
+ /// Constructor to create a ResizeLayer.
+ /// @param [in] param ResizeDescriptor to configure the resize operation.
+ /// @param [in] name Optional name for the layer.
+ ResizeLayer(const ResizeDescriptor& param, const char* name);
+
+ /// Default destructor
+ ~ResizeLayer() = default;
+};
+
+} // namespace armnn
CreateAnyLayer(flatBufferLayer.o, serializer::Layer::Layer_ResizeBilinearLayer);
}
+// Serialization of ResizeLayer is not yet supported; this stub unconditionally
+// throws (parameters are intentionally unused until serialization is implemented).
+void SerializerVisitor::VisitResizeLayer(const armnn::IConnectableLayer* layer,
+ const armnn::ResizeDescriptor& resizeDescriptor,
+ const char* name)
+{
+ throw armnn::Exception("SerializerVisitor::VisitResizeLayer is not yet implemented");
+}
+
void SerializerVisitor::VisitRsqrtLayer(const armnn::IConnectableLayer* layer, const char* name)
{
auto fbRsqrtBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Rsqrt);
const armnn::ResizeBilinearDescriptor& resizeDescriptor,
const char* name = nullptr) override;
+ void VisitResizeLayer(const armnn::IConnectableLayer* layer,
+ const armnn::ResizeDescriptor& resizeDescriptor,
+ const char* name = nullptr) override;
+
void VisitRsqrtLayer(const armnn::IConnectableLayer* layer,
const char* name = nullptr) override;
return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}
+// Base-class default: reports Resize as unsupported. Backends that implement
+// Resize override this to return true for the configurations they support.
+bool LayerSupportBase::IsResizeSupported(const TensorInfo& input,
+ const TensorInfo& output,
+ const ResizeDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported) const
+{
+ return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
+}
+
bool LayerSupportBase::IsRsqrtSupported(const TensorInfo &input,
const TensorInfo &output,
Optional<std::string &> reasonIfUnsupported) const
const TensorInfo& output,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+ bool IsResizeSupported(const TensorInfo& input,
+ const TensorInfo& output,
+ const ResizeDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
bool IsRsqrtSupported(const TensorInfo& input,
const TensorInfo& output,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
void Validate(const WorkloadInfo& workloadInfo) const;
};
+/// Workload queue descriptor for the Resize workload; carries the layer's
+/// ResizeDescriptor parameters alongside the input/output tensor infos.
+struct ResizeQueueDescriptor : QueueDescriptorWithParameters<ResizeDescriptor>
+{
+ void Validate(const WorkloadInfo& workloadInfo) const;
+};
+
struct FakeQuantizationQueueDescriptor : QueueDescriptorWithParameters<FakeQuantizationDescriptor>
{
FakeQuantizationQueueDescriptor()
reason);
break;
}
+ // Queries backend support for Resize using the layer's actual tensor infos,
+ // with data types overridden to the requested network precision.
+ case LayerType::Resize:
+ {
+ auto cLayer = boost::polymorphic_downcast<const ResizeLayer*>(&layer);
+ const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
+ const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
+ result = layerSupportObject->IsResizeSupported(OverrideDataType(input, dataType),
+ OverrideDataType(output, dataType),
+ cLayer->GetParameters(),
+ reason);
+ break;
+ }
case LayerType::ResizeBilinear:
{
const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
return std::unique_ptr<IWorkload>();
}
+// Default implementation: returns an empty pointer, i.e. no Resize workload is
+// created unless a backend's factory overrides CreateResize.
+std::unique_ptr<IWorkload> IWorkloadFactory::CreateResize(const ResizeQueueDescriptor& descriptor,
+ const WorkloadInfo& info) const
+{
+ return std::unique_ptr<IWorkload>();
+}
+
std::unique_ptr<IWorkload> IWorkloadFactory::CreateRsqrt(const RsqrtQueueDescriptor& descriptor,
const WorkloadInfo& info) const
{
virtual std::unique_ptr<IWorkload> CreateResizeBilinear(const ResizeBilinearQueueDescriptor& descriptor,
const WorkloadInfo& info) const;
+ virtual std::unique_ptr<IWorkload> CreateResize(const ResizeQueueDescriptor& descriptor,
+ const WorkloadInfo& info) const;
+
virtual std::unique_ptr<IWorkload> CreateRsqrt(const RsqrtQueueDescriptor& descriptor,
const WorkloadInfo& info) const;
DECLARE_LAYER_POLICY_1_PARAM(Division)
+DECLARE_LAYER_POLICY_2_PARAM(Resize)
+
DECLARE_LAYER_POLICY_2_PARAM(ResizeBilinear)
DECLARE_LAYER_POLICY_2_PARAM(Reshape)