src/armnn/layers/ResizeBilinearLayer.cpp \
src/armnn/layers/RsqrtLayer.cpp \
src/armnn/layers/SpaceToBatchNdLayer.cpp \
+ src/armnn/layers/SpaceToDepthLayer.cpp \
src/armnn/layers/SoftmaxLayer.cpp \
src/armnn/layers/SplitterLayer.cpp \
src/armnn/layers/StridedSliceLayer.cpp \
src/armnn/layers/ReshapeLayer.cpp
src/armnn/layers/SpaceToBatchNdLayer.hpp
src/armnn/layers/SpaceToBatchNdLayer.cpp
+ src/armnn/layers/SpaceToDepthLayer.hpp
+ src/armnn/layers/SpaceToDepthLayer.cpp
src/armnn/layers/ResizeBilinearLayer.hpp
src/armnn/layers/ResizeBilinearLayer.cpp
src/armnn/layers/RsqrtLayer.cpp
DataLayout m_DataLayout;
};
+/// A SpaceToDepthDescriptor for the SpaceToDepthLayer
+struct SpaceToDepthDescriptor
+{
+ /// Default-constructs with block size 1 and NHWC data layout.
+ SpaceToDepthDescriptor()
+ : m_BlockSize(1u)
+ , m_DataLayout(DataLayout::NHWC)
+ {}
+
+ /// Scalar specifying the input block size. It must be >= 1
+ unsigned int m_BlockSize;
+ /// The data layout to be used (NCHW, NHWC).
+ DataLayout m_DataLayout;
+};
+
/// An LstmDescriptor for the LstmLayer.
struct LstmDescriptor
{
unsigned int m_NumOutputSlots;
};
-}
+} // namespace armnn
\ No newline at end of file
struct ResizeBilinearDescriptor;
struct SoftmaxDescriptor;
struct SpaceToBatchNdDescriptor;
+struct SpaceToDepthDescriptor;
struct StridedSliceDescriptor;
struct ViewsDescriptor;
const SpaceToBatchNdDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
+ virtual bool IsSpaceToDepthSupported(const TensorInfo& input,
+ const TensorInfo& output,
+ const SpaceToDepthDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
+
ARMNN_DEPRECATED_MSG("Use IsSplitterSupported with outputs instead")
virtual bool IsSplitterSupported(const TensorInfo& input,
const ViewsDescriptor& descriptor,
const SpaceToBatchNdDescriptor& spaceToBatchNdDescriptor,
const char* name = nullptr) = 0;
+ /// Function that a space to depth layer should call back to when its Accept(ILayerVisitor&) function is invoked.
+ /// @param layer - pointer to the layer which is calling back to this visit function.
+ /// @param spaceToDepthDescriptor - Parameters for the space to depth operation.
+ /// @param name - Optional name for the layer.
+ virtual void VisitSpaceToDepthLayer(const IConnectableLayer* layer,
+ const SpaceToDepthDescriptor& spaceToDepthDescriptor,
+ const char* name = nullptr) = 0;
+
/// Function that a splitter layer should call back to when its Accept(ILayerVisitor&) function is invoked.
/// @param layer - pointer to the layer which is calling back to this visit function.
/// @param splitterDescriptor - ViewsDescriptor to configure the splitting process.
virtual IConnectableLayer* AddSpaceToBatchNdLayer(const SpaceToBatchNdDescriptor& spaceToBatchNdDescriptor,
const char* name = nullptr) = 0;
+ /// Adds a space to depth layer to the network.
+ /// @param spaceToDepthDescriptor - Parameters for the space to depth operation.
+ /// @param name - Optional name for the layer.
+ /// @return - Interface for configuring the layer.
+ virtual IConnectableLayer* AddSpaceToDepthLayer(const SpaceToDepthDescriptor& spaceToDepthDescriptor,
+ const char* name = nullptr) = 0;
+
/// Adds a floor layer to the network.
/// @param name - Optional name for the layer.
/// @return - Interface for configuring the layer.
char* reasonIfUnsupported = nullptr,
size_t reasonIfUnsupportedMaxLength = 1024);
+/// Deprecated in favor of IBackend and ILayerSupport interfaces
+bool IsSpaceToDepthSupported(const BackendId& backend,
+ const TensorInfo& input,
+ const TensorInfo& output,
+ const SpaceToDepthDescriptor& descriptor,
+ char* reasonIfUnsupported = nullptr,
+ size_t reasonIfUnsupportedMaxLength = 1024);
+
ARMNN_DEPRECATED_MSG("Use IsSplitterSupported with outputs instead")
bool IsSplitterSupported(const BackendId& backend,
const TensorInfo& input,
const SpaceToBatchNdDescriptor&,
const char*) override { DefaultPolicy::Apply(__func__); }
+ /// SpaceToDepth visit override: forwards to DefaultPolicy::Apply, matching the
+ /// other layer-visit overrides in this class.
+ void VisitSpaceToDepthLayer(const IConnectableLayer*,
+ const SpaceToDepthDescriptor&,
+ const char*) override { DefaultPolicy::Apply(__func__); }
+
void VisitSplitterLayer(const IConnectableLayer*,
const ViewsDescriptor&,
const char*) override { DefaultPolicy::Apply(__func__); }
Rsqrt,
Softmax,
SpaceToBatchNd,
+ SpaceToDepth,
Splitter,
StridedSlice,
Subtraction,
const char* GetLayerTypeAsCString(LayerType type);
using Coordinates = std::array<unsigned int, MaxNumOfTensorDimensions>;
-using Dimensions = std::array<unsigned int, MaxNumOfTensorDimensions>;
+using Dimensions = std::array<unsigned int, MaxNumOfTensorDimensions>;
}
#include "layers/RsqrtLayer.hpp"
#include "layers/SoftmaxLayer.hpp"
#include "layers/SpaceToBatchNdLayer.hpp"
+#include "layers/SpaceToDepthLayer.hpp"
#include "layers/SplitterLayer.hpp"
#include "layers/StridedSliceLayer.hpp"
#include "layers/SubtractionLayer.hpp"
DECLARE_LAYER(Rsqrt)
DECLARE_LAYER(Softmax)
DECLARE_LAYER(SpaceToBatchNd)
+DECLARE_LAYER(SpaceToDepth)
DECLARE_LAYER(Splitter)
DECLARE_LAYER(StridedSlice)
DECLARE_LAYER(Subtraction)
return m_Graph->AddLayer<SpaceToBatchNdLayer>(spaceToBatchNdDescriptor, name);
}
+// Adds a SpaceToDepth layer to the underlying graph and returns the layer's
+// IConnectableLayer interface for further configuration.
+IConnectableLayer* Network::AddSpaceToDepthLayer(const SpaceToDepthDescriptor& spaceToDepthDescriptor,
+ const char* name)
+{
+ return m_Graph->AddLayer<SpaceToDepthLayer>(spaceToDepthDescriptor, name);
+}
+
IConnectableLayer* Network::AddFloorLayer(const char* name)
{
return m_Graph->AddLayer<FloorLayer>(name);
IConnectableLayer* AddSpaceToBatchNdLayer(const SpaceToBatchNdDescriptor& spaceToBatchNdDescriptor,
const char* name = nullptr) override;
+ IConnectableLayer* AddSpaceToDepthLayer(const SpaceToDepthDescriptor& spaceToDepthDescriptor,
+ const char* name = nullptr) override;
+
IConnectableLayer* AddFloorLayer(const char* name = nullptr) override;
IConnectableLayer* AddOutputLayer(LayerBindingId id, const char* name = nullptr) override;
--- /dev/null
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "SpaceToDepthLayer.hpp"
+
+#include "LayerCloneBase.hpp"
+
+#include <armnn/TypesUtils.hpp>
+
+#include <backendsCommon/WorkloadData.hpp>
+#include <backendsCommon/WorkloadFactory.hpp>
+
+#include <DataLayoutIndexed.hpp>
+
+#include <numeric>
+
+using namespace armnnUtils;
+
+namespace armnn
+{
+
+// Constructs the layer with one input slot and one output slot
+// (the (1, 1) arguments to LayerWithParameters).
+SpaceToDepthLayer::SpaceToDepthLayer(const SpaceToDepthDescriptor param, const char* name)
+ : LayerWithParameters(1, 1, LayerType::SpaceToDepth, param, name)
+{}
+
+// Builds a SpaceToDepthQueueDescriptor from this layer's parameters and asks the
+// workload factory to create the backend-specific workload for it.
+std::unique_ptr<IWorkload> SpaceToDepthLayer::CreateWorkload(const Graph& graph,
+ const IWorkloadFactory& factory) const
+{
+ SpaceToDepthQueueDescriptor descriptor;
+ descriptor.m_Parameters.m_BlockSize = m_Param.m_BlockSize;
+ descriptor.m_Parameters.m_DataLayout = m_Param.m_DataLayout;
+
+ return factory.CreateSpaceToDepth(descriptor, PrepInfoAndDesc(descriptor, graph));
+}
+
+// Creates a copy of this layer (same parameters and name) in the target graph.
+SpaceToDepthLayer* SpaceToDepthLayer::Clone(Graph& graph) const
+{
+ return CloneBase<SpaceToDepthLayer>(graph, m_Param, GetName());
+}
+
+// Infers the output shape from the single input shape: the batch dimension is
+// unchanged, height and width are divided by the block size, and the channel
+// dimension is multiplied by blockSize^2.
+std::vector<TensorShape> SpaceToDepthLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
+{
+ BOOST_ASSERT(inputShapes.size() == 1);
+
+ TensorShape inputShape = inputShapes[0];
+ TensorShape outputShape(inputShape);
+
+ outputShape[0] = inputShape[0];
+
+ // Resolve H/W/C indices for the configured data layout (NCHW vs NHWC).
+ DataLayoutIndexed dimensionIndices{m_Param.m_DataLayout};
+ unsigned int hIndex = dimensionIndices.GetHeightIndex();
+ unsigned int wIndex = dimensionIndices.GetWidthIndex();
+ unsigned int cIndex = dimensionIndices.GetChannelsIndex();
+
+ // NOTE(review): integer division — assumes H and W are exact multiples of
+ // m_BlockSize; confirm that this is validated elsewhere before workload creation.
+ outputShape[hIndex] = inputShape[hIndex] / m_Param.m_BlockSize;
+ outputShape[wIndex] = inputShape[wIndex] / m_Param.m_BlockSize;
+
+ outputShape[cIndex] = inputShape[cIndex] * m_Param.m_BlockSize * m_Param.m_BlockSize;
+
+ return std::vector<TensorShape>({ outputShape });
+}
+
+// Checks that the shape set on this layer's output slot matches the shape
+// inferred from the connected input slot, throwing LayerValidationException
+// on mismatch.
+void SpaceToDepthLayer::ValidateTensorShapesFromInputs()
+{
+ VerifyLayerConnections(1, CHECK_LOCATION());
+
+ std::vector<TensorShape> inferredShapes = InferOutputShapes({
+ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });
+
+ BOOST_ASSERT(inferredShapes.size() == 1);
+
+ ConditionalThrowIfNotEqual<LayerValidationException>(
+ "SpaceToDepthLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.",
+ GetOutputSlot(0).GetTensorInfo().GetShape(),
+ inferredShapes[0]);
+}
+
+// Visitor callback: reports this layer, its parameters and name to the visitor.
+void SpaceToDepthLayer::Accept(ILayerVisitor& visitor) const
+{
+ visitor.VisitSpaceToDepthLayer(this, GetParameters(), GetName());
+}
+
+} // namespace armnn
\ No newline at end of file
--- /dev/null
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include "LayerWithParameters.hpp"
+
+namespace armnn
+{
+
+/// This layer represents a SpaceToDepth operation.
+class SpaceToDepthLayer : public LayerWithParameters<SpaceToDepthDescriptor>
+{
+public:
+ /// Makes a workload for the SpaceToDepth type.
+ /// @param [in] graph The graph where this layer can be found.
+ /// @param [in] factory The workload factory which will create the workload.
+ /// @return A pointer to the created workload, or nullptr if not created.
+ virtual std::unique_ptr<IWorkload> CreateWorkload(const Graph& graph,
+ const IWorkloadFactory& factory) const override;
+
+ /// Creates a dynamically-allocated copy of this layer.
+ /// @param [in] graph The graph into which this layer is being cloned.
+ SpaceToDepthLayer* Clone(Graph& graph) const override;
+
+ /// By default returns inputShapes if the number of inputs are equal to number of outputs,
+ /// otherwise infers the output shapes from given input shapes and layer properties.
+ /// @param [in] inputShapes The input shapes layer has.
+ /// @return A vector to the inferred output shape.
+ std::vector<TensorShape> InferOutputShapes(const std::vector<TensorShape>& inputShapes) const override;
+
+ /// Check if the input tensor shape(s)
+ /// will lead to a valid configuration of @ref SpaceToDepthLayer.
+ void ValidateTensorShapesFromInputs() override;
+
+ /// Apply a visitor to this layer.
+ /// @param [in] visitor The visitor to call back (VisitSpaceToDepthLayer).
+ void Accept(ILayerVisitor& visitor) const override;
+
+protected:
+ /// Constructor to create a SpaceToDepthLayer.
+ /// @param [in] param SpaceToDepthDescriptor to configure the SpaceToDepthLayer operation.
+ /// @param [in] name Optional name for the layer.
+ SpaceToDepthLayer(const SpaceToDepthDescriptor param, const char* name);
+
+ /// Default destructor
+ ~SpaceToDepthLayer() = default;
+};
+
+} // namespace armnn
//
#include <armnn/ArmNN.hpp>
-#include <boost/algorithm/string.hpp>
-#include <boost/test/unit_test.hpp>
-#include <layers/BatchToSpaceNdLayer.hpp>
#include <Graph.hpp>
+#include <layers/BatchToSpaceNdLayer.hpp>
+#include <layers/SpaceToDepthLayer.hpp>
+#include <boost/algorithm/string.hpp>
+#include <boost/test/unit_test.hpp>
BOOST_AUTO_TEST_SUITE(LayerValidateOutput)
BOOST_CHECK(expectedShape == batchToSpaceLayer->InferOutputShapes(shapes).at(0));
}
+// Checks SpaceToDepthLayer::InferOutputShapes for a block size of 2 on an
+// NHWC input of shape {1, 16, 8, 3}.
+BOOST_AUTO_TEST_CASE(TestSpaceToDepthInferOutputShape)
+{
+ armnn::Graph graph;
+
+ armnn::SpaceToDepthDescriptor descriptor;
+ descriptor.m_BlockSize = 2;
+ descriptor.m_DataLayout = armnn::DataLayout::NHWC;
+
+ armnn::SpaceToDepthLayer* const spaceToDepthLayer =
+ graph.AddLayer<armnn::SpaceToDepthLayer>(descriptor, "spaceToDepth");
+
+ std::vector<armnn::TensorShape> shapes;
+ const std::vector<unsigned int> dimSizes{ 1, 16, 8, 3 };
+ armnn::TensorShape shape(4, dimSizes.data());
+ shapes.push_back(shape);
+
+ // Expected NHWC output: {1, 16/2, 8/2, 3*2*2} = {1, 8, 4, 12}.
+ const std::vector<unsigned int> expectedDimSizes{ 1, 8, 4, 12 };
+ armnn::TensorShape expectedShape(4, expectedDimSizes.data());
+
+ BOOST_CHECK(expectedShape == spaceToDepthLayer->InferOutputShapes(shapes).at(0));
+}
+
BOOST_AUTO_TEST_SUITE_END()
CreateAnyLayer(flatBufferLayer.o, serializer::Layer::Layer_SpaceToBatchNdLayer);
}
+// Build FlatBuffer for SpaceToDepthLayer
+// Serialization of SpaceToDepth is not supported yet: any attempt throws.
+void SerializerVisitor::VisitSpaceToDepthLayer(const armnn::IConnectableLayer* layer,
+ const armnn::SpaceToDepthDescriptor& spaceToDepthDescriptor,
+ const char* name)
+{
+ throw armnn::Exception("SerializerVisitor::VisitSpaceToDepthLayer is not yet implemented");
+}
+
// Build FlatBuffer for Splitter Layer
void SerializerVisitor::VisitSplitterLayer(const armnn::IConnectableLayer* layer,
const armnn::ViewsDescriptor& viewsDescriptor,
const armnn::SpaceToBatchNdDescriptor& spaceToBatchNdDescriptor,
const char* name = nullptr) override;
+ void VisitSpaceToDepthLayer(const armnn::IConnectableLayer* layer,
+ const armnn::SpaceToDepthDescriptor& spaceToDepthDescriptor,
+ const char* name = nullptr) override;
+
void VisitNormalizationLayer(const armnn::IConnectableLayer* layer,
const armnn::NormalizationDescriptor& normalizationDescriptor,
const char* name = nullptr) override;
return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}
+// Base-class default: delegates to DefaultLayerSupport, as the other
+// Is*Supported defaults in this file do; backends override to opt in.
+bool LayerSupportBase::IsSpaceToDepthSupported(const TensorInfo& input,
+ const TensorInfo& output,
+ const SpaceToDepthDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported) const
+{
+ return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
+}
+
bool LayerSupportBase::IsSplitterSupported(const TensorInfo& input,
const ViewsDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported) const
const SpaceToBatchNdDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+ bool IsSpaceToDepthSupported(const TensorInfo& input,
+ const TensorInfo& output,
+ const SpaceToDepthDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
ARMNN_DEPRECATED_MSG("Use IsSplitterSupported with outputs instead")
bool IsSplitterSupported(const TensorInfo& input,
const ViewsDescriptor& descriptor,
void Validate(const WorkloadInfo& workloadInfo) const;
};
+/// Workload queue descriptor for SpaceToDepth; carries the layer's
+/// SpaceToDepthDescriptor as its parameters.
+struct SpaceToDepthQueueDescriptor : QueueDescriptorWithParameters<SpaceToDepthDescriptor>
+{
+ void Validate(const WorkloadInfo& workloadInfo) const;
+};
+
struct FloorQueueDescriptor : QueueDescriptor
{
void Validate(const WorkloadInfo& workloadInfo) const;
reason);
break;
}
+ case LayerType::SpaceToDepth:
+ {
+ auto cLayer = boost::polymorphic_downcast<const SpaceToDepthLayer*>(&layer);
+
+ const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
+ const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
+
+ result = layerSupportObject->IsSpaceToDepthSupported(OverrideDataType(input, dataType),
+ OverrideDataType(output, dataType),
+ cLayer->GetParameters(),
+ reason);
+ break;
+ }
case LayerType::Splitter:
{
auto cLayer = boost::polymorphic_downcast<const SplitterLayer*>(&layer);
return std::unique_ptr<IWorkload>();
}
+// Default implementation returns an empty unique_ptr (no workload);
+// backend factories override this to provide a concrete workload.
+std::unique_ptr<IWorkload> IWorkloadFactory::CreateSpaceToDepth(const SpaceToDepthQueueDescriptor& descriptor,
+ const WorkloadInfo& info) const
+{
+ return std::unique_ptr<IWorkload>();
+}
+
std::unique_ptr<IWorkload> IWorkloadFactory::CreateStridedSlice(const StridedSliceQueueDescriptor& descriptor,
const WorkloadInfo& Info) const
{
virtual std::unique_ptr<IWorkload> CreateSpaceToBatchNd(const SpaceToBatchNdQueueDescriptor& descriptor,
const WorkloadInfo& info) const;
+ virtual std::unique_ptr<IWorkload> CreateSpaceToDepth(const SpaceToDepthQueueDescriptor& descriptor,
+ const WorkloadInfo& info) const;
+
virtual std::unique_ptr<IWorkload> CreateSubtraction(const SubtractionQueueDescriptor& descriptor,
const WorkloadInfo& info) const;
DECLARE_LAYER_POLICY_2_PARAM(SpaceToBatchNd)
+DECLARE_LAYER_POLICY_2_PARAM(SpaceToDepth)
+
DECLARE_LAYER_POLICY_2_PARAM(Splitter)
DECLARE_LAYER_POLICY_2_PARAM(StridedSlice)