src/armnn/layers/SpaceToDepthLayer.cpp \
src/armnn/layers/SplitterLayer.cpp \
src/armnn/layers/StackLayer.cpp \
+ src/armnn/layers/StandInLayer.cpp \
src/armnn/layers/StridedSliceLayer.cpp \
src/armnn/layers/SubtractionLayer.cpp \
src/armnn/layers/SwitchLayer.cpp \
src/armnn/layers/SplitterLayer.cpp
src/armnn/layers/StackLayer.hpp
src/armnn/layers/StackLayer.cpp
+ src/armnn/layers/StandInLayer.cpp
+ src/armnn/layers/StandInLayer.hpp
src/armnn/layers/StridedSliceLayer.cpp
src/armnn/layers/StridedSliceLayer.hpp
src/armnn/layers/SubtractionLayer.cpp
TensorShape m_InputShape;
};
+/// A StandInDescriptor for the StandIn layer.
+struct StandInDescriptor
+{
+ StandInDescriptor() {}
+
+ /// @param numInputs - Number of input tensors the stand-in operation consumes.
+ /// @param numOutputs - Number of output tensors the stand-in operation produces.
+ StandInDescriptor(uint32_t numInputs, uint32_t numOutputs)
+ : m_NumInputs(numInputs)
+ , m_NumOutputs(numOutputs)
+ {}
+
+ /// Number of input tensors
+ uint32_t m_NumInputs = 0;
+ /// Number of output tensors
+ uint32_t m_NumOutputs = 0;
+};
+
/// A StridedSliceDescriptor for the StridedSliceLayer.
struct StridedSliceDescriptor
{
struct SpaceToDepthDescriptor;
struct SliceDescriptor;
struct StackDescriptor;
+struct StandInDescriptor;
struct StridedSliceDescriptor;
struct TransposeConvolution2dDescriptor;
struct ViewsDescriptor;
const StackDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
+ virtual bool IsStandInSupported(const std::vector<const TensorInfo*>& inputs,
+ const std::vector<const TensorInfo*>& outputs,
+ const StandInDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
+
+
virtual bool IsStridedSliceSupported(const TensorInfo& input,
const TensorInfo& output,
const StridedSliceDescriptor& descriptor,
const StackDescriptor& stackDescriptor,
const char* name = nullptr) = 0;
+ /// Function a StandInLayer should call back to when its Accept(ILayerVisitor&) function is invoked.
+ /// @param layer - pointer to the layer which is calling back to this visit function.
+ /// @param standInDescriptor - Parameters for the stand-in layer.
+ /// @param name - Optional name for the layer.
+ virtual void VisitStandInLayer(const IConnectableLayer* layer,
+ const StandInDescriptor& standInDescriptor,
+ const char* name = nullptr) = 0;
+
/// Function a strided slice layer should call back to when its Accept(ILayerVisitor&) function is invoked.
/// @param layer - pointer to the layer which is calling back to this visit function.
/// @param stridedSliceDescriptor - Parameters for the strided slice operation.
virtual IConnectableLayer* AddStackLayer(const StackDescriptor& descriptor,
const char* name = nullptr) = 0;
+
+ /// Add a stand-in layer for a type unknown to the Arm NN framework.
+ /// Note: Due to the nature of this layer, no validation can be performed by the framework.
+ /// Furthermore, any model containing this layer cannot make use of dynamic tensors since the
+ /// tensor sizes cannot be inferred.
+ /// @param descriptor - Descriptor for the StandIn layer.
+ /// @return - Interface for configuring the layer.
+ virtual IConnectableLayer* AddStandInLayer(const StandInDescriptor& descriptor,
+ const char* name = nullptr) = 0;
+
/// Add a QuantizedLstm layer to the network
/// @param params - The weights and biases for the Quantized LSTM cell
/// @param name - Optional name for the layer
const StackDescriptor&,
const char*) override { DefaultPolicy::Apply(__func__); }
+ void VisitStandInLayer(const IConnectableLayer*,
+ const StandInDescriptor&,
+ const char*) override { DefaultPolicy::Apply(__func__); }
+
void VisitStridedSliceLayer(const IConnectableLayer*,
const StridedSliceDescriptor&,
const char*) override { DefaultPolicy::Apply(__func__); }
SpaceToDepth,
Splitter,
Stack,
+ StandIn,
StridedSlice,
Subtraction,
Switch,
#include "layers/SpaceToDepthLayer.hpp"
#include "layers/SplitterLayer.hpp"
#include "layers/StackLayer.hpp"
+#include "layers/StandInLayer.hpp"
#include "layers/StridedSliceLayer.hpp"
#include "layers/SubtractionLayer.hpp"
#include "layers/SwitchLayer.hpp"
DECLARE_LAYER(SpaceToDepth)
DECLARE_LAYER(Splitter)
DECLARE_LAYER(Stack)
+DECLARE_LAYER(StandIn)
DECLARE_LAYER(StridedSlice)
DECLARE_LAYER(Subtraction)
DECLARE_LAYER(Switch)
return m_Graph->AddLayer<StackLayer>(stackDescriptor, name);
}
+
+// Adds a StandInLayer to the network's graph. Parameter is named 'descriptor'
+// to match the declaration in Network.hpp.
+IConnectableLayer* Network::AddStandInLayer(const StandInDescriptor& descriptor,
+ const char* name)
+{
+ return m_Graph->AddLayer<StandInLayer>(descriptor, name);
+}
+
IConnectableLayer* Network::AddQuantizedLstmLayer(const QuantizedLstmInputParams& params,
const char* name)
{
IConnectableLayer* AddStackLayer(const StackDescriptor& stackDescriptor,
const char* name = nullptr) override;
+ IConnectableLayer* AddStandInLayer(const StandInDescriptor& descriptor,
+ const char* name = nullptr) override;
+
IConnectableLayer* AddQuantizedLstmLayer(const QuantizedLstmInputParams& params,
const char* name = nullptr) override;
--- /dev/null
+//
+// Copyright © 2019 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "StandInLayer.hpp"
+#include "LayerCloneBase.hpp"
+
+namespace armnn
+{
+
+/// Constructs a stand-in layer with the numbers of input AND output slots taken
+/// from the descriptor. (Hard-coding one output would break any stand-in
+/// operation that produces multiple outputs.)
+StandInLayer::StandInLayer(const StandInDescriptor& param, const char* name)
+ : LayerWithParameters(param.m_NumInputs, param.m_NumOutputs, LayerType::StandIn, param, name)
+{
+}
+
+// Parameter names commented out: they are intentionally unused, and naming them
+// would trigger -Wunused-parameter warnings.
+std::unique_ptr<IWorkload> StandInLayer::CreateWorkload(const Graph& /*graph*/,
+ const IWorkloadFactory& /*factory*/) const
+{
+ // This throws in the event that it's called. We would expect that any backend that
+ // "claims" to support the StandInLayer type would actually substitute it with a PrecompiledLayer
+ // during graph optimization. There is no interface on the IWorkloadFactory to create a StandInWorkload.
+ throw Exception("Stand in layer does not support creating workloads");
+}
+
+// Creates a copy of this layer in the target graph. Only the descriptor and the
+// name are copied -- the framework holds no other state for a stand-in layer.
+StandInLayer* StandInLayer::Clone(Graph& graph) const
+{
+ return CloneBase<StandInLayer>(graph, m_Param, GetName());
+}
+
+// Output shapes cannot be inferred because the operation this layer stands in
+// for is unknown to the framework. The parameter name is commented out to avoid
+// an unused-parameter warning.
+std::vector<TensorShape> StandInLayer::InferOutputShapes(const std::vector<TensorShape>& /*inputShapes*/) const
+{
+ throw Exception("Stand in layer does not support inferring output shapes");
+}
+
+// Intentionally a no-op override.
+void StandInLayer::ValidateTensorShapesFromInputs()
+{
+ // Cannot validate this layer since no implementation details can be known by the framework
+ // so do nothing here.
+}
+
+// Visitor entry point: forwards this layer, its descriptor and its name to the
+// visitor's VisitStandInLayer() callback.
+void StandInLayer::Accept(ILayerVisitor& visitor) const
+{
+ visitor.VisitStandInLayer(this, GetParameters(), GetName());
+}
+} //namespace armnn
+
+
--- /dev/null
+//
+// Copyright © 2019 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include "LayerWithParameters.hpp"
+
+namespace armnn
+{
+
+/// This layer represents an unknown operation in the input graph.
+/// Note: no validation or shape inference can be performed by the framework,
+/// so workload creation and output-shape inference both throw.
+class StandInLayer : public LayerWithParameters<StandInDescriptor>
+{
+public:
+ /// Empty implementation that explicitly does NOT create a workload.
+ /// @param [in] graph The graph where this layer can be found.
+ /// @param [in] factory The workload factory which will create the workload.
+ /// @return Does not return anything. Throws Exception if called.
+ std::unique_ptr<IWorkload> CreateWorkload(const Graph& graph,
+ const IWorkloadFactory& factory) const override;
+
+ /// Creates a dynamically-allocated copy of this layer.
+ /// @param [in] graph The graph into which this layer is being cloned.
+ StandInLayer* Clone(Graph& graph) const override;
+
+ /// Does nothing since it is not possible to validate any properties of this layer.
+ void ValidateTensorShapesFromInputs() override;
+
+ /// Empty implementation that throws Exception if called.
+ /// @param [in] inputShapes The input shapes layer has.
+ /// @return Does not return anything. Throws Exception if called.
+ std::vector<TensorShape> InferOutputShapes(const std::vector<TensorShape>& inputShapes) const override;
+
+ /// Accepts a visitor object and calls VisitStandInLayer() method.
+ /// @param visitor The visitor on which to call VisitStandInLayer() method.
+ void Accept(ILayerVisitor& visitor) const override;
+
+protected:
+ /// Constructor to create a StandInLayer.
+ /// @param [in] param StandInDescriptor to configure the stand-in operation.
+ /// @param [in] name Optional name for the layer.
+ StandInLayer(const StandInDescriptor& param, const char* name);
+
+ /// Default destructor
+ ~StandInLayer() = default;
+};
+
+} //namespace armnn
+
+
+
+
CreateAnyLayer(stackLayer.o, serializer::Layer::Layer_StackLayer);
}
+// Serialization of the StandIn layer is not implemented yet; parameter names are
+// commented out so the stub compiles without unused-parameter warnings. Pointer
+// style ('T* name') matches the rest of this file.
+void SerializerVisitor::VisitStandInLayer(const armnn::IConnectableLayer* /*layer*/,
+ const armnn::StandInDescriptor& /*standInDescriptor*/,
+ const char* /*name*/)
+{
+ // TODO: IVGCVSW-4010 Implement serialization
+}
+
void SerializerVisitor::VisitStridedSliceLayer(const armnn::IConnectableLayer* layer,
const armnn::StridedSliceDescriptor& stridedSliceDescriptor,
const char* name)
const armnn::ViewsDescriptor& viewsDescriptor,
const char* name = nullptr) override;
+ void VisitStandInLayer(const armnn::IConnectableLayer* layer,
+ const armnn::StandInDescriptor& standInDescriptor,
+ const char* name = nullptr) override;
+
void VisitStackLayer(const armnn::IConnectableLayer* layer,
const armnn::StackDescriptor& stackDescriptor,
const char* name = nullptr) override;
return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}
+// A StandInLayer is never executable on a backend: it is expected to be
+// substituted (e.g. with a PrecompiledLayer) during graph optimization.
+// Always returns false; unused parameter names are commented out to avoid
+// warnings, and the constant reason string is assigned directly (no need for a
+// std::stringstream).
+bool LayerSupportBase::IsStandInSupported(const std::vector<const TensorInfo*>& /*inputs*/,
+ const std::vector<const TensorInfo*>& /*outputs*/,
+ const StandInDescriptor& /*descriptor*/,
+ Optional<std::string&> reasonIfUnsupported) const
+{
+ if (reasonIfUnsupported)
+ {
+ reasonIfUnsupported.value() = "StandIn layer is not executable via backends";
+ }
+
+ return false;
+}
+
bool LayerSupportBase::IsStridedSliceSupported(const TensorInfo& input,
const TensorInfo& output,
const StridedSliceDescriptor& descriptor,
const StackDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+ bool IsStandInSupported(const std::vector<const TensorInfo*>& inputs,
+ const std::vector<const TensorInfo*>& outputs,
+ const StandInDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
bool IsStridedSliceSupported(const TensorInfo& input,
const TensorInfo& output,
const StridedSliceDescriptor& descriptor,
break;
}
+ case LayerType::StandIn:
+ {
+ auto cLayer = boost::polymorphic_downcast<const StandInLayer*>(&layer);
+
+ // Get vector of all inputs.
+ auto getTensorInfoIn = [&dataType](const InputSlot& slot)
+ {
+ return OverrideDataType(slot.GetConnectedOutputSlot()->GetTensorInfo(), dataType);
+ };
+ // Get vector of all outputs.
+ auto getTensorInfoOut = [&dataType](const OutputSlot& slot)
+ {
+ return OverrideDataType(slot.GetTensorInfo(), dataType);
+ };
+ // Materialise the (possibly data-type-overridden) TensorInfos into owned vectors;
+ // the transform iterators return temporaries, so pointers must target these copies.
+ auto beginI = boost::make_transform_iterator(layer.GetInputSlots().begin(), getTensorInfoIn);
+ auto endI = boost::make_transform_iterator(layer.GetInputSlots().end(), getTensorInfoIn);
+ std::vector<TensorInfo> inputs(beginI, endI);
+
+ auto beginO = boost::make_transform_iterator(layer.GetOutputSlots().begin(), getTensorInfoOut);
+ auto endO = boost::make_transform_iterator(layer.GetOutputSlots().end(), getTensorInfoOut);
+ std::vector<TensorInfo> outputs(beginO, endO);
+
+
+ // IsStandInSupported takes vectors of pointers; build pointer views over the
+ // locally-owned vectors above (their lifetime covers the call below).
+ auto getTensorInfoPtr = [](const TensorInfo& info)
+ {
+ return &info;
+ };
+ auto beginPtrI = boost::make_transform_iterator(inputs.begin(), getTensorInfoPtr);
+ auto endPtrI = boost::make_transform_iterator(inputs.end(), getTensorInfoPtr);
+ std::vector<const TensorInfo*> inputPtrs(beginPtrI, endPtrI);
+
+ auto beginPtrO = boost::make_transform_iterator(outputs.begin(), getTensorInfoPtr);
+ auto endPtrO = boost::make_transform_iterator(outputs.end(), getTensorInfoPtr);
+ std::vector<const TensorInfo*> outputPtrs(beginPtrO, endPtrO);
+
+
+ result = layerSupportObject->IsStandInSupported(inputPtrs,
+ outputPtrs,
+ cLayer->GetParameters(),
+ reason);
+ break;
+ }
case LayerType::StridedSlice:
{
auto cLayer = boost::polymorphic_downcast<const StridedSliceLayer*>(&layer);
// Use this version for layers whose constructor takes 2 parameters(descriptor and name).
#define DECLARE_LAYER_POLICY_2_PARAM(name) DECLARE_LAYER_POLICY_CUSTOM_PARAM(name, armnn::name##Descriptor)
+
+// Use this version for layer types that can never have a real workload created
+// for them (e.g. StandIn): MakeDummyWorkload returns an empty pointer. Unused
+// parameter names are commented out with /* */ (a // comment would swallow the
+// trailing line-continuation backslash).
+#define DECLARE_LAYER_POLICY_EXCEPTION(name, descType) \
+template<armnn::DataType DataType> \
+struct LayerTypePolicy<armnn::LayerType::name, DataType> \
+{ \
+ using Type = armnn::name##Layer; \
+ using Desc = descType; \
+ constexpr static const char* NameStr = #name; \
+ \
+ static std::unique_ptr<armnn::IWorkload> MakeDummyWorkload(armnn::IWorkloadFactory* /*factory*/, \
+ unsigned int /*nIn*/, unsigned int /*nOut*/) \
+ { \
+ return std::unique_ptr<armnn::IWorkload>(); \
+ } \
+};
+
+#define DECLARE_LAYER_POLICY_EXCEPTION_1_PARAM(name) DECLARE_LAYER_POLICY_EXCEPTION(name, void)
+#define DECLARE_LAYER_POLICY_EXCEPTION_2_PARAM(name) DECLARE_LAYER_POLICY_EXCEPTION(name, armnn::name##Descriptor)
+
// Layer policy template.
template<armnn::LayerType Type, armnn::DataType DataType>
struct LayerTypePolicy;
DECLARE_LAYER_POLICY_2_PARAM(Stack)
+DECLARE_LAYER_POLICY_EXCEPTION_2_PARAM(StandIn)
+
DECLARE_LAYER_POLICY_2_PARAM(StridedSlice)
DECLARE_LAYER_POLICY_1_PARAM(Subtraction)