src/armnn/layers/SpaceToDepthLayer.cpp \
src/armnn/layers/SoftmaxLayer.cpp \
src/armnn/layers/SplitterLayer.cpp \
+ src/armnn/layers/StackLayer.cpp \
src/armnn/layers/StridedSliceLayer.cpp \
src/armnn/layers/SubtractionLayer.cpp \
src/armnn/layers/SwitchLayer.cpp \
src/armnn/layers/SpaceToDepthLayer.cpp
src/armnn/layers/SplitterLayer.hpp
src/armnn/layers/SplitterLayer.cpp
+ src/armnn/layers/StackLayer.hpp
+ src/armnn/layers/StackLayer.cpp
src/armnn/layers/StridedSliceLayer.cpp
src/armnn/layers/StridedSliceLayer.hpp
src/armnn/layers/SubtractionLayer.cpp
float m_PadValue;
};
+/// A StackDescriptor for the StackLayer.
+/// Describes stacking m_NumInputs tensors, each of shape m_InputShape, along a new
+/// dimension inserted at m_Axis; the output therefore has one more dimension than the inputs.
+struct StackDescriptor
+{
+    StackDescriptor()
+        : m_Axis(0)
+        , m_NumInputs(0)
+        , m_InputShape()
+    {}
+
+    StackDescriptor(uint32_t axis, uint32_t numInputs, const TensorShape& inputShape)
+        : m_Axis(axis)
+        , m_NumInputs(numInputs)
+        , m_InputShape(inputShape)
+    {}
+
+    /// 0-based axis along which to stack the input tensors.
+    /// May range from 0 to the input rank inclusive, since the output gains a dimension.
+    uint32_t m_Axis;
+    /// Number of input tensors.
+    uint32_t m_NumInputs;
+    /// Required shape of all input tensors.
+    TensorShape m_InputShape;
+};
+
/// A StridedSliceDescriptor for the StridedSliceLayer.
struct StridedSliceDescriptor
{
struct SoftmaxDescriptor;
struct SpaceToBatchNdDescriptor;
struct SpaceToDepthDescriptor;
+struct StackDescriptor;
struct StridedSliceDescriptor;
struct TransposeConvolution2dDescriptor;
struct ViewsDescriptor;
const ViewsDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
+ virtual bool IsStackSupported(const std::vector<const TensorInfo*> inputs,
+ const TensorInfo& output,
+ const StackDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
+
virtual bool IsStridedSliceSupported(const TensorInfo& input,
const TensorInfo& output,
const StridedSliceDescriptor& descriptor,
const ViewsDescriptor& splitterDescriptor,
const char* name = nullptr) = 0;
+ /// Function a stack layer should call back to when its Accept(ILayerVisitor&) function is invoked.
+ /// @param layer - pointer to the layer which is calling back to this visit function.
+ /// @param stackDescriptor - Parameters for the stack operation.
+ /// @param name - Optional name for the layer.
+ virtual void VisitStackLayer(const IConnectableLayer* layer,
+ const StackDescriptor& stackDescriptor,
+ const char* name = nullptr) = 0;
+
/// Function a strided slice layer should call back to when its Accept(ILayerVisitor&) function is invoked.
/// @param layer - pointer to the layer which is calling back to this visit function.
/// @param stridedSliceDescriptor - Parameters for the strided slice operation.
const Optional<ConstTensor>& biases,
const char* name = nullptr) = 0;
+ /// Adds a stack layer to the network.
+ /// @param descriptor - Description of the stack layer.
+ /// @param name - Optional name for the layer.
+ /// @return - Interface for configuring the layer.
+ virtual IConnectableLayer* AddStackLayer(const StackDescriptor& descriptor,
+ const char* name = nullptr) = 0;
+
virtual void Accept(ILayerVisitor& visitor) const = 0;
protected:
size_t reasonIfUnsupportedMaxLength = 1024);
/// Deprecated in favor of IBackend and ILayerSupport interfaces
+bool IsStackSupported(const BackendId& backend,
+ const std::vector<const TensorInfo*> inputs,
+ const TensorInfo& output,
+ const StackDescriptor& descriptor,
+ char* reasonIfUnsupported = nullptr,
+ size_t reasonIfUnsupportedMaxLength = 1024);
+
+/// Deprecated in favor of IBackend and ILayerSupport interfaces
bool IsStridedSliceSupported(const BackendId& backend,
const TensorInfo& input,
const TensorInfo& output,
const ViewsDescriptor&,
const char*) override { DefaultPolicy::Apply(__func__); }
+ void VisitStackLayer(const IConnectableLayer*,
+ const StackDescriptor&,
+ const char*) override { DefaultPolicy::Apply(__func__); }
+
void VisitStridedSliceLayer(const IConnectableLayer*,
const StridedSliceDescriptor&,
const char*) override { DefaultPolicy::Apply(__func__); }
SpaceToBatchNd,
SpaceToDepth,
Splitter,
+ Stack,
StridedSlice,
Subtraction,
Switch,
#include "layers/SpaceToBatchNdLayer.hpp"
#include "layers/SpaceToDepthLayer.hpp"
#include "layers/SplitterLayer.hpp"
+#include "layers/StackLayer.hpp"
#include "layers/StridedSliceLayer.hpp"
#include "layers/SubtractionLayer.hpp"
#include "layers/SwitchLayer.hpp"
DECLARE_LAYER(SpaceToBatchNd)
DECLARE_LAYER(SpaceToDepth)
DECLARE_LAYER(Splitter)
+DECLARE_LAYER(Stack)
DECLARE_LAYER(StridedSlice)
DECLARE_LAYER(Subtraction)
DECLARE_LAYER(Switch)
return layer;
}
+// Creates a StackLayer in the network's graph; see INetwork::AddStackLayer for the contract.
+IConnectableLayer* Network::AddStackLayer(const StackDescriptor& stackDescriptor,
+                                          const char* name)
+{
+    return m_Graph->AddLayer<StackLayer>(stackDescriptor, name);
+}
+
void Network::Accept(ILayerVisitor& visitor) const
{
for (auto layer : GetGraph())
const Optional<ConstTensor>& biases,
const char* name = nullptr) override;
+ IConnectableLayer* AddStackLayer(const StackDescriptor& stackDescriptor,
+ const char* name = nullptr) override;
+
void Accept(ILayerVisitor& visitor) const override;
private:
--- /dev/null
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+#include "StackLayer.hpp"
+#include "LayerCloneBase.hpp"
+
+#include <armnn/TypesUtils.hpp>
+#include <backendsCommon/WorkloadData.hpp>
+#include <backendsCommon/WorkloadFactory.hpp>
+
+#include <queue>
+
+namespace armnn
+{
+
+// One input slot per stacked tensor (param.m_NumInputs), exactly one output slot.
+StackLayer::StackLayer(const StackDescriptor& param, const char* name)
+    : LayerWithParameters(param.m_NumInputs, 1, LayerType::Stack, param, name)
+{
+}
+
+std::unique_ptr<IWorkload> StackLayer::CreateWorkload(const Graph& graph, const IWorkloadFactory& factory) const
+{
+    StackQueueDescriptor descriptor; // Parameters and tensor infos are filled in by PrepInfoAndDesc.
+    return factory.CreateStack(descriptor, PrepInfoAndDesc(descriptor, graph));
+}
+
+// Creates a copy of this layer (same descriptor and name) in the given graph.
+StackLayer* StackLayer::Clone(Graph& graph) const
+{
+    return CloneBase<StackLayer>(graph, m_Param, GetName());
+}
+
+std::vector<TensorShape> StackLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
+{
+    // Note: inputShapes is not consulted; the output shape is derived entirely from
+    // the descriptor's m_InputShape, m_NumInputs and m_Axis.
+    const TensorShape& inputShape = m_Param.m_InputShape;
+    const unsigned int inputNumDimensions = inputShape.GetNumDimensions();
+    const unsigned int axis = m_Param.m_Axis;
+
+    // The output has one more dimension than the inputs, so axis may legitimately
+    // equal inputNumDimensions (stacking as the new innermost dimension).
+    BOOST_ASSERT(axis <= inputNumDimensions);
+
+    // Use std::vector rather than a variable-length array: VLAs are a non-standard
+    // C++ extension and inputNumDimensions is not a compile-time constant.
+    std::vector<unsigned int> dimensionSizes(inputNumDimensions + 1, 0);
+    for (unsigned int i = 0; i < axis; ++i)
+    {
+        dimensionSizes[i] = inputShape[i];
+    }
+
+    // The new dimension at 'axis' has one entry per stacked input.
+    dimensionSizes[axis] = m_Param.m_NumInputs;
+
+    // The remaining input dimensions are shifted one position to the right.
+    for (unsigned int i = axis + 1; i < inputNumDimensions + 1; ++i)
+    {
+        dimensionSizes[i] = inputShape[i-1];
+    }
+
+    TensorShape targetShape = TensorShape(inputNumDimensions + 1, dimensionSizes.data());
+
+    return std::vector<TensorShape>({ targetShape });
+}
+
+void StackLayer::ValidateTensorShapesFromInputs()
+{
+    // The number of connected input slots must match the descriptor.
+    ConditionalThrowIfNotEqual<LayerValidationException>(
+        "StackLayer: Num Input Slots must match Num Inputs.",
+        m_Param.m_NumInputs,
+        GetNumInputSlots());
+
+    VerifyLayerConnections(m_Param.m_NumInputs, CHECK_LOCATION());
+
+    // Constructs and validates input shapes: every input must match the shape
+    // defined in the descriptor.
+    std::vector<TensorShape> inputShapes;
+    for (unsigned int i = 0; i < GetNumInputSlots(); ++i)
+    {
+        TensorShape inputShape = GetInputSlot(i).GetConnection()->GetTensorInfo().GetShape();
+        if (inputShape != m_Param.m_InputShape)
+        {
+            // Fixed copy-paste error: the message previously said "ConcatLayer".
+            throw LayerValidationException("StackLayer: TensorShape set on InputSlot[" +
+                                           std::to_string(i) +
+                                           "] does not match defined input shape");
+        }
+        inputShapes.push_back(inputShape);
+    }
+
+    auto inferredShapes = InferOutputShapes(inputShapes);
+
+    BOOST_ASSERT(inferredShapes.size() == 1);
+
+    ConditionalThrowIfNotEqual<LayerValidationException>(
+        "StackLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.",
+        GetOutputSlot(0).GetTensorInfo().GetShape(),
+        inferredShapes[0]);
+}
+
+// Visitor pattern entry point: forwards this layer, its parameters and name to the visitor.
+void StackLayer::Accept(ILayerVisitor& visitor) const
+{
+    visitor.VisitStackLayer(this, GetParameters(), GetName());
+}
+
+} // namespace armnn
--- /dev/null
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+#pragma once
+
+#include "LayerWithParameters.hpp"
+
+namespace armnn
+{
+
+/// This layer represents a stack operation.
+class StackLayer : public LayerWithParameters<StackDescriptor>
+{
+public:
+    /// Makes a workload for the Stack type.
+    /// @param [in] graph The graph where this layer can be found.
+    /// @param [in] factory The workload factory which will create the workload.
+    /// @return A pointer to the created workload, or nullptr if not created.
+    virtual std::unique_ptr<IWorkload> CreateWorkload(const Graph& graph,
+                                                      const IWorkloadFactory& factory) const override;
+
+    /// Creates a dynamically-allocated copy of this layer.
+    /// @param [in] graph The graph into which this layer is being cloned.
+    StackLayer* Clone(Graph& graph) const override;
+
+    /// Check if the input tensor shape(s)
+    /// will lead to a valid configuration of @ref StackLayer.
+    void ValidateTensorShapesFromInputs() override;
+
+    /// Infers the output shape from the descriptor's input shape, number of inputs
+    /// and stack axis; the supplied input shapes are not used directly.
+    /// @param [in] inputShapes The input shapes layer has.
+    /// @return A vector to the inferred output shape.
+    std::vector<TensorShape> InferOutputShapes(const std::vector<TensorShape>& inputShapes) const override;
+
+    /// Calls VisitStackLayer on the given visitor.
+    void Accept(ILayerVisitor& visitor) const override;
+
+protected:
+    /// Constructor to create a StackLayer.
+    /// @param [in] param StackDescriptor to configure the stack operation.
+    /// @param [in] name Optional name for the layer.
+    StackLayer(const StackDescriptor& param, const char* name);
+
+    /// Default destructor
+    ~StackLayer() = default;
+};
+
+} // namespace
ARMNN_SIMPLE_TEST_CASE(PreluValidateTensorShapesFromInputsMatch, PreluValidateTensorShapesFromInputsMatchTest)
ARMNN_SIMPLE_TEST_CASE(PreluValidateTensorShapesFromInputsNoMatch, PreluValidateTensorShapesFromInputsNoMatchTest)
+// Stack
+ARMNN_SIMPLE_TEST_CASE(StackInferOutputShapeFromInputsMatch, StackInferOutputShapeFromInputsMatchTest)
+ARMNN_SIMPLE_TEST_CASE(StackInferOutputShapeFromInputsNoMatch, StackInferOutputShapeFromInputsNoMatchTest)
+ARMNN_SIMPLE_TEST_CASE(StackValidateTensorShapesFromInputsMatch, StackValidateTensorShapesFromInputsMatchTest)
+ARMNN_SIMPLE_TEST_CASE(StackValidateTensorShapesFromInputsNoMatch, StackValidateTensorShapesFromInputsNoMatchTest)
+
BOOST_AUTO_TEST_SUITE_END()
#include <layers/BatchToSpaceNdLayer.hpp>
#include <layers/SpaceToDepthLayer.hpp>
#include <layers/PreluLayer.hpp>
+#include <layers/StackLayer.hpp>
#include <boost/algorithm/string.hpp>
#include <boost/test/unit_test.hpp>
// Graph::InferTensorInfos calls Layer::ValidateTensorShapesFromInputs
BOOST_CHECK_THROW(graph.InferTensorInfos(), armnn::LayerValidationException);
}
+
+// Builds a throwaway graph containing a single StackLayer and returns (via outputShapes)
+// the shapes InferOutputShapes derives for the given descriptor and inputs.
+// The descriptor is taken by const reference to avoid copying the contained TensorShape.
+void StackInferOutputShapeImpl(const armnn::StackDescriptor& descriptor,
+                               const std::vector<armnn::TensorShape>& inputShapes,
+                               std::vector<armnn::TensorShape>& outputShapes)
+{
+    armnn::Graph graph;
+    armnn::StackLayer* const stackLayer = graph.AddLayer<armnn::StackLayer>(descriptor, "stack");
+    outputShapes = stackLayer->InferOutputShapes(inputShapes);
+}
+
+// Checks that stacking three [4,2] tensors along axis 1 infers a [4,3,2] output.
+void StackInferOutputShapeFromInputsMatchTest()
+{
+    // Removed unused local 'armnn::Graph graph;' — StackInferOutputShapeImpl builds its own graph.
+    armnn::StackDescriptor descriptor;
+    descriptor.m_Axis = 1;
+    descriptor.m_NumInputs = 3;
+    descriptor.m_InputShape = armnn::TensorShape
+    (
+        { 4, 2 } // Defined input shape
+    );
+
+    const std::vector<armnn::TensorShape> inputShapes
+    {
+        { 4, 2 }, // Actual input shapes
+        { 4, 2 },
+        { 4, 2 }
+    };
+
+    std::vector<armnn::TensorShape> outputShapes;
+    BOOST_CHECK_NO_THROW(StackInferOutputShapeImpl(descriptor, inputShapes, outputShapes));
+
+    armnn::TensorShape expectedOutputShape
+    (
+        { 4, 3, 2 }
+    );
+    BOOST_CHECK(outputShapes.size() == 1);
+    BOOST_CHECK(outputShapes[0] == expectedOutputShape);
+}
+
+// Checks that shape inference uses only the descriptor: a mismatching actual input
+// shape does not change the inferred output shape.
+void StackInferOutputShapeFromInputsNoMatchTest()
+{
+    // Removed unused local 'armnn::Graph graph;' — StackInferOutputShapeImpl builds its own graph.
+    armnn::StackDescriptor descriptor;
+    descriptor.m_Axis = 1;
+    descriptor.m_NumInputs = 3;
+    descriptor.m_InputShape = armnn::TensorShape
+    (
+        { 4, 2 } // Defined input shape
+    );
+
+    const std::vector<armnn::TensorShape> inputShapes
+    {
+        { 4, 2 }, // Actual input shapes
+        { 4, 5 }, // Incorrectly shaped input tensor
+        { 4, 2 }
+    };
+
+    // Output shape is inferred from the descriptor, so should still be correct despite mismatching input shapes
+    std::vector<armnn::TensorShape> outputShapes;
+    BOOST_CHECK_NO_THROW(StackInferOutputShapeImpl(descriptor, inputShapes, outputShapes));
+
+    armnn::TensorShape expectedOutputShape
+    (
+        { 4, 3, 2 }
+    );
+    BOOST_CHECK(outputShapes.size() == 1);
+    BOOST_CHECK(outputShapes[0] == expectedOutputShape);
+}
+
+// Builds a small test graph: one InputLayer per entry of inputShapes, wired into a
+// StackLayer configured by 'descriptor', wired into a single OutputLayer, with
+// Float32 tensor infos set on every connection.
+void CreateStackLayerHelper(armnn::Graph& graph,
+                            const armnn::StackDescriptor& descriptor,
+                            const std::vector<armnn::TensorShape>& inputShapes,
+                            const armnn::TensorShape& outputShape)
+{
+    // Creates the Stack layer
+    armnn::Layer* const stackLayer = graph.AddLayer<armnn::StackLayer>(descriptor, "stack");
+
+    // Creates extra layers
+    std::vector<armnn::Layer*> inputs;
+    for (unsigned int i=0; i<inputShapes.size(); ++i)
+    {
+        inputs.push_back(graph.AddLayer<armnn::InputLayer>(static_cast<int>(i), "input"));
+    }
+    armnn::Layer* const output = graph.AddLayer<armnn::OutputLayer>(0, "output");
+
+    // Connects up
+    std::vector<armnn::TensorInfo> inputTensorInfos;
+    for (unsigned int i=0; i<inputs.size(); ++i)
+    {
+        inputTensorInfos.push_back(armnn::TensorInfo(inputShapes[i], armnn::DataType::Float32));
+    }
+    armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Float32);
+
+    // Each input layer feeds input slot i of the stack layer.
+    for (unsigned int i=0; i<inputs.size(); ++i)
+    {
+        Connect(inputs[i], stackLayer, inputTensorInfos[i], 0, i);
+    }
+    Connect(stackLayer, output, outputTensorInfo, 0, 0);
+}
+
+// Validation succeeds when all actual input shapes match the descriptor's defined shape.
+void StackValidateTensorShapesFromInputsMatchTest()
+{
+    armnn::Graph graph;
+
+    armnn::StackDescriptor descriptor;
+    descriptor.m_Axis = 0;
+    descriptor.m_NumInputs = 3;
+    descriptor.m_InputShape = armnn::TensorShape
+    (
+        { 2, 5 } // Defined input shape
+    );
+
+    const std::vector<armnn::TensorShape> inputShapes
+    {
+        { 2, 5 }, // Actual input shapes
+        { 2, 5 },
+        { 2, 5 }
+    };
+
+    // Creates the Stack layer; stacking three [2,5] tensors along axis 0 yields [3,2,5]
+    CreateStackLayerHelper(graph, descriptor, inputShapes, { 3, 2, 5 });
+
+    // Graph::InferTensorInfos calls Layer::ValidateTensorShapesFromInputs
+    BOOST_CHECK_NO_THROW(graph.InferTensorInfos());
+}
+
+// Validation throws when one actual input shape differs from the descriptor's defined shape.
+void StackValidateTensorShapesFromInputsNoMatchTest()
+{
+    armnn::Graph graph;
+
+    armnn::StackDescriptor descriptor;
+    descriptor.m_Axis = 0;
+    descriptor.m_NumInputs = 3;
+    descriptor.m_InputShape = armnn::TensorShape
+    (
+        { 2, 5 } // Defined input shape
+    );
+
+    const std::vector<armnn::TensorShape> inputShapes
+    {
+        { 2, 5 }, // Actual input shapes
+        { 2, 2 }, // Incorrectly shaped input tensor
+        { 2, 5 }
+    };
+
+    // Creates the Stack layer
+    CreateStackLayerHelper(graph, descriptor, inputShapes, { 3, 2, 5 });
+
+    // Graph::InferTensorInfos calls Layer::ValidateTensorShapesFromInputs,
+    // which must reject the mismatching input at slot 1.
+    BOOST_CHECK_THROW(graph.InferTensorInfos(), armnn::LayerValidationException);
+}
CreateAnyLayer(flatBufferLayer.o, serializer::Layer::Layer_NormalizationLayer);
}
+// Stack layer serialization is not implemented yet: fail loudly rather than
+// silently dropping the layer from the serialized network.
+void SerializerVisitor::VisitStackLayer(const armnn::IConnectableLayer* layer,
+                                        const armnn::StackDescriptor& stackDescriptor,
+                                        const char* name)
+{
+    throw UnimplementedException("SerializerVisitor::VisitStackLayer not yet implemented");
+}
+
void SerializerVisitor::VisitStridedSliceLayer(const armnn::IConnectableLayer* layer,
const armnn::StridedSliceDescriptor& stridedSliceDescriptor,
const char* name)
const armnn::ViewsDescriptor& viewsDescriptor,
const char* name = nullptr) override;
+ void VisitStackLayer(const armnn::IConnectableLayer* layer,
+ const armnn::StackDescriptor& stackDescriptor,
+ const char* name = nullptr) override;
+
void VisitStridedSliceLayer(const armnn::IConnectableLayer* layer,
const armnn::StridedSliceDescriptor& stridedSliceDescriptor,
const char* name = nullptr) override;
return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}
+// Default implementation: reports Stack as unsupported (with a reason); backends
+// that provide a Stack workload override this.
+bool LayerSupportBase::IsStackSupported(const std::vector<const TensorInfo*> inputs,
+                                        const TensorInfo& output,
+                                        const StackDescriptor& descriptor,
+                                        Optional<std::string&> reasonIfUnsupported) const
+{
+    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
+}
+
bool LayerSupportBase::IsStridedSliceSupported(const TensorInfo& input,
const TensorInfo& output,
const StridedSliceDescriptor& descriptor,
const ViewsDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+ bool IsStackSupported(const std::vector<const TensorInfo*> inputs,
+ const TensorInfo& output,
+ const StackDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
bool IsStridedSliceSupported(const TensorInfo& input,
const TensorInfo& output,
const StridedSliceDescriptor& descriptor,
}
//---------------------------------------------------------------
+void StackQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
+{
+    ValidateNumOutputs(workloadInfo, "StackQueueDescriptor", 1);
+
+    // The number of inputs is variable, so it is checked against the descriptor
+    // rather than via ValidateNumInputs.
+    if (m_Parameters.m_NumInputs != workloadInfo.m_InputTensorInfos.size())
+    {
+        throw InvalidArgumentException("StackQueueDescriptor: Must have the defined number of input tensors.");
+    }
+
+    // Guard against an empty input list: the data-type checks below index
+    // m_InputTensorInfos[0] unconditionally.
+    if (workloadInfo.m_InputTensorInfos.empty())
+    {
+        throw InvalidArgumentException("StackQueueDescriptor: Must have at least one input tensor.");
+    }
+
+    // All inputs must have the same shape, which is defined in parameters
+    const TensorShape& inputShape = m_Parameters.m_InputShape;
+    for (unsigned int i = 0; i < workloadInfo.m_InputTensorInfos.size(); ++i)
+    {
+        if (workloadInfo.m_InputTensorInfos[i].GetShape() != inputShape)
+        {
+            throw InvalidArgumentException("StackQueueDescriptor: All input tensor shapes "
+                                           "must match the defined shape.");
+        }
+    }
+
+    // m_Axis is 0-based and may take values from 0 to the number of input dimensions (inclusive),
+    // since the output tensor has an additional dimension.
+    if (m_Parameters.m_Axis > inputShape.GetNumDimensions())
+    {
+        throw InvalidArgumentException("StackQueueDescriptor: Axis may not be greater "
+                                       "than the number of input dimensions.");
+    }
+
+    // Output shape must be as inferred from the input shape:
+    // dimensions before the stack axis are unchanged...
+    const TensorShape& outputShape = workloadInfo.m_OutputTensorInfos[0].GetShape();
+    for (unsigned int i = 0; i < m_Parameters.m_Axis; ++i)
+    {
+        if (outputShape[i] != inputShape[i])
+        {
+            throw InvalidArgumentException("StackQueueDescriptor: Output tensor must "
+                                           "match shape inferred from input tensor.");
+        }
+    }
+
+    // ...the new dimension at the stack axis equals the number of inputs...
+    if (outputShape[m_Parameters.m_Axis] != m_Parameters.m_NumInputs)
+    {
+        throw InvalidArgumentException("StackQueueDescriptor: Output tensor must "
+                                       "match shape inferred from input tensor.");
+    }
+
+    // ...and the remaining input dimensions are shifted one position to the right.
+    for (unsigned int i = m_Parameters.m_Axis + 1; i < inputShape.GetNumDimensions() + 1; ++i)
+    {
+        if (outputShape[i] != inputShape[i-1])
+        {
+            throw InvalidArgumentException("StackQueueDescriptor: Output tensor must "
+                                           "match shape inferred from input tensor.");
+        }
+    }
+
+    // Check the supported data types
+    std::vector<DataType> supportedTypes =
+    {
+        DataType::Float32,
+        DataType::Float16,
+        DataType::Boolean,
+        DataType::Signed32,
+        DataType::QuantisedAsymm8,
+        DataType::QuantisedSymm16
+    };
+
+    ValidateDataTypes(workloadInfo.m_InputTensorInfos[0],
+                      supportedTypes,
+                      "StackQueueDescriptor");
+
+    // All inputs, and the output, must share the data type of input 0.
+    for (unsigned int i = 1; i < workloadInfo.m_InputTensorInfos.size(); ++i)
+    {
+        ValidateTensorDataTypesMatch(workloadInfo.m_InputTensorInfos[0],
+                                     workloadInfo.m_InputTensorInfos[i],
+                                     "StackQueueDescriptor",
+                                     "InputTensor[0]",
+                                     "InputTensor[" + std::to_string(i) + "]");
+    }
+    ValidateTensorDataTypesMatch(workloadInfo.m_InputTensorInfos[0],
+                                 workloadInfo.m_OutputTensorInfos[0],
+                                 "StackQueueDescriptor",
+                                 "InputTensor[0]",
+                                 "OutputTensor[0]");
+}
+
+//---------------------------------------------------------------
void FullyConnectedQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
ValidateNumInputs(workloadInfo, "FullyConnectedQueueDescriptor", 1);
// Deprecated. Use ConcatQueueDescriptor instead
using MergerQueueDescriptor = ConcatQueueDescriptor;
+// Stack layer workload data.
+struct StackQueueDescriptor : QueueDescriptorWithParameters<StackDescriptor>
+{
+ void Validate(const WorkloadInfo& workloadInfo) const;
+};
+
// Activation layer workload data.
struct ActivationQueueDescriptor : QueueDescriptorWithParameters<ActivationDescriptor>
{
reason);
break;
}
+ case LayerType::Stack:
+ {
+ auto cLayer = boost::polymorphic_downcast<const StackLayer*>(&layer);
+
+ // Get vector of all inputs.
+ auto getTensorInfo = [&dataType](const InputSlot& slot)
+ {
+ return OverrideDataType(slot.GetConnectedOutputSlot()->GetTensorInfo(), dataType);
+ };
+ auto beginI = boost::make_transform_iterator(layer.GetInputSlots().begin(), getTensorInfo);
+ auto endI = boost::make_transform_iterator(layer.GetInputSlots().end(), getTensorInfo);
+ std::vector<TensorInfo> inputs(beginI, endI);
+
+ auto getTensorInfoPtr = [](const TensorInfo& info)
+ {
+ return &info;
+ };
+ auto beginPtr = boost::make_transform_iterator(inputs.begin(), getTensorInfoPtr);
+ auto endPtr = boost::make_transform_iterator(inputs.end(), getTensorInfoPtr);
+ std::vector<const TensorInfo*> inputPtrs(beginPtr, endPtr);
+
+ const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
+
+ result = layerSupportObject->IsStackSupported(inputPtrs, output, cLayer->GetParameters(), reason);
+
+ break;
+ }
case LayerType::StridedSlice:
{
auto cLayer = boost::polymorphic_downcast<const StridedSliceLayer*>(&layer);
return std::unique_ptr<IWorkload>();
}
+// Default implementation: returns a null workload, indicating this factory's backend
+// provides no Stack workload; backends override this to supply one.
+std::unique_ptr<IWorkload> IWorkloadFactory::CreateStack(const StackQueueDescriptor& descriptor,
+                                                         const WorkloadInfo& info) const
+{
+    return std::unique_ptr<IWorkload>();
+}
+
std::unique_ptr<IWorkload> IWorkloadFactory::CreateStridedSlice(const StridedSliceQueueDescriptor& descriptor,
const WorkloadInfo& Info) const
{
virtual std::unique_ptr<IWorkload> CreateSplitter(const SplitterQueueDescriptor& descriptor,
const WorkloadInfo& info) const;
+ virtual std::unique_ptr<IWorkload> CreateStack(const StackQueueDescriptor& descriptor,
+ const WorkloadInfo& Info) const;
+
virtual std::unique_ptr<IWorkload> CreateStridedSlice(const StridedSliceQueueDescriptor& descriptor,
const WorkloadInfo& Info) const;
DECLARE_LAYER_POLICY_2_PARAM(Splitter)
+DECLARE_LAYER_POLICY_2_PARAM(Stack)
+
DECLARE_LAYER_POLICY_2_PARAM(StridedSlice)
DECLARE_LAYER_POLICY_1_PARAM(Subtraction)