2 // Copyright © 2017 Arm Ltd. All rights reserved.
3 // SPDX-License-Identifier: MIT
7 #include "LayerFwd.hpp"
9 #include <backendsCommon/OutputHandler.hpp>
10 #include <backendsCommon/WorkloadDataCollector.hpp>
11 #include <backendsCommon/WorkloadInfo.hpp>
12 #include "InternalTypes.hpp"
13 #include "SerializeLayerParameters.hpp"
15 #include <armnn/Types.hpp>
16 #include <armnn/Tensor.hpp>
17 #include <armnn/INetwork.hpp>
27 #include <boost/numeric/conversion/cast.hpp>
28 #include <boost/core/ignore_unused.hpp>
29 #include <boost/cast.hpp>
35 class IWorkloadFactory;
39 class InputSlot final : public IInputSlot
42 explicit InputSlot(Layer& owner, unsigned int slotIndex)
43 : m_OwningLayer(owner)
44 , m_Connection(nullptr)
45 , m_SlotIndex(slotIndex)
50 Layer& GetOwningLayer() const { return m_OwningLayer; }
51 unsigned int GetSlotIndex() const { return m_SlotIndex; }
53 const OutputSlot* GetConnectedOutputSlot() const { return m_Connection; }
54 OutputSlot* GetConnectedOutputSlot() { return m_Connection; }
56 /// Links the slot to an output slot or breaks an existing link if passing nullptr.
57 void SetConnection(OutputSlot* source)
59 if (m_Connection != nullptr && source != nullptr)
61 throw InvalidArgumentException("Tried to connect an output slot to an input slot, "
62 "but the latter already has a connection");
64 m_Connection = source;
67 // Inserts single-output existing layer at this point in the graph.
68 void Insert(Layer& layer);
72 const IOutputSlot* GetConnection() const override;
73 IOutputSlot* GetConnection() override;
77 OutputSlot* m_Connection;
78 const unsigned int m_SlotIndex;
81 class OutputSlot final : public IOutputSlot
84 explicit OutputSlot(Layer& owner, OutputHandler& outputHandler)
85 : m_OwningLayer(owner)
86 , m_OutputHandler(outputHandler)
93 // Coverity fix: DisconnectAll() may throw uncaught exceptions.
96 catch (const std::exception& e)
98 // Coverity fix: BOOST_LOG_TRIVIAL (typically used to report errors) may throw an
99 // exception of type std::length_error.
100 // Using stderr instead in this context as there is no point in nesting try-catch blocks here.
101 std::cerr << "WARNING: An error has occurred when disconnecting all output slots: "
102 << e.what() << std::endl;
106 Layer& GetOwningLayer() const { return m_OwningLayer; }
108 const OutputHandler& GetOutputHandler() const { return m_OutputHandler; }
109 OutputHandler& GetOutputHandler() { return m_OutputHandler; }
111 int Connect(InputSlot& destination);
112 void Disconnect(InputSlot& slot);
114 const std::vector<InputSlot*>& GetConnections() const { return m_Connections; }
116 bool ValidateTensorShape(const TensorShape& shape) const;
118 // Disconnect all conections.
119 void DisconnectAll();
121 /// Moves all connections to another OutputSlot.
122 void MoveAllConnections(OutputSlot& destination);
126 unsigned int GetNumConnections() const override { return boost::numeric_cast<unsigned int>(m_Connections.size()); }
127 const InputSlot* GetConnection(unsigned int index) const override;
128 InputSlot* GetConnection(unsigned int index) override;
130 void SetTensorInfo(const TensorInfo& tensorInfo) override;
131 const TensorInfo& GetTensorInfo() const override;
132 bool IsTensorInfoSet() const override;
134 int Connect(IInputSlot& destination) override
136 return Connect(*boost::polymorphic_downcast<InputSlot*>(&destination));
139 void Disconnect(IInputSlot& slot) override
141 return Disconnect(*boost::polymorphic_downcast<InputSlot*>(&slot));
145 void ValidateConnectionIndex(unsigned int index) const;
147 Layer& m_OwningLayer;
148 OutputHandler& m_OutputHandler;
149 std::vector<InputSlot*> m_Connections;
152 // InputSlot inlines that need OutputSlot declaration.
154 inline InputSlot::~InputSlot()
156 if (m_Connection != nullptr)
160 // Coverity fix: Disconnect() may throw uncaught exceptions.
161 m_Connection->Disconnect(*this);
163 catch (const std::exception& e)
165 // Coverity fix: BOOST_LOG_TRIVIAL (typically used to report errors) may throw an
166 // exception of type std::length_error.
167 // Using stderr instead in this context as there is no point in nesting try-catch blocks here.
168 std::cerr << "WARNING: An error has occurred when disconnecting an input slot: "
169 << e.what() << std::endl;
174 inline const IOutputSlot* InputSlot::GetConnection() const { return GetConnectedOutputSlot(); }
175 inline IOutputSlot* InputSlot::GetConnection() { return GetConnectedOutputSlot(); }
178 class ScopedCpuTensorHandle;
182 using LayerPriority = unsigned int;
184 class Layer : public IConnectableLayer
187 /// @param name - Optional name for the layer (may be nullptr).
188 Layer(unsigned int numInputSlots, unsigned int numOutputSlots, LayerType type, const char* name);
189 Layer(unsigned int numInputSlots, unsigned int numOutputSlots, LayerType type, DataLayout layout, const char* name);
191 const std::string& GetNameStr() const
196 const OutputHandler& GetOutputHandler(unsigned int i = 0) const
198 return m_OutputHandlers[i];
201 OutputHandler& GetOutputHandler(unsigned int i = 0)
203 return const_cast<OutputHandler&>(const_cast<const Layer*>(this)->GetOutputHandler(i));
206 const std::vector<InputSlot>& GetInputSlots() const { return m_InputSlots; }
207 const std::vector<OutputSlot>& GetOutputSlots() const { return m_OutputSlots; }
209 // Allows non-const access to input slots, but don't expose vector (vector size is fixed at layer construction).
210 std::vector<InputSlot>::iterator BeginInputSlots() { return m_InputSlots.begin(); }
211 std::vector<InputSlot>::iterator EndInputSlots() { return m_InputSlots.end(); }
213 // Allows non-const access to output slots, but don't expose vector (vector size is fixed at layer construction).
214 std::vector<OutputSlot>::iterator BeginOutputSlots() { return m_OutputSlots.begin(); }
215 std::vector<OutputSlot>::iterator EndOutputSlots() { return m_OutputSlots.end(); }
217 // Checks whether the outputs of this layer don't have any connection.
218 bool IsOutputUnconnected()
220 unsigned int numConnections = 0;
222 for (auto&& output : GetOutputSlots())
224 numConnections += output.GetNumConnections();
227 return (GetNumOutputSlots() > 0) && (numConnections == 0);
231 void ResetPriority() const;
232 LayerPriority GetPriority() const;
234 LayerType GetType() const { return m_Type; }
236 DataType GetDataType() const;
238 DataLayout GetDataLayout() const { return m_DataLayout; }
240 const BackendId& GetBackendId() const { return m_BackendId; }
241 void SetBackendId(const BackendId& id) { m_BackendId = id; }
245 virtual std::unique_ptr<IWorkload> CreateWorkload(const Graph& graph, const IWorkloadFactory& factory) const = 0;
247 virtual void CreateTensorHandles(Graph& graph, const IWorkloadFactory& factory);
249 /// Creates a dynamically-allocated copy of this layer.
250 /// @param graph - The Graph into which this Layer is being cloned.
251 virtual Layer* Clone(Graph& graph) const = 0;
253 void VerifyLayerConnections(unsigned int expectedConnections, const CheckLocation& location) const;
255 virtual void ValidateTensorShapesFromInputs() = 0;
257 std::vector<TensorShape> InferOutputShapes(const std::vector<TensorShape>& inputShapes) const override;
259 /// Helper to serialize the layer parameters to string.
260 /// (currently used in DotSerializer and company).
261 virtual void SerializeLayerParameters(ParameterStringifyFunction &) const {}
263 // Free up the constant source data
264 virtual void ReleaseConstantData();
266 template<typename Op>
267 void OperateOnConstantTensors(Op op)
269 for (auto constant : GetConstantTensorsByRef())
280 const char* GetName() const override { return m_LayerName.c_str(); }
282 unsigned int GetNumInputSlots() const override { return static_cast<unsigned int>(m_InputSlots.size()); }
283 unsigned int GetNumOutputSlots() const override { return static_cast<unsigned int>(m_OutputSlots.size()); }
285 const InputSlot& GetInputSlot(unsigned int index) const override { return m_InputSlots.at(index); }
286 InputSlot& GetInputSlot(unsigned int index) override { return m_InputSlots.at(index); }
287 const OutputSlot& GetOutputSlot(unsigned int index = 0) const override { return m_OutputSlots.at(index); }
288 OutputSlot& GetOutputSlot(unsigned int index = 0) override { return m_OutputSlots.at(index); }
290 void SetGuid(LayerGuid guid) { m_Guid = guid; }
291 LayerGuid GetGuid() const final { return m_Guid; }
293 void AddRelatedLayerName(const std::string layerName) { m_RelatedLayerNames.emplace_back(layerName); }
295 const std::list<std::string>& GetRelatedLayerNames() { return m_RelatedLayerNames; }
298 // Graph needs access to the virtual destructor.
300 virtual ~Layer() = default;
302 template <typename QueueDescriptor>
303 void CollectQueueDescriptorInputs(QueueDescriptor& descriptor, WorkloadInfo& info, const Graph& graph) const
305 WorkloadDataCollector dataCollector(descriptor.m_Inputs, info.m_InputTensorInfos);
306 CollectWorkloadInputs(dataCollector, graph);
309 template <typename QueueDescriptor>
310 void CollectQueueDescriptorOutputs(QueueDescriptor& descriptor, WorkloadInfo& info, const Graph& graph) const
312 WorkloadDataCollector dataCollector(descriptor.m_Outputs, info.m_OutputTensorInfos);
313 CollectWorkloadOutputs(dataCollector, graph);
316 /// Helper function to reduce duplication in *Layer::CreateWorkload.
317 template <typename QueueDescriptor>
318 WorkloadInfo PrepInfoAndDesc(QueueDescriptor& descriptor, const Graph& graph) const
321 CollectQueueDescriptorInputs(descriptor, info, graph);
322 CollectQueueDescriptorOutputs(descriptor, info, graph);
326 template <typename LayerType, typename ... Params>
327 LayerType* CloneBase(Graph& graph, Params&& ... params) const;
329 // Retrieve the Handles to the constants
330 using ConstantTensors = std::vector<std::reference_wrapper<std::unique_ptr<ScopedCpuTensorHandle>>>;
331 virtual ConstantTensors GetConstantTensorsByRef() {return ConstantTensors(); };
334 void CollectWorkloadInputs(WorkloadDataCollector& dataCollector, const Graph& graph) const;
335 void CollectWorkloadOutputs(WorkloadDataCollector& dataCollector, const Graph& graph) const;
338 std::vector<OutputHandler> m_OutputHandlers;
341 const std::string m_LayerName;
343 std::vector<InputSlot> m_InputSlots;
344 std::vector<OutputSlot> m_OutputSlots;
346 const LayerType m_Type;
347 const DataLayout m_DataLayout;
348 BackendId m_BackendId;
350 /// Used for sorting.
351 mutable LayerPriority m_Priority = 0;
352 mutable bool m_Visiting = false;
356 std::list<std::string> m_RelatedLayerNames;
// A layer user-provided data can be bound to (e.g. inputs, outputs).
class BindableLayer : public Layer
// Constructor: forwards the slot counts, type and name to the Layer base.
// NOTE(review): this span looks truncated — the 'type', 'name' and
// binding-id parameters used below are not visible here; confirm against
// the full file.
BindableLayer(unsigned int numInputSlots,
unsigned int numOutputSlots,
: Layer(numInputSlots, numOutputSlots, type, name)
// Returns the id that user-provided data is bound to this layer with.
LayerBindingId GetBindingId() const { return m_Id; };
// Non-virtual destructor; the base Layer destructor is virtual, so
// deletion through a Layer pointer remains well-defined.
~BindableLayer() = default;