IVGCVSW-1946: Remove armnn/src from the include paths
[platform/upstream/armnn.git] / src / armnn / Network.cpp
1 //
2 // Copyright © 2017 Arm Ltd. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 #include "Network.hpp"
6 #include "Graph.hpp"
7 #include "Layer.hpp"
8 #include "DeviceSpec.hpp"
9 #include "Optimizer.hpp"
10 #include "optimizations/All.hpp"
11
12 #include <backendsCommon/CpuTensorHandle.hpp>
13 #include <backendsCommon/WorkloadFactory.hpp>
14
15 #include <armnn/Exceptions.hpp>
16 #include <armnn/Utils.hpp>
17 #include <armnn/TypesUtils.hpp>
18
19 #include <fcntl.h>
20 #include <algorithm>
21 #include <fstream>
22 #include <memory>
23 #include <vector>
24 #include <algorithm>
25
26 #include <boost/assert.hpp>
27 #include <boost/format.hpp>
28 #include <boost/log/trivial.hpp>
29 #include <boost/numeric/conversion/converter_policies.hpp>
30 #include <boost/cast.hpp>
31
32 namespace armnn
33 {
34
35 armnn::INetwork* INetwork::CreateRaw()
36 {
37     return new Network();
38 }
39
40 armnn::INetworkPtr INetwork::Create()
41 {
42     return INetworkPtr(CreateRaw(), &INetwork::Destroy);
43 }
44
45 void INetwork::Destroy(INetwork* network)
46 {
47     delete boost::polymorphic_downcast<Network*>(network);
48 }
49
50 Status Network::PrintGraph()
51 {
52     m_Graph->Print();
53     return Status::Success;
54 }
55
56 void IOptimizedNetwork::Destroy(IOptimizedNetwork* network)
57 {
58     delete boost::polymorphic_downcast<OptimizedNetwork*>(network);
59 }
60
61 Status OptimizedNetwork::PrintGraph()
62 {
63     m_Graph->Print();
64     return Status::Success;
65 }
66
67 Status OptimizedNetwork::SerializeToDot(std::ostream& stream) const
68 {
69     return m_Graph->SerializeToDot(stream);
70 }
71
72 bool CheckScaleSetOnQuantizedType(Layer* layer, Optional<std::vector<std::string>&> errMessages)
73 {
74     bool noErrors = true;
75     unsigned int numOutputs = layer->GetNumOutputSlots();
76     for (unsigned int i = 0; i < numOutputs; i++) {
77         const OutputSlot &outputSlot = layer->GetOutputSlot(i);
78         const TensorInfo &info = outputSlot.GetTensorInfo();
79         if (DataType::QuantisedAsymm8 == info.GetDataType()) {
80             if (0.f == info.GetQuantizationScale()) {
81                 noErrors = false;
82                 std::stringstream ss;
83                 ss << "ERROR: output " << i << " of layer " << GetLayerTypeAsCString(layer->GetType())
84                    << " (" << layer->GetNameStr() << ") is of type"
85                    << " Quantized 8 bit but its scale parameter has not been set";
86                 BOOST_LOG_TRIVIAL(warning) << ss.str() ;
87                 if (errMessages) {
88                     errMessages.value().push_back(ss.str());
89                 }
90             }
91         }
92     }
93     return noErrors;
94 }
95
// Produces an optimized copy of the given network, ready to be loaded onto a
// runtime:
//   1. runs structural graph optimizations (permute/reshape squashing etc.),
//   2. optionally converts the whole network from FP32 to FP16,
//   3. assigns a backend to every layer, trying backendPreferences in order
//      and inserting FP16<->FP32 conversion layers where that rescues an
//      otherwise-unsupported FP16 layer,
//   4. runs post-assignment passes and inserts inter-backend copy layers.
//
// Returns a null IOptimizedNetworkPtr when no preferred backend is available,
// when a layer cannot be placed on any backend, or when a quantized output is
// missing its scale. Failure reasons are logged and, when errMessages is
// provided, appended to it. Throws InvalidArgumentException when
// backendPreferences is empty.
IOptimizedNetworkPtr Optimize(const INetwork& inNetwork,
                              const std::vector<BackendId>& backendPreferences,
                              const IDeviceSpec& deviceSpec,
                              const OptimizerOptions& options,
                              Optional<std::vector<std::string>&> errMessages)
{
    if (backendPreferences.empty()) {
        throw armnn::InvalidArgumentException("Invoked Optimize with no backends specified");
    }
    // Work on a private copy of the input graph; the caller's network is never mutated.
    const Network& network = *boost::polymorphic_downcast<const Network*>(&inNetwork);
    std::unique_ptr<Graph> graph = std::make_unique<Graph>(network.GetGraph());

    auto optNet = IOptimizedNetworkPtr(new OptimizedNetwork(std::move(graph)), &IOptimizedNetwork::Destroy);

    // Non-owning view of the optimized network used throughout this function.
    OptimizedNetwork* optNetObjPtr = boost::polymorphic_downcast<OptimizedNetwork*>(optNet.get());

    // Perform optimisation passes
    using namespace optimizations;
    Optimizer::Pass(optNetObjPtr->GetGraph(), MakeOptimizations(SquashEqualPermuteSiblings(),
                                                                SquashEqualReshapeSiblings(),
                                                                OptimizeInversePermutes(),
                                                                MovePermuteUp(),
                                                                PermuteAsReshape(),
                                                                OptimizeConsecutiveReshapes()));

    // Infer the tensor infos for all output slots. Throws an exception on failure.
    optNetObjPtr->GetGraph().InferTensorInfos();

    // if Fp32 to Fp16 optimization is set convert Fp32 network to Fp16
    if (options.m_ReduceFp32ToFp16)
    {
        Optimizer::Pass(optNetObjPtr->GetGraph(), MakeOptimizations(Fp32NetworkToFp16Converter()));
    }

    // We know that DeviceSpec should be the only implementation of IDeviceSpec.
    const DeviceSpec& spec = *boost::polymorphic_downcast<const DeviceSpec*>(&deviceSpec);
    auto const& supportedBackends = spec.GetSupportedBackends();

    // determine which of the preferred backends we have available for use
    // and whether we have specified CpuRef as one of those backends.
    bool cpuRefUsed = false;
    std::vector<BackendId> availablePreferredBackends;
    for (const auto& backend : backendPreferences)
    {
        // Check if the backend is in the available backend devices.
        if (supportedBackends.count(backend) > 0)
        {
            availablePreferredBackends.push_back(backend);
            if (backend == armnn::Compute::CpuRef) {
                cpuRefUsed = true;
            }
        }
    }
    if (availablePreferredBackends.empty()) {
        // None of the requested backends exist on this platform: report and bail out.
        std::stringstream failureMsg;
        failureMsg << "ERROR: None of the preferred backends " << backendPreferences
                   << " are supported. Current platform provides " << supportedBackends;
        BOOST_LOG_TRIVIAL(warning) << failureMsg.str();
        if (errMessages) {
            errMessages.value().push_back(failureMsg.str());
        }
        return IOptimizedNetworkPtr(nullptr, &IOptimizedNetwork::Destroy);
    }

    // Shared failure path: logs which layer could not be placed and returns a null network.
    auto ReturnWithError = [&](Layer* layer)
    {
        std::stringstream failureMsg;
        failureMsg << "ERROR: Layer of type " << GetLayerTypeAsCString(layer->GetType())
                   << " is not supported on any preferred backend " << backendPreferences;
        BOOST_LOG_TRIVIAL(warning) << failureMsg.str();
        if (errMessages) {
            errMessages.value().push_back(failureMsg.str());
        }
        return IOptimizedNetworkPtr(nullptr, &IOptimizedNetwork::Destroy);
    };

    // Assign a compute device for all nodes
    bool bErrorFound = false;
    for (auto&& layer : optNetObjPtr->GetGraph())
    {
        DataType dataType = layer->GetDataType();
        std::string reasonIfUnsupported;
        bool found = false;
        if (!CheckScaleSetOnQuantizedType(layer, errMessages))
        {
            // don't bomb immediately, find all the quantized outputs
            // which haven't had a scale set and report them all back.
            bErrorFound = true;
        }
        // Try each available preferred backend in order until one accepts the layer.
        for (const auto& backend : availablePreferredBackends)
        {
            // need to set the compute device on the layer
            // before we can check if it is supported
            layer->SetBackendId(backend);
            if (!IWorkloadFactory::IsLayerSupported(*layer, dataType, reasonIfUnsupported))
            {
                if (dataType == DataType::Float16)
                {
                    // FP16 rescue: if the backend supports the layer in FP32 (and the layer
                    // is not itself a conversion layer), wrap it with FP16<->FP32 converters.
                    if (IWorkloadFactory::IsLayerSupported(*layer, DataType::Float32, reasonIfUnsupported)
                        && layer->GetType() != LayerType::ConvertFp32ToFp16
                        && layer->GetType() != LayerType::ConvertFp16ToFp32)
                    {
                        // Insert FP16 -> FP32 conversion layer before current layer
                        std::vector<ConvertFp16ToFp32Layer*> convertFp16ToFp32Layers =
                            InsertConvertFp16ToFp32LayersBefore(optNetObjPtr->GetGraph(), *layer);

                        // Insert FP32 -> FP16 conversion layer after current layer
                        std::vector<ConvertFp32ToFp16Layer*> convertFp32ToFp16Layers =
                            InsertConvertFp32ToFp16LayersAfter(optNetObjPtr->GetGraph(), *layer);

                        // Assign a supported backend to the newly introduced conversion layers
                        auto AssignFirstSupportedBackend = [&](Layer* layer, BackendId preferredBackend)
                        {
                            bool supportedBackendFound = false;
                            std::string reasonIfUnsupported;

                            // Try preferred backend first
                            layer->SetBackendId(preferredBackend);
                            if (IWorkloadFactory::IsLayerSupported(*layer,
                                                                   EmptyOptional(),
                                                                   reasonIfUnsupported))
                            {
                                supportedBackendFound = true;
                            }
                            else
                            {
                                for (const auto& backend : availablePreferredBackends)
                                {
                                    // Skip preferred backend (we already determined that it is not supported)
                                    if (backend == preferredBackend)
                                    {
                                        continue;
                                    }

                                    layer->SetBackendId(backend);
                                    if (IWorkloadFactory::IsLayerSupported(*layer,
                                                                           EmptyOptional(),
                                                                           reasonIfUnsupported))
                                    {
                                        supportedBackendFound = true;
                                        break;
                                    }
                                }
                            }

                            return supportedBackendFound;
                        };

                        for (ConvertFp16ToFp32Layer* convertLayer : convertFp16ToFp32Layers)
                        {
                            if (!AssignFirstSupportedBackend(convertLayer, backend))
                            {
                                return ReturnWithError(convertLayer);
                            }
                        }

                        for (ConvertFp32ToFp16Layer* convertLayer : convertFp32ToFp16Layers)
                        {
                            if (!AssignFirstSupportedBackend(convertLayer, backend))
                            {
                                return ReturnWithError(convertLayer);
                            }
                        }

                        found = true;
                        break;
                    }
                }
                // Backend rejected the layer: warn and fall through to the next preference.
                std::stringstream warningMsg;
                warningMsg << "WARNING: Layer of type " << GetLayerTypeAsCString(layer->GetType())
                           << " is not supported on requested backend " << layer->GetBackendId().Get()
                           << " for data type " << GetDataTypeName(dataType)
                           << " (reason: " << reasonIfUnsupported
                           << "), falling back to the next backend.";
                BOOST_LOG_TRIVIAL(warning) << warningMsg.str();
                if (errMessages) {
                    errMessages.value().push_back(warningMsg.str());
                }
            }
            else
            {
                found = true;
                break;
            }
        }

        // If the layer is unsupported by any devices, log and return a null network.
        if (!found) {
            // NOTE: if the layer is not an operation queue type AND we have not got CpuRef as a
            //       fallback we should set the compute device on the layer to CpuRef (these are not
            //       available as accelerated operations, or are only available under certain
            //       conditions, currently they comprise MemCopy, Constant, Permute)
            armnn::LayerType layerType = layer->GetType();
            if (!cpuRefUsed && (layerType == armnn::LayerType::MemCopy ||
                                layerType == armnn::LayerType::Constant ||
                                layerType == armnn::LayerType::Permute))
            {
                layer->SetBackendId(armnn::Compute::CpuRef);
            }
            else
            {
                return ReturnWithError(layer);
            }
        }
    }
    if (bErrorFound)
    {
        // Quantization-scale problems detected earlier: fail after the full scan.
        return IOptimizedNetworkPtr(nullptr, &IOptimizedNetwork::Destroy);
    }

    Optimizer::Pass(optNetObjPtr->GetGraph(), MakeOptimizations(OptimizeInverseConversionsFp16(),
                                                                OptimizeInverseConversionsFp32()));

    // Insert copy layers between layers that ended up on different backends.
    optNetObjPtr->GetGraph().AddCopyLayers();

    // Convert constants
    Optimizer::Pass(optNetObjPtr->GetGraph(), MakeOptimizations(ConvertConstantsFloatToHalf()));
    Optimizer::Pass(optNetObjPtr->GetGraph(), MakeOptimizations(ConvertConstantsHalfToFloat()));

    return optNet;
}
317
318
319 Network::Network()
320 : m_Graph(std::make_unique<Graph>())
321 {
322 }
323
324 Network::~Network()
325 {
326 }
327
328 IConnectableLayer* Network::AddInputLayer(LayerBindingId id, const char* name)
329 {
330     return m_Graph->AddLayer<InputLayer>(id, name);
331 }
332
333 IConnectableLayer* Network::AddFullyConnectedLayerImpl(const FullyConnectedDescriptor& fullyConnectedDescriptor,
334                                                        const ConstTensor& weights,
335                                                        const ConstTensor* biases,
336                                                        const char* name)
337 {
338     if (fullyConnectedDescriptor.m_BiasEnabled && (biases == nullptr))
339     {
340         throw InvalidArgumentException("AddFullyConnectedLayer: biases cannot be NULL");
341     }
342
343     const auto layer = m_Graph->AddLayer<FullyConnectedLayer>(fullyConnectedDescriptor, name);
344
345     layer->m_Weight = std::make_unique<ScopedCpuTensorHandle>(weights);
346
347     if (fullyConnectedDescriptor.m_BiasEnabled)
348     {
349         layer->m_Bias = std::make_unique<ScopedCpuTensorHandle>(*biases);
350     }
351
352     return layer;
353 }
354
355 IConnectableLayer* Network::AddFullyConnectedLayer(const FullyConnectedDescriptor& fullyConnectedDescriptor,
356                                                    const ConstTensor& weights,
357                                                    const char* name)
358 {
359     return AddFullyConnectedLayerImpl(fullyConnectedDescriptor, weights, nullptr, name);
360 }
361
362 IConnectableLayer* Network::AddFullyConnectedLayer(const FullyConnectedDescriptor& fullyConnectedDescriptor,
363                                                    const ConstTensor& weights,
364                                                    const ConstTensor& biases,
365                                                    const char* name)
366 {
367     return AddFullyConnectedLayerImpl(fullyConnectedDescriptor, weights, &biases, name);
368 }
369
370 IConnectableLayer* Network::AddConvolution2dLayerImpl(const Convolution2dDescriptor& convolution2dDescriptor,
371                                                       const ConstTensor& weights,
372                                                       const ConstTensor* biases,
373                                                       const char* name)
374 {
375     if (convolution2dDescriptor.m_BiasEnabled && (biases == nullptr))
376     {
377         throw InvalidArgumentException("AddConvolution2dLayer: biases cannot be NULL");
378     }
379
380     const auto layer = m_Graph->AddLayer<Convolution2dLayer>(convolution2dDescriptor, name);
381
382     layer->m_Weight = std::make_unique<ScopedCpuTensorHandle>(weights);
383
384     if (convolution2dDescriptor.m_BiasEnabled)
385     {
386         layer->m_Bias = std::make_unique<ScopedCpuTensorHandle>(*biases);
387     }
388
389     return layer;
390 }
391
392 IConnectableLayer* Network::AddConvolution2dLayer(const Convolution2dDescriptor& convolution2dDescriptor,
393                                                   const ConstTensor& weights,
394                                                   const char* name)
395 {
396     return AddConvolution2dLayerImpl(convolution2dDescriptor, weights, nullptr, name);
397 }
398 IConnectableLayer* Network::AddConvolution2dLayer(const Convolution2dDescriptor& convolution2dDescriptor,
399                                                   const ConstTensor& weights,
400                                                   const ConstTensor& biases,
401                                                   const char* name)
402 {
403     return AddConvolution2dLayerImpl(convolution2dDescriptor, weights, &biases, name);
404 }
405
406 IConnectableLayer* Network::AddDepthwiseConvolution2dLayerImpl(
407     const DepthwiseConvolution2dDescriptor& convolution2dDescriptor,
408     const ConstTensor& weights,
409     const ConstTensor* biases,
410     const char* name)
411 {
412     if (convolution2dDescriptor.m_BiasEnabled && (biases == nullptr))
413     {
414         throw InvalidArgumentException("AddDepthwiseConvolution2dLayer: biases cannot be NULL");
415     }
416
417     const auto layer = m_Graph->AddLayer<DepthwiseConvolution2dLayer>(convolution2dDescriptor,
418             name);
419
420     layer->m_Weight = std::make_unique<ScopedCpuTensorHandle>(weights);
421
422     if (convolution2dDescriptor.m_BiasEnabled)
423     {
424         layer->m_Bias = std::make_unique<ScopedCpuTensorHandle>(*biases);
425     }
426
427     return layer;
428 }
429
430 IConnectableLayer* Network::AddDepthwiseConvolution2dLayer(
431     const DepthwiseConvolution2dDescriptor& convolution2dDescriptor,
432     const ConstTensor& weights,
433     const char* name)
434 {
435     return AddDepthwiseConvolution2dLayerImpl(convolution2dDescriptor, weights, nullptr, name);
436 }
437 IConnectableLayer* Network::AddDepthwiseConvolution2dLayer(
438     const DepthwiseConvolution2dDescriptor& convolution2dDescriptor,
439     const ConstTensor& weights,
440     const ConstTensor& biases,
441     const char* name)
442 {
443     return AddDepthwiseConvolution2dLayerImpl(convolution2dDescriptor, weights, &biases, name);
444 }
445
446 IConnectableLayer* Network::AddPermuteLayer(const PermuteDescriptor& permuteDescriptor,
447                                             const char* name)
448 {
449     return m_Graph->AddLayer<PermuteLayer>(permuteDescriptor, name);
450 }
451
452 IConnectableLayer* Network::AddPooling2dLayer(const Pooling2dDescriptor& pooling2dDescriptor,
453     const char* name)
454 {
455     return m_Graph->AddLayer<Pooling2dLayer>(pooling2dDescriptor, name);
456 }
457
458 IConnectableLayer* Network::AddActivationLayer(const ActivationDescriptor& activationDescriptor,
459     const char* name)
460 {
461     return m_Graph->AddLayer<ActivationLayer>(activationDescriptor, name);
462 }
463
464 IConnectableLayer* Network::AddNormalizationLayer(const NormalizationDescriptor&
465 normalizationDescriptor,
466     const char* name)
467 {
468     return m_Graph->AddLayer<NormalizationLayer>(normalizationDescriptor, name);
469 }
470
471 IConnectableLayer* Network::AddSoftmaxLayer(const SoftmaxDescriptor& softmaxDescriptor,
472     const char* name)
473 {
474     return m_Graph->AddLayer<SoftmaxLayer>(softmaxDescriptor, name);
475 }
476
477 IConnectableLayer* Network::AddSplitterLayer(const ViewsDescriptor& splitterDescriptor,
478     const char* name)
479 {
480     return m_Graph->AddLayer<SplitterLayer>(splitterDescriptor, name);
481 }
482
483 IConnectableLayer* Network::AddMergerLayer(const OriginsDescriptor& mergerDescriptor,
484     const char* name)
485 {
486     return m_Graph->AddLayer<MergerLayer>(mergerDescriptor, name);
487 }
488
489 IConnectableLayer* Network::AddAdditionLayer(const char* name)
490 {
491     return m_Graph->AddLayer<AdditionLayer>(name);
492 }
493
494 IConnectableLayer* Network::AddMultiplicationLayer(const char* name)
495 {
496     return m_Graph->AddLayer<MultiplicationLayer>(name);
497 }
498
499 IConnectableLayer* Network::AddOutputLayer(LayerBindingId id, const char* name)
500 {
501     return m_Graph->AddLayer<OutputLayer>(id, name);
502 }
503
504 IConnectableLayer* Network::AddBatchNormalizationLayer(const BatchNormalizationDescriptor& desc,
505                                                        const ConstTensor&                  mean,
506                                                        const ConstTensor&                  variance,
507                                                        const ConstTensor&                  beta,
508                                                        const ConstTensor&                  gamma,
509                                                        const char*                         name)
510 {
511     const auto layer = m_Graph->AddLayer<BatchNormalizationLayer>(desc, name);
512
513     layer->m_Mean = std::make_unique<ScopedCpuTensorHandle>(mean);
514     layer->m_Variance = std::make_unique<ScopedCpuTensorHandle>(variance);
515     layer->m_Beta = std::make_unique<ScopedCpuTensorHandle>(beta);
516     layer->m_Gamma = std::make_unique<ScopedCpuTensorHandle>(gamma);
517
518     return layer;
519 }
520
521 IConnectableLayer* Network::AddResizeBilinearLayer(const ResizeBilinearDescriptor&
522 resizeDescriptor, const char* name)
523 {
524     return m_Graph->AddLayer<ResizeBilinearLayer>(resizeDescriptor,name);
525 }
526
527 IConnectableLayer* Network::AddL2NormalizationLayer(const L2NormalizationDescriptor& desc,
528                                                     const char* name)
529 {
530     return m_Graph->AddLayer<L2NormalizationLayer>(desc, name);
531 }
532
533 IConnectableLayer* Network::AddConstantLayer(const ConstTensor& input, const char* name)
534 {
535     auto layer = m_Graph->AddLayer<ConstantLayer>(name);
536
537     layer->m_LayerOutput = std::make_unique<ScopedCpuTensorHandle>(input);
538
539     return layer;
540 }
541
542 IConnectableLayer* Network::AddReshapeLayer(const ReshapeDescriptor& reshapeDescriptor,
543                                             const char* name)
544 {
545     return m_Graph->AddLayer<ReshapeLayer>(reshapeDescriptor, name);
546 }
547
548 IConnectableLayer* Network::AddSpaceToBatchNdLayer(const SpaceToBatchNdDescriptor& spaceToBatchNdDescriptor,
549                                                    const char* name)
550 {
551     return m_Graph->AddLayer<SpaceToBatchNdLayer>(spaceToBatchNdDescriptor, name);
552 }
553
554 IConnectableLayer* Network::AddFloorLayer(const char* name)
555 {
556     return m_Graph->AddLayer<FloorLayer>(name);
557 }
558
// Appends an LSTM layer to the graph and copies the supplied weight/bias
// tensors into it. The basic parameters are always required (dereferenced
// unconditionally below — callers must provide them); the CIFG, projection
// and peephole parameter groups are consumed only when the corresponding
// descriptor flag enables them, and missing mandatory tensors in an enabled
// group raise InvalidArgumentException.
IConnectableLayer* Network::AddLstmLayer(const LstmDescriptor&  descriptor,
                                         const LstmInputParams& params,
                                         const char* name)
{
    const auto layer = m_Graph->AddLayer<LstmLayer>(descriptor, name);

    //Lstm Basic Parameters
    layer->m_BasicParameters.m_InputToForgetWeights =
        std::make_unique<ScopedCpuTensorHandle>(*(params.m_InputToForgetWeights));
    layer->m_BasicParameters.m_InputToCellWeights =
        std::make_unique<ScopedCpuTensorHandle>(*(params.m_InputToCellWeights));
    layer->m_BasicParameters.m_InputToOutputWeights =
        std::make_unique<ScopedCpuTensorHandle>(*(params.m_InputToOutputWeights));
    layer->m_BasicParameters.m_RecurrentToForgetWeights =
        std::make_unique<ScopedCpuTensorHandle>(*(params.m_RecurrentToForgetWeights));
    layer->m_BasicParameters.m_RecurrentToCellWeights =
        std::make_unique<ScopedCpuTensorHandle>(*(params.m_RecurrentToCellWeights));
    layer->m_BasicParameters.m_RecurrentToOutputWeights =
        std::make_unique<ScopedCpuTensorHandle>(*(params.m_RecurrentToOutputWeights));
    layer->m_BasicParameters.m_ForgetGateBias =
            std::make_unique<ScopedCpuTensorHandle>(*(params.m_ForgetGateBias));
    layer->m_BasicParameters.m_CellBias =
            std::make_unique<ScopedCpuTensorHandle>(*(params.m_CellBias));
    layer->m_BasicParameters.m_OutputGateBias =
            std::make_unique<ScopedCpuTensorHandle>(*(params.m_OutputGateBias));

    //Lstm Cifg parameters
    // CIFG disabled means the input gate exists, so its weights/bias are mandatory.
    if(!descriptor.m_CifgEnabled)
    {
        if(params.m_InputToInputWeights == nullptr)
        {
            throw InvalidArgumentException("AddLstmLayer: Input To Input Weights cannot be NULL");
        }
        if(params.m_RecurrentToInputWeights == nullptr)
        {
            throw InvalidArgumentException(
                    "AddLstmLayer: Recurrent To Input Weights cannot be NULL");
        }
        if(params.m_InputGateBias == nullptr)
        {
            throw InvalidArgumentException("AddLstmLayer: Input Gate Bias cannot be NULL");
        }
        layer->m_CifgParameters.m_InputToInputWeights =
            std::make_unique<ScopedCpuTensorHandle>(*(params.m_InputToInputWeights));
        layer->m_CifgParameters.m_RecurrentToInputWeights =
            std::make_unique<ScopedCpuTensorHandle>(*(params.m_RecurrentToInputWeights));
        // In the VTS tests, cell-to-input weights may be null, even if the other CIFG params are not.
        if(params.m_CellToInputWeights != nullptr)
        {
            layer->m_CifgParameters.m_CellToInputWeights =
                    std::make_unique<ScopedCpuTensorHandle>(*(params.m_CellToInputWeights));
        }
        layer->m_CifgParameters.m_InputGateBias =
            std::make_unique<ScopedCpuTensorHandle>(*(params.m_InputGateBias));
    }

    //Lstm projection parameters
    // Projection weights are mandatory when projection is enabled; the bias is optional.
    if(descriptor.m_ProjectionEnabled)
    {
        if(params.m_ProjectionWeights == nullptr)
        {
            throw InvalidArgumentException("AddLstmLayer: Projection Weights cannot be NULL");
        }
        layer->m_ProjectionParameters.m_ProjectionWeights =
            std::make_unique<ScopedCpuTensorHandle>(*(params.m_ProjectionWeights));
        if(params.m_ProjectionBias != nullptr)
        {
            layer->m_ProjectionParameters.m_ProjectionBias =
                std::make_unique<ScopedCpuTensorHandle>(*(params.m_ProjectionBias));
        }
    }

    //Lstm Peephole params
    // Both peephole weight tensors are mandatory when peephole is enabled.
    if(descriptor.m_PeepholeEnabled)
    {
        if(params.m_CellToForgetWeights == nullptr)
        {
            throw InvalidArgumentException("AddLstmLayer: Cell To Forget Weights cannot be NULL");
        }
        if(params.m_CellToOutputWeights == nullptr)
        {
            throw InvalidArgumentException("AddLstmLayer: Cell To Output Weights cannot be NULL");
        }
        layer->m_PeepholeParameters.m_CellToForgetWeights =
            std::make_unique<ScopedCpuTensorHandle>(*(params.m_CellToForgetWeights));
        layer->m_PeepholeParameters.m_CellToOutputWeights =
            std::make_unique<ScopedCpuTensorHandle>(*(params.m_CellToOutputWeights));
    }
    return layer;
}
649
650 IConnectableLayer* Network::AddDivisionLayer(const char* name)
651 {
652     return m_Graph->AddLayer<DivisionLayer>(name);
653 }
654
655 IConnectableLayer* Network::AddSubtractionLayer(const char* name)
656 {
657     return m_Graph->AddLayer<SubtractionLayer>(name);
658 }
659
660 IConnectableLayer* Network::AddMeanLayer(const MeanDescriptor& meanDescriptor, const char* name)
661 {
662     return m_Graph->AddLayer<MeanLayer>(meanDescriptor,name);
663 }
664
665 IConnectableLayer* Network::AddPadLayer(const PadDescriptor& padDescriptor, const char* name)
666 {
667     return m_Graph->AddLayer<PadLayer>(padDescriptor,name);
668 }
669
670 OptimizedNetwork::OptimizedNetwork(std::unique_ptr<Graph> graph)
671     : m_Graph(std::move(graph))
672 {
673 }
674
675 OptimizedNetwork::~OptimizedNetwork()
676 {
677 }
678
679 } // namespace armnn