IVGCVSW-2019: replace Compute enum in the backend preferences list
src/armnn/Network.cpp (platform/upstream/armnn.git)
//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include "Network.hpp"
#include "Graph.hpp"
#include "Layer.hpp"
#include "DeviceSpec.hpp"
#include "Optimizer.hpp"
#include "optimizations/All.hpp"

#include <backends/CpuTensorHandle.hpp>
#include <backends/WorkloadFactory.hpp>

#include <armnn/Exceptions.hpp>
#include <armnn/Utils.hpp>
#include <armnn/TypesUtils.hpp>

#include <fcntl.h>
#include <algorithm>
#include <fstream>
#include <memory>
#include <sstream>
#include <vector>

#include <boost/assert.hpp>
#include <boost/format.hpp>
#include <boost/log/trivial.hpp>
#include <boost/numeric/conversion/converter_policies.hpp>
#include <boost/cast.hpp>

namespace armnn
{

armnn::INetwork* INetwork::CreateRaw()
{
    return new Network();
}

armnn::INetworkPtr INetwork::Create()
{
    return INetworkPtr(CreateRaw(), &INetwork::Destroy);
}

void INetwork::Destroy(INetwork* network)
{
    delete boost::polymorphic_downcast<Network*>(network);
}
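
// Illustrative usage (a sketch, not part of this file): prefer INetwork::Create(),
// which returns an INetworkPtr that invokes Destroy() automatically, over the raw
// CreateRaw()/Destroy() pair:
//
//     armnn::INetworkPtr net = armnn::INetwork::Create();
//     net->AddInputLayer(0);
//     // ... build the rest of the graph; no manual Destroy() needed.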

Status Network::PrintGraph()
{
    m_Graph->Print();
    return Status::Success;
}

void IOptimizedNetwork::Destroy(IOptimizedNetwork* network)
{
    delete boost::polymorphic_downcast<OptimizedNetwork*>(network);
}

Status OptimizedNetwork::PrintGraph()
{
    m_Graph->Print();
    return Status::Success;
}

Status OptimizedNetwork::SerializeToDot(std::ostream& stream) const
{
    return m_Graph->SerializeToDot(stream);
}
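
// A minimal sketch of dumping an optimized graph to Graphviz via the stream
// overload above (the variable and file names are hypothetical; <fstream> is
// already included by this file):
//
//     std::ofstream dotFile("network.dot");
//     optNet->SerializeToDot(dotFile);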

bool CheckScaleSetOnQuantizedType(Layer* layer, Optional<std::vector<std::string>&> errMessages)
{
    bool noErrors = true;
    unsigned int numOutputs = layer->GetNumOutputSlots();
    for (unsigned int i = 0; i < numOutputs; i++)
    {
        const OutputSlot& outputSlot = layer->GetOutputSlot(i);
        const TensorInfo& info = outputSlot.GetTensorInfo();
        if (DataType::QuantisedAsymm8 == info.GetDataType())
        {
            if (0.f == info.GetQuantizationScale())
            {
                noErrors = false;
                std::stringstream ss;
                ss << "ERROR: output " << i << " of layer " << GetLayerTypeAsCString(layer->GetType())
                   << " (" << layer->GetNameStr() << ") is of type"
                   << " Quantized 8 bit but its scale parameter has not been set";
                BOOST_LOG_TRIVIAL(warning) << ss.str();
                if (errMessages)
                {
                    errMessages.value().push_back(ss.str());
                }
            }
        }
    }
    return noErrors;
}
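
// For reference, a QuantisedAsymm8 output passes the check above when its scale
// is non-zero; e.g. (shape, scale and zero point here are illustrative only):
//
//     armnn::TensorInfo info({ 1, 8 }, armnn::DataType::QuantisedAsymm8,
//                            0.1f /* scale */, 128 /* zero point */);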

IOptimizedNetworkPtr Optimize(const INetwork& inNetwork,
                              const std::vector<BackendId>& backendPreferences,
                              const IDeviceSpec& deviceSpec,
                              const OptimizerOptions& options,
                              Optional<std::vector<std::string>&> errMessages)
{
    if (backendPreferences.empty())
    {
        throw armnn::InvalidArgumentException("Invoked Optimize with no backends specified");
    }

    const Network& network = *boost::polymorphic_downcast<const Network*>(&inNetwork);
    std::unique_ptr<Graph> graph = std::make_unique<Graph>(network.GetGraph());

    auto optNet = IOptimizedNetworkPtr(new OptimizedNetwork(std::move(graph)), &IOptimizedNetwork::Destroy);

    OptimizedNetwork* optNetObjPtr = boost::polymorphic_downcast<OptimizedNetwork*>(optNet.get());

    // Perform optimisation passes.
    using namespace optimizations;
    Optimizer::Pass(optNetObjPtr->GetGraph(), MakeOptimizations(SquashEqualPermuteSiblings(),
                                                                SquashEqualReshapeSiblings(),
                                                                OptimizeInversePermutes(),
                                                                MovePermuteUp(),
                                                                PermuteAsReshape(),
                                                                OptimizeConsecutiveReshapes()));

    // Infer the tensor infos for all output slots. Throws an exception on failure.
    optNetObjPtr->GetGraph().InferTensorInfos();

    // If the Fp32-to-Fp16 optimization is enabled, convert the Fp32 network to Fp16.
    if (options.m_ReduceFp32ToFp16)
    {
        Optimizer::Pass(optNetObjPtr->GetGraph(), MakeOptimizations(Fp32NetworkToFp16Converter()));
    }

    // We know that DeviceSpec should be the only implementation of IDeviceSpec.
    const DeviceSpec& spec = *boost::polymorphic_downcast<const DeviceSpec*>(&deviceSpec);

    // Determine which of the preferred backends are available for use,
    // and whether CpuRef was specified as one of them.
    bool cpuRefUsed = false;
    std::vector<BackendId> availablePreferredBackends;
    for (const auto& backend : backendPreferences)
    {
        // Check if the backend is in the available backend devices.
        if (std::find(spec.m_SupportedComputeDevices.begin(),
                      spec.m_SupportedComputeDevices.end(), backend) !=
                      spec.m_SupportedComputeDevices.end())
        {
            availablePreferredBackends.push_back(backend);
            if (backend == armnn::Compute::CpuRef)
            {
                cpuRefUsed = true;
            }
        }
    }

    if (availablePreferredBackends.empty())
    {
        std::stringstream failureMsg;
        failureMsg << "ERROR: None of the preferred backends " << backendPreferences
                   << " are supported. Current platform provides " << spec.m_SupportedComputeDevices;
        BOOST_LOG_TRIVIAL(warning) << failureMsg.str();
        if (errMessages)
        {
            errMessages.value().push_back(failureMsg.str());
        }
        return IOptimizedNetworkPtr(nullptr, &IOptimizedNetwork::Destroy);
    }

    auto ReturnWithError = [&](Layer* layer)
    {
        std::stringstream failureMsg;
        failureMsg << "ERROR: Layer of type " << GetLayerTypeAsCString(layer->GetType())
                   << " is not supported on any preferred backend " << backendPreferences;
        BOOST_LOG_TRIVIAL(warning) << failureMsg.str();
        if (errMessages)
        {
            errMessages.value().push_back(failureMsg.str());
        }
        return IOptimizedNetworkPtr(nullptr, &IOptimizedNetwork::Destroy);
    };

    // Assign a compute device to every layer.
    bool bErrorFound = false;
    for (auto&& layer : optNetObjPtr->GetGraph())
    {
        DataType dataType = layer->GetDataType();
        std::string reasonIfUnsupported;
        bool found = false;
        if (!CheckScaleSetOnQuantizedType(layer, errMessages))
        {
            // Don't fail immediately; find all the quantized outputs
            // which haven't had a scale set and report them all back.
            bErrorFound = true;
        }
        for (const auto& backend : availablePreferredBackends)
        {
            // Need to set the compute device on the layer
            // before we can check if it is supported.
            layer->SetBackendId(backend);
            if (!IWorkloadFactory::IsLayerSupported(*layer, dataType, reasonIfUnsupported))
            {
                if (dataType == DataType::Float16)
                {
                    if (IWorkloadFactory::IsLayerSupported(*layer, DataType::Float32, reasonIfUnsupported)
                        && layer->GetType() != LayerType::ConvertFp32ToFp16
                        && layer->GetType() != LayerType::ConvertFp16ToFp32)
                    {
                        // Insert FP16 -> FP32 conversion layer before the current layer.
                        std::vector<ConvertFp16ToFp32Layer*> convertFp16ToFp32Layers =
                            InsertConvertFp16ToFp32LayersBefore(optNetObjPtr->GetGraph(), *layer);

                        // Insert FP32 -> FP16 conversion layer after the current layer.
                        std::vector<ConvertFp32ToFp16Layer*> convertFp32ToFp16Layers =
                            InsertConvertFp32ToFp16LayersAfter(optNetObjPtr->GetGraph(), *layer);

                        // Assign a supported backend to the newly introduced conversion layers.
                        auto AssignFirstSupportedBackend = [&](Layer* layer, BackendId preferredBackend)
                        {
                            bool supportedBackendFound = false;
                            std::string reasonIfUnsupported;

                            // Try the preferred backend first.
                            layer->SetBackendId(preferredBackend);
                            if (IWorkloadFactory::IsLayerSupported(*layer, boost::none, reasonIfUnsupported))
                            {
                                supportedBackendFound = true;
                            }
                            else
                            {
                                for (const auto& backend : availablePreferredBackends)
                                {
                                    // Skip the preferred backend (we already determined that it is not supported).
                                    if (backend == preferredBackend)
                                    {
                                        continue;
                                    }

                                    layer->SetBackendId(backend);
                                    if (IWorkloadFactory::IsLayerSupported(*layer, boost::none, reasonIfUnsupported))
                                    {
                                        supportedBackendFound = true;
                                        break;
                                    }
                                }
                            }

                            return supportedBackendFound;
                        };

                        for (ConvertFp16ToFp32Layer* convertLayer : convertFp16ToFp32Layers)
                        {
                            if (!AssignFirstSupportedBackend(convertLayer, backend))
                            {
                                return ReturnWithError(convertLayer);
                            }
                        }

                        for (ConvertFp32ToFp16Layer* convertLayer : convertFp32ToFp16Layers)
                        {
                            if (!AssignFirstSupportedBackend(convertLayer, backend))
                            {
                                return ReturnWithError(convertLayer);
                            }
                        }

                        found = true;
                        break;
                    }
                }
                std::stringstream warningMsg;
                warningMsg << "WARNING: Layer of type " << GetLayerTypeAsCString(layer->GetType())
                           << " is not supported on requested backend " << layer->GetBackendId().Get()
                           << " for data type " << GetDataTypeName(dataType)
                           << " (reason: " << reasonIfUnsupported
                           << "), falling back to the next backend.";
                BOOST_LOG_TRIVIAL(warning) << warningMsg.str();
                if (errMessages)
                {
                    errMessages.value().push_back(warningMsg.str());
                }
            }
            else
            {
                found = true;
                break;
            }
        }

        // If the layer is unsupported by any devices, log and return a null network.
        if (!found)
        {
            // NOTE: even when CpuRef was not specified as a fallback, we still assign
            //       CpuRef to the layer types that are not available as accelerated
            //       operations, or are only available under certain conditions
            //       (currently MemCopy, Constant and Permute).
            armnn::LayerType layerType = layer->GetType();
            if (!cpuRefUsed && (layerType == armnn::LayerType::MemCopy ||
                                layerType == armnn::LayerType::Constant ||
                                layerType == armnn::LayerType::Permute))
            {
                layer->SetBackendId(armnn::Compute::CpuRef);
            }
            else
            {
                return ReturnWithError(layer);
            }
        }
    }

    if (bErrorFound)
    {
        return IOptimizedNetworkPtr(nullptr, &IOptimizedNetwork::Destroy);
    }

    Optimizer::Pass(optNetObjPtr->GetGraph(), MakeOptimizations(OptimizeInverseConversionsFp16(),
                                                                OptimizeInverseConversionsFp32()));

    optNetObjPtr->GetGraph().AddCopyLayers();

    // Convert constants.
    Optimizer::Pass(optNetObjPtr->GetGraph(), MakeOptimizations(ConvertConstantsFloatToHalf()));
    Optimizer::Pass(optNetObjPtr->GetGraph(), MakeOptimizations(ConvertConstantsHalfToFloat()));

    return optNet;
}
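
// Illustrative caller-side sketch (not part of this file): after IVGCVSW-2019 the
// preference list is a vector of BackendId rather than Compute enums; Compute
// values are assumed to still convert implicitly, so either spelling below should
// work (names such as 'net' and 'runtime' are hypothetical):
//
//     std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc,
//                                                armnn::Compute::CpuRef };
//     // or: std::vector<armnn::BackendId> backends = { "CpuAcc", "CpuRef" };
//     armnn::IOptimizedNetworkPtr optNet =
//         armnn::Optimize(*net, backends, runtime->GetDeviceSpec());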

Network::Network()
: m_Graph(std::make_unique<Graph>())
{
}

Network::~Network()
{
}

IConnectableLayer* Network::AddInputLayer(LayerBindingId id, const char* name)
{
    return m_Graph->AddLayer<InputLayer>(id, name);
}

IConnectableLayer* Network::AddFullyConnectedLayerImpl(const FullyConnectedDescriptor& fullyConnectedDescriptor,
                                                       const ConstTensor& weights,
                                                       const ConstTensor* biases,
                                                       const char* name)
{
    if (fullyConnectedDescriptor.m_BiasEnabled && (biases == nullptr))
    {
        throw InvalidArgumentException("AddFullyConnectedLayer: biases cannot be NULL");
    }

    const auto layer = m_Graph->AddLayer<FullyConnectedLayer>(fullyConnectedDescriptor, name);

    layer->m_Weight = std::make_unique<ScopedCpuTensorHandle>(weights);

    if (fullyConnectedDescriptor.m_BiasEnabled)
    {
        layer->m_Bias = std::make_unique<ScopedCpuTensorHandle>(*biases);
    }

    return layer;
}

IConnectableLayer* Network::AddFullyConnectedLayer(const FullyConnectedDescriptor& fullyConnectedDescriptor,
                                                   const ConstTensor& weights,
                                                   const char* name)
{
    return AddFullyConnectedLayerImpl(fullyConnectedDescriptor, weights, nullptr, name);
}

IConnectableLayer* Network::AddFullyConnectedLayer(const FullyConnectedDescriptor& fullyConnectedDescriptor,
                                                   const ConstTensor& weights,
                                                   const ConstTensor& biases,
                                                   const char* name)
{
    return AddFullyConnectedLayerImpl(fullyConnectedDescriptor, weights, &biases, name);
}

IConnectableLayer* Network::AddConvolution2dLayerImpl(const Convolution2dDescriptor& convolution2dDescriptor,
                                                      const ConstTensor& weights,
                                                      const ConstTensor* biases,
                                                      const char* name)
{
    if (convolution2dDescriptor.m_BiasEnabled && (biases == nullptr))
    {
        throw InvalidArgumentException("AddConvolution2dLayer: biases cannot be NULL");
    }

    const auto layer = m_Graph->AddLayer<Convolution2dLayer>(convolution2dDescriptor, name);

    layer->m_Weight = std::make_unique<ScopedCpuTensorHandle>(weights);

    if (convolution2dDescriptor.m_BiasEnabled)
    {
        layer->m_Bias = std::make_unique<ScopedCpuTensorHandle>(*biases);
    }

    return layer;
}

IConnectableLayer* Network::AddConvolution2dLayer(const Convolution2dDescriptor& convolution2dDescriptor,
                                                  const ConstTensor& weights,
                                                  const char* name)
{
    return AddConvolution2dLayerImpl(convolution2dDescriptor, weights, nullptr, name);
}

IConnectableLayer* Network::AddConvolution2dLayer(const Convolution2dDescriptor& convolution2dDescriptor,
                                                  const ConstTensor& weights,
                                                  const ConstTensor& biases,
                                                  const char* name)
{
    return AddConvolution2dLayerImpl(convolution2dDescriptor, weights, &biases, name);
}

IConnectableLayer* Network::AddDepthwiseConvolution2dLayerImpl(
    const DepthwiseConvolution2dDescriptor& convolution2dDescriptor,
    const ConstTensor& weights,
    const ConstTensor* biases,
    const char* name)
{
    if (convolution2dDescriptor.m_BiasEnabled && (biases == nullptr))
    {
        throw InvalidArgumentException("AddDepthwiseConvolution2dLayer: biases cannot be NULL");
    }

    const auto layer = m_Graph->AddLayer<DepthwiseConvolution2dLayer>(convolution2dDescriptor, name);

    layer->m_Weight = std::make_unique<ScopedCpuTensorHandle>(weights);

    if (convolution2dDescriptor.m_BiasEnabled)
    {
        layer->m_Bias = std::make_unique<ScopedCpuTensorHandle>(*biases);
    }

    return layer;
}

IConnectableLayer* Network::AddDepthwiseConvolution2dLayer(
    const DepthwiseConvolution2dDescriptor& convolution2dDescriptor,
    const ConstTensor& weights,
    const char* name)
{
    return AddDepthwiseConvolution2dLayerImpl(convolution2dDescriptor, weights, nullptr, name);
}

IConnectableLayer* Network::AddDepthwiseConvolution2dLayer(
    const DepthwiseConvolution2dDescriptor& convolution2dDescriptor,
    const ConstTensor& weights,
    const ConstTensor& biases,
    const char* name)
{
    return AddDepthwiseConvolution2dLayerImpl(convolution2dDescriptor, weights, &biases, name);
}

IConnectableLayer* Network::AddPermuteLayer(const PermuteDescriptor& permuteDescriptor,
                                            const char* name)
{
    return m_Graph->AddLayer<PermuteLayer>(permuteDescriptor, name);
}

IConnectableLayer* Network::AddPooling2dLayer(const Pooling2dDescriptor& pooling2dDescriptor,
    const char* name)
{
    return m_Graph->AddLayer<Pooling2dLayer>(pooling2dDescriptor, name);
}

IConnectableLayer* Network::AddActivationLayer(const ActivationDescriptor& activationDescriptor,
    const char* name)
{
    return m_Graph->AddLayer<ActivationLayer>(activationDescriptor, name);
}

IConnectableLayer* Network::AddNormalizationLayer(const NormalizationDescriptor& normalizationDescriptor,
    const char* name)
{
    return m_Graph->AddLayer<NormalizationLayer>(normalizationDescriptor, name);
}

IConnectableLayer* Network::AddSoftmaxLayer(const SoftmaxDescriptor& softmaxDescriptor,
    const char* name)
{
    return m_Graph->AddLayer<SoftmaxLayer>(softmaxDescriptor, name);
}

IConnectableLayer* Network::AddSplitterLayer(const ViewsDescriptor& splitterDescriptor,
    const char* name)
{
    return m_Graph->AddLayer<SplitterLayer>(splitterDescriptor, name);
}

IConnectableLayer* Network::AddMergerLayer(const OriginsDescriptor& mergerDescriptor,
    const char* name)
{
    return m_Graph->AddLayer<MergerLayer>(mergerDescriptor, name);
}

IConnectableLayer* Network::AddAdditionLayer(const char* name)
{
    return m_Graph->AddLayer<AdditionLayer>(name);
}

IConnectableLayer* Network::AddMultiplicationLayer(const char* name)
{
    return m_Graph->AddLayer<MultiplicationLayer>(name);
}

IConnectableLayer* Network::AddOutputLayer(LayerBindingId id, const char* name)
{
    return m_Graph->AddLayer<OutputLayer>(id, name);
}
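
// A small end-to-end sketch of how these factory methods compose (names, shapes
// and the 'net' pointer are illustrative only):
//
//     armnn::IConnectableLayer* input   = net->AddInputLayer(0, "input");
//     armnn::IConnectableLayer* softmax = net->AddSoftmaxLayer(armnn::SoftmaxDescriptor(), "softmax");
//     armnn::IConnectableLayer* output  = net->AddOutputLayer(0, "output");
//
//     input->GetOutputSlot(0).Connect(softmax->GetInputSlot(0));
//     softmax->GetOutputSlot(0).Connect(output->GetInputSlot(0));
//     input->GetOutputSlot(0).SetTensorInfo(armnn::TensorInfo({ 1, 10 }, armnn::DataType::Float32));
//     softmax->GetOutputSlot(0).SetTensorInfo(armnn::TensorInfo({ 1, 10 }, armnn::DataType::Float32));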

IConnectableLayer* Network::AddBatchNormalizationLayer(const BatchNormalizationDescriptor& desc,
                                                       const ConstTensor&                  mean,
                                                       const ConstTensor&                  variance,
                                                       const ConstTensor&                  beta,
                                                       const ConstTensor&                  gamma,
                                                       const char*                         name)
{
    const auto layer = m_Graph->AddLayer<BatchNormalizationLayer>(desc, name);

    layer->m_Mean = std::make_unique<ScopedCpuTensorHandle>(mean);
    layer->m_Variance = std::make_unique<ScopedCpuTensorHandle>(variance);
    layer->m_Beta = std::make_unique<ScopedCpuTensorHandle>(beta);
    layer->m_Gamma = std::make_unique<ScopedCpuTensorHandle>(gamma);

    return layer;
}

IConnectableLayer* Network::AddResizeBilinearLayer(const ResizeBilinearDescriptor& resizeDescriptor,
                                                   const char* name)
{
    return m_Graph->AddLayer<ResizeBilinearLayer>(resizeDescriptor, name);
}

IConnectableLayer* Network::AddL2NormalizationLayer(const L2NormalizationDescriptor& desc,
                                                    const char* name)
{
    return m_Graph->AddLayer<L2NormalizationLayer>(desc, name);
}

IConnectableLayer* Network::AddConstantLayer(const ConstTensor& input, const char* name)
{
    auto layer = m_Graph->AddLayer<ConstantLayer>(name);

    layer->m_LayerOutput = std::make_unique<ScopedCpuTensorHandle>(input);

    return layer;
}

IConnectableLayer* Network::AddReshapeLayer(const ReshapeDescriptor& reshapeDescriptor,
                                            const char* name)
{
    return m_Graph->AddLayer<ReshapeLayer>(reshapeDescriptor, name);
}

IConnectableLayer* Network::AddFloorLayer(const char* name)
{
    return m_Graph->AddLayer<FloorLayer>(name);
}

IConnectableLayer* Network::AddLstmLayer(const LstmDescriptor&  descriptor,
                                         const LstmInputParams& params,
                                         const char* name)
{
    const auto layer = m_Graph->AddLayer<LstmLayer>(descriptor, name);

    // LSTM basic parameters
    layer->m_BasicParameters.m_InputToForgetWeights =
        std::make_unique<ScopedCpuTensorHandle>(*(params.m_InputToForgetWeights));
    layer->m_BasicParameters.m_InputToCellWeights =
        std::make_unique<ScopedCpuTensorHandle>(*(params.m_InputToCellWeights));
    layer->m_BasicParameters.m_InputToOutputWeights =
        std::make_unique<ScopedCpuTensorHandle>(*(params.m_InputToOutputWeights));
    layer->m_BasicParameters.m_RecurrentToForgetWeights =
        std::make_unique<ScopedCpuTensorHandle>(*(params.m_RecurrentToForgetWeights));
    layer->m_BasicParameters.m_RecurrentToCellWeights =
        std::make_unique<ScopedCpuTensorHandle>(*(params.m_RecurrentToCellWeights));
    layer->m_BasicParameters.m_RecurrentToOutputWeights =
        std::make_unique<ScopedCpuTensorHandle>(*(params.m_RecurrentToOutputWeights));
    layer->m_BasicParameters.m_ForgetGateBias =
        std::make_unique<ScopedCpuTensorHandle>(*(params.m_ForgetGateBias));
    layer->m_BasicParameters.m_CellBias =
        std::make_unique<ScopedCpuTensorHandle>(*(params.m_CellBias));
    layer->m_BasicParameters.m_OutputGateBias =
        std::make_unique<ScopedCpuTensorHandle>(*(params.m_OutputGateBias));

    // LSTM CIFG parameters
    if (!descriptor.m_CifgEnabled)
    {
        if (params.m_InputToInputWeights == nullptr)
        {
            throw InvalidArgumentException("AddLstmLayer: Input To Input Weights cannot be NULL");
        }
        if (params.m_RecurrentToInputWeights == nullptr)
        {
            throw InvalidArgumentException(
                    "AddLstmLayer: Recurrent To Input Weights cannot be NULL");
        }
        if (params.m_InputGateBias == nullptr)
        {
            throw InvalidArgumentException("AddLstmLayer: Input Gate Bias cannot be NULL");
        }
        layer->m_CifgParameters.m_InputToInputWeights =
            std::make_unique<ScopedCpuTensorHandle>(*(params.m_InputToInputWeights));
        layer->m_CifgParameters.m_RecurrentToInputWeights =
            std::make_unique<ScopedCpuTensorHandle>(*(params.m_RecurrentToInputWeights));
        // In the VTS tests, cell-to-input weights may be null, even if the other CIFG params are not.
        if (params.m_CellToInputWeights != nullptr)
        {
            layer->m_CifgParameters.m_CellToInputWeights =
                std::make_unique<ScopedCpuTensorHandle>(*(params.m_CellToInputWeights));
        }
        layer->m_CifgParameters.m_InputGateBias =
            std::make_unique<ScopedCpuTensorHandle>(*(params.m_InputGateBias));
    }

    // LSTM projection parameters
    if (descriptor.m_ProjectionEnabled)
    {
        if (params.m_ProjectionWeights == nullptr)
        {
            throw InvalidArgumentException("AddLstmLayer: Projection Weights cannot be NULL");
        }
        layer->m_ProjectionParameters.m_ProjectionWeights =
            std::make_unique<ScopedCpuTensorHandle>(*(params.m_ProjectionWeights));
        if (params.m_ProjectionBias != nullptr)
        {
            layer->m_ProjectionParameters.m_ProjectionBias =
                std::make_unique<ScopedCpuTensorHandle>(*(params.m_ProjectionBias));
        }
    }

    // LSTM peephole parameters
    if (descriptor.m_PeepholeEnabled)
    {
        if (params.m_CellToForgetWeights == nullptr)
        {
            throw InvalidArgumentException("AddLstmLayer: Cell To Forget Weights cannot be NULL");
        }
        if (params.m_CellToOutputWeights == nullptr)
        {
            throw InvalidArgumentException("AddLstmLayer: Cell To Output Weights cannot be NULL");
        }
        layer->m_PeepholeParameters.m_CellToForgetWeights =
            std::make_unique<ScopedCpuTensorHandle>(*(params.m_CellToForgetWeights));
        layer->m_PeepholeParameters.m_CellToOutputWeights =
            std::make_unique<ScopedCpuTensorHandle>(*(params.m_CellToOutputWeights));
    }
    return layer;
}

IConnectableLayer* Network::AddDivisionLayer(const char* name)
{
    return m_Graph->AddLayer<DivisionLayer>(name);
}

IConnectableLayer* Network::AddSubtractionLayer(const char* name)
{
    return m_Graph->AddLayer<SubtractionLayer>(name);
}

IConnectableLayer* Network::AddMeanLayer(const MeanDescriptor& meanDescriptor, const char* name)
{
    return m_Graph->AddLayer<MeanLayer>(meanDescriptor, name);
}

IConnectableLayer* Network::AddPadLayer(const PadDescriptor& padDescriptor, const char* name)
{
    return m_Graph->AddLayer<PadLayer>(padDescriptor, name);
}

OptimizedNetwork::OptimizedNetwork(std::unique_ptr<Graph> graph)
    : m_Graph(std::move(graph))
{
}

OptimizedNetwork::~OptimizedNetwork()
{
}

} // namespace armnn