IVGCVSW-2247 Adding a min Elementwise Workload and tests
src/armnn/Network.cpp
//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include "Network.hpp"
#include "Graph.hpp"
#include "Layer.hpp"
#include "DeviceSpec.hpp"
#include "Optimizer.hpp"
#include "optimizations/All.hpp"

#include <backendsCommon/CpuTensorHandle.hpp>
#include <backendsCommon/WorkloadFactory.hpp>
#include <backendsCommon/BackendRegistry.hpp>
#include <backendsCommon/IBackendInternal.hpp>

#include <armnn/Exceptions.hpp>
#include <armnn/Utils.hpp>
#include <armnn/TypesUtils.hpp>

#include <fcntl.h>
#include <algorithm>
#include <fstream>
#include <memory>
#include <sstream>
#include <unordered_set>
#include <vector>

#include <boost/assert.hpp>
#include <boost/format.hpp>
#include <boost/log/trivial.hpp>
#include <boost/numeric/conversion/converter_policies.hpp>
#include <boost/cast.hpp>

namespace armnn
{

armnn::INetwork* INetwork::CreateRaw()
{
    return new Network();
}

armnn::INetworkPtr INetwork::Create()
{
    return INetworkPtr(CreateRaw(), &INetwork::Destroy);
}

void INetwork::Destroy(INetwork* network)
{
    delete boost::polymorphic_downcast<Network*>(network);
}

Status Network::PrintGraph()
{
    m_Graph->Print();
    return Status::Success;
}

void IOptimizedNetwork::Destroy(IOptimizedNetwork* network)
{
    delete boost::polymorphic_downcast<OptimizedNetwork*>(network);
}

Status OptimizedNetwork::PrintGraph()
{
    m_Graph->Print();
    return Status::Success;
}

Status OptimizedNetwork::SerializeToDot(std::ostream& stream) const
{
    return m_Graph->SerializeToDot(stream);
}

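// Checks that every quantized (QAsymm8) output slot of the given layer has a
// quantization scale set; logs a warning (and records a message, when errMessages
// is provided) for each one that does not. Returns false if any unset scale was found.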
bool CheckScaleSetOnQuantizedType(Layer* layer, Optional<std::vector<std::string>&> errMessages)
{
    bool noErrors = true;
    unsigned int numOutputs = layer->GetNumOutputSlots();
    for (unsigned int i = 0; i < numOutputs; i++)
    {
        const OutputSlot& outputSlot = layer->GetOutputSlot(i);
        const TensorInfo& info = outputSlot.GetTensorInfo();
        if (DataType::QuantisedAsymm8 == info.GetDataType())
        {
            if (0.f == info.GetQuantizationScale())
            {
                noErrors = false;
                std::stringstream ss;
                ss << "ERROR: output " << i << " of layer " << GetLayerTypeAsCString(layer->GetType())
                   << " (" << layer->GetNameStr() << ") is of type"
                   << " Quantized 8 bit but its scale parameter has not been set";
                BOOST_LOG_TRIVIAL(warning) << ss.str();
                if (errMessages)
                {
                    errMessages.value().push_back(ss.str());
                }
            }
        }
    }
    return noErrors;
}

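// Applies device-independent optimization passes, assigns each layer to the first
// preferred backend that supports it (inserting FP16 <-> FP32 conversion layers where
// needed), and finally runs any backend-specific optimizations for the chosen backends.
// Returns a null IOptimizedNetworkPtr on failure; details are appended to errMessages
// when one is provided.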
IOptimizedNetworkPtr Optimize(const INetwork& inNetwork,
                              const std::vector<BackendId>& backendPreferences,
                              const IDeviceSpec& deviceSpec,
                              const OptimizerOptions& options,
                              Optional<std::vector<std::string>&> errMessages)
{
    if (backendPreferences.empty())
    {
        throw armnn::InvalidArgumentException("Invoked Optimize with no backends specified");
    }
    const Network& network = *boost::polymorphic_downcast<const Network*>(&inNetwork);
    std::unique_ptr<Graph> graph = std::make_unique<Graph>(network.GetGraph());

    auto optNet = IOptimizedNetworkPtr(new OptimizedNetwork(std::move(graph)), &IOptimizedNetwork::Destroy);

    OptimizedNetwork* optNetObjPtr = boost::polymorphic_downcast<OptimizedNetwork*>(optNet.get());

    // Perform optimisation passes.
    using namespace optimizations;
    Optimizer::Pass(optNetObjPtr->GetGraph(), MakeOptimizations(SquashEqualPermuteSiblings(),
                                                                SquashEqualReshapeSiblings(),
                                                                OptimizeInversePermutes(),
                                                                MovePermuteUp(),
                                                                PermuteAsReshape(),
                                                                OptimizeConsecutiveReshapes()));

    // Infer the tensor infos for all output slots. Throws an exception on failure.
    optNetObjPtr->GetGraph().InferTensorInfos();

    // If the FP32-to-FP16 reduction option is set, convert the FP32 network to FP16.
    if (options.m_ReduceFp32ToFp16)
    {
        Optimizer::Pass(optNetObjPtr->GetGraph(), MakeOptimizations(Fp32NetworkToFp16Converter()));
    }

    // We know that DeviceSpec should be the only implementation of IDeviceSpec.
    const DeviceSpec& spec = *boost::polymorphic_downcast<const DeviceSpec*>(&deviceSpec);
    auto const& supportedBackends = spec.GetSupportedBackends();

    // Determine which of the preferred backends are available for use,
    // and whether CpuRef was specified as one of them.
    bool cpuRefUsed = false;
    std::vector<BackendId> availablePreferredBackends;
    for (const auto& backend : backendPreferences)
    {
        // Check if the backend is in the available backend devices.
        if (supportedBackends.count(backend) > 0)
        {
            availablePreferredBackends.push_back(backend);
            if (backend == armnn::Compute::CpuRef)
            {
                cpuRefUsed = true;
            }
        }
    }
    if (availablePreferredBackends.empty())
    {
        std::stringstream failureMsg;
        failureMsg << "ERROR: None of the preferred backends " << backendPreferences
                   << " are supported. Current platform provides " << supportedBackends;
        BOOST_LOG_TRIVIAL(warning) << failureMsg.str();
        if (errMessages)
        {
            errMessages.value().push_back(failureMsg.str());
        }
        return IOptimizedNetworkPtr(nullptr, &IOptimizedNetwork::Destroy);
    }

    auto ReturnWithError = [&](Layer* layer)
    {
        std::stringstream failureMsg;
        failureMsg << "ERROR: Layer of type " << GetLayerTypeAsCString(layer->GetType())
                   << " is not supported on any preferred backend " << backendPreferences;
        BOOST_LOG_TRIVIAL(warning) << failureMsg.str();
        if (errMessages)
        {
            errMessages.value().push_back(failureMsg.str());
        }
        return IOptimizedNetworkPtr(nullptr, &IOptimizedNetwork::Destroy);
    };

    // The backends that we choose to run layers on.
    std::unordered_set<BackendId> chosenBackends;

    // Assign a compute device to every layer in the graph.
    bool bErrorFound = false;
    for (auto&& layer : optNetObjPtr->GetGraph())
    {
        DataType dataType = layer->GetDataType();
        std::string reasonIfUnsupported;
        bool found = false;
        if (!CheckScaleSetOnQuantizedType(layer, errMessages))
        {
            // Don't bomb immediately: find all the quantized outputs
            // which haven't had a scale set, and report them all back.
            bErrorFound = true;
        }
        for (const auto& backend : availablePreferredBackends)
        {
            // The backend must be set on the layer before its support can be queried.
            layer->SetBackendId(backend);
            if (!IWorkloadFactory::IsLayerSupported(*layer, dataType, reasonIfUnsupported))
            {
                if (dataType == DataType::Float16)
                {
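                    // The requested backend cannot run this layer in FP16. If it can run it
                    // in FP32, keep the backend and bracket the layer with FP16 <-> FP32
                    // conversion layers instead (unless the layer is itself a conversion).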
                    if (IWorkloadFactory::IsLayerSupported(*layer, DataType::Float32, reasonIfUnsupported)
                        && layer->GetType() != LayerType::ConvertFp32ToFp16
                        && layer->GetType() != LayerType::ConvertFp16ToFp32)
                    {
                        // Insert FP16 -> FP32 conversion layer before current layer.
                        std::vector<ConvertFp16ToFp32Layer*> convertFp16ToFp32Layers =
                            InsertConvertFp16ToFp32LayersBefore(optNetObjPtr->GetGraph(), *layer);

                        // Insert FP32 -> FP16 conversion layer after current layer.
                        std::vector<ConvertFp32ToFp16Layer*> convertFp32ToFp16Layers =
                            InsertConvertFp32ToFp16LayersAfter(optNetObjPtr->GetGraph(), *layer);

                        // Assign a supported backend to the newly introduced conversion layers.
                        auto AssignFirstSupportedBackend = [&](Layer* layer, BackendId preferredBackend)
                        {
                            bool supportedBackendFound = false;
                            std::string reasonIfUnsupported;

                            // Try the preferred backend first.
                            layer->SetBackendId(preferredBackend);
                            if (IWorkloadFactory::IsLayerSupported(*layer,
                                                                   EmptyOptional(),
                                                                   reasonIfUnsupported))
                            {
                                supportedBackendFound = true;
                            }
                            else
                            {
                                for (const auto& backend : availablePreferredBackends)
                                {
                                    // Skip the preferred backend (we already determined that it is not supported).
                                    if (backend == preferredBackend)
                                    {
                                        continue;
                                    }

                                    layer->SetBackendId(backend);
                                    if (IWorkloadFactory::IsLayerSupported(*layer,
                                                                           EmptyOptional(),
                                                                           reasonIfUnsupported))
                                    {
                                        supportedBackendFound = true;
                                        break;
                                    }
                                }
                            }

                            return supportedBackendFound;
                        };

                        for (ConvertFp16ToFp32Layer* convertLayer : convertFp16ToFp32Layers)
                        {
                            if (!AssignFirstSupportedBackend(convertLayer, backend))
                            {
                                return ReturnWithError(convertLayer);
                            }
                        }

                        for (ConvertFp32ToFp16Layer* convertLayer : convertFp32ToFp16Layers)
                        {
                            if (!AssignFirstSupportedBackend(convertLayer, backend))
                            {
                                return ReturnWithError(convertLayer);
                            }
                        }

                        found = true;
                        break;
                    }
                }
                std::stringstream warningMsg;
                warningMsg << "WARNING: Layer of type " << GetLayerTypeAsCString(layer->GetType())
                           << " is not supported on requested backend " << layer->GetBackendId().Get()
                           << " for data type " << GetDataTypeName(dataType)
                           << " (reason: " << reasonIfUnsupported
                           << "), falling back to the next backend.";
                BOOST_LOG_TRIVIAL(warning) << warningMsg.str();
                if (errMessages)
                {
                    errMessages.value().push_back(warningMsg.str());
                }
            }
            else
            {
                found = true;
                chosenBackends.insert(backend);
                break;
            }
        }

        // If the layer is unsupported by any devices, log and return a null network.
        if (!found)
        {
            // NOTE: if the layer is not an operation queue type AND we have not got CpuRef as a
            //       fallback we should set the compute device on the layer to CpuRef (these are not
            //       available as accelerated operations, or are only available under certain
            //       conditions, currently they comprise MemCopy, Constant, Permute)
            armnn::LayerType layerType = layer->GetType();
            if (!cpuRefUsed && (layerType == armnn::LayerType::MemCopy ||
                                layerType == armnn::LayerType::Constant ||
                                layerType == armnn::LayerType::Permute))
            {
                layer->SetBackendId(armnn::Compute::CpuRef);
                chosenBackends.insert(armnn::Compute::CpuRef);
            }
            else
            {
                return ReturnWithError(layer);
            }
        }
    }
    if (bErrorFound)
    {
        return IOptimizedNetworkPtr(nullptr, &IOptimizedNetwork::Destroy);
    }

    Optimizer::Pass(optNetObjPtr->GetGraph(), MakeOptimizations(OptimizeInverseConversionsFp16(),
                                                                OptimizeInverseConversionsFp32()));

    optNetObjPtr->GetGraph().AddCopyLayers();

    // Convert constants.
    Optimizer::Pass(optNetObjPtr->GetGraph(), MakeOptimizations(ConvertConstantsFloatToHalf()));
    Optimizer::Pass(optNetObjPtr->GetGraph(), MakeOptimizations(ConvertConstantsHalfToFloat()));

    // Run backend-specific optimizations.
    for (auto&& chosenBackend : chosenBackends)
    {
        auto factoryFun = BackendRegistryInstance().GetFactory(chosenBackend);
        auto backendPtr = factoryFun();
        BOOST_ASSERT(backendPtr.get() != nullptr);

        auto backendSpecificOptimizations = backendPtr->GetOptimizations();
        if (!backendSpecificOptimizations.empty())
        {
            Optimizer::Pass(optNetObjPtr->GetGraph(), backendSpecificOptimizations);
        }
    }

    return optNet;
}
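
// A typical call, for illustration only (the runtime object and backend list below are
// assumptions, not part of this file):
//
//     IRuntimePtr runtime = IRuntime::Create(IRuntime::CreationOptions());
//     IOptimizedNetworkPtr optNet = Optimize(*net,
//                                            {Compute::CpuAcc, Compute::CpuRef},
//                                            runtime->GetDeviceSpec());
//
// A null pointer is returned when no preferred backend is available, or when a layer
// (other than MemCopy/Constant/Permute with a CpuRef fallback) is unsupported everywhere.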

Network::Network()
    : m_Graph(std::make_unique<Graph>())
{
}

Network::~Network()
{
}

IConnectableLayer* Network::AddInputLayer(LayerBindingId id, const char* name)
{
    return m_Graph->AddLayer<InputLayer>(id, name);
}

IConnectableLayer* Network::AddBatchToSpaceNdLayer(const BatchToSpaceNdDescriptor& batchToSpaceNdDescriptor,
                                                   const char* name)
{
    return m_Graph->AddLayer<BatchToSpaceNdLayer>(batchToSpaceNdDescriptor, name);
}

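// Shared implementation for the two public AddFullyConnectedLayer overloads: validates
// that a bias tensor is supplied whenever the descriptor enables biases, then copies the
// constant tensors into handles owned by the layer.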
IConnectableLayer* Network::AddFullyConnectedLayerImpl(const FullyConnectedDescriptor& fullyConnectedDescriptor,
                                                       const ConstTensor& weights,
                                                       const ConstTensor* biases,
                                                       const char* name)
{
    if (fullyConnectedDescriptor.m_BiasEnabled && (biases == nullptr))
    {
        throw InvalidArgumentException("AddFullyConnectedLayer: biases cannot be NULL");
    }

    const auto layer = m_Graph->AddLayer<FullyConnectedLayer>(fullyConnectedDescriptor, name);

    layer->m_Weight = std::make_unique<ScopedCpuTensorHandle>(weights);

    if (fullyConnectedDescriptor.m_BiasEnabled)
    {
        layer->m_Bias = std::make_unique<ScopedCpuTensorHandle>(*biases);
    }

    return layer;
}

IConnectableLayer* Network::AddFullyConnectedLayer(const FullyConnectedDescriptor& fullyConnectedDescriptor,
                                                   const ConstTensor& weights,
                                                   const char* name)
{
    return AddFullyConnectedLayerImpl(fullyConnectedDescriptor, weights, nullptr, name);
}

IConnectableLayer* Network::AddFullyConnectedLayer(const FullyConnectedDescriptor& fullyConnectedDescriptor,
                                                   const ConstTensor& weights,
                                                   const ConstTensor& biases,
                                                   const char* name)
{
    return AddFullyConnectedLayerImpl(fullyConnectedDescriptor, weights, &biases, name);
}

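// The convolution overloads below follow the same pattern as the fully connected ones:
// a shared *Impl function validates the optional bias and takes ownership of copies of
// the constant tensors.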
IConnectableLayer* Network::AddConvolution2dLayerImpl(const Convolution2dDescriptor& convolution2dDescriptor,
                                                      const ConstTensor& weights,
                                                      const ConstTensor* biases,
                                                      const char* name)
{
    if (convolution2dDescriptor.m_BiasEnabled && (biases == nullptr))
    {
        throw InvalidArgumentException("AddConvolution2dLayer: biases cannot be NULL");
    }

    const auto layer = m_Graph->AddLayer<Convolution2dLayer>(convolution2dDescriptor, name);

    layer->m_Weight = std::make_unique<ScopedCpuTensorHandle>(weights);

    if (convolution2dDescriptor.m_BiasEnabled)
    {
        layer->m_Bias = std::make_unique<ScopedCpuTensorHandle>(*biases);
    }

    return layer;
}

IConnectableLayer* Network::AddConvolution2dLayer(const Convolution2dDescriptor& convolution2dDescriptor,
                                                  const ConstTensor& weights,
                                                  const char* name)
{
    return AddConvolution2dLayerImpl(convolution2dDescriptor, weights, nullptr, name);
}

IConnectableLayer* Network::AddConvolution2dLayer(const Convolution2dDescriptor& convolution2dDescriptor,
                                                  const ConstTensor& weights,
                                                  const ConstTensor& biases,
                                                  const char* name)
{
    return AddConvolution2dLayerImpl(convolution2dDescriptor, weights, &biases, name);
}

IConnectableLayer* Network::AddDepthwiseConvolution2dLayerImpl(
    const DepthwiseConvolution2dDescriptor& convolution2dDescriptor,
    const ConstTensor& weights,
    const ConstTensor* biases,
    const char* name)
{
    if (convolution2dDescriptor.m_BiasEnabled && (biases == nullptr))
    {
        throw InvalidArgumentException("AddDepthwiseConvolution2dLayer: biases cannot be NULL");
    }

    const auto layer = m_Graph->AddLayer<DepthwiseConvolution2dLayer>(convolution2dDescriptor, name);

    layer->m_Weight = std::make_unique<ScopedCpuTensorHandle>(weights);

    if (convolution2dDescriptor.m_BiasEnabled)
    {
        layer->m_Bias = std::make_unique<ScopedCpuTensorHandle>(*biases);
    }

    return layer;
}

IConnectableLayer* Network::AddDepthwiseConvolution2dLayer(
    const DepthwiseConvolution2dDescriptor& convolution2dDescriptor,
    const ConstTensor& weights,
    const char* name)
{
    return AddDepthwiseConvolution2dLayerImpl(convolution2dDescriptor, weights, nullptr, name);
}

IConnectableLayer* Network::AddDepthwiseConvolution2dLayer(
    const DepthwiseConvolution2dDescriptor& convolution2dDescriptor,
    const ConstTensor& weights,
    const ConstTensor& biases,
    const char* name)
{
    return AddDepthwiseConvolution2dLayerImpl(convolution2dDescriptor, weights, &biases, name);
}

IConnectableLayer* Network::AddPermuteLayer(const PermuteDescriptor& permuteDescriptor,
                                            const char* name)
{
    return m_Graph->AddLayer<PermuteLayer>(permuteDescriptor, name);
}

IConnectableLayer* Network::AddPooling2dLayer(const Pooling2dDescriptor& pooling2dDescriptor,
    const char* name)
{
    return m_Graph->AddLayer<Pooling2dLayer>(pooling2dDescriptor, name);
}

IConnectableLayer* Network::AddActivationLayer(const ActivationDescriptor& activationDescriptor,
    const char* name)
{
    return m_Graph->AddLayer<ActivationLayer>(activationDescriptor, name);
}

IConnectableLayer* Network::AddNormalizationLayer(const NormalizationDescriptor& normalizationDescriptor,
    const char* name)
{
    return m_Graph->AddLayer<NormalizationLayer>(normalizationDescriptor, name);
}

IConnectableLayer* Network::AddSoftmaxLayer(const SoftmaxDescriptor& softmaxDescriptor,
    const char* name)
{
    return m_Graph->AddLayer<SoftmaxLayer>(softmaxDescriptor, name);
}

IConnectableLayer* Network::AddSplitterLayer(const ViewsDescriptor& splitterDescriptor,
    const char* name)
{
    return m_Graph->AddLayer<SplitterLayer>(splitterDescriptor, name);
}

IConnectableLayer* Network::AddMaximumLayer(const char* name)
{
    return m_Graph->AddLayer<MaximumLayer>(name);
}

IConnectableLayer* Network::AddMinimumLayer(const char* name)
{
    return m_Graph->AddLayer<MinimumLayer>(name);
}
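
// Minimal usage sketch for the Minimum layer introduced by this change (variable names
// are illustrative, not part of this file): the layer takes two inputs and produces
// their element-wise minimum.
//
//     INetworkPtr net = INetwork::Create();
//     IConnectableLayer* input0 = net->AddInputLayer(0);
//     IConnectableLayer* input1 = net->AddInputLayer(1);
//     IConnectableLayer* minimum = net->AddMinimumLayer("min");
//     IConnectableLayer* output = net->AddOutputLayer(0);
//     input0->GetOutputSlot(0).Connect(minimum->GetInputSlot(0));
//     input1->GetOutputSlot(0).Connect(minimum->GetInputSlot(1));
//     minimum->GetOutputSlot(0).Connect(output->GetInputSlot(0));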

IConnectableLayer* Network::AddMergerLayer(const OriginsDescriptor& mergerDescriptor,
    const char* name)
{
    return m_Graph->AddLayer<MergerLayer>(mergerDescriptor, name);
}

IConnectableLayer* Network::AddAdditionLayer(const char* name)
{
    return m_Graph->AddLayer<AdditionLayer>(name);
}

IConnectableLayer* Network::AddMultiplicationLayer(const char* name)
{
    return m_Graph->AddLayer<MultiplicationLayer>(name);
}

IConnectableLayer* Network::AddOutputLayer(LayerBindingId id, const char* name)
{
    return m_Graph->AddLayer<OutputLayer>(id, name);
}

IConnectableLayer* Network::AddBatchNormalizationLayer(const BatchNormalizationDescriptor& desc,
                                                       const ConstTensor&                  mean,
                                                       const ConstTensor&                  variance,
                                                       const ConstTensor&                  beta,
                                                       const ConstTensor&                  gamma,
                                                       const char*                         name)
{
    const auto layer = m_Graph->AddLayer<BatchNormalizationLayer>(desc, name);

    layer->m_Mean = std::make_unique<ScopedCpuTensorHandle>(mean);
    layer->m_Variance = std::make_unique<ScopedCpuTensorHandle>(variance);
    layer->m_Beta = std::make_unique<ScopedCpuTensorHandle>(beta);
    layer->m_Gamma = std::make_unique<ScopedCpuTensorHandle>(gamma);

    return layer;
}

IConnectableLayer* Network::AddResizeBilinearLayer(const ResizeBilinearDescriptor& resizeDescriptor,
                                                   const char* name)
{
    return m_Graph->AddLayer<ResizeBilinearLayer>(resizeDescriptor, name);
}

IConnectableLayer* Network::AddL2NormalizationLayer(const L2NormalizationDescriptor& desc,
                                                    const char* name)
{
    return m_Graph->AddLayer<L2NormalizationLayer>(desc, name);
}

IConnectableLayer* Network::AddConstantLayer(const ConstTensor& input, const char* name)
{
    auto layer = m_Graph->AddLayer<ConstantLayer>(name);

    layer->m_LayerOutput = std::make_unique<ScopedCpuTensorHandle>(input);

    return layer;
}

IConnectableLayer* Network::AddReshapeLayer(const ReshapeDescriptor& reshapeDescriptor,
                                            const char* name)
{
    return m_Graph->AddLayer<ReshapeLayer>(reshapeDescriptor, name);
}

IConnectableLayer* Network::AddSpaceToBatchNdLayer(const SpaceToBatchNdDescriptor& spaceToBatchNdDescriptor,
                                                   const char* name)
{
    return m_Graph->AddLayer<SpaceToBatchNdLayer>(spaceToBatchNdDescriptor, name);
}

IConnectableLayer* Network::AddFloorLayer(const char* name)
{
    return m_Graph->AddLayer<FloorLayer>(name);
}

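// Builds an LSTM layer from the descriptor and input parameters. The basic weights and
// biases are always required. The input-gate parameters are required when CIFG is
// disabled; projection and peephole parameters are required when their descriptor flags
// are enabled. All tensors are copied into handles owned by the layer.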
IConnectableLayer* Network::AddLstmLayer(const LstmDescriptor&  descriptor,
                                         const LstmInputParams& params,
                                         const char* name)
{
    const auto layer = m_Graph->AddLayer<LstmLayer>(descriptor, name);

    // LSTM basic parameters
    layer->m_BasicParameters.m_InputToForgetWeights =
        std::make_unique<ScopedCpuTensorHandle>(*(params.m_InputToForgetWeights));
    layer->m_BasicParameters.m_InputToCellWeights =
        std::make_unique<ScopedCpuTensorHandle>(*(params.m_InputToCellWeights));
    layer->m_BasicParameters.m_InputToOutputWeights =
        std::make_unique<ScopedCpuTensorHandle>(*(params.m_InputToOutputWeights));
    layer->m_BasicParameters.m_RecurrentToForgetWeights =
        std::make_unique<ScopedCpuTensorHandle>(*(params.m_RecurrentToForgetWeights));
    layer->m_BasicParameters.m_RecurrentToCellWeights =
        std::make_unique<ScopedCpuTensorHandle>(*(params.m_RecurrentToCellWeights));
    layer->m_BasicParameters.m_RecurrentToOutputWeights =
        std::make_unique<ScopedCpuTensorHandle>(*(params.m_RecurrentToOutputWeights));
    layer->m_BasicParameters.m_ForgetGateBias =
        std::make_unique<ScopedCpuTensorHandle>(*(params.m_ForgetGateBias));
    layer->m_BasicParameters.m_CellBias =
        std::make_unique<ScopedCpuTensorHandle>(*(params.m_CellBias));
    layer->m_BasicParameters.m_OutputGateBias =
        std::make_unique<ScopedCpuTensorHandle>(*(params.m_OutputGateBias));

    // LSTM CIFG parameters
    if (!descriptor.m_CifgEnabled)
    {
        if (params.m_InputToInputWeights == nullptr)
        {
            throw InvalidArgumentException("AddLstmLayer: Input To Input Weights cannot be NULL");
        }
        if (params.m_RecurrentToInputWeights == nullptr)
        {
            throw InvalidArgumentException(
                    "AddLstmLayer: Recurrent To Input Weights cannot be NULL");
        }
        if (params.m_InputGateBias == nullptr)
        {
            throw InvalidArgumentException("AddLstmLayer: Input Gate Bias cannot be NULL");
        }
        layer->m_CifgParameters.m_InputToInputWeights =
            std::make_unique<ScopedCpuTensorHandle>(*(params.m_InputToInputWeights));
        layer->m_CifgParameters.m_RecurrentToInputWeights =
            std::make_unique<ScopedCpuTensorHandle>(*(params.m_RecurrentToInputWeights));
        // In the VTS tests, cell-to-input weights may be null, even if the other CIFG params are not.
        if (params.m_CellToInputWeights != nullptr)
        {
            layer->m_CifgParameters.m_CellToInputWeights =
                std::make_unique<ScopedCpuTensorHandle>(*(params.m_CellToInputWeights));
        }
        layer->m_CifgParameters.m_InputGateBias =
            std::make_unique<ScopedCpuTensorHandle>(*(params.m_InputGateBias));
    }

    // LSTM projection parameters
    if (descriptor.m_ProjectionEnabled)
    {
        if (params.m_ProjectionWeights == nullptr)
        {
            throw InvalidArgumentException("AddLstmLayer: Projection Weights cannot be NULL");
        }
        layer->m_ProjectionParameters.m_ProjectionWeights =
            std::make_unique<ScopedCpuTensorHandle>(*(params.m_ProjectionWeights));
        if (params.m_ProjectionBias != nullptr)
        {
            layer->m_ProjectionParameters.m_ProjectionBias =
                std::make_unique<ScopedCpuTensorHandle>(*(params.m_ProjectionBias));
        }
    }

    // LSTM peephole parameters
    if (descriptor.m_PeepholeEnabled)
    {
        if (params.m_CellToForgetWeights == nullptr)
        {
            throw InvalidArgumentException("AddLstmLayer: Cell To Forget Weights cannot be NULL");
        }
        if (params.m_CellToOutputWeights == nullptr)
        {
            throw InvalidArgumentException("AddLstmLayer: Cell To Output Weights cannot be NULL");
        }
        layer->m_PeepholeParameters.m_CellToForgetWeights =
            std::make_unique<ScopedCpuTensorHandle>(*(params.m_CellToForgetWeights));
        layer->m_PeepholeParameters.m_CellToOutputWeights =
            std::make_unique<ScopedCpuTensorHandle>(*(params.m_CellToOutputWeights));
    }
    return layer;
}

IConnectableLayer* Network::AddDivisionLayer(const char* name)
{
    return m_Graph->AddLayer<DivisionLayer>(name);
}

IConnectableLayer* Network::AddSubtractionLayer(const char* name)
{
    return m_Graph->AddLayer<SubtractionLayer>(name);
}

IConnectableLayer* Network::AddMeanLayer(const MeanDescriptor& meanDescriptor, const char* name)
{
    return m_Graph->AddLayer<MeanLayer>(meanDescriptor, name);
}

IConnectableLayer* Network::AddPadLayer(const PadDescriptor& padDescriptor, const char* name)
{
    return m_Graph->AddLayer<PadLayer>(padDescriptor, name);
}

IConnectableLayer* Network::AddStridedSliceLayer(const StridedSliceDescriptor& stridedSliceDescriptor,
                                                 const char* name)
{
    return m_Graph->AddLayer<StridedSliceLayer>(stridedSliceDescriptor, name);
}

OptimizedNetwork::OptimizedNetwork(std::unique_ptr<Graph> graph)
    : m_Graph(std::move(graph))
{
}

OptimizedNetwork::~OptimizedNetwork()
{
}

} // namespace armnn