21de1440f134e8271588c9aa3cc81672bb64af0d
[platform/upstream/armnn.git] / src / armnn / LoadedNetwork.hpp
1 //
2 // Copyright © 2017 Arm Ltd. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 #pragma once
6
7 #include <armnn/Tensor.hpp>
8 #include <armnn/Types.hpp>
9
10 #include "Network.hpp"
11 #include "LayerFwd.hpp"
12 #include "Profiling.hpp"
13
14 #include <backends/IBackendInternal.hpp>
15 #include <backends/Workload.hpp>
16 #include <backends/WorkloadFactory.hpp>
17
18 #include <mutex>
19 #include <unordered_map>
20
// Forward declarations of the OpenCL C++ wrapper types so this header does
// not need to include the full CL headers. NOTE(review): none of the members
// below reference these types directly — presumably they are used by the
// out-of-line implementation or by older revisions; confirm they are still
// needed before removing.
namespace cl
{
    class Context;
    class CommandQueue;
    class Device;
}
27
28 namespace armnn
29 {
30
31 class LoadedNetwork
32 {
33 public:
34     using WorkloadQueue = std::vector< std::unique_ptr<IWorkload> >;
35     ~LoadedNetwork(){ FreeWorkingMemory(); }
36
37     TensorInfo GetInputTensorInfo(LayerBindingId layerId) const;
38     TensorInfo GetOutputTensorInfo(LayerBindingId layerId) const;
39
40     Status EnqueueWorkload(const InputTensors& inputTensors, const OutputTensors& outputTensors);
41
42     static std::unique_ptr<LoadedNetwork> MakeLoadedNetwork(std::unique_ptr<OptimizedNetwork> net,
43                                                             const IRuntime::CreationOptions& options,
44                                                             std::string & errorMessage);
45
46     // NOTE we return by reference as the purpose of this method is only to provide
47     // access to the private m_Profiler and in theory we should not need to increment
48     // the shared_ptr's reference counter
49     const std::shared_ptr<Profiler>& GetProfiler() const { return m_Profiler; }
50
51     void AllocateWorkingMemory();
52     void FreeWorkingMemory();
53
54 private:
55     LoadedNetwork(std::unique_ptr<OptimizedNetwork> net, const IRuntime::CreationOptions& options);
56
57     void EnqueueInput(const BindableLayer& layer, ITensorHandle* tensorHandle, const TensorInfo& tensorInfo);
58
59     void EnqueueOutput(const BindableLayer& layer, ITensorHandle* tensorHandle, const TensorInfo& tensorInfo);
60
61     bool Execute();
62
63     const IWorkloadFactory& GetWorkloadFactory(const Layer& layer) const;
64
65     using BackendPtrMap = std::unordered_map<BackendId, IBackendInternalUniquePtr>;
66     using WorkloadFactoryMap = std::unordered_map<BackendId, IBackendInternal::IWorkloadFactoryPtr>;
67
68     BackendPtrMap       m_Backends;
69     WorkloadFactoryMap  m_WorkloadFactories;
70
71     std::unique_ptr<OptimizedNetwork> m_OptimizedNetwork;
72     WorkloadQueue m_InputQueue;
73     WorkloadQueue m_WorkloadQueue;
74     WorkloadQueue m_OutputQueue;
75     std::shared_ptr<Profiler> m_Profiler;
76
77     using UniqueMutexLock = std::unique_lock<std::mutex>;
78     mutable std::mutex m_WorkingMemMutex;
79     UniqueMutexLock m_WorkingMemLock;
80
81     bool m_IsWorkingMemAllocated=false;
82 };
83
84 }