Release 18.08
[platform/upstream/armnn.git] / src / armnn / backends / test / TensorCopyUtils.cpp
1 //
2 // Copyright © 2017 Arm Ltd. All rights reserved.
3 // See LICENSE file in the project root for full license information.
4 //
5
#include "TensorCopyUtils.hpp"

#include <algorithm>
#include <cstring>

#include <boost/cast.hpp>
#include <Half.hpp>

#include "backends/CpuTensorHandle.hpp"

#ifdef ARMCOMPUTECL_ENABLED
#include "backends/ClTensorHandle.hpp"
#endif

#if ARMCOMPUTENEON_ENABLED
#include "backends/NeonTensorHandle.hpp"
#endif

// NOTE(review): the guard previously read "ARMCOMPUTECLENABLED" (missing
// underscore), so a CL-only build never included ArmComputeTensorUtils.hpp
// even though the CL code paths below call CopyArmComputeITensorData from it.
// defined() is used so the check works whether the macros are merely defined
// (as ARMCOMPUTECL_ENABLED is tested with #ifdef) or carry a 0/1 value
// (as ARMCOMPUTENEON_ENABLED is tested with #if).
#if defined(ARMCOMPUTECL_ENABLED) || defined(ARMCOMPUTENEON_ENABLED)
#include "backends/ArmComputeTensorUtils.hpp"
#endif
26
27 void CopyDataToITensorHandle(armnn::ITensorHandle* tensorHandle, const void* mem)
28 {
29     switch (tensorHandle->GetType())
30     {
31         case armnn::ITensorHandle::Cpu:
32         {
33             auto handle = boost::polymorphic_downcast<armnn::ScopedCpuTensorHandle*>(tensorHandle);
34             memcpy(handle->GetTensor<void>(), mem, handle->GetTensorInfo().GetNumBytes());
35             break;
36         }
37 #ifdef ARMCOMPUTECL_ENABLED
38         case armnn::ITensorHandle::CL:
39         {
40             using armnn::armcomputetensorutils::CopyArmComputeITensorData;
41             auto handle = boost::polymorphic_downcast<armnn::IClTensorHandle*>(tensorHandle);
42             handle->Map(true);
43             switch(handle->GetDataType())
44             {
45                 case arm_compute::DataType::F32:
46                     CopyArmComputeITensorData(static_cast<const float*>(mem), handle->GetTensor());
47                     break;
48                 case arm_compute::DataType::QASYMM8:
49                     CopyArmComputeITensorData(static_cast<const uint8_t*>(mem), handle->GetTensor());
50                     break;
51                 case arm_compute::DataType::F16:
52                     CopyArmComputeITensorData(static_cast<const armnn::Half*>(mem), handle->GetTensor());
53                     break;
54                 default:
55                 {
56                     throw armnn::UnimplementedException();
57                 }
58             }
59             handle->Unmap();
60             break;
61         }
62 #endif
63 #if ARMCOMPUTENEON_ENABLED
64         case armnn::ITensorHandle::Neon:
65         {
66             using armnn::armcomputetensorutils::CopyArmComputeITensorData;
67             auto handle = boost::polymorphic_downcast<armnn::INeonTensorHandle*>(tensorHandle);
68             switch (handle->GetDataType())
69             {
70                 case arm_compute::DataType::F32:
71                     CopyArmComputeITensorData(static_cast<const float*>(mem), handle->GetTensor());
72                     break;
73                 case arm_compute::DataType::QASYMM8:
74                     CopyArmComputeITensorData(static_cast<const uint8_t*>(mem), handle->GetTensor());
75                     break;
76                 default:
77                 {
78                     throw armnn::UnimplementedException();
79                 }
80             }
81             break;
82         }
83 #endif
84         default:
85         {
86             throw armnn::UnimplementedException();
87         }
88     }
89 }
90
91 void CopyDataFromITensorHandle(void* mem, const armnn::ITensorHandle* tensorHandle)
92 {
93     switch (tensorHandle->GetType())
94     {
95         case armnn::ITensorHandle::Cpu:
96         {
97             auto handle = boost::polymorphic_downcast<const armnn::ScopedCpuTensorHandle*>(tensorHandle);
98             memcpy(mem, handle->GetTensor<void>(), handle->GetTensorInfo().GetNumBytes());
99             break;
100         }
101 #ifdef ARMCOMPUTECL_ENABLED
102         case armnn::ITensorHandle::CL:
103         {
104             using armnn::armcomputetensorutils::CopyArmComputeITensorData;
105             auto handle = boost::polymorphic_downcast<const armnn::IClTensorHandle*>(tensorHandle);
106             const_cast<armnn::IClTensorHandle*>(handle)->Map(true);
107             switch(handle->GetDataType())
108             {
109                 case arm_compute::DataType::F32:
110                     CopyArmComputeITensorData(handle->GetTensor(), static_cast<float*>(mem));
111                     break;
112                 case arm_compute::DataType::QASYMM8:
113                     CopyArmComputeITensorData(handle->GetTensor(), static_cast<uint8_t*>(mem));
114                     break;
115                 case arm_compute::DataType::F16:
116                     CopyArmComputeITensorData(handle->GetTensor(), static_cast<armnn::Half*>(mem));
117                     break;
118                 default:
119                 {
120                     throw armnn::UnimplementedException();
121                 }
122             }
123             const_cast<armnn::IClTensorHandle*>(handle)->Unmap();
124             break;
125         }
126 #endif
127 #if ARMCOMPUTENEON_ENABLED
128         case armnn::ITensorHandle::Neon:
129         {
130             using armnn::armcomputetensorutils::CopyArmComputeITensorData;
131             auto handle = boost::polymorphic_downcast<const armnn::INeonTensorHandle*>(tensorHandle);
132             switch (handle->GetDataType())
133             {
134                 case arm_compute::DataType::F32:
135                     CopyArmComputeITensorData(handle->GetTensor(), static_cast<float*>(mem));
136                     break;
137                 case arm_compute::DataType::QASYMM8:
138                     CopyArmComputeITensorData(handle->GetTensor(), static_cast<uint8_t*>(mem));
139                     break;
140                 default:
141                 {
142                     throw armnn::UnimplementedException();
143                 }
144             }
145             break;
146         }
147 #endif
148         default:
149         {
150             throw armnn::UnimplementedException();
151         }
152     }
153 }
154
155 void AllocateAndCopyDataToITensorHandle(armnn::ITensorHandle* tensorHandle, const void* mem)
156 {
157     tensorHandle->Allocate();
158     CopyDataToITensorHandle(tensorHandle, mem);
159 }