IVGCVSW-1946: Remove armnn/src from the include paths
[platform/upstream/armnn.git] / src / backends / backendsCommon / test / TensorCopyUtils.cpp
1 //
2 // Copyright © 2017 Arm Ltd. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5
6 #include "TensorCopyUtils.hpp"
7
8 #include <Half.hpp>
9
#ifdef ARMCOMPUTECL_ENABLED
#include <cl/ClTensorHandle.hpp>
#endif

#if ARMCOMPUTENEON_ENABLED
#include <neon/NeonTensorHandle.hpp>
#endif

// NOTE: was "#if ARMCOMPUTECLENABLED" (missing underscore) — the undefined macro
// evaluated to 0, so the utils header was skipped whenever only CL was enabled,
// breaking the CL branches below. Check the same macros the backend guards use.
#if defined(ARMCOMPUTECL_ENABLED) || ARMCOMPUTENEON_ENABLED
#include <aclCommon/ArmComputeTensorUtils.hpp>
#endif

#include <backendsCommon/CpuTensorHandle.hpp>
23
24 #include <boost/cast.hpp>
25
26 #include <algorithm>
27 #include <cstring>
28
// Copies raw host memory into a tensor handle, dispatching on the handle's backend type.
// `mem` must point to at least GetTensorInfo().GetNumBytes() of data laid out according
// to the tensor's data type. Throws armnn::UnimplementedException for any backend or
// data type not handled below.
void CopyDataToITensorHandle(armnn::ITensorHandle* tensorHandle, const void* mem)
{
    switch (tensorHandle->GetType())
    {
        case armnn::ITensorHandle::Cpu:
        {
            // CPU tensors expose their backing store directly, so a plain memcpy suffices.
            auto handle = boost::polymorphic_downcast<armnn::ScopedCpuTensorHandle*>(tensorHandle);
            memcpy(handle->GetTensor<void>(), mem, handle->GetTensorInfo().GetNumBytes());
            break;
        }
#ifdef ARMCOMPUTECL_ENABLED
        case armnn::ITensorHandle::CL:
        {
            using armnn::armcomputetensorutils::CopyArmComputeITensorData;
            auto handle = boost::polymorphic_downcast<armnn::IClTensorHandle*>(tensorHandle);
            // Map(true) = blocking map: wait until the OpenCL buffer is host-accessible
            // before the element-wise copy below.
            handle->Map(true);
            switch(handle->GetDataType())
            {
                case arm_compute::DataType::F32:
                    CopyArmComputeITensorData(static_cast<const float*>(mem), handle->GetTensor());
                    break;
                case arm_compute::DataType::QASYMM8:
                    CopyArmComputeITensorData(static_cast<const uint8_t*>(mem), handle->GetTensor());
                    break;
                case arm_compute::DataType::F16:
                    CopyArmComputeITensorData(static_cast<const armnn::Half*>(mem), handle->GetTensor());
                    break;
                default:
                {
                    throw armnn::UnimplementedException();
                }
            }
            handle->Unmap();
            break;
        }
#endif
#if ARMCOMPUTENEON_ENABLED
        case armnn::ITensorHandle::Neon:
        {
            using armnn::armcomputetensorutils::CopyArmComputeITensorData;
            auto handle = boost::polymorphic_downcast<armnn::INeonTensorHandle*>(tensorHandle);
            // No Map/Unmap here — presumably NEON tensors live in host memory and are
            // always accessible (unlike CL); confirm against INeonTensorHandle.
            // NOTE(review): unlike the CL case above, F16 is not handled for NEON and
            // falls through to UnimplementedException.
            switch (handle->GetDataType())
            {
                case arm_compute::DataType::F32:
                    CopyArmComputeITensorData(static_cast<const float*>(mem), handle->GetTensor());
                    break;
                case arm_compute::DataType::QASYMM8:
                    CopyArmComputeITensorData(static_cast<const uint8_t*>(mem), handle->GetTensor());
                    break;
                default:
                {
                    throw armnn::UnimplementedException();
                }
            }
            break;
        }
#endif
        default:
        {
            throw armnn::UnimplementedException();
        }
    }
}
92
// Copies a tensor handle's contents out into raw host memory, dispatching on the
// handle's backend type. `mem` must point to a buffer of at least
// GetTensorInfo().GetNumBytes() bytes. Throws armnn::UnimplementedException for any
// backend or data type not handled below.
void CopyDataFromITensorHandle(void* mem, const armnn::ITensorHandle* tensorHandle)
{
    switch (tensorHandle->GetType())
    {
        case armnn::ITensorHandle::Cpu:
        {
            // CPU tensors expose their backing store directly, so a plain memcpy suffices.
            auto handle = boost::polymorphic_downcast<const armnn::ScopedCpuTensorHandle*>(tensorHandle);
            memcpy(mem, handle->GetTensor<void>(), handle->GetTensorInfo().GetNumBytes());
            break;
        }
#ifdef ARMCOMPUTECL_ENABLED
        case armnn::ITensorHandle::CL:
        {
            using armnn::armcomputetensorutils::CopyArmComputeITensorData;
            auto handle = boost::polymorphic_downcast<const armnn::IClTensorHandle*>(tensorHandle);
            // Map/Unmap are non-const members but don't logically modify the tensor
            // data, so const_cast is used to call them on the const handle.
            // Map(true) = blocking map: wait until the buffer is host-accessible.
            const_cast<armnn::IClTensorHandle*>(handle)->Map(true);
            switch(handle->GetDataType())
            {
                case arm_compute::DataType::F32:
                    CopyArmComputeITensorData(handle->GetTensor(), static_cast<float*>(mem));
                    break;
                case arm_compute::DataType::QASYMM8:
                    CopyArmComputeITensorData(handle->GetTensor(), static_cast<uint8_t*>(mem));
                    break;
                case arm_compute::DataType::F16:
                    CopyArmComputeITensorData(handle->GetTensor(), static_cast<armnn::Half*>(mem));
                    break;
                default:
                {
                    throw armnn::UnimplementedException();
                }
            }
            const_cast<armnn::IClTensorHandle*>(handle)->Unmap();
            break;
        }
#endif
#if ARMCOMPUTENEON_ENABLED
        case armnn::ITensorHandle::Neon:
        {
            using armnn::armcomputetensorutils::CopyArmComputeITensorData;
            auto handle = boost::polymorphic_downcast<const armnn::INeonTensorHandle*>(tensorHandle);
            // No Map/Unmap here — presumably NEON tensors live in host memory and are
            // always accessible (unlike CL); confirm against INeonTensorHandle.
            // NOTE(review): unlike the CL case above, F16 is not handled for NEON and
            // falls through to UnimplementedException.
            switch (handle->GetDataType())
            {
                case arm_compute::DataType::F32:
                    CopyArmComputeITensorData(handle->GetTensor(), static_cast<float*>(mem));
                    break;
                case arm_compute::DataType::QASYMM8:
                    CopyArmComputeITensorData(handle->GetTensor(), static_cast<uint8_t*>(mem));
                    break;
                default:
                {
                    throw armnn::UnimplementedException();
                }
            }
            break;
        }
#endif
        default:
        {
            throw armnn::UnimplementedException();
        }
    }
}
156
// Convenience helper: allocates the handle's backing memory, then copies `mem` into it.
// `mem` must satisfy the same size/layout requirements as CopyDataToITensorHandle.
void AllocateAndCopyDataToITensorHandle(armnn::ITensorHandle* tensorHandle, const void* mem)
{
    tensorHandle->Allocate();
    CopyDataToITensorHandle(tensorHandle, mem);
}