src/armnn/backends/NeonWorkloads/NeonConvertFp32ToFp16Workload.cpp
//
// Copyright © 2017 Arm Ltd. All rights reserved.
// See LICENSE file in the project root for full license information.
//

#include "NeonConvertFp32ToFp16Workload.hpp"

#include "Half.hpp"
#include "FloatingPointConverter.hpp"

#include "Profiling.hpp"
#include "backends/WorkloadUtils.hpp"

namespace armnn
{

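// Validate that the workload has exactly one input and one output tensor,
// then gather the (source, destination) tensor handle pairs that Execute()
// will convert.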
NeonConvertFp32ToFp16Workload::NeonConvertFp32ToFp16Workload(const ConvertFp32ToFp16QueueDescriptor& descriptor,
                                                             const WorkloadInfo& info)
    : Float32ToFloat16Workload<ConvertFp32ToFp16QueueDescriptor>(descriptor, info)
{
    this->m_Data.ValidateInputsOutputs("NeonConvertFp32ToFp16Workload", 1, 1);
    GatherTensorHandlePairs(descriptor, m_TensorHandlePairs);
}

void NeonConvertFp32ToFp16Workload::Execute() const
{
    ARMNN_SCOPED_PROFILING_EVENT_NEON("NeonConvertFp32ToFp16Workload_Execute");

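    // Reinterpret the raw byte buffers as a float32 source and a float16
    // destination; at 2 bytes per fp16 element, size / 2 gives the number
    // of elements to convert.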
    auto convertFunc = [](uint8_t* dst, const uint8_t* src, size_t size)
        {
            auto input = reinterpret_cast<const float*>(src);
            auto output = reinterpret_cast<Half*>(dst);
            size_t numElements = size/2; // 2 bytes per fp16
            armnnUtils::FloatingPointConverter::ConvertFloat32To16(input, numElements, output);
        };

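    // Run the conversion for every (source, destination) tensor pair via the
    // generic tensor-copy helper, which applies convertFunc to the buffers.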
    for (const auto& pair : m_TensorHandlePairs)
    {
        CopyTensorContentsGeneric(pair.first, pair.second, convertFunc);
    }
}

} //namespace armnn