//
// Copyright © 2017 Arm Ltd. All rights reserved.
// See LICENSE file in the project root for full license information.
//

#include "NeonSoftmaxUint8Workload.hpp"

namespace armnn
{

NeonSoftmaxUint8Workload::NeonSoftmaxUint8Workload(const SoftmaxQueueDescriptor& descriptor,
                                                   const WorkloadInfo& info,
                                                   std::shared_ptr<arm_compute::MemoryManagerOnDemand>& memoryManager)
    : Uint8Workload<SoftmaxQueueDescriptor>(descriptor, info)
    , m_SoftmaxLayer(memoryManager)
{
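    // Softmax takes exactly one input tensor and produces exactly one output tensor.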
    m_Data.ValidateInputsOutputs("NeonSoftmaxUint8Workload", 1, 1);

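    // The Neon backend supplies INeonTensorHandles, so the downcast is safe;
    // boost::polymorphic_downcast verifies it with a dynamic_cast in debug builds.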
    arm_compute::ITensor& input = boost::polymorphic_downcast<INeonTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
    arm_compute::ITensor& output = boost::polymorphic_downcast<INeonTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();

    const auto outputQuantization = output.info()->quantization_info();

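    // Softmax outputs lie in [0, 1), so the only quantization that maps the full
    // uint8 range onto that interval is scale = 1/256 with a zero offset.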
    if ((outputQuantization.scale != (1.0f / 256.0f)) || (outputQuantization.offset != 0))
    {
        throw InvalidArgumentException(
            "Invalid quantization for output. Only scale = 1.0f / 256.0f and offset = 0 supported");
    }

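    // Configure the backing Arm Compute softmax layer, forwarding the beta
    // (exponent scaling) parameter from the ArmNN descriptor.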
    m_SoftmaxLayer.configure(&input, &output, descriptor.m_Parameters.m_Beta);
}

void NeonSoftmaxUint8Workload::Execute() const
{
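    // Records a scoped profiling event covering this workload's execution.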
    ARMNN_SCOPED_PROFILING_EVENT_NEON("NeonSoftmaxUint8Workload_Execute");

    m_SoftmaxLayer.run();
}

} // namespace armnn