// Copyright © 2017 Arm Ltd. All rights reserved.
// See LICENSE file in the project root for full license information.

#include "NeonSoftmaxUint8Workload.hpp"

namespace armnn
{

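// Neon uint8 softmax workload: configures and runs the underlying
// Arm Compute Library softmax layer on quantized uint8 tensors.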
NeonSoftmaxUint8Workload::NeonSoftmaxUint8Workload(const SoftmaxQueueDescriptor& descriptor,
                                                   const WorkloadInfo& info,
                                                   std::shared_ptr<arm_compute::MemoryManagerOnDemand>& memoryManager)
    : Uint8Workload<SoftmaxQueueDescriptor>(descriptor, info)
    , m_SoftmaxLayer(memoryManager)
{
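    // Softmax takes exactly one input tensor and produces exactly one output.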
    m_Data.ValidateInputsOutputs("NeonSoftmaxUint8Workload", 1, 1);
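
    // Unwrap the ArmNN tensor handles to get at the backing Arm Compute tensors.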
    arm_compute::ITensor& input = boost::polymorphic_downcast<INeonTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
    arm_compute::ITensor& output = boost::polymorphic_downcast<INeonTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
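
    // The result of softmax lies in [0, 1], so the output is expected to be
    // quantized with scale = 1/256 and offset = 0; anything else is rejected.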
    const auto outputQuantization = output.info()->quantization_info();
    if ((outputQuantization.scale != (1.0f / 256.0f)) || (outputQuantization.offset != 0))
    {
        throw InvalidArgumentException(
            "Invalid quantization for output. Only scale = 1.0f / 256.0f and offset = 0 supported");
    }
    m_SoftmaxLayer.configure(&input, &output, descriptor.m_Parameters.m_Beta);
}

void NeonSoftmaxUint8Workload::Execute() const
{
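    // Record a profiling event for the duration of this execution, then run
    // the previously configured softmax layer.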
    ARMNN_SCOPED_PROFILING_EVENT_NEON("NeonSoftmaxUint8Workload_Execute");

    m_SoftmaxLayer.run();
}

} // namespace armnn