// NOTE(review): this span is an extracted fragment of a PReLU workload test
// (it resembles ArmNN's PreluTestImpl). The enclosing function signature,
// the tensor-info declarations, the `result`/`descriptor` declarations, and
// the execute/copy-back tail are all outside this view. The leading numerals
// on each line ("33", "36", ...) are listing line numbers from the HTML
// extraction, NOT C++ tokens — do not treat this fragment as compilable as-is.

// For quantized T, give input/alpha/output distinct scale/offset pairs so the
// quantize -> compute -> dequantize round trip is genuinely exercised (a shared
// scale/offset could mask (de)quantization bugs).
33 if (armnn::IsQuantizedType<T>())
36 inputTensorInfo.SetQuantizationOffset(128);
37 alphaTensorInfo.SetQuantizationScale(0.25f);
38 alphaTensorInfo.SetQuantizationOffset(50);
39 outputTensorInfo.SetQuantizationScale(0.5f);
40 outputTensorInfo.SetQuantizationOffset(120);

// Reference input: four groups of three identical values {0, 1, -1, -2}.
43 std::vector<float> inputData
47 0.0f, 0.0f, 0.0f, 1.0f, 1.0f, 1.0f, -1.0f, -1.0f, -1.0f, -2.0f, -2.0f, -2.0f

// Per-channel slopes for the negative half of PReLU. The literal values were
// lost in extraction; from the expected output below (rows -1 -> {0,-1,-2} and
// -2 -> {0,-2,-4}) alpha is presumably {0, 1, 2} broadcast over the input —
// TODO confirm against the original file.
49 std::vector<float> alphaData

// Expected PReLU result: f(x) = max(0, x) + alpha * min(0, x).
// Positive/zero inputs pass through unchanged; negative inputs are scaled by
// the broadcast alpha (consistent with the inferred alpha = {0, 1, 2}).
55 std::vector<float> outputExpectedData =
59 0.0f, 0.0f, 0.0f, 1.0f, 1.0f, 1.0f, 0.0f, -1.0f, -2.0f, 0.0f, -2.0f, -4.0f

// Quantize the float reference data into T using each tensor's own
// scale/offset, and wrap in 4-D test tensors.
62 auto input = MakeTensor<T, 4>(inputTensorInfo,
63 armnnUtils::QuantizedVector<T>(inputData,
64 inputTensorInfo.GetQuantizationScale(),
65 inputTensorInfo.GetQuantizationOffset()));

67 auto alpha = MakeTensor<T, 4>(alphaTensorInfo,
68 armnnUtils::QuantizedVector<T>(alphaData,
69 alphaTensorInfo.GetQuantizationScale(),
70 alphaTensorInfo.GetQuantizationOffset()));

// Golden output, quantized with the OUTPUT tensor's parameters (these differ
// from the input's, so a workload that reuses the wrong scale/offset fails).
73 result.outputExpected =
74 MakeTensor<T, 4>(outputTensorInfo,
75 armnnUtils::QuantizedVector<T>(outputExpectedData,
76 outputTensorInfo.GetQuantizationScale(),
77 outputTensorInfo.GetQuantizationOffset()));

// Backend tensor handles created via the workload factory under test.
79 std::unique_ptr <armnn::ITensorHandle> inputHandle = workloadFactory.
CreateTensorHandle(inputTensorInfo);
80 std::unique_ptr <armnn::ITensorHandle> alphaHandle = workloadFactory.
CreateTensorHandle(alphaTensorInfo);
81 std::unique_ptr <armnn::ITensorHandle> outputHandle = workloadFactory.
CreateTensorHandle(outputTensorInfo);

// Wire the two inputs (data, alpha) and one output into the PReLU queue
// descriptor / workload info; order must match the workload's expectation.
85 AddInputToWorkload (descriptor, info, inputTensorInfo, inputHandle.get());
86 AddInputToWorkload (descriptor, info, alphaTensorInfo, alphaHandle.get());
87 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

// Build the backend-specific PReLU workload from the populated descriptor.
89 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.
CreatePrelu(descriptor, info);

// Allocate backing memory; the copy-in / Execute / copy-out steps follow
// after this fragment (not visible here).
91 inputHandle->Allocate();
92 alphaHandle->Allocate();
93 outputHandle->Allocate();
void IgnoreUnused(Ts &&...)
void SetQuantizationScale(float scale)
void CopyDataFromITensorHandle(void *memory, const armnn::ITensorHandle *tensorHandle)
virtual std::unique_ptr< ITensorHandle > CreateTensorHandle(const TensorInfo &tensorInfo, const bool IsMemoryManaged=true) const =0
Contains information about inputs and outputs to a layer.
void CopyDataToITensorHandle(armnn::ITensorHandle *tensorHandle, const void *memory)
virtual std::unique_ptr< IWorkload > CreatePrelu(const PreluQueueDescriptor &descriptor, const WorkloadInfo &info) const