2 // Copyright © 2017 Arm Ltd. All rights reserved.
3 // SPDX-License-Identifier: MIT
7 #include "QuantizeHelper.hpp"
9 #include <armnn/ArmNN.hpp>
10 #include <armnn/Tensor.hpp>
11 #include <armnn/TypesUtils.hpp>
13 #include <backendsCommon/CpuTensorHandle.hpp>
14 #include <backendsCommon/WorkloadFactory.hpp>
16 #include <test/TensorHelpers.hpp>
// Shared driver for the Reshape layer tests: builds a Reshape workload from the
// given factory, feeds it inputData, and returns a LayerTestResult holding both
// the workload's actual output and outputExpectedData for comparison.
// NOTE(review): the `template<typename T>` line and the `workload->Execute();` /
// `return ret;` statements are not visible in this view — confirm against the
// full file before relying on this span in isolation.
19 LayerTestResult<T, 4> SimpleReshapeTestImpl(
20 armnn::IWorkloadFactory& workloadFactory,
21 armnn::TensorInfo inputTensorInfo,
22 armnn::TensorInfo outputTensorInfo,
23 const std::vector<T>& inputData,
24 const std::vector<T>& outputExpectedData)
// Wrap the flat input data into a 4-D boost multi-array shaped by inputTensorInfo.
26 auto input = MakeTensor<T, 4>(inputTensorInfo, inputData);
28 LayerTestResult<T, 4> ret(outputTensorInfo);
29 ret.outputExpected = MakeTensor<T, 4>(outputTensorInfo, outputExpectedData);
// Backend-specific tensor handles for the workload's input and output.
31 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
32 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
// Describe the Reshape operation: one input, one output, no extra parameters.
34 armnn::ReshapeQueueDescriptor data;
35 armnn::WorkloadInfo info;
36 AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
37 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
39 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateReshape(data, info);
// Allocate backing memory before any data transfer.
41 inputHandle->Allocate();
42 outputHandle->Allocate();
44 CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
// Pull the computed result back into ret.output (the workload is executed
// between the two copies in the full file).
48 CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
// Float32 Reshape test: reshapes a {2,2,3,3} tensor into {2,2,9,1}. Reshape
// only reinterprets the shape, so the expected output is the input's values
// (0..35) in the same flat order.
53 LayerTestResult<float, 4> SimpleReshapeFloat32Test(armnn::IWorkloadFactory& workloadFactory)
55 armnn::TensorInfo inputTensorInfo;
56 armnn::TensorInfo outputTensorInfo;
// Both shapes describe 36 elements; only the dimension split differs.
58 unsigned int inputShape[] = { 2, 2, 3, 3 };
59 unsigned int outputShape[] = { 2, 2, 9, 1 };
61 inputTensorInfo = armnn::TensorInfo(4, inputShape, armnn::DataType::Float32);
62 outputTensorInfo = armnn::TensorInfo(4, outputShape, armnn::DataType::Float32);
// Input initializer (values 0.0f..35.0f) is elided from this view of the file.
64 std::vector<float> input = std::vector<float>(
83 std::vector<float> outputExpected = std::vector<float>(
85 0.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f,
87 9.0f, 10.0f, 11.0f, 12.0f, 13.0f, 14.0f, 15.0f, 16.0f, 17.0f,
89 18.0f, 19.0f, 20.0f, 21.0f, 22.0f, 23.0f, 24.0f, 25.0f, 26.0f,
91 27.0f, 28.0f, 29.0f, 30.0f, 31.0f, 32.0f, 33.0f, 34.0f, 35.0f,
// Delegate execution and result collection to the shared templated helper.
94 return SimpleReshapeTestImpl<float>(workloadFactory, inputTensorInfo, outputTensorInfo, input, outputExpected);
// Floor layer test: runs a Floor workload over a {1,3,2,3} Float32 tensor and
// checks each element is rounded toward negative infinity (e.g. -37.5 -> -38.0,
// 0.4 -> 0.0).
97 LayerTestResult<float, 4> SimpleFloorTest(armnn::IWorkloadFactory& workloadFactory)
// Floor is element-wise, so the output tensor info matches the input exactly.
99 const armnn::TensorInfo inputTensorInfo({1, 3, 2, 3}, armnn::DataType::Float32);
100 const armnn::TensorInfo outputTensorInfo(inputTensorInfo);
// Inputs cover negatives, fractions around +/-0.5, zero, and positives.
102 auto input = MakeTensor<float, 4>(inputTensorInfo,
103 { -37.5f, -15.2f, -8.76f, -2.0f, -1.5f, -1.3f, -0.5f, -0.4f, 0.0f,
104 1.0f, 0.4f, 0.5f, 1.3f, 1.5f, 2.0f, 8.76f, 15.2f, 37.5f });
106 LayerTestResult<float, 4> ret(outputTensorInfo);
// Expected values are floor() of each input element.
107 ret.outputExpected = MakeTensor<float, 4>(outputTensorInfo,
108 { -38.0f, -16.0f, -9.0f, -2.0f, -2.0f, -2.0f, -1.0f, -1.0f, 0.0f,
109 1.0f, 0.0f, 0.0f, 1.0f, 1.0f, 2.0f, 8.0f, 15.0f, 37.0f });
// Backend-specific tensor handles for the workload's input and output.
111 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
112 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
114 armnn::FloorQueueDescriptor data;
115 armnn::WorkloadInfo info;
116 AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
117 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
119 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateFloor(data, info);
// Allocate backing memory before any data transfer.
121 inputHandle->Allocate();
122 outputHandle->Allocate();
124 CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
// Read the result back into ret.output (the workload is executed between the
// two copies in the full file; `workload->Execute();` is not visible here).
128 CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
// Quantized (QuantisedAsymm8) Reshape test: same {2,2,3,3} -> {2,2,9,1} reshape
// as the Float32 variant, with uint8 values 0..35 and identity quantization
// (scale 1.0) so the raw bytes compare directly.
133 LayerTestResult<uint8_t, 4> SimpleReshapeUint8Test(armnn::IWorkloadFactory& workloadFactory)
135 armnn::TensorInfo inputTensorInfo;
136 armnn::TensorInfo outputTensorInfo;
// Both shapes describe 36 elements; only the dimension split differs.
138 unsigned int inputShape[] = { 2, 2, 3, 3 };
139 unsigned int outputShape[] = { 2, 2, 9, 1 };
141 inputTensorInfo = armnn::TensorInfo(4, inputShape, armnn::DataType::QuantisedAsymm8);
142 inputTensorInfo.SetQuantizationScale(1.0f);
143 outputTensorInfo = armnn::TensorInfo(4, outputShape, armnn::DataType::QuantisedAsymm8);
144 outputTensorInfo.SetQuantizationScale(1.0f);
// Input initializer (values 0..35) is elided from this view of the file.
146 std::vector<uint8_t> input = std::vector<uint8_t>(
165 std::vector<uint8_t> outputExpected = std::vector<uint8_t>(
167 0, 1, 2, 3, 4, 5, 6, 7, 8,
169 9, 10, 11, 12, 13, 14, 15, 16, 17,
171 18, 19, 20, 21, 22, 23, 24, 25, 26,
173 27, 28, 29, 30, 31, 32, 33, 34, 35,
// Delegate execution and result collection to the shared templated helper.
176 return SimpleReshapeTestImpl<uint8_t>(workloadFactory, inputTensorInfo, outputTensorInfo, input, outputExpected);