void FloorQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
- ValidateNumInputs(workloadInfo, "FloorQueueDescriptor", 1);
- ValidateNumOutputs(workloadInfo, "FlootQueueDescriptor", 1);
+ const std::string floorQueueDescString = "FloorQueueDescriptor";
+
+ ValidateNumInputs(workloadInfo, floorQueueDescString, 1);
+ ValidateNumOutputs(workloadInfo, floorQueueDescString, 1);
+
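+ // Floor is only supported for Float32 tensors.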
+ std::vector<DataType> supportedTypes =
+ {
+ DataType::Float32
+ };
+
+ ValidateDataTypes(workloadInfo.m_InputTensorInfos[0], supportedTypes, floorQueueDescString);
+ ValidateDataTypes(workloadInfo.m_OutputTensorInfos[0], supportedTypes, floorQueueDescString);
if (workloadInfo.m_InputTensorInfos[0] != workloadInfo.m_OutputTensorInfos[0])
{
- throw InvalidArgumentException("FloorQueueDescriptor: Input and output tensor infos do not match.");
+ throw InvalidArgumentException(floorQueueDescString + ": Input and output tensor infos do not match.");
}
}
Pooling2dTestImpl.hpp
QuantizeHelper.hpp
QuantizeTestImpl.hpp
- FloorTestImpl.hpp
RuntimeTestImpl.hpp
SoftmaxTestImpl.hpp
SplitterEndToEndTestImpl.hpp
+++ /dev/null
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-#pragma once
-
-#include "QuantizeHelper.hpp"
-#include "WorkloadTestUtils.hpp"
-
-#include <armnn/ArmNN.hpp>
-#include <armnn/Tensor.hpp>
-#include <armnn/TypesUtils.hpp>
-
-#include <backendsCommon/CpuTensorHandle.hpp>
-#include <backendsCommon/IBackendInternal.hpp>
-#include <backendsCommon/WorkloadFactory.hpp>
-
-#include <test/TensorHelpers.hpp>
-
-LayerTestResult<float, 4> SimpleFloorTest(
- armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
-{
- const armnn::TensorInfo inputTensorInfo({1, 3, 2, 3}, armnn::DataType::Float32);
- const armnn::TensorInfo outputTensorInfo(inputTensorInfo);
-
- auto input = MakeTensor<float, 4>(inputTensorInfo,
- { -37.5f, -15.2f, -8.76f, -2.0f, -1.5f, -1.3f, -0.5f, -0.4f, 0.0f,
- 1.0f, 0.4f, 0.5f, 1.3f, 1.5f, 2.0f, 8.76f, 15.2f, 37.5f });
-
- LayerTestResult<float, 4> ret(outputTensorInfo);
- ret.outputExpected = MakeTensor<float, 4>(outputTensorInfo,
- { -38.0f, -16.0f, -9.0f, -2.0f, -2.0f, -2.0f, -1.0f, -1.0f, 0.0f,
- 1.0f, 0.0f, 0.0f, 1.0f, 1.0f, 2.0f, 8.0f, 15.0f, 37.0f });
-
- std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
- std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
-
- armnn::FloorQueueDescriptor data;
- armnn::WorkloadInfo info;
- AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
- AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
-
- std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateFloor(data, info);
-
- inputHandle->Allocate();
- outputHandle->Allocate();
-
- CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
-
- workload->Execute();
-
- CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
-
- return ret;
-}
#include "BatchNormTestImpl.hpp"
#include "ActivationTestImpl.hpp"
#include "Pooling2dTestImpl.hpp"
-#include "FloorTestImpl.hpp"
#include "FullyConnectedTestImpl.hpp"
#include "GatherTestImpl.hpp"
#include "SpaceToBatchNdTestImpl.hpp"
return SimpleReshapeTestImpl<T>(
workloadFactory, memoryManager, inputTensorInfo, outputTensorInfo, input, outputExpected);
}
+
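+// Templated on the ArmNN data type so a single test implementation can
+// exercise Floor with any data type a backend supports.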
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+LayerTestResult<T, 4> SimpleFloorTest(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+ const armnn::TensorInfo inputTensorInfo({1, 3, 2, 3}, ArmnnType);
+ const armnn::TensorInfo outputTensorInfo(inputTensorInfo);
+
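+ // Convert the float reference data into the templated type T,
+ // quantizing where the target type requires it.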
+ auto input = MakeTensor<T, 4>(inputTensorInfo, ConvertToDataType<ArmnnType>(
+ { -37.5f, -15.2f, -8.76f, -2.0f, -1.5f, -1.3f, -0.5f, -0.4f, 0.0f,
+ 1.0f, 0.4f, 0.5f, 1.3f, 1.5f, 2.0f, 8.76f, 15.2f, 37.5f },
+ inputTensorInfo));
+
+ LayerTestResult<T, 4> ret(outputTensorInfo);
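+ // The expected output is the element-wise floor of the input values.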
+ ret.outputExpected = MakeTensor<T, 4>(outputTensorInfo, ConvertToDataType<ArmnnType>(
+ { -38.0f, -16.0f, -9.0f, -2.0f, -2.0f, -2.0f, -1.0f, -1.0f, 0.0f,
+ 1.0f, 0.0f, 0.0f, 1.0f, 1.0f, 2.0f, 8.0f, 15.0f, 37.0f },
+ outputTensorInfo));
+
+ std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
+ std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
+
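+ // Floor takes no parameters; the descriptor only carries the input and output.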
+ armnn::FloorQueueDescriptor data;
+ armnn::WorkloadInfo info;
+ AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
+ AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
+
+ std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateFloor(data, info);
+
+ inputHandle->Allocate();
+ outputHandle->Allocate();
+
+ CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
+
+ workload->Execute();
+
+ CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
+
+ return ret;
+}
\ No newline at end of file
ARMNN_AUTO_TEST_CASE(Concatenation4dDiffShapeDim3Uint8, Concatenation4dDiffShapeDim3Uint8Test, false)
// Floor
-ARMNN_AUTO_TEST_CASE(SimpleFloor, SimpleFloorTest)
+ARMNN_AUTO_TEST_CASE(SimpleFloor, SimpleFloorTest<armnn::DataType::Float32>)
// Reshape
ARMNN_AUTO_TEST_CASE(SimpleReshapeFloat32, SimpleReshapeTest<armnn::DataType::Float32>)
ARMNN_AUTO_TEST_CASE(L2Normalization4dNhwc, L2Normalization4dTest, armnn::DataLayout::NHWC)
// Floor
-ARMNN_AUTO_TEST_CASE(SimpleFloor, SimpleFloorTest)
+ARMNN_AUTO_TEST_CASE(SimpleFloor, SimpleFloorTest<armnn::DataType::Float32>)
// Greater
ARMNN_AUTO_TEST_CASE(SimpleGreater, GreaterSimpleTest)
Optional<std::string&> reasonIfUnsupported) const
{
ignore_unused(output);
- return IsSupportedForDataTypeRef(reasonIfUnsupported,
- input.GetDataType(),
- &TrueFunc<>,
- &FalseFuncU8<>);
+ bool supported = true;
+
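+ // The reference Floor implementation currently handles Float32 only.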
+ std::array<DataType,1> supportedTypes =
+ {
+ DataType::Float32
+ };
+
+ supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
+ "Reference Floor: input type not supported.");
+
+ supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
+ "Reference Floor: output type not supported.");
+
+ return supported;
}
bool RefLayerSupport::IsFullyConnectedSupported(const TensorInfo& input,
std::unique_ptr<IWorkload> RefWorkloadFactory::CreateFloor(const FloorQueueDescriptor& descriptor,
const WorkloadInfo& info) const
{
- return MakeWorkload<RefFloorFloat32Workload, NullWorkload>(descriptor, info);
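+ // RefFloorWorkload selects its data handling at runtime from the tensor
+ // info, so a single workload covers all supported types.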
+ return std::make_unique<RefFloorWorkload>(descriptor, info);
}
std::unique_ptr<IWorkload> RefWorkloadFactory::CreateLstm(const LstmQueueDescriptor& descriptor,
workloads/RefDetectionPostProcessUint8Workload.cpp \
workloads/RefElementwiseWorkload.cpp \
workloads/RefFakeQuantizationFloat32Workload.cpp \
- workloads/RefFloorFloat32Workload.cpp \
+ workloads/RefFloorWorkload.cpp \
workloads/RefFullyConnectedWorkload.cpp \
workloads/RefGatherWorkload.cpp \
workloads/RefL2NormalizationFloat32Workload.cpp \
ARMNN_AUTO_TEST_CASE(Concatenation4dDiffShapeDim3Uint8, Concatenation4dDiffShapeDim3Uint8Test, true)
// Floor
-ARMNN_AUTO_TEST_CASE(SimpleFloor, SimpleFloorTest)
+ARMNN_AUTO_TEST_CASE(SimpleFloor, SimpleFloorTest<armnn::DataType::Float32>)
// Reshape
ARMNN_AUTO_TEST_CASE(SimpleReshapeFloat32, SimpleReshapeTest<armnn::DataType::Float32>)
RefDetectionPostProcessFloat32Workload.hpp
RefFakeQuantizationFloat32Workload.cpp
RefFakeQuantizationFloat32Workload.hpp
- RefFloorFloat32Workload.cpp
- RefFloorFloat32Workload.hpp
+ RefFloorWorkload.cpp
+ RefFloorWorkload.hpp
RefFullyConnectedWorkload.cpp
RefFullyConnectedWorkload.hpp
RefGatherWorkload.cpp
+++ /dev/null
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#include "RefFloorFloat32Workload.hpp"
-
-#include "RefWorkloadUtils.hpp"
-
-#include "Profiling.hpp"
-
-namespace armnn
-{
-
-void RefFloorFloat32Workload::Execute() const
-{
- ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefFloorFloat32Workload_Execute");
-
- const float* const input = GetInputTensorDataFloat(0, m_Data);
- float* const output = GetOutputTensorDataFloat(0, m_Data);
-
- unsigned int numElements = GetTensorInfo(m_Data.m_Inputs[0]).GetNumElements();
- for (unsigned int i = 0; i < numElements; ++i)
- {
- output[i] = floorf(input[i]);
- }
-}
-
-} //namespace armnn
--- /dev/null
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "RefFloorWorkload.hpp"
+
+#include "Decoders.hpp"
+#include "Encoders.hpp"
+#include "RefWorkloadUtils.hpp"
+#include "Profiling.hpp"
+
+namespace armnn
+{
+
+void RefFloorWorkload::Execute() const
+{
+ ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefFloorWorkload_Execute");
+
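+ // Decoders and Encoders abstract reading and writing the tensor data,
+ // keeping this workload independent of the underlying data type.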
+ const TensorInfo& inputTensorInfo = GetTensorInfo(m_Data.m_Inputs[0]);
+ std::unique_ptr<Decoder<float>> decoderPtr = MakeDecoder<float>(inputTensorInfo, m_Data.m_Inputs[0]->Map());
+ Decoder<float>& decoder = *decoderPtr;
+
+ const TensorInfo& outputTensorInfo = GetTensorInfo(m_Data.m_Outputs[0]);
+ std::unique_ptr<Encoder<float>> encoderPtr = MakeEncoder<float>(outputTensorInfo, m_Data.m_Outputs[0]->Map());
+ Encoder<float>& encoder = *encoderPtr;
+
+ unsigned int numElements = inputTensorInfo.GetNumElements();
+
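+ // Apply floorf to each element, advancing the decoder and encoder in step.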
+ for (unsigned int i = 0; i < numElements; ++i)
+ {
+ encoder.Set(floorf(decoder.Get()));
+ ++decoder;
+ ++encoder;
+ }
+}
+
+} //namespace armnn
namespace armnn
{
-class RefFloorFloat32Workload : public Float32Workload<FloorQueueDescriptor>
+class RefFloorWorkload : public BaseWorkload<FloorQueueDescriptor>
{
public:
- using Float32Workload<FloorQueueDescriptor>::Float32Workload;
+ using BaseWorkload<FloorQueueDescriptor>::BaseWorkload;
virtual void Execute() const override;
};
#include "RefDepthwiseConvolution2dWorkload.hpp"
#include "FullyConnected.hpp"
#include "Gather.hpp"
-#include "RefFloorFloat32Workload.hpp"
+#include "RefFloorWorkload.hpp"
#include "RefSoftmaxWorkload.hpp"
#include "RefResizeBilinearFloat32Workload.hpp"
#include "RefBatchNormalizationUint8Workload.hpp"