ARMNN_ASSERT(srcFloat32Buffer != nullptr);
ARMNN_ASSERT(dstFloat16Buffer != nullptr);
- armnn::Half* pHalf = reinterpret_cast<armnn::Half*>(dstFloat16Buffer);
+ armnn::Half* pHalf = static_cast<armnn::Half*>(dstFloat16Buffer);
for (size_t i = 0; i < numElements; i++)
{
ARMNN_ASSERT(srcFloat16Buffer != nullptr);
ARMNN_ASSERT(dstFloat32Buffer != nullptr);
- const armnn::Half* pHalf = reinterpret_cast<const armnn::Half*>(srcFloat16Buffer);
+ const armnn::Half* pHalf = static_cast<const armnn::Half*>(srcFloat16Buffer);
for (size_t i = 0; i < numElements; i++)
{
ARMNN_ASSERT(srcFloat32Buffer != nullptr);
ARMNN_ASSERT(dstBFloat16Buffer != nullptr);
- armnn::BFloat16* bf16 = reinterpret_cast<armnn::BFloat16*>(dstBFloat16Buffer);
+ armnn::BFloat16* bf16 = static_cast<armnn::BFloat16*>(dstBFloat16Buffer);
for (size_t i = 0; i < numElements; i++)
{
ARMNN_ASSERT(srcBFloat16Buffer != nullptr);
ARMNN_ASSERT(dstFloat32Buffer != nullptr);
- const armnn::BFloat16* bf16 = reinterpret_cast<const armnn::BFloat16*>(srcBFloat16Buffer);
+ const armnn::BFloat16* bf16 = static_cast<const armnn::BFloat16*>(srcBFloat16Buffer);
for (size_t i = 0; i < numElements; i++)
{
// Fill layer workload data.
struct FillQueueDescriptor : QueueDescriptorWithParameters<FillDescriptor>
{
- FillQueueDescriptor()
- : m_Value(nullptr)
- {
- }
-
- const ConstCpuTensorHandle* m_Value;
-
void Validate(const WorkloadInfo& workloadInfo) const;
};
test/layerTests/ElementwiseUnaryTestImpl.cpp \
test/layerTests/ExpTestImpl.cpp \
test/layerTests/FakeQuantizationTestImpl.cpp \
+ test/layerTests/FillTestImpl.cpp \
test/layerTests/FloorTestImpl.cpp \
test/layerTests/FullyConnectedTestImpl.cpp \
test/layerTests/GatherTestImpl.cpp \
layerTests/ExpTestImpl.hpp
layerTests/FakeQuantizationTestImpl.cpp
layerTests/FakeQuantizationTestImpl.hpp
+ layerTests/FillTestImpl.cpp
+ layerTests/FillTestImpl.hpp
layerTests/FloorTestImpl.cpp
layerTests/FloorTestImpl.hpp
layerTests/FullyConnectedTestImpl.cpp
#include <backendsCommon/test/layerTests/ElementwiseUnaryTestImpl.hpp>
#include <backendsCommon/test/layerTests/ExpTestImpl.hpp>
#include <backendsCommon/test/layerTests/FakeQuantizationTestImpl.hpp>
+#include <backendsCommon/test/layerTests/FillTestImpl.hpp>
#include <backendsCommon/test/layerTests/FloorTestImpl.hpp>
#include <backendsCommon/test/layerTests/FullyConnectedTestImpl.hpp>
#include <backendsCommon/test/layerTests/GatherTestImpl.hpp>
--- /dev/null
+//
+// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "FillTestImpl.hpp"
+
+#include <backendsCommon/test/DataTypeUtils.hpp>
+#include <backendsCommon/test/TensorCopyUtils.hpp>
+#include <backendsCommon/test/WorkloadTestUtils.hpp>
+
+#include <test/TensorHelpers.hpp>
+
+template<armnn::DataType ArmnnType, typename T>
+LayerTestResult<T, 4> SimpleFillTest(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+ IgnoreUnused(memoryManager);
+ armnn::TensorInfo inputTensorInfo({4}, ArmnnType);
+ inputTensorInfo.SetQuantizationScale(0.0f);
+
+ armnn::TensorInfo outputTensorInfo({2, 2, 3, 2}, ArmnnType);
+ outputTensorInfo.SetQuantizationScale(0.0f);
+
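+ // The Fill input acts as a shape tensor; its data {2, 2, 3, 2} matches the desired output shape.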
+ auto input = MakeTensor<T, 1>(inputTensorInfo, ConvertToDataType<ArmnnType>(
+ {2, 2, 3, 2},
+ inputTensorInfo));
+
+ LayerTestResult<T, 4> ret(outputTensorInfo);
+ ret.outputExpected = MakeTensor<T, 4>(outputTensorInfo, ConvertToDataType<ArmnnType>(
+ { 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f,
+ 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f },
+ outputTensorInfo));
+
+ std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
+ std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
+
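+ // The scalar fill value is supplied through the layer descriptor (FillDescriptor::m_Value) rather than a constant tensor.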
+ armnn::FillQueueDescriptor data;
+ data.m_Parameters.m_Value = 1.0f;
+ armnn::WorkloadInfo info;
+ AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
+ AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
+
+ std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateFill(data, info);
+
+ inputHandle->Allocate();
+ outputHandle->Allocate();
+
+ CopyDataToITensorHandle(inputHandle.get(), &input[0]);
+
+ workload->Execute();
+
+ CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
+
+ return ret;
+}
+
+//
+// Explicit template instantiations
+//
+template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 4>
+SimpleFillTest<armnn::DataType::Float32>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::Float16>, 4>
+SimpleFillTest<armnn::DataType::Float16>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
\ No newline at end of file
--- /dev/null
+//
+// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include "LayerTestResult.hpp"
+
+#include <ResolveType.hpp>
+
+#include <armnn/backends/IBackendInternal.hpp>
+#include <backendsCommon/WorkloadFactory.hpp>
+
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+LayerTestResult<T, 4> SimpleFillTest(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
return supported;
}
+bool RefLayerSupport::IsFillSupported(const TensorInfo& input,
+ const TensorInfo& output,
+ const FillDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported) const
+{
+ IgnoreUnused(descriptor);
+ IgnoreUnused(output);
+
+ bool supported = true;
+
+ std::array<DataType,2> supportedTypes =
+ {
+ DataType::Float32,
+ DataType::Float16
+ };
+
+ supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
+ "Reference Fill: input type not supported.");
+
+ return supported;
+}
+
bool RefLayerSupport::IsFloorSupported(const TensorInfo& input,
const TensorInfo& output,
Optional<std::string&> reasonIfUnsupported) const
const FakeQuantizationDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+ bool IsFillSupported(const TensorInfo& input,
+ const TensorInfo& output,
+ const FillDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
bool IsFloorSupported(const TensorInfo& input,
const TensorInfo& output,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
#include <backendsCommon/MemCopyWorkload.hpp>
#include <backendsCommon/MemImportWorkload.hpp>
#include <backendsCommon/MakeWorkloadHelper.hpp>
+#include <reference/workloads/RefFillWorkload.hpp>
#include "RefWorkloadFactory.hpp"
#include "RefBackendId.hpp"
#include "workloads/RefWorkloads.hpp"
return CreateComparison(comparisonDescriptor, info);
}
-std::unique_ptr<IWorkload> RefWorkloadFactory::CreateFakeQuantization(
- const FakeQuantizationQueueDescriptor& descriptor,
- const WorkloadInfo& info) const
+std::unique_ptr<IWorkload> RefWorkloadFactory::CreateFakeQuantization(const FakeQuantizationQueueDescriptor& descriptor,
+ const WorkloadInfo& info) const
{
return MakeWorkload<RefFakeQuantizationFloat32Workload, NullWorkload>(descriptor, info);
}
+std::unique_ptr<IWorkload> RefWorkloadFactory::CreateFill(const FillQueueDescriptor& descriptor,
+ const WorkloadInfo& info) const
+{
+ return std::make_unique<RefFillWorkload>(descriptor, info);
+}
+
std::unique_ptr<IWorkload> RefWorkloadFactory::CreateFloor(const FloorQueueDescriptor& descriptor,
const WorkloadInfo& info) const
{
std::unique_ptr<IWorkload> CreateFakeQuantization(const FakeQuantizationQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
+ std::unique_ptr<IWorkload> CreateFill(const FillQueueDescriptor& descriptor,
+ const WorkloadInfo& info) const override;
+
std::unique_ptr<IWorkload> CreateFloor(const FloorQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
workloads/DetectionPostProcess.cpp \
workloads/Dequantize.cpp \
workloads/ElementwiseFunction.cpp \
+ workloads/Fill.cpp \
workloads/FullyConnected.cpp \
workloads/Gather.cpp \
workloads/InstanceNorm.cpp \
workloads/RefElementwiseWorkload.cpp \
workloads/RefElementwiseUnaryWorkload.cpp \
workloads/RefFakeQuantizationFloat32Workload.cpp \
+ workloads/RefFillWorkload.cpp \
workloads/RefFloorWorkload.cpp \
workloads/RefFullyConnectedWorkload.cpp \
workloads/RefGatherWorkload.cpp \
ARMNN_AUTO_TEST_CASE(Concat4dDiffShapeDim2Uint8, Concat4dDiffShapeDim2Uint8Test)
ARMNN_AUTO_TEST_CASE(Concat4dDiffShapeDim3Uint8, Concat4dDiffShapeDim3Uint8Test, true)
+// Fill
+ARMNN_AUTO_TEST_CASE(SimpleFill, SimpleFillTest<DataType::Float32>)
+ARMNN_AUTO_TEST_CASE(SimpleFillF16, SimpleFillTest<DataType::Float16>)
+
// Floor
ARMNN_AUTO_TEST_CASE(SimpleFloor, SimpleFloorTest<DataType::Float32>)
ARMNN_AUTO_TEST_CASE(SimpleFloorFloat16, SimpleFloorTest<DataType::Float16>)
ElementwiseFunction.hpp
Encoders.hpp
Exp.hpp
+ Fill.cpp
+ Fill.hpp
FullyConnected.cpp
FullyConnected.hpp
Gather.cpp
RefElementwiseUnaryWorkload.hpp
RefFakeQuantizationFloat32Workload.cpp
RefFakeQuantizationFloat32Workload.hpp
+ RefFillWorkload.cpp
+ RefFillWorkload.hpp
RefFloorWorkload.cpp
RefFloorWorkload.hpp
RefFullyConnectedWorkload.cpp
--- /dev/null
+//
+// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "Fill.hpp"
+
+#include "RefWorkloadUtils.hpp"
+
+namespace armnn
+{
+
+void Fill(Encoder<float>& output,
+ const TensorShape& desiredOutputShape,
+ const float value)
+{
+ for(unsigned int i = 0; i < desiredOutputShape.GetNumElements(); ++i)
+ {
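+ // Encoder::operator[] repositions the encoder at element i; Set() then writes the fill value there.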
+ output[i];
+ output.Set(value);
+ }
+}
+
+} //namespace armnn
--- /dev/null
+//
+// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include "BaseIterator.hpp"
+#include "Decoders.hpp"
+#include "Encoders.hpp"
+#include <armnn/Tensor.hpp>
+#include <backendsCommon/WorkloadData.hpp>
+
+namespace armnn
+{
+
+/// Fills every element of the output tensor with the given scalar value.
+void Fill(Encoder<float>& output,
+ const TensorShape& desiredOutputShape,
+ const float value);
+
+} //namespace armnn
--- /dev/null
+//
+// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "RefFillWorkload.hpp"
+#include "Fill.hpp"
+
+#include "Decoders.hpp"
+#include "Encoders.hpp"
+#include "RefWorkloadUtils.hpp"
+#include "Profiling.hpp"
+
+namespace armnn
+{
+
+void RefFillWorkload::Execute() const
+{
+ ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefFillWorkload_Execute");
+
+ const TensorInfo& outputTensorInfo = GetTensorInfo(m_Data.m_Outputs[0]);
+
+ std::unique_ptr<Encoder<float>> encoderPtr = MakeEncoder<float>(outputTensorInfo, m_Data.m_Outputs[0]->Map());
+ Encoder<float>& encoder = *encoderPtr;
+
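+ // Write the descriptor's fill value into every element of the output tensor.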
+ Fill(encoder, outputTensorInfo.GetShape(), m_Data.m_Parameters.m_Value);
+}
+
+} //namespace armnn
--- /dev/null
+//
+// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <backendsCommon/Workload.hpp>
+#include <backendsCommon/WorkloadData.hpp>
+
+namespace armnn
+{
+
+class RefFillWorkload : public BaseWorkload<FillQueueDescriptor>
+{
+public:
+ using BaseWorkload<FillQueueDescriptor>::BaseWorkload;
+ virtual void Execute() const override;
+};
+
+} //namespace armnn
#include "RefDequantizeWorkload.hpp"
#include "RefElementwiseWorkload.hpp"
#include "RefElementwiseUnaryWorkload.hpp"
+#include "RefFillWorkload.hpp"
#include "RefFullyConnectedWorkload.hpp"
#include "RefFloorWorkload.hpp"
#include "RefFakeQuantizationFloat32Workload.hpp"