src/armnn/layers/QLstmLayer.cpp \
src/armnn/layers/QuantizeLayer.cpp \
src/armnn/layers/QuantizedLstmLayer.cpp \
+ src/armnn/layers/RankLayer.cpp \
src/armnn/layers/ReshapeLayer.cpp \
src/armnn/layers/ResizeLayer.cpp \
src/armnn/layers/SliceLayer.cpp \
#
-# Copyright © 2020 Arm Ltd. All rights reserved.
+# Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
# Copyright 2020 NXP
# SPDX-License-Identifier: MIT
#
src/armnn/layers/PreCompiledLayer.cpp
src/armnn/layers/PreluLayer.hpp
src/armnn/layers/PreluLayer.cpp
+ src/armnn/layers/RankLayer.hpp
+ src/armnn/layers/RankLayer.cpp
src/armnn/layers/ReshapeLayer.hpp
src/armnn/layers/ReshapeLayer.cpp
src/armnn/layers/ResizeLayer.hpp
src/armnnDeserializer/test/DeserializePad.cpp
src/armnnDeserializer/test/DeserializePermute.cpp
src/armnnDeserializer/test/DeserializePooling2d.cpp
+ src/armnnDeserializer/test/DeserializeRank.cpp
src/armnnDeserializer/test/DeserializeReshape.cpp
src/armnnDeserializer/test/DeserializeResizeBilinear.cpp
src/armnnDeserializer/test/DeserializeRsqrt.cpp
const QuantizedLstmInputParamsInfo& paramsInfo,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
+ virtual bool IsRankSupported(const TensorInfo& input,
+ const TensorInfo& output,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
+
virtual bool IsReshapeSupported(const TensorInfo& input,
const TensorInfo& output,
const ReshapeDescriptor& descriptor,
const QuantizedLstmInputParams& params,
const char* name = nullptr) = 0;
+ /// Function a rank layer should call back to when its Accept(ILayerVisitor&) function is invoked.
+ /// @param layer - pointer to the layer which is calling back to this visit function.
+ /// @param name - Optional name for the layer.
+ virtual void VisitRankLayer(const IConnectableLayer* layer,
+ const char* name = nullptr) = 0;
+
/// Function a reshape layer should call back to when its Accept(ILayerVisitor&) function is invoked.
/// @param layer - pointer to the layer which is calling back to this visit function.
/// @param reshapeDescriptor - Parameters for the reshape operation.
const ConstTensor& gamma,
const char* name = nullptr) = 0;
+ /// Adds a rank layer to the network.
+ /// @param name - Optional name for the layer.
+ /// @return - Interface for configuring the layer.
+ virtual IConnectableLayer* AddRankLayer(const char* name = nullptr) = 0;
+
/// Adds a resize bilinear layer to the network.
/// @param resizeDesc - Parameters for the resize operation.
/// @param name - Optional name for the layer.
const QuantizedLstmInputParams&,
const char*) override { DefaultPolicy::Apply(__func__); }
+ void VisitRankLayer(const IConnectableLayer*,
+ const char*) override { DefaultPolicy::Apply(__func__); }
+
void VisitReshapeLayer(const IConnectableLayer*,
const ReshapeDescriptor&,
const char*) override { DefaultPolicy::Apply(__func__); }
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once
X(Quantize) \
X(QuantizedLstm) \
X(Reshape) \
+ X(Rank) \
X(Resize) \
X(Slice) \
X(Softmax) \
#include "layers/QuantizeLayer.hpp"
#include "layers/QLstmLayer.hpp"
#include "layers/QuantizedLstmLayer.hpp"
+#include "layers/RankLayer.hpp"
#include "layers/ReshapeLayer.hpp"
#include "layers/ResizeLayer.hpp"
#include "layers/SliceLayer.hpp"
DECLARE_LAYER(Quantize)
DECLARE_LAYER(QLstm)
DECLARE_LAYER(QuantizedLstm)
+DECLARE_LAYER(Rank)
DECLARE_LAYER(Reshape)
DECLARE_LAYER(Resize)
DECLARE_LAYER(Slice)
return layer;
}
+IConnectableLayer* Network::AddRankLayer(const char* name)
+{
+ return m_Graph->AddLayer<RankLayer>(name);
+}
+
IConnectableLayer* Network::AddResizeBilinearLayer(const ResizeBilinearDescriptor& descriptor,
const char* name)
{
const ConstTensor& gamma,
const char* name = nullptr) override;
+ IConnectableLayer* AddRankLayer(const char* name = nullptr) override;
+
ARMNN_DEPRECATED_MSG("Use AddResizeLayer instead")
IConnectableLayer* AddResizeBilinearLayer(const ResizeBilinearDescriptor& resizeDesc,
const char* name = nullptr) override;
--- /dev/null
+//
+// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "RankLayer.hpp"
+
+#include "LayerCloneBase.hpp"
+
+#include <backendsCommon/WorkloadData.hpp>
+#include <backendsCommon/WorkloadFactory.hpp>
+
+namespace armnn
+{
+
+RankLayer::RankLayer(const char* name)
+    : Layer(1, 1, LayerType::Rank, name)
+{}
+
+std::unique_ptr<IWorkload> RankLayer::CreateWorkload(const IWorkloadFactory& factory) const
+{
+    // Rank has no parameters, so the descriptor only carries the base queue information.
+    RankQueueDescriptor descriptor;
+    return factory.CreateRank(descriptor, PrepInfoAndDesc(descriptor));
+}
+
+Layer* RankLayer::Clone(Graph& graph) const
+{
+    RankLayer* clone = CloneBase<RankLayer>(graph, GetName());
+    return clone;
+}
+
+void RankLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod)
+{
+    // The output of Rank is always a scalar, so no shape inference is required.
+    IgnoreUnused(shapeInferenceMethod);
+    VerifyLayerConnections(1, CHECK_LOCATION());
+
+    ConditionalThrowIfNotEqual<LayerValidationException>(
+            "Rank: TensorShape set on OutputSlot[0] does not match the inferred shape.",
+            GetOutputSlot(0).GetTensorInfo().GetShape(), {TensorShape{Dimensionality::Scalar}});
+}
+
+void RankLayer::Accept(ILayerVisitor& visitor) const
+{
+    visitor.VisitRankLayer(this, GetName());
+}
+
+} //namespace armnn
--- /dev/null
+//
+// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <Layer.hpp>
+
+namespace armnn
+{
+
+/// This layer represents a Rank operation: its output slot is validated against a
+/// scalar shape (see ValidateTensorShapesFromInputs).
+class RankLayer : public Layer
+{
+    public:
+        /// Makes a workload for the Rank type.
+        /// @param [in] factory The workload factory which will create the workload.
+        /// @return A pointer to the created workload, or nullptr if not created.
+        virtual std::unique_ptr<IWorkload> CreateWorkload(const IWorkloadFactory& factory) const override;
+
+        /// Creates a dynamically-allocated copy of this layer.
+        /// @param [in] graph The graph into which this layer is being cloned.
+        Layer* Clone(Graph& graph) const override;
+
+        /// Checks that the shape set on OutputSlot[0] matches the expected scalar shape.
+        /// @param [in] shapeInferenceMethod Unused; the output of Rank is always scalar.
+        void ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod) override;
+
+        /// Calls VisitRankLayer on the given visitor.
+        void Accept(ILayerVisitor& visitor) const override;
+
+    protected:
+        /// Constructor to create a RankLayer.
+        /// @param [in] name Optional name for the layer.
+        RankLayer(const char* name);
+
+        /// Default destructor.
+        ~RankLayer() = default;
+};
+
+} //namespace armnn
+
+
TEST_SUITE_NAME_ONLY_LAYER_VISITOR(Multiplication)
TEST_SUITE_NAME_ONLY_LAYER_VISITOR(Prelu)
TEST_SUITE_NAME_ONLY_LAYER_VISITOR(Quantize)
+TEST_SUITE_NAME_ONLY_LAYER_VISITOR(Rank)
TEST_SUITE_NAME_ONLY_LAYER_VISITOR(Subtraction)
TEST_SUITE_NAME_ONLY_LAYER_VISITOR(Switch)
DECLARE_TEST_NAME_ONLY_LAYER_VISITOR_CLASS(Multiplication)
DECLARE_TEST_NAME_ONLY_LAYER_VISITOR_CLASS(Prelu)
DECLARE_TEST_NAME_ONLY_LAYER_VISITOR_CLASS(Quantize)
+DECLARE_TEST_NAME_ONLY_LAYER_VISITOR_CLASS(Rank)
DECLARE_TEST_NAME_ONLY_LAYER_VISITOR_CLASS(Subtraction)
DECLARE_TEST_NAME_ONLY_LAYER_VISITOR_CLASS(Switch)
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
m_ParserFunctions[Layer_QLstmLayer] = &Deserializer::ParseQLstm;
m_ParserFunctions[Layer_QuantizeLayer] = &Deserializer::ParseQuantize;
m_ParserFunctions[Layer_QuantizedLstmLayer] = &Deserializer::ParseQuantizedLstm;
+ m_ParserFunctions[Layer_RankLayer] = &Deserializer::ParseRank;
m_ParserFunctions[Layer_ReshapeLayer] = &Deserializer::ParseReshape;
m_ParserFunctions[Layer_ResizeBilinearLayer] = &Deserializer::ParseResizeBilinear;
m_ParserFunctions[Layer_ResizeLayer] = &Deserializer::ParseResize;
return graphPtr->layers()->Get(layerIndex)->layer_as_QuantizeLayer()->base();
case Layer::Layer_QuantizedLstmLayer:
return graphPtr->layers()->Get(layerIndex)->layer_as_QuantizedLstmLayer()->base();
+ case Layer::Layer_RankLayer:
+ return graphPtr->layers()->Get(layerIndex)->layer_as_RankLayer()->base();
case Layer::Layer_ReshapeLayer:
return graphPtr->layers()->Get(layerIndex)->layer_as_ReshapeLayer()->base();
case Layer::Layer_ResizeBilinearLayer:
}
}
+ if (tensorPtr->dimensionality() == static_cast<unsigned int>(Dimensionality::Scalar))
+ {
+ float quantizationScale = tensorPtr->quantizationScale();
+ int32_t quantizationOffset = tensorPtr->quantizationOffset();
+
+ return armnn::TensorInfo(armnn::TensorShape{armnn::Dimensionality::Scalar},
+ type,
+ quantizationScale,
+ quantizationOffset);
+ }
auto dimensions = tensorPtr->dimensions();
unsigned int size = dimensions->size();
return reshapeInfo;
}
+// Deserializes a Rank layer: expects exactly one input and one output tensor.
+void Deserializer::ParseRank(GraphPtr graph, unsigned int layerIndex)
+{
+    CHECK_LAYERS(graph, 0, layerIndex);
+
+    Deserializer::TensorRawPtrVector inputs = GetInputs(graph, layerIndex);
+    CHECK_VALID_SIZE(inputs.size(), 1);
+
+    Deserializer::TensorRawPtrVector outputs = GetOutputs(graph, layerIndex);
+    CHECK_VALID_SIZE(outputs.size(), 1);
+
+    auto layerName = GetLayerName(graph, layerIndex);
+    IConnectableLayer* layer = m_Network->AddRankLayer(layerName.c_str());
+
+    armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
+    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
+
+    RegisterInputSlots(graph, layerIndex, layer);
+    RegisterOutputSlots(graph, layerIndex, layer);
+}
+
+
void Deserializer::ParseReshape(GraphPtr graph, unsigned int layerIndex)
{
CHECK_LAYERS(graph, 0, layerIndex);
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
void ParsePrelu(GraphPtr graph, unsigned int layerIndex);
void ParseQLstm(GraphPtr graph, unsigned int layerIndex);
void ParseQuantize(GraphPtr graph, unsigned int layerIndex);
+ void ParseRank(GraphPtr graph, unsigned int layerIndex);
void ParseReshape(GraphPtr graph, unsigned int layerIndex);
void ParseResize(GraphPtr graph, unsigned int layerIndex);
void ParseResizeBilinear(GraphPtr graph, unsigned int layerIndex);
* Quantize
* QLstm
* QuantizedLstm
+* Rank
* Reshape
* Resize
* ResizeBilinear
--- /dev/null
+//
+// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include <boost/test/unit_test.hpp>
+#include "ParserFlatbuffersSerializeFixture.hpp"
+#include "../Deserializer.hpp"
+
+#include <string>
+
+BOOST_AUTO_TEST_SUITE(Deserializer)
+
+struct RankFixture : public ParserFlatbuffersSerializeFixture
+{
+ explicit RankFixture(const std::string &inputShape,
+ const std::string &dataType)
+ {
+ m_JsonString = R"(
+ {
+ inputIds: [0],
+ outputIds: [2],
+ layers: [
+ {
+ layer_type: "InputLayer",
+ layer: {
+ base: {
+ base: {
+ layerName: "",
+ layerType: "Input",
+ inputSlots: [
+
+ ],
+ outputSlots: [
+ {
+ tensorInfo: {
+ dimensions: )" + inputShape + R"(,
+ dataType: )" + dataType + R"(,
+ quantizationScale: 0.0
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ layer_type: "RankLayer",
+ layer: {
+ base: {
+ index: 1,
+ layerName: "rank",
+ layerType: "Rank",
+ inputSlots: [
+ {
+ connection: {
+ sourceLayerIndex: 0,
+ outputSlotIndex: 0
+ }
+ }
+ ],
+ outputSlots: [
+ {
+ tensorInfo: {
+ dimensions: [ 1 ],
+ dataType: "Signed32",
+ quantizationScale: 0.0,
+ dimensionality: 2
+ }
+ }
+ ]
+ }
+ }
+ },
+ {
+ layer_type: "OutputLayer",
+ layer: {
+ base: {
+ base: {
+ index: 2,
+ layerName: "",
+ layerType: "Output",
+ inputSlots: [
+ {
+ connection: {
+ sourceLayerIndex: 1,
+ outputSlotIndex: 0
+ }
+ }
+ ],
+ outputSlots: []
+ }
+ }
+ }
+ }
+ ],
+ }
+ )";
+ Setup();
+ }
+};
+
+struct SimpleRankDimSize1Fixture : RankFixture
+{
+ SimpleRankDimSize1Fixture() : RankFixture("[ 8 ]", "QSymmS16") {}
+};
+
+struct SimpleRankDimSize2Fixture : RankFixture
+{
+ SimpleRankDimSize2Fixture() : RankFixture("[ 3, 3 ]", "QSymmS8") {}
+};
+
+struct SimpleRankDimSize3Fixture : RankFixture
+{
+ SimpleRankDimSize3Fixture() : RankFixture("[ 2, 2, 1 ]", "Signed32") {}
+};
+
+struct SimpleRankDimSize4Fixture : RankFixture
+{
+ SimpleRankDimSize4Fixture() : RankFixture("[ 2, 2, 1, 1 ]", "Float32") {}
+};
+
+// Test case names reflect the data type actually exercised by each fixture.
+BOOST_FIXTURE_TEST_CASE(RankDimSize1QSymmS16, SimpleRankDimSize1Fixture)
+{
+    RunTest<1, armnn::DataType::QSymmS16, armnn::DataType::Signed32>( 0,
+                                                                      { 1, 2, 3, 4, 5, 6, 7, 8 },
+                                                                      { 1 });
+}
+
+BOOST_FIXTURE_TEST_CASE(RankDimSize2QSymmS8, SimpleRankDimSize2Fixture)
+{
+    RunTest<1, armnn::DataType::QSymmS8, armnn::DataType::Signed32>( 0,
+                                                                     { 1, 2, 3, 4, 5, 6, 7, 8, 9 },
+                                                                     { 2 });
+}
+
+BOOST_FIXTURE_TEST_CASE(RankDimSize3Signed32, SimpleRankDimSize3Fixture)
+{
+    RunTest<1, armnn::DataType::Signed32, armnn::DataType::Signed32>( 0,
+                                                                      { 111, 85, 226, 3 },
+                                                                      { 3 });
+}
+
+BOOST_FIXTURE_TEST_CASE(RankDimSize4Float32, SimpleRankDimSize4Fixture)
+{
+    RunTest<1, armnn::DataType::Float32, armnn::DataType::Signed32>( 0,
+                                                                     { 111, 85, 226, 3 },
+                                                                     { 4 });
+}
+
+BOOST_AUTO_TEST_SUITE_END()
quantizationOffset:int = 0;
quantizationScales:[float];
quantizationDim:uint;
+ dimensionality:uint = 1;
}
struct Connection {
ElementwiseUnary = 54,
Transpose = 55,
QLstm = 56,
- Fill = 57
+ Fill = 57,
+ Rank = 58
}
// Base layer table to be used as part of other layers
descriptor:StandInDescriptor;
}
+table RankLayer {
+ base:LayerBase;
+}
+
union Layer {
ActivationLayer,
AdditionLayer,
ElementwiseUnaryLayer,
TransposeLayer,
QLstmLayer,
- FillLayer
+ FillLayer,
+ RankLayer
}
table AnyLayer {
// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
-
#include "Serializer.hpp"
#include <armnn/Descriptors.hpp>
CreateAnyLayer(flatBufferPermuteLayer.o, serializer::Layer::Layer_PermuteLayer);
}
+// Build FlatBuffer for Rank Layer
+void SerializerVisitor::VisitRankLayer(const armnn::IConnectableLayer* layer,
+ const char* name)
+{
+ IgnoreUnused(name);
+ auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Rank);
+ auto flatBufferRankLayer = serializer::CreateRankLayer(m_flatBufferBuilder, flatBufferBaseLayer);
+
+ CreateAnyLayer(flatBufferRankLayer.o, serializer::Layer::Layer_RankLayer);
+}
// Build FlatBuffer for Reshape Layer
void SerializerVisitor::VisitReshapeLayer(const armnn::IConnectableLayer* layer,
const armnn::ReshapeDescriptor& reshapeDescriptor,
tensorInfo.GetQuantizationScales()[0],
tensorInfo.GetQuantizationOffset(),
m_flatBufferBuilder.CreateVector(tensorInfo.GetQuantizationScales()),
- tensorInfo.GetQuantizationDim().value());
+ tensorInfo.GetQuantizationDim().value(),
+ static_cast<unsigned int>
+ (tensorInfo.GetShape().GetDimensionality()));
return flatBufferTensorInfo;
}
m_flatBufferBuilder.CreateVector(shape),
GetFlatBufferDataType(tensorInfo.GetDataType()),
tensorInfo.GetQuantizationScale(),
- tensorInfo.GetQuantizationOffset());
+ tensorInfo.GetQuantizationOffset(),
+ 0,
+ 0,
+ static_cast<unsigned int>
+ (tensorInfo.GetShape().GetDimensionality()));
return flatBufferTensorInfo;
}
return !stream.bad();
}
-} // namespace armnnSerializer
\ No newline at end of file
+} // namespace armnnSerializer
const armnn::QuantizedLstmInputParams& params,
const char* name = nullptr) override;
+ void VisitRankLayer(const armnn::IConnectableLayer* layer,
+ const char* name = nullptr) override;
+
void VisitReshapeLayer(const armnn::IConnectableLayer* layer,
const armnn::ReshapeDescriptor& reshapeDescriptor,
const char* name = nullptr) override;
* QLstm
* Quantize
* QuantizedLstm
+* Rank
* Reshape
* Resize
* Slice
deserializedNetwork->Accept(verifier);
}
+BOOST_AUTO_TEST_CASE(SerializeRank)
+{
+ DECLARE_LAYER_VERIFIER_CLASS(Rank)
+
+ const std::string layerName("rank");
+ const armnn::TensorInfo inputInfo({1, 9}, armnn::DataType::Float32);
+ const armnn::TensorInfo outputInfo({1}, armnn::DataType::Signed32);
+
+ armnn::INetworkPtr network = armnn::INetwork::Create();
+ armnn::IConnectableLayer* const inputLayer = network->AddInputLayer(0);
+ armnn::IConnectableLayer* const rankLayer = network->AddRankLayer(layerName.c_str());
+ armnn::IConnectableLayer* const outputLayer = network->AddOutputLayer(0);
+
+ inputLayer->GetOutputSlot(0).Connect(rankLayer->GetInputSlot(0));
+ rankLayer->GetOutputSlot(0).Connect(outputLayer->GetInputSlot(0));
+
+ inputLayer->GetOutputSlot(0).SetTensorInfo(inputInfo);
+ rankLayer->GetOutputSlot(0).SetTensorInfo(outputInfo);
+
+ armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(SerializeNetwork(*network));
+ BOOST_CHECK(deserializedNetwork);
+
+ RankLayerVerifier verifier(layerName, {inputInfo}, {outputInfo});
+ deserializedNetwork->Accept(verifier);
+}
+
BOOST_AUTO_TEST_CASE(SerializeReshape)
{
DECLARE_LAYER_VERIFIER_CLASS_WITH_DESCRIPTOR(Reshape)
return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}
+bool LayerSupportBase::IsRankSupported(const TensorInfo&, // input
+ const TensorInfo&, // output
+ Optional<std::string&> reasonIfUnsupported) const
+{
+ return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
+}
+
bool LayerSupportBase::IsReshapeSupported(const TensorInfo&, // input
const TensorInfo&, // output
const ReshapeDescriptor&, // descriptor
const QuantizedLstmInputParamsInfo& paramsInfo,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+ bool IsRankSupported(const TensorInfo& input,
+ const TensorInfo& output,
+ Optional<std::string&> reasonIfUnsupported) const override;
+
bool IsReshapeSupported(const TensorInfo& input,
const TensorInfo& output,
const ReshapeDescriptor& descriptor,
ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
}
+void RankQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
+{
+ const std::string descriptorName{"RankQueueDescriptor"};
+
+ ValidateNumInputs(workloadInfo, descriptorName, 1);
+ ValidateNumOutputs(workloadInfo, descriptorName, 1);
+
+ const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
+ const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
+
+ ValidateTensorNumDimensions(outputTensorInfo, descriptorName, 1, "output");
+ ValidateTensorNumElements(outputTensorInfo, descriptorName, 1, "output");
+
+ std::vector<DataType> supportedTypes =
+ {
+ DataType::BFloat16,
+ DataType::Float16,
+ DataType::Float32,
+ DataType::QAsymmS8,
+ DataType::QAsymmU8,
+ DataType::QSymmS8,
+ DataType::QSymmS16,
+ DataType::Signed32
+ };
+
+ ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
+ ValidateDataTypes(outputTensorInfo, { DataType::Signed32 }, descriptorName);
+}
+
} // namespace armnn
void Validate(const WorkloadInfo& workloadInfo) const;
};
+struct RankQueueDescriptor : QueueDescriptor
+{
+ void Validate(const WorkloadInfo& workloadInfo) const;
+};
+
struct ResizeBilinearQueueDescriptor : QueueDescriptorWithParameters<ResizeBilinearDescriptor>
{
void Validate(const WorkloadInfo& workloadInfo) const;
#include <armnn/utility/PolymorphicDowncast.hpp>
#include <backendsCommon/WorkloadFactory.hpp>
-#include <armnn/backends/IBackendInternal.hpp>
#include <backendsCommon/CpuTensorHandle.hpp>
-#include <backendsCommon/WorkloadFactory.hpp>
#include <backendsCommon/test/WorkloadTestUtils.hpp>
#include <boost/iterator/transform_iterator.hpp>
-#include <cstring>
#include <sstream>
namespace armnn
reason);
break;
}
+ case LayerType::Rank:
+ {
+ const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
+ const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
+ result = layerSupportObject->IsRankSupported(OverrideDataType(input, dataType),
+ OverrideDataType(output, dataType),
+ reason);
+ break;
+ }
case LayerType::Reshape:
{
auto cLayer = PolymorphicDowncast<const ReshapeLayer*>(&layer);
{
return std::unique_ptr<IWorkload>();
}
+std::unique_ptr<IWorkload> IWorkloadFactory::CreateRank(const RankQueueDescriptor& /*descriptor*/,
+ const WorkloadInfo& /*info*/) const
+{
+ return std::unique_ptr<IWorkload>();
+}
std::unique_ptr<IWorkload> IWorkloadFactory::CreateReshape(const ReshapeQueueDescriptor& /*descriptor*/,
const WorkloadInfo& /*info*/) const
virtual std::unique_ptr<IWorkload> CreateQuantizedLstm(const QuantizedLstmQueueDescriptor& descriptor,
const WorkloadInfo& info) const;
+ virtual std::unique_ptr<IWorkload> CreateRank(const RankQueueDescriptor& descriptor,
+ const WorkloadInfo& info) const;
+
virtual std::unique_ptr<IWorkload> CreateReshape(const ReshapeQueueDescriptor& descriptor,
const WorkloadInfo& info) const;
const WorkloadInfo& /*info*/) const override
{ return nullptr; }
+ std::unique_ptr<IWorkload> CreateRank(const RankQueueDescriptor& /*descriptor*/,
+ const WorkloadInfo& /*info*/) const override
+ { return nullptr; }
+
std::unique_ptr<IWorkload> CreateReshape(const ReshapeQueueDescriptor& /*descriptor*/,
const WorkloadInfo& /*info*/) const override
{ return nullptr; }
test/layerTests/NormalizationTestImpl.cpp \
test/layerTests/PadTestImpl.cpp \
test/layerTests/Pooling2dTestImpl.cpp \
+ test/layerTests/RankTestImpl.cpp \
test/layerTests/ReshapeTestImpl.cpp \
test/layerTests/ResizeTestImpl.cpp \
test/layerTests/RsqrtTestImpl.cpp \
layerTests/PreluTestImpl.hpp
layerTests/QuantizeTestImpl.cpp
layerTests/QuantizeTestImpl.hpp
+ layerTests/RankTestImpl.cpp
+ layerTests/RankTestImpl.hpp
layerTests/ReshapeTestImpl.cpp
layerTests/ReshapeTestImpl.hpp
layerTests/ResizeTestImpl.cpp
DECLARE_LAYER_POLICY_1_PARAM(Division)
+DECLARE_LAYER_POLICY_1_PARAM(Rank)
+
DECLARE_LAYER_POLICY_2_PARAM(Resize)
DECLARE_LAYER_POLICY_2_PARAM(Reshape)
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include <backendsCommon/test/layerTests/Pooling2dTestImpl.hpp>
#include <backendsCommon/test/layerTests/PreluTestImpl.hpp>
#include <backendsCommon/test/layerTests/QuantizeTestImpl.hpp>
+#include <backendsCommon/test/layerTests/RankTestImpl.hpp>
#include <backendsCommon/test/layerTests/ReshapeTestImpl.hpp>
#include <backendsCommon/test/layerTests/ResizeTestImpl.hpp>
#include <backendsCommon/test/layerTests/RsqrtTestImpl.hpp>
--- /dev/null
+//
+// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "RankTestImpl.hpp"
+
+#include <backendsCommon/test/DataTypeUtils.hpp>
+#include <backendsCommon/test/TensorCopyUtils.hpp>
+#include <backendsCommon/test/WorkloadTestUtils.hpp>
+
+#include <test/TensorHelpers.hpp>
+
+// Runs a Rank workload on the given input and checks the scalar Signed32 output
+// against the compile-time rank n of the input multi_array.
+template<typename T, std::size_t n>
+LayerTestResult<int32_t, 1> RankTest(
+    armnn::TensorInfo inputTensorInfo,
+    boost::multi_array<T, n> input,
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+    IgnoreUnused(memoryManager);
+
+    // Rank always produces a scalar Signed32 tensor holding the input's dimension count.
+    const armnn::TensorShape outputShape{armnn::Dimensionality::Scalar};
+    armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Signed32);
+
+    LayerTestResult<int32_t, 1> ret(outputTensorInfo);
+    // The expected tensor's element type must match the int32_t result type, and the
+    // std::size_t rank n must be narrowed explicitly.
+    ret.outputExpected = MakeTensor<int32_t, 1>(outputTensorInfo, { static_cast<int32_t>(n) });
+
+    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
+    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
+
+    armnn::RankQueueDescriptor data;
+    armnn::WorkloadInfo info;
+    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
+    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
+
+    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateRank(data, info);
+
+    inputHandle->Allocate();
+    outputHandle->Allocate();
+
+    CopyDataToITensorHandle(inputHandle.get(), input.origin());
+
+    workload->Execute();
+
+    CopyDataFromITensorHandle(&ret.output[0], outputHandle.get());
+
+    return ret;
+}
+
+template<armnn::DataType ArmnnType, typename T>
+LayerTestResult<int32_t, 1> RankDimSize1Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+ armnn::TensorInfo inputTensorInfo({6}, ArmnnType, 1.0f, 0);
+ auto input = MakeTensor<T, 1>(inputTensorInfo, ConvertToDataType<ArmnnType>(
+ { -37.5f, -15.2f, -8.76f, -2.0f, -1.3f, -0.5f },
+ inputTensorInfo));
+
+ return RankTest<T, 1>(inputTensorInfo, input, workloadFactory, memoryManager);
+}
+
+template<armnn::DataType ArmnnType, typename T>
+LayerTestResult<int32_t, 1> RankDimSize2Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+ armnn::TensorInfo inputTensorInfo({1, 3}, ArmnnType, 1.0f, 0);
+ auto input = MakeTensor<T, 2>(inputTensorInfo, ConvertToDataType<ArmnnType>(
+ { -37.5f, -15.2f, -8.76f },
+ inputTensorInfo));
+
+ return RankTest<T, 2>(inputTensorInfo, input, workloadFactory, memoryManager);
+}
+
+template<armnn::DataType ArmnnType, typename T>
+LayerTestResult<int32_t, 1> RankDimSize3Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+ armnn::TensorInfo inputTensorInfo({1, 3, 2}, ArmnnType, 1.0f, 0);
+ auto input = MakeTensor<T, 3>(inputTensorInfo, ConvertToDataType<ArmnnType>(
+ { -37.5f, -15.2f, -8.76f, -2.0f, -1.5f, -1.3f},
+ inputTensorInfo));
+
+ return RankTest<T, 3>(inputTensorInfo, input, workloadFactory, memoryManager);
+}
+
+template<armnn::DataType ArmnnType, typename T>
+LayerTestResult<int32_t, 1> RankDimSize4Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+ armnn::TensorInfo inputTensorInfo({1, 3, 2, 3}, ArmnnType, 1.0f, 0);
+ auto input = MakeTensor<T, 4>(inputTensorInfo, ConvertToDataType<ArmnnType>(
+ { -37.5f, -15.2f, -8.76f, -2.0f, -1.5f, -1.3f, -0.5f, -0.4f, 0.0f,
+ 1.0f, 0.4f, 0.5f, 1.3f, 1.5f, 2.0f, 8.76f, 15.2f, 37.5f },
+ inputTensorInfo));
+
+ return RankTest<T, 4>(inputTensorInfo, input, workloadFactory, memoryManager);
+}
+
+template LayerTestResult<int32_t, 1>
+RankDimSize4Test<armnn::DataType::Float16>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+template LayerTestResult<int32_t, 1>
+RankDimSize4Test<armnn::DataType::Float32>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+template LayerTestResult<int32_t, 1>
+RankDimSize4Test<armnn::DataType::QAsymmU8>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+template LayerTestResult<int32_t, 1>
+RankDimSize4Test<armnn::DataType::Signed32>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+template LayerTestResult<int32_t, 1>
+RankDimSize4Test<armnn::DataType::QSymmS16>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+template LayerTestResult<int32_t, 1>
+RankDimSize4Test<armnn::DataType::QSymmS8>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+template LayerTestResult<int32_t, 1>
+RankDimSize4Test<armnn::DataType::QAsymmS8>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+template LayerTestResult<int32_t, 1>
+RankDimSize4Test<armnn::DataType::BFloat16>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+template LayerTestResult<int32_t, 1>
+RankDimSize3Test<armnn::DataType::Float16>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+template LayerTestResult<int32_t, 1>
+RankDimSize3Test<armnn::DataType::Float32>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+template LayerTestResult<int32_t, 1>
+RankDimSize3Test<armnn::DataType::QAsymmU8>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+template LayerTestResult<int32_t, 1>
+RankDimSize3Test<armnn::DataType::Signed32>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+template LayerTestResult<int32_t, 1>
+RankDimSize3Test<armnn::DataType::QSymmS16>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+template LayerTestResult<int32_t, 1>
+RankDimSize3Test<armnn::DataType::QSymmS8>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+template LayerTestResult<int32_t, 1>
+RankDimSize3Test<armnn::DataType::QAsymmS8>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+template LayerTestResult<int32_t, 1>
+RankDimSize3Test<armnn::DataType::BFloat16>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+template LayerTestResult<int32_t, 1>
+RankDimSize2Test<armnn::DataType::Float16>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+template LayerTestResult<int32_t, 1>
+RankDimSize2Test<armnn::DataType::Float32>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+template LayerTestResult<int32_t, 1>
+RankDimSize2Test<armnn::DataType::QAsymmU8>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+template LayerTestResult<int32_t, 1>
+RankDimSize2Test<armnn::DataType::Signed32>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+template LayerTestResult<int32_t, 1>
+RankDimSize2Test<armnn::DataType::QSymmS16>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+template LayerTestResult<int32_t, 1>
+RankDimSize2Test<armnn::DataType::QSymmS8>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+template LayerTestResult<int32_t, 1>
+RankDimSize2Test<armnn::DataType::QAsymmS8>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+template LayerTestResult<int32_t, 1>
+RankDimSize2Test<armnn::DataType::BFloat16>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+template LayerTestResult<int32_t, 1>
+RankDimSize1Test<armnn::DataType::Float16>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+template LayerTestResult<int32_t, 1>
+RankDimSize1Test<armnn::DataType::Float32>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+template LayerTestResult<int32_t, 1>
+RankDimSize1Test<armnn::DataType::QAsymmU8>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+template LayerTestResult<int32_t, 1>
+RankDimSize1Test<armnn::DataType::Signed32>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+template LayerTestResult<int32_t, 1>
+RankDimSize1Test<armnn::DataType::QSymmS16>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+template LayerTestResult<int32_t, 1>
+RankDimSize1Test<armnn::DataType::QSymmS8>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+template LayerTestResult<int32_t, 1>
+RankDimSize1Test<armnn::DataType::QAsymmS8>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+template LayerTestResult<int32_t, 1>
+RankDimSize1Test<armnn::DataType::BFloat16>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
\ No newline at end of file
--- /dev/null
+//
+// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include "LayerTestResult.hpp"
+
+#include <ResolveType.hpp>
+
+#include <armnn/backends/IBackendInternal.hpp>
+#include <backendsCommon/WorkloadFactory.hpp>
+
+// Declaration kept in sync with the definition in RankTestImpl.cpp.
+template<typename T, std::size_t n>
+LayerTestResult<int32_t, 1> RankTest(
+    armnn::TensorInfo inputTensorInfo,
+    boost::multi_array<T, n> input,
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+LayerTestResult<int32_t, 1> RankDimSize1Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+LayerTestResult<int32_t, 1> RankDimSize2Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+LayerTestResult<int32_t, 1> RankDimSize3Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+LayerTestResult<int32_t, 1> RankDimSize4Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
return supported;
}
+bool RefLayerSupport::IsRankSupported(const TensorInfo& input,
+                                      const TensorInfo& output,
+                                      Optional<std::string&> reasonIfUnsupported) const
+{
+    // Rank accepts any input type; only the output type is constrained.
+    IgnoreUnused(input);
+    // Define supported output types.
+    std::array<DataType,1> supportedOutputTypes =
+    {
+        DataType::Signed32,
+    };
+
+    // The rule checks the output tensor, so the failure message names the output.
+    return CheckSupportRule(TypeAnyOf(output, supportedOutputTypes), reasonIfUnsupported,
+                            "Reference rank: output type not supported.");
+}
+
bool RefLayerSupport::IsReshapeSupported(const TensorInfo& input,
const TensorInfo& output,
const ReshapeDescriptor& descriptor,
const LstmInputParamsInfo& paramsInfo,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+    /// Support query for the Rank layer; only a Signed32 output is accepted.
+    bool IsRankSupported(const TensorInfo& input,
+                         const TensorInfo& output,
+                         Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
bool IsReshapeSupported(const TensorInfo& input,
const TensorInfo& output,
const ReshapeDescriptor& descriptor,
return std::make_unique<RefQuantizeWorkload>(descriptor, info);
}
+/// Creates the reference workload for a Rank layer.
+std::unique_ptr<IWorkload> RefWorkloadFactory::CreateRank(const RankQueueDescriptor& descriptor,
+                                                          const WorkloadInfo& info) const
+{
+    return std::make_unique<RefRankWorkload>(descriptor, info);
+}
+
+
std::unique_ptr<IWorkload> RefWorkloadFactory::CreateReshape(const ReshapeQueueDescriptor& descriptor,
const WorkloadInfo& info) const
{
std::unique_ptr<IWorkload> CreateQuantize(const QuantizeQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
+    /// Factory method for the reference Rank workload (RefRankWorkload).
+    std::unique_ptr<IWorkload> CreateRank(const RankQueueDescriptor& descriptor,
+                                          const WorkloadInfo& info) const override;
+
std::unique_ptr<IWorkload> CreateReshape(const ReshapeQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include <reference/RefWorkloadFactory.hpp>
-#include <test/TensorHelpers.hpp>
#include <test/UnitTests.hpp>
#include <boost/test/unit_test.hpp>
ARMNN_AUTO_TEST_CASE(BatchNormInt16, BatchNormInt16Test)
ARMNN_AUTO_TEST_CASE(BatchNormInt16Nhwc, BatchNormInt16NhwcTest)
+// Rank: one case per input rank (1-4) and per supported input data type;
+// each verifies the workload reports the correct number of dimensions.
+ARMNN_AUTO_TEST_CASE(RankDimSize1Float16, RankDimSize1Test<DataType::Float16>)
+ARMNN_AUTO_TEST_CASE(RankDimSize1Float32, RankDimSize1Test<DataType::Float32>)
+ARMNN_AUTO_TEST_CASE(RankDimSize1QAsymmU8, RankDimSize1Test<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_CASE(RankDimSize1Signed32, RankDimSize1Test<DataType::Signed32>)
+ARMNN_AUTO_TEST_CASE(RankDimSize1QSymmS16, RankDimSize1Test<DataType::QSymmS16>)
+ARMNN_AUTO_TEST_CASE(RankDimSize1QSymmS8, RankDimSize1Test<DataType::QSymmS8>)
+ARMNN_AUTO_TEST_CASE(RankDimSize1QAsymmS8, RankDimSize1Test<DataType::QAsymmS8>)
+ARMNN_AUTO_TEST_CASE(RankDimSize1BFloat16, RankDimSize1Test<DataType::BFloat16>)
+
+ARMNN_AUTO_TEST_CASE(RankDimSize2Float16, RankDimSize2Test<DataType::Float16>)
+ARMNN_AUTO_TEST_CASE(RankDimSize2Float32, RankDimSize2Test<DataType::Float32>)
+ARMNN_AUTO_TEST_CASE(RankDimSize2QAsymmU8, RankDimSize2Test<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_CASE(RankDimSize2Signed32, RankDimSize2Test<DataType::Signed32>)
+ARMNN_AUTO_TEST_CASE(RankDimSize2QSymmS16, RankDimSize2Test<DataType::QSymmS16>)
+ARMNN_AUTO_TEST_CASE(RankDimSize2QSymmS8, RankDimSize2Test<DataType::QSymmS8>)
+ARMNN_AUTO_TEST_CASE(RankDimSize2QAsymmS8, RankDimSize2Test<DataType::QAsymmS8>)
+ARMNN_AUTO_TEST_CASE(RankDimSize2BFloat16, RankDimSize2Test<DataType::BFloat16>)
+
+ARMNN_AUTO_TEST_CASE(RankDimSize3Float16, RankDimSize3Test<DataType::Float16>)
+ARMNN_AUTO_TEST_CASE(RankDimSize3Float32, RankDimSize3Test<DataType::Float32>)
+ARMNN_AUTO_TEST_CASE(RankDimSize3QAsymmU8, RankDimSize3Test<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_CASE(RankDimSize3Signed32, RankDimSize3Test<DataType::Signed32>)
+ARMNN_AUTO_TEST_CASE(RankDimSize3QSymmS16, RankDimSize3Test<DataType::QSymmS16>)
+ARMNN_AUTO_TEST_CASE(RankDimSize3QSymmS8, RankDimSize3Test<DataType::QSymmS8>)
+ARMNN_AUTO_TEST_CASE(RankDimSize3QAsymmS8, RankDimSize3Test<DataType::QAsymmS8>)
+ARMNN_AUTO_TEST_CASE(RankDimSize3BFloat16, RankDimSize3Test<DataType::BFloat16>)
+
+ARMNN_AUTO_TEST_CASE(RankDimSize4Float16, RankDimSize4Test<DataType::Float16>)
+ARMNN_AUTO_TEST_CASE(RankDimSize4Float32, RankDimSize4Test<DataType::Float32>)
+ARMNN_AUTO_TEST_CASE(RankDimSize4QAsymmU8, RankDimSize4Test<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_CASE(RankDimSize4Signed32, RankDimSize4Test<DataType::Signed32>)
+ARMNN_AUTO_TEST_CASE(RankDimSize4QSymmS16, RankDimSize4Test<DataType::QSymmS16>)
+ARMNN_AUTO_TEST_CASE(RankDimSize4QSymmS8, RankDimSize4Test<DataType::QSymmS8>)
+ARMNN_AUTO_TEST_CASE(RankDimSize4QAsymmS8, RankDimSize4Test<DataType::QAsymmS8>)
+ARMNN_AUTO_TEST_CASE(RankDimSize4BFloat16, RankDimSize4Test<DataType::BFloat16>)
+
// Resize Bilinear - NCHW
ARMNN_AUTO_TEST_CASE(SimpleResizeBilinear,
SimpleResizeBilinearTest<DataType::Float32>,
#
-# Copyright © 2017 Arm Ltd. All rights reserved.
+# Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
# SPDX-License-Identifier: MIT
#
RefQuantizeWorkload.hpp
RefQLstmWorkload.cpp
RefQLstmWorkload.hpp
+ RefRankWorkload.hpp
RefReshapeWorkload.cpp
RefReshapeWorkload.hpp
RefResizeBilinearWorkload.cpp
--- /dev/null
+//
+// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <backendsCommon/Workload.hpp>
+#include <backendsCommon/WorkloadData.hpp>
+
+#include "RefWorkloadUtils.hpp"
+
+// Required for std::memcpy; previously relied on a transitive include.
+#include <cstring>
+
+namespace armnn
+{
+
+/// Reference workload for the Rank layer: writes the input tensor's number of
+/// dimensions, as an int32_t, into the output buffer.
+struct RefRankWorkload : public BaseWorkload<RankQueueDescriptor>
+{
+public:
+    using BaseWorkload<RankQueueDescriptor>::BaseWorkload;
+    // 'virtual' dropped: 'override' already implies it.
+    void Execute() const override
+    {
+        // Rank is a property of the input's TensorInfo; no input data is read.
+        const int32_t rank = static_cast<int32_t>(GetTensorInfo(m_Data.m_Inputs[0]).GetNumDimensions());
+
+        std::memcpy(GetOutputTensorData<void>(0, m_Data), &rank, sizeof(int32_t));
+    }
+};
+
+} //namespace armnn
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include "RefPreluWorkload.hpp"
#include "RefQLstmWorkload.hpp"
#include "RefQuantizeWorkload.hpp"
+#include "RefRankWorkload.hpp"
#include "RefReshapeWorkload.hpp"
#include "RefResizeBilinearWorkload.hpp"
#include "RefResizeWorkload.hpp"