From: Jim Flynn Date: Wed, 22 May 2019 13:24:13 +0000 (+0100) Subject: IVGCVSW-3119 Rename MergerLayer to ConcatLayer X-Git-Tag: submit/tizen/20200316.035456~598 X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=e242f2dc646f41e9162aaaf74e057ce39fcb92df;p=platform%2Fupstream%2Farmnn.git IVGCVSW-3119 Rename MergerLayer to ConcatLayer !android-nn-driver:1210 Change-Id: I940b3b9e421c92bfd55ae996f7bc54ac077f2604 Signed-off-by: Jim Flynn --- diff --git a/Android.mk b/Android.mk index d57e486..f791c1a 100644 --- a/Android.mk +++ b/Android.mk @@ -91,6 +91,7 @@ LOCAL_SRC_FILES := \ src/armnn/layers/AdditionLayer.cpp \ src/armnn/layers/BatchNormalizationLayer.cpp \ src/armnn/layers/BatchToSpaceNdLayer.cpp \ + src/armnn/layers/ConcatLayer.cpp \ src/armnn/layers/ConstantLayer.cpp \ src/armnn/layers/Convolution2dLayer.cpp \ src/armnn/layers/ConvertFp16ToFp32Layer.cpp \ @@ -114,7 +115,6 @@ LOCAL_SRC_FILES := \ src/armnn/layers/MeanLayer.cpp \ src/armnn/layers/MemCopyLayer.cpp \ src/armnn/layers/MergeLayer.cpp \ - src/armnn/layers/MergerLayer.cpp \ src/armnn/layers/MinimumLayer.cpp \ src/armnn/layers/MultiplicationLayer.cpp \ src/armnn/layers/NormalizationLayer.cpp \ diff --git a/CMakeLists.txt b/CMakeLists.txt index b6c9776..c9ca70e 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -227,6 +227,8 @@ list(APPEND armnn_sources src/armnn/layers/BatchNormalizationLayer.cpp src/armnn/layers/BatchToSpaceNdLayer.hpp src/armnn/layers/BatchToSpaceNdLayer.cpp + src/armnn/layers/ConcatLayer.hpp + src/armnn/layers/ConcatLayer.cpp src/armnn/layers/ConstantLayer.hpp src/armnn/layers/ConstantLayer.cpp src/armnn/layers/Convolution2dLayer.hpp @@ -271,8 +273,6 @@ list(APPEND armnn_sources src/armnn/layers/MemCopyLayer.cpp src/armnn/layers/MergeLayer.hpp src/armnn/layers/MergeLayer.cpp - src/armnn/layers/MergerLayer.hpp - src/armnn/layers/MergerLayer.cpp src/armnn/layers/MinimumLayer.cpp src/armnn/layers/MinimumLayer.hpp src/armnn/layers/MultiplicationLayer.hpp diff --git 
a/include/armnn/Descriptors.hpp b/include/armnn/Descriptors.hpp index dcbb8c4..25bf818 100644 --- a/include/armnn/Descriptors.hpp +++ b/include/armnn/Descriptors.hpp @@ -54,8 +54,8 @@ struct SoftmaxDescriptor float m_Beta; }; -/// @brief An OriginsDescriptor for the MergerLayer. -/// Descriptor to configure the merging process. Number of views must be equal to the number of inputs, and +/// @brief An OriginsDescriptor for the ConcatLayer. +/// Descriptor to configure the concatenation process. Number of views must be equal to the number of inputs, and /// their order must match - e.g. first view corresponds to the first input, second view to the second input, etc. struct OriginsDescriptor { @@ -144,12 +144,12 @@ OriginsDescriptor CreateMergerDescriptorForConcatenation(TensorShapeIt first, return CreateDescriptorForConcatenation(first, last, concatenationDimension); } -/// @brief Convenience template to create an OriginsDescriptor to use when creating a MergerLayer for performing +/// @brief Convenience template to create an OriginsDescriptor to use when creating a ConcatLayer for performing /// concatenation of a number of input tensors. 
template OriginsDescriptor CreateDescriptorForConcatenation(TensorShapeIt first, - TensorShapeIt last, - unsigned int concatenationDimension) + TensorShapeIt last, + unsigned int concatenationDimension) { auto numInputs = std::distance(first, last); diff --git a/include/armnn/DescriptorsFwd.hpp b/include/armnn/DescriptorsFwd.hpp index affd6e2..4f47738 100644 --- a/include/armnn/DescriptorsFwd.hpp +++ b/include/armnn/DescriptorsFwd.hpp @@ -31,6 +31,8 @@ struct SpaceToBatchNdDescriptor; struct StridedSliceDescriptor; struct ViewsDescriptor; +// MergerDescriptor is deprecated use ConcatDescriptor instead using MergerDescriptor = OriginsDescriptor; +using ConcatDescriptor = OriginsDescriptor; using SplitterDescriptor = ViewsDescriptor; } diff --git a/include/armnn/ILayerVisitor.hpp b/include/armnn/ILayerVisitor.hpp index 10d0cc6..cbddb2d 100644 --- a/include/armnn/ILayerVisitor.hpp +++ b/include/armnn/ILayerVisitor.hpp @@ -4,6 +4,7 @@ // #pragma once +#include #include #include #include @@ -60,16 +61,19 @@ public: /// Function that a concat layer should call back to when its Accept(ILayerVisitor&) function is invoked. /// @param layer - pointer to the layer which is calling back to this visit function. - /// @param mergerDescriptor - WindowsDescriptor to configure the concatenation process. Number of Views must be - /// equal to the number of inputs, and their order must match - e.g. first view - /// corresponds to the first input, second view to the second input, etc.... + /// @param concatDescriptor - ConcatDescriptor (synonym for OriginsDescriptor) to configure the concatenation + /// process. Number of Views must be equal to the number of inputs, and their order + /// must match - e.g. first view corresponds to the first input, second view to the + /// second input, etc.... /// @param name - Optional name for the layer. 
virtual void VisitConcatLayer(const IConnectableLayer* layer, - const OriginsDescriptor& mergerDescriptor, + const OriginsDescriptor& concatDescriptor, const char* name = nullptr) { // default implementation to ease transition while MergerLayer is being deprecated - VisitMergerLayer(layer, mergerDescriptor, name); + ARMNN_NO_DEPRECATE_WARN_BEGIN + VisitMergerLayer(layer, concatDescriptor, name); + ARMNN_NO_DEPRECATE_WARN_END } /// Function a layer with no inputs and a single output, which always corresponds to @@ -221,13 +225,14 @@ public: /// Function that a merger layer should call back to when its Accept(ILayerVisitor&) function is invoked. /// @param layer - pointer to the layer which is calling back to this visit function. - /// @param mergerDescriptor - WindowsDescriptor to configure the merging process. Number of Views must be equal to - /// the number of inputs, and their order must match - e.g. first view corresponds to - /// the first input, second view to the second input, etc.... + /// @param mergerDescriptor - MergerDescriptor (synonym for OriginsDescriptor) to configure the concatenation + /// process. Number of Views must be equal to the number of inputs, and their order + /// must match - e.g. first view corresponds to the first input, second view to the + /// second input, etc.... /// @param name - Optional name for the layer. - // NOTE: this method will be deprecated and replaced by VisitConcatLayer + ARMNN_DEPRECATED_MSG("Use VisitConcatLayer instead") virtual void VisitMergerLayer(const IConnectableLayer* layer, - const OriginsDescriptor& mergerDescriptor, + const MergerDescriptor& mergerDescriptor, const char* name = nullptr) = 0; /// Function a Minimum layer should call back to when its Accept(ILayerVisitor&) function is invoked. @@ -333,7 +338,7 @@ public: /// Function that a splitter layer should call back to when its Accept(ILayerVisitor&) function is invoked. 
/// @param layer - pointer to the layer which is calling back to this visit function. - /// @param splitterDescriptor - WindowsDescriptor to configure the splitting process. + /// @param splitterDescriptor - ViewsDescriptor to configure the splitting process. /// Number of Views must be equal to the number of outputs, /// and their order must match - e.g. first view corresponds to /// the first output, second view to the second output, etc.... diff --git a/include/armnn/INetwork.hpp b/include/armnn/INetwork.hpp index ef85243..f3dfcd8 100644 --- a/include/armnn/INetwork.hpp +++ b/include/armnn/INetwork.hpp @@ -103,12 +103,13 @@ public: virtual IConnectableLayer* AddInputLayer(LayerBindingId id, const char* name = nullptr) = 0; /// Adds a concatenation layer to the network. - /// @param mergerDescriptor - WindowsDescriptor to configure the concatenation process. Number of Views must - /// be equal to the number of inputs, and their order must match - e.g. first view - /// corresponds to the first input, second view to the second input, etc.... + /// @param concatDescriptor - ConcatDescriptor (synonym for OriginsDescriptor) to configure the concatenation + /// process. Number of Views must be equal to the number of inputs, and their order + /// must match - e.g. first view corresponds to the first input, second view to the + /// second input, etc.... /// @param name - Optional name for the layer. /// @return - Interface for configuring the layer. - virtual IConnectableLayer* AddConcatLayer(const OriginsDescriptor& mergerDescriptor, + virtual IConnectableLayer* AddConcatLayer(const ConcatDescriptor& concatDescriptor, const char* name = nullptr) = 0; /// Adds a 2D convolution layer to the network. @@ -239,7 +240,7 @@ public: const char* name = nullptr) = 0; /// Adds a splitter layer to the network. - /// @param splitterDescriptor - WindowsDescriptor to configure the splitting process. + /// @param splitterDescriptor - ViewsDescriptor to configure the splitting process. 
/// Number of Views must be equal to the number of outputs, /// and their order must match - e.g. first view corresponds to /// the first output, second view to the second output, etc.... @@ -253,14 +254,15 @@ public: /// @return - Interface for configuring the layer. virtual IConnectableLayer* AddMergeLayer(const char* name = nullptr) = 0; - /// Adds a merger layer to the network. - /// @param mergerDescriptor - WindowsDescriptor to configure the merging process. Number of Views must be equal to - /// the number of inputs, and their order must match - e.g. first view corresponds to - /// the first input, second view to the second input, etc.... + /// Adds a concat layer to the network. + /// @param mergerDescriptor - MergerDescriptor (synonym for OriginsDescriptor) to configure the concatenation + /// process. Number of Views must be equal to the number of inputs, and their order + /// must match - e.g. first view corresponds to the first input, second view to the + /// second input, etc.... /// @param name - Optional name for the layer. /// @return - Interface for configuring the layer. ARMNN_DEPRECATED_MSG("Use AddConcatLayer instead") - virtual IConnectableLayer* AddMergerLayer(const OriginsDescriptor& mergerDescriptor, + virtual IConnectableLayer* AddMergerLayer(const MergerDescriptor& mergerDescriptor, const char* name = nullptr) = 0; /// Adds an addition layer to the network. 
diff --git a/include/armnn/LayerVisitorBase.hpp b/include/armnn/LayerVisitorBase.hpp index 657051f..47a8384 100644 --- a/include/armnn/LayerVisitorBase.hpp +++ b/include/armnn/LayerVisitorBase.hpp @@ -50,7 +50,7 @@ public: const char*) override { DefaultPolicy::Apply(__func__); } void VisitConcatLayer(const IConnectableLayer*, - const OriginsDescriptor&, + const ConcatDescriptor&, const char*) override { DefaultPolicy::Apply(__func__); } void VisitConstantLayer(const IConnectableLayer*, @@ -122,7 +122,7 @@ public: const char*) override { DefaultPolicy::Apply(__func__); } void VisitMergerLayer(const IConnectableLayer*, - const OriginsDescriptor&, + const MergerDescriptor&, const char*) override { DefaultPolicy::Apply(__func__); } void VisitMinimumLayer(const IConnectableLayer*, diff --git a/include/armnn/Tensor.hpp b/include/armnn/Tensor.hpp index 3cde1ad..b3a4629 100644 --- a/include/armnn/Tensor.hpp +++ b/include/armnn/Tensor.hpp @@ -3,6 +3,7 @@ // SPDX-License-Identifier: MIT // #pragma once + #include "TensorFwd.hpp" #include "Exceptions.hpp" diff --git a/src/armnn/DynamicQuantizationVisitor.cpp b/src/armnn/DynamicQuantizationVisitor.cpp index 9b33fb7..d4e0c90 100644 --- a/src/armnn/DynamicQuantizationVisitor.cpp +++ b/src/armnn/DynamicQuantizationVisitor.cpp @@ -242,8 +242,8 @@ void DynamicQuantizationVisitor::VisitConstantLayer(const IConnectableLayer* lay SetRange(layer, 0, min, max); } -void DynamicQuantizationVisitor::VisitMergerLayer(const IConnectableLayer* layer, - const OriginsDescriptor& mergerDescriptor, +void DynamicQuantizationVisitor::VisitConcatLayer(const IConnectableLayer* layer, + const ConcatDescriptor& originsDescriptor, const char* name) { float min = std::numeric_limits::max(); diff --git a/src/armnn/DynamicQuantizationVisitor.hpp b/src/armnn/DynamicQuantizationVisitor.hpp index 6d430f1..43768fd 100644 --- a/src/armnn/DynamicQuantizationVisitor.hpp +++ b/src/armnn/DynamicQuantizationVisitor.hpp @@ -71,14 +71,14 @@ public: const 
SoftmaxDescriptor& softmaxDescriptor, const char* name = nullptr) override; + void VisitConcatLayer(const IConnectableLayer* layer, + const ConcatDescriptor& originsDescriptor, + const char* name = nullptr) override; + void VisitConstantLayer(const IConnectableLayer* layer, const ConstTensor& input, const char* name = nullptr) override; - void VisitMergerLayer(const IConnectableLayer* layer, - const OriginsDescriptor& mergerDescriptor, - const char* name = nullptr) override; - void VisitReshapeLayer(const IConnectableLayer* layer, const ReshapeDescriptor& reshapeDescriptor, const char* name = nullptr) override; diff --git a/src/armnn/InternalTypes.cpp b/src/armnn/InternalTypes.cpp index a811706..47a6f60 100644 --- a/src/armnn/InternalTypes.cpp +++ b/src/armnn/InternalTypes.cpp @@ -18,6 +18,7 @@ char const* GetLayerTypeAsCString(LayerType type) case LayerType::Addition: return "Addition"; case LayerType::BatchNormalization: return "BatchNormalization"; case LayerType::BatchToSpaceNd: return "BatchToSpaceNd"; + case LayerType::Concat: return "Concat"; case LayerType::Constant: return "Constant"; case LayerType::ConvertFp16ToFp32: return "ConvertFp16ToFp32"; case LayerType::ConvertFp32ToFp16: return "ConvertFp32ToFp16"; @@ -40,7 +41,6 @@ char const* GetLayerTypeAsCString(LayerType type) case LayerType::Mean: return "Mean"; case LayerType::MemCopy: return "MemCopy"; case LayerType::Merge: return "Merge"; - case LayerType::Merger: return "Merger"; case LayerType::Minimum: return "Minimum"; case LayerType::Multiplication: return "Multiplication"; case LayerType::Normalization: return "Normalization"; diff --git a/src/armnn/InternalTypes.hpp b/src/armnn/InternalTypes.hpp index 5765b5b..9a215e6 100644 --- a/src/armnn/InternalTypes.hpp +++ b/src/armnn/InternalTypes.hpp @@ -18,6 +18,7 @@ enum class LayerType Addition, BatchNormalization, BatchToSpaceNd, + Concat, Constant, ConvertFp16ToFp32, ConvertFp32ToFp16, @@ -40,7 +41,6 @@ enum class LayerType Mean, MemCopy, Merge, - 
Merger, Minimum, Multiplication, Normalization, diff --git a/src/armnn/LayerSupport.cpp b/src/armnn/LayerSupport.cpp index 5867fab..5324e5f 100644 --- a/src/armnn/LayerSupport.cpp +++ b/src/armnn/LayerSupport.cpp @@ -137,9 +137,9 @@ bool IsConcatSupported(const BackendId& backend, char* reasonIfUnsupported, size_t reasonIfUnsupportedMaxLength) { - ARMNN_NO_DEPRECATE_WARN_BEGIN - return IsMergerSupported(backend, inputs, output, descriptor, reasonIfUnsupported, reasonIfUnsupportedMaxLength); - ARMNN_NO_DEPRECATE_WARN_END + BOOST_ASSERT(inputs.size() > 0); + + FORWARD_LAYER_SUPPORT_FUNC(backend, IsConcatSupported, inputs, output, descriptor); } bool IsConstantSupported(const BackendId& backend, diff --git a/src/armnn/LayersFwd.hpp b/src/armnn/LayersFwd.hpp index 31cfa66..40330f2 100644 --- a/src/armnn/LayersFwd.hpp +++ b/src/armnn/LayersFwd.hpp @@ -10,6 +10,7 @@ #include "layers/AdditionLayer.hpp" #include "layers/BatchNormalizationLayer.hpp" #include "layers/BatchToSpaceNdLayer.hpp" +#include "layers/ConcatLayer.hpp" #include "layers/ConstantLayer.hpp" #include "layers/ConvertFp16ToFp32Layer.hpp" #include "layers/ConvertFp32ToFp16Layer.hpp" @@ -32,7 +33,6 @@ #include "layers/MeanLayer.hpp" #include "layers/MemCopyLayer.hpp" #include "layers/MergeLayer.hpp" -#include "layers/MergerLayer.hpp" #include "layers/MinimumLayer.hpp" #include "layers/MultiplicationLayer.hpp" #include "layers/NormalizationLayer.hpp" @@ -83,6 +83,7 @@ DECLARE_LAYER(Activation) DECLARE_LAYER(Addition) DECLARE_LAYER(BatchNormalization) DECLARE_LAYER(BatchToSpaceNd) +DECLARE_LAYER(Concat) DECLARE_LAYER(Constant) DECLARE_LAYER(ConvertFp16ToFp32) DECLARE_LAYER(ConvertFp32ToFp16) @@ -105,7 +106,6 @@ DECLARE_LAYER(Maximum) DECLARE_LAYER(Mean) DECLARE_LAYER(MemCopy) DECLARE_LAYER(Merge) -DECLARE_LAYER(Merger) DECLARE_LAYER(Minimum) DECLARE_LAYER(Multiplication) DECLARE_LAYER(Normalization) diff --git a/src/armnn/LoadedNetwork.cpp b/src/armnn/LoadedNetwork.cpp index 7f00dbe..3c7dfb0 100644 --- 
a/src/armnn/LoadedNetwork.cpp +++ b/src/armnn/LoadedNetwork.cpp @@ -82,7 +82,7 @@ LoadedNetwork::LoadedNetwork(std::unique_ptr net) //First create tensor handlers, backends and workload factories. //Handlers are created before workloads are. //Because workload creation can modify some of the handlers, - //(for example the splitter and merger layers). + //(for example the splitter and concat layers). for (auto&& layer : order) { auto const& backend = layer->GetBackendId(); diff --git a/src/armnn/Network.cpp b/src/armnn/Network.cpp index b80e0e7..6bd365b 100644 --- a/src/armnn/Network.cpp +++ b/src/armnn/Network.cpp @@ -603,12 +603,10 @@ IConnectableLayer* Network::AddFullyConnectedLayer(const FullyConnectedDescripto return AddFullyConnectedLayerImpl(fullyConnectedDescriptor, weights, optionalBiases, name); } -IConnectableLayer* Network::AddConcatLayer(const OriginsDescriptor& mergerDescriptor, +IConnectableLayer* Network::AddConcatLayer(const ConcatDescriptor& concatDescriptor, const char* name) { - ARMNN_NO_DEPRECATE_WARN_BEGIN - return AddMergerLayer(mergerDescriptor, name); - ARMNN_NO_DEPRECATE_WARN_END + return m_Graph->AddLayer(concatDescriptor, name); } IConnectableLayer* Network::AddConvolution2dLayerImpl(const Convolution2dDescriptor& convolution2dDescriptor, @@ -766,10 +764,10 @@ IConnectableLayer* Network::AddMinimumLayer(const char* name) return m_Graph->AddLayer(name); } -IConnectableLayer* Network::AddMergerLayer(const OriginsDescriptor& mergerDescriptor, +IConnectableLayer* Network::AddMergerLayer(const MergerDescriptor& mergerDescriptor, const char* name) { - return m_Graph->AddLayer(mergerDescriptor, name); + return AddConcatLayer(mergerDescriptor, name); } IConnectableLayer* Network::AddAdditionLayer(const char* name) diff --git a/src/armnn/Network.hpp b/src/armnn/Network.hpp index d26c286..52a2714 100644 --- a/src/armnn/Network.hpp +++ b/src/armnn/Network.hpp @@ -37,7 +37,7 @@ public: IConnectableLayer* AddBatchToSpaceNdLayer(const 
BatchToSpaceNdDescriptor& batchToSpaceNdDescriptor, const char* name = nullptr) override; - IConnectableLayer* AddConcatLayer(const OriginsDescriptor& mergerDescriptor, + IConnectableLayer* AddConcatLayer(const ConcatDescriptor& concatDescriptor, const char* name = nullptr) override; IConnectableLayer* AddConvolution2dLayer(const Convolution2dDescriptor& convolution2dDescriptor, @@ -119,7 +119,7 @@ public: const char* name = nullptr) override; ARMNN_DEPRECATED_MSG("Use AddConcatLayer instead") - IConnectableLayer* AddMergerLayer(const OriginsDescriptor& mergerDescriptor, + IConnectableLayer* AddMergerLayer(const MergerDescriptor& mergerDescriptor, const char* name = nullptr) override; IConnectableLayer* AddAdditionLayer(const char* name = nullptr) override; diff --git a/src/armnn/QuantizerVisitor.cpp b/src/armnn/QuantizerVisitor.cpp index f30ab52..47ddc4e 100644 --- a/src/armnn/QuantizerVisitor.cpp +++ b/src/armnn/QuantizerVisitor.cpp @@ -122,6 +122,15 @@ void QuantizerVisitor::VisitBatchToSpaceNdLayer(const IConnectableLayer* layer, SetQuantizedInputConnections(layer, newLayer); } +void QuantizerVisitor::VisitConcatLayer(const IConnectableLayer* layer, + const OriginsDescriptor& originsDescriptor, + const char* name) +{ + IConnectableLayer* newLayer = m_QuantizedNetwork->AddConcatLayer(originsDescriptor, name); + RecordLayer(layer, newLayer); + SetQuantizedInputConnections(layer, newLayer); +} + void QuantizerVisitor::VisitConstantLayer(const IConnectableLayer* layer, const ConstTensor& input, const char* name) @@ -238,15 +247,6 @@ void QuantizerVisitor::VisitMeanLayer(const IConnectableLayer* layer, SetQuantizedInputConnections(layer, newLayer); } -void QuantizerVisitor::VisitMergerLayer(const IConnectableLayer* layer, - const OriginsDescriptor& mergerDescriptor, - const char* name) -{ - IConnectableLayer* newLayer = m_QuantizedNetwork->AddConcatLayer(mergerDescriptor, name); - RecordLayer(layer, newLayer); - SetQuantizedInputConnections(layer, newLayer); -} - 
void QuantizerVisitor::VisitMultiplicationLayer(const IConnectableLayer* layer, const char* name) { diff --git a/src/armnn/QuantizerVisitor.hpp b/src/armnn/QuantizerVisitor.hpp index 5d00e31..6e5609d 100644 --- a/src/armnn/QuantizerVisitor.hpp +++ b/src/armnn/QuantizerVisitor.hpp @@ -50,6 +50,10 @@ public: const BatchToSpaceNdDescriptor& batchToSpaceNdDescriptor, const char* name = nullptr) override; + void VisitConcatLayer(const IConnectableLayer* layer, + const OriginsDescriptor& originsDescriptor, + const char* name = nullptr) override; + void VisitConstantLayer(const IConnectableLayer* layer, const ConstTensor& input, const char* name = nullptr) override; @@ -78,10 +82,6 @@ public: const MeanDescriptor& meanDescriptor, const char* name = nullptr) override; - void VisitMergerLayer(const IConnectableLayer* layer, - const OriginsDescriptor& mergerDescriptor, - const char* name = nullptr) override; - void VisitMultiplicationLayer(const IConnectableLayer* layer, const char* name = nullptr) override; diff --git a/src/armnn/StaticRangeVisitor.cpp b/src/armnn/StaticRangeVisitor.cpp index 815730b..d437a99 100644 --- a/src/armnn/StaticRangeVisitor.cpp +++ b/src/armnn/StaticRangeVisitor.cpp @@ -152,6 +152,24 @@ void StaticRangeVisitor::VisitSoftmaxLayer(const IConnectableLayer* layer, SetRange(layer, 0, 0.f, 1.f); } +void StaticRangeVisitor::VisitConcatLayer(const IConnectableLayer* layer, + const OriginsDescriptor& originsDescriptor, + const char* name) +{ + float min = std::numeric_limits::max(); + float max = std::numeric_limits::lowest(); + for (unsigned int i = 0; i < layer->GetNumInputSlots(); ++i) + { + const IOutputSlot* outputSlot = layer->GetInputSlot(i).GetConnection(); + LayerGuid layerId = outputSlot->GetOwningLayerGuid(); + unsigned int slotIndex = outputSlot->CalculateIndexOnOwner(); + RangeTracker::MinMaxRange range = m_RangeTracker.GetRange(layerId, slotIndex); + min = std::min(min, range.first); + max = std::max(max, range.second); + } + SetRange(layer, 
0, min, max); +} + void StaticRangeVisitor::VisitConstantLayer(const IConnectableLayer* layer, const ConstTensor& input, const char* name) @@ -180,24 +198,6 @@ void StaticRangeVisitor::VisitConstantLayer(const IConnectableLayer* layer, SetRange(layer, 0, min, max); } -void StaticRangeVisitor::VisitMergerLayer(const IConnectableLayer* layer, - const OriginsDescriptor& mergerDescriptor, - const char* name) -{ - float min = std::numeric_limits::max(); - float max = std::numeric_limits::lowest(); - for (unsigned int i = 0; i < layer->GetNumInputSlots(); ++i) - { - const IOutputSlot* outputSlot = layer->GetInputSlot(i).GetConnection(); - LayerGuid layerId = outputSlot->GetOwningLayerGuid(); - unsigned int slotIndex = outputSlot->CalculateIndexOnOwner(); - RangeTracker::MinMaxRange range = m_RangeTracker.GetRange(layerId, slotIndex); - min = std::min(min, range.first); - max = std::max(max, range.second); - } - SetRange(layer, 0, min, max); -} - void StaticRangeVisitor::VisitReshapeLayer(const IConnectableLayer* layer, const ReshapeDescriptor& reshapeDescriptor, const char* name) diff --git a/src/armnn/StaticRangeVisitor.hpp b/src/armnn/StaticRangeVisitor.hpp index 8f2e698..a393a8e 100644 --- a/src/armnn/StaticRangeVisitor.hpp +++ b/src/armnn/StaticRangeVisitor.hpp @@ -71,14 +71,14 @@ public: const SoftmaxDescriptor& softmaxDescriptor, const char* name = nullptr) override; + void VisitConcatLayer(const IConnectableLayer* layer, + const OriginsDescriptor& originsDescriptor, + const char* name = nullptr) override; + void VisitConstantLayer(const IConnectableLayer* layer, const ConstTensor& input, const char* name = nullptr) override; - void VisitMergerLayer(const IConnectableLayer* layer, - const OriginsDescriptor& mergerDescriptor, - const char* name = nullptr) override; - void VisitReshapeLayer(const IConnectableLayer* layer, const ReshapeDescriptor& reshapeDescriptor, const char* name = nullptr) override; diff --git a/src/armnn/layers/MergerLayer.cpp 
b/src/armnn/layers/ConcatLayer.cpp similarity index 81% rename from src/armnn/layers/MergerLayer.cpp rename to src/armnn/layers/ConcatLayer.cpp index 9dbfdcc..1d2641c 100644 --- a/src/armnn/layers/MergerLayer.cpp +++ b/src/armnn/layers/ConcatLayer.cpp @@ -2,7 +2,7 @@ // Copyright © 2017 Arm Ltd. All rights reserved. // SPDX-License-Identifier: MIT // -#include "MergerLayer.hpp" +#include "ConcatLayer.hpp" #include "LayerCloneBase.hpp" #include @@ -14,14 +14,14 @@ namespace armnn { -MergerLayer::MergerLayer(const OriginsDescriptor& param, const char* name) - : LayerWithParameters(param.GetNumViews(), 1, LayerType::Merger, param, name) +ConcatLayer::ConcatLayer(const OriginsDescriptor& param, const char* name) + : LayerWithParameters(param.GetNumViews(), 1, LayerType::Concat, param, name) { } -std::unique_ptr MergerLayer::CreateWorkload(const Graph& graph, const IWorkloadFactory& factory) const +std::unique_ptr ConcatLayer::CreateWorkload(const Graph& graph, const IWorkloadFactory& factory) const { - MergerQueueDescriptor descriptor; + ConcatQueueDescriptor descriptor; // Copies the view origins to the descriptor. descriptor.m_ViewOrigins.reserve(m_Param.GetNumViews()); @@ -34,24 +34,24 @@ std::unique_ptr MergerLayer::CreateWorkload(const Graph& graph, const return factory.CreateConcat(descriptor, PrepInfoAndDesc(descriptor, graph)); } -void MergerLayer::CreateTensorHandles(Graph& graph, const IWorkloadFactory& factory) +void ConcatLayer::CreateTensorHandles(Graph& graph, const IWorkloadFactory& factory) { - //If sub tensors are supported then the merger + //If sub tensors are supported then the concat //just needs to make sure that the outputs of the prev layer - //are made subtensors of the output of the merger layer. + //are made subtensors of the output of the concat layer. 
m_OutputHandlers[0].CreateTensorHandles(factory); if (factory.SupportsSubTensors()) { - std::queue m_MergerLayers; + std::queue m_ConcatLayers; - m_MergerLayers.push(this); - while (!m_MergerLayers.empty()) + m_ConcatLayers.push(this); + while (!m_ConcatLayers.empty()) { - MergerLayer* currentLayer = m_MergerLayers.front(); + ConcatLayer* currentLayer = m_ConcatLayers.front(); ITensorHandle* parentTensor = currentLayer->GetOutputHandler(0).GetData(); const TensorInfo& parentInfo = currentLayer->GetOutputHandler(0).GetTensorInfo(); - m_MergerLayers.pop(); + m_ConcatLayers.pop(); const unsigned int numInputSlots = currentLayer->GetNumInputSlots(); @@ -99,14 +99,14 @@ void MergerLayer::CreateTensorHandles(Graph& graph, const IWorkloadFactory& fact OutputSlot* slot = currentLayer->GetInputSlot(i).GetConnectedOutputSlot(); OutputHandler& outputHandler = slot->GetOutputHandler(); - BOOST_ASSERT_MSG(subTensor, "MergerLayer: Expected a valid sub-tensor for substitution."); + BOOST_ASSERT_MSG(subTensor, "ConcatLayer: Expected a valid sub-tensor for substitution."); outputHandler.SetData(std::move(subTensor)); Layer& inputLayer = slot->GetOwningLayer(); - if (inputLayer.GetType() == LayerType::Merger) + if (inputLayer.GetType() == LayerType::Concat) { - // Continue with the substitution if the connected inputs are also merger layers - m_MergerLayers.push(boost::polymorphic_downcast(&inputLayer)); + // Continue with the substitution if the connected inputs are also concat layers + m_ConcatLayers.push(boost::polymorphic_downcast(&inputLayer)); } ++i; } @@ -114,12 +114,12 @@ void MergerLayer::CreateTensorHandles(Graph& graph, const IWorkloadFactory& fact } } -MergerLayer* MergerLayer::Clone(Graph& graph) const +ConcatLayer* ConcatLayer::Clone(Graph& graph) const { - return CloneBase(graph, m_Param, GetName()); + return CloneBase(graph, m_Param, GetName()); } -std::vector MergerLayer::InferOutputShapes(const std::vector& inputShapes) const +std::vector 
ConcatLayer::InferOutputShapes(const std::vector& inputShapes) const { BOOST_ASSERT(inputShapes.size() == m_Param.GetNumViews()); @@ -129,7 +129,7 @@ std::vector MergerLayer::InferOutputShapes(const std::vector( - "MergerLayer: Num Dimensions must match all inputs.", + "ConcatLayer: Num Dimensions must match all inputs.", numDims, inputShape.GetNumDimensions()); } @@ -151,7 +151,7 @@ std::vector MergerLayer::InferOutputShapes(const std::vector MergerLayer::InferOutputShapes(const std::vector MergerLayer::InferOutputShapes(const std::vector( - "MergerLayer: there are some gaps between views", + "ConcatLayer: there are some gaps between views", totalViewsVolume, outputVolume); return std::vector({ TensorShape({numDims, extentMax.data()}) }); } -void MergerLayer::ValidateTensorShapesFromInputs() +void ConcatLayer::ValidateTensorShapesFromInputs() { - // Validates Merger layer. + // Validates Concat layer. ConditionalThrowIfNotEqual( - "MergerLayer: Num Inputs must match num views.", + "ConcatLayer: Num Inputs must match num views.", m_Param.GetNumViews(), GetNumInputSlots()); @@ -230,14 +230,14 @@ void MergerLayer::ValidateTensorShapesFromInputs() BOOST_ASSERT(inferredShapes.size() == 1); ConditionalThrowIfNotEqual( - "MergerLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.", + "ConcatLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.", GetOutputSlot(0).GetTensorInfo().GetShape(), inferredShapes[0]); } -void MergerLayer::Accept(ILayerVisitor& visitor) const +void ConcatLayer::Accept(ILayerVisitor& visitor) const { - visitor.VisitMergerLayer(this, GetParameters(), GetName()); + visitor.VisitConcatLayer(this, GetParameters(), GetName()); } } // namespace armnn armnn diff --git a/src/armnn/layers/ConcatLayer.hpp b/src/armnn/layers/ConcatLayer.hpp new file mode 100644 index 0000000..4268291 --- /dev/null +++ b/src/armnn/layers/ConcatLayer.hpp @@ -0,0 +1,55 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. 
+// SPDX-License-Identifier: MIT +// +#pragma once + +#include "LayerWithParameters.hpp" + +namespace armnn +{ + +/// This layer represents a merge operation. +class ConcatLayer : public LayerWithParameters +{ +public: + /// Makes a workload for the Concat type. + /// @param [in] graph The graph where this layer can be found. + /// @param [in] factory The workload factory which will create the workload. + /// @return A pointer to the created workload, or nullptr if not created. + virtual std::unique_ptr CreateWorkload(const Graph& graph, + const IWorkloadFactory& factory) const override; + + /// Set the outputs to be appropriate sub tensors of the input if sub tensors are supported + /// otherwise creates tensor handlers. + /// @param [in] graph The graph where this layer can be found. + /// @param [in] factory The workload factory which will create the workload. + virtual void CreateTensorHandles(Graph& graph, const IWorkloadFactory& factory) override; + + /// Creates a dynamically-allocated copy of this layer. + /// @param [in] graph The graph into which this layer is being cloned. + ConcatLayer* Clone(Graph& graph) const override; + + /// Check if the input tensor shape(s) + /// will lead to a valid configuration of @ref ConcatLayer. + void ValidateTensorShapesFromInputs() override; + + /// By default returns inputShapes if the number of inputs are equal to number of outputs, + /// otherwise infers the output shapes from given input shapes and layer properties. + /// @param [in] inputShapes The input shapes layer has. + /// @return A vector to the inferred output shape. + std::vector InferOutputShapes(const std::vector& inputShapes) const override; + + void Accept(ILayerVisitor& visitor) const override; + +protected: + /// Constructor to create a ConcatLayer. + /// @param [in] param OriginsDescriptor to configure the concat operation. + /// @param [in] name Optional name for the layer. 
+ ConcatLayer(const OriginsDescriptor& param, const char* name); + + /// Default destructor + ~ConcatLayer() = default; +}; + +} // namespace diff --git a/src/armnn/layers/MergerLayer.hpp b/src/armnn/layers/MergerLayer.hpp index 6f0c148..3271060 100644 --- a/src/armnn/layers/MergerLayer.hpp +++ b/src/armnn/layers/MergerLayer.hpp @@ -4,52 +4,6 @@ // #pragma once -#include "LayerWithParameters.hpp" +#include "ConcatLayer.hpp" -namespace armnn -{ - -/// This layer represents a merge operation. -class MergerLayer : public LayerWithParameters -{ -public: - /// Makes a workload for the Merger type. - /// @param [in] graph The graph where this layer can be found. - /// @param [in] factory The workload factory which will create the workload. - /// @return A pointer to the created workload, or nullptr if not created. - virtual std::unique_ptr CreateWorkload(const Graph& graph, - const IWorkloadFactory& factory) const override; - - /// Set the outputs to be appropriate sub tensors of the input if sub tensors are supported - /// otherwise creates tensor handlers. - /// @param [in] graph The graph where this layer can be found. - /// @param [in] factory The workload factory which will create the workload. - virtual void CreateTensorHandles(Graph& graph, const IWorkloadFactory& factory) override; - - /// Creates a dynamically-allocated copy of this layer. - /// @param [in] graph The graph into which this layer is being cloned. - MergerLayer* Clone(Graph& graph) const override; - - /// Check if the input tensor shape(s) - /// will lead to a valid configuration of @ref MergerLayer. - void ValidateTensorShapesFromInputs() override; - - /// By default returns inputShapes if the number of inputs are equal to number of outputs, - /// otherwise infers the output shapes from given input shapes and layer properties. - /// @param [in] inputShapes The input shapes layer has. - /// @return A vector to the inferred output shape. 
- std::vector InferOutputShapes(const std::vector& inputShapes) const override; - - void Accept(ILayerVisitor& visitor) const override; - -protected: - /// Constructor to create a MergerLayer. - /// @param [in] param OriginsDescriptor to configure the merger operation. - /// @param [in] name Optional name for the layer. - MergerLayer(const OriginsDescriptor& param, const char* name); - - /// Default destructor - ~MergerLayer() = default; -}; - -} // namespace +using MergerLayer = ConcatLayer; \ No newline at end of file diff --git a/src/armnn/test/CreateWorkload.hpp b/src/armnn/test/CreateWorkload.hpp index be52ead..135a442 100644 --- a/src/armnn/test/CreateWorkload.hpp +++ b/src/armnn/test/CreateWorkload.hpp @@ -706,10 +706,10 @@ std::unique_ptr return workload; } -/// This function constructs a graph with both a splitter and a merger, and returns a pair of the workloads. -template -std::pair, std::unique_ptr> - CreateSplitterMergerWorkloadTest(armnn::IWorkloadFactory& factory, armnn::Graph& graph) +/// This function constructs a graph with both a splitter and a concat, and returns a pair of the workloads. 
+template +std::pair, std::unique_ptr> + CreateSplitterConcatWorkloadTest(armnn::IWorkloadFactory &factory, armnn::Graph &graph) { armnn::TensorInfo inputTensorInfo({ 1, 2, 100, 10 }, DataType); @@ -733,41 +733,41 @@ std::pair, std::unique_ptr> Layer* const splitter = graph.AddLayer(splitterViews, "splitter"); BOOST_TEST_CHECKPOINT("created splitter layer"); - armnn::OriginsDescriptor mergerViews(2); - mergerViews.SetViewOriginCoord(0, 0, 0); - mergerViews.SetViewOriginCoord(0, 1, 1); - mergerViews.SetViewOriginCoord(0, 2, 0); - mergerViews.SetViewOriginCoord(0, 3, 0); + armnn::OriginsDescriptor concatViews(2); + concatViews.SetViewOriginCoord(0, 0, 0); + concatViews.SetViewOriginCoord(0, 1, 1); + concatViews.SetViewOriginCoord(0, 2, 0); + concatViews.SetViewOriginCoord(0, 3, 0); - mergerViews.SetViewOriginCoord(1, 0, 0); - mergerViews.SetViewOriginCoord(1, 1, 0); - mergerViews.SetViewOriginCoord(1, 2, 0); - mergerViews.SetViewOriginCoord(1, 3, 0); + concatViews.SetViewOriginCoord(1, 0, 0); + concatViews.SetViewOriginCoord(1, 1, 0); + concatViews.SetViewOriginCoord(1, 2, 0); + concatViews.SetViewOriginCoord(1, 3, 0); - Layer* const merger = graph.AddLayer(mergerViews, "merger"); - BOOST_TEST_CHECKPOINT("created merger layer"); + Layer* const concat = graph.AddLayer(concatViews, "concat"); + BOOST_TEST_CHECKPOINT("created concat layer"); Layer* const output = graph.AddLayer(0, "output"); // Adds connections. Connect(input, splitter, inputTensorInfo, 0, 0); BOOST_TEST_CHECKPOINT("connect input to splitter"); - Connect(splitter, merger, splitTensorInfo1, 0, 1); // The splitter & merger are connected up. - BOOST_TEST_CHECKPOINT("connect splitter[0] to merger[1]"); - Connect(splitter, merger, splitTensorInfo2, 1, 0); // So that the outputs are flipped round. 
- BOOST_TEST_CHECKPOINT("connect splitter[1] to merger[0]"); - Connect(merger, output, inputTensorInfo, 0, 0); - BOOST_TEST_CHECKPOINT("connect merger to output"); + Connect(splitter, concat, splitTensorInfo1, 0, 1); // The splitter & concat are connected up. + BOOST_TEST_CHECKPOINT("connect splitter[0] to concat[1]"); + Connect(splitter, concat, splitTensorInfo2, 1, 0); // So that the outputs are flipped round. + BOOST_TEST_CHECKPOINT("connect splitter[1] to concat[0]"); + Connect(concat, output, inputTensorInfo, 0, 0); + BOOST_TEST_CHECKPOINT("connect concat to output"); CreateTensorHandles(graph, factory); BOOST_TEST_CHECKPOINT("created tensor handles"); auto workloadSplitter = MakeAndCheckWorkload(*splitter, graph, factory); BOOST_TEST_CHECKPOINT("created splitter workload"); - auto workloadMerger = MakeAndCheckWorkload(*merger, graph, factory); - BOOST_TEST_CHECKPOINT("created merger workload"); + auto workloadConcat = MakeAndCheckWorkload(*concat, graph, factory); + BOOST_TEST_CHECKPOINT("created concat workload"); - return {std::move(workloadSplitter), std::move(workloadMerger)}; + return {std::move(workloadSplitter), std::move(workloadConcat)}; } @@ -1053,10 +1053,10 @@ std::unique_ptr CreateMeanWorkloadTest(armnn::IWorkloadFactory& fa return workload; } -template -std::unique_ptr CreateMergerWorkloadTest(armnn::IWorkloadFactory& factory, - armnn::Graph& graph, - const armnn::TensorShape& outputShape, +template +std::unique_ptr CreateConcatWorkloadTest(armnn::IWorkloadFactory &factory, + armnn::Graph &graph, + const armnn::TensorShape &outputShape, unsigned int concatAxis) { armnn::TensorInfo inputTensorInfo({ 2, 3, 2, 5 }, DataType); @@ -1073,26 +1073,26 @@ std::unique_ptr CreateMergerWorkloadTest(armnn::IWorkloadFactory inputShapes.end(), concatAxis); - Layer* const merger = graph.AddLayer(descriptor, "merger"); - BOOST_TEST_CHECKPOINT("created merger layer"); + Layer* const concat = graph.AddLayer(descriptor, "concat"); + BOOST_TEST_CHECKPOINT("created 
concat layer"); Layer* const output = graph.AddLayer(0, "output"); // Adds connections. - Connect(input0, merger, inputTensorInfo, 0, 0); - BOOST_TEST_CHECKPOINT("connect input0 to merger"); - Connect(input1, merger, inputTensorInfo, 0, 1); - BOOST_TEST_CHECKPOINT("connect input1 to merger"); - Connect(merger, output, outputTensorInfo, 0, 0); - BOOST_TEST_CHECKPOINT("connect merger to output"); + Connect(input0, concat, inputTensorInfo, 0, 0); + BOOST_TEST_CHECKPOINT("connect input0 to concat"); + Connect(input1, concat, inputTensorInfo, 0, 1); + BOOST_TEST_CHECKPOINT("connect input1 to concat"); + Connect(concat, output, outputTensorInfo, 0, 0); + BOOST_TEST_CHECKPOINT("connect concat to output"); CreateTensorHandles(graph, factory); BOOST_TEST_CHECKPOINT("created tensor handles"); - auto workloadMerger = MakeAndCheckWorkload(*merger, graph, factory); - BOOST_TEST_CHECKPOINT("created merger workload"); + auto workloadConcat = MakeAndCheckWorkload(*concat, graph, factory); + BOOST_TEST_CHECKPOINT("created concat workload"); - return std::move(workloadMerger); + return std::move(workloadConcat); } template diff --git a/src/armnn/test/GraphTests.cpp b/src/armnn/test/GraphTests.cpp index cca4653..0777d98 100644 --- a/src/armnn/test/GraphTests.cpp +++ b/src/armnn/test/GraphTests.cpp @@ -454,18 +454,18 @@ struct CopyLayersFixture convLayer1->GetOutputSlot(0).Connect(convLayer2->GetInputSlot(0)); - armnn::OriginsDescriptor mergerDefaults(2); - Layer* const mergerLayer = AddLayer(mergerDefaults, "merger"); - mergerLayer->SetBackendId(armnn::Compute::CpuRef); + armnn::OriginsDescriptor concatDefaults(2); + Layer* const concatLayer = AddLayer(concatDefaults, "concat"); + concatLayer->SetBackendId(armnn::Compute::CpuRef); - convLayer1->GetOutputSlot(0).Connect(mergerLayer->GetInputSlot(0)); - convLayer2->GetOutputSlot(0).Connect(mergerLayer->GetInputSlot(1)); + convLayer1->GetOutputSlot(0).Connect(concatLayer->GetInputSlot(0)); + 
convLayer2->GetOutputSlot(0).Connect(concatLayer->GetInputSlot(1)); armnn::ActivationDescriptor activationDefaults; Layer* const actLayer = AddLayer(activationDefaults, "act"); actLayer->SetBackendId(armnn::Compute::CpuRef); - mergerLayer->GetOutputSlot(0).Connect(actLayer->GetInputSlot(0)); + concatLayer->GetOutputSlot(0).Connect(actLayer->GetInputSlot(0)); armnn::SoftmaxDescriptor softmaxDefaults; Layer* const softmaxLayer = AddLayer(softmaxDefaults, "softmax"); diff --git a/src/armnn/test/NetworkTests.cpp b/src/armnn/test/NetworkTests.cpp index 47fd67b..14b67a1 100644 --- a/src/armnn/test/NetworkTests.cpp +++ b/src/armnn/test/NetworkTests.cpp @@ -226,7 +226,7 @@ BOOST_AUTO_TEST_CASE(NetworkModification) checkOneOutputToOneInputConnection(multiplicationLayer, outputLayer, 2, 0); } -BOOST_AUTO_TEST_CASE(NetworkModification_SplitterMerger) +BOOST_AUTO_TEST_CASE(NetworkModification_SplitterConcat) { armnn::Network net; @@ -255,22 +255,20 @@ BOOST_AUTO_TEST_CASE(NetworkModification_SplitterMerger) splitterLayer->GetOutputSlot(1).Connect(softmaxLayer2->GetInputSlot(0)); - // Adds a merger layer. - armnn::OriginsDescriptor mergerDesc(2, 4); + // Adds a concat layer. + armnn::OriginsDescriptor concatDesc(2, 4); - ARMNN_NO_DEPRECATE_WARN_BEGIN - armnn::IConnectableLayer* mergerLayer = net.AddMergerLayer(mergerDesc, "merger layer"); - ARMNN_NO_DEPRECATE_WARN_END - BOOST_TEST(mergerLayer); + armnn::IConnectableLayer* concatLayer = net.AddConcatLayer(concatDesc, "concat layer"); + BOOST_TEST(concatLayer); - softmaxLayer1->GetOutputSlot(0).Connect(mergerLayer->GetInputSlot(0)); - softmaxLayer2->GetOutputSlot(0).Connect(mergerLayer->GetInputSlot(1)); + softmaxLayer1->GetOutputSlot(0).Connect(concatLayer->GetInputSlot(0)); + softmaxLayer2->GetOutputSlot(0).Connect(concatLayer->GetInputSlot(1)); // Adds an output layer. 
armnn::IConnectableLayer* outputLayer = net.AddOutputLayer(0, "output layer"); BOOST_TEST(outputLayer); - mergerLayer->GetOutputSlot(0).Connect(outputLayer->GetInputSlot(0)); + concatLayer->GetOutputSlot(0).Connect(outputLayer->GetInputSlot(0)); BOOST_TEST(splitterLayer->GetNumOutputSlots() == 2); BOOST_TEST(splitterLayer->GetOutputSlot(0).GetConnection(0) == &softmaxLayer1->GetInputSlot(0)); @@ -278,11 +276,11 @@ BOOST_AUTO_TEST_CASE(NetworkModification_SplitterMerger) BOOST_TEST(splitterLayer->GetOutputSlot(1).GetConnection(0) == &softmaxLayer2->GetInputSlot(0)); BOOST_TEST(&splitterLayer->GetOutputSlot(1) == softmaxLayer2->GetInputSlot(0).GetConnection()); - BOOST_TEST(mergerLayer->GetNumInputSlots() == 2); - BOOST_TEST(softmaxLayer1->GetOutputSlot(0).GetConnection(0) == &mergerLayer->GetInputSlot(0)); - BOOST_TEST(&softmaxLayer1->GetOutputSlot(0) == mergerLayer->GetInputSlot(0).GetConnection()); - BOOST_TEST(softmaxLayer2->GetOutputSlot(0).GetConnection(0) == &mergerLayer->GetInputSlot(1)); - BOOST_TEST(&softmaxLayer2->GetOutputSlot(0) == mergerLayer->GetInputSlot(1).GetConnection()); + BOOST_TEST(concatLayer->GetNumInputSlots() == 2); + BOOST_TEST(softmaxLayer1->GetOutputSlot(0).GetConnection(0) == &concatLayer->GetInputSlot(0)); + BOOST_TEST(&softmaxLayer1->GetOutputSlot(0) == concatLayer->GetInputSlot(0).GetConnection()); + BOOST_TEST(softmaxLayer2->GetOutputSlot(0).GetConnection(0) == &concatLayer->GetInputSlot(1)); + BOOST_TEST(&softmaxLayer2->GetOutputSlot(0) == concatLayer->GetInputSlot(1).GetConnection()); } BOOST_AUTO_TEST_CASE(NetworkModification_SplitterAddition) diff --git a/src/armnn/test/QuantizerTest.cpp b/src/armnn/test/QuantizerTest.cpp index f2c739d..337c615 100644 --- a/src/armnn/test/QuantizerTest.cpp +++ b/src/armnn/test/QuantizerTest.cpp @@ -1238,15 +1238,15 @@ BOOST_AUTO_TEST_CASE(QuantizeConstant) VisitLayersTopologically(quantizedNetworkQSymm16.get(), validatorQSymm16); } -BOOST_AUTO_TEST_CASE(QuantizeMerger) 
+BOOST_AUTO_TEST_CASE(QuantizeConcat) { - class TestMergerQuantization : public TestQuantization + class TestConcatQuantization : public TestQuantization { public: - TestMergerQuantization(const TensorShape& inputShape, const TensorShape& outputShape) + TestConcatQuantization(const TensorShape& inputShape, const TensorShape& outputShape) : TestQuantization(inputShape, outputShape) {} - TestMergerQuantization(const QuantizerOptions& options, + TestConcatQuantization(const QuantizerOptions& options, const TensorShape& inputShape, const TensorShape& outputShape) : TestQuantization(options, inputShape, outputShape) {} @@ -1259,8 +1259,8 @@ BOOST_AUTO_TEST_CASE(QuantizeMerger) LayerBindingId id, const char* name = nullptr) override {} - void VisitMergerLayer(const IConnectableLayer* layer, - const OriginsDescriptor& mergerDescriptor, + void VisitConcatLayer(const IConnectableLayer* layer, + const OriginsDescriptor& originsDescriptor, const char* name = nullptr) override { TensorInfo info = layer->GetOutputSlot(0).GetTensorInfo(); @@ -1277,17 +1277,15 @@ BOOST_AUTO_TEST_CASE(QuantizeMerger) IConnectableLayer* input2 = network->AddInputLayer(2); OriginsDescriptor descriptor(3, 1); - ARMNN_NO_DEPRECATE_WARN_BEGIN - IConnectableLayer* merger = network->AddMergerLayer(descriptor); - ARMNN_NO_DEPRECATE_WARN_END + IConnectableLayer* concatLayer = network->AddConcatLayer(descriptor); IConnectableLayer* output0 = network->AddOutputLayer(3); // Establish connections - input0->GetOutputSlot(0).Connect(merger->GetInputSlot(0)); - input1->GetOutputSlot(0).Connect(merger->GetInputSlot(1)); - input2->GetOutputSlot(0).Connect(merger->GetInputSlot(2)); - merger->GetOutputSlot(0).Connect(output0->GetInputSlot(0)); + input0->GetOutputSlot(0).Connect(concatLayer->GetInputSlot(0)); + input1->GetOutputSlot(0).Connect(concatLayer->GetInputSlot(1)); + input2->GetOutputSlot(0).Connect(concatLayer->GetInputSlot(2)); + concatLayer->GetOutputSlot(0).Connect(output0->GetInputSlot(0)); // Set 
TensorInfo const TensorShape shape{1U}; @@ -1296,7 +1294,7 @@ BOOST_AUTO_TEST_CASE(QuantizeMerger) input0->GetOutputSlot(0).SetTensorInfo(info); input1->GetOutputSlot(0).SetTensorInfo(info); input2->GetOutputSlot(0).SetTensorInfo(info); - merger->GetOutputSlot(0).SetTensorInfo(info); + concatLayer->GetOutputSlot(0).SetTensorInfo(info); const QuantizerOptions options(DataType::QuantisedSymm16); INetworkQuantizerPtr quantizerPtrQAsymm8 = INetworkQuantizer::Create(network.get()); @@ -1314,11 +1312,11 @@ BOOST_AUTO_TEST_CASE(QuantizeMerger) quantizerPtrQSymm16->OverrideInputRange(2, min, (max - 7.8f)); INetworkPtr quantizedNetworkQAsymm8 = quantizerPtrQAsymm8->ExportNetwork(); - TestMergerQuantization validatorQAsymm8(shape, shape); + TestConcatQuantization validatorQAsymm8(shape, shape); VisitLayersTopologically(quantizedNetworkQAsymm8.get(), validatorQAsymm8); INetworkPtr quantizedNetworkQSymm16 = quantizerPtrQSymm16->ExportNetwork(); - TestMergerQuantization validatorQSymm16(options, shape, shape); + TestConcatQuantization validatorQSymm16(options, shape, shape); VisitLayersTopologically(quantizedNetworkQSymm16.get(), validatorQSymm16); } diff --git a/src/armnn/test/SubgraphViewTests.cpp b/src/armnn/test/SubgraphViewTests.cpp index 8369fc4..3e762e2 100644 --- a/src/armnn/test/SubgraphViewTests.cpp +++ b/src/armnn/test/SubgraphViewTests.cpp @@ -223,21 +223,21 @@ BOOST_AUTO_TEST_CASE(MultiInputSingleOutput) Layer* const convLayer1 = graph.AddLayer(convDescriptor, "conv1"); Layer* const convLayer2 = graph.AddLayer(convDescriptor, "conv2"); - OriginsDescriptor mergerDescriptor(2); - Layer* const mergerLayer = graph.AddLayer(mergerDescriptor, "merger"); + OriginsDescriptor concatDescriptor(2); + Layer* const concatLayer = graph.AddLayer(concatDescriptor, "concat"); Layer* const outputLayer = graph.AddLayer(0, "output"); inputLayer->GetOutputSlot(0).Connect(splitterLayer->GetInputSlot(0)); splitterLayer->GetOutputSlot(0).Connect(convLayer1->GetInputSlot(0)); 
splitterLayer->GetOutputSlot(1).Connect(convLayer2->GetInputSlot(0)); - convLayer1->GetOutputSlot(0).Connect(mergerLayer->GetInputSlot(0)); - convLayer2->GetOutputSlot(0).Connect(mergerLayer->GetInputSlot(1)); - mergerLayer->GetOutputSlot(0).Connect(outputLayer->GetInputSlot(0)); + convLayer1->GetOutputSlot(0).Connect(concatLayer->GetInputSlot(0)); + convLayer2->GetOutputSlot(0).Connect(concatLayer->GetInputSlot(1)); + concatLayer->GetOutputSlot(0).Connect(outputLayer->GetInputSlot(0)); // Construct sub-graph SubgraphViewSelector::SubgraphViewPtr subgraph = CreateSubgraphViewFrom(CreateInputsFrom({convLayer1, convLayer2}), - CreateOutputsFrom({mergerLayer}), + CreateOutputsFrom({concatLayer}), {}); // Save sub-graph connections for comparison after substitution @@ -270,8 +270,8 @@ BOOST_AUTO_TEST_CASE(SingleInputMultiOutput) Convolution2dDescriptor convDescriptor; Layer* const convLayer1 = graph.AddLayer(convDescriptor, "conv1"); Layer* const convLayer2 = graph.AddLayer(convDescriptor, "conv2"); - OriginsDescriptor mergerDescriptor(2); - Layer* const mergerLayer = graph.AddLayer(mergerDescriptor, "merger"); + OriginsDescriptor concatDescriptor(2); + Layer* const concatLayer = graph.AddLayer(concatDescriptor, "concat"); Layer* const outputLayer = graph.AddLayer(0, "output"); ViewsDescriptor splitterDescriptor(2); @@ -280,9 +280,9 @@ BOOST_AUTO_TEST_CASE(SingleInputMultiOutput) inputLayer->GetOutputSlot(0).Connect(splitterLayer->GetInputSlot(0)); splitterLayer->GetOutputSlot(0).Connect(convLayer1->GetInputSlot(0)); splitterLayer->GetOutputSlot(1).Connect(convLayer2->GetInputSlot(0)); - convLayer1->GetOutputSlot(0).Connect(mergerLayer->GetInputSlot(0)); - convLayer2->GetOutputSlot(0).Connect(mergerLayer->GetInputSlot(1)); - mergerLayer->GetOutputSlot(0).Connect(outputLayer->GetInputSlot(0)); + convLayer1->GetOutputSlot(0).Connect(concatLayer->GetInputSlot(0)); + convLayer2->GetOutputSlot(0).Connect(concatLayer->GetInputSlot(1)); + 
concatLayer->GetOutputSlot(0).Connect(outputLayer->GetInputSlot(0)); // Construct sub-graph SubgraphViewSelector::SubgraphViewPtr subgraph = CreateSubgraphViewFrom(CreateInputsFrom({splitterLayer}), @@ -323,17 +323,17 @@ BOOST_AUTO_TEST_CASE(MultiInputMultiOutput) Layer* const convLayer1 = graph.AddLayer(convDescriptor, "conv1"); Layer* const convLayer2 = graph.AddLayer(convDescriptor, "conv2"); - OriginsDescriptor mergerDescriptor(2); - Layer* const mergerLayer = graph.AddLayer(mergerDescriptor, "merger"); + OriginsDescriptor concatDescriptor(2); + Layer* const concatLayer = graph.AddLayer(concatDescriptor, "concat"); Layer* const outputLayer = graph.AddLayer(0, "output"); inputLayer->GetOutputSlot(0).Connect(splitterLayer->GetInputSlot(0)); splitterLayer->GetOutputSlot(0).Connect(convLayer1->GetInputSlot(0)); splitterLayer->GetOutputSlot(1).Connect(convLayer2->GetInputSlot(0)); - convLayer1->GetOutputSlot(0).Connect(mergerLayer->GetInputSlot(0)); - convLayer2->GetOutputSlot(0).Connect(mergerLayer->GetInputSlot(1)); - mergerLayer->GetOutputSlot(0).Connect(outputLayer->GetInputSlot(0)); + convLayer1->GetOutputSlot(0).Connect(concatLayer->GetInputSlot(0)); + convLayer2->GetOutputSlot(0).Connect(concatLayer->GetInputSlot(1)); + concatLayer->GetOutputSlot(0).Connect(outputLayer->GetInputSlot(0)); // Construct sub-graph SubgraphViewSelector::SubgraphViewPtr subgraph = CreateSubgraphViewFrom(CreateInputsFrom({convLayer1, convLayer2}), @@ -376,8 +376,8 @@ BOOST_AUTO_TEST_CASE(EraseReplacedLayers) Layer* const convLayer1 = graph.AddLayer(convDescriptor, "conv1"); Layer* const convLayer2 = graph.AddLayer(convDescriptor, "conv2"); - OriginsDescriptor mergerDescriptor(2); - Layer* const mergerLayer = graph.AddLayer(mergerDescriptor, "merger"); + OriginsDescriptor concatDescriptor(2); + Layer* const concatLayer = graph.AddLayer(concatDescriptor, "concat"); graph.AddLayer(0, "output"); @@ -387,7 +387,7 @@ BOOST_AUTO_TEST_CASE(EraseReplacedLayers) {splitterLayer, convLayer1, 
convLayer2, - mergerLayer}); + concatLayer}); // Construct dummy pre-compiled layer PreCompiledDescriptor preCompiledDescriptor(0, 0); @@ -538,8 +538,8 @@ BOOST_AUTO_TEST_CASE(IslandInTheMiddle) // Graph graph; - OriginsDescriptor mergerDescriptor(2); - auto x2 = graph.AddLayer(mergerDescriptor, "x2"); + OriginsDescriptor concatDescriptor(2); + auto x2 = graph.AddLayer(concatDescriptor, "x2"); auto m3 = graph.InsertNewLayer(x2->GetInputSlot(0), ActivationDescriptor{}, "m3"); @@ -856,14 +856,14 @@ BOOST_AUTO_TEST_CASE(MultiInputMultiOutput) Graph graph; ActivationDescriptor activationDefaults; - OriginsDescriptor mergerDescriptor(2); + OriginsDescriptor concatDescriptor(2); auto x1 = graph.AddLayer(0, "x1"); auto x2 = graph.AddLayer(1, "x2"); auto m1 = graph.AddLayer(activationDefaults, "m1"); auto m2 = graph.AddLayer(activationDefaults, "m2"); - auto m3 = graph.AddLayer(mergerDescriptor, "m3"); + auto m3 = graph.AddLayer(concatDescriptor, "m3"); auto m4 = graph.AddLayer(activationDefaults, "m4"); auto m5 = graph.AddLayer(activationDefaults, "m5"); @@ -887,11 +887,11 @@ BOOST_AUTO_TEST_CASE(MultiInputMultiOutput) SubgraphViewSelector::Subgraphs subgraphs = SubgraphViewSelector::SelectSubgraphs( graph, - // select Activation and Merger Layers M1, M2, M3, M4, M5 + // select Activation and Concat Layers M1, M2, M3, M4, M5 [](const Layer & l) { bool toSelect = (l.GetType() == LayerType::Activation - || l.GetType() == LayerType::Merger); + || l.GetType() == LayerType::Concat); return toSelect; }); @@ -994,18 +994,18 @@ BOOST_AUTO_TEST_CASE(MultipleSubgraphs) Layer* const convLayer1 = graph.AddLayer(convDescriptor, "conv1"); Layer* const convLayer2 = graph.AddLayer(convDescriptor, "conv2"); - OriginsDescriptor mergerDescriptor(2); - Layer* const mergerLayer = graph.AddLayer(mergerDescriptor, "merger"); - mergerLayer->SetBackendId(Compute::CpuAcc); + OriginsDescriptor concatDescriptor(2); + Layer* const pConcatLayer = graph.AddLayer(concatDescriptor, "concat"); + 
pConcatLayer->SetBackendId(Compute::CpuAcc); Layer* const outputLayer = graph.AddLayer(0, "output"); inputLayer->GetOutputSlot(0).Connect(splitterLayer->GetInputSlot(0)); splitterLayer->GetOutputSlot(0).Connect(convLayer1->GetInputSlot(0)); splitterLayer->GetOutputSlot(1).Connect(convLayer2->GetInputSlot(0)); - convLayer1->GetOutputSlot(0).Connect(mergerLayer->GetInputSlot(0)); - convLayer2->GetOutputSlot(0).Connect(mergerLayer->GetInputSlot(1)); - mergerLayer->GetOutputSlot(0).Connect(outputLayer->GetInputSlot(0)); + convLayer1->GetOutputSlot(0).Connect(pConcatLayer->GetInputSlot(0)); + convLayer2->GetOutputSlot(0).Connect(pConcatLayer->GetInputSlot(1)); + pConcatLayer->GetOutputSlot(0).Connect(outputLayer->GetInputSlot(0)); // CpuAcc sub graph selector SubgraphViewSelector::Subgraphs subgraphs = @@ -1096,7 +1096,7 @@ BOOST_AUTO_TEST_CASE(SubgraphCycles) // Graph graph; - OriginsDescriptor mergerDescriptor(2); + OriginsDescriptor originsDescriptor(2); auto x0 = graph.AddLayer(0, "x0"); auto m0 = graph.AddLayer(ActivationDescriptor{}, "m0"); auto x1 = graph.AddLayer(ActivationDescriptor{}, "x1"); diff --git a/src/armnn/test/TestNameAndDescriptorLayerVisitor.cpp b/src/armnn/test/TestNameAndDescriptorLayerVisitor.cpp index f94906d..478f029 100644 --- a/src/armnn/test/TestNameAndDescriptorLayerVisitor.cpp +++ b/src/armnn/test/TestNameAndDescriptorLayerVisitor.cpp @@ -20,7 +20,7 @@ void Set2dDataValues(SplitterDescriptor descriptor, u_int32_t value) } } -void Set2dDataValues(MergerDescriptor descriptor, u_int32_t value) +void Set2dDataValues(OriginsDescriptor& descriptor, u_int32_t value) { for (unsigned int i = 0; i < descriptor.GetNumViews(); ++i) { @@ -230,32 +230,28 @@ BOOST_AUTO_TEST_CASE(CheckSplitterLayerVisitorNameNullAndDescriptor) layer->Accept(visitor); } -BOOST_AUTO_TEST_CASE(CheckMergerLayerVisitorNameAndDescriptor) +BOOST_AUTO_TEST_CASE(CheckConcatLayerVisitorNameAndDescriptor) { - const char* layerName = "MergerLayer"; - MergerDescriptor descriptor(2, 
2); + const char* layerName = "ConcatLayer"; + OriginsDescriptor descriptor(2, 2); Set2dDataValues(descriptor, 1); descriptor.SetConcatAxis(1); - TestMergerLayerVisitor visitor(descriptor, layerName); + TestConcatLayerVisitor visitor(descriptor, layerName); Network net; - ARMNN_NO_DEPRECATE_WARN_BEGIN - IConnectableLayer *const layer = net.AddMergerLayer(descriptor, layerName); - ARMNN_NO_DEPRECATE_WARN_END + IConnectableLayer *const layer = net.AddConcatLayer(descriptor, layerName); layer->Accept(visitor); } -BOOST_AUTO_TEST_CASE(CheckMergerLayerVisitorNameNullAndDescriptor) +BOOST_AUTO_TEST_CASE(CheckConcatLayerVisitorNameNullAndDescriptor) { - MergerDescriptor descriptor(2, 2); + OriginsDescriptor descriptor(2, 2); Set2dDataValues(descriptor, 1); descriptor.SetConcatAxis(1); - TestMergerLayerVisitor visitor(descriptor); + TestConcatLayerVisitor visitor(descriptor); Network net; - ARMNN_NO_DEPRECATE_WARN_BEGIN - IConnectableLayer *const layer = net.AddMergerLayer(descriptor); - ARMNN_NO_DEPRECATE_WARN_END + IConnectableLayer *const layer = net.AddConcatLayer(descriptor); layer->Accept(visitor); } diff --git a/src/armnn/test/TestNameAndDescriptorLayerVisitor.hpp b/src/armnn/test/TestNameAndDescriptorLayerVisitor.hpp index bf23332..0db956d 100644 --- a/src/armnn/test/TestNameAndDescriptorLayerVisitor.hpp +++ b/src/armnn/test/TestNameAndDescriptorLayerVisitor.hpp @@ -317,70 +317,70 @@ public: }; }; -class TestMergerLayerVisitor : public TestLayerVisitor +class TestConcatLayerVisitor : public TestLayerVisitor { private: OriginsDescriptor m_VisitorDescriptor; public: - explicit TestMergerLayerVisitor(const OriginsDescriptor& mergerDescriptor, const char* name = nullptr) + explicit TestConcatLayerVisitor(const OriginsDescriptor& concatDescriptor, const char* name = nullptr) : TestLayerVisitor(name) - , m_VisitorDescriptor(mergerDescriptor.GetNumViews(), mergerDescriptor.GetNumDimensions()) + , m_VisitorDescriptor(concatDescriptor.GetNumViews(), 
concatDescriptor.GetNumDimensions()) { - m_VisitorDescriptor.SetConcatAxis(mergerDescriptor.GetConcatAxis()); + m_VisitorDescriptor.SetConcatAxis(concatDescriptor.GetConcatAxis()); - if (mergerDescriptor.GetNumViews() != m_VisitorDescriptor.GetNumViews()) + if (concatDescriptor.GetNumViews() != m_VisitorDescriptor.GetNumViews()) { BOOST_ERROR("Unequal number of views in splitter descriptor."); } - else if (mergerDescriptor.GetNumDimensions() != m_VisitorDescriptor.GetNumDimensions()) + else if (concatDescriptor.GetNumDimensions() != m_VisitorDescriptor.GetNumDimensions()) { BOOST_ERROR("Unequal number of dimensions in splitter descriptor."); } else { - for (unsigned int i = 0; i < mergerDescriptor.GetNumViews(); ++i) + for (unsigned int i = 0; i < concatDescriptor.GetNumViews(); ++i) { - for (unsigned int j = 0; j < mergerDescriptor.GetNumDimensions(); ++j) + for (unsigned int j = 0; j < concatDescriptor.GetNumDimensions(); ++j) { - m_VisitorDescriptor.SetViewOriginCoord(i, j, mergerDescriptor.GetViewOrigin(i)[j]); + m_VisitorDescriptor.SetViewOriginCoord(i, j, concatDescriptor.GetViewOrigin(i)[j]); } } } }; - void CheckDescriptor(const OriginsDescriptor& mergerDescriptor) + void CheckDescriptor(const OriginsDescriptor& concatDescriptor) { - BOOST_CHECK_EQUAL(mergerDescriptor.GetNumViews(), m_VisitorDescriptor.GetNumViews()); - BOOST_CHECK_EQUAL(mergerDescriptor.GetNumDimensions(), m_VisitorDescriptor.GetNumDimensions()); - BOOST_CHECK_EQUAL(mergerDescriptor.GetConcatAxis(), m_VisitorDescriptor.GetConcatAxis()); + BOOST_CHECK_EQUAL(concatDescriptor.GetNumViews(), m_VisitorDescriptor.GetNumViews()); + BOOST_CHECK_EQUAL(concatDescriptor.GetNumDimensions(), m_VisitorDescriptor.GetNumDimensions()); + BOOST_CHECK_EQUAL(concatDescriptor.GetConcatAxis(), m_VisitorDescriptor.GetConcatAxis()); - if (mergerDescriptor.GetNumViews() != m_VisitorDescriptor.GetNumViews()) + if (concatDescriptor.GetNumViews() != m_VisitorDescriptor.GetNumViews()) { BOOST_ERROR("Unequal number of 
views in splitter descriptor."); } - else if (mergerDescriptor.GetNumDimensions() != m_VisitorDescriptor.GetNumDimensions()) + else if (concatDescriptor.GetNumDimensions() != m_VisitorDescriptor.GetNumDimensions()) { BOOST_ERROR("Unequal number of dimensions in splitter descriptor."); } else { - for (unsigned int i = 0; i < mergerDescriptor.GetNumViews(); ++i) + for (unsigned int i = 0; i < concatDescriptor.GetNumViews(); ++i) { - for (unsigned int j = 0; j < mergerDescriptor.GetNumDimensions(); ++j) + for (unsigned int j = 0; j < concatDescriptor.GetNumDimensions(); ++j) { - BOOST_CHECK_EQUAL(mergerDescriptor.GetViewOrigin(i)[j], m_VisitorDescriptor.GetViewOrigin(i)[j]); + BOOST_CHECK_EQUAL(concatDescriptor.GetViewOrigin(i)[j], m_VisitorDescriptor.GetViewOrigin(i)[j]); } } } } - void VisitMergerLayer(const IConnectableLayer* layer, - const OriginsDescriptor& mergerDescriptor, + void VisitConcatLayer(const IConnectableLayer* layer, + const OriginsDescriptor& concatDescriptor, const char* name = nullptr) override { CheckLayerPointer(layer); - CheckDescriptor(mergerDescriptor); + CheckDescriptor(concatDescriptor); CheckLayerName(name); }; }; diff --git a/src/armnnCaffeParser/CaffeParser.cpp b/src/armnnCaffeParser/CaffeParser.cpp index a27abc7..4027e94 100644 --- a/src/armnnCaffeParser/CaffeParser.cpp +++ b/src/armnnCaffeParser/CaffeParser.cpp @@ -564,37 +564,37 @@ void CaffeParserBase::AddConvLayerWithSplits(const caffe::LayerParameter& layerP convLayer->GetOutputSlot(0).SetTensorInfo(BlobShapeToTensorInfo(outputShape)); } - // If the convolution was performed in chunks, add a layer to merge the results + // If the convolution was performed in chunks, add a layer to concatenate the results // The merge input shape matches that of the convolution output - unsigned int mergeDimSizes[4] = {static_cast(outputShape.dim(0)), - static_cast(outputShape.dim(1)), - static_cast(outputShape.dim(2)), - static_cast(outputShape.dim(3))}; + unsigned int concatDimSizes[4] = 
{static_cast(outputShape.dim(0)), + static_cast(outputShape.dim(1)), + static_cast(outputShape.dim(2)), + static_cast(outputShape.dim(3))}; - // This is used to describe how the input is to be merged - OriginsDescriptor mergeDesc(numGroups); + // This is used to describe how the input is to be concatenated + OriginsDescriptor concatDesc(numGroups); // Now create an input node for each group, using the name from // the output of the corresponding convolution for (unsigned int g = 0; g < numGroups; ++g) { - mergeDesc.SetViewOriginCoord(g, 1, mergeDimSizes[1] * g); + concatDesc.SetViewOriginCoord(g, 1, concatDimSizes[1] * g); } - // Make sure the output from the merge is the correct size to hold the data for all groups - mergeDimSizes[1] *= numGroups; - outputShape.set_dim(1, mergeDimSizes[1]); + // Make sure the output from the concat is the correct size to hold the data for all groups + concatDimSizes[1] *= numGroups; + outputShape.set_dim(1, concatDimSizes[1]); - // Finally add the merge layer - IConnectableLayer* mergerLayer = m_Network->AddConcatLayer(mergeDesc, layerParam.name().c_str()); + // Finally add the concat layer + IConnectableLayer* concatLayer = m_Network->AddConcatLayer(concatDesc, layerParam.name().c_str()); - if (!mergerLayer) + if (!concatLayer) { throw ParseException( boost::str( boost::format( - "Failed to create final merger layer for Split+Convolution+Merger. " + "Failed to create final concat layer for Split+Convolution+Concat. 
" "Layer=%1% #groups=%2% #filters=%3% %4%") % layerParam.name() % numGroups % @@ -604,10 +604,10 @@ void CaffeParserBase::AddConvLayerWithSplits(const caffe::LayerParameter& layerP for (unsigned int g = 0; g < numGroups; ++g) { - convLayers[g]->GetOutputSlot(0).Connect(mergerLayer->GetInputSlot(g)); + convLayers[g]->GetOutputSlot(0).Connect(concatLayer->GetInputSlot(g)); } - mergerLayer->GetOutputSlot(0).SetTensorInfo(armnn::TensorInfo(4, mergeDimSizes, DataType::Float32)); - SetArmnnOutputSlotForCaffeTop(layerParam.top(0), mergerLayer->GetOutputSlot(0)); + concatLayer->GetOutputSlot(0).SetTensorInfo(armnn::TensorInfo(4, concatDimSizes, DataType::Float32)); + SetArmnnOutputSlotForCaffeTop(layerParam.top(0), concatLayer->GetOutputSlot(0)); } void CaffeParserBase::AddConvLayerWithDepthwiseConv(const caffe::LayerParameter& layerParam, @@ -798,7 +798,7 @@ void CaffeParserBase::ParseConvLayer(const LayerParameter& layerParam) else { // we split the input by channels into channels/groups separate convolutions - // and merger the results afterwards + // and concatenate the results afterwards AddConvLayerWithSplits(layerParam, convolution2dDescriptor, kernelW, kernelH); return; } diff --git a/src/armnnDeserializer/Deserializer.cpp b/src/armnnDeserializer/Deserializer.cpp index 14cf232..75c258b 100644 --- a/src/armnnDeserializer/Deserializer.cpp +++ b/src/armnnDeserializer/Deserializer.cpp @@ -192,6 +192,7 @@ m_ParserFunctions(Layer_MAX+1, &Deserializer::ParseUnsupportedLayer) m_ParserFunctions[Layer_AdditionLayer] = &Deserializer::ParseAdd; m_ParserFunctions[Layer_BatchToSpaceNdLayer] = &Deserializer::ParseBatchToSpaceNd; m_ParserFunctions[Layer_BatchNormalizationLayer] = &Deserializer::ParseBatchNormalization; + m_ParserFunctions[Layer_ConcatLayer] = &Deserializer::ParseConcat; m_ParserFunctions[Layer_ConstantLayer] = &Deserializer::ParseConstant; m_ParserFunctions[Layer_Convolution2dLayer] = &Deserializer::ParseConvolution2d; 
m_ParserFunctions[Layer_DepthwiseConvolution2dLayer] = &Deserializer::ParseDepthwiseConvolution2d; @@ -241,6 +242,8 @@ Deserializer::LayerBaseRawPtr Deserializer::GetBaseLayer(const GraphPtr& graphPt return graphPtr->layers()->Get(layerIndex)->layer_as_BatchToSpaceNdLayer()->base(); case Layer::Layer_BatchNormalizationLayer: return graphPtr->layers()->Get(layerIndex)->layer_as_BatchNormalizationLayer()->base(); + case Layer::Layer_ConcatLayer: + return graphPtr->layers()->Get(layerIndex)->layer_as_ConcatLayer()->base(); case Layer::Layer_ConstantLayer: return graphPtr->layers()->Get(layerIndex)->layer_as_ConstantLayer()->base(); case Layer::Layer_Convolution2dLayer: @@ -1229,6 +1232,22 @@ void Deserializer::ParseMaximum(GraphPtr graph, unsigned int layerIndex) RegisterOutputSlots(graph, layerIndex, layer); } +const armnnSerializer::OriginsDescriptor* GetOriginsDescriptor(const armnnSerializer::SerializedGraph* graph, + unsigned int layerIndex) +{ + auto layerType = graph->layers()->Get(layerIndex)->layer_type(); + + switch (layerType) + { + case Layer::Layer_ConcatLayer: + return graph->layers()->Get(layerIndex)->layer_as_ConcatLayer()->descriptor(); + case Layer::Layer_MergerLayer: + return graph->layers()->Get(layerIndex)->layer_as_MergerLayer()->descriptor(); + default: + throw armnn::Exception("unknown layer type, should be concat or merger"); + } +} + void Deserializer::ParseConcat(GraphPtr graph, unsigned int layerIndex) { CHECK_LAYERS(graph, 0, layerIndex); @@ -1237,18 +1256,17 @@ void Deserializer::ParseConcat(GraphPtr graph, unsigned int layerIndex) auto outputs = GetOutputs(graph, layerIndex); CHECK_VALID_SIZE(outputs.size(), 1); - auto mergerLayer = graph->layers()->Get(layerIndex)->layer_as_MergerLayer(); auto layerName = GetLayerName(graph, layerIndex); - auto mergerDescriptor = mergerLayer->descriptor(); - unsigned int numViews = mergerDescriptor->numViews(); - unsigned int numDimensions = mergerDescriptor->numDimensions(); + auto originsDescriptor = 
GetOriginsDescriptor(graph, layerIndex); + unsigned int numViews = originsDescriptor->numViews(); + unsigned int numDimensions = originsDescriptor->numDimensions(); // can now check the number of inputs == number of views auto inputs = GetInputs(graph, layerIndex); CHECK_VALID_SIZE(inputs.size(), numViews); armnn::OriginsDescriptor descriptor(numViews, numDimensions); - auto originsPtr = mergerDescriptor->viewOrigins(); + auto originsPtr = originsDescriptor->viewOrigins(); for (unsigned int v = 0; v < numViews; ++v) { auto originPtr = originsPtr->Get(v); @@ -1258,7 +1276,7 @@ void Deserializer::ParseConcat(GraphPtr graph, unsigned int layerIndex) descriptor.SetViewOriginCoord(v, d, value); } } - descriptor.SetConcatAxis(mergerDescriptor->concatAxis()); + descriptor.SetConcatAxis(originsDescriptor->concatAxis()); IConnectableLayer* layer = m_Network->AddConcatLayer(descriptor, layerName.c_str()); armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]); diff --git a/src/armnnOnnxParser/OnnxParser.cpp b/src/armnnOnnxParser/OnnxParser.cpp index 2b4ce7d..6f83dc5 100644 --- a/src/armnnOnnxParser/OnnxParser.cpp +++ b/src/armnnOnnxParser/OnnxParser.cpp @@ -1294,7 +1294,7 @@ void OnnxParser::ParseConv(const onnx::NodeProto& node) else { // TODO: split the input by channels into channels/groups separate convolutions - // and merger the results afterwards + // and concatenate the results afterwards throw ParseException(boost::str( boost::format("Error parsing Convolution node: %1%. 
" "The 'group'=%2% parameter should be 1 or be equal to the " diff --git a/src/armnnSerializer/ArmnnSchema.fbs b/src/armnnSerializer/ArmnnSchema.fbs index 0419c4b..5a001de 100644 --- a/src/armnnSerializer/ArmnnSchema.fbs +++ b/src/armnnSerializer/ArmnnSchema.fbs @@ -120,7 +120,8 @@ enum LayerType : uint { Quantize = 35, Dequantize = 36, Merge = 37, - Switch = 38 + Switch = 38, + Concat = 39 } // Base layer table to be used as part of other layers @@ -442,6 +443,11 @@ table StridedSliceDescriptor { dataLayout:DataLayout; } +table ConcatLayer { + base:LayerBase; + descriptor:OriginsDescriptor; +} + table MergerLayer { base:LayerBase; descriptor:OriginsDescriptor; @@ -577,7 +583,8 @@ union Layer { QuantizeLayer, DequantizeLayer, MergeLayer, - SwitchLayer + SwitchLayer, + ConcatLayer } table AnyLayer { diff --git a/src/armnnSerializer/Serializer.cpp b/src/armnnSerializer/Serializer.cpp index 865ed7a..c49f6f9 100644 --- a/src/armnnSerializer/Serializer.cpp +++ b/src/armnnSerializer/Serializer.cpp @@ -515,17 +515,24 @@ void SerializerVisitor::VisitMergeLayer(const armnn::IConnectableLayer* layer, c } void SerializerVisitor::VisitMergerLayer(const armnn::IConnectableLayer* layer, - const armnn::OriginsDescriptor& mergerDescriptor, + const armnn::MergerDescriptor& mergerDescriptor, const char* name) { - auto flatBufferMergerBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Merger); + VisitConcatLayer(layer, mergerDescriptor, name); +} + +void SerializerVisitor::VisitConcatLayer(const armnn::IConnectableLayer* layer, + const armnn::ConcatDescriptor& concatDescriptor, + const char* name) +{ + auto flatBufferConcatBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Concat); std::vector> views; - for (unsigned int v = 0; v < mergerDescriptor.GetNumViews(); ++v) + for (unsigned int v = 0; v < concatDescriptor.GetNumViews(); ++v) { - const uint32_t* origin = mergerDescriptor.GetViewOrigin(v); + const uint32_t* origin = 
concatDescriptor.GetViewOrigin(v); std::vector origins; - for (unsigned int d = 0; d < mergerDescriptor.GetNumDimensions(); ++d) + for (unsigned int d = 0; d < concatDescriptor.GetNumDimensions(); ++d) { origins.push_back(origin[d]); } @@ -534,17 +541,17 @@ void SerializerVisitor::VisitMergerLayer(const armnn::IConnectableLayer* layer, views.push_back(uintVector); } - auto flatBufferMergerDescriptor = CreateOriginsDescriptor(m_flatBufferBuilder, - mergerDescriptor.GetConcatAxis(), - mergerDescriptor.GetNumViews(), - mergerDescriptor.GetNumDimensions(), + auto flatBufferConcatDescriptor = CreateOriginsDescriptor(m_flatBufferBuilder, + concatDescriptor.GetConcatAxis(), + concatDescriptor.GetNumViews(), + concatDescriptor.GetNumDimensions(), m_flatBufferBuilder.CreateVector(views)); - auto flatBufferLayer = CreateMergerLayer(m_flatBufferBuilder, - flatBufferMergerBaseLayer, - flatBufferMergerDescriptor); + auto flatBufferLayer = CreateConcatLayer(m_flatBufferBuilder, + flatBufferConcatBaseLayer, + flatBufferConcatDescriptor); - CreateAnyLayer(flatBufferLayer.o, serializer::Layer::Layer_MergerLayer); + CreateAnyLayer(flatBufferLayer.o, serializer::Layer::Layer_ConcatLayer); } void SerializerVisitor::VisitMultiplicationLayer(const armnn::IConnectableLayer* layer, const char* name) diff --git a/src/armnnSerializer/Serializer.hpp b/src/armnnSerializer/Serializer.hpp index 4a71837..2e2816a 100644 --- a/src/armnnSerializer/Serializer.hpp +++ b/src/armnnSerializer/Serializer.hpp @@ -61,6 +61,10 @@ public: const armnn::ConstTensor& gamma, const char* name = nullptr) override; + void VisitConcatLayer(const armnn::IConnectableLayer* layer, + const armnn::ConcatDescriptor& concatDescriptor, + const char* name = nullptr) override; + void VisitConstantLayer(const armnn::IConnectableLayer* layer, const armnn::ConstTensor& input, const char* = nullptr) override; @@ -132,8 +136,9 @@ public: void VisitMergeLayer(const armnn::IConnectableLayer* layer, const char* name = nullptr) 
override; + ARMNN_DEPRECATED_MSG("Use VisitConcatLayer instead") void VisitMergerLayer(const armnn::IConnectableLayer* layer, - const armnn::OriginsDescriptor& mergerDescriptor, + const armnn::MergerDescriptor& mergerDescriptor, const char* name = nullptr) override; void VisitMultiplicationLayer(const armnn::IConnectableLayer* layer, diff --git a/src/armnnSerializer/SerializerSupport.md b/src/armnnSerializer/SerializerSupport.md index f1b3365..832c1a7 100644 --- a/src/armnnSerializer/SerializerSupport.md +++ b/src/armnnSerializer/SerializerSupport.md @@ -26,7 +26,7 @@ The Arm NN SDK Serializer currently supports the following layers: * Maximum * Mean * Merge -* Merger +* Concat * Minimum * Multiplication * Normalization diff --git a/src/armnnSerializer/test/SerializerTests.cpp b/src/armnnSerializer/test/SerializerTests.cpp index b21ae58..752cf0c 100644 --- a/src/armnnSerializer/test/SerializerTests.cpp +++ b/src/armnnSerializer/test/SerializerTests.cpp @@ -1248,6 +1248,13 @@ public: const armnn::OriginsDescriptor& descriptor, const char* name) override { + throw armnn::Exception("MergerLayer should have translated to ConcatLayer"); + } + + void VisitConcatLayer(const armnn::IConnectableLayer* layer, + const armnn::OriginsDescriptor& descriptor, + const char* name) override + { VerifyNameAndConnections(layer, name); VerifyDescriptor(descriptor); } @@ -1271,6 +1278,9 @@ private: armnn::OriginsDescriptor m_Descriptor; }; +// NOTE: until the deprecated AddMergerLayer disappears this test checks that calling +// AddMergerLayer places a ConcatLayer into the serialized format and that +// when this deserialises we have a ConcatLayer BOOST_AUTO_TEST_CASE(SerializeMerger) { const std::string layerName("merger"); @@ -1309,17 +1319,10 @@ BOOST_AUTO_TEST_CASE(SerializeMerger) BOOST_AUTO_TEST_CASE(EnsureMergerLayerBackwardCompatibility) { // The hex array below is a flat buffer containing a simple network with two inputs - // a merger layer (soon to be a thing of the past) and 
an output layer with dimensions - // as per the tensor infos below. - // The intention is that this test will be repurposed as soon as the MergerLayer - // is replaced by a ConcatLayer to verify that we can still read back these old style + // a merger layer (now deprecated) and an output layer with dimensions as per the tensor infos below. + // + // This test verifies that we can still read back these old style // models replacing the MergerLayers with ConcatLayers with the same parameters. - // To do this the MergerLayerVerifier will be changed to have a VisitConcatLayer - // which will do the work that the VisitMergerLayer currently does and the VisitMergerLayer - // so long as it remains (public API will drop Merger Layer at some future point) - // will throw an error if invoked because none of the graphs we create should contain - // Merger layers now regardless of whether we attempt to insert the Merger layer via - // the INetwork.AddMergerLayer call or by deserializing an old style flatbuffer file. 
unsigned int size = 760; const unsigned char mergerModel[] = { 0x10,0x00,0x00,0x00,0x00,0x00,0x0A,0x00,0x10,0x00,0x04,0x00,0x08,0x00,0x0C,0x00,0x0A,0x00,0x00,0x00, @@ -1381,6 +1384,41 @@ BOOST_AUTO_TEST_CASE(EnsureMergerLayerBackwardCompatibility) deserializedNetwork->Accept(verifier); } +BOOST_AUTO_TEST_CASE(SerializeConcat) +{ + const std::string layerName("concat"); + const armnn::TensorInfo inputInfo = armnn::TensorInfo({2, 3, 2, 2}, armnn::DataType::Float32); + const armnn::TensorInfo outputInfo = armnn::TensorInfo({4, 3, 2, 2}, armnn::DataType::Float32); + + const std::vector shapes({inputInfo.GetShape(), inputInfo.GetShape()}); + + armnn::OriginsDescriptor descriptor = + armnn::CreateDescriptorForConcatenation(shapes.begin(), shapes.end(), 0); + + armnn::INetworkPtr network = armnn::INetwork::Create(); + armnn::IConnectableLayer* const inputLayerOne = network->AddInputLayer(0); + armnn::IConnectableLayer* const inputLayerTwo = network->AddInputLayer(1); + armnn::IConnectableLayer* const concatLayer = network->AddConcatLayer(descriptor, layerName.c_str()); + armnn::IConnectableLayer* const outputLayer = network->AddOutputLayer(0); + + inputLayerOne->GetOutputSlot(0).Connect(concatLayer->GetInputSlot(0)); + inputLayerTwo->GetOutputSlot(0).Connect(concatLayer->GetInputSlot(1)); + concatLayer->GetOutputSlot(0).Connect(outputLayer->GetInputSlot(0)); + + inputLayerOne->GetOutputSlot(0).SetTensorInfo(inputInfo); + inputLayerTwo->GetOutputSlot(0).SetTensorInfo(inputInfo); + concatLayer->GetOutputSlot(0).SetTensorInfo(outputInfo); + + std::string concatLayerNetwork = SerializeNetwork(*network); + armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(concatLayerNetwork); + BOOST_CHECK(deserializedNetwork); + + // NOTE: using the MergerLayerVerifier to ensure that it is a concat layer and not a + // merger layer that gets placed into the graph. 
+ MergerLayerVerifier verifier(layerName, {inputInfo, inputInfo}, {outputInfo}, descriptor); + deserializedNetwork->Accept(verifier); +} + BOOST_AUTO_TEST_CASE(SerializeMinimum) { class MinimumLayerVerifier : public LayerVerifierBase diff --git a/src/backends/backendsCommon/LayerSupportBase.cpp b/src/backends/backendsCommon/LayerSupportBase.cpp index 9fcb496..71b1745 100644 --- a/src/backends/backendsCommon/LayerSupportBase.cpp +++ b/src/backends/backendsCommon/LayerSupportBase.cpp @@ -73,9 +73,7 @@ bool LayerSupportBase::IsConcatSupported(const std::vector in const OriginsDescriptor& descriptor, Optional reasonIfUnsupported) const { - ARMNN_NO_DEPRECATE_WARN_BEGIN - return IsMergerSupported(inputs, output, descriptor, reasonIfUnsupported); - ARMNN_NO_DEPRECATE_WARN_END + return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported); } bool LayerSupportBase::IsConstantSupported(const TensorInfo& output, @@ -286,7 +284,7 @@ bool LayerSupportBase::IsMergerSupported(const std::vector in const OriginsDescriptor& descriptor, Optional reasonIfUnsupported) const { - return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported); + return IsConcatSupported(inputs, output, descriptor, reasonIfUnsupported); } bool LayerSupportBase::IsMinimumSupported(const TensorInfo& input0, diff --git a/src/backends/backendsCommon/WorkloadData.cpp b/src/backends/backendsCommon/WorkloadData.cpp index ea84c0b..9bb95f6 100644 --- a/src/backends/backendsCommon/WorkloadData.cpp +++ b/src/backends/backendsCommon/WorkloadData.cpp @@ -378,26 +378,26 @@ void SplitterQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const } //--------------------------------------------------------------- -void MergerQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const +void ConcatQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const { - ValidateNumOutputs(workloadInfo, "MergerQueueDescriptor", 1); + ValidateNumOutputs(workloadInfo, 
"ConcatQueueDescriptor", 1); if (m_Inputs.size() <= 0) { - throw InvalidArgumentException("MergerQueueDescriptor: At least one input needs to be provided."); + throw InvalidArgumentException("ConcatQueueDescriptor: At least one input needs to be provided."); } if (m_Outputs.size() <= 0) { - throw InvalidArgumentException("MergerQueueDescriptor: At least one output needs to be provided."); + throw InvalidArgumentException("ConcatQueueDescriptor: At least one output needs to be provided."); } if (workloadInfo.m_InputTensorInfos.size() <= 0) { - throw InvalidArgumentException("MergerQueueDescriptor: At least one TensorInfo input needs to be provided."); + throw InvalidArgumentException("ConcatQueueDescriptor: At least one TensorInfo input needs to be provided."); } if (workloadInfo.m_OutputTensorInfos.size() <= 0) { - throw InvalidArgumentException("MergerQueueDescriptor: At least one TensorInfo output needs to be provided."); + throw InvalidArgumentException("ConcatQueueDescriptor: At least one TensorInfo output needs to be provided."); } if(m_Parameters.GetConcatAxis() > workloadInfo.m_InputTensorInfos[0].GetShape().GetNumDimensions()) @@ -413,7 +413,7 @@ void MergerQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const if (workloadInfo.m_InputTensorInfos.size() != m_ViewOrigins.size()) { throw InvalidArgumentException( - "MergerQueueDescriptor: Number of split windows " + "ConcatQueueDescriptor: Number of split windows " "has to match number of workloadInfo.m_InputTensorInfos. " "Number of windows: " + to_string(m_ViewOrigins.size()) + @@ -428,7 +428,7 @@ void MergerQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const ViewOrigin const& e = m_ViewOrigins[w]; if (e.m_Origin.size() != outputDims) { - throw InvalidArgumentException("MergerQueueDescriptor: Window origin have to " + throw InvalidArgumentException("ConcatQueueDescriptor: Window origin have to " "have the same dimensionality as the output tensor. 
" "Window origin (index: " + to_string(w) + ") has " + to_string(e.m_Origin.size()) + @@ -442,7 +442,7 @@ void MergerQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const if (e.m_Origin[i] + workloadInfo.m_InputTensorInfos[w].GetShape()[i] > workloadInfo.m_OutputTensorInfos[0].GetShape()[i]) { - throw InvalidArgumentException("MergerQueueDescriptor: Window extent coordinates have to " + throw InvalidArgumentException("ConcatQueueDescriptor: Window extent coordinates have to " "be smaller or equal than the size of the output in that coord."); } } @@ -463,11 +463,11 @@ void MergerQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const { ValidateDataTypes(workloadInfo.m_InputTensorInfos[i], supportedTypes, - "MergerQueueDescriptor"); + "ConcatQueueDescriptor"); } ValidateDataTypes(workloadInfo.m_OutputTensorInfos[0], {workloadInfo.m_InputTensorInfos[0].GetDataType()}, - "MergerQueueDescriptor"); + "ConcatQueueDescriptor"); } //--------------------------------------------------------------- diff --git a/src/backends/backendsCommon/WorkloadData.hpp b/src/backends/backendsCommon/WorkloadData.hpp index 689c6d2..3e33b94 100644 --- a/src/backends/backendsCommon/WorkloadData.hpp +++ b/src/backends/backendsCommon/WorkloadData.hpp @@ -9,6 +9,7 @@ #include +#include #include #include #include @@ -87,8 +88,8 @@ struct SplitterQueueDescriptor : QueueDescriptorWithParameters void Validate(const WorkloadInfo& workloadInfo) const; }; -// Merger layer workload data. -struct MergerQueueDescriptor : QueueDescriptorWithParameters +// Concat layer workload data. +struct ConcatQueueDescriptor : QueueDescriptorWithParameters { struct ViewOrigin { @@ -106,6 +107,9 @@ struct MergerQueueDescriptor : QueueDescriptorWithParameters void Validate(const WorkloadInfo& workloadInfo) const; }; +// Deprecated. Use ConcatQueueDescriptor instead +using MergerQueueDescriptor = ConcatQueueDescriptor; + // Activation layer workload data. 
struct ActivationQueueDescriptor : QueueDescriptorWithParameters { diff --git a/src/backends/backendsCommon/WorkloadDataFwd.hpp b/src/backends/backendsCommon/WorkloadDataFwd.hpp index 9fbd81b..abee316 100644 --- a/src/backends/backendsCommon/WorkloadDataFwd.hpp +++ b/src/backends/backendsCommon/WorkloadDataFwd.hpp @@ -12,7 +12,7 @@ template struct QueueDescriptorWithParameters; struct SoftmaxQueueDescriptor; struct SplitterQueueDescriptor; -struct MergerQueueDescriptor; +struct ConcatQueueDescriptor; struct ActivationQueueDescriptor; struct FullyConnectedQueueDescriptor; struct PermuteQueueDescriptor; diff --git a/src/backends/backendsCommon/WorkloadFactory.cpp b/src/backends/backendsCommon/WorkloadFactory.cpp index 7631071..f026e1e 100644 --- a/src/backends/backendsCommon/WorkloadFactory.cpp +++ b/src/backends/backendsCommon/WorkloadFactory.cpp @@ -512,9 +512,9 @@ bool IWorkloadFactory::IsLayerSupported(const BackendId& backendId, reason); break; } - case LayerType::Merger: + case LayerType::Concat: { - auto cLayer = boost::polymorphic_downcast(&layer); + auto cLayer = boost::polymorphic_downcast(&layer); // Get vector of all inputs. 
auto getTensorInfo = [&dataType](const InputSlot& slot) @@ -535,9 +535,9 @@ bool IWorkloadFactory::IsLayerSupported(const BackendId& backendId, const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo(); - ARMNN_NO_DEPRECATE_WARN_BEGIN - result = layerSupportObject->IsMergerSupported(inputPtrs, output, cLayer->GetParameters(), reason); - ARMNN_NO_DEPRECATE_WARN_END + result = layerSupportObject->IsConcatSupported(inputPtrs, output, cLayer->GetParameters(), reason); + + break; } case LayerType::Multiplication: @@ -816,7 +816,7 @@ std::unique_ptr IWorkloadFactory::CreateBatchToSpaceNd(const BatchToS return std::unique_ptr(); } -std::unique_ptr IWorkloadFactory::CreateConcat(const MergerQueueDescriptor& descriptor, +std::unique_ptr IWorkloadFactory::CreateConcat(const ConcatQueueDescriptor& descriptor, const WorkloadInfo& info) const { return std::unique_ptr(); diff --git a/src/backends/backendsCommon/WorkloadFactory.hpp b/src/backends/backendsCommon/WorkloadFactory.hpp index 0b0ba7d..11c36eb 100644 --- a/src/backends/backendsCommon/WorkloadFactory.hpp +++ b/src/backends/backendsCommon/WorkloadFactory.hpp @@ -61,7 +61,7 @@ public: virtual std::unique_ptr CreateBatchToSpaceNd(const BatchToSpaceNdQueueDescriptor& descriptor, const WorkloadInfo& Info) const; - virtual std::unique_ptr CreateConcat(const MergerQueueDescriptor& descriptor, + virtual std::unique_ptr CreateConcat(const ConcatQueueDescriptor& descriptor, const WorkloadInfo& info) const; virtual std::unique_ptr CreateConstant(const ConstantQueueDescriptor& descriptor, diff --git a/src/backends/backendsCommon/test/CMakeLists.txt b/src/backends/backendsCommon/test/CMakeLists.txt index 8050a0a..508fc77 100644 --- a/src/backends/backendsCommon/test/CMakeLists.txt +++ b/src/backends/backendsCommon/test/CMakeLists.txt @@ -30,7 +30,7 @@ list(APPEND armnnBackendsCommonUnitTests_sources LayerTests.hpp LstmTestImpl.hpp NormTestImpl.hpp - MergerTestImpl.hpp + ConcatTestImpl.hpp MockBackend.cpp MockBackend.hpp 
MockBackendId.hpp diff --git a/src/backends/backendsCommon/test/MergerTestImpl.hpp b/src/backends/backendsCommon/test/ConcatTestImpl.hpp similarity index 88% rename from src/backends/backendsCommon/test/MergerTestImpl.hpp rename to src/backends/backendsCommon/test/ConcatTestImpl.hpp index 8483cf0..ded3857 100644 --- a/src/backends/backendsCommon/test/MergerTestImpl.hpp +++ b/src/backends/backendsCommon/test/ConcatTestImpl.hpp @@ -18,8 +18,8 @@ namespace { template -INetworkPtr CreateMergerNetwork(const std::vector& inputShapes, - const TensorShape& outputShape, +INetworkPtr CreateConcatNetwork(const std::vector& inputShapes, + const TensorShape &outputShape, unsigned int concatAxis, const float qScale = 1.0f, const int32_t qOffset = 0) @@ -33,26 +33,24 @@ INetworkPtr CreateMergerNetwork(const std::vector& inputShapes, descriptor = CreateDescriptorForConcatenation(inputShapes.begin(), inputShapes.end(), concatAxis); - ARMNN_NO_DEPRECATE_WARN_BEGIN - IConnectableLayer* merger = net->AddMergerLayer(descriptor, "merger"); - ARMNN_NO_DEPRECATE_WARN_END + IConnectableLayer* concat = net->AddConcatLayer(descriptor, "concat"); for (unsigned int i = 0; i < inputShapes.size(); ++i) { TensorInfo inputTensorInfo(inputShapes[i], DataType, qScale, qOffset); IConnectableLayer* input = net->AddInputLayer(boost::numeric_cast(i)); - Connect(input, merger, inputTensorInfo, 0, i); + Connect(input, concat, inputTensorInfo, 0, i); } TensorInfo outputTensorInfo(outputShape, DataType, qScale, qOffset); IConnectableLayer* output = net->AddOutputLayer(0, "output"); - Connect(merger, output, outputTensorInfo, 0, 0); + Connect(concat, output, outputTensorInfo, 0, 0); return net; } template -void MergerDim0EndToEnd(const std::vector& backends) +void ConcatDim0EndToEnd(const std::vector& backends) { using namespace armnn; using T = ResolveType; @@ -62,7 +60,7 @@ void MergerDim0EndToEnd(const std::vector& backends) const TensorShape& outputShape = { 4, 3, 2, 2 }; // Builds up the structure of 
the network - INetworkPtr net = CreateMergerNetwork(inputShapes, outputShape, concatAxis); + INetworkPtr net = CreateConcatNetwork(inputShapes, outputShape, concatAxis); BOOST_TEST_CHECKPOINT("create a network"); @@ -116,7 +114,7 @@ void MergerDim0EndToEnd(const std::vector& backends) } template -void MergerDim1EndToEnd(const std::vector& backends) +void ConcatDim1EndToEnd(const std::vector& backends) { using namespace armnn; using T = ResolveType; @@ -126,7 +124,7 @@ void MergerDim1EndToEnd(const std::vector& backends) const TensorShape& outputShape = { 2, 6, 2, 2 }; // Builds up the structure of the network - INetworkPtr net = CreateMergerNetwork(inputShapes, outputShape, concatAxis); + INetworkPtr net = CreateConcatNetwork(inputShapes, outputShape, concatAxis); BOOST_TEST_CHECKPOINT("create a network"); @@ -180,7 +178,7 @@ void MergerDim1EndToEnd(const std::vector& backends) } template -void MergerDim2EndToEnd(const std::vector& backends) +void ConcatDim2EndToEnd(const std::vector& backends) { using namespace armnn; using T = ResolveType; @@ -190,7 +188,7 @@ void MergerDim2EndToEnd(const std::vector& backends) const TensorShape& outputShape = { 2, 3, 4, 2 }; // Builds up the structure of the network - INetworkPtr net = CreateMergerNetwork(inputShapes, outputShape, concatAxis); + INetworkPtr net = CreateConcatNetwork(inputShapes, outputShape, concatAxis); BOOST_TEST_CHECKPOINT("create a network"); @@ -244,7 +242,7 @@ void MergerDim2EndToEnd(const std::vector& backends) } template> -void MergerDim3EndToEnd(const std::vector& backends) +void ConcatDim3EndToEnd(const std::vector& backends) { using namespace armnn; @@ -253,7 +251,7 @@ void MergerDim3EndToEnd(const std::vector& backends) const TensorShape& outputShape = { 2, 3, 2, 4 }; // Builds up the structure of the network - INetworkPtr net = CreateMergerNetwork(inputShapes, outputShape, concatAxis); + INetworkPtr net = CreateConcatNetwork(inputShapes, outputShape, concatAxis); BOOST_TEST_CHECKPOINT("create a 
network"); diff --git a/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp b/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp index 3ff7376..7161464 100644 --- a/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp +++ b/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp @@ -135,19 +135,19 @@ struct DummyLayer }; template<> -struct DummyLayer +struct DummyLayer { DummyLayer() { armnn::OriginsDescriptor desc(2); - m_Layer = dummyGraph.AddLayer(desc, ""); + m_Layer = dummyGraph.AddLayer(desc, ""); } ~DummyLayer() { dummyGraph.EraseLayer(m_Layer); } - armnn::MergerLayer* m_Layer; + armnn::ConcatLayer* m_Layer; }; template<> @@ -322,6 +322,8 @@ DECLARE_LAYER_POLICY_2_PARAM(BatchNormalization) DECLARE_LAYER_POLICY_2_PARAM(BatchToSpaceNd) +DECLARE_LAYER_POLICY_2_PARAM(Concat) + DECLARE_LAYER_POLICY_1_PARAM(Constant) DECLARE_LAYER_POLICY_1_PARAM(ConvertFp16ToFp32) @@ -364,10 +366,6 @@ DECLARE_LAYER_POLICY_2_PARAM(Mean) DECLARE_LAYER_POLICY_1_PARAM(Merge) -ARMNN_NO_DEPRECATE_WARN_BEGIN -DECLARE_LAYER_POLICY_2_PARAM(Merger) -ARMNN_NO_DEPRECATE_WARN_END - DECLARE_LAYER_POLICY_1_PARAM(Minimum) DECLARE_LAYER_POLICY_1_PARAM(Multiplication) @@ -422,7 +420,7 @@ unsigned int GetNumOutputs(const armnn::Layer& layer) } template<> -unsigned int GetNumInputs(const armnn::Layer& layer) +unsigned int GetNumInputs(const armnn::Layer& layer) { boost::ignore_unused(layer); return 2; diff --git a/src/backends/backendsCommon/test/LayerTests.cpp b/src/backends/backendsCommon/test/LayerTests.cpp index c84a530..402e86d 100644 --- a/src/backends/backendsCommon/test/LayerTests.cpp +++ b/src/backends/backendsCommon/test/LayerTests.cpp @@ -1362,10 +1362,10 @@ LayerTestResult ConcatTest( ); std::vector wOrigin1 = {0, 0, 0}; //Extent of the window is defined by size of input[0]. 
- armnn::MergerQueueDescriptor::ViewOrigin window1(wOrigin1); + armnn::ConcatQueueDescriptor::ViewOrigin window1(wOrigin1); std::vector wOrigin2 = {2, 0, 0}; //Extent of the window is defined by size of input[1]. - armnn::MergerQueueDescriptor::ViewOrigin window2(wOrigin2); + armnn::ConcatQueueDescriptor::ViewOrigin window2(wOrigin2); std::unique_ptr outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo); @@ -1381,7 +1381,7 @@ LayerTestResult ConcatTest( workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo2.GetShape(), wOrigin2.data()) : workloadFactory.CreateTensorHandle(inputTensorInfo2); - armnn::MergerQueueDescriptor data; + armnn::ConcatQueueDescriptor data; armnn::WorkloadInfo info; AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get()); AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get()); @@ -3554,7 +3554,7 @@ void Concatenate( std::unique_ptr outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo); - armnn::MergerQueueDescriptor queueDescriptor; + armnn::ConcatQueueDescriptor queueDescriptor; armnn::OriginsDescriptor viewsDescriptor = CreateDescriptorForConcatenation(inputTensorInfos, concatDim); queueDescriptor.m_Parameters = viewsDescriptor; @@ -6625,10 +6625,10 @@ LayerTestResult ConcatUint8DifferentQParamsTest( inputTensorInfo2.SetQuantizationOffset(inputOffset2); std::vector wOrigin1 = { 0, 0, 0 }; //Extent of the window is defined by size of input[0]. - armnn::MergerQueueDescriptor::ViewOrigin window1(wOrigin1); + armnn::ConcatQueueDescriptor::ViewOrigin window1(wOrigin1); std::vector wOrigin2 = { 2, 0, 0 }; //Extent of the window is defined by size of input[1]. 
- armnn::MergerQueueDescriptor::ViewOrigin window2(wOrigin2); + armnn::ConcatQueueDescriptor::ViewOrigin window2(wOrigin2); std::unique_ptr outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo); @@ -6644,7 +6644,7 @@ LayerTestResult ConcatUint8DifferentQParamsTest( workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo2.GetShape(), wOrigin2.data()) : workloadFactory.CreateTensorHandle(inputTensorInfo2); - armnn::MergerQueueDescriptor data; + armnn::ConcatQueueDescriptor data; armnn::WorkloadInfo info; AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get()); AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get()); @@ -6759,10 +6759,10 @@ LayerTestResult ConcatUint8Test( ); std::vector wOrigin1 = { 0, 0, 0 }; //Extent of the window is defined by size of input[0]. - armnn::MergerQueueDescriptor::ViewOrigin window1(wOrigin1); + armnn::ConcatQueueDescriptor::ViewOrigin window1(wOrigin1); std::vector wOrigin2 = { 2, 0, 0 }; //Extent of the window is defined by size of input[1]. - armnn::MergerQueueDescriptor::ViewOrigin window2(wOrigin2); + armnn::ConcatQueueDescriptor::ViewOrigin window2(wOrigin2); std::unique_ptr outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo); @@ -6780,7 +6780,7 @@ LayerTestResult ConcatUint8Test( workloadFactory.CreateTensorHandle(inputTensorInfo2); - armnn::MergerQueueDescriptor data; + armnn::ConcatQueueDescriptor data; armnn::WorkloadInfo info; AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get()); AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get()); @@ -6892,10 +6892,10 @@ LayerTestResult ConcatUint16Test( })); std::vector wOrigin1 = { 0, 0, 0 }; //Extent of the window is defined by size of input[0]. - armnn::MergerQueueDescriptor::ViewOrigin window1(wOrigin1); + armnn::ConcatQueueDescriptor::ViewOrigin window1(wOrigin1); std::vector wOrigin2 = { 2, 0, 0 }; //Extent of the window is defined by size of input[1]. 
- armnn::MergerQueueDescriptor::ViewOrigin window2(wOrigin2); + armnn::ConcatQueueDescriptor::ViewOrigin window2(wOrigin2); std::unique_ptr outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo); @@ -6913,7 +6913,7 @@ LayerTestResult ConcatUint16Test( workloadFactory.CreateTensorHandle(inputTensorInfo2); - armnn::MergerQueueDescriptor data; + armnn::ConcatQueueDescriptor data; armnn::WorkloadInfo info; AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get()); AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get()); diff --git a/src/backends/backendsCommon/test/QuantizeHelper.hpp b/src/backends/backendsCommon/test/QuantizeHelper.hpp index b3b0631..a0c6553 100644 --- a/src/backends/backendsCommon/test/QuantizeHelper.hpp +++ b/src/backends/backendsCommon/test/QuantizeHelper.hpp @@ -2,6 +2,7 @@ // Copyright © 2017 Arm Ltd. All rights reserved. // SPDX-License-Identifier: MIT // + #pragma once #include diff --git a/src/backends/backendsCommon/test/WorkloadDataValidation.cpp b/src/backends/backendsCommon/test/WorkloadDataValidation.cpp index 067cca8..94bef9b 100644 --- a/src/backends/backendsCommon/test/WorkloadDataValidation.cpp +++ b/src/backends/backendsCommon/test/WorkloadDataValidation.cpp @@ -234,7 +234,7 @@ BOOST_AUTO_TEST_CASE(SplitterQueueDescriptor_Validate_WrongWindow) } -BOOST_AUTO_TEST_CASE(MergerQueueDescriptor_Validate_WrongWindow) +BOOST_AUTO_TEST_CASE(ConcatQueueDescriptor_Validate_WrongWindow) { constexpr unsigned int inputNum = 1; constexpr unsigned int inputChannels = 3; @@ -256,7 +256,7 @@ BOOST_AUTO_TEST_CASE(MergerQueueDescriptor_Validate_WrongWindow) inputTensorInfo = armnn::TensorInfo(4, inputShape, armnn::DataType::Float32); outputTensorInfo = armnn::TensorInfo(4, outputShape, armnn::DataType::Float32); - MergerQueueDescriptor invalidData; + ConcatQueueDescriptor invalidData; WorkloadInfo invalidInfo; AddInputToWorkload(invalidData, invalidInfo, inputTensorInfo, nullptr); @@ -264,7 +264,7 @@ 
BOOST_AUTO_TEST_CASE(MergerQueueDescriptor_Validate_WrongWindow) // Invalid, since it has only 3 dimensions while the input tensor is 4d. std::vector wOrigin = {0, 0, 0}; - armnn::MergerQueueDescriptor::ViewOrigin window(wOrigin); + armnn::ConcatQueueDescriptor::ViewOrigin window(wOrigin); invalidData.m_ViewOrigins.push_back(window); BOOST_TEST_INFO("Invalid argument exception is expected, because merge window dimensionality does not " @@ -273,18 +273,18 @@ BOOST_AUTO_TEST_CASE(MergerQueueDescriptor_Validate_WrongWindow) // Invalid, since window extends past the boundary of output tensor. std::vector wOrigin3 = {0, 0, 15, 0}; - armnn::MergerQueueDescriptor::ViewOrigin window3(wOrigin3); + armnn::ConcatQueueDescriptor::ViewOrigin window3(wOrigin3); invalidData.m_ViewOrigins[0] = window3; BOOST_TEST_INFO("Invalid argument exception is expected (wOrigin3[2]+ inputHeight > outputHeight"); BOOST_CHECK_THROW(RefConcatWorkload(invalidData, invalidInfo), armnn::InvalidArgumentException); std::vector wOrigin4 = {0, 0, 0, 0}; - armnn::MergerQueueDescriptor::ViewOrigin window4(wOrigin4); + armnn::ConcatQueueDescriptor::ViewOrigin window4(wOrigin4); invalidData.m_ViewOrigins[0] = window4; std::vector wOrigin5 = {1, 16, 20, 2}; - armnn::MergerQueueDescriptor::ViewOrigin window5(wOrigin4); + armnn::ConcatQueueDescriptor::ViewOrigin window5(wOrigin4); invalidData.m_ViewOrigins.push_back(window5); BOOST_TEST_INFO("Invalid exception due to number of merge windows not matching number of inputs."); diff --git a/src/backends/cl/ClLayerSupport.cpp b/src/backends/cl/ClLayerSupport.cpp index dfac289..78ac0e6 100644 --- a/src/backends/cl/ClLayerSupport.cpp +++ b/src/backends/cl/ClLayerSupport.cpp @@ -189,12 +189,43 @@ bool ClLayerSupport::IsBatchToSpaceNdSupported(const TensorInfo& input, bool ClLayerSupport::IsConcatSupported(const std::vector inputs, const TensorInfo& output, - const OriginsDescriptor& descriptor, + const ConcatDescriptor& descriptor, Optional reasonIfUnsupported) const 
{ - ARMNN_NO_DEPRECATE_WARN_BEGIN - return IsMergerSupported(inputs, output, descriptor, reasonIfUnsupported); - ARMNN_NO_DEPRECATE_WARN_END + if (descriptor.GetNumDimensions() <= descriptor.GetConcatAxis()) + { + SetValueChecked(reasonIfUnsupported, "Cl Concat: Concat axis > Number of dimensions."); + return false; + } + + unsigned int concatInnerAxis = (descriptor.GetNumDimensions() - descriptor.GetConcatAxis()) - 1; + if(concatInnerAxis < 3) // Width, height, or channels + { + FORWARD_WORKLOAD_VALIDATE_FUNC(ClConcatWorkloadValidate, + reasonIfUnsupported, + inputs, + output, + descriptor); + } + else if (concatInnerAxis == 3) + { + // We rely on the sub-tensor optimization to handle the batch dimension for 4D tensors. If we can't use + // sub-tensors for this then we can't support it. Here is where we check that the sub-tensors will work. + for (auto& input : inputs) + { + if (input && !output.IsTypeSpaceMatch(*input)) // Cannot use sub-tensors if the types are not same space + { + SetValueChecked(reasonIfUnsupported, "Cl Concat: Types and quantization parameters must match."); + return false; + } + } + return true; // Sub-tensors support concat along batch + } + else // > 4 dimensions not supported. 
+ { + SetValueChecked(reasonIfUnsupported, "Cl Concat: Maximum of 4 dimensions supported."); + return false; + } } bool ClLayerSupport::IsConstantSupported(const TensorInfo& output, @@ -442,43 +473,10 @@ bool ClLayerSupport::IsMemCopySupported(const TensorInfo &input, bool ClLayerSupport::IsMergerSupported(const std::vector inputs, const TensorInfo& output, - const OriginsDescriptor& descriptor, + const MergerDescriptor& descriptor, Optional reasonIfUnsupported) const { - if (descriptor.GetNumDimensions() <= descriptor.GetConcatAxis()) - { - SetValueChecked(reasonIfUnsupported, "Cl Merger: Concat axis > Number of dimensions."); - return false; - } - - unsigned int concatInnerAxis = (descriptor.GetNumDimensions() - descriptor.GetConcatAxis()) - 1; - if(concatInnerAxis < 3) // Width, height, or channels - { - FORWARD_WORKLOAD_VALIDATE_FUNC(ClConcatWorkloadValidate, - reasonIfUnsupported, - inputs, - output, - descriptor); - } - else if (concatInnerAxis == 3) - { - // We rely on the sub-tensor optimization to handle the batch dimension for 4D tensors. If we can't use - // sub-tensors for this then we can't support it. Here is where we check that the sub-tensors will work. - for (auto& input : inputs) - { - if (input && !output.IsTypeSpaceMatch(*input)) // Cannot use sub-tensors if the types are not same space - { - SetValueChecked(reasonIfUnsupported, "Cl Merger: Types and quantization parameters must match."); - return false; - } - } - return true; // Sub-tensors support concat along batch - } - else // > 4 dimensions not supported. 
- { - SetValueChecked(reasonIfUnsupported, "Cl Merger: Maximum of 4 dimensions supported."); - return false; - } + return IsConcatSupported(inputs, output, descriptor, reasonIfUnsupported); } bool ClLayerSupport::IsMinimumSupported(const TensorInfo& input0, diff --git a/src/backends/cl/ClLayerSupport.hpp b/src/backends/cl/ClLayerSupport.hpp index fca0bfd..64c1079 100644 --- a/src/backends/cl/ClLayerSupport.hpp +++ b/src/backends/cl/ClLayerSupport.hpp @@ -38,7 +38,7 @@ public: bool IsConcatSupported(const std::vector inputs, const TensorInfo& output, - const OriginsDescriptor& descriptor, + const ConcatDescriptor& descriptor, Optional reasonIfUnsupported = EmptyOptional()) const override; bool IsConstantSupported(const TensorInfo& output, @@ -146,7 +146,7 @@ public: ARMNN_DEPRECATED_MSG("Use IsConcatSupported instead") bool IsMergerSupported(const std::vector inputs, const TensorInfo& output, - const OriginsDescriptor& descriptor, + const MergerDescriptor& descriptor, Optional reasonIfUnsupported = EmptyOptional()) const override; bool IsMinimumSupported(const TensorInfo& input0, diff --git a/src/backends/cl/ClWorkloadFactory.cpp b/src/backends/cl/ClWorkloadFactory.cpp index e7cf191..214b88d 100644 --- a/src/backends/cl/ClWorkloadFactory.cpp +++ b/src/backends/cl/ClWorkloadFactory.cpp @@ -265,7 +265,7 @@ std::unique_ptr ClWorkloadFactory::CreateL2Normalization(const L2Norm return MakeWorkload(descriptor, info); } -std::unique_ptr ClWorkloadFactory::CreateConcat(const MergerQueueDescriptor& descriptor, +std::unique_ptr ClWorkloadFactory::CreateConcat(const ConcatQueueDescriptor& descriptor, const WorkloadInfo& info) const { return MakeWorkload(descriptor, info); diff --git a/src/backends/cl/ClWorkloadFactory.hpp b/src/backends/cl/ClWorkloadFactory.hpp index e00672f..2722171 100644 --- a/src/backends/cl/ClWorkloadFactory.hpp +++ b/src/backends/cl/ClWorkloadFactory.hpp @@ -97,7 +97,7 @@ public: std::unique_ptr CreateL2Normalization(const L2NormalizationQueueDescriptor& 
descriptor, const WorkloadInfo& info) const override; - std::unique_ptr CreateConcat(const MergerQueueDescriptor& descriptor, + std::unique_ptr CreateConcat(const ConcatQueueDescriptor& descriptor, const WorkloadInfo& info) const override; std::unique_ptr CreateConstant(const ConstantQueueDescriptor& descriptor, diff --git a/src/backends/cl/test/ClCreateWorkloadTests.cpp b/src/backends/cl/test/ClCreateWorkloadTests.cpp index 7f08b80..dc884e0 100644 --- a/src/backends/cl/test/ClCreateWorkloadTests.cpp +++ b/src/backends/cl/test/ClCreateWorkloadTests.cpp @@ -551,30 +551,30 @@ BOOST_AUTO_TEST_CASE(CreateSplitterFloat16Workload) } template -static void ClSplitterMergerTest() +static void ClSplitterConcatTest() { // Tests that it is possible to decide which output of the splitter layer - // should be lined to which input of the merger layer. + // should be linked to which input of the concat layer. // We test that is is possible to specify 0th output - // of the splitter to be the 1st input to the merger and the 1st output of the splitter to be 0th input - // of the merger. + // of the splitter to be the 1st input to the concat and the 1st output of the splitter to be 0th input + // of the concat. Graph graph; ClWorkloadFactory factory = ClWorkloadFactoryHelper::GetFactory(ClWorkloadFactoryHelper::GetMemoryManager()); auto workloads = - CreateSplitterMergerWorkloadTest + CreateSplitterConcatWorkloadTest (factory, graph); auto wlSplitter = std::move(workloads.first); - auto wlMerger = std::move(workloads.second); + auto wlConcat = std::move(workloads.second); //Checks that the index of inputs/outputs matches what we declared on InputDescriptor construction.
armnn::ClSubTensorHandle* sOut0 = dynamic_cast(wlSplitter->GetData().m_Outputs[0]); armnn::ClSubTensorHandle* sOut1 = dynamic_cast(wlSplitter->GetData().m_Outputs[1]); - armnn::ClSubTensorHandle* mIn0 = dynamic_cast(wlMerger->GetData().m_Inputs[0]); - armnn::ClSubTensorHandle* mIn1 = dynamic_cast(wlMerger->GetData().m_Inputs[1]); + armnn::ClSubTensorHandle* mIn0 = dynamic_cast(wlConcat->GetData().m_Inputs[0]); + armnn::ClSubTensorHandle* mIn1 = dynamic_cast(wlConcat->GetData().m_Inputs[1]); BOOST_TEST(sOut0); BOOST_TEST(sOut1); @@ -593,14 +593,14 @@ static void ClSplitterMergerTest() BOOST_TEST(validSubTensorParents); } -BOOST_AUTO_TEST_CASE(CreateSplitterMergerFloatWorkload) +BOOST_AUTO_TEST_CASE(CreateSplitterConcatFloatWorkload) { - ClSplitterMergerTest(); + ClSplitterConcatTest(); } -BOOST_AUTO_TEST_CASE(CreateSplitterMergerFloat16Workload) +BOOST_AUTO_TEST_CASE(CreateSplitterConcatFloat16Workload) { - ClSplitterMergerTest(); + ClSplitterConcatTest(); } @@ -801,17 +801,17 @@ BOOST_AUTO_TEST_CASE(CreateMeanUint8Workload) ClMeanWorkloadTest(); } -template -static void ClCreateMergerWorkloadTest(std::initializer_list outputShape, +template +static void ClCreateConcatWorkloadTest(std::initializer_list outputShape, unsigned int concatAxis) { Graph graph; ClWorkloadFactory factory = ClWorkloadFactoryHelper::GetFactory(ClWorkloadFactoryHelper::GetMemoryManager()); - auto workload = CreateMergerWorkloadTest(factory, graph, outputShape, concatAxis); + auto workload = CreateConcatWorkloadTest(factory, graph, outputShape, concatAxis); - MergerQueueDescriptor queueDescriptor = workload->GetData(); + ConcatQueueDescriptor queueDescriptor = workload->GetData(); auto inputHandle0 = boost::polymorphic_downcast(queueDescriptor.m_Inputs[0]); auto inputHandle1 = boost::polymorphic_downcast(queueDescriptor.m_Inputs[1]); auto outputHandle = boost::polymorphic_downcast(queueDescriptor.m_Outputs[0]); @@ -821,34 +821,34 @@ static void ClCreateMergerWorkloadTest(std::initializer_list 
outpu BOOST_TEST(CompareIClTensorHandleShape(outputHandle, outputShape)); } -BOOST_AUTO_TEST_CASE(CreateMergerDim0Float32Workload) +BOOST_AUTO_TEST_CASE(CreateConcatDim0Float32Workload) { - ClCreateMergerWorkloadTest({ 4, 3, 2, 5 }, 0); + ClCreateConcatWorkloadTest({ 4, 3, 2, 5 }, 0); } -BOOST_AUTO_TEST_CASE(CreateMergerDim1Float32Workload) +BOOST_AUTO_TEST_CASE(CreateConcatDim1Float32Workload) { - ClCreateMergerWorkloadTest({ 2, 6, 2, 5 }, 1); + ClCreateConcatWorkloadTest({ 2, 6, 2, 5 }, 1); } -BOOST_AUTO_TEST_CASE(CreateMergerDim3Float32Workload) +BOOST_AUTO_TEST_CASE(CreateConcatDim3Float32Workload) { - ClCreateMergerWorkloadTest({ 2, 3, 2, 10 }, 3); + ClCreateConcatWorkloadTest({ 2, 3, 2, 10 }, 3); } -BOOST_AUTO_TEST_CASE(CreateMergerDim0Uint8Workload) +BOOST_AUTO_TEST_CASE(CreateConcatDim0Uint8Workload) { - ClCreateMergerWorkloadTest({ 4, 3, 2, 5 }, 0); + ClCreateConcatWorkloadTest({ 4, 3, 2, 5 }, 0); } -BOOST_AUTO_TEST_CASE(CreateMergerDim1Uint8Workload) +BOOST_AUTO_TEST_CASE(CreateConcatDim1Uint8Workload) { - ClCreateMergerWorkloadTest({ 2, 6, 2, 5 }, 1); + ClCreateConcatWorkloadTest({ 2, 6, 2, 5 }, 1); } -BOOST_AUTO_TEST_CASE(CreateMergerDim3Uint8Workload) +BOOST_AUTO_TEST_CASE(CreateConcatDim3Uint8Workload) { - ClCreateMergerWorkloadTest({ 2, 3, 2, 10 }, 3); + ClCreateConcatWorkloadTest({ 2, 3, 2, 10 }, 3); } BOOST_AUTO_TEST_SUITE_END() diff --git a/src/backends/cl/test/ClEndToEndTests.cpp b/src/backends/cl/test/ClEndToEndTests.cpp index 9c010fc..3235f26 100644 --- a/src/backends/cl/test/ClEndToEndTests.cpp +++ b/src/backends/cl/test/ClEndToEndTests.cpp @@ -4,7 +4,7 @@ // #include -#include +#include #include #include @@ -19,34 +19,34 @@ BOOST_AUTO_TEST_CASE(ConstantUsage_Cl_Float32) ConstantUsageFloat32Test(defaultBackends); } -BOOST_AUTO_TEST_CASE(ClMergerEndToEndDim0Test) +BOOST_AUTO_TEST_CASE(ClConcatEndToEndDim0Test) { - MergerDim0EndToEnd(defaultBackends); + ConcatDim0EndToEnd(defaultBackends); } -BOOST_AUTO_TEST_CASE(ClMergerEndToEndDim0Uint8Test) 
+BOOST_AUTO_TEST_CASE(ClConcatEndToEndDim0Uint8Test) { - MergerDim0EndToEnd(defaultBackends); + ConcatDim0EndToEnd(defaultBackends); } -BOOST_AUTO_TEST_CASE(ClMergerEndToEndDim1Test) +BOOST_AUTO_TEST_CASE(ClConcatEndToEndDim1Test) { - MergerDim1EndToEnd(defaultBackends); + ConcatDim1EndToEnd(defaultBackends); } -BOOST_AUTO_TEST_CASE(ClMergerEndToEndDim1Uint8Test) +BOOST_AUTO_TEST_CASE(ClConcatEndToEndDim1Uint8Test) { - MergerDim1EndToEnd(defaultBackends); + ConcatDim1EndToEnd(defaultBackends); } -BOOST_AUTO_TEST_CASE(ClMergerEndToEndDim3Test) +BOOST_AUTO_TEST_CASE(ClConcatEndToEndDim3Test) { - MergerDim3EndToEnd(defaultBackends); + ConcatDim3EndToEnd(defaultBackends); } -BOOST_AUTO_TEST_CASE(ClMergerEndToEndDim3Uint8Test) +BOOST_AUTO_TEST_CASE(ClConcatEndToEndDim3Uint8Test) { - MergerDim3EndToEnd(defaultBackends); + ConcatDim3EndToEnd(defaultBackends); } BOOST_AUTO_TEST_CASE(ClGreaterSimpleEndToEndTest) diff --git a/src/backends/cl/workloads/ClConcatWorkload.cpp b/src/backends/cl/workloads/ClConcatWorkload.cpp index ee4ba6b..fb28946 100644 --- a/src/backends/cl/workloads/ClConcatWorkload.cpp +++ b/src/backends/cl/workloads/ClConcatWorkload.cpp @@ -19,7 +19,7 @@ using namespace armcomputetensorutils; namespace { -size_t CalcAxis(const MergerDescriptor& desc) +size_t CalcAxis(const OriginsDescriptor& desc) { return (desc.GetNumDimensions() - desc.GetConcatAxis()) - 1; } @@ -27,7 +27,7 @@ size_t CalcAxis(const MergerDescriptor& desc) arm_compute::Status ClConcatWorkloadValidate(const std::vector& inputs, const TensorInfo& output, - const MergerDescriptor& descriptor) + const OriginsDescriptor& descriptor) { std::vector aclInputs; for (const TensorInfo* input : inputs) @@ -46,8 +46,8 @@ arm_compute::Status ClConcatWorkloadValidate(const std::vector(descriptor, info) +ClConcatWorkload::ClConcatWorkload(const ConcatQueueDescriptor& descriptor, const WorkloadInfo& info) +: BaseWorkload(descriptor, info) { bool allInputsAreSubtensors = true; @@ -56,7 +56,7 @@ 
ClConcatWorkload::ClConcatWorkload(const MergerQueueDescriptor& descriptor, cons { if (!input->GetParent()) { - // Non sub-tensor input found so we need to execute the merger function + // Non sub-tensor input found so we need to execute the concat function allInputsAreSubtensors = false; break; } @@ -64,7 +64,7 @@ ClConcatWorkload::ClConcatWorkload(const MergerQueueDescriptor& descriptor, cons if (allInputsAreSubtensors) { - // Can skip configuring the merger function since it's not executed + // Can skip configuring the concat function since it's not executed return; } diff --git a/src/backends/cl/workloads/ClConcatWorkload.hpp b/src/backends/cl/workloads/ClConcatWorkload.hpp index 106193d..c34de9f 100644 --- a/src/backends/cl/workloads/ClConcatWorkload.hpp +++ b/src/backends/cl/workloads/ClConcatWorkload.hpp @@ -14,12 +14,12 @@ namespace armnn arm_compute::Status ClConcatWorkloadValidate(const std::vector& inputs, const TensorInfo& output, - const MergerDescriptor& descriptor); + const OriginsDescriptor& descriptor); -class ClConcatWorkload : public BaseWorkload +class ClConcatWorkload : public BaseWorkload { public: - ClConcatWorkload(const MergerQueueDescriptor& descriptor, const WorkloadInfo& info); + ClConcatWorkload(const ConcatQueueDescriptor& descriptor, const WorkloadInfo& info); void Execute() const override; diff --git a/src/backends/neon/NeonLayerSupport.cpp b/src/backends/neon/NeonLayerSupport.cpp index fd9aac5..e84eb79 100644 --- a/src/backends/neon/NeonLayerSupport.cpp +++ b/src/backends/neon/NeonLayerSupport.cpp @@ -146,12 +146,41 @@ bool NeonLayerSupport::IsBatchNormalizationSupported(const TensorInfo& input, bool NeonLayerSupport::IsConcatSupported(const std::vector inputs, const TensorInfo& output, - const OriginsDescriptor& descriptor, + const ConcatDescriptor& descriptor, Optional reasonIfUnsupported) const { - ARMNN_NO_DEPRECATE_WARN_BEGIN - return IsMergerSupported(inputs, output, descriptor, reasonIfUnsupported); - 
ARMNN_NO_DEPRECATE_WARN_END + if (descriptor.GetNumDimensions() <= descriptor.GetConcatAxis()) + { + SetValueChecked(reasonIfUnsupported, "Neon Concat: Concat axis > Number of dimensions."); + return false; + } + + unsigned int concatInnerAxis = (descriptor.GetNumDimensions() - descriptor.GetConcatAxis()) - 1; + if(concatInnerAxis < 3) // Width, height, or channels + { + FORWARD_WORKLOAD_VALIDATE_FUNC(NeonConcatWorkloadValidate, + reasonIfUnsupported, + inputs, + output, + descriptor); + } + else if (concatInnerAxis == 3) + { + for (auto& input : inputs) + { + if (input && !output.IsTypeSpaceMatch(*input)) // Cannot use sub-tensors if the types are not same space + { + SetValueChecked(reasonIfUnsupported, "Neon Concat: Types and quantization parameters must match."); + return false; + } + } + return true; // Sub-tensors support concat along batch + } + else // > 4 dimensions not supported. + { + SetValueChecked(reasonIfUnsupported, "Neon Concat: Maximum of 4 dimensions supported."); + return false; + } } bool NeonLayerSupport::IsConstantSupported(const TensorInfo& output, @@ -326,41 +355,10 @@ bool NeonLayerSupport::IsMemCopySupported(const TensorInfo &input, bool NeonLayerSupport::IsMergerSupported(const std::vector inputs, const TensorInfo& output, - const OriginsDescriptor& descriptor, + const MergerDescriptor& descriptor, Optional reasonIfUnsupported) const { - if (descriptor.GetNumDimensions() <= descriptor.GetConcatAxis()) - { - SetValueChecked(reasonIfUnsupported, "Neon Merger: Concat axis > Number of dimensions."); - return false; - } - - unsigned int concatInnerAxis = (descriptor.GetNumDimensions() - descriptor.GetConcatAxis()) - 1; - if(concatInnerAxis < 3) // Width, height, or channels - { - FORWARD_WORKLOAD_VALIDATE_FUNC(NeonConcatWorkloadValidate, - reasonIfUnsupported, - inputs, - output, - descriptor); - } - else if (concatInnerAxis == 3) - { - for (auto& input : inputs) - { - if (input && !output.IsTypeSpaceMatch(*input)) // Cannot use sub-tensors 
if the types are not same space - { - SetValueChecked(reasonIfUnsupported, "Neon Merger: Types and quantization parameters must match."); - return false; - } - } - return true; // Sub-tensors support concat along batch - } - else // > 4 dimensions not supported. - { - SetValueChecked(reasonIfUnsupported, "Neon Merger: Maximum of 4 dimensions supported."); - return false; - } + return IsConcatSupported(inputs, output, descriptor, reasonIfUnsupported); } bool NeonLayerSupport::IsMinimumSupported(const TensorInfo& input0, diff --git a/src/backends/neon/NeonLayerSupport.hpp b/src/backends/neon/NeonLayerSupport.hpp index 5e8e0bd..dd6ed79 100644 --- a/src/backends/neon/NeonLayerSupport.hpp +++ b/src/backends/neon/NeonLayerSupport.hpp @@ -33,7 +33,7 @@ public: bool IsConcatSupported(const std::vector inputs, const TensorInfo& output, - const OriginsDescriptor& descriptor, + const ConcatDescriptor& descriptor, Optional reasonIfUnsupported = EmptyOptional()) const override; bool IsConstantSupported(const TensorInfo& output, @@ -109,7 +109,7 @@ public: ARMNN_DEPRECATED_MSG("Use IsConcatSupported instead") bool IsMergerSupported(const std::vector inputs, const TensorInfo& output, - const OriginsDescriptor& descriptor, + const MergerDescriptor& descriptor, Optional reasonIfUnsupported = EmptyOptional()) const override; bool IsMinimumSupported(const TensorInfo& input0, diff --git a/src/backends/neon/NeonWorkloadFactory.cpp b/src/backends/neon/NeonWorkloadFactory.cpp index 3005dae..4b6225f 100644 --- a/src/backends/neon/NeonWorkloadFactory.cpp +++ b/src/backends/neon/NeonWorkloadFactory.cpp @@ -233,7 +233,7 @@ std::unique_ptr NeonWorkloadFactory::CreateL2Normalization(const L2No m_MemoryManager->GetIntraLayerManager()); } -std::unique_ptr NeonWorkloadFactory::CreateConcat(const MergerQueueDescriptor& descriptor, +std::unique_ptr NeonWorkloadFactory::CreateConcat(const ConcatQueueDescriptor& descriptor, const WorkloadInfo& info) const { return std::make_unique(descriptor, info); 
diff --git a/src/backends/neon/NeonWorkloadFactory.hpp b/src/backends/neon/NeonWorkloadFactory.hpp index 60dbb90..6a28d12 100644 --- a/src/backends/neon/NeonWorkloadFactory.hpp +++ b/src/backends/neon/NeonWorkloadFactory.hpp @@ -98,7 +98,7 @@ public: std::unique_ptr CreateL2Normalization(const L2NormalizationQueueDescriptor& descriptor, const WorkloadInfo& info) const override; - std::unique_ptr CreateConcat(const MergerQueueDescriptor& descriptor, + std::unique_ptr CreateConcat(const ConcatQueueDescriptor& descriptor, const WorkloadInfo& info) const override; std::unique_ptr CreateConstant(const ConstantQueueDescriptor& descriptor, diff --git a/src/backends/neon/test/NeonCreateWorkloadTests.cpp b/src/backends/neon/test/NeonCreateWorkloadTests.cpp index b41d62f..8382365 100644 --- a/src/backends/neon/test/NeonCreateWorkloadTests.cpp +++ b/src/backends/neon/test/NeonCreateWorkloadTests.cpp @@ -504,30 +504,30 @@ BOOST_AUTO_TEST_CASE(CreateSplitterWorkload) BOOST_TEST(TestNeonTensorHandleInfo(outputHandle2, TensorInfo({2, 7, 7}, DataType::Float32))); } -BOOST_AUTO_TEST_CASE(CreateSplitterMerger) +BOOST_AUTO_TEST_CASE(CreateSplitterConcat) { // Tests that it is possible to decide which output of the splitter layer - // should be lined to which input of the merger layer. + // should be linked to which input of the concat layer. // We tested that is is possible to specify 0th output - of the splitter to be the 1st input to the merger, and the 1st output of the splitter to be 0th input - of the merger. + of the splitter to be the 1st input to the concat, and the 1st output of the splitter to be 0th input + of the concat.
Graph graph; NeonWorkloadFactory factory = NeonWorkloadFactoryHelper::GetFactory(NeonWorkloadFactoryHelper::GetMemoryManager()); auto workloads = - CreateSplitterMergerWorkloadTest(factory, graph); auto wlSplitter = std::move(workloads.first); - auto wlMerger = std::move(workloads.second); + auto wlConcat = std::move(workloads.second); //Checks that the index of inputs/outputs matches what we declared on InputDescriptor construction. armnn::INeonTensorHandle* sOut0 = dynamic_cast(wlSplitter->GetData().m_Outputs[0]); armnn::INeonTensorHandle* sOut1 = dynamic_cast(wlSplitter->GetData().m_Outputs[1]); - armnn::INeonTensorHandle* mIn0 = dynamic_cast(wlMerger->GetData().m_Inputs[0]); - armnn::INeonTensorHandle* mIn1 = dynamic_cast(wlMerger->GetData().m_Inputs[1]); + armnn::INeonTensorHandle* mIn0 = dynamic_cast(wlConcat->GetData().m_Inputs[0]); + armnn::INeonTensorHandle* mIn1 = dynamic_cast(wlConcat->GetData().m_Inputs[1]); BOOST_TEST(sOut0); BOOST_TEST(sOut1); @@ -632,17 +632,17 @@ BOOST_AUTO_TEST_CASE(CreateL2NormalizationNhwcWorkload) NeonCreateL2NormalizationWorkloadTest(DataLayout::NHWC); } -template -static void NeonCreateMergerWorkloadTest(std::initializer_list outputShape, +template +static void NeonCreateConcatWorkloadTest(std::initializer_list outputShape, unsigned int concatAxis) { Graph graph; NeonWorkloadFactory factory = NeonWorkloadFactoryHelper::GetFactory(NeonWorkloadFactoryHelper::GetMemoryManager()); - auto workload = CreateMergerWorkloadTest(factory, graph, outputShape, concatAxis); + auto workload = CreateConcatWorkloadTest(factory, graph, outputShape, concatAxis); - MergerQueueDescriptor queueDescriptor = workload->GetData(); + ConcatQueueDescriptor queueDescriptor = workload->GetData(); auto inputHandle0 = boost::polymorphic_downcast(queueDescriptor.m_Inputs[0]); auto inputHandle1 = boost::polymorphic_downcast(queueDescriptor.m_Inputs[1]); auto outputHandle = boost::polymorphic_downcast(queueDescriptor.m_Outputs[0]); @@ -652,34 +652,34 @@ static 
void NeonCreateMergerWorkloadTest(std::initializer_list out BOOST_TEST(TestNeonTensorHandleInfo(outputHandle, TensorInfo(outputShape, DataType))); } -BOOST_AUTO_TEST_CASE(CreateMergerDim0Float32Workload) +BOOST_AUTO_TEST_CASE(CreateConcatDim0Float32Workload) { - NeonCreateMergerWorkloadTest({ 4, 3, 2, 5 }, 0); + NeonCreateConcatWorkloadTest({ 4, 3, 2, 5 }, 0); } -BOOST_AUTO_TEST_CASE(CreateMergerDim1Float32Workload) +BOOST_AUTO_TEST_CASE(CreateConcatDim1Float32Workload) { - NeonCreateMergerWorkloadTest({ 2, 6, 2, 5 }, 1); + NeonCreateConcatWorkloadTest({ 2, 6, 2, 5 }, 1); } -BOOST_AUTO_TEST_CASE(CreateMergerDim3Float32Workload) +BOOST_AUTO_TEST_CASE(CreateConcatDim3Float32Workload) { - NeonCreateMergerWorkloadTest({ 2, 3, 2, 10 }, 3); + NeonCreateConcatWorkloadTest({ 2, 3, 2, 10 }, 3); } -BOOST_AUTO_TEST_CASE(CreateMergerDim0Uint8Workload) +BOOST_AUTO_TEST_CASE(CreateConcatDim0Uint8Workload) { - NeonCreateMergerWorkloadTest({ 4, 3, 2, 5 }, 0); + NeonCreateConcatWorkloadTest({ 4, 3, 2, 5 }, 0); } -BOOST_AUTO_TEST_CASE(CreateMergerDim1Uint8Workload) +BOOST_AUTO_TEST_CASE(CreateConcatDim1Uint8Workload) { - NeonCreateMergerWorkloadTest({ 2, 6, 2, 5 }, 1); + NeonCreateConcatWorkloadTest({ 2, 6, 2, 5 }, 1); } -BOOST_AUTO_TEST_CASE(CreateMergerDim3Uint8Workload) +BOOST_AUTO_TEST_CASE(CreateConcatDim3Uint8Workload) { - NeonCreateMergerWorkloadTest({ 2, 3, 2, 10 }, 3); + NeonCreateConcatWorkloadTest({ 2, 3, 2, 10 }, 3); } BOOST_AUTO_TEST_SUITE_END() diff --git a/src/backends/neon/test/NeonEndToEndTests.cpp b/src/backends/neon/test/NeonEndToEndTests.cpp index 441efed..15f5fc3 100644 --- a/src/backends/neon/test/NeonEndToEndTests.cpp +++ b/src/backends/neon/test/NeonEndToEndTests.cpp @@ -4,7 +4,7 @@ // #include -#include +#include #include #include @@ -93,34 +93,34 @@ BOOST_AUTO_TEST_CASE(NeonGreaterBroadcastEndToEndUint8Test) expectedOutput); } -BOOST_AUTO_TEST_CASE(NeonMergerEndToEndDim0Test) +BOOST_AUTO_TEST_CASE(NeonConcatEndToEndDim0Test) { - 
MergerDim0EndToEnd(defaultBackends); + ConcatDim0EndToEnd(defaultBackends); } -BOOST_AUTO_TEST_CASE(NeonMergerEndToEndDim0Uint8Test) +BOOST_AUTO_TEST_CASE(NeonConcatEndToEndDim0Uint8Test) { - MergerDim0EndToEnd(defaultBackends); + ConcatDim0EndToEnd(defaultBackends); } -BOOST_AUTO_TEST_CASE(NeonMergerEndToEndDim1Test) +BOOST_AUTO_TEST_CASE(NeonConcatEndToEndDim1Test) { - MergerDim1EndToEnd(defaultBackends); + ConcatDim1EndToEnd(defaultBackends); } -BOOST_AUTO_TEST_CASE(NeonMergerEndToEndDim1Uint8Test) +BOOST_AUTO_TEST_CASE(NeonConcatEndToEndDim1Uint8Test) { - MergerDim1EndToEnd(defaultBackends); + ConcatDim1EndToEnd(defaultBackends); } -BOOST_AUTO_TEST_CASE(NeonMergerEndToEndDim3Test) +BOOST_AUTO_TEST_CASE(NeonConcatEndToEndDim3Test) { - MergerDim3EndToEnd(defaultBackends); + ConcatDim3EndToEnd(defaultBackends); } -BOOST_AUTO_TEST_CASE(NeonMergerEndToEndDim3Uint8Test) +BOOST_AUTO_TEST_CASE(NeonConcatEndToEndDim3Uint8Test) { - MergerDim3EndToEnd(defaultBackends); + ConcatDim3EndToEnd(defaultBackends); } BOOST_AUTO_TEST_CASE(NeonSplitDim0EndToEndTest) diff --git a/src/backends/neon/workloads/NeonConcatWorkload.cpp b/src/backends/neon/workloads/NeonConcatWorkload.cpp index 91f8109..8ea535b 100644 --- a/src/backends/neon/workloads/NeonConcatWorkload.cpp +++ b/src/backends/neon/workloads/NeonConcatWorkload.cpp @@ -19,7 +19,7 @@ using namespace armcomputetensorutils; namespace { -size_t CalcAxis(const armnn::MergerDescriptor& desc) +size_t CalcAxis(const armnn::OriginsDescriptor& desc) { return (desc.GetNumDimensions() - desc.GetConcatAxis()) - 1; } @@ -27,7 +27,7 @@ size_t CalcAxis(const armnn::MergerDescriptor& desc) arm_compute::Status NeonConcatWorkloadValidate(const std::vector& inputs, const TensorInfo& output, - const MergerDescriptor& descriptor) + const OriginsDescriptor& descriptor) { std::vector aclInputs; @@ -48,8 +48,8 @@ arm_compute::Status NeonConcatWorkloadValidate(const std::vector(descriptor, info) +const ConcatQueueDescriptor& descriptor, const 
WorkloadInfo& info) + : BaseWorkload(descriptor, info) { bool allInputsAreSubtensors = true; @@ -58,7 +58,7 @@ const MergerQueueDescriptor& descriptor, const WorkloadInfo& info) { if (!input->GetParent()) { - // Non sub-tensor input found so we need to execute the merger function + // Non sub-tensor input found so we need to execute the concat function allInputsAreSubtensors = false; break; } @@ -66,7 +66,7 @@ const MergerQueueDescriptor& descriptor, const WorkloadInfo& info) if (allInputsAreSubtensors) { - // Can skip configuring the merger function since it's not executed + // Can skip configuring the concat function since it's not executed return; } diff --git a/src/backends/neon/workloads/NeonConcatWorkload.hpp b/src/backends/neon/workloads/NeonConcatWorkload.hpp index e5a8d15..bf0733b 100644 --- a/src/backends/neon/workloads/NeonConcatWorkload.hpp +++ b/src/backends/neon/workloads/NeonConcatWorkload.hpp @@ -17,14 +17,14 @@ namespace armnn { arm_compute::Status NeonConcatWorkloadValidate(const std::vector& inputs, const TensorInfo& output, - const MergerDescriptor& descriptor); + const OriginsDescriptor& descriptor); -class NeonConcatWorkload : public BaseWorkload +class NeonConcatWorkload : public BaseWorkload { public: - NeonConcatWorkload(const MergerQueueDescriptor& descriptor, const WorkloadInfo& info); + NeonConcatWorkload(const ConcatQueueDescriptor& descriptor, const WorkloadInfo& info); - using BaseWorkload::BaseWorkload; + using BaseWorkload::BaseWorkload; void Execute() const override; private: diff --git a/src/backends/reference/RefLayerSupport.cpp b/src/backends/reference/RefLayerSupport.cpp index 2adcb10..9a691a6 100644 --- a/src/backends/reference/RefLayerSupport.cpp +++ b/src/backends/reference/RefLayerSupport.cpp @@ -316,18 +316,38 @@ bool RefLayerSupport::IsBatchToSpaceNdSupported(const TensorInfo& input, bool RefLayerSupport::IsConcatSupported(const std::vector inputs, const TensorInfo& output, - const OriginsDescriptor& descriptor, + const 
ConcatDescriptor& descriptor, Optional reasonIfUnsupported) const { - ARMNN_NO_DEPRECATE_WARN_BEGIN - return IsMergerSupported(inputs, output, descriptor, reasonIfUnsupported); - ARMNN_NO_DEPRECATE_WARN_END + ignore_unused(descriptor); + + bool supported = true; + std::array supportedTypes = + { + DataType::Float32, + DataType::QuantisedAsymm8, + DataType::QuantisedSymm16 + }; + + supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported, + "Reference concatenation: output type not supported"); + for (const TensorInfo* input : inputs) + { + supported &= CheckSupportRule(TypeAnyOf(*input, supportedTypes), reasonIfUnsupported, + "Reference concatenation: input type not supported"); + + supported &= CheckSupportRule(TypesAreEqual(*input, output), reasonIfUnsupported, + "Reference concatenation: input and output types mismatched."); + } + + return supported; } bool RefLayerSupport::IsConstantSupported(const TensorInfo& output, Optional reasonIfUnsupported) const { - std::array supportedTypes = { + std::array supportedTypes = + { DataType::Float32, DataType::Signed32, DataType::QuantisedAsymm8, @@ -815,31 +835,10 @@ bool RefLayerSupport::IsMeanSupported(const TensorInfo& input, bool RefLayerSupport::IsMergerSupported(const std::vector inputs, const TensorInfo& output, - const OriginsDescriptor& descriptor, + const MergerDescriptor& descriptor, Optional reasonIfUnsupported) const { - ignore_unused(descriptor); - - bool supported = true; - std::array supportedTypes = - { - DataType::Float32, - DataType::QuantisedAsymm8, - DataType::QuantisedSymm16 - }; - - supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported, - "Reference concatenation: output type not supported"); - for (const TensorInfo* input : inputs) - { - supported &= CheckSupportRule(TypeAnyOf(*input, supportedTypes), reasonIfUnsupported, - "Reference concatenation: input type not supported"); - - supported &= CheckSupportRule(TypesAreEqual(*input, output), 
reasonIfUnsupported, - "Reference concatenation: input and output types mismatched."); - } - - return supported; + return IsConcatSupported(inputs, output, descriptor, reasonIfUnsupported); } bool RefLayerSupport::IsMemCopySupported(const TensorInfo &input, diff --git a/src/backends/reference/RefLayerSupport.hpp b/src/backends/reference/RefLayerSupport.hpp index 944061d..8850c6e 100644 --- a/src/backends/reference/RefLayerSupport.hpp +++ b/src/backends/reference/RefLayerSupport.hpp @@ -38,7 +38,7 @@ public: bool IsConcatSupported(const std::vector inputs, const TensorInfo& output, - const OriginsDescriptor& descriptor, + const ConcatDescriptor& descriptor, Optional reasonIfUnsupported = EmptyOptional()) const override; bool IsConstantSupported(const TensorInfo& output, @@ -170,7 +170,7 @@ public: ARMNN_DEPRECATED_MSG("Use IsConcatSupported instead") bool IsMergerSupported(const std::vector inputs, const TensorInfo& output, - const OriginsDescriptor& descriptor, + const MergerDescriptor& descriptor, Optional reasonIfUnsupported = EmptyOptional()) const override; bool IsMemCopySupported(const TensorInfo& input, diff --git a/src/backends/reference/RefWorkloadFactory.cpp b/src/backends/reference/RefWorkloadFactory.cpp index 1243328..a21becd 100644 --- a/src/backends/reference/RefWorkloadFactory.cpp +++ b/src/backends/reference/RefWorkloadFactory.cpp @@ -245,7 +245,7 @@ std::unique_ptr RefWorkloadFactory::CreateL2Normalization(const L2Nor return MakeWorkload(descriptor, info); } -std::unique_ptr RefWorkloadFactory::CreateConcat(const MergerQueueDescriptor& descriptor, +std::unique_ptr RefWorkloadFactory::CreateConcat(const ConcatQueueDescriptor& descriptor, const WorkloadInfo& info) const { if (IsFloat16(info)) diff --git a/src/backends/reference/RefWorkloadFactory.hpp b/src/backends/reference/RefWorkloadFactory.hpp index 985b634..78f6bab 100644 --- a/src/backends/reference/RefWorkloadFactory.hpp +++ b/src/backends/reference/RefWorkloadFactory.hpp @@ -115,7 +115,7 @@ 
public: std::unique_ptr CreateL2Normalization(const L2NormalizationQueueDescriptor& descriptor, const WorkloadInfo& info) const override; - std::unique_ptr CreateConcat(const MergerQueueDescriptor& descriptor, + std::unique_ptr CreateConcat(const ConcatQueueDescriptor& descriptor, const WorkloadInfo& info) const override; std::unique_ptr CreateConstant(const ConstantQueueDescriptor& descriptor, diff --git a/src/backends/reference/backend.mk b/src/backends/reference/backend.mk index 1c7f8dc..9a4cf14 100644 --- a/src/backends/reference/backend.mk +++ b/src/backends/reference/backend.mk @@ -21,7 +21,7 @@ BACKEND_SOURCES := \ workloads/FullyConnected.cpp \ workloads/Gather.cpp \ workloads/Mean.cpp \ - workloads/Merger.cpp \ + workloads/Concatenate.cpp \ workloads/Pad.cpp \ workloads/Pooling2d.cpp \ workloads/RefActivationWorkload.cpp \ diff --git a/src/backends/reference/test/RefCreateWorkloadTests.cpp b/src/backends/reference/test/RefCreateWorkloadTests.cpp index 3f4cc75..a96d656 100644 --- a/src/backends/reference/test/RefCreateWorkloadTests.cpp +++ b/src/backends/reference/test/RefCreateWorkloadTests.cpp @@ -473,28 +473,28 @@ BOOST_AUTO_TEST_CASE(CreateSplitterUint8Workload) RefCreateSplitterWorkloadTest(); } -template -static void RefCreateSplitterMergerWorkloadTest() +template +static void RefCreateSplitterConcatWorkloadTest() { // Tests that it is possible to decide which output of the splitter layer - // should be lined to which input of the merger layer. + // should be lined to which input of the concat layer. // We tested that is is possible to specify 0th output - // of the splitter to be the 1st input to the merger and the 1st output of the splitter to be 0th input - // of the merger. + // of the splitter to be the 1st input to the concat and the 1st output of the splitter to be 0th input + // of the concat. 
Graph graph; RefWorkloadFactory factory; - auto workloads = CreateSplitterMergerWorkloadTest - (factory, graph); + auto workloads = CreateSplitterConcatWorkloadTest + (factory, graph); auto wlSplitter = std::move(workloads.first); - auto wlMerger = std::move(workloads.second); + auto wlConcat = std::move(workloads.second); //Checks that the index of inputs/outputs matches what we declared on InputDescriptor construction. armnn::CpuTensorHandle* sOut0 = dynamic_cast(wlSplitter->GetData().m_Outputs[0]); armnn::CpuTensorHandle* sOut1 = dynamic_cast(wlSplitter->GetData().m_Outputs[1]); - armnn::CpuTensorHandle* mIn0 = dynamic_cast(wlMerger->GetData().m_Inputs[0]); - armnn::CpuTensorHandle* mIn1 = dynamic_cast(wlMerger->GetData().m_Inputs[1]); + armnn::CpuTensorHandle* mIn0 = dynamic_cast(wlConcat->GetData().m_Inputs[0]); + armnn::CpuTensorHandle* mIn1 = dynamic_cast(wlConcat->GetData().m_Inputs[1]); BOOST_TEST(sOut0); BOOST_TEST(sOut1); @@ -506,14 +506,14 @@ static void RefCreateSplitterMergerWorkloadTest() BOOST_TEST(validDataPointers); } -BOOST_AUTO_TEST_CASE(CreateSplitterMergerFloat32) +BOOST_AUTO_TEST_CASE(CreateSplitterConcatFloat32) { - RefCreateSplitterMergerWorkloadTest(); + RefCreateSplitterConcatWorkloadTest(); } -BOOST_AUTO_TEST_CASE(CreateSplitterMergerUint8) +BOOST_AUTO_TEST_CASE(CreateSplitterConcatUint8) { - RefCreateSplitterMergerWorkloadTest(); + RefCreateSplitterConcatWorkloadTest(); } template @@ -671,13 +671,13 @@ BOOST_AUTO_TEST_CASE(CreateReshapeUint8Workload) RefCreateReshapeWorkloadTest(); } -template -static void RefCreateMergerWorkloadTest(const armnn::TensorShape& outputShape, +template +static void RefCreateConcatWorkloadTest(const armnn::TensorShape& outputShape, unsigned int concatAxis) { Graph graph; RefWorkloadFactory factory; - auto workload = CreateMergerWorkloadTest(factory, graph, outputShape, concatAxis); + auto workload = CreateConcatWorkloadTest(factory, graph, outputShape, concatAxis); CheckInputsOutput(std::move(workload), 
TensorInfo({ 2, 3, 2, 5 }, DataType), @@ -685,49 +685,49 @@ static void RefCreateMergerWorkloadTest(const armnn::TensorShape& outputShape, TensorInfo(outputShape, DataType)); } -BOOST_AUTO_TEST_CASE(CreateMergerDim0Float32Workload) +BOOST_AUTO_TEST_CASE(CreateConcatDim0Float32Workload) { - RefCreateMergerWorkloadTest({ 4, 3, 2, 5 }, 0); + RefCreateConcatWorkloadTest({ 4, 3, 2, 5 }, 0); } -BOOST_AUTO_TEST_CASE(CreateMergerDim0Uint8Workload) +BOOST_AUTO_TEST_CASE(CreateConcatDim0Uint8Workload) { - RefCreateMergerWorkloadTest({ 4, 3, 2, 5 }, 0); + RefCreateConcatWorkloadTest({ 4, 3, 2, 5 }, 0); } -BOOST_AUTO_TEST_CASE(CreateMergerDim0Uint16Workload) +BOOST_AUTO_TEST_CASE(CreateConcatDim0Uint16Workload) { - RefCreateMergerWorkloadTest({ 4, 3, 2, 5 }, 0); + RefCreateConcatWorkloadTest({ 4, 3, 2, 5 }, 0); } -BOOST_AUTO_TEST_CASE(CreateMergerDim1Float32Workload) +BOOST_AUTO_TEST_CASE(CreateConcatDim1Float32Workload) { - RefCreateMergerWorkloadTest({ 2, 6, 2, 5 }, 1); + RefCreateConcatWorkloadTest({ 2, 6, 2, 5 }, 1); } -BOOST_AUTO_TEST_CASE(CreateMergerDim1Uint8Workload) +BOOST_AUTO_TEST_CASE(CreateConcatDim1Uint8Workload) { - RefCreateMergerWorkloadTest({ 2, 6, 2, 5 }, 1); + RefCreateConcatWorkloadTest({ 2, 6, 2, 5 }, 1); } -BOOST_AUTO_TEST_CASE(CreateMergerDim2Float32Workload) +BOOST_AUTO_TEST_CASE(CreateConcatDim2Float32Workload) { - RefCreateMergerWorkloadTest({ 2, 3, 4, 5 }, 2); + RefCreateConcatWorkloadTest({ 2, 3, 4, 5 }, 2); } -BOOST_AUTO_TEST_CASE(CreateMergerDim2Uint8Workload) +BOOST_AUTO_TEST_CASE(CreateConcatDim2Uint8Workload) { - RefCreateMergerWorkloadTest({ 2, 3, 4, 5 }, 2); + RefCreateConcatWorkloadTest({ 2, 3, 4, 5 }, 2); } -BOOST_AUTO_TEST_CASE(CreateMergerDim3Float32Workload) +BOOST_AUTO_TEST_CASE(CreateConcatDim3Float32Workload) { - RefCreateMergerWorkloadTest({ 2, 3, 2, 10 }, 3); + RefCreateConcatWorkloadTest({ 2, 3, 2, 10 }, 3); } -BOOST_AUTO_TEST_CASE(CreateMergerDim3Uint8Workload) +BOOST_AUTO_TEST_CASE(CreateConcatDim3Uint8Workload) { - 
RefCreateMergerWorkloadTest({ 2, 3, 2, 10 }, 3); + RefCreateConcatWorkloadTest({ 2, 3, 2, 10 }, 3); } template diff --git a/src/backends/reference/test/RefEndToEndTests.cpp b/src/backends/reference/test/RefEndToEndTests.cpp index 6dacfab..2b7fb77 100644 --- a/src/backends/reference/test/RefEndToEndTests.cpp +++ b/src/backends/reference/test/RefEndToEndTests.cpp @@ -7,7 +7,7 @@ #include #include -#include +#include #include #include @@ -396,44 +396,44 @@ BOOST_AUTO_TEST_CASE(RefGreaterBroadcastEndToEndUint8Test) expectedOutput); } -BOOST_AUTO_TEST_CASE(RefMergerEndToEndDim0Test) +BOOST_AUTO_TEST_CASE(RefConcatEndToEndDim0Test) { - MergerDim0EndToEnd(defaultBackends); + ConcatDim0EndToEnd(defaultBackends); } -BOOST_AUTO_TEST_CASE(RefMergerEndToEndDim0Uint8Test) +BOOST_AUTO_TEST_CASE(RefConcatEndToEndDim0Uint8Test) { - MergerDim0EndToEnd(defaultBackends); + ConcatDim0EndToEnd(defaultBackends); } -BOOST_AUTO_TEST_CASE(RefMergerEndToEndDim1Test) +BOOST_AUTO_TEST_CASE(RefConcatEndToEndDim1Test) { - MergerDim1EndToEnd(defaultBackends); + ConcatDim1EndToEnd(defaultBackends); } -BOOST_AUTO_TEST_CASE(RefMergerEndToEndDim1Uint8Test) +BOOST_AUTO_TEST_CASE(RefConcatEndToEndDim1Uint8Test) { - MergerDim1EndToEnd(defaultBackends); + ConcatDim1EndToEnd(defaultBackends); } -BOOST_AUTO_TEST_CASE(RefMergerEndToEndDim2Test) +BOOST_AUTO_TEST_CASE(RefConcatEndToEndDim2Test) { - MergerDim2EndToEnd(defaultBackends); + ConcatDim2EndToEnd(defaultBackends); } -BOOST_AUTO_TEST_CASE(RefMergerEndToEndDim2Uint8Test) +BOOST_AUTO_TEST_CASE(RefConcatEndToEndDim2Uint8Test) { - MergerDim2EndToEnd(defaultBackends); + ConcatDim2EndToEnd(defaultBackends); } -BOOST_AUTO_TEST_CASE(RefMergerEndToEndDim3Test) +BOOST_AUTO_TEST_CASE(RefConcatEndToEndDim3Test) { - MergerDim3EndToEnd(defaultBackends); + ConcatDim3EndToEnd(defaultBackends); } -BOOST_AUTO_TEST_CASE(RefMergerEndToEndDim3Uint8Test) +BOOST_AUTO_TEST_CASE(RefConcatEndToEndDim3Uint8Test) { - MergerDim3EndToEnd(defaultBackends); + 
ConcatDim3EndToEnd(defaultBackends); } BOOST_AUTO_TEST_CASE(RefGatherFloatTest) diff --git a/src/backends/reference/workloads/CMakeLists.txt b/src/backends/reference/workloads/CMakeLists.txt index 508dfdc..3db0314 100644 --- a/src/backends/reference/workloads/CMakeLists.txt +++ b/src/backends/reference/workloads/CMakeLists.txt @@ -28,8 +28,8 @@ list(APPEND armnnRefBackendWorkloads_sources Gather.hpp LstmUtils.hpp Maximum.hpp - Merger.hpp - Merger.cpp + Concatenate.hpp + Concatenate.cpp Minimum.hpp Pad.cpp Pad.hpp diff --git a/src/backends/reference/workloads/Merger.cpp b/src/backends/reference/workloads/Concatenate.cpp similarity index 95% rename from src/backends/reference/workloads/Merger.cpp rename to src/backends/reference/workloads/Concatenate.cpp index e0b70ee..bb55424 100644 --- a/src/backends/reference/workloads/Merger.cpp +++ b/src/backends/reference/workloads/Concatenate.cpp @@ -3,7 +3,7 @@ // SPDX-License-Identifier: MIT // -#include "Merger.hpp" +#include "Concatenate.hpp" #include "RefWorkloadUtils.hpp" #include "Decoders.hpp" #include "Encoders.hpp" @@ -11,7 +11,7 @@ namespace armnn { -void Merger(const MergerQueueDescriptor& data) +void Concatenate(const ConcatQueueDescriptor &data) { const TensorInfo& outputInfo0 = GetTensorInfo(data.m_Outputs[0]); @@ -34,7 +34,7 @@ void Merger(const MergerQueueDescriptor& data) for (unsigned int viewIdx = 0; viewIdx < data.m_ViewOrigins.size(); ++viewIdx) { - MergerQueueDescriptor::ViewOrigin const& view = data.m_ViewOrigins[viewIdx]; + ConcatQueueDescriptor::ViewOrigin const& view = data.m_ViewOrigins[viewIdx]; //Split view extents are defined by the size of (the corresponding) input tensor. 
const TensorInfo& inputInfo = GetTensorInfo(data.m_Inputs[viewIdx]); diff --git a/src/backends/reference/workloads/Merger.hpp b/src/backends/reference/workloads/Concatenate.hpp similarity index 80% rename from src/backends/reference/workloads/Merger.hpp rename to src/backends/reference/workloads/Concatenate.hpp index eaa154d..ac82a87 100644 --- a/src/backends/reference/workloads/Merger.hpp +++ b/src/backends/reference/workloads/Concatenate.hpp @@ -10,5 +10,5 @@ namespace armnn { -void Merger(const MergerQueueDescriptor& data); +void Concatenate(const ConcatQueueDescriptor &data); } //namespace armnn diff --git a/src/backends/reference/workloads/RefConcatWorkload.cpp b/src/backends/reference/workloads/RefConcatWorkload.cpp index 9abddc0..152eae9 100644 --- a/src/backends/reference/workloads/RefConcatWorkload.cpp +++ b/src/backends/reference/workloads/RefConcatWorkload.cpp @@ -5,7 +5,7 @@ #include "RefConcatWorkload.hpp" -#include "Merger.hpp" +#include "Concatenate.hpp" #include "Profiling.hpp" @@ -15,7 +15,7 @@ namespace armnn void RefConcatWorkload::Execute() const { ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefConcatWorkload_Execute"); - Merger(m_Data); + Concatenate(m_Data); } } //namespace armnn diff --git a/src/backends/reference/workloads/RefConcatWorkload.hpp b/src/backends/reference/workloads/RefConcatWorkload.hpp index 9fc9c7e..7d0b6b7 100644 --- a/src/backends/reference/workloads/RefConcatWorkload.hpp +++ b/src/backends/reference/workloads/RefConcatWorkload.hpp @@ -11,10 +11,10 @@ namespace armnn { -class RefConcatWorkload : public BaseWorkload +class RefConcatWorkload : public BaseWorkload { public: - using BaseWorkload::BaseWorkload; + using BaseWorkload::BaseWorkload; virtual void Execute() const override; }; diff --git a/src/backends/reference/workloads/RefWorkloads.hpp b/src/backends/reference/workloads/RefWorkloads.hpp index 20649d9..6ffec2b 100644 --- a/src/backends/reference/workloads/RefWorkloads.hpp +++ 
b/src/backends/reference/workloads/RefWorkloads.hpp @@ -38,7 +38,7 @@ #include "RefPooling2dUint8Workload.hpp" #include "BatchNormImpl.hpp" #include "Activation.hpp" -#include "Merger.hpp" +#include "Concatenate.hpp" #include "RefSpaceToBatchNdWorkload.hpp" #include "RefSplitterFloat32Workload.hpp" #include "RefStridedSliceWorkload.hpp"