18 using namespace armcomputetensorutils;
33 std::vector<arm_compute::TensorInfo> aclInputs;
37 aclInputs.emplace_back(aclInputInfo);
39 const arm_compute::TensorInfo aclOutputInfo = BuildArmComputeTensorInfo(output);
40 std::vector<arm_compute::ITensorInfo*> aclInputPtrs;
41 for (arm_compute::ITensorInfo& input : aclInputs)
43 aclInputPtrs.emplace_back(&input);
46 size_t aclAxis = CalcAxis(descriptor);
47 return arm_compute::NEConcatenateLayer::validate(aclInputPtrs, &aclOutputInfo, aclAxis);
54 bool allInputsAreSubtensors =
true;
57 for (
auto input : descriptor.
m_Inputs)
59 if (!input->GetParent())
62 allInputsAreSubtensors =
false;
67 if (allInputsAreSubtensors)
73 std::vector<arm_compute::ITensor *> aclInputs;
76 arm_compute::ITensor& aclInput = boost::polymorphic_pointer_downcast<IAclTensorHandle>(input)->GetTensor();
77 aclInputs.emplace_back(&aclInput);
79 arm_compute::ITensor& output = boost::polymorphic_pointer_downcast<IAclTensorHandle>(
83 m_Layer.reset(
new arm_compute::NEConcatenateLayer());
87 m_Layer->configure(aclInputs, &output, aclAxis);
arm_compute::Status NeonConcatWorkloadValidate(const std::vector< const TensorInfo *> &inputs, const TensorInfo &output, const OriginsDescriptor &descriptor)
#define ARMNN_SCOPED_PROFILING_EVENT_NEON(name)
LayerDescriptor m_Parameters
const ConcatQueueDescriptor m_Data
uint32_t GetNumDimensions() const
Get the number of dimensions.
unsigned int GetConcatAxis() const
Get the concatenation axis value.
NeonConcatWorkload(const ConcatQueueDescriptor &descriptor, const WorkloadInfo &info)
std::vector< ITensorHandle * > m_Outputs
std::vector< ITensorHandle * > m_Inputs
void Execute() const override
An OriginsDescriptor for the ConcatLayer. Descriptor to configure the concatenation process.