#DEFAULT_CLFRAMEWORKREVISION="branches/arm_compute_20_02" # Release 20.02
#
# For pinning to a revision use this:
-DEFAULT_CLFRAMEWORKREVISION="6f8b17dedb7b53b550e6210fd1c78c3a3e086271" #[ONCPUML-7] arm_compute support for ND parallelism.
+DEFAULT_CLFRAMEWORKREVISION="4efe5dc9b39a87eface43e7468e08279976ae9ef" # COMPMID-3484: Regression in Transpose convolution Android R CTS test.
usage() {
echo "Usage: $CMD (Use the default clframework SHA)"
const arm_compute::TensorInfo aclInputInfo = armcomputetensorutils::BuildArmComputeTensorInfo(input);
const arm_compute::TensorInfo aclOutputInfo = armcomputetensorutils::BuildArmComputeTensorInfo(output);
- unsigned int aclAxis = ComputeSoftmaxAclAxis<unsigned int>(descriptor, input);
+ int aclAxis = ComputeSoftmaxAclAxis<int>(descriptor, input);
return arm_compute::NESoftmaxLayer::validate(&aclInputInfo, &aclOutputInfo, descriptor.m_Beta, aclAxis);
}
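
Since ComputeSoftmaxAclAxis is now instantiated with a signed type, the axis value handed to arm_compute may legitimately be negative. Below is a minimal, self-contained sketch of what such a helper might do; the struct names, the m_Axis field, and the wrap-around rule are assumptions for illustration, not taken from this patch.

#include <cassert>

// Hypothetical stand-ins for the ArmNN descriptor/tensor types used above (assumption).
struct SoftmaxDescriptorSketch { int m_Axis = -1; float m_Beta = 1.0f; };
struct TensorInfoSketch        { unsigned int m_NumDimensions = 4; };

// Sketch: normalise an ArmNN softmax axis (possibly negative, counting from
// the back) into the signed axis value passed to NESoftmaxLayer above.
template <typename T>
T ComputeSoftmaxAclAxisSketch(const SoftmaxDescriptorSketch& desc, const TensorInfoSketch& info)
{
    int axis = desc.m_Axis;
    const int rank = static_cast<int>(info.m_NumDimensions);
    if (axis < 0)
    {
        axis += rank; // e.g. axis -1 on a 4D tensor becomes 3
    }
    assert(axis >= 0 && axis < rank);
    return static_cast<T>(axis);
}

int main()
{
    SoftmaxDescriptorSketch desc;   // m_Axis == -1
    TensorInfoSketch info;          // 4 dimensions
    return ComputeSoftmaxAclAxisSketch<int>(desc, info) == 3 ? 0 : 1;
}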
arm_compute::ITensor& output = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
auto layer = std::make_unique<arm_compute::NESoftmaxLayer>(memoryManager);
- unsigned int aclAxis = ComputeSoftmaxAclAxis<unsigned int>(m_Data.m_Parameters, info.m_InputTensorInfos[0]);
+ int aclAxis = ComputeSoftmaxAclAxis<int>(m_Data.m_Parameters, info.m_InputTensorInfos[0]);
layer->configure(&input, &output, m_Data.m_Parameters.m_Beta, aclAxis);
m_SoftmaxLayer.reset(layer.release());
}
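
Seen from the arm_compute side, both calls above now take the axis as a signed integer. The following standalone check against NESoftmaxLayer::validate shows the same call shape in isolation; the tensor shape, data type, and axis value 0 are illustrative assumptions only.

#include "arm_compute/runtime/NEON/functions/NESoftmaxLayer.h"
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/TensorShape.h"
#include <iostream>

int main()
{
    // Example shape: 10 classes, batch of 3 (values chosen for illustration).
    const arm_compute::TensorShape shape(10U, 3U);
    const arm_compute::TensorInfo input(shape, 1, arm_compute::DataType::F32);
    const arm_compute::TensorInfo output(shape, 1, arm_compute::DataType::F32);

    // beta = 1.0f; the axis argument is signed in this arm_compute revision.
    const arm_compute::Status status =
        arm_compute::NESoftmaxLayer::validate(&input, &output, 1.0f, 0);

    const bool ok = (status.error_code() == arm_compute::ErrorCode::OK);
    std::cout << (ok ? "softmax configuration supported" : status.error_description()) << "\n";
    return ok ? 0 : 1;
}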