::arm_compute::DimensionRoundingType::FLOOR};
}
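+// Maps an Android NN FuseCode onto the corresponding ACL ActivationLayerInfo.
+// LU_BOUNDED_RELU computes min(a, max(b, x)), so RELU1 clamps to [-1, 1] and
+// RELU6 clamps to [0, 6].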
+::arm_compute::ActivationLayerInfo asActInfo(FuseCode act)
+{
+ if (act == ANEURALNETWORKS_FUSED_NONE)
+ {
+ return ::arm_compute::ActivationLayerInfo();
+ }
+ else if (act == ANEURALNETWORKS_FUSED_RELU)
+ {
+ return ::arm_compute::ActivationLayerInfo(
+ ::arm_compute::ActivationLayerInfo::ActivationFunction::RELU);
+ }
+ else if (act == ANEURALNETWORKS_FUSED_RELU1)
+ {
+ return ::arm_compute::ActivationLayerInfo(
+ ::arm_compute::ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, 1.0f, -1.0f);
+ }
+ else if (act == ANEURALNETWORKS_FUSED_RELU6)
+ {
+ return ::arm_compute::ActivationLayerInfo(
+ ::arm_compute::ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, 6.0f, 0.0f);
+ }
+ else
+ {
+ throw std::runtime_error("Unsupported fused activation code");
+ }
+}
+
struct IAllocationContext
{
virtual ~IAllocationContext() = default;
auto bias_alloc = ctx.at(::internal::tflite::operand::Index{param.bias_index});
const auto conv_info = asPadStrideInfo(param.padding, param.stride);
+ const auto fused_act = asActInfo(param.activation);
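+ // The activation is now applied by the convolution layer itself; the separate
+ // ActivationBuilder stage appended below is removed.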
if (::internal::arm_compute::isGpuMode())
{
std::unique_ptr<::arm_compute::CLConvolutionLayer> fn{new ::arm_compute::CLConvolutionLayer};
+ // WeightsInfo() and Size2D(1U, 1U) (dilation) must be passed explicitly with their
+ // default values so that fused_act can be supplied as the trailing argument.
fn->configure(CAST_CL(ifm_alloc), CAST_CL(ker_alloc), CAST_CL(bias_alloc), CAST_CL(ofm_alloc),
- conv_info);
+ conv_info, ::arm_compute::WeightsInfo(), ::arm_compute::Size2D(1U, 1U),
+ fused_act);
builder.append("Conv2D", std::move(fn));
}
else
{
std::unique_ptr<::arm_compute::NEConvolutionLayer> fn{new ::arm_compute::NEConvolutionLayer};
- fn->configure(ifm_alloc, ker_alloc, bias_alloc, ofm_alloc, conv_info);
+ // WeightsInfo() and Size2D(1U, 1U) (dilation) must be passed explicitly with their
+ // default values so that fused_act can be supplied as the trailing argument.
+ fn->configure(ifm_alloc, ker_alloc, bias_alloc, ofm_alloc, conv_info,
+ ::arm_compute::WeightsInfo(), ::arm_compute::Size2D(1U, 1U), fused_act);
builder.append("Conv2D", std::move(fn));
}
-
- ActivationBuilder{builder}.append(param.activation, ofm_alloc);
};
_builder.addStage(stage);
auto bias_alloc = ctx.at(::internal::tflite::operand::Index{param.bias_index});
const auto conv_info = asPadStrideInfo(param.padding, param.stride);
+ const auto fused_act = asActInfo(param.activation);
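+ // The activation is now applied by the convolution layer itself; the separate
+ // ActivationBuilder stage appended below is removed.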
if (::internal::arm_compute::isGpuMode())
{
std::unique_ptr<::arm_compute::CLConvolutionLayer> fn{new ::arm_compute::CLConvolutionLayer};
+ // WeightsInfo() and Size2D(1U, 1U) (dilation) must be passed explicitly with their
+ // default values so that fused_act can be supplied as the trailing argument.
fn->configure(CAST_CL(ifm_alloc), CAST_CL(ker_alloc), CAST_CL(bias_alloc), CAST_CL(ofm_alloc),
- conv_info);
+ conv_info, ::arm_compute::WeightsInfo(), ::arm_compute::Size2D(1U, 1U),
+ fused_act);
builder.append("Conv2D", std::move(fn));
}
else
{
std::unique_ptr<::arm_compute::NEConvolutionLayer> fn{new ::arm_compute::NEConvolutionLayer};
- fn->configure(ifm_alloc, ker_alloc, bias_alloc, ofm_alloc, conv_info);
+ // WeightsInfo() and Size2D(1U, 1U) (dilation) must be passed explicitly with their
+ // default values so that fused_act can be supplied as the trailing argument.
+ fn->configure(ifm_alloc, ker_alloc, bias_alloc, ofm_alloc, conv_info,
+ ::arm_compute::WeightsInfo(), ::arm_compute::Size2D(1U, 1U), fused_act);
builder.append("Conv2D", std::move(fn));
}
-
- ActivationBuilder{builder}.append(param.activation, ofm_alloc);
};
_builder.addStage(stage);