{
"GpuAcc",
{
- {"TuningLevel", static_cast<int>(mCLTuner.tuning_mode)},
+ {"TuningLevel", static_cast<int>(ConvertTuningType(mCLTuner.tuning_mode))},
{"TuningFile", tune_path.c_str()}
}
}
armnn::Compute::GpuAcc) != mAccelType.end());
}
+ // Maps an inference-engine CLTuner mode onto the Arm NN GpuAcc tuning level
+ // passed to IGpuAccTunedParameters (see the TuningLevel backend option above).
+ //
+ // @param tuning_mode  CLTuner mode requested through the inference-engine API
+ //                     (READ / RAPID / NORMAL / EXHAUSTIVE).
+ // @return Matching armnn::IGpuAccTunedParameters::TuningLevel. An unexpected
+ //         mode logs an error and falls through to the initial Rapid value;
+ //         per the comment below, the interface framework validates the mode
+ //         before this is reached, so that path should not occur in practice.
+ armnn::IGpuAccTunedParameters::TuningLevel InferenceARMNN::ConvertTuningType(inference_engine_cltuner_mode_e tuning_mode)
+ {
+ // Default/fallback level; also the value used for both READ and RAPID modes.
+ armnn::IGpuAccTunedParameters::TuningLevel armnn_tuning_level = armnn::IGpuAccTunedParameters::TuningLevel::Rapid;
+
+ switch (tuning_mode) {
+ case INFERENCE_ENGINE_CLTUNER_READ:
+ // In case of CLTuner update mode, Rapid is used as UseOnly and it means read mode.
+ armnn_tuning_level = armnn::IGpuAccTunedParameters::TuningLevel::Rapid;
+ break;
+ case INFERENCE_ENGINE_CLTUNER_RAPID:
+ armnn_tuning_level = armnn::IGpuAccTunedParameters::TuningLevel::Rapid;
+ break;
+ case INFERENCE_ENGINE_CLTUNER_NORMAL:
+ armnn_tuning_level = armnn::IGpuAccTunedParameters::TuningLevel::Normal;
+ break;
+ case INFERENCE_ENGINE_CLTUNER_EXHAUSTIVE:
+ armnn_tuning_level = armnn::IGpuAccTunedParameters::TuningLevel::Exhaustive;
+ break;
+ default:
+ // There must not be this case because a given tuning_mode is checked
+ // by inference engine interface framework.
+ LOGE("Invalid tuning type.");
+ break;
+ }
+
+ // NOTE(review): message says "Tuning mode" but the value logged is the
+ // converted Arm NN tuning *level*, not the input mode.
+ LOGI("Tuning mode is %d", static_cast<int>(armnn_tuning_level));
+
+ return armnn_tuning_level;
+ }
+
int InferenceARMNN::Run(
std::vector<inference_engine_tensor_buffer> &input_buffers,
std::vector<inference_engine_tensor_buffer> &output_buffers)