 bool intel_engines_are_idle(struct intel_gt *gt);
 bool intel_engine_is_idle(struct intel_engine_cs *engine);
-bool intel_engine_flush_submission(struct intel_engine_cs *engine);
+void intel_engine_flush_submission(struct intel_engine_cs *engine);
 void intel_engines_reset_default_submission(struct intel_gt *gt);
 	return idle;
 }
 
-bool intel_engine_flush_submission(struct intel_engine_cs *engine)
+void intel_engine_flush_submission(struct intel_engine_cs *engine)
 {
 	struct tasklet_struct *t = &engine->execlists.tasklet;
-	bool active = tasklet_is_locked(t);
 
 	if (__tasklet_is_scheduled(t)) {
 		local_bh_disable();
 		if (tasklet_trylock(t)) {
 			/* Run the tasklet directly to flush any pending submission */
 			if (__tasklet_is_enabled(t))
 				t->func(t->data);
 			tasklet_unlock(t);
 		}
 		local_bh_enable();
-		active = true;
 	}
 
 	/* Otherwise flush the tasklet if it was running on another cpu */
 	tasklet_unlock_wait(t);
-
-	return active;
 }
 	return !i915_active_fence_isset(&tl->last_request);
 }
 
-static bool flush_submission(struct intel_gt *gt)
+static void flush_submission(struct intel_gt *gt)
 {
 	struct intel_engine_cs *engine;
 	enum intel_engine_id id;
-	bool active = false;
 
 	if (!intel_gt_pm_is_awake(gt))
-		return false;
+		return;
 
 	for_each_engine(engine, gt, id) {
-		active |= intel_engine_flush_submission(engine);
-		active |= flush_work(&engine->retire_work);
+		intel_engine_flush_submission(engine);
+		flush_work(&engine->retire_work);
 	}
-
-	return active;
 }
 
 static void engine_retire(struct work_struct *work)
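
Similarly, the gt-level helper after the hunk above (post-image sketch; the
neighbouring engine_retire() worker is unchanged by this patch):

	static void flush_submission(struct intel_gt *gt)
	{
		struct intel_engine_cs *engine;
		enum intel_engine_id id;

		if (!intel_gt_pm_is_awake(gt))
			return;

		for_each_engine(engine, gt, id) {
			intel_engine_flush_submission(engine);
			flush_work(&engine->retire_work);
		}
	}
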
 	if (unlikely(timeout < 0))
 		timeout = -timeout, interruptible = false;
 
 	flush_submission(gt); /* kick the ksoftirqd tasklets */
-
 	spin_lock(&timelines->lock);
 	list_for_each_entry_safe(tl, tn, &timelines->active_list, link) {
 		if (!mutex_trylock(&tl->mutex)) {
 			active_count++; /* report busy to caller, try again? */
 			continue;
 		}
 		spin_unlock(&timelines->lock);
 		active_count += !retire_requests(tl);
 		mutex_unlock(&tl->mutex);
+		flush_submission(gt); /* sync with concurrent retirees */
 		spin_lock(&timelines->lock);
 		/* Resume iteration after dropping lock */
 		list_safe_reset_next(tl, tn, link);
 	}
 	spin_unlock(&timelines->lock);
 	list_for_each_entry_safe(tl, tn, &free, link)
 		__intel_timeline_free(&tl->kref);
-	if (flush_submission(gt))
-		active_count++;
-
 	return active_count ? timeout : 0;
 }
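
With the bool plumbing gone, idleness is reported solely through active_count:
the function returns the remaining timeout while any timeline is still busy and
0 once everything has retired, and a negative timeout on entry selects an
uninterruptible wait. A minimal sketch of the consumer loop, modelled on
intel_gt_wait_for_idle(); treat the exact signature as an assumption rather
than a quote from this patch:

	/* Sketch: how the 'active_count ? timeout : 0' return drives retries. */
	int intel_gt_wait_for_idle(struct intel_gt *gt, long timeout)
	{
		/* If the device is asleep, no requests are outstanding */
		if (!intel_gt_pm_is_awake(gt))
			return 0;

		while ((timeout = intel_gt_retire_requests_timeout(gt, timeout)) > 0) {
			cond_resched();
			if (signal_pending(current))
				return -EINTR;
		}

		return timeout;
	}
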