 	return !i915_active_fence_isset(&tl->last_request);
 }
 
-static void flush_submission(struct intel_gt *gt)
+static bool flush_submission(struct intel_gt *gt)
 {
 	struct intel_engine_cs *engine;
 	enum intel_engine_id id;
+	bool active = false;
 
 	if (!intel_gt_pm_is_awake(gt))
-		return;
+		return false;
 
 	for_each_engine(engine, gt, id) {
 		intel_engine_flush_submission(engine);
-		flush_work(&engine->retire_work);
+		active |= flush_work(&engine->retire_work);
+		active |= flush_work(&engine->wakeref.work);
 	}
+
+	return active;
 }
 
 static void engine_retire(struct work_struct *work)
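
flush_submission() now reports whether it had to synchronise with anyone: flush_work() returns true only if the work item was queued or running (and so was waited upon), so OR-ing the results across all engines tells the caller that a concurrent retirement worker (engine->retire_work) or the parking worker (engine->wakeref.work) was caught in flight. A minimal sketch of that flush-and-report contract; the example_drain() helper is illustrative only and not part of the driver:

	#include <linux/workqueue.h>
	#include <linux/sched.h>

	/*
	 * Sketch only: flush_work() returns true iff it waited for the
	 * work item, so looping until it returns false means we have
	 * observed a moment with no worker in flight.
	 */
	static void example_drain(struct work_struct *w)
	{
		while (flush_work(w))	/* true => a worker ran */
			cond_resched();
	}
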
 			}
 		}
 
-		active_count += !retire_requests(tl);
-		flush_submission(gt); /* sync with concurrent retirees */
+		if (!retire_requests(tl) || flush_submission(gt))
+			active_count++;
 
 		spin_lock(&timelines->lock);
 
 		/* Resume iteration after dropping lock */
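
The idle test becomes two-sided: a timeline only counts as idle if retire_requests() emptied it and flush_submission() saw no concurrent retiree; either observation failing bumps active_count so the caller makes another pass. The separate, unconditional flush after the count is dropped, since its result is now folded into the count itself. A rough sketch of how active_count drives the wait, assuming a hypothetical example_scan_timelines() standing in for the real per-timeline loop:

	#include <linux/jiffies.h>
	#include <linux/errno.h>

	static int example_scan_timelines(void);	/* hypothetical */

	static long example_wait_for_idle(long timeout)
	{
		unsigned long end = jiffies + timeout;
		int active;

		/* Any sign of activity forces another pass. */
		do {
			active = example_scan_timelines();
		} while (active && time_before(jiffies, end));

		return active ? -ETIME : 0;
	}
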
 void __intel_wakeref_init(struct intel_wakeref *wf,
 			  struct intel_runtime_pm *rpm,
 			  const struct intel_wakeref_ops *ops,
-			  struct lock_class_key *key)
+			  struct intel_wakeref_lockclass *key)
 {
 	wf->rpm = rpm;
 	wf->ops = ops;
-	__mutex_init(&wf->mutex, "wakeref", key);
+	__mutex_init(&wf->mutex, "wakeref.mutex", &key->mutex);
 	atomic_set(&wf->count, 0);
 	wf->wakeref = 0;
 
 	INIT_WORK(&wf->work, __intel_wakeref_put_work);
+	lockdep_init_map(&wf->work.lockdep_map, "wakeref.work", &key->work, 0);
 }
 
 int intel_wakeref_wait_for_idle(struct intel_wakeref *wf)
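
On the wakeref side, one lock_class_key is no longer enough: the mutex and the deferred-put worker now carry distinct lockdep classes ("wakeref.mutex" and "wakeref.work"), so lockdep can order flushing wf->work (which flush_submission() above now does for each engine) against the wakeref mutex taken inside the worker, without conflating the two. lockdep_init_map() simply re-keys the class that INIT_WORK() already assigned, and the call should compile away entirely when lockdep is disabled. The same pattern applied to an arbitrary work item, with illustrative my_obj names:

	#include <linux/workqueue.h>
	#include <linux/lockdep.h>

	struct my_obj {
		struct work_struct work;
	};

	static void my_obj_worker(struct work_struct *work)
	{
	}

	/*
	 * Sketch: give each call-site's work item its own lockdep
	 * class, mirroring __intel_wakeref_init() above.
	 */
	#define my_obj_init(obj, key) do {				\
		INIT_WORK(&(obj)->work, my_obj_worker);			\
		lockdep_init_map(&(obj)->work.lockdep_map,		\
				 "my_obj.work", (key), 0);		\
	} while (0)
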
 	struct work_struct work;
 };
 
+struct intel_wakeref_lockclass {
+	struct lock_class_key mutex;
+	struct lock_class_key work;
+};
+
 void __intel_wakeref_init(struct intel_wakeref *wf,
 			  struct intel_runtime_pm *rpm,
 			  const struct intel_wakeref_ops *ops,
-			  struct lock_class_key *key);
+			  struct intel_wakeref_lockclass *key);
 
 #define intel_wakeref_init(wf, rpm, ops) do {				\
-	static struct lock_class_key __key;				\
+	static struct intel_wakeref_lockclass __key;			\
 									\
 	__intel_wakeref_init((wf), (rpm), (ops), &__key);		\
 } while (0)
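
Call-sites are unchanged syntactically: intel_wakeref_init() still mints one static key object per use, it is simply a pair of keys now. Illustrative usage below; struct example and the example_get()/example_put() callbacks are hypothetical, with signatures assumed to match what intel_wakeref_ops expects:

	struct example {
		struct intel_wakeref wakeref;
	};

	static int example_get(struct intel_wakeref *wf) { return 0; }
	static int example_put(struct intel_wakeref *wf) { return 0; }

	static const struct intel_wakeref_ops example_ops = {
		.get = example_get,
		.put = example_put,
	};

	static void example_init(struct example *e, struct intel_runtime_pm *rpm)
	{
		/* One static intel_wakeref_lockclass is minted here. */
		intel_wakeref_init(&e->wakeref, rpm, &example_ops);
	}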