drm/i915/guc: Use correct lock for accessing guc->mmio_msg
author: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Fri, 20 Nov 2020 09:56:35 +0000 (09:56 +0000)
committer: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Tue, 24 Nov 2020 09:10:37 +0000 (09:10 +0000)
Guc->mmio_msg is set under the guc->irq_lock in guc_get_mmio_msg so it
should be consumed under the same lock from guc_handle_mmio_msg.

I am not sure if the overall flow here makes complete sense but at least
the correct lock is now used.

Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Cc: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/20201120095636.1987395-1-tvrtko.ursulin@linux.intel.com
drivers/gpu/drm/i915/gt/uc/intel_uc.c

index 4e6070e..220626c 100644 (file)
@@ -175,19 +175,15 @@ static void guc_get_mmio_msg(struct intel_guc *guc)
 
 static void guc_handle_mmio_msg(struct intel_guc *guc)
 {
-       struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
-
        /* we need communication to be enabled to reply to GuC */
        GEM_BUG_ON(!guc_communication_enabled(guc));
 
-       if (!guc->mmio_msg)
-               return;
-
-       spin_lock_irq(&i915->irq_lock);
-       intel_guc_to_host_process_recv_msg(guc, &guc->mmio_msg, 1);
-       spin_unlock_irq(&i915->irq_lock);
-
-       guc->mmio_msg = 0;
+       spin_lock_irq(&guc->irq_lock);
+       if (guc->mmio_msg) {
+               intel_guc_to_host_process_recv_msg(guc, &guc->mmio_msg, 1);
+               guc->mmio_msg = 0;
+       }
+       spin_unlock_irq(&guc->irq_lock);
 }
 
 static void guc_reset_interrupts(struct intel_guc *guc)