Merge drm-next into drm-intel-next-queued (this time for real)
author Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Thu, 1 Mar 2018 09:14:24 +0000 (11:14 +0200)
committer Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Thu, 1 Mar 2018 09:14:24 +0000 (11:14 +0200)
To pull in the HDCP changes, especially the wait_for changes to drm/i915
that Chris wants to build on top of.

Signed-off-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
69 files changed:
Documentation/gpu/todo.rst
drivers/gpu/drm/drm_vblank.c
drivers/gpu/drm/i915/Makefile
drivers/gpu/drm/i915/gvt/scheduler.c
drivers/gpu/drm/i915/gvt/scheduler.h
drivers/gpu/drm/i915/i915_debugfs.c
drivers/gpu/drm/i915/i915_drv.c
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/i915_gem.h
drivers/gpu/drm/i915/i915_gem_batch_pool.c
drivers/gpu/drm/i915/i915_gem_context.c
drivers/gpu/drm/i915/i915_gem_context.h
drivers/gpu/drm/i915/i915_gem_evict.c
drivers/gpu/drm/i915/i915_gem_execbuffer.c
drivers/gpu/drm/i915/i915_gem_gtt.c
drivers/gpu/drm/i915/i915_gem_gtt.h
drivers/gpu/drm/i915/i915_gem_object.h
drivers/gpu/drm/i915/i915_gem_render_state.c
drivers/gpu/drm/i915/i915_gem_render_state.h
drivers/gpu/drm/i915/i915_gem_shrinker.c
drivers/gpu/drm/i915/i915_gem_timeline.h
drivers/gpu/drm/i915/i915_gpu_error.c
drivers/gpu/drm/i915/i915_irq.c
drivers/gpu/drm/i915/i915_pci.c
drivers/gpu/drm/i915/i915_perf.c
drivers/gpu/drm/i915/i915_reg.h
drivers/gpu/drm/i915/i915_request.c [moved from drivers/gpu/drm/i915/i915_gem_request.c with 79% similarity]
drivers/gpu/drm/i915/i915_request.h [moved from drivers/gpu/drm/i915/i915_gem_request.h with 79% similarity]
drivers/gpu/drm/i915/i915_trace.h
drivers/gpu/drm/i915/i915_vma.c
drivers/gpu/drm/i915/i915_vma.h
drivers/gpu/drm/i915/intel_breadcrumbs.c
drivers/gpu/drm/i915/intel_color.c
drivers/gpu/drm/i915/intel_device_info.c
drivers/gpu/drm/i915/intel_device_info.h
drivers/gpu/drm/i915/intel_display.c
drivers/gpu/drm/i915/intel_display.h
drivers/gpu/drm/i915/intel_dp.c
drivers/gpu/drm/i915/intel_drv.h
drivers/gpu/drm/i915/intel_engine_cs.c
drivers/gpu/drm/i915/intel_fbc.c
drivers/gpu/drm/i915/intel_fbdev.c
drivers/gpu/drm/i915/intel_frontbuffer.c
drivers/gpu/drm/i915/intel_guc.c
drivers/gpu/drm/i915/intel_guc_submission.c
drivers/gpu/drm/i915/intel_lrc.c
drivers/gpu/drm/i915/intel_mocs.c
drivers/gpu/drm/i915/intel_mocs.h
drivers/gpu/drm/i915/intel_overlay.c
drivers/gpu/drm/i915/intel_pm.c
drivers/gpu/drm/i915/intel_psr.c
drivers/gpu/drm/i915/intel_ringbuffer.c
drivers/gpu/drm/i915/intel_ringbuffer.h
drivers/gpu/drm/i915/intel_runtime_pm.c
drivers/gpu/drm/i915/selftests/huge_pages.c
drivers/gpu/drm/i915/selftests/i915_gem_coherency.c
drivers/gpu/drm/i915/selftests/i915_gem_context.c
drivers/gpu/drm/i915/selftests/i915_gem_evict.c
drivers/gpu/drm/i915/selftests/i915_gem_object.c
drivers/gpu/drm/i915/selftests/i915_live_selftests.h
drivers/gpu/drm/i915/selftests/i915_mock_selftests.h
drivers/gpu/drm/i915/selftests/i915_request.c [moved from drivers/gpu/drm/i915/selftests/i915_gem_request.c with 87% similarity]
drivers/gpu/drm/i915/selftests/intel_hangcheck.c
drivers/gpu/drm/i915/selftests/mock_engine.c
drivers/gpu/drm/i915/selftests/mock_gem_device.c
drivers/gpu/drm/i915/selftests/mock_request.c
drivers/gpu/drm/i915/selftests/mock_request.h
include/drm/i915_pciids.h

Documentation/gpu/todo.rst
index 1a0a413..f4d0b34 100644 (file)
@@ -450,5 +450,12 @@ See drivers/gpu/drm/amd/display/TODO for tasks.
 
 Contact: Harry Wentland, Alex Deucher
 
+i915
+----
+
+- Our early/late pm callbacks could be removed in favour of using
+  device_link_add to model the dependency between i915 and snd_hda. See
+  https://dri.freedesktop.org/docs/drm/driver-api/device_link.html
+
 Outside DRM
 ===========
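
For context on the new TODO entry above: device_link_add() (documented at the URL in the hunk) makes one device a PM consumer of another, so the driver core orders suspend/resume and runtime PM between them instead of the hand-rolled early/late callbacks. A minimal sketch of the idea, assuming a hypothetical bind helper on the audio side (the helper name and surrounding wiring are illustrative, not the actual snd_hda/i915 code):

#include <linux/device.h>

/*
 * Illustrative only: make the HDA controller a runtime-PM consumer of the
 * i915 device, so resuming audio automatically resumes i915 first.
 */
static int hda_link_to_i915(struct device *hda_dev, struct device *i915_dev)
{
	struct device_link *link;

	link = device_link_add(hda_dev, i915_dev,
			       DL_FLAG_PM_RUNTIME | DL_FLAG_RPM_ACTIVE);
	if (!link)
		return -ENODEV;

	return 0;
}

With such a link in place, the PM dependency the TODO item wants modelled is handled by the driver core.
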
drivers/gpu/drm/drm_vblank.c
index c781cb4..51041ee 100644 (file)
@@ -1238,12 +1238,15 @@ void drm_crtc_vblank_on(struct drm_crtc *crtc)
 EXPORT_SYMBOL(drm_crtc_vblank_on);
 
 /**
- * drm_vblank_restore - estimated vblanks using timestamps and update it.
+ * drm_vblank_restore - estimate missed vblanks and update vblank count.
+ * @dev: DRM device
+ * @pipe: CRTC index
  *
  * Power management features can cause frame counter resets between vblank
- * disable and enable. Drivers can then use this function in their
- * &drm_crtc_funcs.enable_vblank implementation to estimate the vblanks since
- * the last &drm_crtc_funcs.disable_vblank.
+ * disable and enable. Drivers can use this function in their
+ * &drm_crtc_funcs.enable_vblank implementation to estimate missed vblanks since
+ * the last &drm_crtc_funcs.disable_vblank using timestamps and update the
+ * vblank counter.
  *
  * This function is the legacy version of drm_crtc_vblank_restore().
  */
@@ -1284,11 +1287,14 @@ void drm_vblank_restore(struct drm_device *dev, unsigned int pipe)
 EXPORT_SYMBOL(drm_vblank_restore);
 
 /**
- * drm_crtc_vblank_restore - estimate vblanks using timestamps and update it.
+ * drm_crtc_vblank_restore - estimate missed vblanks and update vblank count.
+ * @crtc: CRTC in question
+ *
  * Power management features can cause frame counter resets between vblank
- * disable and enable. Drivers can then use this function in their
- * &drm_crtc_funcs.enable_vblank implementation to estimate the vblanks since
- * the last &drm_crtc_funcs.disable_vblank.
+ * disable and enable. Drivers can use this function in their
+ * &drm_crtc_funcs.enable_vblank implementation to estimate missed vblanks since
+ * the last &drm_crtc_funcs.disable_vblank using timestamps and update the
+ * vblank counter.
  */
 void drm_crtc_vblank_restore(struct drm_crtc *crtc)
 {
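
To illustrate the usage the updated kernel-doc describes: drm_crtc_vblank_restore() is meant to be called from a driver's &drm_crtc_funcs.enable_vblank hook on hardware whose frame counter resets while power gated. A minimal sketch under that assumption (the foo_* driver names and fields are hypothetical):

#include <drm/drm_vblank.h>

/*
 * Hypothetical driver hook: recover vblanks missed while the frame counter
 * was reset by power management, then unmask the vblank interrupt.
 */
static int foo_enable_vblank(struct drm_crtc *crtc)
{
	struct foo_device *fdev = to_foo_device(crtc->dev);

	if (fdev->frame_counter_resets_in_pm)
		drm_crtc_vblank_restore(crtc);

	foo_unmask_vblank_irq(fdev, drm_crtc_index(crtc));

	return 0;
}
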
drivers/gpu/drm/i915/Makefile
index f55cc02..881d712 100644 (file)
@@ -63,13 +63,13 @@ i915-y += i915_cmd_parser.o \
          i915_gem.o \
          i915_gem_object.o \
          i915_gem_render_state.o \
-         i915_gem_request.o \
          i915_gem_shrinker.o \
          i915_gem_stolen.o \
          i915_gem_tiling.o \
          i915_gem_timeline.o \
          i915_gem_userptr.o \
          i915_gemfs.o \
+         i915_request.o \
          i915_trace_points.o \
          i915_vma.o \
          intel_breadcrumbs.o \
drivers/gpu/drm/i915/gvt/scheduler.c
index b55b358..92df1b4 100644 (file)
@@ -126,7 +126,7 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
        return 0;
 }
 
-static inline bool is_gvt_request(struct drm_i915_gem_request *req)
+static inline bool is_gvt_request(struct i915_request *req)
 {
        return i915_gem_context_force_single_submission(req->ctx);
 }
@@ -148,7 +148,7 @@ static void save_ring_hw_state(struct intel_vgpu *vgpu, int ring_id)
 static int shadow_context_status_change(struct notifier_block *nb,
                unsigned long action, void *data)
 {
-       struct drm_i915_gem_request *req = (struct drm_i915_gem_request *)data;
+       struct i915_request *req = data;
        struct intel_gvt *gvt = container_of(nb, struct intel_gvt,
                                shadow_ctx_notifier_block[req->engine->id]);
        struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
@@ -333,13 +333,13 @@ static int intel_gvt_generate_request(struct intel_vgpu_workload *workload)
        int ring_id = workload->ring_id;
        struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv;
        struct intel_engine_cs *engine = dev_priv->engine[ring_id];
-       struct drm_i915_gem_request *rq;
+       struct i915_request *rq;
        struct intel_vgpu *vgpu = workload->vgpu;
        struct intel_vgpu_submission *s = &vgpu->submission;
        struct i915_gem_context *shadow_ctx = s->shadow_ctx;
        int ret;
 
-       rq = i915_gem_request_alloc(dev_priv->engine[ring_id], shadow_ctx);
+       rq = i915_request_alloc(dev_priv->engine[ring_id], shadow_ctx);
        if (IS_ERR(rq)) {
                gvt_vgpu_err("fail to allocate gem request\n");
                ret = PTR_ERR(rq);
@@ -348,7 +348,7 @@ static int intel_gvt_generate_request(struct intel_vgpu_workload *workload)
 
        gvt_dbg_sched("ring id %d get i915 gem request %p\n", ring_id, rq);
 
-       workload->req = i915_gem_request_get(rq);
+       workload->req = i915_request_get(rq);
        ret = copy_workload_to_ring_buffer(workload);
        if (ret)
                goto err_unpin;
@@ -582,7 +582,7 @@ out:
        if (!IS_ERR_OR_NULL(workload->req)) {
                gvt_dbg_sched("ring id %d submit workload to i915 %p\n",
                                ring_id, workload->req);
-               i915_add_request(workload->req);
+               i915_request_add(workload->req);
                workload->dispatched = true;
        }
 
@@ -769,7 +769,7 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
                                workload->status = 0;
                }
 
-               i915_gem_request_put(fetch_and_zero(&workload->req));
+               i915_request_put(fetch_and_zero(&workload->req));
 
                if (!workload->status && !(vgpu->resetting_eng &
                                           ENGINE_MASK(ring_id))) {
@@ -886,7 +886,7 @@ static int workload_thread(void *priv)
 
                gvt_dbg_sched("ring id %d wait workload %p\n",
                                workload->ring_id, workload);
-               i915_wait_request(workload->req, 0, MAX_SCHEDULE_TIMEOUT);
+               i915_request_wait(workload->req, 0, MAX_SCHEDULE_TIMEOUT);
 
 complete:
                gvt_dbg_sched("will complete workload %p, status: %d\n",
drivers/gpu/drm/i915/gvt/scheduler.h
index ff175a9..bab4097 100644 (file)
@@ -80,7 +80,7 @@ struct intel_shadow_wa_ctx {
 struct intel_vgpu_workload {
        struct intel_vgpu *vgpu;
        int ring_id;
-       struct drm_i915_gem_request *req;
+       struct i915_request *req;
        /* if this workload has been dispatched to i915? */
        bool dispatched;
        bool shadowed;
drivers/gpu/drm/i915/i915_debugfs.c
index 05b4104..33fbf39 100644 (file)
@@ -519,7 +519,7 @@ static int i915_gem_object_info(struct seq_file *m, void *data)
        list_for_each_entry_reverse(file, &dev->filelist, lhead) {
                struct file_stats stats;
                struct drm_i915_file_private *file_priv = file->driver_priv;
-               struct drm_i915_gem_request *request;
+               struct i915_request *request;
                struct task_struct *task;
 
                mutex_lock(&dev->struct_mutex);
@@ -536,7 +536,7 @@ static int i915_gem_object_info(struct seq_file *m, void *data)
                 * Therefore, we need to protect this ->comm access using RCU.
                 */
                request = list_first_entry_or_null(&file_priv->mm.request_list,
-                                                  struct drm_i915_gem_request,
+                                                  struct i915_request,
                                                   client_link);
                rcu_read_lock();
                task = pid_task(request && request->ctx->pid ?
@@ -646,6 +646,56 @@ static int i915_gem_batch_pool_info(struct seq_file *m, void *data)
        return 0;
 }
 
+static void gen8_display_interrupt_info(struct seq_file *m)
+{
+       struct drm_i915_private *dev_priv = node_to_i915(m->private);
+       int pipe;
+
+       for_each_pipe(dev_priv, pipe) {
+               enum intel_display_power_domain power_domain;
+
+               power_domain = POWER_DOMAIN_PIPE(pipe);
+               if (!intel_display_power_get_if_enabled(dev_priv,
+                                                       power_domain)) {
+                       seq_printf(m, "Pipe %c power disabled\n",
+                                  pipe_name(pipe));
+                       continue;
+               }
+               seq_printf(m, "Pipe %c IMR:\t%08x\n",
+                          pipe_name(pipe),
+                          I915_READ(GEN8_DE_PIPE_IMR(pipe)));
+               seq_printf(m, "Pipe %c IIR:\t%08x\n",
+                          pipe_name(pipe),
+                          I915_READ(GEN8_DE_PIPE_IIR(pipe)));
+               seq_printf(m, "Pipe %c IER:\t%08x\n",
+                          pipe_name(pipe),
+                          I915_READ(GEN8_DE_PIPE_IER(pipe)));
+
+               intel_display_power_put(dev_priv, power_domain);
+       }
+
+       seq_printf(m, "Display Engine port interrupt mask:\t%08x\n",
+                  I915_READ(GEN8_DE_PORT_IMR));
+       seq_printf(m, "Display Engine port interrupt identity:\t%08x\n",
+                  I915_READ(GEN8_DE_PORT_IIR));
+       seq_printf(m, "Display Engine port interrupt enable:\t%08x\n",
+                  I915_READ(GEN8_DE_PORT_IER));
+
+       seq_printf(m, "Display Engine misc interrupt mask:\t%08x\n",
+                  I915_READ(GEN8_DE_MISC_IMR));
+       seq_printf(m, "Display Engine misc interrupt identity:\t%08x\n",
+                  I915_READ(GEN8_DE_MISC_IIR));
+       seq_printf(m, "Display Engine misc interrupt enable:\t%08x\n",
+                  I915_READ(GEN8_DE_MISC_IER));
+
+       seq_printf(m, "PCU interrupt mask:\t%08x\n",
+                  I915_READ(GEN8_PCU_IMR));
+       seq_printf(m, "PCU interrupt identity:\t%08x\n",
+                  I915_READ(GEN8_PCU_IIR));
+       seq_printf(m, "PCU interrupt enable:\t%08x\n",
+                  I915_READ(GEN8_PCU_IER));
+}
+
 static int i915_interrupt_info(struct seq_file *m, void *data)
 {
        struct drm_i915_private *dev_priv = node_to_i915(m->private);
@@ -709,6 +759,27 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
                           I915_READ(GEN8_PCU_IIR));
                seq_printf(m, "PCU interrupt enable:\t%08x\n",
                           I915_READ(GEN8_PCU_IER));
+       } else if (INTEL_GEN(dev_priv) >= 11) {
+               seq_printf(m, "Master Interrupt Control:  %08x\n",
+                          I915_READ(GEN11_GFX_MSTR_IRQ));
+
+               seq_printf(m, "Render/Copy Intr Enable:   %08x\n",
+                          I915_READ(GEN11_RENDER_COPY_INTR_ENABLE));
+               seq_printf(m, "VCS/VECS Intr Enable:      %08x\n",
+                          I915_READ(GEN11_VCS_VECS_INTR_ENABLE));
+               seq_printf(m, "GUC/SG Intr Enable:\t   %08x\n",
+                          I915_READ(GEN11_GUC_SG_INTR_ENABLE));
+               seq_printf(m, "GPM/WGBOXPERF Intr Enable: %08x\n",
+                          I915_READ(GEN11_GPM_WGBOXPERF_INTR_ENABLE));
+               seq_printf(m, "Crypto Intr Enable:\t   %08x\n",
+                          I915_READ(GEN11_CRYPTO_RSVD_INTR_ENABLE));
+               seq_printf(m, "GUnit/CSME Intr Enable:\t   %08x\n",
+                          I915_READ(GEN11_GUNIT_CSME_INTR_ENABLE));
+
+               seq_printf(m, "Display Interrupt Control:\t%08x\n",
+                          I915_READ(GEN11_DISPLAY_INT_CTL));
+
+               gen8_display_interrupt_info(m);
        } else if (INTEL_GEN(dev_priv) >= 8) {
                seq_printf(m, "Master Interrupt Control:\t%08x\n",
                           I915_READ(GEN8_MASTER_IRQ));
@@ -722,49 +793,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
                                   i, I915_READ(GEN8_GT_IER(i)));
                }
 
-               for_each_pipe(dev_priv, pipe) {
-                       enum intel_display_power_domain power_domain;
-
-                       power_domain = POWER_DOMAIN_PIPE(pipe);
-                       if (!intel_display_power_get_if_enabled(dev_priv,
-                                                               power_domain)) {
-                               seq_printf(m, "Pipe %c power disabled\n",
-                                          pipe_name(pipe));
-                               continue;
-                       }
-                       seq_printf(m, "Pipe %c IMR:\t%08x\n",
-                                  pipe_name(pipe),
-                                  I915_READ(GEN8_DE_PIPE_IMR(pipe)));
-                       seq_printf(m, "Pipe %c IIR:\t%08x\n",
-                                  pipe_name(pipe),
-                                  I915_READ(GEN8_DE_PIPE_IIR(pipe)));
-                       seq_printf(m, "Pipe %c IER:\t%08x\n",
-                                  pipe_name(pipe),
-                                  I915_READ(GEN8_DE_PIPE_IER(pipe)));
-
-                       intel_display_power_put(dev_priv, power_domain);
-               }
-
-               seq_printf(m, "Display Engine port interrupt mask:\t%08x\n",
-                          I915_READ(GEN8_DE_PORT_IMR));
-               seq_printf(m, "Display Engine port interrupt identity:\t%08x\n",
-                          I915_READ(GEN8_DE_PORT_IIR));
-               seq_printf(m, "Display Engine port interrupt enable:\t%08x\n",
-                          I915_READ(GEN8_DE_PORT_IER));
-
-               seq_printf(m, "Display Engine misc interrupt mask:\t%08x\n",
-                          I915_READ(GEN8_DE_MISC_IMR));
-               seq_printf(m, "Display Engine misc interrupt identity:\t%08x\n",
-                          I915_READ(GEN8_DE_MISC_IIR));
-               seq_printf(m, "Display Engine misc interrupt enable:\t%08x\n",
-                          I915_READ(GEN8_DE_MISC_IER));
-
-               seq_printf(m, "PCU interrupt mask:\t%08x\n",
-                          I915_READ(GEN8_PCU_IMR));
-               seq_printf(m, "PCU interrupt identity:\t%08x\n",
-                          I915_READ(GEN8_PCU_IIR));
-               seq_printf(m, "PCU interrupt enable:\t%08x\n",
-                          I915_READ(GEN8_PCU_IER));
+               gen8_display_interrupt_info(m);
        } else if (IS_VALLEYVIEW(dev_priv)) {
                seq_printf(m, "Display IER:\t%08x\n",
                           I915_READ(VLV_IER));
@@ -846,13 +875,35 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
                seq_printf(m, "Graphics Interrupt mask:         %08x\n",
                           I915_READ(GTIMR));
        }
-       if (INTEL_GEN(dev_priv) >= 6) {
+
+       if (INTEL_GEN(dev_priv) >= 11) {
+               seq_printf(m, "RCS Intr Mask:\t %08x\n",
+                          I915_READ(GEN11_RCS0_RSVD_INTR_MASK));
+               seq_printf(m, "BCS Intr Mask:\t %08x\n",
+                          I915_READ(GEN11_BCS_RSVD_INTR_MASK));
+               seq_printf(m, "VCS0/VCS1 Intr Mask:\t %08x\n",
+                          I915_READ(GEN11_VCS0_VCS1_INTR_MASK));
+               seq_printf(m, "VCS2/VCS3 Intr Mask:\t %08x\n",
+                          I915_READ(GEN11_VCS2_VCS3_INTR_MASK));
+               seq_printf(m, "VECS0/VECS1 Intr Mask:\t %08x\n",
+                          I915_READ(GEN11_VECS0_VECS1_INTR_MASK));
+               seq_printf(m, "GUC/SG Intr Mask:\t %08x\n",
+                          I915_READ(GEN11_GUC_SG_INTR_MASK));
+               seq_printf(m, "GPM/WGBOXPERF Intr Mask: %08x\n",
+                          I915_READ(GEN11_GPM_WGBOXPERF_INTR_MASK));
+               seq_printf(m, "Crypto Intr Mask:\t %08x\n",
+                          I915_READ(GEN11_CRYPTO_RSVD_INTR_MASK));
+               seq_printf(m, "Gunit/CSME Intr Mask:\t %08x\n",
+                          I915_READ(GEN11_GUNIT_CSME_INTR_MASK));
+
+       } else if (INTEL_GEN(dev_priv) >= 6) {
                for_each_engine(engine, dev_priv, id) {
                        seq_printf(m,
                                   "Graphics Interrupt mask (%s):       %08x\n",
                                   engine->name, I915_READ_IMR(engine));
                }
        }
+
        intel_runtime_pm_put(dev_priv);
 
        return 0;
@@ -4060,7 +4111,7 @@ i915_drop_caches_set(void *data, u64 val)
                                                     I915_WAIT_LOCKED);
 
                if (val & DROP_RETIRE)
-                       i915_gem_retire_requests(dev_priv);
+                       i915_retire_requests(dev_priv);
 
                mutex_unlock(&dev->struct_mutex);
        }
drivers/gpu/drm/i915/i915_drv.c
index d09f8e6..aaa861b 100644 (file)
@@ -808,7 +808,7 @@ static int i915_workqueues_init(struct drm_i915_private *dev_priv)
        /*
         * The i915 workqueue is primarily used for batched retirement of
         * requests (and thus managing bo) once the task has been completed
-        * by the GPU. i915_gem_retire_requests() is called directly when we
+        * by the GPU. i915_retire_requests() is called directly when we
         * need high-priority retirement, such as waiting for an explicit
         * bo.
         *
@@ -1992,7 +1992,7 @@ taint:
        add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
 error:
        i915_gem_set_wedged(i915);
-       i915_gem_retire_requests(i915);
+       i915_retire_requests(i915);
        intel_gpu_reset(i915, ALL_ENGINES);
        goto finish;
 }
@@ -2019,7 +2019,7 @@ static inline int intel_gt_reset_engine(struct drm_i915_private *dev_priv,
 int i915_reset_engine(struct intel_engine_cs *engine, unsigned int flags)
 {
        struct i915_gpu_error *error = &engine->i915->gpu_error;
-       struct drm_i915_gem_request *active_request;
+       struct i915_request *active_request;
        int ret;
 
        GEM_BUG_ON(!test_bit(I915_RESET_ENGINE + engine->id, &error->flags));
drivers/gpu/drm/i915/i915_drv.h
index 92883a4..7bbec55 100644 (file)
@@ -71,9 +71,9 @@
 #include "i915_gem_fence_reg.h"
 #include "i915_gem_object.h"
 #include "i915_gem_gtt.h"
-#include "i915_gem_request.h"
 #include "i915_gem_timeline.h"
 
+#include "i915_request.h"
 #include "i915_vma.h"
 
 #include "intel_gvt.h"
@@ -1231,7 +1231,7 @@ struct i915_gpu_error {
         *
         * #I915_WEDGED - If reset fails and we can no longer use the GPU,
         * we set the #I915_WEDGED bit. Prior to command submission, e.g.
-        * i915_gem_request_alloc(), this bit is checked and the sequence
+        * i915_request_alloc(), this bit is checked and the sequence
         * aborted (with -EIO reported to userspace) if set.
         */
        unsigned long flags;
@@ -2788,9 +2788,10 @@ intel_info(const struct drm_i915_private *dev_priv)
 /* Early gen2 have a totally busted CS tlb and require pinned batches. */
 #define HAS_BROKEN_CS_TLB(dev_priv)    (IS_I830(dev_priv) || IS_I845G(dev_priv))
 
-/* WaRsDisableCoarsePowerGating:skl,bxt */
+/* WaRsDisableCoarsePowerGating:skl,cnl */
 #define NEEDS_WaRsDisableCoarsePowerGating(dev_priv) \
-       (IS_SKL_GT3(dev_priv) || IS_SKL_GT4(dev_priv))
+       (IS_CANNONLAKE(dev_priv) || \
+        IS_SKL_GT3(dev_priv) || IS_SKL_GT4(dev_priv))
 
 /*
  * dp aux and gmbus irq on gen4 seems to be able to generate legacy interrupts
@@ -3329,7 +3330,7 @@ i915_gem_obj_finish_shmem_access(struct drm_i915_gem_object *obj)
 
 int __must_check i915_mutex_lock_interruptible(struct drm_device *dev);
 void i915_vma_move_to_active(struct i915_vma *vma,
-                            struct drm_i915_gem_request *req,
+                            struct i915_request *rq,
                             unsigned int flags);
 int i915_gem_dumb_create(struct drm_file *file_priv,
                         struct drm_device *dev,
@@ -3344,11 +3345,9 @@ void i915_gem_track_fb(struct drm_i915_gem_object *old,
 
 int __must_check i915_gem_set_global_seqno(struct drm_device *dev, u32 seqno);
 
-struct drm_i915_gem_request *
+struct i915_request *
 i915_gem_find_active_request(struct intel_engine_cs *engine);
 
-void i915_gem_retire_requests(struct drm_i915_private *dev_priv);
-
 static inline bool i915_reset_backoff(struct i915_gpu_error *error)
 {
        return unlikely(test_bit(I915_RESET_BACKOFF, &error->flags));
@@ -3380,7 +3379,7 @@ static inline u32 i915_reset_engine_count(struct i915_gpu_error *error,
        return READ_ONCE(error->reset_engine_count[engine->id]);
 }
 
-struct drm_i915_gem_request *
+struct i915_request *
 i915_gem_reset_prepare_engine(struct intel_engine_cs *engine);
 int i915_gem_reset_prepare(struct drm_i915_private *dev_priv);
 void i915_gem_reset(struct drm_i915_private *dev_priv);
@@ -3389,7 +3388,7 @@ void i915_gem_reset_finish(struct drm_i915_private *dev_priv);
 void i915_gem_set_wedged(struct drm_i915_private *dev_priv);
 bool i915_gem_unset_wedged(struct drm_i915_private *dev_priv);
 void i915_gem_reset_engine(struct intel_engine_cs *engine,
-                          struct drm_i915_gem_request *request);
+                          struct i915_request *request);
 
 void i915_gem_init_mmio(struct drm_i915_private *i915);
 int __must_check i915_gem_init(struct drm_i915_private *dev_priv);
@@ -4008,9 +4007,9 @@ wait_remaining_ms_from_jiffies(unsigned long timestamp_jiffies, int to_wait_ms)
 }
 
 static inline bool
-__i915_request_irq_complete(const struct drm_i915_gem_request *req)
+__i915_request_irq_complete(const struct i915_request *rq)
 {
-       struct intel_engine_cs *engine = req->engine;
+       struct intel_engine_cs *engine = rq->engine;
        u32 seqno;
 
        /* Note that the engine may have wrapped around the seqno, and
@@ -4019,7 +4018,7 @@ __i915_request_irq_complete(const struct drm_i915_gem_request *req)
         * this by kicking all the waiters before resetting the seqno
         * in hardware, and also signal the fence.
         */
-       if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &req->fence.flags))
+       if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &rq->fence.flags))
                return true;
 
        /* The request was dequeued before we were awoken. We check after
@@ -4028,14 +4027,14 @@ __i915_request_irq_complete(const struct drm_i915_gem_request *req)
         * the request execution are sufficient to ensure that a check
         * after reading the value from hw matches this request.
         */
-       seqno = i915_gem_request_global_seqno(req);
+       seqno = i915_request_global_seqno(rq);
        if (!seqno)
                return false;
 
        /* Before we do the heavier coherent read of the seqno,
         * check the value (hopefully) in the CPU cacheline.
         */
-       if (__i915_gem_request_completed(req, seqno))
+       if (__i915_request_completed(rq, seqno))
                return true;
 
        /* Ensure our read of the seqno is coherent so that we
@@ -4084,7 +4083,7 @@ __i915_request_irq_complete(const struct drm_i915_gem_request *req)
                        wake_up_process(b->irq_wait->tsk);
                spin_unlock_irq(&b->irq_lock);
 
-               if (__i915_gem_request_completed(req, seqno))
+               if (__i915_request_completed(rq, seqno))
                        return true;
        }
 
drivers/gpu/drm/i915/i915_gem.c
index 43afa1c..14c855b 100644 (file)
@@ -353,7 +353,7 @@ i915_gem_object_wait_fence(struct dma_fence *fence,
                           long timeout,
                           struct intel_rps_client *rps_client)
 {
-       struct drm_i915_gem_request *rq;
+       struct i915_request *rq;
 
        BUILD_BUG_ON(I915_WAIT_INTERRUPTIBLE != 0x1);
 
@@ -366,7 +366,7 @@ i915_gem_object_wait_fence(struct dma_fence *fence,
                                              timeout);
 
        rq = to_request(fence);
-       if (i915_gem_request_completed(rq))
+       if (i915_request_completed(rq))
                goto out;
 
        /*
@@ -385,16 +385,16 @@ i915_gem_object_wait_fence(struct dma_fence *fence,
         * forcing the clocks too high for the whole system, we only allow
         * each client to waitboost once in a busy period.
         */
-       if (rps_client && !i915_gem_request_started(rq)) {
+       if (rps_client && !i915_request_started(rq)) {
                if (INTEL_GEN(rq->i915) >= 6)
                        gen6_rps_boost(rq, rps_client);
        }
 
-       timeout = i915_wait_request(rq, flags, timeout);
+       timeout = i915_request_wait(rq, flags, timeout);
 
 out:
-       if (flags & I915_WAIT_LOCKED && i915_gem_request_completed(rq))
-               i915_gem_request_retire_upto(rq);
+       if (flags & I915_WAIT_LOCKED && i915_request_completed(rq))
+               i915_request_retire_upto(rq);
 
        return timeout;
 }
@@ -463,7 +463,7 @@ i915_gem_object_wait_reservation(struct reservation_object *resv,
 
 static void __fence_set_priority(struct dma_fence *fence, int prio)
 {
-       struct drm_i915_gem_request *rq;
+       struct i915_request *rq;
        struct intel_engine_cs *engine;
 
        if (dma_fence_is_signaled(fence) || !dma_fence_is_i915(fence))
@@ -2856,10 +2856,10 @@ static void i915_gem_context_mark_innocent(struct i915_gem_context *ctx)
        atomic_inc(&ctx->active_count);
 }
 
-struct drm_i915_gem_request *
+struct i915_request *
 i915_gem_find_active_request(struct intel_engine_cs *engine)
 {
-       struct drm_i915_gem_request *request, *active = NULL;
+       struct i915_request *request, *active = NULL;
        unsigned long flags;
 
        /* We are called by the error capture and reset at a random
@@ -2872,8 +2872,7 @@ i915_gem_find_active_request(struct intel_engine_cs *engine)
         */
        spin_lock_irqsave(&engine->timeline->lock, flags);
        list_for_each_entry(request, &engine->timeline->requests, link) {
-               if (__i915_gem_request_completed(request,
-                                                request->global_seqno))
+               if (__i915_request_completed(request, request->global_seqno))
                        continue;
 
                GEM_BUG_ON(request->engine != engine);
@@ -2906,10 +2905,10 @@ static bool engine_stalled(struct intel_engine_cs *engine)
  * Ensure irq handler finishes, and not run again.
  * Also return the active request so that we only search for it once.
  */
-struct drm_i915_gem_request *
+struct i915_request *
 i915_gem_reset_prepare_engine(struct intel_engine_cs *engine)
 {
-       struct drm_i915_gem_request *request = NULL;
+       struct i915_request *request = NULL;
 
        /*
         * During the reset sequence, we must prevent the engine from
@@ -2967,7 +2966,7 @@ i915_gem_reset_prepare_engine(struct intel_engine_cs *engine)
 int i915_gem_reset_prepare(struct drm_i915_private *dev_priv)
 {
        struct intel_engine_cs *engine;
-       struct drm_i915_gem_request *request;
+       struct i915_request *request;
        enum intel_engine_id id;
        int err = 0;
 
@@ -2986,7 +2985,7 @@ int i915_gem_reset_prepare(struct drm_i915_private *dev_priv)
        return err;
 }
 
-static void skip_request(struct drm_i915_gem_request *request)
+static void skip_request(struct i915_request *request)
 {
        void *vaddr = request->ring->vaddr;
        u32 head;
@@ -3005,7 +3004,7 @@ static void skip_request(struct drm_i915_gem_request *request)
        dma_fence_set_error(&request->fence, -EIO);
 }
 
-static void engine_skip_context(struct drm_i915_gem_request *request)
+static void engine_skip_context(struct i915_request *request)
 {
        struct intel_engine_cs *engine = request->engine;
        struct i915_gem_context *hung_ctx = request->ctx;
@@ -3029,9 +3028,9 @@ static void engine_skip_context(struct drm_i915_gem_request *request)
 }
 
 /* Returns the request if it was guilty of the hang */
-static struct drm_i915_gem_request *
+static struct i915_request *
 i915_gem_reset_request(struct intel_engine_cs *engine,
-                      struct drm_i915_gem_request *request)
+                      struct i915_request *request)
 {
        /* The guilty request will get skipped on a hung engine.
         *
@@ -3085,7 +3084,7 @@ i915_gem_reset_request(struct intel_engine_cs *engine,
 }
 
 void i915_gem_reset_engine(struct intel_engine_cs *engine,
-                          struct drm_i915_gem_request *request)
+                          struct i915_request *request)
 {
        /*
         * Make sure this write is visible before we re-enable the interrupt
@@ -3113,7 +3112,7 @@ void i915_gem_reset(struct drm_i915_private *dev_priv)
 
        lockdep_assert_held(&dev_priv->drm.struct_mutex);
 
-       i915_gem_retire_requests(dev_priv);
+       i915_retire_requests(dev_priv);
 
        for_each_engine(engine, dev_priv, id) {
                struct i915_gem_context *ctx;
@@ -3134,12 +3133,12 @@ void i915_gem_reset(struct drm_i915_private *dev_priv)
                 * empty request appears sufficient to paper over the glitch.
                 */
                if (intel_engine_is_idle(engine)) {
-                       struct drm_i915_gem_request *rq;
+                       struct i915_request *rq;
 
-                       rq = i915_gem_request_alloc(engine,
-                                                   dev_priv->kernel_context);
+                       rq = i915_request_alloc(engine,
+                                               dev_priv->kernel_context);
                        if (!IS_ERR(rq))
-                               __i915_add_request(rq, false);
+                               __i915_request_add(rq, false);
                }
        }
 
@@ -3174,21 +3173,21 @@ void i915_gem_reset_finish(struct drm_i915_private *dev_priv)
        }
 }
 
-static void nop_submit_request(struct drm_i915_gem_request *request)
+static void nop_submit_request(struct i915_request *request)
 {
        dma_fence_set_error(&request->fence, -EIO);
 
-       i915_gem_request_submit(request);
+       i915_request_submit(request);
 }
 
-static void nop_complete_submit_request(struct drm_i915_gem_request *request)
+static void nop_complete_submit_request(struct i915_request *request)
 {
        unsigned long flags;
 
        dma_fence_set_error(&request->fence, -EIO);
 
        spin_lock_irqsave(&request->engine->timeline->lock, flags);
-       __i915_gem_request_submit(request);
+       __i915_request_submit(request);
        intel_engine_init_global_seqno(request->engine, request->global_seqno);
        spin_unlock_irqrestore(&request->engine->timeline->lock, flags);
 }
@@ -3281,7 +3280,7 @@ bool i915_gem_unset_wedged(struct drm_i915_private *i915)
         */
        list_for_each_entry(tl, &i915->gt.timelines, link) {
                for (i = 0; i < ARRAY_SIZE(tl->engine); i++) {
-                       struct drm_i915_gem_request *rq;
+                       struct i915_request *rq;
 
                        rq = i915_gem_active_peek(&tl->engine[i].last_request,
                                                  &i915->drm.struct_mutex);
@@ -3330,7 +3329,7 @@ i915_gem_retire_work_handler(struct work_struct *work)
 
        /* Come back later if the device is busy... */
        if (mutex_trylock(&dev->struct_mutex)) {
-               i915_gem_retire_requests(dev_priv);
+               i915_retire_requests(dev_priv);
                mutex_unlock(&dev->struct_mutex);
        }
 
@@ -3684,7 +3683,7 @@ int i915_gem_wait_for_idle(struct drm_i915_private *i915, unsigned int flags)
                        if (ret)
                                return ret;
                }
-               i915_gem_retire_requests(i915);
+               i915_retire_requests(i915);
 
                ret = wait_for_engines(i915);
        } else {
@@ -4224,7 +4223,7 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_i915_file_private *file_priv = file->driver_priv;
        unsigned long recent_enough = jiffies - DRM_I915_THROTTLE_JIFFIES;
-       struct drm_i915_gem_request *request, *target = NULL;
+       struct i915_request *request, *target = NULL;
        long ret;
 
        /* ABI: return -EIO if already wedged */
@@ -4244,16 +4243,16 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
                target = request;
        }
        if (target)
-               i915_gem_request_get(target);
+               i915_request_get(target);
        spin_unlock(&file_priv->mm.lock);
 
        if (target == NULL)
                return 0;
 
-       ret = i915_wait_request(target,
+       ret = i915_request_wait(target,
                                I915_WAIT_INTERRUPTIBLE,
                                MAX_SCHEDULE_TIMEOUT);
-       i915_gem_request_put(target);
+       i915_request_put(target);
 
        return ret < 0 ? ret : 0;
 }
@@ -4367,7 +4366,7 @@ static __always_inline unsigned int
 __busy_set_if_active(const struct dma_fence *fence,
                     unsigned int (*flag)(unsigned int id))
 {
-       struct drm_i915_gem_request *rq;
+       struct i915_request *rq;
 
        /* We have to check the current hw status of the fence as the uABI
         * guarantees forward progress. We could rely on the idle worker
@@ -4380,8 +4379,8 @@ __busy_set_if_active(const struct dma_fence *fence,
                return 0;
 
        /* opencode to_request() in order to avoid const warnings */
-       rq = container_of(fence, struct drm_i915_gem_request, fence);
-       if (i915_gem_request_completed(rq))
+       rq = container_of(fence, struct i915_request, fence);
+       if (i915_request_completed(rq))
                return 0;
 
        return flag(rq->engine->uabi_id);
@@ -4526,8 +4525,7 @@ out:
 }
 
 static void
-frontbuffer_retire(struct i915_gem_active *active,
-                  struct drm_i915_gem_request *request)
+frontbuffer_retire(struct i915_gem_active *active, struct i915_request *request)
 {
        struct drm_i915_gem_object *obj =
                container_of(active, typeof(*obj), frontbuffer_write);
@@ -5161,9 +5159,9 @@ static int __intel_engines_record_defaults(struct drm_i915_private *i915)
                return PTR_ERR(ctx);
 
        for_each_engine(engine, i915, id) {
-               struct drm_i915_gem_request *rq;
+               struct i915_request *rq;
 
-               rq = i915_gem_request_alloc(engine, ctx);
+               rq = i915_request_alloc(engine, ctx);
                if (IS_ERR(rq)) {
                        err = PTR_ERR(rq);
                        goto out_ctx;
@@ -5173,7 +5171,7 @@ static int __intel_engines_record_defaults(struct drm_i915_private *i915)
                if (engine->init_context)
                        err = engine->init_context(rq);
 
-               __i915_add_request(rq, true);
+               __i915_request_add(rq, true);
                if (err)
                        goto err_active;
        }
@@ -5479,7 +5477,7 @@ i915_gem_load_init(struct drm_i915_private *dev_priv)
        if (!dev_priv->luts)
                goto err_vmas;
 
-       dev_priv->requests = KMEM_CACHE(drm_i915_gem_request,
+       dev_priv->requests = KMEM_CACHE(i915_request,
                                        SLAB_HWCACHE_ALIGN |
                                        SLAB_RECLAIM_ACCOUNT |
                                        SLAB_TYPESAFE_BY_RCU);
@@ -5612,7 +5610,7 @@ int i915_gem_freeze_late(struct drm_i915_private *dev_priv)
 void i915_gem_release(struct drm_device *dev, struct drm_file *file)
 {
        struct drm_i915_file_private *file_priv = file->driver_priv;
-       struct drm_i915_gem_request *request;
+       struct i915_request *request;
 
        /* Clean up our request list when the client is going away, so that
         * later retire_requests won't dereference our soon-to-be-gone
drivers/gpu/drm/i915/i915_gem.h
index e920dab..d9f0709 100644 (file)
 
 #ifdef CONFIG_DRM_I915_DEBUG_GEM
 #define GEM_BUG_ON(condition) do { if (unlikely((condition))) {        \
-               printk(KERN_ERR "GEM_BUG_ON(%s)\n", __stringify(condition)); \
+               pr_err("%s:%d GEM_BUG_ON(%s)\n", \
+                      __func__, __LINE__, __stringify(condition)); \
+               GEM_TRACE("%s:%d GEM_BUG_ON(%s)\n", \
+                         __func__, __LINE__, __stringify(condition)); \
                BUG(); \
                } \
        } while(0)
drivers/gpu/drm/i915/i915_gem_batch_pool.c
index c93005c..d3cbe84 100644 (file)
@@ -119,7 +119,7 @@ i915_gem_batch_pool_get(struct i915_gem_batch_pool *pool,
                        if (!reservation_object_test_signaled_rcu(resv, true))
                                break;
 
-                       i915_gem_retire_requests(pool->engine->i915);
+                       i915_retire_requests(pool->engine->i915);
                        GEM_BUG_ON(i915_gem_object_is_active(obj));
 
                        /*
drivers/gpu/drm/i915/i915_gem_context.c
index 3d75f48..a73340a 100644 (file)
@@ -219,7 +219,7 @@ static int assign_hw_id(struct drm_i915_private *dev_priv, unsigned *out)
                 * Flush any pending retires to hopefully release some
                 * stale contexts and try again.
                 */
-               i915_gem_retire_requests(dev_priv);
+               i915_retire_requests(dev_priv);
                ret = ida_simple_get(&dev_priv->contexts.hw_ida,
                                     0, MAX_CONTEXT_HW_ID, GFP_KERNEL);
                if (ret < 0)
@@ -590,28 +590,28 @@ int i915_gem_switch_to_kernel_context(struct drm_i915_private *dev_priv)
 
        lockdep_assert_held(&dev_priv->drm.struct_mutex);
 
-       i915_gem_retire_requests(dev_priv);
+       i915_retire_requests(dev_priv);
 
        for_each_engine(engine, dev_priv, id) {
-               struct drm_i915_gem_request *req;
+               struct i915_request *rq;
 
                if (engine_has_idle_kernel_context(engine))
                        continue;
 
-               req = i915_gem_request_alloc(engine, dev_priv->kernel_context);
-               if (IS_ERR(req))
-                       return PTR_ERR(req);
+               rq = i915_request_alloc(engine, dev_priv->kernel_context);
+               if (IS_ERR(rq))
+                       return PTR_ERR(rq);
 
                /* Queue this switch after all other activity */
                list_for_each_entry(timeline, &dev_priv->gt.timelines, link) {
-                       struct drm_i915_gem_request *prev;
+                       struct i915_request *prev;
                        struct intel_timeline *tl;
 
                        tl = &timeline->engine[engine->id];
                        prev = i915_gem_active_raw(&tl->last_request,
                                                   &dev_priv->drm.struct_mutex);
                        if (prev)
-                               i915_sw_fence_await_sw_fence_gfp(&req->submit,
+                               i915_sw_fence_await_sw_fence_gfp(&rq->submit,
                                                                 &prev->submit,
                                                                 I915_FENCE_GFP);
                }
@@ -623,7 +623,7 @@ int i915_gem_switch_to_kernel_context(struct drm_i915_private *dev_priv)
                 * but an extra layer of paranoia before we declare the system
                 * idle (on suspend etc) is advisable!
                 */
-               __i915_add_request(req, true);
+               __i915_request_add(rq, true);
        }
 
        return 0;
drivers/gpu/drm/i915/i915_gem_context.h
index a681c5b..7854262 100644 (file)
@@ -38,8 +38,8 @@ struct drm_file;
 
 struct drm_i915_private;
 struct drm_i915_file_private;
-struct drm_i915_gem_request;
 struct i915_hw_ppgtt;
+struct i915_request;
 struct i915_vma;
 struct intel_ring;
 
@@ -276,7 +276,7 @@ int i915_gem_context_open(struct drm_i915_private *i915,
                          struct drm_file *file);
 void i915_gem_context_close(struct drm_file *file);
 
-int i915_switch_context(struct drm_i915_gem_request *req);
+int i915_switch_context(struct i915_request *rq);
 int i915_gem_switch_to_kernel_context(struct drm_i915_private *dev_priv);
 
 void i915_gem_context_release(struct kref *ctx_ref);
drivers/gpu/drm/i915/i915_gem_evict.c
index 60ca4f0..54814a1 100644 (file)
@@ -168,7 +168,7 @@ i915_gem_evict_something(struct i915_address_space *vm,
         * retiring.
         */
        if (!(flags & PIN_NONBLOCK))
-               i915_gem_retire_requests(dev_priv);
+               i915_retire_requests(dev_priv);
        else
                phases[1] = NULL;
 
@@ -293,7 +293,7 @@ int i915_gem_evict_for_node(struct i915_address_space *vm,
         * retiring.
         */
        if (!(flags & PIN_NONBLOCK))
-               i915_gem_retire_requests(vm->i915);
+               i915_retire_requests(vm->i915);
 
        check_color = vm->mm.color_adjust;
        if (check_color) {
drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 4eb28e8..8c170db 100644 (file)
@@ -200,7 +200,7 @@ struct i915_execbuffer {
        struct i915_gem_context *ctx; /** context for building the request */
        struct i915_address_space *vm; /** GTT and vma for the request */
 
-       struct drm_i915_gem_request *request; /** our request to build */
+       struct i915_request *request; /** our request to build */
        struct i915_vma *batch; /** identity of the batch obj/vma */
 
        /** actual size of execobj[] as we may extend it for the cmdparser */
@@ -227,7 +227,7 @@ struct i915_execbuffer {
                bool has_fence : 1;
                bool needs_unfenced : 1;
 
-               struct drm_i915_gem_request *rq;
+               struct i915_request *rq;
                u32 *rq_cmd;
                unsigned int rq_size;
        } reloc_cache;
@@ -886,7 +886,7 @@ static void reloc_gpu_flush(struct reloc_cache *cache)
        i915_gem_object_unpin_map(cache->rq->batch->obj);
        i915_gem_chipset_flush(cache->rq->i915);
 
-       __i915_add_request(cache->rq, true);
+       __i915_request_add(cache->rq, true);
        cache->rq = NULL;
 }
 
@@ -1070,7 +1070,7 @@ static int __reloc_gpu_alloc(struct i915_execbuffer *eb,
 {
        struct reloc_cache *cache = &eb->reloc_cache;
        struct drm_i915_gem_object *obj;
-       struct drm_i915_gem_request *rq;
+       struct i915_request *rq;
        struct i915_vma *batch;
        u32 *cmd;
        int err;
@@ -1103,13 +1103,13 @@ static int __reloc_gpu_alloc(struct i915_execbuffer *eb,
        if (err)
                goto err_unmap;
 
-       rq = i915_gem_request_alloc(eb->engine, eb->ctx);
+       rq = i915_request_alloc(eb->engine, eb->ctx);
        if (IS_ERR(rq)) {
                err = PTR_ERR(rq);
                goto err_unpin;
        }
 
-       err = i915_gem_request_await_object(rq, vma->obj, true);
+       err = i915_request_await_object(rq, vma->obj, true);
        if (err)
                goto err_request;
 
@@ -1141,7 +1141,7 @@ static int __reloc_gpu_alloc(struct i915_execbuffer *eb,
        return 0;
 
 err_request:
-       i915_add_request(rq);
+       i915_request_add(rq);
 err_unpin:
        i915_vma_unpin(batch);
 err_unmap:
@@ -1727,7 +1727,7 @@ slow:
 }
 
 static void eb_export_fence(struct i915_vma *vma,
-                           struct drm_i915_gem_request *req,
+                           struct i915_request *rq,
                            unsigned int flags)
 {
        struct reservation_object *resv = vma->resv;
@@ -1739,9 +1739,9 @@ static void eb_export_fence(struct i915_vma *vma,
         */
        reservation_object_lock(resv, NULL);
        if (flags & EXEC_OBJECT_WRITE)
-               reservation_object_add_excl_fence(resv, &req->fence);
+               reservation_object_add_excl_fence(resv, &rq->fence);
        else if (reservation_object_reserve_shared(resv) == 0)
-               reservation_object_add_shared_fence(resv, &req->fence);
+               reservation_object_add_shared_fence(resv, &rq->fence);
        reservation_object_unlock(resv);
 }
 
@@ -1757,7 +1757,7 @@ static int eb_move_to_gpu(struct i915_execbuffer *eb)
                struct drm_i915_gem_object *obj = vma->obj;
 
                if (flags & EXEC_OBJECT_CAPTURE) {
-                       struct i915_gem_capture_list *capture;
+                       struct i915_capture_list *capture;
 
                        capture = kmalloc(sizeof(*capture), GFP_KERNEL);
                        if (unlikely(!capture))
@@ -1788,7 +1788,7 @@ static int eb_move_to_gpu(struct i915_execbuffer *eb)
                if (flags & EXEC_OBJECT_ASYNC)
                        continue;
 
-               err = i915_gem_request_await_object
+               err = i915_request_await_object
                        (eb->request, obj, flags & EXEC_OBJECT_WRITE);
                if (err)
                        return err;
@@ -1840,13 +1840,13 @@ static bool i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
 }
 
 void i915_vma_move_to_active(struct i915_vma *vma,
-                            struct drm_i915_gem_request *req,
+                            struct i915_request *rq,
                             unsigned int flags)
 {
        struct drm_i915_gem_object *obj = vma->obj;
-       const unsigned int idx = req->engine->id;
+       const unsigned int idx = rq->engine->id;
 
-       lockdep_assert_held(&req->i915->drm.struct_mutex);
+       lockdep_assert_held(&rq->i915->drm.struct_mutex);
        GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
 
        /*
@@ -1860,7 +1860,7 @@ void i915_vma_move_to_active(struct i915_vma *vma,
        if (!i915_vma_is_active(vma))
                obj->active_count++;
        i915_vma_set_active(vma, idx);
-       i915_gem_active_set(&vma->last_read[idx], req);
+       i915_gem_active_set(&vma->last_read[idx], rq);
        list_move_tail(&vma->vm_link, &vma->vm->active_list);
 
        obj->write_domain = 0;
@@ -1868,27 +1868,27 @@ void i915_vma_move_to_active(struct i915_vma *vma,
                obj->write_domain = I915_GEM_DOMAIN_RENDER;
 
                if (intel_fb_obj_invalidate(obj, ORIGIN_CS))
-                       i915_gem_active_set(&obj->frontbuffer_write, req);
+                       i915_gem_active_set(&obj->frontbuffer_write, rq);
 
                obj->read_domains = 0;
        }
        obj->read_domains |= I915_GEM_GPU_DOMAINS;
 
        if (flags & EXEC_OBJECT_NEEDS_FENCE)
-               i915_gem_active_set(&vma->last_fence, req);
+               i915_gem_active_set(&vma->last_fence, rq);
 }
 
-static int i915_reset_gen7_sol_offsets(struct drm_i915_gem_request *req)
+static int i915_reset_gen7_sol_offsets(struct i915_request *rq)
 {
        u32 *cs;
        int i;
 
-       if (!IS_GEN7(req->i915) || req->engine->id != RCS) {
+       if (!IS_GEN7(rq->i915) || rq->engine->id != RCS) {
                DRM_DEBUG("sol reset is gen7/rcs only\n");
                return -EINVAL;
        }
 
-       cs = intel_ring_begin(req, 4 * 2 + 2);
+       cs = intel_ring_begin(rq, 4 * 2 + 2);
        if (IS_ERR(cs))
                return PTR_ERR(cs);
 
@@ -1898,7 +1898,7 @@ static int i915_reset_gen7_sol_offsets(struct drm_i915_gem_request *req)
                *cs++ = 0;
        }
        *cs++ = MI_NOOP;
-       intel_ring_advance(req, cs);
+       intel_ring_advance(rq, cs);
 
        return 0;
 }
@@ -1944,10 +1944,10 @@ out:
 }
 
 static void
-add_to_client(struct drm_i915_gem_request *req, struct drm_file *file)
+add_to_client(struct i915_request *rq, struct drm_file *file)
 {
-       req->file_priv = file->driver_priv;
-       list_add_tail(&req->client_link, &req->file_priv->mm.request_list);
+       rq->file_priv = file->driver_priv;
+       list_add_tail(&rq->client_link, &rq->file_priv->mm.request_list);
 }
 
 static int eb_submit(struct i915_execbuffer *eb)
@@ -2151,7 +2151,7 @@ await_fence_array(struct i915_execbuffer *eb,
                if (!fence)
                        return -EINVAL;
 
-               err = i915_gem_request_await_dma_fence(eb->request, fence);
+               err = i915_request_await_dma_fence(eb->request, fence);
                dma_fence_put(fence);
                if (err < 0)
                        return err;
@@ -2365,14 +2365,14 @@ i915_gem_do_execbuffer(struct drm_device *dev,
        GEM_BUG_ON(eb.reloc_cache.rq);
 
        /* Allocate a request for this batch buffer nice and early. */
-       eb.request = i915_gem_request_alloc(eb.engine, eb.ctx);
+       eb.request = i915_request_alloc(eb.engine, eb.ctx);
        if (IS_ERR(eb.request)) {
                err = PTR_ERR(eb.request);
                goto err_batch_unpin;
        }
 
        if (in_fence) {
-               err = i915_gem_request_await_dma_fence(eb.request, in_fence);
+               err = i915_request_await_dma_fence(eb.request, in_fence);
                if (err < 0)
                        goto err_request;
        }
@@ -2400,10 +2400,10 @@ i915_gem_do_execbuffer(struct drm_device *dev,
         */
        eb.request->batch = eb.batch;
 
-       trace_i915_gem_request_queue(eb.request, eb.batch_flags);
+       trace_i915_request_queue(eb.request, eb.batch_flags);
        err = eb_submit(&eb);
 err_request:
-       __i915_add_request(eb.request, err == 0);
+       __i915_request_add(eb.request, err == 0);
        add_to_client(eb.request, file);
 
        if (fences)
drivers/gpu/drm/i915/i915_gem_gtt.c
index cd59842..21d72f6 100644 (file)
@@ -765,16 +765,16 @@ static void gen8_initialize_pml4(struct i915_address_space *vm,
 }
 
 /* Broadwell Page Directory Pointer Descriptors */
-static int gen8_write_pdp(struct drm_i915_gem_request *req,
+static int gen8_write_pdp(struct i915_request *rq,
                          unsigned entry,
                          dma_addr_t addr)
 {
-       struct intel_engine_cs *engine = req->engine;
+       struct intel_engine_cs *engine = rq->engine;
        u32 *cs;
 
        BUG_ON(entry >= 4);
 
-       cs = intel_ring_begin(req, 6);
+       cs = intel_ring_begin(rq, 6);
        if (IS_ERR(cs))
                return PTR_ERR(cs);
 
@@ -784,20 +784,20 @@ static int gen8_write_pdp(struct drm_i915_gem_request *req,
        *cs++ = MI_LOAD_REGISTER_IMM(1);
        *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(engine, entry));
        *cs++ = lower_32_bits(addr);
-       intel_ring_advance(req, cs);
+       intel_ring_advance(rq, cs);
 
        return 0;
 }
 
 static int gen8_mm_switch_3lvl(struct i915_hw_ppgtt *ppgtt,
-                              struct drm_i915_gem_request *req)
+                              struct i915_request *rq)
 {
        int i, ret;
 
        for (i = GEN8_3LVL_PDPES - 1; i >= 0; i--) {
                const dma_addr_t pd_daddr = i915_page_dir_dma_addr(ppgtt, i);
 
-               ret = gen8_write_pdp(req, i, pd_daddr);
+               ret = gen8_write_pdp(rq, i, pd_daddr);
                if (ret)
                        return ret;
        }
@@ -806,9 +806,9 @@ static int gen8_mm_switch_3lvl(struct i915_hw_ppgtt *ppgtt,
 }
 
 static int gen8_mm_switch_4lvl(struct i915_hw_ppgtt *ppgtt,
-                              struct drm_i915_gem_request *req)
+                              struct i915_request *rq)
 {
-       return gen8_write_pdp(req, 0, px_dma(&ppgtt->pml4));
+       return gen8_write_pdp(rq, 0, px_dma(&ppgtt->pml4));
 }
 
 /* PDE TLBs are a pain to invalidate on GEN8+. When we modify
@@ -1732,13 +1732,13 @@ static inline u32 get_pd_offset(struct i915_hw_ppgtt *ppgtt)
 }
 
 static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
-                        struct drm_i915_gem_request *req)
+                        struct i915_request *rq)
 {
-       struct intel_engine_cs *engine = req->engine;
+       struct intel_engine_cs *engine = rq->engine;
        u32 *cs;
 
        /* NB: TLBs must be flushed and invalidated before a switch */
-       cs = intel_ring_begin(req, 6);
+       cs = intel_ring_begin(rq, 6);
        if (IS_ERR(cs))
                return PTR_ERR(cs);
 
@@ -1748,19 +1748,19 @@ static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
        *cs++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine));
        *cs++ = get_pd_offset(ppgtt);
        *cs++ = MI_NOOP;
-       intel_ring_advance(req, cs);
+       intel_ring_advance(rq, cs);
 
        return 0;
 }
 
 static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt,
-                         struct drm_i915_gem_request *req)
+                         struct i915_request *rq)
 {
-       struct intel_engine_cs *engine = req->engine;
+       struct intel_engine_cs *engine = rq->engine;
        u32 *cs;
 
        /* NB: TLBs must be flushed and invalidated before a switch */
-       cs = intel_ring_begin(req, 6);
+       cs = intel_ring_begin(rq, 6);
        if (IS_ERR(cs))
                return PTR_ERR(cs);
 
@@ -1770,16 +1770,16 @@ static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt,
        *cs++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine));
        *cs++ = get_pd_offset(ppgtt);
        *cs++ = MI_NOOP;
-       intel_ring_advance(req, cs);
+       intel_ring_advance(rq, cs);
 
        return 0;
 }
 
 static int gen6_mm_switch(struct i915_hw_ppgtt *ppgtt,
-                         struct drm_i915_gem_request *req)
+                         struct i915_request *rq)
 {
-       struct intel_engine_cs *engine = req->engine;
-       struct drm_i915_private *dev_priv = req->i915;
+       struct intel_engine_cs *engine = rq->engine;
+       struct drm_i915_private *dev_priv = rq->i915;
 
        I915_WRITE(RING_PP_DIR_DCLV(engine), PP_DIR_DCLV_2G);
        I915_WRITE(RING_PP_DIR_BASE(engine), get_pd_offset(ppgtt));
drivers/gpu/drm/i915/i915_gem_gtt.h
index a42890d..6efc017 100644 (file)
@@ -39,7 +39,8 @@
 #include <linux/pagevec.h>
 
 #include "i915_gem_timeline.h"
-#include "i915_gem_request.h"
+
+#include "i915_request.h"
 #include "i915_selftest.h"
 
 #define I915_GTT_PAGE_SIZE_4K BIT(12)
@@ -398,7 +399,7 @@ struct i915_hw_ppgtt {
        gen6_pte_t __iomem *pd_addr;
 
        int (*switch_mm)(struct i915_hw_ppgtt *ppgtt,
-                        struct drm_i915_gem_request *req);
+                        struct i915_request *rq);
        void (*debug_dump)(struct i915_hw_ppgtt *ppgtt, struct seq_file *m);
 };
 
drivers/gpu/drm/i915/i915_gem_object.h
index ca2b3b6..54f00b3 100644 (file)
@@ -33,7 +33,7 @@
 
 #include <drm/i915_drm.h>
 
-#include "i915_gem_request.h"
+#include "i915_request.h"
 #include "i915_selftest.h"
 
 struct drm_i915_gem_object;
index f7fc0df..1036e86 100644 (file)
@@ -177,7 +177,7 @@ err:
 
 #undef OUT_BATCH
 
-int i915_gem_render_state_emit(struct drm_i915_gem_request *rq)
+int i915_gem_render_state_emit(struct i915_request *rq)
 {
        struct intel_engine_cs *engine = rq->engine;
        struct intel_render_state so = {}; /* keep the compiler happy */
index 8636952..112cda8 100644 (file)
@@ -24,8 +24,8 @@
 #ifndef _I915_GEM_RENDER_STATE_H_
 #define _I915_GEM_RENDER_STATE_H_
 
-struct drm_i915_gem_request;
+struct i915_request;
 
-int i915_gem_render_state_emit(struct drm_i915_gem_request *rq);
+int i915_gem_render_state_emit(struct i915_request *rq);
 
 #endif /* _I915_GEM_RENDER_STATE_H_ */
index 0e158f9..5757fb7 100644 (file)
@@ -175,7 +175,7 @@ i915_gem_shrink(struct drm_i915_private *i915,
                i915_gem_wait_for_idle(i915, I915_WAIT_LOCKED);
 
        trace_i915_gem_shrink(i915, target, flags);
-       i915_gem_retire_requests(i915);
+       i915_retire_requests(i915);
 
        /*
         * Unbinding of objects will require HW access; Let us not wake the
@@ -267,7 +267,7 @@ i915_gem_shrink(struct drm_i915_private *i915,
        if (flags & I915_SHRINK_BOUND)
                intel_runtime_pm_put(i915);
 
-       i915_gem_retire_requests(i915);
+       i915_retire_requests(i915);
 
        shrinker_unlock(i915, unlock);
 
index b5a2240..33e01bf 100644 (file)
@@ -27,9 +27,9 @@
 
 #include <linux/list.h>
 
-#include "i915_utils.h"
-#include "i915_gem_request.h"
+#include "i915_request.h"
 #include "i915_syncmap.h"
+#include "i915_utils.h"
 
 struct i915_gem_timeline;
 
index 65c0bef..a7933c9 100644 (file)
@@ -991,7 +991,7 @@ out:
 static inline uint32_t
 __active_get_seqno(struct i915_gem_active *active)
 {
-       struct drm_i915_gem_request *request;
+       struct i915_request *request;
 
        request = __i915_gem_active_peek(active);
        return request ? request->global_seqno : 0;
@@ -1000,7 +1000,7 @@ __active_get_seqno(struct i915_gem_active *active)
 static inline int
 __active_get_engine_id(struct i915_gem_active *active)
 {
-       struct drm_i915_gem_request *request;
+       struct i915_request *request;
 
        request = __i915_gem_active_peek(active);
        return request ? request->engine->id : -1;
@@ -1293,7 +1293,7 @@ static void error_record_engine_registers(struct i915_gpu_state *error,
        }
 }
 
-static void record_request(struct drm_i915_gem_request *request,
+static void record_request(struct i915_request *request,
                           struct drm_i915_error_request *erq)
 {
        erq->context = request->ctx->hw_id;
@@ -1310,10 +1310,10 @@ static void record_request(struct drm_i915_gem_request *request,
 }
 
 static void engine_record_requests(struct intel_engine_cs *engine,
-                                  struct drm_i915_gem_request *first,
+                                  struct i915_request *first,
                                   struct drm_i915_error_engine *ee)
 {
-       struct drm_i915_gem_request *request;
+       struct i915_request *request;
        int count;
 
        count = 0;
@@ -1363,7 +1363,7 @@ static void error_record_engine_execlists(struct intel_engine_cs *engine,
        unsigned int n;
 
        for (n = 0; n < execlists_num_ports(execlists); n++) {
-               struct drm_i915_gem_request *rq = port_request(&execlists->port[n]);
+               struct i915_request *rq = port_request(&execlists->port[n]);
 
                if (!rq)
                        break;
@@ -1398,10 +1398,10 @@ static void record_context(struct drm_i915_error_context *e,
        e->active = atomic_read(&ctx->active_count);
 }
 
-static void request_record_user_bo(struct drm_i915_gem_request *request,
+static void request_record_user_bo(struct i915_request *request,
                                   struct drm_i915_error_engine *ee)
 {
-       struct i915_gem_capture_list *c;
+       struct i915_capture_list *c;
        struct drm_i915_error_object **bo;
        long count;
 
@@ -1454,7 +1454,7 @@ static void i915_gem_record_rings(struct drm_i915_private *dev_priv,
        for (i = 0; i < I915_NUM_ENGINES; i++) {
                struct intel_engine_cs *engine = dev_priv->engine[i];
                struct drm_i915_error_engine *ee = &error->engine[i];
-               struct drm_i915_gem_request *request;
+               struct i915_request *request;
 
                ee->engine_id = -1;
 
index 17de6ce..0a7ed99 100644 (file)
@@ -1071,7 +1071,7 @@ static void ironlake_rps_change_irq_handler(struct drm_i915_private *dev_priv)
 
 static void notify_ring(struct intel_engine_cs *engine)
 {
-       struct drm_i915_gem_request *rq = NULL;
+       struct i915_request *rq = NULL;
        struct intel_wait *wait;
 
        if (!engine->breadcrumbs.irq_armed)
@@ -1098,13 +1098,13 @@ static void notify_ring(struct intel_engine_cs *engine)
                 */
                if (i915_seqno_passed(intel_engine_get_seqno(engine),
                                      wait->seqno)) {
-                       struct drm_i915_gem_request *waiter = wait->request;
+                       struct i915_request *waiter = wait->request;
 
                        wakeup = true;
                        if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
                                      &waiter->fence.flags) &&
                            intel_wait_check_request(wait, waiter))
-                               rq = i915_gem_request_get(waiter);
+                               rq = i915_request_get(waiter);
                }
 
                if (wakeup)
@@ -1117,7 +1117,7 @@ static void notify_ring(struct intel_engine_cs *engine)
 
        if (rq) {
                dma_fence_signal(&rq->fence);
-               i915_gem_request_put(rq);
+               i915_request_put(rq);
        }
 
        trace_intel_engine_notify(engine, wait);
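
The notify_ring() hunk above pins the waiter with i915_request_get() before signalling its fence and drops the reference afterwards. A minimal sketch of that reference pattern, using a hypothetical helper that is not part of the patch:

static void example_signal_request(struct i915_request *rq)
{
	/* Pin the request so it cannot be retired while we signal it. */
	rq = i915_request_get(rq);

	dma_fence_signal(&rq->fence);	/* wake any dma-fence waiters */

	i915_request_put(rq);		/* drop the temporary reference */
}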
index 1eaabf2..26e8f5c 100644 (file)
@@ -664,6 +664,7 @@ static const struct pci_device_id pciidlist[] = {
        INTEL_CFL_U_GT2_IDS(&intel_coffeelake_gt2_info),
        INTEL_CFL_U_GT3_IDS(&intel_coffeelake_gt3_info),
        INTEL_CNL_IDS(&intel_cannonlake_info),
+       INTEL_ICL_11_IDS(&intel_icelake_11_info),
        {0, 0, 0}
 };
 MODULE_DEVICE_TABLE(pci, pciidlist);
index 0be50e4..2741b1b 100644 (file)
@@ -1630,10 +1630,10 @@ static void gen8_update_reg_state_unlocked(struct i915_gem_context *ctx,
  * Same as gen8_update_reg_state_unlocked only through the batchbuffer. This
  * is only used by the kernel context.
  */
-static int gen8_emit_oa_config(struct drm_i915_gem_request *req,
+static int gen8_emit_oa_config(struct i915_request *rq,
                               const struct i915_oa_config *oa_config)
 {
-       struct drm_i915_private *dev_priv = req->i915;
+       struct drm_i915_private *dev_priv = rq->i915;
        /* The MMIO offsets for Flex EU registers aren't contiguous */
        u32 flex_mmio[] = {
                i915_mmio_reg_offset(EU_PERF_CNTL0),
@@ -1647,7 +1647,7 @@ static int gen8_emit_oa_config(struct drm_i915_gem_request *req,
        u32 *cs;
        int i;
 
-       cs = intel_ring_begin(req, ARRAY_SIZE(flex_mmio) * 2 + 4);
+       cs = intel_ring_begin(rq, ARRAY_SIZE(flex_mmio) * 2 + 4);
        if (IS_ERR(cs))
                return PTR_ERR(cs);
 
@@ -1685,7 +1685,7 @@ static int gen8_emit_oa_config(struct drm_i915_gem_request *req,
        }
 
        *cs++ = MI_NOOP;
-       intel_ring_advance(req, cs);
+       intel_ring_advance(rq, cs);
 
        return 0;
 }
@@ -1695,38 +1695,38 @@ static int gen8_switch_to_updated_kernel_context(struct drm_i915_private *dev_pr
 {
        struct intel_engine_cs *engine = dev_priv->engine[RCS];
        struct i915_gem_timeline *timeline;
-       struct drm_i915_gem_request *req;
+       struct i915_request *rq;
        int ret;
 
        lockdep_assert_held(&dev_priv->drm.struct_mutex);
 
-       i915_gem_retire_requests(dev_priv);
+       i915_retire_requests(dev_priv);
 
-       req = i915_gem_request_alloc(engine, dev_priv->kernel_context);
-       if (IS_ERR(req))
-               return PTR_ERR(req);
+       rq = i915_request_alloc(engine, dev_priv->kernel_context);
+       if (IS_ERR(rq))
+               return PTR_ERR(rq);
 
-       ret = gen8_emit_oa_config(req, oa_config);
+       ret = gen8_emit_oa_config(rq, oa_config);
        if (ret) {
-               i915_add_request(req);
+               i915_request_add(rq);
                return ret;
        }
 
        /* Queue this switch after all other activity */
        list_for_each_entry(timeline, &dev_priv->gt.timelines, link) {
-               struct drm_i915_gem_request *prev;
+               struct i915_request *prev;
                struct intel_timeline *tl;
 
                tl = &timeline->engine[engine->id];
                prev = i915_gem_active_raw(&tl->last_request,
                                           &dev_priv->drm.struct_mutex);
                if (prev)
-                       i915_sw_fence_await_sw_fence_gfp(&req->submit,
+                       i915_sw_fence_await_sw_fence_gfp(&rq->submit,
                                                         &prev->submit,
                                                         GFP_KERNEL);
        }
 
-       i915_add_request(req);
+       i915_request_add(rq);
 
        return 0;
 }
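
The gen8_switch_to_updated_kernel_context() hunk shows the usual kernel-internal submission flow on the renamed API: allocate, emit, then always add the request. A reduced sketch under that assumption, with a hypothetical no-op submission helper:

static int example_submit_nop(struct intel_engine_cs *engine,
			      struct i915_gem_context *ctx)
{
	struct i915_request *rq;
	u32 *cs;

	rq = i915_request_alloc(engine, ctx);	/* reserves ring space */
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	cs = intel_ring_begin(rq, 2);
	if (IS_ERR(cs)) {
		/*
		 * The request must still be added so that the reserved
		 * ring space is released and the timeline stays consistent.
		 */
		i915_request_add(rq);
		return PTR_ERR(cs);
	}

	*cs++ = MI_NOOP;
	*cs++ = MI_NOOP;
	intel_ring_advance(rq, cs);

	i915_request_add(rq);	/* queue for submission; cannot fail */
	return 0;
}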
index 1412abc..eea5b2c 100644 (file)
@@ -2807,6 +2807,13 @@ enum i915_power_well_id {
 #define GEN9_RCS_FE_FSM2 _MMIO(0x22a4)
 
 /* Fuse readout registers for GT */
+#define HSW_PAVP_FUSE1                 _MMIO(0x911C)
+#define   HSW_F1_EU_DIS_SHIFT          16
+#define   HSW_F1_EU_DIS_MASK           (0x3 << HSW_F1_EU_DIS_SHIFT)
+#define   HSW_F1_EU_DIS_10EUS          0
+#define   HSW_F1_EU_DIS_8EUS           1
+#define   HSW_F1_EU_DIS_6EUS           2
+
 #define CHV_FUSE_GT                    _MMIO(VLV_DISPLAY_BASE + 0x2168)
 #define   CHV_FGT_DISABLE_SS0          (1 << 10)
 #define   CHV_FGT_DISABLE_SS1          (1 << 11)
@@ -5347,8 +5354,8 @@ enum {
 #define _DPF_AUX_CH_DATA4      (dev_priv->info.display_mmio_offset + 0x64520)
 #define _DPF_AUX_CH_DATA5      (dev_priv->info.display_mmio_offset + 0x64524)
 
-#define DP_AUX_CH_CTL(port)    _MMIO_PORT(port, _DPA_AUX_CH_CTL, _DPB_AUX_CH_CTL)
-#define DP_AUX_CH_DATA(port, i)        _MMIO(_PORT(port, _DPA_AUX_CH_DATA1, _DPB_AUX_CH_DATA1) + (i) * 4) /* 5 registers */
+#define DP_AUX_CH_CTL(aux_ch)  _MMIO_PORT(aux_ch, _DPA_AUX_CH_CTL, _DPB_AUX_CH_CTL)
+#define DP_AUX_CH_DATA(aux_ch, i)      _MMIO(_PORT(aux_ch, _DPA_AUX_CH_DATA1, _DPB_AUX_CH_DATA1) + (i) * 4) /* 5 registers */
 
 #define   DP_AUX_CH_CTL_SEND_BUSY          (1 << 31)
 #define   DP_AUX_CH_CTL_DONE               (1 << 30)
@@ -7875,8 +7882,8 @@ enum {
 #define _PCH_DPD_AUX_CH_DATA4  0xe4320
 #define _PCH_DPD_AUX_CH_DATA5  0xe4324
 
-#define PCH_DP_AUX_CH_CTL(port)                _MMIO_PORT((port) - PORT_B, _PCH_DPB_AUX_CH_CTL, _PCH_DPC_AUX_CH_CTL)
-#define PCH_DP_AUX_CH_DATA(port, i)    _MMIO(_PORT((port) - PORT_B, _PCH_DPB_AUX_CH_DATA1, _PCH_DPC_AUX_CH_DATA1) + (i) * 4) /* 5 registers */
+#define PCH_DP_AUX_CH_CTL(aux_ch)              _MMIO_PORT((aux_ch) - AUX_CH_B, _PCH_DPB_AUX_CH_CTL, _PCH_DPC_AUX_CH_CTL)
+#define PCH_DP_AUX_CH_DATA(aux_ch, i)  _MMIO(_PORT((aux_ch) - AUX_CH_B, _PCH_DPB_AUX_CH_DATA1, _PCH_DPC_AUX_CH_DATA1) + (i) * 4) /* 5 registers */
 
 /* CPT */
 #define  PORT_TRANS_A_SEL_CPT  0
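
The AUX macros above are now indexed by AUX channel rather than by port. A hypothetical read of the control register, assuming an enum aux_ch value such as AUX_CH_B is available to the caller:

static u32 example_read_aux_ctl(struct drm_i915_private *dev_priv,
				enum aux_ch aux_ch)
{
	/* Indexed by AUX channel (AUX_CH_A, AUX_CH_B, ...), not by port. */
	return I915_READ(DP_AUX_CH_CTL(aux_ch));
}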
similarity index 79%
rename from drivers/gpu/drm/i915/i915_gem_request.c
rename to drivers/gpu/drm/i915/i915_request.c
index 8bc7c50..2265bb8 100644 (file)
@@ -37,7 +37,8 @@ static const char *i915_fence_get_driver_name(struct dma_fence *fence)
 
 static const char *i915_fence_get_timeline_name(struct dma_fence *fence)
 {
-       /* The timeline struct (as part of the ppgtt underneath a context)
+       /*
+        * The timeline struct (as part of the ppgtt underneath a context)
         * may be freed when the request is no longer in use by the GPU.
         * We could extend the life of a context to beyond that of all
         * fences, possibly keeping the hw resource around indefinitely,
@@ -53,7 +54,7 @@ static const char *i915_fence_get_timeline_name(struct dma_fence *fence)
 
 static bool i915_fence_signaled(struct dma_fence *fence)
 {
-       return i915_gem_request_completed(to_request(fence));
+       return i915_request_completed(to_request(fence));
 }
 
 static bool i915_fence_enable_signaling(struct dma_fence *fence)
@@ -69,22 +70,23 @@ static signed long i915_fence_wait(struct dma_fence *fence,
                                   bool interruptible,
                                   signed long timeout)
 {
-       return i915_wait_request(to_request(fence), interruptible, timeout);
+       return i915_request_wait(to_request(fence), interruptible, timeout);
 }
 
 static void i915_fence_release(struct dma_fence *fence)
 {
-       struct drm_i915_gem_request *req = to_request(fence);
+       struct i915_request *rq = to_request(fence);
 
-       /* The request is put onto a RCU freelist (i.e. the address
+       /*
+        * The request is put onto a RCU freelist (i.e. the address
         * is immediately reused), mark the fences as being freed now.
         * Otherwise the debugobjects for the fences are only marked as
         * freed when the slab cache itself is freed, and so we would get
         * caught trying to reuse dead objects.
         */
-       i915_sw_fence_fini(&req->submit);
+       i915_sw_fence_fini(&rq->submit);
 
-       kmem_cache_free(req->i915->requests, req);
+       kmem_cache_free(rq->i915->requests, rq);
 }
 
 const struct dma_fence_ops i915_fence_ops = {
@@ -97,7 +99,7 @@ const struct dma_fence_ops i915_fence_ops = {
 };
 
 static inline void
-i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
+i915_request_remove_from_client(struct i915_request *request)
 {
        struct drm_i915_file_private *file_priv;
 
@@ -238,17 +240,15 @@ static int reset_all_global_seqno(struct drm_i915_private *i915, u32 seqno)
 
 int i915_gem_set_global_seqno(struct drm_device *dev, u32 seqno)
 {
-       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct drm_i915_private *i915 = to_i915(dev);
 
-       lockdep_assert_held(&dev_priv->drm.struct_mutex);
+       lockdep_assert_held(&i915->drm.struct_mutex);
 
        if (seqno == 0)
                return -EINVAL;
 
-       /* HWS page needs to be set less than what we
-        * will inject to ring
-        */
-       return reset_all_global_seqno(dev_priv, seqno - 1);
+       /* HWS page needs to be set less than what we will inject to ring */
+       return reset_all_global_seqno(i915, seqno - 1);
 }
 
 static void mark_busy(struct drm_i915_private *i915)
@@ -331,16 +331,17 @@ static void unreserve_engine(struct intel_engine_cs *engine)
 }
 
 void i915_gem_retire_noop(struct i915_gem_active *active,
-                         struct drm_i915_gem_request *request)
+                         struct i915_request *request)
 {
        /* Space left intentionally blank */
 }
 
-static void advance_ring(struct drm_i915_gem_request *request)
+static void advance_ring(struct i915_request *request)
 {
        unsigned int tail;
 
-       /* We know the GPU must have read the request to have
+       /*
+        * We know the GPU must have read the request to have
         * sent us the seqno + interrupt, so use the position
         * of tail of the request to update the last known position
         * of the GPU head.
@@ -349,7 +350,8 @@ static void advance_ring(struct drm_i915_gem_request *request)
         * completion order.
         */
        if (list_is_last(&request->ring_link, &request->ring->request_list)) {
-               /* We may race here with execlists resubmitting this request
+               /*
+                * We may race here with execlists resubmitting this request
                 * as we retire it. The resubmission will move the ring->tail
                 * forwards (to request->wa_tail). We either read the
                 * current value that was written to hw, or the value that
@@ -365,30 +367,30 @@ static void advance_ring(struct drm_i915_gem_request *request)
        request->ring->head = tail;
 }
 
-static void free_capture_list(struct drm_i915_gem_request *request)
+static void free_capture_list(struct i915_request *request)
 {
-       struct i915_gem_capture_list *capture;
+       struct i915_capture_list *capture;
 
        capture = request->capture_list;
        while (capture) {
-               struct i915_gem_capture_list *next = capture->next;
+               struct i915_capture_list *next = capture->next;
 
                kfree(capture);
                capture = next;
        }
 }
 
-static void i915_gem_request_retire(struct drm_i915_gem_request *request)
+static void i915_request_retire(struct i915_request *request)
 {
        struct intel_engine_cs *engine = request->engine;
        struct i915_gem_active *active, *next;
 
        lockdep_assert_held(&request->i915->drm.struct_mutex);
        GEM_BUG_ON(!i915_sw_fence_signaled(&request->submit));
-       GEM_BUG_ON(!i915_gem_request_completed(request));
+       GEM_BUG_ON(!i915_request_completed(request));
        GEM_BUG_ON(!request->i915->gt.active_requests);
 
-       trace_i915_gem_request_retire(request);
+       trace_i915_request_retire(request);
 
        spin_lock_irq(&engine->timeline->lock);
        list_del_init(&request->link);
@@ -399,7 +401,8 @@ static void i915_gem_request_retire(struct drm_i915_gem_request *request)
 
        free_capture_list(request);
 
-       /* Walk through the active list, calling retire on each. This allows
+       /*
+        * Walk through the active list, calling retire on each. This allows
         * objects to track their GPU activity and mark themselves as idle
         * when their *last* active request is completed (updating state
         * tracking lists for eviction, active references for GEM, etc).
@@ -409,7 +412,8 @@ static void i915_gem_request_retire(struct drm_i915_gem_request *request)
         * the node after the callback).
         */
        list_for_each_entry_safe(active, next, &request->active_list, link) {
-               /* In microbenchmarks or focusing upon time inside the kernel,
+               /*
+                * In microbenchmarks or focusing upon time inside the kernel,
                 * we may spend an inordinate amount of time simply handling
                 * the retirement of requests and processing their callbacks.
                 * Of which, this loop itself is particularly hot due to the
@@ -426,15 +430,16 @@ static void i915_gem_request_retire(struct drm_i915_gem_request *request)
                active->retire(active, request);
        }
 
-       i915_gem_request_remove_from_client(request);
+       i915_request_remove_from_client(request);
 
        /* Retirement decays the ban score as it is a sign of ctx progress */
        atomic_dec_if_positive(&request->ctx->ban_score);
 
-       /* The backing object for the context is done after switching to the
+       /*
+        * The backing object for the context is done after switching to the
         * *next* context. Therefore we cannot retire the previous context until
         * the next context has already started running. However, since we
-        * cannot take the required locks at i915_gem_request_submit() we
+        * cannot take the required locks at i915_request_submit() we
         * defer the unpinning of the active context to now, retirement of
         * the subsequent request.
         */
@@ -454,26 +459,26 @@ static void i915_gem_request_retire(struct drm_i915_gem_request *request)
        spin_unlock_irq(&request->lock);
 
        i915_priotree_fini(request->i915, &request->priotree);
-       i915_gem_request_put(request);
+       i915_request_put(request);
 }
 
-void i915_gem_request_retire_upto(struct drm_i915_gem_request *req)
+void i915_request_retire_upto(struct i915_request *rq)
 {
-       struct intel_engine_cs *engine = req->engine;
-       struct drm_i915_gem_request *tmp;
+       struct intel_engine_cs *engine = rq->engine;
+       struct i915_request *tmp;
 
-       lockdep_assert_held(&req->i915->drm.struct_mutex);
-       GEM_BUG_ON(!i915_gem_request_completed(req));
+       lockdep_assert_held(&rq->i915->drm.struct_mutex);
+       GEM_BUG_ON(!i915_request_completed(rq));
 
-       if (list_empty(&req->link))
+       if (list_empty(&rq->link))
                return;
 
        do {
                tmp = list_first_entry(&engine->timeline->requests,
                                       typeof(*tmp), link);
 
-               i915_gem_request_retire(tmp);
-       } while (tmp != req);
+               i915_request_retire(tmp);
+       } while (tmp != rq);
 }
 
 static u32 timeline_get_seqno(struct intel_timeline *tl)
@@ -481,7 +486,7 @@ static u32 timeline_get_seqno(struct intel_timeline *tl)
        return ++tl->seqno;
 }
 
-void __i915_gem_request_submit(struct drm_i915_gem_request *request)
+void __i915_request_submit(struct i915_request *request)
 {
        struct intel_engine_cs *engine = request->engine;
        struct intel_timeline *timeline;
@@ -490,8 +495,6 @@ void __i915_gem_request_submit(struct drm_i915_gem_request *request)
        GEM_BUG_ON(!irqs_disabled());
        lockdep_assert_held(&engine->timeline->lock);
 
-       trace_i915_gem_request_execute(request);
-
        /* Transfer from per-context onto the global per-engine timeline */
        timeline = engine->timeline;
        GEM_BUG_ON(timeline == request->timeline);
@@ -515,10 +518,12 @@ void __i915_gem_request_submit(struct drm_i915_gem_request *request)
        list_move_tail(&request->link, &timeline->requests);
        spin_unlock(&request->timeline->lock);
 
+       trace_i915_request_execute(request);
+
        wake_up_all(&request->execute);
 }
 
-void i915_gem_request_submit(struct drm_i915_gem_request *request)
+void i915_request_submit(struct i915_request *request)
 {
        struct intel_engine_cs *engine = request->engine;
        unsigned long flags;
@@ -526,12 +531,12 @@ void i915_gem_request_submit(struct drm_i915_gem_request *request)
        /* Will be called from irq-context when using foreign fences. */
        spin_lock_irqsave(&engine->timeline->lock, flags);
 
-       __i915_gem_request_submit(request);
+       __i915_request_submit(request);
 
        spin_unlock_irqrestore(&engine->timeline->lock, flags);
 }
 
-void __i915_gem_request_unsubmit(struct drm_i915_gem_request *request)
+void __i915_request_unsubmit(struct i915_request *request)
 {
        struct intel_engine_cs *engine = request->engine;
        struct intel_timeline *timeline;
@@ -539,7 +544,8 @@ void __i915_gem_request_unsubmit(struct drm_i915_gem_request *request)
        GEM_BUG_ON(!irqs_disabled());
        lockdep_assert_held(&engine->timeline->lock);
 
-       /* Only unwind in reverse order, required so that the per-context list
+       /*
+        * Only unwind in reverse order, required so that the per-context list
         * is kept in seqno/ring order.
         */
        GEM_BUG_ON(!request->global_seqno);
@@ -563,15 +569,16 @@ void __i915_gem_request_unsubmit(struct drm_i915_gem_request *request)
        list_move(&request->link, &timeline->requests);
        spin_unlock(&timeline->lock);
 
-       /* We don't need to wake_up any waiters on request->execute, they
+       /*
+        * We don't need to wake_up any waiters on request->execute, they
         * will get woken by any other event or us re-adding this request
-        * to the engine timeline (__i915_gem_request_submit()). The waiters
+        * to the engine timeline (__i915_request_submit()). The waiters
         * should be quite adept at finding that the request now has a new
         * global_seqno to the one they went to sleep on.
         */
 }
 
-void i915_gem_request_unsubmit(struct drm_i915_gem_request *request)
+void i915_request_unsubmit(struct i915_request *request)
 {
        struct intel_engine_cs *engine = request->engine;
        unsigned long flags;
@@ -579,7 +586,7 @@ void i915_gem_request_unsubmit(struct drm_i915_gem_request *request)
        /* Will be called from irq-context when using foreign fences. */
        spin_lock_irqsave(&engine->timeline->lock, flags);
 
-       __i915_gem_request_unsubmit(request);
+       __i915_request_unsubmit(request);
 
        spin_unlock_irqrestore(&engine->timeline->lock, flags);
 }
@@ -587,18 +594,19 @@ void i915_gem_request_unsubmit(struct drm_i915_gem_request *request)
 static int __i915_sw_fence_call
 submit_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
 {
-       struct drm_i915_gem_request *request =
+       struct i915_request *request =
                container_of(fence, typeof(*request), submit);
 
        switch (state) {
        case FENCE_COMPLETE:
-               trace_i915_gem_request_submit(request);
+               trace_i915_request_submit(request);
                /*
-                * We need to serialize use of the submit_request() callback with its
-                * hotplugging performed during an emergency i915_gem_set_wedged().
-                * We use the RCU mechanism to mark the critical section in order to
-                * force i915_gem_set_wedged() to wait until the submit_request() is
-                * completed before proceeding.
+                * We need to serialize use of the submit_request() callback
+                * with its hotplugging performed during an emergency
+                * i915_gem_set_wedged().  We use the RCU mechanism to mark the
+                * critical section in order to force i915_gem_set_wedged() to
+                * wait until the submit_request() is completed before
+                * proceeding.
                 */
                rcu_read_lock();
                request->engine->submit_request(request);
@@ -606,7 +614,7 @@ submit_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
                break;
 
        case FENCE_FREE:
-               i915_gem_request_put(request);
+               i915_request_put(request);
                break;
        }
 
@@ -614,7 +622,7 @@ submit_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
 }
 
 /**
- * i915_gem_request_alloc - allocate a request structure
+ * i915_request_alloc - allocate a request structure
  *
  * @engine: engine that we wish to issue the request on.
  * @ctx: context that the request will be associated with.
@@ -622,31 +630,32 @@ submit_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
  * Returns a pointer to the allocated request if successful,
  * or an error code if not.
  */
-struct drm_i915_gem_request *
-i915_gem_request_alloc(struct intel_engine_cs *engine,
-                      struct i915_gem_context *ctx)
+struct i915_request *
+i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
 {
-       struct drm_i915_private *dev_priv = engine->i915;
-       struct drm_i915_gem_request *req;
+       struct drm_i915_private *i915 = engine->i915;
+       struct i915_request *rq;
        struct intel_ring *ring;
        int ret;
 
-       lockdep_assert_held(&dev_priv->drm.struct_mutex);
+       lockdep_assert_held(&i915->drm.struct_mutex);
 
        /*
         * Preempt contexts are reserved for exclusive use to inject a
         * preemption context switch. They are never to be used for any trivial
         * request!
         */
-       GEM_BUG_ON(ctx == dev_priv->preempt_context);
+       GEM_BUG_ON(ctx == i915->preempt_context);
 
-       /* ABI: Before userspace accesses the GPU (e.g. execbuffer), report
+       /*
+        * ABI: Before userspace accesses the GPU (e.g. execbuffer), report
         * EIO if the GPU is already wedged.
         */
-       if (i915_terminally_wedged(&dev_priv->gpu_error))
+       if (i915_terminally_wedged(&i915->gpu_error))
                return ERR_PTR(-EIO);
 
-       /* Pinning the contexts may generate requests in order to acquire
+       /*
+        * Pinning the contexts may generate requests in order to acquire
         * GGTT space, so do this first before we reserve a seqno for
         * ourselves.
         */
@@ -664,12 +673,13 @@ i915_gem_request_alloc(struct intel_engine_cs *engine,
                goto err_unreserve;
 
        /* Move the oldest request to the slab-cache (if not in use!) */
-       req = list_first_entry_or_null(&engine->timeline->requests,
-                                      typeof(*req), link);
-       if (req && i915_gem_request_completed(req))
-               i915_gem_request_retire(req);
+       rq = list_first_entry_or_null(&engine->timeline->requests,
+                                     typeof(*rq), link);
+       if (rq && i915_request_completed(rq))
+               i915_request_retire(rq);
 
-       /* Beware: Dragons be flying overhead.
+       /*
+        * Beware: Dragons be flying overhead.
         *
         * We use RCU to look up requests in flight. The lookups may
         * race with the request being allocated from the slab freelist.
@@ -697,11 +707,11 @@ i915_gem_request_alloc(struct intel_engine_cs *engine,
         *
         * Do not use kmem_cache_zalloc() here!
         */
-       req = kmem_cache_alloc(dev_priv->requests,
-                              GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
-       if (unlikely(!req)) {
+       rq = kmem_cache_alloc(i915->requests,
+                             GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
+       if (unlikely(!rq)) {
                /* Ratelimit ourselves to prevent oom from malicious clients */
-               ret = i915_gem_wait_for_idle(dev_priv,
+               ret = i915_gem_wait_for_idle(i915,
                                             I915_WAIT_LOCKED |
                                             I915_WAIT_INTERRUPTIBLE);
                if (ret)
@@ -715,55 +725,55 @@ i915_gem_request_alloc(struct intel_engine_cs *engine,
                 * Having already penalized the client to stall, we spend
                 * a little extra time to re-optimise page allocation.
                 */
-               kmem_cache_shrink(dev_priv->requests);
+               kmem_cache_shrink(i915->requests);
                rcu_barrier(); /* Recover the TYPESAFE_BY_RCU pages */
 
-               req = kmem_cache_alloc(dev_priv->requests, GFP_KERNEL);
-               if (!req) {
+               rq = kmem_cache_alloc(i915->requests, GFP_KERNEL);
+               if (!rq) {
                        ret = -ENOMEM;
                        goto err_unreserve;
                }
        }
 
-       req->timeline = i915_gem_context_lookup_timeline(ctx, engine);
-       GEM_BUG_ON(req->timeline == engine->timeline);
+       rq->timeline = i915_gem_context_lookup_timeline(ctx, engine);
+       GEM_BUG_ON(rq->timeline == engine->timeline);
 
-       spin_lock_init(&req->lock);
-       dma_fence_init(&req->fence,
+       spin_lock_init(&rq->lock);
+       dma_fence_init(&rq->fence,
                       &i915_fence_ops,
-                      &req->lock,
-                      req->timeline->fence_context,
-                      timeline_get_seqno(req->timeline));
+                      &rq->lock,
+                      rq->timeline->fence_context,
+                      timeline_get_seqno(rq->timeline));
 
        /* We bump the ref for the fence chain */
-       i915_sw_fence_init(&i915_gem_request_get(req)->submit, submit_notify);
-       init_waitqueue_head(&req->execute);
+       i915_sw_fence_init(&i915_request_get(rq)->submit, submit_notify);
+       init_waitqueue_head(&rq->execute);
 
-       i915_priotree_init(&req->priotree);
+       i915_priotree_init(&rq->priotree);
 
-       INIT_LIST_HEAD(&req->active_list);
-       req->i915 = dev_priv;
-       req->engine = engine;
-       req->ctx = ctx;
-       req->ring = ring;
+       INIT_LIST_HEAD(&rq->active_list);
+       rq->i915 = i915;
+       rq->engine = engine;
+       rq->ctx = ctx;
+       rq->ring = ring;
 
        /* No zalloc, must clear what we need by hand */
-       req->global_seqno = 0;
-       req->signaling.wait.seqno = 0;
-       req->file_priv = NULL;
-       req->batch = NULL;
-       req->capture_list = NULL;
-       req->waitboost = false;
+       rq->global_seqno = 0;
+       rq->signaling.wait.seqno = 0;
+       rq->file_priv = NULL;
+       rq->batch = NULL;
+       rq->capture_list = NULL;
+       rq->waitboost = false;
 
        /*
         * Reserve space in the ring buffer for all the commands required to
         * eventually emit this request. This is to guarantee that the
-        * i915_add_request() call can't fail. Note that the reserve may need
+        * i915_request_add() call can't fail. Note that the reserve may need
         * to be redone if the request is not actually submitted straight
         * away, e.g. because a GPU scheduler has deferred it.
         */
-       req->reserved_space = MIN_SPACE_FOR_ADD_REQUEST;
-       GEM_BUG_ON(req->reserved_space < engine->emit_breadcrumb_sz);
+       rq->reserved_space = MIN_SPACE_FOR_ADD_REQUEST;
+       GEM_BUG_ON(rq->reserved_space < engine->emit_breadcrumb_sz);
 
        /*
         * Record the position of the start of the request so that
@@ -771,30 +781,30 @@ i915_gem_request_alloc(struct intel_engine_cs *engine,
         * GPU processing the request, we never over-estimate the
         * position of the head.
         */
-       req->head = req->ring->emit;
+       rq->head = rq->ring->emit;
 
        /* Unconditionally invalidate GPU caches and TLBs. */
-       ret = engine->emit_flush(req, EMIT_INVALIDATE);
+       ret = engine->emit_flush(rq, EMIT_INVALIDATE);
        if (ret)
                goto err_unwind;
 
-       ret = engine->request_alloc(req);
+       ret = engine->request_alloc(rq);
        if (ret)
                goto err_unwind;
 
        /* Check that we didn't interrupt ourselves with a new request */
-       GEM_BUG_ON(req->timeline->seqno != req->fence.seqno);
-       return req;
+       GEM_BUG_ON(rq->timeline->seqno != rq->fence.seqno);
+       return rq;
 
 err_unwind:
-       req->ring->emit = req->head;
+       rq->ring->emit = rq->head;
 
        /* Make sure we didn't add ourselves to external state before freeing */
-       GEM_BUG_ON(!list_empty(&req->active_list));
-       GEM_BUG_ON(!list_empty(&req->priotree.signalers_list));
-       GEM_BUG_ON(!list_empty(&req->priotree.waiters_list));
+       GEM_BUG_ON(!list_empty(&rq->active_list));
+       GEM_BUG_ON(!list_empty(&rq->priotree.signalers_list));
+       GEM_BUG_ON(!list_empty(&rq->priotree.waiters_list));
 
-       kmem_cache_free(dev_priv->requests, req);
+       kmem_cache_free(i915->requests, rq);
 err_unreserve:
        unreserve_engine(engine);
 err_unpin:
@@ -803,15 +813,14 @@ err_unpin:
 }
 
 static int
-i915_gem_request_await_request(struct drm_i915_gem_request *to,
-                              struct drm_i915_gem_request *from)
+i915_request_await_request(struct i915_request *to, struct i915_request *from)
 {
        int ret;
 
        GEM_BUG_ON(to == from);
        GEM_BUG_ON(to->timeline == from->timeline);
 
-       if (i915_gem_request_completed(from))
+       if (i915_request_completed(from))
                return 0;
 
        if (to->engine->schedule) {
@@ -834,7 +843,7 @@ i915_gem_request_await_request(struct drm_i915_gem_request *to,
 
                GEM_BUG_ON(!from->engine->semaphore.signal);
 
-               seqno = i915_gem_request_global_seqno(from);
+               seqno = i915_request_global_seqno(from);
                if (!seqno)
                        goto await_dma_fence;
 
@@ -858,14 +867,14 @@ await_dma_fence:
 }
 
 int
-i915_gem_request_await_dma_fence(struct drm_i915_gem_request *req,
-                                struct dma_fence *fence)
+i915_request_await_dma_fence(struct i915_request *rq, struct dma_fence *fence)
 {
        struct dma_fence **child = &fence;
        unsigned int nchild = 1;
        int ret;
 
-       /* Note that if the fence-array was created in signal-on-any mode,
+       /*
+        * Note that if the fence-array was created in signal-on-any mode,
         * we should *not* decompose it into its individual fences. However,
         * we don't currently store which mode the fence-array is operating
         * in. Fortunately, the only user of signal-on-any is private to
@@ -887,37 +896,36 @@ i915_gem_request_await_dma_fence(struct drm_i915_gem_request *req,
 
                /*
                 * Requests on the same timeline are explicitly ordered, along
-                * with their dependencies, by i915_add_request() which ensures
+                * with their dependencies, by i915_request_add() which ensures
                 * that requests are submitted in-order through each ring.
                 */
-               if (fence->context == req->fence.context)
+               if (fence->context == rq->fence.context)
                        continue;
 
                /* Squash repeated waits to the same timelines */
-               if (fence->context != req->i915->mm.unordered_timeline &&
-                   intel_timeline_sync_is_later(req->timeline, fence))
+               if (fence->context != rq->i915->mm.unordered_timeline &&
+                   intel_timeline_sync_is_later(rq->timeline, fence))
                        continue;
 
                if (dma_fence_is_i915(fence))
-                       ret = i915_gem_request_await_request(req,
-                                                            to_request(fence));
+                       ret = i915_request_await_request(rq, to_request(fence));
                else
-                       ret = i915_sw_fence_await_dma_fence(&req->submit, fence,
+                       ret = i915_sw_fence_await_dma_fence(&rq->submit, fence,
                                                            I915_FENCE_TIMEOUT,
                                                            I915_FENCE_GFP);
                if (ret < 0)
                        return ret;
 
                /* Record the latest fence used against each timeline */
-               if (fence->context != req->i915->mm.unordered_timeline)
-                       intel_timeline_sync_set(req->timeline, fence);
+               if (fence->context != rq->i915->mm.unordered_timeline)
+                       intel_timeline_sync_set(rq->timeline, fence);
        } while (--nchild);
 
        return 0;
 }
 
 /**
- * i915_gem_request_await_object - set this request to (async) wait upon a bo
+ * i915_request_await_object - set this request to (async) wait upon a bo
  * @to: request we are wishing to use
  * @obj: object which may be in use on another ring.
  * @write: whether the wait is on behalf of a writer
@@ -937,9 +945,9 @@ i915_gem_request_await_dma_fence(struct drm_i915_gem_request *req,
  * Returns 0 if successful, else propagates up the lower layer error.
  */
 int
-i915_gem_request_await_object(struct drm_i915_gem_request *to,
-                             struct drm_i915_gem_object *obj,
-                             bool write)
+i915_request_await_object(struct i915_request *to,
+                         struct drm_i915_gem_object *obj,
+                         bool write)
 {
        struct dma_fence *excl;
        int ret = 0;
@@ -954,7 +962,7 @@ i915_gem_request_await_object(struct drm_i915_gem_request *to,
                        return ret;
 
                for (i = 0; i < count; i++) {
-                       ret = i915_gem_request_await_dma_fence(to, shared[i]);
+                       ret = i915_request_await_dma_fence(to, shared[i]);
                        if (ret)
                                break;
 
@@ -970,7 +978,7 @@ i915_gem_request_await_object(struct drm_i915_gem_request *to,
 
        if (excl) {
                if (ret == 0)
-                       ret = i915_gem_request_await_dma_fence(to, excl);
+                       ret = i915_request_await_dma_fence(to, excl);
 
                dma_fence_put(excl);
        }
@@ -983,21 +991,21 @@ i915_gem_request_await_object(struct drm_i915_gem_request *to,
  * request is not being tracked for completion but the work itself is
  * going to happen on the hardware. This would be a Bad Thing(tm).
  */
-void __i915_add_request(struct drm_i915_gem_request *request, bool flush_caches)
+void __i915_request_add(struct i915_request *request, bool flush_caches)
 {
        struct intel_engine_cs *engine = request->engine;
        struct intel_ring *ring = request->ring;
        struct intel_timeline *timeline = request->timeline;
-       struct drm_i915_gem_request *prev;
+       struct i915_request *prev;
        u32 *cs;
        int err;
 
        lockdep_assert_held(&request->i915->drm.struct_mutex);
-       trace_i915_gem_request_add(request);
+       trace_i915_request_add(request);
 
        /*
         * Make sure that no request gazumped us - if it was allocated after
-        * our i915_gem_request_alloc() and called __i915_add_request() before
+        * our i915_request_alloc() and called __i915_request_add() before
         * us, the timeline will hold its seqno which is later than ours.
         */
        GEM_BUG_ON(timeline->seqno != request->fence.seqno);
@@ -1042,7 +1050,7 @@ void __i915_add_request(struct drm_i915_gem_request *request, bool flush_caches)
 
        prev = i915_gem_active_raw(&timeline->last_request,
                                   &request->i915->drm.struct_mutex);
-       if (prev && !i915_gem_request_completed(prev)) {
+       if (prev && !i915_request_completed(prev)) {
                i915_sw_fence_await_sw_fence(&request->submit, &prev->submit,
                                             &request->submitq);
                if (engine->schedule)
@@ -1097,15 +1105,16 @@ void __i915_add_request(struct drm_i915_gem_request *request, bool flush_caches)
         * work on behalf of others -- but instead we should benefit from
         * improved resource management. (Well, that's the theory at least.)
         */
-       if (prev && i915_gem_request_completed(prev))
-               i915_gem_request_retire_upto(prev);
+       if (prev && i915_request_completed(prev))
+               i915_request_retire_upto(prev);
 }
 
 static unsigned long local_clock_us(unsigned int *cpu)
 {
        unsigned long t;
 
-       /* Cheaply and approximately convert from nanoseconds to microseconds.
+       /*
+        * Cheaply and approximately convert from nanoseconds to microseconds.
         * The result and subsequent calculations are also defined in the same
         * approximate microseconds units. The principal source of timing
         * error here is from the simple truncation.
@@ -1133,10 +1142,10 @@ static bool busywait_stop(unsigned long timeout, unsigned int cpu)
        return this_cpu != cpu;
 }
 
-static bool __i915_spin_request(const struct drm_i915_gem_request *req,
+static bool __i915_spin_request(const struct i915_request *rq,
                                u32 seqno, int state, unsigned long timeout_us)
 {
-       struct intel_engine_cs *engine = req->engine;
+       struct intel_engine_cs *engine = rq->engine;
        unsigned int irq, cpu;
 
        GEM_BUG_ON(!seqno);
@@ -1155,7 +1164,8 @@ static bool __i915_spin_request(const struct drm_i915_gem_request *req,
        if (!i915_seqno_passed(intel_engine_get_seqno(engine), seqno - 1))
                return false;
 
-       /* When waiting for high frequency requests, e.g. during synchronous
+       /*
+        * When waiting for high frequency requests, e.g. during synchronous
         * rendering split between the CPU and GPU, the finite amount of time
         * required to set up the irq and wait upon it limits the response
         * rate. By busywaiting on the request completion for a short while we
@@ -1169,9 +1179,10 @@ static bool __i915_spin_request(const struct drm_i915_gem_request *req,
        timeout_us += local_clock_us(&cpu);
        do {
                if (i915_seqno_passed(intel_engine_get_seqno(engine), seqno))
-                       return seqno == i915_gem_request_global_seqno(req);
+                       return seqno == i915_request_global_seqno(rq);
 
-               /* Seqno are meant to be ordered *before* the interrupt. If
+               /*
+                * Seqno are meant to be ordered *before* the interrupt. If
                 * we see an interrupt without a corresponding seqno advance,
                 * assume we won't see one in the near future but require
                 * the engine->seqno_barrier() to fixup coherency.
@@ -1191,7 +1202,7 @@ static bool __i915_spin_request(const struct drm_i915_gem_request *req,
        return false;
 }
 
-static bool __i915_wait_request_check_and_reset(struct drm_i915_gem_request *request)
+static bool __i915_wait_request_check_and_reset(struct i915_request *request)
 {
        if (likely(!i915_reset_handoff(&request->i915->gpu_error)))
                return false;
@@ -1202,12 +1213,12 @@ static bool __i915_wait_request_check_and_reset(struct drm_i915_gem_request *req
 }
 
 /**
- * i915_wait_request - wait until execution of request has finished
- * @req: the request to wait upon
+ * i915_request_wait - wait until execution of request has finished
+ * @rq: the request to wait upon
  * @flags: how to wait
  * @timeout: how long to wait in jiffies
  *
- * i915_wait_request() waits for the request to be completed, for a
+ * i915_request_wait() waits for the request to be completed, for a
  * maximum of @timeout jiffies (with MAX_SCHEDULE_TIMEOUT implying an
  * unbounded wait).
  *
@@ -1220,13 +1231,13 @@ static bool __i915_wait_request_check_and_reset(struct drm_i915_gem_request *req
  * May return -EINTR if called with I915_WAIT_INTERRUPTIBLE and a signal is
  * pending before the request completes.
  */
-long i915_wait_request(struct drm_i915_gem_request *req,
+long i915_request_wait(struct i915_request *rq,
                       unsigned int flags,
                       long timeout)
 {
        const int state = flags & I915_WAIT_INTERRUPTIBLE ?
                TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE;
-       wait_queue_head_t *errq = &req->i915->gpu_error.wait_queue;
+       wait_queue_head_t *errq = &rq->i915->gpu_error.wait_queue;
        DEFINE_WAIT_FUNC(reset, default_wake_function);
        DEFINE_WAIT_FUNC(exec, default_wake_function);
        struct intel_wait wait;
@@ -1234,33 +1245,33 @@ long i915_wait_request(struct drm_i915_gem_request *req,
        might_sleep();
 #if IS_ENABLED(CONFIG_LOCKDEP)
        GEM_BUG_ON(debug_locks &&
-                  !!lockdep_is_held(&req->i915->drm.struct_mutex) !=
+                  !!lockdep_is_held(&rq->i915->drm.struct_mutex) !=
                   !!(flags & I915_WAIT_LOCKED));
 #endif
        GEM_BUG_ON(timeout < 0);
 
-       if (i915_gem_request_completed(req))
+       if (i915_request_completed(rq))
                return timeout;
 
        if (!timeout)
                return -ETIME;
 
-       trace_i915_gem_request_wait_begin(req, flags);
+       trace_i915_request_wait_begin(rq, flags);
 
-       add_wait_queue(&req->execute, &exec);
+       add_wait_queue(&rq->execute, &exec);
        if (flags & I915_WAIT_LOCKED)
                add_wait_queue(errq, &reset);
 
-       intel_wait_init(&wait, req);
+       intel_wait_init(&wait, rq);
 
 restart:
        do {
                set_current_state(state);
-               if (intel_wait_update_request(&wait, req))
+               if (intel_wait_update_request(&wait, rq))
                        break;
 
                if (flags & I915_WAIT_LOCKED &&
-                   __i915_wait_request_check_and_reset(req))
+                   __i915_wait_request_check_and_reset(rq))
                        continue;
 
                if (signal_pending_state(state, current)) {
@@ -1277,22 +1288,23 @@ restart:
        } while (1);
 
        GEM_BUG_ON(!intel_wait_has_seqno(&wait));
-       GEM_BUG_ON(!i915_sw_fence_signaled(&req->submit));
+       GEM_BUG_ON(!i915_sw_fence_signaled(&rq->submit));
 
        /* Optimistic short spin before touching IRQs */
-       if (__i915_spin_request(req, wait.seqno, state, 5))
+       if (__i915_spin_request(rq, wait.seqno, state, 5))
                goto complete;
 
        set_current_state(state);
-       if (intel_engine_add_wait(req->engine, &wait))
-               /* In order to check that we haven't missed the interrupt
+       if (intel_engine_add_wait(rq->engine, &wait))
+               /*
+                * In order to check that we haven't missed the interrupt
                 * as we enabled it, we need to kick ourselves to do a
                 * coherent check on the seqno before we sleep.
                 */
                goto wakeup;
 
        if (flags & I915_WAIT_LOCKED)
-               __i915_wait_request_check_and_reset(req);
+               __i915_wait_request_check_and_reset(rq);
 
        for (;;) {
                if (signal_pending_state(state, current)) {
@@ -1308,21 +1320,23 @@ restart:
                timeout = io_schedule_timeout(timeout);
 
                if (intel_wait_complete(&wait) &&
-                   intel_wait_check_request(&wait, req))
+                   intel_wait_check_request(&wait, rq))
                        break;
 
                set_current_state(state);
 
 wakeup:
-               /* Carefully check if the request is complete, giving time
+               /*
+                * Carefully check if the request is complete, giving time
                 * for the seqno to be visible following the interrupt.
                 * We also have to check in case we are kicked by the GPU
                 * reset in order to drop the struct_mutex.
                 */
-               if (__i915_request_irq_complete(req))
+               if (__i915_request_irq_complete(rq))
                        break;
 
-               /* If the GPU is hung, and we hold the lock, reset the GPU
+               /*
+                * If the GPU is hung, and we hold the lock, reset the GPU
                 * and then check for completion. On a full reset, the engine's
                 * HW seqno will be advanced past us and we are complete.
                 * If we do a partial reset, we have to wait for the GPU to
@@ -1333,33 +1347,33 @@ wakeup:
                 * itself, or indirectly by recovering the GPU).
                 */
                if (flags & I915_WAIT_LOCKED &&
-                   __i915_wait_request_check_and_reset(req))
+                   __i915_wait_request_check_and_reset(rq))
                        continue;
 
                /* Only spin if we know the GPU is processing this request */
-               if (__i915_spin_request(req, wait.seqno, state, 2))
+               if (__i915_spin_request(rq, wait.seqno, state, 2))
                        break;
 
-               if (!intel_wait_check_request(&wait, req)) {
-                       intel_engine_remove_wait(req->engine, &wait);
+               if (!intel_wait_check_request(&wait, rq)) {
+                       intel_engine_remove_wait(rq->engine, &wait);
                        goto restart;
                }
        }
 
-       intel_engine_remove_wait(req->engine, &wait);
+       intel_engine_remove_wait(rq->engine, &wait);
 complete:
        __set_current_state(TASK_RUNNING);
        if (flags & I915_WAIT_LOCKED)
                remove_wait_queue(errq, &reset);
-       remove_wait_queue(&req->execute, &exec);
-       trace_i915_gem_request_wait_end(req);
+       remove_wait_queue(&rq->execute, &exec);
+       trace_i915_request_wait_end(rq);
 
        return timeout;
 }
 
 static void engine_retire_requests(struct intel_engine_cs *engine)
 {
-       struct drm_i915_gem_request *request, *next;
+       struct i915_request *request, *next;
        u32 seqno = intel_engine_get_seqno(engine);
        LIST_HEAD(retire);
 
@@ -1374,24 +1388,24 @@ static void engine_retire_requests(struct intel_engine_cs *engine)
        spin_unlock_irq(&engine->timeline->lock);
 
        list_for_each_entry_safe(request, next, &retire, link)
-               i915_gem_request_retire(request);
+               i915_request_retire(request);
 }
 
-void i915_gem_retire_requests(struct drm_i915_private *dev_priv)
+void i915_retire_requests(struct drm_i915_private *i915)
 {
        struct intel_engine_cs *engine;
        enum intel_engine_id id;
 
-       lockdep_assert_held(&dev_priv->drm.struct_mutex);
+       lockdep_assert_held(&i915->drm.struct_mutex);
 
-       if (!dev_priv->gt.active_requests)
+       if (!i915->gt.active_requests)
                return;
 
-       for_each_engine(engine, dev_priv, id)
+       for_each_engine(engine, i915, id)
                engine_retire_requests(engine);
 }
 
 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
 #include "selftests/mock_request.c"
-#include "selftests/i915_gem_request.c"
+#include "selftests/i915_request.c"
 #endif
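
With the request code now living in i915_request.c, a typical synchronous caller of the renamed wait API looks roughly like the sketch below; the helper is hypothetical, and flags and timeout follow the kernel-doc above:

static int example_wait_for_request(struct i915_request *rq)
{
	long timeout;

	/*
	 * Interruptible, unbounded wait: returns the remaining jiffies on
	 * completion or a negative error (e.g. -EINTR) if interrupted.
	 */
	timeout = i915_request_wait(rq,
				    I915_WAIT_INTERRUPTIBLE,
				    MAX_SCHEDULE_TIMEOUT);
	if (timeout < 0)
		return timeout;

	return 0;
}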
similarity index 79%
rename from drivers/gpu/drm/i915/i915_gem_request.h
rename to drivers/gpu/drm/i915/i915_request.h
index 2236e91..74311fc 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright © 2008-2015 Intel Corporation
+ * Copyright © 2008-2018 Intel Corporation
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -22,8 +22,8 @@
  *
  */
 
-#ifndef I915_GEM_REQUEST_H
-#define I915_GEM_REQUEST_H
+#ifndef I915_REQUEST_H
+#define I915_REQUEST_H
 
 #include <linux/dma-fence.h>
 
 
 struct drm_file;
 struct drm_i915_gem_object;
-struct drm_i915_gem_request;
+struct i915_request;
 
 struct intel_wait {
        struct rb_node node;
        struct task_struct *tsk;
-       struct drm_i915_gem_request *request;
+       struct i915_request *request;
        u32 seqno;
 };
 
@@ -57,7 +57,12 @@ struct i915_dependency {
 #define I915_DEPENDENCY_ALLOC BIT(0)
 };
 
-/* Requests exist in a complex web of interdependencies. Each request
+/*
+ * "People assume that time is a strict progression of cause to effect, but
+ * actually, from a nonlinear, non-subjective viewpoint, it's more like a big
+ * ball of wibbly-wobbly, timey-wimey ... stuff." -The Doctor, 2015
+ *
+ * Requests exist in a complex web of interdependencies. Each request
  * has to wait for some other request to complete before it is ready to be run
  * (e.g. we have to wait until the pixels have been rendering into a texture
  * before we can copy from it). We track the readiness of a request in terms
@@ -81,8 +86,8 @@ enum {
        I915_PRIORITY_INVALID = INT_MIN
 };
 
-struct i915_gem_capture_list {
-       struct i915_gem_capture_list *next;
+struct i915_capture_list {
+       struct i915_capture_list *next;
        struct i915_vma *vma;
 };
 
@@ -106,7 +111,7 @@ struct i915_gem_capture_list {
  *
  * The requests are reference counted.
  */
-struct drm_i915_gem_request {
+struct i915_request {
        struct dma_fence fence;
        spinlock_t lock;
 
@@ -120,7 +125,7 @@ struct drm_i915_gem_request {
         * it persists while any request is linked to it. Requests themselves
         * are also refcounted, so the request will only be freed when the last
         * reference to it is dismissed, and the code in
-        * i915_gem_request_free() will then decrement the refcount on the
+        * i915_request_free() will then decrement the refcount on the
         * context.
         */
        struct i915_gem_context *ctx;
@@ -129,7 +134,8 @@ struct drm_i915_gem_request {
        struct intel_timeline *timeline;
        struct intel_signal_node signaling;
 
-       /* Fences for the various phases in the request's lifetime.
+       /*
+        * Fences for the various phases in the request's lifetime.
         *
         * The submit fence is used to await upon all of the request's
         * dependencies. When it is signaled, the request is ready to run.
@@ -139,7 +145,8 @@ struct drm_i915_gem_request {
        wait_queue_entry_t submitq;
        wait_queue_head_t execute;
 
-       /* A list of everyone we wait upon, and everyone who waits upon us.
+       /*
+        * A list of everyone we wait upon, and everyone who waits upon us.
         * Even though we will not be submitted to the hardware before the
         * submit fence is signaled (it waits for all external events as well
         * as our own requests), the scheduler still needs to know the
@@ -150,7 +157,8 @@ struct drm_i915_gem_request {
        struct i915_priotree priotree;
        struct i915_dependency dep;
 
-       /** GEM sequence number associated with this request on the
+       /**
+        * GEM sequence number associated with this request on the
         * global execution timeline. It is zero when the request is not
         * on the HW queue (i.e. not on the engine timeline list).
         * Its value is guarded by the timeline spinlock.
@@ -180,12 +188,13 @@ struct drm_i915_gem_request {
         * error state dump only).
         */
        struct i915_vma *batch;
-       /** Additional buffers requested by userspace to be captured upon
+       /**
+        * Additional buffers requested by userspace to be captured upon
         * a GPU hang. The vma/obj on this list are protected by their
         * active reference - all objects on this list must also be
         * on the active_list (of their final request).
         */
-       struct i915_gem_capture_list *capture_list;
+       struct i915_capture_list *capture_list;
        struct list_head active_list;
 
        /** Time at which this request was emitted, in jiffies. */
@@ -213,40 +222,40 @@ static inline bool dma_fence_is_i915(const struct dma_fence *fence)
        return fence->ops == &i915_fence_ops;
 }
 
-struct drm_i915_gem_request * __must_check
-i915_gem_request_alloc(struct intel_engine_cs *engine,
-                      struct i915_gem_context *ctx);
-void i915_gem_request_retire_upto(struct drm_i915_gem_request *req);
+struct i915_request * __must_check
+i915_request_alloc(struct intel_engine_cs *engine,
+                  struct i915_gem_context *ctx);
+void i915_request_retire_upto(struct i915_request *rq);
 
-static inline struct drm_i915_gem_request *
+static inline struct i915_request *
 to_request(struct dma_fence *fence)
 {
        /* We assume that NULL fence/request are interoperable */
-       BUILD_BUG_ON(offsetof(struct drm_i915_gem_request, fence) != 0);
+       BUILD_BUG_ON(offsetof(struct i915_request, fence) != 0);
        GEM_BUG_ON(fence && !dma_fence_is_i915(fence));
-       return container_of(fence, struct drm_i915_gem_request, fence);
+       return container_of(fence, struct i915_request, fence);
 }
 
-static inline struct drm_i915_gem_request *
-i915_gem_request_get(struct drm_i915_gem_request *req)
+static inline struct i915_request *
+i915_request_get(struct i915_request *rq)
 {
-       return to_request(dma_fence_get(&req->fence));
+       return to_request(dma_fence_get(&rq->fence));
 }
 
-static inline struct drm_i915_gem_request *
-i915_gem_request_get_rcu(struct drm_i915_gem_request *req)
+static inline struct i915_request *
+i915_request_get_rcu(struct i915_request *rq)
 {
-       return to_request(dma_fence_get_rcu(&req->fence));
+       return to_request(dma_fence_get_rcu(&rq->fence));
 }
 
 static inline void
-i915_gem_request_put(struct drm_i915_gem_request *req)
+i915_request_put(struct i915_request *rq)
 {
-       dma_fence_put(&req->fence);
+       dma_fence_put(&rq->fence);
 }
 
 /**
- * i915_gem_request_global_seqno - report the current global seqno
+ * i915_request_global_seqno - report the current global seqno
  * @request - the request
  *
  * A request is assigned a global seqno only when it is on the hardware
@@ -264,34 +273,28 @@ i915_gem_request_put(struct drm_i915_gem_request *req)
  * after the read, it is indeed complete).
  */
 static u32
-i915_gem_request_global_seqno(const struct drm_i915_gem_request *request)
+i915_request_global_seqno(const struct i915_request *request)
 {
        return READ_ONCE(request->global_seqno);
 }
 
-int
-i915_gem_request_await_object(struct drm_i915_gem_request *to,
+int i915_request_await_object(struct i915_request *to,
                              struct drm_i915_gem_object *obj,
                              bool write);
-int i915_gem_request_await_dma_fence(struct drm_i915_gem_request *req,
-                                    struct dma_fence *fence);
-
-void __i915_add_request(struct drm_i915_gem_request *req, bool flush_caches);
-#define i915_add_request(req) \
-       __i915_add_request(req, false)
+int i915_request_await_dma_fence(struct i915_request *rq,
+                                struct dma_fence *fence);
 
-void __i915_gem_request_submit(struct drm_i915_gem_request *request);
-void i915_gem_request_submit(struct drm_i915_gem_request *request);
+void __i915_request_add(struct i915_request *rq, bool flush_caches);
+#define i915_request_add(rq) \
+       __i915_request_add(rq, false)
 
-void __i915_gem_request_unsubmit(struct drm_i915_gem_request *request);
-void i915_gem_request_unsubmit(struct drm_i915_gem_request *request);
+void __i915_request_submit(struct i915_request *request);
+void i915_request_submit(struct i915_request *request);
 
-struct intel_rps_client;
-#define NO_WAITBOOST ERR_PTR(-1)
-#define IS_RPS_CLIENT(p) (!IS_ERR(p))
-#define IS_RPS_USER(p) (!IS_ERR_OR_NULL(p))
+void __i915_request_unsubmit(struct i915_request *request);
+void i915_request_unsubmit(struct i915_request *request);
 
-long i915_wait_request(struct drm_i915_gem_request *req,
+long i915_request_wait(struct i915_request *rq,
                       unsigned int flags,
                       long timeout)
        __attribute__((nonnull(1)));
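
Taken together, the renamed entry points declared above form the usual submission flow. A minimal sketch, assuming engine, ctx and obj already exist and skipping the actual command emission (only the i915_request_* calls come from this header):

struct i915_request *rq;
int err;

rq = i915_request_alloc(engine, ctx);
if (IS_ERR(rq))
        return PTR_ERR(rq);

/* order this request after any outstanding write access to obj */
err = i915_request_await_object(rq, obj, true);

/* ... emit commands for the engine here ... */

/* queue for execution; expands to __i915_request_add(rq, false) */
i915_request_add(rq);
return err;
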
@@ -310,47 +313,48 @@ static inline bool i915_seqno_passed(u32 seq1, u32 seq2)
 }
 
 static inline bool
-__i915_gem_request_completed(const struct drm_i915_gem_request *req, u32 seqno)
+__i915_request_completed(const struct i915_request *rq, u32 seqno)
 {
        GEM_BUG_ON(!seqno);
-       return i915_seqno_passed(intel_engine_get_seqno(req->engine), seqno) &&
-               seqno == i915_gem_request_global_seqno(req);
+       return i915_seqno_passed(intel_engine_get_seqno(rq->engine), seqno) &&
+               seqno == i915_request_global_seqno(rq);
 }
 
-static inline bool
-i915_gem_request_completed(const struct drm_i915_gem_request *req)
+static inline bool i915_request_completed(const struct i915_request *rq)
 {
        u32 seqno;
 
-       seqno = i915_gem_request_global_seqno(req);
+       seqno = i915_request_global_seqno(rq);
        if (!seqno)
                return false;
 
-       return __i915_gem_request_completed(req, seqno);
+       return __i915_request_completed(rq, seqno);
 }
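
These seqno checks lean on i915_seqno_passed(), whose body sits outside this hunk; it is conventionally a wraparound-safe signed comparison, roughly:

/* sketch: true once seq1 has reached or passed seq2, tolerating u32 wraparound */
static inline bool i915_seqno_passed(u32 seq1, u32 seq2)
{
        return (s32)(seq1 - seq2) >= 0;
}

With seq1 = 2 just after a wraparound and seq2 = 0xfffffffe, the difference is 4 and the request still reports as passed; the extra comparison against i915_request_global_seqno() above then confirms the request was not unsubmitted and handed a new global seqno in the meantime.
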
 
-static inline bool
-i915_gem_request_started(const struct drm_i915_gem_request *req)
+static inline bool i915_request_started(const struct i915_request *rq)
 {
        u32 seqno;
 
-       seqno = i915_gem_request_global_seqno(req);
+       seqno = i915_request_global_seqno(rq);
        if (!seqno)
                return false;
 
-       return i915_seqno_passed(intel_engine_get_seqno(req->engine),
+       return i915_seqno_passed(intel_engine_get_seqno(rq->engine),
                                 seqno - 1);
 }
 
 static inline bool i915_priotree_signaled(const struct i915_priotree *pt)
 {
-       const struct drm_i915_gem_request *rq =
-               container_of(pt, const struct drm_i915_gem_request, priotree);
+       const struct i915_request *rq =
+               container_of(pt, const struct i915_request, priotree);
 
-       return i915_gem_request_completed(rq);
+       return i915_request_completed(rq);
 }
 
-/* We treat requests as fences. This is not be to confused with our
+void i915_retire_requests(struct drm_i915_private *i915);
+
+/*
+ * We treat requests as fences. This is not to be confused with our
  * "fence registers" but pipeline synchronisation objects ala GL_ARB_sync.
  * We use the fences to synchronize access from the CPU with activity on the
  * GPU, for example, we should not rewrite an object's PTE whilst the GPU
@@ -380,16 +384,16 @@ static inline bool i915_priotree_signaled(const struct i915_priotree *pt)
 struct i915_gem_active;
 
 typedef void (*i915_gem_retire_fn)(struct i915_gem_active *,
-                                  struct drm_i915_gem_request *);
+                                  struct i915_request *);
 
 struct i915_gem_active {
-       struct drm_i915_gem_request __rcu *request;
+       struct i915_request __rcu *request;
        struct list_head link;
        i915_gem_retire_fn retire;
 };
 
 void i915_gem_retire_noop(struct i915_gem_active *,
-                         struct drm_i915_gem_request *request);
+                         struct i915_request *request);
 
 /**
  * init_request_active - prepares the activity tracker for use
@@ -421,7 +425,7 @@ init_request_active(struct i915_gem_active *active,
  */
 static inline void
 i915_gem_active_set(struct i915_gem_active *active,
-                   struct drm_i915_gem_request *request)
+                   struct i915_request *request)
 {
        list_move(&active->link, &request->active_list);
        rcu_assign_pointer(active->request, request);
@@ -446,10 +450,11 @@ i915_gem_active_set_retire_fn(struct i915_gem_active *active,
        active->retire = fn ?: i915_gem_retire_noop;
 }
 
-static inline struct drm_i915_gem_request *
+static inline struct i915_request *
 __i915_gem_active_peek(const struct i915_gem_active *active)
 {
-       /* Inside the error capture (running with the driver in an unknown
+       /*
+        * Inside the error capture (running with the driver in an unknown
         * state), we want to bend the rules slightly (a lot).
         *
         * Work is in progress to make it safer, in the meantime this keeps
@@ -466,7 +471,7 @@ __i915_gem_active_peek(const struct i915_gem_active *active)
  * It does not obtain a reference on the request for the caller, so the caller
  * must hold struct_mutex.
  */
-static inline struct drm_i915_gem_request *
+static inline struct i915_request *
 i915_gem_active_raw(const struct i915_gem_active *active, struct mutex *mutex)
 {
        return rcu_dereference_protected(active->request,
@@ -481,13 +486,13 @@ i915_gem_active_raw(const struct i915_gem_active *active, struct mutex *mutex)
  * still active, or NULL. It does not obtain a reference on the request
  * for the caller, so the caller must hold struct_mutex.
  */
-static inline struct drm_i915_gem_request *
+static inline struct i915_request *
 i915_gem_active_peek(const struct i915_gem_active *active, struct mutex *mutex)
 {
-       struct drm_i915_gem_request *request;
+       struct i915_request *request;
 
        request = i915_gem_active_raw(active, mutex);
-       if (!request || i915_gem_request_completed(request))
+       if (!request || i915_request_completed(request))
                return NULL;
 
        return request;
@@ -500,10 +505,10 @@ i915_gem_active_peek(const struct i915_gem_active *active, struct mutex *mutex)
  * i915_gem_active_get() returns a reference to the active request, or NULL
  * if the active tracker is idle. The caller must hold struct_mutex.
  */
-static inline struct drm_i915_gem_request *
+static inline struct i915_request *
 i915_gem_active_get(const struct i915_gem_active *active, struct mutex *mutex)
 {
-       return i915_gem_request_get(i915_gem_active_peek(active, mutex));
+       return i915_request_get(i915_gem_active_peek(active, mutex));
 }
 
 /**
@@ -514,10 +519,11 @@ i915_gem_active_get(const struct i915_gem_active *active, struct mutex *mutex)
  * if the active tracker is idle. The caller must hold the RCU read lock, but
  * the returned pointer is safe to use outside of RCU.
  */
-static inline struct drm_i915_gem_request *
+static inline struct i915_request *
 __i915_gem_active_get_rcu(const struct i915_gem_active *active)
 {
-       /* Performing a lockless retrieval of the active request is super
+       /*
+        * Performing a lockless retrieval of the active request is super
         * tricky. SLAB_TYPESAFE_BY_RCU merely guarantees that the backing
         * slab of request objects will not be freed whilst we hold the
         * RCU read lock. It does not guarantee that the request itself
@@ -525,13 +531,13 @@ __i915_gem_active_get_rcu(const struct i915_gem_active *active)
         *
         * Thread A                     Thread B
         *
-        * req = active.request
-        *                              retire(req) -> free(req);
-        *                              (req is now first on the slab freelist)
+        * rq = active.request
+        *                              retire(rq) -> free(rq);
+        *                              (rq is now first on the slab freelist)
         *                              active.request = NULL
         *
-        *                              req = new submission on a new object
-        * ref(req)
+        *                              rq = new submission on a new object
+        * ref(rq)
         *
         * To prevent the request from being reused whilst the caller
         * uses it, we take a reference like normal. Whilst acquiring
@@ -560,32 +566,34 @@ __i915_gem_active_get_rcu(const struct i915_gem_active *active)
         *
         * It is then imperative that we do not zero the request on
         * reallocation, so that we can chase the dangling pointers!
-        * See i915_gem_request_alloc().
+        * See i915_request_alloc().
         */
        do {
-               struct drm_i915_gem_request *request;
+               struct i915_request *request;
 
                request = rcu_dereference(active->request);
-               if (!request || i915_gem_request_completed(request))
+               if (!request || i915_request_completed(request))
                        return NULL;
 
-               /* An especially silly compiler could decide to recompute the
-                * result of i915_gem_request_completed, more specifically
+               /*
+                * An especially silly compiler could decide to recompute the
+                * result of i915_request_completed, more specifically
                 * re-emit the load for request->fence.seqno. A race would catch
                 * a later seqno value, which could flip the result from true to
                 * false. Which means part of the instructions below might not
                 * be executed, while later on instructions are executed. Due to
                 * barriers within the refcounting the inconsistency can't reach
-                * past the call to i915_gem_request_get_rcu, but not executing
-                * that while still executing i915_gem_request_put() creates
+                * past the call to i915_request_get_rcu, but not executing
+                * that while still executing i915_request_put() creates
                 * havoc enough.  Prevent this with a compiler barrier.
                 */
                barrier();
 
-               request = i915_gem_request_get_rcu(request);
+               request = i915_request_get_rcu(request);
 
-               /* What stops the following rcu_access_pointer() from occurring
-                * before the above i915_gem_request_get_rcu()? If we were
+               /*
+                * What stops the following rcu_access_pointer() from occurring
+                * before the above i915_request_get_rcu()? If we were
                 * to read the value before pausing to get the reference to
                 * the request, we may not notice a change in the active
                 * tracker.
@@ -599,9 +607,9 @@ __i915_gem_active_get_rcu(const struct i915_gem_active *active)
                 * compiler.
                 *
                 * The atomic operation at the heart of
-                * i915_gem_request_get_rcu(), see dma_fence_get_rcu(), is
+                * i915_request_get_rcu(), see dma_fence_get_rcu(), is
                 * atomic_inc_not_zero() which is only a full memory barrier
-                * when successful. That is, if i915_gem_request_get_rcu()
+                * when successful. That is, if i915_request_get_rcu()
                 * returns the request (and so with the reference counted
                 * incremented) then the following read for rcu_access_pointer()
                 * must occur after the atomic operation and so confirm
@@ -613,7 +621,7 @@ __i915_gem_active_get_rcu(const struct i915_gem_active *active)
                if (!request || request == rcu_access_pointer(active->request))
                        return rcu_pointer_handoff(request);
 
-               i915_gem_request_put(request);
+               i915_request_put(request);
        } while (1);
 }
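
The reuse hazard this comment keeps circling back to comes from the request slab being typesafe by RCU: freed requests can be handed out again without an intervening grace period, which is why only the final identity recheck makes the acquired reference trustworthy. A rough sketch of how such a cache is created (the variable name is illustrative; the flag that matters here is SLAB_TYPESAFE_BY_RCU):

struct kmem_cache *requests;

requests = kmem_cache_create("i915_request",
                             sizeof(struct i915_request), 0,
                             SLAB_HWCACHE_ALIGN | SLAB_TYPESAFE_BY_RCU,
                             NULL);
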
 
@@ -625,12 +633,12 @@ __i915_gem_active_get_rcu(const struct i915_gem_active *active)
  * or NULL if the active tracker is idle. The reference is obtained under RCU,
  * so no locking is required by the caller.
  *
- * The reference should be freed with i915_gem_request_put().
+ * The reference should be freed with i915_request_put().
  */
-static inline struct drm_i915_gem_request *
+static inline struct i915_request *
 i915_gem_active_get_unlocked(const struct i915_gem_active *active)
 {
-       struct drm_i915_gem_request *request;
+       struct i915_request *request;
 
        rcu_read_lock();
        request = __i915_gem_active_get_rcu(active);
@@ -670,7 +678,7 @@ i915_gem_active_isset(const struct i915_gem_active *active)
  * can then wait upon the request, and afterwards release our reference,
  * free of any locking.
  *
- * This function wraps i915_wait_request(), see it for the full details on
+ * This function wraps i915_request_wait(), see it for the full details on
  * the arguments.
  *
  * Returns 0 if successful, or a negative error code.
@@ -678,13 +686,13 @@ i915_gem_active_isset(const struct i915_gem_active *active)
 static inline int
 i915_gem_active_wait(const struct i915_gem_active *active, unsigned int flags)
 {
-       struct drm_i915_gem_request *request;
+       struct i915_request *request;
        long ret = 0;
 
        request = i915_gem_active_get_unlocked(active);
        if (request) {
-               ret = i915_wait_request(request, flags, MAX_SCHEDULE_TIMEOUT);
-               i915_gem_request_put(request);
+               ret = i915_request_wait(request, flags, MAX_SCHEDULE_TIMEOUT);
+               i915_request_put(request);
        }
 
        return ret < 0 ? ret : 0;
@@ -703,14 +711,14 @@ static inline int __must_check
 i915_gem_active_retire(struct i915_gem_active *active,
                       struct mutex *mutex)
 {
-       struct drm_i915_gem_request *request;
+       struct i915_request *request;
        long ret;
 
        request = i915_gem_active_raw(active, mutex);
        if (!request)
                return 0;
 
-       ret = i915_wait_request(request,
+       ret = i915_request_wait(request,
                                I915_WAIT_INTERRUPTIBLE | I915_WAIT_LOCKED,
                                MAX_SCHEDULE_TIMEOUT);
        if (ret < 0)
@@ -727,4 +735,4 @@ i915_gem_active_retire(struct i915_gem_active *active,
 #define for_each_active(mask, idx) \
        for (; mask ? idx = ffs(mask) - 1, 1 : 0; mask &= ~BIT(idx))
 
-#endif /* I915_GEM_REQUEST_H */
+#endif /* I915_REQUEST_H */
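
For reference, the i915_gem_active helpers declared above are typically consumed in the following shape. This is a minimal sketch: the tracker struct and function names are made up, and only the i915_gem_active/i915_request calls come from this header.

struct obj_tracker {
        struct i915_gem_active active;
};

static void obj_tracker_retire(struct i915_gem_active *active,
                               struct i915_request *rq)
{
        /* drop whatever bookkeeping was tied to the completed request */
}

static void obj_tracker_init(struct obj_tracker *t)
{
        init_request_active(&t->active, obj_tracker_retire);
}

/* under struct_mutex, alongside i915_request_add() for the new request */
static void obj_tracker_move(struct obj_tracker *t, struct i915_request *rq)
{
        i915_gem_active_set(&t->active, rq);
}

/* may run without struct_mutex; takes and drops its own reference */
static int obj_tracker_wait(struct obj_tracker *t)
{
        return i915_gem_active_wait(&t->active, I915_WAIT_INTERRUPTIBLE);
}
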
index e1169c0..408827b 100644 (file)
@@ -586,8 +586,7 @@ TRACE_EVENT(i915_gem_evict_vm,
 );
 
 TRACE_EVENT(i915_gem_ring_sync_to,
-           TP_PROTO(struct drm_i915_gem_request *to,
-                    struct drm_i915_gem_request *from),
+           TP_PROTO(struct i915_request *to, struct i915_request *from),
            TP_ARGS(to, from),
 
            TP_STRUCT__entry(
@@ -610,9 +609,9 @@ TRACE_EVENT(i915_gem_ring_sync_to,
                      __entry->seqno)
 );
 
-TRACE_EVENT(i915_gem_request_queue,
-           TP_PROTO(struct drm_i915_gem_request *req, u32 flags),
-           TP_ARGS(req, flags),
+TRACE_EVENT(i915_request_queue,
+           TP_PROTO(struct i915_request *rq, u32 flags),
+           TP_ARGS(rq, flags),
 
            TP_STRUCT__entry(
                             __field(u32, dev)
@@ -624,11 +623,11 @@ TRACE_EVENT(i915_gem_request_queue,
                             ),
 
            TP_fast_assign(
-                          __entry->dev = req->i915->drm.primary->index;
-                          __entry->hw_id = req->ctx->hw_id;
-                          __entry->ring = req->engine->id;
-                          __entry->ctx = req->fence.context;
-                          __entry->seqno = req->fence.seqno;
+                          __entry->dev = rq->i915->drm.primary->index;
+                          __entry->hw_id = rq->ctx->hw_id;
+                          __entry->ring = rq->engine->id;
+                          __entry->ctx = rq->fence.context;
+                          __entry->seqno = rq->fence.seqno;
                           __entry->flags = flags;
                           ),
 
@@ -637,9 +636,9 @@ TRACE_EVENT(i915_gem_request_queue,
                      __entry->seqno, __entry->flags)
 );
 
-DECLARE_EVENT_CLASS(i915_gem_request,
-           TP_PROTO(struct drm_i915_gem_request *req),
-           TP_ARGS(req),
+DECLARE_EVENT_CLASS(i915_request,
+           TP_PROTO(struct i915_request *rq),
+           TP_ARGS(rq),
 
            TP_STRUCT__entry(
                             __field(u32, dev)
@@ -651,12 +650,12 @@ DECLARE_EVENT_CLASS(i915_gem_request,
                             ),
 
            TP_fast_assign(
-                          __entry->dev = req->i915->drm.primary->index;
-                          __entry->hw_id = req->ctx->hw_id;
-                          __entry->ring = req->engine->id;
-                          __entry->ctx = req->fence.context;
-                          __entry->seqno = req->fence.seqno;
-                          __entry->global = req->global_seqno;
+                          __entry->dev = rq->i915->drm.primary->index;
+                          __entry->hw_id = rq->ctx->hw_id;
+                          __entry->ring = rq->engine->id;
+                          __entry->ctx = rq->fence.context;
+                          __entry->seqno = rq->fence.seqno;
+                          __entry->global = rq->global_seqno;
                           ),
 
            TP_printk("dev=%u, hw_id=%u, ring=%u, ctx=%u, seqno=%u, global=%u",
@@ -664,26 +663,25 @@ DECLARE_EVENT_CLASS(i915_gem_request,
                      __entry->seqno, __entry->global)
 );
 
-DEFINE_EVENT(i915_gem_request, i915_gem_request_add,
-           TP_PROTO(struct drm_i915_gem_request *req),
-           TP_ARGS(req)
+DEFINE_EVENT(i915_request, i915_request_add,
+           TP_PROTO(struct i915_request *rq),
+           TP_ARGS(rq)
 );
 
 #if defined(CONFIG_DRM_I915_LOW_LEVEL_TRACEPOINTS)
-DEFINE_EVENT(i915_gem_request, i915_gem_request_submit,
-            TP_PROTO(struct drm_i915_gem_request *req),
-            TP_ARGS(req)
+DEFINE_EVENT(i915_request, i915_request_submit,
+            TP_PROTO(struct i915_request *rq),
+            TP_ARGS(rq)
 );
 
-DEFINE_EVENT(i915_gem_request, i915_gem_request_execute,
-            TP_PROTO(struct drm_i915_gem_request *req),
-            TP_ARGS(req)
+DEFINE_EVENT(i915_request, i915_request_execute,
+            TP_PROTO(struct i915_request *rq),
+            TP_ARGS(rq)
 );
 
-DECLARE_EVENT_CLASS(i915_gem_request_hw,
-                   TP_PROTO(struct drm_i915_gem_request *req,
-                            unsigned int port),
-                   TP_ARGS(req, port),
+DECLARE_EVENT_CLASS(i915_request_hw,
+                   TP_PROTO(struct i915_request *rq, unsigned int port),
+                   TP_ARGS(rq, port),
 
                    TP_STRUCT__entry(
                                     __field(u32, dev)
@@ -696,14 +694,14 @@ DECLARE_EVENT_CLASS(i915_gem_request_hw,
                                    ),
 
                    TP_fast_assign(
-                                  __entry->dev = req->i915->drm.primary->index;
-                                  __entry->hw_id = req->ctx->hw_id;
-                                  __entry->ring = req->engine->id;
-                                  __entry->ctx = req->fence.context;
-                                  __entry->seqno = req->fence.seqno;
-                                  __entry->global_seqno = req->global_seqno;
-                                  __entry->port = port;
-                                 ),
+                                  __entry->dev = rq->i915->drm.primary->index;
+                                  __entry->hw_id = rq->ctx->hw_id;
+                                  __entry->ring = rq->engine->id;
+                                  __entry->ctx = rq->fence.context;
+                                  __entry->seqno = rq->fence.seqno;
+                                  __entry->global_seqno = rq->global_seqno;
+                                  __entry->port = port;
+                                 ),
 
                    TP_printk("dev=%u, hw_id=%u, ring=%u, ctx=%u, seqno=%u, global=%u, port=%u",
                              __entry->dev, __entry->hw_id, __entry->ring,
@@ -711,34 +709,34 @@ DECLARE_EVENT_CLASS(i915_gem_request_hw,
                              __entry->global_seqno, __entry->port)
 );
 
-DEFINE_EVENT(i915_gem_request_hw, i915_gem_request_in,
-            TP_PROTO(struct drm_i915_gem_request *req, unsigned int port),
-            TP_ARGS(req, port)
+DEFINE_EVENT(i915_request_hw, i915_request_in,
+            TP_PROTO(struct i915_request *rq, unsigned int port),
+            TP_ARGS(rq, port)
 );
 
-DEFINE_EVENT(i915_gem_request, i915_gem_request_out,
-            TP_PROTO(struct drm_i915_gem_request *req),
-            TP_ARGS(req)
+DEFINE_EVENT(i915_request, i915_request_out,
+            TP_PROTO(struct i915_request *rq),
+            TP_ARGS(rq)
 );
 #else
 #if !defined(TRACE_HEADER_MULTI_READ)
 static inline void
-trace_i915_gem_request_submit(struct drm_i915_gem_request *req)
+trace_i915_request_submit(struct i915_request *rq)
 {
 }
 
 static inline void
-trace_i915_gem_request_execute(struct drm_i915_gem_request *req)
+trace_i915_request_execute(struct i915_request *rq)
 {
 }
 
 static inline void
-trace_i915_gem_request_in(struct drm_i915_gem_request *req, unsigned int port)
+trace_i915_request_in(struct i915_request *rq, unsigned int port)
 {
 }
 
 static inline void
-trace_i915_gem_request_out(struct drm_i915_gem_request *req)
+trace_i915_request_out(struct i915_request *rq)
 {
 }
 #endif
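
Whether the low-level events are compiled in or stubbed out as above, call sites stay the same, since every DEFINE_EVENT(name) (or its stub) is invoked as trace_<name>(). An illustrative, not verbatim, call site:

static void notify_submit(struct i915_request *rq)
{
        trace_i915_request_submit(rq);
}
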
@@ -767,14 +765,14 @@ TRACE_EVENT(intel_engine_notify,
                      __entry->waiters)
 );
 
-DEFINE_EVENT(i915_gem_request, i915_gem_request_retire,
-           TP_PROTO(struct drm_i915_gem_request *req),
-           TP_ARGS(req)
+DEFINE_EVENT(i915_request, i915_request_retire,
+           TP_PROTO(struct i915_request *rq),
+           TP_ARGS(rq)
 );
 
-TRACE_EVENT(i915_gem_request_wait_begin,
-           TP_PROTO(struct drm_i915_gem_request *req, unsigned int flags),
-           TP_ARGS(req, flags),
+TRACE_EVENT(i915_request_wait_begin,
+           TP_PROTO(struct i915_request *rq, unsigned int flags),
+           TP_ARGS(rq, flags),
 
            TP_STRUCT__entry(
                             __field(u32, dev)
@@ -793,12 +791,12 @@ TRACE_EVENT(i915_gem_request_wait_begin,
             * less desirable.
             */
            TP_fast_assign(
-                          __entry->dev = req->i915->drm.primary->index;
-                          __entry->hw_id = req->ctx->hw_id;
-                          __entry->ring = req->engine->id;
-                          __entry->ctx = req->fence.context;
-                          __entry->seqno = req->fence.seqno;
-                          __entry->global = req->global_seqno;
+                          __entry->dev = rq->i915->drm.primary->index;
+                          __entry->hw_id = rq->ctx->hw_id;
+                          __entry->ring = rq->engine->id;
+                          __entry->ctx = rq->fence.context;
+                          __entry->seqno = rq->fence.seqno;
+                          __entry->global = rq->global_seqno;
                           __entry->flags = flags;
                           ),
 
@@ -808,9 +806,9 @@ TRACE_EVENT(i915_gem_request_wait_begin,
                      !!(__entry->flags & I915_WAIT_LOCKED), __entry->flags)
 );
 
-DEFINE_EVENT(i915_gem_request, i915_gem_request_wait_end,
-           TP_PROTO(struct drm_i915_gem_request *req),
-           TP_ARGS(req)
+DEFINE_EVENT(i915_request, i915_request_wait_end,
+           TP_PROTO(struct i915_request *rq),
+           TP_ARGS(rq)
 );
 
 TRACE_EVENT(i915_flip_request,
index e0e7c48..4bda3bd 100644 (file)
@@ -31,8 +31,7 @@
 #include <drm/drm_gem.h>
 
 static void
-i915_vma_retire(struct i915_gem_active *active,
-               struct drm_i915_gem_request *rq)
+i915_vma_retire(struct i915_gem_active *active, struct i915_request *rq)
 {
        const unsigned int idx = rq->engine->id;
        struct i915_vma *vma =
index fd5b849..8c50220 100644 (file)
@@ -32,8 +32,8 @@
 #include "i915_gem_gtt.h"
 #include "i915_gem_fence_reg.h"
 #include "i915_gem_object.h"
-#include "i915_gem_request.h"
 
+#include "i915_request.h"
 
 enum i915_cache_level;
 
index b955f7d..a836906 100644 (file)
@@ -588,7 +588,7 @@ void intel_engine_remove_wait(struct intel_engine_cs *engine,
        spin_unlock_irq(&b->rb_lock);
 }
 
-static bool signal_complete(const struct drm_i915_gem_request *request)
+static bool signal_complete(const struct i915_request *request)
 {
        if (!request)
                return false;
@@ -600,9 +600,9 @@ static bool signal_complete(const struct drm_i915_gem_request *request)
        return __i915_request_irq_complete(request);
 }
 
-static struct drm_i915_gem_request *to_signaler(struct rb_node *rb)
+static struct i915_request *to_signaler(struct rb_node *rb)
 {
-       return rb_entry(rb, struct drm_i915_gem_request, signaling.node);
+       return rb_entry(rb, struct i915_request, signaling.node);
 }
 
 static void signaler_set_rtpriority(void)
@@ -613,7 +613,7 @@ static void signaler_set_rtpriority(void)
 }
 
 static void __intel_engine_remove_signal(struct intel_engine_cs *engine,
-                                        struct drm_i915_gem_request *request)
+                                        struct i915_request *request)
 {
        struct intel_breadcrumbs *b = &engine->breadcrumbs;
 
@@ -644,7 +644,7 @@ static void __intel_engine_remove_signal(struct intel_engine_cs *engine,
        }
 }
 
-static struct drm_i915_gem_request *
+static struct i915_request *
 get_first_signal_rcu(struct intel_breadcrumbs *b)
 {
        /*
@@ -654,18 +654,18 @@ get_first_signal_rcu(struct intel_breadcrumbs *b)
         * the required memory barriers.
         */
        do {
-               struct drm_i915_gem_request *request;
+               struct i915_request *request;
 
                request = rcu_dereference(b->first_signal);
                if (request)
-                       request = i915_gem_request_get_rcu(request);
+                       request = i915_request_get_rcu(request);
 
                barrier();
 
                if (!request || request == rcu_access_pointer(b->first_signal))
                        return rcu_pointer_handoff(request);
 
-               i915_gem_request_put(request);
+               i915_request_put(request);
        } while (1);
 }
 
@@ -673,7 +673,7 @@ static int intel_breadcrumbs_signaler(void *arg)
 {
        struct intel_engine_cs *engine = arg;
        struct intel_breadcrumbs *b = &engine->breadcrumbs;
-       struct drm_i915_gem_request *request;
+       struct i915_request *request;
 
        /* Install ourselves with high priority to reduce signalling latency */
        signaler_set_rtpriority();
@@ -699,7 +699,7 @@ static int intel_breadcrumbs_signaler(void *arg)
                                      &request->fence.flags)) {
                                local_bh_disable();
                                dma_fence_signal(&request->fence);
-                               GEM_BUG_ON(!i915_gem_request_completed(request));
+                               GEM_BUG_ON(!i915_request_completed(request));
                                local_bh_enable(); /* kick start the tasklets */
                        }
 
@@ -718,7 +718,7 @@ static int intel_breadcrumbs_signaler(void *arg)
                         */
                        do_schedule = need_resched();
                }
-               i915_gem_request_put(request);
+               i915_request_put(request);
 
                if (unlikely(do_schedule)) {
                        if (kthread_should_park())
@@ -735,8 +735,7 @@ static int intel_breadcrumbs_signaler(void *arg)
        return 0;
 }
 
-void intel_engine_enable_signaling(struct drm_i915_gem_request *request,
-                                  bool wakeup)
+void intel_engine_enable_signaling(struct i915_request *request, bool wakeup)
 {
        struct intel_engine_cs *engine = request->engine;
        struct intel_breadcrumbs *b = &engine->breadcrumbs;
@@ -753,7 +752,7 @@ void intel_engine_enable_signaling(struct drm_i915_gem_request *request,
        GEM_BUG_ON(!irqs_disabled());
        lockdep_assert_held(&request->lock);
 
-       seqno = i915_gem_request_global_seqno(request);
+       seqno = i915_request_global_seqno(request);
        if (!seqno)
                return;
 
@@ -774,7 +773,7 @@ void intel_engine_enable_signaling(struct drm_i915_gem_request *request,
         */
        wakeup &= __intel_engine_add_wait(engine, &request->signaling.wait);
 
-       if (!__i915_gem_request_completed(request, seqno)) {
+       if (!__i915_request_completed(request, seqno)) {
                struct rb_node *parent, **p;
                bool first;
 
@@ -811,7 +810,7 @@ void intel_engine_enable_signaling(struct drm_i915_gem_request *request,
                wake_up_process(b->signaler);
 }
 
-void intel_engine_cancel_signaling(struct drm_i915_gem_request *request)
+void intel_engine_cancel_signaling(struct i915_request *request)
 {
        GEM_BUG_ON(!irqs_disabled());
        lockdep_assert_held(&request->lock);
index aa66e95..89ab0f7 100644 (file)
  * of the CTM coefficient and we write the value from bit 3. We also round the
  * value.
  */
-#define I9XX_CSC_COEFF_FP(coeff, fbits)        \
+#define ILK_CSC_COEFF_FP(coeff, fbits) \
        (clamp_val(((coeff) >> (32 - (fbits) - 3)) + 4, 0, 0xfff) & 0xff8)
 
-#define I9XX_CSC_COEFF_LIMITED_RANGE   \
-       I9XX_CSC_COEFF_FP(CTM_COEFF_LIMITED_RANGE, 9)
-#define I9XX_CSC_COEFF_1_0             \
-       ((7 << 12) | I9XX_CSC_COEFF_FP(CTM_COEFF_1_0, 8))
+#define ILK_CSC_COEFF_LIMITED_RANGE    \
+       ILK_CSC_COEFF_FP(CTM_COEFF_LIMITED_RANGE, 9)
+#define ILK_CSC_COEFF_1_0              \
+       ((7 << 12) | ILK_CSC_COEFF_FP(CTM_COEFF_1_0, 8))
 
 static bool crtc_state_is_legacy_gamma(struct drm_crtc_state *state)
 {
@@ -84,30 +84,31 @@ static bool crtc_state_is_legacy_gamma(struct drm_crtc_state *state)
 
 /*
  * When using limited range, multiply the matrix given by userspace by
- * the matrix that we would use for the limited range. We do the
- * multiplication in U2.30 format.
+ * the matrix that we would use for the limited range.
  */
-static void ctm_mult_by_limited(uint64_t *result, int64_t *input)
+static u64 *ctm_mult_by_limited(u64 *result, const u64 *input)
 {
        int i;
 
-       for (i = 0; i < 9; i++)
-               result[i] = 0;
+       for (i = 0; i < 9; i++) {
+               u64 user_coeff = input[i];
+               u32 limited_coeff = CTM_COEFF_LIMITED_RANGE;
+               u32 abs_coeff = clamp_val(CTM_COEFF_ABS(user_coeff), 0,
+                                         CTM_COEFF_4_0 - 1) >> 2;
 
-       for (i = 0; i < 3; i++) {
-               int64_t user_coeff = input[i * 3 + i];
-               uint64_t limited_coeff = CTM_COEFF_LIMITED_RANGE >> 2;
-               uint64_t abs_coeff = clamp_val(CTM_COEFF_ABS(user_coeff),
-                                              0,
-                                              CTM_COEFF_4_0 - 1) >> 2;
-
-               result[i * 3 + i] = (limited_coeff * abs_coeff) >> 27;
-               if (CTM_COEFF_NEGATIVE(user_coeff))
-                       result[i * 3 + i] |= CTM_COEFF_SIGN;
+               /*
+                * By scaling every coefficient by the limited range (16-235)
+                * vs. the full range (0-255), the final output is scaled down
+                * to fit within the limited range supported by the panel.
+                */
+               result[i] = mul_u32_u32(limited_coeff, abs_coeff) >> 30;
+               result[i] |= user_coeff & CTM_COEFF_SIGN;
        }
+
+       return result;
 }
 
-static void i9xx_load_ycbcr_conversion_matrix(struct intel_crtc *intel_crtc)
+static void ilk_load_ycbcr_conversion_matrix(struct intel_crtc *intel_crtc)
 {
        int pipe = intel_crtc->pipe;
        struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
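
As a sanity check of the fixed-point formats in ctm_mult_by_limited() above, assume the DRM CTM convention of a sign bit plus a 31.32 magnitude, with CTM_COEFF_LIMITED_RANGE encoding 219/255 for the 16-235 span. An identity coefficient of 1.0 has magnitude 1ULL << 32; clamping below 4.0 and shifting right by 2 leaves 0x40000000, i.e. 1.0 in a .30 format. Multiplying by the roughly 0.8588 limited-range factor, still in .32 format, gives a .62 product, and the final >> 30 brings it back to .32, so an identity matrix turns into a uniform ~0.8588 scale, which is precisely the range compression the comment describes.
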
@@ -131,8 +132,7 @@ static void i9xx_load_ycbcr_conversion_matrix(struct intel_crtc *intel_crtc)
        I915_WRITE(PIPE_CSC_MODE(pipe), 0);
 }
 
-/* Set up the pipe CSC unit. */
-static void i9xx_load_csc_matrix(struct drm_crtc_state *crtc_state)
+static void ilk_load_csc_matrix(struct drm_crtc_state *crtc_state)
 {
        struct drm_crtc *crtc = crtc_state->crtc;
        struct drm_i915_private *dev_priv = to_i915(crtc->dev);
@@ -140,21 +140,28 @@ static void i9xx_load_csc_matrix(struct drm_crtc_state *crtc_state)
        int i, pipe = intel_crtc->pipe;
        uint16_t coeffs[9] = { 0, };
        struct intel_crtc_state *intel_crtc_state = to_intel_crtc_state(crtc_state);
+       bool limited_color_range = false;
+
+       /*
+        * FIXME if there's a gamma LUT after the CSC, we should
+        * do the range compression using the gamma LUT instead.
+        */
+       if (INTEL_GEN(dev_priv) >= 8 || IS_HASWELL(dev_priv))
+               limited_color_range = intel_crtc_state->limited_color_range;
 
        if (intel_crtc_state->ycbcr420) {
-               i9xx_load_ycbcr_conversion_matrix(intel_crtc);
+               ilk_load_ycbcr_conversion_matrix(intel_crtc);
                return;
        } else if (crtc_state->ctm) {
                struct drm_color_ctm *ctm =
                        (struct drm_color_ctm *)crtc_state->ctm->data;
-               uint64_t input[9] = { 0, };
+               const u64 *input;
+               u64 temp[9];
 
-               if (intel_crtc_state->limited_color_range) {
-                       ctm_mult_by_limited(input, ctm->matrix);
-               } else {
-                       for (i = 0; i < ARRAY_SIZE(input); i++)
-                               input[i] = ctm->matrix[i];
-               }
+               if (limited_color_range)
+                       input = ctm_mult_by_limited(temp, ctm->matrix);
+               else
+                       input = ctm->matrix;
 
                /*
                 * Convert fixed point S31.32 input to format supported by the
@@ -175,21 +182,21 @@ static void i9xx_load_csc_matrix(struct drm_crtc_state *crtc_state)
 
                        if (abs_coeff < CTM_COEFF_0_125)
                                coeffs[i] |= (3 << 12) |
-                                       I9XX_CSC_COEFF_FP(abs_coeff, 12);
+                                       ILK_CSC_COEFF_FP(abs_coeff, 12);
                        else if (abs_coeff < CTM_COEFF_0_25)
                                coeffs[i] |= (2 << 12) |
-                                       I9XX_CSC_COEFF_FP(abs_coeff, 11);
+                                       ILK_CSC_COEFF_FP(abs_coeff, 11);
                        else if (abs_coeff < CTM_COEFF_0_5)
                                coeffs[i] |= (1 << 12) |
-                                       I9XX_CSC_COEFF_FP(abs_coeff, 10);
+                                       ILK_CSC_COEFF_FP(abs_coeff, 10);
                        else if (abs_coeff < CTM_COEFF_1_0)
-                               coeffs[i] |= I9XX_CSC_COEFF_FP(abs_coeff, 9);
+                               coeffs[i] |= ILK_CSC_COEFF_FP(abs_coeff, 9);
                        else if (abs_coeff < CTM_COEFF_2_0)
                                coeffs[i] |= (7 << 12) |
-                                       I9XX_CSC_COEFF_FP(abs_coeff, 8);
+                                       ILK_CSC_COEFF_FP(abs_coeff, 8);
                        else
                                coeffs[i] |= (6 << 12) |
-                                       I9XX_CSC_COEFF_FP(abs_coeff, 7);
+                                       ILK_CSC_COEFF_FP(abs_coeff, 7);
                }
        } else {
                /*
@@ -201,11 +208,11 @@ static void i9xx_load_csc_matrix(struct drm_crtc_state *crtc_state)
                 * into consideration.
                 */
                for (i = 0; i < 3; i++) {
-                       if (intel_crtc_state->limited_color_range)
+                       if (limited_color_range)
                                coeffs[i * 3 + i] =
-                                       I9XX_CSC_COEFF_LIMITED_RANGE;
+                                       ILK_CSC_COEFF_LIMITED_RANGE;
                        else
-                               coeffs[i * 3 + i] = I9XX_CSC_COEFF_1_0;
+                               coeffs[i * 3 + i] = ILK_CSC_COEFF_1_0;
                }
        }
 
@@ -225,7 +232,7 @@ static void i9xx_load_csc_matrix(struct drm_crtc_state *crtc_state)
        if (INTEL_GEN(dev_priv) > 6) {
                uint16_t postoff = 0;
 
-               if (intel_crtc_state->limited_color_range)
+               if (limited_color_range)
                        postoff = (16 * (1 << 12) / 255) & 0x1fff;
 
                I915_WRITE(PIPE_CSC_POSTOFF_HI(pipe), postoff);
@@ -236,7 +243,7 @@ static void i9xx_load_csc_matrix(struct drm_crtc_state *crtc_state)
        } else {
                uint32_t mode = CSC_MODE_YUV_TO_RGB;
 
-               if (intel_crtc_state->limited_color_range)
+               if (limited_color_range)
                        mode |= CSC_BLACK_SCREEN_OFFSET;
 
                I915_WRITE(PIPE_CSC_MODE(pipe), mode);
@@ -651,14 +658,14 @@ void intel_color_init(struct drm_crtc *crtc)
                dev_priv->display.load_csc_matrix = cherryview_load_csc_matrix;
                dev_priv->display.load_luts = cherryview_load_luts;
        } else if (IS_HASWELL(dev_priv)) {
-               dev_priv->display.load_csc_matrix = i9xx_load_csc_matrix;
+               dev_priv->display.load_csc_matrix = ilk_load_csc_matrix;
                dev_priv->display.load_luts = haswell_load_luts;
        } else if (IS_BROADWELL(dev_priv) || IS_GEN9_BC(dev_priv) ||
                   IS_BROXTON(dev_priv)) {
-               dev_priv->display.load_csc_matrix = i9xx_load_csc_matrix;
+               dev_priv->display.load_csc_matrix = ilk_load_csc_matrix;
                dev_priv->display.load_luts = broadwell_load_luts;
        } else if (IS_GEMINILAKE(dev_priv) || IS_CANNONLAKE(dev_priv)) {
-               dev_priv->display.load_csc_matrix = i9xx_load_csc_matrix;
+               dev_priv->display.load_csc_matrix = ilk_load_csc_matrix;
                dev_priv->display.load_luts = glk_load_luts;
        } else {
                dev_priv->display.load_luts = i9xx_load_luts;
index 298f899..1c780cc 100644 (file)
@@ -357,6 +357,59 @@ static void broadwell_sseu_info_init(struct drm_i915_private *dev_priv)
        sseu->has_eu_pg = 0;
 }
 
+static void haswell_sseu_info_init(struct drm_i915_private *dev_priv)
+{
+       struct intel_device_info *info = mkwrite_device_info(dev_priv);
+       struct sseu_dev_info *sseu = &info->sseu;
+       u32 fuse1;
+
+       /*
+        * There isn't a register to tell us how many slices/subslices. We
+        * work off the PCI-ids here.
+        */
+       switch (info->gt) {
+       default:
+               MISSING_CASE(info->gt);
+               /* fall through */
+       case 1:
+               sseu->slice_mask = BIT(0);
+               sseu->subslice_mask = BIT(0);
+               break;
+       case 2:
+               sseu->slice_mask = BIT(0);
+               sseu->subslice_mask = BIT(0) | BIT(1);
+               break;
+       case 3:
+               sseu->slice_mask = BIT(0) | BIT(1);
+               sseu->subslice_mask = BIT(0) | BIT(1);
+               break;
+       }
+
+       fuse1 = I915_READ(HSW_PAVP_FUSE1);
+       switch ((fuse1 & HSW_F1_EU_DIS_MASK) >> HSW_F1_EU_DIS_SHIFT) {
+       default:
+               MISSING_CASE((fuse1 & HSW_F1_EU_DIS_MASK) >>
+                            HSW_F1_EU_DIS_SHIFT);
+               /* fall through */
+       case HSW_F1_EU_DIS_10EUS:
+               sseu->eu_per_subslice = 10;
+               break;
+       case HSW_F1_EU_DIS_8EUS:
+               sseu->eu_per_subslice = 8;
+               break;
+       case HSW_F1_EU_DIS_6EUS:
+               sseu->eu_per_subslice = 6;
+               break;
+       }
+
+       sseu->eu_total = sseu_subslice_total(sseu) * sseu->eu_per_subslice;
+
+       /* No powergating for you. */
+       sseu->has_slice_pg = 0;
+       sseu->has_subslice_pg = 0;
+       sseu->has_eu_pg = 0;
+}
+
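
As a cross-check of the new Haswell path, and assuming sseu_subslice_total() is the slice count multiplied by the subslice count: GT2 reports one slice with two subslices and, unfused, 10 EUs per subslice, so eu_total comes out to 2 * 10 = 20; GT3 gives 2 * 2 * 10 = 40 and GT1 gives 10, matching the known Haswell EU configurations.
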
 static u32 read_reference_ts_freq(struct drm_i915_private *dev_priv)
 {
        u32 ts_override = I915_READ(GEN9_TIMESTAMP_OVERRIDE);
@@ -574,7 +627,9 @@ void intel_device_info_runtime_init(struct intel_device_info *info)
        }
 
        /* Initialize slice/subslice/EU info */
-       if (IS_CHERRYVIEW(dev_priv))
+       if (IS_HASWELL(dev_priv))
+               haswell_sseu_info_init(dev_priv);
+       else if (IS_CHERRYVIEW(dev_priv))
                cherryview_sseu_info_init(dev_priv);
        else if (IS_BROADWELL(dev_priv))
                broadwell_sseu_info_init(dev_priv);
index 71fdfb0..8904ad8 100644 (file)
@@ -137,14 +137,14 @@ struct intel_device_info {
        enum intel_platform platform;
        u32 platform_mask;
 
+       unsigned int page_sizes; /* page sizes supported by the HW */
+
        u32 display_mmio_offset;
 
        u8 num_pipes;
        u8 num_sprites[I915_MAX_PIPES];
        u8 num_scalers[I915_MAX_PIPES];
 
-       unsigned int page_sizes; /* page sizes supported by the HW */
-
 #define DEFINE_FLAG(name) u8 name:1
        DEV_INFO_FOR_EACH_FLAG(DEFINE_FLAG);
 #undef DEFINE_FLAG
index 5d46771..65c8487 100644 (file)
@@ -2067,9 +2067,18 @@ static unsigned int intel_surf_alignment(const struct drm_framebuffer *fb,
        }
 }
 
+static bool intel_plane_uses_fence(const struct intel_plane_state *plane_state)
+{
+       struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
+       struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+
+       return INTEL_GEN(dev_priv) < 4 || plane->has_fbc;
+}
+
 struct i915_vma *
 intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
                           unsigned int rotation,
+                          bool uses_fence,
                           unsigned long *out_flags)
 {
        struct drm_device *dev = fb->dev;
@@ -2122,7 +2131,9 @@ intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
        if (IS_ERR(vma))
                goto err;
 
-       if (i915_vma_is_map_and_fenceable(vma)) {
+       if (uses_fence && i915_vma_is_map_and_fenceable(vma)) {
+               int ret;
+
                /* Install a fence for tiled scan-out. Pre-i965 always needs a
                 * fence, whereas 965+ only requires a fence if using
                 * framebuffer compression.  For simplicity, we always, when
@@ -2139,7 +2150,13 @@ intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
                 * something and try to run the system in a "less than optimal"
                 * mode that matches the user configuration.
                 */
-               if (i915_vma_pin_fence(vma) == 0 && vma->fence)
+               ret = i915_vma_pin_fence(vma);
+               if (ret != 0 && INTEL_GEN(dev_priv) < 4) {
+                       vma = ERR_PTR(ret);
+                       goto err;
+               }
+
+               if (ret == 0 && vma->fence)
                        *out_flags |= PLANE_HAS_FENCE;
        }
 
@@ -2828,6 +2845,7 @@ valid_fb:
        intel_state->vma =
                intel_pin_and_fence_fb_obj(fb,
                                           primary->state->rotation,
+                                          intel_plane_uses_fence(intel_state),
                                           &intel_state->flags);
        mutex_unlock(&dev->struct_mutex);
        if (IS_ERR(intel_state->vma)) {
@@ -12598,23 +12616,23 @@ struct wait_rps_boost {
        struct wait_queue_entry wait;
 
        struct drm_crtc *crtc;
-       struct drm_i915_gem_request *request;
+       struct i915_request *request;
 };
 
 static int do_rps_boost(struct wait_queue_entry *_wait,
                        unsigned mode, int sync, void *key)
 {
        struct wait_rps_boost *wait = container_of(_wait, typeof(*wait), wait);
-       struct drm_i915_gem_request *rq = wait->request;
+       struct i915_request *rq = wait->request;
 
        /*
         * If we missed the vblank, but the request is already running it
         * is reasonable to assume that it will complete before the next
         * vblank without our intervention, so leave RPS alone.
         */
-       if (!i915_gem_request_started(rq))
+       if (!i915_request_started(rq))
                gen6_rps_boost(rq, NULL);
-       i915_gem_request_put(rq);
+       i915_request_put(rq);
 
        drm_crtc_vblank_put(wait->crtc);
 
@@ -12652,6 +12670,42 @@ static void add_rps_boost_after_vblank(struct drm_crtc *crtc,
        add_wait_queue(drm_crtc_vblank_waitqueue(crtc), &wait->wait);
 }
 
+static int intel_plane_pin_fb(struct intel_plane_state *plane_state)
+{
+       struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
+       struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+       struct drm_framebuffer *fb = plane_state->base.fb;
+       struct i915_vma *vma;
+
+       if (plane->id == PLANE_CURSOR &&
+           INTEL_INFO(dev_priv)->cursor_needs_physical) {
+               struct drm_i915_gem_object *obj = intel_fb_obj(fb);
+               const int align = intel_cursor_alignment(dev_priv);
+
+               return i915_gem_object_attach_phys(obj, align);
+       }
+
+       vma = intel_pin_and_fence_fb_obj(fb,
+                                        plane_state->base.rotation,
+                                        intel_plane_uses_fence(plane_state),
+                                        &plane_state->flags);
+       if (IS_ERR(vma))
+               return PTR_ERR(vma);
+
+       plane_state->vma = vma;
+
+       return 0;
+}
+
+static void intel_plane_unpin_fb(struct intel_plane_state *old_plane_state)
+{
+       struct i915_vma *vma;
+
+       vma = fetch_and_zero(&old_plane_state->vma);
+       if (vma)
+               intel_unpin_fb_vma(vma, old_plane_state->flags);
+}
+
 /**
  * intel_prepare_plane_fb - Prepare fb for usage on plane
  * @plane: drm plane to prepare for
@@ -12726,22 +12780,7 @@ intel_prepare_plane_fb(struct drm_plane *plane,
                return ret;
        }
 
-       if (plane->type == DRM_PLANE_TYPE_CURSOR &&
-           INTEL_INFO(dev_priv)->cursor_needs_physical) {
-               const int align = intel_cursor_alignment(dev_priv);
-
-               ret = i915_gem_object_attach_phys(obj, align);
-       } else {
-               struct i915_vma *vma;
-
-               vma = intel_pin_and_fence_fb_obj(fb,
-                                                new_state->rotation,
-                                                &to_intel_plane_state(new_state)->flags);
-               if (!IS_ERR(vma))
-                       to_intel_plane_state(new_state)->vma = vma;
-               else
-                       ret =  PTR_ERR(vma);
-       }
+       ret = intel_plane_pin_fb(to_intel_plane_state(new_state));
 
        i915_gem_object_wait_priority(obj, 0, I915_PRIORITY_DISPLAY);
 
@@ -12785,15 +12824,12 @@ void
 intel_cleanup_plane_fb(struct drm_plane *plane,
                       struct drm_plane_state *old_state)
 {
-       struct i915_vma *vma;
+       struct drm_i915_private *dev_priv = to_i915(plane->dev);
 
        /* Should only be called after a successful intel_prepare_plane_fb()! */
-       vma = fetch_and_zero(&to_intel_plane_state(old_state)->vma);
-       if (vma) {
-               mutex_lock(&plane->dev->struct_mutex);
-               intel_unpin_fb_vma(vma, to_intel_plane_state(old_state)->flags);
-               mutex_unlock(&plane->dev->struct_mutex);
-       }
+       mutex_lock(&dev_priv->drm.struct_mutex);
+       intel_plane_unpin_fb(to_intel_plane_state(old_state));
+       mutex_unlock(&dev_priv->drm.struct_mutex);
 }
 
 int
@@ -13084,7 +13120,6 @@ intel_legacy_cursor_update(struct drm_plane *plane,
        struct intel_plane *intel_plane = to_intel_plane(plane);
        struct drm_framebuffer *old_fb;
        struct drm_crtc_state *crtc_state = crtc->state;
-       struct i915_vma *old_vma, *vma;
 
        /*
         * When crtc is inactive or there is a modeset pending,
@@ -13143,27 +13178,9 @@ intel_legacy_cursor_update(struct drm_plane *plane,
        if (ret)
                goto out_free;
 
-       if (INTEL_INFO(dev_priv)->cursor_needs_physical) {
-               int align = intel_cursor_alignment(dev_priv);
-
-               ret = i915_gem_object_attach_phys(intel_fb_obj(fb), align);
-               if (ret) {
-                       DRM_DEBUG_KMS("failed to attach phys object\n");
-                       goto out_unlock;
-               }
-       } else {
-               vma = intel_pin_and_fence_fb_obj(fb,
-                                                new_plane_state->rotation,
-                                                &to_intel_plane_state(new_plane_state)->flags);
-               if (IS_ERR(vma)) {
-                       DRM_DEBUG_KMS("failed to pin object\n");
-
-                       ret = PTR_ERR(vma);
-                       goto out_unlock;
-               }
-
-               to_intel_plane_state(new_plane_state)->vma = vma;
-       }
+       ret = intel_plane_pin_fb(to_intel_plane_state(new_plane_state));
+       if (ret)
+               goto out_unlock;
 
        old_fb = old_plane_state->fb;
 
@@ -13183,10 +13200,7 @@ intel_legacy_cursor_update(struct drm_plane *plane,
                intel_plane->disable_plane(intel_plane, to_intel_crtc(crtc));
        }
 
-       old_vma = fetch_and_zero(&to_intel_plane_state(old_plane_state)->vma);
-       if (old_vma)
-               intel_unpin_fb_vma(old_vma,
-                                  to_intel_plane_state(old_plane_state)->flags);
+       intel_plane_unpin_fb(to_intel_plane_state(old_plane_state));
 
 out_unlock:
        mutex_unlock(&dev_priv->drm.struct_mutex);
@@ -13214,6 +13228,32 @@ static const struct drm_plane_funcs intel_cursor_plane_funcs = {
        .format_mod_supported = intel_cursor_plane_format_mod_supported,
 };
 
+static bool i9xx_plane_has_fbc(struct drm_i915_private *dev_priv,
+                              enum i9xx_plane_id i9xx_plane)
+{
+       if (!HAS_FBC(dev_priv))
+               return false;
+
+       if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
+               return i9xx_plane == PLANE_A; /* tied to pipe A */
+       else if (IS_IVYBRIDGE(dev_priv))
+               return i9xx_plane == PLANE_A || i9xx_plane == PLANE_B ||
+                       i9xx_plane == PLANE_C;
+       else if (INTEL_GEN(dev_priv) >= 4)
+               return i9xx_plane == PLANE_A || i9xx_plane == PLANE_B;
+       else
+               return i9xx_plane == PLANE_A;
+}
+
+static bool skl_plane_has_fbc(struct drm_i915_private *dev_priv,
+                             enum pipe pipe, enum plane_id plane_id)
+{
+       if (!HAS_FBC(dev_priv))
+               return false;
+
+       return pipe == PIPE_A && plane_id == PLANE_PRIMARY;
+}
+
 static struct intel_plane *
 intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
 {
@@ -13256,6 +13296,21 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
                primary->i9xx_plane = (enum i9xx_plane_id) pipe;
        primary->id = PLANE_PRIMARY;
        primary->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, primary->id);
+
+       if (INTEL_GEN(dev_priv) >= 9)
+               primary->has_fbc = skl_plane_has_fbc(dev_priv,
+                                                    primary->pipe,
+                                                    primary->id);
+       else
+               primary->has_fbc = i9xx_plane_has_fbc(dev_priv,
+                                                     primary->i9xx_plane);
+
+       if (primary->has_fbc) {
+               struct intel_fbc *fbc = &dev_priv->fbc;
+
+               fbc->possible_framebuffer_bits |= primary->frontbuffer_bit;
+       }
+
        primary->check_plane = intel_check_primary_plane;
 
        if (INTEL_GEN(dev_priv) >= 9) {
index c4042e3..4e7418b 100644 (file)
@@ -139,6 +139,17 @@ enum dpio_phy {
 
 #define I915_NUM_PHYS_VLV 2
 
+enum aux_ch {
+       AUX_CH_A,
+       AUX_CH_B,
+       AUX_CH_C,
+       AUX_CH_D,
+       _AUX_CH_E, /* does not exist */
+       AUX_CH_F,
+};
+
+#define aux_ch_name(a) ((a) + 'A')
+
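
Keeping a placeholder for the nonexistent AUX E channel lets the name macro remain a plain offset: aux_ch_name(AUX_CH_B) evaluates to 'B', and aux_ch_name(AUX_CH_F) still lands on 'F' rather than 'E'.
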
 enum intel_display_power_domain {
        POWER_DOMAIN_PIPE_A,
        POWER_DOMAIN_PIPE_B,
@@ -175,6 +186,7 @@ enum intel_display_power_domain {
        POWER_DOMAIN_AUX_C,
        POWER_DOMAIN_AUX_D,
        POWER_DOMAIN_AUX_F,
+       POWER_DOMAIN_AUX_IO_A,
        POWER_DOMAIN_GMBUS,
        POWER_DOMAIN_MODESET,
        POWER_DOMAIN_GT_IRQ,
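A note on the aux_ch_name() macro added above: it relies on the enum values being contiguous from AUX_CH_A, which is why the unused _AUX_CH_E placeholder is kept so that AUX_CH_F stays at the right offset. Illustrative use:

	/* AUX_CH_C == 2, so aux_ch_name(AUX_CH_C) == 'A' + 2 == 'C' */
	DRM_DEBUG_KMS("using AUX %c\n", aux_ch_name(AUX_CH_C));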
index f20b25f..aba2f45 100644 (file)
@@ -96,15 +96,6 @@ static const struct dp_link_dpll chv_dpll[] = {
                { .p1 = 2, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } }
 };
 
-static const int bxt_rates[] = { 162000, 216000, 243000, 270000,
-                                 324000, 432000, 540000 };
-static const int skl_rates[] = { 162000, 216000, 270000,
-                                 324000, 432000, 540000 };
-static const int cnl_rates[] = { 162000, 216000, 270000,
-                                324000, 432000, 540000,
-                                648000, 810000 };
-static const int default_rates[] = { 162000, 270000, 540000 };
-
 /**
  * intel_dp_is_edp - is the given port attached to an eDP panel (either CPU or PCH)
  * @intel_dp: DP struct
@@ -144,14 +135,17 @@ static void intel_dp_unset_edid(struct intel_dp *intel_dp);
 /* update sink rates from dpcd */
 static void intel_dp_set_sink_rates(struct intel_dp *intel_dp)
 {
+       static const int dp_rates[] = {
+               162000, 270000, 540000, 810000
+       };
        int i, max_rate;
 
        max_rate = drm_dp_bw_code_to_link_rate(intel_dp->dpcd[DP_MAX_LINK_RATE]);
 
-       for (i = 0; i < ARRAY_SIZE(default_rates); i++) {
-               if (default_rates[i] > max_rate)
+       for (i = 0; i < ARRAY_SIZE(dp_rates); i++) {
+               if (dp_rates[i] > max_rate)
                        break;
-               intel_dp->sink_rates[i] = default_rates[i];
+               intel_dp->sink_rates[i] = dp_rates[i];
        }
 
        intel_dp->num_sink_rates = i;
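As a worked example of the truncation above (assuming drm_dp_bw_code_to_link_rate() expands the DPCD bandwidth code by 27000 kHz per unit, as the DRM helper does for the standard codes):

	/*
	 * Sink reports DP_MAX_LINK_RATE == 0x14 (HBR2):
	 *   max_rate = 0x14 * 27000 = 540000 kHz
	 * The loop copies {162000, 270000, 540000}, stops before 810000,
	 * and intel_dp->num_sink_rates ends up as 3.
	 */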
@@ -268,6 +262,22 @@ static int cnl_max_source_rate(struct intel_dp *intel_dp)
 static void
 intel_dp_set_source_rates(struct intel_dp *intel_dp)
 {
+       /* The values must be in increasing order */
+       static const int cnl_rates[] = {
+               162000, 216000, 270000, 324000, 432000, 540000, 648000, 810000
+       };
+       static const int bxt_rates[] = {
+               162000, 216000, 243000, 270000, 324000, 432000, 540000
+       };
+       static const int skl_rates[] = {
+               162000, 216000, 270000, 324000, 432000, 540000
+       };
+       static const int hsw_rates[] = {
+               162000, 270000, 540000
+       };
+       static const int g4x_rates[] = {
+               162000, 270000
+       };
        struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
        struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
        const struct ddi_vbt_port_info *info =
@@ -278,23 +288,23 @@ intel_dp_set_source_rates(struct intel_dp *intel_dp)
        /* This should only be done once */
        WARN_ON(intel_dp->source_rates || intel_dp->num_source_rates);
 
-       if (IS_GEN9_LP(dev_priv)) {
-               source_rates = bxt_rates;
-               size = ARRAY_SIZE(bxt_rates);
-       } else if (IS_CANNONLAKE(dev_priv)) {
+       if (IS_CANNONLAKE(dev_priv)) {
                source_rates = cnl_rates;
                size = ARRAY_SIZE(cnl_rates);
                max_rate = cnl_max_source_rate(intel_dp);
+       } else if (IS_GEN9_LP(dev_priv)) {
+               source_rates = bxt_rates;
+               size = ARRAY_SIZE(bxt_rates);
        } else if (IS_GEN9_BC(dev_priv)) {
                source_rates = skl_rates;
                size = ARRAY_SIZE(skl_rates);
        } else if ((IS_HASWELL(dev_priv) && !IS_HSW_ULX(dev_priv)) ||
                   IS_BROADWELL(dev_priv)) {
-               source_rates = default_rates;
-               size = ARRAY_SIZE(default_rates);
+               source_rates = hsw_rates;
+               size = ARRAY_SIZE(hsw_rates);
        } else {
-               source_rates = default_rates;
-               size = ARRAY_SIZE(default_rates) - 1;
+               source_rates = g4x_rates;
+               size = ARRAY_SIZE(g4x_rates);
        }
 
        if (max_rate && vbt_max_rate)
@@ -356,7 +366,7 @@ static void intel_dp_set_common_rates(struct intel_dp *intel_dp)
 
        /* Paranoia, there should always be something in common. */
        if (WARN_ON(intel_dp->num_common_rates == 0)) {
-               intel_dp->common_rates[0] = default_rates[0];
+               intel_dp->common_rates[0] = 162000;
                intel_dp->num_common_rates = 1;
        }
 }
@@ -656,19 +666,15 @@ static int
 bxt_power_sequencer_idx(struct intel_dp *intel_dp)
 {
        struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
+       int backlight_controller = dev_priv->vbt.backlight.controller;
 
        lockdep_assert_held(&dev_priv->pps_mutex);
 
        /* We should never land here with regular DP ports */
        WARN_ON(!intel_dp_is_edp(intel_dp));
 
-       /*
-        * TODO: BXT has 2 PPS instances. The correct port->PPS instance
-        * mapping needs to be retrieved from VBT, for now just hard-code to
-        * use instance #0 always.
-        */
        if (!intel_dp->pps_reset)
-               return 0;
+               return backlight_controller;
 
        intel_dp->pps_reset = false;
 
@@ -678,7 +684,7 @@ bxt_power_sequencer_idx(struct intel_dp *intel_dp)
         */
        intel_dp_init_panel_power_sequencer_registers(intel_dp, false);
 
-       return 0;
+       return backlight_controller;
 }
 
 typedef bool (*vlv_pipe_check)(struct drm_i915_private *dev_priv,
@@ -936,7 +942,7 @@ static uint32_t
 intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq)
 {
        struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
-       i915_reg_t ch_ctl = intel_dp->aux_ch_ctl_reg;
+       i915_reg_t ch_ctl = intel_dp->aux_ch_ctl_reg(intel_dp);
        uint32_t status;
        bool done;
 
@@ -956,8 +962,7 @@ intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq)
 
 static uint32_t g4x_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
 {
-       struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
-       struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev);
+       struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
 
        if (index)
                return 0;
@@ -971,8 +976,7 @@ static uint32_t g4x_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
 
 static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
 {
-       struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
-       struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev);
+       struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
 
        if (index)
                return 0;
@@ -982,7 +986,7 @@ static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
         * like to run at 2MHz.  So, take the cdclk or PCH rawclk value and
         * divide by 2000 and use that
         */
-       if (intel_dig_port->base.port == PORT_A)
+       if (intel_dp->aux_ch == AUX_CH_A)
                return DIV_ROUND_CLOSEST(dev_priv->cdclk.hw.cdclk, 2000);
        else
                return DIV_ROUND_CLOSEST(dev_priv->rawclk_freq, 2000);
@@ -990,10 +994,9 @@ static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
 
 static uint32_t hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
 {
-       struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
-       struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev);
+       struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
 
-       if (intel_dig_port->base.port != PORT_A && HAS_PCH_LPT_H(dev_priv)) {
+       if (intel_dp->aux_ch != AUX_CH_A && HAS_PCH_LPT_H(dev_priv)) {
                /* Workaround for non-ULT HSW */
                switch (index) {
                case 0: return 63;
@@ -1089,7 +1092,7 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_i915_private *dev_priv =
                        to_i915(intel_dig_port->base.base.dev);
-       i915_reg_t ch_ctl = intel_dp->aux_ch_ctl_reg;
+       i915_reg_t ch_ctl, ch_data[5];
        uint32_t aux_clock_divider;
        int i, ret, recv_bytes;
        uint32_t status;
@@ -1097,6 +1100,10 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
        bool has_aux_irq = HAS_AUX_IRQ(dev_priv);
        bool vdd;
 
+       ch_ctl = intel_dp->aux_ch_ctl_reg(intel_dp);
+       for (i = 0; i < ARRAY_SIZE(ch_data); i++)
+               ch_data[i] = intel_dp->aux_ch_data_reg(intel_dp, i);
+
        pps_lock(intel_dp);
 
        /*
@@ -1154,7 +1161,7 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
                for (try = 0; try < 5; try++) {
                        /* Load the send data into the aux channel data registers */
                        for (i = 0; i < send_bytes; i += 4)
-                               I915_WRITE(intel_dp->aux_ch_data_reg[i >> 2],
+                               I915_WRITE(ch_data[i >> 2],
                                           intel_dp_pack_aux(send + i,
                                                             send_bytes - i));
 
@@ -1170,14 +1177,14 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
                                   DP_AUX_CH_CTL_TIME_OUT_ERROR |
                                   DP_AUX_CH_CTL_RECEIVE_ERROR);
 
-                       if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR)
-                               continue;
-
                        /* DP CTS 1.2 Core Rev 1.1, 4.2.1.1 & 4.2.1.2
                         *   400us delay required for errors and timeouts
                         *   Timeout errors from the HW already meet this
                         *   requirement so skip to next iteration
                         */
+                       if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR)
+                               continue;
+
                        if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
                                usleep_range(400, 500);
                                continue;
@@ -1223,14 +1230,6 @@ done:
        if (recv_bytes == 0 || recv_bytes > 20) {
                DRM_DEBUG_KMS("Forbidden recv_bytes = %d on aux transaction\n",
                              recv_bytes);
-               /*
-                * FIXME: This patch was created on top of a series that
-                * organize the retries at drm level. There EBUSY should
-                * also take care for 1ms wait before retrying.
-                * That aux retries re-org is still needed and after that is
-                * merged we remove this sleep from here.
-                */
-               usleep_range(1000, 1500);
                ret = -EBUSY;
                goto out;
        }
@@ -1239,7 +1238,7 @@ done:
                recv_bytes = recv_size;
 
        for (i = 0; i < recv_bytes; i += 4)
-               intel_dp_unpack_aux(I915_READ(intel_dp->aux_ch_data_reg[i >> 2]),
+               intel_dp_unpack_aux(I915_READ(ch_data[i >> 2]),
                                    recv + i, recv_bytes - i);
 
        ret = recv_bytes;
@@ -1331,171 +1330,173 @@ intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
        return ret;
 }
 
-static enum port intel_aux_port(struct drm_i915_private *dev_priv,
-                               enum port port)
+static enum aux_ch intel_aux_ch(struct intel_dp *intel_dp)
 {
+       struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
+       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+       enum port port = encoder->port;
        const struct ddi_vbt_port_info *info =
                &dev_priv->vbt.ddi_port_info[port];
-       enum port aux_port;
+       enum aux_ch aux_ch;
 
        if (!info->alternate_aux_channel) {
+               aux_ch = (enum aux_ch) port;
+
                DRM_DEBUG_KMS("using AUX %c for port %c (platform default)\n",
-                             port_name(port), port_name(port));
-               return port;
+                             aux_ch_name(aux_ch), port_name(port));
+               return aux_ch;
        }
 
        switch (info->alternate_aux_channel) {
        case DP_AUX_A:
-               aux_port = PORT_A;
+               aux_ch = AUX_CH_A;
                break;
        case DP_AUX_B:
-               aux_port = PORT_B;
+               aux_ch = AUX_CH_B;
                break;
        case DP_AUX_C:
-               aux_port = PORT_C;
+               aux_ch = AUX_CH_C;
                break;
        case DP_AUX_D:
-               aux_port = PORT_D;
+               aux_ch = AUX_CH_D;
                break;
        case DP_AUX_F:
-               aux_port = PORT_F;
+               aux_ch = AUX_CH_F;
                break;
        default:
                MISSING_CASE(info->alternate_aux_channel);
-               aux_port = PORT_A;
+               aux_ch = AUX_CH_A;
                break;
        }
 
        DRM_DEBUG_KMS("using AUX %c for port %c (VBT)\n",
-                     port_name(aux_port), port_name(port));
+                     aux_ch_name(aux_ch), port_name(port));
 
-       return aux_port;
+       return aux_ch;
 }
 
-static i915_reg_t g4x_aux_ctl_reg(struct drm_i915_private *dev_priv,
-                                 enum port port)
+static enum intel_display_power_domain
+intel_aux_power_domain(struct intel_dp *intel_dp)
 {
-       switch (port) {
-       case PORT_B:
-       case PORT_C:
-       case PORT_D:
-               return DP_AUX_CH_CTL(port);
+       switch (intel_dp->aux_ch) {
+       case AUX_CH_A:
+               return POWER_DOMAIN_AUX_A;
+       case AUX_CH_B:
+               return POWER_DOMAIN_AUX_B;
+       case AUX_CH_C:
+               return POWER_DOMAIN_AUX_C;
+       case AUX_CH_D:
+               return POWER_DOMAIN_AUX_D;
+       case AUX_CH_F:
+               return POWER_DOMAIN_AUX_F;
        default:
-               MISSING_CASE(port);
-               return DP_AUX_CH_CTL(PORT_B);
+               MISSING_CASE(intel_dp->aux_ch);
+               return POWER_DOMAIN_AUX_A;
        }
 }
 
-static i915_reg_t g4x_aux_data_reg(struct drm_i915_private *dev_priv,
-                                  enum port port, int index)
+static i915_reg_t g4x_aux_ctl_reg(struct intel_dp *intel_dp)
 {
-       switch (port) {
-       case PORT_B:
-       case PORT_C:
-       case PORT_D:
-               return DP_AUX_CH_DATA(port, index);
+       struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
+       enum aux_ch aux_ch = intel_dp->aux_ch;
+
+       switch (aux_ch) {
+       case AUX_CH_B:
+       case AUX_CH_C:
+       case AUX_CH_D:
+               return DP_AUX_CH_CTL(aux_ch);
        default:
-               MISSING_CASE(port);
-               return DP_AUX_CH_DATA(PORT_B, index);
+               MISSING_CASE(aux_ch);
+               return DP_AUX_CH_CTL(AUX_CH_B);
        }
 }
 
-static i915_reg_t ilk_aux_ctl_reg(struct drm_i915_private *dev_priv,
-                                 enum port port)
+static i915_reg_t g4x_aux_data_reg(struct intel_dp *intel_dp, int index)
 {
-       switch (port) {
-       case PORT_A:
-               return DP_AUX_CH_CTL(port);
-       case PORT_B:
-       case PORT_C:
-       case PORT_D:
-               return PCH_DP_AUX_CH_CTL(port);
+       struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
+       enum aux_ch aux_ch = intel_dp->aux_ch;
+
+       switch (aux_ch) {
+       case AUX_CH_B:
+       case AUX_CH_C:
+       case AUX_CH_D:
+               return DP_AUX_CH_DATA(aux_ch, index);
        default:
-               MISSING_CASE(port);
-               return DP_AUX_CH_CTL(PORT_A);
+               MISSING_CASE(aux_ch);
+               return DP_AUX_CH_DATA(AUX_CH_B, index);
        }
 }
 
-static i915_reg_t ilk_aux_data_reg(struct drm_i915_private *dev_priv,
-                                  enum port port, int index)
+static i915_reg_t ilk_aux_ctl_reg(struct intel_dp *intel_dp)
 {
-       switch (port) {
-       case PORT_A:
-               return DP_AUX_CH_DATA(port, index);
-       case PORT_B:
-       case PORT_C:
-       case PORT_D:
-               return PCH_DP_AUX_CH_DATA(port, index);
+       struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
+       enum aux_ch aux_ch = intel_dp->aux_ch;
+
+       switch (aux_ch) {
+       case AUX_CH_A:
+               return DP_AUX_CH_CTL(aux_ch);
+       case AUX_CH_B:
+       case AUX_CH_C:
+       case AUX_CH_D:
+               return PCH_DP_AUX_CH_CTL(aux_ch);
        default:
-               MISSING_CASE(port);
-               return DP_AUX_CH_DATA(PORT_A, index);
+               MISSING_CASE(aux_ch);
+               return DP_AUX_CH_CTL(AUX_CH_A);
        }
 }
 
-static i915_reg_t skl_aux_ctl_reg(struct drm_i915_private *dev_priv,
-                                 enum port port)
+static i915_reg_t ilk_aux_data_reg(struct intel_dp *intel_dp, int index)
 {
-       switch (port) {
-       case PORT_A:
-       case PORT_B:
-       case PORT_C:
-       case PORT_D:
-       case PORT_F:
-               return DP_AUX_CH_CTL(port);
+       struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
+       enum aux_ch aux_ch = intel_dp->aux_ch;
+
+       switch (aux_ch) {
+       case AUX_CH_A:
+               return DP_AUX_CH_DATA(aux_ch, index);
+       case AUX_CH_B:
+       case AUX_CH_C:
+       case AUX_CH_D:
+               return PCH_DP_AUX_CH_DATA(aux_ch, index);
        default:
-               MISSING_CASE(port);
-               return DP_AUX_CH_CTL(PORT_A);
+               MISSING_CASE(aux_ch);
+               return DP_AUX_CH_DATA(AUX_CH_A, index);
        }
 }
 
-static i915_reg_t skl_aux_data_reg(struct drm_i915_private *dev_priv,
-                                  enum port port, int index)
+static i915_reg_t skl_aux_ctl_reg(struct intel_dp *intel_dp)
 {
-       switch (port) {
-       case PORT_A:
-       case PORT_B:
-       case PORT_C:
-       case PORT_D:
-       case PORT_F:
-               return DP_AUX_CH_DATA(port, index);
+       struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
+       enum aux_ch aux_ch = intel_dp->aux_ch;
+
+       switch (aux_ch) {
+       case AUX_CH_A:
+       case AUX_CH_B:
+       case AUX_CH_C:
+       case AUX_CH_D:
+       case AUX_CH_F:
+               return DP_AUX_CH_CTL(aux_ch);
        default:
-               MISSING_CASE(port);
-               return DP_AUX_CH_DATA(PORT_A, index);
+               MISSING_CASE(aux_ch);
+               return DP_AUX_CH_CTL(AUX_CH_A);
        }
 }
 
-static i915_reg_t intel_aux_ctl_reg(struct drm_i915_private *dev_priv,
-                                   enum port port)
-{
-       if (INTEL_GEN(dev_priv) >= 9)
-               return skl_aux_ctl_reg(dev_priv, port);
-       else if (HAS_PCH_SPLIT(dev_priv))
-               return ilk_aux_ctl_reg(dev_priv, port);
-       else
-               return g4x_aux_ctl_reg(dev_priv, port);
-}
-
-static i915_reg_t intel_aux_data_reg(struct drm_i915_private *dev_priv,
-                                    enum port port, int index)
-{
-       if (INTEL_GEN(dev_priv) >= 9)
-               return skl_aux_data_reg(dev_priv, port, index);
-       else if (HAS_PCH_SPLIT(dev_priv))
-               return ilk_aux_data_reg(dev_priv, port, index);
-       else
-               return g4x_aux_data_reg(dev_priv, port, index);
-}
-
-static void intel_aux_reg_init(struct intel_dp *intel_dp)
+static i915_reg_t skl_aux_data_reg(struct intel_dp *intel_dp, int index)
 {
        struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
-       enum port port = intel_aux_port(dev_priv,
-                                       dp_to_dig_port(intel_dp)->base.port);
-       int i;
-
-       intel_dp->aux_ch_ctl_reg = intel_aux_ctl_reg(dev_priv, port);
-       for (i = 0; i < ARRAY_SIZE(intel_dp->aux_ch_data_reg); i++)
-               intel_dp->aux_ch_data_reg[i] = intel_aux_data_reg(dev_priv, port, i);
+       enum aux_ch aux_ch = intel_dp->aux_ch;
+
+       switch (aux_ch) {
+       case AUX_CH_A:
+       case AUX_CH_B:
+       case AUX_CH_C:
+       case AUX_CH_D:
+       case AUX_CH_F:
+               return DP_AUX_CH_DATA(aux_ch, index);
+       default:
+               MISSING_CASE(aux_ch);
+               return DP_AUX_CH_DATA(AUX_CH_A, index);
+       }
 }
 
 static void
@@ -1507,14 +1508,42 @@ intel_dp_aux_fini(struct intel_dp *intel_dp)
 static void
 intel_dp_aux_init(struct intel_dp *intel_dp)
 {
-       struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
-       enum port port = intel_dig_port->base.port;
+       struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
+       struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
+
+       intel_dp->aux_ch = intel_aux_ch(intel_dp);
+       intel_dp->aux_power_domain = intel_aux_power_domain(intel_dp);
+
+       if (INTEL_GEN(dev_priv) >= 9) {
+               intel_dp->aux_ch_ctl_reg = skl_aux_ctl_reg;
+               intel_dp->aux_ch_data_reg = skl_aux_data_reg;
+       } else if (HAS_PCH_SPLIT(dev_priv)) {
+               intel_dp->aux_ch_ctl_reg = ilk_aux_ctl_reg;
+               intel_dp->aux_ch_data_reg = ilk_aux_data_reg;
+       } else {
+               intel_dp->aux_ch_ctl_reg = g4x_aux_ctl_reg;
+               intel_dp->aux_ch_data_reg = g4x_aux_data_reg;
+       }
+
+       if (INTEL_GEN(dev_priv) >= 9)
+               intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider;
+       else if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
+               intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
+       else if (HAS_PCH_SPLIT(dev_priv))
+               intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider;
+       else
+               intel_dp->get_aux_clock_divider = g4x_get_aux_clock_divider;
+
+       if (INTEL_GEN(dev_priv) >= 9)
+               intel_dp->get_aux_send_ctl = skl_get_aux_send_ctl;
+       else
+               intel_dp->get_aux_send_ctl = g4x_get_aux_send_ctl;
 
-       intel_aux_reg_init(intel_dp);
        drm_dp_aux_init(&intel_dp->aux);
 
        /* Failure to allocate our preferred name is not critical */
-       intel_dp->aux.name = kasprintf(GFP_KERNEL, "DPDDC-%c", port_name(port));
+       intel_dp->aux.name = kasprintf(GFP_KERNEL, "DPDDC-%c",
+                                      port_name(encoder->port));
        intel_dp->aux.transfer = intel_dp_aux_transfer;
 }
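The hooks assigned above form a small per-platform dispatch table; the AUX code converted earlier in this patch (intel_dp_aux_wait_done(), intel_dp_aux_ch()) now resolves its registers through it instead of through cached i915_reg_t fields. A minimal sketch of the resulting call pattern:

	/* Illustrative caller, mirroring the converted code above. */
	i915_reg_t ctl = intel_dp->aux_ch_ctl_reg(intel_dp);       /* skl_/ilk_/g4x_ variant */
	i915_reg_t data0 = intel_dp->aux_ch_data_reg(intel_dp, 0); /* first data register */
	u32 div = intel_dp->get_aux_clock_divider(intel_dp, 0);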
 
@@ -3172,35 +3201,6 @@ intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_
                                DP_LINK_STATUS_SIZE) == DP_LINK_STATUS_SIZE;
 }
 
-static bool intel_dp_get_y_cord_status(struct intel_dp *intel_dp)
-{
-       uint8_t psr_caps = 0;
-
-       if (drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_CAPS, &psr_caps) != 1)
-               return false;
-       return psr_caps & DP_PSR2_SU_Y_COORDINATE_REQUIRED;
-}
-
-static bool intel_dp_get_colorimetry_status(struct intel_dp *intel_dp)
-{
-       uint8_t dprx = 0;
-
-       if (drm_dp_dpcd_readb(&intel_dp->aux, DP_DPRX_FEATURE_ENUMERATION_LIST,
-                             &dprx) != 1)
-               return false;
-       return dprx & DP_VSC_SDP_EXT_FOR_COLORIMETRY_SUPPORTED;
-}
-
-static bool intel_dp_get_alpm_status(struct intel_dp *intel_dp)
-{
-       uint8_t alpm_caps = 0;
-
-       if (drm_dp_dpcd_readb(&intel_dp->aux, DP_RECEIVER_ALPM_CAP,
-                             &alpm_caps) != 1)
-               return false;
-       return alpm_caps & DP_ALPM_CAP;
-}
-
 /* These are source-specific values. */
 uint8_t
 intel_dp_voltage_max(struct intel_dp *intel_dp)
@@ -3751,40 +3751,7 @@ intel_edp_init_dpcd(struct intel_dp *intel_dp)
                dev_priv->no_aux_handshake = intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
                        DP_NO_AUX_HANDSHAKE_LINK_TRAINING;
 
-       /* Check if the panel supports PSR */
-       drm_dp_dpcd_read(&intel_dp->aux, DP_PSR_SUPPORT,
-                        intel_dp->psr_dpcd,
-                        sizeof(intel_dp->psr_dpcd));
-       if (intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED) {
-               dev_priv->psr.sink_support = true;
-               DRM_DEBUG_KMS("Detected EDP PSR Panel.\n");
-       }
-
-       if (INTEL_GEN(dev_priv) >= 9 &&
-           (intel_dp->psr_dpcd[0] & DP_PSR2_IS_SUPPORTED)) {
-               uint8_t frame_sync_cap;
-
-               dev_priv->psr.sink_support = true;
-               if (drm_dp_dpcd_readb(&intel_dp->aux,
-                                     DP_SINK_DEVICE_AUX_FRAME_SYNC_CAP,
-                                     &frame_sync_cap) != 1)
-                       frame_sync_cap = 0;
-               dev_priv->psr.aux_frame_sync = frame_sync_cap ? true : false;
-               /* PSR2 needs frame sync as well */
-               dev_priv->psr.psr2_support = dev_priv->psr.aux_frame_sync;
-               DRM_DEBUG_KMS("PSR2 %s on sink",
-                             dev_priv->psr.psr2_support ? "supported" : "not supported");
-
-               if (dev_priv->psr.psr2_support) {
-                       dev_priv->psr.y_cord_support =
-                               intel_dp_get_y_cord_status(intel_dp);
-                       dev_priv->psr.colorimetry_support =
-                               intel_dp_get_colorimetry_status(intel_dp);
-                       dev_priv->psr.alpm =
-                               intel_dp_get_alpm_status(intel_dp);
-               }
-
-       }
+       intel_psr_init_dpcd(intel_dp);
 
        /*
         * Read the eDP display control registers.
@@ -6266,42 +6233,6 @@ out_vdd_off:
        return false;
 }
 
-/* Set up the hotplug pin and aux power domain. */
-static void
-intel_dp_init_connector_port_info(struct intel_digital_port *intel_dig_port)
-{
-       struct intel_encoder *encoder = &intel_dig_port->base;
-       struct intel_dp *intel_dp = &intel_dig_port->dp;
-       struct intel_encoder *intel_encoder = &intel_dig_port->base;
-       struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev);
-
-       encoder->hpd_pin = intel_hpd_pin_default(dev_priv, encoder->port);
-
-       switch (encoder->port) {
-       case PORT_A:
-               intel_dp->aux_power_domain = POWER_DOMAIN_AUX_A;
-               break;
-       case PORT_B:
-               intel_dp->aux_power_domain = POWER_DOMAIN_AUX_B;
-               break;
-       case PORT_C:
-               intel_dp->aux_power_domain = POWER_DOMAIN_AUX_C;
-               break;
-       case PORT_D:
-               intel_dp->aux_power_domain = POWER_DOMAIN_AUX_D;
-               break;
-       case PORT_E:
-               /* FIXME: Check VBT for actual wiring of PORT E */
-               intel_dp->aux_power_domain = POWER_DOMAIN_AUX_D;
-               break;
-       case PORT_F:
-               intel_dp->aux_power_domain = POWER_DOMAIN_AUX_F;
-               break;
-       default:
-               MISSING_CASE(encoder->port);
-       }
-}
-
 static void intel_dp_modeset_retry_work_fn(struct work_struct *work)
 {
        struct intel_connector *intel_connector;
@@ -6353,20 +6284,6 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
        intel_dp->active_pipe = INVALID_PIPE;
 
        /* intel_dp vfuncs */
-       if (INTEL_GEN(dev_priv) >= 9)
-               intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider;
-       else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
-               intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
-       else if (HAS_PCH_SPLIT(dev_priv))
-               intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider;
-       else
-               intel_dp->get_aux_clock_divider = g4x_get_aux_clock_divider;
-
-       if (INTEL_GEN(dev_priv) >= 9)
-               intel_dp->get_aux_send_ctl = skl_get_aux_send_ctl;
-       else
-               intel_dp->get_aux_send_ctl = g4x_get_aux_send_ctl;
-
        if (HAS_DDI(dev_priv))
                intel_dp->prepare_link_retrain = intel_ddi_prepare_link_retrain;
 
@@ -6407,7 +6324,7 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
                connector->interlace_allowed = true;
        connector->doublescan_allowed = 0;
 
-       intel_dp_init_connector_port_info(intel_dig_port);
+       intel_encoder->hpd_pin = intel_hpd_pin_default(dev_priv, port);
 
        intel_dp_aux_init(intel_dp);
 
index 50874f4..652b11e 100644 (file)
@@ -935,6 +935,7 @@ struct intel_plane {
        enum plane_id id;
        enum pipe pipe;
        bool can_scale;
+       bool has_fbc;
        int max_downscale;
        uint32_t frontbuffer_bit;
 
@@ -1041,8 +1042,6 @@ struct intel_dp_compliance {
 
 struct intel_dp {
        i915_reg_t output_reg;
-       i915_reg_t aux_ch_ctl_reg;
-       i915_reg_t aux_ch_data_reg[5];
        uint32_t DP;
        int link_rate;
        uint8_t lane_count;
@@ -1052,6 +1051,7 @@ struct intel_dp {
        bool detect_done;
        bool channel_eq_status;
        bool reset_link_params;
+       enum aux_ch aux_ch;
        uint8_t dpcd[DP_RECEIVER_CAP_SIZE];
        uint8_t psr_dpcd[EDP_PSR_RECEIVER_CAP_SIZE];
        uint8_t downstream_ports[DP_MAX_DOWNSTREAM_PORTS];
@@ -1126,6 +1126,9 @@ struct intel_dp {
                                     int send_bytes,
                                     uint32_t aux_clock_divider);
 
+       i915_reg_t (*aux_ch_ctl_reg)(struct intel_dp *dp);
+       i915_reg_t (*aux_ch_data_reg)(struct intel_dp *dp, int index);
+
        /* This is called before link training is started */
        void (*prepare_link_retrain)(struct intel_dp *intel_dp);
 
@@ -1508,6 +1511,7 @@ void intel_release_load_detect_pipe(struct drm_connector *connector,
 struct i915_vma *
 intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
                           unsigned int rotation,
+                          bool uses_fence,
                           unsigned long *out_flags);
 void intel_unpin_fb_vma(struct i915_vma *vma, unsigned long flags);
 struct drm_framebuffer *
@@ -1862,6 +1866,7 @@ bool is_hdcp_supported(struct drm_i915_private *dev_priv, enum port port);
 
 /* intel_psr.c */
 #define CAN_PSR(dev_priv) (HAS_PSR(dev_priv) && dev_priv->psr.sink_support)
+void intel_psr_init_dpcd(struct intel_dp *intel_dp);
 void intel_psr_enable(struct intel_dp *intel_dp,
                      const struct intel_crtc_state *crtc_state);
 void intel_psr_disable(struct intel_dp *intel_dp,
@@ -1988,8 +1993,7 @@ void intel_suspend_gt_powersave(struct drm_i915_private *dev_priv);
 void gen6_rps_busy(struct drm_i915_private *dev_priv);
 void gen6_rps_reset_ei(struct drm_i915_private *dev_priv);
 void gen6_rps_idle(struct drm_i915_private *dev_priv);
-void gen6_rps_boost(struct drm_i915_gem_request *rq,
-                   struct intel_rps_client *rps);
+void gen6_rps_boost(struct i915_request *rq, struct intel_rps_client *rps);
 void g4x_wm_get_hw_state(struct drm_device *dev);
 void vlv_wm_get_hw_state(struct drm_device *dev);
 void ilk_wm_get_hw_state(struct drm_device *dev);
index f3c5100..3e1107e 100644 (file)
@@ -423,6 +423,7 @@ static void intel_engine_init_execlist(struct intel_engine_cs *engine)
        BUILD_BUG_ON_NOT_POWER_OF_2(execlists_num_ports(execlists));
        GEM_BUG_ON(execlists_num_ports(execlists) > EXECLIST_MAX_PORTS);
 
+       execlists->queue_priority = INT_MIN;
        execlists->queue = RB_ROOT;
        execlists->first = NULL;
 }
@@ -1426,20 +1427,20 @@ int init_workarounds_ring(struct intel_engine_cs *engine)
        return 0;
 }
 
-int intel_ring_workarounds_emit(struct drm_i915_gem_request *req)
+int intel_ring_workarounds_emit(struct i915_request *rq)
 {
-       struct i915_workarounds *w = &req->i915->workarounds;
+       struct i915_workarounds *w = &rq->i915->workarounds;
        u32 *cs;
        int ret, i;
 
        if (w->count == 0)
                return 0;
 
-       ret = req->engine->emit_flush(req, EMIT_BARRIER);
+       ret = rq->engine->emit_flush(rq, EMIT_BARRIER);
        if (ret)
                return ret;
 
-       cs = intel_ring_begin(req, (w->count * 2 + 2));
+       cs = intel_ring_begin(rq, w->count * 2 + 2);
        if (IS_ERR(cs))
                return PTR_ERR(cs);
 
@@ -1450,9 +1451,9 @@ int intel_ring_workarounds_emit(struct drm_i915_gem_request *req)
        }
        *cs++ = MI_NOOP;
 
-       intel_ring_advance(req, cs);
+       intel_ring_advance(rq, cs);
 
-       ret = req->engine->emit_flush(req, EMIT_BARRIER);
+       ret = rq->engine->emit_flush(rq, EMIT_BARRIER);
        if (ret)
                return ret;
 
@@ -1552,7 +1553,7 @@ bool intel_engine_has_kernel_context(const struct intel_engine_cs *engine)
 {
        const struct i915_gem_context * const kernel_context =
                engine->i915->kernel_context;
-       struct drm_i915_gem_request *rq;
+       struct i915_request *rq;
 
        lockdep_assert_held(&engine->i915->drm.struct_mutex);
 
@@ -1664,13 +1665,13 @@ unsigned int intel_engines_has_context_isolation(struct drm_i915_private *i915)
 }
 
 static void print_request(struct drm_printer *m,
-                         struct drm_i915_gem_request *rq,
+                         struct i915_request *rq,
                          const char *prefix)
 {
-       drm_printf(m, "%s%x%s [%x:%x] prio=%d @ %dms: %s\n", prefix,
+       drm_printf(m, "%s%x%s [%llx:%x] prio=%d @ %dms: %s\n", prefix,
                   rq->global_seqno,
-                  i915_gem_request_completed(rq) ? "!" : "",
-                  rq->ctx->hw_id, rq->fence.seqno,
+                  i915_request_completed(rq) ? "!" : "",
+                  rq->fence.context, rq->fence.seqno,
                   rq->priotree.priority,
                   jiffies_to_msecs(jiffies - rq->emitted_jiffies),
                   rq->timeline->common->name);
@@ -1803,7 +1804,7 @@ static void intel_engine_print_registers(const struct intel_engine_cs *engine,
 
                rcu_read_lock();
                for (idx = 0; idx < execlists_num_ports(execlists); idx++) {
-                       struct drm_i915_gem_request *rq;
+                       struct i915_request *rq;
                        unsigned int count;
 
                        rq = port_unpack(&execlists->port[idx], &count);
@@ -1837,7 +1838,7 @@ void intel_engine_dump(struct intel_engine_cs *engine,
        struct intel_breadcrumbs * const b = &engine->breadcrumbs;
        const struct intel_engine_execlists * const execlists = &engine->execlists;
        struct i915_gpu_error * const error = &engine->i915->gpu_error;
-       struct drm_i915_gem_request *rq;
+       struct i915_request *rq;
        struct rb_node *rb;
 
        if (header) {
@@ -1866,12 +1867,12 @@ void intel_engine_dump(struct intel_engine_cs *engine,
        drm_printf(m, "\tRequests:\n");
 
        rq = list_first_entry(&engine->timeline->requests,
-                             struct drm_i915_gem_request, link);
+                             struct i915_request, link);
        if (&rq->link != &engine->timeline->requests)
                print_request(m, rq, "\t\tfirst  ");
 
        rq = list_last_entry(&engine->timeline->requests,
-                            struct drm_i915_gem_request, link);
+                            struct i915_request, link);
        if (&rq->link != &engine->timeline->requests)
                print_request(m, rq, "\t\tlast   ");
 
@@ -1903,6 +1904,7 @@ void intel_engine_dump(struct intel_engine_cs *engine,
        spin_lock_irq(&engine->timeline->lock);
        list_for_each_entry(rq, &engine->timeline->requests, link)
                print_request(m, rq, "\t\tE ");
+       drm_printf(m, "\t\tQueue priority: %d\n", execlists->queue_priority);
        for (rb = execlists->first; rb; rb = rb_next(rb)) {
                struct i915_priolist *p =
                        rb_entry(rb, typeof(*p), node);
index f66f6fb..38b036c 100644 (file)
@@ -46,16 +46,6 @@ static inline bool fbc_supported(struct drm_i915_private *dev_priv)
        return HAS_FBC(dev_priv);
 }
 
-static inline bool fbc_on_pipe_a_only(struct drm_i915_private *dev_priv)
-{
-       return IS_HASWELL(dev_priv) || INTEL_GEN(dev_priv) >= 8;
-}
-
-static inline bool fbc_on_plane_a_only(struct drm_i915_private *dev_priv)
-{
-       return INTEL_GEN(dev_priv) < 4;
-}
-
 static inline bool no_fbc_on_multiple_pipes(struct drm_i915_private *dev_priv)
 {
        return INTEL_GEN(dev_priv) <= 3;
@@ -819,6 +809,12 @@ static bool intel_fbc_can_activate(struct intel_crtc *crtc)
         * Note that it is possible for a tiled surface to be unmappable (and
         * so have no fence associated with it) due to aperture constraints
         * at the time of pinning.
+        *
+        * FIXME with 90/270 degree rotation we should use the fence on
+        * the normal GTT view (the rotated view doesn't even have a
+        * fence). Would need changes to the FBC fence Y offset as well.
+        * For now this will effectively disable FBC with 90/270 degree
+        * rotation.
         */
        if (!(cache->flags & PLANE_HAS_FENCE)) {
                fbc->no_fbc_reason = "framebuffer not tiled or fenced";
@@ -1094,13 +1090,10 @@ void intel_fbc_choose_crtc(struct drm_i915_private *dev_priv,
                struct intel_crtc_state *crtc_state;
                struct intel_crtc *crtc = to_intel_crtc(plane_state->base.crtc);
 
-               if (!plane_state->base.visible)
-                       continue;
-
-               if (fbc_on_pipe_a_only(dev_priv) && crtc->pipe != PIPE_A)
+               if (!plane->has_fbc)
                        continue;
 
-               if (fbc_on_plane_a_only(dev_priv) && plane->i9xx_plane != PLANE_A)
+               if (!plane_state->base.visible)
                        continue;
 
                crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
@@ -1357,7 +1350,6 @@ static bool need_fbc_vtd_wa(struct drm_i915_private *dev_priv)
 void intel_fbc_init(struct drm_i915_private *dev_priv)
 {
        struct intel_fbc *fbc = &dev_priv->fbc;
-       enum pipe pipe;
 
        INIT_WORK(&fbc->work.work, intel_fbc_work_fn);
        INIT_WORK(&fbc->underrun_work, intel_fbc_underrun_work_fn);
@@ -1378,14 +1370,6 @@ void intel_fbc_init(struct drm_i915_private *dev_priv)
                return;
        }
 
-       for_each_pipe(dev_priv, pipe) {
-               fbc->possible_framebuffer_bits |=
-                       INTEL_FRONTBUFFER(pipe, PLANE_PRIMARY);
-
-               if (fbc_on_pipe_a_only(dev_priv))
-                       break;
-       }
-
        /* This value was pulled out of someone's hat */
        if (INTEL_GEN(dev_priv) <= 4 && !IS_GM45(dev_priv))
                I915_WRITE(FBC_CONTROL, 500 << FBC_CTL_INTERVAL_SHIFT);
index 055f409..6f12adc 100644 (file)
@@ -215,7 +215,7 @@ static int intelfb_create(struct drm_fb_helper *helper,
         */
        vma = intel_pin_and_fence_fb_obj(&ifbdev->fb->base,
                                         DRM_MODE_ROTATE_0,
-                                        &flags);
+                                        false, &flags);
        if (IS_ERR(vma)) {
                ret = PTR_ERR(vma);
                goto out_unlock;
index fcfc217..3a8d3d0 100644 (file)
@@ -79,6 +79,7 @@ void __intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
                spin_unlock(&dev_priv->fb_tracking.lock);
        }
 
+       might_sleep();
        intel_psr_invalidate(dev_priv, frontbuffer_bits);
        intel_edp_drrs_invalidate(dev_priv, frontbuffer_bits);
        intel_fbc_invalidate(dev_priv, frontbuffer_bits, origin);
@@ -108,6 +109,7 @@ static void intel_frontbuffer_flush(struct drm_i915_private *dev_priv,
        if (!frontbuffer_bits)
                return;
 
+       might_sleep();
        intel_edp_drrs_flush(dev_priv, frontbuffer_bits);
        intel_psr_flush(dev_priv, frontbuffer_bits, origin);
        intel_fbc_flush(dev_priv, frontbuffer_bits, origin);
index 21140cc..e6512cc 100644 (file)
@@ -370,7 +370,7 @@ int intel_guc_sample_forcewake(struct intel_guc *guc)
        u32 action[2];
 
        action[0] = INTEL_GUC_ACTION_SAMPLE_FORCEWAKE;
-       /* WaRsDisableCoarsePowerGating:skl,bxt */
+       /* WaRsDisableCoarsePowerGating:skl,cnl */
        if (!HAS_RC6(dev_priv) || NEEDS_WaRsDisableCoarsePowerGating(dev_priv))
                action[1] = 0;
        else
index 946766b..8a8ad2f 100644 (file)
 #include <trace/events/dma_fence.h>
 
 #include "intel_guc_submission.h"
+#include "intel_lrc_reg.h"
 #include "i915_drv.h"
 
+#define GUC_PREEMPT_FINISHED           0x1
+#define GUC_PREEMPT_BREADCRUMB_DWORDS  0x8
+#define GUC_PREEMPT_BREADCRUMB_BYTES   \
+       (sizeof(u32) * GUC_PREEMPT_BREADCRUMB_DWORDS)
+
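The breadcrumb size above is fixed, so the preempt work-queue tail can be a constant too:

	GUC_PREEMPT_BREADCRUMB_BYTES = 8 dwords * sizeof(u32) = 32 bytes
	wq item tail (in qwords)     = 32 / sizeof(u64)       = 4

which is the GUC_PREEMPT_BREADCRUMB_BYTES / sizeof(u64) value passed to guc_wq_item_append() later in this patch.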
 /**
  * DOC: GuC-based command submission
  *
  *
  */
 
+static inline struct i915_priolist *to_priolist(struct rb_node *rb)
+{
+       return rb_entry(rb, struct i915_priolist, node);
+}
+
 static inline bool is_high_priority(struct intel_guc_client *client)
 {
        return (client->priority == GUC_CLIENT_PRIORITY_KMD_HIGH ||
@@ -496,8 +507,7 @@ static void guc_ring_doorbell(struct intel_guc_client *client)
        GEM_BUG_ON(db->db_status != GUC_DOORBELL_ENABLED);
 }
 
-static void guc_add_request(struct intel_guc *guc,
-                           struct drm_i915_gem_request *rq)
+static void guc_add_request(struct intel_guc *guc, struct i915_request *rq)
 {
        struct intel_guc_client *client = guc->execbuf_client;
        struct intel_engine_cs *engine = rq->engine;
@@ -531,8 +541,6 @@ static void flush_ggtt_writes(struct i915_vma *vma)
                POSTING_READ_FW(GUC_STATUS);
 }
 
-#define GUC_PREEMPT_FINISHED 0x1
-#define GUC_PREEMPT_BREADCRUMB_DWORDS 0x8
 static void inject_preempt_context(struct work_struct *work)
 {
        struct guc_preempt_work *preempt_work =
@@ -542,37 +550,17 @@ static void inject_preempt_context(struct work_struct *work)
                                             preempt_work[engine->id]);
        struct intel_guc_client *client = guc->preempt_client;
        struct guc_stage_desc *stage_desc = __get_stage_desc(client);
-       struct intel_ring *ring = client->owner->engine[engine->id].ring;
        u32 ctx_desc = lower_32_bits(intel_lr_context_descriptor(client->owner,
                                                                 engine));
-       u32 *cs = ring->vaddr + ring->tail;
        u32 data[7];
 
-       if (engine->id == RCS) {
-               cs = gen8_emit_ggtt_write_rcs(cs, GUC_PREEMPT_FINISHED,
-                               intel_hws_preempt_done_address(engine));
-       } else {
-               cs = gen8_emit_ggtt_write(cs, GUC_PREEMPT_FINISHED,
-                               intel_hws_preempt_done_address(engine));
-               *cs++ = MI_NOOP;
-               *cs++ = MI_NOOP;
-       }
-       *cs++ = MI_USER_INTERRUPT;
-       *cs++ = MI_NOOP;
-
-       GEM_BUG_ON(!IS_ALIGNED(ring->size,
-                              GUC_PREEMPT_BREADCRUMB_DWORDS * sizeof(u32)));
-       GEM_BUG_ON((void *)cs - (ring->vaddr + ring->tail) !=
-                  GUC_PREEMPT_BREADCRUMB_DWORDS * sizeof(u32));
-
-       ring->tail += GUC_PREEMPT_BREADCRUMB_DWORDS * sizeof(u32);
-       ring->tail &= (ring->size - 1);
-
-       flush_ggtt_writes(ring->vma);
-
+       /*
+        * The ring contains commands to write GUC_PREEMPT_FINISHED into HWSP.
+        * See guc_fill_preempt_context().
+        */
        spin_lock_irq(&client->wq_lock);
        guc_wq_item_append(client, engine->guc_id, ctx_desc,
-                          ring->tail / sizeof(u64), 0);
+                          GUC_PREEMPT_BREADCRUMB_BYTES / sizeof(u64), 0);
        spin_unlock_irq(&client->wq_lock);
 
        /*
@@ -648,7 +636,7 @@ static void guc_submit(struct intel_engine_cs *engine)
        unsigned int n;
 
        for (n = 0; n < execlists_num_ports(execlists); n++) {
-               struct drm_i915_gem_request *rq;
+               struct i915_request *rq;
                unsigned int count;
 
                rq = port_unpack(&port[n], &count);
@@ -662,19 +650,18 @@ static void guc_submit(struct intel_engine_cs *engine)
        }
 }
 
-static void port_assign(struct execlist_port *port,
-                       struct drm_i915_gem_request *rq)
+static void port_assign(struct execlist_port *port, struct i915_request *rq)
 {
        GEM_BUG_ON(port_isset(port));
 
-       port_set(port, i915_gem_request_get(rq));
+       port_set(port, i915_request_get(rq));
 }
 
 static void guc_dequeue(struct intel_engine_cs *engine)
 {
        struct intel_engine_execlists * const execlists = &engine->execlists;
        struct execlist_port *port = execlists->port;
-       struct drm_i915_gem_request *last = NULL;
+       struct i915_request *last = NULL;
        const struct execlist_port * const last_port =
                &execlists->port[execlists->port_mask];
        bool submit = false;
@@ -684,15 +671,12 @@ static void guc_dequeue(struct intel_engine_cs *engine)
        rb = execlists->first;
        GEM_BUG_ON(rb_first(&execlists->queue) != rb);
 
-       if (!rb)
-               goto unlock;
-
        if (port_isset(port)) {
                if (engine->i915->preempt_context) {
                        struct guc_preempt_work *preempt_work =
                                &engine->i915->guc.preempt_work[engine->id];
 
-                       if (rb_entry(rb, struct i915_priolist, node)->priority >
+                       if (execlists->queue_priority >
                            max(port_request(port)->priotree.priority, 0)) {
                                execlists_set_active(execlists,
                                                     EXECLISTS_ACTIVE_PREEMPT);
@@ -708,9 +692,9 @@ static void guc_dequeue(struct intel_engine_cs *engine)
        }
        GEM_BUG_ON(port_isset(port));
 
-       do {
-               struct i915_priolist *p = rb_entry(rb, typeof(*p), node);
-               struct drm_i915_gem_request *rq, *rn;
+       while (rb) {
+               struct i915_priolist *p = to_priolist(rb);
+               struct i915_request *rq, *rn;
 
                list_for_each_entry_safe(rq, rn, &p->requests, priotree.link) {
                        if (last && rq->ctx != last->ctx) {
@@ -727,9 +711,8 @@ static void guc_dequeue(struct intel_engine_cs *engine)
 
                        INIT_LIST_HEAD(&rq->priotree.link);
 
-                       __i915_gem_request_submit(rq);
-                       trace_i915_gem_request_in(rq,
-                                                 port_index(port, execlists));
+                       __i915_request_submit(rq);
+                       trace_i915_request_in(rq, port_index(port, execlists));
                        last = rq;
                        submit = true;
                }
@@ -739,8 +722,9 @@ static void guc_dequeue(struct intel_engine_cs *engine)
                INIT_LIST_HEAD(&p->requests);
                if (p->priority != I915_PRIORITY_NORMAL)
                        kmem_cache_free(engine->i915->priorities, p);
-       } while (rb);
+       }
 done:
+       execlists->queue_priority = rb ? to_priolist(rb)->priority : INT_MIN;
        execlists->first = rb;
        if (submit) {
                port_assign(port, last);
@@ -762,12 +746,12 @@ static void guc_submission_tasklet(unsigned long data)
        struct intel_engine_cs * const engine = (struct intel_engine_cs *)data;
        struct intel_engine_execlists * const execlists = &engine->execlists;
        struct execlist_port *port = execlists->port;
-       struct drm_i915_gem_request *rq;
+       struct i915_request *rq;
 
        rq = port_request(&port[0]);
-       while (rq && i915_gem_request_completed(rq)) {
-               trace_i915_gem_request_out(rq);
-               i915_gem_request_put(rq);
+       while (rq && i915_request_completed(rq)) {
+               trace_i915_request_out(rq);
+               i915_request_put(rq);
 
                execlists_port_complete(execlists, port);
 
@@ -972,6 +956,62 @@ static void guc_client_free(struct intel_guc_client *client)
        kfree(client);
 }
 
+static inline bool ctx_save_restore_disabled(struct intel_context *ce)
+{
+       u32 sr = ce->lrc_reg_state[CTX_CONTEXT_CONTROL + 1];
+
+#define SR_DISABLED \
+       _MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT | \
+                          CTX_CTRL_ENGINE_CTX_SAVE_INHIBIT)
+
+       return (sr & SR_DISABLED) == SR_DISABLED;
+
+#undef SR_DISABLED
+}
+
+static void guc_fill_preempt_context(struct intel_guc *guc)
+{
+       struct drm_i915_private *dev_priv = guc_to_i915(guc);
+       struct intel_guc_client *client = guc->preempt_client;
+       struct intel_engine_cs *engine;
+       enum intel_engine_id id;
+
+       for_each_engine(engine, dev_priv, id) {
+               struct intel_context *ce = &client->owner->engine[id];
+               u32 addr = intel_hws_preempt_done_address(engine);
+               u32 *cs;
+
+               GEM_BUG_ON(!ce->pin_count);
+
+               /*
+                * We rely on this context image *not* being saved after
+                * preemption. This ensures that the RING_HEAD / RING_TAIL
+                * remain pointing at initial values forever.
+                */
+               GEM_BUG_ON(!ctx_save_restore_disabled(ce));
+
+               cs = ce->ring->vaddr;
+               if (id == RCS) {
+                       cs = gen8_emit_ggtt_write_rcs(cs,
+                                                     GUC_PREEMPT_FINISHED,
+                                                     addr);
+               } else {
+                       cs = gen8_emit_ggtt_write(cs,
+                                                 GUC_PREEMPT_FINISHED,
+                                                 addr);
+                       *cs++ = MI_NOOP;
+                       *cs++ = MI_NOOP;
+               }
+               *cs++ = MI_USER_INTERRUPT;
+               *cs++ = MI_NOOP;
+
+               GEM_BUG_ON((void *)cs - ce->ring->vaddr !=
+                          GUC_PREEMPT_BREADCRUMB_BYTES);
+
+               flush_ggtt_writes(ce->ring->vma);
+       }
+}
+
 static int guc_clients_create(struct intel_guc *guc)
 {
        struct drm_i915_private *dev_priv = guc_to_i915(guc);
@@ -1002,6 +1042,8 @@ static int guc_clients_create(struct intel_guc *guc)
                        return PTR_ERR(client);
                }
                guc->preempt_client = client;
+
+               guc_fill_preempt_context(guc);
        }
 
        return 0;
index 9b6d781..1428874 100644 (file)
@@ -169,6 +169,23 @@ static void execlists_init_reg_state(u32 *reg_state,
                                     struct intel_engine_cs *engine,
                                     struct intel_ring *ring);
 
+static inline struct i915_priolist *to_priolist(struct rb_node *rb)
+{
+       return rb_entry(rb, struct i915_priolist, node);
+}
+
+static inline int rq_prio(const struct i915_request *rq)
+{
+       return rq->priotree.priority;
+}
+
+static inline bool need_preempt(const struct intel_engine_cs *engine,
+                               const struct i915_request *last,
+                               int prio)
+{
+       return engine->i915->preempt_context && prio > max(rq_prio(last), 0);
+}
+
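With execlists->queue_priority (initialised to INT_MIN in intel_engine_init_execlist() earlier in this patch) caching the priority of the highest-priority queued request, the preemption check reduces to the helper just added. Illustrative cases, for a running request last:

	need_preempt(engine, last, INT_MIN); /* false: queue empty */
	need_preempt(engine, last, 0);       /* false: default priority never preempts */
	need_preempt(engine, last, 1);       /* true iff a preempt context exists and
	                                      * 1 > max(rq_prio(last), 0) */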
 /**
  * intel_lr_context_descriptor_update() - calculate & cache the descriptor
  *                                        for a pinned context
@@ -224,7 +241,7 @@ find_priolist:
        parent = &execlists->queue.rb_node;
        while (*parent) {
                rb = *parent;
-               p = rb_entry(rb, typeof(*p), node);
+               p = to_priolist(rb);
                if (prio > p->priority) {
                        parent = &rb->rb_left;
                } else if (prio < p->priority) {
@@ -264,10 +281,10 @@ find_priolist:
        if (first)
                execlists->first = &p->node;
 
-       return ptr_pack_bits(p, first, 1);
+       return p;
 }
 
-static void unwind_wa_tail(struct drm_i915_gem_request *rq)
+static void unwind_wa_tail(struct i915_request *rq)
 {
        rq->tail = intel_ring_wrap(rq->ring, rq->wa_tail - WA_TAIL_BYTES);
        assert_ring_tail_valid(rq->ring, rq->tail);
@@ -275,7 +292,7 @@ static void unwind_wa_tail(struct drm_i915_gem_request *rq)
 
 static void __unwind_incomplete_requests(struct intel_engine_cs *engine)
 {
-       struct drm_i915_gem_request *rq, *rn;
+       struct i915_request *rq, *rn;
        struct i915_priolist *uninitialized_var(p);
        int last_prio = I915_PRIORITY_INVALID;
 
@@ -284,20 +301,16 @@ static void __unwind_incomplete_requests(struct intel_engine_cs *engine)
        list_for_each_entry_safe_reverse(rq, rn,
                                         &engine->timeline->requests,
                                         link) {
-               if (i915_gem_request_completed(rq))
+               if (i915_request_completed(rq))
                        return;
 
-               __i915_gem_request_unsubmit(rq);
+               __i915_request_unsubmit(rq);
                unwind_wa_tail(rq);
 
-               GEM_BUG_ON(rq->priotree.priority == I915_PRIORITY_INVALID);
-               if (rq->priotree.priority != last_prio) {
-                       p = lookup_priolist(engine,
-                                           &rq->priotree,
-                                           rq->priotree.priority);
-                       p = ptr_mask_bits(p, 1);
-
-                       last_prio = rq->priotree.priority;
+               GEM_BUG_ON(rq_prio(rq) == I915_PRIORITY_INVALID);
+               if (rq_prio(rq) != last_prio) {
+                       last_prio = rq_prio(rq);
+                       p = lookup_priolist(engine, &rq->priotree, last_prio);
                }
 
                list_add(&rq->priotree.link, &p->requests);
@@ -316,8 +329,7 @@ execlists_unwind_incomplete_requests(struct intel_engine_execlists *execlists)
 }
 
 static inline void
-execlists_context_status_change(struct drm_i915_gem_request *rq,
-                               unsigned long status)
+execlists_context_status_change(struct i915_request *rq, unsigned long status)
 {
        /*
         * Only used when GVT-g is enabled now. When GVT-g is disabled,
@@ -331,14 +343,14 @@ execlists_context_status_change(struct drm_i915_gem_request *rq,
 }
 
 static inline void
-execlists_context_schedule_in(struct drm_i915_gem_request *rq)
+execlists_context_schedule_in(struct i915_request *rq)
 {
        execlists_context_status_change(rq, INTEL_CONTEXT_SCHEDULE_IN);
        intel_engine_context_in(rq->engine);
 }
 
 static inline void
-execlists_context_schedule_out(struct drm_i915_gem_request *rq)
+execlists_context_schedule_out(struct i915_request *rq)
 {
        intel_engine_context_out(rq->engine);
        execlists_context_status_change(rq, INTEL_CONTEXT_SCHEDULE_OUT);
@@ -353,7 +365,7 @@ execlists_update_context_pdps(struct i915_hw_ppgtt *ppgtt, u32 *reg_state)
        ASSIGN_CTX_PDP(ppgtt, reg_state, 0);
 }
 
-static u64 execlists_update_context(struct drm_i915_gem_request *rq)
+static u64 execlists_update_context(struct i915_request *rq)
 {
        struct intel_context *ce = &rq->ctx->engine[rq->engine->id];
        struct i915_hw_ppgtt *ppgtt =
@@ -385,7 +397,7 @@ static void execlists_submit_ports(struct intel_engine_cs *engine)
        unsigned int n;
 
        for (n = execlists_num_ports(&engine->execlists); n--; ) {
-               struct drm_i915_gem_request *rq;
+               struct i915_request *rq;
                unsigned int count;
                u64 desc;
 
@@ -398,10 +410,11 @@ static void execlists_submit_ports(struct intel_engine_cs *engine)
                        desc = execlists_update_context(rq);
                        GEM_DEBUG_EXEC(port[n].context_id = upper_32_bits(desc));
 
-                       GEM_TRACE("%s in[%d]:  ctx=%d.%d, seqno=%x\n",
+                       GEM_TRACE("%s in[%d]:  ctx=%d.%d, seqno=%x, prio=%d\n",
                                  engine->name, n,
                                  port[n].context_id, count,
-                                 rq->global_seqno);
+                                 rq->global_seqno,
+                                 rq_prio(rq));
                } else {
                        GEM_BUG_ON(!n);
                        desc = 0;
@@ -430,15 +443,14 @@ static bool can_merge_ctx(const struct i915_gem_context *prev,
        return true;
 }
 
-static void port_assign(struct execlist_port *port,
-                       struct drm_i915_gem_request *rq)
+static void port_assign(struct execlist_port *port, struct i915_request *rq)
 {
        GEM_BUG_ON(rq == port_request(port));
 
        if (port_isset(port))
-               i915_gem_request_put(port_request(port));
+               i915_request_put(port_request(port));
 
-       port_set(port, port_pack(i915_gem_request_get(rq), port_count(port)));
+       port_set(port, port_pack(i915_request_get(rq), port_count(port)));
 }
 
 static void inject_preempt_context(struct intel_engine_cs *engine)
@@ -449,25 +461,23 @@ static void inject_preempt_context(struct intel_engine_cs *engine)
 
        GEM_BUG_ON(engine->execlists.preempt_complete_status !=
                   upper_32_bits(ce->lrc_desc));
-       GEM_BUG_ON(!IS_ALIGNED(ce->ring->size, WA_TAIL_BYTES));
-
-       memset(ce->ring->vaddr + ce->ring->tail, 0, WA_TAIL_BYTES);
-       ce->ring->tail += WA_TAIL_BYTES;
-       ce->ring->tail &= (ce->ring->size - 1);
-       ce->lrc_reg_state[CTX_RING_TAIL+1] = ce->ring->tail;
-
        GEM_BUG_ON((ce->lrc_reg_state[CTX_CONTEXT_CONTROL + 1] &
                    _MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT |
                                       CTX_CTRL_ENGINE_CTX_SAVE_INHIBIT)) !=
                   _MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT |
                                      CTX_CTRL_ENGINE_CTX_SAVE_INHIBIT));
 
+       /*
+        * Switch to our empty preempt context so
+        * the state of the GPU is known (idle).
+        */
        GEM_TRACE("%s\n", engine->name);
        for (n = execlists_num_ports(&engine->execlists); --n; )
                elsp_write(0, engine->execlists.elsp);
 
        elsp_write(ce->lrc_desc, engine->execlists.elsp);
        execlists_clear_active(&engine->execlists, EXECLISTS_ACTIVE_HWACK);
+       execlists_set_active(&engine->execlists, EXECLISTS_ACTIVE_PREEMPT);
 }
 
 static void execlists_dequeue(struct intel_engine_cs *engine)
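
need_preempt(), used in the dequeue path below, is also introduced outside the quoted hunks. Going by the open-coded test it replaces in the next hunk (engine->i915->preempt_context && queued priority > max(priority of the last submitted request, 0)), a plausible sketch is:

	static inline bool need_preempt(const struct intel_engine_cs *engine,
					const struct i915_request *last,
					int prio)
	{
		/*
		 * Only preempt when a dedicated preempt context exists and the
		 * queued work outranks what is already on the hardware; never
		 * preempt on behalf of idle-priority work. Reconstructed from
		 * the removed condition, so treat this as an approximation.
		 */
		return engine->i915->preempt_context && prio > max(rq_prio(last), 0);
	}
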
@@ -476,7 +486,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
        struct execlist_port *port = execlists->port;
        const struct execlist_port * const last_port =
                &execlists->port[execlists->port_mask];
-       struct drm_i915_gem_request *last = port_request(port);
+       struct i915_request *last = port_request(port);
        struct rb_node *rb;
        bool submit = false;
 
@@ -504,8 +514,6 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
        spin_lock_irq(&engine->timeline->lock);
        rb = execlists->first;
        GEM_BUG_ON(rb_first(&execlists->queue) != rb);
-       if (!rb)
-               goto unlock;
 
        if (last) {
                /*
@@ -528,55 +536,49 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
                if (!execlists_is_active(execlists, EXECLISTS_ACTIVE_HWACK))
                        goto unlock;
 
-               if (engine->i915->preempt_context &&
-                   rb_entry(rb, struct i915_priolist, node)->priority >
-                   max(last->priotree.priority, 0)) {
-                       /*
-                        * Switch to our empty preempt context so
-                        * the state of the GPU is known (idle).
-                        */
+               if (need_preempt(engine, last, execlists->queue_priority)) {
                        inject_preempt_context(engine);
-                       execlists_set_active(execlists,
-                                            EXECLISTS_ACTIVE_PREEMPT);
                        goto unlock;
-               } else {
-                       /*
-                        * In theory, we could coalesce more requests onto
-                        * the second port (the first port is active, with
-                        * no preemptions pending). However, that means we
-                        * then have to deal with the possible lite-restore
-                        * of the second port (as we submit the ELSP, there
-                        * may be a context-switch) but also we may complete
-                        * the resubmission before the context-switch. Ergo,
-                        * coalescing onto the second port will cause a
-                        * preemption event, but we cannot predict whether
-                        * that will affect port[0] or port[1].
-                        *
-                        * If the second port is already active, we can wait
-                        * until the next context-switch before contemplating
-                        * new requests. The GPU will be busy and we should be
-                        * able to resubmit the new ELSP before it idles,
-                        * avoiding pipeline bubbles (momentary pauses where
-                        * the driver is unable to keep up the supply of new
-                        * work).
-                        */
-                       if (port_count(&port[1]))
-                               goto unlock;
-
-                       /* WaIdleLiteRestore:bdw,skl
-                        * Apply the wa NOOPs to prevent
-                        * ring:HEAD == req:TAIL as we resubmit the
-                        * request. See gen8_emit_breadcrumb() for
-                        * where we prepare the padding after the
-                        * end of the request.
-                        */
-                       last->tail = last->wa_tail;
                }
+
+               /*
+                * In theory, we could coalesce more requests onto
+                * the second port (the first port is active, with
+                * no preemptions pending). However, that means we
+                * then have to deal with the possible lite-restore
+                * of the second port (as we submit the ELSP, there
+                * may be a context-switch) but also we may complete
+                * the resubmission before the context-switch. Ergo,
+                * coalescing onto the second port will cause a
+                * preemption event, but we cannot predict whether
+                * that will affect port[0] or port[1].
+                *
+                * If the second port is already active, we can wait
+                * until the next context-switch before contemplating
+                * new requests. The GPU will be busy and we should be
+                * able to resubmit the new ELSP before it idles,
+                * avoiding pipeline bubbles (momentary pauses where
+                * the driver is unable to keep up the supply of new
+                * work). However, we have to double check that the
+                * priorities of the ports haven't been switched.
+                */
+               if (port_count(&port[1]))
+                       goto unlock;
+
+               /*
+                * WaIdleLiteRestore:bdw,skl
+                * Apply the wa NOOPs to prevent
+                * ring:HEAD == rq:TAIL as we resubmit the
+                * request. See gen8_emit_breadcrumb() for
+                * where we prepare the padding after the
+                * end of the request.
+                */
+               last->tail = last->wa_tail;
        }
 
-       do {
-               struct i915_priolist *p = rb_entry(rb, typeof(*p), node);
-               struct drm_i915_gem_request *rq, *rn;
+       while (rb) {
+               struct i915_priolist *p = to_priolist(rb);
+               struct i915_request *rq, *rn;
 
                list_for_each_entry_safe(rq, rn, &p->requests, priotree.link) {
                        /*
@@ -626,8 +628,8 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
                        }
 
                        INIT_LIST_HEAD(&rq->priotree.link);
-                       __i915_gem_request_submit(rq);
-                       trace_i915_gem_request_in(rq, port_index(port, execlists));
+                       __i915_request_submit(rq);
+                       trace_i915_request_in(rq, port_index(port, execlists));
                        last = rq;
                        submit = true;
                }
@@ -637,8 +639,9 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
                INIT_LIST_HEAD(&p->requests);
                if (p->priority != I915_PRIORITY_NORMAL)
                        kmem_cache_free(engine->i915->priorities, p);
-       } while (rb);
+       }
 done:
+       execlists->queue_priority = rb ? to_priolist(rb)->priority : INT_MIN;
        execlists->first = rb;
        if (submit)
                port_assign(port, last);
@@ -665,12 +668,12 @@ execlists_cancel_port_requests(struct intel_engine_execlists * const execlists)
        unsigned int num_ports = execlists_num_ports(execlists);
 
        while (num_ports-- && port_isset(port)) {
-               struct drm_i915_gem_request *rq = port_request(port);
+               struct i915_request *rq = port_request(port);
 
                GEM_BUG_ON(!execlists->active);
                intel_engine_context_out(rq->engine);
                execlists_context_status_change(rq, INTEL_CONTEXT_SCHEDULE_PREEMPTED);
-               i915_gem_request_put(rq);
+               i915_request_put(rq);
 
                memset(port, 0, sizeof(*port));
                port++;
@@ -680,7 +683,7 @@ execlists_cancel_port_requests(struct intel_engine_execlists * const execlists)
 static void execlists_cancel_requests(struct intel_engine_cs *engine)
 {
        struct intel_engine_execlists * const execlists = &engine->execlists;
-       struct drm_i915_gem_request *rq, *rn;
+       struct i915_request *rq, *rn;
        struct rb_node *rb;
        unsigned long flags;
 
@@ -692,20 +695,20 @@ static void execlists_cancel_requests(struct intel_engine_cs *engine)
        /* Mark all executing requests as skipped. */
        list_for_each_entry(rq, &engine->timeline->requests, link) {
                GEM_BUG_ON(!rq->global_seqno);
-               if (!i915_gem_request_completed(rq))
+               if (!i915_request_completed(rq))
                        dma_fence_set_error(&rq->fence, -EIO);
        }
 
        /* Flush the queued requests to the timeline list (for retiring). */
        rb = execlists->first;
        while (rb) {
-               struct i915_priolist *p = rb_entry(rb, typeof(*p), node);
+               struct i915_priolist *p = to_priolist(rb);
 
                list_for_each_entry_safe(rq, rn, &p->requests, priotree.link) {
                        INIT_LIST_HEAD(&rq->priotree.link);
 
                        dma_fence_set_error(&rq->fence, -EIO);
-                       __i915_gem_request_submit(rq);
+                       __i915_request_submit(rq);
                }
 
                rb = rb_next(rb);
@@ -717,7 +720,7 @@ static void execlists_cancel_requests(struct intel_engine_cs *engine)
 
        /* Remaining _unready_ requests will be nop'ed when submitted */
 
-
+       execlists->queue_priority = INT_MIN;
        execlists->queue = RB_ROOT;
        execlists->first = NULL;
        GEM_BUG_ON(port_isset(execlists->port));
@@ -806,7 +809,7 @@ static void execlists_submission_tasklet(unsigned long data)
                          tail, GEN8_CSB_WRITE_PTR(readl(dev_priv->regs + i915_mmio_reg_offset(RING_CONTEXT_STATUS_PTR(engine)))), fw ? "" : "?");
 
                while (head != tail) {
-                       struct drm_i915_gem_request *rq;
+                       struct i915_request *rq;
                        unsigned int status;
                        unsigned int count;
 
@@ -872,23 +875,28 @@ static void execlists_submission_tasklet(unsigned long data)
                        GEM_BUG_ON(!execlists_is_active(execlists,
                                                        EXECLISTS_ACTIVE_USER));
 
-                       /* Check the context/desc id for this event matches */
-                       GEM_DEBUG_BUG_ON(buf[2 * head + 1] != port->context_id);
-
                        rq = port_unpack(port, &count);
-                       GEM_TRACE("%s out[0]: ctx=%d.%d, seqno=%x\n",
+                       GEM_TRACE("%s out[0]: ctx=%d.%d, seqno=%x, prio=%d\n",
                                  engine->name,
                                  port->context_id, count,
-                                 rq ? rq->global_seqno : 0);
+                                 rq ? rq->global_seqno : 0,
+                                 rq ? rq_prio(rq) : 0);
+
+                       /* Check the context/desc id for this event matches */
+                       GEM_DEBUG_BUG_ON(buf[2 * head + 1] != port->context_id);
+
                        GEM_BUG_ON(count == 0);
                        if (--count == 0) {
                                GEM_BUG_ON(status & GEN8_CTX_STATUS_PREEMPTED);
                                GEM_BUG_ON(port_isset(&port[1]) &&
                                           !(status & GEN8_CTX_STATUS_ELEMENT_SWITCH));
-                               GEM_BUG_ON(!i915_gem_request_completed(rq));
+                               GEM_BUG_ON(!i915_request_completed(rq));
                                execlists_context_schedule_out(rq);
-                               trace_i915_gem_request_out(rq);
-                               i915_gem_request_put(rq);
+                               trace_i915_request_out(rq);
+                               i915_request_put(rq);
+
+                               GEM_TRACE("%s completed ctx=%d\n",
+                                         engine->name, port->context_id);
 
                                execlists_port_complete(execlists, port);
                        } else {
@@ -917,18 +925,22 @@ static void execlists_submission_tasklet(unsigned long data)
                intel_uncore_forcewake_put(dev_priv, execlists->fw_domains);
 }
 
-static void insert_request(struct intel_engine_cs *engine,
-                          struct i915_priotree *pt,
-                          int prio)
+static void queue_request(struct intel_engine_cs *engine,
+                         struct i915_priotree *pt,
+                         int prio)
 {
-       struct i915_priolist *p = lookup_priolist(engine, pt, prio);
+       list_add_tail(&pt->link, &lookup_priolist(engine, pt, prio)->requests);
+}
 
-       list_add_tail(&pt->link, &ptr_mask_bits(p, 1)->requests);
-       if (ptr_unmask_bits(p, 1))
+static void submit_queue(struct intel_engine_cs *engine, int prio)
+{
+       if (prio > engine->execlists.queue_priority) {
+               engine->execlists.queue_priority = prio;
                tasklet_hi_schedule(&engine->execlists.tasklet);
+       }
 }
 
-static void execlists_submit_request(struct drm_i915_gem_request *request)
+static void execlists_submit_request(struct i915_request *request)
 {
        struct intel_engine_cs *engine = request->engine;
        unsigned long flags;
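
Read together with the dequeue and cancel hunks above, execlists->queue_priority caches the priority of the best request still waiting in the queue: execlists_dequeue() refreshes it from the head of the rbtree (INT_MIN when the queue is empty), execlists_cancel_requests() resets it to INT_MIN, and submit_queue() only schedules the submission tasklet when a newly queued request beats that cached value. In sketch form, the submission fast path shown in the next hunk becomes:

	spin_lock_irqsave(&engine->timeline->lock, flags);
	queue_request(engine, &request->priotree, rq_prio(request));	/* add to the priolist */
	submit_queue(engine, rq_prio(request));		/* kick the tasklet only if this beats queue_priority */
	spin_unlock_irqrestore(&engine->timeline->lock, flags);
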
@@ -936,7 +948,8 @@ static void execlists_submit_request(struct drm_i915_gem_request *request)
        /* Will be called from irq-context when using foreign fences. */
        spin_lock_irqsave(&engine->timeline->lock, flags);
 
-       insert_request(engine, &request->priotree, request->priotree.priority);
+       queue_request(engine, &request->priotree, rq_prio(request));
+       submit_queue(engine, rq_prio(request));
 
        GEM_BUG_ON(!engine->execlists.first);
        GEM_BUG_ON(list_empty(&request->priotree.link));
@@ -944,9 +957,9 @@ static void execlists_submit_request(struct drm_i915_gem_request *request)
        spin_unlock_irqrestore(&engine->timeline->lock, flags);
 }
 
-static struct drm_i915_gem_request *pt_to_request(struct i915_priotree *pt)
+static struct i915_request *pt_to_request(struct i915_priotree *pt)
 {
-       return container_of(pt, struct drm_i915_gem_request, priotree);
+       return container_of(pt, struct i915_request, priotree);
 }
 
 static struct intel_engine_cs *
@@ -964,7 +977,7 @@ pt_lock_engine(struct i915_priotree *pt, struct intel_engine_cs *locked)
        return engine;
 }
 
-static void execlists_schedule(struct drm_i915_gem_request *request, int prio)
+static void execlists_schedule(struct i915_request *request, int prio)
 {
        struct intel_engine_cs *engine;
        struct i915_dependency *dep, *p;
@@ -973,7 +986,7 @@ static void execlists_schedule(struct drm_i915_gem_request *request, int prio)
 
        GEM_BUG_ON(prio == I915_PRIORITY_INVALID);
 
-       if (i915_gem_request_completed(request))
+       if (i915_request_completed(request))
                return;
 
        if (prio <= READ_ONCE(request->priotree.priority))
@@ -992,7 +1005,7 @@ static void execlists_schedule(struct drm_i915_gem_request *request, int prio)
         * static void update_priorities(struct i915_priotree *pt, prio) {
         *      list_for_each_entry(dep, &pt->signalers_list, signal_link)
         *              update_priorities(dep->signal, prio)
-        *      insert_request(pt);
+        *      queue_request(pt);
         * }
         * but that may have unlimited recursion depth and so runs a very
         * real risk of overrunning the kernel stack. Instead, we build
@@ -1055,8 +1068,9 @@ static void execlists_schedule(struct drm_i915_gem_request *request, int prio)
                pt->priority = prio;
                if (!list_empty(&pt->link)) {
                        __list_del_entry(&pt->link);
-                       insert_request(engine, pt, prio);
+                       queue_request(engine, pt, prio);
                }
+               submit_queue(engine, prio);
        }
 
        spin_unlock_irq(&engine->timeline->lock);
@@ -1158,7 +1172,7 @@ static void execlists_context_unpin(struct intel_engine_cs *engine,
        i915_gem_context_put(ctx);
 }
 
-static int execlists_request_alloc(struct drm_i915_gem_request *request)
+static int execlists_request_alloc(struct i915_request *request)
 {
        struct intel_engine_cs *engine = request->engine;
        struct intel_context *ce = &request->ctx->engine[engine->id];
@@ -1590,7 +1604,7 @@ static void reset_irq(struct intel_engine_cs *engine)
 }
 
 static void reset_common_ring(struct intel_engine_cs *engine,
-                             struct drm_i915_gem_request *request)
+                             struct i915_request *request)
 {
        struct intel_engine_execlists * const execlists = &engine->execlists;
        struct intel_context *ce;
@@ -1658,15 +1672,15 @@ static void reset_common_ring(struct intel_engine_cs *engine,
        unwind_wa_tail(request);
 }
 
-static int intel_logical_ring_emit_pdps(struct drm_i915_gem_request *req)
+static int intel_logical_ring_emit_pdps(struct i915_request *rq)
 {
-       struct i915_hw_ppgtt *ppgtt = req->ctx->ppgtt;
-       struct intel_engine_cs *engine = req->engine;
+       struct i915_hw_ppgtt *ppgtt = rq->ctx->ppgtt;
+       struct intel_engine_cs *engine = rq->engine;
        const int num_lri_cmds = GEN8_3LVL_PDPES * 2;
        u32 *cs;
        int i;
 
-       cs = intel_ring_begin(req, num_lri_cmds * 2 + 2);
+       cs = intel_ring_begin(rq, num_lri_cmds * 2 + 2);
        if (IS_ERR(cs))
                return PTR_ERR(cs);
 
@@ -1681,12 +1695,12 @@ static int intel_logical_ring_emit_pdps(struct drm_i915_gem_request *req)
        }
 
        *cs++ = MI_NOOP;
-       intel_ring_advance(req, cs);
+       intel_ring_advance(rq, cs);
 
        return 0;
 }
 
-static int gen8_emit_bb_start(struct drm_i915_gem_request *req,
+static int gen8_emit_bb_start(struct i915_request *rq,
                              u64 offset, u32 len,
                              const unsigned int flags)
 {
@@ -1699,18 +1713,18 @@ static int gen8_emit_bb_start(struct drm_i915_gem_request *req,
         * it is unsafe in case of lite-restore (because the ctx is
         * not idle). PML4 is allocated during ppgtt init so this is
         * not needed in 48-bit.*/
-       if (req->ctx->ppgtt &&
-           (intel_engine_flag(req->engine) & req->ctx->ppgtt->pd_dirty_rings) &&
-           !i915_vm_is_48bit(&req->ctx->ppgtt->base) &&
-           !intel_vgpu_active(req->i915)) {
-               ret = intel_logical_ring_emit_pdps(req);
+       if (rq->ctx->ppgtt &&
+           (intel_engine_flag(rq->engine) & rq->ctx->ppgtt->pd_dirty_rings) &&
+           !i915_vm_is_48bit(&rq->ctx->ppgtt->base) &&
+           !intel_vgpu_active(rq->i915)) {
+               ret = intel_logical_ring_emit_pdps(rq);
                if (ret)
                        return ret;
 
-               req->ctx->ppgtt->pd_dirty_rings &= ~intel_engine_flag(req->engine);
+               rq->ctx->ppgtt->pd_dirty_rings &= ~intel_engine_flag(rq->engine);
        }
 
-       cs = intel_ring_begin(req, 4);
+       cs = intel_ring_begin(rq, 4);
        if (IS_ERR(cs))
                return PTR_ERR(cs);
 
@@ -1739,7 +1753,7 @@ static int gen8_emit_bb_start(struct drm_i915_gem_request *req,
                (flags & I915_DISPATCH_RS ? MI_BATCH_RESOURCE_STREAMER : 0);
        *cs++ = lower_32_bits(offset);
        *cs++ = upper_32_bits(offset);
-       intel_ring_advance(req, cs);
+       intel_ring_advance(rq, cs);
 
        return 0;
 }
@@ -1758,7 +1772,7 @@ static void gen8_logical_ring_disable_irq(struct intel_engine_cs *engine)
        I915_WRITE_IMR(engine, ~engine->irq_keep_mask);
 }
 
-static int gen8_emit_flush(struct drm_i915_gem_request *request, u32 mode)
+static int gen8_emit_flush(struct i915_request *request, u32 mode)
 {
        u32 cmd, *cs;
 
@@ -1790,7 +1804,7 @@ static int gen8_emit_flush(struct drm_i915_gem_request *request, u32 mode)
        return 0;
 }
 
-static int gen8_emit_flush_render(struct drm_i915_gem_request *request,
+static int gen8_emit_flush_render(struct i915_request *request,
                                  u32 mode)
 {
        struct intel_engine_cs *engine = request->engine;
@@ -1865,7 +1879,7 @@ static int gen8_emit_flush_render(struct drm_i915_gem_request *request,
  * used as a workaround for not being allowed to do lite
  * restore with HEAD==TAIL (WaIdleLiteRestore).
  */
-static void gen8_emit_wa_tail(struct drm_i915_gem_request *request, u32 *cs)
+static void gen8_emit_wa_tail(struct i915_request *request, u32 *cs)
 {
        /* Ensure there's always at least one preemption point per-request. */
        *cs++ = MI_ARB_CHECK;
@@ -1873,7 +1887,7 @@ static void gen8_emit_wa_tail(struct drm_i915_gem_request *request, u32 *cs)
        request->wa_tail = intel_ring_offset(request, cs);
 }
 
-static void gen8_emit_breadcrumb(struct drm_i915_gem_request *request, u32 *cs)
+static void gen8_emit_breadcrumb(struct i915_request *request, u32 *cs)
 {
        /* w/a: bit 5 needs to be zero for MI_FLUSH_DW address. */
        BUILD_BUG_ON(I915_GEM_HWS_INDEX_ADDR & (1 << 5));
@@ -1889,8 +1903,7 @@ static void gen8_emit_breadcrumb(struct drm_i915_gem_request *request, u32 *cs)
 }
 static const int gen8_emit_breadcrumb_sz = 6 + WA_TAIL_DWORDS;
 
-static void gen8_emit_breadcrumb_rcs(struct drm_i915_gem_request *request,
-                                       u32 *cs)
+static void gen8_emit_breadcrumb_rcs(struct i915_request *request, u32 *cs)
 {
        /* We're using qword write, seqno should be aligned to 8 bytes. */
        BUILD_BUG_ON(I915_GEM_HWS_INDEX & 1);
@@ -1906,15 +1919,15 @@ static void gen8_emit_breadcrumb_rcs(struct drm_i915_gem_request *request,
 }
 static const int gen8_emit_breadcrumb_rcs_sz = 8 + WA_TAIL_DWORDS;
 
-static int gen8_init_rcs_context(struct drm_i915_gem_request *req)
+static int gen8_init_rcs_context(struct i915_request *rq)
 {
        int ret;
 
-       ret = intel_ring_workarounds_emit(req);
+       ret = intel_ring_workarounds_emit(rq);
        if (ret)
                return ret;
 
-       ret = intel_rcs_context_init_mocs(req);
+       ret = intel_rcs_context_init_mocs(rq);
        /*
         * Failing to program the MOCS is non-fatal. The system will not
         * run at peak performance. So generate an error and carry on.
@@ -1922,7 +1935,7 @@ static int gen8_init_rcs_context(struct drm_i915_gem_request *req)
        if (ret)
                DRM_ERROR("MOCS failed to program: expect performance issues.\n");
 
-       return i915_gem_render_state_emit(req);
+       return i915_gem_render_state_emit(rq);
 }
 
 /**
index abb7a8c..c0b34b7 100644 (file)
@@ -265,7 +265,7 @@ int intel_mocs_init_engine(struct intel_engine_cs *engine)
 
 /**
  * emit_mocs_control_table() - emit the mocs control table
- * @req:       Request to set up the MOCS table for.
+ * @rq:        Request to set up the MOCS table for.
  * @table:     The values to program into the control regs.
  *
  * This function simply emits a MI_LOAD_REGISTER_IMM command for the
@@ -273,17 +273,17 @@ int intel_mocs_init_engine(struct intel_engine_cs *engine)
  *
  * Return: 0 on success, otherwise the error status.
  */
-static int emit_mocs_control_table(struct drm_i915_gem_request *req,
+static int emit_mocs_control_table(struct i915_request *rq,
                                   const struct drm_i915_mocs_table *table)
 {
-       enum intel_engine_id engine = req->engine->id;
+       enum intel_engine_id engine = rq->engine->id;
        unsigned int index;
        u32 *cs;
 
        if (WARN_ON(table->size > GEN9_NUM_MOCS_ENTRIES))
                return -ENODEV;
 
-       cs = intel_ring_begin(req, 2 + 2 * GEN9_NUM_MOCS_ENTRIES);
+       cs = intel_ring_begin(rq, 2 + 2 * GEN9_NUM_MOCS_ENTRIES);
        if (IS_ERR(cs))
                return PTR_ERR(cs);
 
@@ -308,7 +308,7 @@ static int emit_mocs_control_table(struct drm_i915_gem_request *req,
        }
 
        *cs++ = MI_NOOP;
-       intel_ring_advance(req, cs);
+       intel_ring_advance(rq, cs);
 
        return 0;
 }
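
The body elided between these two hunks follows the driver's usual ring-emission shape: reserve dwords with intel_ring_begin(), emit one MI_LOAD_REGISTER_IMM header followed by (register offset, value) pairs for the MOCS slots, pad with MI_NOOP and close with intel_ring_advance(). A condensed sketch of that shape, where mocs_register() and the control_value field are assumptions rather than lines taken from this patch (the real code also pads unused slots out to GEN9_NUM_MOCS_ENTRIES, which is why the reservation above uses the full table size):

	cs = intel_ring_begin(rq, 2 + 2 * table->size);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = MI_LOAD_REGISTER_IMM(table->size);
	for (index = 0; index < table->size; index++) {
		*cs++ = i915_mmio_reg_offset(mocs_register(engine, index));	/* assumed per-engine register lookup */
		*cs++ = table->table[index].control_value;			/* assumed field name */
	}
	*cs++ = MI_NOOP;
	intel_ring_advance(rq, cs);
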
@@ -323,7 +323,7 @@ static inline u32 l3cc_combine(const struct drm_i915_mocs_table *table,
 
 /**
  * emit_mocs_l3cc_table() - emit the mocs control table
- * @req:       Request to set up the MOCS table for.
+ * @rq:        Request to set up the MOCS table for.
  * @table:     The values to program into the control regs.
  *
  * This function simply emits a MI_LOAD_REGISTER_IMM command for the
@@ -332,7 +332,7 @@ static inline u32 l3cc_combine(const struct drm_i915_mocs_table *table,
  *
  * Return: 0 on success, otherwise the error status.
  */
-static int emit_mocs_l3cc_table(struct drm_i915_gem_request *req,
+static int emit_mocs_l3cc_table(struct i915_request *rq,
                                const struct drm_i915_mocs_table *table)
 {
        unsigned int i;
@@ -341,7 +341,7 @@ static int emit_mocs_l3cc_table(struct drm_i915_gem_request *req,
        if (WARN_ON(table->size > GEN9_NUM_MOCS_ENTRIES))
                return -ENODEV;
 
-       cs = intel_ring_begin(req, 2 + GEN9_NUM_MOCS_ENTRIES);
+       cs = intel_ring_begin(rq, 2 + GEN9_NUM_MOCS_ENTRIES);
        if (IS_ERR(cs))
                return PTR_ERR(cs);
 
@@ -370,7 +370,7 @@ static int emit_mocs_l3cc_table(struct drm_i915_gem_request *req,
        }
 
        *cs++ = MI_NOOP;
-       intel_ring_advance(req, cs);
+       intel_ring_advance(rq, cs);
 
        return 0;
 }
@@ -417,7 +417,7 @@ void intel_mocs_init_l3cc_table(struct drm_i915_private *dev_priv)
 
 /**
  * intel_rcs_context_init_mocs() - program the MOCS register.
- * @req:       Request to set up the MOCS tables for.
+ * @rq:        Request to set up the MOCS tables for.
  *
  * This function will emit a batch buffer with the values required for
  * programming the MOCS register values for all the currently supported
@@ -431,19 +431,19 @@ void intel_mocs_init_l3cc_table(struct drm_i915_private *dev_priv)
  *
  * Return: 0 on success, otherwise the error status.
  */
-int intel_rcs_context_init_mocs(struct drm_i915_gem_request *req)
+int intel_rcs_context_init_mocs(struct i915_request *rq)
 {
        struct drm_i915_mocs_table t;
        int ret;
 
-       if (get_mocs_settings(req->i915, &t)) {
+       if (get_mocs_settings(rq->i915, &t)) {
                /* Program the RCS control registers */
-               ret = emit_mocs_control_table(req, &t);
+               ret = emit_mocs_control_table(rq, &t);
                if (ret)
                        return ret;
 
                /* Now program the l3cc registers */
-               ret = emit_mocs_l3cc_table(req, &t);
+               ret = emit_mocs_l3cc_table(rq, &t);
                if (ret)
                        return ret;
        }
index ce4a5df..d1751f9 100644 (file)
@@ -52,7 +52,7 @@
 #include <drm/drmP.h>
 #include "i915_drv.h"
 
-int intel_rcs_context_init_mocs(struct drm_i915_gem_request *req);
+int intel_rcs_context_init_mocs(struct i915_request *rq);
 void intel_mocs_init_l3cc_table(struct drm_i915_private *dev_priv);
 int intel_mocs_init_engine(struct intel_engine_cs *engine);
 
index 89f568e..36671a9 100644 (file)
@@ -234,50 +234,50 @@ static void intel_overlay_unmap_regs(struct intel_overlay *overlay,
 }
 
 static void intel_overlay_submit_request(struct intel_overlay *overlay,
-                                        struct drm_i915_gem_request *req,
+                                        struct i915_request *rq,
                                         i915_gem_retire_fn retire)
 {
        GEM_BUG_ON(i915_gem_active_peek(&overlay->last_flip,
                                        &overlay->i915->drm.struct_mutex));
        i915_gem_active_set_retire_fn(&overlay->last_flip, retire,
                                      &overlay->i915->drm.struct_mutex);
-       i915_gem_active_set(&overlay->last_flip, req);
-       i915_add_request(req);
+       i915_gem_active_set(&overlay->last_flip, rq);
+       i915_request_add(rq);
 }
 
 static int intel_overlay_do_wait_request(struct intel_overlay *overlay,
-                                        struct drm_i915_gem_request *req,
+                                        struct i915_request *rq,
                                         i915_gem_retire_fn retire)
 {
-       intel_overlay_submit_request(overlay, req, retire);
+       intel_overlay_submit_request(overlay, rq, retire);
        return i915_gem_active_retire(&overlay->last_flip,
                                      &overlay->i915->drm.struct_mutex);
 }
 
-static struct drm_i915_gem_request *alloc_request(struct intel_overlay *overlay)
+static struct i915_request *alloc_request(struct intel_overlay *overlay)
 {
        struct drm_i915_private *dev_priv = overlay->i915;
        struct intel_engine_cs *engine = dev_priv->engine[RCS];
 
-       return i915_gem_request_alloc(engine, dev_priv->kernel_context);
+       return i915_request_alloc(engine, dev_priv->kernel_context);
 }
 
 /* overlay needs to be disable in OCMD reg */
 static int intel_overlay_on(struct intel_overlay *overlay)
 {
        struct drm_i915_private *dev_priv = overlay->i915;
-       struct drm_i915_gem_request *req;
+       struct i915_request *rq;
        u32 *cs;
 
        WARN_ON(overlay->active);
 
-       req = alloc_request(overlay);
-       if (IS_ERR(req))
-               return PTR_ERR(req);
+       rq = alloc_request(overlay);
+       if (IS_ERR(rq))
+               return PTR_ERR(rq);
 
-       cs = intel_ring_begin(req, 4);
+       cs = intel_ring_begin(rq, 4);
        if (IS_ERR(cs)) {
-               i915_add_request(req);
+               i915_request_add(rq);
                return PTR_ERR(cs);
        }
 
@@ -290,9 +290,9 @@ static int intel_overlay_on(struct intel_overlay *overlay)
        *cs++ = overlay->flip_addr | OFC_UPDATE;
        *cs++ = MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP;
        *cs++ = MI_NOOP;
-       intel_ring_advance(req, cs);
+       intel_ring_advance(rq, cs);
 
-       return intel_overlay_do_wait_request(overlay, req, NULL);
+       return intel_overlay_do_wait_request(overlay, rq, NULL);
 }
 
 static void intel_overlay_flip_prepare(struct intel_overlay *overlay,
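
All of the overlay paths in this file follow the same request lifecycle that the rename touches: allocate an i915_request, reserve ring space, write command dwords, close the packet, then either add the request and move on or wait for it. A minimal sketch of that shape, built only from calls visible in this diff (the MI_NOOP payload is a placeholder, not the actual overlay commands):

	struct i915_request *rq;
	u32 *cs;

	rq = i915_request_alloc(engine, dev_priv->kernel_context);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	cs = intel_ring_begin(rq, 2);	/* reserve two dwords in the ring */
	if (IS_ERR(cs)) {
		i915_request_add(rq);	/* still add it so the request gets retired */
		return PTR_ERR(cs);
	}

	*cs++ = MI_NOOP;		/* placeholder payload */
	*cs++ = MI_NOOP;
	intel_ring_advance(rq, cs);

	i915_request_add(rq);		/* queue for execution */
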
@@ -322,7 +322,7 @@ static int intel_overlay_continue(struct intel_overlay *overlay,
                                  bool load_polyphase_filter)
 {
        struct drm_i915_private *dev_priv = overlay->i915;
-       struct drm_i915_gem_request *req;
+       struct i915_request *rq;
        u32 flip_addr = overlay->flip_addr;
        u32 tmp, *cs;
 
@@ -336,23 +336,23 @@ static int intel_overlay_continue(struct intel_overlay *overlay,
        if (tmp & (1 << 17))
                DRM_DEBUG("overlay underrun, DOVSTA: %x\n", tmp);
 
-       req = alloc_request(overlay);
-       if (IS_ERR(req))
-               return PTR_ERR(req);
+       rq = alloc_request(overlay);
+       if (IS_ERR(rq))
+               return PTR_ERR(rq);
 
-       cs = intel_ring_begin(req, 2);
+       cs = intel_ring_begin(rq, 2);
        if (IS_ERR(cs)) {
-               i915_add_request(req);
+               i915_request_add(rq);
                return PTR_ERR(cs);
        }
 
        *cs++ = MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE;
        *cs++ = flip_addr;
-       intel_ring_advance(req, cs);
+       intel_ring_advance(rq, cs);
 
        intel_overlay_flip_prepare(overlay, vma);
 
-       intel_overlay_submit_request(overlay, req, NULL);
+       intel_overlay_submit_request(overlay, rq, NULL);
 
        return 0;
 }
@@ -373,7 +373,7 @@ static void intel_overlay_release_old_vma(struct intel_overlay *overlay)
 }
 
 static void intel_overlay_release_old_vid_tail(struct i915_gem_active *active,
-                                              struct drm_i915_gem_request *req)
+                                              struct i915_request *rq)
 {
        struct intel_overlay *overlay =
                container_of(active, typeof(*overlay), last_flip);
@@ -382,7 +382,7 @@ static void intel_overlay_release_old_vid_tail(struct i915_gem_active *active,
 }
 
 static void intel_overlay_off_tail(struct i915_gem_active *active,
-                                  struct drm_i915_gem_request *req)
+                                  struct i915_request *rq)
 {
        struct intel_overlay *overlay =
                container_of(active, typeof(*overlay), last_flip);
@@ -401,7 +401,7 @@ static void intel_overlay_off_tail(struct i915_gem_active *active,
 /* overlay needs to be disabled in OCMD reg */
 static int intel_overlay_off(struct intel_overlay *overlay)
 {
-       struct drm_i915_gem_request *req;
+       struct i915_request *rq;
        u32 *cs, flip_addr = overlay->flip_addr;
 
        WARN_ON(!overlay->active);
@@ -412,13 +412,13 @@ static int intel_overlay_off(struct intel_overlay *overlay)
         * of the hw. Do it in both cases */
        flip_addr |= OFC_UPDATE;
 
-       req = alloc_request(overlay);
-       if (IS_ERR(req))
-               return PTR_ERR(req);
+       rq = alloc_request(overlay);
+       if (IS_ERR(rq))
+               return PTR_ERR(rq);
 
-       cs = intel_ring_begin(req, 6);
+       cs = intel_ring_begin(rq, 6);
        if (IS_ERR(cs)) {
-               i915_add_request(req);
+               i915_request_add(rq);
                return PTR_ERR(cs);
        }
 
@@ -432,11 +432,11 @@ static int intel_overlay_off(struct intel_overlay *overlay)
        *cs++ = flip_addr;
        *cs++ = MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP;
 
-       intel_ring_advance(req, cs);
+       intel_ring_advance(rq, cs);
 
        intel_overlay_flip_prepare(overlay, NULL);
 
-       return intel_overlay_do_wait_request(overlay, req,
+       return intel_overlay_do_wait_request(overlay, rq,
                                             intel_overlay_off_tail);
 }
 
@@ -468,23 +468,23 @@ static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
 
        if (I915_READ(ISR) & I915_OVERLAY_PLANE_FLIP_PENDING_INTERRUPT) {
                /* synchronous slowpath */
-               struct drm_i915_gem_request *req;
+               struct i915_request *rq;
 
-               req = alloc_request(overlay);
-               if (IS_ERR(req))
-                       return PTR_ERR(req);
+               rq = alloc_request(overlay);
+               if (IS_ERR(rq))
+                       return PTR_ERR(rq);
 
-               cs = intel_ring_begin(req, 2);
+               cs = intel_ring_begin(rq, 2);
                if (IS_ERR(cs)) {
-                       i915_add_request(req);
+                       i915_request_add(rq);
                        return PTR_ERR(cs);
                }
 
                *cs++ = MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP;
                *cs++ = MI_NOOP;
-               intel_ring_advance(req, cs);
+               intel_ring_advance(rq, cs);
 
-               ret = intel_overlay_do_wait_request(overlay, req,
+               ret = intel_overlay_do_wait_request(overlay, rq,
                                                    intel_overlay_release_old_vid_tail);
                if (ret)
                        return ret;
index abf80e4..3c14996 100644 (file)
@@ -6360,7 +6360,7 @@ void gen6_rps_idle(struct drm_i915_private *dev_priv)
        mutex_unlock(&dev_priv->pcu_lock);
 }
 
-void gen6_rps_boost(struct drm_i915_gem_request *rq,
+void gen6_rps_boost(struct i915_request *rq,
                    struct intel_rps_client *rps_client)
 {
        struct intel_rps *rps = &rq->i915->gt_pm.rps;
@@ -6376,7 +6376,7 @@ void gen6_rps_boost(struct drm_i915_gem_request *rq,
        if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &rq->fence.flags))
                return;
 
-       /* Serializes with i915_gem_request_retire() */
+       /* Serializes with i915_request_retire() */
        boost = false;
        spin_lock_irqsave(&rq->lock, flags);
        if (!rq->waitboost && !dma_fence_is_signaled_locked(&rq->fence)) {
@@ -6715,7 +6715,7 @@ static void gen9_enable_rc6(struct drm_i915_private *dev_priv)
 
        /*
         * 3b: Enable Coarse Power Gating only when RC6 is enabled.
-        * WaRsDisableCoarsePowerGating:skl,bxt - Render/Media PG need to be disabled with RC6.
+        * WaRsDisableCoarsePowerGating:skl,cnl - Render/Media PG need to be disabled with RC6.
         */
        if (NEEDS_WaRsDisableCoarsePowerGating(dev_priv))
                I915_WRITE(GEN9_PG_ENABLE, 0);
index 2ef374f..0577079 100644 (file)
 #include "intel_drv.h"
 #include "i915_drv.h"
 
+static inline enum intel_display_power_domain
+psr_aux_domain(struct intel_dp *intel_dp)
+{
+       /* CNL HW requires corresponding AUX IOs to be powered up for PSR.
+        * However, for non-A AUX ports the corresponding non-EDP transcoders
+        * would have already enabled power well 2 and DC_OFF. This means we can
+        * acquire a wider POWER_DOMAIN_AUX_{B,C,D,F} reference instead of a
+        * specific AUX_IO reference without powering up any extra wells.
+        * Note that PSR is enabled only on Port A even though this function
+        * returns the correct domain for other ports too.
+        */
+       return intel_dp->aux_ch == AUX_CH_A ? POWER_DOMAIN_AUX_IO_A :
+                                             intel_dp->aux_power_domain;
+}
+
+static void psr_aux_io_power_get(struct intel_dp *intel_dp)
+{
+       struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+       struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev);
+
+       if (INTEL_GEN(dev_priv) < 10)
+               return;
+
+       intel_display_power_get(dev_priv, psr_aux_domain(intel_dp));
+}
+
+static void psr_aux_io_power_put(struct intel_dp *intel_dp)
+{
+       struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+       struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev);
+
+       if (INTEL_GEN(dev_priv) < 10)
+               return;
+
+       intel_display_power_put(dev_priv, psr_aux_domain(intel_dp));
+}
+
+static bool intel_dp_get_y_cord_status(struct intel_dp *intel_dp)
+{
+       uint8_t psr_caps = 0;
+
+       if (drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_CAPS, &psr_caps) != 1)
+               return false;
+       return psr_caps & DP_PSR2_SU_Y_COORDINATE_REQUIRED;
+}
+
+static bool intel_dp_get_colorimetry_status(struct intel_dp *intel_dp)
+{
+       uint8_t dprx = 0;
+
+       if (drm_dp_dpcd_readb(&intel_dp->aux, DP_DPRX_FEATURE_ENUMERATION_LIST,
+                             &dprx) != 1)
+               return false;
+       return dprx & DP_VSC_SDP_EXT_FOR_COLORIMETRY_SUPPORTED;
+}
+
+static bool intel_dp_get_alpm_status(struct intel_dp *intel_dp)
+{
+       uint8_t alpm_caps = 0;
+
+       if (drm_dp_dpcd_readb(&intel_dp->aux, DP_RECEIVER_ALPM_CAP,
+                             &alpm_caps) != 1)
+               return false;
+       return alpm_caps & DP_ALPM_CAP;
+}
+
+void intel_psr_init_dpcd(struct intel_dp *intel_dp)
+{
+       struct drm_i915_private *dev_priv =
+               to_i915(dp_to_dig_port(intel_dp)->base.base.dev);
+
+       drm_dp_dpcd_read(&intel_dp->aux, DP_PSR_SUPPORT, intel_dp->psr_dpcd,
+                        sizeof(intel_dp->psr_dpcd));
+
+       if (intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED) {
+               dev_priv->psr.sink_support = true;
+               DRM_DEBUG_KMS("Detected EDP PSR Panel.\n");
+       }
+
+       if (INTEL_GEN(dev_priv) >= 9 &&
+           (intel_dp->psr_dpcd[0] & DP_PSR2_IS_SUPPORTED)) {
+               uint8_t frame_sync_cap;
+
+               dev_priv->psr.sink_support = true;
+               if (drm_dp_dpcd_readb(&intel_dp->aux,
+                                     DP_SINK_DEVICE_AUX_FRAME_SYNC_CAP,
+                                     &frame_sync_cap) != 1)
+                       frame_sync_cap = 0;
+               dev_priv->psr.aux_frame_sync = frame_sync_cap & DP_AUX_FRAME_SYNC_CAP;
+               /* PSR2 needs frame sync as well */
+               dev_priv->psr.psr2_support = dev_priv->psr.aux_frame_sync;
+               DRM_DEBUG_KMS("PSR2 %s on sink",
+                             dev_priv->psr.psr2_support ? "supported" : "not supported");
+
+               if (dev_priv->psr.psr2_support) {
+                       dev_priv->psr.y_cord_support =
+                               intel_dp_get_y_cord_status(intel_dp);
+                       dev_priv->psr.colorimetry_support =
+                               intel_dp_get_colorimetry_status(intel_dp);
+                       dev_priv->psr.alpm =
+                               intel_dp_get_alpm_status(intel_dp);
+               }
+       }
+}
+
 static bool vlv_is_psr_active_on_pipe(struct drm_device *dev, int pipe)
 {
        struct drm_i915_private *dev_priv = to_i915(dev);
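
The DPCD helpers added above all rely on the same convention: drm_dp_dpcd_readb() returns the number of bytes transferred (1 on success) or a negative error code, so any return other than 1 is treated as "capability absent" rather than as a fatal failure. In sketch form, probing one optional capability byte looks like:

	u8 alpm_caps;

	/* Treat an AUX failure the same as the capability not being advertised. */
	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_RECEIVER_ALPM_CAP, &alpm_caps) != 1)
		return false;

	return alpm_caps & DP_ALPM_CAP;
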
@@ -341,6 +446,41 @@ static void hsw_psr_activate(struct intel_dp *intel_dp)
                hsw_activate_psr1(intel_dp);
 }
 
+static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
+                                   struct intel_crtc_state *crtc_state)
+{
+       struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+       struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
+       const struct drm_display_mode *adjusted_mode =
+               &crtc_state->base.adjusted_mode;
+
+       /*
+        * FIXME psr2_support is messed up. It's both computed
+        * dynamically during PSR enable, and extracted from sink
+        * caps during eDP detection.
+        */
+       if (!dev_priv->psr.psr2_support)
+               return false;
+
+       /* PSR2 is restricted to work with panel resolutions up to 3640x2304 */
+       if (adjusted_mode->crtc_hdisplay > 3640 ||
+           adjusted_mode->crtc_vdisplay > 2304) {
+               DRM_DEBUG_KMS("PSR2 not enabled, panel resolution too big\n");
+               return false;
+       }
+
+       /*
+        * FIXME: enable psr2 only for Y-coordinate psr2 panels
+        * After gtc implementation, remove this restriction.
+        */
+       if (!dev_priv->psr.y_cord_support) {
+               DRM_DEBUG_KMS("PSR2 not enabled, panel does not support Y coordinate\n");
+               return false;
+       }
+
+       return true;
+}
+
 void intel_psr_compute_config(struct intel_dp *intel_dp,
                              struct intel_crtc_state *crtc_state)
 {
@@ -403,34 +543,14 @@ void intel_psr_compute_config(struct intel_dp *intel_dp,
                return;
        }
 
-       /*
-        * FIXME psr2_support is messed up. It's both computed
-        * dynamically during PSR enable, and extracted from sink
-        * caps during eDP detection.
-        */
-       if (!dev_priv->psr.psr2_support) {
-               crtc_state->has_psr = true;
-               return;
-       }
-
-       /* PSR2 is restricted to work with panel resolutions upto 3200x2000 */
-       if (adjusted_mode->crtc_hdisplay > 3200 ||
-           adjusted_mode->crtc_vdisplay > 2000) {
-               DRM_DEBUG_KMS("PSR2 disabled, panel resolution too big\n");
-               return;
-       }
-
-       /*
-        * FIXME:enable psr2 only for y-cordinate psr2 panels
-        * After gtc implementation , remove this restriction.
-        */
-       if (!dev_priv->psr.y_cord_support) {
-               DRM_DEBUG_KMS("PSR2 disabled, panel does not support Y coordinate\n");
+       if (!(intel_dp->edp_dpcd[1] & DP_EDP_SET_POWER_CAP)) {
+               DRM_DEBUG_KMS("PSR condition failed: panel lacks power state control\n");
                return;
        }
 
        crtc_state->has_psr = true;
-       crtc_state->has_psr2 = true;
+       crtc_state->has_psr2 = intel_psr2_config_valid(intel_dp, crtc_state);
+       DRM_DEBUG_KMS("Enabling PSR%s\n", crtc_state->has_psr2 ? "2" : "");
 }
 
 static void intel_psr_activate(struct intel_dp *intel_dp)
@@ -459,6 +579,8 @@ static void hsw_psr_enable_source(struct intel_dp *intel_dp,
        enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
        u32 chicken;
 
+       psr_aux_io_power_get(intel_dp);
+
        if (dev_priv->psr.psr2_support) {
                chicken = PSR2_VSC_ENABLE_PROG_HEADER;
                if (dev_priv->psr.y_cord_support)
@@ -617,6 +739,8 @@ static void hsw_psr_disable(struct intel_dp *intel_dp,
                else
                        WARN_ON(I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE);
        }
+
+       psr_aux_io_power_put(intel_dp);
 }
 
 /**
index 5718f37..1d59952 100644 (file)
@@ -66,7 +66,7 @@ unsigned int intel_ring_update_space(struct intel_ring *ring)
 }
 
 static int
-gen2_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
+gen2_render_ring_flush(struct i915_request *rq, u32 mode)
 {
        u32 cmd, *cs;
 
@@ -75,19 +75,19 @@ gen2_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
        if (mode & EMIT_INVALIDATE)
                cmd |= MI_READ_FLUSH;
 
-       cs = intel_ring_begin(req, 2);
+       cs = intel_ring_begin(rq, 2);
        if (IS_ERR(cs))
                return PTR_ERR(cs);
 
        *cs++ = cmd;
        *cs++ = MI_NOOP;
-       intel_ring_advance(req, cs);
+       intel_ring_advance(rq, cs);
 
        return 0;
 }
 
 static int
-gen4_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
+gen4_render_ring_flush(struct i915_request *rq, u32 mode)
 {
        u32 cmd, *cs;
 
@@ -122,17 +122,17 @@ gen4_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
        cmd = MI_FLUSH;
        if (mode & EMIT_INVALIDATE) {
                cmd |= MI_EXE_FLUSH;
-               if (IS_G4X(req->i915) || IS_GEN5(req->i915))
+               if (IS_G4X(rq->i915) || IS_GEN5(rq->i915))
                        cmd |= MI_INVALIDATE_ISP;
        }
 
-       cs = intel_ring_begin(req, 2);
+       cs = intel_ring_begin(rq, 2);
        if (IS_ERR(cs))
                return PTR_ERR(cs);
 
        *cs++ = cmd;
        *cs++ = MI_NOOP;
-       intel_ring_advance(req, cs);
+       intel_ring_advance(rq, cs);
 
        return 0;
 }
@@ -175,13 +175,13 @@ gen4_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
  * really our business.  That leaves only stall at scoreboard.
  */
 static int
-intel_emit_post_sync_nonzero_flush(struct drm_i915_gem_request *req)
+intel_emit_post_sync_nonzero_flush(struct i915_request *rq)
 {
        u32 scratch_addr =
-               i915_ggtt_offset(req->engine->scratch) + 2 * CACHELINE_BYTES;
+               i915_ggtt_offset(rq->engine->scratch) + 2 * CACHELINE_BYTES;
        u32 *cs;
 
-       cs = intel_ring_begin(req, 6);
+       cs = intel_ring_begin(rq, 6);
        if (IS_ERR(cs))
                return PTR_ERR(cs);
 
@@ -191,9 +191,9 @@ intel_emit_post_sync_nonzero_flush(struct drm_i915_gem_request *req)
        *cs++ = 0; /* low dword */
        *cs++ = 0; /* high dword */
        *cs++ = MI_NOOP;
-       intel_ring_advance(req, cs);
+       intel_ring_advance(rq, cs);
 
-       cs = intel_ring_begin(req, 6);
+       cs = intel_ring_begin(rq, 6);
        if (IS_ERR(cs))
                return PTR_ERR(cs);
 
@@ -203,21 +203,21 @@ intel_emit_post_sync_nonzero_flush(struct drm_i915_gem_request *req)
        *cs++ = 0;
        *cs++ = 0;
        *cs++ = MI_NOOP;
-       intel_ring_advance(req, cs);
+       intel_ring_advance(rq, cs);
 
        return 0;
 }
 
 static int
-gen6_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
+gen6_render_ring_flush(struct i915_request *rq, u32 mode)
 {
        u32 scratch_addr =
-               i915_ggtt_offset(req->engine->scratch) + 2 * CACHELINE_BYTES;
+               i915_ggtt_offset(rq->engine->scratch) + 2 * CACHELINE_BYTES;
        u32 *cs, flags = 0;
        int ret;
 
        /* Force SNB workarounds for PIPE_CONTROL flushes */
-       ret = intel_emit_post_sync_nonzero_flush(req);
+       ret = intel_emit_post_sync_nonzero_flush(rq);
        if (ret)
                return ret;
 
@@ -247,7 +247,7 @@ gen6_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
                flags |= PIPE_CONTROL_QW_WRITE | PIPE_CONTROL_CS_STALL;
        }
 
-       cs = intel_ring_begin(req, 4);
+       cs = intel_ring_begin(rq, 4);
        if (IS_ERR(cs))
                return PTR_ERR(cs);
 
@@ -255,17 +255,17 @@ gen6_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
        *cs++ = flags;
        *cs++ = scratch_addr | PIPE_CONTROL_GLOBAL_GTT;
        *cs++ = 0;
-       intel_ring_advance(req, cs);
+       intel_ring_advance(rq, cs);
 
        return 0;
 }
 
 static int
-gen7_render_ring_cs_stall_wa(struct drm_i915_gem_request *req)
+gen7_render_ring_cs_stall_wa(struct i915_request *rq)
 {
        u32 *cs;
 
-       cs = intel_ring_begin(req, 4);
+       cs = intel_ring_begin(rq, 4);
        if (IS_ERR(cs))
                return PTR_ERR(cs);
 
@@ -273,16 +273,16 @@ gen7_render_ring_cs_stall_wa(struct drm_i915_gem_request *req)
        *cs++ = PIPE_CONTROL_CS_STALL | PIPE_CONTROL_STALL_AT_SCOREBOARD;
        *cs++ = 0;
        *cs++ = 0;
-       intel_ring_advance(req, cs);
+       intel_ring_advance(rq, cs);
 
        return 0;
 }
 
 static int
-gen7_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
+gen7_render_ring_flush(struct i915_request *rq, u32 mode)
 {
        u32 scratch_addr =
-               i915_ggtt_offset(req->engine->scratch) + 2 * CACHELINE_BYTES;
+               i915_ggtt_offset(rq->engine->scratch) + 2 * CACHELINE_BYTES;
        u32 *cs, flags = 0;
 
        /*
@@ -324,10 +324,10 @@ gen7_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
                /* Workaround: we must issue a pipe_control with CS-stall bit
                 * set before a pipe_control command that has the state cache
                 * invalidate bit set. */
-               gen7_render_ring_cs_stall_wa(req);
+               gen7_render_ring_cs_stall_wa(rq);
        }
 
-       cs = intel_ring_begin(req, 4);
+       cs = intel_ring_begin(rq, 4);
        if (IS_ERR(cs))
                return PTR_ERR(cs);
 
@@ -335,7 +335,7 @@ gen7_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
        *cs++ = flags;
        *cs++ = scratch_addr;
        *cs++ = 0;
-       intel_ring_advance(req, cs);
+       intel_ring_advance(rq, cs);
 
        return 0;
 }
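
For context on the workaround mentioned in the comment above: on gen7 a PIPE_CONTROL carrying state-cache-invalidate bits must be preceded by a PIPE_CONTROL whose only job is to assert a CS stall, which is what gen7_render_ring_cs_stall_wa() emits. In sketch form (GFX_OP_PIPE_CONTROL is the opcode macro used elsewhere in the driver; it is not visible in these hunks, so treat it as an assumption):

	/* 1. Stall-only PIPE_CONTROL, no other payload. */
	*cs++ = GFX_OP_PIPE_CONTROL(4);
	*cs++ = PIPE_CONTROL_CS_STALL | PIPE_CONTROL_STALL_AT_SCOREBOARD;
	*cs++ = 0;
	*cs++ = 0;

	/* 2. Only then the flush that sets the invalidate bits. */
	*cs++ = GFX_OP_PIPE_CONTROL(4);
	*cs++ = flags;		/* includes the TLB/state-cache invalidate bits */
	*cs++ = scratch_addr;
	*cs++ = 0;
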
@@ -531,7 +531,7 @@ out:
 }
 
 static void reset_ring_common(struct intel_engine_cs *engine,
-                             struct drm_i915_gem_request *request)
+                             struct i915_request *request)
 {
        /*
         * RC6 must be prevented until the reset is complete and the engine
@@ -595,15 +595,15 @@ static void reset_ring_common(struct intel_engine_cs *engine,
        }
 }
 
-static int intel_rcs_ctx_init(struct drm_i915_gem_request *req)
+static int intel_rcs_ctx_init(struct i915_request *rq)
 {
        int ret;
 
-       ret = intel_ring_workarounds_emit(req);
+       ret = intel_ring_workarounds_emit(rq);
        if (ret != 0)
                return ret;
 
-       ret = i915_gem_render_state_emit(req);
+       ret = i915_gem_render_state_emit(rq);
        if (ret)
                return ret;
 
@@ -661,9 +661,9 @@ static int init_render_ring(struct intel_engine_cs *engine)
        return init_workarounds_ring(engine);
 }
 
-static u32 *gen6_signal(struct drm_i915_gem_request *req, u32 *cs)
+static u32 *gen6_signal(struct i915_request *rq, u32 *cs)
 {
-       struct drm_i915_private *dev_priv = req->i915;
+       struct drm_i915_private *dev_priv = rq->i915;
        struct intel_engine_cs *engine;
        enum intel_engine_id id;
        int num_rings = 0;
@@ -674,11 +674,11 @@ static u32 *gen6_signal(struct drm_i915_gem_request *req, u32 *cs)
                if (!(BIT(engine->hw_id) & GEN6_SEMAPHORES_MASK))
                        continue;
 
-               mbox_reg = req->engine->semaphore.mbox.signal[engine->hw_id];
+               mbox_reg = rq->engine->semaphore.mbox.signal[engine->hw_id];
                if (i915_mmio_reg_valid(mbox_reg)) {
                        *cs++ = MI_LOAD_REGISTER_IMM(1);
                        *cs++ = i915_mmio_reg_offset(mbox_reg);
-                       *cs++ = req->global_seqno;
+                       *cs++ = rq->global_seqno;
                        num_rings++;
                }
        }
@@ -690,7 +690,7 @@ static u32 *gen6_signal(struct drm_i915_gem_request *req, u32 *cs)
 
 static void cancel_requests(struct intel_engine_cs *engine)
 {
-       struct drm_i915_gem_request *request;
+       struct i915_request *request;
        unsigned long flags;
 
        spin_lock_irqsave(&engine->timeline->lock, flags);
@@ -698,7 +698,7 @@ static void cancel_requests(struct intel_engine_cs *engine)
        /* Mark all submitted requests as skipped. */
        list_for_each_entry(request, &engine->timeline->requests, link) {
                GEM_BUG_ON(!request->global_seqno);
-               if (!i915_gem_request_completed(request))
+               if (!i915_request_completed(request))
                        dma_fence_set_error(&request->fence, -EIO);
        }
        /* Remaining _unready_ requests will be nop'ed when submitted */
@@ -706,48 +706,46 @@ static void cancel_requests(struct intel_engine_cs *engine)
        spin_unlock_irqrestore(&engine->timeline->lock, flags);
 }
 
-static void i9xx_submit_request(struct drm_i915_gem_request *request)
+static void i9xx_submit_request(struct i915_request *request)
 {
        struct drm_i915_private *dev_priv = request->i915;
 
-       i915_gem_request_submit(request);
+       i915_request_submit(request);
 
        I915_WRITE_TAIL(request->engine,
                        intel_ring_set_tail(request->ring, request->tail));
 }
 
-static void i9xx_emit_breadcrumb(struct drm_i915_gem_request *req, u32 *cs)
+static void i9xx_emit_breadcrumb(struct i915_request *rq, u32 *cs)
 {
        *cs++ = MI_STORE_DWORD_INDEX;
        *cs++ = I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT;
-       *cs++ = req->global_seqno;
+       *cs++ = rq->global_seqno;
        *cs++ = MI_USER_INTERRUPT;
 
-       req->tail = intel_ring_offset(req, cs);
-       assert_ring_tail_valid(req->ring, req->tail);
+       rq->tail = intel_ring_offset(rq, cs);
+       assert_ring_tail_valid(rq->ring, rq->tail);
 }
 
 static const int i9xx_emit_breadcrumb_sz = 4;
 
-static void gen6_sema_emit_breadcrumb(struct drm_i915_gem_request *req, u32 *cs)
+static void gen6_sema_emit_breadcrumb(struct i915_request *rq, u32 *cs)
 {
-       return i9xx_emit_breadcrumb(req,
-                                   req->engine->semaphore.signal(req, cs));
+       return i9xx_emit_breadcrumb(rq, rq->engine->semaphore.signal(rq, cs));
 }
 
 static int
-gen6_ring_sync_to(struct drm_i915_gem_request *req,
-                 struct drm_i915_gem_request *signal)
+gen6_ring_sync_to(struct i915_request *rq, struct i915_request *signal)
 {
        u32 dw1 = MI_SEMAPHORE_MBOX |
                  MI_SEMAPHORE_COMPARE |
                  MI_SEMAPHORE_REGISTER;
-       u32 wait_mbox = signal->engine->semaphore.mbox.wait[req->engine->hw_id];
+       u32 wait_mbox = signal->engine->semaphore.mbox.wait[rq->engine->hw_id];
        u32 *cs;
 
        WARN_ON(wait_mbox == MI_SEMAPHORE_SYNC_INVALID);
 
-       cs = intel_ring_begin(req, 4);
+       cs = intel_ring_begin(rq, 4);
        if (IS_ERR(cs))
                return PTR_ERR(cs);
 
@@ -759,7 +757,7 @@ gen6_ring_sync_to(struct drm_i915_gem_request *req,
        *cs++ = signal->global_seqno - 1;
        *cs++ = 0;
        *cs++ = MI_NOOP;
-       intel_ring_advance(req, cs);
+       intel_ring_advance(rq, cs);
 
        return 0;
 }
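
The i9xx breadcrumb above writes the request's global seqno into the hardware status page at I915_GEM_HWS_INDEX and then raises MI_USER_INTERRUPT, so waiters can compare the value in the status page against rq->global_seqno. Completion checks elsewhere in the driver use a wrap-safe signed comparison for that; a minimal sketch of the idea (the helper name here is illustrative, not quoted from this patch):

	static inline bool seqno_passed(u32 hws_seqno, u32 seqno)
	{
		/* True once the breadcrumb for seqno has been written, even across wrap. */
		return (s32)(hws_seqno - seqno) >= 0;
	}
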
@@ -858,17 +856,17 @@ i8xx_irq_disable(struct intel_engine_cs *engine)
 }
 
 static int
-bsd_ring_flush(struct drm_i915_gem_request *req, u32 mode)
+bsd_ring_flush(struct i915_request *rq, u32 mode)
 {
        u32 *cs;
 
-       cs = intel_ring_begin(req, 2);
+       cs = intel_ring_begin(rq, 2);
        if (IS_ERR(cs))
                return PTR_ERR(cs);
 
        *cs++ = MI_FLUSH;
        *cs++ = MI_NOOP;
-       intel_ring_advance(req, cs);
+       intel_ring_advance(rq, cs);
        return 0;
 }
 
@@ -911,20 +909,20 @@ hsw_vebox_irq_disable(struct intel_engine_cs *engine)
 }
 
 static int
-i965_emit_bb_start(struct drm_i915_gem_request *req,
+i965_emit_bb_start(struct i915_request *rq,
                   u64 offset, u32 length,
                   unsigned int dispatch_flags)
 {
        u32 *cs;
 
-       cs = intel_ring_begin(req, 2);
+       cs = intel_ring_begin(rq, 2);
        if (IS_ERR(cs))
                return PTR_ERR(cs);
 
        *cs++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT | (dispatch_flags &
                I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE_I965);
        *cs++ = offset;
-       intel_ring_advance(req, cs);
+       intel_ring_advance(rq, cs);
 
        return 0;
 }
@@ -934,13 +932,13 @@ i965_emit_bb_start(struct drm_i915_gem_request *req,
 #define I830_TLB_ENTRIES (2)
 #define I830_WA_SIZE max(I830_TLB_ENTRIES*4096, I830_BATCH_LIMIT)
 static int
-i830_emit_bb_start(struct drm_i915_gem_request *req,
+i830_emit_bb_start(struct i915_request *rq,
                   u64 offset, u32 len,
                   unsigned int dispatch_flags)
 {
-       u32 *cs, cs_offset = i915_ggtt_offset(req->engine->scratch);
+       u32 *cs, cs_offset = i915_ggtt_offset(rq->engine->scratch);
 
-       cs = intel_ring_begin(req, 6);
+       cs = intel_ring_begin(rq, 6);
        if (IS_ERR(cs))
                return PTR_ERR(cs);
 
@@ -951,13 +949,13 @@ i830_emit_bb_start(struct drm_i915_gem_request *req,
        *cs++ = cs_offset;
        *cs++ = 0xdeadbeef;
        *cs++ = MI_NOOP;
-       intel_ring_advance(req, cs);
+       intel_ring_advance(rq, cs);
 
        if ((dispatch_flags & I915_DISPATCH_PINNED) == 0) {
                if (len > I830_BATCH_LIMIT)
                        return -ENOSPC;
 
-               cs = intel_ring_begin(req, 6 + 2);
+               cs = intel_ring_begin(rq, 6 + 2);
                if (IS_ERR(cs))
                        return PTR_ERR(cs);
 
@@ -974,39 +972,39 @@ i830_emit_bb_start(struct drm_i915_gem_request *req,
 
                *cs++ = MI_FLUSH;
                *cs++ = MI_NOOP;
-               intel_ring_advance(req, cs);
+               intel_ring_advance(rq, cs);
 
                /* ... and execute it. */
                offset = cs_offset;
        }
 
-       cs = intel_ring_begin(req, 2);
+       cs = intel_ring_begin(rq, 2);
        if (IS_ERR(cs))
                return PTR_ERR(cs);
 
        *cs++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT;
        *cs++ = offset | (dispatch_flags & I915_DISPATCH_SECURE ? 0 :
                MI_BATCH_NON_SECURE);
-       intel_ring_advance(req, cs);
+       intel_ring_advance(rq, cs);
 
        return 0;
 }
 
 static int
-i915_emit_bb_start(struct drm_i915_gem_request *req,
+i915_emit_bb_start(struct i915_request *rq,
                   u64 offset, u32 len,
                   unsigned int dispatch_flags)
 {
        u32 *cs;
 
-       cs = intel_ring_begin(req, 2);
+       cs = intel_ring_begin(rq, 2);
        if (IS_ERR(cs))
                return PTR_ERR(cs);
 
        *cs++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT;
        *cs++ = offset | (dispatch_flags & I915_DISPATCH_SECURE ? 0 :
                MI_BATCH_NON_SECURE);
-       intel_ring_advance(req, cs);
+       intel_ring_advance(rq, cs);
 
        return 0;
 }
@@ -1377,7 +1375,7 @@ void intel_legacy_submission_resume(struct drm_i915_private *dev_priv)
                intel_ring_reset(engine->buffer, 0);
 }
 
-static inline int mi_set_context(struct drm_i915_gem_request *rq, u32 flags)
+static inline int mi_set_context(struct i915_request *rq, u32 flags)
 {
        struct drm_i915_private *i915 = rq->i915;
        struct intel_engine_cs *engine = rq->engine;
@@ -1463,7 +1461,7 @@ static inline int mi_set_context(struct drm_i915_gem_request *rq, u32 flags)
        return 0;
 }
 
-static int remap_l3(struct drm_i915_gem_request *rq, int slice)
+static int remap_l3(struct i915_request *rq, int slice)
 {
        u32 *cs, *remap_info = rq->i915->l3_parity.remap_info[slice];
        int i;
@@ -1491,7 +1489,7 @@ static int remap_l3(struct drm_i915_gem_request *rq, int slice)
        return 0;
 }
 
-static int switch_context(struct drm_i915_gem_request *rq)
+static int switch_context(struct i915_request *rq)
 {
        struct intel_engine_cs *engine = rq->engine;
        struct i915_gem_context *to_ctx = rq->ctx;
@@ -1561,7 +1559,7 @@ err:
        return ret;
 }
 
-static int ring_request_alloc(struct drm_i915_gem_request *request)
+static int ring_request_alloc(struct i915_request *request)
 {
        int ret;
 
@@ -1587,7 +1585,7 @@ static int ring_request_alloc(struct drm_i915_gem_request *request)
 
 static noinline int wait_for_space(struct intel_ring *ring, unsigned int bytes)
 {
-       struct drm_i915_gem_request *target;
+       struct i915_request *target;
        long timeout;
 
        lockdep_assert_held(&ring->vma->vm->i915->drm.struct_mutex);
@@ -1605,13 +1603,13 @@ static noinline int wait_for_space(struct intel_ring *ring, unsigned int bytes)
        if (WARN_ON(&target->ring_link == &ring->request_list))
                return -ENOSPC;
 
-       timeout = i915_wait_request(target,
+       timeout = i915_request_wait(target,
                                    I915_WAIT_INTERRUPTIBLE | I915_WAIT_LOCKED,
                                    MAX_SCHEDULE_TIMEOUT);
        if (timeout < 0)
                return timeout;
 
-       i915_gem_request_retire_upto(target);
+       i915_request_retire_upto(target);
 
        intel_ring_update_space(ring);
        GEM_BUG_ON(ring->space < bytes);
@@ -1634,10 +1632,9 @@ int intel_ring_wait_for_space(struct intel_ring *ring, unsigned int bytes)
        return 0;
 }
 
-u32 *intel_ring_begin(struct drm_i915_gem_request *req,
-                     unsigned int num_dwords)
+u32 *intel_ring_begin(struct i915_request *rq, unsigned int num_dwords)
 {
-       struct intel_ring *ring = req->ring;
+       struct intel_ring *ring = rq->ring;
        const unsigned int remain_usable = ring->effective_size - ring->emit;
        const unsigned int bytes = num_dwords * sizeof(u32);
        unsigned int need_wrap = 0;
@@ -1647,7 +1644,7 @@ u32 *intel_ring_begin(struct drm_i915_gem_request *req,
        /* Packets must be qword aligned. */
        GEM_BUG_ON(num_dwords & 1);
 
-       total_bytes = bytes + req->reserved_space;
+       total_bytes = bytes + rq->reserved_space;
        GEM_BUG_ON(total_bytes > ring->effective_size);
 
        if (unlikely(total_bytes > remain_usable)) {
@@ -1668,7 +1665,7 @@ u32 *intel_ring_begin(struct drm_i915_gem_request *req,
                         * wrap and only need to effectively wait for the
                         * reserved size from the start of ringbuffer.
                         */
-                       total_bytes = req->reserved_space + remain_actual;
+                       total_bytes = rq->reserved_space + remain_actual;
                }
        }
 
@@ -1682,9 +1679,9 @@ u32 *intel_ring_begin(struct drm_i915_gem_request *req,
                 * overallocation and the assumption is that then we never need
                 * to wait (which has the risk of failing with EINTR).
                 *
-                * See also i915_gem_request_alloc() and i915_add_request().
+                * See also i915_request_alloc() and i915_request_add().
                 */
-               GEM_BUG_ON(!req->reserved_space);
+               GEM_BUG_ON(!rq->reserved_space);
 
                ret = wait_for_space(ring, total_bytes);
                if (unlikely(ret))
@@ -1713,29 +1710,28 @@ u32 *intel_ring_begin(struct drm_i915_gem_request *req,
 }
 
 /* Align the ring tail to a cacheline boundary */
-int intel_ring_cacheline_align(struct drm_i915_gem_request *req)
+int intel_ring_cacheline_align(struct i915_request *rq)
 {
-       int num_dwords =
-               (req->ring->emit & (CACHELINE_BYTES - 1)) / sizeof(uint32_t);
+       int num_dwords = (rq->ring->emit & (CACHELINE_BYTES - 1)) / sizeof(u32);
        u32 *cs;
 
        if (num_dwords == 0)
                return 0;
 
-       num_dwords = CACHELINE_BYTES / sizeof(uint32_t) - num_dwords;
-       cs = intel_ring_begin(req, num_dwords);
+       num_dwords = CACHELINE_BYTES / sizeof(u32) - num_dwords;
+       cs = intel_ring_begin(rq, num_dwords);
        if (IS_ERR(cs))
                return PTR_ERR(cs);
 
        while (num_dwords--)
                *cs++ = MI_NOOP;
 
-       intel_ring_advance(req, cs);
+       intel_ring_advance(rq, cs);
 
        return 0;
 }
 
-static void gen6_bsd_submit_request(struct drm_i915_gem_request *request)
+static void gen6_bsd_submit_request(struct i915_request *request)
 {
        struct drm_i915_private *dev_priv = request->i915;
 
@@ -1772,11 +1768,11 @@ static void gen6_bsd_submit_request(struct drm_i915_gem_request *request)
        intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
 }
 
-static int gen6_bsd_ring_flush(struct drm_i915_gem_request *req, u32 mode)
+static int gen6_bsd_ring_flush(struct i915_request *rq, u32 mode)
 {
        u32 cmd, *cs;
 
-       cs = intel_ring_begin(req, 4);
+       cs = intel_ring_begin(rq, 4);
        if (IS_ERR(cs))
                return PTR_ERR(cs);
 
@@ -1802,18 +1798,18 @@ static int gen6_bsd_ring_flush(struct drm_i915_gem_request *req, u32 mode)
        *cs++ = I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT;
        *cs++ = 0;
        *cs++ = MI_NOOP;
-       intel_ring_advance(req, cs);
+       intel_ring_advance(rq, cs);
        return 0;
 }
 
 static int
-hsw_emit_bb_start(struct drm_i915_gem_request *req,
+hsw_emit_bb_start(struct i915_request *rq,
                  u64 offset, u32 len,
                  unsigned int dispatch_flags)
 {
        u32 *cs;
 
-       cs = intel_ring_begin(req, 2);
+       cs = intel_ring_begin(rq, 2);
        if (IS_ERR(cs))
                return PTR_ERR(cs);
 
@@ -1823,19 +1819,19 @@ hsw_emit_bb_start(struct drm_i915_gem_request *req,
                MI_BATCH_RESOURCE_STREAMER : 0);
        /* bit0-7 is the length on GEN6+ */
        *cs++ = offset;
-       intel_ring_advance(req, cs);
+       intel_ring_advance(rq, cs);
 
        return 0;
 }
 
 static int
-gen6_emit_bb_start(struct drm_i915_gem_request *req,
+gen6_emit_bb_start(struct i915_request *rq,
                   u64 offset, u32 len,
                   unsigned int dispatch_flags)
 {
        u32 *cs;
 
-       cs = intel_ring_begin(req, 2);
+       cs = intel_ring_begin(rq, 2);
        if (IS_ERR(cs))
                return PTR_ERR(cs);
 
@@ -1843,18 +1839,18 @@ gen6_emit_bb_start(struct drm_i915_gem_request *req,
                0 : MI_BATCH_NON_SECURE_I965);
        /* bit0-7 is the length on GEN6+ */
        *cs++ = offset;
-       intel_ring_advance(req, cs);
+       intel_ring_advance(rq, cs);
 
        return 0;
 }
 
 /* Blitter support (SandyBridge+) */
 
-static int gen6_ring_flush(struct drm_i915_gem_request *req, u32 mode)
+static int gen6_ring_flush(struct i915_request *rq, u32 mode)
 {
        u32 cmd, *cs;
 
-       cs = intel_ring_begin(req, 4);
+       cs = intel_ring_begin(rq, 4);
        if (IS_ERR(cs))
                return PTR_ERR(cs);
 
@@ -1879,7 +1875,7 @@ static int gen6_ring_flush(struct drm_i915_gem_request *req, u32 mode)
        *cs++ = I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT;
        *cs++ = 0;
        *cs++ = MI_NOOP;
-       intel_ring_advance(req, cs);
+       intel_ring_advance(rq, cs);
 
        return 0;
 }
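
All of the emitters converted above share one pattern under the new naming: reserve ring space with intel_ring_begin(), write the command dwords, then close the packet with intel_ring_advance(). A minimal sketch of that pattern follows; emit_noop_flush() is a hypothetical name used purely for illustration, while the helpers and MI_* opcodes are the ones visible in the hunks above.

static int emit_noop_flush(struct i915_request *rq)
{
	u32 *cs;

	/* reserve 2 dwords in rq->ring (may wait for space, hence the error path) */
	cs = intel_ring_begin(rq, 2);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = MI_FLUSH;
	*cs++ = MI_NOOP;

	/* asserts that cs now matches the space reserved from rq->ring */
	intel_ring_advance(rq, cs);

	return 0;
}
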
index 51523ad..bbacf4d 100644 (file)
@@ -3,10 +3,12 @@
 #define _INTEL_RINGBUFFER_H_
 
 #include <linux/hashtable.h>
+
 #include "i915_gem_batch_pool.h"
-#include "i915_gem_request.h"
 #include "i915_gem_timeline.h"
+
 #include "i915_pmu.h"
+#include "i915_request.h"
 #include "i915_selftest.h"
 
 struct drm_printer;
@@ -115,7 +117,7 @@ struct intel_engine_hangcheck {
        unsigned long action_timestamp;
        int deadlock;
        struct intel_instdone instdone;
-       struct drm_i915_gem_request *active_request;
+       struct i915_request *active_request;
        bool stalled;
 };
 
@@ -156,7 +158,7 @@ struct i915_ctx_workarounds {
        struct i915_vma *vma;
 };
 
-struct drm_i915_gem_request;
+struct i915_request;
 
 /*
  * Engine IDs definitions.
@@ -218,7 +220,7 @@ struct intel_engine_execlists {
                /**
                 * @request_count: combined request and submission count
                 */
-               struct drm_i915_gem_request *request_count;
+               struct i915_request *request_count;
 #define EXECLIST_COUNT_BITS 2
 #define port_request(p) ptr_mask_bits((p)->request_count, EXECLIST_COUNT_BITS)
 #define port_count(p) ptr_unmask_bits((p)->request_count, EXECLIST_COUNT_BITS)
@@ -256,6 +258,16 @@ struct intel_engine_execlists {
        unsigned int port_mask;
 
        /**
+        * @queue_priority: Highest pending priority.
+        *
+        * When we add requests into the queue, or adjust the priority of
+        * executing requests, we compute the maximum priority of those
+        * pending requests. We can then use this value to determine if
+        * we need to preempt the executing requests to service the queue.
+        */
+       int queue_priority;
+
+       /**
         * @queue: queue of requests, in priority lists
         */
        struct rb_root queue;
@@ -339,7 +351,7 @@ struct intel_engine_cs {
                struct rb_root waiters; /* sorted by retirement, priority */
                struct rb_root signals; /* sorted by retirement */
                struct task_struct *signaler; /* used for fence signalling */
-               struct drm_i915_gem_request __rcu *first_signal;
+               struct i915_request __rcu *first_signal;
                struct timer_list fake_irq; /* used after a missed interrupt */
                struct timer_list hangcheck; /* detect missed interrupts */
 
@@ -391,7 +403,7 @@ struct intel_engine_cs {
 
        int             (*init_hw)(struct intel_engine_cs *engine);
        void            (*reset_hw)(struct intel_engine_cs *engine,
-                                   struct drm_i915_gem_request *req);
+                                   struct i915_request *rq);
 
        void            (*park)(struct intel_engine_cs *engine);
        void            (*unpark)(struct intel_engine_cs *engine);
@@ -402,22 +414,20 @@ struct intel_engine_cs {
                                          struct i915_gem_context *ctx);
        void            (*context_unpin)(struct intel_engine_cs *engine,
                                         struct i915_gem_context *ctx);
-       int             (*request_alloc)(struct drm_i915_gem_request *req);
-       int             (*init_context)(struct drm_i915_gem_request *req);
+       int             (*request_alloc)(struct i915_request *rq);
+       int             (*init_context)(struct i915_request *rq);
 
-       int             (*emit_flush)(struct drm_i915_gem_request *request,
-                                     u32 mode);
+       int             (*emit_flush)(struct i915_request *request, u32 mode);
 #define EMIT_INVALIDATE        BIT(0)
 #define EMIT_FLUSH     BIT(1)
 #define EMIT_BARRIER   (EMIT_INVALIDATE | EMIT_FLUSH)
-       int             (*emit_bb_start)(struct drm_i915_gem_request *req,
+       int             (*emit_bb_start)(struct i915_request *rq,
                                         u64 offset, u32 length,
                                         unsigned int dispatch_flags);
 #define I915_DISPATCH_SECURE BIT(0)
 #define I915_DISPATCH_PINNED BIT(1)
 #define I915_DISPATCH_RS     BIT(2)
-       void            (*emit_breadcrumb)(struct drm_i915_gem_request *req,
-                                          u32 *cs);
+       void            (*emit_breadcrumb)(struct i915_request *rq, u32 *cs);
        int             emit_breadcrumb_sz;
 
        /* Pass the request to the hardware queue (e.g. directly into
@@ -426,7 +436,7 @@ struct intel_engine_cs {
         * This is called from an atomic context with irqs disabled; must
         * be irq safe.
         */
-       void            (*submit_request)(struct drm_i915_gem_request *req);
+       void            (*submit_request)(struct i915_request *rq);
 
        /* Call when the priority on a request has changed and it and its
         * dependencies may need rescheduling. Note the request itself may
@@ -434,8 +444,7 @@ struct intel_engine_cs {
         *
         * Called under the struct_mutex.
         */
-       void            (*schedule)(struct drm_i915_gem_request *request,
-                                   int priority);
+       void            (*schedule)(struct i915_request *request, int priority);
 
        /*
         * Cancel all requests on the hardware, or queued for execution.
@@ -503,9 +512,9 @@ struct intel_engine_cs {
                } mbox;
 
                /* AKA wait() */
-               int     (*sync_to)(struct drm_i915_gem_request *req,
-                                  struct drm_i915_gem_request *signal);
-               u32     *(*signal)(struct drm_i915_gem_request *req, u32 *cs);
+               int     (*sync_to)(struct i915_request *rq,
+                                  struct i915_request *signal);
+               u32     *(*signal)(struct i915_request *rq, u32 *cs);
        } semaphore;
 
        struct intel_engine_execlists execlists;
@@ -726,14 +735,12 @@ void intel_engine_cleanup(struct intel_engine_cs *engine);
 
 void intel_legacy_submission_resume(struct drm_i915_private *dev_priv);
 
-int __must_check intel_ring_cacheline_align(struct drm_i915_gem_request *req);
+int __must_check intel_ring_cacheline_align(struct i915_request *rq);
 
 int intel_ring_wait_for_space(struct intel_ring *ring, unsigned int bytes);
-u32 __must_check *intel_ring_begin(struct drm_i915_gem_request *req,
-                                  unsigned int n);
+u32 __must_check *intel_ring_begin(struct i915_request *rq, unsigned int n);
 
-static inline void
-intel_ring_advance(struct drm_i915_gem_request *req, u32 *cs)
+static inline void intel_ring_advance(struct i915_request *rq, u32 *cs)
 {
        /* Dummy function.
         *
@@ -743,22 +750,20 @@ intel_ring_advance(struct drm_i915_gem_request *req, u32 *cs)
         * reserved for the command packet (i.e. the value passed to
         * intel_ring_begin()).
         */
-       GEM_BUG_ON((req->ring->vaddr + req->ring->emit) != cs);
+       GEM_BUG_ON((rq->ring->vaddr + rq->ring->emit) != cs);
 }
 
-static inline u32
-intel_ring_wrap(const struct intel_ring *ring, u32 pos)
+static inline u32 intel_ring_wrap(const struct intel_ring *ring, u32 pos)
 {
        return pos & (ring->size - 1);
 }
 
-static inline u32
-intel_ring_offset(const struct drm_i915_gem_request *req, void *addr)
+static inline u32 intel_ring_offset(const struct i915_request *rq, void *addr)
 {
        /* Don't write ring->size (equivalent to 0) as that hangs some GPUs. */
-       u32 offset = addr - req->ring->vaddr;
-       GEM_BUG_ON(offset > req->ring->size);
-       return intel_ring_wrap(req->ring, offset);
+       u32 offset = addr - rq->ring->vaddr;
+       GEM_BUG_ON(offset > rq->ring->size);
+       return intel_ring_wrap(rq->ring, offset);
 }
 
 static inline void
@@ -796,7 +801,7 @@ intel_ring_set_tail(struct intel_ring *ring, unsigned int tail)
 {
        /* Whilst writes to the tail are strictly ordered, there is no
         * serialisation between readers and the writers. The tail may be
-        * read by i915_gem_request_retire() just as it is being updated
+        * read by i915_request_retire() just as it is being updated
         * by execlists, as although the breadcrumb is complete, the context
         * switch hasn't been seen.
         */
@@ -838,7 +843,7 @@ static inline u32 intel_engine_last_submit(struct intel_engine_cs *engine)
 }
 
 int init_workarounds_ring(struct intel_engine_cs *engine);
-int intel_ring_workarounds_emit(struct drm_i915_gem_request *req);
+int intel_ring_workarounds_emit(struct i915_request *rq);
 
 void intel_engine_get_instdone(struct intel_engine_cs *engine,
                               struct intel_instdone *instdone);
@@ -866,7 +871,7 @@ static inline u32 intel_hws_preempt_done_address(struct intel_engine_cs *engine)
 int intel_engine_init_breadcrumbs(struct intel_engine_cs *engine);
 
 static inline void intel_wait_init(struct intel_wait *wait,
-                                  struct drm_i915_gem_request *rq)
+                                  struct i915_request *rq)
 {
        wait->tsk = current;
        wait->request = rq;
@@ -892,9 +897,9 @@ intel_wait_update_seqno(struct intel_wait *wait, u32 seqno)
 
 static inline bool
 intel_wait_update_request(struct intel_wait *wait,
-                         const struct drm_i915_gem_request *rq)
+                         const struct i915_request *rq)
 {
-       return intel_wait_update_seqno(wait, i915_gem_request_global_seqno(rq));
+       return intel_wait_update_seqno(wait, i915_request_global_seqno(rq));
 }
 
 static inline bool
@@ -905,9 +910,9 @@ intel_wait_check_seqno(const struct intel_wait *wait, u32 seqno)
 
 static inline bool
 intel_wait_check_request(const struct intel_wait *wait,
-                        const struct drm_i915_gem_request *rq)
+                        const struct i915_request *rq)
 {
-       return intel_wait_check_seqno(wait, i915_gem_request_global_seqno(rq));
+       return intel_wait_check_seqno(wait, i915_request_global_seqno(rq));
 }
 
 static inline bool intel_wait_complete(const struct intel_wait *wait)
@@ -919,9 +924,8 @@ bool intel_engine_add_wait(struct intel_engine_cs *engine,
                           struct intel_wait *wait);
 void intel_engine_remove_wait(struct intel_engine_cs *engine,
                              struct intel_wait *wait);
-void intel_engine_enable_signaling(struct drm_i915_gem_request *request,
-                                  bool wakeup);
-void intel_engine_cancel_signaling(struct drm_i915_gem_request *request);
+void intel_engine_enable_signaling(struct i915_request *request, bool wakeup);
+void intel_engine_cancel_signaling(struct i915_request *request);
 
 static inline bool intel_engine_has_waiter(const struct intel_engine_cs *engine)
 {
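
Beyond the rename, the one functional addition in this header is execlists->queue_priority, whose kerneldoc above says the driver caches the highest pending priority so the submission path can decide whether to preempt the executing requests. A hedged sketch of how such a check might look; need_preempt() and active_priority are assumptions for illustration, not the driver's actual helpers.

/* illustrative only: compare the cached queue priority against whatever
 * is currently executing to decide if a preemption would be worthwhile
 */
static inline bool need_preempt(const struct intel_engine_execlists *execlists,
				int active_priority)
{
	return execlists->queue_priority > active_priority;
}
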
index b7924fe..53ea564 100644 (file)
@@ -130,6 +130,8 @@ intel_display_power_domain_str(enum intel_display_power_domain domain)
                return "AUX_D";
        case POWER_DOMAIN_AUX_F:
                return "AUX_F";
+       case POWER_DOMAIN_AUX_IO_A:
+               return "AUX_IO_A";
        case POWER_DOMAIN_GMBUS:
                return "GMBUS";
        case POWER_DOMAIN_INIT:
@@ -1853,6 +1855,7 @@ void intel_display_power_put(struct drm_i915_private *dev_priv,
        BIT_ULL(POWER_DOMAIN_INIT))
 #define CNL_DISPLAY_AUX_A_POWER_DOMAINS (              \
        BIT_ULL(POWER_DOMAIN_AUX_A) |                   \
+       BIT_ULL(POWER_DOMAIN_AUX_IO_A) |                \
        BIT_ULL(POWER_DOMAIN_INIT))
 #define CNL_DISPLAY_AUX_B_POWER_DOMAINS (              \
        BIT_ULL(POWER_DOMAIN_AUX_B) |                   \
index 52b1bd1..05bbef3 100644 (file)
@@ -964,7 +964,7 @@ static int gpu_write(struct i915_vma *vma,
                     u32 dword,
                     u32 value)
 {
-       struct drm_i915_gem_request *rq;
+       struct i915_request *rq;
        struct i915_vma *batch;
        int flags = 0;
        int err;
@@ -975,7 +975,7 @@ static int gpu_write(struct i915_vma *vma,
        if (err)
                return err;
 
-       rq = i915_gem_request_alloc(engine, ctx);
+       rq = i915_request_alloc(engine, ctx);
        if (IS_ERR(rq))
                return PTR_ERR(rq);
 
@@ -1003,7 +1003,7 @@ static int gpu_write(struct i915_vma *vma,
        reservation_object_unlock(vma->resv);
 
 err_request:
-       __i915_add_request(rq, err == 0);
+       __i915_request_add(rq, err == 0);
 
        return err;
 }
index 7a0d1e1..340a98c 100644 (file)
@@ -178,7 +178,7 @@ static int gpu_set(struct drm_i915_gem_object *obj,
                   u32 v)
 {
        struct drm_i915_private *i915 = to_i915(obj->base.dev);
-       struct drm_i915_gem_request *rq;
+       struct i915_request *rq;
        struct i915_vma *vma;
        u32 *cs;
        int err;
@@ -191,7 +191,7 @@ static int gpu_set(struct drm_i915_gem_object *obj,
        if (IS_ERR(vma))
                return PTR_ERR(vma);
 
-       rq = i915_gem_request_alloc(i915->engine[RCS], i915->kernel_context);
+       rq = i915_request_alloc(i915->engine[RCS], i915->kernel_context);
        if (IS_ERR(rq)) {
                i915_vma_unpin(vma);
                return PTR_ERR(rq);
@@ -199,7 +199,7 @@ static int gpu_set(struct drm_i915_gem_object *obj,
 
        cs = intel_ring_begin(rq, 4);
        if (IS_ERR(cs)) {
-               __i915_add_request(rq, false);
+               __i915_request_add(rq, false);
                i915_vma_unpin(vma);
                return PTR_ERR(cs);
        }
@@ -229,7 +229,7 @@ static int gpu_set(struct drm_i915_gem_object *obj,
        reservation_object_add_excl_fence(obj->resv, &rq->fence);
        reservation_object_unlock(obj->resv);
 
-       __i915_add_request(rq, true);
+       __i915_request_add(rq, true);
 
        return 0;
 }
index 6da2a2f..7ecaed5 100644 (file)
@@ -114,7 +114,7 @@ static int gpu_fill(struct drm_i915_gem_object *obj,
        struct drm_i915_private *i915 = to_i915(obj->base.dev);
        struct i915_address_space *vm =
                ctx->ppgtt ? &ctx->ppgtt->base : &i915->ggtt.base;
-       struct drm_i915_gem_request *rq;
+       struct i915_request *rq;
        struct i915_vma *vma;
        struct i915_vma *batch;
        unsigned int flags;
@@ -152,7 +152,7 @@ static int gpu_fill(struct drm_i915_gem_object *obj,
                goto err_vma;
        }
 
-       rq = i915_gem_request_alloc(engine, ctx);
+       rq = i915_request_alloc(engine, ctx);
        if (IS_ERR(rq)) {
                err = PTR_ERR(rq);
                goto err_batch;
@@ -180,12 +180,12 @@ static int gpu_fill(struct drm_i915_gem_object *obj,
        reservation_object_add_excl_fence(obj->resv, &rq->fence);
        reservation_object_unlock(obj->resv);
 
-       __i915_add_request(rq, true);
+       __i915_request_add(rq, true);
 
        return 0;
 
 err_request:
-       __i915_add_request(rq, false);
+       __i915_request_add(rq, false);
 err_batch:
        i915_vma_unpin(batch);
 err_vma:
index e1ddad6..ab9d7be 100644 (file)
@@ -407,7 +407,7 @@ static int igt_evict_contexts(void *arg)
                mutex_lock(&i915->drm.struct_mutex);
                onstack_fence_init(&fence);
                do {
-                       struct drm_i915_gem_request *rq;
+                       struct i915_request *rq;
                        struct i915_gem_context *ctx;
 
                        ctx = live_context(i915, file);
@@ -416,7 +416,7 @@ static int igt_evict_contexts(void *arg)
 
                        /* We will need some GGTT space for the rq's context */
                        igt_evict_ctl.fail_if_busy = true;
-                       rq = i915_gem_request_alloc(engine, ctx);
+                       rq = i915_request_alloc(engine, ctx);
                        igt_evict_ctl.fail_if_busy = false;
 
                        if (IS_ERR(rq)) {
@@ -437,7 +437,7 @@ static int igt_evict_contexts(void *arg)
                        if (err < 0)
                                break;
 
-                       i915_add_request(rq);
+                       i915_request_add(rq);
                        count++;
                        err = 0;
                } while(1);
index 3c64815..fbdb241 100644 (file)
@@ -436,7 +436,7 @@ out:
 static int make_obj_busy(struct drm_i915_gem_object *obj)
 {
        struct drm_i915_private *i915 = to_i915(obj->base.dev);
-       struct drm_i915_gem_request *rq;
+       struct i915_request *rq;
        struct i915_vma *vma;
        int err;
 
@@ -448,14 +448,14 @@ static int make_obj_busy(struct drm_i915_gem_object *obj)
        if (err)
                return err;
 
-       rq = i915_gem_request_alloc(i915->engine[RCS], i915->kernel_context);
+       rq = i915_request_alloc(i915->engine[RCS], i915->kernel_context);
        if (IS_ERR(rq)) {
                i915_vma_unpin(vma);
                return PTR_ERR(rq);
        }
 
        i915_vma_move_to_active(vma, rq, 0);
-       i915_add_request(rq);
+       i915_request_add(rq);
 
        i915_gem_object_set_active_reference(obj);
        i915_vma_unpin(vma);
index 088f45b..9c76f03 100644 (file)
@@ -11,7 +11,7 @@
  */
 selftest(sanitycheck, i915_live_sanitycheck) /* keep first (igt selfcheck) */
 selftest(uncore, intel_uncore_live_selftests)
-selftest(requests, i915_gem_request_live_selftests)
+selftest(requests, i915_request_live_selftests)
 selftest(objects, i915_gem_object_live_selftests)
 selftest(dmabuf, i915_gem_dmabuf_live_selftests)
 selftest(coherency, i915_gem_coherency_live_selftests)
index 19c6fce..9a48aa4 100644 (file)
@@ -16,7 +16,7 @@ selftest(syncmap, i915_syncmap_mock_selftests)
 selftest(uncore, intel_uncore_mock_selftests)
 selftest(breadcrumbs, intel_breadcrumbs_mock_selftests)
 selftest(timelines, i915_gem_timeline_mock_selftests)
-selftest(requests, i915_gem_request_mock_selftests)
+selftest(requests, i915_request_mock_selftests)
 selftest(objects, i915_gem_object_mock_selftests)
 selftest(dmabuf, i915_gem_dmabuf_mock_selftests)
 selftest(vma, i915_vma_mock_selftests)
@@ -32,7 +32,7 @@
 static int igt_add_request(void *arg)
 {
        struct drm_i915_private *i915 = arg;
-       struct drm_i915_gem_request *request;
+       struct i915_request *request;
        int err = -ENOMEM;
 
        /* Basic preliminary test to create a request and let it loose! */
@@ -44,7 +44,7 @@ static int igt_add_request(void *arg)
        if (!request)
                goto out_unlock;
 
-       i915_add_request(request);
+       i915_request_add(request);
 
        err = 0;
 out_unlock:
@@ -56,7 +56,7 @@ static int igt_wait_request(void *arg)
 {
        const long T = HZ / 4;
        struct drm_i915_private *i915 = arg;
-       struct drm_i915_gem_request *request;
+       struct i915_request *request;
        int err = -EINVAL;
 
        /* Submit a request, then wait upon it */
@@ -68,49 +68,49 @@ static int igt_wait_request(void *arg)
                goto out_unlock;
        }
 
-       if (i915_wait_request(request, I915_WAIT_LOCKED, 0) != -ETIME) {
+       if (i915_request_wait(request, I915_WAIT_LOCKED, 0) != -ETIME) {
                pr_err("request wait (busy query) succeeded (expected timeout before submit!)\n");
                goto out_unlock;
        }
 
-       if (i915_wait_request(request, I915_WAIT_LOCKED, T) != -ETIME) {
+       if (i915_request_wait(request, I915_WAIT_LOCKED, T) != -ETIME) {
                pr_err("request wait succeeded (expected timeout before submit!)\n");
                goto out_unlock;
        }
 
-       if (i915_gem_request_completed(request)) {
+       if (i915_request_completed(request)) {
                pr_err("request completed before submit!!\n");
                goto out_unlock;
        }
 
-       i915_add_request(request);
+       i915_request_add(request);
 
-       if (i915_wait_request(request, I915_WAIT_LOCKED, 0) != -ETIME) {
+       if (i915_request_wait(request, I915_WAIT_LOCKED, 0) != -ETIME) {
                pr_err("request wait (busy query) succeeded (expected timeout after submit!)\n");
                goto out_unlock;
        }
 
-       if (i915_gem_request_completed(request)) {
+       if (i915_request_completed(request)) {
                pr_err("request completed immediately!\n");
                goto out_unlock;
        }
 
-       if (i915_wait_request(request, I915_WAIT_LOCKED, T / 2) != -ETIME) {
+       if (i915_request_wait(request, I915_WAIT_LOCKED, T / 2) != -ETIME) {
                pr_err("request wait succeeded (expected timeout!)\n");
                goto out_unlock;
        }
 
-       if (i915_wait_request(request, I915_WAIT_LOCKED, T) == -ETIME) {
+       if (i915_request_wait(request, I915_WAIT_LOCKED, T) == -ETIME) {
                pr_err("request wait timed out!\n");
                goto out_unlock;
        }
 
-       if (!i915_gem_request_completed(request)) {
+       if (!i915_request_completed(request)) {
                pr_err("request not complete after waiting!\n");
                goto out_unlock;
        }
 
-       if (i915_wait_request(request, I915_WAIT_LOCKED, T) == -ETIME) {
+       if (i915_request_wait(request, I915_WAIT_LOCKED, T) == -ETIME) {
                pr_err("request wait timed out when already complete!\n");
                goto out_unlock;
        }
@@ -126,7 +126,7 @@ static int igt_fence_wait(void *arg)
 {
        const long T = HZ / 4;
        struct drm_i915_private *i915 = arg;
-       struct drm_i915_gem_request *request;
+       struct i915_request *request;
        int err = -EINVAL;
 
        /* Submit a request, treat it as a fence and wait upon it */
@@ -145,7 +145,7 @@ static int igt_fence_wait(void *arg)
        }
 
        mutex_lock(&i915->drm.struct_mutex);
-       i915_add_request(request);
+       i915_request_add(request);
        mutex_unlock(&i915->drm.struct_mutex);
 
        if (dma_fence_is_signaled(&request->fence)) {
@@ -185,7 +185,7 @@ out_locked:
 static int igt_request_rewind(void *arg)
 {
        struct drm_i915_private *i915 = arg;
-       struct drm_i915_gem_request *request, *vip;
+       struct i915_request *request, *vip;
        struct i915_gem_context *ctx[2];
        int err = -EINVAL;
 
@@ -197,8 +197,8 @@ static int igt_request_rewind(void *arg)
                goto err_context_0;
        }
 
-       i915_gem_request_get(request);
-       i915_add_request(request);
+       i915_request_get(request);
+       i915_request_add(request);
 
        ctx[1] = mock_context(i915, "B");
        vip = mock_request(i915->engine[RCS], ctx[1], 0);
@@ -210,35 +210,35 @@ static int igt_request_rewind(void *arg)
        /* Simulate preemption by manual reordering */
        if (!mock_cancel_request(request)) {
                pr_err("failed to cancel request (already executed)!\n");
-               i915_add_request(vip);
+               i915_request_add(vip);
                goto err_context_1;
        }
-       i915_gem_request_get(vip);
-       i915_add_request(vip);
+       i915_request_get(vip);
+       i915_request_add(vip);
        rcu_read_lock();
        request->engine->submit_request(request);
        rcu_read_unlock();
 
        mutex_unlock(&i915->drm.struct_mutex);
 
-       if (i915_wait_request(vip, 0, HZ) == -ETIME) {
+       if (i915_request_wait(vip, 0, HZ) == -ETIME) {
                pr_err("timed out waiting for high priority request, vip.seqno=%d, current seqno=%d\n",
                       vip->global_seqno, intel_engine_get_seqno(i915->engine[RCS]));
                goto err;
        }
 
-       if (i915_gem_request_completed(request)) {
+       if (i915_request_completed(request)) {
                pr_err("low priority request already completed\n");
                goto err;
        }
 
        err = 0;
 err:
-       i915_gem_request_put(vip);
+       i915_request_put(vip);
        mutex_lock(&i915->drm.struct_mutex);
 err_context_1:
        mock_context_close(ctx[1]);
-       i915_gem_request_put(request);
+       i915_request_put(request);
 err_context_0:
        mock_context_close(ctx[0]);
        mock_device_flush(i915);
@@ -246,7 +246,7 @@ err_context_0:
        return err;
 }
 
-int i915_gem_request_mock_selftests(void)
+int i915_request_mock_selftests(void)
 {
        static const struct i915_subtest tests[] = {
                SUBTEST(igt_add_request),
@@ -303,7 +303,7 @@ static int end_live_test(struct live_test *t)
 {
        struct drm_i915_private *i915 = t->i915;
 
-       i915_gem_retire_requests(i915);
+       i915_retire_requests(i915);
 
        if (wait_for(intel_engines_are_idle(i915), 10)) {
                pr_err("%s(%s): GPU not idle\n", t->func, t->name);
@@ -343,7 +343,7 @@ static int live_nop_request(void *arg)
 
        for_each_engine(engine, i915, id) {
                IGT_TIMEOUT(end_time);
-               struct drm_i915_gem_request *request;
+               struct i915_request *request;
                unsigned long n, prime;
                ktime_t times[2] = {};
 
@@ -355,8 +355,8 @@ static int live_nop_request(void *arg)
                        times[1] = ktime_get_raw();
 
                        for (n = 0; n < prime; n++) {
-                               request = i915_gem_request_alloc(engine,
-                                                                i915->kernel_context);
+                               request = i915_request_alloc(engine,
+                                                            i915->kernel_context);
                                if (IS_ERR(request)) {
                                        err = PTR_ERR(request);
                                        goto out_unlock;
@@ -375,9 +375,9 @@ static int live_nop_request(void *arg)
                                 * for latency.
                                 */
 
-                               i915_add_request(request);
+                               i915_request_add(request);
                        }
-                       i915_wait_request(request,
+                       i915_request_wait(request,
                                          I915_WAIT_LOCKED,
                                          MAX_SCHEDULE_TIMEOUT);
 
@@ -447,15 +447,14 @@ err:
        return ERR_PTR(err);
 }
 
-static struct drm_i915_gem_request *
+static struct i915_request *
 empty_request(struct intel_engine_cs *engine,
              struct i915_vma *batch)
 {
-       struct drm_i915_gem_request *request;
+       struct i915_request *request;
        int err;
 
-       request = i915_gem_request_alloc(engine,
-                                        engine->i915->kernel_context);
+       request = i915_request_alloc(engine, engine->i915->kernel_context);
        if (IS_ERR(request))
                return request;
 
@@ -467,7 +466,7 @@ empty_request(struct intel_engine_cs *engine,
                goto out_request;
 
 out_request:
-       __i915_add_request(request, err == 0);
+       __i915_request_add(request, err == 0);
        return err ? ERR_PTR(err) : request;
 }
 
@@ -495,7 +494,7 @@ static int live_empty_request(void *arg)
 
        for_each_engine(engine, i915, id) {
                IGT_TIMEOUT(end_time);
-               struct drm_i915_gem_request *request;
+               struct i915_request *request;
                unsigned long n, prime;
                ktime_t times[2] = {};
 
@@ -509,7 +508,7 @@ static int live_empty_request(void *arg)
                        err = PTR_ERR(request);
                        goto out_batch;
                }
-               i915_wait_request(request,
+               i915_request_wait(request,
                                  I915_WAIT_LOCKED,
                                  MAX_SCHEDULE_TIMEOUT);
 
@@ -523,7 +522,7 @@ static int live_empty_request(void *arg)
                                        goto out_batch;
                                }
                        }
-                       i915_wait_request(request,
+                       i915_request_wait(request,
                                          I915_WAIT_LOCKED,
                                          MAX_SCHEDULE_TIMEOUT);
 
@@ -633,7 +632,7 @@ static int live_all_engines(void *arg)
 {
        struct drm_i915_private *i915 = arg;
        struct intel_engine_cs *engine;
-       struct drm_i915_gem_request *request[I915_NUM_ENGINES];
+       struct i915_request *request[I915_NUM_ENGINES];
        struct i915_vma *batch;
        struct live_test t;
        unsigned int id;
@@ -658,8 +657,7 @@ static int live_all_engines(void *arg)
        }
 
        for_each_engine(engine, i915, id) {
-               request[id] = i915_gem_request_alloc(engine,
-                                                    i915->kernel_context);
+               request[id] = i915_request_alloc(engine, i915->kernel_context);
                if (IS_ERR(request[id])) {
                        err = PTR_ERR(request[id]);
                        pr_err("%s: Request allocation failed with err=%d\n",
@@ -680,12 +678,12 @@ static int live_all_engines(void *arg)
                }
 
                i915_vma_move_to_active(batch, request[id], 0);
-               i915_gem_request_get(request[id]);
-               i915_add_request(request[id]);
+               i915_request_get(request[id]);
+               i915_request_add(request[id]);
        }
 
        for_each_engine(engine, i915, id) {
-               if (i915_gem_request_completed(request[id])) {
+               if (i915_request_completed(request[id])) {
                        pr_err("%s(%s): request completed too early!\n",
                               __func__, engine->name);
                        err = -EINVAL;
@@ -702,7 +700,7 @@ static int live_all_engines(void *arg)
        for_each_engine(engine, i915, id) {
                long timeout;
 
-               timeout = i915_wait_request(request[id],
+               timeout = i915_request_wait(request[id],
                                            I915_WAIT_LOCKED,
                                            MAX_SCHEDULE_TIMEOUT);
                if (timeout < 0) {
@@ -712,8 +710,8 @@ static int live_all_engines(void *arg)
                        goto out_request;
                }
 
-               GEM_BUG_ON(!i915_gem_request_completed(request[id]));
-               i915_gem_request_put(request[id]);
+               GEM_BUG_ON(!i915_request_completed(request[id]));
+               i915_request_put(request[id]);
                request[id] = NULL;
        }
 
@@ -722,7 +720,7 @@ static int live_all_engines(void *arg)
 out_request:
        for_each_engine(engine, i915, id)
                if (request[id])
-                       i915_gem_request_put(request[id]);
+                       i915_request_put(request[id]);
        i915_vma_unpin(batch);
        i915_vma_put(batch);
 out_unlock:
@@ -733,8 +731,8 @@ out_unlock:
 static int live_sequential_engines(void *arg)
 {
        struct drm_i915_private *i915 = arg;
-       struct drm_i915_gem_request *request[I915_NUM_ENGINES] = {};
-       struct drm_i915_gem_request *prev = NULL;
+       struct i915_request *request[I915_NUM_ENGINES] = {};
+       struct i915_request *prev = NULL;
        struct intel_engine_cs *engine;
        struct live_test t;
        unsigned int id;
@@ -763,8 +761,7 @@ static int live_sequential_engines(void *arg)
                        goto out_unlock;
                }
 
-               request[id] = i915_gem_request_alloc(engine,
-                                                    i915->kernel_context);
+               request[id] = i915_request_alloc(engine, i915->kernel_context);
                if (IS_ERR(request[id])) {
                        err = PTR_ERR(request[id]);
                        pr_err("%s: Request allocation failed for %s with err=%d\n",
@@ -773,10 +770,10 @@ static int live_sequential_engines(void *arg)
                }
 
                if (prev) {
-                       err = i915_gem_request_await_dma_fence(request[id],
-                                                              &prev->fence);
+                       err = i915_request_await_dma_fence(request[id],
+                                                          &prev->fence);
                        if (err) {
-                               i915_add_request(request[id]);
+                               i915_request_add(request[id]);
                                pr_err("%s: Request await failed for %s with err=%d\n",
                                       __func__, engine->name, err);
                                goto out_request;
@@ -794,8 +791,8 @@ static int live_sequential_engines(void *arg)
                i915_gem_object_set_active_reference(batch->obj);
                i915_vma_get(batch);
 
-               i915_gem_request_get(request[id]);
-               i915_add_request(request[id]);
+               i915_request_get(request[id]);
+               i915_request_add(request[id]);
 
                prev = request[id];
        }
@@ -803,7 +800,7 @@ static int live_sequential_engines(void *arg)
        for_each_engine(engine, i915, id) {
                long timeout;
 
-               if (i915_gem_request_completed(request[id])) {
+               if (i915_request_completed(request[id])) {
                        pr_err("%s(%s): request completed too early!\n",
                               __func__, engine->name);
                        err = -EINVAL;
@@ -817,7 +814,7 @@ static int live_sequential_engines(void *arg)
                        goto out_request;
                }
 
-               timeout = i915_wait_request(request[id],
+               timeout = i915_request_wait(request[id],
                                            I915_WAIT_LOCKED,
                                            MAX_SCHEDULE_TIMEOUT);
                if (timeout < 0) {
@@ -827,7 +824,7 @@ static int live_sequential_engines(void *arg)
                        goto out_request;
                }
 
-               GEM_BUG_ON(!i915_gem_request_completed(request[id]));
+               GEM_BUG_ON(!i915_request_completed(request[id]));
        }
 
        err = end_live_test(&t);
@@ -849,14 +846,14 @@ out_request:
                }
 
                i915_vma_put(request[id]->batch);
-               i915_gem_request_put(request[id]);
+               i915_request_put(request[id]);
        }
 out_unlock:
        mutex_unlock(&i915->drm.struct_mutex);
        return err;
 }
 
-int i915_gem_request_live_selftests(struct drm_i915_private *i915)
+int i915_request_live_selftests(struct drm_i915_private *i915)
 {
        static const struct i915_subtest tests[] = {
                SUBTEST(live_nop_request),
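
Taken together, the selftest conversions above exercise the renamed request lifecycle end to end: allocate, take a reference, submit, wait, then drop the reference. A condensed sketch of that flow under the new names; wait_on_kernel_request() is a hypothetical wrapper for illustration and assumes the caller already holds struct_mutex, as the locked selftests above do.

static int wait_on_kernel_request(struct drm_i915_private *i915,
				  struct intel_engine_cs *engine)
{
	struct i915_request *rq;
	long timeout;

	rq = i915_request_alloc(engine, i915->kernel_context); /* was i915_gem_request_alloc() */
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	i915_request_get(rq);		/* hold a reference across submission */
	i915_request_add(rq);		/* was i915_add_request() */

	timeout = i915_request_wait(rq, I915_WAIT_LOCKED,
				    MAX_SCHEDULE_TIMEOUT); /* was i915_wait_request() */
	if (timeout >= 0)
		GEM_BUG_ON(!i915_request_completed(rq));

	i915_request_put(rq);		/* was i915_gem_request_put() */
	return timeout < 0 ? timeout : 0;
}
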
index d1d2c24..df7898c 100644 (file)
@@ -92,13 +92,13 @@ err_ctx:
 }
 
 static u64 hws_address(const struct i915_vma *hws,
-                      const struct drm_i915_gem_request *rq)
+                      const struct i915_request *rq)
 {
        return hws->node.start + offset_in_page(sizeof(u32)*rq->fence.context);
 }
 
 static int emit_recurse_batch(struct hang *h,
-                             struct drm_i915_gem_request *rq)
+                             struct i915_request *rq)
 {
        struct drm_i915_private *i915 = h->i915;
        struct i915_address_space *vm = rq->ctx->ppgtt ? &rq->ctx->ppgtt->base : &i915->ggtt.base;
@@ -204,10 +204,10 @@ unpin_vma:
        return err;
 }
 
-static struct drm_i915_gem_request *
+static struct i915_request *
 hang_create_request(struct hang *h, struct intel_engine_cs *engine)
 {
-       struct drm_i915_gem_request *rq;
+       struct i915_request *rq;
        int err;
 
        if (i915_gem_object_is_active(h->obj)) {
@@ -232,21 +232,20 @@ hang_create_request(struct hang *h, struct intel_engine_cs *engine)
                h->batch = vaddr;
        }
 
-       rq = i915_gem_request_alloc(engine, h->ctx);
+       rq = i915_request_alloc(engine, h->ctx);
        if (IS_ERR(rq))
                return rq;
 
        err = emit_recurse_batch(h, rq);
        if (err) {
-               __i915_add_request(rq, false);
+               __i915_request_add(rq, false);
                return ERR_PTR(err);
        }
 
        return rq;
 }
 
-static u32 hws_seqno(const struct hang *h,
-                    const struct drm_i915_gem_request *rq)
+static u32 hws_seqno(const struct hang *h, const struct i915_request *rq)
 {
        return READ_ONCE(h->seqno[rq->fence.context % (PAGE_SIZE/sizeof(u32))]);
 }
@@ -319,7 +318,7 @@ static void hang_fini(struct hang *h)
        flush_test(h->i915, I915_WAIT_LOCKED);
 }
 
-static bool wait_for_hang(struct hang *h, struct drm_i915_gem_request *rq)
+static bool wait_for_hang(struct hang *h, struct i915_request *rq)
 {
        return !(wait_for_us(i915_seqno_passed(hws_seqno(h, rq),
                                               rq->fence.seqno),
@@ -332,7 +331,7 @@ static bool wait_for_hang(struct hang *h, struct drm_i915_gem_request *rq)
 static int igt_hang_sanitycheck(void *arg)
 {
        struct drm_i915_private *i915 = arg;
-       struct drm_i915_gem_request *rq;
+       struct i915_request *rq;
        struct intel_engine_cs *engine;
        enum intel_engine_id id;
        struct hang h;
@@ -359,17 +358,17 @@ static int igt_hang_sanitycheck(void *arg)
                        goto fini;
                }
 
-               i915_gem_request_get(rq);
+               i915_request_get(rq);
 
                *h.batch = MI_BATCH_BUFFER_END;
                i915_gem_chipset_flush(i915);
 
-               __i915_add_request(rq, true);
+               __i915_request_add(rq, true);
 
-               timeout = i915_wait_request(rq,
+               timeout = i915_request_wait(rq,
                                            I915_WAIT_LOCKED,
                                            MAX_SCHEDULE_TIMEOUT);
-               i915_gem_request_put(rq);
+               i915_request_put(rq);
 
                if (timeout < 0) {
                        err = timeout;
@@ -485,7 +484,7 @@ static int __igt_reset_engine(struct drm_i915_private *i915, bool active)
                set_bit(I915_RESET_ENGINE + id, &i915->gpu_error.flags);
                do {
                        if (active) {
-                               struct drm_i915_gem_request *rq;
+                               struct i915_request *rq;
 
                                mutex_lock(&i915->drm.struct_mutex);
                                rq = hang_create_request(&h, engine);
@@ -495,8 +494,8 @@ static int __igt_reset_engine(struct drm_i915_private *i915, bool active)
                                        break;
                                }
 
-                               i915_gem_request_get(rq);
-                               __i915_add_request(rq, true);
+                               i915_request_get(rq);
+                               __i915_request_add(rq, true);
                                mutex_unlock(&i915->drm.struct_mutex);
 
                                if (!wait_for_hang(&h, rq)) {
@@ -507,12 +506,12 @@ static int __igt_reset_engine(struct drm_i915_private *i915, bool active)
                                        intel_engine_dump(engine, &p,
                                                          "%s\n", engine->name);
 
-                                       i915_gem_request_put(rq);
+                                       i915_request_put(rq);
                                        err = -EIO;
                                        break;
                                }
 
-                               i915_gem_request_put(rq);
+                               i915_request_put(rq);
                        }
 
                        engine->hangcheck.stalled = true;
@@ -577,7 +576,7 @@ static int igt_reset_active_engine(void *arg)
 static int active_engine(void *data)
 {
        struct intel_engine_cs *engine = data;
-       struct drm_i915_gem_request *rq[2] = {};
+       struct i915_request *rq[2] = {};
        struct i915_gem_context *ctx[2];
        struct drm_file *file;
        unsigned long count = 0;
@@ -606,29 +605,29 @@ static int active_engine(void *data)
 
        while (!kthread_should_stop()) {
                unsigned int idx = count++ & 1;
-               struct drm_i915_gem_request *old = rq[idx];
-               struct drm_i915_gem_request *new;
+               struct i915_request *old = rq[idx];
+               struct i915_request *new;
 
                mutex_lock(&engine->i915->drm.struct_mutex);
-               new = i915_gem_request_alloc(engine, ctx[idx]);
+               new = i915_request_alloc(engine, ctx[idx]);
                if (IS_ERR(new)) {
                        mutex_unlock(&engine->i915->drm.struct_mutex);
                        err = PTR_ERR(new);
                        break;
                }
 
-               rq[idx] = i915_gem_request_get(new);
-               i915_add_request(new);
+               rq[idx] = i915_request_get(new);
+               i915_request_add(new);
                mutex_unlock(&engine->i915->drm.struct_mutex);
 
                if (old) {
-                       i915_wait_request(old, 0, MAX_SCHEDULE_TIMEOUT);
-                       i915_gem_request_put(old);
+                       i915_request_wait(old, 0, MAX_SCHEDULE_TIMEOUT);
+                       i915_request_put(old);
                }
        }
 
        for (count = 0; count < ARRAY_SIZE(rq); count++)
-               i915_gem_request_put(rq[count]);
+               i915_request_put(rq[count]);
 
 err_file:
        mock_file_free(engine->i915, file);
@@ -692,7 +691,7 @@ static int __igt_reset_engine_others(struct drm_i915_private *i915,
                set_bit(I915_RESET_ENGINE + id, &i915->gpu_error.flags);
                do {
                        if (active) {
-                               struct drm_i915_gem_request *rq;
+                               struct i915_request *rq;
 
                                mutex_lock(&i915->drm.struct_mutex);
                                rq = hang_create_request(&h, engine);
@@ -702,8 +701,8 @@ static int __igt_reset_engine_others(struct drm_i915_private *i915,
                                        break;
                                }
 
-                               i915_gem_request_get(rq);
-                               __i915_add_request(rq, true);
+                               i915_request_get(rq);
+                               __i915_request_add(rq, true);
                                mutex_unlock(&i915->drm.struct_mutex);
 
                                if (!wait_for_hang(&h, rq)) {
@@ -714,12 +713,12 @@ static int __igt_reset_engine_others(struct drm_i915_private *i915,
                                        intel_engine_dump(engine, &p,
                                                          "%s\n", engine->name);
 
-                                       i915_gem_request_put(rq);
+                                       i915_request_put(rq);
                                        err = -EIO;
                                        break;
                                }
 
-                               i915_gem_request_put(rq);
+                               i915_request_put(rq);
                        }
 
                        engine->hangcheck.stalled = true;
@@ -814,7 +813,7 @@ static int igt_reset_active_engine_others(void *arg)
        return __igt_reset_engine_others(arg, true);
 }
 
-static u32 fake_hangcheck(struct drm_i915_gem_request *rq)
+static u32 fake_hangcheck(struct i915_request *rq)
 {
        u32 reset_count;
 
@@ -832,7 +831,7 @@ static u32 fake_hangcheck(struct drm_i915_gem_request *rq)
 static int igt_wait_reset(void *arg)
 {
        struct drm_i915_private *i915 = arg;
-       struct drm_i915_gem_request *rq;
+       struct i915_request *rq;
        unsigned int reset_count;
        struct hang h;
        long timeout;
@@ -856,8 +855,8 @@ static int igt_wait_reset(void *arg)
                goto fini;
        }
 
-       i915_gem_request_get(rq);
-       __i915_add_request(rq, true);
+       i915_request_get(rq);
+       __i915_request_add(rq, true);
 
        if (!wait_for_hang(&h, rq)) {
                struct drm_printer p = drm_info_printer(i915->drm.dev);
@@ -875,9 +874,9 @@ static int igt_wait_reset(void *arg)
 
        reset_count = fake_hangcheck(rq);
 
-       timeout = i915_wait_request(rq, I915_WAIT_LOCKED, 10);
+       timeout = i915_request_wait(rq, I915_WAIT_LOCKED, 10);
        if (timeout < 0) {
-               pr_err("i915_wait_request failed on a stuck request: err=%ld\n",
+               pr_err("i915_request_wait failed on a stuck request: err=%ld\n",
                       timeout);
                err = timeout;
                goto out_rq;
@@ -891,7 +890,7 @@ static int igt_wait_reset(void *arg)
        }
 
 out_rq:
-       i915_gem_request_put(rq);
+       i915_request_put(rq);
 fini:
        hang_fini(&h);
 unlock:
@@ -922,7 +921,7 @@ static int igt_reset_queue(void *arg)
                goto unlock;
 
        for_each_engine(engine, i915, id) {
-               struct drm_i915_gem_request *prev;
+               struct i915_request *prev;
                IGT_TIMEOUT(end_time);
                unsigned int count;
 
@@ -935,12 +934,12 @@ static int igt_reset_queue(void *arg)
                        goto fini;
                }
 
-               i915_gem_request_get(prev);
-               __i915_add_request(prev, true);
+               i915_request_get(prev);
+               __i915_request_add(prev, true);
 
                count = 0;
                do {
-                       struct drm_i915_gem_request *rq;
+                       struct i915_request *rq;
                        unsigned int reset_count;
 
                        rq = hang_create_request(&h, engine);
@@ -949,8 +948,8 @@ static int igt_reset_queue(void *arg)
                                goto fini;
                        }
 
-                       i915_gem_request_get(rq);
-                       __i915_add_request(rq, true);
+                       i915_request_get(rq);
+                       __i915_request_add(rq, true);
 
                        if (!wait_for_hang(&h, prev)) {
                                struct drm_printer p = drm_info_printer(i915->drm.dev);
@@ -960,8 +959,8 @@ static int igt_reset_queue(void *arg)
                                intel_engine_dump(prev->engine, &p,
                                                  "%s\n", prev->engine->name);
 
-                               i915_gem_request_put(rq);
-                               i915_gem_request_put(prev);
+                               i915_request_put(rq);
+                               i915_request_put(prev);
 
                                i915_reset(i915, 0);
                                i915_gem_set_wedged(i915);
@@ -980,8 +979,8 @@ static int igt_reset_queue(void *arg)
                        if (prev->fence.error != -EIO) {
                                pr_err("GPU reset not recorded on hanging request [fence.error=%d]!\n",
                                       prev->fence.error);
-                               i915_gem_request_put(rq);
-                               i915_gem_request_put(prev);
+                               i915_request_put(rq);
+                               i915_request_put(prev);
                                err = -EINVAL;
                                goto fini;
                        }
@@ -989,21 +988,21 @@ static int igt_reset_queue(void *arg)
                        if (rq->fence.error) {
                                pr_err("Fence error status not zero [%d] after unrelated reset\n",
                                       rq->fence.error);
-                               i915_gem_request_put(rq);
-                               i915_gem_request_put(prev);
+                               i915_request_put(rq);
+                               i915_request_put(prev);
                                err = -EINVAL;
                                goto fini;
                        }
 
                        if (i915_reset_count(&i915->gpu_error) == reset_count) {
                                pr_err("No GPU reset recorded!\n");
-                               i915_gem_request_put(rq);
-                               i915_gem_request_put(prev);
+                               i915_request_put(rq);
+                               i915_request_put(prev);
                                err = -EINVAL;
                                goto fini;
                        }
 
-                       i915_gem_request_put(prev);
+                       i915_request_put(prev);
                        prev = rq;
                        count++;
                } while (time_before(jiffies, end_time));
@@ -1012,7 +1011,7 @@ static int igt_reset_queue(void *arg)
                *h.batch = MI_BATCH_BUFFER_END;
                i915_gem_chipset_flush(i915);
 
-               i915_gem_request_put(prev);
+               i915_request_put(prev);
 
                err = flush_test(i915, I915_WAIT_LOCKED);
                if (err)
@@ -1036,7 +1035,7 @@ static int igt_handle_error(void *arg)
        struct drm_i915_private *i915 = arg;
        struct intel_engine_cs *engine = i915->engine[RCS];
        struct hang h;
-       struct drm_i915_gem_request *rq;
+       struct i915_request *rq;
        struct i915_gpu_state *error;
        int err;
 
@@ -1060,8 +1059,8 @@ static int igt_handle_error(void *arg)
                goto err_fini;
        }
 
-       i915_gem_request_get(rq);
-       __i915_add_request(rq, true);
+       i915_request_get(rq);
+       __i915_request_add(rq, true);
 
        if (!wait_for_hang(&h, rq)) {
                struct drm_printer p = drm_info_printer(i915->drm.dev);
@@ -1098,7 +1097,7 @@ static int igt_handle_error(void *arg)
        }
 
 err_request:
-       i915_gem_request_put(rq);
+       i915_request_put(rq);
 err_fini:
        hang_fini(&h);
 err_unlock:
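(For reference only, not part of the diff: the hang selftests above all follow the same request lifecycle under the renamed API. A minimal sketch, with error handling trimmed and using only the helpers and signatures visible in the hunks above:)

	/*
	 * Illustrative sketch of the selftest request pattern after the
	 * s/i915_gem_request/i915_request/ rename.  hang_create_request()
	 * and wait_for_hang() are the selftest helpers shown above.
	 */
	rq = hang_create_request(&h, engine);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	i915_request_get(rq);		/* hold a reference across the reset */
	__i915_request_add(rq, true);	/* flush and submit the request */

	if (!wait_for_hang(&h, rq)) {	/* spin until the batch is stuck */
		i915_request_put(rq);
		return -EIO;
	}

	/* ... trigger the reset, then wait on the stuck request ... */
	timeout = i915_request_wait(rq, I915_WAIT_LOCKED, 10);

	i915_request_put(rq);		/* drop our reference */
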
index 55c0e2c..78a89ef 100644 (file)
@@ -81,7 +81,7 @@ static void mock_context_unpin(struct intel_engine_cs *engine,
        i915_gem_context_put(ctx);
 }
 
-static int mock_request_alloc(struct drm_i915_gem_request *request)
+static int mock_request_alloc(struct i915_request *request)
 {
        struct mock_request *mock = container_of(request, typeof(*mock), base);
 
@@ -91,24 +91,24 @@ static int mock_request_alloc(struct drm_i915_gem_request *request)
        return 0;
 }
 
-static int mock_emit_flush(struct drm_i915_gem_request *request,
+static int mock_emit_flush(struct i915_request *request,
                           unsigned int flags)
 {
        return 0;
 }
 
-static void mock_emit_breadcrumb(struct drm_i915_gem_request *request,
+static void mock_emit_breadcrumb(struct i915_request *request,
                                 u32 *flags)
 {
 }
 
-static void mock_submit_request(struct drm_i915_gem_request *request)
+static void mock_submit_request(struct i915_request *request)
 {
        struct mock_request *mock = container_of(request, typeof(*mock), base);
        struct mock_engine *engine =
                container_of(request->engine, typeof(*engine), base);
 
-       i915_gem_request_submit(request);
+       i915_request_submit(request);
        GEM_BUG_ON(!request->global_seqno);
 
        spin_lock_irq(&engine->hw_lock);
index 3175db7..e6d4b88 100644 (file)
@@ -43,7 +43,7 @@ void mock_device_flush(struct drm_i915_private *i915)
        for_each_engine(engine, i915, id)
                mock_engine_flush(engine);
 
-       i915_gem_retire_requests(i915);
+       i915_retire_requests(i915);
 }
 
 static void mock_device_release(struct drm_device *dev)
index 8097e36..0dc29e2 100644 (file)
 #include "mock_engine.h"
 #include "mock_request.h"
 
-struct drm_i915_gem_request *
+struct i915_request *
 mock_request(struct intel_engine_cs *engine,
             struct i915_gem_context *context,
             unsigned long delay)
 {
-       struct drm_i915_gem_request *request;
+       struct i915_request *request;
        struct mock_request *mock;
 
        /* NB the i915->requests slab cache is enlarged to fit mock_request */
-       request = i915_gem_request_alloc(engine, context);
+       request = i915_request_alloc(engine, context);
        if (IS_ERR(request))
                return NULL;
 
@@ -44,7 +44,7 @@ mock_request(struct intel_engine_cs *engine,
        return &mock->base;
 }
 
-bool mock_cancel_request(struct drm_i915_gem_request *request)
+bool mock_cancel_request(struct i915_request *request)
 {
        struct mock_request *mock = container_of(request, typeof(*mock), base);
        struct mock_engine *engine =
@@ -57,7 +57,7 @@ bool mock_cancel_request(struct drm_i915_gem_request *request)
        spin_unlock_irq(&engine->hw_lock);
 
        if (was_queued)
-               i915_gem_request_unsubmit(request);
+               i915_request_unsubmit(request);
 
        return was_queued;
 }
index 4dea74c..995fb72 100644 (file)
 
 #include <linux/list.h>
 
-#include "../i915_gem_request.h"
+#include "../i915_request.h"
 
 struct mock_request {
-       struct drm_i915_gem_request base;
+       struct i915_request base;
 
        struct list_head link;
        unsigned long delay;
 };
 
-struct drm_i915_gem_request *
+struct i915_request *
 mock_request(struct intel_engine_cs *engine,
             struct i915_gem_context *context,
             unsigned long delay);
 
-bool mock_cancel_request(struct drm_i915_gem_request *request);
+bool mock_cancel_request(struct i915_request *request);
 
 #endif /* !__MOCK_REQUEST__ */
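(A rough illustration, not a hunk from this merge, of how the renamed mock helpers declared above are consumed; "engine" and "ctx" stand in for a mock engine and context set up elsewhere in the selftests:)

	/*
	 * Illustrative only: queue a mock request with an artificial
	 * delay, then cancel it if it has not yet been submitted to the
	 * mock engine.  The delay argument is in jiffies.
	 */
	struct i915_request *rq;

	rq = mock_request(engine, ctx, 2 * HZ);
	if (!rq)
		return -ENOMEM;

	i915_request_get(rq);
	__i915_request_add(rq, true);

	if (mock_cancel_request(rq))	/* true if it was still queued */
		pr_info("request cancelled before submission\n");

	i915_request_put(rq);
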
index 0b2ba46..70f0c25 100644 (file)
        INTEL_VGA_DEVICE(0x5A44, info), \
        INTEL_VGA_DEVICE(0x5A4C, info)
 
+/* ICL */
+#define INTEL_ICL_11_IDS(info) \
+       INTEL_VGA_DEVICE(0x8A50, info), \
+       INTEL_VGA_DEVICE(0x8A51, info), \
+       INTEL_VGA_DEVICE(0x8A5C, info), \
+       INTEL_VGA_DEVICE(0x8A5D, info), \
+       INTEL_VGA_DEVICE(0x8A52, info), \
+       INTEL_VGA_DEVICE(0x8A5A, info), \
+       INTEL_VGA_DEVICE(0x8A5B, info), \
+       INTEL_VGA_DEVICE(0x8A71, info), \
+       INTEL_VGA_DEVICE(0x8A70, info)
+
 #endif /* _I915_PCIIDS_H */
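(For context: the new INTEL_ICL_11_IDS() macro is intended to be expanded into the driver's PCI ID table the same way as the existing platform macros. A hedged sketch; the "intel_icelake_11_info" device-info name is a placeholder, not taken from this merge:)

	/*
	 * Illustrative: typical consumption of a platform ID macro in
	 * i915_pci.c.  Each INTEL_VGA_DEVICE() entry binds one PCI ID
	 * to the given device info pointer.
	 */
	static const struct pci_device_id pciidlist[] = {
		/* ... existing platforms ... */
		INTEL_ICL_11_IDS(&intel_icelake_11_info),
		{0, 0, 0}
	};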