Backmerge tag 'v4.11-rc4' into drm-next
author Dave Airlie <airlied@redhat.com>
Tue, 28 Mar 2017 07:34:19 +0000 (17:34 +1000)
committer Dave Airlie <airlied@redhat.com>
Tue, 28 Mar 2017 07:34:19 +0000 (17:34 +1000)
Linux 4.11-rc4

The i915 GVT team needs the rc4 code as a base for further work.

1  2 
MAINTAINERS
drivers/gpu/drm/drm_fb_helper.c
drivers/gpu/drm/exynos/exynos_drm_crtc.c
drivers/gpu/drm/exynos/exynos_drm_crtc.h
drivers/gpu/drm/exynos/exynos_drm_dsi.c
drivers/gpu/drm/exynos/exynos_drm_vidi.c
drivers/gpu/drm/i915/gvt/cmd_parser.c
drivers/gpu/drm/i915/gvt/scheduler.c
drivers/gpu/drm/i915/i915_irq.c

diff --cc MAINTAINERS
Simple merge
Simple merge
@@@ -2633,23 -2668,26 +2665,23 @@@ static int shadow_workload_ring_buffer(
        /* head > tail --> copy head <-> top */
        if (gma_head > gma_tail) {
                ret = copy_gma_to_hva(vgpu, vgpu->gtt.ggtt_mm,
 -                              gma_head, gma_top,
 -                              workload->shadow_ring_buffer_va);
 +                                    gma_head, gma_top, cs);
-               if (ret < 0) {
-                       gvt_err("fail to copy guest ring buffer\n");
+               if (ret) {
+                       gvt_vgpu_err("fail to copy guest ring buffer\n");
                        return ret;
                }
 -              copy_len = gma_top - gma_head;
 +              cs += ret / sizeof(u32);
                gma_head = workload->rb_start;
        }
  
        /* copy head or start <-> tail */
 -      ret = copy_gma_to_hva(vgpu, vgpu->gtt.ggtt_mm,
 -                      gma_head, gma_tail,
 -                      workload->shadow_ring_buffer_va + copy_len);
 +      ret = copy_gma_to_hva(vgpu, vgpu->gtt.ggtt_mm, gma_head, gma_tail, cs);
-       if (ret < 0) {
-               gvt_err("fail to copy guest ring buffer\n");
+       if (ret) {
+               gvt_vgpu_err("fail to copy guest ring buffer\n");
                return ret;
        }
 -      ring->tail += workload->rb_len;
 -      intel_ring_advance(ring);
 +      cs += ret / sizeof(u32);
 +      intel_ring_advance(workload->req, cs);
        return 0;
  }
  
@@@ -224,7 -244,10 +244,10 @@@ out
                workload->status = ret;
  
        if (!IS_ERR_OR_NULL(rq))
 -              i915_add_request_no_flush(rq);
 +              i915_add_request(rq);
+       else
+               engine->context_unpin(engine, shadow_ctx);
        mutex_unlock(&dev_priv->drm.struct_mutex);
        return ret;
  }
@@@ -1098,12 -1061,18 +1098,13 @@@ static u32 vlv_wa_c0_ei(struct drm_i915
                return 0;
  
        vlv_c0_read(dev_priv, &now);
 -      if (now.cz_clock == 0)
 -              return 0;
  
 -      if (prev->cz_clock) {
 +      if (prev->ktime) {
                u64 time, c0;
 -              unsigned int mul;
 +              u32 render, media;
  
 -              mul = VLV_CZ_CLOCK_TO_MILLI_SEC * 100; /* scale to threshold% */
 -              if (I915_READ(VLV_COUNTER_CONTROL) & VLV_COUNT_RANGE_HIGH)
 -                      mul <<= 8;
 +              time = ktime_us_delta(now.ktime, prev->ktime);
 -              time = now.cz_clock - prev->cz_clock;
                time *= dev_priv->czclk_freq;
  
                /* Workload can be split between render + media,