Merge branch 'linux-4.12' of git://github.com/skeggsb/linux into drm-next
author    Dave Airlie <airlied@redhat.com>
Fri, 12 May 2017 04:25:22 +0000 (14:25 +1000)
committer Dave Airlie <airlied@redhat.com>
Fri, 12 May 2017 04:25:22 +0000 (14:25 +1000)
Quite a few patches, but not much code changed:
- Fixes a regression from atomic when only the source rect of a plane
  changes (i.e. xrandr --right-of)
- Fixes another issue where atomic changed behaviour underneath us,
  potentially causing laggy cursor position updates
- Fixes a bunch of races in the thermal code, which lead to random
  lockups for a lot of users (a simplified sketch of the rescheduling
  pattern the timer fixes adopt follows this list)
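
Several of the timer fixes revolve around making nvkm_timer_alarm() safe to
call for an alarm that may already be pending: it now removes the alarm from
the pending list under tmr->lock before (re)inserting it, which is what lets
the therm code drop its list_empty(&alarm->head) workarounds (see the diff
below).  A minimal, hedged sketch of that pattern, with made-up toy_* names
rather than the real nvkm structures:

	#include <linux/list.h>
	#include <linux/spinlock.h>
	#include <linux/types.h>

	struct toy_timer {
		spinlock_t lock;
		struct list_head alarms;	/* pending alarms, soonest first */
	};

	struct toy_alarm {
		struct list_head head;		/* INIT_LIST_HEAD() before first use */
		u64 timestamp;
	};

	/* Reschedule (nsec != 0) or cancel (nsec == 0) an alarm. */
	static void
	toy_timer_alarm(struct toy_timer *tmr, u64 now, u64 nsec,
			struct toy_alarm *alarm)
	{
		struct toy_alarm *list;
		unsigned long flags;

		spin_lock_irqsave(&tmr->lock, flags);

		/* Safe whether or not the alarm is currently pending, so
		 * callers can reschedule unconditionally instead of doing
		 * racy list_empty(&alarm->head) checks without the lock.
		 */
		list_del_init(&alarm->head);

		if (nsec) {
			/* Insert in soonest-first order. */
			alarm->timestamp = now + nsec;
			list_for_each_entry(list, &tmr->alarms, head) {
				if (list->timestamp > alarm->timestamp)
					break;
			}
			list_add_tail(&alarm->head, &list->head);
		}

		spin_unlock_irqrestore(&tmr->lock, flags);
	}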

* 'linux-4.12' of git://github.com/skeggsb/linux:
  drm/nouveau/therm: remove ineffective workarounds for alarm bugs
  drm/nouveau/tmr: avoid processing completed alarms when adding a new one
  drm/nouveau/tmr: fix corruption of the pending list when rescheduling an alarm
  drm/nouveau/tmr: handle races with hw when updating the next alarm time
  drm/nouveau/tmr: ack interrupt before processing alarms
  drm/nouveau/core: fix static checker warning
  drm/nouveau/fb/ram/gf100-: remove 0x10f200 read
  drm/nouveau/kms/nv50: skip core channel cursor update on position-only changes
  drm/nouveau/kms/nv50: fix source-rect-only plane updates
  drm/nouveau/kms/nv50: remove pointless argument to window atomic_check_acquire()

drivers/gpu/drm/nouveau/nv50_display.c
drivers/gpu/drm/nouveau/nvkm/core/object.c
drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgf100.c
drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c
drivers/gpu/drm/nouveau/nvkm/subdev/therm/fan.c
drivers/gpu/drm/nouveau/nvkm/subdev/therm/fantog.c
drivers/gpu/drm/nouveau/nvkm/subdev/therm/temp.c
drivers/gpu/drm/nouveau/nvkm/subdev/timer/base.c
drivers/gpu/drm/nouveau/nvkm/subdev/timer/nv04.c

diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index 0e58537..a766324 100644
@@ -831,8 +831,7 @@ nv50_wndw_atomic_check_release(struct nv50_wndw *wndw,
 static int
 nv50_wndw_atomic_check_acquire(struct nv50_wndw *wndw,
                               struct nv50_wndw_atom *asyw,
-                              struct nv50_head_atom *asyh,
-                              u32 pflip_flags)
+                              struct nv50_head_atom *asyh)
 {
        struct nouveau_framebuffer *fb = nouveau_framebuffer(asyw->state.fb);
        struct nouveau_drm *drm = nouveau_drm(wndw->plane.dev);
@@ -848,7 +847,10 @@ nv50_wndw_atomic_check_acquire(struct nv50_wndw *wndw,
        asyw->image.h = fb->base.height;
        asyw->image.kind = (fb->nvbo->tile_flags & 0x0000ff00) >> 8;
 
-       asyw->interval = pflip_flags & DRM_MODE_PAGE_FLIP_ASYNC ? 0 : 1;
+       if (asyh->state.pageflip_flags & DRM_MODE_PAGE_FLIP_ASYNC)
+               asyw->interval = 0;
+       else
+               asyw->interval = 1;
 
        if (asyw->image.kind) {
                asyw->image.layout = 0;
@@ -887,7 +889,6 @@ nv50_wndw_atomic_check(struct drm_plane *plane, struct drm_plane_state *state)
        struct nv50_head_atom *harm = NULL, *asyh = NULL;
        bool varm = false, asyv = false, asym = false;
        int ret;
-       u32 pflip_flags = 0;
 
        NV_ATOMIC(drm, "%s atomic_check\n", plane->name);
        if (asyw->state.crtc) {
@@ -896,7 +897,6 @@ nv50_wndw_atomic_check(struct drm_plane *plane, struct drm_plane_state *state)
                        return PTR_ERR(asyh);
                asym = drm_atomic_crtc_needs_modeset(&asyh->state);
                asyv = asyh->state.active;
-               pflip_flags = asyh->state.pageflip_flags;
        }
 
        if (armw->state.crtc) {
@@ -912,12 +912,9 @@ nv50_wndw_atomic_check(struct drm_plane *plane, struct drm_plane_state *state)
                if (memcmp(&armw->point, &asyw->point, sizeof(asyw->point)))
                        asyw->set.point = true;
 
-               if (!varm || asym || armw->state.fb != asyw->state.fb) {
-                       ret = nv50_wndw_atomic_check_acquire(
-                                       wndw, asyw, asyh, pflip_flags);
-                       if (ret)
-                               return ret;
-               }
+               ret = nv50_wndw_atomic_check_acquire(wndw, asyw, asyh);
+               if (ret)
+                       return ret;
        } else
        if (varm) {
                nv50_wndw_atomic_check_release(wndw, asyw, harm);
@@ -1122,9 +1119,13 @@ static void
 nv50_curs_prepare(struct nv50_wndw *wndw, struct nv50_head_atom *asyh,
                  struct nv50_wndw_atom *asyw)
 {
-       asyh->curs.handle = nv50_disp(wndw->plane.dev)->mast.base.vram.handle;
-       asyh->curs.offset = asyw->image.offset;
-       asyh->set.curs = asyh->curs.visible;
+       u32 handle = nv50_disp(wndw->plane.dev)->mast.base.vram.handle;
+       u32 offset = asyw->image.offset;
+       if (asyh->curs.handle != handle || asyh->curs.offset != offset) {
+               asyh->curs.handle = handle;
+               asyh->curs.offset = offset;
+               asyh->set.curs = asyh->curs.visible;
+       }
 }
 
 static void
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/object.c b/drivers/gpu/drm/nouveau/nvkm/core/object.c
index 89d2e9d..acd76fd 100644
@@ -295,7 +295,7 @@ nvkm_object_ctor(const struct nvkm_object_func *func,
        INIT_LIST_HEAD(&object->head);
        INIT_LIST_HEAD(&object->tree);
        RB_CLEAR_NODE(&object->node);
-       WARN_ON(oclass->engine && !object->engine);
+       WARN_ON(IS_ERR(object->engine));
 }
 
 int
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgf100.c
index c639759..4a9bd4f 100644
@@ -638,7 +638,6 @@ gf100_ram_ctor(const struct nvkm_ram_func *func, struct nvkm_fb *fb,
                        return ret;
        }
 
-       ram->ranks = (nvkm_rd32(device, 0x10f200) & 0x00000004) ? 2 : 1;
        return 0;
 }
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c
index df949fa..be691a7 100644
@@ -146,7 +146,7 @@ nvkm_therm_update(struct nvkm_therm *therm, int mode)
                poll = false;
        }
 
-       if (list_empty(&therm->alarm.head) && poll)
+       if (poll)
                nvkm_timer_alarm(tmr, 1000000000ULL, &therm->alarm);
        spin_unlock_irqrestore(&therm->lock, flags);
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/fan.c b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/fan.c
index 91198d7..e2fecce 100644
@@ -83,7 +83,7 @@ nvkm_fan_update(struct nvkm_fan *fan, bool immediate, int target)
        spin_unlock_irqrestore(&fan->lock, flags);
 
        /* schedule next fan update, if not at target speed already */
-       if (list_empty(&fan->alarm.head) && target != duty) {
+       if (target != duty) {
                u16 bump_period = fan->bios.bump_period;
                u16 slow_down_period = fan->bios.slow_down_period;
                u64 delay;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/fantog.c b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/fantog.c
index 59701b7..ff9fbe7 100644
@@ -53,7 +53,7 @@ nvkm_fantog_update(struct nvkm_fantog *fan, int percent)
        duty = !nvkm_gpio_get(gpio, 0, DCB_GPIO_FAN, 0xff);
        nvkm_gpio_set(gpio, 0, DCB_GPIO_FAN, 0xff, duty);
 
-       if (list_empty(&fan->alarm.head) && percent != (duty * 100)) {
+       if (percent != (duty * 100)) {
                u64 next_change = (percent * fan->period_us) / 100;
                if (!duty)
                        next_change = fan->period_us - next_change;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/temp.c b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/temp.c
index b9703c0..9a79e91 100644
@@ -185,7 +185,7 @@ alarm_timer_callback(struct nvkm_alarm *alarm)
        spin_unlock_irqrestore(&therm->sensor.alarm_program_lock, flags);
 
        /* schedule the next poll in one second */
-       if (therm->func->temp_get(therm) >= 0 && list_empty(&alarm->head))
+       if (therm->func->temp_get(therm) >= 0)
                nvkm_timer_alarm(tmr, 1000000000ULL, alarm);
 }
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/timer/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/timer/base.c
index 07dc82b..f2a86ea 100644
@@ -36,23 +36,29 @@ nvkm_timer_alarm_trigger(struct nvkm_timer *tmr)
        unsigned long flags;
        LIST_HEAD(exec);
 
-       /* move any due alarms off the pending list */
+       /* Process pending alarms. */
        spin_lock_irqsave(&tmr->lock, flags);
        list_for_each_entry_safe(alarm, atemp, &tmr->alarms, head) {
-               if (alarm->timestamp <= nvkm_timer_read(tmr))
-                       list_move_tail(&alarm->head, &exec);
+               /* Have we hit the earliest alarm that hasn't gone off? */
+               if (alarm->timestamp > nvkm_timer_read(tmr)) {
+                       /* Schedule it.  If we didn't race, we're done. */
+                       tmr->func->alarm_init(tmr, alarm->timestamp);
+                       if (alarm->timestamp > nvkm_timer_read(tmr))
+                               break;
+               }
+
+               /* Move to completed list.  We'll drop the lock before
+                * executing the callback so it can reschedule itself.
+                */
+               list_move_tail(&alarm->head, &exec);
        }
 
-       /* reschedule interrupt for next alarm time */
-       if (!list_empty(&tmr->alarms)) {
-               alarm = list_first_entry(&tmr->alarms, typeof(*alarm), head);
-               tmr->func->alarm_init(tmr, alarm->timestamp);
-       } else {
+       /* Shut down interrupt if no more pending alarms. */
+       if (list_empty(&tmr->alarms))
                tmr->func->alarm_fini(tmr);
-       }
        spin_unlock_irqrestore(&tmr->lock, flags);
 
-       /* execute any pending alarm handlers */
+       /* Execute completed callbacks. */
        list_for_each_entry_safe(alarm, atemp, &exec, head) {
                list_del_init(&alarm->head);
                alarm->func(alarm);
@@ -65,24 +71,37 @@ nvkm_timer_alarm(struct nvkm_timer *tmr, u32 nsec, struct nvkm_alarm *alarm)
        struct nvkm_alarm *list;
        unsigned long flags;
 
-       alarm->timestamp = nvkm_timer_read(tmr) + nsec;
-
-       /* append new alarm to list, in soonest-alarm-first order */
+       /* Remove alarm from pending list.
+        *
+        * This both protects against the corruption of the list,
+        * and implements alarm rescheduling/cancellation.
+        */
        spin_lock_irqsave(&tmr->lock, flags);
-       if (!nsec) {
-               if (!list_empty(&alarm->head))
-                       list_del(&alarm->head);
-       } else {
+       list_del_init(&alarm->head);
+
+       if (nsec) {
+               /* Insert into pending list, ordered earliest to latest. */
+               alarm->timestamp = nvkm_timer_read(tmr) + nsec;
                list_for_each_entry(list, &tmr->alarms, head) {
                        if (list->timestamp > alarm->timestamp)
                                break;
                }
+
                list_add_tail(&alarm->head, &list->head);
+
+               /* Update HW if this is now the earliest alarm. */
+               list = list_first_entry(&tmr->alarms, typeof(*list), head);
+               if (list == alarm) {
+                       tmr->func->alarm_init(tmr, alarm->timestamp);
+                       /* This shouldn't happen if callers aren't stupid.
+                        *
+                        * Worst case scenario is that it'll take roughly
+                        * 4 seconds for the next alarm to trigger.
+                        */
+                       WARN_ON(alarm->timestamp <= nvkm_timer_read(tmr));
+               }
        }
        spin_unlock_irqrestore(&tmr->lock, flags);
-
-       /* process pending alarms */
-       nvkm_timer_alarm_trigger(tmr);
 }
 
 void
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/timer/nv04.c b/drivers/gpu/drm/nouveau/nvkm/subdev/timer/nv04.c
index 7b9ce87..7f48249 100644
@@ -76,8 +76,8 @@ nv04_timer_intr(struct nvkm_timer *tmr)
        u32 stat = nvkm_rd32(device, NV04_PTIMER_INTR_0);
 
        if (stat & 0x00000001) {
-               nvkm_timer_alarm_trigger(tmr);
                nvkm_wr32(device, NV04_PTIMER_INTR_0, 0x00000001);
+               nvkm_timer_alarm_trigger(tmr);
                stat &= ~0x00000001;
        }