/*-
 * Copyright 2003 Eric Anholt
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * ERIC ANHOLT BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
 * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <anholt@FreeBSD.org>
 *
 */

/** @file drm_irq.c
 * Support code for handling setup/teardown of interrupt handlers and
 * handing interrupt handlers off to the drivers.
 */

#include "drmP.h"
#include "drm.h"

static void drm_locked_task(void *context, int pending __unused);

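/*
 * Return the IRQ assigned to this device for the bus ID supplied in the
 * ioctl argument, or EINVAL if the bus ID does not match this device.
 */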
int drm_irq_by_busid(struct drm_device *dev, void *data,
                     struct drm_file *file_priv)
{
        drm_irq_busid_t *irq = data;

        if ((irq->busnum >> 8) != dev->pci_domain ||
            (irq->busnum & 0xff) != dev->pci_bus ||
            irq->devnum != dev->pci_slot ||
            irq->funcnum != dev->pci_func)
                return EINVAL;

        irq->irq = dev->irq;

        DRM_DEBUG("%d:%d:%d => IRQ %d\n",
                  irq->busnum, irq->devnum, irq->funcnum, irq->irq);

        return 0;
}

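/*
 * Wrapper that takes the per-device interrupt spinlock around the driver's
 * interrupt handler, serializing it against other users of dev->irq_lock.
 */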
#if defined(__FreeBSD__) && __FreeBSD_version >= 500000
static irqreturn_t
drm_irq_handler_wrap(DRM_IRQ_ARGS)
{
        struct drm_device *dev = arg;

        DRM_SPINLOCK(&dev->irq_lock);
        dev->driver.irq_handler(arg);
        DRM_SPINUNLOCK(&dev->irq_lock);
}
#endif

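/*
 * Install the interrupt handler for the device: calls the driver's
 * irq_preinstall hook, sets up the bus interrupt (FreeBSD) or establishes
 * the PCI interrupt (NetBSD/OpenBSD), then calls irq_postinstall.
 */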
int drm_irq_install(struct drm_device *dev)
{
        int retcode;
#ifdef __NetBSD__
        pci_intr_handle_t ih;
#endif

        if (dev->irq == 0 || dev->dev_private == NULL)
                return EINVAL;

        DRM_DEBUG( "%s: irq=%d\n", __FUNCTION__, dev->irq );

        DRM_LOCK();
        if (dev->irq_enabled) {
                DRM_UNLOCK();
                return EBUSY;
        }
        dev->irq_enabled = 1;

        dev->context_flag = 0;

        DRM_SPININIT(&dev->irq_lock, "DRM IRQ lock");

                                /* Before installing handler */
        dev->driver.irq_preinstall(dev);
        DRM_UNLOCK();

                                /* Install handler */
#ifdef __FreeBSD__
        dev->irqrid = 0;
        dev->irqr = bus_alloc_resource_any(dev->device, SYS_RES_IRQ,
                                      &dev->irqrid, RF_SHAREABLE);
        if (!dev->irqr) {
                retcode = ENOENT;
                goto err;
        }
#if __FreeBSD_version >= 700031
        retcode = bus_setup_intr(dev->device, dev->irqr,
                                 INTR_TYPE_TTY | INTR_MPSAFE,
                                 NULL, drm_irq_handler_wrap, dev, &dev->irqh);
#else
        retcode = bus_setup_intr(dev->device, dev->irqr,
                                 INTR_TYPE_TTY | INTR_MPSAFE,
                                 drm_irq_handler_wrap, dev, &dev->irqh);
#endif
        if (retcode != 0)
                goto err;
#elif defined(__NetBSD__) || defined(__OpenBSD__)
        if (pci_intr_map(&dev->pa, &ih) != 0) {
                retcode = ENOENT;
                goto err;
        }
        dev->irqh = pci_intr_establish(&dev->pa.pa_pc, ih, IPL_TTY,
            (irqreturn_t (*)(void *))dev->irq_handler, dev);
        if (!dev->irqh) {
                retcode = ENOENT;
                goto err;
        }
#endif

                                /* After installing handler */
        DRM_LOCK();
        dev->driver.irq_postinstall(dev);
        DRM_UNLOCK();

        TASK_INIT(&dev->locked_task, 0, drm_locked_task, dev);
        return 0;
err:
        DRM_LOCK();
        dev->irq_enabled = 0;
#ifdef __FreeBSD__
        if (dev->irqrid != 0) {
                bus_release_resource(dev->device, SYS_RES_IRQ, dev->irqrid,
                    dev->irqr);
                dev->irqrid = 0;
        }
#endif
        DRM_SPINUNINIT(&dev->irq_lock);
        DRM_UNLOCK();
        return retcode;
}

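/*
 * Tear down the interrupt handler installed by drm_irq_install(): calls the
 * driver's irq_uninstall hook and releases the bus/PCI interrupt resources.
 */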
int drm_irq_uninstall(struct drm_device *dev)
{
#ifdef __FreeBSD__
        int irqrid;
#endif

        if (!dev->irq_enabled)
                return EINVAL;

        dev->irq_enabled = 0;
#ifdef __FreeBSD__
        irqrid = dev->irqrid;
        dev->irqrid = 0;
#endif

        DRM_DEBUG( "%s: irq=%d\n", __FUNCTION__, dev->irq );

        dev->driver.irq_uninstall(dev);

#ifdef __FreeBSD__
        DRM_UNLOCK();
        bus_teardown_intr(dev->device, dev->irqr, dev->irqh);
        bus_release_resource(dev->device, SYS_RES_IRQ, irqrid, dev->irqr);
        DRM_LOCK();
#elif defined(__NetBSD__) || defined(__OpenBSD__)
        pci_intr_disestablish(&dev->pa.pa_pc, dev->irqh);
#endif
        DRM_SPINUNINIT(&dev->irq_lock);

        return 0;
}

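/*
 * DRM_IOCTL_CONTROL handler: dispatches userland requests to install or
 * uninstall the interrupt handler.
 */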
int drm_control(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
        drm_control_t *ctl = data;
        int err;

        switch ( ctl->func ) {
        case DRM_INST_HANDLER:
                /* Handle drivers whose DRM used to require IRQ setup but
                 * no longer does.
                 */
                if (!dev->driver.use_irq)
                        return 0;
                if (dev->if_version < DRM_IF_VERSION(1, 2) &&
                    ctl->irq != dev->irq)
                        return EINVAL;
                return drm_irq_install(dev);
        case DRM_UNINST_HANDLER:
                if (!dev->driver.use_irq)
                        return 0;
                DRM_LOCK();
                err = drm_irq_uninstall(dev);
                DRM_UNLOCK();
                return err;
        default:
                return EINVAL;
        }
}

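/*
 * Disable the vblank interrupt on any CRTC whose reference count has
 * dropped to zero.  Run from the vblank_disable_timer callout scheduled by
 * drm_vblank_put(), and directly at cleanup time.
 */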
static void vblank_disable_fn(void *arg)
{
        struct drm_device *dev = (struct drm_device *)arg;
        unsigned long irqflags;
        int i;

        for (i = 0; i < dev->num_crtcs; i++) {
                DRM_SPINLOCK_IRQSAVE(&dev->vbl_lock, irqflags);
                if (atomic_read(&dev->vblank_refcount[i]) == 0 &&
                    dev->vblank_enabled[i]) {
                        dev->driver.disable_vblank(dev, i);
                        dev->vblank_enabled[i] = 0;
                }
                DRM_SPINUNLOCK_IRQRESTORE(&dev->vbl_lock, irqflags);
        }
}

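/*
 * Return the current vblank count for a CRTC, adjusted by the offset
 * accumulated across modesets.
 */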
u32 drm_vblank_count(struct drm_device *dev, int crtc)
{
        return atomic_read(&dev->_vblank_count[crtc]) +
            dev->vblank_offset[crtc];
}

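/*
 * Take a reference on vblank events for a CRTC, enabling the vblank
 * interrupt if this is the first reference.
 */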
int drm_vblank_get(struct drm_device *dev, int crtc)
{
        unsigned long irqflags;
        int ret = 0;

        DRM_SPINLOCK_IRQSAVE(&dev->vbl_lock, irqflags);
        /* Going from 0->1 means we have to enable interrupts again */
        atomic_add_acq_int(&dev->vblank_refcount[crtc], 1);
        if (dev->vblank_refcount[crtc] == 1 &&
            !dev->vblank_enabled[crtc]) {
                ret = dev->driver.enable_vblank(dev, crtc);
                if (ret)
                        atomic_dec(&dev->vblank_refcount[crtc]);
                else
                        dev->vblank_enabled[crtc] = 1;
        }
        DRM_SPINUNLOCK_IRQRESTORE(&dev->vbl_lock, irqflags);

        return ret;
}

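/*
 * Drop a vblank reference; when the last reference goes away, schedule a
 * callout to disable the vblank interrupt after a short delay.
 */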
void drm_vblank_put(struct drm_device *dev, int crtc)
{
        /* Last user schedules interrupt disable */
        atomic_subtract_acq_int(&dev->vblank_refcount[crtc], 1);
        if (dev->vblank_refcount[crtc] == 0)
            callout_reset(&dev->vblank_disable_timer, jiffies + 5*DRM_HZ,
                (timeout_t *)vblank_disable_fn, (void *)dev);
}

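/*
 * Called from the driver's interrupt handler on each vblank: updates the
 * counter and wakes up anyone sleeping in drm_wait_vblank().
 */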
void drm_handle_vblank(struct drm_device *dev, int crtc)
{
        drm_update_vblank_count(dev, crtc);
        DRM_WAKEUP(&dev->vbl_queue[crtc]);
        drm_vbl_send_signals(dev, crtc);
}

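/*
 * Read the hardware vblank counter and fold the difference since the last
 * read into the software count, handling counter wraparound.
 */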
void drm_update_vblank_count(struct drm_device *dev, int crtc)
{
        unsigned long irqflags;
        u32 cur_vblank, diff;

        /*
         * Interrupts were disabled prior to this call, so deal with counter
         * wrap if needed.
         * NOTE!  It's possible we lost a full dev->max_vblank_count events
         * here if the register is small or we had vblank interrupts off for
         * a long time.
         */
        cur_vblank = dev->driver.get_vblank_counter(dev, crtc);
        DRM_SPINLOCK_IRQSAVE(&dev->vbl_lock, irqflags);
        if (cur_vblank < dev->last_vblank[crtc]) {
                diff = dev->max_vblank_count -
                        dev->last_vblank[crtc];
                diff += cur_vblank;
        } else {
                diff = cur_vblank - dev->last_vblank[crtc];
        }
        dev->last_vblank[crtc] = cur_vblank;
        DRM_SPINUNLOCK_IRQRESTORE(&dev->vbl_lock, irqflags);

        atomic_add(diff, &dev->_vblank_count[crtc]);
}

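/*
 * DRM_IOCTL_MODESET_CTL handler: records the hardware counter before a
 * modeset and adjusts the vblank offset afterwards, so the count seen by
 * userland appears continuous even if the hardware counter was reset.
 */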
int drm_modeset_ctl(struct drm_device *dev, void *data,
                    struct drm_file *file_priv)
{
        struct drm_modeset_ctl *modeset = data;
        int crtc, ret = 0;
        u32 new;

        crtc = modeset->crtc;
        if (crtc >= dev->num_crtcs) {
                ret = -EINVAL;
                goto out;
        }

        switch (modeset->cmd) {
        case _DRM_PRE_MODESET:
                dev->vblank_premodeset[crtc] =
                    dev->driver.get_vblank_counter(dev, crtc);
                break;
        case _DRM_POST_MODESET:
                new = dev->driver.get_vblank_counter(dev, crtc);
                dev->vblank_offset[crtc] = dev->vblank_premodeset[crtc] - new;
                break;
        default:
                ret = -EINVAL;
                break;
        }

out:
        return ret;
}

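/*
 * DRM_IOCTL_WAIT_VBLANK handler: sleeps until the requested vblank sequence
 * (relative or absolute) has passed on the given CRTC, then returns the
 * timestamp of completion.  Signal-based completion is currently disabled
 * and reports EINVAL.
 */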
int drm_wait_vblank(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
        drm_wait_vblank_t *vblwait = data;
        struct timeval now;
        int ret = 0;
        int flags, seq, crtc;

        if (!dev->irq_enabled)
                return EINVAL;

        if (vblwait->request.type &
            ~(_DRM_VBLANK_TYPES_MASK | _DRM_VBLANK_FLAGS_MASK)) {
                DRM_ERROR("Unsupported type value 0x%x, supported mask 0x%x\n",
                    vblwait->request.type,
                    (_DRM_VBLANK_TYPES_MASK | _DRM_VBLANK_FLAGS_MASK));
                return EINVAL;
        }

        flags = vblwait->request.type & _DRM_VBLANK_FLAGS_MASK;
        crtc = flags & _DRM_VBLANK_SECONDARY ? 1 : 0;

        if (crtc >= dev->num_crtcs)
                return EINVAL;

        drm_update_vblank_count(dev, crtc);
        seq = drm_vblank_count(dev, crtc);

        switch (vblwait->request.type & _DRM_VBLANK_TYPES_MASK) {
        case _DRM_VBLANK_RELATIVE:
                vblwait->request.sequence += seq;
                vblwait->request.type &= ~_DRM_VBLANK_RELATIVE;
        case _DRM_VBLANK_ABSOLUTE:
                break;
        default:
                return EINVAL;
        }

        if ((flags & _DRM_VBLANK_NEXTONMISS) &&
            (seq - vblwait->request.sequence) <= (1<<23)) {
                vblwait->request.sequence = seq + 1;
        }

        if (flags & _DRM_VBLANK_SIGNAL) {
#if 0 /* disabled */
                drm_vbl_sig_t *vbl_sig = malloc(sizeof(drm_vbl_sig_t), M_DRM,
                    M_NOWAIT | M_ZERO);
                if (vbl_sig == NULL)
                        return ENOMEM;

                vbl_sig->sequence = vblwait->request.sequence;
                vbl_sig->signo = vblwait->request.signal;
                vbl_sig->pid = DRM_CURRENTPID;

                vblwait->reply.sequence = atomic_read(&dev->vbl_received);

                DRM_SPINLOCK(&dev->irq_lock);
                TAILQ_INSERT_HEAD(&dev->vbl_sig_list, vbl_sig, link);
                DRM_SPINUNLOCK(&dev->irq_lock);
                ret = 0;
#endif
                ret = EINVAL;
        } else {
                unsigned long cur_vblank;

                DRM_LOCK();
                /* shared code returns -errno */

                ret = drm_vblank_get(dev, crtc);
                if (ret) {
                        DRM_UNLOCK();
                        return ret;
                }
                DRM_WAIT_ON(ret, dev->vbl_queue[crtc], 3 * DRM_HZ,
                            (((cur_vblank = drm_vblank_count(dev, crtc))
                              - vblwait->request.sequence) <= (1 << 23)));
                drm_vblank_put(dev, crtc);
                DRM_UNLOCK();

                microtime(&now);
                vblwait->reply.tval_sec = now.tv_sec;
                vblwait->reply.tval_usec = now.tv_usec;
        }

        return ret;
}

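/*
 * Free the per-CRTC vblank bookkeeping allocated by drm_vblank_init() and
 * stop the disable callout.
 */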
static void drm_vblank_cleanup(struct drm_device *dev)
{
        /* Bail if the driver didn't call drm_vblank_init() */
        if (dev->num_crtcs == 0)
            return;

        callout_stop(&dev->vblank_disable_timer);

        vblank_disable_fn((void *)dev);

        drm_free(dev->vbl_queue, sizeof(*dev->vbl_queue) * dev->num_crtcs,
            DRM_MEM_DRIVER);
        drm_free(dev->vbl_sigs, sizeof(*dev->vbl_sigs) * dev->num_crtcs,
            DRM_MEM_DRIVER);
        drm_free(dev->_vblank_count, sizeof(*dev->_vblank_count) *
            dev->num_crtcs, DRM_MEM_DRIVER);
        drm_free(dev->vblank_refcount, sizeof(*dev->vblank_refcount) *
            dev->num_crtcs, DRM_MEM_DRIVER);
        drm_free(dev->vblank_enabled, sizeof(*dev->vblank_enabled) *
            dev->num_crtcs, DRM_MEM_DRIVER);
        drm_free(dev->last_vblank, sizeof(*dev->last_vblank) * dev->num_crtcs,
            DRM_MEM_DRIVER);
        drm_free(dev->vblank_premodeset, sizeof(*dev->vblank_premodeset) *
            dev->num_crtcs, DRM_MEM_DRIVER);
        drm_free(dev->vblank_offset, sizeof(*dev->vblank_offset) * dev->num_crtcs,
            DRM_MEM_DRIVER);

        dev->num_crtcs = 0;
}

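/*
 * Allocate and initialize the per-CRTC vblank state (wait queues, counters,
 * reference counts).  Drivers that support vblank interrupts typically call
 * this at load time.
 */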
int drm_vblank_init(struct drm_device *dev, int num_crtcs)
{
        int i, ret = -ENOMEM;

        callout_init(&dev->vblank_disable_timer, 0);
        DRM_SPININIT(&dev->vbl_lock, "drm_vblk");
        atomic_set(&dev->vbl_signal_pending, 0);
        dev->num_crtcs = num_crtcs;

        dev->vbl_queue = drm_alloc(sizeof(wait_queue_head_t) * num_crtcs,
            DRM_MEM_DRIVER);
        if (!dev->vbl_queue)
            goto err;

        dev->vbl_sigs = drm_alloc(sizeof(struct drm_vbl_sig) * num_crtcs,
            DRM_MEM_DRIVER);
        if (!dev->vbl_sigs)
            goto err;

        dev->_vblank_count = drm_alloc(sizeof(atomic_t) * num_crtcs,
            DRM_MEM_DRIVER);
        if (!dev->_vblank_count)
            goto err;

        dev->vblank_refcount = drm_alloc(sizeof(atomic_t) * num_crtcs,
            DRM_MEM_DRIVER);
        if (!dev->vblank_refcount)
            goto err;

        dev->vblank_enabled = drm_calloc(num_crtcs, sizeof(int),
            DRM_MEM_DRIVER);
        if (!dev->vblank_enabled)
            goto err;

        dev->last_vblank = drm_calloc(num_crtcs, sizeof(u32), DRM_MEM_DRIVER);
        if (!dev->last_vblank)
            goto err;

        dev->vblank_premodeset = drm_calloc(num_crtcs, sizeof(u32),
            DRM_MEM_DRIVER);
        if (!dev->vblank_premodeset)
            goto err;

        dev->vblank_offset = drm_calloc(num_crtcs, sizeof(u32), DRM_MEM_DRIVER);
        if (!dev->vblank_offset)
            goto err;

        /* Zero per-crtc vblank stuff */
        for (i = 0; i < num_crtcs; i++) {
                DRM_INIT_WAITQUEUE(&dev->vbl_queue[i]);
                TAILQ_INIT(&dev->vbl_sigs[i]);
                atomic_set(&dev->_vblank_count[i], 0);
                atomic_set(&dev->vblank_refcount[i], 0);
        }

        return 0;

err:
        drm_vblank_cleanup(dev);
        return ret;
}

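/*
 * Signal delivery on vblank is not currently supported; the disabled
 * implementation below is kept for reference.
 */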
void drm_vbl_send_signals(struct drm_device *dev, int crtc)
{
}

#if 0 /* disabled */
void drm_vbl_send_signals(struct drm_device *dev, int crtc )
{
        drm_vbl_sig_t *vbl_sig;
        unsigned int vbl_seq = atomic_read( &dev->vbl_received );
        struct proc *p;

        vbl_sig = TAILQ_FIRST(&dev->vbl_sig_list);
        while (vbl_sig != NULL) {
                drm_vbl_sig_t *next = TAILQ_NEXT(vbl_sig, link);

                if ( ( vbl_seq - vbl_sig->sequence ) <= (1<<23) ) {
                        p = pfind(vbl_sig->pid);
                        if (p != NULL)
                                psignal(p, vbl_sig->signo);

                        TAILQ_REMOVE(&dev->vbl_sig_list, vbl_sig, link);
                        DRM_FREE(vbl_sig, sizeof(*vbl_sig));
                }
                vbl_sig = next;
        }
}
#endif

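/*
 * Taskqueue handler: acquires the hardware lock on behalf of the kernel,
 * runs the deferred callback set by drm_locked_tasklet(), then releases
 * the hardware lock.
 */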
static void drm_locked_task(void *context, int pending __unused)
{
        struct drm_device *dev = context;

        DRM_LOCK();
        for (;;) {
                int ret;

                if (drm_lock_take(&dev->lock.hw_lock->lock,
                    DRM_KERNEL_CONTEXT))
                {
                        dev->lock.file_priv = NULL; /* kernel owned */
                        dev->lock.lock_time = jiffies;
                        atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
                        break;  /* Got lock */
                }

                /* Contention */
#if defined(__FreeBSD__) && __FreeBSD_version > 500000
                ret = mtx_sleep((void *)&dev->lock.lock_queue, &dev->dev_lock,
                    PZERO | PCATCH, "drmlk2", 0);
#else
                ret = tsleep((void *)&dev->lock.lock_queue, PZERO | PCATCH,
                    "drmlk2", 0);
#endif
                if (ret != 0) {
                        DRM_UNLOCK();
                        return;
                }
        }
        DRM_UNLOCK();

        dev->locked_task_call(dev);

        drm_lock_free(dev, &dev->lock.hw_lock->lock, DRM_KERNEL_CONTEXT);
}

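/*
 * Queue a callback to be run with the hardware lock held, from the software
 * interrupt taskqueue.
 */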
void
drm_locked_tasklet(struct drm_device *dev,
                   void (*tasklet)(struct drm_device *dev))
{
        dev->locked_task_call = tasklet;
        taskqueue_enqueue(taskqueue_swi, &dev->locked_task);
}