/**
 * \file drm_irq.c
 * IRQ support
 *
 * \author Rickard E. (Rik) Faith <faith@valinux.com>
 * \author Gareth Hughes <gareth@valinux.com>
 */

/*
 * Created: Fri Mar 19 14:30:16 1999 by faith@valinux.com
 *
 * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include "drmP.h"

#include <linux/interrupt.h>    /* For task queue support */

/**
 * Get interrupt from bus id.
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg user argument, pointing to a drm_irq_busid structure.
 * \return zero on success or a negative number on failure.
 *
 * Finds the PCI device with the specified bus id and gets its IRQ number.
 * This IOCTL is deprecated, and will now return EINVAL for any busid not equal
 * to that of the device to which this DRM instance is attached.
 */
int drm_irq_by_busid(struct drm_device *dev, void *data,
                     struct drm_file *file_priv)
{
        struct drm_irq_busid *p = data;

        if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
                return -EINVAL;

        if ((p->busnum >> 8) != drm_get_pci_domain(dev) ||
            (p->busnum & 0xff) != dev->pdev->bus->number ||
            p->devnum != PCI_SLOT(dev->pdev->devfn) ||
            p->funcnum != PCI_FUNC(dev->pdev->devfn))
                return -EINVAL;

        p->irq = dev->irq;

        DRM_DEBUG("%d:%d:%d => IRQ %d\n", p->busnum, p->devnum, p->funcnum,
                  p->irq);

        return 0;
}
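
/*
 * Example (illustrative sketch only): user space reaches this path through
 * the DRM_IOCTL_IRQ_BUSID ioctl.  The device path, bus numbers and error
 * handling below are placeholder assumptions, not something this file
 * defines; the busid must match the device the node is bound to.
 *
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <sys/ioctl.h>
 *	#include "drm.h"
 *
 *	int print_irq(void)
 *	{
 *		struct drm_irq_busid p = { .busnum = 1, .devnum = 0, .funcnum = 0 };
 *		int fd = open("/dev/dri/card0", O_RDWR);
 *
 *		if (fd < 0 || ioctl(fd, DRM_IOCTL_IRQ_BUSID, &p) < 0)
 *			return -1;
 *		printf("IRQ %d\n", p.irq);
 *		return 0;
 *	}
 */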

static void vblank_disable_fn(unsigned long arg)
{
        struct drm_device *dev = (struct drm_device *)arg;
        unsigned long irqflags;
        int i;

        for (i = 0; i < dev->num_crtcs; i++) {
                spin_lock_irqsave(&dev->vbl_lock, irqflags);
                if (atomic_read(&dev->vblank_refcount[i]) == 0 &&
                    dev->vblank_enabled[i]) {
                        dev->driver->disable_vblank(dev, i);
                        dev->vblank_enabled[i] = 0;
                }
                spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
        }
}

static void drm_vblank_cleanup(struct drm_device *dev)
{
        del_timer(&dev->vblank_disable_timer);

        vblank_disable_fn((unsigned long)dev);

        if (dev->vbl_queue)
                drm_free(dev->vbl_queue, sizeof(*dev->vbl_queue) * dev->num_crtcs,
                         DRM_MEM_DRIVER);

        if (dev->vbl_sigs)
                drm_free(dev->vbl_sigs, sizeof(*dev->vbl_sigs) * dev->num_crtcs,
                         DRM_MEM_DRIVER);

        if (dev->_vblank_count)
                drm_free(dev->_vblank_count, sizeof(*dev->_vblank_count) *
                         dev->num_crtcs, DRM_MEM_DRIVER);

        if (dev->vblank_refcount)
                drm_free(dev->vblank_refcount, sizeof(*dev->vblank_refcount) *
                         dev->num_crtcs, DRM_MEM_DRIVER);

        if (dev->vblank_enabled)
                drm_free(dev->vblank_enabled, sizeof(*dev->vblank_enabled) *
                         dev->num_crtcs, DRM_MEM_DRIVER);

        if (dev->last_vblank)
                drm_free(dev->last_vblank, sizeof(*dev->last_vblank) * dev->num_crtcs,
                         DRM_MEM_DRIVER);

        if (dev->vblank_premodeset)
                drm_free(dev->vblank_premodeset, sizeof(*dev->vblank_premodeset) *
                         dev->num_crtcs, DRM_MEM_DRIVER);

        if (dev->vblank_offset)
                drm_free(dev->vblank_offset, sizeof(*dev->vblank_offset) * dev->num_crtcs,
                         DRM_MEM_DRIVER);
}

int drm_vblank_init(struct drm_device *dev, int num_crtcs)
{
        int i, ret = -ENOMEM;

        setup_timer(&dev->vblank_disable_timer, vblank_disable_fn,
                    (unsigned long)dev);
        spin_lock_init(&dev->vbl_lock);
        atomic_set(&dev->vbl_signal_pending, 0);
        dev->num_crtcs = num_crtcs;

        dev->vbl_queue = drm_alloc(sizeof(wait_queue_head_t) * num_crtcs,
                                   DRM_MEM_DRIVER);
        if (!dev->vbl_queue)
                goto err;

        dev->vbl_sigs = drm_alloc(sizeof(struct list_head) * num_crtcs,
                                  DRM_MEM_DRIVER);
        if (!dev->vbl_sigs)
                goto err;

        dev->_vblank_count = drm_alloc(sizeof(atomic_t) * num_crtcs,
                                       DRM_MEM_DRIVER);
        if (!dev->_vblank_count)
                goto err;

        dev->vblank_refcount = drm_alloc(sizeof(atomic_t) * num_crtcs,
                                         DRM_MEM_DRIVER);
        if (!dev->vblank_refcount)
                goto err;

        dev->vblank_enabled = drm_calloc(num_crtcs, sizeof(int),
                                         DRM_MEM_DRIVER);
        if (!dev->vblank_enabled)
                goto err;

        dev->last_vblank = drm_calloc(num_crtcs, sizeof(u32), DRM_MEM_DRIVER);
        if (!dev->last_vblank)
                goto err;

        dev->vblank_premodeset = drm_calloc(num_crtcs, sizeof(u32),
                                            DRM_MEM_DRIVER);
        if (!dev->vblank_premodeset)
                goto err;

        dev->vblank_offset = drm_calloc(num_crtcs, sizeof(u32), DRM_MEM_DRIVER);
        if (!dev->vblank_offset)
                goto err;

        /* Zero per-crtc vblank stuff */
        for (i = 0; i < num_crtcs; i++) {
                init_waitqueue_head(&dev->vbl_queue[i]);
                INIT_LIST_HEAD(&dev->vbl_sigs[i]);
                atomic_set(&dev->_vblank_count[i], 0);
                atomic_set(&dev->vblank_refcount[i], 0);
        }

        return 0;

err:
        drm_vblank_cleanup(dev);
        return ret;
}
EXPORT_SYMBOL(drm_vblank_init);
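
/*
 * Example (illustrative sketch only): drivers call drm_vblank_init() once at
 * load time with the number of CRTCs the hardware exposes; the two-CRTC
 * count and the driver name below are placeholder assumptions.
 *
 *	static int foo_driver_load(struct drm_device *dev, unsigned long flags)
 *	{
 *		int ret;
 *
 *		ret = drm_vblank_init(dev, 2);	// two CRTCs assumed
 *		if (ret)
 *			return ret;
 *
 *		return drm_hotplug_init(dev);
 *	}
 */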

int drm_wait_hotplug(struct drm_device *dev, void *data,
                     struct drm_file *file_priv)
{
        union drm_wait_hotplug *hotplugwait = data;
        struct timeval now;
        int ret = 0;
        unsigned int flags;

        if ((!dev->irq) || (!dev->irq_enabled))
                return -EINVAL;

        flags = hotplugwait->request.type;

        if (flags & _DRM_HOTPLUG_SIGNAL) {
                unsigned long irqflags;
                struct list_head *hotplug_sigs = dev->hotplug_sigs;
                struct drm_hotplug_sig *hotplug_sig;

                hotplug_sig = drm_calloc(1, sizeof(struct drm_hotplug_sig),
                                         DRM_MEM_DRIVER);
                if (!hotplug_sig)
                        return -ENOMEM;

                atomic_inc(&dev->hotplug_signal_pending);

                hotplug_sig->info.si_signo = hotplugwait->request.signal;
                hotplug_sig->task = current;
                hotplug_sig->counter =
                        hotplugwait->reply.counter =
                                        dev->mode_config.hotplug_counter;

                spin_lock_irqsave(&dev->hotplug_lock, irqflags);

                list_add_tail(&hotplug_sig->head, hotplug_sigs);

                spin_unlock_irqrestore(&dev->hotplug_lock, irqflags);
        } else {
                int cur_hotplug = dev->mode_config.hotplug_counter;

                DRM_WAIT_ON(ret, dev->hotplug_queue, 3 * DRM_HZ,
                            dev->mode_config.hotplug_counter > cur_hotplug);

                do_gettimeofday(&now);

                hotplugwait->reply.tval_sec = now.tv_sec;
                hotplugwait->reply.tval_usec = now.tv_usec;
                hotplugwait->reply.counter = dev->mode_config.hotplug_counter;
        }

        return ret;
}
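
/*
 * Example (illustrative sketch only): a user-space client can block until
 * the next hotplug event by issuing the corresponding wait ioctl with no
 * signal flag set; the ioctl name used below is an assumption and should be
 * checked against this tree's drm.h.
 *
 *	union drm_wait_hotplug hw;
 *
 *	memset(&hw, 0, sizeof(hw));
 *	if (ioctl(fd, DRM_IOCTL_WAIT_HOTPLUG, &hw) == 0)
 *		printf("hotplug counter now %d\n", hw.reply.counter);
 */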

static void drm_hotplug_cleanup(struct drm_device *dev)
{
        if (dev->hotplug_sigs)
                drm_free(dev->hotplug_sigs, sizeof(*dev->hotplug_sigs),
                         DRM_MEM_DRIVER);
}
EXPORT_SYMBOL(drm_hotplug_cleanup);

int drm_hotplug_init(struct drm_device *dev)
{
        spin_lock_init(&dev->hotplug_lock);
        atomic_set(&dev->hotplug_signal_pending, 0);

        dev->hotplug_sigs = drm_alloc(sizeof(struct list_head), DRM_MEM_DRIVER);
        if (!dev->hotplug_sigs)
                return -ENOMEM;

        INIT_LIST_HEAD(dev->hotplug_sigs);
        init_waitqueue_head(&dev->hotplug_queue);

        return 0;
}
EXPORT_SYMBOL(drm_hotplug_init);

/**
 * Install IRQ handler.
 *
 * \param dev DRM device.
 *
 * Initializes the IRQ related data. Installs the handler, calling the driver
 * \c drm_driver_irq_preinstall() and \c drm_driver_irq_postinstall() functions
 * before and after the installation.
 */
int drm_irq_install(struct drm_device * dev)
{
        int ret = 0;
        unsigned long sh_flags = 0;

        if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
                return -EINVAL;

        if (dev->irq == 0)
                return -EINVAL;

        mutex_lock(&dev->struct_mutex);

        /* Driver must have been initialized */
        if (!dev->dev_private) {
                mutex_unlock(&dev->struct_mutex);
                return -EINVAL;
        }

        if (dev->irq_enabled) {
                mutex_unlock(&dev->struct_mutex);
                return 0;
        }
        dev->irq_enabled = 1;
        mutex_unlock(&dev->struct_mutex);

        DRM_DEBUG("irq=%d\n", dev->irq);

        /* Before installing handler */
        dev->driver->irq_preinstall(dev);

        /* Install handler */
        if (drm_core_check_feature(dev, DRIVER_IRQ_SHARED))
                sh_flags = IRQF_SHARED;

        ret = request_irq(dev->irq, dev->driver->irq_handler,
                          sh_flags, dev->devname, dev);
        if (ret < 0) {
                mutex_lock(&dev->struct_mutex);
                dev->irq_enabled = 0;
                mutex_unlock(&dev->struct_mutex);
                return ret;
        }

        /* After installing handler */
        ret = dev->driver->irq_postinstall(dev);
        if (ret < 0) {
                mutex_lock(&dev->struct_mutex);
                dev->irq_enabled = 0;
                mutex_unlock(&dev->struct_mutex);
        }

        return ret;
}
EXPORT_SYMBOL(drm_irq_install);
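
/*
 * Example (illustrative sketch only): with the vblank state initialized, the
 * handler itself is installed either via the DRM_CONTROL ioctl (see
 * drm_control() below) or directly by a modesetting driver at load time:
 *
 *	ret = drm_irq_install(dev);
 *	if (ret)
 *		DRM_ERROR("failed to install IRQ handler: %d\n", ret);
 */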

/**
 * Uninstall the IRQ handler.
 *
 * \param dev DRM device.
 *
 * Calls the driver's \c drm_driver_irq_uninstall() function, and stops the irq.
 */
int drm_irq_uninstall(struct drm_device * dev)
{
        int irq_enabled;

        if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
                return -EINVAL;

        mutex_lock(&dev->struct_mutex);
        irq_enabled = dev->irq_enabled;
        dev->irq_enabled = 0;
        mutex_unlock(&dev->struct_mutex);

        if (!irq_enabled)
                return -EINVAL;

        DRM_DEBUG("irq=%d\n", dev->irq);

        dev->driver->irq_uninstall(dev);

        free_irq(dev->irq, dev);

        drm_vblank_cleanup(dev);

        drm_hotplug_cleanup(dev);

        dev->locked_tasklet_func = NULL;

        return 0;
}
EXPORT_SYMBOL(drm_irq_uninstall);

/**
 * IRQ control ioctl.
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg user argument, pointing to a drm_control structure.
 * \return zero on success or a negative number on failure.
 *
 * Calls irq_install() or irq_uninstall() according to \p arg.
 */
int drm_control(struct drm_device *dev, void *data,
                struct drm_file *file_priv)
{
        struct drm_control *ctl = data;

        /* If we don't have an IRQ, fall back for compatibility reasons;
         * this used to be a separate function in drm_dma.h.
         */

        switch (ctl->func) {
        case DRM_INST_HANDLER:
                if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
                        return 0;
                if (drm_core_check_feature(dev, DRIVER_MODESET))
                        return 0;
                if (dev->if_version < DRM_IF_VERSION(1, 2) &&
                    ctl->irq != dev->irq)
                        return -EINVAL;
                return drm_irq_install(dev);
        case DRM_UNINST_HANDLER:
                if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
                        return 0;
                if (drm_core_check_feature(dev, DRIVER_MODESET))
                        return 0;
                return drm_irq_uninstall(dev);
        default:
                return -EINVAL;
        }
}

/**
 * drm_vblank_count - retrieve "cooked" vblank counter value
 * @dev: DRM device
 * @crtc: which counter to retrieve
 *
 * Fetches the "cooked" vblank count value that represents the number of
 * vblank events since the system was booted, including lost events due to
 * modesetting activity.
 */
u32 drm_vblank_count(struct drm_device *dev, int crtc)
{
        return atomic_read(&dev->_vblank_count[crtc]) +
                dev->vblank_offset[crtc];
}
EXPORT_SYMBOL(drm_vblank_count);

/**
 * drm_update_vblank_count - update the master vblank counter
 * @dev: DRM device
 * @crtc: counter to update
 *
 * Call back into the driver to update the appropriate vblank counter
 * (specified by @crtc).  Deal with wraparound, if it occurred, and
 * update the last read value so we can deal with wraparound on the next
 * call if necessary.
 */
void drm_update_vblank_count(struct drm_device *dev, int crtc)
{
        unsigned long irqflags;
        u32 cur_vblank, diff;

        /*
         * Interrupts were disabled prior to this call, so deal with counter
         * wrap if needed.
         * NOTE!  It's possible we lost a full dev->max_vblank_count events
         * here if the register is small or we had vblank interrupts off for
         * a long time.
         */
        cur_vblank = dev->driver->get_vblank_counter(dev, crtc);
        spin_lock_irqsave(&dev->vbl_lock, irqflags);
        if (cur_vblank < dev->last_vblank[crtc]) {
                diff = dev->max_vblank_count -
                        dev->last_vblank[crtc];
                diff += cur_vblank;
        } else {
                diff = cur_vblank - dev->last_vblank[crtc];
        }
        dev->last_vblank[crtc] = cur_vblank;
        spin_unlock_irqrestore(&dev->vbl_lock, irqflags);

        atomic_add(diff, &dev->_vblank_count[crtc]);
}
EXPORT_SYMBOL(drm_update_vblank_count);

/**
 * drm_vblank_get - get a reference count on vblank events
 * @dev: DRM device
 * @crtc: which CRTC to own
 *
 * Acquire a reference count on vblank events to avoid having them disabled
 * while in use.  Note callers will probably want to update the master counter
 * using drm_update_vblank_count() above before calling this routine so that
 * wakeups occur on the right vblank event.
 *
 * RETURNS
 * Zero on success, nonzero on failure.
 */
int drm_vblank_get(struct drm_device *dev, int crtc)
{
        unsigned long irqflags;
        int ret = 0;

        spin_lock_irqsave(&dev->vbl_lock, irqflags);
        /* Going from 0->1 means we have to enable interrupts again */
        if (atomic_add_return(1, &dev->vblank_refcount[crtc]) == 1 &&
            !dev->vblank_enabled[crtc]) {
                ret = dev->driver->enable_vblank(dev, crtc);
                if (ret)
                        atomic_dec(&dev->vblank_refcount[crtc]);
                else
                        dev->vblank_enabled[crtc] = 1;
        }
        spin_unlock_irqrestore(&dev->vbl_lock, irqflags);

        return ret;
}
EXPORT_SYMBOL(drm_vblank_get);

/**
 * drm_vblank_put - give up ownership of vblank events
 * @dev: DRM device
 * @crtc: which counter to give up
 *
 * Release ownership of a given vblank counter, turning off interrupts
 * if possible.
 */
void drm_vblank_put(struct drm_device *dev, int crtc)
{
        /* Last user schedules interrupt disable */
        if (atomic_dec_and_test(&dev->vblank_refcount[crtc]))
                mod_timer(&dev->vblank_disable_timer, jiffies + 5 * DRM_HZ);
}
EXPORT_SYMBOL(drm_vblank_put);
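
/*
 * Example (illustrative sketch only): driver code that needs vblank
 * interrupts around some operation brackets it with drm_vblank_get()/put(),
 * mirroring what drm_wait_vblank() below does.  The surrounding function is
 * a placeholder.
 *
 *	static int foo_wait_next_vblank(struct drm_device *dev, int crtc)
 *	{
 *		u32 target;
 *		int ret;
 *
 *		ret = drm_vblank_get(dev, crtc);
 *		if (ret)
 *			return ret;
 *
 *		drm_update_vblank_count(dev, crtc);
 *		target = drm_vblank_count(dev, crtc) + 1;
 *		DRM_WAIT_ON(ret, dev->vbl_queue[crtc], 3 * DRM_HZ,
 *			    (drm_vblank_count(dev, crtc) - target) <= (1 << 23));
 *
 *		drm_vblank_put(dev, crtc);
 *		return ret;
 *	}
 */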

/**
 * drm_modeset_ctl - handle vblank event counter changes across mode switch
 * @DRM_IOCTL_ARGS: standard ioctl arguments
 *
 * Applications should call the %_DRM_PRE_MODESET and %_DRM_POST_MODESET
 * ioctls around modesetting so that any lost vblank events are accounted for.
 */
int drm_modeset_ctl(struct drm_device *dev, void *data,
                    struct drm_file *file_priv)
{
        struct drm_modeset_ctl *modeset = data;
        int crtc, ret = 0;
        u32 new;

        crtc = modeset->crtc;
        if (crtc >= dev->num_crtcs) {
                ret = -EINVAL;
                goto out;
        }

        switch (modeset->cmd) {
        case _DRM_PRE_MODESET:
                dev->vblank_premodeset[crtc] =
                        dev->driver->get_vblank_counter(dev, crtc);
                break;
        case _DRM_POST_MODESET:
                new = dev->driver->get_vblank_counter(dev, crtc);
                dev->vblank_offset[crtc] = dev->vblank_premodeset[crtc] - new;
                break;
        default:
                ret = -EINVAL;
                break;
        }

out:
        return ret;
}
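
/*
 * Example (illustrative sketch only): user space is expected to bracket a
 * mode switch with the two commands so that the cooked vblank counter stays
 * monotonic.  DRM_IOCTL_MODESET_CTL and the struct drm_modeset_ctl layout are
 * assumed to match this tree's drm.h.
 *
 *	struct drm_modeset_ctl ctl = { .crtc = 0, .cmd = _DRM_PRE_MODESET };
 *
 *	ioctl(fd, DRM_IOCTL_MODESET_CTL, &ctl);
 *	// ... program the new mode ...
 *	ctl.cmd = _DRM_POST_MODESET;
 *	ioctl(fd, DRM_IOCTL_MODESET_CTL, &ctl);
 */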

/**
 * Wait for VBLANK.
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param data user argument, pointing to a drm_wait_vblank structure.
 * \return zero on success or a negative number on failure.
 *
 * Verifies the IRQ is installed.
 *
 * If a signal is requested, checks whether this task has already scheduled
 * the same signal for the same vblank sequence number; nothing needs to be
 * done in that case. If the number of tasks waiting for the interrupt exceeds
 * 100, the function fails. Otherwise a new entry is added to
 * drm_device::vbl_sigs for this task.
 *
 * If a signal is not requested, blocks until the requested vblank sequence
 * has passed.
 */
int drm_wait_vblank(struct drm_device *dev, void *data,
                    struct drm_file *file_priv)
{
        union drm_wait_vblank *vblwait = data;
        struct timeval now;
        int ret = 0;
        unsigned int flags, seq, crtc;

        if ((!dev->irq) || (!dev->irq_enabled))
                return -EINVAL;

        if (vblwait->request.type &
            ~(_DRM_VBLANK_TYPES_MASK | _DRM_VBLANK_FLAGS_MASK)) {
                DRM_ERROR("Unsupported type value 0x%x, supported mask 0x%x\n",
                          vblwait->request.type,
                          (_DRM_VBLANK_TYPES_MASK | _DRM_VBLANK_FLAGS_MASK));
                return -EINVAL;
        }

        flags = vblwait->request.type & _DRM_VBLANK_FLAGS_MASK;
        crtc = flags & _DRM_VBLANK_SECONDARY ? 1 : 0;

        if (crtc >= dev->num_crtcs)
                return -EINVAL;

        drm_update_vblank_count(dev, crtc);
        seq = drm_vblank_count(dev, crtc);

        switch (vblwait->request.type & _DRM_VBLANK_TYPES_MASK) {
        case _DRM_VBLANK_RELATIVE:
                vblwait->request.sequence += seq;
                vblwait->request.type &= ~_DRM_VBLANK_RELATIVE;
        case _DRM_VBLANK_ABSOLUTE:
                break;
        default:
                return -EINVAL;
        }

        if ((flags & _DRM_VBLANK_NEXTONMISS) &&
            (seq - vblwait->request.sequence) <= (1 << 23)) {
                vblwait->request.sequence = seq + 1;
        }

        if (flags & _DRM_VBLANK_SIGNAL) {
                unsigned long irqflags;
                struct list_head *vbl_sigs = &dev->vbl_sigs[crtc];
                struct drm_vbl_sig *vbl_sig, *tmp;

                spin_lock_irqsave(&dev->vbl_lock, irqflags);

                /* Check if this task has already scheduled the same signal
                 * for the same vblank sequence number; nothing to be done in
                 * that case
                 */
                list_for_each_entry_safe(vbl_sig, tmp, vbl_sigs, head) {
                        if (vbl_sig->sequence == vblwait->request.sequence
                            && vbl_sig->info.si_signo ==
                            vblwait->request.signal
                            && vbl_sig->task == current) {
                                spin_unlock_irqrestore(&dev->vbl_lock,
                                                       irqflags);
                                vblwait->reply.sequence = seq;
                                goto done;
                        }
                }

                if (atomic_read(&dev->vbl_signal_pending) >= 100) {
                        spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
                        return -EBUSY;
                }

                spin_unlock_irqrestore(&dev->vbl_lock, irqflags);

                vbl_sig = drm_calloc(1, sizeof(struct drm_vbl_sig),
                                     DRM_MEM_DRIVER);
                if (!vbl_sig)
                        return -ENOMEM;

                ret = drm_vblank_get(dev, crtc);
                if (ret) {
                        drm_free(vbl_sig, sizeof(struct drm_vbl_sig),
                                 DRM_MEM_DRIVER);
                        return ret;
                }

                atomic_inc(&dev->vbl_signal_pending);

                vbl_sig->sequence = vblwait->request.sequence;
                vbl_sig->info.si_signo = vblwait->request.signal;
                vbl_sig->task = current;

                spin_lock_irqsave(&dev->vbl_lock, irqflags);

                list_add_tail(&vbl_sig->head, vbl_sigs);

                spin_unlock_irqrestore(&dev->vbl_lock, irqflags);

                vblwait->reply.sequence = seq;
        } else {
                unsigned long cur_vblank;

                ret = drm_vblank_get(dev, crtc);
                if (ret)
                        return ret;
                DRM_WAIT_ON(ret, dev->vbl_queue[crtc], 3 * DRM_HZ,
                            (((cur_vblank = drm_vblank_count(dev, crtc))
                              - vblwait->request.sequence) <= (1 << 23)));
                drm_vblank_put(dev, crtc);
                do_gettimeofday(&now);

                vblwait->reply.tval_sec = now.tv_sec;
                vblwait->reply.tval_usec = now.tv_usec;
                vblwait->reply.sequence = cur_vblank;
        }

done:
        return ret;
}
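
/*
 * Example (illustrative sketch only): user space reaches this path through
 * DRM_IOCTL_WAIT_VBLANK; the snippet below waits for the next vblank on the
 * primary CRTC without requesting a signal.
 *
 *	union drm_wait_vblank vbl;
 *
 *	memset(&vbl, 0, sizeof(vbl));
 *	vbl.request.type = _DRM_VBLANK_RELATIVE;
 *	vbl.request.sequence = 1;
 *	if (ioctl(fd, DRM_IOCTL_WAIT_VBLANK, &vbl) == 0)
 *		printf("vblank %u at %ld.%06lds\n", vbl.reply.sequence,
 *		       (long)vbl.reply.tval_sec, (long)vbl.reply.tval_usec);
 */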

/**
 * Send the VBLANK signals.
 *
 * \param dev DRM device.
 * \param crtc CRTC where the vblank event occurred
 *
 * Sends a signal for each task in drm_device::vbl_sigs and empties the list.
 */
static void drm_vbl_send_signals(struct drm_device * dev, int crtc)
{
        struct drm_vbl_sig *vbl_sig, *tmp;
        struct list_head *vbl_sigs;
        unsigned int vbl_seq;
        unsigned long flags;

        spin_lock_irqsave(&dev->vbl_lock, flags);

        vbl_sigs = &dev->vbl_sigs[crtc];
        vbl_seq = drm_vblank_count(dev, crtc);

        list_for_each_entry_safe(vbl_sig, tmp, vbl_sigs, head) {
                if ((vbl_seq - vbl_sig->sequence) <= (1 << 23)) {
                        vbl_sig->info.si_code = vbl_seq;
                        send_sig_info(vbl_sig->info.si_signo,
                                      &vbl_sig->info, vbl_sig->task);

                        list_del(&vbl_sig->head);

                        drm_free(vbl_sig, sizeof(*vbl_sig),
                                 DRM_MEM_DRIVER);
                        atomic_dec(&dev->vbl_signal_pending);
                        drm_vblank_put(dev, crtc);
                }
        }

        spin_unlock_irqrestore(&dev->vbl_lock, flags);
}

/**
 * drm_handle_vblank - handle a vblank event
 * @dev: DRM device
 * @crtc: where this event occurred
 *
 * Drivers should call this routine in their vblank interrupt handlers to
 * update the vblank counter and send any signals that may be pending.
 */
void drm_handle_vblank(struct drm_device *dev, int crtc)
{
        drm_update_vblank_count(dev, crtc);
        DRM_WAKEUP(&dev->vbl_queue[crtc]);
        drm_vbl_send_signals(dev, crtc);
}
EXPORT_SYMBOL(drm_handle_vblank);
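
/*
 * Example (illustrative sketch only): a driver's interrupt handler forwards
 * each per-CRTC vblank interrupt here.  The register bits and the
 * status-read helper are placeholders, not real hardware definitions.
 *
 *	static irqreturn_t foo_irq_handler(DRM_IRQ_ARGS)
 *	{
 *		struct drm_device *dev = (struct drm_device *)arg;
 *		u32 status = foo_read_and_clear_irq_status(dev);
 *
 *		if (status & FOO_IRQ_VBLANK_PIPE0)
 *			drm_handle_vblank(dev, 0);
 *		if (status & FOO_IRQ_VBLANK_PIPE1)
 *			drm_handle_vblank(dev, 1);
 *
 *		return IRQ_HANDLED;
 *	}
 */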

/**
 * Send the HOTPLUG signals.
 *
 * \param dev DRM device.
 *
 * Sends a signal for each task in drm_device::hotplug_sigs and empties the list.
 */
static void drm_hotplug_send_signals(struct drm_device * dev)
{
        struct drm_hotplug_sig *hotplug_sig, *tmp;
        struct list_head *hotplug_sigs;
        unsigned long flags;

        spin_lock_irqsave(&dev->hotplug_lock, flags);

        hotplug_sigs = dev->hotplug_sigs;

        list_for_each_entry_safe(hotplug_sig, tmp, hotplug_sigs, head) {
                hotplug_sig->info.si_code = hotplug_sig->counter;

                send_sig_info(hotplug_sig->info.si_signo,
                              &hotplug_sig->info, hotplug_sig->task);

                list_del(&hotplug_sig->head);

                drm_free(hotplug_sig, sizeof(*hotplug_sig),
                         DRM_MEM_DRIVER);
                atomic_dec(&dev->hotplug_signal_pending);
        }

        spin_unlock_irqrestore(&dev->hotplug_lock, flags);
}

/**
 * drm_handle_hotplug - handle a hotplug event
 * @dev: DRM device
 *
 * Drivers should call this routine in their hotplug interrupt handlers.
 */
void drm_handle_hotplug(struct drm_device *dev)
{
        DRM_WAKEUP(&dev->hotplug_queue);
        drm_hotplug_send_signals(dev);
}
EXPORT_SYMBOL(drm_handle_hotplug);

/**
 * Tasklet wrapper function.
 *
 * \param data DRM device in disguise.
 *
 * Attempts to grab the HW lock and calls the driver callback on success. On
 * failure, leaves the lock marked as contended so the callback can be called
 * from drm_unlock().
 */
static void drm_locked_tasklet_func(unsigned long data)
{
        struct drm_device *dev = (struct drm_device *)data;
        unsigned long irqflags;

        spin_lock_irqsave(&dev->tasklet_lock, irqflags);

        if (!dev->locked_tasklet_func ||
            !drm_lock_take(&dev->primary->master->lock,
                           DRM_KERNEL_CONTEXT)) {
                spin_unlock_irqrestore(&dev->tasklet_lock, irqflags);
                return;
        }

        dev->primary->master->lock.lock_time = jiffies;
        atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);

        dev->locked_tasklet_func(dev);

        drm_lock_free(&dev->primary->master->lock,
                      DRM_KERNEL_CONTEXT);

        dev->locked_tasklet_func = NULL;

        spin_unlock_irqrestore(&dev->tasklet_lock, irqflags);
}

/**
 * Schedule a tasklet to call back a driver hook with the HW lock held.
 *
 * \param dev DRM device.
 * \param func Driver callback.
 *
 * This is intended for triggering actions that require the HW lock from an
 * interrupt handler. The lock will be grabbed ASAP after the interrupt handler
 * completes. Note that the callback may be called from interrupt or process
 * context, so it must not make any assumptions about this. Also, the HW lock
 * will be held with the kernel context or any client context.
 */
void drm_locked_tasklet(struct drm_device *dev, void (*func)(struct drm_device *))
{
        unsigned long irqflags;
        static DECLARE_TASKLET(drm_tasklet, drm_locked_tasklet_func, 0);

        if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ) ||
            test_bit(TASKLET_STATE_SCHED, &drm_tasklet.state))
                return;

        spin_lock_irqsave(&dev->tasklet_lock, irqflags);

        if (dev->locked_tasklet_func) {
                spin_unlock_irqrestore(&dev->tasklet_lock, irqflags);
                return;
        }

        dev->locked_tasklet_func = func;

        spin_unlock_irqrestore(&dev->tasklet_lock, irqflags);

        drm_tasklet.data = (unsigned long)dev;

        tasklet_hi_schedule(&drm_tasklet);
}
EXPORT_SYMBOL(drm_locked_tasklet);
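
/*
 * Example (illustrative sketch only): a driver that must touch hardware
 * under the HW lock, but cannot take that lock directly in its interrupt
 * handler, can defer the work through drm_locked_tasklet(); the helpers
 * below are placeholders.
 *
 *	static void foo_flip_complete(struct drm_device *dev)
 *	{
 *		// runs with the HW lock held, in tasklet or process context
 *		foo_finish_pageflip(dev);
 *	}
 *
 *	static irqreturn_t foo_irq_handler(DRM_IRQ_ARGS)
 *	{
 *		struct drm_device *dev = (struct drm_device *)arg;
 *
 *		if (foo_flip_pending(dev))
 *			drm_locked_tasklet(dev, foo_flip_complete);
 *		return IRQ_HANDLED;
 *	}
 */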